diff --git a/stable b/stable
index 6a2b0ac..992977a 120000
--- a/stable
+++ b/stable
@@ -1 +1 @@
-v1.0.1
\ No newline at end of file
+v1.1.0
\ No newline at end of file
diff --git a/v1 b/v1
index 6a2b0ac..992977a 120000
--- a/v1
+++ b/v1
@@ -1 +1 @@
-v1.0.1
\ No newline at end of file
+v1.1.0
\ No newline at end of file
diff --git a/v1.1 b/v1.1
new file mode 120000
index 0000000..992977a
--- /dev/null
+++ b/v1.1
@@ -0,0 +1 @@
+v1.1.0
\ No newline at end of file
diff --git a/v1.1.0/404.html b/v1.1.0/404.html
new file mode 100644
index 0000000..fd1e04c
--- /dev/null
+++ b/v1.1.0/404.html
@@ -0,0 +1,22 @@
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/v1.1.0/assets/API.md.dZ6S2l54.js b/v1.1.0/assets/API.md.dZ6S2l54.js
new file mode 100644
index 0000000..0f7aeff
--- /dev/null
+++ b/v1.1.0/assets/API.md.dZ6S2l54.js
@@ -0,0 +1,14 @@
+import{_ as i,c as s,o as e,a7 as a}from"./chunks/framework.Bblfi-75.js";const E=JSON.parse('{"title":"API","description":"","frontmatter":{},"headers":[],"relativePath":"API.md","filePath":"API.md","lastUpdated":null}'),n={name:"API.md"},l=a(`
`,7),t=[l];function r(o,p,d,h,c,k){return e(),s("div",null,t)}const u=i(n,[["render",r]]);export{E as __pageData,u as default};
diff --git a/v1.1.0/assets/API.md.dZ6S2l54.lean.js b/v1.1.0/assets/API.md.dZ6S2l54.lean.js
new file mode 100644
index 0000000..7104b72
--- /dev/null
+++ b/v1.1.0/assets/API.md.dZ6S2l54.lean.js
@@ -0,0 +1 @@
+import{_ as i,c as s,o as e,a7 as a}from"./chunks/framework.Bblfi-75.js";const E=JSON.parse('{"title":"API","description":"","frontmatter":{},"headers":[],"relativePath":"API.md","filePath":"API.md","lastUpdated":null}'),n={name:"API.md"},l=a("",7),t=[l];function r(o,p,d,h,c,k){return e(),s("div",null,t)}const u=i(n,[["render",r]]);export{E as __pageData,u as default};
diff --git a/v1.1.0/assets/app.BmNr5hyI.js b/v1.1.0/assets/app.BmNr5hyI.js
new file mode 100644
index 0000000..24df07e
--- /dev/null
+++ b/v1.1.0/assets/app.BmNr5hyI.js
@@ -0,0 +1,7 @@
+function __vite__mapDeps(indexes) {
+  if (!__vite__mapDeps.viteFileDeps) {
+    __vite__mapDeps.viteFileDeps = []
+  }
+  return indexes.map((i) => __vite__mapDeps.viteFileDeps[i])
+}
+import{j as o,a8 as p,a9 as u,aa as l,ab as c,ac as f,ad as d,ae as m,af as h,ag as g,ah as A,Y as P,d as _,u as v,l as R,z as w,ai as y,aj as C,ak as E,a6 as b}from"./chunks/framework.Bblfi-75.js";import{R as T}from"./chunks/theme.DPekzJrd.js";function i(e){if(e.extends){const a=i(e.extends);return{...a,...e,async enhanceApp(t){a.enhanceApp&&await a.enhanceApp(t),e.enhanceApp&&await e.enhanceApp(t)}}}return e}const s=i(T),S=_({name:"VitePressApp",setup(){const{site:e,lang:a,dir:t}=v();return R(()=>{w(()=>{document.documentElement.lang=a.value,document.documentElement.dir=t.value})}),e.value.router.prefetchLinks&&y(),C(),E(),s.setup&&s.setup(),()=>b(s.Layout)}});async function j(){globalThis.__VITEPRESS__=!0;const e=L(),a=D();a.provide(u,e);const t=l(e.route);return a.provide(c,t),a.component("Content",f),a.component("ClientOnly",d),Object.defineProperties(a.config.globalProperties,{$frontmatter:{get(){return t.frontmatter.value}},$params:{get(){return t.page.value.params}}}),s.enhanceApp&&await s.enhanceApp({app:a,router:e,siteData:m}),{app:a,router:e,data:t}}function D(){return h(S)}function L(){let e=o,a;return g(t=>{let n=A(t),r=null;return n&&(e&&(a=n),(e||a===n)&&(n=n.replace(/\.js$/,".lean.js")),r=P(()=>import(n),__vite__mapDeps([]))),o&&(e=!1),r},s.NotFound)}o&&j().then(({app:e,router:a,data:t})=>{a.go().then(()=>{p(a.route,t.site),e.mount("#app")})});export{j as createApp};
diff --git a/v1.1.0/assets/chunks/@localSearchIndexroot._4_Bk2_S.js b/v1.1.0/assets/chunks/@localSearchIndexroot._4_Bk2_S.js
new file mode 100644
index 0000000..1814047
--- /dev/null
+++ b/v1.1.0/assets/chunks/@localSearchIndexroot._4_Bk2_S.js
@@ -0,0 +1 @@
+const e='{"documentCount":44,"nextId":44,"documentIds":{"0":"/NeuroTreeModels.jl/v1.1.0/API#API","1":"/NeuroTreeModels.jl/v1.1.0/API#Training","2":"/NeuroTreeModels.jl/v1.1.0/API#Inference","3":"/NeuroTreeModels.jl/v1.1.0/design#NeuroTree-A-differentiable-tree-operator-for-tabular-data","4":"/NeuroTreeModels.jl/v1.1.0/design#Overview","5":"/NeuroTreeModels.jl/v1.1.0/design#Architecture","6":"/NeuroTreeModels.jl/v1.1.0/design#Node-weights","7":"/NeuroTreeModels.jl/v1.1.0/design#Leaf-weights","8":"/NeuroTreeModels.jl/v1.1.0/design#Tree-prediction","9":"/NeuroTreeModels.jl/v1.1.0/design#Composability","10":"/NeuroTreeModels.jl/v1.1.0/design#Benchmarks","11":"/NeuroTreeModels.jl/v1.1.0/design#Boston","12":"/NeuroTreeModels.jl/v1.1.0/design#Titanic","13":"/NeuroTreeModels.jl/v1.1.0/design#Year","14":"/NeuroTreeModels.jl/v1.1.0/design#MSRank","15":"/NeuroTreeModels.jl/v1.1.0/design#Yahoo","16":"/NeuroTreeModels.jl/v1.1.0/design#Higgs","17":"/NeuroTreeModels.jl/v1.1.0/design#Discussion","18":"/NeuroTreeModels.jl/v1.1.0/design#References","19":"/NeuroTreeModels.jl/v1.1.0/models#Models","20":"/NeuroTreeModels.jl/v1.1.0/models#NeuroTreeRegressor","21":"/NeuroTreeModels.jl/v1.1.0/models#NeuroTreeModel","22":"/NeuroTreeModels.jl/v1.1.0/quick-start#Getting-started-with-NeuroTreeModels.jl","23":"/NeuroTreeModels.jl/v1.1.0/quick-start#Installation","24":"/NeuroTreeModels.jl/v1.1.0/quick-start#Configuring-a-model","25":"/NeuroTreeModels.jl/v1.1.0/quick-start#Training","26":"/NeuroTreeModels.jl/v1.1.0/quick-start#Inference","27":"/NeuroTreeModels.jl/v1.1.0/quick-start#MLJ","28":"/NeuroTreeModels.jl/v1.1.0/quick-start#Benchmarks","29":"/NeuroTreeModels.jl/v1.1.0/tutorials-classification-iris#Classification-on-Iris-dataset","30":"/NeuroTreeModels.jl/v1.1.0/tutorials-classification-iris#Getting-started","31":"/NeuroTreeModels.jl/v1.1.0/tutorials-classification-iris#Preprocessing","32":"/NeuroTreeModels.jl/v1.1.0/tutorials-classification-iris#Training","33":"/NeuroTreeModels.jl/v1.1.0/tutorials-classification-iris#Diagnosis","34":"/NeuroTreeModels.jl/v1.1.0/tutorials-logistic-titanic#Logistic-Regression-on-Titanic-Dataset","35":"/NeuroTreeModels.jl/v1.1.0/tutorials-logistic-titanic#Getting-started","36":"/NeuroTreeModels.jl/v1.1.0/tutorials-logistic-titanic#Preprocessing","37":"/NeuroTreeModels.jl/v1.1.0/tutorials-logistic-titanic#Training","38":"/NeuroTreeModels.jl/v1.1.0/tutorials-logistic-titanic#Diagnosis","39":"/NeuroTreeModels.jl/v1.1.0/tutorials-regression-boston#Regression-on-Boston-Housing-Dataset","40":"/NeuroTreeModels.jl/v1.1.0/tutorials-regression-boston#Getting-started","41":"/NeuroTreeModels.jl/v1.1.0/tutorials-regression-boston#Preprocessing","42":"/NeuroTreeModels.jl/v1.1.0/tutorials-regression-boston#Training","43":"/NeuroTreeModels.jl/v1.1.0/tutorials-regression-boston#Diagnosis"},"fieldIds":{"title":0,"titles":1,"text":2},"fieldLength":{"0":[1,1,1],"1":[1,1,68],"2":[1,1,19],"3":[8,1,1],"4":[1,8,108],"5":[1,8,149],"6":[2,9,85],"7":[2,9,168],"8":[2,9,1],"9":[1,8,8],"10":[1,8,117],"11":[1,11,31],"12":[1,11,27],"13":[1,11,31],"14":[1,11,27],"15":[1,11,28],"16":[1,11,26],"17":[1,8,116],"18":[1,8,49],"19":[1,1,1],"20":[1,1,293],"21":[1,1,26],"22":[5,1,1],"23":[1,5,4],"24":[3,5,24],"25":[1,5,51],"26":[1,5,5],"27":[1,5,24],"28":[1,5,13],"29":[4,1,30],"30":[2,4,22],"31":[1,4,48],"32":[1,4,56],"33":[1,4,50],"34":[5,1,41],"35":[2,5,24],"36":[1,5,130],"37":[1,5,55],"38":[1,5,43],"39":[5,1,34],"40":[2,5,23],"41":[1,5,52],"42":[1,5,55],"43":[1,5,46]},"averageFieldLength":[1.6818181818181819,5.40909090909
091,50.25],"storedFields":{"0":{"title":"API","titles":[]},"1":{"title":"Training","titles":["API"]},"2":{"title":"Inference","titles":["API"]},"3":{"title":"NeuroTree - A differentiable tree operator for tabular data","titles":[]},"4":{"title":"Overview","titles":["NeuroTree - A differentiable tree operator for tabular data"]},"5":{"title":"Architecture","titles":["NeuroTree - A differentiable tree operator for tabular data"]},"6":{"title":"Node weights","titles":["NeuroTree - A differentiable tree operator for tabular data","Architecture"]},"7":{"title":"Leaf weights","titles":["NeuroTree - A differentiable tree operator for tabular data","Architecture"]},"8":{"title":"Tree prediction","titles":["NeuroTree - A differentiable tree operator for tabular data","Architecture"]},"9":{"title":"Composability","titles":["NeuroTree - A differentiable tree operator for tabular data"]},"10":{"title":"Benchmarks","titles":["NeuroTree - A differentiable tree operator for tabular data"]},"11":{"title":"Boston","titles":["NeuroTree - A differentiable tree operator for tabular data","Benchmarks","Tree prediction"]},"12":{"title":"Titanic","titles":["NeuroTree - A differentiable tree operator for tabular data","Benchmarks","Tree prediction"]},"13":{"title":"Year","titles":["NeuroTree - A differentiable tree operator for tabular data","Benchmarks","Tree prediction"]},"14":{"title":"MSRank","titles":["NeuroTree - A differentiable tree operator for tabular data","Benchmarks","Tree prediction"]},"15":{"title":"Yahoo","titles":["NeuroTree - A differentiable tree operator for tabular data","Benchmarks","Tree prediction"]},"16":{"title":"Higgs","titles":["NeuroTree - A differentiable tree operator for tabular data","Benchmarks","Tree prediction"]},"17":{"title":"Discussion","titles":["NeuroTree - A differentiable tree operator for tabular data"]},"18":{"title":"References","titles":["NeuroTree - A differentiable tree operator for tabular data"]},"19":{"title":"Models","titles":[]},"20":{"title":"NeuroTreeRegressor","titles":["Models"]},"21":{"title":"NeuroTreeModel","titles":["Models"]},"22":{"title":"Getting started with NeuroTreeModels.jl","titles":[]},"23":{"title":"Installation","titles":["Getting started with NeuroTreeModels.jl"]},"24":{"title":"Configuring a model","titles":["Getting started with NeuroTreeModels.jl"]},"25":{"title":"Training","titles":["Getting started with NeuroTreeModels.jl"]},"26":{"title":"Inference","titles":["Getting started with NeuroTreeModels.jl"]},"27":{"title":"MLJ","titles":["Getting started with NeuroTreeModels.jl"]},"28":{"title":"Benchmarks","titles":["Getting started with NeuroTreeModels.jl"]},"29":{"title":"Classification on Iris dataset","titles":[]},"30":{"title":"Getting started","titles":["Classification on Iris dataset"]},"31":{"title":"Preprocessing","titles":["Classification on Iris dataset"]},"32":{"title":"Training","titles":["Classification on Iris dataset"]},"33":{"title":"Diagnosis","titles":["Classification on Iris dataset"]},"34":{"title":"Logistic Regression on Titanic Dataset","titles":[]},"35":{"title":"Getting started","titles":["Logistic Regression on Titanic Dataset"]},"36":{"title":"Preprocessing","titles":["Logistic Regression on Titanic Dataset"]},"37":{"title":"Training","titles":["Logistic Regression on Titanic Dataset"]},"38":{"title":"Diagnosis","titles":["Logistic Regression on Titanic Dataset"]},"39":{"title":"Regression on Boston Housing Dataset","titles":[]},"40":{"title":"Getting started","titles":["Regression on Boston Housing 
Dataset"]},"41":{"title":"Preprocessing","titles":["Regression on Boston Housing Dataset"]},"42":{"title":"Training","titles":["Regression on Boston Housing Dataset"]},"43":{"title":"Diagnosis","titles":["Regression on Boston Housing Dataset"]}},"dirtCount":0,"index":[[">",{"2":{"36":1,"38":2,"41":2}}],["|>",{"2":{"20":1,"27":1}}],["xnew",{"2":{"20":2}}],["x",{"2":{"20":8,"25":1,"27":3,"36":3,"41":4}}],["xgboost",{"2":{"4":1,"10":1,"11":1,"12":1,"13":1,"14":1,"15":1,"16":1,"18":1}}],["qwicen",{"2":{"18":1}}],["687",{"2":{"15":1}}],["635",{"2":{"13":1}}],["624",{"2":{"13":1}}],["626",{"2":{"13":1}}],["627",{"2":{"13":1}}],["6",{"2":{"13":1}}],["652",{"2":{"13":1}}],["615",{"2":{"12":1}}],["673",{"2":{"12":1}}],["561",{"2":{"15":1}}],["540",{"2":{"15":1}}],["547",{"2":{"15":1}}],["545",{"2":{"15":1}}],["558",{"2":{"14":1}}],["553",{"2":{"14":1}}],["554",{"2":{"14":2}}],["503",{"2":{"14":2}}],["504",{"2":{"14":1}}],["5",{"2":{"14":2,"20":2,"24":1,"25":1,"38":2}}],["578",{"2":{"14":1}}],["584",{"2":{"15":1}}],["58",{"2":{"12":1}}],["519",{"2":{"10":1}}],["515",{"2":{"10":1}}],["461",{"2":{"16":1}}],["464",{"2":{"16":2}}],["462",{"2":{"14":1,"16":1}}],["452",{"2":{"16":1}}],["417",{"2":{"15":1}}],["497",{"2":{"14":1}}],["407",{"2":{"12":1}}],["4",{"2":{"11":2,"13":1}}],["9833333333333333",{"2":{"33":1}}],["95",{"2":{"33":1}}],["946",{"2":{"11":1}}],["947",{"2":{"11":1}}],["926",{"2":{"11":1}}],["927",{"2":{"11":1}}],["935",{"2":{"11":1}}],["9",{"2":{"11":2}}],["90",{"2":{"10":1,"17":1}}],["7180",{"2":{"16":1}}],["775",{"2":{"16":1}}],["779",{"2":{"16":1}}],["776",{"2":{"16":2}}],["781",{"2":{"15":1,"16":1}}],["794",{"2":{"15":1}}],["796",{"2":{"15":1}}],["798",{"2":{"15":1}}],["797",{"2":{"15":1}}],["79",{"2":{"13":1}}],["76",{"2":{"13":1}}],["7",{"2":{"10":1,"11":1,"12":1}}],["709",{"2":{"10":1}}],["8985784079860025",{"2":{"43":1}}],["891",{"2":{"10":1}}],["8426966292134831",{"2":{"38":1}}],["8527349228611",{"2":{"38":1}}],["85",{"2":{"38":1}}],["80",{"2":{"13":4}}],["836",{"2":{"12":2}}],["821",{"2":{"12":1}}],["828",{"2":{"12":2}}],["865",{"2":{"11":1}}],["8",{"2":{"11":1,"13":1,"31":1,"36":1,"41":1}}],["877",{"2":{"10":1}}],["y",{"2":{"20":7,"25":2,"27":2}}],["yahoo",{"0":{"15":1},"2":{"17":2}}],["yahoorank",{"2":{"10":1}}],["year",{"0":{"13":1},"2":{"10":1,"17":1}}],["ȳ",{"2":{"7":3}}],["δnw",{"2":{"7":3}}],["δ",{"2":{"7":1}}],["+=",{"2":{"7":1}}],["+1",{"2":{"7":1}}],["+",{"2":{"7":3,"43":2}}],["zygote",{"2":{"7":1}}],["juliatrain",{"2":{"36":1}}],["julia>",{"2":{"33":1,"38":1,"43":1}}],["juliajulia>",{"2":{"33":1,"38":1,"43":1}}],["juliaconfig",{"2":{"32":1,"37":1,"42":1}}],["juliadf",{"2":{"31":1,"36":1,"41":1}}],["juliap",{"2":{"26":1,"33":1,"38":1,"43":1}}],["julianobs",{"2":{"25":1}}],["julianeurotreemodel",{"2":{"21":1}}],["julianeurotreeregressor",{"2":{"20":1}}],["julia",{"2":{"23":1}}],["juliausing",{"2":{"20":2,"24":1,"27":1,"30":1,"35":1,"40":1}}],["juliam",{"2":{"20":2}}],["juliafunction",{"2":{"1":1,"7":2}}],["j",{"2":{"7":6}}],["jl",{"0":{"22":1},"1":{"23":1,"24":1,"25":1,"26":1,"27":1,"28":1},"2":{"7":2,"10":1,"20":1,"27":1,"28":1}}],["=>",{"2":{"36":8,"41":4}}],["==",{"2":{"33":2,"38":2}}],["=",{"2":{"5":1,"7":13,"20":17,"24":5,"25":5,"26":1,"27":4,"31":9,"32":2,"33":2,"36":8,"37":2,"38":2,"41":8,"42":2,"43":2}}],["∑",{"2":{"5":1}}],["10",{"2":{"24":1}}],["15",{"2":{"14":1}}],["17",{"2":{"13":1}}],["1330",{"2":{"16":1}}],["1390",{"2":{"16":1}}],["13",{"2":{"11":1}}],["136",{"2":{"10":1,"17":1}}],["19",{"2":{"11":2}}],["192",{"2":{"10":1}}],["18",{"2":{"11":1,"13":1}}],["123
",{"2":{"30":1,"35":1,"40":1}}],["12300",{"2":{"16":1}}],["120",{"2":{"15":1}}],["12",{"2":{"11":1,"14":1}}],["11",{"2":{"10":1,"13":1}}],["161",{"2":{"15":1}}],["16",{"2":{"10":1,"24":1}}],["1+leaf",{"2":{"7":2}}],["1",{"2":{"5":3,"7":8,"10":1,"13":1,"14":2,"20":6,"25":1,"31":2,"33":1,"36":2,"41":2}}],["0f",{"2":{"20":1}}],["0326",{"2":{"12":1}}],["0379",{"2":{"12":1}}],["0511",{"2":{"11":1}}],["0648",{"2":{"11":1}}],["000",{"2":{"10":2,"20":1,"25":1}}],["0",{"2":{"5":4,"7":1,"11":9,"12":14,"13":7,"14":11,"15":15,"16":15,"17":1,"20":3,"31":1,"33":2,"36":1,"38":4,"41":1,"43":1}}],["3287859731914597",{"2":{"43":1}}],["37",{"2":{"14":2}}],["375",{"2":{"12":1}}],["39",{"2":{"14":1}}],["390",{"2":{"12":1}}],["388",{"2":{"12":1}}],["382",{"2":{"12":1}}],["345",{"2":{"10":1}}],["3",{"2":{"5":1,"13":1,"20":1}}],["→",{"2":{"5":2}}],["2620",{"2":{"16":1}}],["244",{"2":{"15":1}}],["25",{"2":{"11":1}}],["206",{"2":{"11":1}}],["200",{"2":{"10":1}}],["280",{"2":{"13":1}}],["28",{"2":{"10":1}}],["2^n",{"2":{"20":2}}],["2^depth",{"2":{"7":1,"20":1}}],["2^",{"2":{"7":1}}],["2",{"2":{"5":1,"6":2,"7":3,"10":2,"13":3,"20":3,"32":1,"36":1,"37":1,"42":1,"43":1}}],["hidden",{"2":{"20":2}}],["higher",{"2":{"20":1}}],["highly",{"2":{"18":1}}],["highlighted",{"2":{"5":1}}],["higgs",{"0":{"16":1},"2":{"10":1,"17":1}}],["https",{"2":{"18":2}}],["having",{"2":{"20":1}}],["have",{"2":{"20":1,"34":1}}],["has",{"2":{"17":1,"20":1}}],["harder",{"2":{"17":1}}],["hardsigmoid",{"2":{"17":1}}],["hard",{"2":{"5":3,"6":1}}],["housing",{"0":{"39":1},"1":{"40":1,"41":1,"42":1,"43":1},"2":{"10":1,"39":3}}],["how",{"2":{"5":1,"6":2}}],["hyper",{"2":{"10":1,"20":5}}],["here",{"2":{"5":1}}],["hence",{"2":{"5":1}}],["u",{"2":{"39":1}}],["usage",{"2":{"32":1,"37":1,"42":1}}],["use",{"2":{"7":1,"20":2,"29":1,"32":1,"34":1,"37":1,"39":1,"42":1}}],["used",{"2":{"7":1,"20":2,"34":1}}],["using",{"2":{"5":1,"20":3,"30":5,"32":1,"35":6,"37":1,"40":5,"42":1}}],["unneeded",{"2":{"36":1}}],["unordered",{"2":{"36":2}}],["unknown",{"2":{"17":1}}],["until",{"2":{"10":1}}],["underlying",{"2":{"6":1,"7":1}}],["unlike",{"2":{"4":1,"7":1}}],["lr=2e",{"2":{"32":1,"37":1,"42":1}}],["lr=1",{"2":{"20":1}}],["lt",{"2":{"20":2}}],["large",{"2":{"17":3}}],["layers",{"2":{"20":1}}],["layer",{"2":{"4":1}}],["libraries",{"2":{"28":1}}],["lightgbm",{"2":{"10":1,"11":1,"12":1,"13":1,"14":1,"15":1,"16":1,"18":1}}],["link",{"2":{"10":1}}],["like",{"2":{"4":1,"5":1,"7":1}}],["lw",{"2":{"7":2}}],["load",{"2":{"20":2,"27":1,"30":1,"35":1,"40":1}}],["lower",{"2":{"20":1}}],["loss",{"2":{"20":2,"24":1}}],["loss=",{"2":{"20":3,"32":1,"37":1,"42":1}}],["loops",{"2":{"7":1}}],["logistic",{"0":{"34":1},"1":{"35":1,"36":1,"37":1,"38":1},"2":{"10":2,"34":1}}],["log2",{"2":{"7":1}}],["logloss",{"2":{"1":1,"12":1,"16":1,"20":1,"37":2}}],["logger=false",{"2":{"1":2}}],["length",{"2":{"7":1,"29":2}}],["levelcode",{"2":{"31":1,"36":1}}],["level",{"2":{"7":1,"10":2}}],["levels",{"2":{"4":1,"20":1,"36":1}}],["left",{"2":{"6":1}}],["leads",{"2":{"7":1}}],["leaf",{"0":{"7":1},"2":{"5":10,"7":8,"20":1}}],["leaf3",{"2":{"5":1}}],["learning",{"2":{"4":1,"18":1,"20":2}}],["learn",{"2":{"4":1}}],["learned",{"2":{"4":4}}],["leaves",{"2":{"4":1,"20":1}}],["gt",{"2":{"20":3}}],["github",{"2":{"18":2}}],["gini",{"2":{"11":1,"13":1}}],["given",{"2":{"5":2,"6":1,"20":1}}],["gpuid=0",{"2":{"20":1}}],["gpu",{"2":{"7":1,"20":2}}],["goes",{"2":{"5":1}}],["generator",{"2":{"20":2}}],["generated",{"2":{"10":2}}],["general",{"2":{"4":2,"9":1}}],["getting",{"0":{"22":1,"30":1,"35":1,"40":1},"1":{"23":1,"
24":1,"25":1,"26":1,"27":1,"28":1}}],["get",{"2":{"5":1,"33":1,"38":1,"43":1}}],["grid",{"2":{"10":1}}],["gray",{"2":{"5":1}}],["gradients",{"2":{"20":1}}],["gradient",{"2":{"5":1,"18":2}}],["green",{"2":{"5":1}}],["greediness",{"2":{"4":1}}],["gaussian",{"2":{"1":1,"20":2}}],["bind",{"2":{"20":1}}],["binary",{"2":{"4":1,"5":4,"6":1}}],["bool",{"2":{"36":3}}],["boosted",{"2":{"32":1,"37":1,"42":1}}],["boosting",{"2":{"4":2,"18":3}}],["bostonhousing",{"2":{"41":1}}],["boston",{"0":{"11":1,"39":1},"1":{"40":1,"41":1,"42":1,"43":1},"2":{"10":1,"17":1,"20":1,"27":1,"39":2}}],["both",{"2":{"7":1,"17":1,"20":1}}],["brevity",{"2":{"7":1}}],["break",{"2":{"6":1}}],["branch",{"2":{"7":1}}],["byrow",{"2":{"36":2}}],["by",{"2":{"5":1,"20":2,"33":1,"36":1,"38":1,"39":1,"43":1}}],["batchsize=2048",{"2":{"20":1}}],["batch",{"2":{"7":3,"20":1}}],["basic",{"2":{"5":2}}],["based",{"2":{"5":1,"20":1}}],["backward",{"2":{"7":3}}],["background",{"2":{"5":1}}],["back",{"2":{"5":1}}],["bagging",{"2":{"4":2}}],["blocks",{"2":{"4":1,"20":1}}],["buillt",{"2":{"20":1}}],["building",{"2":{"4":1,"25":1}}],["built",{"2":{"4":1}}],["but",{"2":{"4":1}}],["been",{"2":{"34":1}}],["before",{"2":{"31":1,"41":1}}],["begin",{"2":{"30":1,"35":1,"40":1}}],["benchmarking",{"2":{"28":1}}],["benchmarks",{"0":{"10":1,"28":1},"1":{"11":1,"12":1,"13":1,"14":1,"15":1,"16":1},"2":{"10":1}}],["best",{"2":{"10":1,"20":1}}],["below",{"2":{"6":1,"7":1,"10":1}}],["becomes",{"2":{"5":1}}],["be",{"2":{"1":2,"5":2,"6":1,"7":1,"17":2,"20":7,"33":1,"34":1,"36":2,"38":1}}],["iris",{"0":{"29":1},"1":{"30":1,"31":1,"32":1,"33":1},"2":{"29":2,"31":2,"34":1}}],["id",{"2":{"20":1}}],["identity",{"2":{"20":1}}],["imputed",{"2":{"36":1}}],["imported",{"2":{"20":1}}],["improves",{"2":{"20":1}}],["improvement",{"2":{"17":1}}],["improving",{"2":{"10":1}}],["implementing",{"2":{"20":1}}],["implement",{"2":{"7":1}}],["implementation",{"2":{"5":1,"7":1,"10":1}}],["implements",{"2":{"5":1}}],["i+1",{"2":{"7":1}}],["i>>1",{"2":{"7":4}}],["i",{"2":{"7":9}}],["if",{"2":{"6":1}}],["illustrate",{"2":{"5":1,"6":1}}],["illustrated",{"2":{"4":1}}],["ie",{"2":{"5":1}}],["its",{"2":{"17":1}}],["iteration",{"2":{"7":1}}],["it",{"2":{"4":1,"5":1,"6":1,"7":1,"20":1,"34":1,"39":1}}],["ismissing",{"2":{"36":2}}],["isolate",{"2":{"17":1}}],["isolation",{"2":{"4":1}}],["is",{"2":{"2":1,"4":4,"5":8,"6":3,"7":1,"10":7,"17":2,"20":4,"21":2,"24":1,"25":1,"28":1,"29":1,"34":1,"36":4,"39":1}}],["information",{"2":{"39":1}}],["info",{"2":{"36":1}}],["infer",{"2":{"2":2}}],["inference",{"0":{"2":1,"26":1},"2":{"2":1,"20":1}}],["incl",{"2":{"36":1}}],["included",{"2":{"29":1,"34":1,"39":1}}],["installation",{"0":{"23":1}}],["instance",{"2":{"20":3}}],["instruct",{"2":{"7":1}}],["init",{"2":{"20":1}}],["input",{"2":{"20":2,"36":2}}],["individual",{"2":{"34":1}}],["indicator",{"2":{"36":1}}],["indicating",{"2":{"1":1}}],["indices",{"2":{"31":3,"36":4,"41":4}}],["independent",{"2":{"20":1}}],["indexing",{"2":{"5":1}}],["index",{"2":{"5":2,"7":1}}],["inducing",{"2":{"17":1}}],["in",{"2":{"4":3,"5":6,"6":2,"7":2,"10":3,"17":3,"20":8,"29":1,"34":1,"36":2,"39":2}}],["int",{"2":{"7":1,"31":1,"36":1,"41":1}}],["into",{"2":{"5":2,"6":1,"31":1,"36":1}}],["introduce",{"2":{"5":1}}],["introduces",{"2":{"4":1}}],["integer",{"2":{"20":1}}],["integrate",{"2":{"7":1}}],["integration",{"2":{"4":2}}],["interface",{"2":{"20":3,"27":1}}],["internal",{"2":{"1":1,"20":3}}],["intended",{"2":{"7":1}}],["wd=0",{"2":{"20":1}}],["was",{"2":{"7":2}}],["way",{"2":{"5":1}}],["width",{"2":{"29":2}}],["will",{"2":{"29"
:1,"30":1,"31":1,"34":2,"35":1,"39":1,"40":1,"41":1}}],["wise",{"2":{"7":1}}],["without",{"2":{"5":1}}],["with",{"0":{"22":1},"1":{"23":1,"24":1,"25":1,"26":1,"27":1,"28":1},"2":{"4":1,"5":1,"7":1,"9":1,"10":4,"17":3,"18":1,"20":6,"24":1,"25":1,"36":2}}],["within",{"2":{"4":1,"5":1}}],["whose",{"2":{"20":2}}],["whether",{"2":{"20":1,"36":1}}],["when",{"2":{"20":2}}],["where",{"2":{"2":1,"4":1,"5":2,"6":2,"17":1,"20":1}}],["which",{"2":{"6":1,"7":1,"17":1,"20":1,"29":1,"31":1,"33":1,"34":1,"38":1,"39":1}}],["weakness",{"2":{"17":1}}],["we",{"2":{"5":2,"6":1,"29":1,"30":1,"31":3,"32":4,"33":2,"34":1,"35":1,"37":4,"38":2,"39":1,"40":1,"41":3,"42":4,"43":2}}],["weighted",{"2":{"5":1}}],["weights",{"0":{"6":1,"7":1},"2":{"5":5,"7":5,"20":1}}],["weight",{"2":{"1":2,"5":1,"20":2}}],["work",{"2":{"4":1}}],["census",{"2":{"39":1}}],["create",{"2":{"36":1}}],["criteria",{"2":{"17":1}}],["classifier",{"2":{"34":1}}],["classification",{"0":{"29":1},"1":{"30":1,"31":1,"32":1,"33":1},"2":{"10":3,"18":1,"20":1,"33":1,"34":1}}],["class",{"2":{"31":6}}],["classes",{"2":{"20":1}}],["cabin",{"2":{"36":1}}],["capturing",{"2":{"36":1}}],["called",{"2":{"20":1}}],["categoricalarrays",{"2":{"30":1,"35":1,"40":1}}],["categorical",{"2":{"18":1,"31":2,"36":5}}],["catboost",{"2":{"10":1,"11":1,"12":1,"13":1,"14":1,"15":1,"16":1,"18":1}}],["carry",{"2":{"17":1}}],["cannot",{"2":{"7":1}}],["can",{"2":{"1":1,"5":1,"6":1,"17":3,"20":3,"31":1,"33":2,"36":2,"38":2,"41":1,"43":2}}],["current",{"2":{"7":1}}],["custom",{"2":{"7":1}}],["cw",{"2":{"7":12}}],["cpu",{"2":{"7":1,"20":2,"32":1,"37":1,"42":1}}],["check",{"2":{"20":2}}],["challenge",{"2":{"17":1}}],["challenging",{"2":{"7":1}}],["characteristic",{"2":{"17":1}}],["chain",{"2":{"4":1,"9":1}}],["child",{"2":{"6":2}}],["coalesce",{"2":{"36":1}}],["could",{"2":{"34":1}}],["count",{"2":{"20":1}}],["column",{"2":{"20":1}}],["columns",{"2":{"20":1}}],["collected",{"2":{"39":1}}],["collectively",{"2":{"5":1}}],["collection",{"2":{"4":1,"5":1,"21":1}}],["collaboratively",{"2":{"4":1}}],["cost",{"2":{"20":1}}],["code",{"2":{"10":1}}],["com",{"2":{"18":2}}],["common",{"2":{"7":1,"10":1,"36":1}}],["comparison",{"2":{"10":1}}],["compatible",{"2":{"4":2,"36":1}}],["composing",{"2":{"21":1}}],["composition",{"2":{"4":3}}],["composability",{"0":{"9":1}}],["compliant",{"2":{"7":1}}],["complete",{"2":{"5":1,"20":1}}],["compute",{"2":{"7":1,"20":1}}],["computation",{"2":{"7":1}}],["computing",{"2":{"7":1}}],["concerning",{"2":{"39":1}}],["continuous",{"2":{"20":2}}],["contains",{"2":{"20":1}}],["constructor",{"2":{"24":1,"32":1,"37":1,"42":1}}],["construct",{"2":{"20":2}}],["constructing",{"2":{"20":1}}],["considering",{"2":{"17":1}}],["considered",{"2":{"10":1}}],["consists",{"2":{"7":1,"29":1}}],["convert",{"2":{"31":1,"36":2}}],["conv",{"2":{"7":1}}],["condition",{"2":{"6":3,"17":1}}],["conditions",{"2":{"5":3,"17":1}}],["configuring",{"0":{"24":1}}],["configuration",{"2":{"4":1,"10":1,"24":1,"32":1,"37":1,"42":1}}],["config",{"2":{"1":2,"20":4,"24":1,"25":2,"32":1,"37":1,"42":1}}],["petal",{"2":{"29":2}}],["per",{"2":{"20":1}}],["perspective",{"2":{"5":1}}],["performed",{"2":{"10":1,"28":1}}],["performance",{"2":{"10":1,"17":2,"20":1}}],["performing",{"2":{"7":1}}],["perform",{"2":{"1":1,"6":1}}],["p",{"2":{"20":2,"27":1,"33":3,"38":3,"43":3}}],["pkg=neurotreemodels",{"2":{"20":1}}],["pytorch",{"2":{"18":1}}],["pick",{"2":{"17":1}}],["place",{"2":{"7":1}}],["passengerid",{"2":{"36":1}}],["passengers",{"2":{"34":1}}],["passing",{"2":{"33":1,"38":1,"43":1}}],["pass",{"2":{"32":1,"37":
1,"42":1}}],["packages",{"2":{"30":1,"35":1,"40":1}}],["package",{"2":{"29":1,"34":1,"39":1}}],["params",{"2":{"20":1}}],["parameter",{"2":{"10":1,"20":2}}],["parameters",{"2":{"10":1,"20":5}}],["parallelization",{"2":{"7":1}}],["parallelism",{"2":{"7":1}}],["particular",{"2":{"17":1}}],["parts",{"2":{"10":1}}],["part",{"2":{"7":1}}],["path",{"2":{"5":3}}],["potential",{"2":{"17":1}}],["position",{"2":{"7":2}}],["pooinnting",{"2":{"6":1}}],["pointing",{"2":{"6":1}}],["purpose",{"2":{"4":1}}],["processing",{"2":{"36":1}}],["prominent",{"2":{"28":1}}],["proportional",{"2":{"20":1}}],["problems",{"2":{"17":1}}],["problem",{"2":{"10":2,"33":1,"34":1}}],["probability",{"2":{"5":1,"6":1,"7":1}}],["product",{"2":{"5":1}}],["provide",{"2":{"5":2,"20":2}}],["provides",{"2":{"4":1}}],["pruned",{"2":{"5":1}}],["prepare",{"2":{"36":1}}],["preprocess",{"2":{"31":1,"41":1}}],["preprocessing",{"0":{"31":1,"36":1,"41":1}}],["predict",{"2":{"20":2,"27":1}}],["prediction",{"0":{"8":1},"1":{"11":1,"12":1,"13":1,"14":1,"15":1,"16":1},"2":{"4":1,"5":3,"20":1,"21":1}}],["predictions",{"2":{"4":3,"20":4,"33":1,"36":1,"38":1,"43":1}}],["present",{"2":{"17":2}}],["presented",{"2":{"10":1}}],["previous",{"2":{"4":1}}],["print",{"2":{"1":2,"32":1,"37":1,"42":1}}],["virginica",{"2":{"29":1}}],["view",{"2":{"5":2}}],["various",{"2":{"43":1}}],["variables",{"2":{"36":3}}],["variable",{"2":{"1":1,"20":1,"31":2,"36":1,"39":1,"41":1}}],["value",{"2":{"6":2,"17":3,"36":1,"39":1}}],["values",{"2":{"5":2,"20":2,"36":4}}],["versicolor",{"2":{"29":1}}],["very",{"2":{"17":1}}],["verbosity=1",{"2":{"1":2}}],["vector",{"2":{"1":2,"5":1}}],["our",{"2":{"31":1,"32":1,"33":2,"37":1,"38":2,"41":2,"42":1,"43":1}}],["outsize=3",{"2":{"32":1}}],["outsize=1",{"2":{"20":1}}],["outperform",{"2":{"17":1}}],["omitted",{"2":{"7":1}}],["object",{"2":{"20":1}}],["oblivious",{"2":{"18":1}}],["observations",{"2":{"10":5}}],["observation",{"2":{"6":1,"7":1}}],["obtained",{"2":{"5":1,"7":1,"10":1}}],["otherwise",{"2":{"6":1}}],["other",{"2":{"4":2,"7":1,"17":1}}],["optional",{"2":{"32":1,"37":1,"42":1}}],["optimizer",{"2":{"20":1}}],["optimiser",{"2":{"5":1}}],["optimal",{"2":{"4":1,"20":1}}],["operations",{"2":{"7":1,"20":1}}],["operators",{"2":{"4":1,"7":1}}],["operator",{"0":{"3":1},"1":{"4":1,"5":1,"6":1,"7":1,"8":1,"9":1,"10":1,"11":1,"12":1,"13":1,"14":1,"15":1,"16":1,"17":1,"18":1},"2":{"4":3,"5":1,"9":1}}],["override",{"2":{"20":2}}],["overview",{"0":{"4":1}}],["over",{"2":{"2":1,"4":1,"7":2,"33":1}}],["only",{"2":{"20":2}}],["onecold",{"2":{"33":2}}],["ones",{"2":{"5":1,"6":1,"7":1,"10":1}}],["one",{"2":{"1":1,"5":2,"17":1,"20":3}}],["on",{"0":{"29":1,"34":1,"39":1},"1":{"30":1,"31":1,"32":1,"33":1,"35":1,"36":1,"37":1,"38":1,"40":1,"41":1,"42":1,"43":1},"2":{"1":1,"5":1,"6":1,"7":2,"10":4,"17":3,"18":1,"20":1,"34":1,"36":1}}],["orderedfactor",{"2":{"20":1}}],["order",{"2":{"5":1}}],["or",{"2":{"1":2,"5":1,"7":1,"17":1,"20":4,"21":1,"36":3}}],["of",{"2":{"1":4,"2":1,"4":7,"5":10,"6":3,"7":5,"10":3,"17":8,"20":22,"21":3,"25":1,"29":3,"31":1,"32":1,"33":1,"34":1,"37":1,"38":1,"39":1,"42":1}}],["offset+step÷2",{"2":{"7":1}}],["offset",{"2":{"1":2,"7":8}}],["skipmissing",{"2":{"36":1}}],["same",{"2":{"20":1}}],["sample",{"2":{"5":1}}],["scitype",{"2":{"20":4}}],["scitypes",{"2":{"20":2}}],["schema",{"2":{"20":1}}],["scaling",{"2":{"20":1}}],["scale=1",{"2":{"20":1}}],["scalable",{"2":{"18":1}}],["slower",{"2":{"20":1}}],["system",{"2":{"18":1}}],["symbol",{"2":{"1":2}}],["squeeze",{"2":{"18":1}}],["squared",{"2":{"10":4}}],["short",{"2":{"20":1
}}],["shortcoming",{"2":{"4":1}}],["should",{"2":{"17":1,"33":1,"38":1}}],["specifies",{"2":{"31":1}}],["specific",{"2":{"17":1}}],["speed",{"2":{"20":1}}],["sparsity",{"2":{"17":2}}],["split=false",{"2":{"20":1}}],["split",{"2":{"6":1,"10":1,"20":2,"36":1,"41":1}}],["small",{"2":{"17":1}}],["separate",{"2":{"41":1}}],["sepal",{"2":{"29":2}}],["service",{"2":{"39":1}}],["sex",{"2":{"36":5}}],["see",{"2":{"25":1,"34":1}}],["seed",{"2":{"20":1,"30":1,"35":1,"40":1}}],["selected",{"2":{"6":2}}],["selection",{"2":{"6":3,"10":1,"17":1}}],["setdiff",{"2":{"31":2,"36":2,"41":2}}],["setosa",{"2":{"29":1}}],["set",{"2":{"6":3,"20":1,"36":1}}],["sequentially",{"2":{"4":1}}],["simple",{"2":{"33":1}}],["simultaneously",{"2":{"4":3}}],["significantly",{"2":{"20":1}}],["sigma",{"2":{"20":1}}],["situation",{"2":{"17":1}}],["size=1",{"2":{"20":1}}],["size=16",{"2":{"20":1}}],["size",{"2":{"7":4,"20":3}}],["single",{"2":{"5":1,"6":1}}],["since",{"2":{"5":1,"7":1}}],["survived",{"2":{"36":1}}],["survival",{"2":{"34":1}}],["supports",{"2":{"27":1,"36":1}}],["support",{"2":{"18":1,"25":1}}],["substituting",{"2":{"17":1}}],["sum",{"2":{"5":1,"21":1}}],["such",{"2":{"5":1,"6":1,"7":1,"17":2,"36":4,"43":1}}],["sophisticated",{"2":{"36":1}}],["some",{"2":{"36":1}}],["soft",{"2":{"5":2,"6":2,"17":2}}],["source",{"2":{"1":1,"2":1,"10":1,"20":1,"21":1}}],["s",{"2":{"4":1,"5":2,"6":3,"7":1,"17":1,"39":2}}],["std",{"2":{"40":1,"41":4,"43":2}}],["stump",{"2":{"17":1}}],["stem",{"2":{"17":1}}],["step+leaf",{"2":{"7":1}}],["step÷2+leaf",{"2":{"7":1}}],["step",{"2":{"7":2,"36":1}}],["steps",{"2":{"6":1}}],["stops",{"2":{"10":1}}],["stopping",{"2":{"1":3,"10":1,"25":1,"32":2,"37":2,"42":2}}],["statsbase",{"2":{"35":1}}],["status",{"2":{"34":1}}],["statistics",{"2":{"30":1,"35":1,"40":1}}],["state",{"2":{"10":1}}],["started",{"0":{"22":1,"30":1,"35":1,"40":1},"1":{"23":1,"24":1,"25":1,"26":1,"27":1,"28":1}}],["starting",{"2":{"7":1}}],["stacked",{"2":{"20":1}}],["stack",{"2":{"20":3}}],["stacktree",{"2":{"4":1,"9":1,"21":1}}],["standard",{"2":{"4":1}}],["string",{"2":{"1":2,"36":3}}],["k",{"2":{"7":4}}],["kwargs",{"2":{"20":2}}],["kwarg",{"2":{"1":2}}],["keyword",{"2":{"1":1,"20":2}}],["age",{"2":{"36":5}}],["against",{"2":{"10":3,"28":1}}],["augmented",{"2":{"18":1}}],["auto",{"2":{"7":1,"20":1,"25":1}}],["available",{"2":{"10":1}}],["averaged",{"2":{"4":1}}],["attention",{"2":{"18":1}}],["attempt",{"2":{"7":1}}],["at",{"2":{"7":1,"10":1,"28":1}}],["axes",{"2":{"7":1}}],["apply",{"2":{"43":1}}],["applied",{"2":{"20":3}}],["application",{"2":{"6":1}}],["applicable",{"2":{"5":1,"20":1}}],["approached",{"2":{"34":1}}],["approach",{"2":{"7":1,"36":3}}],["api",{"0":{"0":1},"1":{"1":1,"2":1},"2":{"1":1,"20":3,"36":1}}],["assessment",{"2":{"10":1}}],["associated",{"2":{"5":2}}],["as",{"2":{"5":2,"6":1,"7":3,"10":1,"17":1,"20":8,"34":1,"36":5,"43":1}}],["achieve",{"2":{"17":1}}],["act",{"2":{"20":1}}],["acta=",{"2":{"20":1}}],["activation",{"2":{"17":1,"20":1}}],["actual",{"2":{"7":1,"20":1}}],["acts",{"2":{"5":1,"7":1}}],["according",{"2":{"25":1,"36":1,"41":1}}],["account",{"2":{"4":1}}],["accuracy",{"2":{"12":1,"16":1,"33":1,"38":1}}],["accumulation",{"2":{"7":1}}],["accumulating",{"2":{"7":1}}],["across",{"2":{"4":1}}],["algos",{"2":{"10":1,"17":1}}],["algo",{"2":{"7":1,"10":1}}],["alternative",{"2":{"5":1}}],["although",{"2":{"4":1,"17":1,"34":1}}],["also",{"2":{"4":3,"34":1,"36":1}}],["allows",{"2":{"4":1}}],["all",{"2":{"4":4,"5":1,"6":1,"17":1,"21":1}}],["another",{"2":{"17":1,"36":1}}],["any",{"2":{"5":1,"20":2}}],["an",{"
2":{"4":1,"5":2,"20":6,"25":1,"36":2}}],["and",{"2":{"1":1,"4":4,"5":2,"6":1,"7":6,"10":2,"17":3,"18":2,"20":4,"25":1,"29":2,"30":1,"33":1,"35":1,"36":3,"38":1,"40":1,"41":2,"43":1}}],["abs",{"2":{"43":2}}],["absolute",{"2":{"43":1}}],["abstractvector",{"2":{"20":1}}],["abstractrng",{"2":{"20":1}}],["abstractdataframe",{"2":{"1":1,"2":1}}],["above",{"2":{"5":2,"6":1,"20":1,"25":1}}],["ability",{"2":{"4":1}}],["around",{"2":{"38":1}}],["argument",{"2":{"20":1,"32":1,"37":1,"42":1}}],["arguments",{"2":{"1":2,"20":2}}],["art",{"2":{"10":1}}],["architecture",{"0":{"5":1},"1":{"6":1,"7":1,"8":1},"2":{"5":1}}],["area",{"2":{"39":1}}],["aren",{"2":{"4":1}}],["are",{"2":{"4":3,"5":1,"6":2,"7":3,"10":1,"17":1,"20":4,"32":1,"36":3,"37":1,"42":1}}],["additional",{"2":{"25":1}}],["add",{"2":{"23":1}}],["address",{"2":{"4":1}}],["adjoint",{"2":{"7":1}}],["ad",{"2":{"7":3}}],["adopt",{"2":{"5":1}}],["adam",{"2":{"5":1}}],["adapted",{"2":{"4":1}}],["a",{"0":{"3":1,"24":1},"1":{"4":1,"5":1,"6":1,"7":1,"8":1,"9":1,"10":1,"11":1,"12":1,"13":1,"14":1,"15":1,"16":1,"17":1,"18":1},"2":{"1":3,"2":1,"4":3,"5":13,"6":4,"7":3,"10":2,"17":10,"18":2,"20":12,"21":3,"24":1,"25":1,"31":1,"32":2,"34":2,"36":5,"37":2,"42":2}}],["r",{"2":{"20":1,"25":1}}],["rng=123",{"2":{"20":1}}],["run",{"2":{"10":1}}],["rule",{"2":{"7":1}}],["right",{"2":{"6":1}}],["rows=",{"2":{"20":1}}],["row",{"2":{"7":1}}],["round",{"2":{"36":1,"41":1}}],["rounds=2",{"2":{"32":1,"37":1,"42":1}}],["rounds=9999",{"2":{"1":2}}],["rounds",{"2":{"20":1}}],["routing",{"2":{"5":1}}],["role",{"2":{"5":1}}],["ratio",{"2":{"31":2,"36":2,"41":2}}],["rate",{"2":{"20":1}}],["rather",{"2":{"5":1,"7":1}}],["randperm",{"2":{"31":1,"36":1,"41":1}}],["rand",{"2":{"20":1,"25":1}}],["randn",{"2":{"20":1,"25":1}}],["random",{"2":{"4":1,"10":1,"20":3,"30":2,"35":2,"40":2}}],["range",{"2":{"20":1}}],["ranking",{"2":{"10":2,"17":1}}],["remove",{"2":{"36":1}}],["real",{"2":{"36":1}}],["ready",{"2":{"32":1,"37":1,"42":1}}],["represents",{"2":{"39":1}}],["represented",{"2":{"5":1,"6":1,"7":1}}],["replaced",{"2":{"36":1}}],["report",{"2":{"20":2}}],["recommended",{"2":{"20":1,"36":1}}],["requiring",{"2":{"20":1}}],["required",{"2":{"1":2,"30":1,"35":1,"40":1}}],["references",{"0":{"18":1}}],["referred",{"2":{"6":1}}],["regression",{"0":{"34":1,"39":1},"1":{"35":1,"36":1,"37":1,"38":1,"40":1,"41":1,"42":1,"43":1},"2":{"10":6,"17":1,"18":1,"34":1}}],["regular",{"2":{"5":1,"21":1}}],["relaxing",{"2":{"5":1}}],["relies",{"2":{"5":1}}],["result",{"2":{"20":1}}],["results",{"2":{"20":1}}],["resulted",{"2":{"17":1}}],["resulting",{"2":{"4":1,"5":1}}],["residual",{"2":{"4":2}}],["returning",{"2":{"20":1}}],["returned",{"2":{"20":1}}],["return",{"2":{"1":2,"2":1,"7":2,"20":1}}],["embarked",{"2":{"36":1}}],["eg",{"2":{"20":1}}],["eta",{"2":{"20":1}}],["epochs",{"2":{"20":1}}],["efficient",{"2":{"18":1}}],["effect",{"2":{"17":2}}],["error",{"2":{"10":4,"43":1}}],["enable",{"2":{"32":1,"37":1,"42":1}}],["encountered",{"2":{"20":1}}],["ensembles",{"2":{"18":1}}],["entropt",{"2":{"17":1}}],["end",{"2":{"7":6}}],["engines",{"2":{"7":1}}],["eltype",{"2":{"7":1}}],["elements",{"2":{"6":1}}],["element",{"2":{"5":1,"7":1,"20":2}}],["ecosystem",{"2":{"7":1}}],["either",{"2":{"5":1,"20":2,"21":1,"36":1}}],["excitation",{"2":{"18":1}}],["except",{"2":{"6":1}}],["experiments",{"2":{"17":1}}],["examples",{"2":{"20":1}}],["example",{"2":{"5":1}}],["ex",{"2":{"4":1,"5":1,"17":1}}],["extend",{"2":{"4":1}}],["extent",{"2":{"4":1}}],["each",{"2":{"4":2,"5":5,"7":3,"10":2,"20":3}}],["early",{"2":{"1":3,"10
":1,"25":1,"32":2,"37":2,"42":2}}],["evotrees",{"2":{"10":1,"11":1,"12":1,"13":1,"14":1,"15":1,"16":1,"36":1}}],["evaluate",{"2":{"33":1,"38":1}}],["evaluated",{"2":{"10":1}}],["evaluation",{"2":{"1":2,"10":1,"25":1,"43":1}}],["eval",{"2":{"10":3,"33":2,"36":1,"38":2,"41":1,"43":2}}],["every",{"2":{"1":2,"32":1,"37":1,"42":1}}],["missing",{"2":{"17":1,"36":4}}],["minimized",{"2":{"20":1}}],["min",{"2":{"10":4}}],["medv",{"2":{"41":5,"43":2}}],["median",{"2":{"35":1,"36":2,"39":1}}],["mean",{"2":{"30":1,"33":2,"35":1,"36":1,"38":2,"40":1,"41":4,"43":5}}],["meaning",{"2":{"17":1}}],["meaningful",{"2":{"17":1}}],["measurements",{"2":{"29":1}}],["mechanism",{"2":{"17":1}}],["methodology",{"2":{"10":1}}],["methods",{"2":{"5":1}}],["metric=",{"2":{"32":1,"37":1,"42":1}}],["metric=nothing",{"2":{"1":2}}],["metric",{"2":{"1":2,"10":3,"25":1,"43":1}}],["msrank",{"0":{"14":1},"2":{"10":1,"17":2}}],["mse",{"2":{"1":1,"11":1,"13":1,"14":1,"15":1,"20":2,"24":1,"42":2}}],["models",{"0":{"19":1},"1":{"20":1,"21":1},"2":{"20":2}}],["model",{"0":{"24":1},"2":{"10":3,"11":1,"12":1,"13":1,"14":1,"15":1,"16":1,"20":9,"24":1,"25":1,"31":1,"32":4,"33":2,"34":2,"36":2,"37":4,"38":2,"41":1,"42":4,"43":1}}],["move",{"2":{"7":1}}],["more",{"2":{"7":1,"36":1}}],["made",{"2":{"21":1}}],["mach",{"2":{"20":7,"27":2}}],["machine",{"2":{"4":1,"20":3,"27":1}}],["manujosephv",{"2":{"18":1}}],["manually",{"2":{"7":1}}],["may",{"2":{"17":3}}],["materially",{"2":{"17":1}}],["matrix",{"2":{"7":1}}],["max",{"2":{"7":2,"20":1}}],["mask",{"2":{"5":3,"6":1}}],["mae",{"2":{"1":1,"20":1,"43":1}}],["mu",{"2":{"20":1}}],["multiheadattention",{"2":{"7":1}}],["multiplication",{"2":{"7":1}}],["multiple",{"2":{"4":1}}],["must",{"2":{"1":1,"20":2}}],["m",{"2":{"2":1,"20":4,"25":1,"26":1,"27":2,"32":1,"33":2,"37":1,"38":2,"42":1,"43":2}}],["mldatasets",{"2":{"29":1,"30":1,"31":1,"34":1,"35":1,"36":1,"39":1,"40":1,"41":1}}],["ml",{"2":{"28":1}}],["mlbenchmarks",{"2":{"10":1,"28":1}}],["mlp",{"2":{"9":1}}],["mljbase",{"2":{"20":2,"27":1}}],["mlj",{"0":{"27":1},"2":{"4":1,"20":5,"27":1}}],["mle",{"2":{"1":1,"20":3}}],["mlogloss",{"2":{"1":1,"20":1,"32":2}}],["mdash",{"2":{"1":1,"2":1,"20":1,"21":1}}],["df",{"2":{"31":10,"36":12,"41":6}}],["during",{"2":{"20":1}}],["diagnosis",{"0":{"33":1,"38":1,"43":1}}],["distinguishing",{"2":{"17":1}}],["discussion",{"0":{"17":1}}],["dimensions",{"2":{"7":1}}],["differentiate",{"2":{"7":1}}],["differentiation",{"2":{"7":1}}],["differentiable",{"0":{"3":1},"1":{"4":1,"5":1,"6":1,"7":1,"8":1,"9":1,"10":1,"11":1,"12":1,"13":1,"14":1,"15":1,"16":1,"17":1,"18":1},"2":{"4":1,"5":2,"6":1,"18":1}}],["different",{"2":{"4":1,"17":1,"29":1}}],["docs",{"2":{"25":1}}],["done",{"2":{"25":1}}],["do",{"2":{"20":2}}],["down",{"2":{"6":1}}],["dot",{"2":{"5":1}}],["dealing",{"2":{"36":1}}],["describes",{"2":{"34":1}}],["define",{"2":{"32":1,"37":1,"42":1}}],["defined",{"2":{"24":1}}],["defaults",{"2":{"20":2}}],["default",{"2":{"17":1,"20":2,"36":1}}],["device",{"2":{"20":1}}],["device=",{"2":{"20":1,"32":1,"37":1,"42":1}}],["deval",{"2":{"1":1,"31":1,"32":2,"33":2,"36":1,"37":2,"38":2,"41":2,"42":2,"43":2}}],["deval=nothing",{"2":{"1":2}}],["determination",{"2":{"20":1}}],["decay",{"2":{"20":1}}],["decisions",{"2":{"5":1,"6":1}}],["decision",{"2":{"5":8,"6":2,"17":1,"18":5}}],["deep",{"2":{"18":3}}],["dense",{"2":{"7":1}}],["derived",{"2":{"6":1,"39":1}}],["derives",{"2":{"6":1}}],["depth=4",{"2":{"32":1,"37":1}}],["depth=5",{"2":{"20":2,"27":1,"42":1}}],["depth=6",{"2":{"20":1}}],["depth",{"2":{"5":2,"7":5,"20":3,"24":1}}],[
"dataframes",{"2":{"20":1,"24":1,"30":1,"35":1,"40":1}}],["dataframe",{"2":{"20":2,"25":1,"31":1,"36":1,"41":1}}],["datasets",{"2":{"10":1,"17":2}}],["dataset",{"0":{"29":1,"34":1,"39":1},"1":{"30":1,"31":1,"32":1,"33":1,"35":1,"36":1,"37":1,"38":1,"40":1,"41":1,"42":1,"43":1},"2":{"10":1,"17":2,"29":2,"30":1,"31":1,"34":1,"35":1,"39":1,"40":1,"41":1}}],["data",{"0":{"3":1},"1":{"4":1,"5":1,"6":1,"7":1,"8":1,"9":1,"10":1,"11":1,"12":1,"13":1,"14":1,"15":1,"16":1,"17":1,"18":1},"2":{"1":1,"2":3,"4":1,"10":3,"18":3,"20":2,"33":1,"36":2,"38":1,"41":1,"43":1}}],["dtrain",{"2":{"1":2,"20":6,"25":4,"26":1,"31":1,"32":1,"33":2,"36":1,"37":1,"38":2,"41":4,"42":1,"43":2}}],["tutorial",{"2":{"34":1}}],["tuned",{"2":{"4":2}}],["typical",{"2":{"20":1}}],["typically",{"2":{"20":2}}],["types",{"2":{"29":1}}],["type",{"2":{"11":1,"12":1,"13":1,"14":1,"15":1,"16":1,"20":3,"21":1,"31":1}}],["two",{"2":{"17":1}}],["ticket",{"2":{"36":1}}],["tier",{"2":{"17":1}}],["time",{"2":{"11":1,"12":1,"13":1,"14":1,"15":1,"16":1}}],["titanic",{"0":{"12":1,"34":1},"1":{"35":1,"36":1,"37":1,"38":1},"2":{"10":1,"34":2,"36":1}}],["testing",{"2":{"33":1,"38":1,"43":1}}],["test",{"2":{"10":3}}],["terminal",{"2":{"7":1,"20":1}}],["technically",{"2":{"7":1}}],["t",{"2":{"4":1}}],["top",{"2":{"17":1}}],["to",{"2":{"4":2,"5":8,"6":8,"7":6,"17":1,"20":15,"25":1,"30":1,"31":1,"32":3,"33":1,"35":1,"36":5,"37":3,"38":1,"40":1,"41":2,"42":3,"43":1}}],["though",{"2":{"7":1}}],["three",{"2":{"10":1,"29":1}}],["threshold",{"2":{"6":2}}],["through",{"2":{"4":1,"7":2,"25":1}}],["than",{"2":{"5":1,"7":1}}],["that",{"2":{"4":2,"5":3,"6":2,"7":2,"10":1,"17":1,"36":1}}],["this",{"2":{"4":2,"17":1,"29":1,"33":1}}],["them",{"2":{"36":1}}],["then",{"2":{"6":1,"32":1,"33":1,"36":1,"37":1,"38":1,"42":1,"43":1}}],["these",{"2":{"5":1,"6":1}}],["the",{"2":{"1":3,"2":1,"4":14,"5":19,"6":8,"7":15,"10":13,"17":13,"20":26,"21":2,"24":1,"25":3,"27":1,"29":3,"30":2,"31":3,"32":3,"33":1,"34":6,"35":2,"36":4,"37":3,"38":1,"39":5,"40":2,"41":2,"42":3,"43":1}}],["tables",{"2":{"36":1}}],["table",{"2":{"20":1}}],["tabular",{"0":{"3":1},"1":{"4":1,"5":1,"6":1,"7":1,"8":1,"9":1,"10":1,"11":1,"12":1,"13":1,"14":1,"15":1,"16":1,"17":1,"18":1},"2":{"4":1,"10":1,"18":4,"28":1}}],["taking",{"2":{"17":1}}],["takes",{"2":{"5":1}}],["take",{"2":{"5":1}}],["taken",{"2":{"5":1}}],["tanh",{"2":{"17":1,"20":2}}],["tasks",{"2":{"10":1,"20":1}}],["target",{"2":{"1":3,"20":6,"25":2,"31":2,"32":1,"33":2,"36":3,"37":1,"38":2,"39":1,"41":5,"42":1}}],["true",{"2":{"5":1,"6":2}}],["treat",{"2":{"36":1}}],["treated",{"2":{"36":1}}],["treatment",{"2":{"4":1}}],["trees",{"2":{"4":5,"5":3,"17":1,"18":3,"20":1,"21":1,"24":1}}],["tree",{"0":{"3":1,"8":1},"1":{"4":1,"5":1,"6":1,"7":1,"8":1,"9":1,"10":1,"11":2,"12":2,"13":2,"14":2,"15":2,"16":2,"17":1,"18":1},"2":{"4":5,"5":9,"6":1,"7":6,"18":3,"20":4,"21":1,"32":1,"37":1,"42":1}}],["trailed",{"2":{"17":1}}],["trained",{"2":{"10":2,"20":1}}],["train",{"2":{"10":2,"11":1,"12":1,"13":1,"14":1,"15":1,"16":1,"20":1,"31":6,"32":2,"33":2,"36":5,"37":2,"38":2,"41":7,"42":2,"43":2}}],["trainable",{"2":{"5":1}}],["training",{"0":{"1":1,"25":1,"32":1,"37":1,"42":1},"2":{"1":1,"20":6,"25":1,"33":1,"38":1,"43":1}}],["transform",{"2":{"36":4,"41":2}}],["transformer",{"2":{"7":1}}],["translated",{"2":{"6":1}}],["traditional",{"2":{"4":1,"5":1,"6":1,"17":1}}],["tracked",{"2":{"1":1,"10":1}}],["tracking",{"2":{"1":1,"25":1}}],["factor",{"2":{"20":1}}],["false",{"2":{"5":1,"6":2}}],["f0",{"2":{"20":1}}],["functor",{"2":{"20":1}}],["function",{"2":{"1":2,"2"
:1,"7":3,"20":2}}],["full",{"2":{"7":1,"36":1}}],["flower",{"2":{"31":1}}],["flowers",{"2":{"29":1}}],["floor",{"2":{"7":1}}],["flux",{"2":{"4":1,"7":1}}],["followed",{"2":{"10":1}}],["following",{"2":{"5":1,"10":3,"20":1}}],["forward",{"2":{"7":2}}],["format",{"2":{"36":1}}],["form",{"2":{"5":2,"7":1}}],["forming",{"2":{"4":1}}],["forest",{"2":{"4":2,"18":1}}],["for",{"0":{"3":1},"1":{"4":1,"5":1,"6":1,"7":1,"8":1,"9":1,"10":1,"11":1,"12":1,"13":1,"14":1,"15":1,"16":1,"17":1,"18":1},"2":{"1":1,"4":3,"5":1,"6":2,"7":6,"10":4,"17":2,"18":3,"20":5,"25":1,"28":1,"29":1,"33":1,"36":2}}],["fetch",{"2":{"5":1}}],["features",{"2":{"10":5,"17":4,"18":1,"20":5,"25":1,"36":3,"41":1}}],["feature",{"2":{"1":3,"6":3,"17":4,"20":3,"25":2,"31":1,"32":1,"36":4,"37":1,"41":1,"42":1}}],["finally",{"2":{"33":1,"43":1}}],["fields",{"2":{"20":2}}],["figure",{"2":{"5":1,"6":1}}],["first",{"2":{"5":2,"6":1,"32":1,"36":2,"37":1,"42":1}}],["fitresult",{"2":{"20":1}}],["fitted",{"2":{"20":2}}],["fit",{"2":{"1":2,"20":5,"25":2,"27":1,"32":2,"37":2,"42":2}}],["friendly",{"2":{"7":1}}],["framework",{"2":{"4":1}}],["from",{"2":{"4":4,"6":1,"7":1,"17":2,"20":1,"36":1,"39":1,"41":1}}],["n=10",{"2":{"32":1,"37":1,"42":1}}],["n=9999",{"2":{"1":2}}],["nrow",{"2":{"31":3,"36":3,"41":3}}],["nrounds=400",{"2":{"32":1,"37":1,"42":1}}],["nrounds=10",{"2":{"20":3,"27":1}}],["nrounds",{"2":{"20":1,"24":1}}],["nfeats",{"2":{"20":2,"25":2}}],["ntrees=64",{"2":{"20":1}}],["n",{"2":{"20":1}}],["need",{"2":{"31":1,"41":1}}],["networks",{"2":{"18":1}}],["neural",{"2":{"18":5}}],["neurotreemoels",{"2":{"17":1}}],["neurotreemodel",{"0":{"21":1},"2":{"2":2,"20":1,"21":3}}],["neurotreemodels",{"0":{"22":1},"1":{"23":1,"24":1,"25":1,"26":1,"27":1,"28":1},"2":{"1":2,"2":1,"17":1,"20":4,"21":1,"23":1,"24":1,"25":2,"27":2,"30":1,"32":2,"33":2,"35":1,"37":2,"40":1,"42":2}}],["neurotrees",{"2":{"11":1,"12":1,"13":1,"14":1,"15":1,"16":1}}],["neurotree",{"0":{"3":1},"1":{"4":1,"5":1,"6":1,"7":1,"8":1,"9":1,"10":1,"11":1,"12":1,"13":1,"14":1,"15":1,"16":1,"17":1,"18":1},"2":{"4":3,"5":3,"6":2,"7":1,"9":1,"17":1,"20":1,"21":1}}],["neurotreeregressor",{"0":{"20":1},"2":{"1":2,"20":10,"24":2,"27":1,"32":2,"37":2,"42":2}}],["ncart",{"2":{"18":1}}],["num",{"2":{"24":1}}],["numeric",{"2":{"17":1}}],["number",{"2":{"5":1,"17":2,"20":7}}],["ndcg",{"2":{"14":1,"15":1}}],["nature",{"2":{"17":1}}],["naturally",{"2":{"7":1}}],["name=nothing",{"2":{"1":4}}],["name",{"2":{"1":3,"20":3,"25":2,"31":2,"32":1,"33":2,"36":3,"37":1,"38":2,"41":1,"42":1}}],["names",{"2":{"1":3,"20":5,"25":3,"31":2,"32":1,"36":3,"37":1,"41":2,"42":1}}],["nw",{"2":{"7":10}}],["nw3",{"2":{"6":1}}],["nw1",{"2":{"6":1}}],["nnlib",{"2":{"4":1}}],["now",{"2":{"32":1,"36":1,"37":1,"42":1}}],["nobs",{"2":{"20":3,"25":2}}],["no",{"2":{"17":1}}],["non",{"2":{"7":1}}],["notably",{"2":{"17":1,"25":1}}],["nothing",{"2":{"7":1}}],["not",{"2":{"7":1,"17":2,"36":1}}],["notoriously",{"2":{"7":1}}],["notion",{"2":{"4":2}}],["nodes",{"2":{"5":2,"20":2}}],["node3",{"2":{"5":1}}],["node1",{"2":{"5":1}}],["node",{"0":{"6":1},"2":{"4":1,"5":2,"6":1,"7":3,"10":1,"17":1,"18":1,"20":1}}],["nbsp",{"2":{"1":1,"2":1,"20":1,"21":1}}]],"serializationVersion":2}';export{e as default};
diff --git a/v1.1.0/assets/chunks/VPLocalSearchBox.CfMydbty.js b/v1.1.0/assets/chunks/VPLocalSearchBox.CfMydbty.js
new file mode 100644
index 0000000..bfef04b
--- /dev/null
+++ b/v1.1.0/assets/chunks/VPLocalSearchBox.CfMydbty.js
@@ -0,0 +1,13 @@
+function __vite__mapDeps(indexes) {
+  if (!__vite__mapDeps.viteFileDeps) {
+    __vite__mapDeps.viteFileDeps = []
+  }
+  return indexes.map((i) => __vite__mapDeps.viteFileDeps[i])
+}
+var It=Object.defineProperty;var Dt=(o,e,t)=>e in o?It(o,e,{enumerable:!0,configurable:!0,writable:!0,value:t}):o[e]=t;var Oe=(o,e,t)=>(Dt(o,typeof e!="symbol"?e+"":e,t),t);import{Y as yt,h as oe,y as $e,al as kt,am as Ot,d as _t,H as xe,an as tt,k as Fe,ao as Rt,ap as Mt,z as Lt,aq as zt,l as _e,U as de,S as Ee,ar as Pt,as as Vt,Z as Bt,j as $t,at as Wt,o as ee,b as Kt,m as k,a2 as Jt,p as j,au as Ut,av as jt,aw as Gt,c as re,n as rt,e as Se,G as at,F as nt,a as ve,t as pe,ax as qt,q as Ht,s as Qt,ay as it,az as Yt,ab as Zt,ah as Xt,aA as er,_ as tr}from"./framework.Bblfi-75.js";import{u as rr,c as ar}from"./theme.DPekzJrd.js";const nr={root:()=>yt(()=>import("./@localSearchIndexroot._4_Bk2_S.js"),__vite__mapDeps([]))};/*!
+* tabbable 6.2.0
+* @license MIT, https://github.com/focus-trap/tabbable/blob/master/LICENSE
+*/var mt=["input:not([inert])","select:not([inert])","textarea:not([inert])","a[href]:not([inert])","button:not([inert])","[tabindex]:not(slot):not([inert])","audio[controls]:not([inert])","video[controls]:not([inert])",'[contenteditable]:not([contenteditable="false"]):not([inert])',"details>summary:first-of-type:not([inert])","details:not([inert])"],Ne=mt.join(","),gt=typeof Element>"u",ue=gt?function(){}:Element.prototype.matches||Element.prototype.msMatchesSelector||Element.prototype.webkitMatchesSelector,Ce=!gt&&Element.prototype.getRootNode?function(o){var e;return o==null||(e=o.getRootNode)===null||e===void 0?void 0:e.call(o)}:function(o){return o==null?void 0:o.ownerDocument},Ie=function o(e,t){var r;t===void 0&&(t=!0);var n=e==null||(r=e.getAttribute)===null||r===void 0?void 0:r.call(e,"inert"),a=n===""||n==="true",i=a||t&&e&&o(e.parentNode);return i},ir=function(e){var t,r=e==null||(t=e.getAttribute)===null||t===void 0?void 0:t.call(e,"contenteditable");return r===""||r==="true"},bt=function(e,t,r){if(Ie(e))return[];var n=Array.prototype.slice.apply(e.querySelectorAll(Ne));return t&&ue.call(e,Ne)&&n.unshift(e),n=n.filter(r),n},wt=function o(e,t,r){for(var n=[],a=Array.from(e);a.length;){var i=a.shift();if(!Ie(i,!1))if(i.tagName==="SLOT"){var s=i.assignedElements(),u=s.length?s:i.children,l=o(u,!0,r);r.flatten?n.push.apply(n,l):n.push({scopeParent:i,candidates:l})}else{var h=ue.call(i,Ne);h&&r.filter(i)&&(t||!e.includes(i))&&n.push(i);var d=i.shadowRoot||typeof r.getShadowRoot=="function"&&r.getShadowRoot(i),v=!Ie(d,!1)&&(!r.shadowRootFilter||r.shadowRootFilter(i));if(d&&v){var y=o(d===!0?i.children:d.children,!0,r);r.flatten?n.push.apply(n,y):n.push({scopeParent:i,candidates:y})}else a.unshift.apply(a,i.children)}}return n},xt=function(e){return!isNaN(parseInt(e.getAttribute("tabindex"),10))},se=function(e){if(!e)throw new Error("No node provided");return e.tabIndex<0&&(/^(AUDIO|VIDEO|DETAILS)$/.test(e.tagName)||ir(e))&&!xt(e)?0:e.tabIndex},or=function(e,t){var r=se(e);return r<0&&t&&!xt(e)?0:r},sr=function(e,t){return e.tabIndex===t.tabIndex?e.documentOrder-t.documentOrder:e.tabIndex-t.tabIndex},Ft=function(e){return e.tagName==="INPUT"},ur=function(e){return Ft(e)&&e.type==="hidden"},lr=function(e){var t=e.tagName==="DETAILS"&&Array.prototype.slice.apply(e.children).some(function(r){return r.tagName==="SUMMARY"});return t},cr=function(e,t){for(var r=0;rsummary:first-of-type"),i=a?e.parentElement:e;if(ue.call(i,"details:not([open]) *"))return!0;if(!r||r==="full"||r==="legacy-full"){if(typeof n=="function"){for(var s=e;e;){var u=e.parentElement,l=Ce(e);if(u&&!u.shadowRoot&&n(u)===!0)return ot(e);e.assignedSlot?e=e.assignedSlot:!u&&l!==e.ownerDocument?e=l.host:e=u}e=s}if(vr(e))return!e.getClientRects().length;if(r!=="legacy-full")return!0}else if(r==="non-zero-area")return ot(e);return!1},yr=function(e){if(/^(INPUT|BUTTON|SELECT|TEXTAREA)$/.test(e.tagName))for(var t=e.parentElement;t;){if(t.tagName==="FIELDSET"&&t.disabled){for(var r=0;r=0)},gr=function o(e){var t=[],r=[];return e.forEach(function(n,a){var i=!!n.scopeParent,s=i?n.scopeParent:n,u=or(s,i),l=i?o(n.candidates):s;u===0?i?t.push.apply(t,l):t.push(s):r.push({documentOrder:a,tabIndex:u,item:n,isScope:i,content:l})}),r.sort(sr).reduce(function(n,a){return a.isScope?n.push.apply(n,a.content):n.push(a.content),n},[]).concat(t)},br=function(e,t){t=t||{};var r;return 
t.getShadowRoot?r=wt([e],t.includeContainer,{filter:We.bind(null,t),flatten:!1,getShadowRoot:t.getShadowRoot,shadowRootFilter:mr}):r=bt(e,t.includeContainer,We.bind(null,t)),gr(r)},wr=function(e,t){t=t||{};var r;return t.getShadowRoot?r=wt([e],t.includeContainer,{filter:De.bind(null,t),flatten:!0,getShadowRoot:t.getShadowRoot}):r=bt(e,t.includeContainer,De.bind(null,t)),r},le=function(e,t){if(t=t||{},!e)throw new Error("No node provided");return ue.call(e,Ne)===!1?!1:We(t,e)},xr=mt.concat("iframe").join(","),Re=function(e,t){if(t=t||{},!e)throw new Error("No node provided");return ue.call(e,xr)===!1?!1:De(t,e)};/*!
+* focus-trap 7.5.4
+* @license MIT, https://github.com/focus-trap/focus-trap/blob/master/LICENSE
+*/function st(o,e){var t=Object.keys(o);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(o);e&&(r=r.filter(function(n){return Object.getOwnPropertyDescriptor(o,n).enumerable})),t.push.apply(t,r)}return t}function ut(o){for(var e=1;e0){var r=e[e.length-1];r!==t&&r.pause()}var n=e.indexOf(t);n===-1||e.splice(n,1),e.push(t)},deactivateTrap:function(e,t){var r=e.indexOf(t);r!==-1&&e.splice(r,1),e.length>0&&e[e.length-1].unpause()}},Ar=function(e){return e.tagName&&e.tagName.toLowerCase()==="input"&&typeof e.select=="function"},Tr=function(e){return(e==null?void 0:e.key)==="Escape"||(e==null?void 0:e.key)==="Esc"||(e==null?void 0:e.keyCode)===27},ge=function(e){return(e==null?void 0:e.key)==="Tab"||(e==null?void 0:e.keyCode)===9},Nr=function(e){return ge(e)&&!e.shiftKey},Cr=function(e){return ge(e)&&e.shiftKey},ct=function(e){return setTimeout(e,0)},ft=function(e,t){var r=-1;return e.every(function(n,a){return t(n)?(r=a,!1):!0}),r},ye=function(e){for(var t=arguments.length,r=new Array(t>1?t-1:0),n=1;n1?p-1:0),I=1;I
This work introduces NeuroTree, a differentiable binary tree operator adapted for the treatment of tabular data.
It addresses the shortcoming of traditional trees' greediness: all nodes and leaves are learned simultaneously. This provides the ability to learn an optimal configuration across all of the tree's levels. The notion also extends to the collection of trees, which are learned simultaneously.
It extends the notions of forest/bagging and boosting.
Although the predictions from all the trees forming a NeuroTree operator are averaged, each tree's prediction is tuned simultaneously. This differs from boosting (ex. XGBoost), where each tree is learned sequentially, over the residuals of the previous trees. Also, unlike random forests and bagging, trees aren't learned in isolation but are tuned collaboratively, resulting in predictions that account for all of the other trees' predictions.
It is a general operator amenable to composition.
It allows integration within Flux's Chain like other standard operators from NNlib. Composition is also illustrated through the built-in StackTree layer, a residual composition of multiple NeuroTree building blocks.
It is compatible with general-purpose machine learning frameworks.
A NeuroTree operator acts as a collection of complete binary trees, i.e. trees without any pruned node. To be differentiable, and hence trainable using first-order gradient-based methods (ex. the Adam optimiser), each tree path implements a soft decision rather than a hard one as in a traditional decision tree.
To introduce the implementation of a NeuroTree, we first get back to the architecture of a basic decision tree.
The above is a binary decision tree of depth 2.
Highlighted in green is the decision path taken for a given sample. It goes through a number of binary decisions equal to the tree's depth, resulting in the path node1 → node3 → leaf3.
One way to view the role of the decision nodes (gray background) is that they provide the index of the leaf prediction to fetch (index 3 in the figure). Such an indexing view is applicable given that node routing relies on hard conditions: either true or false.
An alternative perspective, the one adopted here, is that the tree nodes collectively provide weights associated with each leaf. A tree prediction then becomes the weighted sum of the leaf values with the leaf weights. In regular decision trees, since all conditions are binary, the leaf weights take the form of a mask. In the above example, the mask is [0, 0, 1, 0].
By relaxing these hard conditions into soft ones, the mask takes the form of a probability vector over the leaves, where ∑(leaf_weights) = 1 and each leaf_weight element lies in [0, 1]. A tree prediction can then be obtained with the dot product: leaf_values' * leaf_weights.
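As a toy illustration of this relaxed prediction rule (the numbers below are made up, and leaf_values / leaf_weights are simply the names used in the text, not the package's internals):

```julia
# Soft prediction of a single depth-2 tree (4 leaves).
leaf_values  = [0.2, -1.3, 0.7, 0.4]        # one learned value per leaf
leaf_weights = [0.05, 0.10, 0.80, 0.05]     # soft routing probabilities, sum(leaf_weights) == 1
prediction   = leaf_values' * leaf_weights  # weighted sum, here ≈ 0.46
```

With the hard mask [0, 0, 1, 0] from the previous paragraph, the same dot product reduces to plain leaf indexing.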
The following illustrates how a basic decision tree is represented as a single differentiable tree within NeuroTree:
To illustrate how a NeuroTree derives the soft decision probabilities (referred to as NW1 - NW3 in the above figure), we first break down how a traditional tree's split condition is derived from 2 underlying decisions:
Selection of the feature on which to perform the condition.
Such a selection can be represented as the application of a binary mask where all elements are set to false except for the single selected feature, where it's set to true.
Selection of the condition's threshold value.
For a given observation, if the selected feature's value is below that threshold, then the node decision is set to false (pointing to the left child), and to true otherwise (pointing to the right child).
In NeuroTree, these 2 hard steps are translated into soft, differentiable ones, as sketched below.
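The sketch below is only a minimal illustration of such a soft node decision, under assumed names; it is not the package's actual implementation (NeuroTree's default activation is tanh, as noted in the discussion further down, while a sigmoid is used here purely to keep the output in [0, 1]):

```julia
# Soft analogue of "select a feature, then compare it to a threshold":
# `w` plays the role of a soft feature-selection mask and `b` of a (negated) threshold,
# and the smooth activation turns the hard true/false routing into a probability
# of sending the observation to the right child.
σ(z) = 1 / (1 + exp(-z))
soft_node(x::AbstractVector, w::AbstractVector, b::Real) = σ(w' * x + b)

x = [5.1, 3.5, 1.4, 0.2]      # one observation with 4 features
w = [0.0, 0.0, 2.0, 0.0]      # weights concentrated on feature 3 ≈ a soft one-hot mask
b = -3.0                      # ≈ a threshold of 1.5 on feature 3
p_right = soft_node(x, w, b)  # ≈ 0.45, i.e. the observation leans slightly to the left child
```

Both the feature mask and the threshold thereby become learnable by gradient descent, which the hard versions do not allow.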
Computing the leaf weights consists of accumulating the weights through each tree branch. It's the technically more challenging part as such computation cannot be represented as a form of matrix multiplication, unlike other common operators like Dense, Conv or MultiHeadAttention / Transformer. Performing probability accumulation though a tree index naturally leads to in-place element wise operations, which are notoriously not friendly for auto-differentiation engines. Since NeuroTree was intended to integrate with the Flux.jl ecosystem, Zygote.jl acts as the underlying AD, the approach used was to manually implement backward / adjoint of the terminal leaf function and instruct the AD to use that custom rule rather than attempt to differentiate a non-AD compliant function.
Below are the algorithm and actual implementation of the forward and backward functions that compute the leaf weights. For brevity, the loops over each observation of the batch and over each tree are omitted. Parallelism, on both CPU and GPU, is obtained by parallelizing over the tree and batch dimensions.
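The package's batched implementation is not reproduced here; the following is a minimal single-tree, single-observation sketch of the underlying algorithm, assuming nw[i] holds the soft decision (probability of routing left) at internal node i:
julia
# Forward: accumulate routing probabilities from the root down to the leaves.
function leaf_weights(nw::AbstractVector{T}, depth::Int) where {T}
    cw = ones(T, 2^(depth + 1) - 1)            # probability of reaching each node; root = 1
    for i in 1:(2^depth - 1)
        cw[2i]     = cw[i] * nw[i]             # left child
        cw[2i + 1] = cw[i] * (one(T) - nw[i])  # right child
    end
    return cw[(2^depth):end], cw               # last 2^depth entries are the leaf weights
end

# Backward: given the cotangent Δlw of the leaf weights, accumulate the gradient
# with respect to the node decisions nw by walking the tree bottom-up. In the
# package, this kind of logic is registered as a custom adjoint for the AD.
function leaf_weights_grad(Δlw::AbstractVector{T}, nw, cw, depth::Int) where {T}
    Δcw = zeros(T, 2^(depth + 1) - 1)
    Δcw[(2^depth):end] .= Δlw
    Δnw = zeros(T, 2^depth - 1)
    for i in (2^depth - 1):-1:1
        Δcw[i] = Δcw[2i] * nw[i] + Δcw[2i + 1] * (one(T) - nw[i])
        Δnw[i] = (Δcw[2i] - Δcw[2i + 1]) * cw[i]
    end
    return Δnw
end

depth = 2
nw = fill(0.7, 2^depth - 1)    # 3 internal nodes
lw, cw = leaf_weights(nw, depth)
@assert sum(lw) ≈ 1            # leaf weights form a probability vector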
For each dataset and algorithm, the following methodology is applied:
Data is split into three parts: train, eval, and test.
A random grid of 16 hyper-parameter configurations is generated (see the sketch after this list).
For each configuration, a model is trained on the train data until the evaluation metric tracked on the eval data stops improving (early stopping).
The trained model is then evaluated against the test data.
The metrics presented below are those obtained on the test data for the model that achieved the best eval metric.
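As an illustrative sketch of the random-grid step (parameter names and ranges below are assumptions, not the ones used in the benchmark):
julia
using Random
Random.seed!(42)

# Draw a random grid of 16 hyper-parameter configurations
grid = [(depth = rand(3:6),
         lr = exp10(-3 + 2 * rand()),     # log-uniform between 1e-3 and 1e-1
         ntrees = rand([32, 64, 128]))
        for _ in 1:16]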
NeuroTreeModels can achieve top-tier performance on both small (Boston) and large (Higgs) datasets. Its performance trailed on the two ranking regression problems (MSRank and Yahoo). Although the large number of features is a distinguishing characteristic of the Yahoo dataset, the 136 features of MSRank are not materially different from the 90 features of the YEAR dataset, on which NeuroTreeModels outperforms all other algorithms. Considering that no sparsity mechanism is present in the feature selection for the node conditions, datasets with a very large number of features may present a challenge. Substituting the default tanh activation with a sparsity-inducing one such as hardsigmoid or EntrOpt did not result in improvements in our experiments.
Another potential weakness may stem from the soft nature of the decision criteria. Traditional trees can isolate the effect of a specific feature value. This can be notably meaningful in situations where a numeric feature taking a value of 0 carries a particular meaning (ex. a missing or unknown value). Such a stump-like effect of a feature should be harder to pick up with NeuroTree's soft conditions.
`,57),h=[l];function o(d,p,g,k,c,y){return i(),e("div",null,h)}const u=t(r,[["render",o]]);export{f as __pageData,u as default};
diff --git a/v1.1.0/assets/design.md.C4dtqWRH.lean.js b/v1.1.0/assets/design.md.C4dtqWRH.lean.js
new file mode 100644
index 0000000..d9e82d5
--- /dev/null
+++ b/v1.1.0/assets/design.md.C4dtqWRH.lean.js
@@ -0,0 +1 @@
+import{_ as t,c as e,o as i,a7 as s}from"./chunks/framework.Bblfi-75.js";const a="/NeuroTreeModels.jl/v1.1.0/assets/decision-tree.Dqh78YdA.png",n="/NeuroTreeModels.jl/v1.1.0/assets/neurotree.vCO5vhkM.png",f=JSON.parse('{"title":"NeuroTree - A differentiable tree operator for tabular data","description":"","frontmatter":{},"headers":[],"relativePath":"design.md","filePath":"design.md","lastUpdated":null}'),r={name:"design.md"},l=s("",57),h=[l];function o(d,p,g,k,c,y){return i(),e("div",null,h)}const u=t(r,[["render",o]]);export{f as __pageData,u as default};
diff --git a/v1.1.0/assets/index.md.6slnvhhB.js b/v1.1.0/assets/index.md.6slnvhhB.js
new file mode 100644
index 0000000..407b701
--- /dev/null
+++ b/v1.1.0/assets/index.md.6slnvhhB.js
@@ -0,0 +1 @@
+import{_ as e,c as t,o as a}from"./chunks/framework.Bblfi-75.js";const p=JSON.parse('{"title":"","description":"","frontmatter":{"layout":"home","hero":{"text":"NeuroTreeModels.jl","tagline":"Differentiable tree-based models for tabular data","image":{"src":"/evo-chevron.png","alt":"Evovest"},"actions":[{"theme":"brand","text":"Quick start","link":"/quick-start"},{"theme":"alt","text":"Design","link":"/design"},{"theme":"alt","text":"Models","link":"/models"},{"theme":"alt","text":"Source code","link":"https://github.com/Evovest/NeuroTreeModels.jl"}]}},"headers":[],"relativePath":"index.md","filePath":"index.md","lastUpdated":null}'),o={name:"index.md"};function r(n,s,l,i,d,c){return a(),t("div")}const h=e(o,[["render",r]]);export{p as __pageData,h as default};
diff --git a/v1.1.0/assets/index.md.6slnvhhB.lean.js b/v1.1.0/assets/index.md.6slnvhhB.lean.js
new file mode 100644
index 0000000..407b701
--- /dev/null
+++ b/v1.1.0/assets/index.md.6slnvhhB.lean.js
@@ -0,0 +1 @@
+import{_ as e,c as t,o as a}from"./chunks/framework.Bblfi-75.js";const p=JSON.parse('{"title":"","description":"","frontmatter":{"layout":"home","hero":{"text":"NeuroTreeModels.jl","tagline":"Differentiable tree-based models for tabular data","image":{"src":"/evo-chevron.png","alt":"Evovest"},"actions":[{"theme":"brand","text":"Quick start","link":"/quick-start"},{"theme":"alt","text":"Design","link":"/design"},{"theme":"alt","text":"Models","link":"/models"},{"theme":"alt","text":"Source code","link":"https://github.com/Evovest/NeuroTreeModels.jl"}]}},"headers":[],"relativePath":"index.md","filePath":"index.md","lastUpdated":null}'),o={name:"index.md"};function r(n,s,l,i,d,c){return a(),t("div")}const h=e(o,[["render",r]]);export{p as __pageData,h as default};
diff --git a/v1.1.0/assets/inter-italic-cyrillic-ext.5XJwZIOp.woff2 b/v1.1.0/assets/inter-italic-cyrillic-ext.5XJwZIOp.woff2
new file mode 100644
index 0000000..2a68729
Binary files /dev/null and b/v1.1.0/assets/inter-italic-cyrillic-ext.5XJwZIOp.woff2 differ
diff --git a/v1.1.0/assets/inter-italic-cyrillic.D6csxwjC.woff2 b/v1.1.0/assets/inter-italic-cyrillic.D6csxwjC.woff2
new file mode 100644
index 0000000..f640351
Binary files /dev/null and b/v1.1.0/assets/inter-italic-cyrillic.D6csxwjC.woff2 differ
diff --git a/v1.1.0/assets/inter-italic-greek-ext.CHOfFY1k.woff2 b/v1.1.0/assets/inter-italic-greek-ext.CHOfFY1k.woff2
new file mode 100644
index 0000000..0021896
Binary files /dev/null and b/v1.1.0/assets/inter-italic-greek-ext.CHOfFY1k.woff2 differ
diff --git a/v1.1.0/assets/inter-italic-greek.9J96vYpw.woff2 b/v1.1.0/assets/inter-italic-greek.9J96vYpw.woff2
new file mode 100644
index 0000000..71c265f
Binary files /dev/null and b/v1.1.0/assets/inter-italic-greek.9J96vYpw.woff2 differ
diff --git a/v1.1.0/assets/inter-italic-latin-ext.BGcWXLrn.woff2 b/v1.1.0/assets/inter-italic-latin-ext.BGcWXLrn.woff2
new file mode 100644
index 0000000..9c1b944
Binary files /dev/null and b/v1.1.0/assets/inter-italic-latin-ext.BGcWXLrn.woff2 differ
diff --git a/v1.1.0/assets/inter-italic-latin.DbsTr1gm.woff2 b/v1.1.0/assets/inter-italic-latin.DbsTr1gm.woff2
new file mode 100644
index 0000000..01fcf20
Binary files /dev/null and b/v1.1.0/assets/inter-italic-latin.DbsTr1gm.woff2 differ
diff --git a/v1.1.0/assets/inter-italic-vietnamese.DHNAd7Wr.woff2 b/v1.1.0/assets/inter-italic-vietnamese.DHNAd7Wr.woff2
new file mode 100644
index 0000000..e4f788e
Binary files /dev/null and b/v1.1.0/assets/inter-italic-vietnamese.DHNAd7Wr.woff2 differ
diff --git a/v1.1.0/assets/inter-roman-cyrillic-ext.DxP3Awbn.woff2 b/v1.1.0/assets/inter-roman-cyrillic-ext.DxP3Awbn.woff2
new file mode 100644
index 0000000..28593cc
Binary files /dev/null and b/v1.1.0/assets/inter-roman-cyrillic-ext.DxP3Awbn.woff2 differ
diff --git a/v1.1.0/assets/inter-roman-cyrillic.CMhn1ESj.woff2 b/v1.1.0/assets/inter-roman-cyrillic.CMhn1ESj.woff2
new file mode 100644
index 0000000..a20adc1
Binary files /dev/null and b/v1.1.0/assets/inter-roman-cyrillic.CMhn1ESj.woff2 differ
diff --git a/v1.1.0/assets/inter-roman-greek-ext.D0mI3NpI.woff2 b/v1.1.0/assets/inter-roman-greek-ext.D0mI3NpI.woff2
new file mode 100644
index 0000000..e3b0be7
Binary files /dev/null and b/v1.1.0/assets/inter-roman-greek-ext.D0mI3NpI.woff2 differ
diff --git a/v1.1.0/assets/inter-roman-greek.JvnBZ4YD.woff2 b/v1.1.0/assets/inter-roman-greek.JvnBZ4YD.woff2
new file mode 100644
index 0000000..f790e04
Binary files /dev/null and b/v1.1.0/assets/inter-roman-greek.JvnBZ4YD.woff2 differ
diff --git a/v1.1.0/assets/inter-roman-latin-ext.ZlYT4o7i.woff2 b/v1.1.0/assets/inter-roman-latin-ext.ZlYT4o7i.woff2
new file mode 100644
index 0000000..715bd90
Binary files /dev/null and b/v1.1.0/assets/inter-roman-latin-ext.ZlYT4o7i.woff2 differ
diff --git a/v1.1.0/assets/inter-roman-latin.Bu8hRsVA.woff2 b/v1.1.0/assets/inter-roman-latin.Bu8hRsVA.woff2
new file mode 100644
index 0000000..a540b7a
Binary files /dev/null and b/v1.1.0/assets/inter-roman-latin.Bu8hRsVA.woff2 differ
diff --git a/v1.1.0/assets/inter-roman-vietnamese.ClpjcLMQ.woff2 b/v1.1.0/assets/inter-roman-vietnamese.ClpjcLMQ.woff2
new file mode 100644
index 0000000..5a9f9cb
Binary files /dev/null and b/v1.1.0/assets/inter-roman-vietnamese.ClpjcLMQ.woff2 differ
diff --git a/v1.1.0/assets/models.md.C_p785aE.js b/v1.1.0/assets/models.md.C_p785aE.js
new file mode 100644
index 0000000..1a54972
--- /dev/null
+++ b/v1.1.0/assets/models.md.C_p785aE.js
@@ -0,0 +1,12 @@
+import{_ as e,c as s,o as i,a7 as a}from"./chunks/framework.Bblfi-75.js";const u=JSON.parse('{"title":"Models","description":"","frontmatter":{},"headers":[],"relativePath":"models.md","filePath":"models.md","lastUpdated":null}'),t={name:"models.md"},n=a(`
A model type for constructing a NeuroTreeRegressor, based on NeuroTreeModels.jl, and implementing both an internal API and the MLJ model interface.
Hyper-parameters
loss=:mse: Loss to be minimized during training. One of:
:mse
:mae
:logloss
:mlogloss
:gaussian_mle
nrounds=10: Max number of rounds (epochs).
lr=1.0f-2: Learning rate. Must be > 0. A lower lr results in slower learning, typically requiring a higher nrounds.
wd=0.f0: Weight decay applied to the gradients by the optimizer.
batchsize=2048: Batch size.
actA=:tanh: Activation function applied to each input variable for the determination of the split node weights. Can be one of:
:tanh
:identity
outsize=1: Number of predictions returned by the model. Typically only used for classification tasks and set to the number of target levels / classes.
depth=6: Depth of a tree. Must be >= 1. A tree of depth 1 has 2 prediction leaf nodes. A complete tree of depth N contains 2^N terminal leaves and 2^N - 1 split nodes. Compute cost is proportional to 2^depth. Typical optimal values are in the 3 to 5 range.
ntrees=64: Number of trees (per stack).
hidden_size=16: Size of hidden layers. Applicable only when stack_size > 1.
stack_size=1: Number of stacked NeuroTree blocks.
init_scale=1.0: Scaling factor applied to the prediction weights. Values in the ]0, 1] range typically result in the best performance.
MLE_tree_split=false: Whether independent models are built for each of the 2 parameters (mu, sigma) of the gaussian_mle loss.
rng=123: Either an integer used as a seed to the random number generator or an actual random number generator (::Random.AbstractRNG).
device=:cpu: Device to use. Either :cpu or :gpu (recommended, as it significantly improves training speed).
gpuID=0: ID of the GPU to use for training.
Internal API
Do config = NeuroTreeRegressor() to construct an instance with default hyper-parameters. Provide keyword arguments to override hyper-parameter defaults, as in NeuroTreeRegressor(loss=...).
MLJ Interface
Do model = NeuroTreeRegressor() to construct an instance with default hyper-parameters. Provide keyword arguments to override hyper-parameter defaults, as in NeuroTreeRegressor(loss=...).
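For illustration, an instance overriding a few of the hyper-parameters documented above (with arbitrary values) can be constructed as:
julia
using NeuroTreeModels

config = NeuroTreeRegressor(
    loss = :mse,
    nrounds = 100,
    depth = 4,
    lr = 2.0f-2,
    batchsize = 1024,
    device = :cpu,
)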
Training model
In MLJ or MLJBase, bind an instance model to data with mach = machine(model, X, y) where
X: any table of input features (eg, a DataFrame) whose columns each have one of the following element scitypes: Continuous, Count, or <:OrderedFactor; check column scitypes with schema(X)
y: the target, which can be any AbstractVector whose element scitype is <:Continuous; check the scitype with scitype(y)
Train the machine using fit!(mach, rows=...).
Operations
predict(mach, Xnew): return predictions of the target given features Xnew having the same scitype as X above.
Fitted parameters
The fields of fitted_params(mach) are:
:fitresult: The NeuroTreeModel object.
Report
The fields of report(mach) are:
:features: The names of the features encountered in training.
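Putting the above together, a minimal MLJ workflow sketch (with a small synthetic table and continuous target, assumed purely for illustration) might look like:
julia
using MLJBase, NeuroTreeModels

X = (x1 = randn(200), x2 = randn(200))       # any Tables.jl table with Continuous columns
y = 2 .* X.x1 .- X.x2 .+ 0.1 .* randn(200)   # Continuous target

model = NeuroTreeRegressor(loss = :mse, depth = 3, nrounds = 20)
mach  = machine(model, X, y)
fit!(mach, rows = 1:150)                     # train on the first 150 rows

preds = predict(mach, X)                     # predictions for the full table
fitted_params(mach).fitresult                # the underlying NeuroTreeModel
report(mach).features                        # feature names seen in training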
A NeuroTreeModel is made of a collection of Trees, either regular NeuroTree or StackTree. The prediction is the sum of all the trees composing a NeuroTreeModel.
`,7),o=[n];function r(l,p,d,h,c,k){return i(),s("div",null,o)}const E=e(t,[["render",r]]);export{u as __pageData,E as default};
diff --git a/v1.1.0/assets/models.md.C_p785aE.lean.js b/v1.1.0/assets/models.md.C_p785aE.lean.js
new file mode 100644
index 0000000..208de95
--- /dev/null
+++ b/v1.1.0/assets/models.md.C_p785aE.lean.js
@@ -0,0 +1 @@
+import{_ as e,c as s,o as i,a7 as a}from"./chunks/framework.Bblfi-75.js";const u=JSON.parse('{"title":"Models","description":"","frontmatter":{},"headers":[],"relativePath":"models.md","filePath":"models.md","lastUpdated":null}'),t={name:"models.md"},n=a("",7),o=[n];function r(l,p,d,h,c,k){return i(),s("div",null,o)}const E=e(t,[["render",r]]);export{u as __pageData,E as default};
diff --git a/v1.1.0/assets/neurotree.vCO5vhkM.png b/v1.1.0/assets/neurotree.vCO5vhkM.png
new file mode 100644
index 0000000..5589ff4
Binary files /dev/null and b/v1.1.0/assets/neurotree.vCO5vhkM.png differ
diff --git a/v1.1.0/assets/quick-start.md.DPsqzifF.js b/v1.1.0/assets/quick-start.md.DPsqzifF.js
new file mode 100644
index 0000000..8558c1c
--- /dev/null
+++ b/v1.1.0/assets/quick-start.md.DPsqzifF.js
@@ -0,0 +1,17 @@
+import{_ as s,c as i,o as a,a7 as n}from"./chunks/framework.Bblfi-75.js";const c=JSON.parse('{"title":"Getting started with NeuroTreeModels.jl","description":"","frontmatter":{},"headers":[],"relativePath":"quick-start.md","filePath":"quick-start.md","lastUpdated":null}'),e={name:"quick-start.md"},t=n(`
Building and training a model according to the above config is done with NeuroTreeModels.fit. See the docs for additional features, notably early stopping support through the tracking of an evaluation metric.
We will use the iris dataset, which is included in the MLDatasets package. This dataset consists of measurements of the sepal length, sepal width, petal length, and petal width for three different types of iris flowers: Setosa, Versicolor, and Virginica.
Before we can train our model, we need to preprocess the dataset. We will convert the class variable, which specifies the type of iris flower, into a categorical variable.
Now we are ready to train our model. We first define a model configuration using the NeuroTreeRegressor constructor. Then, we use NeuroTreeModels.fit to train a NeuroTree model. We pass the optional deval argument to enable early stopping.
Finally, we can get predictions by passing training and testing data to our model. We can then evaluate the accuracy of our model, which should be over 95% for this simple classification problem.
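A condensed sketch of that workflow is shown below; dtrain and deval stand for the prepared training and evaluation tables, and the exact call signature of NeuroTreeModels.fit (beyond the deval keyword mentioned above) should be checked against the package documentation:
julia
config = NeuroTreeRegressor(loss = :mlogloss, outsize = 3, depth = 4, nrounds = 200)

# `deval` enables tracking of the evaluation metric and early stopping
m = NeuroTreeModels.fit(config, dtrain; deval)

# Predictions are obtained by passing data to the trained model
p_eval = m(deval)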
`,15),l=[h];function e(k,p,r,E,d,g){return a(),i("div",null,l)}const c=s(t,[["render",e]]);export{y as __pageData,c as default};
diff --git a/v1.1.0/assets/tutorials-classification-iris.md.DVImRnRk.lean.js b/v1.1.0/assets/tutorials-classification-iris.md.DVImRnRk.lean.js
new file mode 100644
index 0000000..87e1f90
--- /dev/null
+++ b/v1.1.0/assets/tutorials-classification-iris.md.DVImRnRk.lean.js
@@ -0,0 +1 @@
+import{_ as s,c as i,o as a,a7 as n}from"./chunks/framework.Bblfi-75.js";const y=JSON.parse('{"title":"Classification on Iris dataset","description":"","frontmatter":{},"headers":[],"relativePath":"tutorials-classification-iris.md","filePath":"tutorials-classification-iris.md","lastUpdated":null}'),t={name:"tutorials-classification-iris.md"},h=n("",15),l=[h];function e(k,p,r,E,d,g){return a(),i("div",null,l)}const c=s(t,[["render",e]]);export{y as __pageData,c as default};
diff --git a/v1.1.0/assets/tutorials-logistic-titanic.md.SAz6fqoN.js b/v1.1.0/assets/tutorials-logistic-titanic.md.SAz6fqoN.js
new file mode 100644
index 0000000..488d2e6
--- /dev/null
+++ b/v1.1.0/assets/tutorials-logistic-titanic.md.SAz6fqoN.js
@@ -0,0 +1,48 @@
+import{_ as s,c as i,o as a,a7 as n}from"./chunks/framework.Bblfi-75.js";const c=JSON.parse('{"title":"Logistic Regression on Titanic Dataset","description":"","frontmatter":{},"headers":[],"relativePath":"tutorials-logistic-titanic.md","filePath":"tutorials-logistic-titanic.md","lastUpdated":null}'),t={name:"tutorials-logistic-titanic.md"},h=n(`
We will use the Titanic dataset, which is included in the MLDatasets package. It describes the survival status of individual passengers on the Titanic. The task is approached here as a logistic regression problem, although a Classifier model could also have been used (see the Classification - Iris tutorial).
To begin, we will load the required packages and the dataset:
julia
using NeuroTreeModels
using MLDatasets
using DataFrames
using Statistics: mean
using StatsBase: median
using CategoricalArrays
using Random
Random.seed!(123)
A first step in data processing is to prepare the input features in a model-compatible format.
NeuroTreeModels' Tables API supports inputs that are either Real (incl. Bool) or Categorical. Bool variables are treated as unordered, 2-level categorical variables. A recommended approach for String features such as Sex is to convert them into an unordered Categorical.
For features with missing values, such as Age, a common approach is to first create a Bool indicator variable capturing whether a value is missing. The missing values can then be imputed (replaced by some default value such as the mean or median, or by a more sophisticated approach such as predictions from another model).
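A sketch of those two steps, assuming the dataset has been loaded into a DataFrame df with the usual Sex and Age columns:
julia
# String feature → unordered categorical
transform!(df, :Sex => categorical => :Sex)

# Missing-value handling for Age: indicator column + median imputation
df.Age_ismissing = ismissing.(df.Age)
df.Age = coalesce.(df.Age, median(skipmissing(df.Age)))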
Now we are ready to train our model. We first define a model configuration using the NeuroTreeRegressor constructor. Then, we use NeuroTreeModels.fit to train a NeuroTree model. We pass the optional deval argument to enable early stopping.
`,19),e=[h];function l(k,p,r,d,E,g){return a(),i("div",null,e)}const y=s(t,[["render",l]]);export{c as __pageData,y as default};
diff --git a/v1.1.0/assets/tutorials-logistic-titanic.md.SAz6fqoN.lean.js b/v1.1.0/assets/tutorials-logistic-titanic.md.SAz6fqoN.lean.js
new file mode 100644
index 0000000..925d3a5
--- /dev/null
+++ b/v1.1.0/assets/tutorials-logistic-titanic.md.SAz6fqoN.lean.js
@@ -0,0 +1 @@
+import{_ as s,c as i,o as a,a7 as n}from"./chunks/framework.Bblfi-75.js";const c=JSON.parse('{"title":"Logistic Regression on Titanic Dataset","description":"","frontmatter":{},"headers":[],"relativePath":"tutorials-logistic-titanic.md","filePath":"tutorials-logistic-titanic.md","lastUpdated":null}'),t={name:"tutorials-logistic-titanic.md"},h=n("",19),e=[h];function l(k,p,r,d,E,g){return a(),i("div",null,e)}const y=s(t,[["render",l]]);export{c as __pageData,y as default};
diff --git a/v1.1.0/assets/tutorials-regression-boston.md.MXXBtK1M.js b/v1.1.0/assets/tutorials-regression-boston.md.MXXBtK1M.js
new file mode 100644
index 0000000..d148850
--- /dev/null
+++ b/v1.1.0/assets/tutorials-regression-boston.md.MXXBtK1M.js
@@ -0,0 +1,42 @@
+import{_ as s,c as i,o as a,a7 as n}from"./chunks/framework.Bblfi-75.js";const y=JSON.parse('{"title":"Regression on Boston Housing Dataset","description":"","frontmatter":{},"headers":[],"relativePath":"tutorials-regression-boston.md","filePath":"tutorials-regression-boston.md","lastUpdated":null}'),t={name:"tutorials-regression-boston.md"},h=n(`
We will use the Boston Housing dataset, which is included in the MLDatasets package. It's derived from information collected by the U.S. Census Service concerning housing in the area of Boston. The target variable represents the median housing value.
Before we can train our model, we need to preprocess the dataset. We will split our data according to train and eval indices, and separate features from the target variable.
Now we are ready to train our model. We first define a model configuration using the NeuroTreeRegressor constructor. Then, we use NeuroTreeModels.fit to train a NeuroTree model. We pass the optional deval argument to enable early stopping.
Finally, we can get predictions by passing training and testing data to our model. We can then apply various evaluation metrics, such as the MAE (mean absolute error):
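For example, assuming vectors p_test and y_test of predictions and targets:
julia
using Statistics: mean

mae = mean(abs.(p_test .- y_test))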