From 8417041cd11c29c5ffeb91a1e2eab44b9fb08d2f Mon Sep 17 00:00:00 2001
From: Sandy Jiaxin Zhang
Date: Fri, 15 Nov 2019 09:01:53 -0500
Subject: [PATCH 1/2] Committing Assignment 5

---
 Assignment 5.Rproj | 13 +++++++++++++
 1 file changed, 13 insertions(+)
 create mode 100644 Assignment 5.Rproj

diff --git a/Assignment 5.Rproj b/Assignment 5.Rproj
new file mode 100644
index 0000000..8e3c2eb
--- /dev/null
+++ b/Assignment 5.Rproj
@@ -0,0 +1,13 @@
+Version: 1.0
+
+RestoreWorkspace: Default
+SaveWorkspace: Default
+AlwaysSaveHistory: Default
+
+EnableCodeIndexing: Yes
+UseSpacesForTab: Yes
+NumSpacesForTab: 2
+Encoding: UTF-8
+
+RnwWeave: Sweave
+LaTeX: pdfLaTeX

From 732410b5d9373cef4bb08499534265d26e1ae804 Mon Sep 17 00:00:00 2001
From: Sandy Jiaxin Zhang
Date: Fri, 15 Nov 2019 09:24:27 -0500
Subject: [PATCH 2/2] Assignment 5 submission

---
 Assignment 5.Rmd  |  34 +-
 Assignment-5.html | 879 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 905 insertions(+), 8 deletions(-)
 create mode 100644 Assignment-5.html

diff --git a/Assignment 5.Rmd b/Assignment 5.Rmd
index 8838dc9..fb7f3b7 100644
--- a/Assignment 5.Rmd
+++ b/Assignment 5.Rmd
@@ -8,7 +8,6 @@ For this assignment we will be using data from the Assistments Intelligent Tutor
 #Install & call libraries
 ```{r}
-install.packages("party", "rpart")
 library(rpart)
 library(party)
@@ -16,20 +15,21 @@ library(party)
 ## Part I
 ```{r}
-D1 <- 
+D1 <- read.csv("~/Desktop/HUDK4050/Assignment 5/intelligent_tutor.csv")
 ```
 
 ##Classification Tree
 First we will build a classification tree to predict which students ask a teacher for help, which start a new session, or which give up, based on whether or not the student completed a session (D1$complete) and whether or not they asked for hints (D1$hint.y).
 ```{r}
-c.tree <- rpart(action ~ hint.y + complete, method="class", data=D1) #Note the standard R notation for a formula: outcome ~ predictors
+c.tree <- rpart(action ~ hint.y + complete, method = "class", data = D1) #Note the standard R notation for a formula: outcome ~ predictors
 
 #Look at the error of this tree
 printcp(c.tree)
 
 #Plot the tree
-post(c.tree, file = "tree.ps", title = "Session Completion Action: 1 - Ask teacher, 2 - Start new session, 3 - Give up")
+post(c.tree, file = "", title = "Session Completion Action: 1 - Ask teacher, 2 - Start new session, 3 - Give up")
+
 ```
 
 ## Part II
@@ -41,24 +41,33 @@ We want to see if we can build a decision tree to help teachers decide which stu
 #Visualize our outcome variable "score"
 ```{r}
+score <- table(D1$score)
+
+barplot(score, xlab = "Score")
+
 ```
 
 #Create a categorical outcome variable based on student score to advise the teacher using an "ifelse" statement
 ```{r}
-D1$advice <- 
+D1$advice <- ifelse(D1$score > 0.70, 1, 0)
+
 ```
 
 #Build a decision tree that predicts "advice" based on how many problems students have answered before, the percentage of those problems they got correct and how many hints they required
 ```{r}
-score_ctree <- 
+advice_ctree <- rpart(advice ~ prior_prob_count + prior_percent_correct + hints, method="class", data = D1)
 ```
 
 #Plot tree
 ```{r}
+post(advice_ctree, file = "", title = "")
+summary(advice_ctree)
 ```
 
 Please interpret the tree: which two behaviors do you think the teacher should most closely pay attention to?
+# At nine splits the relative error is about 0.61 and the cross-validated error is about 0.76, so the tree generalizes only moderately. Would recommend that the teacher
+# pay closest attention to the number of problems a student has answered before (prior_prob_count) and the number of hints the student requested (hints), the two most important variables in the tree.
 
 #Test Tree
 Upload the data "intelligent_tutor_new.csv". This is a data set of a different sample of students doing the same problems in the same system. We can use the tree we built for the previous data set to try to predict the "advice" we should give the teacher about these new students.
@@ -66,16 +75,25 @@ Upload the data "intelligent_tutor_new.csv". This is a data set of a different sa
 ```{r}
 #Upload new data
-D2 <- 
+D2 <- read.csv("~/Desktop/HUDK4050/Assignment 5/intelligent_tutor_new.csv")
 
 #Generate predicted advice using the predict() command for new students based on tree generated from old students
-D2$prediction <- 
+predict(advice_ctree, D2)
+
+D2$prediction <- predict(advice_ctree, D2)
 ```
 
 ## Part III
 
 Compare the predicted advice with the actual advice that these students received. What is the difference between the observed and predicted results?
 
+```{r}
+summary(D2$prediction)
+```
+
+The two columns of predicted probabilities center in very different places (means of about 0.61 for class 0 versus 0.39 for class 1; medians 0.80 versus 0.20), so the tree assigns most of the new students to class 0; the spreads of the two columns are comparable.
 
 ### To Submit Your Assignment
 
 Please submit your assignment by first "knitting" your RMarkdown document into an html file and then commit, push and pull request both the RMarkdown file and the html file.

diff --git a/Assignment-5.html b/Assignment-5.html
new file mode 100644
index 0000000..fc22adf
--- /dev/null
+++ b/Assignment-5.html
@@ -0,0 +1,879 @@

Assignment 5 - Decision Trees

For this assignment we will be using data from the Assistments Intelligent Tutoring system. This system gives students hints based on how they perform on math problems.

+

#Install & call libraries

+
library(rpart)
+library(party)
+
## Loading required package: grid
+
## Loading required package: mvtnorm
+
## Loading required package: modeltools
+
## Loading required package: stats4
+
## Loading required package: strucchange
+
## Loading required package: zoo
+
## 
+## Attaching package: 'zoo'
+
## The following objects are masked from 'package:base':
+## 
+##     as.Date, as.Date.numeric
+
## Loading required package: sandwich
+
+

Part I

+
D1 <- read.csv("~/Desktop/HUDK4050/Assignment 5/intelligent_tutor.csv")
+

##Classification Tree
First we will build a classification tree to predict which students ask a teacher for help, which start a new session, or which give up, based on whether or not the student completed a session (D1$complete) and whether or not they asked for hints (D1$hint.y).

+
+c.tree <- rpart(action ~ hint.y + complete, method = "class", data = D1) #Note the standard R notation for a formula: outcome ~ predictors
+
+#Look at the error of this tree
+printcp(c.tree)
+
## 
+## Classification tree:
+## rpart(formula = action ~ hint.y + complete, data = D1, method = "class")
+## 
+## Variables actually used in tree construction:
+## [1] complete hint.y  
+## 
+## Root node error: 250/378 = 0.66138
+## 
+## n= 378 
+## 
+##      CP nsplit rel error xerror     xstd
+## 1 0.052      0     1.000  1.080 0.035132
+## 2 0.012      1     0.948  1.032 0.036200
+## 3 0.010      2     0.936  0.988 0.037008
+
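The cp table above can also guide pruning: the cross-validated error (xerror) is lowest here for the two-split tree, but if a simpler tree were wanted, a minimal sketch using rpart's own helpers (plotcp() and prune(); the 0.012 threshold is simply the CP value from row 2 above) would be:

```{r}
#Plot cross-validated error against tree size
plotcp(c.tree)

#Keep only splits whose complexity parameter exceeds the chosen threshold
c.tree.pruned <- prune(c.tree, cp = 0.012)
printcp(c.tree.pruned)
```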
#Plot the tree
+post(c.tree, file = "", title = "Session Completion Action: 1 - Ask teacher, 2 - Start new session, 3 - Give up")
+
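post() with file = "" draws the tree on the active graphics device. If a more readable plot is wanted, one option is the rpart.plot package; treat this as an optional sketch, since rpart.plot is an assumption and is not installed or loaded anywhere in this assignment:

```{r}
#Optional: rpart.plot is an extra package, not part of this assignment
library(rpart.plot)
rpart.plot(c.tree)
```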

## Part II

+

#Regression Tree

+

We want to see if we can build a decision tree to help teachers decide which students to follow up with, based on students’ performance in Assistments. We will create three groups (“teacher should intervene”, “teacher should monitor student progress” and “no action”) based on students’ previous use of the system and how many hints they use. To do this we will be building a decision tree using the “party” package. The party package builds decision trees based on a set of statistical stopping rules.

+

#Visualize our outcome variable “score”

+
score <- table(D1$score)
+
+barplot(score, xlab = "Score")
+
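Since score appears to be a proportion between 0 and 1 (the advice cutoff below is 0.70), a histogram may show its distribution more directly than a barplot of a frequency table; a minimal alternative:

```{r}
#Alternative view of the same variable
hist(D1$score, xlab = "Score", main = "Distribution of score")
```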

+

#Create a categorical outcome variable based on student score to advise the teacher using an “ifelse” statement

+
D1$advice <- ifelse(D1$score > 0.70, 1, 0)
+
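A quick check of the class balance this cutoff produces (it should match the root-node counts of 193 versus 185 in the tree summary below):

```{r}
table(D1$advice)
```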

#Build a decision tree that predicts “advice” based on how many problems students have answered before, the percentage of those problems they got correct and how many hints they required

+
advice_ctree <- rpart(advice ~ prior_prob_count + prior_percent_correct + hints, method="class", data = D1)
+
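The Part II introduction mentions the party package, while the model above is fit with rpart. As a cross-check, a sketch of the same model as a conditional-inference tree with party's ctree() (party is already loaded; the outcome is wrapped in factor() so ctree treats it as classification) might look like:

```{r}
#Conditional-inference tree: splits are chosen and stopped by
#permutation tests rather than a complexity-parameter table
advice_ctree2 <- ctree(factor(advice) ~ prior_prob_count + prior_percent_correct + hints, data = D1)
plot(advice_ctree2)
```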

#Plot tree

+
post(advice_ctree, file = "", title = "")
+

+
summary(advice_ctree)
+
## Call:
+## rpart(formula = advice ~ prior_prob_count + prior_percent_correct + 
+##     hints, data = D1, method = "class")
+##   n= 378 
+## 
+##           CP nsplit rel error    xerror       xstd
+## 1 0.23783784      0 1.0000000 1.0702703 0.05248694
+## 2 0.02432432      1 0.7621622 0.7837838 0.05110276
+## 3 0.01621622      5 0.6648649 0.8054054 0.05135624
+## 4 0.01351351      7 0.6324324 0.7729730 0.05096644
+## 5 0.01000000      9 0.6054054 0.7567568 0.05074986
+## 
+## Variable importance
+##      prior_prob_count                 hints prior_percent_correct 
+##                    38                    33                    28 
+## 
+## Node number 1: 378 observations,    complexity param=0.2378378
+##   predicted class=0  expected loss=0.489418  P(node) =1
+##     class counts:   193   185
+##    probabilities: 0.511 0.489 
+##   left son=2 (138 obs) right son=3 (240 obs)
+##   Primary splits:
+##       hints                 < 1.5       to the right, improve=13.968520, (1 missing)
+##       prior_percent_correct < 0.6069813 to the left,  improve= 9.281055, (0 missing)
+##       prior_prob_count      < 40        to the left,  improve= 8.811016, (0 missing)
+##   Surrogate splits:
+##       prior_prob_count      < 506       to the right, agree=0.663, adj=0.080, (1 split)
+##       prior_percent_correct < 0.9276555 to the right, agree=0.639, adj=0.014, (0 split)
+## 
+## Node number 2: 138 observations,    complexity param=0.02432432
+##   predicted class=0  expected loss=0.3115942  P(node) =0.3650794
+##     class counts:    95    43
+##    probabilities: 0.688 0.312 
+##   left son=4 (73 obs) right son=5 (65 obs)
+##   Primary splits:
+##       prior_percent_correct < 0.6055556 to the left,  improve=5.525343, (0 missing)
+##       prior_prob_count      < 41        to the left,  improve=4.659824, (0 missing)
+##       hints                 < 12.5      to the right, improve=4.528986, (0 missing)
+##   Surrogate splits:
+##       prior_prob_count < 0.5       to the left,  agree=0.819, adj=0.615, (0 split)
+##       hints            < 7.5       to the left,  agree=0.623, adj=0.200, (0 split)
+## 
+## Node number 3: 240 observations,    complexity param=0.02432432
+##   predicted class=1  expected loss=0.4083333  P(node) =0.6349206
+##     class counts:    98   142
+##    probabilities: 0.408 0.592 
+##   left son=6 (161 obs) right son=7 (79 obs)
+##   Primary splits:
+##       prior_prob_count      < 85.5      to the left,  improve=9.975630, (0 missing)
+##       prior_percent_correct < 0.7039435 to the left,  improve=8.503712, (0 missing)
+##       hints                 < 0.5       to the right, improve=0.599005, (1 missing)
+##   Surrogate splits:
+##       prior_percent_correct < 0.4463869 to the left,  agree=0.862, adj=0.582, (0 split)
+## 
+## Node number 4: 73 observations
+##   predicted class=0  expected loss=0.1780822  P(node) =0.1931217
+##     class counts:    60    13
+##    probabilities: 0.822 0.178 
+## 
+## Node number 5: 65 observations,    complexity param=0.02432432
+##   predicted class=0  expected loss=0.4615385  P(node) =0.1719577
+##     class counts:    35    30
+##    probabilities: 0.538 0.462 
+##   left son=10 (24 obs) right son=11 (41 obs)
+##   Primary splits:
+##       hints                 < 12.5      to the right, improve=4.878831, (0 missing)
+##       prior_prob_count      < 37        to the left,  improve=2.559265, (0 missing)
+##       prior_percent_correct < 0.8003451 to the right, improve=1.899529, (0 missing)
+##   Surrogate splits:
+##       prior_prob_count      < 350       to the right, agree=0.708, adj=0.208, (0 split)
+##       prior_percent_correct < 0.8512525 to the right, agree=0.662, adj=0.083, (0 split)
+## 
+## Node number 6: 161 observations,    complexity param=0.02432432
+##   predicted class=0  expected loss=0.4906832  P(node) =0.4259259
+##     class counts:    82    79
+##    probabilities: 0.509 0.491 
+##   left son=12 (37 obs) right son=13 (124 obs)
+##   Primary splits:
+##       prior_percent_correct < 0.3563218 to the right, improve=1.2118060, (0 missing)
+##       prior_prob_count      < 70        to the right, improve=0.9753177, (0 missing)
+##       hints                 < 0.5       to the right, improve=0.5013889, (1 missing)
+##   Surrogate splits:
+##       prior_prob_count < 0.5       to the right, agree=0.981, adj=0.919, (0 split)
+## 
+## Node number 7: 79 observations
+##   predicted class=1  expected loss=0.2025316  P(node) =0.2089947
+##     class counts:    16    63
+##    probabilities: 0.203 0.797 
+## 
+## Node number 10: 24 observations
+##   predicted class=0  expected loss=0.2083333  P(node) =0.06349206
+##     class counts:    19     5
+##    probabilities: 0.792 0.208 
+## 
+## Node number 11: 41 observations,    complexity param=0.01351351
+##   predicted class=1  expected loss=0.3902439  P(node) =0.1084656
+##     class counts:    16    25
+##    probabilities: 0.390 0.610 
+##   left son=22 (29 obs) right son=23 (12 obs)
+##   Primary splits:
+##       prior_prob_count      < 395.5     to the left,  improve=3.19610300, (0 missing)
+##       prior_percent_correct < 0.6779274 to the right, improve=1.79691700, (0 missing)
+##       hints                 < 6.5       to the left,  improve=0.08082257, (0 missing)
+## 
+## Node number 12: 37 observations,    complexity param=0.01621622
+##   predicted class=0  expected loss=0.3783784  P(node) =0.0978836
+##     class counts:    23    14
+##    probabilities: 0.622 0.378 
+##   left son=24 (15 obs) right son=25 (22 obs)
+##   Primary splits:
+##       prior_percent_correct < 0.6525862 to the left,  improve=1.605405, (0 missing)
+##       prior_prob_count      < 40        to the left,  improve=1.287758, (0 missing)
+##   Surrogate splits:
+##       prior_prob_count < 54        to the right, agree=0.649, adj=0.133, (0 split)
+## 
+## Node number 13: 124 observations
+##   predicted class=1  expected loss=0.4758065  P(node) =0.3280423
+##     class counts:    59    65
+##    probabilities: 0.476 0.524 
+## 
+## Node number 22: 29 observations,    complexity param=0.01351351
+##   predicted class=0  expected loss=0.4827586  P(node) =0.07671958
+##     class counts:    15    14
+##    probabilities: 0.517 0.483 
+##   left son=44 (17 obs) right son=45 (12 obs)
+##   Primary splits:
+##       prior_percent_correct < 0.7137862 to the right, improve=1.3847190, (0 missing)
+##       prior_prob_count      < 37        to the left,  improve=1.1970440, (0 missing)
+##       hints                 < 6.5       to the left,  improve=0.1462202, (0 missing)
+##   Surrogate splits:
+##       hints            < 8.5       to the left,  agree=0.724, adj=0.333, (0 split)
+##       prior_prob_count < 91        to the left,  agree=0.655, adj=0.167, (0 split)
+## 
+## Node number 23: 12 observations
+##   predicted class=1  expected loss=0.08333333  P(node) =0.03174603
+##     class counts:     1    11
+##    probabilities: 0.083 0.917 
+## 
+## Node number 24: 15 observations
+##   predicted class=0  expected loss=0.2  P(node) =0.03968254
+##     class counts:    12     3
+##    probabilities: 0.800 0.200 
+## 
+## Node number 25: 22 observations,    complexity param=0.01621622
+##   predicted class=0  expected loss=0.5  P(node) =0.05820106
+##     class counts:    11    11
+##    probabilities: 0.500 0.500 
+##   left son=50 (12 obs) right son=51 (10 obs)
+##   Primary splits:
+##       prior_prob_count      < 35        to the left,  improve=3.3000000, (0 missing)
+##       prior_percent_correct < 0.8042553 to the right, improve=0.9428571, (0 missing)
+##   Surrogate splits:
+##       prior_percent_correct < 0.7928571 to the right, agree=0.727, adj=0.4, (0 split)
+## 
+## Node number 44: 17 observations
+##   predicted class=0  expected loss=0.3529412  P(node) =0.04497354
+##     class counts:    11     6
+##    probabilities: 0.647 0.353 
+## 
+## Node number 45: 12 observations
+##   predicted class=1  expected loss=0.3333333  P(node) =0.03174603
+##     class counts:     4     8
+##    probabilities: 0.333 0.667 
+## 
+## Node number 50: 12 observations
+##   predicted class=0  expected loss=0.25  P(node) =0.03174603
+##     class counts:     9     3
+##    probabilities: 0.750 0.250 
+## 
+## Node number 51: 10 observations
+##   predicted class=1  expected loss=0.2  P(node) =0.02645503
+##     class counts:     2     8
+##    probabilities: 0.200 0.800
+

Please interpret the tree: which two behaviors do you think the teacher should most closely pay attention to?

At nine splits the relative error is about 0.61 and the cross-validated error is about 0.76, so the tree generalizes only moderately. I would recommend that the teacher pay closest attention to the number of problems a student has answered before (prior_prob_count) and the number of hints the student requested (hints), the two variables with the highest importance in the summary above.
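One way to check this reading against the data (a minimal sketch; predict() with no newdata argument returns fitted values for the training set):

```{r}
#Confusion matrix of fitted classes on the training data
train_pred <- predict(advice_ctree, type = "class")
table(observed = D1$advice, predicted = train_pred)
```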

+

#Test Tree
Upload the data “intelligent_tutor_new.csv”. This is a data set of a different sample of students doing the same problems in the same system. We can use the tree we built for the previous data set to try to predict the “advice” we should give the teacher about these new students.

+
#Upload new data
+
+D2 <- read.csv("~/Desktop/HUDK4050/Assignment 5/intelligent_tutor_new.csv")
+
+#Generate predicted advice using the predict() command for new students based on tree generated from old students
+
+predict(advice_ctree, D2)
+
##              0         1
+## 1   0.80000000 0.2000000
+## 2   0.82191781 0.1780822
+## 3   0.82191781 0.1780822
+## 4   0.33333333 0.6666667
+## 5   0.82191781 0.1780822
+## 6   0.82191781 0.1780822
+## 7   0.80000000 0.2000000
+## 8   0.47580645 0.5241935
+## 9   0.82191781 0.1780822
+## 10  0.80000000 0.2000000
+## 11  0.80000000 0.2000000
+## 12  0.82191781 0.1780822
+## 13  0.80000000 0.2000000
+## 14  0.20253165 0.7974684
+## 15  0.80000000 0.2000000
+## 16  0.33333333 0.6666667
+## 17  0.82191781 0.1780822
+## 18  0.80000000 0.2000000
+## 19  0.20253165 0.7974684
+## 20  0.47580645 0.5241935
+## 21  0.20253165 0.7974684
+## 22  0.20253165 0.7974684
+## 23  0.80000000 0.2000000
+## 24  0.75000000 0.2500000
+## 25  0.82191781 0.1780822
+## 26  0.82191781 0.1780822
+## 27  0.80000000 0.2000000
+## 28  0.82191781 0.1780822
+## 29  0.82191781 0.1780822
+## 30  0.80000000 0.2000000
+## 31  0.20253165 0.7974684
+## 32  0.20253165 0.7974684
+## 33  0.47580645 0.5241935
+## 34  0.20253165 0.7974684
+## 35  0.82191781 0.1780822
+## 36  0.82191781 0.1780822
+## 37  0.20253165 0.7974684
+## 38  0.82191781 0.1780822
+## 39  0.20253165 0.7974684
+## 40  0.82191781 0.1780822
+## 41  0.80000000 0.2000000
+## 42  0.82191781 0.1780822
+## 43  0.20253165 0.7974684
+## 44  0.80000000 0.2000000
+## 45  0.80000000 0.2000000
+## 46  0.20253165 0.7974684
+## 47  0.80000000 0.2000000
+## 48  0.20253165 0.7974684
+## 49  0.80000000 0.2000000
+## 50  0.80000000 0.2000000
+## 51  0.82191781 0.1780822
+## 52  0.80000000 0.2000000
+## 53  0.82191781 0.1780822
+## 54  0.80000000 0.2000000
+## 55  0.80000000 0.2000000
+## 56  0.82191781 0.1780822
+## 57  0.80000000 0.2000000
+## 58  0.80000000 0.2000000
+## 59  0.80000000 0.2000000
+## 60  0.80000000 0.2000000
+## 61  0.80000000 0.2000000
+## 62  0.80000000 0.2000000
+## 63  0.80000000 0.2000000
+## 64  0.82191781 0.1780822
+## 65  0.80000000 0.2000000
+## 66  0.82191781 0.1780822
+## 67  0.82191781 0.1780822
+## 68  0.20253165 0.7974684
+## 69  0.80000000 0.2000000
+## 70  0.82191781 0.1780822
+## 71  0.80000000 0.2000000
+## 72  0.20253165 0.7974684
+## 73  0.82191781 0.1780822
+## 74  0.80000000 0.2000000
+## 75  0.80000000 0.2000000
+## 76  0.80000000 0.2000000
+## 77  0.20253165 0.7974684
+## 78  0.80000000 0.2000000
+## 79  0.20253165 0.7974684
+## 80  0.80000000 0.2000000
+## 81  0.82191781 0.1780822
+## 82  0.20253165 0.7974684
+## 83  0.82191781 0.1780822
+## 84  0.80000000 0.2000000
+## 85  0.82191781 0.1780822
+## 86  0.80000000 0.2000000
+## 87  0.82191781 0.1780822
+## 88  0.20253165 0.7974684
+## 89  0.82191781 0.1780822
+## 90  0.47580645 0.5241935
+## 91  0.20253165 0.7974684
+## 92  0.20253165 0.7974684
+## 93  0.82191781 0.1780822
+## 94  0.82191781 0.1780822
+## 95  0.20253165 0.7974684
+## 96  0.20253165 0.7974684
+## 97  0.80000000 0.2000000
+## 98  0.82191781 0.1780822
+## 99  0.82191781 0.1780822
+## 100 0.80000000 0.2000000
+## 101 0.20253165 0.7974684
+## 102 0.80000000 0.2000000
+## 103 0.82191781 0.1780822
+## 104 0.80000000 0.2000000
+## 105 0.80000000 0.2000000
+## 106 0.82191781 0.1780822
+## 107 0.80000000 0.2000000
+## 108 0.80000000 0.2000000
+## 109 0.08333333 0.9166667
+## 110 0.82191781 0.1780822
+## 111 0.82191781 0.1780822
+## 112 0.20253165 0.7974684
+## 113 0.80000000 0.2000000
+## 114 0.47580645 0.5241935
+## 115 0.64705882 0.3529412
+## 116 0.75000000 0.2500000
+## 117 0.20253165 0.7974684
+## 118 0.82191781 0.1780822
+## 119 0.20253165 0.7974684
+## 120 0.80000000 0.2000000
+## 121 0.20253165 0.7974684
+## 122 0.20253165 0.7974684
+## 123 0.80000000 0.2000000
+## 124 0.80000000 0.2000000
+## 125 0.80000000 0.2000000
+## 126 0.82191781 0.1780822
+## 127 0.79166667 0.2083333
+## 128 0.82191781 0.1780822
+## 129 0.80000000 0.2000000
+## 130 0.20253165 0.7974684
+## 131 0.82191781 0.1780822
+## 132 0.20253165 0.7974684
+## 133 0.82191781 0.1780822
+## 134 0.82191781 0.1780822
+## 135 0.82191781 0.1780822
+## 136 0.82191781 0.1780822
+## 137 0.20253165 0.7974684
+## 138 0.82191781 0.1780822
+## 139 0.82191781 0.1780822
+## 140 0.33333333 0.6666667
+## 141 0.20253165 0.7974684
+## 142 0.20253165 0.7974684
+## 143 0.82191781 0.1780822
+## 144 0.80000000 0.2000000
+## 145 0.20253165 0.7974684
+## 146 0.82191781 0.1780822
+## 147 0.20253165 0.7974684
+## 148 0.20253165 0.7974684
+## 149 0.20253165 0.7974684
+## 150 0.20253165 0.7974684
+## 151 0.80000000 0.2000000
+## 152 0.47580645 0.5241935
+## 153 0.08333333 0.9166667
+## 154 0.82191781 0.1780822
+## 155 0.82191781 0.1780822
+## 156 0.82191781 0.1780822
+## 157 0.80000000 0.2000000
+## 158 0.80000000 0.2000000
+## 159 0.80000000 0.2000000
+## 160 0.80000000 0.2000000
+## 161 0.80000000 0.2000000
+## 162 0.82191781 0.1780822
+## 163 0.20253165 0.7974684
+## 164 0.80000000 0.2000000
+## 165 0.82191781 0.1780822
+## 166 0.82191781 0.1780822
+## 167 0.20253165 0.7974684
+## 168 0.33333333 0.6666667
+## 169 0.20253165 0.7974684
+## 170 0.20253165 0.7974684
+## 171 0.20253165 0.7974684
+## 172 0.20253165 0.7974684
+## 173 0.82191781 0.1780822
+## 174 0.47580645 0.5241935
+## 175 0.20253165 0.7974684
+## 176 0.80000000 0.2000000
+## 177 0.20253165 0.7974684
+## 178 0.80000000 0.2000000
+## 179 0.80000000 0.2000000
+## 180 0.80000000 0.2000000
+## 181 0.80000000 0.2000000
+## 182 0.80000000 0.2000000
+## 183 0.80000000 0.2000000
+## 184 0.80000000 0.2000000
+## 185 0.82191781 0.1780822
+## 186 0.20253165 0.7974684
+## 187 0.20253165 0.7974684
+## 188 0.20253165 0.7974684
+## 189 0.20253165 0.7974684
+## 190 0.20000000 0.8000000
+## 191 0.47580645 0.5241935
+## 192 0.80000000 0.2000000
+## 193 0.20253165 0.7974684
+## 194 0.82191781 0.1780822
+## 195 0.20253165 0.7974684
+## 196 0.20253165 0.7974684
+## 197 0.82191781 0.1780822
+## 198 0.82191781 0.1780822
+## 199 0.82191781 0.1780822
+## 200 0.79166667 0.2083333
+
D2$prediction <- predict(advice_ctree, D2)
+
+
+
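Note that predict() on an rpart classification tree returns a two-column matrix of class probabilities by default, which is what D2$prediction stores above. To get a single 0/1 advice label per student instead, type = "class" can be used:

```{r}
#One predicted label per student rather than two probability columns
D2$advice_pred <- predict(advice_ctree, D2, type = "class")
```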

Part III

+

Compare the predicted advice with the actual advice that these students received. What is the difference between the observed and predicted results?

+
summary(D2$prediction)
+
##        0                 1         
+##  Min.   :0.08333   Min.   :0.1781  
+##  1st Qu.:0.20253   1st Qu.:0.1781  
+##  Median :0.80000   Median :0.2000  
+##  Mean   :0.61454   Mean   :0.3855  
+##  3rd Qu.:0.82192   3rd Qu.:0.7975  
+##  Max.   :0.82192   Max.   :0.9167
+

The two columns of predicted probabilities center in very different places (means of about 0.61 for class 0 versus 0.39 for class 1; medians 0.80 versus 0.20), so the tree assigns most of the new students to class 0; the spreads of the two columns are comparable.
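A more direct answer to Part III would cross-tabulate observed against predicted advice. The sketch below assumes the new file contains the same score column as the old one (an assumption; it is not shown above), so observed advice can be derived with the same 0.70 cutoff used in Part II:

```{r}
#Assumption: D2$score exists, as in D1
D2$advice <- ifelse(D2$score > 0.70, 1, 0)
table(observed = D2$advice, predicted = predict(advice_ctree, D2, type = "class"))
```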

+
+

To Submit Your Assignment

+

Please submit your assignment by first “knitting” your RMarkdown document into an html file and then commit, push and pull request both the RMarkdown file and the html file.
