
Commit

Merge pull request openedx#398 from MITx/kimth/partialgrading
Partial scoring
cpennington committed Aug 13, 2012
2 parents 4ea6620 + d79a291 commit 6173af8
Showing 4 changed files with 27 additions and 21 deletions.
13 changes: 1 addition & 12 deletions common/lib/capa/capa/capa_problem.py
@@ -154,21 +154,10 @@ def get_state(self):
def get_max_score(self):
'''
Return maximum score for this problem.
We do this by counting the number of answers available for each question
in the problem. If the Response for a question has a get_max_score() method
then we call that and add its return value to the count. That can be
used to give complex problems (eg programming questions) multiple points.
'''
maxscore = 0
for response, responder in self.responders.iteritems():
if hasattr(responder, 'get_max_score'):
try:
maxscore += responder.get_max_score()
except Exception:
log.debug('responder %s failed to properly return from get_max_score()' % responder) # FIXME
raise
else:
maxscore += len(self.responder_answers[response])
maxscore += responder.get_max_score()
return maxscore

def get_score(self):
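With this change, the problem-level maximum is simply the sum of each responder's get_max_score(), which the base Response class now always provides. A minimal sketch of that summation, using stand-in responder objects rather than the real Response classes:

# Stand-in responders for illustration only; the real objects are
# LoncapaResponse subclasses built from the problem XML.
class StubResponder(object):
    def __init__(self, points):
        self.points = points

    def get_max_score(self):
        return self.points

responders = {'response_1': StubResponder(1), 'response_2': StubResponder(4)}
maxscore = sum(responder.get_max_score() for responder in responders.values())
print(maxscore)  # 5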
30 changes: 23 additions & 7 deletions common/lib/capa/capa/responsetypes.py
@@ -75,7 +75,6 @@ class LoncapaResponse(object):
In addition, these methods are optional:
- get_max_score : if defined, this is called to obtain the maximum score possible for this question
- setup_response : find and note the answer input field IDs for the response; called by __init__
- check_hint_condition : check to see if the student's answers satisfy a particular condition for a hint to be displayed
- render_html : render this Response as HTML (must return XHTML compliant string)
@@ -134,6 +133,11 @@ def __init__(self, xml, inputfields, context, system=None):
if self.max_inputfields == 1:
self.answer_id = self.answer_ids[0] # for convenience

self.maxpoints = dict()
for inputfield in self.inputfields:
maxpoints = inputfield.get('points','1') # By default, each answerfield is worth 1 point
self.maxpoints.update({inputfield.get('id'): int(maxpoints)})

self.default_answer_map = {} # dict for default answer map (provided in input elements)
for entry in self.inputfields:
answer = entry.get('correct_answer')
@@ -143,6 +147,12 @@
if hasattr(self, 'setup_response'):
self.setup_response()

def get_max_score(self):
'''
Return the total maximum points of all answer fields under this Response
'''
return sum(self.maxpoints.values())

def render_html(self, renderer):
'''
Return XHTML Element tree representation of this Response.
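For context, a rough sketch (not part of the diff) of how the new per-field bookkeeping behaves: each input field may carry a points attribute, defaulting to 1, and get_max_score() sums those values across the Response's fields. The element structure, ids, and point values below are made up for illustration; the real input fields come from the parsed problem XML.

from lxml import etree

# Hypothetical input fields; ids and point values are illustrative only.
inputfields = [
    etree.fromstring('<textbox id="answer_1_1" points="3"/>'),
    etree.fromstring('<textbox id="answer_1_2"/>'),  # no 'points' attribute -> worth 1
]

maxpoints = dict()
for inputfield in inputfields:
    maxpoints[inputfield.get('id')] = int(inputfield.get('points', '1'))

# maxpoints is now {'answer_1_1': 3, 'answer_1_2': 1}, so get_max_score()
# would return 4 for this Response.
print(sum(maxpoints.values()))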
@@ -1067,7 +1077,10 @@ def get_score(self, student_answers):
(err, self.answer_id, convert_files_to_filenames(student_answers)))
raise Exception(err)

self.context.update({'submission': unicode(submission)})
if is_file(submission):
self.context.update({'submission': submission.name})
else:
self.context.update({'submission': submission})

# Prepare xqueue request
#------------------------------------------------------------
@@ -1114,21 +1127,24 @@ def get_score(self, student_answers):

def update_score(self, score_msg, oldcmap, queuekey):

(valid_score_msg, correct, score, msg) = self._parse_score_msg(score_msg)
(valid_score_msg, correct, points, msg) = self._parse_score_msg(score_msg)
if not valid_score_msg:
oldcmap.set(self.answer_id, msg='Error: Invalid grader reply.')
return oldcmap

correctness = 'incorrect'
if correct:
correctness = 'correct'
correctness = 'correct' if correct else 'incorrect'

self.context['correct'] = correctness # TODO: Find out how this is used elsewhere, if any

# Replace 'oldcmap' with new grading results if queuekey matches.
# If queuekey does not match, we keep waiting for the score_msg whose key actually matches
if oldcmap.is_right_queuekey(self.answer_id, queuekey):
oldcmap.set(self.answer_id, correctness=correctness, msg=msg.replace('&nbsp;', '&#160;'), queuekey=None) # Queuekey is consumed
# Sanity check on returned points
if points < 0:
points = 0
elif points > self.maxpoints[self.answer_id]:
points = self.maxpoints[self.answer_id]
oldcmap.set(self.answer_id, npoints=points, correctness=correctness, msg=msg.replace('&nbsp;', '&#160;'), queuekey=None) # Queuekey is consumed
else:
log.debug('CodeResponse: queuekey %s does not match for answer_id=%s.' % (queuekey, self.answer_id))

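The sanity check added to update_score clamps whatever point value the external grader returns to the range [0, maxpoints] for that answer field. A worked example with made-up numbers, assuming the field is worth 3 points:

# Assumed per-field maximum; in the real code this is self.maxpoints[self.answer_id].
max_field_points = 3

for returned in (-2, 1, 7):
    points = returned
    if points < 0:
        points = 0
    elif points > max_field_points:
        points = max_field_points
    print('%s -> %s' % (returned, points))  # prints -2 -> 0, 1 -> 1, 7 -> 3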
2 changes: 1 addition & 1 deletion common/lib/xmodule/xmodule/capa_module.py
@@ -464,7 +464,7 @@ def check_problem(self, get):
return {'success': msg}
log.exception("Error in capa_module problem checking")
raise Exception("error in capa_module")

self.attempts = self.attempts + 1
self.lcp.done = True

3 changes: 2 additions & 1 deletion common/lib/xmodule/xmodule/tests/__init__.py
@@ -325,7 +325,8 @@ def test_update_score(self):

new_cmap = CorrectMap()
new_cmap.update(old_cmap)
new_cmap.set(answer_id=answer_ids[i], correctness=correctness, msg='MESSAGE', queuekey=None)
npoints = 1 if correctness=='correct' else 0
new_cmap.set(answer_id=answer_ids[i], npoints=npoints, correctness=correctness, msg='MESSAGE', queuekey=None)

test_lcp.update_score(xserver_msgs[correctness], queuekey=1000 + i)
self.assertEquals(test_lcp.correct_map.get_dict(), new_cmap.get_dict())
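The updated test now expects the CorrectMap entry for the graded field to carry an npoints value alongside its correctness. Roughly, with an illustrative field id and any other bookkeeping keys omitted, the expected entry looks like:

# Illustrative shape only -- not the literal output of CorrectMap.get_dict().
expected_entry = {
    'answer_1_1': {
        'correctness': 'correct',
        'npoints': 1,        # new in this commit: points stored per field
        'msg': 'MESSAGE',
        'queuekey': None,
    },
}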
