diff --git a/devsite/devsite/seed.py b/devsite/devsite/seed.py
index d1931646..3b386021 100644
--- a/devsite/devsite/seed.py
+++ b/devsite/devsite/seed.py
@@ -20,7 +20,11 @@ from student.models import CourseAccessRole, CourseEnrollment, UserProfile
 from figures.compat import RELEASE_LINE, GeneratedCertificate
-from figures.models import CourseDailyMetrics, SiteDailyMetrics
+from figures.models import (
+    CourseDailyMetrics,
+    LearnerCourseGradeMetrics,
+    SiteDailyMetrics,
+)
 from figures.helpers import as_course_key, as_datetime, days_from, prev_day
 from figures.pipeline import course_daily_metrics as pipeline_cdm
 from figures.pipeline import site_daily_metrics as pipeline_sdm
@@ -44,6 +48,10 @@ def get_site():
     return Site.objects.first()
 
 
+def today():
+    return datetime.datetime.utcnow().date()
+
+
 def days_back_list(days_back):
     end_date = prev_day(datetime.datetime.now())
     start_date = days_from(end_date, abs(days_back) * -1)
@@ -85,6 +93,8 @@ def seed_course_overviews(data=None):
             display_org_with_default=rec['org'],
             number=rec['number'],
             created=as_datetime(rec['created']).replace(tzinfo=utc),
+            start=as_datetime(rec['enrollment_start']).replace(tzinfo=utc),
+            end=as_datetime(rec['enrollment_end']).replace(tzinfo=utc),
             enrollment_start=as_datetime(rec['enrollment_start']).replace(tzinfo=utc),
             enrollment_end=as_datetime(rec['enrollment_end']).replace(tzinfo=utc),
         )
@@ -265,6 +275,49 @@ def seed_site_daily_metrics(data=None):
             date_for=dt,
             force_update=True)
 
+def seed_lcgm_for_course(**_kwargs):
+    """Quick hack to create a number of LCGM records
+    An improvement is to add a devsite model for "synthetic course policy". This
+    model specifies course info: points possible, sections possible, number of
+    learners or learner range, learner completion/progress curve
+    """
+    date_for = _kwargs.get('date_for', datetime.datetime.utcnow().date())
+    site = _kwargs.get('site', get_site())
+    course_id = _kwargs.get('course_id')
+    points_possible = _kwargs.get('points_possible', 20)
+    points_earned = _kwargs.get('points_earned', 10)
+    sections_possible = _kwargs.get('sections_possible', 10)
+    sections_worked = _kwargs.get('sections_worked', 5)
+    for ce in CourseEnrollment.objects.filter(course_id=as_course_key(course_id)):
+        LearnerCourseGradeMetrics.objects.update_or_create(
+            site=site,
+            user=ce.user,
+            course_id=str(course_id),
+            date_for=date_for,
+            defaults=dict(
+                points_possible=points_possible,
+                points_earned=points_earned,
+                sections_possible=sections_possible,
+                sections_worked=sections_worked
+            )
+        )
+
+
+def seed_lcgm_all():
+    for co in CourseOverview.objects.all():
+        print('Seeding LCGM for course {}'.format(str(co.id)))
+        for i, date_for in enumerate(days_back_list(10)):
+            seed_args = dict(
+                date_for=date_for,
+                course_id=str(co.id),
+                points_possible=100,
+                points_earned=i*5,
+                sections_possible=20,
+                sections_worked=i*2,
+            )
+            seed_lcgm_for_course(**seed_args)
+
+
 def wipe():
     clear_non_admin_users()
     CourseEnrollment.objects.all().delete()
@@ -272,6 +325,7 @@ def wipe():
     CourseOverview.objects.all().delete()
     CourseDailyMetrics.objects.all().delete()
     SiteDailyMetrics.objects.all().delete()
+    LearnerCourseGradeMetrics.objects.all().delete()
 
 
 def seed_all():
diff --git a/figures/filters.py b/figures/filters.py
index be6b1747..d38d730a 100644
--- a/figures/filters.py
+++ b/figures/filters.py
@@ -9,10 +9,15 @@
 See the following for breaking changes when upgrading to Django Filter 1.0:
     https://django-filter.readthedocs.io/en/master/guide/migration.html#migrating-to-1-0
+
+TODO: Rename classes so they either
all end with "Filter" or "FilterSet" then + update the test class names in "tests/test_filters.py" to match. """ from django.contrib.auth import get_user_model from django.contrib.sites.models import Site +from django.db.models import F + import django_filters from opaque_keys.edx.keys import CourseKey @@ -26,16 +31,15 @@ CourseDailyMetrics, SiteDailyMetrics, CourseMauMetrics, + LearnerCourseGradeMetrics, SiteMauMetrics, ) def char_method_filter(method): """ - method is the method name string - Check if old style first - - Pre v1: + "method" is the method name string + First check for old style (pre version 1 Django Filters) """ if hasattr(django_filters, 'MethodFilter'): return django_filters.MethodFilter(action=method) # pylint: disable=no-member @@ -43,6 +47,17 @@ def char_method_filter(method): return django_filters.CharFilter(method=method) +def boolean_method_filter(method): + """ + "method" is the method name string + First check for old style (pre version 1 Django Filters) + """ + if hasattr(django_filters, 'MethodFilter'): + return django_filters.MethodFilter(action=method) # pylint: disable=no-member + else: + return django_filters.BooleanFilter(method=method) + + class CourseOverviewFilter(django_filters.FilterSet): '''Provides filtering for CourseOverview model objects @@ -101,6 +116,76 @@ class Meta: fields = ['course_id', 'user_id', 'is_active', ] +class EnrollmentMetricsFilter(CourseEnrollmentFilter): + """Filter query params for enrollment metrics + + Consider making 'user_ids' and 'course_ids' be mixins for `user` foreign key + and 'course_id' respectively. Perhaps a class decorator if there's some + unforseen issue with doing a mixin for each + + Filters + + "course_ids" filters on a set of comma delimited course id strings + "user_ids" filters on a set of comma delimited integer user ids + "only_completed" shows only completed records. Django Filter 1.0.4 appears + to only support capitalized "True" as the value in the query string + + The "only_completed" filter is subject to change. 
We want to be able to
+    filter on: "hide completed", "show only completed", "show everything"
+    So we may go with a "choice field"
+
+    Use ``date_for`` for retrieving a specific date
+    Use ``date_0`` and ``date_1`` for retrieving values in a date range, inclusive
+    each of these can be used singly to get:
+    * ``date_0`` to get records greater than or equal
+    * ``date_1`` to get records less than or equal
+
+    TODO: Add 'is_active' filter - need to find matches in CourseEnrollment
+    """
+    course_ids = char_method_filter(method='filter_course_ids')
+    user_ids = char_method_filter(method='filter_user_ids')
+    date = django_filters.DateFromToRangeFilter(name='date_for')
+    only_completed = boolean_method_filter(method='filter_only_completed')
+    exclude_completed = boolean_method_filter(method='filter_exclude_completed')
+
+    class Meta:
+        """
+        Allow all field and related filtering except for "site"
+        """
+        model = LearnerCourseGradeMetrics
+        exclude = ['site']
+
+    def filter_course_ids(self, queryset, name, value):  # pylint: disable=unused-argument
+        course_ids = [cid.replace(' ', '+') for cid in value.split(',')]
+        return queryset.filter(course_id__in=course_ids)
+
+    def filter_user_ids(self, queryset, name, value):  # pylint: disable=unused-argument
+        """Filter on a comma-delimited list of user ids; non-integer values are ignored
+        """
+        user_ids = [user_id for user_id in value.split(',') if user_id.isdigit()]
+        return queryset.filter(user_id__in=user_ids)
+
+    def filter_only_completed(self, queryset, name, value):  # pylint: disable=unused-argument
+        """
+        The "value" parameter is either `True` or `False`
+        """
+        if value is True:
+            return queryset.filter(sections_possible__gt=0,
+                                   sections_worked=F('sections_possible'))
+        else:
+            return queryset
+
+    def filter_exclude_completed(self, queryset, name, value):  # pylint: disable=unused-argument
+        """
+        The "value" parameter is either `True` or `False`
+        """
+        if value is True:
+            # This is a hack until we add a `completed` field to LCGM
+            return queryset.filter(sections_worked__lt=F('sections_possible'))
+        else:
+            return queryset
+
+
 class UserFilterSet(django_filters.FilterSet):
     '''Provides filtering for User model objects
diff --git a/figures/models.py b/figures/models.py
index 69a26d82..c8108e93 100644
--- a/figures/models.py
+++ b/figures/models.py
@@ -1,5 +1,6 @@
 """Defines Figures models
 
+TODO: Create a base "SiteModel" or a "SiteModelMixin"
 """
 from datetime import date
@@ -7,6 +8,7 @@
 from django.contrib.sites.models import Site
 from django.core.validators import MaxValueValidator, MinValueValidator
 from django.db import models
+from django.db.models import F
 from django.utils.encoding import python_2_unicode_compatible
 
 from jsonfield import JSONField
@@ -186,7 +188,70 @@ class LearnerCourseGradeMetricsManager(models.Manager):
     """
     def most_recent_for_learner_course(self, user, course_id):
         queryset = self.filter(user=user, course_id=str(course_id))
-        return queryset.order_by('-date_for').first()  # pylint: disable=no-member
+        if queryset:
+            return queryset.order_by('-date_for')[0]  # pylint: disable=E1101
+        else:
+            return None
+
+    def most_recent_for_course(self, course_id):
+        statement = """ \
+            SELECT id, user_id, course_id, MAX(date_for)
+            FROM figures_learnercoursegrademetrics lcgm
+            WHERE course_id = '{course_id}'
+            GROUP BY user_id, course_id
+        """
+        return self.raw(statement.format(course_id=str(course_id)))
+
+    def completed_for_site(self, site, **_kwargs):
+        """Return LCGM records for completed enrollments on the site
+        Initial filters are a list of users and a list of course ids
+
+        User IDs can be filtered by passing
`user_ids=` a list of user ids
+
+        Course IDs can be filtered by passing `course_ids=` a list of course ids
+
+        The related `completed_ids_for_site` method returns a distinct values
+        QuerySet of dicts with the keys 'course_id' and 'user_id'
+
+        We will consider adding a "completed" field to the model for faster
+        filtering, since we can index on the field. However, we need to evaluate
+        the additional storage need
+        """
+        qs = self.filter(site=site,
+                         sections_possible__gt=0,
+                         sections_worked=F('sections_possible'))
+
+        # Build out the filter. Note, we don't check if the var is iterable;
+        # we let it fail if invalid values are passed in
+        filter_args = dict()
+        user_ids = _kwargs.get('user_ids', None)
+        if user_ids:
+            filter_args['user_id__in'] = user_ids
+        course_ids = _kwargs.get('course_ids', None)
+        if course_ids:
+            # We do the string casting in case course_ids are CourseKey instances
+            filter_args['course_id__in'] = [str(key) for key in course_ids]
+        if filter_args:
+            qs = qs.filter(**filter_args)  # pylint: disable=E1101
+        return qs
+
+    def completed_ids_for_site(self, site, **_kwargs):
+        qs = self.completed_for_site(site, **_kwargs)
+        return qs.values('course_id', 'user_id').distinct()
+
+    def completed_raw_for_site(self, site, **_kwargs):
+        """Experimental
+        """
+        statement = """ \
+            SELECT id, user_id, course_id, MAX(date_for)
+            FROM figures_learnercoursegrademetrics lcgm
+            WHERE site_id = {site} AND
+                  lcgm.sections_possible > 0 AND
+                  lcgm.sections_worked = lcgm.sections_possible
+            GROUP BY user_id, course_id
+            ORDER BY user_id, course_id
+        """
+        return self.raw(statement.format(site=site.id))
 
 
 @python_2_unicode_compatible
@@ -213,6 +278,12 @@ class LearnerCourseGradeMetrics(TimeStampedModel):
     But for now, using float, as I'm not entirely sure how many decimal
     places are actually needed and edx-platform uses FloatField in its
     grades models
+
+    TODO: Add fields
+        `is_active` - get the 'is_active' value from the enrollment at the time
+        this record is created
+        `completed` - This lets us filter on a table column instead of calculating it
+    TODO: Add index on 'course_id', 'date_for', 'completed'
     """
     site = models.ForeignKey(Site)
     date_for = models.DateField()
@@ -227,6 +298,10 @@ class LearnerCourseGradeMetrics(TimeStampedModel):
     objects = LearnerCourseGradeMetricsManager()
 
     class Meta:
+        """
+        Do we want to add 'site' to the `unique_together` set?
+ Open edX Course IDs are globally unique, so it is not required + """ unique_together = ('user', 'course_id', 'date_for',) ordering = ('date_for', 'user__username', 'course_id',) @@ -260,6 +335,10 @@ def progress_details(self): sections_possible=self.sections_possible, ) + @property + def completed(self): + return self.sections_worked > 0 and self.sections_worked == self.sections_possible + @python_2_unicode_compatible class PipelineError(TimeStampedModel): diff --git a/figures/serializers.py b/figures/serializers.py index 5d4ce394..2cfc7ff2 100644 --- a/figures/serializers.py +++ b/figures/serializers.py @@ -787,3 +787,26 @@ class CourseMauLiveMetricsSerializer(serializers.Serializer): count = serializers.IntegerField() course_id = serializers.CharField() domain = serializers.CharField() + + +class EnrollmentMetricsSerializer(serializers.ModelSerializer): + """Serializer for LearnerCourseGradeMetrics + """ + user = UserIndexSerializer(read_only=True) + progress_percent = serializers.DecimalField(max_digits=3, + decimal_places=2, + min_value=0.00, + max_value=1.00) + + class Meta: + model = LearnerCourseGradeMetrics + editable = False + fields = ('id', 'user', 'course_id', 'date_for', 'completed', + 'points_earned', 'points_possible', + 'sections_worked', 'sections_possible', + 'progress_percent') + + +class CourseCompletedSerializer(serializers.Serializer): + course_id = serializers.CharField() + user_id = serializers.IntegerField() diff --git a/figures/urls.py b/figures/urls.py index 71fd3810..af92fe7d 100644 --- a/figures/urls.py +++ b/figures/urls.py @@ -100,6 +100,12 @@ views.UserIndexViewSet, base_name='user-index') +# Experimental + +router.register( + r'enrollment-metrics', + views.EnrollmentMetricsViewSet, + base_name='enrollment-metrics') urlpatterns = [ diff --git a/figures/views.py b/figures/views.py index 1229e972..b0381904 100644 --- a/figures/views.py +++ b/figures/views.py @@ -40,6 +40,7 @@ CourseEnrollmentFilter, CourseMauMetricsFilter, CourseOverviewFilter, + EnrollmentMetricsFilter, SiteDailyMetricsFilter, SiteFilterSet, SiteMauMetricsFilter, @@ -48,16 +49,19 @@ from figures.models import ( CourseDailyMetrics, CourseMauMetrics, + LearnerCourseGradeMetrics, SiteDailyMetrics, SiteMauMetrics, ) from figures.serializers import ( + CourseCompletedSerializer, CourseDailyMetricsSerializer, CourseDetailsSerializer, CourseEnrollmentSerializer, CourseIndexSerializer, CourseMauMetricsSerializer, CourseMauLiveMetricsSerializer, + EnrollmentMetricsSerializer, GeneralCourseDataSerializer, LearnerDetailsSerializer, SiteDailyMetricsSerializer, @@ -394,6 +398,64 @@ def get_serializer_context(self): return context +class EnrollmentMetricsViewSet(CommonAuthMixin, viewsets.ReadOnlyModelViewSet): + """Initial viewset for enrollment metrics + + Initial purpose to serve up course progress and completion data + + Because we need to test performance, we want to keep completion data + isolated from other API data + """ + model = LearnerCourseGradeMetrics + pagination_class = FiguresLimitOffsetPagination + serializer_class = EnrollmentMetricsSerializer + filter_backends = (DjangoFilterBackend, ) + # Assess updating to "EnrollmentFilterSet" to filter on list of courses and + # or users, so we can use it to build a filterable table of users, courses + filter_class = EnrollmentMetricsFilter + + def get_queryset(self): + site = django.contrib.sites.shortcuts.get_current_site(self.request) + queryset = LearnerCourseGradeMetrics.objects.filter(site=site) + return queryset + + @list_route() + def 
completed_ids(self, request): + """Return distinct course id/user id pairs for completed enrollments + + Endpoint is `/figures/api/enrollment-metrics/completed_ids/` + + The default router does not support hyphen in the custom action, so + we need to use the underscore until we implement a custom router + """ + site = django.contrib.sites.shortcuts.get_current_site(self.request) + qs = self.model.objects.completed_ids_for_site(site=site) + page = self.paginate_queryset(qs) + if page is not None: + serializer = CourseCompletedSerializer(page, many=True) + return self.get_paginated_response(serializer.data) + serializer = CourseCompletedSerializer(qs, many=True) + return Response(serializer.data) + + @list_route() + def completed(self, request): + """Experimental endpoint to return completed LCGM records + + This is the same as `/figures/api/enrollment-metrics/?only_completed=True + + Return matching LearnerCourseGradeMetric rows that have completed + enrollments + """ + site = django.contrib.sites.shortcuts.get_current_site(self.request) + qs = self.model.objects.completed_for_site(site=site) + page = self.paginate_queryset(qs) + if page is not None: + serializer = self.get_serializer(page, many=True) + return self.get_paginated_response(serializer.data) + serializer = self.get_serializer(qs, many=True) + return Response(serializer.data) + + class CourseMonthlyMetricsViewSet(CommonAuthMixin, viewsets.ViewSet): """ @@ -635,11 +697,6 @@ def active_users(self, request): return Response(dict(active_users=active_users)) -# -# MAU metrics views -# - - class CourseMauLiveMetricsViewSet(CommonAuthMixin, viewsets.GenericViewSet): serializer_class = CourseMauLiveMetricsSerializer diff --git a/tests/models/test_learner_course_grade_metrics_model.py b/tests/models/test_learner_course_grade_metrics_model.py index 83731858..fe094d86 100644 --- a/tests/models/test_learner_course_grade_metrics_model.py +++ b/tests/models/test_learner_course_grade_metrics_model.py @@ -1,13 +1,10 @@ """Tests Figures GradesCache model - """ - import datetime import pytest from django.contrib.sites.models import Site -from django.utils.timezone import utc - +from django.db.models.query import QuerySet from figures.helpers import as_date from figures.models import LearnerCourseGradeMetrics @@ -15,10 +12,64 @@ CourseEnrollmentFactory, CourseOverviewFactory, LearnerCourseGradeMetricsFactory, - UserFactory + SiteFactory, + UserFactory, + COURSE_ID_STR_TEMPLATE ) +def create_sample_completed_lcgm(site, user_count, course_count): + """Generate test data + + TODO: Make this a parametrized fixture + https://docs.pytest.org/en/3.1.3/example/parametrize.html + + We don't create CourseEnrollment objects because we don't need them as + Figures models try to rely on the content and context of the data in the + LMS and not the LMS models specifically + """ + users = [UserFactory() for i in range(user_count)] + # We need just the course_ids + course_ids = [COURSE_ID_STR_TEMPLATE.format(i) for i in range(course_count)] + + # Two records for each enrollment, one shows not complete, one shows complete + lcgm_data = [ + dict( + date_for='2020-04-01', + points_possible=40, + points_earned=40, + sections_possible=5, + sections_worked=4), + dict( + date_for='2020-05-05', + points_possible=50, + points_earned=50, + sections_possible=5, + sections_worked=5) + ] + lcgm_list = [] + for user in users: + for course_id in course_ids: + for lcgm in lcgm_data: + lcgm_list.append(LearnerCourseGradeMetricsFactory( + site=site, + user=user, + 
course_id=course_id, + date_for=lcgm['date_for'], + points_possible=lcgm['points_possible'], + points_earned=lcgm['points_earned'], + sections_possible=lcgm['sections_possible'], + sections_worked=lcgm['sections_worked'] + ) + ) + return dict( + lcgm_list=lcgm_list, + users=users, + course_ids=course_ids, + site=site, + ) + + @pytest.mark.django_db def test_most_recent_with_data(db): """Make sure the query works with a couple of existing models @@ -36,7 +87,7 @@ def test_most_recent_with_data(db): newer_lcgm = LearnerCourseGradeMetricsFactory(user=user, course_id=str(course_overview.id), date_for=second_date) - + assert older_lcgm.date_for != newer_lcgm.date_for obj = LearnerCourseGradeMetrics.objects.most_recent_for_learner_course( user=user, course_id=course_overview.id) assert obj == newer_lcgm @@ -45,7 +96,7 @@ def test_most_recent_with_data(db): @pytest.mark.django_db def test_most_recent_with_empty_table(db): """Make sure the query works when there are no models to find - + Tests that the function returns None and does not fail when it cannot find any LearnerCourseGradeMetrics model instances """ @@ -57,6 +108,91 @@ def test_most_recent_with_empty_table(db): assert not obj +@pytest.mark.django_db +class TestLearnerCourseGradeMetricsManager(object): + """Test the Manager methods + + This is a quick hack job to get test coverage and make sure filtering works + """ + @pytest.fixture(autouse=True) + def setup(self, db): + self.site = SiteFactory() + + def test_completed_ids_for_site_empty(self): + """Make sure the method doesn't break if there are no LCGM records + """ + data = LearnerCourseGradeMetrics.objects.completed_ids_for_site(self.site) + assert isinstance(data, QuerySet) + assert not data + + def test_completed_ids_for_site(self): + """ + Example data + ``` + {'course_id': u'course-v1:StarFleetAcademy+SFA2+2161', 'user_id': 1} + ``` + """ + lcgm_data = create_sample_completed_lcgm(self.site, + user_count=2, + course_count=2) + + # Build lcgm w/out completion + LearnerCourseGradeMetrics(site=self.site, + sections_possible=2, + sections_worked=1) + data = LearnerCourseGradeMetrics.objects.completed_ids_for_site(self.site) + # Check we have our expected data + + expected = [(a.id, b) for a in lcgm_data['users'] + for b in lcgm_data['course_ids']] + results = [(rec['user_id'], rec['course_id']) for rec in data] + assert set(results) == set(expected) + + def test_completed_ids_for_user_list(self): + """Test when we filter on a list of user ids + """ + lcgm_data = create_sample_completed_lcgm(self.site, + user_count=4, + course_count=2) + user_ids = [lcgm_data['users'][i].id for i in [0, 2]] + data = LearnerCourseGradeMetrics.objects.completed_ids_for_site(self.site, + user_ids=user_ids) + expected = [(a.id, b) for a in lcgm_data['users'] + for b in lcgm_data['course_ids'] if a.id in user_ids] + results = [(rec['user_id'], rec['course_id']) for rec in data] + assert set(results) == set(expected) + + def test_completed_ids_for_course_list(self): + """Test when we filter on a set of course_ids + """ + lcgm_data = create_sample_completed_lcgm(self.site, + user_count=2, + course_count=4) + course_ids = [lcgm_data['course_ids'][i] for i in [0, 2]] + data = LearnerCourseGradeMetrics.objects.completed_ids_for_site(self.site, + course_ids=course_ids) + expected = [(a.id, b) for a in lcgm_data['users'] + for b in lcgm_data['course_ids'] if b in course_ids] + results = [(rec['user_id'], rec['course_id']) for rec in data] + assert set(results) == set(expected) + + def 
test_completed_ids_for_user_and_course_lists(self): + """Test when we filter on a set of course_ids + """ + lcgm_data = create_sample_completed_lcgm(self.site, + user_count=4, + course_count=4) + user_ids = [lcgm_data['users'][i].id for i in [0, 2]] + course_ids = [lcgm_data['course_ids'][i] for i in [0, 2]] + data = LearnerCourseGradeMetrics.objects.completed_ids_for_site(self.site, + user_ids=user_ids, + course_ids=course_ids) + expected = [(a.id, b) for a in lcgm_data['users'] + for b in lcgm_data['course_ids'] if b in course_ids and a.id in user_ids] + results = [(rec['user_id'], rec['course_id']) for rec in data] + assert set(results) == set(expected) + + @pytest.mark.django_db class TestLearnerCourseGradeMetrics(object): """Unit tests for figures.models.LearnerCourseGradeMetrics model diff --git a/tests/test_filters.py b/tests/test_filters.py index 7d7d0bf6..0e30c922 100644 --- a/tests/test_filters.py +++ b/tests/test_filters.py @@ -19,6 +19,7 @@ CourseDailyMetricsFilter, CourseEnrollmentFilter, CourseOverviewFilter, + EnrollmentMetricsFilter, SiteDailyMetricsFilter, CourseMauMetricsFilter, SiteMauMetricsFilter, @@ -30,6 +31,7 @@ SiteDailyMetrics, CourseMauMetrics, SiteMauMetrics, + LearnerCourseGradeMetrics, ) from tests.factories import ( @@ -37,6 +39,7 @@ CourseEnrollmentFactory, CourseMauMetricsFactory, CourseOverviewFactory, + LearnerCourseGradeMetricsFactory, SiteDailyMetricsFactory, SiteMauMetricsFactory, SiteFactory, @@ -101,7 +104,6 @@ def test_filter_course_id(self): course_id = CourseEnrollment.objects.all()[0].course_id expected_results = CourseEnrollment.objects.filter(course_id=course_id) assert expected_results.count() != len(self.course_enrollments) - f = CourseEnrollmentFilter(queryset=expected_results) res = CourseEnrollmentFilter().filter_course_id( queryset=CourseEnrollment.objects.all(), @@ -278,6 +280,54 @@ def test_get_by_date(self): lambda o: o.id, ordered=False) +@pytest.mark.skipif(django_filters_pre_v1(), + reason='Django Filter backward compatibility not implemented') +@pytest.mark.django_db +class EnrollmentMetricsFilterTest(TestCase): + """ + Initially adding coverage where view tests are not covering + """ + def setUp(self): + self.site = SiteFactory() + + self.not_complete = LearnerCourseGradeMetricsFactory(site=self.site, + sections_worked=1, + sections_possible=2) + self.complete = LearnerCourseGradeMetricsFactory(site=self.site, + sections_worked=2, + sections_possible=2) + self.site_qs = LearnerCourseGradeMetrics.objects.filter(site=self.site) + self.filter = EnrollmentMetricsFilter(queryset=self.site_qs) + + def test_filter_only_completed(self): + qs = self.filter.filter_only_completed(queryset=self.site_qs, + name='only_completed', + value=True) + assert qs.count() == 1 and qs[0] == self.complete + + def test_filter_only_completed_no_value(self): + """Test that the method returns the queryset passed in + """ + qs = self.filter.filter_only_completed(queryset=self.site_qs, + name='only_completed', + value=False) + assert qs == self.site_qs + + def test_filter_exclude_completed(self): + qs = self.filter.filter_exclude_completed(queryset=self.site_qs, + name='exclude_completed', + value=True) + assert qs.count() == 1 and qs[0] == self.not_complete + + def test_filter_only_excluded_no_value(self): + """Test that the method returns the queryset passed in + """ + qs = self.filter.filter_exclude_completed(queryset=self.site_qs, + name='exclude_completed', + value=False) + assert qs == self.site_qs + + @pytest.mark.skipif(django_filters_pre_v1(), 
                    reason='Django Filter backward compatibility not implemented')
 @pytest.mark.django_db
diff --git a/tests/views/helpers.py b/tests/views/helpers.py
index b896f2a7..bf51f0f2 100644
--- a/tests/views/helpers.py
+++ b/tests/views/helpers.py
@@ -18,3 +18,16 @@ def create_test_users():
         UserFactory(username='super_user', is_superuser=True),
         UserFactory(username='superstaff_user', is_staff=True, is_superuser=True)
     ]
+
+
+def is_response_paginated(response_data):
+    """Checks if the response data dict has expected paginated results keys
+
+    Returns True if it finds all the paginated keys, False otherwise
+    """
+    try:
+        keys = response_data.keys()
+    except AttributeError:
+        # If we can't get keys, we're certainly not paginated
+        return False
+    return set(keys) == set([u'count', u'next', u'previous', u'results'])
diff --git a/tests/views/test_enrollment_metrics_viewset.py b/tests/views/test_enrollment_metrics_viewset.py
new file mode 100644
index 00000000..f8cc505c
--- /dev/null
+++ b/tests/views/test_enrollment_metrics_viewset.py
@@ -0,0 +1,385 @@
+"""Tests Figures enrollment data viewset
+"""
+
+from decimal import Decimal
+import mock
+import pytest
+
+import django.contrib.sites.shortcuts
+from rest_framework import status
+from rest_framework.test import APIRequestFactory, force_authenticate
+
+from figures.models import LearnerCourseGradeMetrics
+from figures.views import EnrollmentMetricsViewSet
+
+from tests.factories import (
+    CourseEnrollmentFactory,
+    CourseOverviewFactory,
+    LearnerCourseGradeMetricsFactory,
+    OrganizationFactory,
+    # OrganizationCourseFactory,
+    SiteFactory,
+    UserFactory,
+)
+from tests.helpers import organizations_support_sites
+from tests.views.base import BaseViewTest
+from tests.views.helpers import is_response_paginated
+
+if organizations_support_sites():
+    from tests.factories import UserOrganizationMappingFactory
+
+    def map_users_to_org_site(caller, site, users):
+        org = OrganizationFactory(sites=[site])
+        UserOrganizationMappingFactory(user=caller,
+                                       organization=org,
+                                       is_amc_admin=True)
+        [UserOrganizationMappingFactory(user=user,
+                                        organization=org) for user in users]
+        # return created objects that the test will need
+        return caller
+
+
+@pytest.fixture
+def enrollment_test_data():
+    """Stands up shared test data. We need to revisit this
+    """
+    num_courses = 2
+    site = SiteFactory()
+    course_overviews = [CourseOverviewFactory() for i in range(num_courses)]
+    # Create a number of enrollments for each course
+    enrollments = []
+    for num_enroll, co in enumerate(course_overviews, 1):
+        enrollments += [CourseEnrollmentFactory(
+            course_id=co.id) for i in range(num_enroll)]
+
+    # This is a convenience for the test method
+    users = [enrollment.user for enrollment in enrollments]
+    return dict(
+        site=site,
+        course_overviews=course_overviews,
+        enrollments=enrollments,
+        users=users,
+    )
+
+
+@pytest.mark.django_db
+class TestEnrollmentMetricsViewSet(BaseViewTest):
+    """
+    Tests EnrollmentMetricsViewSet and EnrollmentMetricsFilter
+
+    This is a quick "throw it together" test class, so there's significant room
+    for improvement
+
+    In order to get test coverage to pass, there are a few tests here.
+    The ones testing query params should be able to be collapsed to a single
+    parametrized test per endpoint, if we restructure and generalize the test
+    data to have one set of test data serve multiple conditions.
+ + TODO: Test main list method with the matrix of filter options + - course_ids, user_ids, + - Above mix with only_completed and with exclude_completed + + One option is to break this into different test classes, one for each action + """ + base_request_path = 'api/enrollment-metrics/' + view_class = EnrollmentMetricsViewSet + + @pytest.fixture(autouse=True) + def setup(self, db, settings): + if organizations_support_sites(): + settings.FEATURES['FIGURES_IS_MULTISITE'] = True + super(TestEnrollmentMetricsViewSet, self).setup(db) + + def check_serialized_data(self, result_rec, obj): + fields = ['id', 'course_id', 'date_for', 'points_earned', + 'points_possible', 'sections_worked', + 'sections_possible'] + expected_keys = fields + ['user', 'completed', 'progress_percent'] + assert set(result_rec.keys()) == set(expected_keys) + + for key in fields: + obj_val = obj.__dict__[key] + if key == 'date_for': + obj_val = str(obj_val) + assert result_rec[key] == obj_val + assert result_rec['completed'] == obj.completed + prog_pct = str(Decimal(obj.progress_percent).quantize(Decimal('.00'))) + assert result_rec['progress_percent'] == prog_pct + + def make_caller(self, site, users): + """Convenience method to create the API caller user + """ + if organizations_support_sites(): + # TODO: set is_staff to False after we have test coverage + caller = UserFactory(is_staff=True) + map_users_to_org_site(caller=caller, site=site, users=users) + else: + caller = UserFactory(is_staff=True) + return caller + + def make_request(self, monkeypatch, request_path, site, caller, action): + """Convenience method to make the API request + + Returns the response object + """ + request = APIRequestFactory().get(request_path) + request.META['HTTP_HOST'] = site.domain + monkeypatch.setattr(django.contrib.sites.shortcuts, + 'get_current_site', + lambda req: site) + force_authenticate(request, user=caller) + view = self.view_class.as_view({'get': action}) + return view(request) + + def test_list_method_all(self, monkeypatch, enrollment_test_data): + site = enrollment_test_data['site'] + users = enrollment_test_data['users'] + + caller = self.make_caller(site, users) + other_site = SiteFactory() + assert site.domain != other_site.domain + LearnerCourseGradeMetricsFactory(site=other_site) + lcgm = [LearnerCourseGradeMetricsFactory(site=site) for i in range(5)] + + response = self.make_request(request_path=self.base_request_path, + monkeypatch=monkeypatch, + site=site, + caller=caller, + action='list') + + assert response.status_code == status.HTTP_200_OK + assert is_response_paginated(response.data) + results = response.data['results'] + # Check keys + result_ids = [obj['id'] for obj in results] + assert set(result_ids) == set([obj.id for obj in lcgm]) + # Spot check the first record + obj = LearnerCourseGradeMetrics.objects.get(id=results[0]['id']) + self.check_serialized_data(results[0], obj) + + def test_list_method_filter_method_course_ids(self, monkeypatch, enrollment_test_data): + site = enrollment_test_data['site'] + users = enrollment_test_data['users'] + + caller = self.make_caller(site, users) + other_site = SiteFactory() + assert site.domain != other_site.domain + LearnerCourseGradeMetricsFactory(site=other_site) + co = CourseOverviewFactory() + LearnerCourseGradeMetricsFactory(site=site) + + co_lcgm = [LearnerCourseGradeMetricsFactory( + site=site, + course_id=str(co.id)) for i in range(5)] + request_path = self.base_request_path + '?course_ids=' + str(co.id) + response = self.make_request(request_path=request_path, 
+ monkeypatch=monkeypatch, + site=site, + caller=caller, + action='list') + + assert response.status_code == status.HTTP_200_OK + assert is_response_paginated(response.data) + results = response.data['results'] + # Check keys + result_ids = [obj['id'] for obj in results] + assert set(result_ids) == set([obj.id for obj in co_lcgm]) + # Spot check the first record + obj = LearnerCourseGradeMetrics.objects.get(id=results[0]['id']) + self.check_serialized_data(results[0], obj) + + def test_list_method_filter_method_user_ids(self, monkeypatch, enrollment_test_data): + site = enrollment_test_data['site'] + users = enrollment_test_data['users'] + + caller = self.make_caller(site, users) + other_site = SiteFactory() + assert site.domain != other_site.domain + check_user = users[0] + LearnerCourseGradeMetricsFactory(site=other_site) + LearnerCourseGradeMetricsFactory(site=site) + # Make sure we show only for our site for the selected user + LearnerCourseGradeMetricsFactory(site=other_site, user=check_user) + user_lcgm = [LearnerCourseGradeMetricsFactory( + site=site, user=check_user) for i in range(3)] + request_path = self.base_request_path + '?user_ids=' + str(check_user.id) + response = self.make_request(request_path=request_path, + monkeypatch=monkeypatch, + site=site, + caller=caller, + action='list') + + assert response.status_code == status.HTTP_200_OK + assert is_response_paginated(response.data) + results = response.data['results'] + # Check keys + result_ids = [obj['id'] for obj in results] + assert set(result_ids) == set([obj.id for obj in user_lcgm]) + # Spot check the first record + obj = LearnerCourseGradeMetrics.objects.get(id=results[0]['id']) + self.check_serialized_data(results[0], obj) + + def test_list_method_filter_method_only_completed(self, monkeypatch, enrollment_test_data): + site = enrollment_test_data['site'] + users = enrollment_test_data['users'] + + caller = self.make_caller(site, users) + other_site = SiteFactory() + assert site.domain != other_site.domain + + # Same test as for "test_completed_method" + LearnerCourseGradeMetricsFactory(site=other_site, + sections_worked=1, + sections_possible=1) + LearnerCourseGradeMetricsFactory(site=site, + sections_worked=1, + sections_possible=5) + completed_lcgm = [LearnerCourseGradeMetricsFactory(site=site, + sections_worked=5, + sections_possible=5) + for i in range(3)] + + request_path = self.base_request_path + '?only_completed=True' + response = self.make_request(request_path=request_path, + monkeypatch=monkeypatch, + site=site, + caller=caller, + action='list') + + assert response.status_code == status.HTTP_200_OK + assert is_response_paginated(response.data) + results = response.data['results'] + # Check keys + result_ids = [obj['id'] for obj in results] + assert set(result_ids) == set([obj.id for obj in completed_lcgm]) + # Spot check the first record + obj = LearnerCourseGradeMetrics.objects.get(id=results[0]['id']) + self.check_serialized_data(results[0], obj) + + def test_list_method_filter_method_exclude_completed(self, monkeypatch, enrollment_test_data): + site = enrollment_test_data['site'] + users = enrollment_test_data['users'] + + caller = self.make_caller(site, users) + other_site = SiteFactory() + assert site.domain != other_site.domain + + # Same test as for "test_completed_method" + LearnerCourseGradeMetricsFactory(site=other_site, + sections_worked=1, + sections_possible=1) + not_completed_lcgm = [LearnerCourseGradeMetricsFactory( + site=site, + sections_worked=1, + sections_possible=5) for i in range(2)] + 
LearnerCourseGradeMetricsFactory(site=site, + sections_worked=5, + sections_possible=5) + + request_path = self.base_request_path + '?exclude_completed=True' + response = self.make_request(request_path=request_path, + monkeypatch=monkeypatch, + site=site, + caller=caller, + action='list') + + assert response.status_code == status.HTTP_200_OK + assert is_response_paginated(response.data) + results = response.data['results'] + # Check keys + result_ids = [obj['id'] for obj in results] + assert set(result_ids) == set([obj.id for obj in not_completed_lcgm]) + # Spot check the first record + obj = LearnerCourseGradeMetrics.objects.get(id=results[0]['id']) + self.check_serialized_data(results[0], obj) + + def test_completed_ids_method(self, monkeypatch, enrollment_test_data): + site = enrollment_test_data['site'] + users = enrollment_test_data['users'] + caller = self.make_caller(site, users) + other_site = SiteFactory() + assert site.domain != other_site.domain + LearnerCourseGradeMetricsFactory(site=other_site, + sections_worked=1, + sections_possible=1) + # Create an incomplete LCGM rec for our site + LearnerCourseGradeMetricsFactory(site=site, + sections_worked=1, + sections_possible=5) + completed_lcgm = [LearnerCourseGradeMetricsFactory(site=site, + sections_worked=5, + sections_possible=5) + for i in range(3)] + + request_path = self.base_request_path + '/completed_ids/' + response = self.make_request(request_path=request_path, + monkeypatch=monkeypatch, + site=site, + caller=caller, + action='completed_ids') + assert response.status_code == status.HTTP_200_OK + assert is_response_paginated(response.data) + results = response.data['results'] + # Check that the results have our expected keys and only our expected keys + res_keys_list = [elem.keys() for elem in results] + results_key_set = set([item for sublist in res_keys_list for item in sublist]) + assert results_key_set == set(['course_id', 'user_id']) + + # Check that we have the data we're looking for + results_values = [elem.values() for elem in results] + expected_values = [[obj.course_id, obj.user_id] for obj in completed_lcgm] + assert set(map(tuple, results_values)) == set(map(tuple, expected_values)) + + def test_completed_method(self, monkeypatch, enrollment_test_data): + site = enrollment_test_data['site'] + users = enrollment_test_data['users'] + caller = self.make_caller(site, users) + other_site = SiteFactory() + assert site.domain != other_site.domain + # Create an LCGM record for the other site + LearnerCourseGradeMetricsFactory(site=other_site, + sections_worked=1, + sections_possible=1) + # Create an LCGM record for our site that is not completed + LearnerCourseGradeMetricsFactory(site=site, + sections_worked=1, + sections_possible=5) + completed_lcgm = [LearnerCourseGradeMetricsFactory(site=site, + sections_worked=5, + sections_possible=5) + for i in range(3)] + + request_path = self.base_request_path + '/completed/' + response = self.make_request(request_path=request_path, + monkeypatch=monkeypatch, + site=site, + caller=caller, + action='completed') + assert response.status_code == status.HTTP_200_OK + assert is_response_paginated(response.data) + results = response.data['results'] + # Check keys + result_ids = [obj['id'] for obj in results] + assert set(result_ids) == set([obj.id for obj in completed_lcgm]) + # Spot check the first record + obj = LearnerCourseGradeMetrics.objects.get(id=results[0]['id']) + self.check_serialized_data(results[0], obj) + + @pytest.mark.parametrize('action', ['completed', 
'completed_ids'])
+    def test_no_paginate(self, monkeypatch, enrollment_test_data, action):
+
+        site = enrollment_test_data['site']
+        users = enrollment_test_data['users']
+        caller = self.make_caller(site, users)
+        request_path = '{}{}/'.format(self.base_request_path, action)
+        monkeypatch.setattr(EnrollmentMetricsViewSet, 'paginate_queryset',
+                            lambda self, qs: None)
+        with mock.patch.object(self.view_class, 'get_paginated_response') as paginate_check:
+            response = self.make_request(request_path=request_path,
+                                         monkeypatch=monkeypatch,
+                                         site=site,
+                                         caller=caller,
+                                         action=action)
+        assert response.status_code == status.HTTP_200_OK
+        assert not is_response_paginated(response.data)
+        assert not paginate_check.called