diff --git a/figures/pipeline/enrollment_metrics_next.py b/figures/pipeline/enrollment_metrics_next.py index b000ae03..9fcf7a0a 100644 --- a/figures/pipeline/enrollment_metrics_next.py +++ b/figures/pipeline/enrollment_metrics_next.py @@ -29,6 +29,7 @@ This module provides """ +from decimal import Decimal from django.db.models import Avg from figures.course import Course from figures.enrollment import is_enrollment_data_out_of_date @@ -101,4 +102,7 @@ def calculate_course_progress(course_id): # have None for progress if there's no data. But check how SQL AVG performs if results['average_progress'] is None: results['average_progress'] = 0.0 + else: + rounded_val = Decimal(results['average_progress']).quantize(Decimal('.00')) + results['average_progress'] = float(rounded_val) return results diff --git a/figures/sites.py b/figures/sites.py index e8be5344..6dddebe1 100644 --- a/figures/sites.py +++ b/figures/sites.py @@ -327,17 +327,25 @@ def get_sites(): """ Get a list of sites for Figures purposes in a configurable manner. - This functions makes use of the `SITES_BACKEND` setting if configured, otherwise + :return list of Site (QuerySet) + + For multisite mode, when `settings.FEATURES['FIGURES_IS_MULTISITE'] == True`, + this function makes use of the `SITES_BACKEND` setting if configured, otherwise + it defaults to _get_all_sites(). 
- :return list of Site (QuerySet) + For standalone mode, the default site is returned as the single record in + the QuerySet result """ - sites_backend_path = settings.ENV_TOKENS['FIGURES'].get('SITES_BACKEND') - if sites_backend_path: - sites_backend = import_from_path(sites_backend_path) - sites = sites_backend() + if is_multisite(): + sites_backend_path = settings.ENV_TOKENS['FIGURES'].get('SITES_BACKEND') + if sites_backend_path: + sites_backend = import_from_path(sites_backend_path) + sites = sites_backend() + else: + sites = _get_all_sites() else: - sites = _get_all_sites() + # We do the filter so that we are returning a QuerySet object + sites = Site.objects.filter(id__in=[default_site().id]) return sites diff --git a/figures/tasks.py b/figures/tasks.py index 0a885a4e..9dea7f6d 100644 --- a/figures/tasks.py +++ b/figures/tasks.py @@ -20,10 +20,10 @@ from figures.compat import CourseEnrollment, CourseOverview from figures.course import Course -from figures.helpers import as_course_key, as_date, is_past_date +from figures.helpers import as_course_key, as_date, is_past_date, is_multisite from figures.log import log_exec_time from figures.models import EnrollmentData -from figures.sites import get_sites, get_sites_by_id, site_course_ids +from figures.sites import default_site, get_sites, get_sites_by_id, site_course_ids from figures.pipeline.backfill import backfill_enrollment_data_for_site from figures.pipeline.course_daily_metrics import CourseDailyMetricsLoader @@ -314,12 +314,13 @@ def populate_daily_metrics_next(site_id=None, force_update=False): date_for=date_for, ed_next=True, force_update=force_update) - except Exception: # pylint: disable=broad-except + except Exception as ex: # pylint: disable=broad-except msg = ('{prefix}:FAIL populate_daily_metrics unhandled site level' - ' exception for site[{site_id}]={domain}') + ' exception for site[{site_id}]={domain}. 
msg: {msg}') logger.exception(msg.format(prefix=FPD_LOG_PREFIX, site_id=site.id, - domain=site.domain)) + domain=site.domain, + msg=str(ex))) msg = '{prefix}:END:date_for={date_for}, site_count={site_count}' logger.info(msg.format(prefix=FPD_LOG_PREFIX, @@ -510,6 +511,13 @@ def run_figures_monthly_metrics(): WAFFLE_DISABLE_PIPELINE) return - logger.info('Starting figures.tasks.run_figures_monthly_metrics...') - all_sites_jobs = group(populate_monthly_metrics_for_site.s(site.id) for site in get_sites()) - all_sites_jobs.delay() + msg = 'Starting figures.tasks.run_figures_monthly_metrics in "{}" mode...' + + if is_multisite(): + logger.info(msg.format('multisite')) + all_sites_jobs = group(populate_monthly_metrics_for_site.s(site.id) for site in get_sites()) + all_sites_jobs.delay() + else: + # running standalone, single site, no need to delay subtask + logger.info(msg.format('standalone')) + populate_monthly_metrics_for_site(default_site().id) diff --git a/tests/pipeline/test_enrollment_metrics_next.py b/tests/pipeline/test_enrollment_metrics_next.py index d606cd78..d702403e 100644 --- a/tests/pipeline/test_enrollment_metrics_next.py +++ b/tests/pipeline/test_enrollment_metrics_next.py @@ -4,10 +4,12 @@ See the module docstring for details. 
""" +from decimal import Decimal import pytest from mock import patch from django.contrib.sites.models import Site +from django.forms import DecimalField from figures.compat import CourseEnrollment from figures.pipeline.enrollment_metrics_next import ( @@ -161,12 +163,30 @@ def test_calc_course_progress_empty(self): def test_calc_course_progress(self): """The course has EnrollmentData records + + The average progress calculator should round the number with a precision + of 3 and a scale of two (two digits to the right of the decimal point) """ - some_percentages = [0.0, 25.0, 50.0] + some_percentages = [0.0, 0.25, 0.50, 0.66] expected_average = sum(some_percentages)/len(some_percentages) + expected_average = float(Decimal(expected_average).quantize(Decimal('.00'))) [ EnrollmentDataFactory(course_id=str(self.course_overview.id), progress_percent=pp) for pp in some_percentages ] results = calculate_course_progress(self.course_overview.id) assert results['average_progress'] == pytest.approx(expected_average) + + # Crude, but checks that we meet the fixed decimal precision requirements + # This will raise a django.core.exceptions.ValidationError if it doesn't pass + DecimalField(max_digits=3, decimal_places=2).clean(results['average_progress']) + + + def test_calc_course_progress_invalid_values(self): + """ Placeholder test method + + Not implementing this yet, but included this test method to consider if + we should add a test for when invalid values are stored in the + EnrollmentData.progress_percent field + """ + pass diff --git a/tests/tasks/test_daily_tasks.py b/tests/tasks/test_daily_tasks.py index 41b29347..df487622 100644 --- a/tests/tasks/test_daily_tasks.py +++ b/tests/tasks/test_daily_tasks.py @@ -63,9 +63,10 @@ from django.contrib.sites.models import Site from waffle.testutils import override_switch -from figures.helpers import as_date, as_datetime +from figures.helpers import as_date, as_datetime, is_multisite from figures.models import 
(CourseDailyMetrics, SiteDailyMetrics) +from figures.sites import default_site from figures.tasks import (FPD_LOG_PREFIX, populate_single_cdm, @@ -258,6 +259,7 @@ def test_populate_daily_metrics_for_site_site_dne(transactional_db, @pytest.mark.skipif(OPENEDX_RELEASE == GINKGO, reason='Apparent Django 1.8 incompatibility') +@pytest.mark.skipif(not is_multisite(), reason='Multisite only test') @pytest.mark.parametrize('func', [ populate_daily_metrics, populate_daily_metrics_next ]) @@ -305,12 +307,18 @@ def fake_populate_daily_metrics_for_site(site_id, **_kwargs): @pytest.mark.skipif(OPENEDX_RELEASE == GINKGO, reason='Apparent Django 1.8 incompatibility') +@pytest.mark.parametrize('site_func, multisite', [ + (default_site, False), + (SiteFactory, True ) +]) def test_populate_daily_metrics_enrollment_data_error(transactional_db, monkeypatch, - caplog): + caplog, + site_func, + multisite): # Needs to be 'today' so that enrollment data update gets called date_for = date.today() - site = SiteFactory() + site = site_func() def fake_populate_daily_metrics_for_site(**_kwargs): pass @@ -325,6 +333,9 @@ def fake_update_enrollment_data_fails(**kwargs): monkeypatch.setattr('figures.tasks.update_enrollment_data_for_site', fake_update_enrollment_data_fails) + monkeypatch.setattr('figures.sites.is_multisite', lambda: multisite) + monkeypatch.setattr('figures.tasks.is_multisite', lambda: multisite) + populate_daily_metrics(date_for=date_for) last_log = caplog.records[-1] @@ -333,6 +344,7 @@ def fake_update_enrollment_data_fails(**kwargs): prefix=FPD_LOG_PREFIX, site_id=site.id, domain=site.domain) + assert last_log.message == expected_msg @@ -342,8 +354,14 @@ def fake_update_enrollment_data_fails(**kwargs): populate_daily_metrics, populate_daily_metrics_next ]) def test_populate_daily_metrics_multisite(transactional_db, monkeypatch, caplog, func): + """ + """ # Stand up test data site_links = [] + + monkeypatch.setattr('figures.sites.is_multisite', lambda: False) + 
monkeypatch.setattr('figures.tasks.is_multisite', lambda: False) + for domain in ['alpha.domain', 'bravo.domain']: site_links.append(dict( site=SiteFactory(domain=domain), diff --git a/tests/tasks/test_mau_tasks.py b/tests/tasks/test_mau_tasks.py index 2fbdb1bd..41220642 100644 --- a/tests/tasks/test_mau_tasks.py +++ b/tests/tasks/test_mau_tasks.py @@ -81,6 +81,8 @@ def test_populate_all_mau_multiple_site(transactional_db, monkeypatch): def mock_populate_mau_metrics_for_site(site_id, force_update=False): sites_visited.append(site_id) + monkeypatch.setattr('figures.sites.is_multisite', lambda: True) + monkeypatch.setattr('figures.tasks.is_multisite', lambda: True) monkeypatch.setattr('figures.tasks.populate_mau_metrics_for_site', mock_populate_mau_metrics_for_site) diff --git a/tests/tasks/test_monthly_tasks.py b/tests/tasks/test_monthly_tasks.py index 742fb08e..b5963df6 100644 --- a/tests/tasks/test_monthly_tasks.py +++ b/tests/tasks/test_monthly_tasks.py @@ -73,7 +73,7 @@ def fake_fill_last_smm_month(site): assert last_log.message == expected_log -def test_run_figures_monthly_metrics_with_faked_subtask(transactional_db, monkeypatch): +def test_run_figures_monthly_metrics_with_faked_subtask_multisite(transactional_db, monkeypatch): """Verify we visit the site in the subtask Faking the subtask for the function under test @@ -86,6 +86,8 @@ def fake_populate_monthly_metrics_for_site(celery_task_group): for t in celery_task_group.tasks: sites_visited.extend(t.args) + monkeypatch.setattr('figures.sites.is_multisite', lambda: True) + monkeypatch.setattr('figures.tasks.is_multisite', lambda: True) monkeypatch.setattr('celery.group.delay', fake_populate_monthly_metrics_for_site) run_figures_monthly_metrics() @@ -93,6 +95,30 @@ def fake_populate_monthly_metrics_for_site(celery_task_group): assert set(sites_visited) == set([rec.id for rec in expected_sites]) +def test_run_figures_monthly_metrics_with_faked_subtask_standalone(transactional_db, + monkeypatch, + settings): + 
"""Verify we visit the site in the subtask + + Faking the subtask for the function under test + """ + expected_site = settings.SITE_ID + sites_visited = [] + + def fake_populate_monthly_metrics_for_site(site_id): + sites_visited.append(site_id) + + monkeypatch.setattr('figures.sites.is_multisite', lambda: False) + monkeypatch.setattr('figures.tasks.is_multisite', lambda: False) + monkeypatch.setattr('figures.tasks.populate_monthly_metrics_for_site', + fake_populate_monthly_metrics_for_site) + + run_figures_monthly_metrics() + + assert len(sites_visited) == 1 + assert sites_visited[0] == expected_site + + @pytest.mark.skipif(OPENEDX_RELEASE == GINKGO, reason='Broken test. Apparent Django 1.8 incompatibility') def test_run_figures_monthly_metrics_with_unfaked_subtask(transactional_db, monkeypatch): @@ -107,7 +133,6 @@ def test_run_figures_monthly_metrics_with_unfaked_subtask(transactional_db, monk sites_visited = [] def fake_fill_last_smm_month(site): - # assert site == expected_site sites_visited.append(site) monkeypatch.setattr('figures.tasks.fill_last_smm_month', diff --git a/tests/test_sites.py b/tests/test_sites.py index a86fe7eb..9eefbca4 100644 --- a/tests/test_sites.py +++ b/tests/test_sites.py @@ -450,10 +450,11 @@ def test_get_requested_site_default_behaviour(settings): @pytest.mark.django_db -def test_get_requested_site_custom_backend(settings): +def test_get_requested_site_custom_backend(monkeypatch, settings): """ Test `get_requested_site` can use custom backends. """ + monkeypatch.setattr('figures.sites.is_multisite', lambda: True) orange_site = SiteFactory.create(name='orange site') settings.ENV_TOKENS = { @@ -467,13 +468,14 @@ def test_get_requested_site_custom_backend(settings): @pytest.mark.django_db -def test_get_requested_site_broken_backend(settings): +def test_get_requested_site_broken_backend(monkeypatch, settings): """ Test `get_requested_site` don't hide errors from custom backends. 
- Figures should keep a simple backend implementation without attempting to fix errors in site configuration or - faulty backends. + Figures should keep a simple backend implementation without attempting to + fix errors in site configuration or faulty backends. """ + monkeypatch.setattr('figures.sites.is_multisite', lambda: True) settings.ENV_TOKENS = { 'FIGURES': { 'REQUESTED_SITE_BACKEND': 'organizations:broken_backend' @@ -485,18 +487,29 @@ def test_get_requested_site_broken_backend(settings): figures.sites.get_requested_site(request=mock.Mock()) -@pytest.mark.skipif(not organizations_support_sites(), reason='needed only in multisite mode') @pytest.mark.django_db -def test_get_sites_default_behaviour(): +def test_get_sites_default_behaviour_multisite(monkeypatch): + + monkeypatch.setattr('figures.sites.is_multisite', lambda: True) default_site = Site.objects.get() # gets the example site another_site = SiteFactory() all_sites = figures.sites.get_sites() assert list(all_sites) == [default_site, another_site], 'Should return all sites.' -@pytest.mark.skipif(not organizations_support_sites(), reason='needed only in multisite mode') @pytest.mark.django_db -def test_get_sites_custom_backend(settings): +def test_get_sites_default_behaviour_standalone(monkeypatch, settings): + + monkeypatch.setattr('figures.sites.is_multisite', lambda: False) + default_site = Site.objects.get(id=settings.SITE_ID) # gets the example site + SiteFactory() # Create another site. 
We don't need the variable + all_sites = figures.sites.get_sites() + assert len(all_sites) == 1, 'There should be only one site returned' + assert all_sites[0] == default_site, 'The returned site should be the default site' + + +@pytest.mark.django_db +def test_get_sites_custom_backend(monkeypatch, settings): _orange_site = SiteFactory(name='orange site') blue_site_1 = SiteFactory(name='blue site 1') blue_site_2 = SiteFactory(name='blue site 2') @@ -508,19 +521,20 @@ def test_get_sites_custom_backend(settings): 'SITES_BACKEND': 'organizations:get_blue_sites' } } + monkeypatch.setattr('figures.sites.is_multisite', lambda: True) with mock.patch('organizations.get_blue_sites', create=True, return_value=blue_sites): all_sites = figures.sites.get_sites() assert list(all_sites) == [blue_site_1, blue_site_2], 'Should return just blue sites.' -@pytest.mark.skipif(not organizations_support_sites(), reason='needed only in multisite mode') @pytest.mark.django_db -def test_get_sites_broken_backend(settings): +def test_get_sites_broken_backend(monkeypatch, settings): settings.ENV_TOKENS = { 'FIGURES': { 'SITES_BACKEND': 'organizations:broken_backend' } } + monkeypatch.setattr('figures.sites.is_multisite', lambda: True) with mock.patch('organizations.broken_backend', create=True, side_effect=ValueError): with pytest.raises(ValueError): figures.sites.get_sites() # Should fail if the SITES_BACKEND fails diff --git a/tests/views/test_sites_view.py b/tests/views/test_sites_view.py index 595e20fd..446b6db5 100644 --- a/tests/views/test_sites_view.py +++ b/tests/views/test_sites_view.py @@ -44,7 +44,8 @@ def get_expected_results(self, **filter): ('?domain=bravo', {'domain__icontains': 'bravo'}), ('?name=alpha', {'name__icontains': 'alpha'}) ]) - def test_get(self, query_params, filter_args): + def test_get(self, monkeypatch, query_params, filter_args): + monkeypatch.setattr('figures.sites.is_multisite', lambda: True) qp_msg = 'query_params={query_params}' expected_data = 
Site.objects.filter(**filter_args) request = APIRequestFactory().get(self.request_path + query_params)