{{ _('Appeal reviewer decision') }}
-{{ _('Decision {0}')|format_html(decision_id) }}
+{{ _('Decision {0}')|format_html(decision_cinder_id) }}
{{ _('Decision {0}')|format_html(decision_id) }}
{{ _("We have already reviewed a similar appeal from another reporter, and have reversed our prior decision. We have taken action against the content and/or account holder in accordance with our policies.") }}
{{ _("Because the decision you are appealing has already been overturned, your appeal will not be processed.") }}
{% endif %} + {% elif appealed_decision_overridden %} +{{ _("Thank you for your report.") }}
+{{ _("The decision you are appealing has already been overridden by a new decision, so this decision can't be appealed.") }}
{% else %}{{ _("This decision can't be appealed.") }}
{% endif %} diff --git a/src/olympia/abuse/templates/abuse/emails/CinderActionApproveInitialDecision.txt b/src/olympia/abuse/templates/abuse/emails/CinderActionApproveInitialDecision.txt deleted file mode 100644 index 94e6aea71321..000000000000 --- a/src/olympia/abuse/templates/abuse/emails/CinderActionApproveInitialDecision.txt +++ /dev/null @@ -1,14 +0,0 @@ -Hello, - -Your {{ type }} has been approved on Mozilla Add-ons and it is now available at {{ target_url }}. - -Approved versions: {{ version_list }} - -Thank you. - -More information about Mozilla's add-on policies can be found at {{ policy_document_url }}. - -[{{ reference_id }}] --- -Mozilla Add-ons Team -{{ SITE_URL }} diff --git a/src/olympia/abuse/templates/abuse/emails/CinderActionOverrideApprove.txt b/src/olympia/abuse/templates/abuse/emails/CinderActionOverrideApprove.txt deleted file mode 100644 index d841b7d590ec..000000000000 --- a/src/olympia/abuse/templates/abuse/emails/CinderActionOverrideApprove.txt +++ /dev/null @@ -1 +0,0 @@ -{% include "abuse/emails/CinderActionTargetAppealApprove.txt" with is_override=True %} \ No newline at end of file diff --git a/src/olympia/abuse/templates/abuse/emails/ContentActionApproveInitialDecision.txt b/src/olympia/abuse/templates/abuse/emails/ContentActionApproveInitialDecision.txt new file mode 100644 index 000000000000..1deb99ea6466 --- /dev/null +++ b/src/olympia/abuse/templates/abuse/emails/ContentActionApproveInitialDecision.txt @@ -0,0 +1,25 @@ +Hello, + +{% if not auto_approval %} +Your {{ type }} has been approved on Mozilla Add-ons and it is now available at {{ target_url }}. +{% else %} +Your {{ type }} has been automatically screened and tentatively approved. It is now available at {{ target_url }}. + +Your add-on can be subject to human review at any time. Reviewers may determine that it requires changes or should be removed. If that occurs, you will receive a separate notification with details and next steps. 
+{% endif %} +{% if version_list %}Approved versions: {{ version_list }} +{% endif %} +{% if manual_reasoning_text %}Comments: {{ manual_reasoning_text }}.{% endif %} + +{% if has_attachment %} +An attachment was provided. {% if dev_url %}To respond or view the file, visit {{ dev_url }}.{% endif %} + +{% endif %} +Thank you. + +More information about Mozilla's add-on policies can be found at {{ policy_document_url }}. + +[{{ reference_id }}] +-- +Mozilla Add-ons Team +{{ SITE_URL }} diff --git a/src/olympia/abuse/templates/abuse/emails/CinderActionBanUser.txt b/src/olympia/abuse/templates/abuse/emails/ContentActionBanUser.txt similarity index 100% rename from src/olympia/abuse/templates/abuse/emails/CinderActionBanUser.txt rename to src/olympia/abuse/templates/abuse/emails/ContentActionBanUser.txt diff --git a/src/olympia/abuse/templates/abuse/emails/CinderActionDeleteCollection.txt b/src/olympia/abuse/templates/abuse/emails/ContentActionDeleteCollection.txt similarity index 100% rename from src/olympia/abuse/templates/abuse/emails/CinderActionDeleteCollection.txt rename to src/olympia/abuse/templates/abuse/emails/ContentActionDeleteCollection.txt diff --git a/src/olympia/abuse/templates/abuse/emails/CinderActionDeleteRating.txt b/src/olympia/abuse/templates/abuse/emails/ContentActionDeleteRating.txt similarity index 100% rename from src/olympia/abuse/templates/abuse/emails/CinderActionDeleteRating.txt rename to src/olympia/abuse/templates/abuse/emails/ContentActionDeleteRating.txt diff --git a/src/olympia/abuse/templates/abuse/emails/CinderActionDisableAddon.txt b/src/olympia/abuse/templates/abuse/emails/ContentActionDisableAddon.txt similarity index 67% rename from src/olympia/abuse/templates/abuse/emails/CinderActionDisableAddon.txt rename to src/olympia/abuse/templates/abuse/emails/ContentActionDisableAddon.txt index b13e99f7e0f1..a9b70fd66a01 100644 --- a/src/olympia/abuse/templates/abuse/emails/CinderActionDisableAddon.txt +++ 
b/src/olympia/abuse/templates/abuse/emails/ContentActionDisableAddon.txt @@ -4,5 +4,5 @@ Your {{ type }} {{ name }} was manually reviewed by the Mozilla Add-ons team {% Our review found that your content violates the following Mozilla policy or policies: {% include 'abuse/emails/includes/policies.txt' %} -Based on that finding, your {{ type }} has been permanently disabled on {{ target_url }} and is no longer available for download from Mozilla Add-ons, anywhere in the world. Users who have previously installed your add-on will be able to continue using it. +Based on that finding, your {{ type }} has been permanently disabled on {{ target_url }} and is no longer available for download from Mozilla Add-ons, anywhere in the world. {% if is_addon_being_blocked %}In addition, in some cases, users who have previously installed the add-on won't be able to continue using it.{% else %}Users who have previously installed your add-on will be able to continue using it.{% endif %} {% endblock %} diff --git a/src/olympia/abuse/templates/abuse/emails/ContentActionOverrideApprove.txt b/src/olympia/abuse/templates/abuse/emails/ContentActionOverrideApprove.txt new file mode 100644 index 000000000000..934fdad57110 --- /dev/null +++ b/src/olympia/abuse/templates/abuse/emails/ContentActionOverrideApprove.txt @@ -0,0 +1 @@ +{% include "abuse/emails/ContentActionTargetAppealApprove.txt" with is_override=True %} \ No newline at end of file diff --git a/src/olympia/abuse/templates/abuse/emails/CinderActionRejectVersion.txt b/src/olympia/abuse/templates/abuse/emails/ContentActionRejectVersion.txt similarity index 55% rename from src/olympia/abuse/templates/abuse/emails/CinderActionRejectVersion.txt rename to src/olympia/abuse/templates/abuse/emails/ContentActionRejectVersion.txt index 25275728e4b0..051a9e3c5ad4 100644 --- a/src/olympia/abuse/templates/abuse/emails/CinderActionRejectVersion.txt +++ b/src/olympia/abuse/templates/abuse/emails/ContentActionRejectVersion.txt @@ -6,7 +6,7 @@ Our 
review found that your content violates the following Mozilla policy or poli Affected versions: {{ version_list }} -Based on that finding, those versions of your {{ type }} have been disabled on {{ target_url }} and is no longer available for download from Mozilla Add-ons, anywhere in the world. Users who have previously installed those versions will be able to continue using them. +Based on that finding, those versions of your {{ type }} have been disabled on {{ target_url }} and are no longer available for download from Mozilla Add-ons, anywhere in the world. {% if is_addon_being_blocked %}In addition, in some cases, users who have previously installed those versions won't be able to continue using them.{% else %}Users who have previously installed those versions will be able to continue using them.{% endif %} -You may upload a new version which addresses the policy violation(s). +{% if not is_addon_disabled %}You may upload a new version which addresses the policy violation(s).{% endif %} {% endblock %} diff --git a/src/olympia/abuse/templates/abuse/emails/ContentActionRejectVersionDelayed.txt b/src/olympia/abuse/templates/abuse/emails/ContentActionRejectVersionDelayed.txt new file mode 100644 index 000000000000..ca1ccd5c3b06 --- /dev/null +++ b/src/olympia/abuse/templates/abuse/emails/ContentActionRejectVersionDelayed.txt @@ -0,0 +1,10 @@ +{% extends "abuse/emails/base.txt" %}{% block content %} +Your {{ type }} {{ name }} was manually reviewed by the Mozilla Add-ons team {% if is_third_party_initiated %}based on a report we received from a third party{% else %}in an assessment performed on our own initiative of content that was submitted to Mozilla Add-ons{% endif %}. 
+ +Our review found that your content violates the following Mozilla policy or policies: +{% include 'abuse/emails/includes/policies.txt' %} + +Affected versions: {{ version_list }} + +Based on that finding, those versions of your {{ type }} will be disabled on {{ target_url }} in {{ delayed_rejection_days }} day(s). Once disabled, these versions will no longer be available for download from Mozilla Add-ons, anywhere in the world, but any compliant versions will remain in place, and you may choose to upload a new version that addresses the policy violation. Users who have previously installed the disabled versions will be able to continue using them. +{% endblock %} diff --git a/src/olympia/abuse/templates/abuse/emails/CinderActionTargetAppealApprove.txt b/src/olympia/abuse/templates/abuse/emails/ContentActionTargetAppealApprove.txt similarity index 72% rename from src/olympia/abuse/templates/abuse/emails/CinderActionTargetAppealApprove.txt rename to src/olympia/abuse/templates/abuse/emails/ContentActionTargetAppealApprove.txt index b8faa2295da5..920de839c6bc 100644 --- a/src/olympia/abuse/templates/abuse/emails/CinderActionTargetAppealApprove.txt +++ b/src/olympia/abuse/templates/abuse/emails/ContentActionTargetAppealApprove.txt @@ -3,7 +3,12 @@ Hello, Previously, your {{ type }} was suspended/removed from Mozilla Add-ons, based on a finding that you had violated Mozilla's policies. {% if not is_override %}After reviewing your appeal, we{% else %}We have now{% endif %} determined that the previous decision was incorrect, and based on that determination, we have restored your {{ type }}. It is now available at {{ target_url }}. +{% if manual_reasoning_text %}{{ manual_reasoning_text }}. {% endif %} +{% if has_attachment %} +An attachment was provided. {% if dev_url %}To respond or view the file, visit {{ dev_url }}.{% endif %} + +{% endif %} Thank you. More information about Mozilla's add-on policies can be found at {{ policy_document_url }}. 
diff --git a/src/olympia/abuse/templates/abuse/emails/CinderActionTargetAppealRemovalAffirmation.txt b/src/olympia/abuse/templates/abuse/emails/ContentActionTargetAppealRemovalAffirmation.txt similarity index 58% rename from src/olympia/abuse/templates/abuse/emails/CinderActionTargetAppealRemovalAffirmation.txt rename to src/olympia/abuse/templates/abuse/emails/ContentActionTargetAppealRemovalAffirmation.txt index 349a1ebb0883..89b7351b84eb 100644 --- a/src/olympia/abuse/templates/abuse/emails/CinderActionTargetAppealRemovalAffirmation.txt +++ b/src/olympia/abuse/templates/abuse/emails/ContentActionTargetAppealRemovalAffirmation.txt @@ -2,8 +2,12 @@ Hello, Previously, your {{ type }} was suspended/removed from Mozilla Add-ons, based on a finding that you had violated Mozilla's policies. -After reviewing your appeal, we determined that the previous decision, that your {{ type }} violates Mozilla's policies, was correct.{% if additional_reasoning %} {{ additional_reasoning }}.{% endif %} Based on that determination, we have denied your appeal, and will not reinstate your {{ type }}. +After reviewing your appeal, we determined that the previous decision, that your {{ type }} violates Mozilla's policies, was correct. {% if manual_reasoning_text %}{{ manual_reasoning_text }}. {% endif %}Based on that determination, we have denied your appeal, and will not reinstate your {{ type }}. +{% if has_attachment %} +An attachment was provided. {% if dev_url %}To respond or view the file, visit {{ dev_url }}.{% endif %} + +{% endif %} More information about Mozilla's add-on policies can be found at {{ policy_document_url }}. Thank you. 
diff --git a/src/olympia/abuse/templates/abuse/emails/includes/policies.txt b/src/olympia/abuse/templates/abuse/emails/includes/policies.txt index 7ed1749ea9dc..5edc1185e488 100644 --- a/src/olympia/abuse/templates/abuse/emails/includes/policies.txt +++ b/src/olympia/abuse/templates/abuse/emails/includes/policies.txt @@ -1,8 +1,10 @@ -{% if manual_policy_text %} - {{ manual_policy_text }} -{% else %} - {% for policy in policies %} - {# Policies text may contain HTML entities, this is a text email so we consider that safe #} - - {{ policy.full_text|safe }} - {% endfor %} -{% endif %} +{% for policy in policies %} + {# Policies text may contain HTML entities, this is a text email so we consider that safe #} + - {{ policy.full_text|safe }} +{% endfor %} +{% if manual_reasoning_text %}{{ manual_reasoning_text|safe }}. {% endif %} + +{% if has_attachment %} +An attachment was provided. {% if dev_url %}To respond or view the file, visit {{ dev_url }}.{% endif %} + +{% endif %} \ No newline at end of file diff --git a/src/olympia/abuse/templates/abuse/emails/reporter_appeal_ignore.txt b/src/olympia/abuse/templates/abuse/emails/reporter_appeal_approve.txt similarity index 95% rename from src/olympia/abuse/templates/abuse/emails/reporter_appeal_ignore.txt rename to src/olympia/abuse/templates/abuse/emails/reporter_appeal_approve.txt index 6e24bd242a23..54127ccc707d 100644 --- a/src/olympia/abuse/templates/abuse/emails/reporter_appeal_ignore.txt +++ b/src/olympia/abuse/templates/abuse/emails/reporter_appeal_approve.txt @@ -2,6 +2,7 @@ Thank you for your report about {{ name }} on Mozilla Add-ons, and for providing more information about your concerns. After reviewing your appeal, we determined that the previous decision, that this content does not violate Mozilla’s policies ({{ policy_document_url }}), was correct. Based on that determination, we have denied your appeal, and will not take any action against the account or the content. +{{ manual_reasoning_text }}. 
Thank you for your attention. {% endblocktranslate %}{% endblock %} diff --git a/src/olympia/abuse/templates/abuse/emails/reporter_appeal_takedown_delayed.txt b/src/olympia/abuse/templates/abuse/emails/reporter_appeal_takedown_delayed.txt new file mode 100644 index 000000000000..ee4c2568a77f --- /dev/null +++ b/src/olympia/abuse/templates/abuse/emails/reporter_appeal_takedown_delayed.txt @@ -0,0 +1,5 @@ +{% extends "abuse/emails/base_reporter.txt" %}{% load i18n %}{% block content %}{# L10n: This is an email. Whitespace matters #}{% blocktranslate %} +Thank you for your report about {{ name }} on Mozilla Add-ons, and for providing more information about your concerns. + +After reviewing your appeal, we have determined that our prior decision, that this content does not violate Mozilla's policies ({{ policy_document_url }}), was incorrect. Based on that determination, we have requested the developer make changes. If they do not update their content to correct the violation, we will remove it. +{% endblocktranslate %}{% endblock %} \ No newline at end of file diff --git a/src/olympia/abuse/templates/abuse/emails/reporter_ignore.txt b/src/olympia/abuse/templates/abuse/emails/reporter_content_approve.txt similarity index 100% rename from src/olympia/abuse/templates/abuse/emails/reporter_ignore.txt rename to src/olympia/abuse/templates/abuse/emails/reporter_content_approve.txt diff --git a/src/olympia/abuse/templates/abuse/emails/reporter_disabled_ignore.txt b/src/olympia/abuse/templates/abuse/emails/reporter_disabled_ignore.txt new file mode 100644 index 000000000000..1eae22dc5ba1 --- /dev/null +++ b/src/olympia/abuse/templates/abuse/emails/reporter_disabled_ignore.txt @@ -0,0 +1,12 @@ +Hello, + +Thank you for your report about {{ name }}, at {{ target_url }}, that appeared to violate our policies. + +As this content has already been removed before your report was reviewed, your report will not be processed. + +Thank you for your attention. 
+ +[{{ reference_id }}] +-- +Mozilla Add-ons Team +{{ SITE_URL }} diff --git a/src/olympia/abuse/templates/abuse/emails/reporter_invalid_ignore.txt b/src/olympia/abuse/templates/abuse/emails/reporter_invalid_ignore.txt new file mode 100644 index 000000000000..ce2dbf0f21cb --- /dev/null +++ b/src/olympia/abuse/templates/abuse/emails/reporter_invalid_ignore.txt @@ -0,0 +1,11 @@ +Hello, + +Thank you for your report about {{ name }}, at {{ target_url }}. + +Based on the information you submitted, we were unable to identify a violation of Mozilla’s policies ({{ policy_document_url }}). The {{ type }} will therefore not be subject to any moderation action. + +{% with policy=policies|first %}{{ policy.text }}{% endwith %} +[{{ reference_id }}] +-- +Mozilla Add-ons Team +{{ SITE_URL }} diff --git a/src/olympia/abuse/templates/abuse/emails/reporter_takedown_addon_delayed.txt b/src/olympia/abuse/templates/abuse/emails/reporter_takedown_addon_delayed.txt new file mode 100644 index 000000000000..a1dde5ac588c --- /dev/null +++ b/src/olympia/abuse/templates/abuse/emails/reporter_takedown_addon_delayed.txt @@ -0,0 +1,5 @@ +{% extends "abuse/emails/base_reporter.txt" %}{% load i18n %}{% block content %}{# L10n: This is an email. Whitespace matters #}{% blocktranslate %} +Thank you for your report about the following add-on on Mozilla Add-ons: {{ name }}. + +We reviewed your report and the content in question, and found that it violates Mozilla's policies ({{ policy_document_url }}), and have therefore requested the developer make changes. If they do not update their content to correct the violation, we will remove it. 
+{% endblocktranslate %}{% endblock %} \ No newline at end of file diff --git a/src/olympia/abuse/templates/admin/abuse/cinderpolicy/change_list_object_tools.html b/src/olympia/abuse/templates/admin/abuse/cinderpolicy/change_list_object_tools.html new file mode 100644 index 000000000000..39b1f387e842 --- /dev/null +++ b/src/olympia/abuse/templates/admin/abuse/cinderpolicy/change_list_object_tools.html @@ -0,0 +1,9 @@ +{% extends "admin/change_list_object_tools.html" %} + +{% block object-tools-items %} + +{% endblock %} diff --git a/src/olympia/abuse/tests/assets/cinder_webhook_appeal_confirm_approve.json b/src/olympia/abuse/tests/assets/cinder_webhook_appeal_confirm_approve.json deleted file mode 100644 index 259fc68c9aee..000000000000 --- a/src/olympia/abuse/tests/assets/cinder_webhook_appeal_confirm_approve.json +++ /dev/null @@ -1,88 +0,0 @@ -{ - "event": "decision.created", - "payload": { - "notes": "still no!", - "appeal": { - "appealed_decision": { - "id": "1dad15af-1883-4d54-a7b0-f9c6a5ba1c7f", - "type": "queue_review", - "user": { - "name": "Mathieu Pillard", - "email": "mpillard@mozilla.com", - "groups": [ - { - "name": "Admin" - }, - { - "name": "Everyone" - } - ] - }, - "notes": "no it is fine", - "policies": [ - { - "id": "1c5d711a-78b7-4fc2-bdef-9a33024f5e8b", - "name": "Ignore", - "is_illegal": false, - "enforcement_actions": [ - "amo-approve" - ] - } - ], - "enforcement_actions": [ - "amo-approve" - ] - } - }, - "entity": { - "attributes": { - "id": "491585", - "guid": "addcvbdfgcon@exampsdgsdfgle.com", - "name": "Inbox by Gmail", - "slug": "inbox-by-gmail", - "summary": "__MSG_6425858496683260529__", - "version": "2.0", - "homepage": null, - "description": "", - "support_url": null, - "last_updated": "2021-11-24T11:41:51.511358", - "release_notes": "", - "support_email": null, - "promoted_badge": "Recommended", - "average_daily_users": 0 - }, - "entity_schema": "amo_addon" - }, - "source": { - "job": { - "id": 
"5c7c3e21-8ccd-4d2f-b3b4-429620bd7a63", - "queue": { - "slug": "amo-content-infringement", - "is_multi_review": false - }, - "reports": [], - "created_at": "2024-01-12T15:19:36.532570+00:00" - }, - "user": { - "name": "Mathieu Pillard", - "email": "mpillard@mozilla.com", - "groups": [ - { - "name": "Admin" - }, - { - "name": "Everyone" - } - ] - }, - "decision": { - "id": "76e0006d-1a42-4ec7-9475-148bab1970f1", - "type": "confirm", - "metadata": {} - } - }, - "policies": [], - "timestamp": "2024-01-12T15:20:19.226428+00:00", - "enforcement_actions": [] - } -} diff --git a/src/olympia/abuse/tests/assets/cinder_webhook.json b/src/olympia/abuse/tests/assets/cinder_webhook_payloads/decision.json similarity index 100% rename from src/olympia/abuse/tests/assets/cinder_webhook.json rename to src/olympia/abuse/tests/assets/cinder_webhook_payloads/decision.json diff --git a/src/olympia/abuse/tests/assets/cinder_webhook_payloads/job_actioned_move_to_dev_infringement.json b/src/olympia/abuse/tests/assets/cinder_webhook_payloads/job_actioned_move_to_dev_infringement.json new file mode 100644 index 000000000000..4d415d91ada7 --- /dev/null +++ b/src/olympia/abuse/tests/assets/cinder_webhook_payloads/job_actioned_move_to_dev_infringement.json @@ -0,0 +1,53 @@ +{ + "event": "job.actioned", + "payload": { + "action": "escalated", + "action_made_by": { + "user": { + "email": "awilliamson@test.com", + "groups": [ + { + "name": "Admin" + }, + { + "name": "Everyone" + } + ], + "name": "Andrew Williamson" + } + }, + "job": { + "entity": { + "attributes": { + "average_daily_users": 0, + "created": "2022-05-04T07:28:32.414515", + "description": "", + "guid": "arabicoffeetest222@dictionaries.addons.mozilla.org", + "id": "621524", + "last_updated": "2022-05-04T07:28:32.414515", + "name": "Dictionary", + "privacy_policy": "", + "promoted": "", + "promoted_badge": "", + "release_notes": "null", + "slug": "dictionary", + "summary": "Dictionary", + "version": "3.2.2012034.4webext" + }, + 
"entity_schema": "amo_addon" + }, + "id": "d16e1ebc-b6df-4742-83c1-61f5ed3bd644", + "job_category": "appeal", + "num_reports": 0, + "priority": 0, + "queue": { + "is_multi_review": false, + "slug": "amo-env-addon-infringement" + }, + "status": "open" + }, + "notes": "no", + "source": "manual", + "timestamp": "2024-10-01T13:19:49.546547+00:00" + } + } \ No newline at end of file diff --git a/src/olympia/abuse/tests/assets/cinder_webhook_payloads/override_change_to_approve.json b/src/olympia/abuse/tests/assets/cinder_webhook_payloads/override_change_to_approve.json new file mode 100644 index 000000000000..e621061daedf --- /dev/null +++ b/src/olympia/abuse/tests/assets/cinder_webhook_payloads/override_change_to_approve.json @@ -0,0 +1,218 @@ +{ + "event": "decision.created", + "payload": { + "appeal": { + "appealed_decision": { + "enforcement_actions": [ + "amo-ban-user", + "amo-delete-collection", + "amo-delete-rating", + "amo-disable-addon" + ], + "enforcement_actions_removed": [], + "id": "4dec6a52-41c1-43ea-9897-d914a51e57ff", + "notes": "", + "policies": [ + { + "enforcement_actions": [], + "id": "0d9df565-f249-40f8-8954-e73e65932ca2", + "is_illegal": false, + "is_non_violating": false, + "name": "Acceptable Use" + }, + { + "enforcement_actions": [ + "amo-ban-user", + "amo-delete-collection", + "amo-delete-rating", + "amo-disable-addon" + ], + "id": "4e401e5d-2720-4cea-a367-0d163bad1dcd", + "is_illegal": false, + "is_non_violating": false, + "name": "Controlled substances", + "parent_id": "0d9df565-f249-40f8-8954-e73e65932ca2" + } + ], + "policies_removed": [], + "type": "queue_review", + "user": { + "email": "irusiczki@mozilla.com", + "groups": [ + { + "name": "API Manager" + }, + { + "name": "Base User" + }, + { + "name": "Everyone" + }, + { + "name": "QA" + }, + { + "name": "TaskUs moderators" + } + ], + "name": "Ioana rusiczki" + } + } + }, + "appeals_resolved": [], + "enforcement_actions": [ + "amo-approve" + ], + "enforcement_actions_removed": [ + 
"amo-ban-user", + "amo-delete-collection", + "amo-delete-rating", + "amo-disable-addon" + ], + "entity": { + "attributes": { + "average_daily_users": 0, + "created": "2024-11-06T06:48:24.833984", + "description": "", + "guid": "{bf8f936b-d9a9-4bb7-bc17-6176920251e5}", + "id": "633963", + "last_updated": "2024-11-06T06:48:24.982270", + "name": "Override policy 3", + "previews": [ + { + "mime_type": "image/jpeg", + "value": "https://storage.googleapis.com/dev_addons_server_for_svcse_1619/95e66ed19bc60249d21493e91cf7f03f037e0adbc874e6077ee0a23e090829ff.jpg?Expires=1749451847&GoogleAccessId=dev-svcse-1619-uploader%40moz-fx-amo-nonprod.iam.gserviceaccount.com&Signature=E7GxwIq0rkmwubkHMk3MzLtqD9uFqKFAVhE7OWPZZdGX317ZJY%2BtzTq1zhYwMHgNZIW0nMTkt21dCKXEDb2AIkdN%2FEtSxermUXT7%2FTLYgC7mv6mEs%2BqDFBsJQDk7ygi%2FZ3rNNz6Bij6tGf%2BL%2FQIzoDtA8nCMPhudxjgCjaNgHPDCUQq0c24i94GY8ipT73tl6GUEXt%2F5vxDUj7px9pfpI4Xa0vbAwO3%2BENI%2Bh5nCedXRqZhvQWEDFy1mKByBmBh0UnVscbFuniOvrbUk5vCGdg7qBe0RicEgt5bK7k6AdCZ9KNvpiub5BG4PW2VhhC2lUWIYm5mK8%2FpxMAuKow1rVA%3D%3D" + } + ], + "privacy_policy": "", + "promoted": "", + "release_notes": "", + "slug": "override-policy-3", + "summary": "Override policy 3", + "version": "1.0" + }, + "entity_schema": "amo_addon" + }, + "notes": "changed our mind", + "point_updates": [], + "policies": [ + { + "enforcement_actions": [ + "amo-approve" + ], + "id": "085f6a1c-46b6-44c2-a6ae-c3a73488aa1e", + "is_illegal": false, + "is_non_violating": true, + "name": "Approve" + } + ], + "policies_removed": [ + { + "enforcement_actions": [], + "id": "0d9df565-f249-40f8-8954-e73e65932ca2", + "is_illegal": false, + "is_non_violating": false, + "name": "Acceptable Use" + }, + { + "enforcement_actions": [ + "amo-ban-user", + "amo-delete-collection", + "amo-delete-rating", + "amo-disable-addon" + ], + "id": "4e401e5d-2720-4cea-a367-0d163bad1dcd", + "is_illegal": false, + "is_non_violating": false, + "name": "Controlled substances", + "parent_id": "0d9df565-f249-40f8-8954-e73e65932ca2" + 
} + ], + "previous_decision": { + "enforcement_actions": [ + "amo-ban-user", + "amo-delete-collection", + "amo-delete-rating", + "amo-disable-addon" + ], + "enforcement_actions_removed": [], + "id": "d1f01fae-3bce-41d5-af8a-e0b4b5ceaaed", + "metadata": {}, + "notes": "", + "policies": [ + { + "enforcement_actions": [], + "id": "0d9df565-f249-40f8-8954-e73e65932ca2", + "is_illegal": false, + "is_non_violating": false, + "name": "Acceptable Use" + }, + { + "enforcement_actions": [ + "amo-ban-user", + "amo-delete-collection", + "amo-delete-rating", + "amo-disable-addon" + ], + "id": "4e401e5d-2720-4cea-a367-0d163bad1dcd", + "is_illegal": false, + "is_non_violating": false, + "name": "Controlled substances", + "parent_id": "0d9df565-f249-40f8-8954-e73e65932ca2" + } + ], + "policies_removed": [], + "type": "queue_review", + "user": { + "email": "irusiczki@mozilla.com", + "groups": [ + { + "name": "API Manager" + }, + { + "name": "Base User" + }, + { + "name": "Everyone" + }, + { + "name": "QA" + }, + { + "name": "TaskUs moderators" + } + ], + "name": "Ioana rusiczki" + } + }, + "source": { + "decision": { + "id": "3eacdc09-c292-4fcb-a56f-a3d45d5eefeb", + "metadata": {}, + "type": "manual_override" + }, + "user": { + "email": "irusiczki@mozilla.com", + "groups": [ + { + "name": "API Manager" + }, + { + "name": "Base User" + }, + { + "name": "Everyone" + }, + { + "name": "QA" + }, + { + "name": "TaskUs moderators" + } + ], + "name": "Ioana rusiczki" + } + }, + "timestamp": "2024-11-06T06:54:36.962174+00:00" + } + } \ No newline at end of file diff --git a/src/olympia/abuse/tests/assets/cinder_webhook_payloads/reporter_appeal_change_but_still_approve.json b/src/olympia/abuse/tests/assets/cinder_webhook_payloads/reporter_appeal_change_but_still_approve.json new file mode 100644 index 000000000000..fc8959737235 --- /dev/null +++ b/src/olympia/abuse/tests/assets/cinder_webhook_payloads/reporter_appeal_change_but_still_approve.json @@ -0,0 +1,178 @@ +{ + "event": 
"decision.created", + "payload": { + "notes": "still no!", + "appeal": { + "appealed_decision": { + "id": "70edb73e-1e66-40f0-a860-fbddbd70dd9a", + "type": "queue_review", + "user": { + "name": "Andrew Williamson", + "email": "awilliamson@mozilla.com", + "groups": [ + { + "name": "Admin" + }, + { + "name": "Everyone" + } + ] + }, + "notes": "", + "policies": [ + { + "id": "1c5d711a-78b7-4fc2-bdef-9a33024f5e8b", + "name": "Ignore / Approve", + "is_illegal": false, + "is_non_violating": true, + "enforcement_actions": [ + "amo-approve" + ] + } + ], + "policies_removed": [], + "enforcement_actions": [ + "amo-approve" + ], + "enforcement_actions_removed": [] + } + }, + "entity": { + "attributes": { + "id": "620597", + "guid": "{2e5ff8c8-32fe-46d0-9fc8-6b8986621f3c}", + "name": "SearchbyImageSearchbyImageSearchbyImageSearchbyIma", + "slug": "search-by-image-test2", + "summary": "LongunbrokenstringLongunbrokenstringLongunbrokenstringLongunbrokenstringLongunbrokenstringLongunbrokenstringLongunbrokenstringLongunbrokenstringLongunbrokenstringLongunbrokenstringLongunbrokenstringLongunbrokenstringLongunbrokenstringLongunbrokenstri", + "version": "3.6.3", + "homepage": "https://github.com/dessant/search-by-image", + "previews": [ + { + "value": "https://storage.googleapis.com/dev_addons_server_for_svcse_1619/697d12dfed708ef87fdcc1360071a722d860481c9c38d175d416a2cd7447e71a.jpg?Expires=1723647115&GoogleAccessId=dev-svcse-1619-uploader%40moz-fx-amo-nonprod.iam.gserviceaccount.com&Signature=jAXqz5zf2hjVc54lpCTRoiVi%2B%2BR8rzxWlPT02VwjpAGtLc6D1%2BTGfvVNCHd9GZvqQnS4ln2QOfM51gWpWmMkvSudRR3vAuPDkqApM8W5wzovC1pyhKS%2FokziLn6K03qGlXemusYg6Ktf4Tvel9w%2BPACK74fCYprD8zM5xBNnUmAFJ7Vf1XkFx78AKPfBkPzuXuymHugR8SrW%2FuBkFuDdht4oRSNSlcKw4pLuJPaNlB1J2S99WY6wE8uDRvlFTJNaM2M%2BNseTkATY1k2nnQIXId5tX6ciGfKsldiKcFWFTWwGzEt37AYvtFJlzEiL3St4g1j0hlj8XL3KO93MVNZpAw%3D%3D", + "mime_type": "image/jpeg", + "authentication": null + }, + { + "value": 
"https://storage.googleapis.com/dev_addons_server_for_svcse_1619/6ac8decb27154faccb8da40684635091fab082aaa1bc9687db2ed876fec2c9b9.jpg?Expires=1723647115&GoogleAccessId=dev-svcse-1619-uploader%40moz-fx-amo-nonprod.iam.gserviceaccount.com&Signature=J3xntDTAHFwVwdurw8UqCPP2Zaxa5SclJZwF7QACKcwo87ZHS3wgiRYPUrDFDIjEOGwWQkiEx2wgt7wThnYrcsEmdfe6myUCZBK%2BFtfk3n0Dv%2B5QVal3osrehYa3k0n0if%2Ft7t8djimodZNIZoYa4ROocz4Libx0uLzMHrFLHLf41Vos4g0ShRvjeytauFWgmNnZafsS0N1JRl9RrFfW7sNEtGcTJQFHXWn0WZKLvW8Hr91FS6xhqNMCppnKSiFxZFphzf00kX1T%2F4eh3MD4rNUaj71pFw8i7wKW9wXC%2FH9FnT%2BZbtb0YAWXPHx%2BN6fbIYgUfR1tqLG59yEzW9Wvdw%3D%3D", + "mime_type": "image/jpeg", + "authentication": null + }, + { + "value": "https://storage.googleapis.com/dev_addons_server_for_svcse_1619/c55a38534ea30c9171552143018a7a50b9ed3dde7b5bb05bf374f8ad2e8c0bfd.jpg?Expires=1723647116&GoogleAccessId=dev-svcse-1619-uploader%40moz-fx-amo-nonprod.iam.gserviceaccount.com&Signature=knjz04%2FRtrPZI7lf43%2BfqzS0PSNRxrxwmAoK9MeXfOrQWTYU8Pqinf2Vcq9wS%2BDAo78U9I3tET5BQb0P7vT%2BiGWokwTm%2FF0aL8Q1dooKLmSmqUAiCoFWdKZPTF3lZAvc1HeUUSxAVF3BT%2FnrEAxkzsT4RRB6Klfe064xo6xyIcPpDrqD3sM2oFRv7mSjdTNfvwZOVKgkOEdrhxMdg9WLPrFIapNueu2%2Fvnv8R1GljmXk2%2FD0%2BERB%2BwqhHeUb3jQ9b8kaDbV%2BGb1JgsNDNI3095SKK6m%2B2Ju96p5XXCFJ5Vcdt5mssrqZEXP7iSl1ts%2Bp%2FC%2BUdxfMVs2lIhf39wYNDg%3D%3D", + "mime_type": "image/jpeg", + "authentication": null + }, + { + "value": "https://storage.googleapis.com/dev_addons_server_for_svcse_1619/01a32fc69888de4fdf2e6ab48d19331d12ad61612dd603ce799bc65966319e9e.jpg?Expires=1723647116&GoogleAccessId=dev-svcse-1619-uploader%40moz-fx-amo-nonprod.iam.gserviceaccount.com&Signature=Vw9Y4g6rlcMKiGdFn%2FRDKy4HraPovs8LVs1z9saw0DFg6JQFTEK5i6PPtnvMMcvPukau5k9ffsFvP%2BDfmDv76Yirwj7i%2BXbLkMKylpXAOgwGfmSoNM9qE6c0It2YBZ8dcCrvs0dK1gHM56RbopInsg4evaXf%2Bhd8o5v%2FhBkWDZTQp60UJsllYV12cN84pi0Sy7p2tXGm50vzVlm3SwBF7Qxu9Ml%2BN7kIO5IVjg5qs9E2NgIzTNFmg4V3tbzmnfT6enmKYtWl9VcgMJ4Vl0YTkHq0Jb9DJWe3HGKUzSfovlJgF7fLUpSLrgnhMGlGWk35UIZ84rAUvo3u9ng5JEL%2F6Q%3D%3D", 
+ "mime_type": "image/jpeg", + "authentication": null + } + ], + "description": "A powerful reverse image search tool, with support for various search engines, such as Google, Bing, Yandex, Baidu and TinEye. test2\nA powerful reverse image search tool, with support for various search engines, such as Google, Bing, Yandex, Baidu and TinEye.", + "support_url": "https://addons-server.readthedocs.io/en/latest/topics/api/addons.html#put-create-or-edit", + "last_updated": "2023-11-10T10:58:23.207421", + "release_notes": "", + "support_email": "rusiczki.ioana@gmail.com", + "promoted_badge": "", + "average_daily_users": 0 + }, + "entity_schema": "amo_addon" + }, + "source": { + "job": { + "id": "5c7c3e21-8ccd-4d2f-b3b4-429620bd7a63", + "queue": { + "slug": "amo-dev-ratings", + "is_multi_review": false + }, + "reports": [], + "created_at": "2024-04-24T17:45:32.008810+00:00" + }, + "user": { + "name": "Andrew Williamson", + "email": "awilliamson@mozilla.com", + "groups": [ + { + "name": "Admin" + }, + { + "name": "Everyone" + } + ] + }, + "decision": { + "id": "76e0006d-1a42-4ec7-9475-148bab1970f1", + "type": "confirm", + "metadata": {} + } + }, + "policies": [ + { + "id": "1c5d711a-78b7-4fc2-bdef-9a33024f5e8b", + "name": "Ignore / Approve", + "is_illegal": false, + "is_non_violating": true, + "enforcement_actions": [ + "amo-approve" + ] + } + ], + "timestamp": "2024-04-24T17:45:32.008810+00:00", + "point_updates": [], + "appeals_resolved": [ + { + "source": "unknown", + "outcome": "denied", + "appealer": { + "attributes": { + "id": "631", + "name": "eviljeff but with a really realy long name", + "email": "awilliamson@mozilla.com", + "fxa_id": "ca1afcbcd0ab4dc490c02345ab1a8cd6", + "created": "2007-03-05T13:09:33" + }, + "entity_schema": "amo_user" + } + } + ], + "policies_removed": [], + "previous_decision": { + "id": "70edb73e-1e66-40f0-a860-fbddbd70dd9a", + "type": "queue_review", + "user": { + "name": "Andrew Williamson", + "email": "awilliamson@mozilla.com", + "groups": 
[ + { + "name": "Admin" + }, + { + "name": "Everyone" + } + ] + }, + "notes": "", + "metadata": {}, + "policies": [ + { + "id": "1c5d711a-78b7-4fc2-bdef-9a33024f5e8b", + "name": "Ignore / Approve", + "is_illegal": false, + "is_non_violating": true, + "enforcement_actions": [ + "amo-approve" + ] + } + ], + "policies_removed": [], + "enforcement_actions": [ + "amo-approve" + ], + "enforcement_actions_removed": [] + }, + "enforcement_actions": [ + "amo-approve" + ], + "enforcement_actions_removed": [] + } + } \ No newline at end of file diff --git a/src/olympia/abuse/tests/assets/cinder_webhook_appeal_change_to_disable.json b/src/olympia/abuse/tests/assets/cinder_webhook_payloads/reporter_appeal_change_to_disable.json similarity index 68% rename from src/olympia/abuse/tests/assets/cinder_webhook_appeal_change_to_disable.json rename to src/olympia/abuse/tests/assets/cinder_webhook_payloads/reporter_appeal_change_to_disable.json index 837b81e9702d..091c064f1196 100644 --- a/src/olympia/abuse/tests/assets/cinder_webhook_appeal_change_to_disable.json +++ b/src/olympia/abuse/tests/assets/cinder_webhook_payloads/reporter_appeal_change_to_disable.json @@ -4,11 +4,11 @@ "notes": "fine I'll disable it", "appeal": { "appealed_decision": { - "id": "8ae11e9c-d8e8-40a8-adcc-d33361ef3304", + "id": "dece202c-3bca-4d50-8e34-6690f3c065ec", "type": "queue_review", "user": { - "name": "Mathieu Pillard", - "email": "mpillard@mozilla.com", + "name": "Andrew Williamson", + "email": "awilliamson@mozilla.com", "groups": [ { "name": "Admin" @@ -18,20 +18,23 @@ } ] }, - "notes": "Initially approving", + "notes": "", "policies": [ { "id": "1c5d711a-78b7-4fc2-bdef-9a33024f5e8b", - "name": "Ignore", + "name": "Ignore / Approve", "is_illegal": false, + "is_non_violating": true, "enforcement_actions": [ "amo-approve" ] } ], + "policies_removed": [], "enforcement_actions": [ "amo-approve" - ] + ], + "enforcement_actions_removed": [] } }, "entity": { @@ -79,15 +82,15 @@ "job": { "id": 
"5ab7cb33-a5ab-4dfa-9d72-4c2061ffeb08", "queue": { - "slug": "amo-content-infringement", + "slug": "amo-dev-ratings", "is_multi_review": false }, "reports": [], - "created_at": "2024-01-12T14:52:35.366504+00:00" + "created_at": "2024-04-24T18:17:02.045278+00:00" }, "user": { - "name": "Mathieu Pillard", - "email": "mpillard@mozilla.com", + "name": "Andrew Williamson", + "email": "awilliamson@mozilla.com", "groups": [ { "name": "Admin" @@ -99,33 +102,103 @@ }, "decision": { "id": "4f18b22c-6078-4934-b395-6a2e01cadf63", - "type": "override", + "type": "confirm", "metadata": {} } }, "policies": [ { - "id": "86d7bf98-288c-4e78-9a63-3f5db96847b1", - "name": "Hate speech", + "id": "7ea512a2-39a6-4cb6-91a0-2ed162192f7f", + "name": "Content", + "is_illegal": false, + "is_non_violating": false, + "enforcement_actions": [] + }, + { + "id": "a5c96c92-2373-4d11-b573-61b0de00d8e0", + "name": "Spam", + "parent_id": "7ea512a2-39a6-4cb6-91a0-2ed162192f7f", "is_illegal": false, + "is_non_violating": false, "enforcement_actions": [ "amo-ban-user", "amo-delete-collection", "amo-delete-rating", - "amo-disable-addon", - "delete-status", - "freeze-account" + "amo-disable-addon" ] } ], - "timestamp": "2024-01-12T14:53:23.438634+00:00", + "timestamp": "2024-04-24T18:19:30.274623+00:00", + "point_updates": [], + "appeals_resolved": [ + { + "source": "unknown", + "outcome": "adjustment", + "appealer": { + "attributes": { + "id": "631", + "name": "eviljeff but with a really realy long name", + "email": "awilliamson@mozilla.com", + "fxa_id": "ca1afcbcd0ab4dc490c02345ab1a8cd6", + "created": "2007-03-05T13:09:33" + }, + "entity_schema": "amo_user" + } + } + ], + "policies_removed": [ + { + "id": "1c5d711a-78b7-4fc2-bdef-9a33024f5e8b", + "name": "Ignore / Approve", + "is_illegal": false, + "is_non_violating": true, + "enforcement_actions": [ + "amo-approve" + ] + } + ], + "previous_decision": { + "id": "dece202c-3bca-4d50-8e34-6690f3c065ec", + "type": "queue_review", + "user": { + "name": 
"Andrew Williamson", + "email": "awilliamson@mozilla.com", + "groups": [ + { + "name": "Admin" + }, + { + "name": "Everyone" + } + ] + }, + "notes": "", + "metadata": {}, + "policies": [ + { + "id": "1c5d711a-78b7-4fc2-bdef-9a33024f5e8b", + "name": "Ignore / Approve", + "is_illegal": false, + "is_non_violating": true, + "enforcement_actions": [ + "amo-approve" + ] + } + ], + "policies_removed": [], + "enforcement_actions": [ + "amo-approve" + ], + "enforcement_actions_removed": [] + }, "enforcement_actions": [ "amo-ban-user", "amo-delete-collection", "amo-delete-rating", - "amo-disable-addon", - "delete-status", - "freeze-account" + "amo-disable-addon" + ], + "enforcement_actions_removed": [ + "amo-approve" ] } -} +} \ No newline at end of file diff --git a/src/olympia/abuse/tests/assets/cinder_webhook_payloads/reporter_appeal_confirm_approve.json b/src/olympia/abuse/tests/assets/cinder_webhook_payloads/reporter_appeal_confirm_approve.json new file mode 100644 index 000000000000..29eeb7142d28 --- /dev/null +++ b/src/olympia/abuse/tests/assets/cinder_webhook_payloads/reporter_appeal_confirm_approve.json @@ -0,0 +1,178 @@ +{ + "event": "decision.created", + "payload": { + "notes": "still no!", + "appeal": { + "appealed_decision": { + "id": "2c9d423b-88ab-404d-a1a3-edb7f69af803", + "type": "queue_review", + "user": { + "name": "Andrew Williamson", + "email": "awilliamson@mozilla.com", + "groups": [ + { + "name": "Admin" + }, + { + "name": "Everyone" + } + ] + }, + "notes": "", + "policies": [ + { + "id": "1c5d711a-78b7-4fc2-bdef-9a33024f5e8b", + "name": "Ignore / Approve", + "is_illegal": false, + "is_non_violating": true, + "enforcement_actions": [ + "amo-approve" + ] + } + ], + "policies_removed": [], + "enforcement_actions": [ + "amo-approve" + ], + "enforcement_actions_removed": [] + } + }, + "entity": { + "attributes": { + "id": "620597", + "guid": "{2e5ff8c8-32fe-46d0-9fc8-6b8986621f3c}", + "name": "SearchbyImageSearchbyImageSearchbyImageSearchbyIma", + "slug": 
"search-by-image-test2", + "summary": "LongunbrokenstringLongunbrokenstringLongunbrokenstringLongunbrokenstringLongunbrokenstringLongunbrokenstringLongunbrokenstringLongunbrokenstringLongunbrokenstringLongunbrokenstringLongunbrokenstringLongunbrokenstringLongunbrokenstringLongunbrokenstri", + "version": "3.6.3", + "homepage": "https://github.com/dessant/search-by-image", + "previews": [ + { + "value": "https://storage.googleapis.com/dev_addons_server_for_svcse_1619/697d12dfed708ef87fdcc1360071a722d860481c9c38d175d416a2cd7447e71a.jpg?Expires=1723647115&GoogleAccessId=dev-svcse-1619-uploader%40moz-fx-amo-nonprod.iam.gserviceaccount.com&Signature=jAXqz5zf2hjVc54lpCTRoiVi%2B%2BR8rzxWlPT02VwjpAGtLc6D1%2BTGfvVNCHd9GZvqQnS4ln2QOfM51gWpWmMkvSudRR3vAuPDkqApM8W5wzovC1pyhKS%2FokziLn6K03qGlXemusYg6Ktf4Tvel9w%2BPACK74fCYprD8zM5xBNnUmAFJ7Vf1XkFx78AKPfBkPzuXuymHugR8SrW%2FuBkFuDdht4oRSNSlcKw4pLuJPaNlB1J2S99WY6wE8uDRvlFTJNaM2M%2BNseTkATY1k2nnQIXId5tX6ciGfKsldiKcFWFTWwGzEt37AYvtFJlzEiL3St4g1j0hlj8XL3KO93MVNZpAw%3D%3D", + "mime_type": "image/jpeg", + "authentication": null + }, + { + "value": "https://storage.googleapis.com/dev_addons_server_for_svcse_1619/6ac8decb27154faccb8da40684635091fab082aaa1bc9687db2ed876fec2c9b9.jpg?Expires=1723647115&GoogleAccessId=dev-svcse-1619-uploader%40moz-fx-amo-nonprod.iam.gserviceaccount.com&Signature=J3xntDTAHFwVwdurw8UqCPP2Zaxa5SclJZwF7QACKcwo87ZHS3wgiRYPUrDFDIjEOGwWQkiEx2wgt7wThnYrcsEmdfe6myUCZBK%2BFtfk3n0Dv%2B5QVal3osrehYa3k0n0if%2Ft7t8djimodZNIZoYa4ROocz4Libx0uLzMHrFLHLf41Vos4g0ShRvjeytauFWgmNnZafsS0N1JRl9RrFfW7sNEtGcTJQFHXWn0WZKLvW8Hr91FS6xhqNMCppnKSiFxZFphzf00kX1T%2F4eh3MD4rNUaj71pFw8i7wKW9wXC%2FH9FnT%2BZbtb0YAWXPHx%2BN6fbIYgUfR1tqLG59yEzW9Wvdw%3D%3D", + "mime_type": "image/jpeg", + "authentication": null + }, + { + "value": 
"https://storage.googleapis.com/dev_addons_server_for_svcse_1619/c55a38534ea30c9171552143018a7a50b9ed3dde7b5bb05bf374f8ad2e8c0bfd.jpg?Expires=1723647116&GoogleAccessId=dev-svcse-1619-uploader%40moz-fx-amo-nonprod.iam.gserviceaccount.com&Signature=knjz04%2FRtrPZI7lf43%2BfqzS0PSNRxrxwmAoK9MeXfOrQWTYU8Pqinf2Vcq9wS%2BDAo78U9I3tET5BQb0P7vT%2BiGWokwTm%2FF0aL8Q1dooKLmSmqUAiCoFWdKZPTF3lZAvc1HeUUSxAVF3BT%2FnrEAxkzsT4RRB6Klfe064xo6xyIcPpDrqD3sM2oFRv7mSjdTNfvwZOVKgkOEdrhxMdg9WLPrFIapNueu2%2Fvnv8R1GljmXk2%2FD0%2BERB%2BwqhHeUb3jQ9b8kaDbV%2BGb1JgsNDNI3095SKK6m%2B2Ju96p5XXCFJ5Vcdt5mssrqZEXP7iSl1ts%2Bp%2FC%2BUdxfMVs2lIhf39wYNDg%3D%3D", + "mime_type": "image/jpeg", + "authentication": null + }, + { + "value": "https://storage.googleapis.com/dev_addons_server_for_svcse_1619/01a32fc69888de4fdf2e6ab48d19331d12ad61612dd603ce799bc65966319e9e.jpg?Expires=1723647116&GoogleAccessId=dev-svcse-1619-uploader%40moz-fx-amo-nonprod.iam.gserviceaccount.com&Signature=Vw9Y4g6rlcMKiGdFn%2FRDKy4HraPovs8LVs1z9saw0DFg6JQFTEK5i6PPtnvMMcvPukau5k9ffsFvP%2BDfmDv76Yirwj7i%2BXbLkMKylpXAOgwGfmSoNM9qE6c0It2YBZ8dcCrvs0dK1gHM56RbopInsg4evaXf%2Bhd8o5v%2FhBkWDZTQp60UJsllYV12cN84pi0Sy7p2tXGm50vzVlm3SwBF7Qxu9Ml%2BN7kIO5IVjg5qs9E2NgIzTNFmg4V3tbzmnfT6enmKYtWl9VcgMJ4Vl0YTkHq0Jb9DJWe3HGKUzSfovlJgF7fLUpSLrgnhMGlGWk35UIZ84rAUvo3u9ng5JEL%2F6Q%3D%3D", + "mime_type": "image/jpeg", + "authentication": null + } + ], + "description": "A powerful reverse image search tool, with support for various search engines, such as Google, Bing, Yandex, Baidu and TinEye. 
test2\nA powerful reverse image search tool, with support for various search engines, such as Google, Bing, Yandex, Baidu and TinEye.", + "support_url": "https://addons-server.readthedocs.io/en/latest/topics/api/addons.html#put-create-or-edit", + "last_updated": "2023-11-10T10:58:23.207421", + "release_notes": "", + "support_email": "rusiczki.ioana@gmail.com", + "promoted_badge": "", + "average_daily_users": 0 + }, + "entity_schema": "amo_addon" + }, + "source": { + "job": { + "id": "5c7c3e21-8ccd-4d2f-b3b4-429620bd7a63", + "queue": { + "slug": "amo-dev-ratings", + "is_multi_review": false + }, + "reports": [], + "created_at": "2024-04-24T17:44:22.657497+00:00" + }, + "user": { + "name": "Andrew Williamson", + "email": "awilliamson@mozilla.com", + "groups": [ + { + "name": "Admin" + }, + { + "name": "Everyone" + } + ] + }, + "decision": { + "id": "76e0006d-1a42-4ec7-9475-148bab1970f1", + "type": "confirm", + "metadata": {} + } + }, + "policies": [ + { + "id": "1c5d711a-78b7-4fc2-bdef-9a33024f5e8b", + "name": "Ignore / Approve", + "is_illegal": false, + "is_non_violating": true, + "enforcement_actions": [ + "amo-approve" + ] + } + ], + "timestamp": "2024-04-24T17:45:32.008810+00:00", + "point_updates": [], + "appeals_resolved": [ + { + "source": "unknown", + "outcome": "denied", + "appealer": { + "attributes": { + "id": "631", + "name": "eviljeff but with a really realy long name", + "email": "awilliamson@mozilla.com", + "fxa_id": "ca1afcbcd0ab4dc490c02345ab1a8cd6", + "created": "2007-03-05T13:09:33" + }, + "entity_schema": "amo_user" + } + } + ], + "policies_removed": [], + "previous_decision": { + "id": "2c9d423b-88ab-404d-a1a3-edb7f69af803", + "type": "queue_review", + "user": { + "name": "Andrew Williamson", + "email": "awilliamson@mozilla.com", + "groups": [ + { + "name": "Admin" + }, + { + "name": "Everyone" + } + ] + }, + "notes": "", + "metadata": {}, + "policies": [ + { + "id": "1c5d711a-78b7-4fc2-bdef-9a33024f5e8b", + "name": "Ignore / Approve", + 
"is_illegal": false, + "is_non_violating": true, + "enforcement_actions": [ + "amo-approve" + ] + } + ], + "policies_removed": [], + "enforcement_actions": [ + "amo-approve" + ], + "enforcement_actions_removed": [] + }, + "enforcement_actions": [ + "amo-approve" + ], + "enforcement_actions_removed": [] + } + } \ No newline at end of file diff --git a/src/olympia/abuse/tests/assets/cinder_webhook_payloads/target_appeal_change_to_approve.json b/src/olympia/abuse/tests/assets/cinder_webhook_payloads/target_appeal_change_to_approve.json new file mode 100644 index 000000000000..0e3d1d0c3fd8 --- /dev/null +++ b/src/olympia/abuse/tests/assets/cinder_webhook_payloads/target_appeal_change_to_approve.json @@ -0,0 +1,200 @@ +{ + "event": "decision.created", + "payload": { + "notes": "", + "appeal": { + "appealed_decision": { + "id": "17fa4325-2a5b-4124-8b4b-6db40bfd1670", + "type": "queue_review", + "user": { + "name": "Andrew Williamson", + "email": "awilliamson@mozilla.com", + "groups": [ + { + "name": "Admin" + }, + { + "name": "Everyone" + } + ] + }, + "notes": "", + "policies": [ + { + "id": "7ea512a2-39a6-4cb6-91a0-2ed162192f7f", + "name": "Content", + "is_illegal": false, + "is_non_violating": false, + "enforcement_actions": [] + }, + { + "id": "a5c96c92-2373-4d11-b573-61b0de00d8e0", + "name": "Spam", + "parent_id": "7ea512a2-39a6-4cb6-91a0-2ed162192f7f", + "is_illegal": false, + "is_non_violating": false, + "enforcement_actions": [ + "amo-ban-user", + "amo-delete-collection", + "amo-delete-rating", + "amo-disable-addon" + ] + } + ], + "policies_removed": [], + "enforcement_actions": [ + "amo-ban-user", + "amo-delete-collection", + "amo-delete-rating", + "amo-disable-addon" + ], + "enforcement_actions_removed": [] + } + }, + "entity": { + "attributes": { + "id": "568840", + "body": "somethih omsdfdf", + "score": 5, + "created": "2024-04-24T17:56:32.431772" + }, + "entity_schema": "amo_rating" + }, + "source": { + "job": { + "id": "5ab7cb33-a5ab-4dfa-9d72-4c2061ffeb08", 
+ "queue": { + "slug": "amo-dev-ratings", + "is_multi_review": false + }, + "reports": [], + "created_at": "2024-04-25T15:27:03.203545+00:00" + }, + "user": { + "name": "Andrew Williamson", + "email": "awilliamson@mozilla.com", + "groups": [ + { + "name": "Admin" + }, + { + "name": "Everyone" + } + ] + }, + "decision": { + "id": "e58b0b19-bf06-44bc-a517-2f0230faf707", + "type": "revert", + "metadata": {} + } + }, + "policies": [ + { + "id": "1c5d711a-78b7-4fc2-bdef-9a33024f5e8b", + "name": "Ignore / Approve", + "is_illegal": false, + "is_non_violating": true, + "enforcement_actions": [ + "amo-approve" + ] + } + ], + "timestamp": "2024-04-25T15:27:38.492718+00:00", + "point_updates": [], + "appeals_resolved": [ + { + "source": "unknown", + "outcome": "adjustment", + "appealer": { + "attributes": { + "id": "631", + "name": "eviljeff but with a really realy long name", + "email": "awilliamson@mozilla.com", + "fxa_id": "ca1afcbcd0ab4dc490c02345ab1a8cd6", + "created": "2007-03-05T13:09:33" + }, + "entity_schema": "amo_user" + } + } + ], + "policies_removed": [ + { + "id": "7ea512a2-39a6-4cb6-91a0-2ed162192f7f", + "name": "Content", + "is_illegal": false, + "is_non_violating": false, + "enforcement_actions": [] + }, + { + "id": "a5c96c92-2373-4d11-b573-61b0de00d8e0", + "name": "Spam", + "parent_id": "7ea512a2-39a6-4cb6-91a0-2ed162192f7f", + "is_illegal": false, + "is_non_violating": false, + "enforcement_actions": [ + "amo-ban-user", + "amo-delete-collection", + "amo-delete-rating", + "amo-disable-addon" + ] + } + ], + "previous_decision": { + "id": "17fa4325-2a5b-4124-8b4b-6db40bfd1670", + "type": "queue_review", + "user": { + "name": "Andrew Williamson", + "email": "awilliamson@mozilla.com", + "groups": [ + { + "name": "Admin" + }, + { + "name": "Everyone" + } + ] + }, + "notes": "", + "metadata": {}, + "policies": [ + { + "id": "7ea512a2-39a6-4cb6-91a0-2ed162192f7f", + "name": "Content", + "is_illegal": false, + "is_non_violating": false, + "enforcement_actions": [] + 
}, + { + "id": "a5c96c92-2373-4d11-b573-61b0de00d8e0", + "name": "Spam", + "parent_id": "7ea512a2-39a6-4cb6-91a0-2ed162192f7f", + "is_illegal": false, + "is_non_violating": false, + "enforcement_actions": [ + "amo-ban-user", + "amo-delete-collection", + "amo-delete-rating", + "amo-disable-addon" + ] + } + ], + "policies_removed": [], + "enforcement_actions": [ + "amo-ban-user", + "amo-delete-collection", + "amo-delete-rating", + "amo-disable-addon" + ], + "enforcement_actions_removed": [] + }, + "enforcement_actions": [ + "amo-approve" + ], + "enforcement_actions_removed": [ + "amo-ban-user", + "amo-delete-collection", + "amo-delete-rating", + "amo-disable-addon" + ] + } + } \ No newline at end of file diff --git a/src/olympia/abuse/tests/assets/cinder_webhook_payloads/target_appeal_confirm_disable.json b/src/olympia/abuse/tests/assets/cinder_webhook_payloads/target_appeal_confirm_disable.json new file mode 100644 index 000000000000..96d257709be0 --- /dev/null +++ b/src/olympia/abuse/tests/assets/cinder_webhook_payloads/target_appeal_confirm_disable.json @@ -0,0 +1,220 @@ +{ + "event": "decision.created", + "payload": { + "notes": "", + "appeal": { + "appealed_decision": { + "id": "20ca9df7-447d-42ea-823f-2a5e114dd189", + "type": "queue_review", + "user": { + "name": "Andrew Williamson", + "email": "awilliamson@mozilla.com", + "groups": [ + { + "name": "Admin" + }, + { + "name": "Everyone" + } + ] + }, + "notes": "", + "policies": [ + { + "id": "0d9df565-f249-40f8-8954-e73e65932ca2", + "name": "Acceptable Use", + "is_illegal": false, + "is_non_violating": false, + "enforcement_actions": [] + }, + { + "id": "a182e453-a30b-44a2-ba02-651e22238257", + "name": "Harassment", + "parent_id": "0d9df565-f249-40f8-8954-e73e65932ca2", + "is_illegal": false, + "is_non_violating": false, + "enforcement_actions": [ + "amo-ban-user", + "amo-delete-collection", + "amo-delete-rating", + "amo-disable-addon" + ] + } + ], + "policies_removed": [], + "enforcement_actions": [ + 
"amo-ban-user", + "amo-delete-collection", + "amo-delete-rating", + "amo-disable-addon" + ], + "enforcement_actions_removed": [] + } + }, + "entity": { + "attributes": { + "id": "620597", + "guid": "{2e5ff8c8-32fe-46d0-9fc8-6b8986621f3c}", + "name": "SearchbyImageSearchbyImageSearchbyImageSearchbyIma", + "slug": "search-by-image-test2", + "summary": "LongunbrokenstringLongunbrokenstringLongunbrokenstringLongunbrokenstringLongunbrokenstringLongunbrokenstringLongunbrokenstringLongunbrokenstringLongunbrokenstringLongunbrokenstringLongunbrokenstringLongunbrokenstringLongunbrokenstringLongunbrokenstri", + "version": "3.6.3", + "homepage": "https://github.com/dessant/search-by-image", + "previews": [ + { + "value": "https://storage.googleapis.com/dev_addons_server_for_svcse_1619/697d12dfed708ef87fdcc1360071a722d860481c9c38d175d416a2cd7447e71a.jpg?Expires=1723647115&GoogleAccessId=dev-svcse-1619-uploader%40moz-fx-amo-nonprod.iam.gserviceaccount.com&Signature=jAXqz5zf2hjVc54lpCTRoiVi%2B%2BR8rzxWlPT02VwjpAGtLc6D1%2BTGfvVNCHd9GZvqQnS4ln2QOfM51gWpWmMkvSudRR3vAuPDkqApM8W5wzovC1pyhKS%2FokziLn6K03qGlXemusYg6Ktf4Tvel9w%2BPACK74fCYprD8zM5xBNnUmAFJ7Vf1XkFx78AKPfBkPzuXuymHugR8SrW%2FuBkFuDdht4oRSNSlcKw4pLuJPaNlB1J2S99WY6wE8uDRvlFTJNaM2M%2BNseTkATY1k2nnQIXId5tX6ciGfKsldiKcFWFTWwGzEt37AYvtFJlzEiL3St4g1j0hlj8XL3KO93MVNZpAw%3D%3D", + "mime_type": "image/jpeg", + "authentication": null + }, + { + "value": 
"https://storage.googleapis.com/dev_addons_server_for_svcse_1619/6ac8decb27154faccb8da40684635091fab082aaa1bc9687db2ed876fec2c9b9.jpg?Expires=1723647115&GoogleAccessId=dev-svcse-1619-uploader%40moz-fx-amo-nonprod.iam.gserviceaccount.com&Signature=J3xntDTAHFwVwdurw8UqCPP2Zaxa5SclJZwF7QACKcwo87ZHS3wgiRYPUrDFDIjEOGwWQkiEx2wgt7wThnYrcsEmdfe6myUCZBK%2BFtfk3n0Dv%2B5QVal3osrehYa3k0n0if%2Ft7t8djimodZNIZoYa4ROocz4Libx0uLzMHrFLHLf41Vos4g0ShRvjeytauFWgmNnZafsS0N1JRl9RrFfW7sNEtGcTJQFHXWn0WZKLvW8Hr91FS6xhqNMCppnKSiFxZFphzf00kX1T%2F4eh3MD4rNUaj71pFw8i7wKW9wXC%2FH9FnT%2BZbtb0YAWXPHx%2BN6fbIYgUfR1tqLG59yEzW9Wvdw%3D%3D", + "mime_type": "image/jpeg", + "authentication": null + }, + { + "value": "https://storage.googleapis.com/dev_addons_server_for_svcse_1619/c55a38534ea30c9171552143018a7a50b9ed3dde7b5bb05bf374f8ad2e8c0bfd.jpg?Expires=1723647116&GoogleAccessId=dev-svcse-1619-uploader%40moz-fx-amo-nonprod.iam.gserviceaccount.com&Signature=knjz04%2FRtrPZI7lf43%2BfqzS0PSNRxrxwmAoK9MeXfOrQWTYU8Pqinf2Vcq9wS%2BDAo78U9I3tET5BQb0P7vT%2BiGWokwTm%2FF0aL8Q1dooKLmSmqUAiCoFWdKZPTF3lZAvc1HeUUSxAVF3BT%2FnrEAxkzsT4RRB6Klfe064xo6xyIcPpDrqD3sM2oFRv7mSjdTNfvwZOVKgkOEdrhxMdg9WLPrFIapNueu2%2Fvnv8R1GljmXk2%2FD0%2BERB%2BwqhHeUb3jQ9b8kaDbV%2BGb1JgsNDNI3095SKK6m%2B2Ju96p5XXCFJ5Vcdt5mssrqZEXP7iSl1ts%2Bp%2FC%2BUdxfMVs2lIhf39wYNDg%3D%3D", + "mime_type": "image/jpeg", + "authentication": null + }, + { + "value": "https://storage.googleapis.com/dev_addons_server_for_svcse_1619/01a32fc69888de4fdf2e6ab48d19331d12ad61612dd603ce799bc65966319e9e.jpg?Expires=1723647116&GoogleAccessId=dev-svcse-1619-uploader%40moz-fx-amo-nonprod.iam.gserviceaccount.com&Signature=Vw9Y4g6rlcMKiGdFn%2FRDKy4HraPovs8LVs1z9saw0DFg6JQFTEK5i6PPtnvMMcvPukau5k9ffsFvP%2BDfmDv76Yirwj7i%2BXbLkMKylpXAOgwGfmSoNM9qE6c0It2YBZ8dcCrvs0dK1gHM56RbopInsg4evaXf%2Bhd8o5v%2FhBkWDZTQp60UJsllYV12cN84pi0Sy7p2tXGm50vzVlm3SwBF7Qxu9Ml%2BN7kIO5IVjg5qs9E2NgIzTNFmg4V3tbzmnfT6enmKYtWl9VcgMJ4Vl0YTkHq0Jb9DJWe3HGKUzSfovlJgF7fLUpSLrgnhMGlGWk35UIZ84rAUvo3u9ng5JEL%2F6Q%3D%3D", 
+ "mime_type": "image/jpeg", + "authentication": null + } + ], + "description": "A powerful reverse image search tool, with support for various search engines, such as Google, Bing, Yandex, Baidu and TinEye. test2\nA powerful reverse image search tool, with support for various search engines, such as Google, Bing, Yandex, Baidu and TinEye.", + "support_url": "https://addons-server.readthedocs.io/en/latest/topics/api/addons.html#put-create-or-edit", + "last_updated": "2023-11-10T10:58:23.207421", + "release_notes": "", + "support_email": "rusiczki.ioana@gmail.com", + "promoted_badge": "", + "average_daily_users": 0 + }, + "entity_schema": "amo_addon" + }, + "source": { + "job": { + "id": "5ab7cb33-a5ab-4dfa-9d72-4c2061ffeb08", + "queue": { + "slug": "amo-dev-ratings", + "is_multi_review": false + }, + "reports": [], + "created_at": "2024-04-25T15:22:00.182296+00:00" + }, + "user": { + "name": "Andrew Williamson", + "email": "awilliamson@mozilla.com", + "groups": [ + { + "name": "Admin" + }, + { + "name": "Everyone" + } + ] + }, + "decision": { + "id": "c092390f-3dae-4565-ab14-c33a13a6ec68", + "type": "confirm", + "metadata": {} + } + }, + "policies": [ + { + "id": "0d9df565-f249-40f8-8954-e73e65932ca2", + "name": "Acceptable Use", + "is_illegal": false, + "is_non_violating": false, + "enforcement_actions": [] + }, + { + "id": "a182e453-a30b-44a2-ba02-651e22238257", + "name": "Harassment", + "parent_id": "0d9df565-f249-40f8-8954-e73e65932ca2", + "is_illegal": false, + "is_non_violating": false, + "enforcement_actions": [ + "amo-ban-user", + "amo-delete-collection", + "amo-delete-rating", + "amo-disable-addon" + ] + } + ], + "timestamp": "2024-04-25T15:22:46.698479+00:00", + "point_updates": [], + "appeals_resolved": [ + { + "source": "unknown", + "outcome": "denied", + "appealer": { + "attributes": { + "id": "631", + "name": "eviljeff but with a really realy long name", + "email": "awilliamson@mozilla.com", + "fxa_id": "ca1afcbcd0ab4dc490c02345ab1a8cd6", + "created": 
"2007-03-05T13:09:33" + }, + "entity_schema": "amo_user" + } + } + ], + "policies_removed": [], + "previous_decision": { + "id": "20ca9df7-447d-42ea-823f-2a5e114dd189", + "type": "queue_review", + "user": { + "name": "Andrew Williamson", + "email": "awilliamson@mozilla.com", + "groups": [ + { + "name": "Admin" + }, + { + "name": "Everyone" + } + ] + }, + "notes": "", + "metadata": {}, + "policies": [ + { + "id": "0d9df565-f249-40f8-8954-e73e65932ca2", + "name": "Acceptable Use", + "is_illegal": false, + "is_non_violating": false, + "enforcement_actions": [] + }, + { + "id": "a182e453-a30b-44a2-ba02-651e22238257", + "name": "Harassment", + "parent_id": "0d9df565-f249-40f8-8954-e73e65932ca2", + "is_illegal": false, + "is_non_violating": false, + "enforcement_actions": [ + "amo-ban-user", + "amo-delete-collection", + "amo-delete-rating", + "amo-disable-addon" + ] + } + ], + "policies_removed": [], + "enforcement_actions": [ + "amo-ban-user", + "amo-delete-collection", + "amo-delete-rating", + "amo-disable-addon" + ], + "enforcement_actions_removed": [] + }, + "enforcement_actions": [ + "amo-ban-user", + "amo-delete-collection", + "amo-delete-rating", + "amo-disable-addon" + ], + "enforcement_actions_removed": [] + } +} \ No newline at end of file diff --git a/src/olympia/abuse/tests/test_actions.py b/src/olympia/abuse/tests/test_actions.py new file mode 100644 index 000000000000..1331167a7627 --- /dev/null +++ b/src/olympia/abuse/tests/test_actions.py @@ -0,0 +1,1190 @@ +import json +import uuid +from datetime import datetime + +from django.conf import settings +from django.core import mail +from django.urls import reverse + +import responses +from waffle.testutils import override_switch + +from olympia import amo +from olympia.activity.models import ActivityLog, ActivityLogToken +from olympia.addons.models import Addon, AddonUser +from olympia.amo.tests import TestCase, addon_factory, collection_factory, user_factory +from olympia.constants.abuse import 
DECISION_ACTIONS +from olympia.constants.promoted import RECOMMENDED +from olympia.core import set_user +from olympia.ratings.models import Rating + +from ..actions import ( + ContentActionApproveInitialDecision, + ContentActionApproveNoAction, + ContentActionBanUser, + ContentActionDeleteCollection, + ContentActionDeleteRating, + ContentActionDisableAddon, + ContentActionForwardToLegal, + ContentActionIgnore, + ContentActionOverrideApprove, + ContentActionRejectVersion, + ContentActionRejectVersionDelayed, + ContentActionTargetAppealApprove, + ContentActionTargetAppealRemovalAffirmation, +) +from ..models import AbuseReport, CinderAppeal, CinderJob, CinderPolicy, ContentDecision + + +class BaseTestContentAction: + def setUp(self): + addon = addon_factory() + self.decision = ContentDecision.objects.create( + cinder_id='ab89', + action=DECISION_ACTIONS.AMO_APPROVE, + notes="extra note's", + addon=addon, + action_date=datetime.now(), + ) + self.cinder_job = CinderJob.objects.create( + job_id='1234', decision=self.decision + ) + self.policy = CinderPolicy.objects.create( + uuid='1234', + name='Bad policy', + text='This is bad thing', + parent=CinderPolicy.objects.create( + uuid='p4r3nt', + name='Parent Policy', + text='Parent policy text', + ), + ) + self.decision.policies.add(self.policy) + self.abuse_report_no_auth = AbuseReport.objects.create( + reason=AbuseReport.REASONS.HATEFUL_VIOLENT_DECEPTIVE, + guid=addon.guid, + cinder_job=self.cinder_job, + reporter_email='email@domain.com', + ) + self.abuse_report_auth = AbuseReport.objects.create( + reason=AbuseReport.REASONS.HATEFUL_VIOLENT_DECEPTIVE, + guid=addon.guid, + cinder_job=self.cinder_job, + reporter=user_factory(), + ) + self.task_user = user_factory(pk=settings.TASK_USER_ID) + # It's the webhook's responsibility to do this before calling the + # action. We need it for the ActivityLog creation to work. 
+ set_user(self.task_user) + + def _test_reporter_takedown_email(self, subject): + assert mail.outbox[0].to == ['email@domain.com'] + assert mail.outbox[1].to == [self.abuse_report_auth.reporter.email] + assert mail.outbox[0].subject == ( + subject + f' [ref:ab89/{self.abuse_report_no_auth.id}]' + ) + assert mail.outbox[1].subject == ( + subject + f' [ref:ab89/{self.abuse_report_auth.id}]' + ) + assert 'have therefore removed' in mail.outbox[0].body + assert 'have therefore removed' in mail.outbox[1].body + assert 'appeal' not in mail.outbox[0].body + assert 'appeal' not in mail.outbox[1].body + assert f'[ref:ab89/{self.abuse_report_no_auth.id}]' in mail.outbox[0].body + assert f'[ref:ab89/{self.abuse_report_auth.id}]' in mail.outbox[1].body + assert 'After reviewing' not in mail.outbox[0].body + assert 'After reviewing' not in mail.outbox[0].body + assert '"' not in mail.outbox[0].body + assert '"' not in mail.outbox[1].body + assert '<b>' not in mail.outbox[0].body + assert '<b>' not in mail.outbox[1].body + assert self.decision.notes not in mail.outbox[0].body + assert self.decision.notes not in mail.outbox[1].body + + def _test_reporter_content_approve_email(self, subject): + assert mail.outbox[0].to == ['email@domain.com'] + assert mail.outbox[1].to == [self.abuse_report_auth.reporter.email] + assert mail.outbox[0].subject == ( + subject + f' [ref:ab89/{self.abuse_report_no_auth.id}]' + ) + assert mail.outbox[1].subject == ( + subject + f' [ref:ab89/{self.abuse_report_auth.id}]' + ) + assert 'does not violate Mozilla' in mail.outbox[0].body + assert 'does not violate Mozilla' in mail.outbox[1].body + assert 'was correct' not in mail.outbox[0].body + assert ( + reverse( + 'abuse.appeal_reporter', + kwargs={ + 'abuse_report_id': self.abuse_report_no_auth.id, + 'decision_cinder_id': self.decision.cinder_id, + }, + ) + in mail.outbox[0].body + ) + assert ( + reverse( + 'abuse.appeal_reporter', + kwargs={ + 'abuse_report_id': self.abuse_report_auth.id, + 
'decision_cinder_id': self.decision.cinder_id, + }, + ) + in mail.outbox[1].body + ) + assert f'[ref:ab89/{self.abuse_report_no_auth.id}]' in mail.outbox[0].body + assert f'[ref:ab89/{self.abuse_report_auth.id}]' in mail.outbox[1].body + assert '"' not in mail.outbox[0].body + assert '"' not in mail.outbox[1].body + assert '<b>' not in mail.outbox[0].body + assert '<b>' not in mail.outbox[1].body + assert self.decision.notes not in mail.outbox[0].body + assert self.decision.notes not in mail.outbox[1].body + + def _test_reporter_appeal_takedown_email(self, subject): + assert mail.outbox[0].to == [self.abuse_report_auth.reporter.email] + assert mail.outbox[0].subject == ( + subject + f' [ref:ab89/{self.abuse_report_auth.id}]' + ) + assert 'have removed' in mail.outbox[0].body + assert 'right to appeal' not in mail.outbox[0].body + assert f'[ref:ab89/{self.abuse_report_auth.id}]' in mail.outbox[0].body + assert 'After reviewing' in mail.outbox[0].body + assert '"' not in mail.outbox[0].body + assert '<b>' not in mail.outbox[0].body + assert self.decision.notes not in mail.outbox[0].body + + def _test_reporter_appeal_approve_email(self, subject): + assert mail.outbox[0].to == [self.abuse_report_auth.reporter.email] + assert mail.outbox[0].subject == ( + subject + f' [ref:ab89/{self.abuse_report_auth.id}]' + ) + assert 'does not violate Mozilla' in mail.outbox[0].body + assert 'right to appeal' not in mail.outbox[0].body + assert 'was correct' in mail.outbox[0].body + assert f'[ref:ab89/{self.abuse_report_auth.id}]' in mail.outbox[0].body + assert '"' not in mail.outbox[0].body + assert '<b>' not in mail.outbox[0].body + assert ''' not in mail.outbox[0].body + assert self.decision.notes in mail.outbox[0].body + + def _check_owner_email(self, mail_item, subject, snippet): + user = getattr(self, 'user', getattr(self, 'author', None)) + assert mail_item.to == [user.email] + assert mail_item.subject == subject + ' [ref:ab89]' + assert snippet in mail_item.body + assert 
'[ref:ab89]' in mail_item.body + assert '"' not in mail_item.body + assert '<b>' not in mail_item.body + assert ''' not in mail_item.body + assert self.decision.notes in mail_item.body + + def _test_owner_takedown_email(self, subject, snippet): + mail_item = mail.outbox[-1] + self._check_owner_email(mail_item, subject, snippet) + assert 'right to appeal' in mail_item.body + assert ( + reverse( + 'abuse.appeal_author', + kwargs={ + 'decision_cinder_id': self.decision.cinder_id, + }, + ) + in mail_item.body + ) + assert ( + '\n - Parent Policy, specifically Bad policy: This is bad thing\n' + in mail_item.body + ) + assert '"' not in mail_item.body + assert '<b>' not in mail_item.body + assert ''' not in mail_item.body + assert self.decision.notes in mail_item.body + + def _test_owner_affirmation_email(self, subject): + mail_item = mail.outbox[0] + self._check_owner_email(mail_item, subject, 'was correct') + assert 'right to appeal' not in mail_item.body + notes = f'{self.decision.notes}. ' if self.decision.notes else '' + assert f' was correct. 
{notes}Based on that determination' in (mail_item.body) + assert ''' not in mail_item.body + if isinstance(self.decision.target, Addon): + # Verify we used activity mail for Addon related target emails + log_token = ActivityLogToken.objects.get() + assert log_token.uuid.hex in mail_item.reply_to[0] + + def _test_owner_restore_email(self, subject): + mail_item = mail.outbox[0] + assert len(mail.outbox) == 1 + self._check_owner_email(mail_item, subject, 'we have restored') + assert 'right to appeal' not in mail_item.body + assert ''' not in mail_item.body + assert self.decision.notes in mail_item.body + + def _test_approve_appeal_or_override(ContentActionClass): + raise NotImplementedError + + def test_approve_appeal_success(self): + self._test_approve_appeal_or_override(ContentActionTargetAppealApprove) + assert 'After reviewing your appeal' in mail.outbox[0].body + + def test_approve_override(self): + self._test_approve_appeal_or_override(ContentActionOverrideApprove) + assert 'After reviewing your appeal' not in mail.outbox[0].body + + def _test_reporter_no_action_taken( + self, + *, + ActionClass=ContentActionApproveNoAction, + action=DECISION_ACTIONS.AMO_APPROVE, + ): + raise NotImplementedError + + def test_reporter_content_approve_report(self): + subject = self._test_reporter_no_action_taken() + assert len(mail.outbox) == 2 + self._test_reporter_content_approve_email(subject) + + def test_reporter_appeal_approve(self): + original_job = CinderJob.objects.create( + job_id='original', + decision=ContentDecision.objects.create( + addon=self.decision.addon, + user=self.decision.user, + rating=self.decision.rating, + collection=self.decision.collection, + action=DECISION_ACTIONS.AMO_APPROVE, + ), + ) + self.cinder_job.appealed_decisions.add(original_job.decision) + self.abuse_report_no_auth.update(cinder_job=original_job) + self.abuse_report_auth.update(cinder_job=original_job) + CinderAppeal.objects.create( + decision=original_job.decision, 
reporter_report=self.abuse_report_auth + ) + self.cinder_job.reload() + subject = self._test_reporter_no_action_taken() + assert len(mail.outbox) == 1 # only abuse_report_auth reporter + self._test_reporter_appeal_approve_email(subject) + + def test_owner_content_approve_report_email(self): + # This isn't called by cinder actions, but is triggered by reviewer actions + subject = self._test_reporter_no_action_taken( + ActionClass=ContentActionApproveInitialDecision + ) + assert len(mail.outbox) == 3 + self._test_reporter_content_approve_email(subject) + assert 'has been approved' in mail.outbox[-1].body + + def test_notify_reporters_reporters_provided(self): + action = self.ActionClass(self.decision) + action.notify_reporters(reporter_abuse_reports=[self.abuse_report_no_auth]) + assert len(mail.outbox) == 1 + assert mail.outbox[0].to == ['email@domain.com'] + assert mail.outbox[0].subject.endswith( + f' [ref:ab89/{self.abuse_report_no_auth.id}]' + ) + assert 'have therefore removed' in mail.outbox[0].body + assert f'[ref:ab89/{self.abuse_report_no_auth.id}]' in mail.outbox[0].body + + def test_reporter_ignore_invalid_report(self): + self.decision.policies.first().update() + subject = self._test_reporter_no_action_taken( + ActionClass=ContentActionIgnore, action=DECISION_ACTIONS.AMO_IGNORE + ) + assert len(mail.outbox) == 2 + assert mail.outbox[0].to == ['email@domain.com'] + assert mail.outbox[1].to == [self.abuse_report_auth.reporter.email] + assert mail.outbox[0].subject == ( + subject + f' [ref:ab89/{self.abuse_report_no_auth.id}]' + ) + assert mail.outbox[1].subject == ( + subject + f' [ref:ab89/{self.abuse_report_auth.id}]' + ) + assert f'[ref:ab89/{self.abuse_report_no_auth.id}]' in mail.outbox[0].body + assert f'[ref:ab89/{self.abuse_report_auth.id}]' in mail.outbox[1].body + + for idx in range(0, 1): + assert 'were unable to identify a violation' in mail.outbox[idx].body + assert 'right to appeal' not in mail.outbox[idx].body + assert 'This is bad thing' in 
mail.outbox[idx].body # policy text + assert 'Bad policy' not in mail.outbox[idx].body # policy name + assert 'Parent' not in mail.outbox[idx].body # parent policy text + + def test_email_content_not_escaped(self): + unsafe_str = '' + self.decision.update(notes=unsafe_str) + action = self.ActionClass(self.decision) + action.notify_owners() + assert unsafe_str in mail.outbox[0].body + + action = ContentActionApproveNoAction(self.decision) + mail.outbox.clear() + action.notify_reporters( + reporter_abuse_reports=[self.abuse_report_auth], is_appeal=True + ) + assert unsafe_str in mail.outbox[0].body + + +class TestContentActionUser(BaseTestContentAction, TestCase): + ActionClass = ContentActionBanUser + + def setUp(self): + super().setUp() + self.user = user_factory(display_name='Bad Hørse') + self.cinder_job.abusereport_set.update(user=self.user, guid=None) + self.decision.update(addon=None, user=self.user) + + def _test_ban_user(self): + self.decision.update(action=DECISION_ACTIONS.AMO_BAN_USER) + action = self.ActionClass(self.decision) + activity = action.process_action() + assert activity.log == amo.LOG.ADMIN_USER_BANNED + assert ActivityLog.objects.count() == 1 + assert activity.arguments == [self.user, self.decision, self.policy] + assert activity.user == self.task_user + assert activity.details == { + 'comments': self.decision.notes, + 'cinder_action': DECISION_ACTIONS.AMO_BAN_USER, + } + + self.user.reload() + self.assertCloseToNow(self.user.banned) + assert len(mail.outbox) == 0 + + self.cinder_job.notify_reporters(action) + action.notify_owners() + subject = f'Mozilla Add-ons: {self.user.name}' + self._test_owner_takedown_email(subject, 'has been suspended') + return subject + + def test_ban_user(self): + subject = self._test_ban_user() + assert len(mail.outbox) == 3 + self._test_reporter_takedown_email(subject) + + def test_ban_user_after_reporter_appeal(self): + original_job = CinderJob.objects.create( + job_id='original', + 
decision=ContentDecision.objects.create( + user=self.user, action=DECISION_ACTIONS.AMO_APPROVE + ), + ) + self.cinder_job.appealed_decisions.add(original_job.decision) + self.abuse_report_no_auth.update(cinder_job=original_job) + self.abuse_report_auth.update(cinder_job=original_job) + CinderAppeal.objects.create( + decision=original_job.decision, reporter_report=self.abuse_report_auth + ) + subject = self._test_ban_user() + assert len(mail.outbox) == 2 + self._test_reporter_appeal_takedown_email(subject) + + def _test_reporter_no_action_taken( + self, + *, + ActionClass=ContentActionApproveNoAction, + action=DECISION_ACTIONS.AMO_APPROVE, + ): + self.decision.update(action=action) + action = ActionClass(self.decision) + assert action.process_action() is None + + self.user.reload() + assert not self.user.banned + assert len(mail.outbox) == 0 + + self.cinder_job.notify_reporters(action) + action.notify_owners() + return f'Mozilla Add-ons: {self.user.name}' + + def _test_approve_appeal_or_override(self, ContentActionClass): + self.decision.update(action=DECISION_ACTIONS.AMO_APPROVE) + self.user.update(banned=self.days_ago(1), deleted=True) + action = ContentActionClass(self.decision) + activity = action.process_action() + + self.user.reload() + assert not self.user.banned + assert ActivityLog.objects.count() == 1 + assert activity.log == amo.LOG.ADMIN_USER_UNBAN + assert activity.arguments == [self.user, self.decision, self.policy] + assert activity.user == self.task_user + assert activity.details == { + 'comments': self.decision.notes, + 'cinder_action': DECISION_ACTIONS.AMO_APPROVE, + } + assert len(mail.outbox) == 0 + + self.cinder_job.notify_reporters(action) + action.notify_owners() + self._test_owner_restore_email(f'Mozilla Add-ons: {self.user.name}') + + def test_target_appeal_decline(self): + self.user.update(banned=self.days_ago(1), deleted=True) + action = ContentActionTargetAppealRemovalAffirmation(self.decision) + assert action.process_action() is None + + 
self.user.reload() + assert self.user.banned + assert ActivityLog.objects.count() == 0 + assert len(mail.outbox) == 0 + + self.cinder_job.notify_reporters(action) + action.notify_owners() + self._test_owner_affirmation_email(f'Mozilla Add-ons: {self.user.name}') + + def test_should_hold_action(self): + self.decision.update(action=DECISION_ACTIONS.AMO_BAN_USER) + action = self.ActionClass(self.decision) + assert action.should_hold_action() is False + + self.user.update(email='superstarops@mozilla.com') + assert action.should_hold_action() is True + + self.user.update(email='foo@baa') + assert action.should_hold_action() is False + del self.user.groups_list + self.grant_permission(self.user, 'this:thing') + assert action.should_hold_action() is True + + self.user.groups_list = [] + assert action.should_hold_action() is False + addon = addon_factory(users=[self.user]) + assert action.should_hold_action() is False + self.make_addon_promoted(addon, RECOMMENDED) + assert action.should_hold_action() is True + + self.user.banned = datetime.now() + assert action.should_hold_action() is False + + def test_hold_action(self): + self.decision.update(action=DECISION_ACTIONS.AMO_BAN_USER) + action = self.ActionClass(self.decision) + activity = action.hold_action() + assert activity.log == amo.LOG.HELD_ACTION_ADMIN_USER_BANNED + assert ActivityLog.objects.count() == 1 + assert activity.arguments == [self.user, self.decision, self.policy] + assert activity.user == self.task_user + assert activity.details == { + 'comments': self.decision.notes, + 'cinder_action': DECISION_ACTIONS.AMO_BAN_USER, + } + + +@override_switch('dsa-cinder-forwarded-review', active=True) +@override_switch('dsa-appeals-review', active=True) +class TestContentActionAddon(BaseTestContentAction, TestCase): + ActionClass = ContentActionDisableAddon + + def setUp(self): + super().setUp() + self.author = user_factory() + self.addon = addon_factory(users=(self.author,), name='Bad Addön') + 
ActivityLog.objects.all().delete() + self.cinder_job.abusereport_set.update(guid=self.addon.guid) + self.decision.update(addon=self.addon) + + def _test_disable_addon(self): + self.decision.update(action=DECISION_ACTIONS.AMO_DISABLE_ADDON) + action = self.ActionClass(self.decision) + activity = action.process_action() + assert activity + assert activity.log == amo.LOG.FORCE_DISABLE + assert self.addon.reload().status == amo.STATUS_DISABLED + assert ActivityLog.objects.count() == 1 + assert activity.arguments == [self.addon, self.decision, self.policy] + assert activity.user == self.task_user + assert len(mail.outbox) == 0 + + self.cinder_job.notify_reporters(action) + action.notify_owners() + subject = f'Mozilla Add-ons: {self.addon.name}' + self._test_owner_takedown_email(subject, 'permanently disabled') + assert f'Your Extension {self.addon.name}' in mail.outbox[-1].body + return subject + + def test_disable_addon(self): + subject = self._test_disable_addon() + assert len(mail.outbox) == 3 + self._test_reporter_takedown_email(subject) + + def test_disable_addon_after_reporter_appeal(self): + original_job = CinderJob.objects.create( + job_id='original', + decision=ContentDecision.objects.create( + addon=self.addon, action=DECISION_ACTIONS.AMO_APPROVE + ), + ) + self.cinder_job.appealed_decisions.add(original_job.decision) + self.abuse_report_no_auth.update(cinder_job=original_job) + self.abuse_report_auth.update(cinder_job=original_job) + CinderAppeal.objects.create( + decision=original_job.decision, reporter_report=self.abuse_report_auth + ) + subject = self._test_disable_addon() + assert len(mail.outbox) == 2 + self._test_reporter_appeal_takedown_email(subject) + + def _test_approve_appeal_or_override(self, ContentActionClass): + self.addon.update(status=amo.STATUS_DISABLED) + ActivityLog.objects.all().delete() + action = ContentActionClass(self.decision) + activity = action.process_action() + + assert self.addon.reload().status == amo.STATUS_APPROVED + assert 
activity.log == amo.LOG.FORCE_ENABLE + assert ActivityLog.objects.count() == 1 + assert activity.arguments == [self.addon, self.decision, self.policy] + assert activity.user == self.task_user + assert len(mail.outbox) == 0 + + self.cinder_job.notify_reporters(action) + action.notify_owners() + self._test_owner_restore_email(f'Mozilla Add-ons: {self.addon.name}') + + def _test_reporter_no_action_taken( + self, + *, + ActionClass=ContentActionApproveNoAction, + action=DECISION_ACTIONS.AMO_APPROVE, + ): + self.decision.update(action=action) + action = ActionClass(self.decision) + assert action.process_action() is None + + assert self.addon.reload().status == amo.STATUS_APPROVED + assert ActivityLog.objects.count() == 0 + assert len(mail.outbox) == 0 + self.cinder_job.notify_reporters(action) + action.notify_owners() + return f'Mozilla Add-ons: {self.addon.name}' + + def test_target_appeal_decline(self): + self.addon.update(status=amo.STATUS_DISABLED) + ActivityLog.objects.all().delete() + action = ContentActionTargetAppealRemovalAffirmation(self.decision) + assert action.process_action() is None + + self.addon.reload() + assert self.addon.status == amo.STATUS_DISABLED + assert ActivityLog.objects.count() == 0 + assert len(mail.outbox) == 0 + + self.cinder_job.notify_reporters(action) + action.notify_owners() + self._test_owner_affirmation_email(f'Mozilla Add-ons: {self.addon.name}') + + def test_target_appeal_decline_no_manual_reasoning_text(self): + self.addon.update(status=amo.STATUS_DISABLED) + ActivityLog.objects.all().delete() + self.decision.update(notes='') + action = ContentActionTargetAppealRemovalAffirmation(self.decision) + assert action.process_action() is None + + self.addon.reload() + assert self.addon.status == amo.STATUS_DISABLED + assert ActivityLog.objects.count() == 0 + assert len(mail.outbox) == 0 + + self.cinder_job.notify_reporters(action) + action.notify_owners() + self.decision.update(notes='') + self._test_owner_affirmation_email(f'Mozilla 
Add-ons: {self.addon.name}') + + def test_notify_owners_with_manual_reasoning_text(self): + self.decision.update( + action=DECISION_ACTIONS.AMO_DISABLE_ADDON, + notes='some other policy justification', + ) + self.ActionClass(self.decision).notify_owners(extra_context={'policies': ()}) + mail_item = mail.outbox[0] + self._check_owner_email( + mail_item, f'Mozilla Add-ons: {self.addon.name}', 'permanently disabled' + ) + assert 'right to appeal' in mail_item.body + assert ( + reverse( + 'abuse.appeal_author', + kwargs={ + 'decision_cinder_id': self.decision.cinder_id, + }, + ) + in mail_item.body + ) + assert 'Bad policy: This is bad thing' not in mail_item.body + assert 'some other policy justification' in mail_item.body + + def test_notify_owners_with_for_third_party_decision(self): + self.decision.update(action=DECISION_ACTIONS.AMO_DISABLE_ADDON) + self.ActionClass(self.decision).notify_owners() + mail_item = mail.outbox[0] + self._check_owner_email( + mail_item, f'Mozilla Add-ons: {self.addon.name}', 'permanently disabled' + ) + assert 'right to appeal' in mail_item.body + assert 'in an assessment performed on our own initiative' not in mail_item.body + assert 'based on a report we received from a third party' in mail_item.body + + def test_notify_owners_with_for_proactive_decision(self): + self.cinder_job.delete() + self.abuse_report_auth.delete() + self.abuse_report_no_auth.delete() + self.decision.refresh_from_db() + self.decision.update(action=DECISION_ACTIONS.AMO_DISABLE_ADDON) + self.ActionClass(self.decision).notify_owners() + mail_item = mail.outbox[0] + self._check_owner_email( + mail_item, f'Mozilla Add-ons: {self.addon.name}', 'permanently disabled' + ) + assert 'right to appeal' in mail_item.body + assert 'in an assessment performed on our own initiative' in mail_item.body + assert 'based on a report we received from a third party' not in mail_item.body + + def test_notify_owners_non_public_url(self): + 
self.decision.update(action=DECISION_ACTIONS.AMO_DISABLE_ADDON) + self.addon.update(status=amo.STATUS_DISABLED, _current_version=None) + assert self.addon.get_url_path() == '' + + self.ActionClass(self.decision).notify_owners() + mail_item = mail.outbox[0] + self._check_owner_email( + mail_item, f'Mozilla Add-ons: {self.addon.name}', 'permanently disabled' + ) + assert '/firefox/' not in mail_item.body + assert ( + f'{settings.SITE_URL}/en-US/developers/addon/{self.addon.id}/' + in mail_item.body + ) + + def _test_reject_version(self): + self.decision.update(action=DECISION_ACTIONS.AMO_REJECT_VERSION_ADDON) + action = ContentActionRejectVersion(self.decision) + # process_action isn't implemented for this action currently. + with self.assertRaises(NotImplementedError): + action.process_action() + + subject = f'Mozilla Add-ons: {self.addon.name}' + + assert len(mail.outbox) == 0 + self.cinder_job.notify_reporters(action) + action.notify_owners(extra_context={'version_list': '2.3, 3.45'}) + mail_item = mail.outbox[-1] + self._check_owner_email(mail_item, subject, 'have been disabled') + + assert 'right to appeal' in mail_item.body + assert ( + reverse( + 'abuse.appeal_author', + kwargs={ + 'decision_cinder_id': self.decision.cinder_id, + }, + ) + in mail_item.body + ) + assert 'Bad policy: This is bad thing' in mail_item.body + assert 'Affected versions: 2.3, 3.45' in mail_item.body + return subject + + def test_reject_version(self): + subject = self._test_reject_version() + assert len(mail.outbox) == 3 + self._test_reporter_takedown_email(subject) + + def test_reject_version_after_reporter_appeal(self): + original_job = CinderJob.objects.create( + job_id='original', + decision=ContentDecision.objects.create( + addon=self.addon, action=DECISION_ACTIONS.AMO_APPROVE + ), + ) + self.cinder_job.appealed_decisions.add(original_job.decision) + self.abuse_report_no_auth.update(cinder_job=original_job) + self.abuse_report_auth.update(cinder_job=original_job) + 
CinderAppeal.objects.create( + decision=original_job.decision, reporter_report=self.abuse_report_auth + ) + subject = self._test_reject_version() + assert len(mail.outbox) == 2 + self._test_reporter_appeal_takedown_email(subject) + + def _test_reject_version_delayed(self): + self.decision.update( + action=DECISION_ACTIONS.AMO_REJECT_VERSION_WARNING_ADDON, + ) + action = ContentActionRejectVersionDelayed(self.decision) + # note: process_action isn't implemented for this action currently. + + subject = f'Mozilla Add-ons: {self.addon.name}' + + assert len(mail.outbox) == 0 + self.cinder_job.notify_reporters(action) + action.notify_owners( + extra_context={ + 'version_list': '2.3, 3.45', + 'delayed_rejection_days': 66, + } + ) + mail_item = mail.outbox[-1] + user = getattr(self, 'user', getattr(self, 'author', None)) + assert mail_item.to == [user.email] + assert mail_item.subject == (f'{subject} [ref:{self.decision.cinder_id}]') + assert 'will be disabled' in mail_item.body + assert f'[ref:{self.decision.cinder_id}]' in mail_item.body + + assert 'right to appeal' not in mail_item.body + assert 'Bad policy: This is bad thing' in mail_item.body + assert 'Affected versions: 2.3, 3.45' in mail_item.body + assert '66 day(s)' in mail_item.body + return subject + + def test_reject_version_delayed(self): + subject = self._test_reject_version_delayed() + assert len(mail.outbox) == 3 + assert mail.outbox[0].to == ['email@domain.com'] + assert mail.outbox[1].to == [self.abuse_report_auth.reporter.email] + assert mail.outbox[0].subject == ( + subject + + f' [ref:{self.decision.cinder_id}/' + + f'{self.abuse_report_no_auth.id}]' + ) + assert mail.outbox[1].subject == ( + subject + f' [ref:{self.decision.cinder_id}/{self.abuse_report_auth.id}]' + ) + assert 'we will remove' in mail.outbox[0].body + assert 'we will remove' in mail.outbox[1].body + assert 'right to appeal' not in mail.outbox[0].body + assert 'right to appeal' not in mail.outbox[1].body + assert ( + 
f'[ref:{self.decision.cinder_id}/{self.abuse_report_no_auth.id}]' + in mail.outbox[0].body + ) + assert ( + f'[ref:{self.decision.cinder_id}/{self.abuse_report_auth.id}]' + in mail.outbox[1].body + ) + + def test_reject_version_delayed_after_reporter_appeal(self): + original_job = CinderJob.objects.create( + job_id='original', + decision=ContentDecision.objects.create( + action=DECISION_ACTIONS.AMO_APPROVE, addon=self.addon + ), + ) + self.cinder_job.appealed_decisions.add(original_job.decision) + self.abuse_report_no_auth.update(cinder_job=original_job) + self.abuse_report_auth.update(cinder_job=original_job) + CinderAppeal.objects.create( + decision=original_job.decision, reporter_report=self.abuse_report_auth + ) + subject = self._test_reject_version_delayed() + assert len(mail.outbox) == 2 + assert mail.outbox[0].to == [self.abuse_report_auth.reporter.email] + assert mail.outbox[0].subject == ( + subject + f' [ref:{self.decision.cinder_id}/{self.abuse_report_auth.id}]' + ) + assert 'we will remove' in mail.outbox[0].body + assert 'right to appeal' not in mail.outbox[0].body + assert ( + f'[ref:{self.decision.cinder_id}/{self.abuse_report_auth.id}]' + in mail.outbox[0].body + ) + + def test_notify_owner_with_appeal_waffle_off_doesnt_offer_appeal(self): + self.cinder_job.delete() + self.decision.refresh_from_db() + self.decision.update(action=DECISION_ACTIONS.AMO_DISABLE_ADDON) + assert not self.decision.is_third_party_initiated + + with override_switch('dsa-appeals-review', active=True): + self.ActionClass(self.decision).notify_owners() + mail_item = mail.outbox[0] + self._check_owner_email( + mail_item, f'Mozilla Add-ons: {self.addon.name}', 'permanently disabled' + ) + assert 'right to appeal' in mail_item.body + mail.outbox.clear() + + with override_switch('dsa-appeals-review', active=False): + self.ActionClass(self.decision).notify_owners() + mail_item = mail.outbox[0] + self._check_owner_email( + mail_item, f'Mozilla Add-ons: {self.addon.name}', 
'permanently disabled' + ) + assert 'right to appeal' not in mail_item.body + + def test_should_hold_action(self): + self.decision.update(action=DECISION_ACTIONS.AMO_DISABLE_ADDON) + action = self.ActionClass(self.decision) + assert action.should_hold_action() is False + + self.make_addon_promoted(self.addon, RECOMMENDED) + assert action.should_hold_action() is True + + self.addon.status = amo.STATUS_DISABLED + assert action.should_hold_action() is False + + def test_hold_action(self): + self.decision.update(action=DECISION_ACTIONS.AMO_DISABLE_ADDON) + action = self.ActionClass(self.decision) + activity = action.hold_action() + assert activity.log == amo.LOG.HELD_ACTION_FORCE_DISABLE + assert ActivityLog.objects.count() == 1 + assert activity.arguments == [self.addon, self.decision, self.policy] + assert activity.user == self.task_user + assert activity.details == { + 'comments': self.decision.notes, + 'cinder_action': DECISION_ACTIONS.AMO_DISABLE_ADDON, + } + + def test_forward_to_reviewers_no_job(self): + self.decision.update(action=DECISION_ACTIONS.AMO_LEGAL_FORWARD) + self.decision.cinder_job.update(decision=None) + action = ContentActionForwardToLegal(self.decision) + responses.add( + responses.POST, + f'{settings.CINDER_SERVER_URL}create_report', + json={'job_id': '1234-xyz'}, + status=201, + ) + + action.process_action() + + assert CinderJob.objects.get(job_id='1234-xyz') + request_body = json.loads(responses.calls[0].request.body) + assert request_body['reasoning'] == self.decision.notes + assert request_body['queue_slug'] == 'legal-escalations' + + def test_forward_to_reviewers_with_job(self): + self.decision.update(action=DECISION_ACTIONS.AMO_LEGAL_FORWARD) + action = ContentActionForwardToLegal(self.decision) + responses.add_callback( + responses.POST, + f'{settings.CINDER_SERVER_URL}jobs/{self.cinder_job.job_id}/decision', + callback=lambda r: (201, {}, json.dumps({'uuid': uuid.uuid4().hex})), + ) + responses.add( + responses.POST, + 
f'{settings.CINDER_SERVER_URL}create_report', + json={'job_id': '1234-xyz'}, + status=201, + ) + + action.process_action() + + new_cinder_job = CinderJob.objects.get(job_id='1234-xyz') + assert new_cinder_job != self.cinder_job + assert new_cinder_job.job_id == '1234-xyz' + # The old cinder_job should have a reference to the new job + assert self.cinder_job.reload().forwarded_to_job == new_cinder_job + # And the reports should now be part of the new job instead + assert self.abuse_report_auth.reload().cinder_job == new_cinder_job + assert self.abuse_report_no_auth.reload().cinder_job == new_cinder_job + request_body = json.loads(responses.calls[0].request.body) + assert request_body['reasoning'] == self.decision.notes + assert request_body['queue_slug'] == 'legal-escalations' + + +class TestContentActionCollection(BaseTestContentAction, TestCase): + ActionClass = ContentActionDeleteCollection + + def setUp(self): + super().setUp() + self.author = user_factory() + self.collection = collection_factory( + author=self.author, + name='Bad Collectiôn', + slug='bad-collection', + ) + self.cinder_job.abusereport_set.update(collection=self.collection, guid=None) + self.decision.update(addon=None, collection=self.collection) + + def _test_delete_collection(self): + self.decision.update(action=DECISION_ACTIONS.AMO_DELETE_COLLECTION) + action = self.ActionClass(self.decision) + log_entry = action.process_action() + + assert self.collection.reload() + assert self.collection.deleted + assert self.collection.slug + assert ActivityLog.objects.count() == 1 + activity = ActivityLog.objects.get(action=amo.LOG.COLLECTION_DELETED.id) + assert activity == log_entry + assert activity.arguments == [self.collection, self.decision, self.policy] + assert activity.user == self.task_user + assert len(mail.outbox) == 0 + + self.cinder_job.notify_reporters(action) + action.notify_owners() + subject = f'Mozilla Add-ons: {self.collection.name}' + self._test_owner_takedown_email(subject, 
'permanently removed') + return subject + + def test_delete_collection(self): + subject = self._test_delete_collection() + assert len(mail.outbox) == 3 + self._test_reporter_takedown_email(subject) + + def test_delete_collection_after_reporter_appeal(self): + original_job = CinderJob.objects.create( + job_id='original', + decision=ContentDecision.objects.create( + collection=self.collection, action=DECISION_ACTIONS.AMO_APPROVE + ), + ) + self.cinder_job.appealed_decisions.add(original_job.decision) + self.abuse_report_no_auth.update(cinder_job=original_job) + self.abuse_report_auth.update(cinder_job=original_job) + CinderAppeal.objects.create( + decision=original_job.decision, reporter_report=self.abuse_report_auth + ) + subject = self._test_delete_collection() + assert len(mail.outbox) == 2 + self._test_reporter_appeal_takedown_email(subject) + + def _test_reporter_no_action_taken( + self, + *, + ActionClass=ContentActionApproveNoAction, + action=DECISION_ACTIONS.AMO_APPROVE, + ): + self.decision.update(action=action) + action = ActionClass(self.decision) + assert action.process_action() is None + + assert self.collection.reload() + assert not self.collection.deleted + assert ActivityLog.objects.count() == 0 + assert len(mail.outbox) == 0 + + self.cinder_job.notify_reporters(action) + action.notify_owners() + return f'Mozilla Add-ons: {self.collection.name}' + + def _test_approve_appeal_or_override(self, ContentActionClass): + self.collection.update(deleted=True) + action = ContentActionClass(self.decision) + log_entry = action.process_action() + + assert self.collection.reload() + assert not self.collection.deleted + assert ActivityLog.objects.count() == 1 + activity = ActivityLog.objects.get(action=amo.LOG.COLLECTION_UNDELETED.id) + assert activity == log_entry + assert activity.arguments == [self.collection, self.decision, self.policy] + assert activity.user == self.task_user + assert len(mail.outbox) == 0 + + self.cinder_job.notify_reporters(action) + 
action.notify_owners() + self._test_owner_restore_email(f'Mozilla Add-ons: {self.collection.name}') + + def test_target_appeal_decline(self): + self.collection.update(deleted=True) + action = ContentActionTargetAppealRemovalAffirmation(self.decision) + assert action.process_action() is None + + self.collection.reload() + assert self.collection.deleted + assert ActivityLog.objects.count() == 0 + assert len(mail.outbox) == 0 + + self.cinder_job.notify_reporters(action) + action.notify_owners() + self._test_owner_affirmation_email(f'Mozilla Add-ons: {self.collection.name}') + + def test_should_hold_action(self): + self.decision.update(action=DECISION_ACTIONS.AMO_DELETE_COLLECTION) + action = self.ActionClass(self.decision) + assert action.should_hold_action() is False + + self.collection.update(author=self.task_user) + assert action.should_hold_action() is True + + self.collection.deleted = True + assert action.should_hold_action() is False + + def test_hold_action(self): + self.decision.update(action=DECISION_ACTIONS.AMO_DELETE_COLLECTION) + action = self.ActionClass(self.decision) + activity = action.hold_action() + assert activity.log == amo.LOG.HELD_ACTION_COLLECTION_DELETED + assert ActivityLog.objects.count() == 1 + assert activity.arguments == [self.collection, self.decision, self.policy] + assert activity.user == self.task_user + assert activity.details == { + 'comments': self.decision.notes, + 'cinder_action': DECISION_ACTIONS.AMO_DELETE_COLLECTION, + } + + +class TestContentActionRating(BaseTestContentAction, TestCase): + ActionClass = ContentActionDeleteRating + + def setUp(self): + super().setUp() + self.author = user_factory() + self.rating = Rating.objects.create( + addon=addon_factory(), user=self.author, body='Saying something bad' + ) + self.cinder_job.abusereport_set.update(rating=self.rating, guid=None) + self.decision.update(addon=None, rating=self.rating) + ActivityLog.objects.all().delete() + + def _test_delete_rating(self): + 
self.decision.update(action=DECISION_ACTIONS.AMO_DELETE_RATING) + action = self.ActionClass(self.decision) + activity = action.process_action() + assert activity.log == amo.LOG.DELETE_RATING + assert ActivityLog.objects.count() == 1 + assert activity.arguments == [ + self.rating, + self.decision, + self.policy, + self.rating.addon, + ] + assert activity.user == self.task_user + assert activity.details == { + 'comments': self.decision.notes, + 'cinder_action': DECISION_ACTIONS.AMO_DELETE_RATING, + 'addon_id': self.rating.addon_id, + 'addon_title': str(self.rating.addon.name), + 'body': self.rating.body, + 'is_flagged': False, + } + + assert self.rating.reload().deleted + assert len(mail.outbox) == 0 + + self.cinder_job.notify_reporters(action) + action.notify_owners() + subject = f'Mozilla Add-ons: "Saying ..." for {self.rating.addon.name}' + self._test_owner_takedown_email(subject, 'permanently removed') + return subject + + def test_delete_rating(self): + subject = self._test_delete_rating() + assert len(mail.outbox) == 3 + self._test_reporter_takedown_email(subject) + + def test_delete_rating_after_reporter_appeal(self): + original_job = CinderJob.objects.create( + job_id='original', + decision=ContentDecision.objects.create( + rating=self.rating, action=DECISION_ACTIONS.AMO_APPROVE + ), + ) + self.cinder_job.appealed_decisions.add(original_job.decision) + self.abuse_report_no_auth.update(cinder_job=original_job) + self.abuse_report_auth.update(cinder_job=original_job) + CinderAppeal.objects.create( + decision=original_job.decision, reporter_report=self.abuse_report_auth + ) + subject = self._test_delete_rating() + assert len(mail.outbox) == 2 + self._test_reporter_appeal_takedown_email(subject) + + def _test_reporter_no_action_taken( + self, + *, + ActionClass=ContentActionApproveNoAction, + action=DECISION_ACTIONS.AMO_APPROVE, + ): + self.decision.update(action=action) + action = ActionClass(self.decision) + assert action.process_action() is None + + assert not 
self.rating.reload().deleted + assert ActivityLog.objects.count() == 0 + assert len(mail.outbox) == 0 + + self.cinder_job.notify_reporters(action) + action.notify_owners() + return f'Mozilla Add-ons: "Saying ..." for {self.rating.addon.name}' + + def _test_approve_appeal_or_override(self, ContentActionClass): + self.rating.delete() + ActivityLog.objects.all().delete() + action = ContentActionClass(self.decision) + activity = action.process_action() + + assert activity.log == amo.LOG.UNDELETE_RATING + assert ActivityLog.objects.count() == 1 + assert activity.arguments == [ + self.rating, + self.decision, + self.policy, + self.rating.addon, + ] + assert activity.user == self.task_user + assert activity.details == { + 'comments': self.decision.notes, + 'cinder_action': DECISION_ACTIONS.AMO_APPROVE, + 'addon_id': self.rating.addon_id, + 'addon_title': str(self.rating.addon.name), + 'body': self.rating.body, + 'is_flagged': False, + } + assert not self.rating.reload().deleted + assert len(mail.outbox) == 0 + + self.cinder_job.notify_reporters(action) + action.notify_owners() + self._test_owner_restore_email( + f'Mozilla Add-ons: "Saying ..." for {self.rating.addon.name}' + ) + + def test_target_appeal_decline(self): + self.rating.delete() + ActivityLog.objects.all().delete() + action = ContentActionTargetAppealRemovalAffirmation(self.decision) + assert action.process_action() is None + + self.rating.reload() + assert self.rating.deleted + assert ActivityLog.objects.count() == 0 + assert len(mail.outbox) == 0 + + self.cinder_job.notify_reporters(action) + action.notify_owners() + self._test_owner_affirmation_email( + f'Mozilla Add-ons: "Saying ..." 
for {self.rating.addon.name}' + ) + + def test_should_hold_action(self): + self.decision.update(action=DECISION_ACTIONS.AMO_DELETE_RATING) + action = self.ActionClass(self.decision) + assert action.should_hold_action() is False + + AddonUser.objects.create(addon=self.rating.addon, user=self.rating.user) + assert action.should_hold_action() is False + self.make_addon_promoted(self.rating.addon, RECOMMENDED) + assert action.should_hold_action() is False + self.rating.update( + reply_to=Rating.objects.create( + addon=self.rating.addon, user=user_factory(), body='original' + ) + ) + assert action.should_hold_action() is True + + self.rating.update(deleted=self.rating.id) + assert action.should_hold_action() is False + + def test_hold_action(self): + self.decision.update(action=DECISION_ACTIONS.AMO_DELETE_RATING) + action = self.ActionClass(self.decision) + activity = action.hold_action() + assert activity.log == amo.LOG.HELD_ACTION_DELETE_RATING + assert ActivityLog.objects.count() == 1 + assert activity.arguments == [ + self.rating, + self.decision, + self.policy, + self.rating.addon, + ] + assert activity.user == self.task_user + assert activity.details == { + 'comments': self.decision.notes, + 'cinder_action': DECISION_ACTIONS.AMO_DELETE_RATING, + } diff --git a/src/olympia/abuse/tests/test_admin.py b/src/olympia/abuse/tests/test_admin.py index aef22ab1ad50..b8afdb0b434d 100644 --- a/src/olympia/abuse/tests/test_admin.py +++ b/src/olympia/abuse/tests/test_admin.py @@ -1,5 +1,6 @@ import uuid from datetime import date, datetime +from unittest import mock from urllib.parse import parse_qsl, urlparse from django.conf import settings @@ -585,6 +586,7 @@ def setUpTestData(cls): def setUp(self): self.client.force_login(self.user) self.list_url = reverse('admin:abuse_cinderpolicy_changelist') + self.sync_cinder_policies_url = reverse('admin:abuse_sync_cinder_policies') def test_list_no_permission(self): user = user_factory(email='nobody@mozilla.com') @@ -598,10 +600,14 @@ 
def test_list(self): zab = CinderPolicy.objects.create(name='Zab', parent=foo, uuid=uuid.uuid4()) lorem = CinderPolicy.objects.create(name='Lorem', uuid=uuid.uuid4()) CinderPolicy.objects.create(name='Ipsum', uuid=uuid.uuid4()) - ReviewActionReason.objects.create(name='Attached to Zab', cinder_policy=zab) - ReviewActionReason.objects.create(name='Attached to Lorem', cinder_policy=lorem) ReviewActionReason.objects.create( - name='Also attached to Lorem', cinder_policy=lorem + name='Attached to Zab', cinder_policy=zab, canned_response='.' + ) + ReviewActionReason.objects.create( + name='Attached to Lorem', cinder_policy=lorem, canned_response='.' + ) + ReviewActionReason.objects.create( + name='Also attached to Lorem', cinder_policy=lorem, canned_response='.' ) with self.assertNumQueries(7): @@ -619,6 +625,14 @@ def test_list(self): doc('#result_list td.field-linked_review_reasons')[2].text_content() == 'Also attached to Lorem\nAttached to Lorem' ) + assert doc('#abuse_sync_cinder_policies') + assert doc('#abuse_sync_cinder_policies')[0].attrib == { + 'formaction': self.sync_cinder_policies_url, + 'formmethod': 'post', + 'type': 'submit', + 'id': 'abuse_sync_cinder_policies', + 'value': 'Sync from Cinder', + } def test_list_order_by_reviewreason(self): foo = CinderPolicy.objects.create(name='Foo') @@ -626,8 +640,12 @@ def test_list_order_by_reviewreason(self): zab = CinderPolicy.objects.create(name='Zab', parent=foo, uuid=uuid.uuid4()) lorem = CinderPolicy.objects.create(name='Lorem', uuid=uuid.uuid4()) CinderPolicy.objects.create(name='Ipsum', uuid=uuid.uuid4()) - ReviewActionReason.objects.create(name='Attached to Zab', cinder_policy=zab) - ReviewActionReason.objects.create(name='Attached to Lorem', cinder_policy=lorem) + ReviewActionReason.objects.create( + name='Attached to Zab', cinder_policy=zab, canned_response='.' + ) + ReviewActionReason.objects.create( + name='Attached to Lorem', cinder_policy=lorem, canned_response='.' 
+ ) with self.assertNumQueries(7): # - 2 savepoints (tests) @@ -635,9 +653,9 @@ def test_list_order_by_reviewreason(self): # - 1 count cinder policies # - 1 cinder policies # - 1 review action reasons - # Linked reason is the 4th field, so we have to pass o=4 parameter + # Linked reason is the 3rd field, so we have to pass o=3 parameter # to order on it. - response = self.client.get(self.list_url, {'o': '4'}) + response = self.client.get(self.list_url, {'o': '3'}) assert response.status_code == 200 doc = pq(response.content) assert len(doc('#result_list tbody tr')) == CinderPolicy.objects.count() @@ -646,3 +664,23 @@ def test_list_order_by_reviewreason(self): doc('#result_list td.field-linked_review_reasons')[4].text_content() == 'Attached to Zab' ) + + def test_sync_policies_no_permission(self): + user = user_factory(email='nobody@mozilla.com') + self.client.force_login(user) + response = self.client.get(self.sync_cinder_policies_url) + assert response.status_code == 403 + response = self.client.post(self.sync_cinder_policies_url) + assert response.status_code == 403 + + def test_sync_policies_wrong_method(self): + response = self.client.get(self.sync_cinder_policies_url) + assert response.status_code == 405 + + @mock.patch('olympia.abuse.admin.sync_cinder_policies.delay') + def test_sync_policies(self, sync_cinder_policies_mock): + response = self.client.post(self.sync_cinder_policies_url, follow=True) + assert response.status_code == 200 + assert response.redirect_chain[-1][0].endswith(self.list_url) + assert response.redirect_chain[-1][1] == 302 + assert sync_cinder_policies_mock.call_count == 1 diff --git a/src/olympia/abuse/tests/test_cinder.py b/src/olympia/abuse/tests/test_cinder.py index f2c6757a3248..3a0299d98ccf 100644 --- a/src/olympia/abuse/tests/test_cinder.py +++ b/src/olympia/abuse/tests/test_cinder.py @@ -1,14 +1,18 @@ import json import os.path import random +import uuid from unittest import mock from django.conf import settings import responses 
+import waffle +from waffle.testutils import override_switch -from olympia import amo -from olympia.abuse.models import AbuseReport +from olympia import amo, core +from olympia.abuse.models import AbuseReport, CinderJob, ContentDecision +from olympia.activity.models import ActivityLog from olympia.addons.models import Addon, Preview from olympia.amo.tests import ( TestCase, @@ -19,6 +23,12 @@ ) from olympia.amo.tests.test_helpers import get_image_path from olympia.bandwagon.models import Collection, CollectionAddon +from olympia.constants.abuse import ( + DECISION_ACTIONS, + ILLEGAL_CATEGORIES, + ILLEGAL_SUBCATEGORIES, +) +from olympia.constants.promoted import NOT_PROMOTED, NOTABLE, RECOMMENDED from olympia.ratings.models import Rating from olympia.reviewers.models import NeedsHumanReview from olympia.users.models import UserProfile @@ -26,6 +36,7 @@ from ..cinder import ( CinderAddon, + CinderAddonHandledByLegal, CinderAddonHandledByReviewers, CinderCollection, CinderRating, @@ -36,19 +47,24 @@ class BaseTestCinderCase: - cinder_class = None # Override in child classes + CinderClass = None # Override in child classes expected_queue_suffix = None # Override in child classes expected_queries_for_report = -1 # Override in child classes def test_queue(self): target = self._create_dummy_target() - cinder_entity = self.cinder_class(target) + cinder_entity = self.CinderClass(target) assert cinder_entity.queue_suffix == self.expected_queue_suffix assert ( cinder_entity.queue == f'{settings.CINDER_QUEUE_PREFIX}{cinder_entity.queue_suffix}' ) + def test_queue_appeal(self): + target = self._create_dummy_target() + cinder_entity = self.CinderClass(target) + assert cinder_entity.queue == cinder_entity.queue_appeal + def _create_dummy_target(self, **kwargs): raise NotImplementedError @@ -96,7 +112,7 @@ def _test_report(self, target): # loaded before. 
abuse_report.reload() report = CinderReport(abuse_report) - cinder_instance = self.cinder_class(abuse_report.target) + cinder_instance = self.CinderClass(abuse_report.target) with self.assertNumQueries(self.expected_queries_for_report): assert cinder_instance.report(report=report, reporter=None) == '1234-xyz' assert ( @@ -122,7 +138,7 @@ def test_report(self): def _test_appeal(self, appealer, cinder_instance=None): fake_decision_id = 'decision-id-to-appeal-666' - cinder_instance = cinder_instance or self.cinder_class( + cinder_instance = cinder_instance or self.CinderClass( self._create_dummy_target() ) @@ -134,7 +150,9 @@ def _test_appeal(self, appealer, cinder_instance=None): ) assert ( cinder_instance.appeal( - decision_id=fake_decision_id, appeal_text='reason', appealer=appealer + decision_cinder_id=fake_decision_id, + appeal_text='reason', + appealer=appealer, ) == '67890-abc' ) @@ -146,7 +164,9 @@ def _test_appeal(self, appealer, cinder_instance=None): ) with self.assertRaises(ConnectionError): cinder_instance.appeal( - decision_id=fake_decision_id, appeal_text='reason', appealer=appealer + decision_cinder_id=fake_decision_id, + appeal_text='reason', + appealer=appealer, ) def test_appeal_anonymous(self): @@ -156,19 +176,14 @@ def test_appeal_logged_in(self): self._test_appeal(CinderUnauthenticatedReporter('itsme', 'm@r.io')) def test_get_str(self): - instance = self.cinder_class(self._create_dummy_target()) + instance = self.CinderClass(self._create_dummy_target()) assert instance.get_str(123) == '123' assert instance.get_str(None) == '' assert instance.get_str(' ') == '' - assert instance.get_str('----') == r'\----' - assert instance.get_str('@@@') == r'\@@@' - assert instance.get_str('==') == r'\==' - assert instance.get_str('_') == r'\_' - assert instance.get_str(' _ ') == r'\_' class TestCinderAddon(BaseTestCinderCase, TestCase): - cinder_class = CinderAddon + CinderClass = CinderAddon # 2 queries expected: # - Authors (can't use the listed_authors 
transformer, we want non-listed as well, # and we have custom limits for batch-sending relationships) @@ -179,9 +194,9 @@ class TestCinderAddon(BaseTestCinderCase, TestCase): def _create_dummy_target(self, **kwargs): return addon_factory(**kwargs) - def test_queue_theme(self): + def test_queue_with_theme(self): target = self._create_dummy_target(type=amo.ADDON_STATICTHEME) - cinder_entity = self.cinder_class(target) + cinder_entity = self.CinderClass(target) expected_queue_suffix = 'themes' assert cinder_entity.queue_suffix == expected_queue_suffix assert ( @@ -189,6 +204,14 @@ def test_queue_theme(self): == f'{settings.CINDER_QUEUE_PREFIX}{cinder_entity.queue_suffix}' ) + def test_queue_appeal(self): + extension = self._create_dummy_target() + assert self.CinderClass(extension).queue_appeal == 'amo-escalations' + + theme = self._create_dummy_target(type=amo.ADDON_STATICTHEME) + # we only have a special queue for extensions + assert self.CinderClass(theme).queue == f'{settings.CINDER_QUEUE_PREFIX}themes' + def test_build_report_payload(self): addon = self._create_dummy_target( homepage='https://home.example.com', @@ -198,8 +221,8 @@ def test_build_report_payload(self): privacy_policy='Söme privacy policy', version_kw={'release_notes': 'Søme release notes'}, ) - message = '- bad addon!' - cinder_addon = self.cinder_class(addon) + message = ' bad addon!' 
+ cinder_addon = self.CinderClass(addon) encoded_message = cinder_addon.get_str(message) abuse_report = AbuseReport.objects.create(guid=addon.guid, message=message) data = cinder_addon.build_report_payload( @@ -218,7 +241,7 @@ def test_build_report_payload(self): 'last_updated': str(addon.last_updated), 'name': str(addon.name), 'privacy_policy': 'Söme privacy policy', - 'promoted_badge': '', + 'promoted': '', 'release_notes': 'Søme release notes', 'slug': addon.slug, 'summary': str(addon.summary), @@ -237,6 +260,8 @@ def test_build_report_payload(self): 'message': encoded_message, 'reason': None, 'considers_illegal': False, + 'illegal_category': None, + 'illegal_subcategory': None, }, 'entity_type': 'amo_report', } @@ -270,6 +295,8 @@ def test_build_report_payload(self): 'message': encoded_message, 'reason': None, 'considers_illegal': False, + 'illegal_category': None, + 'illegal_subcategory': None, }, 'entity_type': 'amo_report', }, @@ -315,6 +342,8 @@ def test_build_report_payload(self): 'message': encoded_message, 'reason': None, 'considers_illegal': False, + 'illegal_category': None, + 'illegal_subcategory': None, }, 'entity_type': 'amo_report', }, @@ -347,12 +376,151 @@ def test_build_report_payload(self): ], } + def test_build_report_payload_promoted_recommended(self): + addon = self._create_dummy_target( + homepage='https://home.example.com', + support_email='support@example.com', + support_url='https://support.example.com/', + description='Sôme description', + privacy_policy='Söme privacy policy', + version_kw={'release_notes': 'Søme release notes'}, + ) + self.make_addon_promoted(addon, group=RECOMMENDED) + message = ' bad addon!' 
+ cinder_addon = self.CinderClass(addon) + encoded_message = cinder_addon.get_str(message) + abuse_report = AbuseReport.objects.create(guid=addon.guid, message=message) + data = cinder_addon.build_report_payload( + report=CinderReport(abuse_report), reporter=None + ) + assert data == { + 'queue_slug': cinder_addon.queue, + 'entity_type': 'amo_addon', + 'entity': { + 'id': str(addon.pk), + 'average_daily_users': addon.average_daily_users, + 'created': str(addon.created), + 'description': str(addon.description), + 'guid': addon.guid, + 'homepage': str(addon.homepage), + 'last_updated': str(addon.last_updated), + 'name': str(addon.name), + 'privacy_policy': 'Söme privacy policy', + 'promoted': 'Recommended', + 'release_notes': 'Søme release notes', + 'slug': addon.slug, + 'summary': str(addon.summary), + 'support_email': str(addon.support_email), + 'support_url': str(addon.support_url), + 'version': addon.current_version.version, + }, + 'reasoning': encoded_message, + 'context': { + 'entities': [ + { + 'attributes': { + 'id': str(abuse_report.pk), + 'created': str(abuse_report.created), + 'locale': None, + 'message': encoded_message, + 'reason': None, + 'considers_illegal': False, + 'illegal_category': None, + 'illegal_subcategory': None, + }, + 'entity_type': 'amo_report', + } + ], + 'relationships': [ + { + 'relationship_type': 'amo_report_of', + 'source_id': str(abuse_report.pk), + 'source_type': 'amo_report', + 'target_id': str(addon.pk), + 'target_type': 'amo_addon', + } + ], + }, + } + + def test_build_report_payload_promoted_notable(self): + addon = self._create_dummy_target( + homepage='https://home.example.com', + support_email='support@example.com', + support_url='https://support.example.com/', + description='Sôme description', + privacy_policy='Söme privacy policy', + version_kw={'release_notes': 'Søme release notes'}, + ) + self.make_addon_promoted(addon, group=NOTABLE) + message = ' bad addon!' 
+ cinder_addon = self.CinderClass(addon) + encoded_message = cinder_addon.get_str(message) + abuse_report = AbuseReport.objects.create(guid=addon.guid, message=message) + data = cinder_addon.build_report_payload( + report=CinderReport(abuse_report), reporter=None + ) + assert data == { + 'queue_slug': cinder_addon.queue, + 'entity_type': 'amo_addon', + 'entity': { + 'id': str(addon.pk), + 'average_daily_users': addon.average_daily_users, + 'created': str(addon.created), + 'description': str(addon.description), + 'guid': addon.guid, + 'homepage': str(addon.homepage), + 'last_updated': str(addon.last_updated), + 'name': str(addon.name), + 'privacy_policy': 'Söme privacy policy', + 'promoted': 'Notable', + 'release_notes': 'Søme release notes', + 'slug': addon.slug, + 'summary': str(addon.summary), + 'support_email': str(addon.support_email), + 'support_url': str(addon.support_url), + 'version': addon.current_version.version, + }, + 'reasoning': encoded_message, + 'context': { + 'entities': [ + { + 'attributes': { + 'id': str(abuse_report.pk), + 'created': str(abuse_report.created), + 'locale': None, + 'message': encoded_message, + 'reason': None, + 'considers_illegal': False, + 'illegal_category': None, + 'illegal_subcategory': None, + }, + 'entity_type': 'amo_report', + } + ], + 'relationships': [ + { + 'relationship_type': 'amo_report_of', + 'source_id': str(abuse_report.pk), + 'source_type': 'amo_report', + 'target_id': str(addon.pk), + 'target_type': 'amo_addon', + } + ], + }, + } + + self.make_addon_promoted(addon, NOT_PROMOTED) + data = cinder_addon.build_report_payload( + report=CinderReport(abuse_report), reporter=None + ) + assert data['entity']['promoted'] == '' + def test_build_report_payload_with_author(self): author = user_factory() addon = self._create_dummy_target(users=[author]) message = '@bad addon!' 
- encoded_message = rf'\{message}' - cinder_addon = self.cinder_class(addon) + cinder_addon = self.CinderClass(addon) abuse_report = AbuseReport.objects.create(guid=addon.guid, message=message) data = cinder_addon.build_report_payload( @@ -375,9 +543,11 @@ def test_build_report_payload_with_author(self): 'id': str(abuse_report.pk), 'created': str(abuse_report.created), 'locale': None, - 'message': encoded_message, + 'message': message, 'reason': None, 'considers_illegal': False, + 'illegal_category': None, + 'illegal_subcategory': None, }, 'entity_type': 'amo_report', }, @@ -422,9 +592,11 @@ def test_build_report_payload_with_author(self): 'id': str(abuse_report.pk), 'created': str(abuse_report.created), 'locale': None, - 'message': encoded_message, + 'message': message, 'reason': None, 'considers_illegal': False, + 'illegal_category': None, + 'illegal_subcategory': None, }, 'entity_type': 'amo_report', }, @@ -467,8 +639,8 @@ def test_build_report_payload_with_author(self): def test_build_report_payload_with_author_and_reporter_being_the_same(self): user = user_factory() addon = self._create_dummy_target(users=[user]) - cinder_addon = self.cinder_class(addon) - message = '_self reporting!' + cinder_addon = self.CinderClass(addon) + message = 'self reporting! 
' encoded_message = cinder_addon.get_str(message) abuse_report = AbuseReport.objects.create(guid=addon.guid, message=message) @@ -495,6 +667,8 @@ def test_build_report_payload_with_author_and_reporter_being_the_same(self): 'message': encoded_message, 'reason': None, 'considers_illegal': False, + 'illegal_category': None, + 'illegal_subcategory': None, }, 'entity_type': 'amo_report', }, @@ -550,8 +724,8 @@ def test_build_report_payload_with_previews_and_icon( ) (p0, p1) = list(addon.previews.all()) Preview.objects.create(addon=addon, position=5) # No file, ignored - cinder_addon = self.cinder_class(addon) - message = '=report with images' + cinder_addon = self.CinderClass(addon) + message = ' report with images ' encoded_message = cinder_addon.get_str(message) abuse_report = AbuseReport.objects.create(guid=addon.guid, message=message) @@ -586,7 +760,7 @@ def test_build_report_payload_with_previews_and_icon( }, ], 'privacy_policy': '', - 'promoted_badge': '', + 'promoted': '', 'release_notes': '', 'slug': addon.slug, 'summary': str(addon.summary), @@ -605,6 +779,8 @@ def test_build_report_payload_with_previews_and_icon( 'message': encoded_message, 'reason': None, 'considers_illegal': False, + 'illegal_category': None, + 'illegal_subcategory': None, }, 'entity_type': 'amo_report', }, @@ -650,8 +826,8 @@ def test_build_report_payload_with_theme_previews( VersionPreview.objects.create( version=addon.current_version, position=5 ) # No file, ignored - cinder_addon = self.cinder_class(addon) - message = '-report with images' + cinder_addon = self.CinderClass(addon) + message = 'report with images' encoded_message = cinder_addon.get_str(message) abuse_report = AbuseReport.objects.create(guid=addon.guid, message=message) @@ -678,7 +854,7 @@ def test_build_report_payload_with_theme_previews( }, ], 'privacy_policy': '', - 'promoted_badge': '', + 'promoted': '', 'release_notes': '', 'slug': addon.slug, 'summary': str(addon.summary), @@ -697,6 +873,8 @@ def 
test_build_report_payload_with_theme_previews( 'message': encoded_message, 'reason': None, 'considers_illegal': False, + 'illegal_category': None, + 'illegal_subcategory': None, }, 'entity_type': 'amo_report', }, @@ -718,7 +896,7 @@ def test_build_report_payload_only_includes_first_batch_of_relationships(self): addon = self._create_dummy_target() for _ in range(0, 6): addon.authors.add(user_factory()) - cinder_addon = self.cinder_class(addon) + cinder_addon = self.CinderClass(addon) message = 'report for lots of relationships' abuse_report = AbuseReport.objects.create(guid=addon.guid, message=message) data = cinder_addon.build_report_payload( @@ -760,6 +938,8 @@ def test_build_report_payload_only_includes_first_batch_of_relationships(self): 'message': 'report for lots of relationships', 'reason': None, 'considers_illegal': False, + 'illegal_category': None, + 'illegal_subcategory': None, }, 'entity_type': 'amo_report', }, @@ -794,7 +974,7 @@ def test_report_additional_context(self): addon = self._create_dummy_target() for _ in range(0, 6): addon.authors.add(user_factory()) - cinder_addon = self.cinder_class(addon) + cinder_addon = self.CinderClass(addon) responses.add( responses.POST, @@ -898,7 +1078,7 @@ def test_report_additional_context_error(self): addon = self._create_dummy_target() for _ in range(0, 6): addon.authors.add(user_factory()) - cinder_addon = self.cinder_class(addon) + cinder_addon = self.CinderClass(addon) responses.add( responses.POST, @@ -910,63 +1090,89 @@ def test_report_additional_context_error(self): cinder_addon.report_additional_context() +@override_switch('dsa-abuse-reports-review', active=True) +@override_switch('dsa-appeals-review', active=True) +@override_switch('dsa-cinder-forwarded-review', active=True) class TestCinderAddonHandledByReviewers(TestCinderAddon): - cinder_class = CinderAddonHandledByReviewers - # Expected queries is a bit larger here because of activity log and - # needs human review checks + insertion. 
- # - 1 Fetch Version - # - 2 Fetch Translations for that Version - # - 3 Fetch NeedsHumanReview - # - 4 Fetch (task) User - # - 5 Create ActivityLog - # - 6 Create ActivityLogComment - # - 7 Create ActivityLogComment - # - 8 Create VersionLog - # - 9 Create NeedsHumanReview - # - 10 Fetch Versions to flag - # - 11 Update due date on Versions - # - 12 Fetch Latest signed Version - # The last 2 are for rendering the payload to Cinder like CinderAddon: - # - 13 Fetch Addon authors - # - 14 Fetch Promoted Addon - expected_queries_for_report = 14 + CinderClass = CinderAddonHandledByReviewers + # For rendering the payload to Cinder like CinderAddon: + # - 1 Fetch Addon authors + # - 2 Fetch Promoted Addon + expected_queries_for_report = 2 expected_queue_suffix = 'addon-infringement' - def test_queue(self): - super().test_queue() - # For this class the property should be guaranteed to be static. - assert self.cinder_class.queue == 'amo-env-addon-infringement' - - def test_queue_theme(self): + def test_queue_with_theme(self): # Contrary to reports handled by Cinder moderators, for reports handled # by AMO reviewers the queue should remain the same regardless of the # addon-type. target = self._create_dummy_target(type=amo.ADDON_STATICTHEME) - cinder_entity = self.cinder_class(target) + cinder_entity = self.CinderClass(target) assert cinder_entity.queue_suffix == self.expected_queue_suffix assert ( cinder_entity.queue == f'{settings.CINDER_QUEUE_PREFIX}{cinder_entity.queue_suffix}' ) + def test_queue_appeal(self): + # Contrary to reports handled by Cinder moderators, for reports handled + # by AMO reviewers there is no special queue. 
+ BaseTestCinderCase.test_queue_appeal(self) + def setUp(self): - user_factory(id=settings.TASK_USER_ID) + self.task_user = user_factory(id=settings.TASK_USER_ID) def test_report(self): addon = self._create_dummy_target() - addon.current_version.file.update(is_signed=True) + # Make sure this is testing the case where no user is set (we fall back + # to the task user). + assert core.get_user() is None + # Trigger switch_is_active to ensure it's cached to make db query + # count more predictable. + waffle.switch_is_active('dsa-abuse-reports-review') self._test_report(addon) + # Adding the NHR is done by post_report(). + assert addon.current_version.needshumanreview_set.count() == 0 + job = CinderJob.objects.create(job_id='1234-xyz') + cinder_instance = self.CinderClass(addon) + with self.assertNumQueries(11): + # - 1 Fetch Cinder Decision + # - 2 Fetch NeedsHumanReview + # - 3 Create NeedsHumanReview + # - 4 Query if due_date is needed for version + # - 5 Query existing versions for due dates to inherit + # - 6 Update due date on Version + # - 7 Fetch task user + # - 8 Create ActivityLog + # - 9 Update ActivityLog + # - 10 Create ActivityLogComment + # - 11 Create VersionLog + cinder_instance.post_report(job) assert ( addon.current_version.needshumanreview_set.get().reason - == NeedsHumanReview.REASON_ABUSE_ADDON_VIOLATION + == NeedsHumanReview.REASONS.ABUSE_ADDON_VIOLATION + ) + assert addon.current_version.reload().due_date + assert ActivityLog.objects.for_versions(addon.current_version).filter( + action=amo.LOG.NEEDS_HUMAN_REVIEW_CINDER.id ) + @override_switch('dsa-abuse-reports-review', active=False) + def test_report_waffle_switch_off(self): + addon = self._create_dummy_target() + # Trigger switch_is_active to ensure it's cached to make db query + # count more predictable. + waffle.switch_is_active('dsa-abuse-reports-review') + # We are no longer doing the queries for the activitylog, needshumanreview + # etc since the waffle switch is off. 
So we're back to the same number of + # queries made by the reports that go to Cinder. + self.expected_queries_for_report = TestCinderAddon.expected_queries_for_report + self._test_report(addon) + assert addon.current_version.needshumanreview_set.count() == 0 + def test_report_with_version(self): addon = self._create_dummy_target() - addon.current_version.file.update(is_signed=True) other_version = version_factory( - addon=addon, - file_kw={'is_signed': True, 'status': amo.STATUS_AWAITING_REVIEW}, + addon=addon, file_kw={'status': amo.STATUS_AWAITING_REVIEW} ) responses.add( responses.POST, @@ -978,44 +1184,133 @@ def test_report_with_version(self): guid=addon.guid, addon_version=other_version.version ) report = CinderReport(abuse_report) - cinder_instance = self.cinder_class(addon, other_version) - assert cinder_instance.report(report=report, reporter=None) + cinder_instance = self.CinderClass(addon, version_string=other_version.version) assert cinder_instance.report(report=report, reporter=None) + job = CinderJob.objects.create(job_id='1234-xyz') assert not addon.current_version.needshumanreview_set.exists() - # We called report() multiple times but there should be only one + cinder_instance.post_report(job) + cinder_instance.post_report(job) + cinder_instance.post_report(job) + # We called post_report() multiple times but there should be only one # needs human review instance. 
assert ( other_version.needshumanreview_set.get().reason - == NeedsHumanReview.REASON_ABUSE_ADDON_VIOLATION + == NeedsHumanReview.REASONS.ABUSE_ADDON_VIOLATION ) + assert other_version.reload().due_date def test_appeal_anonymous(self): addon = self._create_dummy_target() - addon.current_version.file.update(is_signed=True) self._test_appeal( - CinderUnauthenticatedReporter('itsme', 'm@r.io'), self.cinder_class(addon) + CinderUnauthenticatedReporter('itsme', 'm@r.io'), self.CinderClass(addon) ) assert ( addon.current_version.needshumanreview_set.get().reason - == NeedsHumanReview.REASON_ABUSE_ADDON_VIOLATION_APPEAL + == NeedsHumanReview.REASONS.ADDON_REVIEW_APPEAL ) + assert addon.current_version.reload().due_date def test_appeal_logged_in(self): addon = self._create_dummy_target() - addon.current_version.file.update(is_signed=True) - self._test_appeal(CinderUser(user_factory()), self.cinder_class(addon)) + self._test_appeal(CinderUser(user_factory()), self.CinderClass(addon)) assert ( addon.current_version.needshumanreview_set.get().reason - == NeedsHumanReview.REASON_ABUSE_ADDON_VIOLATION_APPEAL + == NeedsHumanReview.REASONS.ADDON_REVIEW_APPEAL + ) + assert addon.current_version.reload().due_date + + def test_appeal_specific_version(self): + addon = self._create_dummy_target() + other_version = version_factory( + addon=addon, + channel=amo.CHANNEL_UNLISTED, + file_kw={'status': amo.STATUS_AWAITING_REVIEW}, + ) + self._test_appeal( + CinderUser(user_factory()), + self.CinderClass(addon, version_string=other_version.version), + ) + assert not addon.current_version.needshumanreview_set.exists() + assert ( + other_version.needshumanreview_set.get().reason + == NeedsHumanReview.REASONS.ADDON_REVIEW_APPEAL + ) + assert not addon.current_version.reload().due_date + assert other_version.reload().due_date + + def test_appeal_no_current_version(self): + addon = self._create_dummy_target( + status=amo.STATUS_NULL, file_kw={'status': amo.STATUS_DISABLED} + ) + version = 
addon.versions.last() + assert not addon.current_version + self._test_appeal( + CinderUser(user_factory()), + self.CinderClass(addon), + ) + assert ( + version.needshumanreview_set.get().reason + == NeedsHumanReview.REASONS.ADDON_REVIEW_APPEAL ) + assert version.reload().due_date + + @override_switch('dsa-appeals-review', active=False) + def test_appeal_waffle_switch_off(self): + addon = self._create_dummy_target() + # We are no longer doing the queries for the activitylog, needshumanreview + # etc since the waffle switch is off. So we're back to the same number of + # queries made by the reports that go to Cinder. + self.expected_queries_for_report = TestCinderAddon.expected_queries_for_report + self._test_appeal(CinderUser(user_factory()), self.CinderClass(addon)) + assert addon.current_version.needshumanreview_set.count() == 0 + + def test_report_with_ongoing_appeal(self): + addon = self._create_dummy_target() + job = CinderJob.objects.create(job_id='1234-xyz') + job.appealed_decisions.add( + ContentDecision.objects.create( + addon=addon, + cinder_id='1234-decision', + action=DECISION_ACTIONS.AMO_REJECT_VERSION_ADDON, + ) + ) + # Trigger switch_is_active to ensure it's cached to make db query + # count more predictable. + waffle.switch_is_active('dsa-abuse-reports-review') + self._test_report(addon) + cinder_instance = self.CinderClass(addon) + cinder_instance.post_report(job) + # The add-on does not get flagged again while the appeal is ongoing. + assert addon.current_version.needshumanreview_set.count() == 0 + + def test_report_with_ongoing_forwarded_appeal(self): + addon = self._create_dummy_target() + job = CinderJob.objects.create(job_id='1234-xyz') + CinderJob.objects.create(forwarded_to_job=job) + job.appealed_decisions.add( + ContentDecision.objects.create( + addon=addon, + cinder_id='1234-decision', + action=DECISION_ACTIONS.AMO_REJECT_VERSION_ADDON, + ) + ) + # Trigger switch_is_active to ensure it's cached to make db query + # count more predictable. 
+ waffle.switch_is_active('dsa-abuse-reports-review') + self._test_report(addon) + cinder_instance = self.CinderClass(addon) + cinder_instance.post_report(job) + # The add-on does not get flagged again while the appeal is ongoing. + assert addon.current_version.needshumanreview_set.count() == 0 def test_create_decision(self): target = self._create_dummy_target() + cinder_id = uuid.uuid4().hex responses.add( responses.POST, f'{settings.CINDER_SERVER_URL}create_decision', - json={'uuid': '123'}, + json={'uuid': cinder_id}, status=201, ) responses.add( @@ -1024,15 +1319,19 @@ def test_create_decision(self): json={'error': 'reason'}, status=400, ) - cinder_instance = self.cinder_class(target) + cinder_instance = self.CinderClass(target) assert ( cinder_instance.create_decision( - reasoning='some review text', policy_uuids=['12345678'] + action=DECISION_ACTIONS.AMO_REJECT_VERSION_ADDON.api_value, + reasoning='some review text', + policy_uuids=['12345678'], ) - == '123' + == cinder_id ) request = responses.calls[0].request request_body = json.loads(request.body) + assert request_body['enforcement_actions_slugs'] == ['amo-reject-version-addon'] + assert request_body['enforcement_actions_update_strategy'] == 'set' assert request_body['policy_uuids'] == ['12345678'] assert request_body['reasoning'] == 'some review text' assert request_body['entity']['id'] == str(target.id) @@ -1040,12 +1339,58 @@ def test_create_decision(self): # Last response is a 400, we raise for that. 
with self.assertRaises(ConnectionError): cinder_instance.create_decision( - reasoning='some review text', policy_uuids=['12345678'] + action='something', + reasoning='some review text', + policy_uuids=['12345678'], + ) + + def test_create_job_decision(self): + target = self._create_dummy_target() + job = CinderJob.objects.create(job_id='1234') + cinder_id = uuid.uuid4().hex + + responses.add( + responses.POST, + f'{settings.CINDER_SERVER_URL}jobs/{job.job_id}/decision', + json={'uuid': cinder_id}, + status=201, + ) + responses.add( + responses.POST, + f'{settings.CINDER_SERVER_URL}jobs/{job.job_id}/decision', + json={'error': 'reason'}, + status=400, + ) + cinder_instance = self.CinderClass(target) + assert ( + cinder_instance.create_job_decision( + job_id=job.job_id, + action=DECISION_ACTIONS.AMO_REJECT_VERSION_ADDON.api_value, + reasoning='some review text', + policy_uuids=['12345678'], + ) + == cinder_id + ) + request = responses.calls[0].request + request_body = json.loads(request.body) + assert request_body['enforcement_actions_slugs'] == ['amo-reject-version-addon'] + assert request_body['enforcement_actions_update_strategy'] == 'set' + assert request_body['policy_uuids'] == ['12345678'] + assert request_body['reasoning'] == 'some review text' + assert 'entity' not in request_body + + # Last response is a 400, we raise for that. 
+ with self.assertRaises(ConnectionError): + cinder_instance.create_job_decision( + job_id=job.job_id, + action='something', + reasoning='some review text', + policy_uuids=['12345678'], ) def test_close_job(self): target = self._create_dummy_target() - job_id = '123' + job_id = uuid.uuid4().hex responses.add( responses.POST, f'{settings.CINDER_SERVER_URL}jobs/{job_id}/cancel', @@ -1058,12 +1403,248 @@ def test_close_job(self): json={'error': 'reason'}, status=400, ) - cinder_instance = self.cinder_class(target) + cinder_instance = self.CinderClass(target) assert cinder_instance.close_job(job_id=job_id) == job_id + def _setup_post_queue_move_test(self): + addon = self._create_dummy_target() + listed_version = addon.current_version + unlisted_version = version_factory(addon=addon, channel=amo.CHANNEL_UNLISTED) + ActivityLog.objects.all().delete() + cinder_instance = self.CinderClass(addon) + cinder_job = CinderJob.objects.create(target_addon=addon, job_id='1') + AbuseReport.objects.create( + reason=AbuseReport.REASONS.HATEFUL_VIOLENT_DECEPTIVE, + guid=addon.guid, + cinder_job=cinder_job, + reporter_email='email@domain.com', + ) + AbuseReport.objects.create( + reason=AbuseReport.REASONS.HATEFUL_VIOLENT_DECEPTIVE, + guid=addon.guid, + cinder_job=cinder_job, + reporter=user_factory(), + ) + return cinder_instance, cinder_job, listed_version, unlisted_version + + def _check_post_queue_move_test(self, listed_version, unlisted_version): + assert listed_version.addon.reload().status == amo.STATUS_APPROVED + assert ( + listed_version.reload().needshumanreview_set.get().reason + == NeedsHumanReview.REASONS.CINDER_ESCALATION + ) + assert not unlisted_version.reload().needshumanreview_set.exists() + assert listed_version.reload().due_date + assert not unlisted_version.reload().due_date + assert ActivityLog.objects.count() == 1 + activity = ActivityLog.objects.filter( + action=amo.LOG.NEEDS_HUMAN_REVIEW_CINDER.id + ).get() + assert activity.arguments == [listed_version] + assert 
activity.user == self.task_user + + def test_post_queue_move(self): + cinder_instance, cinder_job, listed_version, unlisted_version = ( + self._setup_post_queue_move_test() + ) + + cinder_instance.post_queue_move(job=cinder_job) + + self._check_post_queue_move_test(listed_version, unlisted_version) + + def test_post_queue_move_specific_version(self): + # but if we have a version specified, we flag that version + cinder_instance, cinder_job, listed_version, unlisted_version = ( + self._setup_post_queue_move_test() + ) + other_version = version_factory( + addon=listed_version.addon, file_kw={'status': amo.STATUS_DISABLED} + ) + assert not other_version.due_date + cinder_job.abusereport_set.update(addon_version=other_version.version) + ActivityLog.objects.all().delete() + + cinder_instance.post_queue_move(job=cinder_job) + + assert not listed_version.reload().needshumanreview_set.exists() + assert not unlisted_version.reload().needshumanreview_set.exists() + assert other_version.reload().due_date + assert ( + other_version.needshumanreview_set.get().reason + == NeedsHumanReview.REASONS.CINDER_ESCALATION + ) + assert ActivityLog.objects.count() == 1 + activity = ActivityLog.objects.get(action=amo.LOG.NEEDS_HUMAN_REVIEW_CINDER.id) + assert activity.arguments == [other_version] + assert activity.user == self.task_user + + def test_workflow_recreate(self): + cinder_instance, cinder_job, listed_version, unlisted_version = ( + self._setup_post_queue_move_test() + ) + responses.add( + responses.POST, + f'{settings.CINDER_SERVER_URL}create_report', + json={'job_id': '2'}, + status=201, + ) + + assert cinder_instance.workflow_recreate(notes='foo', job=cinder_job) == '2' + assert json.loads(responses.calls[0].request.body)['reasoning'] == 'foo' + + self._check_post_queue_move_test(listed_version, unlisted_version) + + def test_post_queue_move_no_versions_to_flag(self): + cinder_instance, cinder_job, listed_version, unlisted_version = ( + self._setup_post_queue_move_test() + ) + 
NeedsHumanReview.objects.create( + reason=NeedsHumanReview.REASONS.CINDER_ESCALATION, version=listed_version + ) + NeedsHumanReview.objects.create( + reason=NeedsHumanReview.REASONS.CINDER_ESCALATION, version=unlisted_version + ) + assert NeedsHumanReview.objects.count() == 2 + ActivityLog.objects.all().delete() + + cinder_instance.post_queue_move(job=cinder_job) + assert NeedsHumanReview.objects.count() == 2 + assert ActivityLog.objects.count() == 0 + + def test_workflow_recreate_no_versions_to_flag(self): + cinder_instance, cinder_job, listed_version, unlisted_version = ( + self._setup_post_queue_move_test() + ) + NeedsHumanReview.objects.create( + reason=NeedsHumanReview.REASONS.CINDER_ESCALATION, version=listed_version + ) + NeedsHumanReview.objects.create( + reason=NeedsHumanReview.REASONS.CINDER_ESCALATION, version=unlisted_version + ) + assert NeedsHumanReview.objects.count() == 2 + ActivityLog.objects.all().delete() + responses.add( + responses.POST, + f'{settings.CINDER_SERVER_URL}create_report', + json={'job_id': '2'}, + status=201, + ) + assert cinder_instance.workflow_recreate(notes=None, job=cinder_job) == '2' + assert NeedsHumanReview.objects.count() == 2 + assert ActivityLog.objects.count() == 0 + + @override_switch('dsa-cinder-forwarded-review', active=False) + def test_post_queue_move_waffle_switch_off(self): + # Escalation when the waffle switch is off is essentially a no-op on + # AMO side. 
+ cinder_instance, cinder_job, listed_version, unlisted_version = ( + self._setup_post_queue_move_test() + ) + + cinder_instance.post_queue_move(job=cinder_job) + + assert listed_version.addon.reload().status == amo.STATUS_APPROVED + assert not listed_version.reload().needshumanreview_set.exists() + assert not listed_version.due_date + assert not unlisted_version.reload().needshumanreview_set.exists() + assert not unlisted_version.due_date + assert ActivityLog.objects.count() == 0 + + other_version = version_factory( + addon=listed_version.addon, + file_kw={'status': amo.STATUS_DISABLED}, + ) + assert not other_version.due_date + ActivityLog.objects.all().delete() + cinder_job.abusereport_set.update(addon_version=other_version.version) + cinder_instance.post_queue_move(job=cinder_job) + assert not listed_version.reload().needshumanreview_set.exists() + assert not unlisted_version.reload().needshumanreview_set.exists() + other_version.reload() + assert not other_version.due_date + assert not listed_version.reload().needshumanreview_set.exists() + assert not unlisted_version.reload().needshumanreview_set.exists() + + @override_switch('dsa-cinder-forwarded-review', active=False) + def test_workflow_recreate_waffle_switch_off(self): + cinder_instance, cinder_job, listed_version, unlisted_version = ( + self._setup_post_queue_move_test() + ) + responses.add( + responses.POST, + f'{settings.CINDER_SERVER_URL}create_report', + json={'job_id': '2'}, + status=201, + ) + assert cinder_instance.workflow_recreate(notes='', job=cinder_job) == '2' + + assert listed_version.addon.reload().status == amo.STATUS_APPROVED + assert not listed_version.reload().needshumanreview_set.exists() + assert not listed_version.due_date + assert not unlisted_version.reload().needshumanreview_set.exists() + assert not unlisted_version.due_date + assert ActivityLog.objects.count() == 0 + + +class TestCinderAddonHandledByLegal(TestCinderAddon): + CinderClass = CinderAddonHandledByLegal + # For 
rendering the payload to Cinder like CinderAddon: + # - 1 Fetch Addon authors + # - 2 Fetch Promoted Addon + expected_queries_for_report = 2 + expected_queue_suffix = None + + def test_queue(self): + extension = self._create_dummy_target() + assert self.CinderClass(extension).queue == 'legal-escalations' + + def test_queue_appeal(self): + extension = self._create_dummy_target() + assert self.CinderClass(extension).queue_appeal == 'legal-escalations' + + def test_queue_with_theme(self): + # Contrary to reports handled by Cinder moderators, for reports handled + # by legal the queue should remain the same regardless of the + # addon-type. + target = self._create_dummy_target(type=amo.ADDON_STATICTHEME) + assert self.CinderClass(target).queue_appeal == 'legal-escalations' + + def test_workflow_recreate(self): + """Test that a job is created in the legal queue.""" + # Specifically create signed files because there are some circumstances where we + # filter out unsigned files from NeedsHumanReview and we don't want a false + # positive. 
+ addon = self._create_dummy_target(file_kw={'is_signed': True}) + listed_version = addon.current_version + unlisted_version = version_factory( + addon=addon, channel=amo.CHANNEL_UNLISTED, file_kw={'is_signed': True} + ) + cinder_instance = self.CinderClass(addon) + cinder_job = CinderJob.objects.create(target_addon=addon, job_id='1') + responses.add( + responses.POST, + f'{settings.CINDER_SERVER_URL}create_report', + json={'job_id': '2'}, + status=201, + ) + + assert cinder_instance.workflow_recreate(notes='foo', job=cinder_job) == '2' + + # Check that we've not inadvertently changed the status + assert listed_version.addon.reload().status == amo.STATUS_APPROVED + # And check there have been no needshumanreview instances created or activity + # - only reviewer tools handled jobs should generated needshumanreviews + assert not listed_version.reload().needshumanreview_set.exists() + assert not unlisted_version.reload().needshumanreview_set.exists() + assert ( + ActivityLog.objects.filter(action=amo.LOG.NEEDS_HUMAN_REVIEW.id).count() + == 0 + ) + assert json.loads(responses.calls[0].request.body)['reasoning'] == 'foo' + class TestCinderUser(BaseTestCinderCase, TestCase): - cinder_class = CinderUser + CinderClass = CinderUser # 2 queries expected: # - Related add-ons # - Number of listed add-ons @@ -1080,8 +1661,8 @@ def test_build_report_payload(self): occupation='Blah', homepage='http://home.example.com', ) - message = '@bad person!' - cinder_user = self.cinder_class(user) + message = ' bad person!' 
+ cinder_user = self.CinderClass(user) encoded_message = cinder_user.get_str(message) abuse_report = AbuseReport.objects.create(user=user, message=message) @@ -1115,6 +1696,8 @@ def test_build_report_payload(self): 'message': encoded_message, 'reason': None, 'considers_illegal': False, + 'illegal_category': None, + 'illegal_subcategory': None, }, 'entity_type': 'amo_report', } @@ -1148,6 +1731,8 @@ def test_build_report_payload(self): 'message': encoded_message, 'reason': None, 'considers_illegal': False, + 'illegal_category': None, + 'illegal_subcategory': None, }, 'entity_type': 'amo_report', }, @@ -1193,6 +1778,8 @@ def test_build_report_payload(self): 'message': encoded_message, 'reason': None, 'considers_illegal': False, + 'illegal_category': None, + 'illegal_subcategory': None, }, 'entity_type': 'amo_report', }, @@ -1228,8 +1815,8 @@ def test_build_report_payload(self): def test_build_report_payload_with_author_and_reporter_being_the_same(self): user = self._create_dummy_target() addon = addon_factory(users=[user]) - cinder_user = self.cinder_class(user) - message = '_I dont like this guy' + cinder_user = self.CinderClass(user) + message = 'I dont like this guy' encoded_message = cinder_user.get_str(message) abuse_report = AbuseReport.objects.create(user=user, message=message) @@ -1247,7 +1834,7 @@ def test_build_report_payload_with_author_and_reporter_being_the_same(self): 'guid': addon.guid, 'last_updated': str(addon.last_updated), 'name': str(addon.name), - 'promoted_badge': '', + 'promoted': '', 'slug': addon.slug, 'summary': str(addon.summary), }, @@ -1260,6 +1847,8 @@ def test_build_report_payload_with_author_and_reporter_being_the_same(self): 'message': encoded_message, 'reason': None, 'considers_illegal': False, + 'illegal_category': None, + 'illegal_subcategory': None, }, 'entity_type': 'amo_report', }, @@ -1302,7 +1891,7 @@ def test_build_report_payload_with_author_and_reporter_being_the_same(self): def test_build_report_payload_addon_author(self): 
user = self._create_dummy_target() addon = addon_factory(users=[user]) - cinder_user = self.cinder_class(user) + cinder_user = self.CinderClass(user) message = '@bad person!' encoded_message = cinder_user.get_str(message) abuse_report = AbuseReport.objects.create(user=user, message=message) @@ -1321,7 +1910,7 @@ def test_build_report_payload_addon_author(self): 'guid': addon.guid, 'last_updated': str(addon.last_updated), 'name': str(addon.name), - 'promoted_badge': '', + 'promoted': '', 'slug': addon.slug, 'summary': str(addon.summary), }, @@ -1334,6 +1923,8 @@ def test_build_report_payload_addon_author(self): 'message': encoded_message, 'reason': None, 'considers_illegal': False, + 'illegal_category': None, + 'illegal_subcategory': None, }, 'entity_type': 'amo_report', }, @@ -1372,7 +1963,7 @@ def test_build_report_payload_addon_author(self): 'guid': addon.guid, 'last_updated': str(addon.last_updated), 'name': str(addon.name), - 'promoted_badge': '', + 'promoted': '', 'slug': addon.slug, 'summary': str(addon.summary), }, @@ -1385,6 +1976,8 @@ def test_build_report_payload_addon_author(self): 'message': encoded_message, 'reason': None, 'considers_illegal': False, + 'illegal_category': None, + 'illegal_subcategory': None, }, 'entity_type': 'amo_report', }, @@ -1442,7 +2035,7 @@ def test_build_report_payload_with_picture( user.update(picture_type='image/png') message = '=bad person!' 
- cinder_user = self.cinder_class(user) + cinder_user = self.CinderClass(user) encoded_message = cinder_user.get_str(message) abuse_report = AbuseReport.objects.create(user=user, message=message) @@ -1480,6 +2073,8 @@ def test_build_report_payload_with_picture( 'message': encoded_message, 'reason': None, 'considers_illegal': False, + 'illegal_category': None, + 'illegal_subcategory': None, }, 'entity_type': 'amo_report', } @@ -1510,7 +2105,7 @@ def test_build_report_payload_only_includes_first_batch_of_relationships(self): user = self._create_dummy_target() for _ in range(0, 6): user.addons.add(addon_factory()) - cinder_user = self.cinder_class(user) + cinder_user = self.CinderClass(user) message = 'report for lots of relationships' abuse_report = AbuseReport.objects.create(user=user, message=message) data = cinder_user.build_report_payload( @@ -1532,7 +2127,7 @@ def test_build_report_payload_only_includes_first_batch_of_relationships(self): 'id': str(first_addon.pk), 'last_updated': str(first_addon.last_updated), 'name': str(first_addon.name), - 'promoted_badge': '', + 'promoted': '', 'slug': str(first_addon.slug), 'summary': str(first_addon.summary), }, @@ -1546,7 +2141,7 @@ def test_build_report_payload_only_includes_first_batch_of_relationships(self): 'id': str(second_addon.pk), 'last_updated': str(second_addon.last_updated), 'name': str(second_addon.name), - 'promoted_badge': '', + 'promoted': '', 'slug': str(second_addon.slug), 'summary': str(second_addon.summary), }, @@ -1560,6 +2155,8 @@ def test_build_report_payload_only_includes_first_batch_of_relationships(self): 'message': 'report for lots of relationships', 'reason': None, 'considers_illegal': False, + 'illegal_category': None, + 'illegal_subcategory': None, }, 'entity_type': 'amo_report', }, @@ -1594,7 +2191,7 @@ def test_report_additional_context(self): user = self._create_dummy_target() for _ in range(0, 6): user.addons.add(addon_factory()) - cinder_user = self.cinder_class(user) + cinder_user = 
self.CinderClass(user) responses.add( responses.POST, @@ -1619,7 +2216,7 @@ def test_report_additional_context(self): 'id': str(third_addon.pk), 'last_updated': str(third_addon.last_updated), 'name': str(third_addon.name), - 'promoted_badge': '', + 'promoted': '', 'slug': str(third_addon.slug), 'summary': str(third_addon.summary), }, @@ -1633,7 +2230,7 @@ def test_report_additional_context(self): 'id': str(fourth_addon.pk), 'last_updated': str(fourth_addon.last_updated), 'name': str(fourth_addon.name), - 'promoted_badge': '', + 'promoted': '', 'slug': str(fourth_addon.slug), 'summary': str(fourth_addon.summary), }, @@ -1670,7 +2267,7 @@ def test_report_additional_context(self): 'id': str(fifth_addon.pk), 'last_updated': str(fifth_addon.last_updated), 'name': str(fifth_addon.name), - 'promoted_badge': '', + 'promoted': '', 'slug': str(fifth_addon.slug), 'summary': str(fifth_addon.summary), }, @@ -1684,7 +2281,7 @@ def test_report_additional_context(self): 'id': str(sixth_addon.pk), 'last_updated': str(sixth_addon.last_updated), 'name': str(sixth_addon.name), - 'promoted_badge': '', + 'promoted': '', 'slug': str(sixth_addon.slug), 'summary': str(sixth_addon.summary), }, @@ -1714,7 +2311,7 @@ def test_report_additional_context_error(self): user = self._create_dummy_target() for _ in range(0, 6): user.addons.add(addon_factory()) - cinder_user = self.cinder_class(user) + cinder_user = self.CinderClass(user) responses.add( responses.POST, @@ -1727,7 +2324,7 @@ def test_report_additional_context_error(self): class TestCinderRating(BaseTestCinderCase, TestCase): - cinder_class = CinderRating + CinderClass = CinderRating expected_queries_for_report = 1 # For the author expected_queue_suffix = 'ratings' @@ -1742,7 +2339,7 @@ def _create_dummy_target(self, **kwargs): def test_build_report_payload(self): rating = self._create_dummy_target() - cinder_rating = self.cinder_class(rating) + cinder_rating = self.CinderClass(rating) message = '-bad rating!' 
encoded_message = cinder_rating.get_str(message) abuse_report = AbuseReport.objects.create(rating=rating, message=message) @@ -1780,6 +2377,8 @@ def test_build_report_payload(self): 'message': encoded_message, 'reason': None, 'considers_illegal': False, + 'illegal_category': None, + 'illegal_subcategory': None, }, 'entity_type': 'amo_report', }, @@ -1806,7 +2405,7 @@ def test_build_report_payload(self): def test_build_report_payload_with_author_and_reporter_being_the_same(self): rating = self._create_dummy_target() user = rating.user - cinder_rating = self.cinder_class(rating) + cinder_rating = self.CinderClass(rating) message = '@my own words!' encoded_message = cinder_rating.get_str(message) abuse_report = AbuseReport.objects.create(rating=rating, message=message) @@ -1844,6 +2443,8 @@ def test_build_report_payload_with_author_and_reporter_being_the_same(self): 'message': encoded_message, 'reason': None, 'considers_illegal': False, + 'illegal_category': None, + 'illegal_subcategory': None, }, 'entity_type': 'amo_report', }, @@ -1883,7 +2484,7 @@ def test_build_report_payload_developer_reply(self): rating = Rating.objects.create( addon=self.addon, user=addon_author, reply_to=original_rating ) - cinder_rating = self.cinder_class(rating) + cinder_rating = self.CinderClass(rating) message = '-bad reply!' 
encoded_message = cinder_rating.get_str(message) abuse_report = AbuseReport.objects.create(rating=rating, message=message) @@ -1930,6 +2531,8 @@ def test_build_report_payload_developer_reply(self): 'message': encoded_message, 'reason': None, 'considers_illegal': False, + 'illegal_category': None, + 'illegal_subcategory': None, }, 'entity_type': 'amo_report', }, @@ -1962,7 +2565,7 @@ def test_build_report_payload_developer_reply(self): class TestCinderCollection(BaseTestCinderCase, TestCase): - cinder_class = CinderCollection + CinderClass = CinderCollection expected_queries_for_report = 1 # For the author expected_queue_suffix = 'collections' @@ -1986,7 +2589,7 @@ def _create_dummy_target(self, **kwargs): def test_build_report_payload(self): collection = self._create_dummy_target() - cinder_collection = self.cinder_class(collection) + cinder_collection = self.CinderClass(collection) message = '@bad collection!' encoded_message = cinder_collection.get_str(message) abuse_report = AbuseReport.objects.create( @@ -2017,6 +2620,8 @@ def test_build_report_payload(self): 'message': encoded_message, 'reason': None, 'considers_illegal': False, + 'illegal_category': None, + 'illegal_subcategory': None, }, 'entity_type': 'amo_report', }, @@ -2054,7 +2659,7 @@ def test_build_report_payload(self): def test_build_report_payload_with_author_and_reporter_being_the_same(self): collection = self._create_dummy_target() - cinder_collection = self.cinder_class(collection) + cinder_collection = self.CinderClass(collection) user = collection.author message = '=Collect me!' 
encoded_message = cinder_collection.get_str(message) @@ -2086,6 +2691,8 @@ def test_build_report_payload_with_author_and_reporter_being_the_same(self): 'message': encoded_message, 'reason': None, 'considers_illegal': False, + 'illegal_category': None, + 'illegal_subcategory': None, }, 'entity_type': 'amo_report', }, @@ -2130,41 +2737,47 @@ def test_build_report_payload_with_author_and_reporter_being_the_same(self): class TestCinderReport(TestCase): - cinder_class = CinderReport + CinderClass = CinderReport def test_reason_in_attributes(self): abuse_report = AbuseReport.objects.create( guid=addon_factory().guid, reason=AbuseReport.REASONS.POLICY_VIOLATION, ) - assert self.cinder_class(abuse_report).get_attributes() == { + assert self.CinderClass(abuse_report).get_attributes() == { 'id': str(abuse_report.pk), 'created': str(abuse_report.created), 'locale': None, 'message': '', 'reason': "DSA: It violates Mozilla's Add-on Policies", 'considers_illegal': False, + 'illegal_category': None, + 'illegal_subcategory': None, } def test_locale_in_attributes(self): abuse_report = AbuseReport.objects.create( guid=addon_factory().guid, application_locale='en_US' ) - assert self.cinder_class(abuse_report).get_attributes() == { + assert self.CinderClass(abuse_report).get_attributes() == { 'id': str(abuse_report.pk), 'created': str(abuse_report.created), 'locale': 'en_US', 'message': '', 'reason': None, 'considers_illegal': False, + 'illegal_category': None, + 'illegal_subcategory': None, } def test_considers_illegal(self): abuse_report = AbuseReport.objects.create( guid=addon_factory().guid, reason=AbuseReport.REASONS.ILLEGAL, + illegal_category=ILLEGAL_CATEGORIES.ANIMAL_WELFARE, + illegal_subcategory=ILLEGAL_SUBCATEGORIES.OTHER, ) - assert self.cinder_class(abuse_report).get_attributes() == { + assert self.CinderClass(abuse_report).get_attributes() == { 'id': str(abuse_report.pk), 'created': str(abuse_report.created), 'locale': None, @@ -2173,4 +2786,6 @@ def 
test_considers_illegal(self): 'DSA: It violates the law or contains content that violates the law' ), 'considers_illegal': True, + 'illegal_category': 'STATEMENT_CATEGORY_ANIMAL_WELFARE', + 'illegal_subcategory': 'KEYWORD_OTHER', } diff --git a/src/olympia/abuse/tests/test_commands.py b/src/olympia/abuse/tests/test_commands.py index 293d1a1ea06c..268e434b7f95 100644 --- a/src/olympia/abuse/tests/test_commands.py +++ b/src/olympia/abuse/tests/test_commands.py @@ -1,106 +1,79 @@ +from django.conf import settings from django.core.management import call_command -from olympia.abuse.models import AbuseReport, CinderJob -from olympia.amo.tests import TestCase, addon_factory, user_factory - - -class TestFillCinderJob(TestCase): - def test_fill_somehow_no_abuse_reports(self): - job = CinderJob.objects.create(job_id='job123') - - call_command('fill_cinderjobs_denormalized_fields') - - job.reload() - assert not job.target_addon - assert not job.resolvable_in_reviewer_tools - - def test_fill_addon(self): - addon_factory() # Extra add-on, shouldn't matter. - addon = addon_factory() - job = CinderJob.objects.create(job_id='job123') - report = AbuseReport.objects.create(guid=addon.guid, cinder_job=job) - - call_command('fill_cinderjobs_denormalized_fields') - - job.reload() - assert job.target_addon == report.target == addon - assert not job.resolvable_in_reviewer_tools - - def test_fill_appealed_job(self): - addon_factory() # Extra add-on, shouldn't matter. 
- addon = addon_factory() - job = CinderJob.objects.create( - job_id='job123', appeal_job=CinderJob.objects.create(job_id='appeal123') - ) - report = AbuseReport.objects.create(guid=addon.guid, cinder_job=job) - - call_command('fill_cinderjobs_denormalized_fields') - - job.reload() - assert job.target_addon == report.target == addon - assert not job.resolvable_in_reviewer_tools - job.appeal_job.reload() - assert job.appeal_job.target_addon == report.target == addon - assert not job.appeal_job.resolvable_in_reviewer_tools - - def test_fill_non_addon(self): - user = user_factory() - job = CinderJob.objects.create(job_id='job123') - AbuseReport.objects.create(user=user, cinder_job=job) - - call_command('fill_cinderjobs_denormalized_fields') - - job.reload() - assert job.target_addon is None - assert not job.resolvable_in_reviewer_tools - - def test_fill_resolvable_in_reviewer_tools(self): - addon_factory() # Extra add-on, shouldn't matter. - addon = addon_factory() - job = CinderJob.objects.create(job_id='job123') - report = AbuseReport.objects.create( - guid=addon.guid, - cinder_job=job, - location=AbuseReport.LOCATION.BOTH, - reason=AbuseReport.REASONS.POLICY_VIOLATION, - ) - - call_command('fill_cinderjobs_denormalized_fields') - - job.reload() - assert job.target_addon == report.target == addon - assert job.resolvable_in_reviewer_tools - - def test_fill_not_resolvable_in_reviewer_tools(self): - addon_factory() # Extra add-on, shouldn't matter. - addon = addon_factory() - job = CinderJob.objects.create(job_id='job123') - # Location makes it not resolvable in reviewer tools unless escalated - # even though the reason is policy violation. 
- report = AbuseReport.objects.create( - guid=addon.guid, - cinder_job=job, - location=AbuseReport.LOCATION.AMO, - reason=AbuseReport.REASONS.POLICY_VIOLATION, - ) - - call_command('fill_cinderjobs_denormalized_fields') - - job.reload() - assert job.target_addon == report.target == addon - assert not job.resolvable_in_reviewer_tools - - def test_fill_escalated_addon(self): - addon_factory() # Extra add-on, shouldn't matter. - addon = addon_factory() - job = CinderJob.objects.create( - job_id='job123', - decision_action=CinderJob.DECISION_ACTIONS.AMO_ESCALATE_ADDON, - ) - report = AbuseReport.objects.create(guid=addon.guid, cinder_job=job) - - call_command('fill_cinderjobs_denormalized_fields') - - job.reload() - assert job.target_addon == report.target == addon - assert job.resolvable_in_reviewer_tools +import pytest +import responses + +from olympia.amo.tests import addon_factory +from olympia.constants.abuse import DECISION_ACTIONS + +from ..models import AbuseReport, CinderJob, ContentDecision + + +@pytest.mark.django_db +def test_backfill_cinder_escalations(): + addon = addon_factory() + job_with_reports = CinderJob.objects.create( + job_id='1', + decision=ContentDecision.objects.create( + action=DECISION_ACTIONS.AMO_ESCALATE_ADDON, addon=addon + ), + ) + abuse = AbuseReport.objects.create(guid=addon.guid, cinder_job=job_with_reports) + appeal_job = CinderJob.objects.create( + job_id='2', + decision=ContentDecision.objects.create( + action=DECISION_ACTIONS.AMO_ESCALATE_ADDON, addon=addon + ), + ) + appealled_decision = ContentDecision.objects.create( + action=DECISION_ACTIONS.AMO_DISABLE_ADDON, addon=addon, appeal_job=appeal_job + ) + + # And some jobs/decisions that should be skipped: + # decision that wasn't an escalation (or isn't any longer) + CinderJob.objects.create( + job_id='3', + decision=ContentDecision.objects.create( + action=DECISION_ACTIONS.AMO_APPROVE, addon=addon + ), + ) + # decision without an associated cinder job (shouldn't occur, but its 
handled) + ContentDecision.objects.create( + action=DECISION_ACTIONS.AMO_ESCALATE_ADDON, addon=addon + ) + # decision that already has a forwarded job created, so we don't need to backfill + CinderJob.objects.create( + job_id='4', + decision=ContentDecision.objects.create( + action=DECISION_ACTIONS.AMO_ESCALATE_ADDON, addon=addon + ), + forwarded_to_job=CinderJob.objects.create(job_id='5'), + ) + assert CinderJob.objects.count() == 5 + assert ContentDecision.objects.count() == 6 + responses.add( + responses.POST, + f'{settings.CINDER_SERVER_URL}create_report', + json={'job_id': '6'}, + status=201, + ) + responses.add( + responses.POST, + f'{settings.CINDER_SERVER_URL}create_report', + json={'job_id': '7'}, + status=201, + ) + + call_command('backfill_cinder_escalations') + assert CinderJob.objects.count() == 7 + assert ContentDecision.objects.count() == 6 + + new_job_with_reports = job_with_reports.reload().forwarded_to_job + assert new_job_with_reports + assert new_job_with_reports.resolvable_in_reviewer_tools is True + assert abuse.reload().cinder_job == new_job_with_reports + new_appeal_job = appeal_job.reload().forwarded_to_job + assert new_appeal_job + assert new_appeal_job.resolvable_in_reviewer_tools is True + assert appealled_decision.reload().appeal_job == new_appeal_job diff --git a/src/olympia/abuse/tests/test_models.py b/src/olympia/abuse/tests/test_models.py index ab0fe413daa7..24f7e602aab2 100644 --- a/src/olympia/abuse/tests/test_models.py +++ b/src/olympia/abuse/tests/test_models.py @@ -1,22 +1,51 @@ import json +import uuid from datetime import datetime from unittest import mock +from urllib import parse from django.conf import settings from django.core import mail from django.core.exceptions import ImproperlyConfigured, ValidationError +from django.core.files.base import ContentFile from django.db.utils import IntegrityError +from django.urls import reverse import pytest import responses +from waffle.testutils import override_switch from olympia 
import amo -from olympia.activity.models import ActivityLog -from olympia.amo.tests import TestCase, addon_factory, collection_factory, user_factory -from olympia.constants.abuse import APPEAL_EXPIRATION_DAYS +from olympia.activity.models import ActivityLog, AttachmentLog +from olympia.addons.models import Addon +from olympia.amo.tests import ( + TestCase, + addon_factory, + collection_factory, + user_factory, + version_factory, + version_review_flags_factory, +) +from olympia.constants.abuse import ( + APPEAL_EXPIRATION_DAYS, + DECISION_ACTIONS, + ILLEGAL_CATEGORIES, + ILLEGAL_SUBCATEGORIES, +) +from olympia.constants.promoted import RECOMMENDED +from olympia.core import set_user from olympia.ratings.models import Rating -from olympia.reviewers.models import ReviewActionReason - +from olympia.reviewers.models import NeedsHumanReview +from olympia.versions.models import Version, VersionReviewerFlags + +from ..actions import ( + ContentActionBanUser, + ContentActionDeleteCollection, + ContentActionDeleteRating, + ContentActionOverrideApprove, + ContentActionTargetAppealApprove, + ContentActionTargetAppealRemovalAffirmation, +) from ..cinder import ( CinderAddon, CinderAddonHandledByReviewers, @@ -25,22 +54,18 @@ CinderUnauthenticatedReporter, CinderUser, ) -from ..models import AbuseReport, CinderJob, CinderPolicy -from ..utils import ( - CinderActionApproveInitialDecision, - CinderActionBanUser, - CinderActionDeleteCollection, - CinderActionDeleteRating, - CinderActionDisableAddon, - CinderActionEscalateAddon, - CinderActionNotImplemented, - CinderActionOverrideApprove, - CinderActionTargetAppealApprove, - CinderActionTargetAppealRemovalAffirmation, +from ..models import ( + AbuseReport, + AbuseReportManager, + CinderAppeal, + CinderJob, + CinderPolicy, + CinderQueueMove, + ContentDecision, ) -class TestAbuse(TestCase): +class TestAbuseReport(TestCase): fixtures = ['base/addon_3615', 'base/user_999'] def test_choices(self): @@ -58,6 +83,7 @@ def test_choices(self): 
(10, 'Signed'), (11, 'System'), (12, 'Privileged'), + (13, 'Not required'), ) assert AbuseReport.ADDON_SIGNATURES.api_choices == ( (None, None), @@ -73,6 +99,7 @@ def test_choices(self): (10, 'signed'), (11, 'system'), (12, 'privileged'), + (13, 'not_required'), ) assert AbuseReport.REASONS.choices == ( @@ -248,6 +275,168 @@ def test_choices(self): (3, 'both'), ) + assert ILLEGAL_CATEGORIES.choices == ( + (None, 'None'), + (1, 'Animal welfare'), + (2, 'Consumer information infringements'), + (3, 'Data protection and privacy violations'), + (4, 'Illegal or harmful speech'), + (5, 'Intellectual property infringements'), + (6, 'Negative effects on civic discourse or elections'), + (7, 'Non-consensual behavior'), + (8, 'Pornography or sexualized content'), + (9, 'Protection of minors'), + (10, 'Risk for public security'), + (11, 'Scams or fraud'), + (12, 'Self-harm'), + (13, 'Unsafe, non-compliant, or prohibited products'), + (14, 'Violence'), + (15, 'Other'), + ) + assert ILLEGAL_CATEGORIES.api_choices == ( + (None, None), + (1, 'animal_welfare'), + (2, 'consumer_information'), + (3, 'data_protection_and_privacy_violations'), + (4, 'illegal_or_harmful_speech'), + (5, 'intellectual_property_infringements'), + (6, 'negative_effects_on_civic_discourse_or_elections'), + (7, 'non_consensual_behaviour'), + (8, 'pornography_or_sexualized_content'), + (9, 'protection_of_minors'), + (10, 'risk_for_public_security'), + (11, 'scams_and_fraud'), + (12, 'self_harm'), + (13, 'unsafe_and_prohibited_products'), + (14, 'violence'), + (15, 'other'), + ) + + assert ILLEGAL_SUBCATEGORIES.choices == ( + (None, 'None'), + (1, 'Something else'), + (2, 'Insufficient information on traders'), + (3, 'Non-compliance with pricing regulations'), + ( + 4, + 'Hidden advertisement or commercial communication, including ' + 'by influencers', + ), + ( + 5, + 'Misleading information about the characteristics of the goods ' + 'and services', + ), + (6, 'Misleading information about the consumer’s 
rights'), + (7, 'Biometric data breach'), + (8, 'Missing processing ground for data'), + (9, 'Right to be forgotten'), + (10, 'Data falsification'), + (11, 'Defamation'), + (12, 'Discrimination'), + ( + 13, + 'Illegal incitement to violence and hatred based on protected ' + 'characteristics (hate speech)', + ), + (14, 'Design infringements'), + (15, 'Geographical indications infringements'), + (16, 'Patent infringements'), + (17, 'Trade secret infringements'), + (18, 'Violation of EU law relevant to civic discourse or elections'), + (19, 'Violation of national law relevant to civic discourse or elections'), + ( + 20, + 'Misinformation, disinformation, foreign information manipulation ' + 'and interference', + ), + (21, 'Non-consensual image sharing'), + ( + 22, + 'Non-consensual items containing deepfake or similar technology ' + "using a third party's features", + ), + (23, 'Online bullying/intimidation'), + (24, 'Stalking'), + (25, 'Adult sexual material'), + (26, 'Image-based sexual abuse (excluding content depicting minors)'), + (27, 'Age-specific restrictions concerning minors'), + (28, 'Child sexual abuse material'), + (29, 'Grooming/sexual enticement of minors'), + (30, 'Illegal organizations'), + (31, 'Risk for environmental damage'), + (32, 'Risk for public health'), + (33, 'Terrorist content'), + (34, 'Inauthentic accounts'), + (35, 'Inauthentic listings'), + (36, 'Inauthentic user reviews'), + (37, 'Impersonation or account hijacking'), + (38, 'Phishing'), + (39, 'Pyramid schemes'), + (40, 'Content promoting eating disorders'), + (41, 'Self-mutilation'), + (42, 'Suicide'), + (43, 'Prohibited or restricted products'), + (44, 'Unsafe or non-compliant products'), + (45, 'Coordinated harm'), + (46, 'Gender-based violence'), + (47, 'Human exploitation'), + (48, 'Human trafficking'), + (49, 'General calls or incitement to violence and/or hatred'), + ) + assert ILLEGAL_SUBCATEGORIES.api_choices == ( + (None, None), + (1, 'other'), + (2, 
'insufficient_information_on_traders'), + (3, 'noncompliance_pricing'), + (4, 'hidden_advertisement'), + (5, 'misleading_info_goods_services'), + (6, 'misleading_info_consumer_rights'), + (7, 'biometric_data_breach'), + (8, 'missing_processing_ground'), + (9, 'right_to_be_forgotten'), + (10, 'data_falsification'), + (11, 'defamation'), + (12, 'discrimination'), + (13, 'hate_speech'), + (14, 'design_infringement'), + (15, 'geographic_indications_infringement'), + (16, 'patent_infringement'), + (17, 'trade_secret_infringement'), + (18, 'violation_eu_law'), + (19, 'violation_national_law'), + (20, 'misinformation_disinformation_disinformation'), + (21, 'non_consensual_image_sharing'), + (22, 'non_consensual_items_deepfake'), + (23, 'online_bullying_intimidation'), + (24, 'stalking'), + (25, 'adult_sexual_material'), + (26, 'image_based_sexual_abuse'), + (27, 'age_specific_restrictions_minors'), + (28, 'child_sexual_abuse_material'), + (29, 'grooming_sexual_enticement_minors'), + (30, 'illegal_organizations'), + (31, 'risk_environmental_damage'), + (32, 'risk_public_health'), + (33, 'terrorist_content'), + (34, 'inauthentic_accounts'), + (35, 'inauthentic_listings'), + (36, 'inauthentic_user_reviews'), + (37, 'impersonation_account_hijacking'), + (38, 'phishing'), + (39, 'pyramid_schemes'), + (40, 'content_promoting_eating_disorders'), + (41, 'self_mutilation'), + (42, 'suicide'), + (43, 'prohibited_products'), + (44, 'unsafe_products'), + (45, 'coordinated_harm'), + (46, 'gender_based_violence'), + (47, 'human_exploitation'), + (48, 'human_trafficking'), + (49, 'incitement_violence_hatred'), + ) + def test_type(self): addon = addon_factory(guid='@lol') report = AbuseReport.objects.create(guid=addon.guid) @@ -277,7 +466,7 @@ def test_target(self): assert report.target is None addon = addon_factory(guid='@lol') - del report._target_addon + del report.addon assert report.target == addon user = user_factory() @@ -292,6 +481,52 @@ def test_target(self): 
report.update(rating=None, collection=collection) assert report.target == collection + def test_is_individually_actionable(self): + report = AbuseReport.objects.create( + guid='@lol', reason=AbuseReport.REASONS.HATEFUL_VIOLENT_DECEPTIVE + ) + assert report.is_individually_actionable is False + addon = addon_factory(guid='@lol') + user = user_factory() + for target in ( + {'guid': addon.guid}, + {'user': user}, + {'rating': Rating.objects.create(user=user, addon=addon, rating=5)}, + {'collection': collection_factory()}, + ): + report.update( + reason=AbuseReport.REASONS.FEEDBACK_SPAM, + **{ + 'guid': None, + 'user': None, + 'rating': None, + 'collection': None, + **target, + }, + ) + assert report.is_individually_actionable is False + report.update(reason=AbuseReport.REASONS.HATEFUL_VIOLENT_DECEPTIVE) + assert report.is_individually_actionable is True + + report.update( + guid=addon.guid, + user=None, + rating=None, + collection=None, + addon_version=addon.current_version.version, + ) + assert report.is_individually_actionable is True + + self.make_addon_unlisted(addon) + assert report.is_individually_actionable is False + + self.make_addon_listed(addon) + Version.objects.get(version=report.addon_version).delete() + assert report.is_individually_actionable is True + + Version.unfiltered.get(version=report.addon_version).delete(hard=True) + assert report.is_individually_actionable is False + def test_is_handled_by_reviewers(self): addon = addon_factory() abuse_report = AbuseReport.objects.create( @@ -342,8 +577,16 @@ def test_constraint(self): report.user_id = None constraint.validate(AbuseReport, report) + def test_illegal_category_cinder_value_no_illegal_category(self): + report = AbuseReport() + assert not report.illegal_category_cinder_value + + def test_illegal_subcategory_cinder_value_no_illegal_subcategory(self): + report = AbuseReport() + assert not report.illegal_subcategory_cinder_value + -class TestAbuseManager(TestCase): +class 
TestAbuseReportManager(TestCase): def test_for_addon_finds_by_author(self): addon = addon_factory(users=[user_factory()]) report = AbuseReport.objects.create(user=addon.listed_authors[0]) @@ -360,6 +603,86 @@ def test_for_addon_finds_by_original_guid(self): report = AbuseReport.objects.create(guid='foo@bar') assert list(AbuseReport.objects.for_addon(addon)) == [report] + def test_is_individually_actionable_q(self): + actionable_reason = AbuseReport.REASONS.HATEFUL_VIOLENT_DECEPTIVE + user = user_factory() + addon = addon_factory(guid='@lol') + addon_report = AbuseReport.objects.create( + guid=addon.guid, reason=actionable_reason + ) + user_report = AbuseReport.objects.create(user=user, reason=actionable_reason) + collection_report = AbuseReport.objects.create( + collection=collection_factory(), + reason=actionable_reason, + ) + rating_report = AbuseReport.objects.create( + rating=Rating.objects.create(user=user, addon=addon, rating=5), + reason=actionable_reason, + ) + listed_version_report = AbuseReport.objects.create( + guid=addon.guid, + addon_version=addon.current_version.version, + reason=actionable_reason, + ) + listed_deleted_version_report = AbuseReport.objects.create( + guid=addon.guid, + addon_version=version_factory(addon=addon, deleted=True).version, + reason=actionable_reason, + ) + + # some reports that aren't individually actionable: + # non-actionable reason + AbuseReport.objects.create( + guid=addon.guid, reason=AbuseReport.REASONS.FEEDBACK_SPAM + ) + AbuseReport.objects.create(user=user, reason=AbuseReport.REASONS.FEEDBACK_SPAM) + AbuseReport.objects.create( + collection=collection_factory(), reason=AbuseReport.REASONS.FEEDBACK_SPAM + ) + AbuseReport.objects.create( + rating=Rating.objects.create(user=user, addon=addon, rating=5), + reason=AbuseReport.REASONS.FEEDBACK_SPAM, + ) + # guid doesn't exist + AbuseReport.objects.create(guid='dfdf', reason=actionable_reason) + # unlisted version + AbuseReport.objects.create( + guid=addon.guid, + 
addon_version=version_factory( + addon=addon, channel=amo.CHANNEL_UNLISTED + ).version, + reason=actionable_reason, + ) + # invalid version + AbuseReport.objects.create( + guid=addon.guid, + addon_version='123456', + reason=actionable_reason, + ) + # no version specified for addon with only unlisted versions + AbuseReport.objects.create( + guid=addon_factory(version_kw={'channel': amo.CHANNEL_UNLISTED}).guid, + reason=actionable_reason, + ) + # no version specified for addon with no public versions + AbuseReport.objects.create( + guid=addon_factory(file_kw={'status': amo.STATUS_DISABLED}).guid, + reason=actionable_reason, + ) + + assert set( + AbuseReport.objects.filter( + AbuseReportManager.is_individually_actionable_q() + ) + ) == { + addon_report, + collection_report, + user_report, + rating_report, + listed_version_report, + listed_deleted_version_report, + } + class TestCinderJobManager(TestCase): def test_for_addon(self): @@ -374,7 +697,11 @@ def test_for_addon_appealed(self): appeal_job = CinderJob.objects.create(job_id='appeal', target_addon=addon) original_job = CinderJob.objects.create( job_id='original', - appeal_job=appeal_job, + decision=ContentDecision.objects.create( + appeal_job=appeal_job, + addon=addon, + action=DECISION_ACTIONS.AMO_DISABLE_ADDON, + ), target_addon=addon, ) AbuseReport.objects.create(guid=addon.guid, cinder_job=original_job) @@ -382,15 +709,14 @@ def test_for_addon_appealed(self): def test_unresolved(self): job = CinderJob.objects.create(job_id='1') - AbuseReport.objects.create(guid='3456', cinder_job=job) - AbuseReport.objects.create( - guid='5678', - cinder_job=CinderJob.objects.create( - job_id='2', decision_action=CinderJob.DECISION_ACTIONS.AMO_DISABLE_ADDON + addon = addon_factory() + CinderJob.objects.create( + job_id='2', + decision=ContentDecision.objects.create( + action=DECISION_ACTIONS.AMO_DISABLE_ADDON, addon=addon ), ) qs = CinderJob.objects.unresolved() - assert len(qs) == 1 assert list(qs) == [job] def 
test_reviewer_handled(self): @@ -400,7 +726,12 @@ def test_reviewer_handled(self): location=AbuseReport.LOCATION.BOTH, cinder_job=CinderJob.objects.create(job_id=1), ) - job = CinderJob.objects.create(job_id=2) + job = CinderJob.objects.create( + job_id=2, + decision=ContentDecision.objects.create( + action=DECISION_ACTIONS.AMO_DISABLE_ADDON, addon=addon_factory() + ), + ) AbuseReport.objects.create( guid=addon_factory().guid, reason=AbuseReport.REASONS.POLICY_VIOLATION, @@ -422,131 +753,94 @@ def test_reviewer_handled(self): appeal_job = CinderJob.objects.create( job_id=4, resolvable_in_reviewer_tools=True ) - job.update(appeal_job=appeal_job) + job.decision.update(appeal_job=appeal_job) qs = CinderJob.objects.resolvable_in_reviewer_tools() assert list(qs) == [job, appeal_job] - not_policy_report.cinder_job.update( - decision_action=CinderJob.DECISION_ACTIONS.AMO_ESCALATE_ADDON, - resolvable_in_reviewer_tools=True, - ) + not_policy_report.cinder_job.update(resolvable_in_reviewer_tools=True) + CinderJob.objects.create(forwarded_to_job=not_policy_report.cinder_job) qs = CinderJob.objects.resolvable_in_reviewer_tools() assert list(qs) == [not_policy_report.cinder_job, job, appeal_job] class TestCinderJob(TestCase): + def setUp(self): + user_factory(id=settings.TASK_USER_ID) + def test_target(self): cinder_job = CinderJob.objects.create(job_id='1234') - # edge case, but handle having no associated abuse_reports + # edge case, but handle having no associated abuse_reports, decisions or appeals assert cinder_job.target is None + # case when CinderJob.target_addon is set addon = addon_factory() - abuse_report = AbuseReport.objects.create( - guid=addon.guid, - reason=AbuseReport.REASONS.ILLEGAL, - location=AbuseReport.LOCATION.BOTH, - cinder_job=cinder_job, + cinder_job.update(target_addon=addon) + assert cinder_job.target_addon == cinder_job.target == addon + + # case when there is already a decision + cinder_job.update( + target_addon=None, + 
decision=ContentDecision.objects.create( + action=DECISION_ACTIONS.AMO_APPROVE, addon=addon + ), ) - assert cinder_job.target == abuse_report.target == addon + assert cinder_job.decision.target == cinder_job.target == addon + # case when this is an appeal job (no decision), but the appeal had a decision appeal_job = CinderJob.objects.create(job_id='fake_appeal_job_id') - cinder_job.update(appeal_job=appeal_job) - assert appeal_job.target == cinder_job.target == addon - - appeal_appeal_job = CinderJob.objects.create(job_id='fake_appeal_appeal_job_id') - appeal_job.update(appeal_job=appeal_appeal_job) - assert ( - appeal_appeal_job.target == appeal_job.target == cinder_job.target == addon - ) - - def test_target_addon(self): - addon = addon_factory() - cinder_job = CinderJob.objects.create(job_id='1234', target_addon=addon) - assert cinder_job.target == addon - - def test_initial_abuse_report(self): - cinder_job = CinderJob.objects.create(job_id='1234') - assert cinder_job.initial_abuse_report is None + cinder_job.decision.update(appeal_job=appeal_job) + assert cinder_job.decision.target == appeal_job.target == addon - addon = addon_factory() + # case when there is no appeal, no decision yet, no target_addon, + # but an initial abuse report abuse_report = AbuseReport.objects.create( guid=addon.guid, reason=AbuseReport.REASONS.ILLEGAL, location=AbuseReport.LOCATION.BOTH, - cinder_job=cinder_job, - ) - assert cinder_job.initial_abuse_report == abuse_report - - appeal_job = CinderJob.objects.create(job_id='fake_appeal_job_id') - cinder_job.update(appeal_job=appeal_job) - assert ( - appeal_job.initial_abuse_report - == cinder_job.initial_abuse_report - == abuse_report - ) - - appeal_appeal_job = CinderJob.objects.create(job_id='fake_appeal_appeal_job_id') - appeal_job.update(appeal_job=appeal_appeal_job) - assert ( - appeal_appeal_job.initial_abuse_report - == appeal_job.initial_abuse_report - == cinder_job.initial_abuse_report - == abuse_report + 
cinder_job=CinderJob.objects.create(job_id='from abuse report'), ) + assert abuse_report.target == abuse_report.cinder_job.target == addon def test_get_entity_helper(self): addon = addon_factory() user = user_factory() - abuse_report = AbuseReport.objects.create( - guid=addon.guid, - reason=AbuseReport.REASONS.ILLEGAL, - location=AbuseReport.LOCATION.BOTH, - ) - helper = CinderJob.get_entity_helper(abuse_report) - # location is in REVIEWER_HANDLED (BOTH) but reason is not (ILLEGAL) + helper = CinderJob.get_entity_helper(addon, resolved_in_reviewer_tools=False) + # e.g. location is in REVIEWER_HANDLED (BOTH) but reason is not (ILLEGAL) assert isinstance(helper, CinderAddon) assert not isinstance(helper, CinderAddonHandledByReviewers) assert helper.addon == addon - assert helper.version is None - abuse_report.update(reason=AbuseReport.REASONS.POLICY_VIOLATION) - helper = CinderJob.get_entity_helper(abuse_report) - # now reason is in REVIEWER_HANDLED it will be reported differently + helper = CinderJob.get_entity_helper(addon, resolved_in_reviewer_tools=True) + # if now reason is in REVIEWER_HANDLED it will be reported differently assert isinstance(helper, CinderAddon) assert isinstance(helper, CinderAddonHandledByReviewers) assert helper.addon == addon - assert helper.version is None + assert helper.version_string is None - abuse_report.update(addon_version=addon.current_version.version) - helper = CinderJob.get_entity_helper(abuse_report) + helper = CinderJob.get_entity_helper( + addon, + resolved_in_reviewer_tools=True, + addon_version_string=addon.current_version.version, + ) # if we got a version too we pass it on to the helper assert isinstance(helper, CinderAddon) assert isinstance(helper, CinderAddonHandledByReviewers) assert helper.addon == addon - assert helper.version == addon.current_version - - abuse_report.update(location=AbuseReport.LOCATION.AMO) - helper = CinderJob.get_entity_helper(abuse_report) - # but not if the location is not in REVIEWER_HANDLED 
(i.e. AMO) - assert isinstance(helper, CinderAddon) - assert not isinstance(helper, CinderAddonHandledByReviewers) - assert helper.addon == addon - assert helper.version == addon.current_version + assert helper.version_string == addon.current_version.version - abuse_report.update(guid=None, user=user, addon_version=None) - helper = CinderJob.get_entity_helper(abuse_report) + helper = CinderJob.get_entity_helper(user, resolved_in_reviewer_tools=False) assert isinstance(helper, CinderUser) assert helper.user == user rating = Rating.objects.create(addon=addon, user=user, rating=4) - abuse_report.update(user=None, rating=rating) - helper = CinderJob.get_entity_helper(abuse_report) + helper = CinderJob.get_entity_helper(rating, resolved_in_reviewer_tools=False) assert isinstance(helper, CinderRating) assert helper.rating == rating collection = collection_factory() - abuse_report.update(rating=None, collection=collection) - helper = CinderJob.get_entity_helper(abuse_report) + helper = CinderJob.get_entity_helper( + collection, resolved_in_reviewer_tools=False + ) assert isinstance(helper, CinderCollection) assert helper.collection == collection @@ -569,8 +863,11 @@ def test_get_cinder_reporter(self): assert entity.user == authenticated_user def test_report(self): + addon = addon_factory() abuse_report = AbuseReport.objects.create( - guid=addon_factory().guid, reason=AbuseReport.REASONS.ILLEGAL + guid=addon.guid, + reason=AbuseReport.REASONS.ILLEGAL, + reporter_email='some@email.com', ) responses.add( responses.POST, @@ -590,7 +887,7 @@ def test_report(self): # And check if we get back the same job_id for a subsequent report we update another_report = AbuseReport.objects.create( - guid=addon_factory().guid, reason=AbuseReport.REASONS.ILLEGAL + guid=addon.guid, reason=AbuseReport.REASONS.ILLEGAL ) CinderJob.report(another_report) cinder_job.reload() @@ -599,6 +896,83 @@ def test_report(self): assert cinder_job.target_addon == abuse_report.target assert not 
cinder_job.resolvable_in_reviewer_tools + def check_report_with_already_removed_content(self, abuse_report): + responses.add( + responses.POST, + f'{settings.CINDER_SERVER_URL}create_decision', + json={'uuid': uuid.uuid4().hex}, + status=201, + ) + CinderJob.report(abuse_report) + assert not CinderJob.objects.exists() + assert len(mail.outbox) == 1 + assert mail.outbox[0].to == ['some@email.com'] + assert 'already been removed' in mail.outbox[0].body + assert ContentDecision.objects.exists() + decision = ContentDecision.objects.get() + assert decision.action == DECISION_ACTIONS.AMO_CLOSED_NO_ACTION + self.assertCloseToNow(decision.action_date) + + def test_report_with_disabled_addon(self): + addon = addon_factory() + abuse_report = AbuseReport.objects.create( + guid=addon.guid, + reason=AbuseReport.REASONS.ILLEGAL, + reporter_email='some@email.com', + ) + addon.update(status=amo.STATUS_DISABLED) + self.check_report_with_already_removed_content(abuse_report) + + def test_report_with_banned_user(self): + user = user_factory() + abuse_report = AbuseReport.objects.create( + user=user, + reason=AbuseReport.REASONS.ILLEGAL, + reporter_email='some@email.com', + ) + user.update(banned=datetime.now()) + self.check_report_with_already_removed_content(abuse_report) + + def test_report_with_deleted_collection(self): + collection = collection_factory() + abuse_report = AbuseReport.objects.create( + collection=collection, + reason=AbuseReport.REASONS.ILLEGAL, + reporter_email='some@email.com', + ) + collection.delete() + self.check_report_with_already_removed_content(abuse_report) + + def test_report_with_deleted_rating(self): + rating = Rating.objects.create(addon=addon_factory(), user=user_factory()) + abuse_report = AbuseReport.objects.create( + rating=rating, + reason=AbuseReport.REASONS.ILLEGAL, + reporter_email='some@email.com', + ) + rating.delete() + self.check_report_with_already_removed_content(abuse_report) + + def test_report_with_outstanding_rejection(self): + 
self.test_report() + assert len(mail.outbox) == 0 + addon = Addon.objects.get() + CinderJob.objects.get().update( + decision=ContentDecision.objects.create( + action=DECISION_ACTIONS.AMO_REJECT_VERSION_WARNING_ADDON, addon=addon + ) + ) + report_after_delayed_rejection = AbuseReport.objects.create( + guid=addon.guid, + reason=AbuseReport.REASONS.ILLEGAL, + reporter_email='email@domain.com', + ) + CinderJob.report(report_after_delayed_rejection) + assert CinderJob.objects.count() == 1 + + assert len(mail.outbox) == 1 + assert mail.outbox[0].to == ['email@domain.com'] + def test_report_resolvable_in_reviewer_tools(self): abuse_report = AbuseReport.objects.create( guid=addon_factory().guid, @@ -632,88 +1006,180 @@ def test_report_resolvable_in_reviewer_tools(self): assert cinder_job.target_addon == abuse_report.target assert cinder_job.resolvable_in_reviewer_tools - def test_get_action_helper(self): - DECISION_ACTIONS = CinderJob.DECISION_ACTIONS - cinder_job = CinderJob.objects.create(job_id='1234') - helper = cinder_job.get_action_helper() - assert helper.cinder_job == cinder_job - assert helper.__class__ == CinderActionNotImplemented - - action_to_class = ( - (DECISION_ACTIONS.AMO_BAN_USER, CinderActionBanUser), - (DECISION_ACTIONS.AMO_DISABLE_ADDON, CinderActionDisableAddon), - (DECISION_ACTIONS.AMO_ESCALATE_ADDON, CinderActionEscalateAddon), - (DECISION_ACTIONS.AMO_DELETE_COLLECTION, CinderActionDeleteCollection), - (DECISION_ACTIONS.AMO_DELETE_RATING, CinderActionDeleteRating), - (DECISION_ACTIONS.AMO_APPROVE, CinderActionApproveInitialDecision), + def test_handle_job_recreated(self): + addon = addon_factory() + decision = ContentDecision.objects.create( + action=DECISION_ACTIONS.AMO_ESCALATE_ADDON, addon=addon, notes='blah' ) - action_existing_to_class = { - (new_action, existing_action): ActionClass - for new_action, ActionClass in action_to_class - for existing_action in DECISION_ACTIONS.values - } - for action in DECISION_ACTIONS.REMOVING.values: - 
action_existing_to_class[ - (DECISION_ACTIONS.AMO_APPROVE, action) - ] = CinderActionTargetAppealApprove - action_existing_to_class[ - (action, action) - ] = CinderActionTargetAppealRemovalAffirmation + job = CinderJob.objects.create( + job_id='1234', target_addon=addon, decision=decision + ) + report = AbuseReport.objects.create(guid=addon.guid, cinder_job=job) + assert not job.resolvable_in_reviewer_tools - for ( - new_action, - existing_action, - ), ActionClass in action_existing_to_class.items(): - cinder_job.update(decision_action=new_action) - helper = cinder_job.get_action_helper(existing_action) - assert helper.__class__ == ActionClass - assert helper.cinder_job == cinder_job + job.handle_job_recreated(new_job_id='5678') - # and repeat for the override edge case - for action in DECISION_ACTIONS.REMOVING.values: - action_existing_to_class[ - (DECISION_ACTIONS.AMO_APPROVE, action) - ] = CinderActionOverrideApprove - action_existing_to_class[(action, action)] = CinderActionNotImplemented + job.reload() + new_job = job.forwarded_to_job + assert new_job.job_id == '5678' + assert list(new_job.forwarded_from_jobs.all()) == [job] + assert new_job.resolvable_in_reviewer_tools + assert new_job.target_addon == addon + assert report.reload().cinder_job == new_job + + def test_handle_job_recreated_existing_forwarded_job(self): + addon = addon_factory() + exisiting_escalation_job = CinderJob.objects.create( + job_id='5678', target_addon=addon + ) + other_forwarded_job = CinderJob.objects.create( + job_id='9999', target_addon=addon, forwarded_to_job=exisiting_escalation_job + ) - for ( - new_action, - existing_action, - ), ActionClass in action_existing_to_class.items(): - cinder_job.update(decision_action=new_action) - helper = cinder_job.get_action_helper(existing_action, override=True) - assert helper.__class__ == ActionClass + decision = ContentDecision.objects.create( + action=DECISION_ACTIONS.AMO_ESCALATE_ADDON, addon=addon, notes='blah' + ) + old_job = 
CinderJob.objects.create( + job_id='1234', target_addon=addon, decision=decision + ) + report = AbuseReport.objects.create(guid=addon.guid, cinder_job=old_job) + NeedsHumanReview.objects.create( + version=addon.current_version, + reason=NeedsHumanReview.REASONS.ABUSE_ADDON_VIOLATION, + ) + NeedsHumanReview.objects.create( + version=addon.current_version, + reason=NeedsHumanReview.REASONS.CINDER_ESCALATION, + ) + + old_job.handle_job_recreated(new_job_id='5678') + + old_job.reload() + exisiting_escalation_job.reload() + assert old_job.forwarded_to_job == exisiting_escalation_job + assert list(exisiting_escalation_job.forwarded_from_jobs.all()) == [ + other_forwarded_job, + old_job, + ] + assert list(exisiting_escalation_job.abusereport_set.all()) == [report] + assert report.reload().cinder_job == exisiting_escalation_job + assert NeedsHumanReview.objects.filter( + is_active=True, reason=NeedsHumanReview.REASONS.ABUSE_ADDON_VIOLATION + ).exists() # it's not cleared + assert NeedsHumanReview.objects.filter( + is_active=True, reason=NeedsHumanReview.REASONS.CINDER_ESCALATION + ).exists() # and neither is the CINDER_ESCALATION NHR + + def test_handle_job_recreated_existing_report_job(self): + addon = addon_factory() + exisiting_report_job = CinderJob.objects.create( + job_id='5678', target_addon=addon + ) + existing_report = AbuseReport.objects.create( + guid=addon.guid, cinder_job=exisiting_report_job + ) + + decision = ContentDecision.objects.create( + action=DECISION_ACTIONS.AMO_ESCALATE_ADDON, addon=addon, notes='blah' + ) + old_job = CinderJob.objects.create( + job_id='1234', target_addon=addon, decision=decision + ) + report = AbuseReport.objects.create(guid=addon.guid, cinder_job=old_job) + NeedsHumanReview.objects.create( + version=addon.current_version, + reason=NeedsHumanReview.REASONS.ABUSE_ADDON_VIOLATION, + ) + NeedsHumanReview.objects.create( + version=addon.current_version, + reason=NeedsHumanReview.REASONS.CINDER_ESCALATION, + ) + + 
old_job.handle_job_recreated(new_job_id='5678') + + old_job.reload() + exisiting_report_job.reload() + assert old_job.forwarded_to_job == exisiting_report_job + assert list(exisiting_report_job.forwarded_from_jobs.all()) == [old_job] + assert list(exisiting_report_job.abusereport_set.all()) == [ + existing_report, + report, + ] + assert report.reload().cinder_job == exisiting_report_job + assert not NeedsHumanReview.objects.filter( + is_active=True, + reason=NeedsHumanReview.REASONS.ABUSE_ADDON_VIOLATION, + ).exists() # it's cleared + assert NeedsHumanReview.objects.filter( + is_active=True, + reason=NeedsHumanReview.REASONS.CINDER_ESCALATION, + ).exists() # the CINDER_ESCALATION NHR isn't though + + def test_handle_job_recreated_appeal(self): + addon = addon_factory() + decision = ContentDecision.objects.create( + action=DECISION_ACTIONS.AMO_ESCALATE_ADDON, addon=addon, notes='blah' + ) + appeal_job = CinderJob.objects.create( + job_id='1234', target_addon=addon, decision=decision + ) + original_job = CinderJob.objects.create( + job_id='0000', + target_addon=addon, + decision=ContentDecision.objects.create( + action=DECISION_ACTIONS.AMO_APPROVE, + addon=addon, + notes='its okay', + appeal_job=appeal_job, + ), + ) + report = AbuseReport.objects.create(guid=addon.guid, cinder_job=original_job) + CinderAppeal.objects.create( + decision=original_job.decision, reporter_report=report + ) + assert not appeal_job.resolvable_in_reviewer_tools + + appeal_job.handle_job_recreated(new_job_id='5678') + + appeal_job.reload() + new_job = appeal_job.forwarded_to_job + assert new_job.job_id == '5678' + assert list(new_job.forwarded_from_jobs.all()) == [appeal_job] + assert new_job.resolvable_in_reviewer_tools + assert new_job.target_addon == addon + assert original_job.decision.reload().appeal_job == new_job def test_process_decision(self): cinder_job = CinderJob.objects.create(job_id='1234') - new_date = datetime(2023, 1, 1) + target = user_factory() + 
AbuseReport.objects.create(user=target, cinder_job=cinder_job) policy_a = CinderPolicy.objects.create(uuid='123-45', name='aaa', text='AAA') policy_b = CinderPolicy.objects.create(uuid='678-90', name='bbb', text='BBB') with mock.patch.object( - CinderActionBanUser, 'process_action' + ContentActionBanUser, 'process_action' ) as action_mock, mock.patch.object( - CinderActionBanUser, 'notify_owners' + ContentActionBanUser, 'notify_owners' ) as notify_mock: - action_mock.return_value = True + action_mock.return_value = (True, mock.Mock(id=999)) cinder_job.process_decision( - decision_id='12345', - decision_date=new_date, - decision_action=CinderJob.DECISION_ACTIONS.AMO_BAN_USER.value, + decision_cinder_id='12345', + decision_action=DECISION_ACTIONS.AMO_BAN_USER.value, decision_notes='teh notes', policy_ids=['123-45', '678-90'], ) - assert cinder_job.decision_id == '12345' - assert cinder_job.decision_date == new_date - assert cinder_job.decision_action == CinderJob.DECISION_ACTIONS.AMO_BAN_USER - assert cinder_job.decision_notes == 'teh notes' + assert cinder_job.decision.cinder_id == '12345' + assert cinder_job.decision.action == DECISION_ACTIONS.AMO_BAN_USER + assert cinder_job.decision.notes == 'teh notes' + assert cinder_job.decision.user == target assert action_mock.call_count == 1 assert notify_mock.call_count == 1 - assert list(cinder_job.policies.all()) == [policy_a, policy_b] + assert list(cinder_job.decision.policies.all()) == [policy_a, policy_b] def test_process_decision_with_duplicate_parent(self): cinder_job = CinderJob.objects.create(job_id='1234') - new_date = datetime(2023, 1, 1) + target = user_factory() + AbuseReport.objects.create(user=target, cinder_job=cinder_job) parent_policy = CinderPolicy.objects.create( uuid='678-90', name='bbb', text='BBB' ) @@ -722,205 +1188,210 @@ def test_process_decision_with_duplicate_parent(self): ) with mock.patch.object( - CinderActionBanUser, 'process_action' + ContentActionBanUser, 'process_action' ) as 
action_mock, mock.patch.object( - CinderActionBanUser, 'notify_owners' + ContentActionBanUser, 'notify_owners' ) as notify_mock: - action_mock.return_value = True + action_mock.return_value = (True, None) cinder_job.process_decision( - decision_id='12345', - decision_date=new_date, - decision_action=CinderJob.DECISION_ACTIONS.AMO_BAN_USER.value, + decision_cinder_id='12345', + decision_action=DECISION_ACTIONS.AMO_BAN_USER.value, decision_notes='teh notes', policy_ids=['123-45', '678-90'], ) - assert cinder_job.decision_id == '12345' - assert cinder_job.decision_date == new_date - assert cinder_job.decision_action == CinderJob.DECISION_ACTIONS.AMO_BAN_USER - assert cinder_job.decision_notes == 'teh notes' + assert cinder_job.decision.cinder_id == '12345' + assert cinder_job.decision.action == DECISION_ACTIONS.AMO_BAN_USER + assert cinder_job.decision.notes == 'teh notes' + assert cinder_job.decision.user == target assert action_mock.call_count == 1 assert notify_mock.call_count == 1 - assert list(cinder_job.policies.all()) == [policy] + assert list(cinder_job.decision.policies.all()) == [policy] - def test_process_decision_escalate_addon(self): + def test_process_decision_escalate_addon_action(self): addon = addon_factory() cinder_job = CinderJob.objects.create(job_id='1234', target_addon=addon) + report = AbuseReport.objects.create(guid=addon.guid, cinder_job=cinder_job) assert not cinder_job.resolvable_in_reviewer_tools - new_date = datetime(2024, 1, 1) + responses.add( + responses.POST, + f'{settings.CINDER_SERVER_URL}create_report', + json={'job_id': '5678'}, + status=201, + ) + cinder_job.process_decision( - decision_id='12345', - decision_date=new_date, - decision_action=CinderJob.DECISION_ACTIONS.AMO_ESCALATE_ADDON, + decision_cinder_id='12345', + decision_action=DECISION_ACTIONS.AMO_ESCALATE_ADDON, decision_notes='blah', policy_ids=[], ) - assert cinder_job.decision_id == '12345' - assert cinder_job.decision_date == new_date - assert ( - 
cinder_job.decision_action == CinderJob.DECISION_ACTIONS.AMO_ESCALATE_ADDON + cinder_job.reload() + assert cinder_job.decision + assert cinder_job.decision.action == DECISION_ACTIONS.AMO_ESCALATE_ADDON + assert cinder_job.decision.notes == 'blah' + + new_job = cinder_job.forwarded_to_job + assert new_job + assert new_job.job_id == '5678' + assert list(new_job.forwarded_from_jobs.all()) == [cinder_job] + assert new_job.resolvable_in_reviewer_tools + assert new_job.target_addon == addon + assert report.reload().cinder_job == new_job + + @override_switch('dsa-cinder-forwarded-review', active=True) + def test_process_queue_move_into_reviewer_handled(self): + addon = addon_factory(file_kw={'is_signed': True}) + cinder_job = CinderJob.objects.create(job_id='1234', target_addon=addon) + assert not cinder_job.resolvable_in_reviewer_tools + assert NeedsHumanReview.objects.count() == 0 + + cinder_job.process_queue_move( + new_queue='amo-env-addon-infringement', notes='notes!' ) - assert cinder_job.decision_notes == 'blah' - assert cinder_job.resolvable_in_reviewer_tools - assert cinder_job.target_addon == addon - def test_appeal_as_target(self): - addon = addon_factory() - abuse_report = AbuseReport.objects.create( - guid=addon.guid, - reason=AbuseReport.REASONS.ILLEGAL, - reporter=user_factory(), - cinder_job=CinderJob.objects.create( - decision_id='4815162342-lost', - decision_date=self.days_ago(179), - decision_action=CinderJob.DECISION_ACTIONS.AMO_DISABLE_ADDON, - target_addon=addon, - ), + assert cinder_job.resolvable_in_reviewer_tools is True + assert len(mail.outbox) == 0 + assert NeedsHumanReview.objects.count() == 1 + nhr = NeedsHumanReview.objects.get() + assert nhr.reason == NeedsHumanReview.REASONS.CINDER_ESCALATION + assert nhr.version == addon.current_version + assert CinderQueueMove.objects.filter( + cinder_job=cinder_job, to_queue='amo-env-addon-infringement', notes='notes!' 
+ ).exists() + + def test_process_queue_move_out_of_reviewer_handled(self): + # Not yet implemented, so just check it's silently ignored + addon = addon_factory(file_kw={'is_signed': True}) + cinder_job = CinderJob.objects.create( + job_id='1234', target_addon=addon, resolvable_in_reviewer_tools=True ) - assert not abuse_report.reporter_appeal_date - responses.add( - responses.POST, - f'{settings.CINDER_SERVER_URL}appeal', - json={'external_id': '2432615184-tsol'}, - status=201, + NeedsHumanReview.objects.create( + reason=NeedsHumanReview.REASONS.CINDER_ESCALATION, + version=addon.current_version, ) - abuse_report.cinder_job.appeal( - abuse_report=abuse_report, - appeal_text='appeal text', - user=user_factory(), - is_reporter=False, - ) + cinder_job.process_queue_move(new_queue='amo-env-listings', notes='out') - abuse_report.cinder_job.reload() - assert abuse_report.cinder_job.appeal_job_id - assert abuse_report.cinder_job.appeal_job.job_id == '2432615184-tsol' - assert abuse_report.cinder_job.appeal_job.target_addon == addon - abuse_report.reload() - assert not abuse_report.reporter_appeal_date - assert not abuse_report.appellant_job + assert cinder_job.resolvable_in_reviewer_tools is True + assert len(mail.outbox) == 0 + assert NeedsHumanReview.objects.count() == 1 + assert CinderQueueMove.objects.filter( + cinder_job=cinder_job, to_queue='amo-env-listings', notes='out' + ).exists() - def test_appeal_as_reporter(self): - addon = addon_factory() + def test_process_queue_move_other_queue_movement(self): + # we don't need to about these other queue moves, so just check it's silently + # ignored + addon = addon_factory(file_kw={'is_signed': True}) + cinder_job = CinderJob.objects.create(job_id='1234', target_addon=addon) + + cinder_job.process_queue_move(new_queue='amo-env-some-other-queue', notes='?') + + assert not cinder_job.resolvable_in_reviewer_tools + assert len(mail.outbox) == 0 + assert NeedsHumanReview.objects.count() == 0 + assert 
CinderQueueMove.objects.filter( + cinder_job=cinder_job, to_queue='amo-env-some-other-queue', notes='?' + ).exists() + + def _test_resolve_job(self, activity_action, cinder_action, *, expect_target_email): + addon_developer = user_factory() + addon = addon_factory(users=[addon_developer]) + cinder_job = CinderJob.objects.create(job_id='999') + flags = version_review_flags_factory( + version=addon.current_version, + pending_rejection=self.days_ago(1), + pending_rejection_by=user_factory(), + pending_content_rejection=False, + ) + NeedsHumanReview.objects.create( + version=addon.current_version, + reason=NeedsHumanReview.REASONS.CINDER_ESCALATION, + ) + NeedsHumanReview.objects.create( + version=addon.current_version, + reason=NeedsHumanReview.REASONS.ADDON_REVIEW_APPEAL, + ) + NeedsHumanReview.objects.create( + version=addon.current_version, + reason=NeedsHumanReview.REASONS.ABUSE_ADDON_VIOLATION, + ) + # pretend there is a pending rejection that's resolving this job + cinder_job.pending_rejections.add(flags) abuse_report = AbuseReport.objects.create( guid=addon.guid, - reason=AbuseReport.REASONS.ILLEGAL, + reason=AbuseReport.REASONS.POLICY_VIOLATION, + location=AbuseReport.LOCATION.ADDON, + cinder_job=cinder_job, reporter=user_factory(), ) - abuse_report.update( - cinder_job=CinderJob.objects.create( - decision_id='4815162342-lost', - decision_date=self.days_ago(179), - decision_action=CinderJob.DECISION_ACTIONS.AMO_APPROVE, - target_addon=addon, - ) - ) - assert not abuse_report.reporter_appeal_date responses.add( responses.POST, - f'{settings.CINDER_SERVER_URL}appeal', - json={'external_id': '2432615184-tsol'}, + f'{settings.CINDER_SERVER_URL}jobs/{cinder_job.job_id}/decision', + json={'uuid': uuid.uuid4().hex}, status=201, ) + policies = [CinderPolicy.objects.create(name='policy', uuid='12345678')] - abuse_report.cinder_job.appeal( - abuse_report=abuse_report, - appeal_text='appeal text', - user=abuse_report.reporter, - is_reporter=True, + log_entry = 
ActivityLog.objects.create( + activity_action, + abuse_report.target, + abuse_report.target.current_version, + *policies, + details={ + 'comments': 'some review text', + 'cinder_action': cinder_action.constant, + }, + user=user_factory(), ) - abuse_report.cinder_job.reload() - assert abuse_report.cinder_job.appeal_job - assert abuse_report.cinder_job.appeal_job.job_id == '2432615184-tsol' - assert abuse_report.cinder_job.appeal_job.target_addon == addon - abuse_report.reload() - assert abuse_report.appellant_job.job_id == '2432615184-tsol' - assert abuse_report.reporter_appeal_date + cinder_job.resolve_job(log_entry=log_entry) - def test_appeal_as_reporter_already_appealed(self): - addon = addon_factory() - abuse_report = AbuseReport.objects.create( - guid=addon.guid, - reason=AbuseReport.REASONS.ILLEGAL, - reporter=user_factory(), - ) - abuse_report.update( - cinder_job=CinderJob.objects.create( - decision_id='4815162342-lost', - decision_date=self.days_ago(179), - decision_action=CinderJob.DECISION_ACTIONS.AMO_APPROVE, - target_addon=addon, - ) - ) - # Pretend there was already an appeal job from a different reporter. - # Make that resolvable in reviewer tools as if it had been escalated, - # to ensure the get_or_create() call that we make can't trigger an - # IntegrityError because of the additional parameters (job_id must - # be the only field we use to retrieve the job). 
- abuse_report.cinder_job.update( - appeal_job=CinderJob.objects.create( - job_id='2432615184-tsol', - target_addon=addon, - resolvable_in_reviewer_tools=True, + request = responses.calls[0].request + request_body = json.loads(request.body) + assert request_body['policy_uuids'] == ['12345678'] + assert request_body['reasoning'] == 'some review text' + assert 'entity' not in request_body + cinder_job.reload() + assert cinder_job.decision.action == cinder_action + self.assertCloseToNow(cinder_job.decision.action_date) + assert list(cinder_job.decision.policies.all()) == policies + assert len(mail.outbox) == (2 if expect_target_email else 1) + assert mail.outbox[0].to == [abuse_report.reporter.email] + assert 'requested the developer' not in mail.outbox[0].body + if expect_target_email: + assert mail.outbox[1].to == [addon_developer.email] + assert str(log_entry.id) in mail.outbox[1].extra_headers['Message-ID'] + assert 'some review text' in mail.outbox[1].body + assert ( + str(abuse_report.target.current_version.version) in mail.outbox[1].body ) + assert 'days' not in mail.outbox[1].body + assert cinder_job.pending_rejections.count() == 0 + assert not NeedsHumanReview.objects.filter( + is_active=True, reason=NeedsHumanReview.REASONS.ABUSE_ADDON_VIOLATION + ).exists() + assert NeedsHumanReview.objects.filter(is_active=True).count() == 2 + assert ( + log_entry.reload().contentdecisionlog_set.get().decision + == cinder_job.decision ) - assert not abuse_report.reporter_appeal_date - responses.add( - responses.POST, - f'{settings.CINDER_SERVER_URL}appeal', - json={'external_id': '2432615184-tsol'}, - status=201, - ) - - abuse_report.cinder_job.appeal( - abuse_report=abuse_report, - appeal_text='appeal text', - user=abuse_report.reporter, - is_reporter=True, - ) - - abuse_report.cinder_job.reload() - assert abuse_report.cinder_job.appeal_job - assert abuse_report.cinder_job.appeal_job.job_id == '2432615184-tsol' - assert abuse_report.cinder_job.appeal_job.target_addon == 
addon - abuse_report.reload() - assert abuse_report.appellant_job.job_id == '2432615184-tsol' - assert abuse_report.reporter_appeal_date - def test_appeal_improperly_configured_reporter(self): - cinder_job = CinderJob.objects.create( - decision_id='4815162342-lost', - decision_date=self.days_ago(179), - decision_action=CinderJob.DECISION_ACTIONS.AMO_APPROVE, + def test_resolve_job_notify_owner(self): + self._test_resolve_job( + amo.LOG.REJECT_VERSION, + DECISION_ACTIONS.AMO_REJECT_VERSION_ADDON, + expect_target_email=True, ) - with self.assertRaises(ImproperlyConfigured): - cinder_job.appeal( - abuse_report=None, - appeal_text='No abuse_report but is_reporter is True', - user=user_factory(), - is_reporter=True, - ) - def test_appeal_improperly_configured_author(self): - abuse_report = AbuseReport.objects.create( - guid=addon_factory().guid, - reason=AbuseReport.REASONS.ILLEGAL, - reporter=user_factory(), - ) - cinder_job = CinderJob.objects.create( - decision_id='4815162342-lost', - decision_date=self.days_ago(179), - decision_action=CinderJob.DECISION_ACTIONS.AMO_APPROVE, + def test_resolve_job_no_email_to_owner(self): + self._test_resolve_job( + amo.LOG.CONFIRM_AUTO_APPROVED, + DECISION_ACTIONS.AMO_APPROVE, + expect_target_email=False, ) - with self.assertRaises(ImproperlyConfigured): - cinder_job.appeal( - abuse_report=abuse_report, - appeal_text='No user but is_reporter is False', - user=None, - is_reporter=False, - ) - def _test_resolve_job(self, activity_action, *, expect_target_email): + def test_resolve_job_delayed(self): cinder_job = CinderJob.objects.create(job_id='999') addon_developer = user_factory() abuse_report = AbuseReport.objects.create( @@ -930,71 +1401,219 @@ def _test_resolve_job(self, activity_action, *, expect_target_email): cinder_job=cinder_job, reporter=user_factory(), ) + policies = [CinderPolicy.objects.create(name='policy', uuid='12345678')] responses.add( responses.POST, - f'{settings.CINDER_SERVER_URL}create_decision', - json={'uuid': 
'123'}, + f'{settings.CINDER_SERVER_URL}jobs/{cinder_job.job_id}/decision', + json={'uuid': uuid.uuid4().hex}, status=201, ) + log_entry = ActivityLog.objects.create( + amo.LOG.REJECT_VERSION_DELAYED, + abuse_report.target, + abuse_report.target.current_version, + *policies, + details={ + 'comments': 'some review text', + 'delayed_rejection_days': '14', + 'cinder_action': 'AMO_REJECT_VERSION_WARNING_ADDON', + }, + user=user_factory(), + ) + NeedsHumanReview.objects.create( + reason=NeedsHumanReview.REASONS.ABUSE_ADDON_VIOLATION, + version=abuse_report.target.current_version, + ) + assert abuse_report.target.current_version.due_date + + cinder_job.resolve_job(log_entry=log_entry) + + cinder_job.reload() + assert cinder_job.decision.action == ( + DECISION_ACTIONS.AMO_REJECT_VERSION_WARNING_ADDON + ) + self.assertCloseToNow(cinder_job.decision.action_date) + assert list(cinder_job.decision.policies.all()) == policies + assert set(cinder_job.pending_rejections.all()) == set( + VersionReviewerFlags.objects.filter( + version=abuse_report.target.current_version + ) + ) + assert len(mail.outbox) == 2 + assert mail.outbox[0].to == [abuse_report.reporter.email] + assert 'requested the developer' in mail.outbox[0].body + assert mail.outbox[1].to == [addon_developer.email] + assert str(log_entry.id) in mail.outbox[1].extra_headers['Message-ID'] + assert 'some review text' in mail.outbox[1].body + assert str(abuse_report.target.current_version.version) in mail.outbox[1].body + assert '14 day(s)' in mail.outbox[1].body + assert not NeedsHumanReview.objects.filter(is_active=True).exists() + abuse_report.target.current_version.reload() + assert not abuse_report.target.current_version.due_date + assert ( + log_entry.reload().contentdecisionlog_set.get().decision + == cinder_job.decision + ) + + def test_resolve_job_appeal_not_third_party(self): + addon_developer = user_factory() + addon = addon_factory(users=[addon_developer]) + appeal_job = CinderJob.objects.create( + 
job_id='999', + ) + CinderJob.objects.create( + job_id='998', + decision=ContentDecision.objects.create( + addon=addon, action=DECISION_ACTIONS.AMO_APPROVE, appeal_job=appeal_job + ), + ) + NeedsHumanReview.objects.create( + version=addon.current_version, + reason=NeedsHumanReview.REASONS.CINDER_ESCALATION, + ) + NeedsHumanReview.objects.create( + version=addon.current_version, + reason=NeedsHumanReview.REASONS.ADDON_REVIEW_APPEAL, + ) + NeedsHumanReview.objects.create( + version=addon.current_version, + reason=NeedsHumanReview.REASONS.ABUSE_ADDON_VIOLATION, + ) responses.add( responses.POST, - f'{settings.CINDER_SERVER_URL}jobs/{cinder_job.job_id}/cancel', - json={'external_id': cinder_job.job_id}, - status=200, + f'{settings.CINDER_SERVER_URL}jobs/{appeal_job.job_id}/decision', + json={'uuid': uuid.uuid4().hex}, + status=201, ) policies = [CinderPolicy.objects.create(name='policy', uuid='12345678')] - review_action_reason = ReviewActionReason.objects.create( - cinder_policy=policies[0] - ) - log_entry = ActivityLog.objects.create( - activity_action, - abuse_report.target, - abuse_report.target.current_version, - review_action_reason, - details={'comments': 'some review text'}, + amo.LOG.FORCE_DISABLE, + addon, + addon.current_version, + *policies, + details={ + 'comments': 'some review text', + 'cinder_action': 'AMO_DISABLE_ADDON', + }, user=user_factory(), ) - cinder_job.resolve_job( - decision=CinderJob.DECISION_ACTIONS.AMO_REJECT_VERSION_ADDON, - log_entry=log_entry, - ) + appeal_job.resolve_job(log_entry=log_entry) request = responses.calls[0].request request_body = json.loads(request.body) assert request_body['policy_uuids'] == ['12345678'] assert request_body['reasoning'] == 'some review text' - assert request_body['entity']['id'] == str(abuse_report.target.id) - cinder_job.reload() - assert cinder_job.decision_action == ( - CinderJob.DECISION_ACTIONS.AMO_REJECT_VERSION_ADDON + assert 'entity' not in request_body + appeal_job.reload() + assert 
appeal_job.decision.action == DECISION_ACTIONS.AMO_DISABLE_ADDON + self.assertCloseToNow(appeal_job.decision.action_date) + assert list(appeal_job.decision.policies.all()) == policies + assert len(mail.outbox) == 1 + + assert mail.outbox[0].to == [addon_developer.email] + assert str(log_entry.id) in mail.outbox[0].extra_headers['Message-ID'] + assert 'some review text' in mail.outbox[0].body + assert 'days' not in mail.outbox[0].body + assert 'in an assessment performed on our own initiative' in mail.outbox[0].body + assert appeal_job.pending_rejections.count() == 0 + assert not NeedsHumanReview.objects.filter( + is_active=True, reason=NeedsHumanReview.REASONS.ADDON_REVIEW_APPEAL + ).exists() + assert NeedsHumanReview.objects.filter(is_active=True).count() == 2 + assert ( + log_entry.reload().contentdecisionlog_set.get().decision + == appeal_job.decision ) - self.assertCloseToNow(cinder_job.decision_date) - assert list(cinder_job.policies.all()) == policies - assert len(mail.outbox) == (2 if expect_target_email else 1) - assert mail.outbox[0].to == [abuse_report.reporter.email] - if expect_target_email: - assert mail.outbox[1].to == [addon_developer.email] - assert str(log_entry.id) in mail.outbox[1].extra_headers['Message-ID'] - assert 'some review text' in mail.outbox[1].body - assert ( - str(abuse_report.target.current_version.version) in mail.outbox[1].body - ) - def test_resolve_job_notify_owner(self): - self._test_resolve_job(amo.LOG.REJECT_VERSION, expect_target_email=True) + def test_resolve_job_appeal_with_new_report(self): + addon_developer = user_factory() + addon = addon_factory(users=[addon_developer]) + appeal_job = CinderJob.objects.create( + job_id='999', + ) + AbuseReport.objects.create( + reporter_email='reporter@email.com', cinder_job=appeal_job, guid=addon.guid + ) + CinderJob.objects.create( + job_id='998', + decision=ContentDecision.objects.create( + addon=addon, action=DECISION_ACTIONS.AMO_APPROVE, appeal_job=appeal_job + ), + ) + 
NeedsHumanReview.objects.create( + version=addon.current_version, + reason=NeedsHumanReview.REASONS.CINDER_ESCALATION, + ) + NeedsHumanReview.objects.create( + version=addon.current_version, + reason=NeedsHumanReview.REASONS.ADDON_REVIEW_APPEAL, + ) + NeedsHumanReview.objects.create( + version=addon.current_version, + reason=NeedsHumanReview.REASONS.ABUSE_ADDON_VIOLATION, + ) + responses.add( + responses.POST, + f'{settings.CINDER_SERVER_URL}jobs/{appeal_job.job_id}/decision', + json={'uuid': uuid.uuid4().hex}, + status=201, + ) + policies = [CinderPolicy.objects.create(name='policy', uuid='12345678')] + log_entry = ActivityLog.objects.create( + amo.LOG.FORCE_DISABLE, + addon, + addon.current_version, + *policies, + details={ + 'comments': 'some review text', + 'cinder_action': 'AMO_DISABLE_ADDON', + }, + user=user_factory(), + ) - def test_resolve_job_no_email_to_owner(self): - # note: this is a false scenario - AMO_REJECT_VERSION_ADDON would never happen - # with a CONFIRM_AUTO_APPROVED log entry, we're just testing hide_developer - self._test_resolve_job(amo.LOG.CONFIRM_AUTO_APPROVED, expect_target_email=False) + appeal_job.resolve_job(log_entry=log_entry) - def test_resolve_job_duplicate_policy(self): - cinder_job = CinderJob.objects.create(job_id='999') + appeal_job.reload() + assert appeal_job.decision.action == DECISION_ACTIONS.AMO_DISABLE_ADDON + assert len(mail.outbox) == 2 + + assert mail.outbox[1].to == [addon_developer.email] + assert str(log_entry.id) in mail.outbox[1].extra_headers['Message-ID'] + assert 'assessment performed on our own initiative' not in mail.outbox[1].body + assert mail.outbox[0].to == ['reporter@email.com'] + assert not NeedsHumanReview.objects.filter( + is_active=True, reason=NeedsHumanReview.REASONS.ADDON_REVIEW_APPEAL + ).exists() + # We are only removing NHR with the reason matching what we're doing. 
+ assert NeedsHumanReview.objects.filter( + is_active=True, reason=NeedsHumanReview.REASONS.ABUSE_ADDON_VIOLATION + ).exists() + assert NeedsHumanReview.objects.filter(is_active=True).count() == 2 + assert ( + log_entry.reload().contentdecisionlog_set.get().decision + == appeal_job.decision + ) + + def test_resolve_job_forwarded(self): addon_developer = user_factory() + addon = addon_factory(users=[addon_developer]) + cinder_job = CinderJob.objects.create(job_id='999') + CinderJob.objects.create(forwarded_to_job=cinder_job) + NeedsHumanReview.objects.create( + version=addon.current_version, + reason=NeedsHumanReview.REASONS.CINDER_ESCALATION, + ) + NeedsHumanReview.objects.create( + version=addon.current_version, + reason=NeedsHumanReview.REASONS.ADDON_REVIEW_APPEAL, + ) + NeedsHumanReview.objects.create( + version=addon.current_version, + reason=NeedsHumanReview.REASONS.ABUSE_ADDON_VIOLATION, + ) abuse_report = AbuseReport.objects.create( - guid=addon_factory(users=[addon_developer]).guid, + guid=addon.guid, reason=AbuseReport.REASONS.POLICY_VIOLATION, location=AbuseReport.LOCATION.ADDON, cinder_job=cinder_job, @@ -1002,345 +1621,542 @@ def test_resolve_job_duplicate_policy(self): ) responses.add( responses.POST, - f'{settings.CINDER_SERVER_URL}create_decision', - json={'uuid': '123'}, + f'{settings.CINDER_SERVER_URL}jobs/{cinder_job.job_id}/decision', + json={'uuid': uuid.uuid4().hex}, status=201, ) - responses.add( - responses.POST, - f'{settings.CINDER_SERVER_URL}jobs/{cinder_job.job_id}/cancel', - json={'external_id': cinder_job.job_id}, - status=200, - ) - parent_policy = CinderPolicy.objects.create( - name='parent policy', uuid='12345678' - ) - policy = CinderPolicy.objects.create( - name='policy', uuid='4815162342', parent=parent_policy - ) - review_action_reason1 = ReviewActionReason.objects.create(cinder_policy=policy) - review_action_reason2 = ReviewActionReason.objects.create( - cinder_policy=parent_policy - ) + policies = 
[CinderPolicy.objects.create(name='policy', uuid='12345678')] log_entry = ActivityLog.objects.create( - amo.LOG.REJECT_VERSION, + amo.LOG.FORCE_DISABLE, abuse_report.target, abuse_report.target.current_version, - review_action_reason1, - review_action_reason2, - details={'comments': 'some review text'}, + *policies, + details={ + 'comments': 'some review text', + 'cinder_action': 'AMO_DISABLE_ADDON', + }, user=user_factory(), ) - cinder_job.resolve_job( - decision=CinderJob.DECISION_ACTIONS.AMO_REJECT_VERSION_ADDON, - log_entry=log_entry, - ) + cinder_job.resolve_job(log_entry=log_entry) request = responses.calls[0].request request_body = json.loads(request.body) - assert request_body['policy_uuids'] == [policy.uuid] + assert request_body['policy_uuids'] == ['12345678'] assert request_body['reasoning'] == 'some review text' - assert request_body['entity']['id'] == str(abuse_report.target.id) cinder_job.reload() - assert cinder_job.decision_action == ( - CinderJob.DECISION_ACTIONS.AMO_REJECT_VERSION_ADDON - ) - self.assertCloseToNow(cinder_job.decision_date) - # Parent policy was a duplicate since we already have its child, and - # has been ignored. 
- assert list(cinder_job.policies.all()) == [policy] + assert cinder_job.decision.action == DECISION_ACTIONS.AMO_DISABLE_ADDON + self.assertCloseToNow(cinder_job.decision.action_date) + assert list(cinder_job.decision.policies.all()) == policies assert len(mail.outbox) == 2 assert mail.outbox[0].to == [abuse_report.reporter.email] + assert 'requested the developer' not in mail.outbox[0].body assert mail.outbox[1].to == [addon_developer.email] assert str(log_entry.id) in mail.outbox[1].extra_headers['Message-ID'] assert 'some review text' in mail.outbox[1].body - assert str(abuse_report.target.current_version.version) in mail.outbox[1].body + assert not NeedsHumanReview.objects.filter( + is_active=True, reason=NeedsHumanReview.REASONS.CINDER_ESCALATION + ).exists() + assert NeedsHumanReview.objects.filter(is_active=True).count() == 2 - def test_abuse_reports(self): + def test_all_abuse_reports(self): job = CinderJob.objects.create(job_id='fake_job_id') - assert list(job.abuse_reports) == [] + assert list(job.all_abuse_reports) == [] - report = AbuseReport.objects.create(guid=addon_factory().guid, cinder_job=job) - assert list(job.abuse_reports) == [report] + addon = addon_factory() + report = AbuseReport.objects.create(guid=addon.guid, cinder_job=job) + assert list(job.all_abuse_reports) == [report] - report2 = AbuseReport.objects.create(guid=addon_factory().guid, cinder_job=job) - assert list(job.abuse_reports) == [report, report2] + report2 = AbuseReport.objects.create(guid=addon.guid, cinder_job=job) + assert list(job.all_abuse_reports) == [report, report2] appeal_job = CinderJob.objects.create(job_id='fake_appeal_job_id') - job.update(appeal_job=appeal_job) + job.update( + decision=ContentDecision.objects.create( + action=DECISION_ACTIONS.AMO_DISABLE_ADDON, + addon=addon, + appeal_job=appeal_job, + ) + ) - assert appeal_job.abuse_reports == [report, report2] - assert list(job.abuse_reports) == [report, report2] + assert appeal_job.all_abuse_reports == [report, 
report2] + assert list(job.all_abuse_reports) == [report, report2] appeal_appeal_job = CinderJob.objects.create(job_id='fake_appeal_appeal_job_id') - appeal_job.update(appeal_job=appeal_appeal_job) + appeal_job.update( + decision=ContentDecision.objects.create( + action=DECISION_ACTIONS.AMO_DISABLE_ADDON, + addon=addon, + appeal_job=appeal_appeal_job, + ) + ) - assert list(appeal_appeal_job.abuse_reports) == [report, report2] - assert list(appeal_job.abuse_reports) == [report, report2] - assert list(job.abuse_reports) == [report, report2] + assert list(appeal_appeal_job.all_abuse_reports) == [report, report2] + assert list(appeal_job.all_abuse_reports) == [report, report2] + assert list(job.all_abuse_reports) == [report, report2] + + report3 = AbuseReport.objects.create(guid=addon.guid, cinder_job=appeal_job) + report4 = AbuseReport.objects.create( + guid=addon.guid, cinder_job=appeal_appeal_job + ) + assert list(appeal_appeal_job.all_abuse_reports) == [ + report, + report2, + report3, + report4, + ] + assert list(appeal_job.all_abuse_reports) == [report, report2, report3] + assert list(job.all_abuse_reports) == [report, report2] + + # Now test the scenario where the original decision was an override instead of + # the first decision. The reports should still be found by all_abuse_reports. 
+ job.decision.update(appeal_job=None) + ContentDecision.objects.create( + action=DECISION_ACTIONS.AMO_DISABLE_ADDON, + addon=addon, + appeal_job=appeal_job, + override_of=job.decision, + ) + assert list(appeal_appeal_job.all_abuse_reports) == [ + report, + report2, + report3, + report4, + ] + assert list(appeal_job.all_abuse_reports) == [report, report2, report3] + assert list(job.all_abuse_reports) == [report, report2] def test_is_appeal(self): job = CinderJob.objects.create(job_id='fake_job_id') assert not job.is_appeal appeal = CinderJob.objects.create(job_id='an appeal job') - job.update(appeal_job=appeal) + job.update( + decision=ContentDecision.objects.create( + action=DECISION_ACTIONS.AMO_DISABLE_ADDON, + addon=addon_factory(), + appeal_job=appeal, + ) + ) job.reload() assert not job.is_appeal assert appeal.is_appeal + def test_clear_needs_human_review_flags(self): + def nhr_exists(reason): + return NeedsHumanReview.objects.filter( + reason=reason, is_active=True + ).exists() + + addon = addon_factory() + job = CinderJob.objects.create( + job_id='1', + target_addon=addon, + resolvable_in_reviewer_tools=True, + decision=ContentDecision.objects.create( + action=DECISION_ACTIONS.AMO_APPROVE, addon=addon + ), + ) + NeedsHumanReview.objects.create( + version=addon.current_version, + reason=NeedsHumanReview.REASONS.ABUSE_ADDON_VIOLATION, + ) + NeedsHumanReview.objects.create( + version=addon.current_version, + reason=NeedsHumanReview.REASONS.CINDER_ESCALATION, + ) + NeedsHumanReview.objects.create( + version=addon.current_version, + reason=NeedsHumanReview.REASONS.ADDON_REVIEW_APPEAL, + ) + + # for a non-forwarded or appealed job, this should clear the abuse NHR only + job.clear_needs_human_review_flags() + assert not nhr_exists(NeedsHumanReview.REASONS.ABUSE_ADDON_VIOLATION) + assert nhr_exists(NeedsHumanReview.REASONS.CINDER_ESCALATION) + assert nhr_exists(NeedsHumanReview.REASONS.ADDON_REVIEW_APPEAL) + + NeedsHumanReview.objects.create( + 
version=addon.current_version, + reason=NeedsHumanReview.REASONS.ABUSE_ADDON_VIOLATION, + ) + # if the job is forwarded, we make sure that there are no other forwarded jobs + CinderJob.objects.create(job_id='2', target_addon=addon, forwarded_to_job=job) + other_forward = CinderJob.objects.create( + job_id='3', + target_addon=addon, + resolvable_in_reviewer_tools=True, + ) + CinderJob.objects.create( + job_id='4', target_addon=addon, forwarded_to_job=other_forward + ) + job.clear_needs_human_review_flags() + assert nhr_exists(NeedsHumanReview.REASONS.ABUSE_ADDON_VIOLATION) + assert nhr_exists(NeedsHumanReview.REASONS.CINDER_ESCALATION) + assert nhr_exists(NeedsHumanReview.REASONS.ADDON_REVIEW_APPEAL) + + # unless the other job is closed too + other_forward.update( + decision=ContentDecision.objects.create( + action=DECISION_ACTIONS.AMO_APPROVE, addon=addon + ) + ) + job.clear_needs_human_review_flags() + assert nhr_exists(NeedsHumanReview.REASONS.ABUSE_ADDON_VIOLATION) + assert not nhr_exists(NeedsHumanReview.REASONS.CINDER_ESCALATION) + assert nhr_exists(NeedsHumanReview.REASONS.ADDON_REVIEW_APPEAL) + + NeedsHumanReview.objects.create( + version=addon.current_version, + reason=NeedsHumanReview.REASONS.CINDER_ESCALATION, + ) + # similarly if the job is an appeal we make sure that there are no other appeals + CinderJob.objects.create( + job_id='5', + target_addon=addon, + decision=ContentDecision.objects.create( + action=DECISION_ACTIONS.AMO_APPROVE, addon=addon, appeal_job=job + ), + ) + job.forwarded_from_jobs.get().delete() + other_appeal = CinderJob.objects.create( + job_id='6', + target_addon=addon, + resolvable_in_reviewer_tools=True, + ) + CinderJob.objects.create( + job_id='7', + target_addon=addon, + decision=ContentDecision.objects.create( + action=DECISION_ACTIONS.AMO_APPROVE, + addon=addon, + appeal_job=other_appeal, + ), + ) + job.clear_needs_human_review_flags() + assert nhr_exists(NeedsHumanReview.REASONS.ABUSE_ADDON_VIOLATION) + assert 
nhr_exists(NeedsHumanReview.REASONS.CINDER_ESCALATION) + assert nhr_exists(NeedsHumanReview.REASONS.ADDON_REVIEW_APPEAL) + + # unless the other job is closed too + other_appeal.update( + decision=ContentDecision.objects.create( + action=DECISION_ACTIONS.AMO_APPROVE, addon=addon + ) + ) + job.clear_needs_human_review_flags() + assert nhr_exists(NeedsHumanReview.REASONS.ABUSE_ADDON_VIOLATION) + assert nhr_exists(NeedsHumanReview.REASONS.CINDER_ESCALATION) + assert not nhr_exists(NeedsHumanReview.REASONS.ADDON_REVIEW_APPEAL) + + def test_final_decision(self): + addon = addon_factory() + job = CinderJob.objects.create(job_id='1') + assert job.final_decision is None + + decision = ContentDecision.objects.create( + addon=addon, action=DECISION_ACTIONS.AMO_DISABLE_ADDON + ) + job.update(decision=decision) + assert job.final_decision == decision + + override = ContentDecision.objects.create( + addon=addon, action=DECISION_ACTIONS.AMO_DISABLE_ADDON, override_of=decision + ) + assert job.final_decision == override + -class TestCinderJobCanBeAppealed(TestCase): +class TestContentDecisionCanBeAppealed(TestCase): def setUp(self): self.reporter = user_factory() self.author = user_factory() self.addon = addon_factory(users=[self.author]) - self.initial_job = CinderJob.objects.create(job_id='fake_initial_job_id') - self.initial_report = AbuseReport.objects.create( - guid=self.addon.guid, - cinder_job=self.initial_job, - reporter=self.reporter, - reason=AbuseReport.REASONS.ILLEGAL, + self.decision = ContentDecision.objects.create( + cinder_id='fake_decision_id', + action=DECISION_ACTIONS.AMO_APPROVE, + addon=self.addon, + action_date=datetime.now(), ) def test_appealed_decision_already_made(self): - self.initial_job.update( - decision_date=datetime.now(), - decision_id='fake_decision_id', - decision_action=CinderJob.DECISION_ACTIONS.AMO_APPROVE, - ) - assert not self.initial_job.appealed_decision_already_made() + assert not self.decision.appealed_decision_already_made() appeal_job 
= CinderJob.objects.create( job_id='fake_appeal_job_id', ) - self.initial_job.update(appeal_job=appeal_job) - assert not self.initial_job.appealed_decision_already_made() + self.decision.update(appeal_job=appeal_job) + assert not self.decision.appealed_decision_already_made() - appeal_job.update(decision_id='appeal decision id') - assert self.initial_job.appealed_decision_already_made() + appeal_job.update( + decision=ContentDecision.objects.create( + cinder_id='appeal decision id', + addon=self.addon, + action=DECISION_ACTIONS.AMO_DISABLE_ADDON, + ) + ) + assert self.decision.appealed_decision_already_made() def test_reporter_can_appeal_approve_decision(self): - self.initial_job.update( - decision_date=datetime.now(), - decision_id='fake_decision_id', - decision_action=CinderJob.DECISION_ACTIONS.AMO_APPROVE, + initial_report = AbuseReport.objects.create( + guid=self.addon.guid, + cinder_job=CinderJob.objects.create(decision=self.decision), + reporter=self.reporter, + reason=AbuseReport.REASONS.ILLEGAL, ) - assert self.initial_job.can_be_appealed( - is_reporter=True, abuse_report=self.initial_report + assert self.decision.can_be_appealed( + is_reporter=True, abuse_report=initial_report ) def test_reporter_cant_appeal_approve_decision_if_abuse_report_is_not_passed(self): - self.initial_job.update( - decision_date=datetime.now(), - decision_id='fake_decision_id', - decision_action=CinderJob.DECISION_ACTIONS.AMO_APPROVE, + AbuseReport.objects.create( + guid=self.addon.guid, + cinder_job=CinderJob.objects.create(decision=self.decision), + reporter=self.reporter, + reason=AbuseReport.REASONS.ILLEGAL, ) - assert not self.initial_job.can_be_appealed(is_reporter=True) + assert not self.decision.can_be_appealed(is_reporter=True) def test_reporter_cant_appeal_non_approve_decision(self): + initial_report = AbuseReport.objects.create( + guid=self.addon.guid, + cinder_job=CinderJob.objects.create(decision=self.decision), + reporter=self.reporter, + 
reason=AbuseReport.REASONS.ILLEGAL, + ) + assert self.decision.can_be_appealed( + is_reporter=True, abuse_report=initial_report + ) for decision_action in ( - CinderJob.DECISION_ACTIONS.NO_DECISION, - CinderJob.DECISION_ACTIONS.AMO_ESCALATE_ADDON, - CinderJob.DECISION_ACTIONS.AMO_BAN_USER, - CinderJob.DECISION_ACTIONS.AMO_DISABLE_ADDON, - CinderJob.DECISION_ACTIONS.AMO_DELETE_RATING, - CinderJob.DECISION_ACTIONS.AMO_DELETE_COLLECTION, + action + for action, _ in DECISION_ACTIONS + if action not in DECISION_ACTIONS.APPEALABLE_BY_REPORTER ): - self.initial_job.update( - decision_date=datetime.now(), - decision_id='fake_decision_id', - decision_action=decision_action, + self.decision.update( + action=decision_action, + addon=self.addon, ) - assert not self.initial_job.can_be_appealed( - is_reporter=True, abuse_report=self.initial_report + assert not self.decision.can_be_appealed( + is_reporter=True, abuse_report=initial_report ) def test_reporter_cant_appeal_approve_decision_already_appealed(self): - self.initial_job.update( - decision_date=datetime.now(), - decision_id='fake_decision_id', - decision_action=CinderJob.DECISION_ACTIONS.AMO_APPROVE, - ) - appeal_job = CinderJob.objects.create( + initial_report = AbuseReport.objects.create( + guid=self.addon.guid, + cinder_job=CinderJob.objects.create(decision=self.decision), + reporter=self.reporter, + reason=AbuseReport.REASONS.ILLEGAL, + ) + assert self.decision.can_be_appealed( + is_reporter=True, abuse_report=initial_report + ) + appeal_job = CinderJob.objects.create( job_id='fake_appeal_job_id', ) - self.initial_job.update(appeal_job=appeal_job) - self.initial_report.update( - reporter_appeal_date=datetime.now(), appellant_job=appeal_job + self.decision.update(appeal_job=appeal_job) + CinderAppeal.objects.create( + decision=self.decision, reporter_report=initial_report + ) + assert not self.decision.can_be_appealed( + is_reporter=True, abuse_report=initial_report + ) + + def 
test_reporter_cant_appeal_approve_decision_overridden(self): + initial_report = AbuseReport.objects.create( + guid=self.addon.guid, + cinder_job=CinderJob.objects.create(decision=self.decision), + reporter=self.reporter, + reason=AbuseReport.REASONS.ILLEGAL, ) - assert not self.initial_job.can_be_appealed( - is_reporter=True, abuse_report=self.initial_report + assert self.decision.can_be_appealed( + is_reporter=True, abuse_report=initial_report ) + override = ContentDecision.objects.create( + addon=self.addon, + action=self.decision.action, + override_of=self.decision, + action_date=datetime.now(), + ) + assert not self.decision.can_be_appealed( + is_reporter=True, abuse_report=initial_report + ) + # but can appeal the override + assert override.can_be_appealed(is_reporter=True, abuse_report=initial_report) def test_reporter_can_appeal_approve_decision_already_appealed_someone_else(self): - self.initial_job.update( - decision_date=datetime.now(), - decision_id='fake_decision_id', - decision_action=CinderJob.DECISION_ACTIONS.AMO_APPROVE, + initial_report = AbuseReport.objects.create( + guid=self.addon.guid, + cinder_job=CinderJob.objects.create(decision=self.decision), + reporter=self.reporter, + reason=AbuseReport.REASONS.ILLEGAL, ) appeal_job = CinderJob.objects.create( job_id='fake_appeal_job_id', ) - self.initial_job.update(appeal_job=appeal_job) - AbuseReport.objects.create( + self.decision.update(appeal_job=appeal_job) + report = AbuseReport.objects.create( guid=self.addon.guid, - cinder_job=self.initial_job, + cinder_job=initial_report.cinder_job, reporter=user_factory(), - reporter_appeal_date=datetime.now(), - appellant_job=appeal_job, reason=AbuseReport.REASONS.ILLEGAL, ) - assert self.initial_job.can_be_appealed( - is_reporter=True, abuse_report=self.initial_report + CinderAppeal.objects.create(decision=self.decision, reporter_report=report) + assert self.decision.can_be_appealed( + is_reporter=True, abuse_report=initial_report ) def 
test_reporter_cant_appeal_approve_decision_already_appealed_and_decided(self): - self.initial_job.update( - decision_date=datetime.now(), - decision_id='fake_decision_id', - decision_action=CinderJob.DECISION_ACTIONS.AMO_APPROVE, + initial_report = AbuseReport.objects.create( + guid=self.addon.guid, + cinder_job=CinderJob.objects.create(decision=self.decision), + reporter=self.reporter, + reason=AbuseReport.REASONS.ILLEGAL, ) appeal_job = CinderJob.objects.create( job_id='fake_appeal_job_id', - decision_date=datetime.now(), - decision_id='fake_appeal_decision_id', - decision_action=CinderJob.DECISION_ACTIONS.AMO_APPROVE, + decision=ContentDecision.objects.create( + cinder_id='fake_appeal_decision_id', + action=DECISION_ACTIONS.AMO_APPROVE, + addon=self.addon, + ), ) - self.initial_job.update(appeal_job=appeal_job) - AbuseReport.objects.create( + self.decision.update(appeal_job=appeal_job) + report = AbuseReport.objects.create( guid=self.addon.guid, - cinder_job=self.initial_job, - appellant_job=appeal_job, + cinder_job=initial_report.cinder_job, reporter=user_factory(), - reporter_appeal_date=datetime.now(), reason=AbuseReport.REASONS.ILLEGAL, ) - assert not self.initial_job.can_be_appealed( - is_reporter=True, abuse_report=self.initial_report + CinderAppeal.objects.create(decision=self.decision, reporter_report=report) + assert not self.decision.can_be_appealed( + is_reporter=True, abuse_report=initial_report ) - def test_reporter_cant_appeal_appealed_decision(self): - self.initial_job.update( - decision_date=datetime.now(), - decision_id='fake_decision_id', - decision_action=CinderJob.DECISION_ACTIONS.AMO_APPROVE, - ) + def test_reporter_can_appeal_appealed_decision(self): appeal_job = CinderJob.objects.create( job_id='fake_appeal_job_id', - decision_date=datetime.now(), - decision_id='fake_appeal_decision_id', - decision_action=CinderJob.DECISION_ACTIONS.AMO_APPROVE, + decision=ContentDecision.objects.create( + cinder_id='fake_appeal_decision_id', + 
action=DECISION_ACTIONS.AMO_APPROVE, + addon=self.addon, + action_date=datetime.now(), + ), ) - self.initial_job.update(appeal_job=appeal_job) - self.initial_report.update( - reporter_appeal_date=datetime.now(), appellant_job=appeal_job + report = AbuseReport.objects.create( + guid=self.addon.guid, + cinder_job=CinderJob.objects.create(decision=self.decision), + reporter=self.reporter, + reason=AbuseReport.REASONS.ILLEGAL, ) - # We should never end up in this situation where an AbuseReport is tied - # to a CinderJob from an appeal, but if that somehow happens we want to - # make sure it's impossible for a reporter to appeal an appeal. + CinderAppeal.objects.create(decision=self.decision, reporter_report=report) + self.decision.update(appeal_job=appeal_job) + # We can end up in this situation where an AbuseReport is tied + # to a CinderJob from an appeal, and if that somehow happens we want to + # make sure it's possible for a reporter to appeal an appeal. new_report = AbuseReport.objects.create( guid=self.addon.guid, cinder_job=appeal_job, reporter=user_factory(), - reporter_appeal_date=datetime.now(), reason=AbuseReport.REASONS.ILLEGAL, ) - assert not appeal_job.can_be_appealed(is_reporter=True, abuse_report=new_report) + assert appeal_job.decision.can_be_appealed( + is_reporter=True, abuse_report=new_report + ) def test_reporter_cant_appeal_past_expiration_delay(self): - self.initial_job.update( - decision_date=self.days_ago(APPEAL_EXPIRATION_DAYS + 1), - decision_id='fake_decision_id', - decision_action=CinderJob.DECISION_ACTIONS.AMO_APPROVE, + initial_report = AbuseReport.objects.create( + guid=self.addon.guid, + cinder_job=CinderJob.objects.create(decision=self.decision), + reporter=self.reporter, + reason=AbuseReport.REASONS.ILLEGAL, + ) + assert self.decision.can_be_appealed( + is_reporter=True, abuse_report=initial_report ) - assert not self.initial_job.can_be_appealed( - is_reporter=True, abuse_report=self.initial_report + 
self.decision.update(action_date=self.days_ago(APPEAL_EXPIRATION_DAYS + 1)) + assert not self.decision.can_be_appealed( + is_reporter=True, abuse_report=initial_report ) - def test_author_can_appeal_disable_decision(self): - self.initial_job.update( - decision_date=datetime.now(), - decision_id='fake_decision_id', - decision_action=CinderJob.DECISION_ACTIONS.AMO_DISABLE_ADDON, + def test_reporter_cant_appeal_when_no_action_date(self): + initial_report = AbuseReport.objects.create( + guid=self.addon.guid, + cinder_job=CinderJob.objects.create(decision=self.decision), + reporter=self.reporter, + reason=AbuseReport.REASONS.ILLEGAL, ) - assert self.initial_job.can_be_appealed(is_reporter=False) + assert self.decision.can_be_appealed( + is_reporter=True, abuse_report=initial_report + ) + self.decision.update(action_date=None) + assert not self.decision.can_be_appealed( + is_reporter=True, abuse_report=initial_report + ) + + def test_author_can_appeal_disable_decision(self): + self.decision.update(action=DECISION_ACTIONS.AMO_DISABLE_ADDON) + assert self.decision.can_be_appealed(is_reporter=False) def test_author_can_appeal_delete_decision_rating(self): - user = user_factory() rating = Rating.objects.create( - addon=self.addon, user=user, rating=1, body='blah' + addon=self.addon, user=self.author, rating=1, body='blah' ) - self.initial_report.update(guid=None, rating=rating) - self.initial_job.update( - decision_date=datetime.now(), - decision_id='fake_decision_id', - decision_action=CinderJob.DECISION_ACTIONS.AMO_DELETE_RATING, + self.decision.update( + action=DECISION_ACTIONS.AMO_DELETE_RATING, addon=None, rating=rating ) - self.initial_job.can_be_appealed(is_reporter=False) + self.decision.can_be_appealed(is_reporter=False) def test_author_can_appeal_delete_decision_collection(self): - user = user_factory() - collection = collection_factory(author=user) - self.initial_report.update(guid=None, collection=collection) - self.initial_job.update( - 
decision_date=datetime.now(), - decision_id='fake_decision_id', - decision_action=CinderJob.DECISION_ACTIONS.AMO_DELETE_COLLECTION, + collection = collection_factory(author=self.author) + self.decision.update( + action=DECISION_ACTIONS.AMO_DELETE_COLLECTION, + addon=None, + collection=collection, ) - self.initial_job.can_be_appealed(is_reporter=False) + self.decision.can_be_appealed(is_reporter=False) def test_author_can_appeal_ban_user(self): - user = user_factory() - self.initial_report.update(guid=None, user=user) - self.initial_job.update( - decision_date=datetime.now(), - decision_id='fake_decision_id', - decision_action=CinderJob.DECISION_ACTIONS.AMO_BAN_USER, + self.decision.update( + action=DECISION_ACTIONS.AMO_BAN_USER, addon=None, user=self.author ) - self.initial_job.can_be_appealed(is_reporter=False) + self.decision.can_be_appealed(is_reporter=False) - def test_author_cant_appeal_approve_or_escalation_decision(self): - for decision_action in ( - CinderJob.DECISION_ACTIONS.NO_DECISION, - CinderJob.DECISION_ACTIONS.AMO_ESCALATE_ADDON, - CinderJob.DECISION_ACTIONS.AMO_APPROVE, - ): - self.initial_job.update( - decision_date=datetime.now(), - decision_id='fake_decision_id', - decision_action=decision_action, - ) - assert not self.initial_job.can_be_appealed(is_reporter=False) + def test_author_cant_appeal_approve_decision(self): + self.decision.update(action=DECISION_ACTIONS.AMO_APPROVE) + assert not self.decision.can_be_appealed(is_reporter=False) def test_author_cant_appeal_disable_decision_already_appealed(self): - self.initial_job.update( - decision_date=datetime.now(), - decision_id='fake_decision_id', - decision_action=CinderJob.DECISION_ACTIONS.AMO_DISABLE_ADDON, - ) - appeal_job = CinderJob.objects.create( - job_id='fake_appeal_job_id', - ) - self.initial_job.update(appeal_job=appeal_job) - assert not self.initial_job.can_be_appealed(is_reporter=False) + self.decision.update(action=DECISION_ACTIONS.AMO_DISABLE_ADDON) + assert 
self.decision.can_be_appealed(is_reporter=False) + appeal_job = CinderJob.objects.create(job_id='fake_appeal_job_id') + self.decision.update(appeal_job=appeal_job) + assert not self.decision.can_be_appealed(is_reporter=False) + + def test_author_cant_appeal_disable_decision_overridden(self): + self.decision.update(action=DECISION_ACTIONS.AMO_DISABLE_ADDON) + assert self.decision.can_be_appealed(is_reporter=False) + override = ContentDecision.objects.create( + addon=self.addon, + action=self.decision.action, + override_of=self.decision, + action_date=datetime.now(), + ) + assert not self.decision.can_be_appealed(is_reporter=False) + # but can appeal the override + assert override.can_be_appealed(is_reporter=False) def test_author_can_appeal_appealed_decision(self): - self.initial_job.update( - decision_date=datetime.now(), - decision_id='fake_decision_id', - decision_action=CinderJob.DECISION_ACTIONS.AMO_APPROVE, - ) appeal_job = CinderJob.objects.create( job_id='fake_appeal_job_id', - decision_date=datetime.now(), - decision_id='fake_appeal_decision_id', - decision_action=CinderJob.DECISION_ACTIONS.AMO_DISABLE_ADDON, + decision=ContentDecision.objects.create( + cinder_id='fake_appeal_decision_id', + action=DECISION_ACTIONS.AMO_DISABLE_ADDON, + addon=self.addon, + action_date=datetime.now(), + ), ) - self.initial_job.update(appeal_job=appeal_job) - assert appeal_job.can_be_appealed(is_reporter=False) + self.decision.update(appeal_job=appeal_job) + assert appeal_job.decision.can_be_appealed(is_reporter=False) class TestCinderPolicy(TestCase): @@ -1459,3 +2275,1448 @@ def test_without_parents_if_their_children_are_present(self): parent_policy, lone_policy, } + + +@override_switch('dsa-abuse-reports-review', active=True) +@override_switch('dsa-appeals-review', active=True) +class TestContentDecision(TestCase): + def setUp(self): + # It's the webhook's responsibility to do this before calling the + # action. We need it for the ActivityLog creation to work. 
+ self.task_user = user_factory(pk=settings.TASK_USER_ID) + set_user(self.task_user) + + def test_originating_job(self): + decision = ContentDecision() + assert decision.originating_job is None + + job = CinderJob(job_id='123') + decision.cinder_job = job + assert decision.originating_job == job + + new_decision = ContentDecision() + assert new_decision.originating_job is None + + new_decision.override_of = decision + assert new_decision.originating_job == job + + decision.cinder_job = None + assert new_decision.originating_job is None + + def test_get_reference_id(self): + decision = ContentDecision() + assert decision.get_reference_id() == 'NoClass#None' + assert decision.get_reference_id(short=False) == 'Decision "" for NoClass #None' + + decision.addon = addon_factory() + assert decision.get_reference_id() == f'Addon#{decision.addon.id}' + assert ( + decision.get_reference_id(short=False) + == f'Decision "" for Addon #{decision.addon.id}' + ) + + decision.cinder_id = '1234' + assert decision.get_reference_id() == '1234' + assert ( + decision.get_reference_id(short=False) + == f'Decision "1234" for Addon #{decision.addon.id}' + ) + + def test_target(self): + addon = addon_factory(guid='@lol') + decision = ContentDecision.objects.create( + action=DECISION_ACTIONS.AMO_APPROVE, addon=addon + ) + assert decision.target == addon + + user = user_factory() + decision.update(addon=None, user=user) + assert decision.target == user + + rating = Rating.objects.create(user=user, addon=addon, rating=5) + decision.update(user=None, rating=rating) + assert decision.target == rating + + collection = collection_factory() + decision.update(rating=None, collection=collection) + assert decision.target == collection + + def test_is_third_party_initiated(self): + addon = addon_factory() + current_decision = ContentDecision.objects.create( + action=DECISION_ACTIONS.AMO_DISABLE_ADDON, addon=addon + ) + assert not current_decision.is_third_party_initiated + + current_job = 
CinderJob.objects.create( + decision=current_decision, job_id=uuid.uuid4().hex + ) + current_decision.refresh_from_db() + assert not current_decision.is_third_party_initiated + + AbuseReport.objects.create(guid=addon.guid, cinder_job=current_job) + current_decision.refresh_from_db() + assert current_decision.is_third_party_initiated + + def test_is_third_party_initiated_appeal(self): + addon = addon_factory() + current_decision = ContentDecision.objects.create( + action=DECISION_ACTIONS.AMO_DISABLE_ADDON, + addon=addon, + ) + current_job = CinderJob.objects.create( + decision=current_decision, job_id=uuid.uuid4().hex + ) + original_job = CinderJob.objects.create( + job_id='456', + decision=ContentDecision.objects.create( + action=DECISION_ACTIONS.AMO_APPROVE, addon=addon, appeal_job=current_job + ), + ) + assert not current_decision.is_third_party_initiated + + AbuseReport.objects.create(guid=addon.guid, cinder_job=original_job) + assert current_decision.is_third_party_initiated + + def test_get_action_helper(self): + addon = addon_factory() + decision = ContentDecision.objects.create( + action=DECISION_ACTIONS.AMO_DISABLE_ADDON, addon=addon + ) + targets = { + ContentActionBanUser: {'user': user_factory()}, + ContentActionDeleteCollection: {'collection': collection_factory()}, + ContentActionDeleteRating: { + 'rating': Rating.objects.create(addon=addon, user=user_factory()) + }, + } + action_to_class = [ + (decision_action, ContentDecision.get_action_helper_class(decision_action)) + for decision_action in DECISION_ACTIONS.values + ] + # base cases, where it's a decision without an override or appeal involved + action_existing_to_class = { + (new_action, None, None): ActionClass + for new_action, ActionClass in action_to_class + } + + for action in DECISION_ACTIONS.REMOVING.values: + # add appeal success cases + action_existing_to_class[(DECISION_ACTIONS.AMO_APPROVE, None, action)] = ( + ContentActionTargetAppealApprove + ) + action_existing_to_class[ + 
(DECISION_ACTIONS.AMO_APPROVE_VERSION, None, action) + ] = ContentActionTargetAppealApprove + # add appeal denial cases + action_existing_to_class[(action, None, action)] = ( + ContentActionTargetAppealRemovalAffirmation + ) + # add override from takedown to approve cases + action_existing_to_class[(DECISION_ACTIONS.AMO_APPROVE, action, None)] = ( + ContentActionOverrideApprove + ) + action_existing_to_class[ + (DECISION_ACTIONS.AMO_APPROVE_VERSION, action, None) + ] = ContentActionOverrideApprove + # and override from takedown to ignore + action_existing_to_class[(DECISION_ACTIONS.AMO_IGNORE, action, None)] = ( + ContentActionOverrideApprove + ) + + for ( + new_action, + overridden_action, + appealed_action, + ), ActionClass in action_existing_to_class.items(): + decision.update( + **{ + 'action': new_action, + 'addon': None, + 'rating': None, + 'collection': None, + 'user': None, + **targets.get(ActionClass, {'addon': addon}), + } + ) + helper = decision.get_action_helper( + appealed_action=appealed_action, overridden_action=overridden_action + ) + assert helper.__class__ == ActionClass + assert helper.decision == decision + assert helper.reporter_template_path == ActionClass.reporter_template_path + assert ( + helper.reporter_appeal_template_path + == ActionClass.reporter_appeal_template_path + ) + + action_existing_to_class_no_reporter_emails = { + (action, action): ContentDecision.get_action_helper_class(action) + for action in DECISION_ACTIONS.REMOVING.values + } + for ( + new_action, + overridden_action, + ), ActionClass in action_existing_to_class_no_reporter_emails.items(): + decision.update( + **{ + 'action': new_action, + 'addon': None, + 'rating': None, + 'collection': None, + 'user': None, + **targets.get(ActionClass, {'addon': addon}), + } + ) + helper = decision.get_action_helper( + appealed_action=None, overridden_action=overridden_action + ) + assert helper.reporter_template_path is None + assert helper.reporter_appeal_template_path is None + 
assert ActionClass.reporter_template_path is not None + assert ActionClass.reporter_appeal_template_path is not None + + def _test_appeal_as_target(self, *, resolvable_in_reviewer_tools, expected_queue): + addon = addon_factory( + status=amo.STATUS_DISABLED, + file_kw={'is_signed': True, 'status': amo.STATUS_DISABLED}, + ) + abuse_report = AbuseReport.objects.create( + guid=addon.guid, + reason=AbuseReport.REASONS.ILLEGAL, + reporter=user_factory(), + cinder_job=CinderJob.objects.create( + target_addon=addon, + resolvable_in_reviewer_tools=resolvable_in_reviewer_tools, + decision=ContentDecision.objects.create( + cinder_id='4815162342-lost', + action_date=self.days_ago(179), + action=DECISION_ACTIONS.AMO_DISABLE_ADDON, + addon=addon, + ), + ), + ) + appeal_response = responses.add( + responses.POST, + f'{settings.CINDER_SERVER_URL}appeal', + json={'external_id': '2432615184-tsol'}, + status=201, + ) + + abuse_report.cinder_job.decision.appeal( + abuse_report=abuse_report, + appeal_text='appeal text', + user=user_factory(), + is_reporter=False, + ) + + abuse_report.cinder_job.reload() + assert abuse_report.cinder_job.decision.appeal_job_id + assert abuse_report.cinder_job.decision.appeal_job.job_id == '2432615184-tsol' + assert abuse_report.cinder_job.decision.appeal_job.target_addon == addon + abuse_report.reload() + assert not hasattr(abuse_report, 'cinderappeal') + assert CinderAppeal.objects.count() == 1 + appeal_text_obj = CinderAppeal.objects.get() + assert appeal_text_obj.text == 'appeal text' + assert appeal_text_obj.decision == abuse_report.cinder_job.decision + assert appeal_text_obj.reporter_report is None + + assert appeal_response.call_count == 1 + request = responses.calls[0].request + request_body = json.loads(request.body) + assert request_body['reasoning'] == 'appeal text' + assert request_body['decision_to_appeal_id'] == str( + abuse_report.cinder_job.decision.cinder_id + ) + assert request_body['queue_slug'] == expected_queue + + return 
abuse_report.cinder_job.decision.appeal_job.reload() + + def test_appeal_as_target_from_resolved_in_cinder(self): + appeal_job = self._test_appeal_as_target( + resolvable_in_reviewer_tools=False, expected_queue='amo-escalations' + ) + assert not appeal_job.resolvable_in_reviewer_tools + assert not ( + NeedsHumanReview.objects.all() + .filter(reason=NeedsHumanReview.REASONS.ADDON_REVIEW_APPEAL) + .exists() + ) + + def test_appeal_as_target_from_resolved_in_amo(self): + appeal_job = self._test_appeal_as_target( + resolvable_in_reviewer_tools=True, + expected_queue='amo-env-addon-infringement', + ) + assert appeal_job.resolvable_in_reviewer_tools + assert ( + NeedsHumanReview.objects.all() + .filter(reason=NeedsHumanReview.REASONS.ADDON_REVIEW_APPEAL) + .exists() + ) + addon = Addon.unfiltered.get() + assert addon in Addon.unfiltered.get_queryset_for_pending_queues() + + def test_appeal_as_target_improperly_configured(self): + addon = addon_factory() + abuse_report = AbuseReport.objects.create( + guid=addon.guid, + reason=AbuseReport.REASONS.ILLEGAL, + reporter=user_factory(), + cinder_job=CinderJob.objects.create( + decision=ContentDecision.objects.create( + cinder_id='4815162342-lost', + action_date=self.days_ago(179), + action=DECISION_ACTIONS.AMO_DISABLE_ADDON, + addon=addon, + ), + target_addon=addon, + ), + ) + responses.add( + responses.POST, + f'{settings.CINDER_SERVER_URL}appeal', + json={'external_id': '2432615184-tsol'}, + status=201, + ) + + with self.assertRaises(ImproperlyConfigured): + abuse_report.cinder_job.decision.appeal( + abuse_report=abuse_report, + appeal_text='appeal text', + # Can't pass user=None for a target appeal, unless it's + # specifically a user ban (see test_appeal_as_target_banned()). 
+ user=None, + is_reporter=False, + ) + + abuse_report.cinder_job.reload() + assert not abuse_report.cinder_job.decision.appeal_job_id + abuse_report.reload() + assert not hasattr(abuse_report, 'cinderappeal') + + def test_appeal_as_target_ban_improperly_configured(self): + addon = addon_factory() + abuse_report = AbuseReport.objects.create( + guid=addon.guid, + reason=AbuseReport.REASONS.ILLEGAL, + reporter=user_factory(), + cinder_job=CinderJob.objects.create( + decision=ContentDecision.objects.create( + cinder_id='4815162342-lost', + action_date=self.days_ago(179), + # This (target is an add-on, decision is a user ban) shouldn't + # be possible but we want to make sure this is handled + # explicitly. + action=DECISION_ACTIONS.AMO_BAN_USER, + addon=addon, + ), + target_addon=addon, + ), + ) + responses.add( + responses.POST, + f'{settings.CINDER_SERVER_URL}appeal', + json={'external_id': '2432615184-tsol'}, + status=201, + ) + + with self.assertRaises(ImproperlyConfigured): + abuse_report.cinder_job.decision.appeal( + abuse_report=abuse_report, + appeal_text='appeal text', + # user=None is allowed here since the original decision was a + # ban, the target user can no longer log in but should be + # allowed to appeal. In this instance though, the target of the + # abuse report was not a user so this shouldn't be possible and + # we should raise an error. 
+ user=None, + is_reporter=False, + ) + + abuse_report.cinder_job.reload() + assert not abuse_report.cinder_job.decision.appeal_job_id + abuse_report.reload() + assert not hasattr(abuse_report, 'cinderappeal') + + def test_appeal_as_target_banned(self): + target = user_factory() + abuse_report = AbuseReport.objects.create( + user=target, + reason=AbuseReport.REASONS.ILLEGAL, + reporter=user_factory(), + cinder_job=CinderJob.objects.create( + decision=ContentDecision.objects.create( + cinder_id='4815162342-lost', + action_date=self.days_ago(179), + action=DECISION_ACTIONS.AMO_BAN_USER, + user=target, + ) + ), + ) + responses.add( + responses.POST, + f'{settings.CINDER_SERVER_URL}appeal', + json={'external_id': '2432615184-tsol'}, + status=201, + ) + + abuse_report.cinder_job.decision.appeal( + abuse_report=abuse_report, + appeal_text='appeal text', + # user=None is allowed here since the original decision was a ban, + # the target user can no longer log in but should be allowed to + # appeal. 
+ user=None, + is_reporter=False, + ) + + abuse_report.cinder_job.reload() + assert abuse_report.cinder_job.decision.appeal_job_id + assert abuse_report.cinder_job.decision.appeal_job.job_id == '2432615184-tsol' + abuse_report.reload() + assert not hasattr(abuse_report, 'cinderappeal') + + def test_appeal_as_reporter(self): + addon = addon_factory() + abuse_report = AbuseReport.objects.create( + guid=addon.guid, + reason=AbuseReport.REASONS.ILLEGAL, + reporter=user_factory(), + ) + abuse_report.update( + cinder_job=CinderJob.objects.create( + target_addon=addon, + decision=ContentDecision.objects.create( + cinder_id='4815162342-lost', + action_date=self.days_ago(179), + action=DECISION_ACTIONS.AMO_APPROVE, + addon=addon, + ), + ) + ) + appeal_response = responses.add( + responses.POST, + f'{settings.CINDER_SERVER_URL}appeal', + json={'external_id': '2432615184-tsol'}, + status=201, + ) + + abuse_report.cinder_job.decision.appeal( + abuse_report=abuse_report, + appeal_text='appeal text', + user=abuse_report.reporter, + is_reporter=True, + ) + + abuse_report.cinder_job.reload() + assert abuse_report.cinder_job.decision.appeal_job + assert abuse_report.cinder_job.decision.appeal_job.job_id == '2432615184-tsol' + assert abuse_report.cinder_job.decision.appeal_job.target_addon == addon + abuse_report.reload() + assert abuse_report.cinderappeal + assert CinderAppeal.objects.count() == 1 + appeal_text_obj = CinderAppeal.objects.get() + assert appeal_text_obj.text == 'appeal text' + assert appeal_text_obj.decision == abuse_report.cinder_job.decision + assert appeal_text_obj.reporter_report == abuse_report + + assert appeal_response.call_count == 1 + request = responses.calls[0].request + request_body = json.loads(request.body) + assert request_body['reasoning'] == 'appeal text' + assert request_body['decision_to_appeal_id'] == str( + abuse_report.cinder_job.decision.cinder_id + ) + assert request_body['queue_slug'] == 'amo-escalations' + + def 
test_appeal_as_reporter_already_appealed(self): + addon = addon_factory() + abuse_report = AbuseReport.objects.create( + guid=addon.guid, + reason=AbuseReport.REASONS.ILLEGAL, + reporter=user_factory(), + ) + abuse_report.update( + cinder_job=CinderJob.objects.create( + target_addon=addon, + decision=ContentDecision.objects.create( + cinder_id='4815162342-lost', + action_date=self.days_ago(179), + action=DECISION_ACTIONS.AMO_APPROVE, + addon=addon, + ), + ) + ) + # Pretend there was already an appeal job from a different reporter. + # Make that resolvable in reviewer tools as if it had been escalated, + # to ensure the get_or_create() call that we make can't trigger an + # IntegrityError because of the additional parameters (job_id must + # be the only field we use to retrieve the job). + abuse_report.cinder_job.decision.update( + appeal_job=CinderJob.objects.create( + job_id='2432615184-tsol', + target_addon=addon, + resolvable_in_reviewer_tools=True, + ) + ) + responses.add( + responses.POST, + f'{settings.CINDER_SERVER_URL}appeal', + json={'external_id': '2432615184-tsol'}, + status=201, + ) + + abuse_report.cinder_job.decision.appeal( + abuse_report=abuse_report, + appeal_text='appeal text', + user=abuse_report.reporter, + is_reporter=True, + ) + + abuse_report.cinder_job.reload() + assert abuse_report.cinder_job.decision.appeal_job + assert abuse_report.cinder_job.decision.appeal_job.job_id == '2432615184-tsol' + assert abuse_report.cinder_job.decision.appeal_job.target_addon == addon + abuse_report.reload() + assert abuse_report.cinderappeal + + def test_appeal_as_reporter_specific_version(self): + addon = addon_factory(version_kw={'human_review_date': datetime.now()}) + original_version = addon.current_version + version_factory(addon=addon, human_review_date=datetime.now()) + abuse_report = AbuseReport.objects.create( + guid=addon.guid, + reason=AbuseReport.REASONS.ILLEGAL, + reporter=user_factory(), + addon_version=original_version.version, + ) + 
abuse_report.update( + cinder_job=CinderJob.objects.create( + target_addon=addon, + resolvable_in_reviewer_tools=True, + decision=ContentDecision.objects.create( + cinder_id='4815162342-lost', + action_date=self.days_ago(179), + action=DECISION_ACTIONS.AMO_APPROVE, + addon=addon, + ), + ) + ) + responses.add( + responses.POST, + f'{settings.CINDER_SERVER_URL}appeal', + json={'external_id': '2432615184-tsol'}, + status=201, + ) + assert not original_version.due_date + + abuse_report.cinder_job.decision.appeal( + abuse_report=abuse_report, + appeal_text='appeal text', + user=abuse_report.reporter, + is_reporter=True, + ) + + abuse_report.cinder_job.reload() + assert abuse_report.cinder_job.decision.appeal_job + assert abuse_report.cinder_job.decision.appeal_job.job_id == '2432615184-tsol' + assert abuse_report.cinder_job.decision.appeal_job.target_addon == addon + abuse_report.reload() + assert abuse_report.cinderappeal + assert CinderAppeal.objects.count() == 1 + appeal_text_obj = CinderAppeal.objects.get() + assert appeal_text_obj.text == 'appeal text' + assert appeal_text_obj.decision == abuse_report.cinder_job.decision + assert appeal_text_obj.reporter_report == abuse_report + assert original_version.reload().due_date + + def test_appeal_improperly_configured_reporter(self): + cinder_job = CinderJob.objects.create( + decision=ContentDecision.objects.create( + cinder_id='4815162342-lost', + action_date=self.days_ago(179), + action=DECISION_ACTIONS.AMO_APPROVE, + addon=addon_factory(), + ) + ) + with self.assertRaises(ImproperlyConfigured): + cinder_job.decision.appeal( + abuse_report=None, + appeal_text='No abuse_report but is_reporter is True', + user=user_factory(), + is_reporter=True, + ) + + def test_appeal_improperly_configured_author(self): + addon = addon_factory() + abuse_report = AbuseReport.objects.create( + guid=addon.guid, + reason=AbuseReport.REASONS.ILLEGAL, + reporter=user_factory(), + ) + cinder_job = CinderJob.objects.create( + 
decision=ContentDecision.objects.create( + cinder_id='4815162342-lost', + action_date=self.days_ago(179), + action=DECISION_ACTIONS.AMO_APPROVE, + addon=addon, + ) + ) + with self.assertRaises(ImproperlyConfigured): + cinder_job.decision.appeal( + abuse_report=abuse_report, + appeal_text='No user but is_reporter is False', + user=None, + is_reporter=False, + ) + + def _test_notify_reviewer_decision( + self, + decision, + activity_action, + cinder_action, + *, + expect_email=True, + expect_create_decision_call, + expect_create_job_decision_call, + extra_log_details=None, + expected_decision_object_count=1, + ): + create_decision_response = responses.add( + responses.POST, + f'{settings.CINDER_SERVER_URL}create_decision', + json={'uuid': uuid.uuid4().hex}, + status=201, + ) + cinder_job_id = (job := getattr(decision, 'cinder_job', None)) and job.job_id + create_job_decision_response = responses.add( + responses.POST, + f'{settings.CINDER_SERVER_URL}jobs/{cinder_job_id}/decision', + json={'uuid': uuid.uuid4().hex}, + status=201, + ) + policies = [ + CinderPolicy.objects.create( + name='policy', uuid='12345678', text='some policy text' + ) + ] + entity_helper = CinderJob.get_entity_helper( + decision.addon, resolved_in_reviewer_tools=True + ) + addon_version = decision.addon.versions.all()[0] + cinder_action = cinder_action or getattr(activity_action, 'cinder_action', None) + log_entry = ActivityLog.objects.create( + activity_action, + decision.addon, + addon_version, + *policies, + details={ + 'comments': 'some review text', + 'cinder_action': cinder_action.constant, + **(extra_log_details or {}), + }, + user=user_factory(), + ) + + decision.notify_reviewer_decision( + log_entry=log_entry, + entity_helper=entity_helper, + ) + + assert decision.action == cinder_action + assert decision.notes == 'some review text' + if expect_create_decision_call: + assert create_decision_response.call_count == 1 + assert create_job_decision_response.call_count == 0 + request = 
responses.calls[0].request + request_body = json.loads(request.body) + assert request_body['policy_uuids'] == ['12345678'] + assert request_body['reasoning'] == 'some review text' + assert request_body['entity']['id'] == str(decision.addon.id) + assert request_body['enforcement_actions_slugs'] == [ + cinder_action.api_value + ] + self.assertCloseToNow(decision.action_date) + assert list(decision.policies.all()) == policies + assert decision.id + elif expect_create_job_decision_call: + assert create_decision_response.call_count == 0 + assert create_job_decision_response.call_count == 1 + request = responses.calls[0].request + request_body = json.loads(request.body) + assert request_body['policy_uuids'] == ['12345678'] + assert request_body['reasoning'] == 'some review text' + assert 'entity' not in request_body + assert request_body['enforcement_actions_slugs'] == [ + cinder_action.api_value + ] + self.assertCloseToNow(decision.action_date) + assert list(decision.policies.all()) == policies + assert decision.id + else: + assert create_decision_response.call_count == 0 + assert create_job_decision_response.call_count == 0 + assert CinderPolicy.contentdecision_set.through.objects.count() == 0 + assert not decision.id + assert ContentDecision.objects.count() == expected_decision_object_count + if expected_decision_object_count > 0: + assert log_entry.reload().contentdecisionlog_set.get().decision == decision + + if expect_email: + assert len(mail.outbox) == 1 + assert mail.outbox[0].to == [decision.addon.authors.first().email] + assert str(log_entry.id) in mail.outbox[0].extra_headers['Message-ID'] + assert str(addon_version) in mail.outbox[0].body + assert 'days' not in mail.outbox[0].body + assert 'some review text' in mail.outbox[0].body + assert 'some policy text' not in mail.outbox[0].body + AttachmentLog.objects.create( + activity_log=log_entry, + file=ContentFile('Pseudo File', name='attachment.txt'), + ) + decision.notify_reviewer_decision( + 
log_entry=log_entry, + entity_helper=entity_helper, + ) + assert 'An attachment was provided.' not in mail.outbox[0].body + assert 'To respond or view the file,' not in mail.outbox[0].body + assert 'An attachment was provided.' in mail.outbox[1].body + assert 'To respond or view the file,' in mail.outbox[1].body + else: + assert len(mail.outbox) == 0 + + def test_notify_reviewer_decision_first_decision(self): + addon_developer = user_factory() + addon = addon_factory(users=[addon_developer]) + decision = ContentDecision(addon=addon) + self._test_notify_reviewer_decision( + decision, + amo.LOG.REJECT_VERSION, + DECISION_ACTIONS.AMO_REJECT_VERSION_ADDON, + expect_create_decision_call=True, + expect_create_job_decision_call=False, + ) + assert parse.quote(f'/firefox/addon/{addon.slug}/') in mail.outbox[0].body + assert '/developers/' not in mail.outbox[0].body + + def test_notify_reviewer_decision_override_decision(self): + addon_developer = user_factory() + addon = addon_factory(users=[addon_developer]) + previous_decision = ContentDecision.objects.create( + addon=addon, + action=DECISION_ACTIONS.AMO_REJECT_VERSION_WARNING_ADDON, + action_date=datetime.now(), + ) + decision = ContentDecision(addon=addon, override_of=previous_decision) + self._test_notify_reviewer_decision( + decision, + amo.LOG.REJECT_VERSION, + DECISION_ACTIONS.AMO_REJECT_VERSION_ADDON, + expect_create_decision_call=True, + expect_create_job_decision_call=False, + expected_decision_object_count=2, + ) + assert parse.quote(f'/firefox/addon/{addon.slug}/') in mail.outbox[0].body + assert '/developers/' not in mail.outbox[0].body + + def test_notify_reviewer_decision_unlisted_version(self): + addon_developer = user_factory() + addon = addon_factory( + users=[addon_developer], version_kw={'channel': amo.CHANNEL_UNLISTED} + ) + decision = ContentDecision(addon=addon) + self._test_notify_reviewer_decision( + decision, + amo.LOG.REJECT_VERSION, + DECISION_ACTIONS.AMO_REJECT_VERSION_ADDON, + 
expect_create_decision_call=True, + expect_create_job_decision_call=False, + ) + assert '/firefox/' not in mail.outbox[0].body + assert ( + f'{settings.SITE_URL}/en-US/developers/addon/{addon.id}/' + in mail.outbox[0].body + ) + + def test_notify_reviewer_decision_first_decision_no_email_to_owner(self): + addon_developer = user_factory() + addon = addon_factory(users=[addon_developer]) + decision = ContentDecision(addon=addon) + decision.cinder_job = CinderJob.objects.create(job_id='1234') + self._test_notify_reviewer_decision( + decision, + amo.LOG.CONFIRM_AUTO_APPROVED, + DECISION_ACTIONS.AMO_APPROVE, + expect_email=False, + expect_create_decision_call=False, + expect_create_job_decision_call=True, + ) + + def test_notify_reviewer_decision_override_decision_no_email_to_owner(self): + addon_developer = user_factory() + addon = addon_factory(users=[addon_developer]) + previous_decision = ContentDecision.objects.create( + addon=addon, + action=DECISION_ACTIONS.AMO_REJECT_VERSION_WARNING_ADDON, + action_date=datetime.now(), + ) + previous_decision.cinder_job = CinderJob.objects.create( + job_id='1234', decision=previous_decision + ) + decision = ContentDecision(addon=addon, override_of=previous_decision) + self._test_notify_reviewer_decision( + decision, + amo.LOG.CONFIRM_AUTO_APPROVED, + DECISION_ACTIONS.AMO_APPROVE, + expect_email=False, + expect_create_decision_call=True, + expect_create_job_decision_call=False, + expected_decision_object_count=2, + ) + + def test_no_create_decision_for_approve_without_a_job(self): + addon_developer = user_factory() + addon = addon_factory(users=[addon_developer]) + decision = ContentDecision(addon=addon) + assert not hasattr(decision, 'cinder_job') + self._test_notify_reviewer_decision( + decision, + amo.LOG.APPROVE_VERSION, + DECISION_ACTIONS.AMO_APPROVE_VERSION, + expect_create_decision_call=False, + expect_create_job_decision_call=False, + expect_email=True, + expected_decision_object_count=0, + ) + + def 
test_notify_reviewer_decision_auto_approve_email_for_non_human_review(self): + addon_developer = user_factory() + addon = addon_factory(users=[addon_developer]) + decision = ContentDecision(addon=addon) + self._test_notify_reviewer_decision( + decision, + amo.LOG.APPROVE_VERSION, + DECISION_ACTIONS.AMO_APPROVE_VERSION, + expect_email=True, + expect_create_decision_call=False, + expect_create_job_decision_call=False, + expected_decision_object_count=0, + extra_log_details={'human_review': False}, + ) + assert 'automatically screened and tentatively approved' in mail.outbox[0].body + + def test_notify_reviewer_decision_auto_approve_email_for_human_review(self): + addon_developer = user_factory() + addon = addon_factory(users=[addon_developer]) + decision = ContentDecision(addon=addon) + self._test_notify_reviewer_decision( + decision, + amo.LOG.APPROVE_VERSION, + DECISION_ACTIONS.AMO_APPROVE_VERSION, + expect_email=True, + expect_create_decision_call=False, + expect_create_job_decision_call=False, + expected_decision_object_count=0, + extra_log_details={'human_review': True}, + ) + assert 'has been approved' in mail.outbox[0].body + + def test_notify_reviewer_decision_no_cinder_action_in_activity_log(self): + addon = addon_factory() + log_entry = ActivityLog.objects.create( + amo.LOG.APPROVE_VERSION, + addon, + addon.current_version, + details={'comments': 'some review text'}, + user=user_factory(), + ) + + with self.assertRaises(ImproperlyConfigured): + ContentDecision().notify_reviewer_decision( + log_entry=log_entry, entity_helper=None + ) + + def test_notify_reviewer_decision_invalid_cinder_action_in_activity_log(self): + addon = addon_factory() + log_entry = ActivityLog.objects.create( + amo.LOG.APPROVE_VERSION, + addon, + addon.current_version, + details={'comments': 'some review text', 'cinder_action': 'NOT_AN_ACTION'}, + user=user_factory(), + ) + + with self.assertRaises(ImproperlyConfigured): + ContentDecision().notify_reviewer_decision( + 
log_entry=log_entry, entity_helper=None + ) + + def test_notify_reviewer_decision_rejection_blocking(self): + addon_developer = user_factory() + addon = addon_factory(users=[addon_developer]) + decision = ContentDecision(addon=addon) + self._test_notify_reviewer_decision( + decision, + amo.LOG.REJECT_VERSION, + DECISION_ACTIONS.AMO_REJECT_VERSION_ADDON, + expect_create_decision_call=True, + expect_create_job_decision_call=False, + extra_log_details={ + 'is_addon_being_blocked': True, + 'is_addon_being_disabled': False, + }, + ) + assert ( + 'Users who have previously installed those versions will be able to' + not in mail.outbox[0].body + ) + assert ( + "users who have previously installed those versions won't be able to" + in mail.outbox[0].body + ) + assert ( + 'You may upload a new version which addresses the policy violation(s)' + in mail.outbox[0].body + ) + + def test_notify_reviewer_decision_rejection_blocking_addon_being_disabled(self): + addon_developer = user_factory() + addon = addon_factory(users=[addon_developer]) + decision = ContentDecision(addon=addon) + self._test_notify_reviewer_decision( + decision, + amo.LOG.REJECT_VERSION, + DECISION_ACTIONS.AMO_REJECT_VERSION_ADDON, + expect_create_decision_call=True, + expect_create_job_decision_call=False, + extra_log_details={ + 'is_addon_being_blocked': True, + 'is_addon_being_disabled': True, + }, + ) + assert ( + 'Users who have previously installed those versions will be able to' + not in mail.outbox[0].body + ) + assert ( + "users who have previously installed those versions won't be able to" + in mail.outbox[0].body + ) + assert ( + 'You may upload a new version which addresses the policy violation(s)' + not in mail.outbox[0].body + ) + + def test_notify_reviewer_decision_rejection_addon_already_disabled(self): + addon_developer = user_factory() + addon = addon_factory(users=[addon_developer], status=amo.STATUS_DISABLED) + decision = ContentDecision(addon=addon) + self._test_notify_reviewer_decision( 
+ decision, + amo.LOG.REJECT_VERSION, + DECISION_ACTIONS.AMO_REJECT_VERSION_ADDON, + expect_create_decision_call=True, + expect_create_job_decision_call=False, + ) + assert ( + 'Users who have previously installed those versions will be able to' + in mail.outbox[0].body + ) + assert ( + "users who have previously installed those versions won't be able to" + not in mail.outbox[0].body + ) + assert ( + 'You may upload a new version which addresses the policy violation(s)' + not in mail.outbox[0].body + ) + + def test_notify_reviewer_decision_legal_forward(self): + """Test a reviewer "decision" to forward to legal. Because there is no job there + is no decision though, so we don't expect any decision to be notified to Cinder. + """ + addon_developer = user_factory() + # Set to disabled because we already don't create decisions for approvals. + addon = addon_factory(users=[addon_developer], status=amo.STATUS_DISABLED) + decision = ContentDecision(addon=addon) + # Check there isn't a job already so our .get later isn't a false positive. 
+ assert not CinderJob.objects.exists() + responses.add( + responses.POST, + f'{settings.CINDER_SERVER_URL}create_report', + json={'job_id': '123456'}, + status=201, + ) + self._test_notify_reviewer_decision( + decision, + amo.LOG.REQUEST_LEGAL, + None, + # as above, we arne't making a decision on a job, so no call is expected + expect_create_decision_call=False, + expect_create_job_decision_call=False, + expected_decision_object_count=0, + # and certainly no email to the developer + expect_email=False, + ) + assert CinderJob.objects.get().job_id == '123456' + + def _test_process_action_ban_user_outcome(self, decision): + self.assertCloseToNow(decision.action_date) + self.assertCloseToNow(decision.user.reload().banned) + assert ( + ActivityLog.objects.filter(action=amo.LOG.ADMIN_USER_BANNED.id).count() == 1 + ) + assert 'appeal' in mail.outbox[0].body + + def test_process_action_ban_user_held(self): + user = user_factory(email='superstarops@mozilla.com') + decision = ContentDecision.objects.create( + user=user, action=DECISION_ACTIONS.AMO_BAN_USER + ) + assert decision.action_date is None + decision.process_action() + assert decision.action_date is None + assert not user.reload().banned + assert ( + ActivityLog.objects.filter( + action=amo.LOG.HELD_ACTION_ADMIN_USER_BANNED.id + ).count() + == 1 + ) + assert len(mail.outbox) == 0 + + decision.process_action(release_hold=True) + self._test_process_action_ban_user_outcome(decision) + + def test_process_action_ban_user(self): + user = user_factory() + decision = ContentDecision.objects.create( + user=user, action=DECISION_ACTIONS.AMO_BAN_USER + ) + assert decision.action_date is None + decision.process_action() + self._test_process_action_ban_user_outcome(decision) + + def _test_process_action_disable_addon_outcome(self, decision): + self.assertCloseToNow(decision.action_date) + assert decision.addon.reload().status == amo.STATUS_DISABLED + assert ActivityLog.objects.filter(action=amo.LOG.FORCE_DISABLE.id).count() == 1 
+ assert 'appeal' in mail.outbox[0].body + + def test_process_action_disable_addon_held(self): + addon = addon_factory(users=[user_factory()]) + self.make_addon_promoted(addon, RECOMMENDED, approve_version=True) + decision = ContentDecision.objects.create( + addon=addon, action=DECISION_ACTIONS.AMO_DISABLE_ADDON + ) + assert decision.action_date is None + decision.process_action() + assert decision.action_date is None + assert addon.reload().status == amo.STATUS_APPROVED + assert ( + ActivityLog.objects.filter( + action=amo.LOG.HELD_ACTION_FORCE_DISABLE.id + ).count() + == 1 + ) + assert len(mail.outbox) == 0 + + decision.process_action(release_hold=True) + self._test_process_action_disable_addon_outcome(decision) + + def test_process_action_disable_addon(self): + addon = addon_factory(users=[user_factory()]) + decision = ContentDecision.objects.create( + addon=addon, action=DECISION_ACTIONS.AMO_DISABLE_ADDON + ) + assert decision.action_date is None + decision.process_action() + self._test_process_action_disable_addon_outcome(decision) + + def _test_process_action_delete_collection_outcome(self, decision): + self.assertCloseToNow(decision.action_date) + assert decision.collection.reload().deleted + assert ( + ActivityLog.objects.filter(action=amo.LOG.COLLECTION_DELETED.id).count() + == 1 + ) + assert 'appeal' in mail.outbox[0].body + + def test_process_action_delete_collection_held(self): + collection = collection_factory(author=self.task_user) + decision = ContentDecision.objects.create( + collection=collection, action=DECISION_ACTIONS.AMO_DELETE_COLLECTION + ) + assert decision.action_date is None + decision.process_action() + assert decision.action_date is None + assert not collection.reload().deleted + assert ( + ActivityLog.objects.filter( + action=amo.LOG.HELD_ACTION_COLLECTION_DELETED.id + ).count() + == 1 + ) + assert len(mail.outbox) == 0 + + decision.process_action(release_hold=True) + self._test_process_action_delete_collection_outcome(decision) + + def 
test_process_action_delete_collection(self): + collection = collection_factory(author=user_factory()) + decision = ContentDecision.objects.create( + collection=collection, action=DECISION_ACTIONS.AMO_DELETE_COLLECTION + ) + assert decision.action_date is None + decision.process_action() + self._test_process_action_delete_collection_outcome(decision) + + def _test_process_action_delete_rating_outcome(self, decision): + self.assertCloseToNow(decision.action_date) + assert decision.rating.reload().deleted + assert ActivityLog.objects.filter(action=amo.LOG.DELETE_RATING.id).count() == 1 + assert 'appeal' in mail.outbox[0].body + + def test_process_action_delete_rating_held(self): + user = user_factory() + addon = addon_factory(users=[user]) + rating = Rating.objects.create( + addon=addon, + user=user, + body='reply', + reply_to=Rating.objects.create( + addon=addon, user=user_factory(), body='sdsd' + ), + ) + decision = ContentDecision.objects.create( + rating=rating, action=DECISION_ACTIONS.AMO_DELETE_RATING + ) + self.make_addon_promoted(rating.addon, RECOMMENDED, approve_version=True) + assert decision.action_date is None + mail.outbox.clear() + + decision.process_action() + assert decision.action_date is None + assert not rating.reload().deleted + assert ( + ActivityLog.objects.filter( + action=amo.LOG.HELD_ACTION_DELETE_RATING.id + ).count() + == 1 + ) + assert len(mail.outbox) == 0 + + decision.process_action(release_hold=True) + self._test_process_action_delete_rating_outcome(decision) + + def test_process_action_delete_rating(self): + rating = Rating.objects.create(addon=addon_factory(), user=user_factory()) + decision = ContentDecision.objects.create( + rating=rating, action=DECISION_ACTIONS.AMO_DELETE_RATING + ) + assert decision.action_date is None + decision.process_action() + self._test_process_action_delete_rating_outcome(decision) + + def test_get_target_review_url(self): + addon = addon_factory() + decision = ContentDecision.objects.create( + 
addon=addon, action=DECISION_ACTIONS.AMO_DISABLE_ADDON + ) + assert decision.get_target_review_url() == reverse( + 'reviewers.decision_review', args=(decision.id,) + ) + + def test_get_target_display(self): + decision = ContentDecision.objects.create( + addon=addon_factory(), action=DECISION_ACTIONS.AMO_DISABLE_ADDON + ) + assert decision.get_target_display() == 'Extension' + + decision.update(addon=None, user=user_factory()) + assert decision.get_target_display() == 'User profile' + + decision.update(user=None, collection=collection_factory()) + assert decision.get_target_display() == 'Collection' + + decision.update( + collection=None, + rating=Rating.objects.create(addon=addon_factory(), user=user_factory()), + ) + assert decision.get_target_display() == 'Rating' + + def test_get_target_name(self): + decision = ContentDecision.objects.create( + addon=addon_factory(), action=DECISION_ACTIONS.AMO_DISABLE_ADDON + ) + assert decision.get_target_name() == str(decision.addon.name) + + decision.update(addon=None, user=user_factory()) + assert decision.get_target_name() == decision.user.name + + decision.update(user=None, collection=collection_factory()) + assert decision.get_target_name() == decision.collection.name + + decision.update( + collection=None, + rating=Rating.objects.create( + addon=addon_factory(), user=user_factory(), body='something' + ), + ) + assert ( + decision.get_target_name() + == f'"something" for {decision.rating.addon.name}' + ) + + +@pytest.mark.django_db +@pytest.mark.parametrize( + 'illegal_category,expected', + [ + (None, None), + ( + ILLEGAL_CATEGORIES.ANIMAL_WELFARE, + 'STATEMENT_CATEGORY_ANIMAL_WELFARE', + ), + ( + ILLEGAL_CATEGORIES.CONSUMER_INFORMATION, + 'STATEMENT_CATEGORY_CONSUMER_INFORMATION', + ), + ( + ILLEGAL_CATEGORIES.DATA_PROTECTION_AND_PRIVACY_VIOLATIONS, + 'STATEMENT_CATEGORY_DATA_PROTECTION_AND_PRIVACY_VIOLATIONS', + ), + ( + ILLEGAL_CATEGORIES.ILLEGAL_OR_HARMFUL_SPEECH, + 'STATEMENT_CATEGORY_ILLEGAL_OR_HARMFUL_SPEECH', + 
), + ( + ILLEGAL_CATEGORIES.INTELLECTUAL_PROPERTY_INFRINGEMENTS, + 'STATEMENT_CATEGORY_INTELLECTUAL_PROPERTY_INFRINGEMENTS', + ), + ( + ILLEGAL_CATEGORIES.NEGATIVE_EFFECTS_ON_CIVIC_DISCOURSE_OR_ELECTIONS, + 'STATEMENT_CATEGORY_NEGATIVE_EFFECTS_ON_CIVIC_DISCOURSE_OR_ELECTIONS', + ), + ( + ILLEGAL_CATEGORIES.NON_CONSENSUAL_BEHAVIOUR, + 'STATEMENT_CATEGORY_NON_CONSENSUAL_BEHAVIOUR', + ), + ( + ILLEGAL_CATEGORIES.PORNOGRAPHY_OR_SEXUALIZED_CONTENT, + 'STATEMENT_CATEGORY_PORNOGRAPHY_OR_SEXUALIZED_CONTENT', + ), + ( + ILLEGAL_CATEGORIES.PROTECTION_OF_MINORS, + 'STATEMENT_CATEGORY_PROTECTION_OF_MINORS', + ), + ( + ILLEGAL_CATEGORIES.RISK_FOR_PUBLIC_SECURITY, + 'STATEMENT_CATEGORY_RISK_FOR_PUBLIC_SECURITY', + ), + ( + ILLEGAL_CATEGORIES.SCAMS_AND_FRAUD, + 'STATEMENT_CATEGORY_SCAMS_AND_FRAUD', + ), + (ILLEGAL_CATEGORIES.SELF_HARM, 'STATEMENT_CATEGORY_SELF_HARM'), + ( + ILLEGAL_CATEGORIES.UNSAFE_AND_PROHIBITED_PRODUCTS, + 'STATEMENT_CATEGORY_UNSAFE_AND_PROHIBITED_PRODUCTS', + ), + (ILLEGAL_CATEGORIES.VIOLENCE, 'STATEMENT_CATEGORY_VIOLENCE'), + (ILLEGAL_CATEGORIES.OTHER, 'STATEMENT_CATEGORY_OTHER'), + ], +) +def test_illegal_category_cinder_value(illegal_category, expected): + addon = addon_factory() + abuse_report = AbuseReport.objects.create( + guid=addon.guid, + reason=AbuseReport.REASONS.ILLEGAL, + illegal_category=illegal_category, + ) + assert abuse_report.illegal_category_cinder_value == expected + + +@pytest.mark.django_db +@pytest.mark.parametrize( + 'illegal_subcategory,expected', + [ + (None, None), + (ILLEGAL_SUBCATEGORIES.OTHER, 'KEYWORD_OTHER'), + ( + ILLEGAL_SUBCATEGORIES.INSUFFICIENT_INFORMATION_ON_TRADERS, + 'KEYWORD_INSUFFICIENT_INFORMATION_ON_TRADERS', + ), + ( + ILLEGAL_SUBCATEGORIES.NONCOMPLIANCE_PRICING, + 'KEYWORD_NONCOMPLIANCE_PRICING', + ), + ( + ILLEGAL_SUBCATEGORIES.HIDDEN_ADVERTISEMENT, + 'KEYWORD_HIDDEN_ADVERTISEMENT', + ), + ( + ILLEGAL_SUBCATEGORIES.MISLEADING_INFO_GOODS_SERVICES, + 'KEYWORD_MISLEADING_INFO_GOODS_SERVICES', + ), + ( + 
ILLEGAL_SUBCATEGORIES.MISLEADING_INFO_CONSUMER_RIGHTS, + 'KEYWORD_MISLEADING_INFO_CONSUMER_RIGHTS', + ), + ( + ILLEGAL_SUBCATEGORIES.BIOMETRIC_DATA_BREACH, + 'KEYWORD_BIOMETRIC_DATA_BREACH', + ), + ( + ILLEGAL_SUBCATEGORIES.MISSING_PROCESSING_GROUND, + 'KEYWORD_MISSING_PROCESSING_GROUND', + ), + ( + ILLEGAL_SUBCATEGORIES.RIGHT_TO_BE_FORGOTTEN, + 'KEYWORD_RIGHT_TO_BE_FORGOTTEN', + ), + ( + ILLEGAL_SUBCATEGORIES.DATA_FALSIFICATION, + 'KEYWORD_DATA_FALSIFICATION', + ), + (ILLEGAL_SUBCATEGORIES.DEFAMATION, 'KEYWORD_DEFAMATION'), + (ILLEGAL_SUBCATEGORIES.DISCRIMINATION, 'KEYWORD_DISCRIMINATION'), + (ILLEGAL_SUBCATEGORIES.HATE_SPEECH, 'KEYWORD_HATE_SPEECH'), + ( + ILLEGAL_SUBCATEGORIES.DESIGN_INFRINGEMENT, + 'KEYWORD_DESIGN_INFRINGEMENT', + ), + ( + ILLEGAL_SUBCATEGORIES.GEOGRAPHIC_INDICATIONS_INFRINGEMENT, + 'KEYWORD_GEOGRAPHIC_INDICATIONS_INFRINGEMENT', + ), + ( + ILLEGAL_SUBCATEGORIES.PATENT_INFRINGEMENT, + 'KEYWORD_PATENT_INFRINGEMENT', + ), + ( + ILLEGAL_SUBCATEGORIES.TRADE_SECRET_INFRINGEMENT, + 'KEYWORD_TRADE_SECRET_INFRINGEMENT', + ), + ( + ILLEGAL_SUBCATEGORIES.VIOLATION_EU_LAW, + 'KEYWORD_VIOLATION_EU_LAW', + ), + ( + ILLEGAL_SUBCATEGORIES.VIOLATION_NATIONAL_LAW, + 'KEYWORD_VIOLATION_NATIONAL_LAW', + ), + ( + ILLEGAL_SUBCATEGORIES.MISINFORMATION_DISINFORMATION_DISINFORMATION, + 'KEYWORD_MISINFORMATION_DISINFORMATION_DISINFORMATION', + ), + ( + ILLEGAL_SUBCATEGORIES.NON_CONSENSUAL_IMAGE_SHARING, + 'KEYWORD_NON_CONSENSUAL_IMAGE_SHARING', + ), + ( + ILLEGAL_SUBCATEGORIES.NON_CONSENSUAL_ITEMS_DEEPFAKE, + 'KEYWORD_NON_CONSENSUAL_ITEMS_DEEPFAKE', + ), + ( + ILLEGAL_SUBCATEGORIES.ONLINE_BULLYING_INTIMIDATION, + 'KEYWORD_ONLINE_BULLYING_INTIMIDATION', + ), + (ILLEGAL_SUBCATEGORIES.STALKING, 'KEYWORD_STALKING'), + ( + ILLEGAL_SUBCATEGORIES.ADULT_SEXUAL_MATERIAL, + 'KEYWORD_ADULT_SEXUAL_MATERIAL', + ), + ( + ILLEGAL_SUBCATEGORIES.IMAGE_BASED_SEXUAL_ABUSE, + 'KEYWORD_IMAGE_BASED_SEXUAL_ABUSE', + ), + ( + ILLEGAL_SUBCATEGORIES.AGE_SPECIFIC_RESTRICTIONS_MINORS, + 
'KEYWORD_AGE_SPECIFIC_RESTRICTIONS_MINORS', + ), + ( + ILLEGAL_SUBCATEGORIES.CHILD_SEXUAL_ABUSE_MATERIAL, + 'KEYWORD_CHILD_SEXUAL_ABUSE_MATERIAL', + ), + ( + ILLEGAL_SUBCATEGORIES.GROOMING_SEXUAL_ENTICEMENT_MINORS, + 'KEYWORD_GROOMING_SEXUAL_ENTICEMENT_MINORS', + ), + ( + ILLEGAL_SUBCATEGORIES.ILLEGAL_ORGANIZATIONS, + 'KEYWORD_ILLEGAL_ORGANIZATIONS', + ), + ( + ILLEGAL_SUBCATEGORIES.RISK_ENVIRONMENTAL_DAMAGE, + 'KEYWORD_RISK_ENVIRONMENTAL_DAMAGE', + ), + ( + ILLEGAL_SUBCATEGORIES.RISK_PUBLIC_HEALTH, + 'KEYWORD_RISK_PUBLIC_HEALTH', + ), + ( + ILLEGAL_SUBCATEGORIES.TERRORIST_CONTENT, + 'KEYWORD_TERRORIST_CONTENT', + ), + ( + ILLEGAL_SUBCATEGORIES.INAUTHENTIC_ACCOUNTS, + 'KEYWORD_INAUTHENTIC_ACCOUNTS', + ), + ( + ILLEGAL_SUBCATEGORIES.INAUTHENTIC_LISTINGS, + 'KEYWORD_INAUTHENTIC_LISTINGS', + ), + ( + ILLEGAL_SUBCATEGORIES.INAUTHENTIC_USER_REVIEWS, + 'KEYWORD_INAUTHENTIC_USER_REVIEWS', + ), + ( + ILLEGAL_SUBCATEGORIES.IMPERSONATION_ACCOUNT_HIJACKING, + 'KEYWORD_IMPERSONATION_ACCOUNT_HIJACKING', + ), + (ILLEGAL_SUBCATEGORIES.PHISHING, 'KEYWORD_PHISHING'), + (ILLEGAL_SUBCATEGORIES.PYRAMID_SCHEMES, 'KEYWORD_PYRAMID_SCHEMES'), + ( + ILLEGAL_SUBCATEGORIES.CONTENT_PROMOTING_EATING_DISORDERS, + 'KEYWORD_CONTENT_PROMOTING_EATING_DISORDERS', + ), + (ILLEGAL_SUBCATEGORIES.SELF_MUTILATION, 'KEYWORD_SELF_MUTILATION'), + (ILLEGAL_SUBCATEGORIES.SUICIDE, 'KEYWORD_SUICIDE'), + ( + ILLEGAL_SUBCATEGORIES.PROHIBITED_PRODUCTS, + 'KEYWORD_PROHIBITED_PRODUCTS', + ), + (ILLEGAL_SUBCATEGORIES.UNSAFE_PRODUCTS, 'KEYWORD_UNSAFE_PRODUCTS'), + ( + ILLEGAL_SUBCATEGORIES.COORDINATED_HARM, + 'KEYWORD_COORDINATED_HARM', + ), + ( + ILLEGAL_SUBCATEGORIES.GENDER_BASED_VIOLENCE, + 'KEYWORD_GENDER_BASED_VIOLENCE', + ), + ( + ILLEGAL_SUBCATEGORIES.HUMAN_EXPLOITATION, + 'KEYWORD_HUMAN_EXPLOITATION', + ), + ( + ILLEGAL_SUBCATEGORIES.HUMAN_TRAFFICKING, + 'KEYWORD_HUMAN_TRAFFICKING', + ), + ( + ILLEGAL_SUBCATEGORIES.INCITEMENT_VIOLENCE_HATRED, + 'KEYWORD_INCITEMENT_VIOLENCE_HATRED', + ), + ], +) +def 
test_illegal_subcategory_cinder_value(illegal_subcategory, expected): + addon = addon_factory() + abuse_report = AbuseReport.objects.create( + guid=addon.guid, + reason=AbuseReport.REASONS.ILLEGAL, + illegal_subcategory=illegal_subcategory, + ) + assert abuse_report.illegal_subcategory_cinder_value == expected diff --git a/src/olympia/abuse/tests/test_serializers.py b/src/olympia/abuse/tests/test_serializers.py index 22cbeab4e5a9..918bf8e449b4 100644 --- a/src/olympia/abuse/tests/test_serializers.py +++ b/src/olympia/abuse/tests/test_serializers.py @@ -16,6 +16,7 @@ ) from olympia.accounts.serializers import BaseUserSerializer from olympia.amo.tests import TestCase, addon_factory, collection_factory, user_factory +from olympia.constants.abuse import ILLEGAL_CATEGORIES, ILLEGAL_SUBCATEGORIES from olympia.ratings.models import Rating @@ -61,6 +62,8 @@ def test_output_with_view_and_addon_object(self): 'reason': None, 'report_entry_point': None, 'location': None, + 'illegal_category': None, + 'illegal_subcategory': None, } def test_guid_report_addon_exists_doesnt_matter(self): @@ -91,6 +94,8 @@ def test_guid_report_addon_exists_doesnt_matter(self): 'reason': None, 'report_entry_point': None, 'location': None, + 'illegal_category': None, + 'illegal_subcategory': None, } def test_guid_report(self): @@ -120,6 +125,8 @@ def test_guid_report(self): 'reason': None, 'report_entry_point': None, 'location': None, + 'illegal_category': None, + 'illegal_subcategory': None, } def test_guid_report_to_internal_value_with_some_fancy_parameters(self): @@ -270,6 +277,8 @@ def test_user_report(self): 'message': 'bad stuff', 'lang': None, 'reason': None, + 'illegal_category': None, + 'illegal_subcategory': None, } @@ -284,7 +293,11 @@ def test_rating_report(self): body='evil rating', addon=addon, user=user, rating=1 ) report = AbuseReport( - rating=rating, message='bad stuff', reason=AbuseReport.REASONS.ILLEGAL + rating=rating, + message='bad stuff', + reason=AbuseReport.REASONS.ILLEGAL, 
+ illegal_category=ILLEGAL_CATEGORIES.ANIMAL_WELFARE, + illegal_subcategory=ILLEGAL_SUBCATEGORIES.OTHER, ) request = RequestFactory().get('/') request.user = AnonymousUser() @@ -305,6 +318,8 @@ def test_rating_report(self): 'reason': 'illegal', 'message': 'bad stuff', 'lang': None, + 'illegal_category': 'animal_welfare', + 'illegal_subcategory': 'other', } @@ -338,4 +353,6 @@ def test_collection_report(self): 'reason': 'feedback_spam', 'message': 'this is some spammy stûff', 'lang': None, + 'illegal_category': None, + 'illegal_subcategory': None, } diff --git a/src/olympia/abuse/tests/test_tasks.py b/src/olympia/abuse/tests/test_tasks.py index d9bdc0e3b06f..6b1f9153ef0e 100644 --- a/src/olympia/abuse/tests/test_tasks.py +++ b/src/olympia/abuse/tests/test_tasks.py @@ -1,4 +1,5 @@ import json +import uuid from datetime import datetime from unittest import mock @@ -13,26 +14,33 @@ from olympia.abuse.tasks import flag_high_abuse_reports_addons_according_to_review_tier from olympia.activity.models import ActivityLog from olympia.amo.tests import TestCase, addon_factory, days_ago, user_factory +from olympia.constants.abuse import ( + DECISION_ACTIONS, + ILLEGAL_CATEGORIES, + ILLEGAL_SUBCATEGORIES, +) from olympia.constants.reviewers import EXTRA_REVIEW_TARGET_PER_DAY_CONFIG_KEY from olympia.files.models import File from olympia.reviewers.models import NeedsHumanReview, ReviewActionReason, UsageTier from olympia.versions.models import Version from olympia.zadmin.models import set_config -from ..models import AbuseReport, CinderJob, CinderPolicy +from ..models import AbuseReport, CinderJob, CinderPolicy, ContentDecision from ..tasks import ( appeal_to_cinder, + handle_escalate_action, + notify_addon_decision_to_cinder, report_to_cinder, resolve_job_in_cinder, sync_cinder_policies, ) -def addon_factory_with_abuse_reports(*args, **kwargs): - abuse_reports_count = kwargs.pop('abuse_reports_count') - addon = addon_factory(*args, **kwargs) +def 
addon_factory_with_abuse_reports(*, abuse_reports_count, **kwargs): + abuse_kwargs = kwargs.pop('abuse_reports_kwargs', {}) + addon = addon_factory(**kwargs) for _x in range(0, abuse_reports_count): - AbuseReport.objects.create(guid=addon.guid) + AbuseReport.objects.create(guid=addon.guid, **abuse_kwargs) return addon @@ -121,6 +129,22 @@ def test_flag_high_abuse_reports_addons_according_to_review_tier(): ).current_version, is_active=True, ).version.addon, + # only has reports that are individually actionable, so ignored + addon_factory_with_abuse_reports( + name='B tier, but all dsa reasons', + average_daily_users=200, + abuse_reports_count=2, + abuse_reports_kwargs={ + 'reason': AbuseReport.REASONS.HATEFUL_VIOLENT_DECEPTIVE + }, + ), + # Would be above the threshold, but has one report that is individually + # actionable so just below + addon_factory_with_abuse_reports( + name='A tier, but one report a dsa reason, for a listed version', + average_daily_users=250, + abuse_reports_count=3, + ), # Belongs to B tier but the last abuse report that would make its total # above threshold is deleted, and it has another old one that does not # count (see below). 
@@ -130,8 +154,14 @@ def test_flag_high_abuse_reports_addons_according_to_review_tier(): abuse_reports_count=2, ), ] - AbuseReport.objects.filter(guid=not_flagged[-1].guid).latest('pk').delete() - AbuseReport.objects.create(guid=not_flagged[-1].guid, created=days_ago(15)) + with_deleted_report = not_flagged[-1] + AbuseReport.objects.filter(guid=with_deleted_report.guid).latest('pk').delete() + AbuseReport.objects.create(guid=with_deleted_report.guid, created=days_ago(15)) + with_dsa_report = not_flagged[-2] + AbuseReport.objects.filter(guid=with_dsa_report.guid).latest('pk').update( + addon_version=with_dsa_report.current_version.version, + reason=AbuseReport.REASONS.HATEFUL_VIOLENT_DECEPTIVE, + ) flagged = [ addon_factory_with_abuse_reports( @@ -165,7 +195,7 @@ def test_flag_high_abuse_reports_addons_according_to_review_tier(): assert ( addon.versions.latest('pk') .needshumanreview_set.filter( - reason=NeedsHumanReview.REASON_ABUSE_REPORTS_THRESHOLD, is_active=True + reason=NeedsHumanReview.REASONS.ABUSE_REPORTS_THRESHOLD, is_active=True ) .count() == 0 @@ -175,7 +205,7 @@ def test_flag_high_abuse_reports_addons_according_to_review_tier(): version = addon.versions.latest('pk') assert ( version.needshumanreview_set.filter( - reason=NeedsHumanReview.REASON_ABUSE_REPORTS_THRESHOLD, is_active=True + reason=NeedsHumanReview.REASONS.ABUSE_REPORTS_THRESHOLD, is_active=True ).count() == 1 ), f'Addon {addon} should have been flagged' @@ -201,7 +231,11 @@ def test_flag_high_abuse_reports_addons_according_to_review_tier(): def test_addon_report_to_cinder(statsd_incr_mock): addon = addon_factory() abuse_report = AbuseReport.objects.create( - guid=addon.guid, reason=AbuseReport.REASONS.ILLEGAL, message='This is bad' + guid=addon.guid, + reason=AbuseReport.REASONS.ILLEGAL, + message='This is bad', + illegal_category=ILLEGAL_CATEGORIES.OTHER, + illegal_subcategory=ILLEGAL_SUBCATEGORIES.OTHER, ) assert not CinderJob.objects.exists() responses.add( @@ -229,6 +263,8 @@ def 
test_addon_report_to_cinder(statsd_incr_mock): 'or contains content that ' 'violates the law', 'considers_illegal': True, + 'illegal_category': 'STATEMENT_CATEGORY_OTHER', + 'illegal_subcategory': 'KEYWORD_OTHER', }, 'entity_type': 'amo_report', } @@ -254,7 +290,7 @@ def test_addon_report_to_cinder(statsd_incr_mock): 'name': str(addon.name), 'release_notes': '', 'privacy_policy': '', - 'promoted_badge': '', + 'promoted': '', 'slug': addon.slug, 'summary': str(addon.summary), 'support_email': None, @@ -280,7 +316,11 @@ def test_addon_report_to_cinder(statsd_incr_mock): def test_addon_report_to_cinder_exception(statsd_incr_mock): addon = addon_factory() abuse_report = AbuseReport.objects.create( - guid=addon.guid, reason=AbuseReport.REASONS.ILLEGAL, message='This is bad' + guid=addon.guid, + reason=AbuseReport.REASONS.ILLEGAL, + message='This is bad', + illegal_category=ILLEGAL_CATEGORIES.OTHER, + illegal_subcategory=ILLEGAL_SUBCATEGORIES.OTHER, ) assert not CinderJob.objects.exists() responses.add( @@ -312,6 +352,8 @@ def test_addon_report_to_cinder_different_locale(): reason=AbuseReport.REASONS.ILLEGAL, message='This is bad', application_locale='fr', + illegal_category=ILLEGAL_CATEGORIES.OTHER, + illegal_subcategory=ILLEGAL_SUBCATEGORIES.OTHER, ) assert not CinderJob.objects.exists() responses.add( @@ -338,6 +380,8 @@ def test_addon_report_to_cinder_different_locale(): 'or contains content that ' 'violates the law', 'considers_illegal': True, + 'illegal_category': 'STATEMENT_CATEGORY_OTHER', + 'illegal_subcategory': 'KEYWORD_OTHER', }, 'entity_type': 'amo_report', } @@ -363,7 +407,7 @@ def test_addon_report_to_cinder_different_locale(): 'name': str(names['fr']), 'release_notes': '', 'privacy_policy': '', - 'promoted_badge': '', + 'promoted': '', 'slug': addon.slug, 'summary': str(addon.summary), 'support_email': None, @@ -386,9 +430,12 @@ def test_addon_report_to_cinder_different_locale(): def test_addon_appeal_to_cinder_reporter(statsd_incr_mock): addon = 
addon_factory() cinder_job = CinderJob.objects.create( - decision_id='4815162342-abc', - decision_date=datetime.now(), - decision_action=CinderJob.DECISION_ACTIONS.AMO_APPROVE, + decision=ContentDecision.objects.create( + cinder_id='4815162342-abc', + action=DECISION_ACTIONS.AMO_APPROVE, + addon=addon, + action_date=datetime.now(), + ) ) abuse_report = AbuseReport.objects.create( guid=addon.guid, @@ -396,6 +443,8 @@ def test_addon_appeal_to_cinder_reporter(statsd_incr_mock): reporter_name='It is me', reporter_email='m@r.io', cinder_job=cinder_job, + illegal_category=ILLEGAL_CATEGORIES.OTHER, + illegal_subcategory=ILLEGAL_SUBCATEGORIES.OTHER, ) responses.add( responses.POST, @@ -406,7 +455,7 @@ def test_addon_appeal_to_cinder_reporter(statsd_incr_mock): statsd_incr_mock.reset_mock() appeal_to_cinder.delay( - decision_id=cinder_job.decision_id, + decision_cinder_id=cinder_job.decision.cinder_id, abuse_report_id=abuse_report.id, appeal_text='I appeal', user_id=None, @@ -423,17 +472,16 @@ def test_addon_appeal_to_cinder_reporter(statsd_incr_mock): }, 'appealer_entity_type': 'amo_unauthenticated_reporter', 'decision_to_appeal_id': '4815162342-abc', - 'queue_slug': 'amo-env-listings', + 'queue_slug': 'amo-escalations', 'reasoning': 'I appeal', } cinder_job.reload() - assert cinder_job.appeal_job_id - appeal_job = cinder_job.appeal_job + assert cinder_job.decision.appeal_job_id + appeal_job = cinder_job.decision.appeal_job assert appeal_job.job_id == '2432615184-xyz' abuse_report.reload() - assert appeal_job == abuse_report.appellant_job - assert abuse_report.reporter_appeal_date + assert abuse_report.cinderappeal.decision == cinder_job.decision assert statsd_incr_mock.call_count == 1 assert statsd_incr_mock.call_args[0] == ('abuse.tasks.appeal_to_cinder.success',) @@ -444,9 +492,12 @@ def test_addon_appeal_to_cinder_reporter(statsd_incr_mock): def test_addon_appeal_to_cinder_reporter_exception(statsd_incr_mock): addon = addon_factory() cinder_job = 
CinderJob.objects.create( - decision_id='4815162342-abc', - decision_date=datetime.now(), - decision_action=CinderJob.DECISION_ACTIONS.AMO_APPROVE, + decision=ContentDecision.objects.create( + cinder_id='4815162342-abc', + action=DECISION_ACTIONS.AMO_APPROVE, + addon=addon, + action_date=datetime.now(), + ) ) abuse_report = AbuseReport.objects.create( guid=addon.guid, @@ -454,6 +505,8 @@ def test_addon_appeal_to_cinder_reporter_exception(statsd_incr_mock): reporter_name='It is me', reporter_email='m@r.io', cinder_job=cinder_job, + illegal_category=ILLEGAL_CATEGORIES.OTHER, + illegal_subcategory=ILLEGAL_SUBCATEGORIES.OTHER, ) responses.add( responses.POST, @@ -465,7 +518,7 @@ def test_addon_appeal_to_cinder_reporter_exception(statsd_incr_mock): with pytest.raises(ConnectionError): appeal_to_cinder.delay( - decision_id=cinder_job.decision_id, + decision_cinder_id=cinder_job.decision.cinder_id, abuse_report_id=abuse_report.id, appeal_text='I appeal', user_id=None, @@ -481,15 +534,20 @@ def test_addon_appeal_to_cinder_authenticated_reporter(): user = user_factory(fxa_id='fake-fxa-id') addon = addon_factory() cinder_job = CinderJob.objects.create( - decision_id='4815162342-abc', - decision_date=datetime.now(), - decision_action=CinderJob.DECISION_ACTIONS.AMO_APPROVE, + decision=ContentDecision.objects.create( + cinder_id='4815162342-abc', + action=DECISION_ACTIONS.AMO_APPROVE, + addon=addon, + action_date=datetime.now(), + ) ) abuse_report = AbuseReport.objects.create( guid=addon.guid, reason=AbuseReport.REASONS.ILLEGAL, cinder_job=cinder_job, reporter=user, + illegal_category=ILLEGAL_CATEGORIES.OTHER, + illegal_subcategory=ILLEGAL_SUBCATEGORIES.OTHER, ) responses.add( responses.POST, @@ -499,7 +557,7 @@ def test_addon_appeal_to_cinder_authenticated_reporter(): ) appeal_to_cinder.delay( - decision_id=cinder_job.decision_id, + decision_cinder_id=cinder_job.decision.cinder_id, abuse_report_id=abuse_report.pk, appeal_text='I appeal', user_id=user.pk, @@ -518,32 +576,28 @@ 
def test_addon_appeal_to_cinder_authenticated_reporter(): }, 'appealer_entity_type': 'amo_user', 'decision_to_appeal_id': '4815162342-abc', - 'queue_slug': 'amo-env-listings', + 'queue_slug': 'amo-escalations', 'reasoning': 'I appeal', } cinder_job.reload() - assert cinder_job.appeal_job_id - appeal_job = cinder_job.appeal_job + assert cinder_job.decision.appeal_job_id + appeal_job = cinder_job.decision.appeal_job assert appeal_job.job_id == '2432615184-xyz' abuse_report.reload() - assert abuse_report.appellant_job == appeal_job - assert abuse_report.reporter_appeal_date + assert abuse_report.cinderappeal.decision == cinder_job.decision @pytest.mark.django_db def test_addon_appeal_to_cinder_authenticated_author(): user = user_factory(fxa_id='fake-fxa-id') + user_factory(pk=settings.TASK_USER_ID) addon = addon_factory(users=[user]) - cinder_job = CinderJob.objects.create( - decision_id='4815162342-abc', - decision_date=datetime.now(), - decision_action=CinderJob.DECISION_ACTIONS.AMO_DISABLE_ADDON, - ) - abuse_report = AbuseReport.objects.create( - guid=addon.guid, - reason=AbuseReport.REASONS.ILLEGAL, - cinder_job=cinder_job, + decision = ContentDecision.objects.create( + cinder_id='4815162342-abc', + action=DECISION_ACTIONS.AMO_DISABLE_ADDON, + addon=addon, + action_date=datetime.now(), ) responses.add( responses.POST, @@ -553,7 +607,7 @@ def test_addon_appeal_to_cinder_authenticated_author(): ) appeal_to_cinder.delay( - decision_id=cinder_job.decision_id, + decision_cinder_id=decision.cinder_id, abuse_report_id=None, appeal_text='I appeal', user_id=user.pk, @@ -572,17 +626,14 @@ def test_addon_appeal_to_cinder_authenticated_author(): }, 'appealer_entity_type': 'amo_user', 'decision_to_appeal_id': '4815162342-abc', - 'queue_slug': 'amo-env-listings', + 'queue_slug': 'amo-env-addon-infringement', 'reasoning': 'I appeal', } - cinder_job.reload() - assert cinder_job.appeal_job_id - appeal_job = cinder_job.appeal_job + decision.reload() + assert decision.appeal_job_id 
+ appeal_job = decision.appeal_job assert appeal_job.job_id == '2432615184-xyz' - abuse_report.reload() - assert abuse_report.reporter_appeal_date is None - assert abuse_report.appellant_job_id is None @pytest.mark.django_db @@ -597,32 +648,23 @@ def test_resolve_job_in_cinder(statsd_incr_mock): ) responses.add( responses.POST, - f'{settings.CINDER_SERVER_URL}create_decision', - json={'uuid': '123'}, + f'{settings.CINDER_SERVER_URL}jobs/{cinder_job.job_id}/decision', + json={'uuid': uuid.uuid4().hex}, status=201, ) - responses.add( - responses.POST, - f'{settings.CINDER_SERVER_URL}jobs/{cinder_job.job_id}/cancel', - json={'external_id': cinder_job.job_id}, - status=200, - ) statsd_incr_mock.reset_mock() - review_action_reason = ReviewActionReason.objects.create( - cinder_policy=CinderPolicy.objects.create(name='policy', uuid='12345678') - ) + cinder_policy = CinderPolicy.objects.create(name='policy', uuid='12345678') log_entry = ActivityLog.objects.create( amo.LOG.FORCE_DISABLE, abuse_report.target, abuse_report.target.current_version, - review_action_reason, - details={'comments': 'some review text'}, + cinder_policy, + details={'comments': 'some review text', 'cinder_action': 'AMO_DISABLE_ADDON'}, user=user_factory(), ) resolve_job_in_cinder.delay( cinder_job_id=cinder_job.id, - decision=CinderJob.DECISION_ACTIONS.AMO_DISABLE_ADDON, log_entry_id=log_entry.id, ) @@ -630,9 +672,8 @@ def test_resolve_job_in_cinder(statsd_incr_mock): request_body = json.loads(request.body) assert request_body['policy_uuids'] == ['12345678'] assert request_body['reasoning'] == 'some review text' - assert request_body['entity']['id'] == str(abuse_report.target.id) cinder_job.reload() - assert cinder_job.decision_action == CinderJob.DECISION_ACTIONS.AMO_DISABLE_ADDON + assert cinder_job.decision.action == DECISION_ACTIONS.AMO_DISABLE_ADDON assert statsd_incr_mock.call_count == 1 assert statsd_incr_mock.call_args[0] == ( @@ -652,18 +693,16 @@ def 
test_resolve_job_in_cinder_exception(statsd_incr_mock): ) responses.add( responses.POST, - f'{settings.CINDER_SERVER_URL}create_decision', - json={'uuid': '123'}, + f'{settings.CINDER_SERVER_URL}jobs/999/decision', + json={'uuid': uuid.uuid4().hex}, status=500, ) log_entry = ActivityLog.objects.create( amo.LOG.FORCE_DISABLE, abuse_report.target, abuse_report.target.current_version, - ReviewActionReason.objects.create( - cinder_policy=CinderPolicy.objects.create(name='policy', uuid='12345678') - ), - details={'comments': 'some review text'}, + cinder_policy=CinderPolicy.objects.create(name='policy', uuid='12345678'), + details={'comments': 'some review text', 'cinder_action': 'AMO_DISABLE_ADDON'}, user=user_factory(), ) statsd_incr_mock.reset_mock() @@ -671,7 +710,6 @@ def test_resolve_job_in_cinder_exception(statsd_incr_mock): with pytest.raises(ConnectionError): resolve_job_in_cinder.delay( cinder_job_id=cinder_job.id, - decision=CinderJob.DECISION_ACTIONS.AMO_DISABLE_ADDON, log_entry_id=log_entry.id, ) @@ -682,6 +720,75 @@ def test_resolve_job_in_cinder_exception(statsd_incr_mock): @pytest.mark.django_db +@mock.patch('olympia.abuse.tasks.statsd.incr') +def test_notify_addon_decision_to_cinder(statsd_incr_mock): + responses.add( + responses.POST, + f'{settings.CINDER_SERVER_URL}create_decision', + json={'uuid': uuid.uuid4().hex}, + status=201, + ) + addon = addon_factory() + statsd_incr_mock.reset_mock() + cinder_policy = CinderPolicy.objects.create(name='policy', uuid='12345678') + log_entry = ActivityLog.objects.create( + amo.LOG.FORCE_DISABLE, + addon, + addon.current_version, + cinder_policy, + details={'comments': 'some review text', 'cinder_action': 'AMO_DISABLE_ADDON'}, + user=user_factory(), + ) + + notify_addon_decision_to_cinder.delay( + log_entry_id=log_entry.id, + addon_id=addon.id, + ) + + request = responses.calls[0].request + request_body = json.loads(request.body) + assert request_body['policy_uuids'] == ['12345678'] + assert 
request_body['reasoning'] == 'some review text' + assert request_body['entity']['id'] == str(addon.id) + assert ContentDecision.objects.get().action == DECISION_ACTIONS.AMO_DISABLE_ADDON + + assert statsd_incr_mock.call_count == 1 + assert statsd_incr_mock.call_args[0] == ( + 'abuse.tasks.notify_addon_decision_to_cinder.success', + ) + + +@pytest.mark.django_db +@mock.patch('olympia.abuse.tasks.statsd.incr') +def test_notify_addon_decision_to_cinder_exception(statsd_incr_mock): + addon = addon_factory() + responses.add( + responses.POST, + f'{settings.CINDER_SERVER_URL}create_decision', + json={'uuid': uuid.uuid4().hex}, + status=500, + ) + log_entry = ActivityLog.objects.create( + amo.LOG.FORCE_DISABLE, + addon, + addon.current_version, + cinder_policy=CinderPolicy.objects.create(name='policy', uuid='12345678'), + details={'comments': 'some review text', 'cinder_action': 'AMO_DISABLE_ADDON'}, + user=user_factory(), + ) + statsd_incr_mock.reset_mock() + + with pytest.raises(ConnectionError): + notify_addon_decision_to_cinder.delay( + log_entry_id=log_entry.id, addon_id=addon.id + ) + + assert statsd_incr_mock.call_count == 1 + assert statsd_incr_mock.call_args[0] == ( + 'abuse.tasks.notify_addon_decision_to_cinder.failure', + ) + + class TestSyncCinderPolicies(TestCase): def setUp(self): self.url = f'{settings.CINDER_SERVER_URL}policies' @@ -753,3 +860,222 @@ def test_sync_cinder_policies_handles_nested_policies(self): CinderPolicy.objects.get(id=nested_policy.parent_id).uuid == self.policy['uuid'] ) + + def test_sync_cinder_policies_name_too_long(self): + policies = [ + { + 'name': 'a' * 300, + 'description': 'Some description', + 'uuid': 'some-uuid', + 'nested_policies': [], + }, + { + 'name': 'Another Pôlicy', + 'description': 'Another description', + 'uuid': 'another-uuid', + 'nested_policies': [], + }, + ] + responses.add(responses.GET, self.url, json=policies, status=200) + + sync_cinder_policies() + + new_policy = CinderPolicy.objects.get(uuid='some-uuid') + 
assert new_policy.name == 'a' * 255 # Truncated. + assert new_policy.text == 'Some description' + + another_policy = CinderPolicy.objects.get(uuid='another-uuid') + assert another_policy.name == 'Another Pôlicy' + assert another_policy.text == 'Another description' + + def test_old_unused_policies_deleted_and_used_kept_and_marked_as_orphaned(self): + CinderPolicy.objects.create( + uuid='old-uuid', + name='old', + text='Old policy with no decisions or reasons', + ) + old_policy_with_decision = CinderPolicy.objects.create( + uuid='old-uuid-decision', + name='old-decision', + text='Old policy, but with linked decision', + ) + old_policy_with_decision.update(modified=days_ago(1)) + decision = ContentDecision.objects.create( + action=DECISION_ACTIONS.AMO_APPROVE, addon=addon_factory() + ) + decision.policies.add(old_policy_with_decision) + old_policy_with_reason = CinderPolicy.objects.create( + uuid='old-uuid-reason', + name='old-reason', + text='Old policy, but with linked ReviewActionReason', + ) + old_policy_with_reason.update(modified=days_ago(1)) + ReviewActionReason.objects.create( + name='a review reason', + cinder_policy=old_policy_with_reason, + canned_response='.', + ) + existing_policy_exposed = CinderPolicy.objects.create( + uuid='existing-uuid-exposed', + name='Existing policy', + text='Existing policy with no decision or ReviewActionReason but exposed', + expose_in_reviewer_tools=True, + ) + updated_policy = CinderPolicy.objects.create( + uuid=self.policy['uuid'], + name=self.policy['name'], + text='Existing policy with no decision or ReviewActionReason but updated', + ) + responses.add(responses.GET, self.url, json=[self.policy], status=200) + + sync_cinder_policies() + assert CinderPolicy.objects.filter(uuid='test-uuid').exists() + assert updated_policy.reload().present_in_cinder is True + + assert CinderPolicy.objects.filter(uuid='old-uuid-decision').exists() + assert old_policy_with_decision.reload().present_in_cinder is False + + assert 
CinderPolicy.objects.filter(uuid='old-uuid-reason').exists() + assert old_policy_with_reason.reload().present_in_cinder is False + + assert CinderPolicy.objects.filter(uuid='existing-uuid-exposed').exists() + assert existing_policy_exposed.reload().present_in_cinder is False + + assert not CinderPolicy.objects.filter(uuid='old-uuid').exists() + + def test_nested_policies_considered_for_deletion_and_marking_orphans(self): + self.policy = { + 'uuid': 'test-uuid', + 'name': 'test-name', + 'description': 'test-description', + 'nested_policies': [ + { + 'uuid': 'test-uuid-nested', + 'name': 'test-name-nested', + 'description': 'test-description-nested', + 'nested_policies': [], + } + ], + } + updated_nested_policy = CinderPolicy.objects.create( + uuid='test-uuid-nested', + name='test-name-nested', + text='nested policy synced from cinder', + ) + responses.add(responses.GET, self.url, json=[self.policy], status=200) + + sync_cinder_policies() + assert CinderPolicy.objects.filter(uuid='test-uuid-nested').exists() + assert updated_nested_policy.reload().present_in_cinder is True + + def test_only_amo_labelled_policies_added(self): + data = [ + { + 'uuid': uuid.uuid4().hex, + 'name': 'MoSo labeled', + 'description': 'SKIPPED', + 'labels': [{'name': 'MoSo'}], + 'nested_policies': [ + { + 'uuid': uuid.uuid4().hex, + 'name': 'Nested under MoSo, No label', + 'description': 'SKIPPED', + }, + { + 'uuid': uuid.uuid4().hex, + 'name': 'Nested under MoSo, AMO labeled', + 'description': 'SKIPPED', + 'labels': [{'name': 'AMO'}], + }, + ], + }, + { + 'uuid': uuid.uuid4().hex, + 'name': 'No label', + 'description': 'ADDED', + 'nested_policies': [ + { + 'uuid': uuid.uuid4().hex, + 'name': 'Nested under no label, no label', + 'description': 'ADDED', + }, + { + 'uuid': uuid.uuid4().hex, + 'name': 'Nested under no label, MoSo labeled', + 'description': 'SKIPPED', + 'labels': [{'name': 'MoSo'}], + }, + ], + }, + { + 'uuid': uuid.uuid4().hex, + 'name': 'AMO labeled', + 'description': 'ADDED', 
+ 'labels': [{'name': 'AMO'}], + 'nested_policies': [ + { + 'uuid': uuid.uuid4().hex, + 'name': 'Nested under AMO label', + 'description': 'ADDED', + }, + { + 'uuid': uuid.uuid4().hex, + 'name': 'Nested under AMO label, MoSo labeled', + 'description': 'SKIPPED', + 'labels': [{'name': 'MoSo'}], + }, + ], + }, + { + 'uuid': uuid.uuid4().hex, + 'name': 'AMO & MoSo labeled', + 'description': 'ADDED', + 'labels': [{'name': 'AMO'}, {'name': 'MoSo'}], + 'nested_policies': [ + { + 'uuid': uuid.uuid4().hex, + 'name': 'Nested under two labels', + 'description': 'ADDED', + }, + { + 'uuid': uuid.uuid4().hex, + 'name': 'Nested under two label, MoSo labeled', + 'description': 'SKIPPED', + 'labels': [{'name': 'MoSo'}], + }, + ], + }, + ] + responses.add(responses.GET, self.url, json=data, status=200) + + sync_cinder_policies() + assert CinderPolicy.objects.count() == 6 + assert CinderPolicy.objects.filter(text='ADDED').count() == 6 + + +@pytest.mark.django_db +def test_handle_escalate_action(): + addon = addon_factory() + decision = ContentDecision.objects.create( + action=DECISION_ACTIONS.AMO_ESCALATE_ADDON, addon=addon, notes='blah' + ) + job = CinderJob.objects.create(job_id='1234', target_addon=addon, decision=decision) + report = AbuseReport.objects.create(guid=addon.guid, cinder_job=job) + assert not job.resolvable_in_reviewer_tools + responses.add( + responses.POST, + f'{settings.CINDER_SERVER_URL}create_report', + json={'job_id': '5678'}, + status=201, + ) + + handle_escalate_action(job_pk=job.pk) + + job.reload() + new_job = job.forwarded_to_job + assert new_job.job_id == '5678' + assert list(new_job.forwarded_from_jobs.all()) == [job] + assert new_job.resolvable_in_reviewer_tools + assert new_job.target_addon == addon + assert report.reload().cinder_job == new_job + assert json.loads(responses.calls[0].request.body)['reasoning'] == 'blah' diff --git a/src/olympia/abuse/tests/test_utils.py b/src/olympia/abuse/tests/test_utils.py deleted file mode 100644 index 
76d0365c071f..000000000000 --- a/src/olympia/abuse/tests/test_utils.py +++ /dev/null @@ -1,767 +0,0 @@ -from datetime import datetime - -from django.conf import settings -from django.core import mail -from django.urls import reverse - -from olympia import amo -from olympia.activity.models import ActivityLog -from olympia.amo.tests import ( - TestCase, - addon_factory, - collection_factory, - user_factory, - version_factory, -) -from olympia.core import set_user -from olympia.ratings.models import Rating -from olympia.reviewers.models import NeedsHumanReview - -from ..models import AbuseReport, CinderJob, CinderPolicy -from ..utils import ( - CinderActionApproveInitialDecision, - CinderActionBanUser, - CinderActionDeleteCollection, - CinderActionDeleteRating, - CinderActionDisableAddon, - CinderActionEscalateAddon, - CinderActionOverrideApprove, - CinderActionRejectVersion, - CinderActionTargetAppealApprove, - CinderActionTargetAppealRemovalAffirmation, -) - - -class BaseTestCinderAction: - def setUp(self): - self.cinder_job = CinderJob.objects.create( - job_id='1234', - decision_id='ab89', - decision_date=datetime.now(), - decision_notes='extra notes', - ) - self.cinder_job.policies.add( - CinderPolicy.objects.create( - uuid='1234', - name='Bad policy', - text='This is bad thing', - parent=CinderPolicy.objects.create( - uuid='p4r3nt', - name='Parent Policy', - text='Parent policy text', - ), - ) - ) - self.abuse_report_no_auth = AbuseReport.objects.create( - reason=AbuseReport.REASONS.HATEFUL_VIOLENT_DECEPTIVE, - guid='1234', - cinder_job=self.cinder_job, - reporter_email='email@domain.com', - ) - self.abuse_report_auth = AbuseReport.objects.create( - reason=AbuseReport.REASONS.HATEFUL_VIOLENT_DECEPTIVE, - guid='1234', - cinder_job=self.cinder_job, - reporter=user_factory(), - ) - self.task_user = user_factory(pk=settings.TASK_USER_ID) - # It's the webhook's responsability to do this before calling the - # action. We need it for the ActivityLog creation to work. 
- set_user(self.task_user) - - def _test_reporter_takedown_email(self, subject): - assert mail.outbox[0].to == ['email@domain.com'] - assert mail.outbox[1].to == [self.abuse_report_auth.reporter.email] - assert mail.outbox[0].subject == ( - subject + f' [ref:ab89/{self.abuse_report_no_auth.id}]' - ) - assert mail.outbox[1].subject == ( - subject + f' [ref:ab89/{self.abuse_report_auth.id}]' - ) - assert 'have therefore removed' in mail.outbox[0].body - assert 'have therefore removed' in mail.outbox[1].body - assert 'appeal' not in mail.outbox[0].body - assert 'appeal' not in mail.outbox[1].body - assert f'[ref:ab89/{self.abuse_report_no_auth.id}]' in mail.outbox[0].body - assert f'[ref:ab89/{self.abuse_report_auth.id}]' in mail.outbox[1].body - assert 'After reviewing' not in mail.outbox[0].body - assert 'After reviewing' not in mail.outbox[0].body - assert '"' not in mail.outbox[0].body - assert '"' not in mail.outbox[1].body - assert '<b>' not in mail.outbox[0].body - assert '<b>' not in mail.outbox[1].body - - def _test_reporter_ignore_email(self, subject): - assert mail.outbox[0].to == ['email@domain.com'] - assert mail.outbox[1].to == [self.abuse_report_auth.reporter.email] - assert mail.outbox[0].subject == ( - subject + f' [ref:ab89/{self.abuse_report_no_auth.id}]' - ) - assert mail.outbox[1].subject == ( - subject + f' [ref:ab89/{self.abuse_report_auth.id}]' - ) - assert 'does not violate Mozilla' in mail.outbox[0].body - assert 'does not violate Mozilla' in mail.outbox[1].body - assert 'was correct' not in mail.outbox[0].body - assert ( - reverse( - 'abuse.appeal_reporter', - kwargs={ - 'abuse_report_id': self.abuse_report_no_auth.id, - 'decision_id': self.cinder_job.decision_id, - }, - ) - in mail.outbox[0].body - ) - assert ( - reverse( - 'abuse.appeal_reporter', - kwargs={ - 'abuse_report_id': self.abuse_report_auth.id, - 'decision_id': self.cinder_job.decision_id, - }, - ) - in mail.outbox[1].body - ) - assert 
f'[ref:ab89/{self.abuse_report_no_auth.id}]' in mail.outbox[0].body - assert f'[ref:ab89/{self.abuse_report_auth.id}]' in mail.outbox[1].body - assert '"' not in mail.outbox[0].body - assert '"' not in mail.outbox[1].body - assert '<b>' not in mail.outbox[0].body - assert '<b>' not in mail.outbox[1].body - - def _test_reporter_appeal_takedown_email(self, subject): - assert mail.outbox[0].to == [self.abuse_report_auth.reporter.email] - assert mail.outbox[0].subject == ( - subject + f' [ref:ab89/{self.abuse_report_auth.id}]' - ) - assert 'have removed' in mail.outbox[0].body - assert 'right to appeal' not in mail.outbox[0].body - assert f'[ref:ab89/{self.abuse_report_auth.id}]' in mail.outbox[0].body - assert 'After reviewing' in mail.outbox[0].body - assert '"' not in mail.outbox[0].body - assert '<b>' not in mail.outbox[0].body - - def _test_reporter_ignore_appeal_email(self, subject): - assert mail.outbox[0].to == [self.abuse_report_auth.reporter.email] - assert mail.outbox[0].subject == ( - subject + f' [ref:ab89/{self.abuse_report_auth.id}]' - ) - assert 'does not violate Mozilla' in mail.outbox[0].body - assert 'right to appeal' not in mail.outbox[0].body - assert 'was correct' in mail.outbox[0].body - assert f'[ref:ab89/{self.abuse_report_auth.id}]' in mail.outbox[0].body - assert '"' not in mail.outbox[0].body - assert '<b>' not in mail.outbox[0].body - - def _check_owner_email(self, mail_item, subject, snippet): - user = getattr(self, 'user', getattr(self, 'author', None)) - assert mail_item.to == [user.email] - assert mail_item.subject == subject + ' [ref:ab89]' - assert snippet in mail_item.body - assert '[ref:ab89]' in mail_item.body - assert '"' not in mail_item.body - assert '<b>' not in mail_item.body - - def _test_owner_takedown_email(self, subject, snippet): - mail_item = mail.outbox[-1] - self._check_owner_email(mail_item, subject, snippet) - assert 'right to appeal' in mail_item.body - assert ( - reverse( - 'abuse.appeal_author', - kwargs={ - 
'decision_id': self.cinder_job.decision_id, - }, - ) - in mail_item.body - ) - assert ( - '\n - Parent Policy, specifically Bad policy: This is bad thing\n' - in mail_item.body - ) - assert '"' not in mail_item.body - assert '<b>' not in mail_item.body - - def _test_owner_affirmation_email( - self, subject, additional_reasoning='extra notes.' - ): - mail_item = mail.outbox[0] - self._check_owner_email(mail_item, subject, 'was correct') - assert 'right to appeal' not in mail_item.body - if additional_reasoning: - assert additional_reasoning in mail_item.body - else: - assert ' was correct. Based on that determination' in mail_item.body - - def _test_owner_restore_email(self, subject): - mail_item = mail.outbox[0] - assert len(mail.outbox) == 1 - self._check_owner_email(mail_item, subject, 'we have restored') - assert 'right to appeal' not in mail_item.body - - def _test_approve_appeal_or_override(CinderActionClass): - raise NotImplementedError - - def test_approve_appeal_success(self): - self._test_approve_appeal_or_override(CinderActionTargetAppealApprove) - assert 'After reviewing your appeal' in mail.outbox[0].body - - def test_approve_override(self): - self._test_approve_appeal_or_override(CinderActionOverrideApprove) - assert 'After reviewing your appeal' not in mail.outbox[0].body - - def test_reporter_ignore_report(self): - subject = self._test_reporter_ignore_initial_or_appeal() - assert len(mail.outbox) == 2 - self._test_reporter_ignore_email(subject) - - def test_reporter_ignore_appeal(self): - original_job = CinderJob.objects.create(job_id='original') - self.cinder_job.appealed_jobs.add(original_job) - self.abuse_report_no_auth.update(cinder_job=original_job) - self.abuse_report_auth.update( - cinder_job=original_job, appellant_job=self.cinder_job - ) - self.cinder_job.reload() - subject = self._test_reporter_ignore_initial_or_appeal() - assert len(mail.outbox) == 1 # only abuse_report_auth reporter - self._test_reporter_ignore_appeal_email(subject) - - 
def test_owner_ignore_report_email(self): - # This isn't called by cinder actions, because - # CinderActionApproveInitialDecision.process_action returns None, - # but could be triggered by reviewer actions - subject = self._test_reporter_ignore_initial_or_appeal(send_owner_email=True) - assert len(mail.outbox) == 3 - self._test_reporter_ignore_email(subject) - assert 'has been approved' in mail.outbox[-1].body - - -class TestCinderActionUser(BaseTestCinderAction, TestCase): - ActionClass = CinderActionBanUser - - def setUp(self): - super().setUp() - self.user = user_factory(display_name='Bad Hørse') - self.cinder_job.abusereport_set.update(user=self.user, guid=None) - - def _test_ban_user(self): - self.cinder_job.update(decision_action=CinderJob.DECISION_ACTIONS.AMO_BAN_USER) - action = self.ActionClass(self.cinder_job) - assert action.process_action() - - self.user.reload() - self.assertCloseToNow(self.user.banned) - assert ActivityLog.objects.count() == 1 - activity = ActivityLog.objects.get(action=amo.LOG.ADMIN_USER_BANNED.id) - assert activity.arguments == [self.user] - assert activity.user == self.task_user - assert len(mail.outbox) == 0 - - action.notify_reporters() - action.notify_owners() - subject = f'Mozilla Add-ons: {self.user.name}' - self._test_owner_takedown_email(subject, 'has been suspended') - return subject - - def test_ban_user(self): - subject = self._test_ban_user() - assert len(mail.outbox) == 3 - self._test_reporter_takedown_email(subject) - - def test_ban_user_after_reporter_appeal(self): - original_job = CinderJob.objects.create(job_id='original') - self.cinder_job.appealed_jobs.add(original_job) - self.abuse_report_no_auth.update(cinder_job=original_job) - self.abuse_report_auth.update( - cinder_job=original_job, appellant_job=self.cinder_job - ) - subject = self._test_ban_user() - assert len(mail.outbox) == 2 - self._test_reporter_appeal_takedown_email(subject) - - def _test_reporter_ignore_initial_or_appeal(self, *, 
send_owner_email=None): - self.cinder_job.update(decision_action=CinderJob.DECISION_ACTIONS.AMO_APPROVE) - action = CinderActionApproveInitialDecision(self.cinder_job) - assert action.process_action() is None - - self.user.reload() - assert not self.user.banned - assert len(mail.outbox) == 0 - - action.notify_reporters() - if send_owner_email: - action.notify_owners() - return f'Mozilla Add-ons: {self.user.name}' - - def _test_approve_appeal_or_override(self, CinderActionClass): - self.cinder_job.update(decision_action=CinderJob.DECISION_ACTIONS.AMO_APPROVE) - self.user.update(banned=self.days_ago(1), deleted=True) - action = CinderActionClass(self.cinder_job) - assert action.process_action() - - self.user.reload() - assert not self.user.banned - assert ActivityLog.objects.count() == 1 - activity = ActivityLog.objects.get(action=amo.LOG.ADMIN_USER_UNBAN.id) - assert activity.arguments == [self.user] - assert activity.user == self.task_user - assert len(mail.outbox) == 0 - - action.notify_reporters() - action.notify_owners() - self._test_owner_restore_email(f'Mozilla Add-ons: {self.user.name}') - - def test_target_appeal_decline(self): - self.user.update(banned=self.days_ago(1), deleted=True) - action = CinderActionTargetAppealRemovalAffirmation(self.cinder_job) - assert action.process_action() - - self.user.reload() - assert self.user.banned - assert ActivityLog.objects.count() == 0 - assert len(mail.outbox) == 0 - - action.notify_reporters() - action.notify_owners() - self._test_owner_affirmation_email(f'Mozilla Add-ons: {self.user.name}') - - -class TestCinderActionAddon(BaseTestCinderAction, TestCase): - ActionClass = CinderActionDisableAddon - - def setUp(self): - super().setUp() - self.author = user_factory() - self.addon = addon_factory(users=(self.author,), name='Bad Addön') - ActivityLog.objects.all().delete() - self.cinder_job.abusereport_set.update(guid=self.addon.guid) - - def _test_disable_addon(self): - self.cinder_job.update( - 
decision_action=CinderJob.DECISION_ACTIONS.AMO_DISABLE_ADDON - ) - action = self.ActionClass(self.cinder_job) - assert action.process_action() - - assert self.addon.reload().status == amo.STATUS_DISABLED - assert ActivityLog.objects.count() == 1 - activity = ActivityLog.objects.get(action=amo.LOG.FORCE_DISABLE.id) - assert activity.arguments == [self.addon] - assert activity.user == self.task_user - assert len(mail.outbox) == 0 - - action.notify_reporters() - action.notify_owners() - subject = f'Mozilla Add-ons: {self.addon.name}' - self._test_owner_takedown_email(subject, 'permanently disabled') - assert f'Your Extension {self.addon.name}' in mail.outbox[-1].body - return subject - - def test_disable_addon(self): - subject = self._test_disable_addon() - assert len(mail.outbox) == 3 - self._test_reporter_takedown_email(subject) - - def test_disable_addon_after_reporter_appeal(self): - original_job = CinderJob.objects.create(job_id='original') - self.cinder_job.appealed_jobs.add(original_job) - self.abuse_report_no_auth.update(cinder_job=original_job) - self.abuse_report_auth.update( - cinder_job=original_job, appellant_job=self.cinder_job - ) - subject = self._test_disable_addon() - assert len(mail.outbox) == 2 - self._test_reporter_appeal_takedown_email(subject) - - def _test_approve_appeal_or_override(self, CinderActionClass): - self.addon.update(status=amo.STATUS_DISABLED) - ActivityLog.objects.all().delete() - action = CinderActionClass(self.cinder_job) - assert action.process_action() - - assert self.addon.reload().status == amo.STATUS_APPROVED - assert ActivityLog.objects.count() == 1 - activity = ActivityLog.objects.get(action=amo.LOG.FORCE_ENABLE.id) - assert activity.arguments == [self.addon] - assert activity.user == self.task_user - assert len(mail.outbox) == 0 - - action.notify_reporters() - action.notify_owners() - self._test_owner_restore_email(f'Mozilla Add-ons: {self.addon.name}') - - def _test_reporter_ignore_initial_or_appeal(self, *, 
send_owner_email=None): - self.cinder_job.update(decision_action=CinderJob.DECISION_ACTIONS.AMO_APPROVE) - action = CinderActionApproveInitialDecision(self.cinder_job) - assert action.process_action() is None - - assert self.addon.reload().status == amo.STATUS_APPROVED - assert ActivityLog.objects.count() == 0 - assert len(mail.outbox) == 0 - action.notify_reporters() - if send_owner_email: - action.notify_owners() - return f'Mozilla Add-ons: {self.addon.name}' - - def test_escalate_addon(self): - listed_version = self.addon.current_version - listed_version.file.update(is_signed=True) - unlisted_version = version_factory( - addon=self.addon, channel=amo.CHANNEL_UNLISTED, file_kw={'is_signed': True} - ) - ActivityLog.objects.all().delete() - action = CinderActionEscalateAddon(self.cinder_job) - assert action.process_action() is None - - assert self.addon.reload().status == amo.STATUS_APPROVED - assert ( - listed_version.reload().needshumanreview_set.get().reason - == NeedsHumanReview.REASON_CINDER_ESCALATION - ) - assert ( - unlisted_version.reload().needshumanreview_set.get().reason - == NeedsHumanReview.REASON_CINDER_ESCALATION - ) - assert ActivityLog.objects.count() == 2 - activity = ActivityLog.objects.filter( - action=amo.LOG.NEEDS_HUMAN_REVIEW_AUTOMATIC.id - ).order_by('pk')[0] - assert activity.arguments == [listed_version] - assert activity.user == self.task_user - activity = ActivityLog.objects.filter( - action=amo.LOG.NEEDS_HUMAN_REVIEW_AUTOMATIC.id - ).order_by('pk')[1] - assert activity.arguments == [unlisted_version] - assert activity.user == self.task_user - - # but if we have a version specified, we flag that version - NeedsHumanReview.objects.all().delete() - other_version = version_factory( - addon=self.addon, file_kw={'status': amo.STATUS_DISABLED, 'is_signed': True} - ) - assert not other_version.due_date - ActivityLog.objects.all().delete() - self.cinder_job.abusereport_set.update(addon_version=other_version.version) - assert 
action.process_action() is None - assert not listed_version.reload().needshumanreview_set.exists() - assert not unlisted_version.reload().needshumanreview_set.exists() - other_version.reload() - assert other_version.due_date - assert ( - other_version.needshumanreview_set.get().reason - == NeedsHumanReview.REASON_CINDER_ESCALATION - ) - assert ActivityLog.objects.count() == 1 - activity = ActivityLog.objects.get( - action=amo.LOG.NEEDS_HUMAN_REVIEW_AUTOMATIC.id - ) - assert activity.arguments == [other_version] - assert activity.user == self.task_user - action.notify_reporters() - assert len(mail.outbox) == 0 - - def test_target_appeal_decline(self): - self.addon.update(status=amo.STATUS_DISABLED) - ActivityLog.objects.all().delete() - action = CinderActionTargetAppealRemovalAffirmation(self.cinder_job) - assert action.process_action() - - self.addon.reload() - assert self.addon.status == amo.STATUS_DISABLED - assert ActivityLog.objects.count() == 0 - assert len(mail.outbox) == 0 - - action.notify_reporters() - action.notify_owners() - self._test_owner_affirmation_email(f'Mozilla Add-ons: {self.addon.name}') - - def test_target_appeal_decline_no_additional_reasoning(self): - self.addon.update(status=amo.STATUS_DISABLED) - ActivityLog.objects.all().delete() - self.cinder_job.update(decision_notes='') - action = CinderActionTargetAppealRemovalAffirmation(self.cinder_job) - assert action.process_action() - - self.addon.reload() - assert self.addon.status == amo.STATUS_DISABLED - assert ActivityLog.objects.count() == 0 - assert len(mail.outbox) == 0 - - action.notify_reporters() - action.notify_owners() - self._test_owner_affirmation_email( - f'Mozilla Add-ons: {self.addon.name}', additional_reasoning=None - ) - - def test_notify_owners_with_manual_policy_block(self): - self.cinder_job.update( - decision_action=CinderJob.DECISION_ACTIONS.AMO_DISABLE_ADDON - ) - self.ActionClass(self.cinder_job).notify_owners( - policy_text='some other policy justification' - ) - 
mail_item = mail.outbox[0] - self._check_owner_email( - mail_item, f'Mozilla Add-ons: {self.addon.name}', 'permanently disabled' - ) - assert 'right to appeal' in mail_item.body - assert ( - reverse( - 'abuse.appeal_author', - kwargs={ - 'decision_id': self.cinder_job.decision_id, - }, - ) - in mail_item.body - ) - assert 'Bad policy: This is bad thing' not in mail_item.body - assert 'some other policy justification' in mail_item.body - - def _test_reject_version(self): - self.cinder_job.update( - decision_action=CinderJob.DECISION_ACTIONS.AMO_REJECT_VERSION_ADDON - ) - action = CinderActionRejectVersion(self.cinder_job) - action.affected_versions = [ - version_factory(addon=self.addon, version='2.3'), - version_factory(addon=self.addon, version='3.45'), - ] - - # note: process_action isn't implemented for this action currently. - - subject = f'Mozilla Add-ons: {self.addon.name}' - - assert len(mail.outbox) == 0 - action.notify_reporters() - action.notify_owners() - mail_item = mail.outbox[-1] - self._check_owner_email(mail_item, subject, 'have been disabled') - - assert 'right to appeal' in mail_item.body - assert ( - reverse( - 'abuse.appeal_author', - kwargs={ - 'decision_id': self.cinder_job.decision_id, - }, - ) - in mail_item.body - ) - assert 'Bad policy: This is bad thing' in mail_item.body - assert 'Affected versions: 2.3, 3.45' in mail_item.body - return subject - - def test_reject_version(self): - subject = self._test_reject_version() - assert len(mail.outbox) == 3 - self._test_reporter_takedown_email(subject) - - def test_reject_version_after_reporter_appeal(self): - original_job = CinderJob.objects.create(job_id='original') - self.cinder_job.appealed_jobs.add(original_job) - self.abuse_report_no_auth.update(cinder_job=original_job) - self.abuse_report_auth.update( - cinder_job=original_job, appellant_job=self.cinder_job - ) - subject = self._test_reject_version() - assert len(mail.outbox) == 2 - self._test_reporter_appeal_takedown_email(subject) - - 
-class TestCinderActionCollection(BaseTestCinderAction, TestCase): - ActionClass = CinderActionDeleteCollection - - def setUp(self): - super().setUp() - self.author = user_factory() - self.collection = collection_factory( - author=self.author, - name='Bad Collectiôn', - slug='bad-collection', - ) - self.cinder_job.abusereport_set.update(collection=self.collection, guid=None) - - def _test_delete_collection(self): - self.cinder_job.update( - decision_action=CinderJob.DECISION_ACTIONS.AMO_DELETE_COLLECTION - ) - action = self.ActionClass(self.cinder_job) - assert action.process_action() - - assert self.collection.reload() - assert self.collection.deleted - assert self.collection.slug - assert ActivityLog.objects.count() == 1 - activity = ActivityLog.objects.get(action=amo.LOG.COLLECTION_DELETED.id) - assert activity.arguments == [self.collection] - assert activity.user == self.task_user - assert len(mail.outbox) == 0 - - action.notify_reporters() - action.notify_owners() - subject = f'Mozilla Add-ons: {self.collection.name}' - self._test_owner_takedown_email(subject, 'permanently removed') - return subject - - def test_delete_collection(self): - subject = self._test_delete_collection() - assert len(mail.outbox) == 3 - self._test_reporter_takedown_email(subject) - - def test_delete_collection_after_reporter_appeal(self): - original_job = CinderJob.objects.create(job_id='original') - self.cinder_job.appealed_jobs.add(original_job) - self.abuse_report_no_auth.update(cinder_job=original_job) - self.abuse_report_auth.update( - cinder_job=original_job, appellant_job=self.cinder_job - ) - subject = self._test_delete_collection() - assert len(mail.outbox) == 2 - self._test_reporter_appeal_takedown_email(subject) - - def _test_reporter_ignore_initial_or_appeal(self, *, send_owner_email=None): - self.cinder_job.update(decision_action=CinderJob.DECISION_ACTIONS.AMO_APPROVE) - action = CinderActionApproveInitialDecision(self.cinder_job) - assert action.process_action() is None - 
- assert self.collection.reload() - assert not self.collection.deleted - assert ActivityLog.objects.count() == 0 - assert len(mail.outbox) == 0 - - action.notify_reporters() - if send_owner_email: - action.notify_owners() - return f'Mozilla Add-ons: {self.collection.name}' - - def _test_approve_appeal_or_override(self, CinderActionClass): - self.collection.update(deleted=True) - action = CinderActionClass(self.cinder_job) - assert action.process_action() - - assert self.collection.reload() - assert not self.collection.deleted - assert ActivityLog.objects.count() == 1 - activity = ActivityLog.objects.get(action=amo.LOG.COLLECTION_UNDELETED.id) - assert activity.arguments == [self.collection] - assert activity.user == self.task_user - assert len(mail.outbox) == 0 - - action.notify_reporters() - action.notify_owners() - self._test_owner_restore_email(f'Mozilla Add-ons: {self.collection.name}') - - def test_target_appeal_decline(self): - self.collection.update(deleted=True) - action = CinderActionTargetAppealRemovalAffirmation(self.cinder_job) - assert action.process_action() - - self.collection.reload() - assert self.collection.deleted - assert ActivityLog.objects.count() == 0 - assert len(mail.outbox) == 0 - - action.notify_reporters() - action.notify_owners() - self._test_owner_affirmation_email(f'Mozilla Add-ons: {self.collection.name}') - - -class TestCinderActionRating(BaseTestCinderAction, TestCase): - ActionClass = CinderActionDeleteRating - - def setUp(self): - super().setUp() - self.author = user_factory() - self.rating = Rating.objects.create( - addon=addon_factory(), user=self.author, body='Saying something bad' - ) - self.cinder_job.abusereport_set.update(rating=self.rating, guid=None) - ActivityLog.objects.all().delete() - - def _test_delete_rating(self): - self.cinder_job.update( - decision_action=CinderJob.DECISION_ACTIONS.AMO_DELETE_RATING - ) - action = self.ActionClass(self.cinder_job) - assert action.process_action() - - assert 
self.rating.reload().deleted - assert ActivityLog.objects.count() == 1 - activity = ActivityLog.objects.get(action=amo.LOG.DELETE_RATING.id) - assert activity.arguments == [self.rating.addon, self.rating] - assert activity.user == self.task_user - assert len(mail.outbox) == 0 - - action.notify_reporters() - action.notify_owners() - subject = f'Mozilla Add-ons: "Saying ..." for {self.rating.addon.name}' - self._test_owner_takedown_email(subject, 'permanently removed') - return subject - - def test_delete_rating(self): - subject = self._test_delete_rating() - assert len(mail.outbox) == 3 - self._test_reporter_takedown_email(subject) - - def test_delete_rating_after_reporter_appeal(self): - original_job = CinderJob.objects.create(job_id='original') - self.cinder_job.appealed_jobs.add(original_job) - self.abuse_report_no_auth.update(cinder_job=original_job) - self.abuse_report_auth.update( - cinder_job=original_job, appellant_job=self.cinder_job - ) - subject = self._test_delete_rating() - assert len(mail.outbox) == 2 - self._test_reporter_appeal_takedown_email(subject) - - def _test_reporter_ignore_initial_or_appeal(self, *, send_owner_email=None): - self.cinder_job.update(decision_action=CinderJob.DECISION_ACTIONS.AMO_APPROVE) - action = CinderActionApproveInitialDecision(self.cinder_job) - assert action.process_action() is None - - assert not self.rating.reload().deleted - assert ActivityLog.objects.count() == 0 - assert len(mail.outbox) == 0 - - action.notify_reporters() - if send_owner_email: - action.notify_owners() - return f'Mozilla Add-ons: "Saying ..." 
for {self.rating.addon.name}' - - def _test_approve_appeal_or_override(self, CinderActionClass): - self.rating.delete() - ActivityLog.objects.all().delete() - action = CinderActionClass(self.cinder_job) - assert action.process_action() - - assert not self.rating.reload().deleted - assert ActivityLog.objects.count() == 1 - activity = ActivityLog.objects.get(action=amo.LOG.UNDELETE_RATING.id) - assert activity.arguments == [self.rating, self.rating.addon] - assert activity.user == self.task_user - assert len(mail.outbox) == 0 - - action.notify_reporters() - action.notify_owners() - self._test_owner_restore_email( - f'Mozilla Add-ons: "Saying ..." for {self.rating.addon.name}' - ) - - def test_target_appeal_decline(self): - self.rating.delete() - ActivityLog.objects.all().delete() - action = CinderActionTargetAppealRemovalAffirmation(self.cinder_job) - assert action.process_action() - - self.rating.reload() - assert self.rating.deleted - assert ActivityLog.objects.count() == 0 - assert len(mail.outbox) == 0 - - action.notify_reporters() - action.notify_owners() - self._test_owner_affirmation_email( - f'Mozilla Add-ons: "Saying ..." 
for {self.rating.addon.name}' - ) diff --git a/src/olympia/abuse/tests/test_views.py b/src/olympia/abuse/tests/test_views.py index e8b71dcd4a0f..c9af425db4d1 100644 --- a/src/olympia/abuse/tests/test_views.py +++ b/src/olympia/abuse/tests/test_views.py @@ -6,6 +6,7 @@ from unittest import mock from django.conf import settings +from django.core import mail from django.test.utils import override_settings from django.urls import reverse from django.utils.encoding import force_bytes @@ -27,10 +28,17 @@ reverse_ns, user_factory, ) +from olympia.constants.abuse import DECISION_ACTIONS from olympia.core import get_user, set_user from olympia.ratings.models import Rating -from ..models import AbuseReport, CinderJob +from ..actions import ( + ContentActionApproveNoAction, + ContentActionDisableAddon, + ContentActionTargetAppealApprove, + ContentActionTargetAppealRemovalAffirmation, +) +from ..models import AbuseReport, CinderAppeal, CinderJob, ContentDecision from ..views import CinderInboundPermission, cinder_webhook, filter_enforcement_actions @@ -531,34 +539,215 @@ def test_abuse_report_with_invalid_data(self): assert response.status_code == 400 assert json.loads(response.content) == {'addon_install_method': 'Invalid value'} - def _setup_reportable_reason(self, reason): - addon = addon_factory(guid='@badman') + def _setup_reportable_reason(self, reason, *, addon=None, extra_data=None): + addon = addon or addon_factory(guid='@badman') response = self.client.post( self.url, - data={'addon': addon.guid, 'reason': reason}, + data={'addon': addon.guid, 'reason': reason, **(extra_data or {})}, REMOTE_ADDR='123.45.67.89', HTTP_X_FORWARDED_FOR=f'123.45.67.89, {get_random_ip()}', ) assert response.status_code == 201, response.content @mock.patch('olympia.abuse.tasks.report_to_cinder.delay') - @override_switch('enable-cinder-reporting', active=True) + @override_switch('dsa-job-technical-processing', active=True) def test_reportable_reason_calls_cinder_task(self, task_mock): 
self._setup_reportable_reason('hateful_violent_deceptive') task_mock.assert_called() @mock.patch('olympia.abuse.tasks.report_to_cinder.delay') - @override_switch('enable-cinder-reporting', active=False) + @override_switch('dsa-job-technical-processing', active=True) + def test_reportable_reason_does_call_if_version_listed(self, task_mock): + addon = addon_factory(guid='@badman') + self._setup_reportable_reason( + 'hateful_violent_deceptive', + addon=addon, + extra_data={'addon_version': addon.current_version.version}, + ) + task_mock.assert_called() + + @mock.patch('olympia.abuse.tasks.report_to_cinder.delay') + @override_switch('dsa-job-technical-processing', active=True) + def test_reportable_reason_does_not_call_if_version_unlisted(self, task_mock): + addon = addon_factory(guid='@badman') + version = addon.current_version + self.make_addon_unlisted(addon) + self._setup_reportable_reason( + 'hateful_violent_deceptive', + addon=addon, + extra_data={'addon_version': version.version}, + ) + task_mock.assert_not_called() + + @mock.patch('olympia.abuse.tasks.report_to_cinder.delay') + @override_switch('dsa-job-technical-processing', active=False) def test_reportable_reason_does_not_call_cinder_with_waffle_off(self, task_mock): self._setup_reportable_reason('hateful_violent_deceptive') task_mock.assert_not_called() @mock.patch('olympia.abuse.tasks.report_to_cinder.delay') - @override_switch('enable-cinder-reporting', active=True) + @override_switch('dsa-job-technical-processing', active=True) def test_not_reportable_reason_does_not_call_cinder_task(self, task_mock): self._setup_reportable_reason('feedback_spam') task_mock.assert_not_called() + def test_reject_illegal_category_when_reason_is_not_illegal(self): + addon = addon_factory(guid='@badman') + response = self.client.post( + self.url, + data={ + 'addon': addon.guid, + 'reason': 'feedback_spam', + 'illegal_category': 'other', + }, + ) + assert response.status_code == 400 + assert json.loads(response.content) == { 
+ 'illegal_category': [ + 'This value must be omitted or set to "null" when the `reason` is ' + 'not "illegal".' + ], + } + + def test_reject_illegal_subcategory_when_reason_is_not_illegal(self): + addon = addon_factory(guid='@badman') + response = self.client.post( + self.url, + data={ + 'addon': addon.guid, + 'reason': 'feedback_spam', + 'illegal_subcategory': 'other', + }, + ) + assert response.status_code == 400 + assert json.loads(response.content) == { + 'illegal_subcategory': [ + 'This value must be omitted or set to "null" when the `reason` is ' + 'not "illegal".' + ], + } + + def test_illegal_category_required_when_reason_is_illegal(self): + addon = addon_factory(guid='@badman') + response = self.client.post( + self.url, data={'addon': addon.guid, 'reason': 'illegal'} + ) + assert response.status_code == 400 + assert json.loads(response.content) == { + 'illegal_category': ['This field is required.'] + } + + def test_illegal_category_cannot_be_blank_when_reason_is_illegal(self): + addon = addon_factory(guid='@badman') + response = self.client.post( + self.url, + data={ + 'addon': addon.guid, + 'reason': 'illegal', + 'illegal_category': '', + }, + ) + assert response.status_code == 400 + assert json.loads(response.content) == { + 'illegal_category': ['"" is not a valid choice.'] + } + + def test_illegal_category_cannot_be_null_when_reason_is_illegal(self): + addon = addon_factory(guid='@badman') + response = self.client.post( + self.url, + data={ + 'addon': addon.guid, + 'reason': 'illegal', + 'illegal_category': None, + }, + ) + assert response.status_code == 400 + assert json.loads(response.content) == { + 'illegal_category': ['This field may not be null.'] + } + + def test_illegal_subcategory_required_when_reason_is_illegal(self): + addon = addon_factory(guid='@badman') + response = self.client.post( + self.url, + data={ + 'addon': addon.guid, + 'reason': 'illegal', + 'illegal_category': 'animal_welfare', + }, + ) + assert response.status_code == 400 + 
assert json.loads(response.content) == { + 'illegal_subcategory': ['This field is required.'] + } + + def test_illegal_subcategory_cannot_be_blank_when_reason_is_illegal(self): + addon = addon_factory(guid='@badman') + response = self.client.post( + self.url, + data={ + 'addon': addon.guid, + 'reason': 'illegal', + 'illegal_category': 'animal_welfare', + 'illegal_subcategory': '', + }, + ) + assert response.status_code == 400 + assert json.loads(response.content) == { + 'illegal_subcategory': ['"" is not a valid choice.'] + } + + def test_illegal_subcategory_cannot_be_null_when_reason_is_illegal(self): + addon = addon_factory(guid='@badman') + response = self.client.post( + self.url, + data={ + 'addon': addon.guid, + 'reason': 'illegal', + 'illegal_category': 'animal_welfare', + 'illegal_subcategory': None, + }, + ) + assert response.status_code == 400 + assert json.loads(response.content) == { + 'illegal_subcategory': ['This field may not be null.'] + } + + def test_illegal_subcategory_depends_on_category(self): + addon = addon_factory(guid='@badman') + response = self.client.post( + self.url, + data={ + 'addon': addon.guid, + 'reason': 'illegal', + 'illegal_category': 'animal_welfare', + 'illegal_subcategory': 'biometric_data_breach', + }, + ) + assert response.status_code == 400 + assert json.loads(response.content) == { + 'illegal_subcategory': [ + 'This value cannot be used in combination with the supplied ' + '`illegal_category`.' 
+ ] + } + + def test_addon_signature_unknown(self): + addon = addon_factory() + response = self.client.post( + self.url, + data={ + 'addon': str(addon.id), + 'message': 'abuse!', + 'addon_signature': 'unknown: undefined', + }, + ) + assert response.status_code == 201 + + report = AbuseReport.objects.get(guid=addon.guid) + assert report.addon_signature == AbuseReport.ADDON_SIGNATURES.UNKNOWN + class TestAddonAbuseViewSetLoggedOut(AddonAbuseViewSetTestBase, TestCase): def check_reporter(self, report): @@ -675,7 +864,13 @@ def test_message_required_missing(self): def test_message_not_required_with_content_reason(self): user = user_factory() response = self.client.post( - self.url, data={'user': str(user.username), 'reason': 'illegal'} + self.url, + data={ + 'user': str(user.username), + 'reason': 'illegal', + 'illegal_category': 'animal_welfare', + 'illegal_subcategory': 'other', + }, ) assert response.status_code == 201 @@ -696,7 +891,13 @@ def test_non_content_reason_not_accepted(self): response = self.client.post( self.url, - data={'user': str(user.username), 'reason': 'illegal', 'message': 'Fine!'}, + data={ + 'user': str(user.username), + 'reason': 'illegal', + 'message': 'Fine!', + 'illegal_category': 'animal_welfare', + 'illegal_subcategory': 'other', + }, ) assert response.status_code == 201 @@ -750,19 +951,19 @@ def _setup_reportable_reason(self, reason, message=None): assert response.status_code == 201, response.content @mock.patch('olympia.abuse.tasks.report_to_cinder.delay') - @override_switch('enable-cinder-reporting', active=True) + @override_switch('dsa-job-technical-processing', active=True) def test_reportable_reason_calls_cinder_task(self, task_mock): self._setup_reportable_reason('hateful_violent_deceptive') task_mock.assert_called() @mock.patch('olympia.abuse.tasks.report_to_cinder.delay') - @override_switch('enable-cinder-reporting', active=False) + @override_switch('dsa-job-technical-processing', active=False) def 
test_reportable_reason_does_not_call_cinder_with_waffle_off(self, task_mock): self._setup_reportable_reason('hateful_violent_deceptive') task_mock.assert_not_called() @mock.patch('olympia.abuse.tasks.report_to_cinder.delay') - @override_switch('enable-cinder-reporting', active=True) + @override_switch('dsa-job-technical-processing', active=True) def test_no_reason_does_not_call_cinder_task(self, task_mock): self._setup_reportable_reason(None, 'Some message since no reason is provided') task_mock.assert_not_called() @@ -784,6 +985,148 @@ def test_lang(self): self.check_report(report, f'Abuse Report for User {user.pk}') assert report.application_locale == 'Lô-käl' + def test_reject_illegal_category_when_reason_is_not_illegal(self): + user = user_factory() + response = self.client.post( + self.url, + data={ + 'user': str(user.username), + 'reason': 'feedback_spam', + 'illegal_category': 'other', + }, + ) + assert response.status_code == 400 + assert json.loads(response.content) == { + 'illegal_category': [ + 'This value must be omitted or set to "null" when the `reason` is ' + 'not "illegal".' + ], + } + + def test_reject_illegal_subcategory_when_reason_is_not_illegal(self): + user = user_factory() + response = self.client.post( + self.url, + data={ + 'user': str(user.username), + 'reason': 'feedback_spam', + 'illegal_subcategory': 'other', + }, + ) + assert response.status_code == 400 + assert json.loads(response.content) == { + 'illegal_subcategory': [ + 'This value must be omitted or set to "null" when the `reason` is ' + 'not "illegal".' 
+ ], + } + + def test_illegal_category_required_when_reason_is_illegal(self): + user = user_factory() + response = self.client.post( + self.url, data={'user': str(user.username), 'reason': 'illegal'} + ) + assert response.status_code == 400 + assert json.loads(response.content) == { + 'illegal_category': ['This field is required.'] + } + + def test_illegal_category_cannot_be_blank_when_reason_is_illegal(self): + user = user_factory() + response = self.client.post( + self.url, + data={ + 'user': str(user.username), + 'reason': 'illegal', + 'illegal_category': '', + }, + ) + assert response.status_code == 400 + assert json.loads(response.content) == { + 'illegal_category': ['"" is not a valid choice.'] + } + + def test_illegal_category_cannot_be_null_when_reason_is_illegal(self): + user = user_factory() + response = self.client.post( + self.url, + data={ + 'user': str(user.username), + 'reason': 'illegal', + 'illegal_category': None, + }, + ) + assert response.status_code == 400 + assert json.loads(response.content) == { + 'illegal_category': ['This field may not be null.'] + } + + def test_illegal_subcategory_required_when_reason_is_illegal(self): + user = user_factory() + response = self.client.post( + self.url, + data={ + 'user': str(user.username), + 'reason': 'illegal', + 'illegal_category': 'animal_welfare', + }, + ) + assert response.status_code == 400 + assert json.loads(response.content) == { + 'illegal_subcategory': ['This field is required.'] + } + + def test_illegal_subcategory_cannot_be_blank_when_reason_is_illegal(self): + user = user_factory() + response = self.client.post( + self.url, + data={ + 'user': str(user.username), + 'reason': 'illegal', + 'illegal_category': 'animal_welfare', + 'illegal_subcategory': '', + }, + ) + assert response.status_code == 400 + assert json.loads(response.content) == { + 'illegal_subcategory': ['"" is not a valid choice.'] + } + + def test_illegal_subcategory_cannot_be_null_when_reason_is_illegal(self): + user = 
user_factory() + response = self.client.post( + self.url, + data={ + 'user': str(user.username), + 'reason': 'illegal', + 'illegal_category': 'animal_welfare', + 'illegal_subcategory': None, + }, + ) + assert response.status_code == 400 + assert json.loads(response.content) == { + 'illegal_subcategory': ['This field may not be null.'] + } + + def test_illegal_subcategory_depends_on_category(self): + user = user_factory() + response = self.client.post( + self.url, + data={ + 'user': str(user.username), + 'reason': 'illegal', + 'illegal_category': 'animal_welfare', + 'illegal_subcategory': 'biometric_data_breach', + }, + ) + assert response.status_code == 400 + assert json.loads(response.content) == { + 'illegal_subcategory': [ + 'This value cannot be used in combination with the supplied ' + '`illegal_category`.' + ] + } + class TestUserAbuseViewSetLoggedOut(UserAbuseViewSetTestBase, TestCase): def check_reporter(self, report): @@ -830,8 +1173,10 @@ class TestCinderWebhook(TestCase): def setUp(self): self.task_user = user_factory(pk=settings.TASK_USER_ID) - def get_data(self, filename='cinder_webhook.json'): - webhook_file = os.path.join(TESTS_DIR, 'assets', filename) + def get_data(self, filename='decision.json'): + webhook_file = os.path.join( + TESTS_DIR, 'assets', 'cinder_webhook_payloads', filename + ) with open(webhook_file) as file_object: return json.loads(file_object.read()) @@ -903,57 +1248,50 @@ def test_filter_enforcement_actions(self): 'not-amo-action', # not a valid action at all ] assert filter_enforcement_actions(actions_from_json, cinder_job) == [ - CinderJob.DECISION_ACTIONS.AMO_DISABLE_ADDON, + DECISION_ACTIONS.AMO_DISABLE_ADDON, # no AMO_BAN_USER action because not a user target - CinderJob.DECISION_ACTIONS.AMO_APPROVE, + DECISION_ACTIONS.AMO_APPROVE, ] # check with another content type too abuse_report.update(guid=None, user=user_factory()) assert filter_enforcement_actions(actions_from_json, cinder_job) == [ # no AMO_DISABLE_ADDON action 
because not an add-on target - CinderJob.DECISION_ACTIONS.AMO_BAN_USER, - CinderJob.DECISION_ACTIONS.AMO_APPROVE, + DECISION_ACTIONS.AMO_BAN_USER, + DECISION_ACTIONS.AMO_APPROVE, ] - def _test_process_decision_called(self, data, *, override): + def test_process_decision_called(self, data=None): abuse_report = self._setup_reports() addon_factory(guid=abuse_report.guid) - req = self.get_request(data=data) + req = self.get_request(data=data or self.get_data()) with mock.patch.object(CinderJob, 'process_decision') as process_mock: response = cinder_webhook(req) process_mock.assert_called() process_mock.assert_called_with( - decision_id='d1f01fae-3bce-41d5-af8a-e0b4b5ceaaed', - decision_date=datetime(2023, 10, 12, 9, 8, 37, 4789), - decision_action=CinderJob.DECISION_ACTIONS.AMO_DISABLE_ADDON.value, + decision_cinder_id='d1f01fae-3bce-41d5-af8a-e0b4b5ceaaed', + decision_action=DECISION_ACTIONS.AMO_DISABLE_ADDON.value, decision_notes='some notes', policy_ids=['f73ad527-54ed-430c-86ff-80e15e2a352b'], - override=override, ) assert response.status_code == 201 assert response.data == {'amo': {'received': True, 'handled': True}} - def test_process_decision_called_not_override(self): - data = self.get_data() - return self._test_process_decision_called(data, override=False) - - def test_process_decision_called_for_override(self): - data = self.get_data() - data['payload']['source']['decision']['type'] = 'override' - return self._test_process_decision_called(data, override=True) - - def test_process_decision_called_for_appeal_confirm_approve(self): - data = self.get_data(filename='cinder_webhook_appeal_confirm_approve.json') + def test_process_decision_called_for_appeal_confirm_approve( + self, filename='reporter_appeal_confirm_approve.json' + ): + data = self.get_data(filename=filename) abuse_report = self._setup_reports() - addon_factory(guid=abuse_report.guid) + addon = addon_factory(guid=abuse_report.guid) original_cinder_job = CinderJob.objects.get() 
original_cinder_job.update( - decision_id='d1f01fae-3bce-41d5-af8a-e0b4b5ceaaed', - decision_date=datetime(2023, 10, 12, 9, 8, 37, 4789), - decision_action=CinderJob.DECISION_ACTIONS.AMO_APPROVE.value, - appeal_job=CinderJob.objects.create( - job_id='5c7c3e21-8ccd-4d2f-b3b4-429620bd7a63' + decision=ContentDecision.objects.create( + cinder_id='d1f01fae-3bce-41d5-af8a-e0b4b5ceaaed', + action=DECISION_ACTIONS.AMO_APPROVE, + appeal_job=CinderJob.objects.create( + job_id='5c7c3e21-8ccd-4d2f-b3b4-429620bd7a63' + ), + addon=addon, ), ) req = self.get_request(data=data) @@ -961,56 +1299,211 @@ def test_process_decision_called_for_appeal_confirm_approve(self): response = cinder_webhook(req) assert process_mock.call_count == 1 process_mock.assert_called_with( - decision_id='76e0006d-1a42-4ec7-9475-148bab1970f1', - decision_date=datetime(2024, 1, 12, 15, 20, 19, 226428), - decision_action=CinderJob.DECISION_ACTIONS.AMO_APPROVE.value, + decision_cinder_id='76e0006d-1a42-4ec7-9475-148bab1970f1', + decision_action=DECISION_ACTIONS.AMO_APPROVE.value, decision_notes='still no!', policy_ids=['1c5d711a-78b7-4fc2-bdef-9a33024f5e8b'], - override=False, ) assert response.status_code == 201 assert response.data == {'amo': {'received': True, 'handled': True}} - def test_process_decision_called_for_appeal_disable(self): - data = self.get_data(filename='cinder_webhook_appeal_change_to_disable.json') + def test_process_decision_called_for_appeal_confirm_approve_with_override(self): + """This is to cover the unusual case in cinder where a moderator processes an + appeal by selecting to override the decision, but chooses to approve it again. 
+ """ + self.test_process_decision_called_for_appeal_confirm_approve( + filename='reporter_appeal_change_but_still_approve.json' + ) + + def test_process_decision_called_for_appeal_change_to_disable(self): + data = self.get_data(filename='reporter_appeal_change_to_disable.json') abuse_report = self._setup_reports() - addon_factory(guid=abuse_report.guid) + addon = addon_factory(guid=abuse_report.guid) original_cinder_job = CinderJob.objects.get() original_cinder_job.update( - decision_id='d1f01fae-3bce-41d5-af8a-e0b4b5ceaaed', - decision_date=datetime(2023, 10, 12, 9, 8, 37, 4789), - decision_action=CinderJob.DECISION_ACTIONS.AMO_APPROVE.value, - appeal_job=CinderJob.objects.create( - job_id='5ab7cb33-a5ab-4dfa-9d72-4c2061ffeb08' - ), + decision=ContentDecision.objects.create( + action_date=datetime(2023, 10, 12, 9, 8, 37, 4789), + cinder_id='d1f01fae-3bce-41d5-af8a-e0b4b5ceaaed', + action=DECISION_ACTIONS.AMO_APPROVE, + appeal_job=CinderJob.objects.create( + job_id='5ab7cb33-a5ab-4dfa-9d72-4c2061ffeb08' + ), + addon=addon, + ) ) req = self.get_request(data=data) with mock.patch.object(CinderJob, 'process_decision') as process_mock: response = cinder_webhook(req) assert process_mock.call_count == 1 process_mock.assert_called_with( - decision_id='4f18b22c-6078-4934-b395-6a2e01cadf63', - decision_date=datetime(2024, 1, 12, 14, 53, 23, 438634), - decision_action=CinderJob.DECISION_ACTIONS.AMO_DISABLE_ADDON.value, + decision_cinder_id='4f18b22c-6078-4934-b395-6a2e01cadf63', + decision_action=DECISION_ACTIONS.AMO_DISABLE_ADDON.value, decision_notes="fine I'll disable it", - policy_ids=['86d7bf98-288c-4e78-9a63-3f5db96847b1'], - override=True, + policy_ids=[ + '7ea512a2-39a6-4cb6-91a0-2ed162192f7f', + 'a5c96c92-2373-4d11-b573-61b0de00d8e0', + ], ) assert response.status_code == 201 assert response.data == {'amo': {'received': True, 'handled': True}} - def test_queue_does_not_matter_non_reviewer_case(self): - data = self.get_data() - 
data['payload']['source']['job']['queue']['slug'] = 'amo-another-queue' - return self._test_process_decision_called(data, override=False) - - def test_queue_handled_reviewer_queue_ignored(self): - data = self.get_data() - data['payload']['source']['job']['queue']['slug'] = 'amo-addon-infringement' + def test_process_decision_called_for_override_to_approve(self): abuse_report = self._setup_reports() - addon_factory(guid=abuse_report.guid) - req = self.get_request(data=data) - with mock.patch.object(CinderJob, 'process_decision') as process_mock: + CinderJob.objects.get().update( + decision=ContentDecision.objects.create( + cinder_id='d1f01fae-3bce-41d5-af8a-e0b4b5ceaaed', + action=DECISION_ACTIONS.AMO_DISABLE_ADDON, + addon=addon_factory(guid=abuse_report.guid), + ), + ) + req = self.get_request( + data=self.get_data(filename='override_change_to_approve.json') + ) + with mock.patch.object(CinderJob, 'process_decision') as process_mock: + response = cinder_webhook(req) + assert process_mock.call_count == 1, response.data + process_mock.assert_called_with( + decision_cinder_id='3eacdc09-c292-4fcb-a56f-a3d45d5eefeb', + decision_action=DECISION_ACTIONS.AMO_APPROVE.value, + decision_notes='changed our mind', + policy_ids=['085f6a1c-46b6-44c2-a6ae-c3a73488aa1e'], + ) + assert response.status_code == 201 + assert response.data == {'amo': {'received': True, 'handled': True}} + + def test_process_decision_triggers_emails_when_disable_confirmed(self): + data = self.get_data(filename='target_appeal_confirm_disable.json') + abuse_report = self._setup_reports() + author = user_factory() + addon = addon_factory(guid=abuse_report.guid, users=[author]) + original_cinder_job = CinderJob.objects.get() + original_cinder_job.update( + decision=ContentDecision.objects.create( + action_date=datetime(2023, 10, 12, 9, 8, 37, 4789), + cinder_id='d1f01fae-3bce-41d5-af8a-e0b4b5ceaaed', + action=DECISION_ACTIONS.AMO_DISABLE_ADDON, + appeal_job=CinderJob.objects.create( + 
job_id='5ab7cb33-a5ab-4dfa-9d72-4c2061ffeb08' + ), + addon=addon, + ) + ) + req = self.get_request(data=data) + with mock.patch.object( + ContentActionTargetAppealRemovalAffirmation, 'process_action' + ) as process_mock: + cinder_webhook(req) + process_mock.assert_called() + assert len(mail.outbox) == 1 + assert mail.outbox[0].to == [author.email] + assert 'will not reinstate your Extension' in mail.outbox[0].body + + def test_process_decision_triggers_emails_when_disable_reverted(self): + data = self.get_data(filename='target_appeal_change_to_approve.json') + abuse_report = self._setup_reports() + author = user_factory() + addon = addon_factory(guid=abuse_report.guid, users=[author]) + original_cinder_job = CinderJob.objects.get() + original_cinder_job.update( + decision=ContentDecision.objects.create( + action_date=datetime(2023, 10, 12, 9, 8, 37, 4789), + cinder_id='d1f01fae-3bce-41d5-af8a-e0b4b5ceaaed', + action=DECISION_ACTIONS.AMO_DISABLE_ADDON, + appeal_job=CinderJob.objects.create( + job_id='5ab7cb33-a5ab-4dfa-9d72-4c2061ffeb08' + ), + addon=addon, + ) + ) + req = self.get_request(data=data) + with mock.patch.object( + ContentActionTargetAppealApprove, 'process_action' + ) as process_mock: + cinder_webhook(req) + process_mock.assert_called() + assert len(mail.outbox) == 1 + assert mail.outbox[0].to == [author.email] + assert 'we have restored your Extension' in mail.outbox[0].body + + def test_process_decision_triggers_emails_for_reporter_appeal_disable(self): + data = self.get_data(filename='reporter_appeal_change_to_disable.json') + abuse_report = self._setup_reports() + author = user_factory() + addon = addon_factory(guid=abuse_report.guid, users=[author]) + original_cinder_job = CinderJob.objects.get() + original_cinder_job.update( + decision=ContentDecision.objects.create( + action_date=datetime(2023, 10, 12, 9, 8, 37, 4789), + cinder_id='d1f01fae-3bce-41d5-af8a-e0b4b5ceaaed', + action=DECISION_ACTIONS.AMO_APPROVE, + 
appeal_job=CinderJob.objects.create( + job_id='5ab7cb33-a5ab-4dfa-9d72-4c2061ffeb08' + ), + addon=addon, + ) + ) + abuse_report.update( + reporter_email='reporter@email.com', cinder_job=original_cinder_job + ) + CinderAppeal.objects.create( + decision=original_cinder_job.decision, reporter_report=abuse_report + ) + req = self.get_request(data=data) + with mock.patch.object( + ContentActionDisableAddon, 'process_action' + ) as process_mock: + cinder_webhook(req) + process_mock.assert_called() + assert len(mail.outbox) == 2 + assert mail.outbox[0].to == ['reporter@email.com'] + assert 'was incorrect' in mail.outbox[0].body + assert mail.outbox[1].to == [author.email] + assert 'has been permanently disabled' in mail.outbox[1].body + + def test_process_decision_triggers_no_target_email_for_reporter_approve(self): + data = self.get_data(filename='reporter_appeal_confirm_approve.json') + abuse_report = self._setup_reports() + author = user_factory() + addon = addon_factory(guid=abuse_report.guid, users=[author]) + original_cinder_job = CinderJob.objects.get() + original_cinder_job.update( + decision=ContentDecision.objects.create( + action_date=datetime(2023, 10, 12, 9, 8, 37, 4789), + cinder_id='d1f01fae-3bce-41d5-af8a-e0b4b5ceaaed', + action=DECISION_ACTIONS.AMO_APPROVE, + appeal_job=CinderJob.objects.create( + job_id='5c7c3e21-8ccd-4d2f-b3b4-429620bd7a63' + ), + addon=addon, + ) + ) + abuse_report.update( + reporter_email='reporter@email.com', cinder_job=original_cinder_job + ) + CinderAppeal.objects.create( + decision=original_cinder_job.decision, reporter_report=abuse_report + ) + req = self.get_request(data=data) + with mock.patch.object( + ContentActionApproveNoAction, 'process_action' + ) as process_mock: + cinder_webhook(req) + process_mock.assert_called() + assert len(mail.outbox) == 1 + assert mail.outbox[0].to == ['reporter@email.com'] + assert 'we have denied your appeal' in mail.outbox[0].body + + def test_queue_does_not_matter_non_reviewer_case(self): + 
data = self.get_data() + data['payload']['source']['job']['queue']['slug'] = 'amo-another-queue' + return self.test_process_decision_called(data) + + def test_unknown_event(self): + self._setup_reports() + data = self.get_data() + data['event'] = 'report.created' + req = self.get_request(data=data) + with mock.patch.object(CinderJob, 'process_decision') as process_mock: response = cinder_webhook(req) process_mock.assert_not_called() assert response.status_code == 200 @@ -1018,15 +1511,74 @@ def test_queue_handled_reviewer_queue_ignored(self): 'amo': { 'received': True, 'handled': False, - 'not_handled_reason': 'Queue handled by AMO reviewers', + 'not_handled_reason': 'report.created is not a event we support', } } - def test_not_decision_event(self): + def test_missing_payload(self): + expected = { + 'amo': { + 'received': True, + 'handled': False, + 'not_handled_reason': 'No payload dict', + } + } + + def check(response): + process_mock.assert_not_called() + assert response.status_code == 200 + assert response.data == expected + self._setup_reports() data = self.get_data() - data['event'] = 'report.created' - req = self.get_request(data=data) + with mock.patch.object(CinderJob, 'process_decision') as process_mock: + del data['payload'] + check(cinder_webhook(self.get_request(data=data))) + data['payload'] = 'string' + check(cinder_webhook(self.get_request(data=data))) + data['payload'] = {} + check(cinder_webhook(self.get_request(data=data))) + + def test_no_cinder_job(self): + req = self.get_request() + with mock.patch.object(CinderJob, 'process_decision') as process_mock: + response = cinder_webhook(req) + process_mock.assert_not_called() + assert response.status_code == 200 + assert response.data == { + 'amo': { + 'received': True, + 'handled': False, + 'not_handled_reason': 'No matching job id found', + } + } + + def test_no_decision(self): + req = self.get_request( + data=self.get_data(filename='override_change_to_approve.json') + ) + with 
mock.patch.object(CinderJob, 'process_decision') as process_mock: + response = cinder_webhook(req) + process_mock.assert_not_called() + assert response.status_code == 200 + assert response.data == { + 'amo': { + 'received': True, + 'handled': False, + 'not_handled_reason': 'No matching decision id found', + } + } + + def test_valid_decision_but_no_cinder_job(self): + abuse_report = self._setup_reports() + ContentDecision.objects.create( + cinder_id='d1f01fae-3bce-41d5-af8a-e0b4b5ceaaed', + action=DECISION_ACTIONS.AMO_DISABLE_ADDON, + addon=addon_factory(guid=abuse_report.guid), + ) + req = self.get_request( + data=self.get_data(filename='override_change_to_approve.json') + ) with mock.patch.object(CinderJob, 'process_decision') as process_mock: response = cinder_webhook(req) process_mock.assert_not_called() @@ -1035,11 +1587,13 @@ def test_not_decision_event(self): 'amo': { 'received': True, 'handled': False, - 'not_handled_reason': 'Not a decision', + 'not_handled_reason': 'No matching job found for decision id', } } - def test_no_cinder_report(self): + def test_reviewer_tools_resolved_cinder_job(self): + report = self._setup_reports() + report.cinder_job.update(resolvable_in_reviewer_tools=True) req = self.get_request() with mock.patch.object(CinderJob, 'process_decision') as process_mock: response = cinder_webhook(req) @@ -1049,7 +1603,7 @@ def test_no_cinder_report(self): 'amo': { 'received': True, 'handled': False, - 'not_handled_reason': 'No matching job id found', + 'not_handled_reason': 'Decision already handled via reviewer tools', } } @@ -1100,6 +1654,69 @@ def test_invalid_decision_action(self): } } + def test_process_queue_move_called(self): + abuse_report = self._setup_reports() + addon_factory(guid=abuse_report.guid) + req = self.get_request( + data=self.get_data('job_actioned_move_to_dev_infringement.json') + ) + with mock.patch.object(CinderJob, 'process_queue_move') as process_mock: + response = cinder_webhook(req) + process_mock.assert_called() + 
process_mock.assert_called_with( + new_queue='amo-env-addon-infringement', notes='no' + ) + assert response.status_code == 201 + assert response.data == {'amo': {'received': True, 'handled': True}} + + def test_process_queue_move_no_cinder_report(self): + req = self.get_request( + data=self.get_data('job_actioned_move_to_dev_infringement.json') + ) + with mock.patch.object(CinderJob, 'process_queue_move') as process_mock: + response = cinder_webhook(req) + process_mock.assert_not_called() + assert response.status_code == 200 + assert response.data == { + 'amo': { + 'received': True, + 'handled': False, + 'not_handled_reason': 'No matching job id found', + } + } + + def test_process_queue_move_invalid_action(self): + data = self.get_data('job_actioned_move_to_dev_infringement.json') + + data['payload']['action'] = 'something_else' + response = cinder_webhook(self.get_request(data=data)) + assert response.status_code == 200 + assert response.data == { + 'amo': { + 'received': True, + 'handled': False, + 'not_handled_reason': ( + 'Unsupported action (something_else) for job.actioned' + ), + } + } + + def test_process_queue_move_not_addon(self): + data = self.get_data('job_actioned_move_to_dev_infringement.json') + + data['payload']['job']['entity']['entity_schema'] = 'amo_user' + response = cinder_webhook(self.get_request(data=data)) + assert response.status_code == 200 + assert response.data == { + 'amo': { + 'received': True, + 'handled': False, + 'not_handled_reason': ( + 'Unsupported entity_schema (amo_user) for job.actioned' + ), + } + } + def test_set_user(self): set_user(user_factory()) req = self.get_request() @@ -1131,6 +1748,8 @@ def test_report_rating_id(self): 'rating': str(target_rating.pk), 'message': 'abuse!', 'reason': 'illegal', + 'illegal_category': 'animal_welfare', + 'illegal_subcategory': 'other', }, REMOTE_ADDR='123.45.67.89', ) @@ -1150,6 +1769,8 @@ def test_report_rating_id_int(self): 'rating': target_rating.pk, 'message': 'abuse!', 'reason': 
'illegal', + 'illegal_category': 'animal_welfare', + 'illegal_subcategory': 'other', }, REMOTE_ADDR='123.45.67.89', ) @@ -1161,7 +1782,13 @@ def test_report_rating_id_int(self): def test_no_rating_fails(self): response = self.client.post( - self.url, data={'message': 'abuse!', 'reason': 'illegal'} + self.url, + data={ + 'message': 'abuse!', + 'reason': 'illegal', + 'illegal_category': 'animal_welfare', + 'illegal_subcategory': 'other', + }, ) assert response.status_code == 400 assert json.loads(response.content) == {'rating': ['This field is required.']} @@ -1189,7 +1816,13 @@ def test_message_can_be_blank_if_reason_is_provided(self): ) response = self.client.post( self.url, - data={'rating': str(target_rating.pk), 'message': '', 'reason': 'illegal'}, + data={ + 'rating': str(target_rating.pk), + 'message': '', + 'reason': 'illegal', + 'illegal_category': 'animal_welfare', + 'illegal_subcategory': 'other', + }, ) assert response.status_code == 201 @@ -1199,7 +1832,12 @@ def test_message_can_be_missing_if_reason_is_provided(self): ) response = self.client.post( self.url, - data={'rating': str(target_rating.pk), 'reason': 'illegal'}, + data={ + 'rating': str(target_rating.pk), + 'reason': 'illegal', + 'illegal_category': 'animal_welfare', + 'illegal_subcategory': 'other', + }, ) assert response.status_code == 201 @@ -1235,6 +1873,8 @@ def test_throttle(self): 'rating': str(target_rating.pk), 'message': 'abuse!', 'reason': 'illegal', + 'illegal_category': 'animal_welfare', + 'illegal_subcategory': 'other', }, REMOTE_ADDR='123.45.67.89', HTTP_X_FORWARDED_FOR=f'123.45.67.89, {get_random_ip()}', @@ -1247,6 +1887,8 @@ def test_throttle(self): 'rating': str(target_rating.pk), 'message': 'abuse!', 'reason': 'illegal', + 'illegal_category': 'animal_welfare', + 'illegal_subcategory': 'other', }, REMOTE_ADDR='123.45.67.89', HTTP_X_FORWARDED_FOR=f'123.45.67.89, {get_random_ip()}', @@ -1263,6 +1905,8 @@ def test_report_country_code(self): 'rating': str(target_rating.pk), 
'message': 'abuse!', 'reason': 'illegal', + 'illegal_category': 'animal_welfare', + 'illegal_subcategory': 'other', }, REMOTE_ADDR='123.45.67.89', HTTP_X_COUNTRY_CODE='YY', @@ -1287,13 +1931,13 @@ def _setup_reportable_reason(self, reason): assert response.status_code == 201, response.content @mock.patch('olympia.abuse.tasks.report_to_cinder.delay') - @override_switch('enable-cinder-reporting', active=True) + @override_switch('dsa-job-technical-processing', active=True) def test_reportable_reason_calls_cinder_task(self, task_mock): self._setup_reportable_reason('hateful_violent_deceptive') task_mock.assert_called() @mock.patch('olympia.abuse.tasks.report_to_cinder.delay') - @override_switch('enable-cinder-reporting', active=False) + @override_switch('dsa-job-technical-processing', active=False) def test_reportable_reason_does_not_call_cinder_with_waffle_off(self, task_mock): self._setup_reportable_reason('hateful_violent_deceptive') task_mock.assert_not_called() @@ -1309,6 +1953,8 @@ def test_lang(self): 'message': 'abuse!', 'reason': 'illegal', 'lang': 'Lô-käl', + 'illegal_category': 'animal_welfare', + 'illegal_subcategory': 'other', }, REMOTE_ADDR='123.45.67.89', ) @@ -1318,6 +1964,172 @@ def test_lang(self): self.check_report(report, f'Abuse Report for Rating {target_rating.pk}') assert report.application_locale == 'Lô-käl' + def test_reject_illegal_category_when_reason_is_not_illegal(self): + target_rating = Rating.objects.create( + addon=addon_factory(), user=user_factory(), body='Booh', rating=1 + ) + response = self.client.post( + self.url, + data={ + 'rating': str(target_rating.pk), + 'reason': 'feedback_spam', + 'illegal_category': 'other', + }, + ) + assert response.status_code == 400 + assert json.loads(response.content) == { + 'illegal_category': [ + 'This value must be omitted or set to "null" when the `reason` is ' + 'not "illegal".' 
+ ], + } + + def test_reject_illegal_subcategory_when_reason_is_not_illegal(self): + target_rating = Rating.objects.create( + addon=addon_factory(), user=user_factory(), body='Booh', rating=1 + ) + response = self.client.post( + self.url, + data={ + 'rating': str(target_rating.pk), + 'reason': 'feedback_spam', + 'illegal_subcategory': 'other', + }, + ) + assert response.status_code == 400 + assert json.loads(response.content) == { + 'illegal_subcategory': [ + 'This value must be omitted or set to "null" when the `reason` is ' + 'not "illegal".' + ], + } + + def test_illegal_category_required_when_reason_is_illegal(self): + target_rating = Rating.objects.create( + addon=addon_factory(), user=user_factory(), body='Booh', rating=1 + ) + response = self.client.post( + self.url, data={'rating': str(target_rating.pk), 'reason': 'illegal'} + ) + assert response.status_code == 400 + assert json.loads(response.content) == { + 'illegal_category': ['This field is required.'] + } + + def test_illegal_category_cannot_be_blank_when_reason_is_illegal(self): + target_rating = Rating.objects.create( + addon=addon_factory(), user=user_factory(), body='Booh', rating=1 + ) + response = self.client.post( + self.url, + data={ + 'rating': str(target_rating.pk), + 'reason': 'illegal', + 'illegal_category': '', + }, + ) + assert response.status_code == 400 + assert json.loads(response.content) == { + 'illegal_category': ['"" is not a valid choice.'] + } + + def test_illegal_category_cannot_be_null_when_reason_is_illegal(self): + target_rating = Rating.objects.create( + addon=addon_factory(), user=user_factory(), body='Booh', rating=1 + ) + response = self.client.post( + self.url, + data={ + 'rating': str(target_rating.pk), + 'reason': 'illegal', + 'illegal_category': None, + }, + ) + assert response.status_code == 400 + assert json.loads(response.content) == { + 'illegal_category': ['This field may not be null.'] + } + + def test_illegal_subcategory_required_when_reason_is_illegal(self): + 
target_rating = Rating.objects.create( + addon=addon_factory(), user=user_factory(), body='Booh', rating=1 + ) + response = self.client.post( + self.url, + data={ + 'rating': str(target_rating.pk), + 'reason': 'illegal', + 'illegal_category': 'animal_welfare', + }, + ) + assert response.status_code == 400 + assert json.loads(response.content) == { + 'illegal_subcategory': ['This field is required.'] + } + + def test_illegal_subcategory_cannot_be_blank_when_reason_is_illegal(self): + target_rating = Rating.objects.create( + addon=addon_factory(), user=user_factory(), body='Booh', rating=1 + ) + response = self.client.post( + self.url, + data={ + 'rating': str(target_rating.pk), + 'reason': 'illegal', + 'illegal_category': 'animal_welfare', + 'illegal_subcategory': '', + }, + ) + assert response.status_code == 400 + assert json.loads(response.content) == { + 'illegal_subcategory': ['"" is not a valid choice.'] + } + + def test_illegal_subcategory_cannot_be_null_when_reason_is_illegal(self): + target_rating = Rating.objects.create( + addon=addon_factory(), + user=user_factory(), + body='Booh', + rating=1, + ) + response = self.client.post( + self.url, + data={ + 'rating': str(target_rating.pk), + 'reason': 'illegal', + 'illegal_category': 'animal_welfare', + 'illegal_subcategory': None, + }, + ) + assert response.status_code == 400 + assert json.loads(response.content) == { + 'illegal_subcategory': ['This field may not be null.'] + } + + def test_illegal_subcategory_depends_on_category(self): + target_rating = Rating.objects.create( + addon=addon_factory(), + user=user_factory(), + body='Booh', + rating=1, + ) + response = self.client.post( + self.url, + data={ + 'rating': str(target_rating.pk), + 'reason': 'illegal', + 'illegal_category': 'animal_welfare', + 'illegal_subcategory': 'biometric_data_breach', + }, + ) + assert response.status_code == 400 + assert json.loads(response.content) == { + 'illegal_subcategory': [ + 'This value cannot be used in combination with 
the supplied ' + '`illegal_category`.' + ] + } + class TestRatingAbuseViewSetLoggedOut(RatingAbuseViewSetTestBase, TestCase): def check_reporter(self, report): @@ -1346,6 +2158,8 @@ def test_throttle_ip_for_authenticated_users(self): 'rating': str(target_rating.pk), 'message': 'abuse!', 'reason': 'illegal', + 'illegal_category': 'animal_welfare', + 'illegal_subcategory': 'other', }, REMOTE_ADDR='123.45.67.89', HTTP_X_FORWARDED_FOR=f'123.45.67.89, {get_random_ip()}', @@ -1361,6 +2175,8 @@ def test_throttle_ip_for_authenticated_users(self): 'rating': str(target_rating.pk), 'message': 'abuse!', 'reason': 'illegal', + 'illegal_category': 'animal_welfare', + 'illegal_subcategory': 'other', }, REMOTE_ADDR='123.45.67.89', HTTP_X_FORWARDED_FOR=f'123.45.67.89, {get_random_ip()}', @@ -1447,6 +2263,8 @@ def test_message_can_be_blank_if_reason_is_provided(self): 'collection': str(target_collection.pk), 'message': '', 'reason': 'illegal', + 'illegal_category': 'animal_welfare', + 'illegal_subcategory': 'other', }, ) assert response.status_code == 201 @@ -1455,7 +2273,12 @@ def test_message_can_be_missing_if_reason_is_provided(self): target_collection = collection_factory() response = self.client.post( self.url, - data={'collection': str(target_collection.pk), 'reason': 'illegal'}, + data={ + 'collection': str(target_collection.pk), + 'reason': 'illegal', + 'illegal_category': 'animal_welfare', + 'illegal_subcategory': 'other', + }, ) assert response.status_code == 201 @@ -1487,6 +2310,8 @@ def test_throttle(self): 'collection': str(target_collection.pk), 'message': 'abuse!', 'reason': 'illegal', + 'illegal_category': 'animal_welfare', + 'illegal_subcategory': 'other', }, REMOTE_ADDR='123.45.67.89', HTTP_X_FORWARDED_FOR=f'123.45.67.89, {get_random_ip()}', @@ -1499,6 +2324,8 @@ def test_throttle(self): 'collection': str(target_collection.pk), 'message': 'abuse!', 'reason': 'illegal', + 'illegal_category': 'animal_welfare', + 'illegal_subcategory': 'other', }, 
REMOTE_ADDR='123.45.67.89', HTTP_X_FORWARDED_FOR=f'123.45.67.89, {get_random_ip()}', @@ -1513,6 +2340,8 @@ def test_report_country_code(self): 'collection': str(target_collection.pk), 'message': 'abuse!', 'reason': 'illegal', + 'illegal_category': 'animal_welfare', + 'illegal_subcategory': 'other', }, REMOTE_ADDR='123.45.67.89', HTTP_X_COUNTRY_CODE='YY', @@ -1539,13 +2368,13 @@ def _setup_reportable_reason(self, reason): assert response.status_code == 201, response.content @mock.patch('olympia.abuse.tasks.report_to_cinder.delay') - @override_switch('enable-cinder-reporting', active=True) + @override_switch('dsa-job-technical-processing', active=True) def test_reportable_reason_calls_cinder_task(self, task_mock): self._setup_reportable_reason('hateful_violent_deceptive') task_mock.assert_called() @mock.patch('olympia.abuse.tasks.report_to_cinder.delay') - @override_switch('enable-cinder-reporting', active=False) + @override_switch('dsa-job-technical-processing', active=False) def test_reportable_reason_does_not_call_cinder_with_waffle_off(self, task_mock): self._setup_reportable_reason('hateful_violent_deceptive') task_mock.assert_not_called() @@ -1568,6 +2397,149 @@ def test_lang(self): self.check_report(report, f'Abuse Report for Collection {target_collection.pk}') assert report.application_locale == 'Lô-käl' + def test_reject_illegal_category_when_reason_is_not_illegal(self): + target_collection = collection_factory() + response = self.client.post( + self.url, + data={ + 'collection': str(target_collection.pk), + 'reason': 'feedback_spam', + 'illegal_category': 'other', + }, + ) + assert response.status_code == 400 + assert json.loads(response.content) == { + 'illegal_category': [ + 'This value must be omitted or set to "null" when the `reason` is ' + 'not "illegal".' 
+ ], + } + + def test_reject_illegal_subcategory_when_reason_is_not_illegal(self): + target_collection = collection_factory() + response = self.client.post( + self.url, + data={ + 'collection': str(target_collection.pk), + 'reason': 'feedback_spam', + 'illegal_subcategory': 'other', + }, + ) + assert response.status_code == 400 + assert json.loads(response.content) == { + 'illegal_subcategory': [ + 'This value must be omitted or set to "null" when the `reason` is ' + 'not "illegal".' + ], + } + + def test_illegal_category_required_when_reason_is_illegal(self): + target_collection = collection_factory() + response = self.client.post( + self.url, + data={'collection': str(target_collection.pk), 'reason': 'illegal'}, + ) + assert response.status_code == 400 + assert json.loads(response.content) == { + 'illegal_category': ['This field is required.'] + } + + def test_illegal_category_cannot_be_blank_when_reason_is_illegal(self): + target_collection = collection_factory() + response = self.client.post( + self.url, + data={ + 'collection': str(target_collection.pk), + 'reason': 'illegal', + 'illegal_category': '', + }, + ) + assert response.status_code == 400 + assert json.loads(response.content) == { + 'illegal_category': ['"" is not a valid choice.'] + } + + def test_illegal_category_cannot_be_null_when_reason_is_illegal(self): + target_collection = collection_factory() + response = self.client.post( + self.url, + data={ + 'collection': str(target_collection.pk), + 'reason': 'illegal', + 'illegal_category': None, + }, + ) + assert response.status_code == 400 + assert json.loads(response.content) == { + 'illegal_category': ['This field may not be null.'] + } + + def test_illegal_subcategory_required_when_reason_is_illegal(self): + target_collection = collection_factory() + response = self.client.post( + self.url, + data={ + 'collection': str(target_collection.pk), + 'reason': 'illegal', + 'illegal_category': 'animal_welfare', + }, + ) + assert response.status_code == 400 
+ assert json.loads(response.content) == { + 'illegal_subcategory': ['This field is required.'] + } + + def test_illegal_subcategory_cannot_be_blank_when_reason_is_illegal(self): + target_collection = collection_factory() + response = self.client.post( + self.url, + data={ + 'collection': str(target_collection.pk), + 'reason': 'illegal', + 'illegal_category': 'animal_welfare', + 'illegal_subcategory': '', + }, + ) + assert response.status_code == 400 + assert json.loads(response.content) == { + 'illegal_subcategory': ['"" is not a valid choice.'] + } + + def test_illegal_subcategory_cannot_be_null_when_reason_is_illegal(self): + target_collection = collection_factory() + response = self.client.post( + self.url, + data={ + 'collection': str(target_collection.pk), + 'reason': 'illegal', + 'illegal_category': 'animal_welfare', + 'illegal_subcategory': None, + }, + ) + assert response.status_code == 400 + assert json.loads(response.content) == { + 'illegal_subcategory': ['This field may not be null.'] + } + + def test_illegal_subcategory_depends_on_category(self): + target_collection = collection_factory() + response = self.client.post( + self.url, + data={ + 'collection': str(target_collection.pk), + 'reason': 'illegal', + 'illegal_category': 'animal_welfare', + 'illegal_subcategory': 'biometric_data_breach', + }, + ) + assert response.status_code == 400 + assert json.loads(response.content) == { + 'illegal_subcategory': [ + 'This value cannot be used in combination with the supplied ' + '`illegal_category`.' 
+ ] + } + class TestCollectionAbuseViewSetLoggedOut(CollectionAbuseViewSetTestBase, TestCase): def check_reporter(self, report): @@ -1594,6 +2566,8 @@ def test_throttle_ip_for_authenticated_users(self): 'collection': str(target_collection.pk), 'message': 'abuse!', 'reason': 'illegal', + 'illegal_category': 'animal_welfare', + 'illegal_subcategory': 'other', }, REMOTE_ADDR='123.45.67.89', HTTP_X_FORWARDED_FOR=f'123.45.67.89, {get_random_ip()}', @@ -1609,6 +2583,8 @@ def test_throttle_ip_for_authenticated_users(self): 'collection': str(target_collection.pk), 'message': 'abuse!', 'reason': 'illegal', + 'illegal_category': 'animal_welfare', + 'illegal_subcategory': 'other', }, REMOTE_ADDR='123.45.67.89', HTTP_X_FORWARDED_FOR=f'123.45.67.89, {get_random_ip()}', @@ -1620,9 +2596,12 @@ class TestAppeal(TestCase): def setUp(self): self.addon = addon_factory() self.cinder_job = CinderJob.objects.create( - decision_id='my-decision-id', - decision_action=CinderJob.DECISION_ACTIONS.AMO_APPROVE, - decision_date=self.days_ago(1), + decision=ContentDecision.objects.create( + cinder_id='my-decision-id', + action=DECISION_ACTIONS.AMO_APPROVE, + action_date=self.days_ago(1), + addon=self.addon, + ), created=self.days_ago(2), ) self.abuse_report = AbuseReport.objects.create( @@ -1635,13 +2614,13 @@ def setUp(self): 'abuse.appeal_reporter', kwargs={ 'abuse_report_id': self.abuse_report.id, - 'decision_id': self.cinder_job.decision_id, + 'decision_cinder_id': self.cinder_job.decision.cinder_id, }, ) self.author_appeal_url = reverse( 'abuse.appeal_author', kwargs={ - 'decision_id': self.cinder_job.decision_id, + 'decision_cinder_id': self.cinder_job.decision.cinder_id, }, ) patcher = mock.patch('olympia.abuse.views.appeal_to_cinder') @@ -1649,7 +2628,7 @@ def setUp(self): self.appeal_mock = patcher.start() def test_no_decision_yet(self): - self.cinder_job.update(decision_action=CinderJob.DECISION_ACTIONS.NO_DECISION) + self.cinder_job.decision.delete() assert 
self.client.get(self.reporter_appeal_url).status_code == 404 assert self.client.get(self.author_appeal_url).status_code == 404 @@ -1658,7 +2637,7 @@ def test_no_such_decision(self): 'abuse.appeal_reporter', kwargs={ 'abuse_report_id': self.abuse_report.id, - 'decision_id': '1234-5678-9000', + 'decision_cinder_id': '1234-5678-9000', }, ) assert self.client.get(url).status_code == 404 @@ -1666,7 +2645,7 @@ def test_no_such_decision(self): url = reverse( 'abuse.appeal_author', kwargs={ - 'decision_id': '1234-5678-9000', + 'decision_cinder_id': '1234-5678-9000', }, ) assert self.client.get(url).status_code == 404 @@ -1676,7 +2655,7 @@ def test_no_such_abuse_report(self): 'abuse.appeal_reporter', kwargs={ 'abuse_report_id': self.abuse_report.id + 1, - 'decision_id': self.cinder_job.decision_id, + 'decision_cinder_id': self.cinder_job.decision.cinder_id, }, ) assert self.client.get(url).status_code == 404 @@ -1740,14 +2719,14 @@ def test_appeal_approval_anonymous_report_with_email_post(self): assert self.appeal_mock.delay.call_args_list[0][0] == () assert self.appeal_mock.delay.call_args_list[0][1] == { 'appeal_text': 'I dont like this', - 'decision_id': self.cinder_job.decision_id, + 'decision_cinder_id': self.cinder_job.decision.cinder_id, 'abuse_report_id': self.abuse_report.id, 'user_id': None, 'is_reporter': True, } def test_appeal_approval_anonymous_report_with_email_post_cant_be_appealed(self): - self.cinder_job.update(decision_date=self.days_ago(200)) + self.cinder_job.decision.update(action_date=self.days_ago(200)) self.abuse_report.update(reporter_email='me@example.com') response = self.client.get(self.reporter_appeal_url) assert response.status_code == 200 @@ -1802,7 +2781,7 @@ def test_appeal_approval_loggued_in_user(self): assert self.appeal_mock.call_count == 0 def test_appeal_approval_logged_in_report_cant_be_appealed(self): - self.cinder_job.update(decision_date=self.days_ago(200)) + self.cinder_job.decision.update(action_date=self.days_ago(200)) 
self.user = user_factory() self.abuse_report.update(reporter=self.user) self.client.force_login(self.user) @@ -1817,25 +2796,19 @@ def test_appeal_approval_logged_in_report_cant_be_appealed(self): assert self.appeal_mock.call_count == 0 def test_appeal_rejection_not_logged_in(self): - self.cinder_job.update( - decision_action=CinderJob.DECISION_ACTIONS.AMO_DISABLE_ADDON - ) + self.cinder_job.decision.update(action=DECISION_ACTIONS.AMO_DISABLE_ADDON) response = self.client.get(self.author_appeal_url) self.assertLoginRedirects(response, self.author_appeal_url) def test_appeal_rejection_not_author(self): - self.cinder_job.update( - decision_action=CinderJob.DECISION_ACTIONS.AMO_DISABLE_ADDON - ) + self.cinder_job.decision.update(action=DECISION_ACTIONS.AMO_DISABLE_ADDON) user = user_factory() self.client.force_login(user) response = self.client.get(self.author_appeal_url) assert response.status_code == 403 def test_appeal_rejection_author(self): - self.cinder_job.update( - decision_action=CinderJob.DECISION_ACTIONS.AMO_DISABLE_ADDON - ) + self.cinder_job.decision.update(action=DECISION_ACTIONS.AMO_DISABLE_ADDON) user = user_factory() self.addon.authors.add(user) self.client.force_login(user) @@ -1860,18 +2833,59 @@ def test_appeal_rejection_author(self): assert self.appeal_mock.delay.call_args_list[0][0] == () assert self.appeal_mock.delay.call_args_list[0][1] == { 'appeal_text': 'I dont like this', - 'decision_id': self.cinder_job.decision_id, + 'decision_cinder_id': self.cinder_job.decision.cinder_id, 'abuse_report_id': None, 'user_id': user.pk, 'is_reporter': False, } - def test_appeal_banned_user(self): - self.cinder_job.update(decision_action=CinderJob.DECISION_ACTIONS.AMO_BAN_USER) - self.abuse_report.update(guid=None, user=user_factory()) + def test_appeal_rejection_author_no_cinderjob(self): + user = user_factory() + self.addon.authors.add(user) + self.client.force_login(user) + decision = ContentDecision.objects.create( + addon=self.addon, + 
action=DECISION_ACTIONS.AMO_DISABLE_ADDON, + cinder_id='some-decision-id', + action_date=datetime.now(), + ) + author_appeal_url = reverse( + 'abuse.appeal_author', kwargs={'decision_cinder_id': decision.cinder_id} + ) + response = self.client.get(author_appeal_url) + assert response.status_code == 200 + doc = pq(response.content) + assert not doc('#id_email') + assert not doc('#appeal-thank-you') + assert doc('#id_reason') + assert doc('#appeal-submit') + response = self.client.post( - self.author_appeal_url, {'email': self.abuse_report.user.email} + author_appeal_url, + {'email': 'me@example.com', 'reason': 'I dont like this'}, + ) + assert response.status_code == 200 + doc = pq(response.content) + assert doc('#appeal-thank-you') + assert not doc('#id_reason') + assert not doc('#appeal-submit') + assert self.appeal_mock.delay.call_count == 1 + assert self.appeal_mock.delay.call_args_list[0][0] == () + assert self.appeal_mock.delay.call_args_list[0][1] == { + 'appeal_text': 'I dont like this', + 'decision_cinder_id': decision.cinder_id, + 'abuse_report_id': None, + 'user_id': user.pk, + 'is_reporter': False, + } + + def test_appeal_banned_user(self): + target = user_factory() + self.cinder_job.decision.update( + action=DECISION_ACTIONS.AMO_BAN_USER, addon=None, user=target ) + self.abuse_report.update(guid=None, user=target) + response = self.client.post(self.author_appeal_url, {'email': target.email}) assert response.status_code == 200 doc = pq(response.content) email_input = doc('#id_email')[0] @@ -1883,7 +2897,7 @@ def test_appeal_banned_user(self): response = self.client.post( self.author_appeal_url, - {'email': self.abuse_report.user.email, 'reason': 'I am not a bad guy'}, + {'email': target.email, 'reason': 'I am not a bad guy'}, ) assert response.status_code == 200 doc = pq(response.content) @@ -1894,15 +2908,18 @@ def test_appeal_banned_user(self): assert self.appeal_mock.delay.call_args_list[0][0] == () assert self.appeal_mock.delay.call_args_list[0][1] 
== { 'appeal_text': 'I am not a bad guy', - 'decision_id': self.cinder_job.decision_id, + 'decision_cinder_id': self.cinder_job.decision.cinder_id, 'abuse_report_id': None, 'user_id': None, 'is_reporter': False, } def test_appeal_banned_user_wrong_email(self): - self.cinder_job.update(decision_action=CinderJob.DECISION_ACTIONS.AMO_BAN_USER) - self.abuse_report.update(guid=None, user=user_factory()) + target = user_factory() + self.cinder_job.decision.update( + action=DECISION_ACTIONS.AMO_BAN_USER, addon=None, user=target + ) + self.abuse_report.update(guid=None, user=target) response = self.client.post(self.author_appeal_url, {'email': 'me@example.com'}) assert response.status_code == 200 doc = pq(response.content) @@ -1941,8 +2958,10 @@ def test_reporter_cant_appeal_appealed_decision_already_made_for_other_affirm(se reporter_email='otherreporter@example.com', ) appeal_job = CinderJob.objects.create(job_id='appeal job id') - self.cinder_job.update(appeal_job=appeal_job) - other_abuse_report.update(appellant_job=appeal_job) + self.cinder_job.decision.update(appeal_job=appeal_job) + CinderAppeal.objects.create( + decision=self.cinder_job.decision, reporter_report=other_abuse_report + ) self.client.force_login(user) response = self.client.get(self.reporter_appeal_url) @@ -1957,9 +2976,11 @@ def test_reporter_cant_appeal_appealed_decision_already_made_for_other_affirm(se # specific error message (in this case we confirmed the original # decision). 
appeal_job.update( - decision_id='appeal decision id', - decision_action=CinderJob.DECISION_ACTIONS.AMO_APPROVE, - decision_date=datetime.now(), + decision=ContentDecision.objects.create( + cinder_id='appeal decision id', + action=DECISION_ACTIONS.AMO_APPROVE, + addon=self.addon, + ) ) response = self.client.get(self.reporter_appeal_url) @@ -1987,8 +3008,10 @@ def test_reporter_cant_appeal_appealed_decision_already_made_for_other_turned(se reporter_email='otherreporter@example.com', ) appeal_job = CinderJob.objects.create(job_id='appeal job id') - self.cinder_job.update(appeal_job=appeal_job) - other_abuse_report.update(appellant_job=appeal_job) + self.cinder_job.decision.update(appeal_job=appeal_job) + CinderAppeal.objects.create( + decision=self.cinder_job.decision, reporter_report=other_abuse_report + ) self.client.force_login(user) response = self.client.get(self.reporter_appeal_url) @@ -2004,9 +3027,11 @@ def test_reporter_cant_appeal_appealed_decision_already_made_for_other_turned(se # the content is already supposed to be disabled but the reporter might # not have noticed). 
appeal_job.update( - decision_id='appeal decision id', - decision_action=CinderJob.DECISION_ACTIONS.AMO_DISABLE_ADDON, - decision_date=datetime.now(), + decision=ContentDecision.objects.create( + cinder_id='appeal decision id', + action=DECISION_ACTIONS.AMO_DISABLE_ADDON, + addon=self.addon, + ) ) response = self.client.get(self.reporter_appeal_url) @@ -2021,13 +3046,77 @@ def test_reporter_cant_appeal_appealed_decision_already_made_for_other_turned(se 'and have reversed our prior decision' ) in doc.text() + def test_reporter_cant_appeal_overridden_decision(self): + user = user_factory() + self.abuse_report.update(reporter=user) + + self.client.force_login(user) + response = self.client.get(self.reporter_appeal_url) + assert response.status_code == 200 + doc = pq(response.content) + assert doc('#id_reason') + assert not doc('#appeal-thank-you') + assert doc('#appeal-submit') + + ContentDecision.objects.create( + cinder_id='appeal decision id', + action=DECISION_ACTIONS.AMO_APPROVE, + addon=self.addon, + override_of=self.cinder_job.decision, + ) + + response = self.client.get(self.reporter_appeal_url) + assert response.status_code == 200 + doc = pq(response.content) + assert not doc('#id_reason') + assert not doc('#appeal-thank-you') + assert not doc('#appeal-submit') + assert ( + 'The decision you are appealing has already been overridden by a new ' + 'decision' in doc.text() + ) + + def test_author_cant_appeal_overridden_decision(self): + self.cinder_job.decision.update(action=DECISION_ACTIONS.AMO_DISABLE_ADDON) + user = user_factory() + self.addon.authors.add(user) + self.client.force_login(user) + response = self.client.get(self.author_appeal_url) + + assert response.status_code == 200 + doc = pq(response.content) + assert doc('#id_reason') + assert not doc('#appeal-thank-you') + assert doc('#appeal-submit') + + ContentDecision.objects.create( + cinder_id='appeal decision id', + action=DECISION_ACTIONS.AMO_APPROVE, + addon=self.addon, + 
override_of=self.cinder_job.decision, + ) + + response = self.client.get(self.author_appeal_url) + assert response.status_code == 200 + doc = pq(response.content) + assert not doc('#id_reason') + assert not doc('#appeal-thank-you') + assert not doc('#appeal-submit') + assert ( + 'The decision you are appealing has already been overridden by a new ' + 'decision' in doc.text() + ) + def test_throttling_initial_email_form(self): expected_error_message = ( 'You have submitted this form too many times recently. ' 'Please try again after some time.' ) - self.cinder_job.update(decision_action=CinderJob.DECISION_ACTIONS.AMO_BAN_USER) - self.abuse_report.update(guid=None, user=user_factory()) + target = user_factory() + self.cinder_job.decision.update( + action=DECISION_ACTIONS.AMO_BAN_USER, addon=None, user=target + ) + self.abuse_report.update(guid=None, user=target) with freeze_time() as frozen_time: for _x in range(0, 20): self._add_fake_throttling_action( @@ -2038,7 +3127,7 @@ def test_throttling_initial_email_form(self): ) response = self.client.post( self.author_appeal_url, - {'email': self.abuse_report.user.email}, + {'email': target.email}, REMOTE_ADDR='5.6.7.8', ) assert ( @@ -2061,7 +3150,7 @@ def test_throttling_initial_email_form(self): frozen_time.tick(delta=timedelta(hours=24, seconds=1)) response = self.client.post( self.author_appeal_url, - {'email': self.abuse_report.user.email}, + {'email': target.email}, REMOTE_ADDR='5.6.7.8', ) assert ( @@ -2078,8 +3167,11 @@ def test_throttling_doesnt_reveal_validation_state_fields(self): 'You have submitted this form too many times recently. ' 'Please try again after some time.' 
) - self.cinder_job.update(decision_action=CinderJob.DECISION_ACTIONS.AMO_BAN_USER) - self.abuse_report.update(guid=None, user=user_factory()) + target = user_factory() + self.cinder_job.decision.update( + action=DECISION_ACTIONS.AMO_BAN_USER, addon=None, user=target + ) + self.abuse_report.update(guid=None, user=target) with freeze_time(): for _x in range(0, 20): self._add_fake_throttling_action( @@ -2107,9 +3199,7 @@ def test_throttling_appeal_form(self): 'You have submitted this form too many times recently. ' 'Please try again after some time.' ) - self.cinder_job.update( - decision_action=CinderJob.DECISION_ACTIONS.AMO_DISABLE_ADDON - ) + self.cinder_job.decision.update(action=DECISION_ACTIONS.AMO_DISABLE_ADDON) user = user_factory() self.addon.authors.add(user) self.client.force_login(user) diff --git a/src/olympia/abuse/urls.py b/src/olympia/abuse/urls.py index 8e279e5bedb6..d35107fdea52 100644 --- a/src/olympia/abuse/urls.py +++ b/src/olympia/abuse/urls.py @@ -5,13 +5,13 @@ urlpatterns = [ path( - 'appeal/