diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index 36826ce303d8..000000000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,652 +0,0 @@ -# These environment variables must be set in CircleCI UI -# -# DOCKERHUB_REPO - docker hub repo, format: / -# DOCKER_USER - Login user for docker hub -# DOCKER_PASS - Login password for docker hub user -version: 2.1 - -references: - # We declare the autograph configuration here to be able to fully leverage - # Docker executors. This configuration should be kept in sync with the - # content of `scripts/autograph_localdev_config.yaml`, which is used for - # local dev. Sadly, we cannot "include" this file here. - autograph_config: &autograph_config | - # Note: Most of the configuration here got copied from - # https://github.com/mozilla-services/autograph/blob/master/autograph.yaml - server: - # This port should be perfectly free, the upstream default of 8000 is - # used by django sometimes so let's not do that. - listen: "0.0.0.0:5500" - # cache 500k nonces to protect from authorization replay attacks - noncecachesize: 10 - - # The keys below are testing keys that do not grant any power - signers: - - id: webextensions-rsa - type: xpi - # The signing parameters for each type of add-on are 'add-on' are - # signed with the OU 'Production' and the provided ID 'extension' are - # signed with the OU 'Mozilla Extensions' and the provided ID 'system - # add-on' are signed with the OU 'Mozilla Components' and the - # provided ID - mode: add-on - recommendation: - path: "mozilla-recommendation.json" - certificate: | - -----BEGIN CERTIFICATE----- - MIIH0zCCBbugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBvDELMAkGA1UEBhMCVVMx - CzAJBgNVBAgTAkNBMRYwFAYDVQQHEw1Nb3VudGFpbiBWaWV3MRwwGgYDVQQKExNB - bGxpem9tIENvcnBvcmF0aW9uMSAwHgYDVQQLExdBbGxpem9tIEFNTyBEZXZlbG9w - bWVudDEYMBYGA1UEAxMPZGV2LmFtby5yb290LmNhMS4wLAYJKoZIhvcNAQkBFh9m - b3hzZWMrZGV2YW1vcm9vdGNhQG1vemlsbGEuY29tMB4XDTE3MDMyMTIzNDQwNFoX - DTI3MDMxOTIzNDQwNFowgbwxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEWMBQG - A1UEBxMNTW91bnRhaW4gVmlldzEcMBoGA1UEChMTQWxsaXpvbSBDb3Jwb3JhdGlv - bjEgMB4GA1UECxMXQWxsaXpvbSBBTU8gRGV2ZWxvcG1lbnQxGDAWBgNVBAMTD2Rl - di5hbW8ucm9vdC5jYTEuMCwGCSqGSIb3DQEJARYfZm94c2VjK2RldmFtb3Jvb3Rj - YUBtb3ppbGxhLmNvbTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMdX - 5soUuvWnkVHRHN5BKByrgpuU3QioE8SNT7BwRFeqbOySdvu5ecQAdNUoRbRyFmNB - ety2rQM9qw6y8eSe9fufIgrv1sg/xj7vweLmuC8Ob+zo5/iwRQw4JUdXnDjwX3W0 - auh0QRYfxWGK3hVrP9j1zIJk/yRBornCvXTtn8C/hVSE/PWc6CuV8vTcpyj+TPni - Lvulq17NdlX5qgUdn1yougJxnznkwnoIaBYLdAyZJJIUEomiEIxfabjnh8rfSMIw - AqmslrC8F73yo4JrCqJPt1ipggfpO3ZAjlEoTMcTUgyqR8B35GyuywWR0XrkJV7N - A7BM1qNjLb2to0XQSrGyWA7uPw88LuVk2aUPDE5uNK5Kv//+SGChUn2fDZTsjj3J - KY7f39JVwh/nk8ZkApplne8fKPoknW7er2R+rejyBx1+fJjLegKQsATpgKz4LRf4 - ct34oWSV6QXrZ/KKW+frWoHncy8C+UnCC3cDBKs272yqOvBoGMQTrF5oMn8i/Rap - gBbBdwysdJXb+buf/+ZS0PUt7avKFIlXqCNZjG3xotBsTuCL5zAoVKoXJW1FwrcZ - pveQuishKWNf9Id+0HaBdDp/vlbrTwXD1zsxfYvYw8wI7NkNO3TQBni5iyG4B1wh - oR+Z5AebWuJqVnsJyjPakNiuhKNsO/xTa4TF/ymfAgMBAAGjggHcMIIB2DAPBgNV - HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAWBgNVHSUBAf8EDDAKBggrBgEF - BQcDAzAdBgNVHQ4EFgQU2LRpqTdeQ1QlBWNA6fYAqHdpSaUwgekGA1UdIwSB4TCB - 3oAU2LRpqTdeQ1QlBWNA6fYAqHdpSaWhgcKkgb8wgbwxCzAJBgNVBAYTAlVTMQsw - CQYDVQQIEwJDQTEWMBQGA1UEBxMNTW91bnRhaW4gVmlldzEcMBoGA1UEChMTQWxs - aXpvbSBDb3Jwb3JhdGlvbjEgMB4GA1UECxMXQWxsaXpvbSBBTU8gRGV2ZWxvcG1l - bnQxGDAWBgNVBAMTD2Rldi5hbW8ucm9vdC5jYTEuMCwGCSqGSIb3DQEJARYfZm94 - c2VjK2RldmFtb3Jvb3RjYUBtb3ppbGxhLmNvbYIBATBCBglghkgBhvhCAQQENRYz - 
aHR0cHM6Ly9jb250ZW50LXNpZ25hdHVyZS5kZXYubW96YXdzLm5ldC9jYS9jcmwu - cGVtME4GCCsGAQUFBwEBBEIwQDA+BggrBgEFBQcwAoYyaHR0cHM6Ly9jb250ZW50 - LXNpZ25hdHVyZS5kZXYubW96YXdzLm5ldC9jYS9jYS5wZW0wDQYJKoZIhvcNAQEL - BQADggIBALqVt54WTkxD5U5fHPRUSZA9rFigoIcrHNrq+gTDd057cBDUWNc0cEHV - qaP0zgzqD2bIhV/WWlfMDY3VnB8L2+Vjvu2CEt8/9Kh5x9IgBmZt5VUMuEdmQOyH - vA7lz3UI+jmUGcojtLsi+sf4kxDZh3QB3T/wGiHg+K7vXnY7GWEy1Cjfwk/dvbT2 - ODTb5B3SPGsh75VmfzFGgerzsS71LN4FYBRUklLe8ozqKF8r/jGE2vfDR1Cy09pN - oR9ti+zaBiEtMlWJjxYrv7HvuoDR9xLmPxyV6gQbo6NnbudkpNdg5LhbY3WV1IgL - TnwJ7aHXgzOZ3w/VsSctg4beZZgYnr81vLKyefWJH1VzCe5XTgwXC1R/afGiVJ0P - hA1+T4My9oTaQBsiNYA2keXKJbTKerMTupoLgV/lJjxfF5BfQiy9NL18/bzxqf+J - 7w4P/4oHt3QCdISAIhlG4ttXfRR8oz6obAb6QYdCf3x9b2/3UXKd3UJ+gwchPjj6 - InnLK8ig9scn4opVNkBkjlMRsq1yd017eQzLSirpKj3br69qyLoyb/nPNJi7bL1K - bf6m5mF5GmKR+Glvq74O8rLQZ3a75v6H+NwOqAlZnWSJmC84R2HHsHPBw+2pExJT - E5bRcttRlhEdN4NJ2vWJnOH0DENHy6TEwACINJVx6ftucfPfvOxI - -----END CERTIFICATE----- - privatekey: | - -----BEGIN PRIVATE KEY----- - MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQDHV+bKFLr1p5FR - 0RzeQSgcq4KblN0IqBPEjU+wcERXqmzsknb7uXnEAHTVKEW0chZjQXrctq0DPasO - svHknvX7nyIK79bIP8Y+78Hi5rgvDm/s6Of4sEUMOCVHV5w48F91tGrodEEWH8Vh - it4Vaz/Y9cyCZP8kQaK5wr107Z/Av4VUhPz1nOgrlfL03Kco/kz54i77patezXZV - +aoFHZ9cqLoCcZ855MJ6CGgWC3QMmSSSFBKJohCMX2m454fK30jCMAKprJawvBe9 - 8qOCawqiT7dYqYIH6Tt2QI5RKEzHE1IMqkfAd+RsrssFkdF65CVezQOwTNajYy29 - raNF0EqxslgO7j8PPC7lZNmlDwxObjSuSr///khgoVJ9nw2U7I49ySmO39/SVcIf - 55PGZAKaZZ3vHyj6JJ1u3q9kfq3o8gcdfnyYy3oCkLAE6YCs+C0X+HLd+KFklekF - 62fyilvn61qB53MvAvlJwgt3AwSrNu9sqjrwaBjEE6xeaDJ/Iv0WqYAWwXcMrHSV - 2/m7n//mUtD1Le2ryhSJV6gjWYxt8aLQbE7gi+cwKFSqFyVtRcK3Gab3kLorISlj - X/SHftB2gXQ6f75W608Fw9c7MX2L2MPMCOzZDTt00AZ4uYshuAdcIaEfmeQHm1ri - alZ7Ccoz2pDYroSjbDv8U2uExf8pnwIDAQABAoICADf7eqgD3GGC1q/Yfzf3qnEq - xXo1+0EkGrEXUmrljHvmM8LYeyvEcerWifkW30SGybzENeHoN3xyhCiTnpUrAz/P - 9/qEUphYOK+SG6xCSTWF427wFb1km2+MEQQRGaFv+A8RRPjVNTYmZAM5wZbYUMz4 - cp+oB3NCL5Xll9lPpo61+pa65mN/1j/vU5TqptM/X5TJrZIke5UbNIF+pP3czNVz - 2RE4oZPbp7YnyDtwqf2jwH55vp8CcY1KemFgPGWAAWnvm7/U5Vjq6ewBSWQl9Y2R - v5bZu9fG61kRViZ6n91EksVVyOLHiNHw4LlGs0LE8a3G+6M2YQzvnHfpXLINhfwU - SZ6BWAJdknVsu6eesYoC08+nyikkq/A3BVD65pT5C9VsmUPbqqpGSYZmAuFgsf9m - zdyKVH4fOPx82DqSZEHZBojg3s5K141DmPp6o0OBX8Ydgfkg2sWXuNi/noBDvh9O - FXWN2IcgK0dET3pX4xFei0QuZgglDp3VyVVSCSUPsOwecZ2XTjtBZPCQVpp3r+QV - LyecFudQ94Ki/0R+M4CrE/mPApDvq+pTjYKFZ10YWtGIdguXq5BVZIMZfZzwIPWN - HdoaFnXRTXTlR4pLIM2nlOvyZmSMo0x6nzUMVGdv4Km9pxi6ZKAgAt4DkbCF9mt0 - QG8RpGJhiIch4kgKFmqxAoIBAQDw4X9Fp9t4f2UiessUDYxLyAtq4acu4ahup5Eb - vlDZPf9gInvz5q9aFHtYgtjTlH449f+EB4isKQscVMysgrJK+7z1IXXMm0sg44wT - F4oV+kvg3KpAridRHyE456RvCPqXYzty6ywJ9B7Zf2oCvd40JUOTm8z11K4COLut - rFIW/24PJA1CWudY/EgzD164k6379On0KryA77iKEZMUztBfHm/bdO8J/zmp7g+E - FS2TCBzR4LpN0uhBwp9wh4rVr74LrPDnQJVZKgeFd24UHEtmcVprAFNUexb2yy1s - vxnHsRPmv5eF7ED1Wlz2K+7LUWqibYOrjeCrS85+CEcey0ApAoIBAQDT2vmbHosb - Qr6ZENt6UX6n0RF8i4g3G4qhucr5hEMQs4H2J8SrUM68QT0GVY0GoDW6f79Pcyr0 - W1tm7qbAOm1Iv4uNYVL1qgpq1GnD5qpWSACGsVSE3OGELlNaVz8fgVdz6zT+rU2A - tp2t795UlrvaLgFI4wITqJF3LoTfy2MZu8JYCzlKM5pZksmEmJfR0RDAot2grtD3 - H5A+PZfUIZ/8BhmdaOAv5i647unfVF6UpPYejZ0rb67oEazxdeIHK3aD5AjurdsO - UpW/PMwsbaltp4iI7hvUfRX7Afb5fPXIhv9pHh1xWYl3djUNWmFoiMMP4tuxpOBo - y+T4maQaiDSHAoIBADrlZ9EIMclMnNXJYE4O4fbFesUvV0lHM3+ayQgXiH0Vg5Nl - 2xjPlqBX0bDajVluPU6AF3GYxfoSLv1GXqTvb9iVpKXrAHp+nef0uxMP9ltZT6Qz - UA1wh3x2OBFJ0hK0B1FsmeSHS8VDQye615jEA8iMM/GrbnnM/p7ccEcOkyO8YJSj - I/rNbzN6u8yAPZCzyx6Hy4w/xsdf1acslOHJj3kyX/cwqCGxnc/GvVR2OSZyHVnT - sLnGj7NEeudwvKlyxuzj5CMmz111wVEI2olgQa9Sl+EBu140mnDNTNYCA7OnwE3z - 
GoFMOrXC2mf2ZfSge4orbL5Nellnt51pOLp2x8ECggEBALM8Mazw/FOF9mbdgjJM - PFGSaa7rBcVJwdHttDHBmlPI6wzsvFEMPru6nfx76KJQbORqK9r13sN5fyzof59m - TwsbMt/cFSnOQJ39M7YPstDofbl20cDOduUzpEVsRvVKokhqGB3XVRiuZ1y+8WSz - Wh7OiTu3AwzKsrcYXkZQdnlRBq0iYcfLPKzHqUJLLzbOH9Q6djL5c8V/qLNfvNI1 - 2HqKVqV8Ex+zKJhBWRAe+x3bKnbS7MPQ6zNfsOdgCmhydwRCquPzpr7JU/PFZh+4 - b31cHgFrIZR2d2AzW1XcSLzsqa2vUs2RKOIu2deAPaUI/66zCZeTnGBNEFza76Ga - 1oUCggEAA38oXcnputwL103SeD8+uwHjtTf183Rucr+Ryqz6GymiWjlzELqu7TRd - yadAaNg9CuXmYS33Jtk/UNS0k9FvYqGTR+SBXIZr6nt9ZFd0SNlQkwkAQCsuekEs - nJlmUZax7DxXMgIHMKDboHZYM/MhgzEGSALmhU5LZ76MS17v3NEPxYpVHxjAotxW - g03HjWTltS8Bgt6u0KFTGJKEUcfwvWKZtjk5Fc1heZ49zh1nU3zo9C/h8iiijTy2 - s/YksP6cxveae4b7soN4rD/vnfsmKcG+DnTf6B8Zbm6tI2TneYOfFSCryp+yDnaJ - PIDNiTxNecePOmrD+1ivAEXcoL+e1w== - -----END PRIVATE KEY----- - - - id: webextensions-rsa-with-recommendation - type: xpi - # The signing parameters for each type of add-on are 'add-on' are - # signed with the OU 'Production' and the provided ID - # 'add-on-with-recommendation' are signed with the OU 'Production' - # and the provided ID and add a recommendation file 'extension' are - # signed with the OU 'Mozilla Extensions' and the provided ID 'system - # add-on' are signed with the OU 'Mozilla Components' and the - # provided ID 'hotfix' are signed with the OU 'Production' and the ID - # 'firefox-hotfix@mozilla.org' - mode: add-on-with-recommendation - recommendation: - path: "mozilla-recommendation.json" - states: - recommended: true - recommended-android: true - verified: true - line: true - relative_start: 0h - duration: 26298h - # RSA key gen is slow and CPU intensive, so we can optionally - # pregenerate and cache keys with a worker pool - rsacacheconfig: - numkeys: 25 - numgenerators: 2 - generatorsleepduration: 1m - fetchtimeout: 100ms - statssamplerate: 1m - certificate: | - -----BEGIN CERTIFICATE----- - MIIH0zCCBbugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBvDELMAkGA1UEBhMCVVMx - CzAJBgNVBAgTAkNBMRYwFAYDVQQHEw1Nb3VudGFpbiBWaWV3MRwwGgYDVQQKExNB - bGxpem9tIENvcnBvcmF0aW9uMSAwHgYDVQQLExdBbGxpem9tIEFNTyBEZXZlbG9w - bWVudDEYMBYGA1UEAxMPZGV2LmFtby5yb290LmNhMS4wLAYJKoZIhvcNAQkBFh9m - b3hzZWMrZGV2YW1vcm9vdGNhQG1vemlsbGEuY29tMB4XDTE3MDMyMTIzNDQwNFoX - DTI3MDMxOTIzNDQwNFowgbwxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEWMBQG - A1UEBxMNTW91bnRhaW4gVmlldzEcMBoGA1UEChMTQWxsaXpvbSBDb3Jwb3JhdGlv - bjEgMB4GA1UECxMXQWxsaXpvbSBBTU8gRGV2ZWxvcG1lbnQxGDAWBgNVBAMTD2Rl - di5hbW8ucm9vdC5jYTEuMCwGCSqGSIb3DQEJARYfZm94c2VjK2RldmFtb3Jvb3Rj - YUBtb3ppbGxhLmNvbTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMdX - 5soUuvWnkVHRHN5BKByrgpuU3QioE8SNT7BwRFeqbOySdvu5ecQAdNUoRbRyFmNB - ety2rQM9qw6y8eSe9fufIgrv1sg/xj7vweLmuC8Ob+zo5/iwRQw4JUdXnDjwX3W0 - auh0QRYfxWGK3hVrP9j1zIJk/yRBornCvXTtn8C/hVSE/PWc6CuV8vTcpyj+TPni - Lvulq17NdlX5qgUdn1yougJxnznkwnoIaBYLdAyZJJIUEomiEIxfabjnh8rfSMIw - AqmslrC8F73yo4JrCqJPt1ipggfpO3ZAjlEoTMcTUgyqR8B35GyuywWR0XrkJV7N - A7BM1qNjLb2to0XQSrGyWA7uPw88LuVk2aUPDE5uNK5Kv//+SGChUn2fDZTsjj3J - KY7f39JVwh/nk8ZkApplne8fKPoknW7er2R+rejyBx1+fJjLegKQsATpgKz4LRf4 - ct34oWSV6QXrZ/KKW+frWoHncy8C+UnCC3cDBKs272yqOvBoGMQTrF5oMn8i/Rap - gBbBdwysdJXb+buf/+ZS0PUt7avKFIlXqCNZjG3xotBsTuCL5zAoVKoXJW1FwrcZ - pveQuishKWNf9Id+0HaBdDp/vlbrTwXD1zsxfYvYw8wI7NkNO3TQBni5iyG4B1wh - oR+Z5AebWuJqVnsJyjPakNiuhKNsO/xTa4TF/ymfAgMBAAGjggHcMIIB2DAPBgNV - HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAWBgNVHSUBAf8EDDAKBggrBgEF - BQcDAzAdBgNVHQ4EFgQU2LRpqTdeQ1QlBWNA6fYAqHdpSaUwgekGA1UdIwSB4TCB - 3oAU2LRpqTdeQ1QlBWNA6fYAqHdpSaWhgcKkgb8wgbwxCzAJBgNVBAYTAlVTMQsw - CQYDVQQIEwJDQTEWMBQGA1UEBxMNTW91bnRhaW4gVmlldzEcMBoGA1UEChMTQWxs - 
aXpvbSBDb3Jwb3JhdGlvbjEgMB4GA1UECxMXQWxsaXpvbSBBTU8gRGV2ZWxvcG1l - bnQxGDAWBgNVBAMTD2Rldi5hbW8ucm9vdC5jYTEuMCwGCSqGSIb3DQEJARYfZm94 - c2VjK2RldmFtb3Jvb3RjYUBtb3ppbGxhLmNvbYIBATBCBglghkgBhvhCAQQENRYz - aHR0cHM6Ly9jb250ZW50LXNpZ25hdHVyZS5kZXYubW96YXdzLm5ldC9jYS9jcmwu - cGVtME4GCCsGAQUFBwEBBEIwQDA+BggrBgEFBQcwAoYyaHR0cHM6Ly9jb250ZW50 - LXNpZ25hdHVyZS5kZXYubW96YXdzLm5ldC9jYS9jYS5wZW0wDQYJKoZIhvcNAQEL - BQADggIBALqVt54WTkxD5U5fHPRUSZA9rFigoIcrHNrq+gTDd057cBDUWNc0cEHV - qaP0zgzqD2bIhV/WWlfMDY3VnB8L2+Vjvu2CEt8/9Kh5x9IgBmZt5VUMuEdmQOyH - vA7lz3UI+jmUGcojtLsi+sf4kxDZh3QB3T/wGiHg+K7vXnY7GWEy1Cjfwk/dvbT2 - ODTb5B3SPGsh75VmfzFGgerzsS71LN4FYBRUklLe8ozqKF8r/jGE2vfDR1Cy09pN - oR9ti+zaBiEtMlWJjxYrv7HvuoDR9xLmPxyV6gQbo6NnbudkpNdg5LhbY3WV1IgL - TnwJ7aHXgzOZ3w/VsSctg4beZZgYnr81vLKyefWJH1VzCe5XTgwXC1R/afGiVJ0P - hA1+T4My9oTaQBsiNYA2keXKJbTKerMTupoLgV/lJjxfF5BfQiy9NL18/bzxqf+J - 7w4P/4oHt3QCdISAIhlG4ttXfRR8oz6obAb6QYdCf3x9b2/3UXKd3UJ+gwchPjj6 - InnLK8ig9scn4opVNkBkjlMRsq1yd017eQzLSirpKj3br69qyLoyb/nPNJi7bL1K - bf6m5mF5GmKR+Glvq74O8rLQZ3a75v6H+NwOqAlZnWSJmC84R2HHsHPBw+2pExJT - E5bRcttRlhEdN4NJ2vWJnOH0DENHy6TEwACINJVx6ftucfPfvOxI - -----END CERTIFICATE----- - privatekey: | - -----BEGIN PRIVATE KEY----- - MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQDHV+bKFLr1p5FR - 0RzeQSgcq4KblN0IqBPEjU+wcERXqmzsknb7uXnEAHTVKEW0chZjQXrctq0DPasO - svHknvX7nyIK79bIP8Y+78Hi5rgvDm/s6Of4sEUMOCVHV5w48F91tGrodEEWH8Vh - it4Vaz/Y9cyCZP8kQaK5wr107Z/Av4VUhPz1nOgrlfL03Kco/kz54i77patezXZV - +aoFHZ9cqLoCcZ855MJ6CGgWC3QMmSSSFBKJohCMX2m454fK30jCMAKprJawvBe9 - 8qOCawqiT7dYqYIH6Tt2QI5RKEzHE1IMqkfAd+RsrssFkdF65CVezQOwTNajYy29 - raNF0EqxslgO7j8PPC7lZNmlDwxObjSuSr///khgoVJ9nw2U7I49ySmO39/SVcIf - 55PGZAKaZZ3vHyj6JJ1u3q9kfq3o8gcdfnyYy3oCkLAE6YCs+C0X+HLd+KFklekF - 62fyilvn61qB53MvAvlJwgt3AwSrNu9sqjrwaBjEE6xeaDJ/Iv0WqYAWwXcMrHSV - 2/m7n//mUtD1Le2ryhSJV6gjWYxt8aLQbE7gi+cwKFSqFyVtRcK3Gab3kLorISlj - X/SHftB2gXQ6f75W608Fw9c7MX2L2MPMCOzZDTt00AZ4uYshuAdcIaEfmeQHm1ri - alZ7Ccoz2pDYroSjbDv8U2uExf8pnwIDAQABAoICADf7eqgD3GGC1q/Yfzf3qnEq - xXo1+0EkGrEXUmrljHvmM8LYeyvEcerWifkW30SGybzENeHoN3xyhCiTnpUrAz/P - 9/qEUphYOK+SG6xCSTWF427wFb1km2+MEQQRGaFv+A8RRPjVNTYmZAM5wZbYUMz4 - cp+oB3NCL5Xll9lPpo61+pa65mN/1j/vU5TqptM/X5TJrZIke5UbNIF+pP3czNVz - 2RE4oZPbp7YnyDtwqf2jwH55vp8CcY1KemFgPGWAAWnvm7/U5Vjq6ewBSWQl9Y2R - v5bZu9fG61kRViZ6n91EksVVyOLHiNHw4LlGs0LE8a3G+6M2YQzvnHfpXLINhfwU - SZ6BWAJdknVsu6eesYoC08+nyikkq/A3BVD65pT5C9VsmUPbqqpGSYZmAuFgsf9m - zdyKVH4fOPx82DqSZEHZBojg3s5K141DmPp6o0OBX8Ydgfkg2sWXuNi/noBDvh9O - FXWN2IcgK0dET3pX4xFei0QuZgglDp3VyVVSCSUPsOwecZ2XTjtBZPCQVpp3r+QV - LyecFudQ94Ki/0R+M4CrE/mPApDvq+pTjYKFZ10YWtGIdguXq5BVZIMZfZzwIPWN - HdoaFnXRTXTlR4pLIM2nlOvyZmSMo0x6nzUMVGdv4Km9pxi6ZKAgAt4DkbCF9mt0 - QG8RpGJhiIch4kgKFmqxAoIBAQDw4X9Fp9t4f2UiessUDYxLyAtq4acu4ahup5Eb - vlDZPf9gInvz5q9aFHtYgtjTlH449f+EB4isKQscVMysgrJK+7z1IXXMm0sg44wT - F4oV+kvg3KpAridRHyE456RvCPqXYzty6ywJ9B7Zf2oCvd40JUOTm8z11K4COLut - rFIW/24PJA1CWudY/EgzD164k6379On0KryA77iKEZMUztBfHm/bdO8J/zmp7g+E - FS2TCBzR4LpN0uhBwp9wh4rVr74LrPDnQJVZKgeFd24UHEtmcVprAFNUexb2yy1s - vxnHsRPmv5eF7ED1Wlz2K+7LUWqibYOrjeCrS85+CEcey0ApAoIBAQDT2vmbHosb - Qr6ZENt6UX6n0RF8i4g3G4qhucr5hEMQs4H2J8SrUM68QT0GVY0GoDW6f79Pcyr0 - W1tm7qbAOm1Iv4uNYVL1qgpq1GnD5qpWSACGsVSE3OGELlNaVz8fgVdz6zT+rU2A - tp2t795UlrvaLgFI4wITqJF3LoTfy2MZu8JYCzlKM5pZksmEmJfR0RDAot2grtD3 - H5A+PZfUIZ/8BhmdaOAv5i647unfVF6UpPYejZ0rb67oEazxdeIHK3aD5AjurdsO - UpW/PMwsbaltp4iI7hvUfRX7Afb5fPXIhv9pHh1xWYl3djUNWmFoiMMP4tuxpOBo - y+T4maQaiDSHAoIBADrlZ9EIMclMnNXJYE4O4fbFesUvV0lHM3+ayQgXiH0Vg5Nl - 2xjPlqBX0bDajVluPU6AF3GYxfoSLv1GXqTvb9iVpKXrAHp+nef0uxMP9ltZT6Qz - 
UA1wh3x2OBFJ0hK0B1FsmeSHS8VDQye615jEA8iMM/GrbnnM/p7ccEcOkyO8YJSj - I/rNbzN6u8yAPZCzyx6Hy4w/xsdf1acslOHJj3kyX/cwqCGxnc/GvVR2OSZyHVnT - sLnGj7NEeudwvKlyxuzj5CMmz111wVEI2olgQa9Sl+EBu140mnDNTNYCA7OnwE3z - GoFMOrXC2mf2ZfSge4orbL5Nellnt51pOLp2x8ECggEBALM8Mazw/FOF9mbdgjJM - PFGSaa7rBcVJwdHttDHBmlPI6wzsvFEMPru6nfx76KJQbORqK9r13sN5fyzof59m - TwsbMt/cFSnOQJ39M7YPstDofbl20cDOduUzpEVsRvVKokhqGB3XVRiuZ1y+8WSz - Wh7OiTu3AwzKsrcYXkZQdnlRBq0iYcfLPKzHqUJLLzbOH9Q6djL5c8V/qLNfvNI1 - 2HqKVqV8Ex+zKJhBWRAe+x3bKnbS7MPQ6zNfsOdgCmhydwRCquPzpr7JU/PFZh+4 - b31cHgFrIZR2d2AzW1XcSLzsqa2vUs2RKOIu2deAPaUI/66zCZeTnGBNEFza76Ga - 1oUCggEAA38oXcnputwL103SeD8+uwHjtTf183Rucr+Ryqz6GymiWjlzELqu7TRd - yadAaNg9CuXmYS33Jtk/UNS0k9FvYqGTR+SBXIZr6nt9ZFd0SNlQkwkAQCsuekEs - nJlmUZax7DxXMgIHMKDboHZYM/MhgzEGSALmhU5LZ76MS17v3NEPxYpVHxjAotxW - g03HjWTltS8Bgt6u0KFTGJKEUcfwvWKZtjk5Fc1heZ49zh1nU3zo9C/h8iiijTy2 - s/YksP6cxveae4b7soN4rD/vnfsmKcG+DnTf6B8Zbm6tI2TneYOfFSCryp+yDnaJ - PIDNiTxNecePOmrD+1ivAEXcoL+e1w== - -----END PRIVATE KEY----- - authorizations: - - id: alice - key: fs5wgcer9qj819kfptdlp8gm227ewxnzvsuj9ztycsx08hfhzu - signers: - - webextensions-rsa - - id: bob - key: 9vh6bhlc10y63ow2k4zke7k0c3l9hpr8mo96p92jmbfqngs9e7d - signers: - - webextensions-rsa-with-recommendation - ########################################################################### - # - # The autograph configuration ends here. - # - ########################################################################### - - variables: - - &working-directory "~/addons-server" - - docker-images: - # These versions should try to match what we run in production. - # Note that the cimg/python:3.10-node convenience image provides - # nodejs latest lts, which we need to build assets and run the linter. - - &python "cimg/python:3.10-node" - - &redis "redis:6.2" - - &memcached "memcached:1.4" - - &mysql "mysql:8.0.28" - - mysql-config: &mysql-config - environment: - MYSQL_ALLOW_EMPTY_PASSWORD: true - MYSQL_DATABASE: olympia - - defaults: &defaults - working_directory: &working-directory - docker: - - image: *python - - defaults-with-services: &defaults-with-services - <<: *defaults - docker: - - image: *python - # Below are services this project depends on. In addition to these - # services, we also need autograph and elasticsearch but not all the - # time, hence the presence of other `defaults-with-*` references. - # - # Most settings below should be kept in sync with `docker-compose.yml`. - - image: *redis - - image: *memcached - - image: *mysql - <<: *mysql-config - - defaults-with-elasticsearch: &defaults-with-elasticsearch - <<: *defaults - docker: - - image: *python - - image: *redis - - image: *memcached - - image: *mysql - <<: *mysql-config - - image: docker.elastic.co/elasticsearch/elasticsearch:7.17.3 - environment: - # Disable all xpack related features to avoid unrelated logging in - # docker logs. 
https://github.com/mozilla/addons-server/issues/8887 - xpack.security.enabled: false - xpack.monitoring.enabled: false - xpack.graph.enabled: false - xpack.watcher.enabled: false - discovery.type: single-node - cluster.name: default-cluster - ES_JAVA_OPTS: -Xms256m -Xmx256m - - defaults-with-autograph: &defaults-with-autograph - <<: *defaults - docker: - - image: *python - - image: *redis - - image: *memcached - - image: *mysql - <<: *mysql-config - - image: mozilla/autograph:3.3.2 - command: bash -c 'echo -e "$AUTOGRAPH_CONFIG" > amo_config.yaml && cat amo_config.yaml && /go/bin/autograph -c amo_config.yaml' - environment: - AUTOGRAPH_CONFIG: *autograph_config - - defaults-release: &defaults-release - machine: - image: ubuntu-2004:202201-02 - working_directory: &working-directory - -commands: - make_release: - description: "Builds and pushes a Docker image" - parameters: - image_tag: - type: string - default: "latest" - steps: - - run: > - printf '{"commit":"%s","version":"%s","source":"https://github.com/%s/%s","build":"%s"}\n' - "$CIRCLE_SHA1" - "$CIRCLE_TAG" - "$CIRCLE_PROJECT_USERNAME" - "$CIRCLE_PROJECT_REPONAME" - "$CIRCLE_BUILD_URL" - > version.json - - run: - name: Build docker image and push to repo - command: | - docker version - docker login -u "${DOCKERHUB_USER}" -p "${DOCKERHUB_PASS}" - docker build -t app:build --label git.commit="$CIRCLE_SHA1" . - docker tag app:build "${DOCKERHUB_REPO}":<< parameters.image_tag >> - docker push "${DOCKERHUB_REPO}":<< parameters.image_tag >> - - better_checkout: - description: circle ci checkout step on steroids - parameters: - clone_options: - type: string - default: --depth=1 - description: git clone options - fetch_options: - type: string - default: --depth=10 - description: git fetch options - steps: - - run: - name: checkout - command: | - #!/bin/sh - set -e - - # Workaround old docker images with incorrect $HOME - # check https://github.com/docker/docker/issues/2968 for details - if [ "${HOME}" = "/" ] - then - export HOME=$(getent passwd $(id -un) | cut -d: -f6) - fi - - export SSH_CONFIG_DIR="$HOME/.ssh" - - echo "Using SSH Config Dir $SSH_CONFIG_DIR" - - mkdir -p "$SSH_CONFIG_DIR" - - echo 'github.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCj7ndNxQowgcQnjshcLrqPEiiphnt+VTTvDP6mHBL9j1aNUkY4Ue1gvwnGLVlOhGeYrnZaMgRK6+PKCUXaDbC7qtbW8gIkhL7aGCsOr/C56SJMy/BCZfxd1nWzAOxSDPgVsmerOBYfNqltV9/hWCqBywINIR+5dIg6JTJ72pcEpEjcYgXkE2YEFXV1JHnsKgbLWNlhScqb2UmyRkQyytRLtL+38TGxkxCflmO+5Z8CSSNY7GidjMIZ7Q4zMjA2n1nGrlTDkzwDCsw+wqFPGQA179cnfGWOWRVruj16z6XyvxvjJwbz0wQZ75XK5tKSb7FNyeIEs4TT4jk+S4dhPeAUC5y+bDYirYgM4GC7uEnztnZyaVWQ7B381AK4Qdrwt51ZqExKbQpTUNn+EjqoTwvqNj4kqx5QUCI0ThS/YkOxJCXmPUWZbhjpCg56i+2aB6CmK2JGhn57K5mj0MNdBXA4/WnwH6XoPWJzK5Nyu2zB3nAZp+S5hpQs+p1vN1/wsjk=' >> "$SSH_CONFIG_DIR/known_hosts" - - (umask 077; touch "$SSH_CONFIG_DIR/id_rsa") - chmod 0600 "$SSH_CONFIG_DIR/id_rsa" - (cat $CHECKOUT_KEY > "$SSH_CONFIG_DIR/id_rsa") - - export GIT_SSH_COMMAND='ssh -i $SSH_CONFIG_DIR/id_rsa -o UserKnownHostsFile=$SSH_CONFIG_DIR/known_hosts' - - # use git+ssh instead of https - git config --global url."ssh://git@github.com".insteadOf "https://github.com" || true - git config --global gc.auto 0 || true - - if [ -e .git ] - then - git remote set-url origin "$CIRCLE_REPOSITORY_URL" || true - else - git clone << parameters.clone_options >> "$CIRCLE_REPOSITORY_URL" . 
- fi - - if [ -n "$CIRCLE_TAG" ] - then - git fetch << parameters.fetch_options >> --force origin "refs/tags/${CIRCLE_TAG}" - elif [[ "$CIRCLE_BRANCH" =~ ^pull\/* ]] - then - git fetch << parameters.fetch_options >> --force origin "${CIRCLE_BRANCH}/head:remotes/origin/${CIRCLE_BRANCH}" - else - git fetch << parameters.fetch_options >> --force origin "${CIRCLE_BRANCH}:remotes/origin/${CIRCLE_BRANCH}" - fi - - - if [ -n "$CIRCLE_TAG" ] - then - git reset --hard "$CIRCLE_SHA1" - git checkout -q "$CIRCLE_TAG" - elif [ -n "$CIRCLE_BRANCH" ] - then - git reset --hard "$CIRCLE_SHA1" - git checkout -q -B "$CIRCLE_BRANCH" - fi - - git reset --hard "$CIRCLE_SHA1" - - setup_container: - description: common setup for the primary container - parameters: - wait_services: - type: boolean - default: true - steps: - - better_checkout - - run: - name: Initial setup - command: | - curl -sL https://repo.mysql.com/mysql-apt-config_0.8.29-1_all.deb --output mysql-apt-config.deb - sudo dpkg -i mysql-apt-config.deb - sudo apt-get update -q - sudo apt-get install -y gettext pngcrush librsvg2-bin libmysqlclient-dev - sudo cp ./docker/etc/mime.types /etc/mime.types - sudo touch /addons-server-docker-container - - run: - name: Install dockerize - command: | - wget https://github.com/jwilder/dockerize/releases/download/$DOCKERIZE_VERSION/dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz - sudo tar -C /usr/local/bin -xzvf dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz - rm dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz - environment: - DOCKERIZE_VERSION: v0.6.1 - - when: - condition: << parameters.wait_services >> - steps: - - run: - name: Wait for redis - command: dockerize -wait tcp://localhost:6379 -timeout 1m - - run: - name: Wait for mysql - command: dockerize -wait tcp://localhost:3306 -timeout 1m - - run: - name: Wait for memcached - command: dockerize -wait tcp://localhost:11211 -timeout 1m - - run: - name: Set environment variables - command: | - echo export CPUCOUNT=2 >> $BASH_ENV - echo export NPM_CONFIG_PREFIX=/deps/ >> $BASH_ENV - echo export CC=\"`python -c 'import sysconfig; print(sysconfig.get_config_var("CC"))'`\" >> $BASH_ENV - cat $BASH_ENV - - run: - name: Install Python and Node dependencies - command: | - sudo mkdir /deps - sudo chown circleci /deps - ACTUAL_CIRCLE_WORKING_DIRECTORY="${CIRCLE_WORKING_DIRECTORY/#\~/$HOME}" - ln -s ${ACTUAL_CIRCLE_WORKING_DIRECTORY}/package.json /deps/package.json - ln -s ${ACTUAL_CIRCLE_WORKING_DIRECTORY}/package-lock.json /deps/package-lock.json - make update_deps - # should be executed after all python install commands - - run: pyenv rehash - -jobs: - addons-versions-files-ratings: - <<: *defaults-with-services - steps: - - setup_container - - run: - command: pytest -n 2 -m 'not es_tests and not needs_locales_compilation and not static_assets' -v src/olympia/addons/ src/olympia/versions/ src/olympia/files/ src/olympia/ratings/ - - amo-lib-locales-and-signing: - <<: *defaults-with-autograph - steps: - - setup_container - - run: - command: pytest -n 2 -m 'not es_tests and not needs_locales_compilation and not static_assets' -v src/olympia/amo/ src/olympia/lib/ src/olympia/signing - environment: - AUTOGRAPH_SERVER_URL: http://127.0.0.1:5500 - # After having removed `tox`, we noticed that this job was a lot slower. - # The reason seems to be related to pyenv shims, which add a huge - # overhead to CLIs like `dennis-cmd`. By updating the path below, we - # should use `dennis-cmd` directly instead of the pyenv shim. 
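This comment in the CircleCI config being removed here describes a real pyenv quirk: a shim is a small dispatcher script that re-resolves the active interpreter on every invocation, while the active version's real `bin/` directory runs the entry point directly, which is what the `run` step that follows does. A minimal sketch of the difference, assuming pyenv is installed with a version active (`dennis-cmd` stands in for any console script):

```bash
# A shim is a dispatcher script; every call pays its resolution cost.
type dennis-cmd
# -> dennis-cmd is ~/.pyenv/shims/dennis-cmd

# Prepending the active version's bin dir makes lookups hit the binary directly.
export PATH="$(pyenv prefix)/bin:$PATH"
type dennis-cmd
# -> dennis-cmd is ~/.pyenv/versions/<version>/bin/dennis-cmd
```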
- - run: echo "export PATH=\"$(pyenv prefix)/bin:$PATH\"" >> $BASH_ENV - - run: bash ./locale/compile-mo.sh ./locale/ - - run: - command: pytest -n 2 -m 'needs_locales_compilation' -v src/olympia/ - - assets: - <<: *defaults-with-services - steps: - - setup_container - - run: make update_assets - - run: - command: pytest -m "static_assets" -v src/olympia/ - - run: make run_js_tests - - codestyle: - <<: *defaults - steps: - - setup_container: - wait_services: false - - run: pyenv rehash - - run: make lint-codestyle - - localization: - <<: *defaults - steps: - - setup_container: - wait_services: false - - run : make extract_locales - - run: git diff - - devhub: - <<: *defaults-with-services - steps: - - setup_container - - run: - command: pytest -n 2 -m 'not es_tests and not needs_locales_compilation and not static_assets' -v src/olympia/devhub/ - - docs: - <<: *defaults - steps: - - setup_container: - wait_services: false - - run: make docs - - main: - <<: *defaults-with-services - steps: - - setup_container - - run: - command: | - pytest -n 2 -m 'not es_tests and not needs_locales_compilation and not static_assets and not internal_routes_allowed' \ - -v src/olympia/ \ - --ignore src/olympia/addons/ \ - --ignore src/olympia/devhub/ \ - --ignore src/olympia/files/ \ - --ignore src/olympia/reviewers/ \ - --ignore src/olympia/ratings/ \ - --ignore src/olympia/amo/ \ - --ignore src/olympia/lib/ \ - --ignore src/olympia/signing \ - --ignore src/olympia/versions/ \ - --ignore src/olympia/zadmin - - reviewers-and-zadmin: - <<: *defaults-with-autograph - steps: - - setup_container - - run: - command: pytest -n 2 -m 'not es_tests and not needs_locales_compilation and not static_assets' -v src/olympia/reviewers/ src/olympia/zadmin/ - environment: - AUTOGRAPH_SERVER_URL: http://127.0.0.1:5500 - - run: - name: internal_routes_allowed - command: | - # We need to change the setting in the file because we can't - # override an env variable here, and the next command requires - # `INTERNAL_ROUTES_ALLOWED` to be set to `True`. - sed -i 's/^INTERNAL_ROUTES_ALLOWED.*/INTERNAL_ROUTES_ALLOWED=True/' settings_test.py - pytest -m 'internal_routes_allowed' -v src/olympia/ - - es-tests: - <<: *defaults-with-elasticsearch - steps: - - setup_container - - run: - name: Wait for elasticsearch - command: dockerize -wait tcp://localhost:9200 -timeout 1m - - run: - command: pytest -m "es_tests and not needs_locales_compilation and not static_assets" -v src/olympia/ - - release-master: - <<: *defaults-release - steps: - - checkout - - make_release: - image_tag: latest - - release-tag: - <<: *defaults-release - steps: - - checkout - - make_release: - image_tag: "${CIRCLE_TAG}" - -workflows: - version: 2 - default-workflow: - jobs: - - addons-versions-files-ratings - - amo-lib-locales-and-signing - - assets - - codestyle - - devhub - - docs - - main - - reviewers-and-zadmin - - es-tests - - localization - - release-master: - filters: - branches: - only: master - tags: - ignore: /.*/ - - release-tag: - filters: - tags: - only: /.*/ - branches: - ignore: /.*/ diff --git a/.dockerignore b/.dockerignore index 3327bb406839..e7b5b4c3d5aa 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,8 +1,63 @@ -.env -.git +# Copy of .gitignore please keep these in sync +*.css.tmp +*.egg-info +*.js.tmp +*.less.css +*.mo +*.po~ +*.py[co] +*.signed.zip +*.styl.css +*-all.css +*-all.js +*-min.css +*-min.js +.*.sw? 
.cache -.tox -deps/ -node_modules/ -storage/ -logs/* +.DS_Store +.env +.ipython +.mysql_history +.nose* +.npm/ +.pdbhistory +.pytest_cache +.ssh +.tox/ +.vscode +backups +build*.py +buildx-bake-metadata.json +deps/* +docker*.yml +docker/artifacts/* +docs/_build +docs/_gh-pages +docs/api/_build +local_settings.py +MANIFEST +node_modules +pip-log.txt +private/ +.ruff_cache +settings_local.py* +settings_local_*.py +shellng_local.py +site-static/* +src/olympia/discovery/strings.jinja2 +static-build/* +static/css/node_lib/* +static/js/node_lib/* +storage +tmp/* + +# Additionally ignore these files from the docker build that are not in .gitignore + +.dockerignore +.github +docs +private +docker-bake.hcl +docker-compose*.yml +Dockerfile* +Makefile-os diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md new file mode 100644 index 000000000000..2091a55b7d4a --- /dev/null +++ b/.github/CONTRIBUTING.md @@ -0,0 +1,29 @@ +(contributing)= + +# Contributing + +If you're not sure this is the correct place to file an issue +then please file an issue on the [mozilla/addons] project instead. + +Before contributing code, please note: + +- You agree to license your contributions under the [license]. +- Please ask on the [dev-addons mailing list] before submitting + pull-requests for new features or large changes that are not related to + existing issues. +- Follow [PEP8], [jshint] and our other [style guide conventions]. +- Please write tests and read the docs on [addons-server]. + +Ready to get started? Follow [these steps](https://wiki.mozilla.org/Add-ons/Contribute/Code). + +**Note to staff:** If you come across a potential "good first bug" for contributors, please tag it with "**maybe good first bug**". The community team [triages](https://wiki.mozilla.org/Add-ons/Contribute/Goodfirstbugs_triage) these every other week to ensure they have mentors assigned, onboarding information, and basic steps to get started. This gives new contributors a better experience when they pick a "good first bug" to work on. + +Thank you for contributing! + +[addons-server]: https://addons-server.readthedocs.io/en/latest/ +[dev-addons mailing list]: https://mail.mozilla.org/listinfo/dev-addons +[jshint]: http://www.jshint.com/ +[license]: https://github.com/mozilla/addons-server/blob/master/LICENSE +[mozilla/addons]: https://github.com/mozilla/addons/issues/new +[pep8]: https://www.python.org/dev/peps/pep-0008/ +[style guide conventions]: https://mozweb.readthedocs.io/en/latest/ diff --git a/.github/CONTRIBUTING.rst b/.github/CONTRIBUTING.rst deleted file mode 100644 index 5b40638cbccb..000000000000 --- a/.github/CONTRIBUTING.rst +++ /dev/null @@ -1,31 +0,0 @@ -.. _contributing: - -============ -Contributing -============ - -If you're not sure this is the correct place to file an issue -then please file an issue on the `mozilla/addons`_ project instead. - -Before contributing code, please note: - -- You agree to license your contributions under the `license`_. -- Please ask on the `dev-addons mailing list`_ before submitting - pull-requests for new features or large changes that are not related to - existing issues. -- Follow `PEP8`_, `jshint`_ and our other `style guide conventions`_. -- Please write tests and read the docs on `addons-server`_. - -Ready to get started? Follow `these steps `_. - -**Note to staff:** If you come across a potential "good first bug" for contributors, please tag it with "**maybe good first bug**". 
The community team `triages <https://wiki.mozilla.org/Add-ons/Contribute/Goodfirstbugs_triage>`_ these every other week to ensure they have mentors assigned, onboarding information, and basic steps to get started. This gives new contributors a better experience when they pick a "good first bug" to work on. - -Thank you for contributing! - -.. _license: https://github.com/mozilla/addons-server/blob/master/LICENSE -.. _dev-addons mailing list: https://mail.mozilla.org/listinfo/dev-addons -.. _PEP8: https://www.python.org/dev/peps/pep-0008/ -.. _jshint: http://www.jshint.com/ -.. _style guide conventions: https://mozweb.readthedocs.io/en/latest/ -.. _addons-server: https://addons-server.readthedocs.io/en/latest/ -.. _mozilla/addons: https://github.com/mozilla/addons/issues/new diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 09fc8fde6e4e..4906acfcc999 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,15 +1,43 @@ +Fixes: mozilla/addons#ISSUENUM + + + +### Description + + + +### Context + + + +### Testing + + + +### Checklist + -* [ ] This PR relates to an existing open issue and there are no existing - PRs open for the same issue. -* [ ] Add `Fixes #ISSUENUM` at the top of your PR. -* [ ] Add a description of the changes introduced in this PR. -* [ ] The change has been successfully run locally. -* [ ] Add tests to cover the changes added in this PR. -* [ ] Add before and after screenshots (Only for changes that impact the UI). - -Once you have met the above requirements please replace this section with -a `Fixes #ISSUENUM` linking to the issue fixed by this PR along with an -explanation of the changes. Thanks for your contribution! +- [ ] Add `Fixes: mozilla/addons#ISSUENUM` at the top of your PR, linking to an existing open issue in the mozilla/addons repository. +- [ ] Successfully verified the change locally. +- [ ] The change is covered by automated tests, or otherwise indicated why doing so is unnecessary/impossible. +- [ ] Add before and after screenshots (Only for changes that impact the UI). +- [ ] Add or update relevant [docs](../docs/) reflecting the changes made. diff --git a/.github/actions/build-docker/action.yml b/.github/actions/build-docker/action.yml new file mode 100644 index 000000000000..b5b77025cf5f --- /dev/null +++ b/.github/actions/build-docker/action.yml @@ -0,0 +1,83 @@ +name: 'Docker Build Action' +description: 'Build the docker image' +inputs: + registry: + required: true + description: The registry to tag the image with + image: + required: true + description: The image name to tag the image with + version: + required: true + description: The image version to tag with + push: + required: false + description: Push the image?
+ default: 'false' + +outputs: + tag: + description: The docker tag of the built image + value: ${{ steps.build_meta.outputs.tag }} + version: + description: The docker version of the built image + value: ${{ steps.meta.outputs.version }} + digest: + description: The docker build digest of the built image + value: ${{ steps.build_meta.outputs.digest }} + +runs: + using: 'composite' + steps: + - name: Context + id: context + shell: bash + run: | + git_repo_url="${{ github.server_url }}/${{ github.repository }}" + + echo "git_build_url=$git_repo_url/actions/runs/${{ github.run_id }}" >> $GITHUB_OUTPUT + echo "git_sha=${{ github.sha }}" >> $GITHUB_OUTPUT + + cat $GITHUB_OUTPUT + + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + bake-target: web + images: ${{ inputs.registry }}/${{ inputs.image }} + tags: | + # use raw tag to allow the calling workflow to define the version of the image + # and to prevent multiple tags from being associated with a build + type=raw,value=${{ inputs.version }} + + - name: Create .env and version.json files + shell: bash + run: | + # We only build the production image in CI + echo "DOCKER_TARGET=production" >> $GITHUB_ENV + echo "DOCKER_VERSION=${{ steps.meta.outputs.version }}" >> $GITHUB_ENV + echo "DOCKER_COMMIT=${{ steps.context.outputs.git_sha }}" >> $GITHUB_ENV + echo "DOCKER_BUILD=${{ steps.context.outputs.git_build_url }}" >> $GITHUB_ENV + echo "TAGS_FILE=${{ steps.meta.outputs.bake-file-tags }}" >> $GITHUB_ENV + echo "ANNOTATIONS_FILE=${{ steps.meta.outputs.bake-file-annotations }}" >> $GITHUB_ENV + echo "DOCKER_METADATA_FILE=buildx-bake-metadata.json" >> $GITHUB_ENV + + make setup + + - name: Build Image + id: build + shell: bash + run: | + make docker_build_web \ + ARGS="--file ${{ env.TAGS_FILE }} --file ${{ env.ANNOTATIONS_FILE }}" \ + DOCKER_PUSH=${{ inputs.push }} + + - name: Get image digest + id: build_meta + shell: bash + run: | + metadata=$(cat $DOCKER_METADATA_FILE) + echo "digest=$(echo $metadata | jq -r '.web."containerimage.digest"')" >> $GITHUB_OUTPUT + echo "tag=$(echo $metadata | jq -r '.web."image.name"')" >> $GITHUB_OUTPUT + diff --git a/.github/actions/context/action.yml b/.github/actions/context/action.yml new file mode 100644 index 000000000000..0d2a61722d89 --- /dev/null +++ b/.github/actions/context/action.yml @@ -0,0 +1,129 @@ +name: 'Dump Context' +description: 'Display context for action run' + +outputs: + # All github action outputs are strings, even if set to "true" + # so when using these values always assert against strings or convert from json + # \$\{{ needs.context.outputs.is_fork == 'true' }} // true + # \$\{{ fromJson(needs.context.outputs.is_fork) == false }} // true + # \$\{{ needs.context.outputs.is_fork == true }} // false + # \$\{{ needs.context.outputs.is_fork }} // false + is_fork: + description: "" + value: ${{ steps.context.outputs.is_fork }} + is_default_branch: + description: "" + value: ${{ steps.context.outputs.is_default_branch }} + is_release_master: + description: "" + value: ${{ steps.context.outputs.is_release_master }} + is_release_tag: + description: "" + value: ${{ steps.context.outputs.is_release_tag }} + docker_version: + description: "" + value: ${{ steps.context.outputs.docker_version }} + +runs: + using: 'composite' + steps: + - name: Dump GitHub context + shell: bash + env: + GITHUB_CONTEXT: ${{ toJson(github) }} + run: echo "$GITHUB_CONTEXT" + - name: Dump job context + shell: bash + env: + JOB_CONTEXT: ${{ toJson(job) }} + run: echo "$JOB_CONTEXT" + - name: Dump 
steps context + shell: bash + env: + STEPS_CONTEXT: ${{ toJson(steps) }} + run: echo "$STEPS_CONTEXT" + - name: Dump runner context + shell: bash + env: + RUNNER_CONTEXT: ${{ toJson(runner) }} + run: echo "$RUNNER_CONTEXT" + - name: Dump env context + shell: bash + env: + ENV_CONTEXT: ${{ toJson(env) }} + run: | + echo "$ENV_CONTEXT" + - name: Dump inputs context + shell: bash + env: + INPUTS_CONTEXT: ${{ toJson(inputs) }} + run: | + echo "$INPUTS_CONTEXT" + + - name: Set context + id: context + env: + # The default branch of the repository, in this case "master" + default_branch: ${{ github.event.repository.default_branch }} + shell: bash + run: | + event_name="${{ github.event_name }}" + event_action="${{ github.event.action }}" + + # Stable check for whether the workflow is running on the default branch + # https://stackoverflow.com/questions/64781462/github-actions-default-branch-variable + is_default_branch="${{ format('refs/heads/{0}', env.default_branch) == github.ref }}" + + # In most events, the repository refers to the head which would be the fork + is_fork="${{ github.event.repository.fork }}" + # Default version is the branch name + docker_version="${{ github.ref_name }}" + + # This is different in a pull_request where we need to check the head explicitly + if [[ "${{ github.event_name }}" == 'pull_request' ]]; then + # repository on a pull request refers to the base which is always mozilla/addons-server + is_head_fork="${{ github.event.pull_request.head.repo.fork }}" + # https://docs.github.com/en/code-security/dependabot/working-with-dependabot/automating-dependabot-with-github-actions + is_dependabot="${{ github.actor == 'dependabot[bot]' }}" + + # For PRs we need to reference the head branch + docker_version="${{ github.head_ref }}" + + # If the head repository is a fork or if the PR is opened by dependabot + # we consider the run to be a fork.
Dependabot and proper forks are treated + # the same in terms of limited read-only github token scope + if [[ "$is_head_fork" == 'true' || "$is_dependabot" == 'true' ]]; then + is_fork="true" + fi + fi + + is_release_master="false" + is_release_tag="false" + + # Releases can only happen if we are NOT on a fork + if [[ "$is_fork" == 'false' ]]; then + # A master release occurs on a push to the default branch of the origin repository + if [[ "$event_name" == 'push' && "$is_default_branch" == 'true' ]]; then + is_release_master="true" + + # If we are releasing master, we tag latest + docker_version="latest" + fi + + # A tag release occurs when a release is published + if [[ "$event_name" == 'release' && "$event_action" == 'published' ]]; then + is_release_tag="true" + + # If we are releasing a tag, we tag the docker version as the git tag + docker_version="${{ github.event.release.tag_name }}" + fi + fi + + echo "is_default_branch=$is_default_branch" >> $GITHUB_OUTPUT + echo "is_fork=$is_fork" >> $GITHUB_OUTPUT + echo "is_release_master=$is_release_master" >> $GITHUB_OUTPUT + echo "is_release_tag=$is_release_tag" >> $GITHUB_OUTPUT + echo "docker_version=$docker_version" >> $GITHUB_OUTPUT + + echo "event_name: $event_name" + cat $GITHUB_OUTPUT diff --git a/.github/actions/login-docker/action.yml b/.github/actions/login-docker/action.yml new file mode 100644 index 000000000000..412dc672893b --- /dev/null +++ b/.github/actions/login-docker/action.yml @@ -0,0 +1,37 @@ +name: Login to dockerhub +description: Login to dockerhub and return image configuration for building + +inputs: + username: + required: true + description: The docker username + password: + required: true + description: The docker password + +outputs: + registry: + description: The dockerhub registry + value: ${{ steps.context.outputs.registry }} + image: + description: The dockerhub image to push to + value: ${{ steps.context.outputs.image }} + +runs: + using: 'composite' + steps: + - name: Set Context + id: context + shell: bash + run: | + echo "registry=docker.io" >> $GITHUB_OUTPUT + echo "image=${{ github.repository }}" >> $GITHUB_OUTPUT + + - name: Login to Dockerhub + uses: docker/login-action@v3 + with: + registry: ${{ steps.context.outputs.registry }} + username: ${{ inputs.username }} + password: ${{ inputs.password }} + + diff --git a/.github/actions/login-gar/action.yml b/.github/actions/login-gar/action.yml new file mode 100644 index 000000000000..b46cfaf90249 --- /dev/null +++ b/.github/actions/login-gar/action.yml @@ -0,0 +1,46 @@ +name: Login to Google Artifact Registry +description: Login to GAR and return image configuration for building + +inputs: + service_account: + required: true + description: The service account used for GCP + workload_identity_provider: + required: true + description: The workload identity provider + +outputs: + registry: + description: The gar registry + value: ${{ steps.context.outputs.registry }} + image: + description: The gar image to push to + value: ${{ steps.context.outputs.image }} + +runs: + using: 'composite' + steps: + - name: Set Context + id: context + shell: bash + run: | + echo "registry=us-docker.pkg.dev" >> $GITHUB_OUTPUT + echo "image=moz-fx-amo-prod/amo-prod/addons-server" >> $GITHUB_OUTPUT + + - name: get the GCP auth token + id: gcp-auth + uses: google-github-actions/auth@v2 + with: + token_format: access_token + service_account: ${{ inputs.service_account }} + workload_identity_provider: ${{ inputs.workload_identity_provider }} + + - name: login to GAR + if: steps.gcp-auth.outcome ==
'success' + uses: docker/login-action@v3 + with: + registry: ${{ steps.context.outputs.registry }} + username: oauth2accesstoken + password: ${{ steps.gcp-auth.outputs.access_token }} + + diff --git a/.github/actions/run-docker/action.yml b/.github/actions/run-docker/action.yml new file mode 100644 index 000000000000..1619f498eb95 --- /dev/null +++ b/.github/actions/run-docker/action.yml @@ -0,0 +1,62 @@ +name: 'Docker Run Action' +description: 'Run a command in a new container' +inputs: + version: + description: 'The version of the image to run. ' + required: true + default: 'local' + digest: + description: 'The build digest of the image to run. Overrides version.' + required: true + default: '' + run: + description: 'Run command in container' + required: true + logs: + description: 'Show logs' + required: false + data_backup_skip: + description: 'Skip data backup' + required: false + default: 'true' + target: + description: 'Docker target to run (development|production)' + required: false + default: 'production' + mount: + description: 'Mount host files at runtime? (development|production)' + required: false + default: 'production' + deps: + description: 'Which dependencies to install at runtime? (development|production)' + required: false + default: 'production' + +runs: + using: 'composite' + steps: + - name: Run Docker Container + shell: bash + run: | + # Start the specified services + make up \ + DOCKER_VERSION="${{ inputs.version }}" \ + DOCKER_DIGEST="${{ inputs.digest }}" \ + DOCKER_TARGET="${{ inputs.target }}" \ + OLYMPIA_UID="$(id -u)" \ + OLYMPIA_MOUNT="${{ inputs.mount }}" \ + OLYMPIA_DEPS="${{ inputs.deps }}" \ + DATA_BACKUP_SKIP="${{ inputs.data_backup_skip }}" \ + DOCKER_WAIT="true" + + + # Exec the run command in the container + # quoted 'EOF' to prevent variable expansion + cat <<'EOF' | docker compose exec --user olympia web sh + ${{ inputs.run }} + EOF + + - name: Logs + if: ${{ inputs.logs }} + shell: bash + run: docker compose logs diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 9653b409031c..2a03b7768183 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -34,7 +34,7 @@ updates: - ">= 6" - dependency-name: idna versions: - - ">= 3" + - ">= 4" - dependency-name: amqp versions: - ">= 6" diff --git a/.github/release-template.md b/.github/release-template.md new file mode 100644 index 000000000000..76d17362f3d1 --- /dev/null +++ b/.github/release-template.md @@ -0,0 +1,28 @@ +This week's push hero is @{{GITHUB_USER}} + +Previous Release: [{{PREVIOUS_TAG}}]({{PREVIOUS_RELEASE_URL}}) + +## Blockers: + +## Cherry-picks: + + + +## Before we push: + +## Before we start: + +## Before we promote: + +## After we're done: + +## Addons-Frontend Changelog: + + + +## Addons Server Changelog: + + diff --git a/.github/release.yml b/.github/release.yml new file mode 100644 index 000000000000..f0633633c47d --- /dev/null +++ b/.github/release.yml @@ -0,0 +1,11 @@ +changelog: + categories: + - title: Notable things shipping + labels: + - '*' + exclude: + labels: + - dependencies + - title: Dependendabots + labels: + - dependencies diff --git a/.github/workflows/_test.yml b/.github/workflows/_test.yml new file mode 100644 index 000000000000..81ed82c564e9 --- /dev/null +++ b/.github/workflows/_test.yml @@ -0,0 +1,72 @@ +name: Test Docker Image + +run-name: | + ref: ${{ github.ref_name }} | + version: ${{ inputs.version }} | + digest: ${{ inputs.digest }} | + +on: + workflow_call: + inputs: + version: + description: The version of the image to run + type: 
string + required: true + digest: + description: The build digest of the image to run. Overrides version. + type: string + required: false + workflow_dispatch: + inputs: + version: + description: The version of the image to run + type: string + required: true + digest: + description: The build digest of the image to run. Overrides version. + type: string + required: false + +concurrency: + group: test-${{ github.workflow }}-${{ github.event_name}}-${{ github.ref}}-${{ toJson(inputs) }} + cancel-in-progress: true + +jobs: + test: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + include: + - + name: Needs Locale Compilation + services: '' + run: | + make compile_locales + make test_needs_locales_compilation + - + name: Static Assets + services: '' + run: make test_static_assets + - + name: Internal Routes + services: '' + run: make test_internal_routes_allowed + - + name: Elastic Search + services: '' + run: make test_es_tests + - + name: Codestyle + services: web + run: make lint-codestyle + steps: + - uses: actions/checkout@v4 + - name: Test (${{ matrix.name }}) + uses: ./.github/actions/run-docker + with: + version: ${{ inputs.version }} + digest: ${{ inputs.digest }} + services: ${{ matrix.services }} + deps: development + run: ${{ matrix.run }} diff --git a/.github/workflows/_test_check.yml b/.github/workflows/_test_check.yml new file mode 100644 index 000000000000..eebd0f900289 --- /dev/null +++ b/.github/workflows/_test_check.yml @@ -0,0 +1,171 @@ +name: Test make up and check the local dev setup + +run-name: | + ref: ${{ github.ref_name }} | + version: ${{ inputs.version }} | + digest: ${{ inputs.digest }} | + +on: + workflow_call: + inputs: + version: + description: The version of the image to run + type: string + required: true + digest: + description: The build digest of the image to run. Overrides version. + type: string + required: false + workflow_dispatch: + inputs: + version: + description: The version of the image to run + type: string + required: true + digest: + description: The build digest of the image to run. Overrides version. 
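(Stepping out of the workflow inputs for a moment: every matrix entry in `_test.yml` above funnels through the `run-docker` composite action defined earlier in this diff, which amounts to a `make up` with the image coordinates followed by an exec into the `web` service. Roughly, one entry can be reproduced locally with the sketch below; the values mirror the action's inputs rather than a documented interface:)

```bash
# Approximate a single _test.yml matrix entry on a local checkout.
# Values mirror the run-docker action's inputs; adjust to taste.
make up \
  DOCKER_VERSION=local \
  DOCKER_TARGET=production \
  OLYMPIA_UID="$(id -u)" \
  OLYMPIA_DEPS=development \
  DOCKER_WAIT=true

# Then exec the entry's command in the running web container,
# as the action does with its heredoc.
docker compose exec --user olympia web sh -c 'make test_es_tests'
```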
+ type: string + required: false + +concurrency: + group: test_check-${{ github.workflow }}-${{ github.event_name}}-${{ github.ref}}-${{ toJson(inputs) }} + cancel-in-progress: true + +jobs: + context: + runs-on: ubuntu-latest + outputs: + is_fork: ${{ steps.context.outputs.is_fork }} + steps: + - uses: actions/checkout@v4 + - id: context + uses: ./.github/actions/context + + test_check: + runs-on: ubuntu-latest + name: | + version: '${{ matrix.version }}' | + target: '${{ matrix.target }}' | + mount: '${{ matrix.mount }}' | + deps: '${{ matrix.deps }}' + strategy: + fail-fast: false + matrix: + version: + - local + - ${{ inputs.version }} + target: + - development + - production + mount: + - development + - production + deps: + - development + - production + steps: + - uses: actions/checkout@v4 + - shell: bash + continue-on-error: true + run: | + cat <> $GITHUB_OUTPUT + + # Construct the matrix input for test_main using the groups count + # the matrix.group should be an array of numbers from 1 to $splits + matrix=[$(seq -s, 1 $splits)] + echo "matrix: $matrix" + echo "matrix=$matrix" >> $GITHUB_OUTPUT + + test_main: + runs-on: ubuntu-latest + needs: [test_config] + strategy: + fail-fast: false + matrix: + group: ${{fromJson(needs.test_config.outputs.matrix)}} + + steps: + - uses: actions/checkout@v4 + + - name: Test (test_matrix) + uses: ./.github/actions/run-docker + with: + services: '' + digest: ${{ inputs.digest }} + version: ${{ inputs.version }} + mount: development + deps: development + run: | + split="--splits ${{ needs.test_config.outputs.splits }}" + group="--group ${{ matrix.group }}" + report="--report-log ${{ env.log_file}}" + make test_main ARGS="${split} ${group} ${report}" + + - name: Upload logs + uses: actions/upload-artifact@v4 + with: + path: ${{ env.log_file }} + name: ${{ env.log_artifact }}-${{ matrix.group }} + retention-days: 1 + overwrite: true + + test_log: + runs-on: ubuntu-latest + if: always() + needs: [test_config, test_main] + + steps: + - uses: actions/checkout@v4 + + - uses: actions/download-artifact@v4 + with: + pattern: ${{ env.log_artifact }}* + + - name: Cat logs + shell: bash + run: | + for dir in $(ls -d ${{ env.log_artifact }}* | sort -V); do + job=$(basename "$dir") + file="${dir}/${{ env.log_file }}" + if [ -f "$file" ]; then + cat "$file" | jq \ + -r \ + --arg job "$job" \ + 'select(has("when") and .when == "teardown") | "[\($job)] \(.outcome) \(.nodeid)"' + else + echo "$file: No such file or directory" + fi + done diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 000000000000..d782354e3346 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,249 @@ +name: CI + +on: + # Runs when there is a push to the default branch + # This triggers tests and a pushed "latest" image + # That is deployed to the "dev" environment + push: + branches: + - master + # Runs on pull requests to verify changes and push + # PR image for local testing + pull_request: + # Manually dispatch run entire CI on a ref + workflow_dispatch: + # Runs when a release is published + # Pushes a tagged image + # That is deployed to the "staging/production" environments + release: + types: [published] + +concurrency: + group: ${{ github.workflow }}-${{ github.event_name}}-${{ github.ref}} + cancel-in-progress: true + +env: + docs_artifact: docs + +jobs: + context: + runs-on: ubuntu-latest + + outputs: + is_fork: ${{ steps.context.outputs.is_fork }} + is_release_master: ${{ steps.context.outputs.is_release_master }} + is_dependabot: ${{ 
steps.context.outputs.is_dependabot }} + is_default_branch: ${{ steps.context.outputs.is_default_branch }} + is_release_tag: ${{ steps.context.outputs.is_release_tag }} + docker_version: ${{ steps.context.outputs.docker_version }} + + steps: + - uses: actions/checkout@v4 + - name: Set context + id: context + uses: ./.github/actions/context + + build: + name: ${{ needs.context.outputs.is_fork == 'true' && 'Skip' || 'Build' }} CI Image + runs-on: ubuntu-latest + needs: context + + outputs: + # If build is skipped we should pass local version to build the image + version: ${{ steps.build.outputs.version || 'local' }} + digest: ${{ steps.build.outputs.digest || '' }} + + steps: + - uses: actions/checkout@v4 + + - name: Login to Dockerhub + if: needs.context.outputs.is_fork == 'false' + id: docker_hub + uses: ./.github/actions/login-docker + with: + username: ${{ secrets.DOCKER_USER }} + password: ${{ secrets.DOCKER_PASS }} + + - name: Build and Push Image + if: steps.docker_hub.outcome == 'success' + id: build + uses: ./.github/actions/build-docker + with: + registry: ${{ steps.docker_hub.outputs.registry }} + image: ${{ steps.docker_hub.outputs.image }} + version: ci-${{ needs.context.outputs.docker_version }} + push: true + + docs_build: + runs-on: ubuntu-latest + needs: build + + steps: + - uses: actions/checkout@v4 + - uses: actions/configure-pages@v4 + + - name: Build Docs + uses: ./.github/actions/run-docker + with: + digest: ${{ needs.build.outputs.digest }} + version: ${{ needs.build.outputs.version }} + mount: development + deps: development + target: development + run: | + make docs + + - name: Upload artifact + uses: actions/upload-pages-artifact@v3 + with: + path: 'docs/_build/html' + name: ${{ env.docs_artifact }} + + docs_deploy: + needs: [context, docs_build] + # Only deploy docs on a push event + # to the default branch + # that is not running on a fork + if: | + github.event_name == 'push' && + needs.context.outputs.is_default_branch == 'true' && + needs.context.outputs.is_fork == 'false' + permissions: + contents: read + pages: write + id-token: write + environment: + name: github-pages + url: ${{ steps.deployment.outputs.page_url }} + runs-on: ubuntu-latest + + steps: + - name: Deploy to GitHub Pages + id: deployment + uses: actions/deploy-pages@v4 + with: + artifact_name: ${{ env.docs_artifact }} + + locales: + runs-on: ubuntu-latest + needs: [build, context] + + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + ref: ${{ github.event.pull_request.head.ref }} + repository: ${{ github.event.pull_request.head.repo.full_name }} + + - name: Extract Locales + uses: ./.github/actions/run-docker + with: + digest: ${{ needs.build.outputs.digest }} + version: ${{ needs.build.outputs.version }} + mount: development + deps: development + run: make extract_locales + + - name: Push Locales + if: | + github.event_name == 'push' || + github.event_name == 'pull_request' + shell: bash + run: | + is_fork="${{ needs.context.outputs.is_fork }}" + is_default_branch="${{ needs.context.outputs.is_default_branch }}" + is_push="${{ github.event_name == 'push' }}" + + if [[ "$is_fork" == 'true' ]]; then + cat <<'EOF' + Github actions are not authorized to push from workflows triggered by forks. + We cannot verify if the l10n extraction push will work or not. + Please submit a PR from the base repository if you are modifying l10n extraction scripts. 
+ EOF + else + if [[ "$is_default_branch" == 'true' && "$is_push" == 'true' ]]; then + args="" + else + args="--dry-run" + fi + make push_locales ARGS="${args}" + fi + + test: + needs: build + uses: ./.github/workflows/_test.yml + with: + version: ${{ needs.build.outputs.version }} + digest: ${{ needs.build.outputs.digest }} + + test_main: + needs: [context, build] + uses: ./.github/workflows/_test_main.yml + with: + version: ${{ needs.build.outputs.version }} + digest: ${{ needs.build.outputs.digest }} + + test_check: + needs: [context, build] + uses: ./.github/workflows/_test_check.yml + with: + version: ${{ needs.build.outputs.version }} + digest: ${{ needs.build.outputs.digest }} + + push_dockerhub: + name: Push Production Docker Image (Dockerhub) + runs-on: ubuntu-latest + if: | + needs.context.outputs.is_release_master == 'true' || + needs.context.outputs.is_release_tag == 'true' + needs: [context, build, docs_build, locales, test, test_main] + + steps: + - uses: actions/checkout@v4 + + - name: Login to Dockerhub + id: docker_hub + uses: ./.github/actions/login-docker + with: + username: ${{ secrets.DOCKER_USER }} + password: ${{ secrets.DOCKER_PASS }} + + - name: Build and Push Image + id: build + uses: ./.github/actions/build-docker + with: + registry: ${{ steps.docker_hub.outputs.registry }} + image: ${{ steps.docker_hub.outputs.image }} + version: ${{ needs.context.outputs.docker_version }} + push: true + + push_gar: + name: Push Production Docker Image (GAR) + runs-on: ubuntu-latest + if: | + needs.context.outputs.is_release_master == 'true' || + needs.context.outputs.is_release_tag == 'true' + needs: [context, build, docs_build, locales, test, test_main] + + permissions: + contents: 'read' + id-token: 'write' + + steps: + - uses: actions/checkout@v4 + + - name: Login to GAR + id: docker_gar + uses: ./.github/actions/login-gar + with: + service_account: ${{ secrets.GAR_PUSHER_SERVICE_ACCOUNT_EMAIL }} + workload_identity_provider: ${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }} + + - name: Build and Push Image + id: build + uses: ./.github/actions/build-docker + with: + registry: ${{ steps.docker_gar.outputs.registry }} + image: ${{ steps.docker_gar.outputs.image }} + version: ${{ needs.context.outputs.docker_version }} + push: true diff --git a/.github/workflows/draft_release.yml b/.github/workflows/draft_release.yml new file mode 100644 index 000000000000..eb586804768a --- /dev/null +++ b/.github/workflows/draft_release.yml @@ -0,0 +1,88 @@ +name: Draft Release + +on: + workflow_dispatch: + inputs: + push_hero: + description: The person responsible for facilitating the release. + required: true + type: choice + options: + - kevinmind + - diox + - eviljeff + tag: + description: 'Release date YYYY.MM.DD Also used to generate the tag name.' + required: true + +jobs: + draft_release: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Create Release Draft + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + shell: bash + run: | + if [ -z "${{ github.event.inputs.tag }}" ]; then + echo "Tag is required" + exit 1 + fi + + if [ -z "${{ github.event.inputs.push_hero }}" ]; then + echo "Push hero is required" + exit 1 + fi + + # Format current date as YYYY.MM.DD + + if [[ "${{ github.event_name}}" == 'workflow_dispatch' ]]; then + tag="${{ github.event.inputs.tag }}" + fi + + # Validate the tag is formatted correctly YYYY.MM.DD or YYYY.MM.DD-X + # where X is a whole number greater than zero + if [[ ! 
$tag =~ ^[0-9]{4}\.[0-9]{2}\.[0-9]{2}(-[0-9]+)?$ ]]; then + echo "Invalid tag format. Must be YYYY.MM.DD or YYYY.MM.DD-X" + exit 1 + fi + + # Verify that a release with this tag does not already exist + if gh release view $tag &> /dev/null; then + echo "Release $tag already exists" + exit 1 + fi + + # Get the latest release tag + previous_release=$( + gh api \ + -H "Accept: application/vnd.github+json" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + /repos/mozilla/addons-server/releases/latest + ) + + previous_tag=$(echo $previous_release | jq -r '.tag_name') + previous_release_url=$(echo $previous_release | jq -r '.html_url') + + cat <= v1.9.0 -ENV OLYMPIA_UID=9500 \ - OLYMPIA_GID=9500 -RUN groupadd -g ${OLYMPIA_GID} olympia && useradd -u ${OLYMPIA_UID} -g ${OLYMPIA_GID} -s /sbin/nologin -d /data/olympia olympia +FROM python:3.12-slim-bookworm AS olympia -# Add support for https apt repos and gpg signed repos -RUN apt-get update && apt-get install -y \ - apt-transport-https \ - gnupg2 \ - && rm -rf /var/lib/apt/lists/* +ENV BUILD_INFO=/build-info.json + +# Set shell to bash with logs and errors for build +SHELL ["/bin/bash", "-xue", "-c"] + +ENV OLYMPIA_UID=9500 +RUN < ${BUILD_INFO} +{ + "commit": "${DOCKER_COMMIT}", + "version": "${DOCKER_VERSION}", + "build": "${DOCKER_BUILD}", + "target": "${DOCKER_TARGET}", + "source": "https://github.com/mozilla/addons-server" +} +INNEREOF +# Set permissions to make the file readable by all but only writable by root +chmod 644 ${BUILD_INFO} +EOF + +FROM olympia AS base # Add keys and repos for node and mysql -COPY docker/*.gpg.key /etc/pki/gpg/ -RUN APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=DontWarn \ - apt-key add /etc/pki/gpg/nodesource.gpg.key \ - && APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=DontWarn \ - apt-key add /etc/pki/gpg/mysql.gpg.key +# TODO: replace this with a bind mount on the RUN command +COPY docker/*.gpg.asc /etc/apt/trusted.gpg.d/ COPY docker/*.list /etc/apt/sources.list.d/ -# Allow scripts to detect we're running in our own container and install -# packages. 
-RUN touch /addons-server-docker-container \ - && apt-get update && apt-get install -y \ - # General (dev-) dependencies - bash-completion \ - build-essential \ - curl \ - libjpeg-dev \ - libsasl2-dev \ - libxml2-dev \ - libxslt-dev \ - locales \ - zlib1g-dev \ - libffi-dev \ - libssl-dev \ - nodejs \ - # Git, because we're using git-checkout dependencies - git \ - # Dependencies for mysql-python (from mysql apt repo, not debian) - mysql-client \ - libmysqlclient-dev \ - swig \ - gettext \ - # Use rsvg-convert to render our static theme previews - librsvg2-bin \ - # Use pngcrush to optimize the PNGs uploaded by developers - pngcrush \ - && rm -rf /var/lib/apt/lists/* +RUN < settings_local.py && DJANGO_SETTINGS_MODULE='settings_local' locale/compile-mo.sh locale \ - && DJANGO_SETTINGS_MODULE='settings_local' python manage.py compress_assets \ - && DJANGO_SETTINGS_MODULE='settings_local' python manage.py generate_jsi18n_files \ - && DJANGO_SETTINGS_MODULE='settings_local' python manage.py collectstatic --noinput \ - && npm prune --production \ - && ./scripts/generate_build.py > build.py \ - && rm -f settings_local.py settings_local.pyc +FROM base AS development + +FROM base AS locales +ARG LOCALE_DIR=${HOME}/locale +# Compile locales +# Copy the locale files from the host so they are writable by the olympia user +COPY --chown=olympia:olympia locale ${LOCALE_DIR} +# Copy the executable individually to improve the cache validity +RUN \ + --mount=type=bind,source=requirements/locale.txt,target=${HOME}/requirements/locale.txt \ + --mount=type=bind,source=Makefile-docker,target=${HOME}/Makefile-docker \ + --mount=type=bind,source=locale/compile-mo.sh,target=${HOME}/compile-mo.sh \ + make -f Makefile-docker compile_locales + +# More efficient caching by mounting the exact files we need +# and copying only the static/ & locale/ directories. +FROM pip_production AS assets + +# In order to create js i18n files with all of our strings, we need to include +# the compiled locale files +COPY --from=locales --chown=olympia:olympia ${HOME}/locale/ ${HOME}/locale/ +# TODO: only copy the files we need for compiling assets +COPY --chown=olympia:olympia static/ ${HOME}/static/ + +# Finalize the build +# TODO: We should move update_assets to the `builder` stage once we can efficiently +# run that command without having to copy the whole source code +# This will shave nearly 1 minute off the best case build time +RUN \ + --mount=type=bind,src=src,target=${HOME}/src \ + --mount=type=bind,src=Makefile-docker,target=${HOME}/Makefile-docker \ + --mount=type=bind,src=manage.py,target=${HOME}/manage.py \ +<<EOF +echo "from olympia.lib.settings_base import *" > settings_local.py +DJANGO_SETTINGS_MODULE="settings_local" make -f Makefile-docker update_assets +EOF + +FROM base AS production +# Copy the rest of the source files from the host +COPY --chown=olympia:olympia . 
${HOME} +# Copy compiled locales from builder +COPY --from=locales --chown=olympia:olympia ${HOME}/locale ${HOME}/locale +# Copy assets from assets +COPY --from=assets --chown=olympia:olympia ${HOME}/site-static ${HOME}/site-static +COPY --from=assets --chown=olympia:olympia ${HOME}/static-build ${HOME}/static-build +# Copy build info from info +COPY --from=info ${BUILD_INFO} ${BUILD_INFO} +# Copy dependencies from `pip_production` +COPY --from=pip_production --chown=olympia:olympia /deps /deps diff --git a/Makefile-docker b/Makefile-docker index badf73089174..87c2ef8c6bc2 100644 --- a/Makefile-docker +++ b/Makefile-docker @@ -8,144 +8,87 @@ export PYTHON_COMMAND=python3 export PIP_COMMAND=$(PYTHON_COMMAND) -m pip APP=src/olympia/ -NUM_ADDONS=10 -NUM_THEMES=$(NUM_ADDONS) - -NPM_ARGS := +NODE_MODULES := $(NPM_CONFIG_PREFIX)node_modules/ -ifneq ($(NPM_CONFIG_PREFIX),) - NPM_ARGS := --prefix $(NPM_CONFIG_PREFIX) +REQUIRED_FILES := \ + Makefile \ + Makefile-os \ + Makefile-docker \ + /deps/package.json \ + /deps/package-lock.json \ + /addons-server-docker-container \ + +# Build list of dependencies to install +DEPS = pip prod +# If we're running a development image, then we should install the development dependencies +ifeq ($(OLYMPIA_DEPS), development) +DEPS += dev endif -NODE_MODULES := $(NPM_CONFIG_PREFIX)node_modules/ -STATIC_CSS := static/css/node_lib/ -STATIC_JS := static/js/node_lib/ -STATIC_JQUERY_UI := static/js/node_lib/ui/ - -NODE_LIBS_CSS := \ -@claviska/jquery-minicolors/jquery.minicolors.css \ -@claviska/jquery-minicolors/jquery.minicolors.png \ - -# NODE_LIBS_JS and NODE_LIBS_JQUERY_UI are referenced in settings.MINIFY_BUNDLES - keep both lists in sync -NODE_LIBS_JS := \ -less/dist/less.js \ -jquery/dist/jquery.js \ -jquery.browser/dist/jquery.browser.js \ -jquery.cookie/jquery.cookie.js \ -@claviska/jquery-minicolors/jquery.minicolors.js \ -jszip/dist/jszip.js \ -timeago/jquery.timeago.js \ -underscore/underscore.js \ -netmask/lib/netmask.js \ - -NODE_LIBS_JQUERY_UI := \ -jquery-ui/ui/data.js \ -jquery-ui/ui/scroll-parent.js \ -jquery-ui/ui/widget.js \ -jquery-ui/ui/widgets/mouse.js \ -jquery-ui/ui/widgets/sortable.js .PHONY: help_redirect help_redirect: @$(MAKE) help --no-print-directory -.PHONY: initialize_db -initialize_db: ## create a new database - rm -rf ./user-media/* ./tmp/* - $(PYTHON_COMMAND) manage.py create_db --force - $(PYTHON_COMMAND) manage.py migrate --noinput - $(PYTHON_COMMAND) manage.py loaddata initial.json - $(PYTHON_COMMAND) manage.py import_prod_versions - $(PYTHON_COMMAND) manage.py createsuperuser - $(PYTHON_COMMAND) manage.py loaddata zadmin/users - -.PHONY: populate_data -populate_data: ## populate a new database - # reindex --wipe will force the ES mapping to be re-installed. Useful to - # make sure the mapping is correct before adding a bunch of add-ons. - $(PYTHON_COMMAND) manage.py reindex --wipe --force --noinput - $(PYTHON_COMMAND) manage.py generate_addons --app firefox $(NUM_ADDONS) - $(PYTHON_COMMAND) manage.py generate_addons --app android $(NUM_ADDONS) - $(PYTHON_COMMAND) manage.py generate_themes $(NUM_THEMES) - # These add-ons are specifically useful for the addons-frontend - # homepage. You may have to re-run this, in case the data there - # changes. - $(PYTHON_COMMAND) manage.py generate_default_addons_for_frontend - - # Now that addons have been generated, reindex. 
- $(PYTHON_COMMAND) manage.py reindex --force --noinput - -.PHONY: update_deps_base -update_deps_base: ## update the python and node dependencies - # Work arounds "Multiple .dist-info directories" issue. - rm -rf /deps/build/* - # pep 517 mode (the default) breaks editable install in our project. https://github.com/mozilla/addons-server/issues/16144 - $(PIP_COMMAND) install --no-use-pep517 -e . - $(PIP_COMMAND) install --progress-bar=off --no-deps --exists-action=w -r requirements/pip.txt - $(PIP_COMMAND) install --progress-bar=off --no-deps --exists-action=w -r requirements/prod.txt - - npm install $(NPM_ARGS) - for dest in $(NODE_LIBS_CSS) ; do cp $(NODE_MODULES)$$dest $(STATIC_CSS) ; done - for dest in $(NODE_LIBS_JS) ; do cp $(NODE_MODULES)$$dest $(STATIC_JS) ; done - for dest in $(NODE_LIBS_JQUERY_UI) ; do cp $(NODE_MODULES)$$dest $(STATIC_JQUERY_UI) ; done +.PHONY: check_debian_packages +check_debian_packages: ## check the existence of multiple debian packages + ./scripts/check_debian_packages.sh -.PHONY: update_deps -update_deps: update_deps_base ## update the python and node dependencies for development - $(PIP_COMMAND) install --progress-bar=off --no-deps --exists-action=w -r requirements/dev.txt +.PHONY: check_pip_packages +check_pip_packages: ## check the existence of multiple python packages + @for dep in $(DEPS); do \ + ./scripts/check_pip_packages.sh $$dep.txt; \ + done + +.PHONY: check_files +check_files: ## check the existence of multiple files + @for file in $(REQUIRED_FILES); do test -f "$$file" || (echo "$$file is missing." && exit 1); done + @echo "All required files are present." + +.PHONY: check_olympia_user +check_olympia_user: ## check if the olympia user exists and is the current user + @if [ "$$(id -u olympia)" != "$$(id -u)" ]; then echo "The current user is not the olympia user."; exit 1; fi + @echo "The current user is the olympia user." + +.PHONY: check_django +check_django: ## check if the django app is configured properly + ./manage.py check + +.PHONY: check_nginx +check_nginx: ## check if the nginx config for local development is configured properly + mkdir -p /data/olympia/storage/shared_storage/uploads + echo "OK" > /data/olympia/storage/shared_storage/uploads/.check + @if [ "$$(curl -sf http://nginx/user-media/.check)" != "OK" ]; then echo "Requesting http://nginx/user-media/.check failed"; exit 1; fi + @echo "Nginx user-media configuration looks correct." 
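The aggregate `check` target (defined just below) chains these sanity checks together. A quick sketch of how they are meant to be run (assuming the compose stack from `make up` is running; the catch-all rule in Makefile-os forwards unknown targets to the web container):

```bash
# From the host: the Makefile-os catch-all forwards this to the web
# container, where it runs the Makefile-docker `check` target.
make check

# Equivalent explicit form, handy when debugging a single check:
docker compose exec --user olympia web make -f Makefile-docker check_nginx
```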
-.PHONY: update_deps_prod -update_deps_prod: update_deps_base ## update the python and node dependencies for production - npm prune --omit=dev +.PHONY: check +check: check_nginx check_files check_olympia_user check_debian_packages check_pip_packages check_django -.PHONY: update_db -update_db: ## run the database migrations - $(PYTHON_COMMAND) manage.py migrate --noinput +.PHONY: data_dump +data_dump: + ./manage.py data_dump $(ARGS) + +.PHONY: data_load +data_load: + ./manage.py data_load $(ARGS) .PHONY: update_assets update_assets: + # Copy files required in compress_assets to the static folder # If changing this here, make sure to adapt tests in amo/test_commands.py $(PYTHON_COMMAND) manage.py compress_assets - $(PYTHON_COMMAND) manage.py collectstatic --noinput $(PYTHON_COMMAND) manage.py generate_jsi18n_files + # Collect static files: This MUST be run last or files will be missing + $(PYTHON_COMMAND) manage.py collectstatic --noinput -.PHONY: update -update: update_deps update_db update_assets ## update the dependencies, the database, and assets -.PHONY: reindex -reindex: ## reindex everything in elasticsearch, for AMO - $(PYTHON_COMMAND) manage.py reindex $(ARGS) +.PHONY: update_deps +update_deps: ## Update the dependencies + $(HOME)/scripts/install_deps.py $(DEPS) +# TODO: remove this after we migrate addons-frontend to not depend on it. .PHONY: setup-ui-tests setup-ui-tests: - rm -rf ./user-media/* ./tmp/* - # Reset the database and fake database migrations - $(PYTHON_COMMAND) manage.py create_db --force - $(PYTHON_COMMAND) manage.py migrate --noinput - - # Reindex - $(PYTHON_COMMAND) manage.py reindex --force --noinput --wipe - - # Let's load some initial data and import mozilla-product versions - $(PYTHON_COMMAND) manage.py loaddata initial.json - $(PYTHON_COMMAND) manage.py loaddata zadmin/users - $(PYTHON_COMMAND) manage.py loaddata src/olympia/access/fixtures/initial.json - $(PYTHON_COMMAND) manage.py import_prod_versions - - # Create a proper superuser that can be used to access the API - $(PYTHON_COMMAND) manage.py waffle_switch super-create-accounts on --create - $(PYTHON_COMMAND) manage.py waffle_switch activate-autograph-signing on --create - $(PYTHON_COMMAND) manage.py generate_addons --app firefox $(NUM_ADDONS) - $(PYTHON_COMMAND) manage.py generate_addons --app android $(NUM_ADDONS) - $(PYTHON_COMMAND) manage.py generate_themes $(NUM_THEMES) - $(PYTHON_COMMAND) manage.py generate_default_addons_for_frontend - - # Now that addons have been generated, reindex. - $(PYTHON_COMMAND) manage.py reindex --force --noinput - -.PHONY: perf-tests -perf-tests: setup-ui-tests - $(PIP_COMMAND) install --progress-bar=off --no-deps -r requirements/perftests.txt - locust --no-web -c 1 -f tests/performance/locustfile.py --host "http://olympia.test" 
.PHONY: lint lint: ## lint the code @@ -160,10 +103,6 @@ lint-codestyle: lint docs: ## build the documentation $(MAKE) -C docs html SPHINXOPTS='-nW' -.PHONY: debug -debug: ## connect for debugging - supervisorctl fg olympia - .PHONY: djshell djshell: ## connect to django shell $(PYTHON_COMMAND) ./manage.py shell_plus @@ -173,41 +112,94 @@ dbshell: ## connect to a database shell $(PYTHON_COMMAND) ./manage.py dbshell .PHONY: initialize -initialize: update_deps initialize_db update_assets populate_data ## init the dependencies, the database, and assets - -.PHONY: reload -reload: ## force django code reload - uwsgi --reload ${HOME}/docker/artifacts/addons-server-uwsgi-master.pid - -reload-uwsgi: reload +initialize: ## ensure database exists + @echo "Initializing data..." + @echo "args: $(ARGS)" + $(PYTHON_COMMAND) ./manage.py initialize $(ARGS) + +PYTEST_SRC := src/olympia/ + +.PHONY: test_needs_locales_compilation +test_needs_locales_compilation: + pytest $(PYTEST_SRC) \ + -m 'needs_locales_compilation' \ + $(ARGS) + +.PHONY: test_static_assets +test_static_assets: run_js_tests + pytest $(PYTEST_SRC) \ + -m 'static_assets' \ + $(ARGS) + +.PHONY: test_main +test_main: + pytest $(PYTEST_SRC) \ + -n auto \ + -m 'not es_tests and not needs_locales_compilation and not static_assets and not internal_routes_allowed' \ + $(ARGS) + +.PHONY: test_internal_routes_allowed +test_internal_routes_allowed: +# We need to change the setting in the file because we can't +# override an env variable here, and the next command requires +# `INTERNAL_ROUTES_ALLOWED` to be set to `True`. + sed -i 's/^INTERNAL_ROUTES_ALLOWED.*/INTERNAL_ROUTES_ALLOWED=True/' settings_test.py + pytest \ + $(PYTEST_SRC) \ + -m 'internal_routes_allowed' \ + $(ARGS) + +.PHONY: test_es_tests +test_es_tests: + pytest \ + $(PYTEST_SRC) \ + -m 'es_tests and not needs_locales_compilation and not static_assets' \ + $(ARGS) .PHONY: test test: ## run the entire test suite - pytest $(APP) $(ARGS) + pytest \ + $(PYTEST_SRC) \ + $(ARGS) .PHONY: test_es test_es: ## run the ES tests - pytest -m es_tests $(APP) $(ARGS) + pytest \ + $(PYTEST_SRC) \ + -m es_tests \ + $(ARGS) .PHONY: test_no_es test_no_es: ## run all but the ES tests - pytest -m "not es_tests" $(APP) $(ARGS) + pytest \ + $(PYTEST_SRC) \ + -m "not es_tests" \ + $(ARGS) .PHONY: test_force_db test_force_db: ## run the entire test suite with a new database - pytest --create-db $(APP) $(ARGS) + pytest \ + $(PYTEST_SRC) \ + --create-db \ + $(ARGS) .PHONY: tdd tdd: ## run the entire test suite, but stop on the first error - pytest -x --pdb $(ARGS) $(APP) + pytest \ + $(PYTEST_SRC) \ + -x --pdb \ + $(ARGS) .PHONY: test_failed test_failed: ## rerun the failed tests from the previous run - pytest --lf $(ARGS) $(APP) + pytest \ + $(PYTEST_SRC) \ + --lf \ + $(ARGS) .PHONY: run_js_tests run_js_tests: ## Run the JavaScript test suite (requires compiled/compressed assets). - NODE_PATH=$(NODE_MODULES) npm exec $(NPM_ARGS) -- jest + NODE_PATH=$(NODE_MODULES) npm exec $(NPM_ARGS) -- jest tests/js .PHONY: watch_js_tests watch_js_tests: ## Run+watch the JavaScript test suite (requires compiled/compressed assets). @@ -223,6 +215,11 @@ format: ## Autoformat our codebase. 
extract_locales: ## extracts and merges translation strings ./scripts/run_l10n_extraction.sh +.PHONY: compile_locales +compile_locales: ## compiles translation strings + $(PIP_COMMAND) install --progress-bar=off --no-deps -r requirements/locale.txt + ./locale/compile-mo.sh ./locale/ + .PHONY: help_submake help_submake: @grep -E '^[a-zA-Z_-]+:.*?## .*$$' Makefile-docker | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' diff --git a/Makefile-os b/Makefile-os index 88758dc74962..e3079503fcd3 100644 --- a/Makefile-os +++ b/Makefile-os @@ -1,7 +1,63 @@ -UID := $(shell id -u) -GID := $(shell id -g) -export UID -export GID +#################################################################################################### +# Our makefile makes use of docker compose commands. Our config files rely on environment variables +# both for passing configuration to the containers as well as configuring the compose file itself. +# Variables referenced in docker-compose*.yml should be read from .env, exported and saved in .env +#################################################################################################### + +DOCKER_PROGRESS ?= auto +DOCKER_METADATA_FILE ?= buildx-bake-metadata.json +DOCKER_PUSH ?= +DOCKER_WAIT ?= +# Not in dot env saved, +# Docker needs these values set, +# Static, cache preserved. +export DOCKER_COMMIT ?= +export DOCKER_BUILD ?= +export DOCKER_VERSION ?= +export DATA_BACKUP_SKIP ?= +override DOCKER_MYSQLD_VOLUME = addons-server_data_mysqld + +INITIALIZE_ARGS ?= +INIT_CLEAN ?= +INIT_LOAD ?= + +ifneq ($(INIT_CLEAN),) + INITIALIZE_ARGS += --clean +endif + +ifneq ($(INIT_LOAD),) + INITIALIZE_ARGS += --load $(INIT_LOAD) +endif + +DOCKER_BAKE_ARGS := \ + --file docker-bake.hcl \ + --file .env \ + --progress $(DOCKER_PROGRESS) \ + --metadata-file $(DOCKER_METADATA_FILE) \ + +ifeq ($(DOCKER_PUSH), true) + DOCKER_BAKE_ARGS += --push +endif + +DOCKER_COMPOSE_ARGS := \ + -d \ + --remove-orphans \ + --no-build \ + --quiet-pull \ + +ifneq ($(DOCKER_WAIT),) + DOCKER_COMPOSE_ARGS += --wait +endif + +# Paths should be cleaned before mounting .:/data/olympia +# These are files which should be sourced from the container +# or should be fresh on every run of the project +CLEAN_PATHS := \ + src/olympia.egg-info \ + supervisord.pid \ + version.json \ + logs \ + buildx-bake-metadata.json \ .PHONY: help_redirect help_redirect: @@ -14,12 +70,21 @@ help_submake: @echo "\nAll other commands will be passed through to the docker 'web' container make:" @make -f Makefile-docker help_submake +.PHONY: test_setup +test_setup: + npm exec jest -- ./tests/make --runInBand + +.PHONY: setup +setup: ## create configuration files version.json and .env required to run this project + for path in $(CLEAN_PATHS); do rm -rf "$(PWD)/$$path" && echo "$$path removed"; done + ./scripts/setup.py + +.PHONY: push_locales +push_locales: ## pushes extracted translation strings + bash ./scripts/push_l10n_extraction.sh $(ARGS) + .PHONY: update_docker -update_docker: ## update all the docker images - docker compose exec --user olympia worker make update_deps - docker compose exec --user olympia web make update - docker compose restart web - docker compose restart worker +update_docker: data_export up data_restore ## update all the docker images .PHONY: shell shell: ## connect to a running addons-server docker shell @@ -29,31 +94,96 @@ shell: ## connect to a running addons-server docker shell rootshell: ## connect to a running addons-server docker shell with root user docker compose 
exec --user root web bash -.PHONY: create_env_file -create_env_file: - echo "UID=${UID}\nGID=${GID}" > .env - -.PHONY: initialize_docker -initialize_docker: create_env_file -# Run a fresh container from the base image to install deps. Since /deps is -# shared via a volume in docker-compose.yml, this installs deps for both web -# and worker containers, and does so without requiring the containers to be up. -# We just create dummy empty package.json and package-lock.json in deps/ so -# that docker compose doesn't create dummy ones itself, as they would be owned -# by root. They don't matter: the ones at the root directory are mounted -# instead. - touch deps/package.json - touch deps/package-lock.json -# Note that this is running with --user ${UID}:${GID} because the user olympia -# would be uid 9500 regardless of host at this point (this is only fixed when -# the container is up, through the command defined in docker-compose.yml), -# which is wrong for local development. - docker compose run --rm --user ${UID}:${GID} web make update_deps - docker compose up -d - docker compose exec --user olympia web make initialize +.PHONY: docker_compose_config +docker_compose_config: ## Show the docker compose configuration + @docker compose config web --format json + +.PHONY: docker_build_web +docker_build_web: ## Build the docker images using buildx bake + docker buildx bake $(DOCKER_BAKE_ARGS) $(ARGS) + +.PHONY: docker_pull_web +docker_pull_web: ## Pull the latest docker image using current tag + docker compose pull web --policy always + +.PHONY: docker_pull_or_build +docker_pull_or_build: ## Pull or build the docker image based on the image version +# If the image is tagged with version "local" then we should build the image before running +# docker compose up. The image will be available to docker compose, skipping a pull attempt. +# This is useful for local development where the image is built and tagged with "local". +# Also for CI/CD pipelines on forks where we cannot pull the image and must build locally. +# If the image is tagged with a version other than "local" then we should skip the build +# and let docker compose pull the image instead. This is useful for CI/CD pipelines where +# the image is already built and pushed to a registry. 
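+# For example (tag values illustrative, flowing through `image: ${DOCKER_TAG:-}` in docker-compose.yml): +# DOCKER_TAG=mozilla/addons-server:local routes to `make docker_build_web`, +# while a published tag such as DOCKER_TAG=mozilla/addons-server:2024.05.16 routes to `make docker_pull_web`.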
+ @IMAGE=$$(docker compose config web --format json | jq -r '.services.web.image'); \ + echo "image: $$IMAGE"; \ + if echo "$$IMAGE" | grep -q ":local"; then \ + $(MAKE) docker_build_web; \ + else \ + $(MAKE) docker_pull_web; \ + fi + +.PHONY: docker_mysqld_volume_create +docker_mysqld_volume_create: ## Create the mysqld volume + docker volume create $(DOCKER_MYSQLD_VOLUME) + +.PHONY: docker_mysqld_volume_remove +docker_mysqld_volume_remove: ## Remove the mysqld volume + docker volume rm $(DOCKER_MYSQLD_VOLUME) + +.PHONY: docker_compose_down +docker_compose_down: ## Stop the docker containers + docker compose down --rmi local --remove-orphans --volumes + +.PHONY: docker_clean_volumes +docker_clean_volumes: ## Remove dangling volumes, skipping the mysqld volume + docker volume prune \ + --filter label=com.docker.compose.project=addons-server \ + --all \ + --force + +.PHONY: docker_clean_images +docker_clean_images: ## Remove dangling images + docker image prune --filter "dangling=true" --force + +.PHONY: docker_clean_build_cache +docker_clean_build_cache: ## Remove buildx build cache + docker buildx prune -af + +.PHONY: clean_docker +clean_docker: docker_compose_down docker_mysqld_volume_remove docker_clean_images docker_clean_volumes docker_clean_build_cache ## Remove all docker resources taking space on the host machine + +.PHONY: docker_update_deps +docker_update_deps: docker_mysqld_volume_create ## Update the dependencies in the container based on the docker tag and target + docker compose run \ + --rm \ + --no-deps \ + $(DOCKER_RUN_ARGS) \ + web \ + make update_deps + +.PHONY: up_pre +up_pre: setup docker_pull_or_build docker_update_deps ## Pre-up the environment, setup files, volumes and host state + +.PHONY: up_start +up_start: docker_mysqld_volume_create ## Start the docker containers + docker compose up $(DOCKER_COMPOSE_ARGS) $(ARGS) + +.PHONY: up_post +up_post: docker_clean_images docker_clean_volumes ## Post-up the environment, setup files, volumes and host state +# Explicitly run initialize via the web container, as make can get confused +# routing the command both to the web container and to the proper target. + docker compose exec --user olympia web make -f Makefile-docker initialize ARGS=$(shell echo "'$(INITIALIZE_ARGS)'") + +.PHONY: up +up: up_pre up_start up_post ## Up the environment + +.PHONY: down +down: docker_compose_down docker_clean_images docker_clean_volumes ## Stop the docker containers and clean up non-persistent dangling resources %: ## This directs any other recipe (command) to the web container's make. - docker compose exec --user olympia web make $(MAKECMDGOALS) ARGS=$(ARGS) + docker compose exec --user olympia web make $(MAKECMDGOALS) ARGS="$(shell echo $(ARGS))" # You probably want to put new commands in Makefile-docker, unless they operate # on multiple containers or are host-os specific. diff --git a/README.md b/README.md new file mode 100644 index 000000000000..ff8d201247f4 --- /dev/null +++ b/README.md @@ -0,0 +1,27 @@ +[![Code of conduct](https://img.shields.io/badge/%E2%9D%A4-code%20of%20conduct-blue.svg)](https://github.com/mozilla/addons-server/blob/master/.github/CODE_OF_CONDUCT.md) + +[![CI Status](https://github.com/mozilla/addons-server/actions/workflows/ci.yml/badge.svg)](https://github.com/mozilla/addons-server/actions/workflows/ci.yml) + +# Addons-Server + +Welcome to the Addons Server repository! Please feel free to visit the web page of the current project hosted on [addons.mozilla.org]. 
If you want to develop or run the application locally, please follow the [setup and configuration][setup_link] docs. We'd love your help! You can come talk to us on [Matrix #amo:mozilla.org] if you have any questions. + +Please report bugs here: <https://github.com/mozilla/addons/issues> + +You can access the AMO dev environment at <https://addons-dev.allizom.org/> and the AMO stage environment at <https://addons.allizom.org/> + +% marker-for-security-bug-inclusion-do-not-remove + +# Security Bug Reports + +This code and its associated production web page are included in Mozilla’s web and services [bug bounty program]. If you find a security vulnerability, please submit it via the process outlined in the program and [FAQ pages]. Further technical details about this application are available from the [Bug Bounty Onramp page]. + +Please submit all security-related bugs through Bugzilla using the [web security bug form]. Never submit security-related bugs through a GitHub Issue or by email. + +[addons.mozilla.org]: https://addons.mozilla.org +[bug bounty onramp page]: https://wiki.mozilla.org/Security/BugBountyOnramp/ +[bug bounty program]: https://www.mozilla.org/en-US/security/web-bug-bounty/ +[faq pages]: https://www.mozilla.org/en-US/security/bug-bounty/faq-webapp/ +[setup_link]: https://mozilla.github.io/addons-server/topics/development/setup_and_configuration.html +[matrix #amo:mozilla.org]: https://chat.mozilla.org/#/room/#amo:mozilla.org +[web security bug form]: https://bugzilla.mozilla.org/form.web.bounty diff --git a/README.rst b/README.rst deleted file mode 100644 index bafc75445fec..000000000000 --- a/README.rst +++ /dev/null @@ -1,35 +0,0 @@ -.. image:: https://img.shields.io/badge/%E2%9D%A4-code%20of%20conduct-blue.svg - :target: https://github.com/mozilla/addons-server/blob/master/.github/CODE_OF_CONDUCT.md - :alt: Code of conduct - -.. image:: https://circleci.com/gh/mozilla/addons-server.svg?style=svg - :target: https://circleci.com/gh/mozilla/addons-server - - -Addons-Server -============= - -Welcome to the Addons Server repository! Please feel free to visit the web page of the current project hosted on `addons.mozilla.org`_. If you want to install it follow our guide located in `install docs`_. We'd love your help! You can come talk to us on `Matrix #amo:mozilla.org`_ if you have any questions. - -Please report bugs here: https://github.com/mozilla/addons/issues or https://github.com/mozilla/addons-server/issues -You can access the AMO dev environment at https://addons-dev.allizom.org/ and the AMO stage environment at https://addons.allizom.org/ - - -.. _`addons.mozilla.org`: https://addons.mozilla.org -.. _`install docs`: https://addons-server.readthedocs.io/en/latest/topics/install/docker.html -.. _`Matrix #amo:mozilla.org`: https://chat.mozilla.org/#/room/#amo:mozilla.org - - -.. marker-for-security-bug-inclusion-do-not-remove - -Security Bug Reports --------------------- - -This code and its associated production web page are included in the Mozilla’s web and services `bug bounty program`_. If you find a security vulnerability, please submit it via the process outlined in the program and `FAQ pages`_. Further technical details about this application are available from the `Bug Bounty Onramp page`_. - -Please submit all security-related bugs through Bugzilla using the `web security bug form`_. Never submit security-related bugs through a Github Issue or by email. - -.. _bug bounty program: https://www.mozilla.org/en-US/security/web-bug-bounty/ -.. _FAQ pages: https://www.mozilla.org/en-US/security/bug-bounty/faq-webapp/ -.. 
_Bug Bounty Onramp page: https://wiki.mozilla.org/Security/BugBountyOnramp/ -.. _web security bug form: https://bugzilla.mozilla.org/form.web.bounty diff --git a/babel.cfg b/babel.cfg index 18880252cddb..8a10f0e960a9 100644 --- a/babel.cfg +++ b/babel.cfg @@ -4,3 +4,6 @@ [django: src/olympia/**/templates/admin/**.html] [django: src/olympia/**/templates/devhub/forms/widgets/compat_app_input_option.html] [jinja2_custom: src/olympia/**/templates/**.html] + +[extractors] +jinja2_custom = olympia.core.babel:extract_jinja diff --git a/conftest.py b/conftest.py index eb8ccba583a7..d8a9e55aa6fd 100644 --- a/conftest.py +++ b/conftest.py @@ -4,6 +4,7 @@ Please note that there should not be any Django/Olympia related imports on module-level, they should instead be added to hooks or fixtures directly. """ + import os import uuid @@ -138,11 +139,12 @@ def test_pre_setup(request, tmpdir, settings): from django.core.cache import caches from django.utils import translation - from olympia import amo, core - from olympia.translations.hold import clean_translations from waffle import models as waffle_models from waffle.utils import get_cache as waffle_get_cache + from olympia import amo, core + from olympia.translations.hold import clean_translations + # Clear all cache-instances. They'll be re-initialized by Django # This will make sure that our random `KEY_PREFIX` is applied # appropriately. @@ -184,8 +186,6 @@ def _path(*args): settings.SHARED_STORAGE = shared_storage = _path(storage_root, 'shared_storage') settings.ADDONS_PATH = _path(storage_root, 'files') - settings.GUARDED_ADDONS_PATH = _path(storage_root, 'guarded-addons') - settings.GIT_FILE_STORAGE_PATH = _path(storage_root, 'git-storage') settings.MLBF_STORAGE_PATH = _path(storage_root, 'mlbf') settings.MEDIA_ROOT = _path(shared_storage, 'uploads') settings.SITEMAP_STORAGE_PATH = _path(storage_root, 'sitemaps') diff --git a/logs/.gitkeep b/deps/.gitkeep similarity index 100% rename from logs/.gitkeep rename to deps/.gitkeep diff --git a/docker-bake.hcl b/docker-bake.hcl new file mode 100644 index 000000000000..c1944889b061 --- /dev/null +++ b/docker-bake.hcl @@ -0,0 +1,30 @@ +group "default" { + targets = ["web"] +} + +variable DOCKER_BUILD {} +variable DOCKER_COMMIT {} +variable DOCKER_VERSION {} +variable DOCKER_TARGET {} +variable DOCKER_TAG {} + +target "web" { + context = "." 
+ dockerfile = "Dockerfile" + target = "${DOCKER_TARGET}" + tags = ["${DOCKER_TAG}"] + platforms = ["linux/amd64"] + args = { + DOCKER_COMMIT = "${DOCKER_COMMIT}" + DOCKER_VERSION = "${DOCKER_VERSION}" + DOCKER_BUILD = "${DOCKER_BUILD}" + DOCKER_TARGET = "${DOCKER_TARGET}" + DOCKER_SOURCE = "https://github.com/mozilla/addons-server" + } + pull = true + + output = [ + "type=docker", + ] + +} diff --git a/docker-compose.override.yml b/docker-compose.override.yml index 0a226055e9ab..13bbe9968adc 100644 --- a/docker-compose.override.yml +++ b/docker-compose.override.yml @@ -1,5 +1,3 @@ -version: "2.4" - services: nginx: ports: diff --git a/docker-compose.private.yml b/docker-compose.private.yml index a0dbc2ebd0b9..8e00d7628d2c 100644 --- a/docker-compose.private.yml +++ b/docker-compose.private.yml @@ -1,5 +1,3 @@ -version: "2.4" - services: worker: depends_on: diff --git a/docker-compose.yml b/docker-compose.yml index 85a63cfddb65..4e847cacc989 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,6 +1,7 @@ -version: "2.4" - x-env-mapping: &env + # https://docs.docker.com/compose/environment-variables/envvars-precedence/ + env_file: + - .env environment: - CELERY_BROKER_URL=amqp://olympia:olympia@rabbitmq/olympia - CELERY_RESULT_BACKEND=redis://redis:6379/1 @@ -12,61 +13,99 @@ x-env-mapping: &env - OLYMPIA_SITE_URL=http://olympia.test - PYTHONDONTWRITEBYTECODE=1 - PYTHONUNBUFFERED=1 + - PYTHONBREAKPOINT=ipdb.set_trace - TERM=xterm-256color - - CIRCLECI=${CIRCLECI} - HISTFILE=/data/olympia/docker/artifacts/bash_history - HISTSIZE=50000 - HISTIGNORE=ls:exit:"cd .." - HISTCONTROL=erasedups - # Note: docker compose uses the values exported from .env for GID/UID if - # they exist. ./docker/fix_olympia_user.sh uses those variables to fix - # the uid/gid of the user to match the host if necessary. - - UID=${UID:-9500} - - GID=${UID:-9500} + - CIRCLECI + - DATA_BACKUP_SKIP + +x-olympia: &olympia + <<: *env + image: ${DOCKER_TAG:-} + # We don't want docker compose to manage the image for us. + # We sometimes build the image locally and sometimes pull from a registry + # but docker compose should always assume the image is available. + pull_policy: never + # We drop down to a different user through entrypoint.sh, but starting as + # root allows us to fix the ownership of files generated at image build + # time through the ./docker/entrypoint.sh script. + user: root + platform: linux/amd64 + entrypoint: ["/data/olympia/docker/entrypoint.sh"] services: - worker: &worker - <<: *env - image: mozilla/addons-server:latest - # We drop down to a different user through supervisord, but starting as - # root allows us to fix the ownership of files generated at image build - # time through the ./docker/fix_olympia_user.sh script. - user: root - platform: linux/amd64 - command: - - /bin/sh - - -c - - | - ./docker/fix_olympia_user.sh - supervisord -n -c /data/olympia/docker/supervisor-celery.conf + olympia_volumes: + <<: *olympia + # We could let this container exit 0, but docker compose --wait + # interprets this as a failure, even with a passing healthcheck, + # so we just sleep indefinitely instead. 
+ command: ["sleep", "infinity"] + volumes: + # used by: web, worker, nginx + - ${HOST_MOUNT_SOURCE:?}:/data/olympia + - ${HOST_MOUNT_SOURCE:?}deps:/deps + - data_site_static:/data/olympia/site-static + - ${HOST_MOUNT_SOURCE:?}storage:/data/olympia/storage + worker: + <<: *olympia + command: [ + "DJANGO_SETTINGS_MODULE=settings", + "watchmedo", + "auto-restart", + "--directory=/data/olympia/src", + "--pattern=*.py", + "--recursive", + "--no-restart-on-command-exit", + "--", + "celery -A olympia.amo.celery:app worker -E -c 2 --loglevel=INFO", + ] volumes: - - .:/data/olympia - - ./deps:/deps - - ./package.json:/deps/package.json - - ./package-lock.json:/deps/package-lock.json + - ${HOST_MOUNT_SOURCE:?}:/data/olympia + - ${HOST_MOUNT_SOURCE:?}deps:/deps + - ${HOST_MOUNT_SOURCE:?}storage:/data/olympia/storage extra_hosts: - "olympia.test:127.0.0.1" + restart: on-failure:5 + healthcheck: + test: ["CMD-SHELL", "./manage.py monitors --services celery_worker --skip-checks"] + interval: 30s + retries: 3 + start_interval: 1s + depends_on: + - olympia_volumes + - mysqld + - elasticsearch + - redis + - memcached + - rabbitmq + - autograph web: - <<: *worker - platform: linux/amd64 + extends: + service: worker command: - - /bin/sh - - -c - - | - ./docker/fix_olympia_user.sh - supervisord -n -c /data/olympia/docker/supervisor.conf + - uwsgi --ini /data/olympia/docker/uwsgi.ini + healthcheck: + test: ["CMD-SHELL", "./manage.py monitors --services localdev_web --skip-checks"] + interval: 30s + retries: 3 + start_interval: 1s + volumes: + # Don't mount generated files. They only exist in the container + # and would otherwiser be deleted by mounting the cwd volume above + - data_static_build:/data/olympia/static-build + - data_site_static:/data/olympia/site-static nginx: image: nginx volumes: - - ./docker/nginx/addons.conf:/etc/nginx/conf.d/addons.conf - - ./static:/srv/static - - ./site-static:/srv/site-static - - ./storage/shared_storage/uploads:/srv/user-media - - ./storage/files:/srv/user-media/addons - - ./storage/guarded-addons:/srv/user-media/guarded-addons - - ./storage/sitemaps:/srv/user-media/sitemaps + - data_nginx:/etc/nginx/conf.d + - ${HOST_MOUNT_SOURCE:?}:/srv + - data_site_static:/srv/site-static + - ${HOST_MOUNT_SOURCE:?}storage:/srv/storage ports: - "80:80" networks: @@ -74,8 +113,7 @@ services: aliases: - olympia.test depends_on: - - web - - addons-frontend + - olympia_volumes memcached: image: memcached:1.4 @@ -89,6 +127,24 @@ services: - MYSQL_DATABASE=olympia ports: - "3306:3306" + volumes: + - data_mysqld:/var/lib/mysql + command: + # Optimize for development speed over durability + - --innodb-flush-log-at-trx-commit=0 + - --innodb-buffer-pool-size=64M + - --innodb-log-buffer-size=8M + - --innodb-log-file-size=32M + # Skip DNS lookups + - --skip-name-resolve + # Disable performance schema for faster startup + - --performance-schema=OFF + healthcheck: + test: ["CMD", "mysqladmin", "ping", "-h", "localhost", "--silent"] + start_interval: 1s + timeout: 2s + start_period: 10s + retries: 3 elasticsearch: image: docker.elastic.co/elasticsearch/elasticsearch:7.17.3 @@ -109,7 +165,7 @@ services: image: redis:6.2 rabbitmq: - image: rabbitmq:3.8 + image: rabbitmq:3.12 hostname: olympia expose: - "5672" @@ -121,9 +177,9 @@ services: autograph: image: mozilla/autograph:3.3.2 platform: linux/amd64 - command: /go/bin/autograph -c /data/olympia/scripts/autograph_localdev_config.yaml + command: /go/bin/autograph -c /data/autograph/autograph_localdev_config.yaml volumes: - - .:/data/olympia + - 
data_autograph:/data/autograph addons-frontend: <<: *env @@ -141,3 +197,47 @@ services: # exposed using webpack and not by the node app server). - 7011:7011 command: yarn amo:olympia + +networks: + default: + driver: bridge + enable_ipv6: false + +volumes: + # Volumes for static files that should not be + # mounted from the host. + data_static_build: + data_site_static: + # Volumes for the production olympia mounts + # allowing to conditionally mount directories + # from the host or from the image to + # in the running docker container. + # If HOST_MOUNT_SOURCE matches (data_olympia_) + # then we use the production volume mounts. Otherwise + # it will map to the current directory ./ + # (data_olympia_):/ + data_olympia_: + data_olympia_deps: + data_olympia_storage: + # Volume for rabbitmq/redis to avoid anonymous volumes + data_rabbitmq: + data_redis: + data_mysqld: + # Keep this value in sync with Makefile-os + # External volumes must be manually created/destroyed + name: addons-server_data_mysqld + external: true + # Volume for nginx configuration + data_nginx: + driver: local + driver_opts: + type: none + o: bind + device: ${PWD}/docker/nginx + # Volume for autograph configuration + data_autograph: + driver: local + driver_opts: + type: none + o: bind + device: ${PWD}/docker/autograph diff --git a/docker/artifacts/uwsgi-reload-monitor b/docker/artifacts/uwsgi-reload-monitor deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/scripts/autograph_localdev_config.yaml b/docker/autograph/autograph_localdev_config.yaml similarity index 99% rename from scripts/autograph_localdev_config.yaml rename to docker/autograph/autograph_localdev_config.yaml index 4e53ac6f0928..2ad22c641432 100644 --- a/scripts/autograph_localdev_config.yaml +++ b/docker/autograph/autograph_localdev_config.yaml @@ -1,8 +1,5 @@ # Note (1): Most of the configuration here got copied from # https://github.com/mozilla-services/autograph/blob/master/autograph.yaml -# -# Note (2): the content of the file is also embedded in `.circleci/config.yml`. -# Any change here should likely be duplicated. server: # This port should be perfectly free, the upstream default of 8000 @@ -135,7 +132,6 @@ signers: states: recommended: true recommended-android: true - verified: true line: true relative_start: 0h duration: 26298h diff --git a/docker/debian_packages.txt b/docker/debian_packages.txt new file mode 100644 index 000000000000..6b5263c564db --- /dev/null +++ b/docker/debian_packages.txt @@ -0,0 +1,24 @@ +# General (dev-) dependencies +bash-completion +build-essential +curl +libjpeg-dev +libsasl2-dev +libxml2-dev +locales +zlib1g-dev +libffi-dev +libssl-dev +nodejs +# Git, because we're using git-checkout dependencies +git +# Dependencies for mysql-python (from mysql apt repo, not debian) +pkg-config +mysql-client +libmysqlclient-dev +swig +gettext +# Use rsvg-convert to render our static theme previews +librsvg2-bin +# Use pngcrush to optimize the PNGs uploaded by developers +pngcrush diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh new file mode 100755 index 000000000000..e3f59fcdc489 --- /dev/null +++ b/docker/entrypoint.sh @@ -0,0 +1,43 @@ +#!/bin/bash + +### This is the entrypoint script used for local and CI environments +### It allows the web/worker containers to be run as root, but execute +### the commands as the olympia user. This is necessary because the +### olympia user's id sometimes needs to match the host user's id +### to avoid permission issues with mounted volumes. 
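For the uid matching described above to work, the host has to hand its uid to the container. A minimal sketch of that wiring (an assumption: scripts/setup.py, invoked by the `setup` target in Makefile-os, persists this kind of value into the generated .env):

```bash
# On the host: record the current uid so the container can adopt it.
export HOST_UID="$(id -u)"

# docker compose starts the container as root with HOST_UID in its
# environment; entrypoint.sh (below) remaps the baked-in olympia uid
# (9500) to it before the service command runs as the olympia user.
docker compose up -d web
```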
+ +set -ue + +if [[ $(id -u) -ne 0 ]]; then + echo "This script must be run as root" + exit 1 +fi + +OLYMPIA_USER="olympia" + +function get_olympia_uid() { echo "$(id -u "$OLYMPIA_USER")"; } +function get_olympia_gid() { echo "$(id -g "$OLYMPIA_USER")"; } + +OLD_HOST_UID=$(get_olympia_uid) + +# If the requested host uid differs from the uid baked in at build time, +# we need to update the olympia user's uid to match the new one. +if [[ "${HOST_UID}" != "${OLD_HOST_UID}" ]]; then + usermod -u ${HOST_UID} ${OLYMPIA_USER} + echo "${OLYMPIA_USER} UID: ${OLD_HOST_UID} -> ${HOST_UID}" +fi + +NEW_HOST_UID=$(get_olympia_uid) +OLYMPIA_ID_STRING="${NEW_HOST_UID}:$(get_olympia_gid)" + +# If we are in production mode, update the ownership of /data/olympia and /deps to match the new id +if [[ "${HOST_MOUNT}" == "production" ]]; then + echo "Updating ownership of /data/olympia and /deps to ${OLYMPIA_ID_STRING}" + chown -R ${OLYMPIA_ID_STRING} /data/olympia /deps +fi + +cat <, and +covers development using Add-ons Server, the source code for [Add-ons](https://addons.mozilla.org/). + +Its source location is in the [/docs](https://github.com/mozilla/addons-server/tree/master/docs) folder. + +Note: this project was once called *olympia*; this documentation often uses +that term. + +## Build the documentation + +This is as simple as running: + +``` +make docs +``` + +This is the same as `cd`'ing to the `docs` folder, and running `make +html` from there. + +We include a daemon that can watch and regenerate the built HTML when +documentation source files change. To use it, go to the `docs` folder +and run: + +``` +python watcher.py 'make html' $(find . 
-name '*.rst') - -Once done, check the result by opening the following file in your browser: - - /path/to/olympia/docs/_build/html/index.html diff --git a/docs/conf.py b/docs/conf.py index 7e1b03209d6c..ef668878b0ad 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -15,8 +15,6 @@ import sys import os -import sphinx_rtd_theme - # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. @@ -51,8 +49,8 @@ master_doc = 'index' # General information about the project. -project = u'addons-server' -copyright = u'2023, Mozilla' +project = 'addons-server' +copyright = '2024, Mozilla' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -114,7 +112,6 @@ on_rtd = os.environ.get('READTHEDOCS', None) == 'True' html_theme = 'sphinx_rtd_theme' -html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # otherwise, readthedocs.org uses their theme by default, so no need to specify # it diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 000000000000..86cfc7e04932 --- /dev/null +++ b/docs/index.md @@ -0,0 +1,40 @@ +# Welcome to Add-ons Servers documentation! + +Add-ons Server is the codebase for ; +the source lives at . + +In the past, this project was *olympia*; documentation that refers to olympia +refers to this project. + +## Contents + +```{toctree} +:maxdepth: 2 + +topics/readme_include +topics/api/index +topics/development/index +topics/logs +topics/remote_addr +topics/third-party +topics/blocklist +``` + +## Archived Contents + +```{toctree} +:maxdepth: 1 +:titlesonly: true + +topics/api/v3_legacy/index +topics/api/v4_frozen/index +``` + +## Playground + +```{toctree} +:maxdepth: 1 +:titlesonly: true + +topics/playground +``` diff --git a/docs/index.rst b/docs/index.rst deleted file mode 100644 index 2cfb924c5e7f..000000000000 --- a/docs/index.rst +++ /dev/null @@ -1,32 +0,0 @@ -========================================= -Welcome to Add-ons Servers documentation! -========================================= - -Add-ons Server is the codebase for https://addons.mozilla.org/; -the source lives at https://github.com/mozilla/addons-server. - -In the past, this project was *olympia*; documentation that refers to olympia -refers to this project. - -Contents --------- - -.. toctree:: - :maxdepth: 2 - - topics/readme_include - topics/api/index - topics/install/index - topics/development/index - topics/third-party - topics/blocklist - -Archived Contents ------------------ - -.. toctree:: - :titlesonly: - :maxdepth: 1 - - topics/api/v3_legacy/index - topics/api/v4_frozen/index diff --git a/docs/topics/api/abuse.rst b/docs/topics/api/abuse.rst index aa9e5f1144dd..ea29003506d0 100644 --- a/docs/topics/api/abuse.rst +++ b/docs/topics/api/abuse.rst @@ -56,6 +56,8 @@ to if necessary. :`. :`. + :`. :>json object|null reporter: The user who submitted the report, if authenticated. :>json int reporter.id: The id of the user who submitted the report. :>json string reporter.name: The name of the user who submitted the report. @@ -86,6 +88,8 @@ to if necessary. :>json string|null operating_system: The client's operating system. :>json string|null operating_system_version: The client's operating system version. :>json string|null reason: The reason for the report. 
+ :>json string|null illegal_category: The type of illegal content - only defined when the reason is set to ``illegal``. + :>json string|null illegal_subcategory: The specific violation - only defined when the reason is set to ``illegal``. .. _abuse-report_entry_point-parameter: @@ -228,6 +232,102 @@ to if necessary. both Offending content is in both locations =========================== =================================================== +.. _abuse-report-illegal_category-parameter: + + Accepted values for the ``illegal_category`` parameter: + + ================================================ ================================================ + Value Description + ================================================ ================================================ + animal_welfare Animal welfare + consumer_information Consumer information infringements + data_protection_and_privacy_violations Data protection and privacy violations + illegal_or_harmful_speech Illegal or harmful speech + intellectual_property_infringements Intellectual property infringements + negative_effects_on_civic_discourse_or_elections Negative effects on civic discourse or elections + non_consensual_behaviour Non-consensual behavior + pornography_or_sexualized_content Pornography or sexualized content + protection_of_minors Protection of minors + risk_for_public_security Risk for public security + scams_and_fraud Scams or fraud + self_harm Self-harm + unsafe_and_prohibited_products Unsafe, non-compliant, or prohibited products + violence Violence + other Other + ================================================ ================================================ + +.. _abuse-report-illegal_subcategory-parameter: + + Accepted values for the ``illegal_subcategory`` parameter: + + ================================================ ============================================ ============================================================================================= + Illegal category Value Description + ================================================ ============================================ ============================================================================================= + animal_welfare other Something else + consumer_information insufficient_information_on_traders Insufficient information on traders + consumer_information noncompliance_pricing Non-compliance with pricing regulations + consumer_information hidden_advertisement Hidden advertisement or commercial communication, including by influencers + consumer_information misleading_info_goods_services Misleading information about the characteristics of the goods and services + consumer_information misleading_info_consumer_rights Misleading information about the consumer’s rights + consumer_information other Something else + data_protection_and_privacy_violations biometric_data_breach Biometric data breach + data_protection_and_privacy_violations missing_processing_ground Missing processing ground for data + data_protection_and_privacy_violations right_to_be_forgotten Right to be forgotten + data_protection_and_privacy_violations data_falsification Data falsification + data_protection_and_privacy_violations other Something else + illegal_or_harmful_speech defamation Defamation + illegal_or_harmful_speech discrimination Discrimination + illegal_or_harmful_speech hate_speech Illegal incitement to violence and hatred based on protected characteristics (hate speech) + illegal_or_harmful_speech other Something else + intellectual_property_infringements 
design_infringement Design infringements + intellectual_property_infringements geographic_indications_infringement Geographical indications infringements + intellectual_property_infringements patent_infringement Patent infringements + intellectual_property_infringements trade_secret_infringement Trade secret infringements + intellectual_property_infringements other Something else + negative_effects_on_civic_discourse_or_elections violation_eu_law Violation of EU law relevant to civic discourse or elections + negative_effects_on_civic_discourse_or_elections violation_national_law Violation of national law relevant to civic discourse or elections + negative_effects_on_civic_discourse_or_elections misinformation_disinformation_disinformation Misinformation, disinformation, foreign information manipulation and interference + negative_effects_on_civic_discourse_or_elections other Something else + non_consensual_behaviour non_consensual_image_sharing Non-consensual image sharing + non_consensual_behaviour non_consensual_items_deepfake Non-consensual items containing deepfake or similar technology using a third party's features + non_consensual_behaviour online_bullying_intimidation Online bullying/intimidation + non_consensual_behaviour stalking Stalking + non_consensual_behaviour other Something else + pornography_or_sexualized_content adult_sexual_material Adult sexual material + pornography_or_sexualized_content image_based_sexual_abuse Image-based sexual abuse (excluding content depicting minors) + pornography_or_sexualized_content other Something else + protection_of_minors age_specific_restrictions_minors Age-specific restrictions concerning minors + protection_of_minors child_sexual_abuse_material Child sexual abuse material + protection_of_minors grooming_sexual_enticement_minors Grooming/sexual enticement of minors + protection_of_minors other Something else + risk_for_public_security illegal_organizations Illegal organizations + risk_for_public_security risk_environmental_damage Risk for environmental damage + risk_for_public_security risk_public_health Risk for public health + risk_for_public_security terrorist_content Terrorist content + risk_for_public_security other Something else + scams_and_fraud inauthentic_accounts Inauthentic accounts + scams_and_fraud inauthentic_listings Inauthentic listings + scams_and_fraud inauthentic_user_reviews Inauthentic user reviews + scams_and_fraud impersonation_account_hijacking Impersonation or account hijacking + scams_and_fraud phishing Phishing + scams_and_fraud pyramid_schemes Pyramid schemes + scams_and_fraud other Something else + self_harm content_promoting_eating_disorders Content promoting eating disorders + self_harm self_mutilation Self-mutilation + self_harm suicide Suicide + self_harm other Something else + unsafe_and_prohibited_products prohibited_products Prohibited or restricted products + unsafe_and_prohibited_products unsafe_products Unsafe or non-compliant products + unsafe_and_prohibited_products other Something else + violence coordinated_harm Coordinated harm + violence gender_based_violence Gender-based violence + violence human_exploitation Human exploitation + violence human_trafficking Human trafficking + violence incitement_violence_hatred General calls or incitement to violence and/or hatred + violence other Something else + other other Something else + ================================================ ============================================ 
============================================================================================= + ------------------------------ Submitting a user abuse report @@ -249,6 +349,8 @@ so reports can be responded to if necessary. :`. :`. + :`. :>json object|null reporter: The user who submitted the report, if authenticated. :>json int reporter.id: The id of the user who submitted the report. :>json string reporter.name: The name of the user who submitted the report. @@ -263,6 +365,8 @@ so reports can be responded to if necessary. :>json string user.username: The username of the user reported. :>json string message: The body/content of the abuse report. :>json string|null lang: The language code of the locale used by the client for the application. + :>json string|null illegal_category: The type of illegal content - only defined when the reason is set to ``illegal``. + :>json string|null illegal_subcategory: The specific violation - only defined when the reason is set to ``illegal``. .. _abuse-user-reason-parameter: @@ -298,6 +402,8 @@ so reports can be responded to if necessary. :`. :`. + :`. :>json object|null reporter: The user who submitted the report, if authenticated. :>json int reporter.id: The id of the user who submitted the report. :>json string reporter.name: The name of the user who submitted the report. @@ -310,6 +416,8 @@ so reports can be responded to if necessary. :>json string message: The body/content of the abuse report. :>json string|null lang: The language code of the locale used by the client for the application. :>json string|null reason: The reason for the report. + :>json string|null illegal_category: The type of illegal content - only defined when the reason is set to ``illegal``. + :>json string|null illegal_subcategory: The specific violation - only defined when the reason is set to ``illegal``. .. _abuse-rating-reason-parameter: @@ -345,6 +453,8 @@ so reports can be responded to if necessary. :`. :`. + :`. :>json object|null reporter: The user who submitted the report, if authenticated. :>json int reporter.id: The id of the user who submitted the report. :>json string reporter.name: The name of the user who submitted the report. @@ -356,6 +466,8 @@ so reports can be responded to if necessary. :>json int collection.id: The id of the collection reported. :>json string message: The body/content of the abuse report. :>json string|null lang: The language code of the locale used by the client for the application. + :>json string|null illegal_category: The type of illegal content - only defined when the reason is set to ``illegal``. + :>json string|null illegal_subcategory: The specific violation - only defined when the reason is set to ``illegal``. .. _abuse-collection-reason-parameter: diff --git a/docs/topics/api/activity.rst b/docs/topics/api/activity.rst index dd85ab8d0031..700e43fa99c5 100644 --- a/docs/topics/api/activity.rst +++ b/docs/topics/api/activity.rst @@ -55,6 +55,7 @@ This endpoint allows you to fetch a single review note for a specific version of :>json string user.name: The name of the reviewer or author. :>json string comments: The text content of the review note. :>json string date: The date the review note was created. + :>json string|null attachment_url: The link to download the associated attachment, if any .. 
_review-note-action: diff --git a/docs/topics/api/addons.rst b/docs/topics/api/addons.rst index 17f46e9d483c..7fa0527f62e7 100644 --- a/docs/topics/api/addons.rst +++ b/docs/topics/api/addons.rst @@ -278,14 +278,12 @@ This endpoint allows you to fetch a specific add-on by id, slug or guid. line "By Firefox" category notable Notable category recommended Recommended category - sponsored Sponsored category spotlight Spotlight category strategic Strategic category - verified Verified category badged A meta category that's available for the ``promoted`` search filter that is all the categories we expect an API client to expose as "reviewed" by Mozilla. - Currently equal to ``line&recommended&sponsored&verified``. + Currently equal to ``line&recommended``. ============== ========================================================== @@ -894,6 +892,26 @@ This endpoint allows you to fetch an add-on EULA and privacy policy. :>json object|null privacy_policy: The text of the Privacy Policy, if present (See :ref:`translated fields `). +---------------------------- +EULA and Privacy Policy Edit +---------------------------- + +.. _addon-eula-policy-edit: + +This endpoint allows an add-on's EULA and privacy policy to be edited. + + .. note:: + This API requires :doc:`authentication `, and for the user to be an author of the add-on. + + .. note:: + This API is not valid for themes - themes do not have EULA or privacy policies. + +.. http:patch:: /api/v5/addons/addon/(int:id|string:slug|string:guid)/eula_policy/ + + :`). + :`). + + -------------- Language Tools -------------- diff --git a/docs/topics/api/blocklist.rst b/docs/topics/api/blocklist.rst index 7d9373643520..deb20ee359ba 100644 --- a/docs/topics/api/blocklist.rst +++ b/docs/topics/api/blocklist.rst @@ -26,9 +26,8 @@ This endpoint returns an add-on Block from the blocklist, specified by guid or i :>json string modified: The date the block was last updated. :>json object|null addon_name: The add-on name, if we have details of an add-on matching that guid (See :ref:`translated fields `). :>json string guid: The guid of the add-on being blocked. - :>json string min_version: The minimum version of the add-on that will be blocked. "0" is the lowest version, meaning all versions up to max_version will be blocked. ("0" - "*" would be all versions). - :>json string max_version: The maximum version of the add-on that will be blocked. "*" is the highest version, meaning all versions from min_version will be blocked. ("0" - "*" would be all versions). :>json string|null reason: Why the add-on needed to be blocked. :>json object|null url: A url to the report/request that detailed why the add-on should potentially be blocked. Typically a bug report on bugzilla.mozilla.org. (See :ref:`Outgoing Links `) - :>json string versions[]: The versions of this add-on that are blocked. + :>json string blocked[]: The versions of this add-on that are (hard) blocked. + :>json string soft_blocked[]: The versions of this add-on that are soft blocked (can be optionally re-enabled by existing users). :>json boolean is_all_versions: Are all versions of this add-on blocked. If ``False``, some versions are not blocked. diff --git a/docs/topics/api/licenses.rst b/docs/topics/api/licenses.rst index 36d1fb03e8fa..eae2de3b47ee 100644 --- a/docs/topics/api/licenses.rst +++ b/docs/topics/api/licenses.rst @@ -16,7 +16,7 @@ License Choices - non-Themes .. 
_license-list-extension: -Some popular opensource licenses can be chosen from, when creating or updating a version +Some popular open source licenses can be chosen from, when creating or updating a version via the :ref:`add-on api`. These licenses cannot be used for themes. Open source license slugs are taken from the `SPDX License list `_ - and are case-sensitive. @@ -25,13 +25,17 @@ Open source license slugs are taken from the `SPDX License list `_ - GPL-2.0-or-later `GNU General Public License v2.0 (or later) `_ - GPL-3.0-or-later `GNU General Public License v3.0 (or later) `_ - LGPL-2.1-or-later `GNU Library General Public License v2.1 (or later) `_ - LGPL-3.0-or-later `GNU Library General Public License v3.0 (or later) `_ - MIT `The MIT License `_ - BSD-2-Clause `The BSD (2 clause) License `_ + MPL-2.0 `Mozilla Public License 2.0 `_ + Apache-2.0 `Apache License 2.0 `_ + GPL-2.0-only `GNU General Public License v2.0 only `_ + GPL-3.0-only `GNU General Public License v3.0 only `_ + LGPL-2.1-only `GNU Lesser General Public License v2.1 only `_ + LGPL-3.0-only `GNU Lesser General Public License v3.0 only `_ + AGPL-3.0-only `GNU Affero General Public License v3.0 only `_ + MIT `MIT License `_ + ISC `ISC License `_ + BSD-2-Clause `BSD 2-Clause "Simplified" License `_ + Unlicense `The Unlicense `_ =================== ============================================================== @@ -43,17 +47,17 @@ License Choices - Themes Creative Commons licenses can be chosen from, when creating or updating a theme version via the :ref:`add-on api`. These are the only license options for themes. -Open source license slugs are taken from the `SPDX License list `_ +Creative Commons license slugs are taken from the `SPDX License list `_ - and are case-sensitive. ====================== =========================================================== Slug License ====================== =========================================================== cc-all-rights-reserved All Rights Reserved. This is not an open source license. - CC-BY-3.0 `Creative Commons Attribution 3.0 `_ - CC-BY-NC-3.0 `Creative Commons Attribution-NonCommercial 3.0 `_ - CC-BY-NC-ND-3.0 `Creative Commons Attribution-NonCommercial-NoDerivs 3.0 `_ - CC-BY-NC-SA-3.0 `Creative Commons Attribution-NonCommercial-Share Alike 3.0 `_ - CC-BY-ND-3.0 `Creative Commons Attribution-NoDerivs 3.0 `_ - CC-BY-SA-3.0 `Creative Commons Attribution-ShareAlike 3.0 `_ + CC-BY-4.0 `Creative Commons Attribution 4.0 `_ + CC-BY-NC-4.0 `Creative Commons Attribution-NonCommercial 4.0 `_ + CC-BY-NC-ND-4.0 `Creative Commons Attribution-NonCommercial-NoDerivs 4.0 `_ + CC-BY-NC-SA-4.0 `Creative Commons Attribution-NonCommercial-Share Alike 4.0 `_ + CC-BY-ND-4.0 `Creative Commons Attribution-NoDerivs 4.0 `_ + CC-BY-SA-4.0 `Creative Commons Attribution-ShareAlike 4.0 `_ ====================== =========================================================== diff --git a/docs/topics/api/overview.rst b/docs/topics/api/overview.rst index 1a9da2fd2ba5..18b5e30d4723 100644 --- a/docs/topics/api/overview.rst +++ b/docs/topics/api/overview.rst @@ -397,6 +397,7 @@ v4 API changelog * 2021-01-14: as addons-frontend now uses /v5/, v5 becomes the stable default; v4 becomes frozen; v3 is deprecated * 2021-02-12: added ``versions_url`` to addon detail endpoint. https://github.com/mozilla/addons-server/issues/16534 * 2021-02-25: ``platform`` filtering was removed from add-on search and autocomplete endpoints. 
https://github.com/mozilla/addons-server/issues/16463 +* 2024-11-28: reviewers APIs to list, browse, compare and draft comments on versions were removed. ---------------- v5 API changelog @@ -469,6 +470,11 @@ These are `v5` specific changes - `v4` changes apply also. * 2023-11-02: removed ``application`` from categories endpoint, flattened ``categories`` in addon detail/search endpoint. https://github.com/mozilla/addons-server/issues/5989 * 2023-11-09: removed reviewers /enable and /disable endpoints. https://github.com/mozilla/addons-server/issues/21356 * 2023-12-07: added ``lang`` parameter to all /abuse/report/ endpoints. https://github.com/mozilla/addons-server/issues/21529 +* 2024-06-20: added ``illegal_category`` parameter to all /abuse/report/ endpoints. https://github.com/mozilla/addons/issues/14870 +* 2024-06-20: added ``illegal_subcategory`` parameter to all /abuse/report/ endpoints. https://github.com/mozilla/addons/issues/14875 +* 2024-08-08: added support for writing to add-on eula_policy endpoint. https://github.com/mozilla/addons/issues/14927 +* 2024-08-22: restricted add-on eula_policy endpoint to non-themes only. https://github.com/mozilla/addons/issues/14937 +* 2024-10-17: replaced ``versions`` with ``blocked`` and ``soft_blocked`` in blocklist api; dropped unused ``min_version`` and ``max_version``. https://github.com/mozilla/addons/issues/15015 .. _`#11380`: https://github.com/mozilla/addons-server/issues/11380/ .. _`#11379`: https://github.com/mozilla/addons-server/issues/11379/ diff --git a/docs/topics/api/ratings.rst b/docs/topics/api/ratings.rst index a9ac3323e663..4dbaeec9451b 100644 --- a/docs/topics/api/ratings.rst +++ b/docs/topics/api/ratings.rst @@ -27,6 +27,7 @@ user has already posted a rating for the current version of an add-on. .. http:get:: /api/v5/ratings/rating/ :query string addon: The :ref:`add-on ` id, slug, or guid to fetch ratings from. When passed, the ratings shown will always be the latest posted by each user on this particular add-on (which means there should only be one rating per user in the results), unless the ``version`` parameter is also passed. + :query string app: Set the :ref:`add-on application ` for that query. This won't filter the results, but can affect the URLs being returned. Defaults to ``firefox``. :query string exclude_ratings: Exclude ratings by their ``id``. Multiple ratings can be specified, separated by comma(s). :query string filter: The :ref:`filter(s) ` to apply. :query string score: Only include ratings that have been given a specific ``score``. Multiple scores can be specified, separated by comma(s). diff --git a/docs/topics/api/reviewers.rst b/docs/topics/api/reviewers.rst index 20f2dec9549a..3018c797bdac 100644 --- a/docs/topics/api/reviewers.rst +++ b/docs/topics/api/reviewers.rst @@ -9,8 +9,11 @@ Reviewers These APIs are not frozen and can change at any time without warning. See :ref:`the API versions available` for alternatives if you need stability. - The only authentication method available at - the moment is :ref:`the internal one`. + + The only authentication method available for these APIs is + :ref:`the internal one`, except for the + :ref:`validation results` endpoint, which allows both + internal and :ref:`external auth`. --------- Subscribe @@ -50,6 +53,23 @@ sent when a new version is submitted on a particular add-on. .. http:post:: /api/v5/reviewers/addon/(int:addon_id)/unsubscribe_unlisted/ +--------------- +File Validation +--------------- + +.. 
_reviewers-validation: + +This endpoint allows you to view the validation results of a given file +belonging to an add-on. + + .. note:: + Requires authentication and the current user to have any + reviewer-related permission. + +.. http:post:: /api/v5/reviewers/addon/(int: addon_id)/file/(int: file_id)/validation/ + + :>json object validation: the validation results + ----- Flags ----- @@ -102,274 +122,3 @@ denied. :statuscode 202: Success. :statuscode 409: The add-on GUID was already denied. - -------------- -List Versions -------------- - -This endpoint allows you to list versions that can be used either for :ref:`browsing ` or diffing versions. - - .. note:: - Requires authentication and the current user to have ``ReviewerTools:View`` - permission for listed add-ons as well as ``Addons:ReviewUnlisted`` for - unlisted add-ons. Additionally the current user can also be the owner - of the add-on. - - This endpoint is not paginated as normal, and instead will return all - results without obeying regular pagination parameters. - - -If the user doesn't have ``AddonsReviewUnlisted`` permissions only listed versions are shown. Otherwise it can contain mixed listed and unlisted versions. - -.. http:get:: /api/v5/reviewers/addon/(int:addon_id)/versions/ - - :>json int id: The version id. - :>json string channel: The version channel, which determines its visibility on the site. Can be either ``unlisted`` or ``listed``. - :>json string version: The version number string for the version. - -.. _reviewers-versions-browse: - ------- -Browse ------- - -This endpoint allows you to browse through the contents of an Add-on version. - - .. note:: - Requires authentication and the current user to have ``ReviewerTools:View`` - permission for listed add-ons as well as ``Addons:ReviewUnlisted`` for - unlisted add-ons. Additionally the current user can also be the owner - of the add-on. - -.. http:get:: /api/v5/reviewers/addon/(int:addon_id)/versions/(int:version_id)/ - - Inherits the following properties from :ref:`version detail `: ``id``, ``channel``, ``reviewed`` and ``version``. - - .. _reviewers-versions-browse-detail: - - :param string file: The specific file in the XPI to retrieve. Defaults to manifest.json, install.rdf or package.json for Add-ons as well as the XML file for search engines. - :param boolean file_only: Indicates that the API should only return data for the requested file, and not version data. If this is ``true`` then the only property returned of those listed below is the ``file`` property. - :>json string validation_url_json: The absolute url to the addons-linter validation report, rendered as JSON. - :>json string validation_url: The absolute url to the addons-linter validation report, rendered as HTML. - :>json boolean has_been_validated: ``True`` if the version has been validated through addons-linter. - :>json object addon: A simplified :ref:`add-on ` object that contains only a few properties: ``id``, ``name``, ``icon_url`` and ``slug``. - :>json array file_entries[]: The complete file-tree of the extracted XPI. - :>json int file_entries[].depth: Level of folder-tree depth, starting with 0. - :>json string file_entries[].filename: The filename of the file. - :>json string file_entries[].path: The absolute path (from the root of the XPI) of the file. - :>json string file_entries[].mime_category: The mime type category of this file. Can be ``image``, ``directory``, ``text`` or ``binary``. - :>json object file: The requested file. 
- :>json int file.id: The id of the submitted file (i.e., the xpi file). - :>json string file.content: Raw content of the requested file. - :>json string file.selected_file: The selected file, either from the ``file`` parameter or the default (manifest.json, install.rdf or package.json for Add-ons as well as the XML file for search engines). - :>json string|null file.download_url: The download url of the selected file or ``null`` in case of a directory. - :>json string file.mimetype: The determined mimetype of the selected file or ``application/octet-stream`` if none could be determined. - :>json string file.sha256: SHA256 hash of the selected file. - :>json int file.size: The size of the selected file in bytes. - :>json string file.filename: The filename of the file. - :>json string file.mime_category: The mime type category of this file. Can be ``image``, ``directory``, ``text`` or ``binary``. - :>json boolean uses_unknown_minified_code: Indicates that the selected file could be using minified code. - - -------- -Compare -------- - -This endpoint allows you to compare two Add-on versions with each other. - - .. note:: - Requires authentication and the current user to have ``ReviewerTools:View`` - permission for listed add-ons as well as ``Addons:ReviewUnlisted`` for - unlisted add-ons. Additionally the current user can also be the owner - of the add-on. - -.. http:get:: /api/v5/reviewers/addon/(int:addon_id)/versions/(int:base_version_id)/compare_to/(int:version_id)/ - - .. note:: - - Contrary to what ``git diff`` does, this API renders a hunk full of unmodified lines for unmodified files. - - Inherits most properties from :ref:`browse detail `, except that most of the `file.entries[]` properties - and `file.download_url` can be `null` in case of a deleted file. - - Properties specific to this endpoint: - - :>json array file_entries[]: The complete file-tree of the extracted XPI. - :>json string file.entries[].status: Status of this file, see https://git-scm.com/docs/git-status#_short_format - :>json int file_entries[].depth: Level of folder-tree depth, starting with 0. - :>json string file_entries[].filename: The filename of the file. - :>json string file_entries[].path: The absolute path (from the root of the XPI) of the file. - :>json string file_entries[].mime_category: The mime type category of this file. Can be ``image``, ``directory``, ``text`` or ``binary``. - :>json object|null diff: See the following output with inline comments for a complete description. - :>json object base_file: The file attached to the base version you're comparing against. - :>json object base_file.id: The id of the base file. - :>json boolean uses_unknown_minified_code: Indicates that the selected file in either the current or the parent version could be using minified code. - - Git patch we're talking about: - - .. 
code:: diff - - diff --git a/README.md b/README.md - index a37979d..b12683c 100644 - --- a/README.md - +++ b/README.md - @@ -1 +1 @@ - -# beastify - +Updated readme - diff --git a/manifest.json b/manifest.json - index aba695f..24f385f 100644 - --- a/manifest.json - +++ b/manifest.json - @@ -1,36 +1 @@ - -{ - - - - "manifest_version": 2, - - "name": "Beastify", - - "version": "1.0", - - - - "permissions": [ - - "http://*/*", - - "https://*/*", - - "bookmarks", - - "made up permission", - - "https://google.com/" - - ], - - - - "content_scripts": [ - - { - - "matches": ["*://*.mozilla.org/*"], - - "js": ["borderify.js"] - - }, - - { - - "matches": ["*://*.mozilla.com/*", "https://*.mozillians.org/*"], - - "js": ["borderify.js"] - - } - - ], - - - - "browser_action": { - - "default_icon": "button/beasts.png", - - "default_title": "Beastify", - - "default_popup": "popup/choose_beast.html" - - }, - - - - "web_accessible_resources": [ - - "beasts/*.jpg" - - ] - - - -} - +{"id": "random"} - - - The following represents the git patch from above. - - .. code:: javascript - - "diff": { - "path": "README.md", - "old_path": "README.md", - "size": 15, // Size in bytes - "lines_added": 1, // How many lines got added - "lines_deleted": 1, // How many lines got deleted - "is_binary": false, // Is this a binary file (as determined by git) - "mode": "M", // Status of this file, see https://git-scm.com/docs/git-status#_short_format - "hunks": [ - { - "header": "@@ -1 +1 @@\\n", - "old_start": 1, - "new_start": 1, - "old_lines": 1, - "new_lines": 1, - "changes": [ - { - "content": "# beastify\\n", - "type": "delete", - "old_line_number": 1, - "new_line_number": -1 - }, - { - "content": "Updated readme\\n", - "type": "insert", - "old_line_number": -1, - "new_line_number": 1 - } - ] - } - ], - "parent": "075c5755198be472522477a1b396951b3b68ac18", - "hash": "00161dcf22afb7bab23cf205f0c903eb5aad5431" - } - - ------------------ -Drafting Comments ------------------ - -These endpoints allow you to draft comments that can be submitted through the regular reviewer pages. - - .. note:: - Requires authentication and the current user to have ``ReviewerTools:View`` - permission for listed add-ons as well as ``Addons:ReviewUnlisted`` for - unlisted add-ons. Additionally the current user can also be the owner - of the add-on. - - -.. http:get:: /api/v5/reviewers/addon/(int:addon_id)/versions/(int:version_id)/draft_comments/ - - Retrieve existing draft comments for a specific version. See :ref:`pagination ` for more details. - - :>json int count: The number of comments for this version. - :>json string next: The URL of the next page of results. - :>json string previous: The URL of the previous page of results. - :>json array results: An array of :ref:`comments `. - - -.. http:get:: /api/v5/reviewers/addon/(int:addon_id)/versions/(int:version_id)/draft_comments/(int:comment_id)/ - - .. _reviewers-draft-comment-detail-object: - - :>json int id: The id of the draft comment object. - :>json string comment: The comment that is being drafted as part of a review. Specific to a line in a file. - :>json string|null filename: The full file path a specific comment is related to. Can be ``null`` in case a comment doesn't belong to a specific file but the whole version. - :>json int|null lineno: The line number a specific comment is related to. Please make sure that in case of comments for git diffs, that the `lineno` used here belongs to the file in the version that belongs to `version_id` and not it's parent. 
Can be ``null`` in case a comment belongs to the whole file and not to a specific line. - :>json int version_id: The id of the version. - :>json int user.id: The id for an author. - :>json string user.name: The name for an author. - :>json string user.username: The username for an author. - :>json string|null user.url: The link to the profile page for an author, if the author's profile is public. - -.. http:post:: /api/v5/reviewers/addon/(int:addon_id)/versions/(int:version_id)/draft_comments/ - - Create a draft comment for a specific version. - - :`. - -.. http:delete:: /api/v5/reviewers/addon/(int:addon_id)/versions/(int:version_id)/draft_comments/(int:comment_id)/ - - Delete a draft comment. - - :statuscode 204: The comment has been deleted successfully. - :statuscode 404: The user doesn't have the permission to delete. This might happen when someone tries to delete a comment created by another reviewer or author. - - -.. http:patch:: /api/v5/reviewers/addon/(int:addon_id)/versions/(int:version_id)/draft_comments/(int:comment_id) - - Update a comment, it's filename or line number. - - :`. diff --git a/docs/topics/api/v4_frozen/addons.rst b/docs/topics/api/v4_frozen/addons.rst index 0cd6702b7ee3..4b8703d1a870 100644 --- a/docs/topics/api/v4_frozen/addons.rst +++ b/docs/topics/api/v4_frozen/addons.rst @@ -270,14 +270,12 @@ This endpoint allows you to fetch a specific add-on by id, slug or guid. ============== ========================================================== line "By Firefox" category recommended Recommended category - sponsored Sponsored category spotlight Spotlight category strategic Strategic category - verified Verified category badged A meta category that's available for the ``promoted`` search filter that is all the categories we expect an API client to expose as "reviewed" by Mozilla. - Currently equal to ``line&recommended&sponsored&verified``. + Currently equal to ``line&recommended``. ============== ========================================================== ----------------------------- diff --git a/docs/topics/api/v4_frozen/reviewers.rst b/docs/topics/api/v4_frozen/reviewers.rst index b8060dc40334..163f1e51a4a1 100644 --- a/docs/topics/api/v4_frozen/reviewers.rst +++ b/docs/topics/api/v4_frozen/reviewers.rst @@ -129,304 +129,3 @@ denied. :statuscode 202: Success. :statuscode 409: The add-on GUID was already denied. - -------------- -List Versions -------------- - -This endpoint allows you to list versions that can be used either for :ref:`browsing ` or diffing versions. - - .. note:: - Requires authentication and the current user to have ``ReviewerTools:View`` - permission for listed add-ons as well as ``Addons:ReviewUnlisted`` for - unlisted add-ons. Additionally the current user can also be the owner - of the add-on. - - This endpoint is not paginated as normal, and instead will return all - results without obeying regular pagination parameters. - - -If the user doesn't have ``AddonsReviewUnlisted`` permissions only listed versions are shown. Otherwise it can contain mixed listed and unlisted versions. - -.. http:get:: /api/v4/reviewers/addon/(int:addon_id)/versions/ - - :>json int id: The version id. - :>json string channel: The version channel, which determines its visibility on the site. Can be either ``unlisted`` or ``listed``. - :>json string version: The version number string for the version. - -.. _v4-reviewers-versions-browse: - ------- -Browse ------- - -This endpoint allows you to browse through the contents of an Add-on version. - - .. 
note:: - Requires authentication and the current user to have ``ReviewerTools:View`` - permission for listed add-ons as well as ``Addons:ReviewUnlisted`` for - unlisted add-ons. Additionally the current user can also be the owner - of the add-on. - -.. http:get:: /api/v4/reviewers/addon/(int:addon_id)/versions/(int:version_id)/ - - Inherits the following properties from :ref:`version detail `: ``id``, ``channel``, ``reviewed`` and ``version``. - - .. _v4-reviewers-versions-browse-detail: - - :param string file: The specific file in the XPI to retrieve. Defaults to manifest.json, install.rdf or package.json for Add-ons as well as the XML file for search engines. - :param boolean file_only: Indicates that the API should only return data for the requested file, and not version data. If this is ``true`` then the only property returned of those listed below is the ``file`` property. - :>json string validation_url_json: The absolute url to the addons-linter validation report, rendered as JSON. - :>json string validation_url: The absolute url to the addons-linter validation report, rendered as HTML. - :>json boolean has_been_validated: ``True`` if the version has been validated through addons-linter. - :>json object addon: A simplified :ref:`add-on ` object that contains only a few properties: ``id``, ``name``, ``icon_url`` and ``slug``. - :>json array file_entries[]: The complete file-tree of the extracted XPI. - :>json int file_entries[].depth: Level of folder-tree depth, starting with 0. - :>json string file_entries[].filename: The filename of the file. - :>json string file_entries[].path: The absolute path (from the root of the XPI) of the file. - :>json string file_entries[].mime_category: The mime type category of this file. Can be ``image``, ``directory``, ``text`` or ``binary``. - :>json object file: The requested file. - :>json int file.id: The id of the submitted file (i.e., the xpi file). - :>json string file.content: Raw content of the requested file. - :>json string file.selected_file: The selected file, either from the ``file`` parameter or the default (manifest.json, install.rdf or package.json for Add-ons as well as the XML file for search engines). - :>json string|null file.download_url: The download url of the selected file or ``null`` in case of a directory. - :>json string file.mimetype: The determined mimetype of the selected file or ``application/octet-stream`` if none could be determined. - :>json string file.sha256: SHA256 hash of the selected file. - :>json int file.size: The size of the selected file in bytes. - :>json string file.filename: The filename of the file. - :>json string file.mime_category: The mime type category of this file. Can be ``image``, ``directory``, ``text`` or ``binary``. - :>json boolean uses_unknown_minified_code: Indicates that the selected file could be using minified code. - - -------- -Compare -------- - -This endpoint allows you to compare two Add-on versions with each other. - - .. note:: - Requires authentication and the current user to have ``ReviewerTools:View`` - permission for listed add-ons as well as ``Addons:ReviewUnlisted`` for - unlisted add-ons. Additionally the current user can also be the owner - of the add-on. - -.. http:get:: /api/v4/reviewers/addon/(int:addon_id)/versions/(int:base_version_id)/compare_to/(int:version_id)/ - - .. note:: - - Contrary to what ``git diff`` does, this API renders a hunk full of unmodified lines for unmodified files. 
- - Inherits most properties from :ref:`browse detail `, except that most of the `file.entries[]` properties - and `file.download_url` can be `null` in case of a deleted file. - - Properties specific to this endpoint: - - :>json array file_entries[]: The complete file-tree of the extracted XPI. - :>json string file.entries[].status: Status of this file, see https://git-scm.com/docs/git-status#_short_format - :>json int file_entries[].depth: Level of folder-tree depth, starting with 0. - :>json string file_entries[].filename: The filename of the file. - :>json string file_entries[].path: The absolute path (from the root of the XPI) of the file. - :>json string file_entries[].mime_category: The mime type category of this file. Can be ``image``, ``directory``, ``text`` or ``binary``. - :>json object|null diff: See the following output with inline comments for a complete description. - :>json object base_file: The file attached to the base version you're comparing against. - :>json object base_file.id: The id of the base file. - :>json boolean uses_unknown_minified_code: Indicates that the selected file in either the current or the parent version could be using minified code. - - Git patch we're talking about: - - .. code:: diff - - diff --git a/README.md b/README.md - index a37979d..b12683c 100644 - --- a/README.md - +++ b/README.md - @@ -1 +1 @@ - -# beastify - +Updated readme - diff --git a/manifest.json b/manifest.json - index aba695f..24f385f 100644 - --- a/manifest.json - +++ b/manifest.json - @@ -1,36 +1 @@ - -{ - - - - "manifest_version": 2, - - "name": "Beastify", - - "version": "1.0", - - - - "permissions": [ - - "http://*/*", - - "https://*/*", - - "bookmarks", - - "made up permission", - - "https://google.com/" - - ], - - - - "content_scripts": [ - - { - - "matches": ["*://*.mozilla.org/*"], - - "js": ["borderify.js"] - - }, - - { - - "matches": ["*://*.mozilla.com/*", "https://*.mozillians.org/*"], - - "js": ["borderify.js"] - - } - - ], - - - - "browser_action": { - - "default_icon": "button/beasts.png", - - "default_title": "Beastify", - - "default_popup": "popup/choose_beast.html" - - }, - - - - "web_accessible_resources": [ - - "beasts/*.jpg" - - ] - - - -} - +{"id": "random"} - - - The following represents the git patch from above. - - .. code:: javascript - - "diff": { - "path": "README.md", - "old_path": "README.md", - "size": 15, // Size in bytes - "lines_added": 1, // How many lines got added - "lines_deleted": 1, // How many lines got deleted - "is_binary": false, // Is this a binary file (as determined by git) - "mode": "M", // Status of this file, see https://git-scm.com/docs/git-status#_short_format - "hunks": [ - { - "header": "@@ -1 +1 @@\\n", - "old_start": 1, - "new_start": 1, - "old_lines": 1, - "new_lines": 1, - "changes": [ - { - "content": "# beastify\\n", - "type": "delete", - "old_line_number": 1, - "new_line_number": -1 - }, - { - "content": "Updated readme\\n", - "type": "insert", - "old_line_number": -1, - "new_line_number": 1 - } - ] - } - ], - "parent": "075c5755198be472522477a1b396951b3b68ac18", - "hash": "00161dcf22afb7bab23cf205f0c903eb5aad5431" - } - - ----------------- -Canned Responses ----------------- - -This endpoint allows you to retrieve a list of canned responses. - - .. note:: - Requires authentication and the current user to have any - reviewer-related permission. - -.. http:get:: /api/v4/reviewers/canned-responses/ - - .. _v4-reviewers-canned-response-detail: - - Retrieve canned responses - - .. 
note:: - Because this endpoint is not returning too much data it is not - paginated as normal, and instead will return all results without - obeying regular pagination parameters. - - :>json int id: The canned response id. - :>json string title: The title of the canned response. - :>json string response: The text that will be filled in as the response. - :>json string category: The category of the canned response. For example, "Other", "Privacy reasons" etc. - - ------------------ -Drafting Comments ------------------ - -These endpoints allow you to draft comments that can be submitted through the regular reviewer pages. - - .. note:: - Requires authentication and the current user to have ``ReviewerTools:View`` - permission for listed add-ons as well as ``Addons:ReviewUnlisted`` for - unlisted add-ons. Additionally the current user can also be the owner - of the add-on. - - -.. http:get:: /api/v4/reviewers/addon/(int:addon_id)/versions/(int:version_id)/draft_comments/ - - Retrieve existing draft comments for a specific version. See :ref:`pagination ` for more details. - - :>json int count: The number of comments for this version. - :>json string next: The URL of the next page of results. - :>json string previous: The URL of the previous page of results. - :>json array results: An array of :ref:`comments `. - - -.. http:get:: /api/v4/reviewers/addon/(int:addon_id)/versions/(int:version_id)/draft_comments/(int:comment_id)/ - - .. _v4-reviewers-draft-comment-detail-object: - - :>json int id: The id of the draft comment object. - :>json string comment: The comment that is being drafted as part of a review. Specific to a line in a file. - :>json string|null filename: The full file path a specific comment is related to. Can be ``null`` in case a comment doesn't belong to a specific file but the whole version. - :>json int|null lineno: The line number a specific comment is related to. Please make sure that in case of comments for git diffs, that the `lineno` used here belongs to the file in the version that belongs to `version_id` and not it's parent. Can be ``null`` in case a comment belongs to the whole file and not to a specific line. - :>json int version_id: The id of the version. - :>json int user.id: The id for an author. - :>json string user.name: The name for an author. - :>json string user.username: The username for an author. - :>json string|null user.url: The link to the profile page for an author, if the author's profile is public. - :>json object|null canned_response: Object holding the :ref:`canned response ` if set. - -.. http:post:: /api/v4/reviewers/addon/(int:addon_id)/versions/(int:version_id)/draft_comments/ - - Create a draft comment for a specific version. - - :` (optional). - - :statuscode 201: New comment has been created. - :statuscode 400: An error occurred, check the `error` value in the JSON. - :statuscode 403: The user doesn't have the permission to create a comment. This might happen (among other cases) when someone without permissions for unlisted versions tries to add a comment for an unlisted version (which shouldn't happen as the user doesn't see unlisted versions, but it's blocked here too). - - **Response** - In case of successful creation, the response is a :ref:`draft comment object`. - -.. http:delete:: /api/v4/reviewers/addon/(int:addon_id)/versions/(int:version_id)/draft_comments/(int:comment_id)/ - - Delete a draft comment. - - :statuscode 204: The comment has been deleted successfully. - :statuscode 404: The user doesn't have the permission to delete. 
This might happen when someone tries to delete a comment created by another reviewer or author. - - -.. http:patch:: /api/v4/reviewers/addon/(int:addon_id)/versions/(int:version_id)/draft_comments/(int:comment_id) - - Update a comment, it's filename or line number. - - :` (optional). - - :statuscode 200: The comment has been updated. - :statuscode 400: An error occurred, check the `error` value in the JSON. - - **Response** - In case of successful creation, the response is a :ref:`draft comment object`. diff --git a/docs/topics/blocklist.md b/docs/topics/blocklist.md new file mode 100644 index 000000000000..788d96ff18fd --- /dev/null +++ b/docs/topics/blocklist.md @@ -0,0 +1,146 @@ +# AMO Blocklist + +(blocklist-doc)= + +This is a high-level overview of the addons-server implementation of the addons blocklist. + +## Full-Stack Overview + +(blocklist-doc-overview)= + +Firefox determines which add-ons are unsafe to keep enabled by checking a blocklist. +With v1 and v2 blocklist this is literally a list of addon guids, plus other metadata, that should be blocked from executing (v1 is XML format, v2 is JSON format); +with v3 blocklist this is a bloomfilter that is queried - if the addon guid and xpi version is present then it's in the blocklist so should be blocked by Firefox. + +**The blocklists are all served via Firefox Remote Settings (the current implementation is Kinto):** + +- the v3 blocklist bloomfilter files are attachments in the records of <https://firefox.settings.services.mozilla.com/v1/buckets/blocklists/collections/addons-bloomfilters/records> +- the v2 blocklist is the full output of <https://firefox.settings.services.mozilla.com/v1/buckets/blocklists/collections/addons/records> +- the v1 blocklist is a (server-side) wrapper around the v2 blocklist that rewrites the JSON into XML + +```{admonition} legacy +v2/v1 are referred to as "legacy" blocklist in these docs. +``` + +AMO holds the addon blocklist records and generates the bloomfilters as needed, which are then uploaded to Remote Settings. The records are managed via the admin tools on addons-server. + +If any changes are needed to the contents of the v1/v2 blocklist they must be made via the Firefox Remote Settings web admin tool - there is no longer any way to import or export changes between the v1/v2 blocklist and the v3 blocklist. + +## Admin Tool + +(blocklist-doc-admin)= + +_Block_ records aren't created and changed directly via the admin tool; instead _BlockSubmission_ records are created that hold details of the submission of (potentially many) blocks that will be created, updated, or deleted. +If the add-ons that the Block affects are used by a significant number of users (see _DUAL_SIGNOFF_AVERAGE_DAILY_USERS_THRESHOLD_ setting - currently 100k) then the BlockSubmission must be signed off (approved) by another admin user first. + +Once the submission is approved - or immediately after saving if the average daily user counts are under the threshold - a task is started to asynchronously create, update, or delete Block records. + +## Bloomfilter Generation + +(blocklist-doc-bloomfilter)= + +Generating a bloomfilter can be quite slow, so a new one is only generated every 6 hours - or less frequently if no Block records have been changed/added/deleted in that time - via a cron job. + +An ad-hoc bloomfilter can be created with the _export_blocklist_ command but it isn't considered for the cron job (or {ref}`stashing <blocklist-doc-stashing>`). + +### Bloomfilter records + +(blocklist-doc-bloomfilter-records)= + +A record is created on Remote Settings for each bloomfilter and the filter uploaded as an attachment. 
The _generation_time_ property represents the point in time when all previous addon guid + versions and blocks were used to generate the bloomfilter. +An add-on version/file from before this time will definitely be accounted for in the bloomfilter so we can reliably assert if it's blocked or not. +An add-on version/file from after this time can't be reliably asserted - there may be false positives or false negatives. + +See <https://github.com/mozilla/addons-server/issues/13695> and <https://github.com/mozilla/addons-server/blob/master/src/olympia/blocklist/cron.py> + +#### Bloomfilter record example + +```json +{ + "attachment": { + "hash": "37ba24caec49afe6c97c424623e226e31ad052286ffa66d794eb82497dabc279", + "size": 28561, + "filename": "filter.bin", + "location": "staging/addons-bloomfilters/1234567890.bin", + "mimetype": "application/octet-stream" + }, + "key_format": "{guid}:{version}", + "attachment_type": "bloomfilter-base", + "generation_time": 1587990908999 +} +``` + +### Stashing + +(blocklist-doc-stashing)= + +Because the bloomfilter files can be quite large, "stash" files are also generated, which represent the changes since the previous bloomfilter generation and can be used by Firefox instead to save on bandwidth. + +Multiple stashes can be applied by Firefox (in chronological order) to match the state of an up-to-date bloomfilter. + +#### Stash record example + +```json +{ + "stash": { + "blocked": [ + "{6f6b1eaa-bb69-4cdb-a24f-1014493d4290}:10.48", + "kittens@pioneer.mozilla.com:1.2", + "kittens@pioneer.mozilla.com:1.1", + "{b01e0601-eddc-4306-886b-8a4fb5c38a1e}:1", + "{232f11df-20ca-49d4-94eb-e3e63d7ae773}:1.1.2", + "kittens@pioneer.mozilla.com:1.3" + ], + "unblocked": [ + "{896aff0b-d86e-4dd5-9097-5869579b4c28}:1.2", + "{95ffc924-6ea7-4dfb-8f7b-1dd44f2159d1}:1.22.2" + ] + }, + "key_format": "{guid}:{version}", + "stash_time": 1587990908999 +} +``` + +The blocked items represent new versions that should be blocked in addition to any matches in the bloomfilter; the unblocked items represent versions that shouldn't be blocked (even though they would match the bloomfilter). _stash_time_ is a timestamp that can be relied on to order the stashes. + +### addons-bloomfilter collection + +(blocklist-doc-collection)= + +The collection on Remote Settings at any given point will consist of a single record with _"attachment-type": "bloomfilter-base"_, which is the base bloomfilter to compare the stash files to, and potentially subsequent records which either contain an attachment with _"attachment-type": "bloomfilter-full"_, or stash data directly in the data property. The client-side algorithm would be to: + +- Get the entire collection from Remote Settings (the implementation supports diffing so only new records would be downloaded). +- Download the base bloomfilter attachment (_"attachment-type": "bloomfilter-base"_) if it hasn't already been downloaded. +- Gather the stash records and consolidate them, taking into account timestamps so later stashes override earlier stashes. + +#### Stashing support disabled in Firefox + +If stashing support is disabled in a Firefox version, the stash records can be ignored and all bloomfilters considered instead. (Records with a bloomfilter attachment always have a _generation_time_ field). Firefox would just download the latest attachment and use that as its bloomfilter. + +### Process + +(blocklist-doc-process)= + +**The server process is:** + +- If the _blocklist_mlbf_submit_ waffle switch is enabled, check if there have been any changes to the blocklist since the previous execution of the cron job - if not return without any action. 
+- Produce a list of all "guid:version" combinations of all signed webextension addons/versions in the database. (not blocked guids) +- Produce a list of "guid:version" combinations that the Block records cover. Blocks have a minimum and maximum version range - 0 being the minimum, and * meaning infinity, so 0 - * would be all versions of an add-on. (blocked guids) +- Create and verify a bloomfilter with these two lists (we use <https://github.com/mozilla/filter-cascade/>); save the filter file and the two lists (as JSON) +- Compare list of blocked guids from this execution to the base bloomfilter file. If there have been few changes then write those changes to a stash JSON blob + + 1. Upload the stash as JSON data in a record + 2. Upload the filter as an attachment to a separate record with the type _bloomfilter-full_ + +- If there have been many changes then: + + 1. Clear the collection on Remote Settings + 2. Upload the filter as an attachment to a separate record with the type _bloomfilter-base_ + +## Legacy Blocklist + +(blocklist-doc-legacy)= + +To populate the blocklist on AMO the legacy blocklist on Remote Settings was imported; all guids that matched addons on AMO (and that had at least one webextension version) were added; any guids that were regular expressions were "expanded" to individual records for each addon present in the AMO database. + +Support for importing the legacy blocklist into AMO, and exporting changes from AMO into the legacy blocklist, has now been removed; it is no longer possible to propagate changes made to the v2 blocklist via the remote-settings web admin tool to the v3 blocklist held on AMO, or vice versa. diff --git a/docs/topics/blocklist.rst b/docs/topics/blocklist.rst deleted file mode 100644 index 779bcc469b9e..000000000000 --- a/docs/topics/blocklist.rst +++ /dev/null @@ -1,174 +0,0 @@ -############# -AMO Blocklist -############# - -.. _blocklist-doc: - -This is a high-level overview of the addons-server implementation of the addons blocklist. - - -=================== -Full-Stack Overview -=================== - -.. _blocklist-doc-overview: - -Firefox determines which add-ons are unsafe to allow to continue to be enabled by checking a blocklist. -With v1 and v2 blocklist this is literally a list of addon guids, plus other metadata, that should be blocked from executing (v1 is XML format, v2 is JSON format); -with v3 blocklist this is a bloomfilter that is queried - if the addon guid and xpi version is present then it's in the blocklist so should be blocked by Firefox. - -The blocklists are all served via Firefox Remote Settings (the current implementation is Kinto): - - the v3 blocklist bloomfilter files are attachments in the records of https://firefox.settings.services.mozilla.com/v1/buckets/blocklists/collections/addons-bloomfilters/records - - the v2 blocklist is the full output of https://firefox.settings.services.mozilla.com/v1/buckets/blocklists/collections/addons/records - - the v1 blocklist is a (server-side) wrapper around the v2 blocklist that rewrites the JSON into XML - -.. note:: - v2/v1 are referred to as "legacy" blocklist in these docs. - -AMO holds the addon blocklist records and generates the bloomfilters as needed, which are then uploaded to Remote Settings. The records are managed via the admin tools on addons-server. - -If any changes are needed to the contents of the v1/v2 blocklist it must be made via the Firefox Remote Settings web admin tool - there is no longer any way to import or export changes between the v1/v2 blocklist and the v3 blocklist. 
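The stash rules above (later stashes override earlier ones, and stash entries override the base filter) are the heart of the client-side algorithm, so a minimal Python sketch may help. It is illustrative only, assuming stash records shaped like the stash example; the binary bloomfilter is stood in for by a plain callable, and none of the names below come from the actual Firefox implementation:

```python
# Hypothetical sketch of the client-side algorithm described above.
def is_blocked(key, base_filter_contains, stash_records):
    """Return True if the "guid:version" key should be blocked.

    base_filter_contains stands in for a lookup against the downloaded
    base bloomfilter; stash_records are dicts shaped like the stash
    record example above.
    """
    # Walk the stashes newest-first so later stashes override earlier ones.
    for record in sorted(stash_records, key=lambda r: r["stash_time"], reverse=True):
        stash = record["stash"]
        if key in stash["blocked"]:
            return True   # blocked in addition to any bloomfilter match
        if key in stash["unblocked"]:
            return False  # overrides a match in the base bloomfilter
    # No stash mentions the key: fall back to the base bloomfilter.
    return base_filter_contains(key)


stashes = [{"stash": {"blocked": ["kittens@pioneer.mozilla.com:1.2"],
                      "unblocked": []},
            "stash_time": 1587990908999}]
assert is_blocked("kittens@pioneer.mozilla.com:1.2", lambda key: False, stashes)
```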
- - -========== -Admin Tool -========== - -.. _blocklist-doc-admin: - -`Block` records aren't created and changed directly via the admin tool; instead `BlockSubmission` records are created that hold details of the submission of (potentially many) blocks that will be created, updated, or deleted. -If the add-ons that the Block affects are used by a significant number of users (see `DUAL_SIGNOFF_AVERAGE_DAILY_USERS_THRESHOLD` setting - currently 100k) then the BlockSubmission must be signed off (approved) by another admin user first. - -Once the submission is approved - or immediately after saving if the average daily user counts are under the threshold - a task is started to asynchronously create, update, or delete, Block records. - - -====================== -Bloomfilter Generation -====================== - -.. _blocklist-doc-bloomfilter: - -Generating a bloomfilter can be quite slow, so a new one is only generated every 6 hours - or less frequently if no Block records have been changed/added/deleted in that time - via a cron job. - -An ad-hoc bloomfilter can be created with the `export_blocklist` command but it isn't considered for the cron job (or :ref:`stashing `) - -------------------- -Bloomfilter records -------------------- - -.. _blocklist-doc-bloomfilter-records: - -A record is created on Remote Settings for each bloomfilter and the filter uploaded as an attachment. The `generation_time` property represents the point in time when all previous addon guid + versions and blocks were used to generate the bloomfilter. -An add-on version/file from before this time will definitely be accounted for in the bloomfilter so we can reliably assert if it's blocked or not. -An add-on version/file from after this time can't be reliably asserted - there may be false positives or false negatives. - -See https://github.com/mozilla/addons-server/issues/13695 and https://github.com/mozilla/addons-server/blob/master/src/olympia/blocklist/cron.py - - -Bloomfilter record example -^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. code-block:: json - - { - "attachment": { - "hash": "37ba24caec49afe6c97c424623e226e31ad052286ffa66d794eb82497dabc279", - "size": 28561, - "filename": "filter.bin", - "location": "staging/addons-bloomfilters/1234567890.bin", - "mimetype": "application/octet-stream" - }, - "key_format": "{guid}:{version}", - "attachment_type": "bloomfilter-base", - "generation_time": 1587990908999, - } - - --------- -Stashing --------- - -.. _blocklist-doc-stashing: - -Because the bloomfilter files can be quite large "stash" files are also generated, which represent the changes since the previous bloomfilter generation and can be used by Firefox instead to save on bandwidth. - -Multiple stashes can be applied by Firefox (in chronological order) to match the state of an up-to-date bloomfilter. - - -Stash record example -^^^^^^^^^^^^^^^^^^^^ - -.. 
code-block:: json - - { - "stash": { - "blocked": [ - "{6f6b1eaa-bb69-4cdb-a24f-1014493d4290}:10.48", - "kittens@pioneer.mozilla.com:1.2", - "kittens@pioneer.mozilla.com:1.1", - "{b01e0601-eddc-4306-886b-8a4fb5c38a1e}:1", - "{232f11df-20ca-49d4-94eb-e3e63d7ae773}:1.1.2", - "kittens@pioneer.mozilla.com:1.3", - ], - "unblocked": [ - "{896aff0b-d86e-4dd5-9097-5869579b4c28}:1.2", - "{95ffc924-6ea7-4dfb-8f7b-1dd44f2159d1}:1.22.2" - ] - }, - "key_format": "{guid}:{version}", - "stash_time": 1587990908999, - } - -The blocked items represent new versions that should be blocked in addition to any matches in the bloomfilter; the unblocked items represent versions that shouldn't be blocked (even though they would match the bloomfilter). `stash_time` is a timestamp that can be relied on to order the stashes. - - ------------------------------ -addons-bloomfilter collection ------------------------------ - -.. _blocklist-doc-collection: - -The collection on Remote Settings at any given point will consist of a single record with `"attachment-type": "bloomfilter-base"`, which is the base bloomfilter to compare the stash files to, and potentially subsequent records which either contain an attachment with `"attachment-type": "bloomfilter-full"`, or stash data directly in the data property. The client-side algorithm would be to: - -* Get the entire collection from Remote Settings (the implementation supports diffing so only new records would be downloaded). -* Download the base bloomfilter attachment (`"attachment-type": "bloomfilter-base"`) if it hasn't already been downloaded. -* Gather the stash records and consolidate them, taking into account timestamps so later stashes override earlier stashes. - - -Stashing support disabled in Firefox -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If stashing support is disabled in a Firefox version the stash records can be ignored and all bloomfilters considered instead. (Records with a bloomfilter attachment always have a `generation_time` field). Firefox would just download the latest attachment and use that as it's bloomfilter. - - -------- -Process -------- - -.. _blocklist-doc-process: - -The server process is: - * If the `blocklist_mlbf_submit` waffle switch is enabled, check if there have been any changes to the blocklist since the previous execution of the cron job - if not return without any action. (not blocked guids) - * Produce a list of all "guid:version" combinations of all signed webextension addons/versions in the database. (blocked guids) - * Produce a list of "guid:version" combinations that the Block records cover. Blocks have a minimum and maximum version range - 0 being the minimum, and * meaning infinity, so 0 - * would be all versions of an add-on. - * Create and verify a bloomfilter with these two lists (we use https://github.com/mozilla/filter-cascade/); save the filter file and the two lists (as JSON) - - * Compare list of blocked guids from this execution to the base bloomfilter file. If there have been few changes then write those changes to a stash JSON blob - - #. Upload the stash as JSON data in record - #. Upload the filter as an attachment to a separate record with the type `bloomfilter-full` - * If there have been many changes then: - - #. clear the collection on Remote Settings - #. Upload the filter as an attachment to a separate record with the type `bloomfilter-base` - - -================ -Legacy Blocklist -================ - -.. 
_blocklist-doc-legacy: - -To populate the blocklist on AMO the legacy blocklist on Remote Settings was imported; all guids that matched addons on AMO (and that had at least one webextension version) were added; any guids that were regular expressions were "expanded" to individual records for each addon present in the AMO database. - -Support for importing the legacy blocklist into AMO, and exporting changes from AMO into the legacy blocklist, has now been removed; it is no longer possible to propagate changes made to the v2 blocklist via the remote-settings web admin tool to the v3 blocklist held on AMO, or visa versa. diff --git a/docs/topics/development/acl.md b/docs/topics/development/acl.md new file mode 100644 index 000000000000..ab64d49b53fa --- /dev/null +++ b/docs/topics/development/acl.md @@ -0,0 +1,35 @@ +(acl)= + +# Access Control Lists + +## How permissions work + +We use the `access.models.GroupUser` and `Group` models to define +what access groups a user is a part of, and each group has `rules` defining +which permissions they grant their members, separated by `,`. + +Permissions that you can use as filters can be either explicit or general. + +For example `Admin:EditAddons` means only someone with that permission will +validate. + +If you simply require that a user has _some_ permission in the _Admin_ group +you can use `Admin:%`. The `%` means "any." + +Similarly a user might be in a group that has explicit or general permissions. +They may have `Admin:EditAddons` which means they can see things with that +same permission, or things that require `Admin:%`. + +If a user has a wildcard, they will have more permissions. For example, +`Admin:*` means they have permission to see anything that begins with +`Admin:`. + +A superuser has the permission `*:*` and can therefore +see everything. + +## Django Admin + +Django admin relies on 2 things to gate access: + +- To access the admin itself, `UserProfile.is_staff` needs to be `True`. Our custom implementation allows access to users with a `@mozilla.com` email. +- To access individual modules/apps, `UserProfile.has_perm(perm, obj)` and `UserProfile.has_module_perms(app_label)` need to return `True`. Our custom implementation uses the `Group` of the current user as above, with a mapping constant called `DJANGO_PERMISSIONS_MAPPING` which translates Django-style permissions into our own. diff --git a/docs/topics/development/acl.rst b/docs/topics/development/acl.rst deleted file mode 100644 index 316151c049eb..000000000000 --- a/docs/topics/development/acl.rst +++ /dev/null @@ -1,39 +0,0 @@ -.. _acl: - -==================== -Access Control Lists -==================== - -How permissions work --------------------- - -On top of that we use the ``access.models.GroupUser`` and ``Group`` to define -what access groups a user is a part of, and each group has ``rules`` defining -which permissions they grant their members, separated by ``,``. - -Permissions that you can use as filters can be either explicit or general. - -For example ``Admin:EditAddons`` means only someone with that permission will -validate. - -If you simply require that a user has `some` permission in the `Admin` group -you can use ``Admin:%``. The ``%`` means "any." - -Similarly a user might be in a group that has explicit or general permissions. -They may have ``Admin:EditAddons`` which means they can see things with that -same permission, or things that require ``Admin:%``. - -If a user has a wildcard, they will have more permissions. 
For example, -``Admin:*`` means they have permission to see anything that begins with -``Admin:``. - -The notion of a superuser has a permission of ``*:*`` and therefore they can -see everything. - - -Django Admin ------------- - -Django admin relies on 2 things to gate access: -- To access the admin itself, ``UserProfile.is_staff`` needs to be ``True``. Our custom implementation allows access to users with a ``@mozilla.com`` email. -- To access individual modules/apps, ``UserProfile.has_perm(perm, obj)`` and ``UserProfile.has_module_perms(app_label)`` need to return ``True``. Our custom implementation uses the ``Group`` of the current user as above, with a mapping constant called ``DJANGO_PERMISSIONS_MAPPING`` which translates Django-style permissions into our own. diff --git a/docs/topics/development/branching.rst b/docs/topics/development/branching.md similarity index 55% rename from docs/topics/development/branching.rst rename to docs/topics/development/branching.md index 097497f6cba0..453ba9c5507f 100644 --- a/docs/topics/development/branching.rst +++ b/docs/topics/development/branching.md @@ -1,15 +1,11 @@ -.. _branching: +(branching)= -================ -Push From Master -================ +# Push From Master -We deploy from the `master`_ branch once a week. If you commit something to master -that needs additional QA time, be sure to use a `waffle`_ feature flag. +We deploy from the [master] branch once a week. If you commit something to master +that needs additional QA time, be sure to use a [waffle] feature flag. - -Local Branches --------------- +## Local Branches Most new code is developed in local one-off branches, usually encompassing one or two patches to fix a bug. Upstream doesn't care how you do local @@ -21,7 +17,7 @@ of related patches from a feature branch. The rule of thumb is to rebase and use fast-forward merge for single patches or a branch of unrelated bug fixes, but to use a merge commit if you have multiple commits that form a cohesive unit. -Here are some tips on `Using topic branches and interactive rebasing effectively `_. +Here are some tips on [Using topic branches and interactive rebasing effectively](http://blog.mozilla.com/webdev/2011/11/21/git-using-topic-branches-and-interactive-rebasing-effectively/). -.. _master: http://github.com/mozilla/addons-server/tree/master -.. _waffle: https://github.com/jsocol/django-waffle +[master]: http://github.com/mozilla/addons-server/tree/master +[waffle]: https://github.com/jsocol/django-waffle diff --git a/docs/topics/development/building_and_running_services.md b/docs/topics/development/building_and_running_services.md new file mode 100644 index 000000000000..364b81225dd5 --- /dev/null +++ b/docs/topics/development/building_and_running_services.md @@ -0,0 +1,181 @@ +# Building and Running Services + +## Dockerfile Details + +The Dockerfile for the **addons-server** project uses a multi-stage build to optimize the image creation process. Here's an overview of the key concepts and design decisions behind it: + +1. **Multi-Stage Build**: + - **Intent**: Multi-stage builds allow Docker to parallelize steps that don't depend on each other and to better cache between layers. This results in more efficient builds by reducing the size of the final image and reusing intermediate layers. + - **Layer Caching**: The use of `--mount=type=cache` arguments helps cache directories across builds, particularly useful for node and pip dependencies, dramatically speeding up future builds. + +2. 
**OLYMPIA_USER**: + - **Creating a Non-Root User**: The Dockerfile creates an `olympia` user to run the application. This allows the container to run processes as a non-root user, enhancing security by preventing privilege escalation. + - **Why Non-Root?**: Running containers as root is considered an antipattern for Python projects due to security vulnerabilities. Using a non-root user like `olympia` ensures that even if an attacker accesses the container, they cannot escalate privileges to the host. + +3. **Mounts in Docker Compose**: + - **Mounting Local Repository**: The volume `.:/data/olympia` mounts the local Git repository into the container, allowing real-time changes to files within the container. + - **Mounting Dependencies**: The volume `./deps:/deps` mounts the dependencies directory, enabling better caching across builds and providing visibility for debugging directly on the host. + +4. **Environment Variables for OLYMPIA_USER**: + - **Development Setup**: The `OLYMPIA_UID` .env variable is set to the host user ID, ensuring that the container runs with the correct permissions. + +### Best Practices for the Dockerfile + +- **Use as Few Instructions as Possible**: This minimizes the size of the image and reduces build times. +- **Split Long-Running Tasks**: Distinct stages improve caching and concurrency. +- **Prefer `--mount=type=bind` Over `COPY`**: Use bind mounts for files needed for a single command. Bind mounts do not persist data, so modified files will not be in the final layer. +- **Prefer Copying Individual Files Over Directories**: This reduces the likelihood of false cache hits. +- **Use `--mount=type=cache` for Caching**: Cache npm/pip dependencies to speed up builds. +- **Delay Copying Source Files**: This improves cache validity by ensuring that as many stages as possible can be cached. + +## Build Process + +The **addons-server** project uses BuildKit and Bake to streamline the image-building process. + +1. **BuildKit**: + - **Overview**: BuildKit is a modern Docker image builder that enhances performance, scalability, and extensibility. It allows for parallel build steps, caching, and improved efficiency. + - **Driver**: BuildKit uses a driver model to execute builds, with the `docker` driver being the default. + We use the `docker` driver to build our images as it is the fastest and fits the criteria for our project. + We have used the `docker-container` driver in the past, but it is slower due to transferring files in and out of the build container. + The docker driver is slightly slower at rebuilding cached layers but makes up for the difference by building the layers + where they are stored, on the host. + +2. **Bake**: + - **Overview**: Docker Bake is a tool for defining and executing complex build workflows. It simplifies multi-platform builds and allows for more granular control over the build process. + - **Using Bake**: We use Bake to enable building via Docker Compose consistently across local and CI builds. The `build` target in the `docker-compose.yml` file defines the build context and Dockerfile for the addons-server image. + +To build the Docker images for the project, use the following command: + +```sh +make docker_build_web +``` + +This command leverages BuildKit and Bake to efficiently build the required images. 
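+Under the hood, `make docker_build_web` is essentially a wrapper around `docker buildx bake`. As a rough, illustrative sketch only (the exact flags and the target name live in the Makefile and `docker-bake.hcl`, so treat them as assumptions):
+
+```sh
+# Approximation of what `make docker_build_web` runs; not the verbatim recipe.
+# Bake resolves the build configuration from docker-bake.hcl and .env.
+docker buildx bake --file docker-bake.hcl web
+```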
+By default buildx will use 2 configuration files:
+
+- [docker-bake.hcl](../../../docker-bake.hcl)
+- .env (Generated by running `make setup` locally)
+
+Bake will use the .env file to set environment variables in the build process
+and the docker-bake.hcl file to define the build process.
+
+You can specify additional make arguments to control the behaviour of the build:
+
+- `DOCKER_PUSH` - Push the image to the current registry, defined by the `DOCKER_TARGET` variable in .env.
+- `DOCKER_PROGRESS` - Control the build output verbosity. Default is "auto". Set to "plain" for more detailed output.
+- `DOCKER_METADATA_FILE` - Specify the file to store build metadata. Default is "buildx-bake-metadata.json".
+- `DOCKER_COMMIT` - Set the Git commit hash for the image. If not set, it will be determined automatically.
+- `DOCKER_BUILD` - Set a custom build number for the image. If not set, a timestamp will be used.
+- `DOCKER_VERSION` - Set a custom version for the image. If not set, it will be determined from Git tags.
+- `ARGS` - Pass additional arguments to the `docker buildx bake` command.
+
+These arguments allow you to customize various aspects of the build process,
+from controlling output verbosity to setting specific image metadata.
+
+When running local builds you can largely ignore these arguments
+and just use `make docker_build_web` to build the image.
+
+### Clearing Cache
+
+To clear the custom builder cache used for buildkit mount caching:
+
+```bash
+docker builder prune
+```
+
+Avoid using `docker system prune` as it does not clear the specific builder cache.
+
+### Docker Ignore
+
+Our [.dockerignore](../../../.dockerignore) file is used to ignore files and directories that should not be included in the build context.
+This is useful to reduce the final image size and speed up the build process.
+
+Because our image copies all files in the repository to the image, any time even one character in those files changes,
+the entire stage is busted and all files are re-copied. This can take 10-30 seconds. Ignoring files that are irrelevant
+reduces the number of times this happens and speeds up the build process.
+
+All files included in the .dockerignore are files we explicitly do not need in production containers.
+
+Docker ignore is "ignored" during development as we always mount the host repository into the container at runtime.
+
+> NOTE: Our dockerignore file is a superset of our gitignore file. Any files included in gitignore should also be included in dockerignore.
+> dockerignore also includes extra files we do check in to git but do not need in production containers, e.g. the `.github` directory containing GitHub Actions files.
+
+## Managing Containers
+
+Managing the Docker containers for the **addons-server** project involves using Makefile commands to start, stop, and interact with the services.
+
+1. **Starting Services**:
+   - Use `make up` to start the Docker containers:
+
+     ```sh
+     make up
+     ```
+
+   - This command ensures all necessary files are created and the Docker Compose project is running.
+
+2. **Stopping Services**:
+   - Use `make down` to stop and remove the Docker containers:
+
+     ```sh
+     make down
+     ```
+
+3. **Accessing Containers**:
+   - Access the web container for debugging:
+
+     ```sh
+     make shell
+     ```
+
+   - Access the Django shell within the container:
+
+     ```sh
+     make djshell
+     ```
+
+4. **Rebuilding Images**:
+   - Use `make up` to rebuild the Docker images if you make changes to the Dockerfile or dependencies.
Remember, `make up` is idempotent, ensuring your image is built and running based on the latest changes.
+
+This section provides a thorough understanding of the Dockerfile stages, build process using BuildKit and Bake, and commands to manage the Docker containers for the **addons-server** project. For more detailed information on specific commands, refer to the project's Makefile and Docker Compose configuration in the repository.
+
+## Docker Compose
+
+We use docker compose under the hood to orchestrate containers both locally and in CI.
+The `docker-compose.yml` file defines the services, volumes, and networks required for the project.
+
+Our docker compose project is split into a root [docker-compose.yml](../../../docker-compose.yml) file and additional files for specific environments,
+such as [docker-compose.override.yml](../../../docker-compose.override.yml) for CI environments.
+
+### Healthchecks
+
+We define healthchecks for the web and worker services to ensure that the containers are healthy and ready to accept traffic.
+The health checks ensure the Django WSGI server and Celery worker node are running and available to accept requests.
+
+### Environment specific compose files
+
+- **Local Development**: The `docker-compose.yml` file is used for local development. It defines services like `web`, `db`, `redis`, and `elasticsearch`.
+- **Private**: This file includes the customs service that is not open source and should therefore not be included by default.
+- **Override**: This file allows modifying the default configuration without changing the main `docker-compose.yml` file. This file is largely obsolete and should not be used.
+
+To mount with a specific set of docker compose files you can add the COMPOSE_FILE argument to `make up`. This will persist your setting to .env.
+
+```sh
+make up COMPOSE_FILE=docker-compose.yml:docker-compose.override.yml
+```
+
+Files should be separated with a colon.
+
+### Volumes
+
+Our project defines volumes to mount and share local data between services.
+
+- **data_redis,data_elastic,data_rabbitmq**: Used to persist service specific data in a named volume to avoid anonymous volumes in our project.
+- **data_mysql**: Used to persist the MySQL data in a named volume to avoid anonymous volumes in our project.
+Additionally this volume is "external" to allow the volume to persist across the container lifecycle. If you run `make down`, the data will not be destroyed.
+- **storage**: Used to persist local media files to nginx.
+
+We additionally mount several local directories to the web/worker containers.
+
+- **.:/data/olympia**: Mounts the local repository into the container to allow real-time changes to files within the container.
+- **./deps:/deps**: Mounts the dependencies directory to enable better caching across builds and provide visibility for debugging directly on the host. diff --git a/docs/topics/development/contributing.md b/docs/topics/development/contributing.md new file mode 100644 index 000000000000..23a2e7d2cf20 --- /dev/null +++ b/docs/topics/development/contributing.md @@ -0,0 +1,4 @@ +```{eval-rst}
+.. include:: ../../../.github/CONTRIBUTING.md
+   :parser: myst_parser.sphinx_
+``` diff --git a/docs/topics/development/contributing.rst b/docs/topics/development/contributing.rst deleted file mode 100644 index 65b992f08ead..000000000000 --- a/docs/topics/development/contributing.rst +++ /dev/null @@ -1 +0,0 @@ -..
include:: ../../../.github/CONTRIBUTING.rst diff --git a/docs/topics/development/data_management.md b/docs/topics/development/data_management.md new file mode 100644 index 000000000000..8d46d00c87d5 --- /dev/null +++ b/docs/topics/development/data_management.md @@ -0,0 +1,126 @@ +# Data Management
+
+Effective data management is crucial for the **addons-server** project. This section focuses on how the project handles persistent data, data snapshots, and initial data population.
+
+## Persistent Data Volumes
+
+The project uses persistent data volumes to store MySQL data. This ensures that data remains intact even when containers are stopped or removed. For details on how these volumes are defined, refer to the Docker Compose configuration in the repository.
+
+## External Mounts
+
+The use of an external mount allows for manual management of the data lifecycle. This ensures that data is preserved even if you run `make down`. By defining the MySQL data volume as external, it decouples the data lifecycle from the container lifecycle, allowing you to manually manage the data.
+
+## Data Initialization
+
+When you run `make up`, Make will run the `initialize` command for you. This command will check if the database and the elasticsearch index exist.
+
+If they don't exist, it will create them. This command can be run manually as well.
+
+```sh
+make initialize
+```
+
+This command is responsible for ensuring your local MySQL database is migrated, seeded, loaded with data and indexed.
+There are a number of different ways to execute this command. In most cases, the default behavior is what you want.
+But there are a few additional edge cases that it supports.
+
+### Clean the database
+
+```sh
+make initialize INIT_CLEAN=true
+```
+
+This will force the database to be recreated, and re-initialized.
+
+### Load a data backup
+
+```sh
+make initialize [INIT_LOAD=<path>]
+```
+
+This command will load a data backup from a specified path. The optional `INIT_LOAD` argument allows you to
+specify the path to the data backup file. If not specified, the initialize command will determine if
+data should be loaded based on the current state of the database, and will load the `_init` data backup.
+
+### Skip seeding
+
+```sh
+make initialize INIT_SKIP_SEED=true
+```
+
+This will skip the seeding of the database. This can be useful in CI or if you specifically
+want to avoid touching the previous data or creating a new _init backup.
+
+### Skip index recreation
+
+```sh
+make initialize INIT_SKIP_INDEX=true
+```
+
+This will skip the recreation of the elasticsearch index. This can be useful in CI or if you specifically
+want to avoid touching the previous elasticsearch index.
+
+> NOTE: if your database is modified significantly and you don't re-index elasticsearch you could end up with
+> a broken addons-frontend.
+
+## Data seeding
+
+`addons-server` uses a data seeding mechanism to populate the database with the initial data. This data is used to
+bootstrap the database with addons and other data to enable development.
+
+The data seed is treated just like a data backup with a special name `_init`. To recreate the data seed run:
+
+```sh
+make seed_data
+```
+
+This will flush the current database, remove the _init backup directory if it exists, run the seed commands,
+and finally dump the data back into the _init backup directory.
+
+The _init backup is used to populate the database with initial data during the initialization process.
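+
+Putting the seeding and initialization pieces together, a typical local reseed looks like this (both commands are described above; the comments summarize the documented behavior):
+
+```sh
+make seed_data   # flush the database, run the seed commands, dump a fresh _init backup
+make initialize  # migrate, seed, load and index as needed; loads the _init backup when appropriate
+```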
+
+## Data backups
+
+You can export and load data snapshots to manage data states across different environments or for backup purposes.
+The Makefile provides commands to facilitate this.
+These commands rely internally on [django-dbbackup](https://django-dbbackup.readthedocs.io/en/stable/).
+
+- **Data dump**:
+
+  ```sh
+  make data_dump [ARGS="--name <name> --force"]
+  ```
+
+  This command creates a dump of the current MySQL database. The command accepts an optional `name` argument which will determine
+  the name of the directory created in the backup directory. By default it uses a timestamp to ensure uniqueness.
+
+  You can also specify the `--force` argument to overwrite an existing backup with the same name.
+
+- **Loading Data**:
+
+  ```sh
+  make data_load [ARGS="--name <name>"]
+  ```
+
+  This command will load data from an existing backup directory, synchronize the storage directory and reindex elasticsearch.
+  The name is required and must match a directory in the backup directory.
+
+## Hard Reset Database
+
+The actual MySQL database is created and managed by the `mysqld` container. The database is created on container start
+and the actual data is stored in a persistent data volume. This enables data to persist across container restarts.
+
+`addons-server` assumes that a database named `olympia` already exists and most data management commands will fail
+if it does not.
+
+If you need to hard reset the database (for example, to start with a fresh state), you can use the following command:
+
+```bash
+make down && make docker_mysqld_volume_remove
+```
+
+This will stop the containers and remove the `mysqld` data volume from Docker. The next time you run `make up` it will
+create a new empty volume for you and MySQL will recreate the database.
+
+> NOTE: removing the data volume will remove the actual data! You can and should save a backup before doing this
+> if you want to keep the data. diff --git a/docs/topics/development/debugging.rst b/docs/topics/development/debugging.rst deleted file mode 100644 index 8a1356b0fbe9..000000000000 --- a/docs/topics/development/debugging.rst +++ /dev/null @@ -1,94 +0,0 @@ -.. _debugging:
-
-=========
-Debugging
-=========
-
-The :ref:`docker setup ` uses supervisord to run the
-django runserver. This means if you want to access the management server
-from a shell to run things like ipdb_ you still can.
-
-Using ipdb
-----------
-
-As with ipdb normally just add a line in your code at the relevant point:
-
-.. code-block:: python
-
-    import ipdb; ipdb.set_trace()
-
-Next connect to the running web container::
-
-    make debug
-
-This will bring the Django management server to the foreground and you
-can interact with ipdb as you would normally. To quit you can just type
-``Ctrl+c``.
-
-All being well it should look like this::
-
-    $ make debug
-    docker exec -t -i olympia_web_1 supervisorctl fg olympia
-    :/opt/rh/python27/root/usr/lib/python2.7/site-packages/celery/utils/__init__.py:93
-    11:02:08 py.warnings:WARNING /opt/rh/python27/root/usr/lib/python2.7/site-packages/jwt/api_jws.py:118: DeprecationWarning: The verify parameter is deprecated. Please use options instead.
-      'Please use options instead.', DeprecationWarning)
-    :/opt/rh/python27/root/usr/lib/python2.7/site-packages/jwt/api_jws.py:118
-    [21/Oct/2015 11:02:08] "PUT /en-US/firefox/api/v4/addons/%40unlisted/versions/0.0.5/ HTTP/1.1" 400 36
-    Validating models...
- - 0 errors found - October 21, 2015 - 13:52:07 - Django version 1.6.11, using settings 'settings' - Starting development server at http://0.0.0.0:8000/ - Quit the server with CONTROL-C. - [21/Oct/2015 13:57:56] "GET /static/img/app-icons/16/sprite.png HTTP/1.1" 200 3810 - 13:58:01 py.warnings:WARNING /opt/rh/python27/root/usr/lib/python2.7/site-packages/celery/task/sets.py:23: CDeprecationWarning: - celery.task.sets and TaskSet is deprecated and scheduled for removal in - version 4.0. Please use "group" instead (see the Canvas section in the userguide) - - """) - :/opt/rh/python27/root/usr/lib/python2.7/site-packages/celery/utils/__init__.py:93 - > /code/src/olympia/browse/views.py(148)themes() - 147 import ipdb;ipdb.set_trace() - --> 148 TYPE = amo.ADDON_THEME - 149 if category is not None: - - ipdb> n - > /code/src/olympia/browse/views.py(149)themes() - 148 TYPE = amo.ADDON_THEME - --> 149 if category is not None: - 150 q = Category.objects.filter(application=request.APP.id, type=TYPE) - - ipdb> - -Logging -------- - -Logs for the celery and Django processes can be found on your machine in the -`logs` directory. - -Using the Django Debug Toolbar ------------------------------- - -The `Django Debug Toolbar`_ is very powerful and useful when viewing pages from -the website, to check the view used, its parameters, the SQL queries, the -templates rendered and their context. - -To use it please see the official getting started docs: https://django-debug-toolbar.readthedocs.io/en/1.4/installation.html#quick-setup - -.. note:: - - You must know that using the Django Debug Toolbar will slow the website quite a - lot. You can mitigate this by deselecting the checkbox next to the ``SQL`` - panel. - - Also, please note that you should only use the Django Debug Toolbar if you need - it, as it makes CSP report only for your local dev. - -.. note:: - You might have to disable CSP by setting `CSP_REPORT_ONLY = True` in your - local settings because django debug toolbar uses "data:" for its logo, - and it uses "unsafe eval" for some panels like the templates or SQL ones. - -.. _ipdb: https://pypi.python.org/pypi/ipdb -.. _Django Debug Toolbar: http://django-debug-toolbar.readthedocs.io/ diff --git a/docs/topics/development/dependencies.md b/docs/topics/development/dependencies.md deleted file mode 100644 index e6b0c07fc83f..000000000000 --- a/docs/topics/development/dependencies.md +++ /dev/null @@ -1,63 +0,0 @@ -# Project Dependencies - -This document describes how to add/upgrade dependencies in the project. -We use pip to manage dependencies and hashin to lock versions. We use npm to manage frontend dependencies. - -## Python - -### Adding Python Dependencies - -We have 2 requirements files for python dependencies: - -- prod.txt -- dev.txt - -Prod dependencies are used by our django app in runtime. -They are strictly required to be installed in the production environment. - -Dev dependencies are used by our django app in development or by tools we use for linting, testing, etc. - -> If you add just the package name the script will automatically get the latest version for you. - -```bash -hashin -r -``` - -This will add hashes and sort the requirements for you adding comments to -show any package dependencies. - -When it's run check the diff and make edits to fix any issues before -submitting a PR with the additions. - -### Upgrading Python Dependencies - -We mostly rely on dependabot for this. TBD Add more details. 
- -## Frontend - -### Adding Frontend Dependencies - -We use npm to manage frontend dependencies. To add a new dependency, use the following command: - -```bash -npm install [package]@[version] --save --save-dev -``` - -NPM is a fully featured package manager and so you can use the standard CLI. - -## Updating/Installing dependencies - -To update/install all dependencies, run the following command: - -```bash -make update_deps -``` - -This will install all python and frontend dependencies. It also ensures olympia is installed locally. -By default this command will run in a docker container, but you can run it on a host by targetting the Makefile-docker - -```bash -make -f Makefile-docker update_deps -``` - -This is used in github actions for example that do not need a full container to run. diff --git a/docs/topics/development/dependency_management.md b/docs/topics/development/dependency_management.md new file mode 100644 index 000000000000..7f73bd19bc68 --- /dev/null +++ b/docs/topics/development/dependency_management.md @@ -0,0 +1,112 @@ +# Dependency Management + +Managing dependencies effectively is crucial for maintaining a stable and consistent development environment. The **addons-server** project uses a well-structured approach to handle Python and Node.js dependencies. + +## Python Dependencies + +Python dependencies are managed using the Makefile and requirements files. All dependencies are installed into the `/deps` directory, which centralizes dependency management and simplifies data mounts. + +- **Environment Variables**: The project sets environment variables for Python CLIs to install dependencies in specific locations. This includes setting paths for `PIP_CACHE_DIR`, `PIP_SRC`, and others to use the `/deps` directory. + +- **Caching Mechanism**: By using Docker build stages, the project isolates the stages responsible for installing dependencies. This prevents these stages from re-running unless the actual dependency files are changed. Additionally, internal Python cache folders are cached, avoiding unnecessary re-downloads of packages and saving time and bandwidth. + +- **Requirements Files**: + - **`pip.txt`**: Specifies the version of pip to guarantee consistency. + - **`prod.txt`**: Lists dependencies used in production deployments. + - **`dev.txt`**: Lists additional dependencies used for development. + +In the Docker build, only production dependencies are included. When running `make up`, the following command is executed to install development dependencies: + +```sh +make docker_extract_deps +``` + +This command installs the development dependencies inside the container, ensuring the development environment is fully set up. + +### Adding Python Dependencies + +We use `hashin` to manage package installs. It helps you manage your `requirements.txt` file by adding hashes to ensure that the installed package versions match your expectations. `hashin` is automatically installed in local developer environments. + +To add a new dependency: + +```bash +hashin -r {requirements} {dependency}=={version} +``` + +This will add hashes and sort the requirements for you, adding comments to show any package dependencies. Check the diff and make edits to fix any issues before submitting a PR with the additions. + +### Managing Python Dependencies + +All Python dependencies are defined in requirements files in the `requirements` directory. Our 3 most important files are: + +- **`pip.txt`**: Specifies the version of pip to guarantee consistency. 
+- **`prod.txt`**: Dependencies required in the production environment.
+- **`dev.txt`**: Dependencies used for development, linting, testing, etc.
+
+We use Dependabot to automatically create pull requests for updating dependencies.
+This is configured in the `.github/dependabot.yml` file targeting files in the `requirements` directory.
+
+### Managing Transitive Dependencies
+
+In local development and CI, we install packages using pip with the `--no-deps` flag to prevent pip from installing transitive dependencies. This approach gives us control over the full dependency chain, ensuring reproducible and trustworthy environments.
+
+## Pip Dependencies
+
+To determine the dependencies a given package requires, run:
+
+```bash
+pip show <package>
+```
+
+and check the `Requires` field, which lists the dependencies. Install missing dependencies manually.
+
+```{admonition} Note
+In the requirements file, add a comment above each transitive dependency noting which direct dependency requires it.
+```
+
+## Node.js Dependencies
+
+Node.js dependencies are managed using npm. Similar to Python dependencies, Node.js dependencies are installed into the `/deps` directory.
+
+- **Environment Variables**: Environment variables are set for Node.js CLIs to ensure that dependencies are installed in the `/deps` directory. This includes setting paths for `NPM_CONFIG_PREFIX` and `NPM_CACHE_DIR`.
+
+- **Caching Mechanism**: Node.js dependencies are also cached using Docker build stages. Internal npm cache folders are cached to avoid re-downloading packages unnecessarily.
+
+### Adding Frontend Dependencies
+
+To add a new frontend dependency:
+
+```bash
+npm install [package]@[version] --save
+```
+
+(Use `--save-dev` instead for dependencies that are only needed in development.)
+
+NPM is a fully-featured package manager, so you can use the standard CLI.
+
+## Caching in Docker Build
+
+The Dockerfile uses build stages to isolate the dependency installation process. This ensures that stages do not re-run unless the dependency files themselves change. The caching mechanism includes:
+
+- **Dependency Cache**: Both Python and Node.js dependencies are cached in the `/deps` directory.
+- **Cache Folders**: Internal cache folders for pip and npm are themselves cached to speed up the build process.
+
+## GitHub Actions Cache
+
+The project uses a custom GitHub Actions action (`./.github/actions/cache-deps`) to cache the `/deps` folder. This action significantly reduces install times for CI runs by leveraging the GitHub Actions cache.
+
+```yaml
+- name: Cache dependencies
+  uses: ./.github/actions/cache-deps
+```
+
+By caching the `/deps` folder, the project ensures that dependencies are quickly restored in CI environments, reducing overall build and test times.
+
+## Updating/Installing Dependencies
+
+To update/install all dependencies, run the following command:
+
+```bash
+make up
+```
+
+This will rebuild the Docker image with the current dependencies specified in the `requirements` and `package.json` files.
+We do not support updating dependencies in a running container as the /deps folder is not writable by the olympia user.
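+
+To tie the Python workflow together, here is an end-to-end example of adding a new direct dependency and pinning its transitive dependencies. The package names and versions are made up for illustration:
+
+```bash
+# 1. Pin the new direct dependency with hashes.
+hashin -r requirements/prod.txt somepackage==1.2.3
+
+# 2. Inspect which packages it pulls in (see the "Requires" field).
+pip show somepackage
+
+# 3. Because we install with --no-deps, pin each transitive dependency too,
+#    with a comment in prod.txt noting which direct dependency needs it.
+hashin -r requirements/prod.txt some-transitive-dep==4.5.6
+```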
diff --git a/docs/topics/development/docs.md b/docs/topics/development/docs.md new file mode 100644 index 000000000000..4c666a7d1205 --- /dev/null +++ b/docs/topics/development/docs.md @@ -0,0 +1,18 @@ +# Building Docs
+
+To simply build the docs:
+
+```bash
+docker compose run web make docs
+```
+
+If you're working on the docs, use `make loop` to keep your built pages
+up-to-date:
+
+```bash
+make shell
+cd docs
+make loop
+```
+
+Open `docs/_build/html/index.html` in a web browser. diff --git a/docs/topics/development/docs.rst b/docs/topics/development/docs.rst deleted file mode 100644 index 029c3712b752..000000000000 --- a/docs/topics/development/docs.rst +++ /dev/null @@ -1,16 +0,0 @@ -=============
-Building Docs
-=============
-
-To simply build the docs::
-
-    docker compose run web make docs
-
-If you're working on the docs, use ``make loop`` to keep your built pages
-up-to-date::
-
-    make shell
-    cd docs
-    make loop
-
-Open ``docs/_build/html/index.html`` in a web browser. diff --git a/docs/topics/development/error_pages.rst b/docs/topics/development/error_pages.md similarity index 55% rename from docs/topics/development/error_pages.rst rename to docs/topics/development/error_pages.md index 14340967c210..c416894dd8b8 100644 --- a/docs/topics/development/error_pages.rst +++ b/docs/topics/development/error_pages.md @@ -1,13 +1,13 @@ -.. _error: +(error)=
-===========
-Error Pages
-===========
+# Error Pages
 When running Django locally you get verbose error pages instead of the
 standard ones. To access the HTML for the standard error pages, you can
-access the urls::
+access the urls:
- /services/403
- /services/404
- /services/500
+```
+/services/403
+/services/404
+/services/500
+``` diff --git a/docs/topics/development/github_actions.md b/docs/topics/development/github_actions.md new file mode 100644 index 000000000000..0b2d962f4910 --- /dev/null +++ b/docs/topics/development/github_actions.md @@ -0,0 +1,206 @@ +# GitHub Actions
+
+The **addons-server** project uses GitHub Actions to automate testing and building processes in the CI environment.
+The CI pipeline is broken into workflows, jobs and steps that run on containers in GitHub's cloud environment.
+All workflows are defined in the _.github/workflows_ directory.
+
+## Overview
+
+Here’s an overview of the existing CI workflows and their architecture:
+
+Workflows are run on events. Events determine the reason for running the workflow.
+A workflow can be triggered by multiple events. {ref}`Learn more <configuration>` about configuring events below.
+
+Generally, we run addons-server inside its own docker container. This means every workflow generally follows the same pattern.
+
+### Build the docker image
+
+Run the {ref}`build-docker <actions_build_docker>` action to build the docker image from the current ref in GitHub.
+This action can be configured to your needs to build, push, and/or upload the image to be used later.
+The build passing is itself a good first step to test the code, and you can further test after the image is ready.
+
+### Run the docker image
+
+Define a job that uses the {ref}`run-docker <actions_run_docker>` reusable action. This action runs our docker compose
+project in the CI container, allowing you to execute the same commands you would run locally.
+This is the foundation of nearly every test we have in CI.
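+
+As an illustrative sketch, a job following this pattern might look like the snippet below. The input names are assumptions for the example; the real interface is defined in `.github/actions/run-docker/action.yml`:
+
+```yaml
+jobs:
+  test:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      # Hypothetical inputs; check the action definition for the real ones.
+      - name: Run the test suite inside the compose project
+        uses: ./.github/actions/run-docker
+        with:
+          run: make test
+```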
(configuration)=
+## Configuration
+
+- See GitHub's documentation for workflow configuration, event payloads, and syntax.
+- Reusable workflows are prefixed with `_`.
+- Prefer reusable workflows over actions.
+
+### Workflows
+
+Workflows are the top level configuration object for GitHub Actions. Here is an example:
+
+```yaml
+name: Set a name
+
+on:
+  pull_request:
+    branches:
+      - master
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.ref }}-${{ toJson(inputs) }}
+  cancel-in-progress: true
+
+jobs:
+  job1:
+    ...
+```
+
+> Important: Always define the most specific event triggers possible,
+> and always set concurrency groups to prevent too many instances of a workflow running simultaneously.
+
+### Reusable Workflows
+
+We use reusable workflows to enable calling one workflow from another.
+This allows better encapsulation of a set of logic that might itself require multiple jobs, matrix jobs
+or direct access to built in context like secrets. Otherwise they are conceptually similar to {ref}`reusable actions <reusable_actions>`.
+
+Reusable workflows should define a constant in their _concurrency:group_ to prevent deadlock with their triggering workflow.
+
+```yaml
+concurrency:
+  group: static-${{ github.workflow }}...
+```
+
+The unique, static key prevents the reusable workflow's concurrency group (which would otherwise match the calling workflow's) from over-matching.
+
+(_test_yml)=
+#### _test.yml
+
+[link](../../../.github/workflows/_test.yml)
+
+Our main testing workflow runs a suite of tests verifying the docker image and the Django code within it are running as expected.
+
+(_test_main_yml_)=
+#### _test_main.yml
+
+[link](../../../.github/workflows/_test_main.yml)
+
+This workflow is a branch of our _test.yml workflow, running specifically the main pytest suite.
+It is split into its own workflow because it runs via a matrix strategy and spins up a lot of jobs.
+
+(reusable_actions)=
+### Reusable Actions
+
+Reusable actions are like reusable workflows, but instead of running on their own runner
+they run directly as a step in a given workflow's runner container.
+
+(actions_context)=
+#### context
+
+[link](../../../.github/actions/context/action.yml)
+
+This action provides additional context based on the _github_ context object. Most importantly it helps us determine
+if we are running on a fork or if various meta events (release_tag, release_master) match the current context.
+These contextual values are relevant globally and should return the same values no matter where context is called,
+so context runs as an action and accepts no inputs.
+
+(actions_build_docker)=
+#### build-docker
+
+[link](../../../.github/actions/build-docker/action.yml)
+
+The main action to build our docker image.
+It builds a docker image based on the current state of the code, setting the appropriate metadata
+based on context.
+
+(actions_run_docker)=
+#### run-docker
+
+[link](../../../.github/actions/run-docker/action.yml)
+
+The main action to run our docker compose project. This action is configurable to run a specified command, with specified services,
+and even a configurable compose file. Importantly this action will pull an image via the digest or version, and if it cannot find the image
+will build it locally to run the current state of the codebase.
+
+(actions_login_docker)=
+#### login-docker
+
+[link](../../../.github/actions/login-docker/action.yml)
+
+Login to Docker Hub to push the publicly available docker image. This action authenticates using repository secrets so it cannot
+be used in forks.
It also returns the registry and image properties expected by Docker Hub in the tag.
+
+(actions_login_ga)=
+#### login-gar
+
+[link](../../../.github/actions/login-gar/action.yml)
+
+Login to [GAR][gar_link] (Google Artifact Registry) to push the production docker image.
+This action authenticates using repository secrets so it cannot be used in forks.
+It also returns the registry and image properties expected by GAR in the tag.
+
+### Actions vs Workflows
+
+Some of our reusable logic is in reusable actions and some in workflows. There are some important tradeoffs worth mentioning
+that inform the decision for which to choose in a particular use case.
+
+1. Actions run ON a job, workflows run AS a job. If the logic you need requires context from the calling job,
+like authentication credentials, created files, etc., then an action is the way to go. Workflows are great for code isolation
+or if your logic might benefit from being split into multiple jobs.
+
+2. Actions are steps. Actions run as a step ON a job (see above), so they do not check out code on their own, cannot access secrets,
+and cannot define their own runner, set timeouts, or set environment variables. Actions should be for very isolated logic
+that really executes a single step in a job.
+
+3. Workflows have their own concurrency considerations. When using reusable workflows the concurrency group
+can clash with the current workflow or even (if not careful) with other workflows. Be careful and set strong concurrency groups.
+
+4. Workflow jobs are collapsed in the GitHub Actions UI. This is a nice feature if you need to trigger many jobs in parallel,
+like {ref}`test_main <_test_main_yml_>` does. Each of the jobs is collapsible in the UI, making it easier to clean up the view.
+
+For the most part actions are simpler and should be the go-to method for extracting reusable logic. Workflows are nice
+when you need to organize the logic into multiple jobs or require extreme isolation from the rest of the workflow.
+
+## Gotchas
+
+### workflow_dispatch and workflow_call inputs should be identical
+
+Best practice should be to define all reusable workflows with both a _workflow_dispatch_ and a _workflow_call_ event trigger.
+The inputs for each should be identical. This allows testing and otherwise triggering reusable workflows directly or via
+another workflow with the same parameters and expectations.
+
+### github object contents depend on the event triggering the workflow
+
+One of the reasons we have the {ref}`context action <actions_context>` is because the information embedded in the github
+object depends on the event that triggered a workflow, making finding a certain piece of information depend on the 'context'.
+Be careful using the github object directly as you must consider many edge cases. Consult the context action and potentially
+introduce an important contextual value there so it can be made consistent across different contexts.
+
+### github converts job/step/workflow outputs to string regardless of the underlying data type
+
+Even if you define an input with a specific datatype, outputs for steps, jobs and reusable workflows are all converted to strings.
+This is important when passing values from outputs to inputs as the data type might not actually be what you want or expect.
+
+Use:
+
+```yaml
+uses:
+with:
+  number: ${{ fromJSON(output.value) }}
+```
+
+to convert values back into numbers/booleans.
+
+### secrets are not available for workflows running on forks
GitHub Actions prevents forks from accessing secrets, so workflows that use secrets should be configured to either
+not rely on secrets or have fallback behaviour in place.
+
+### use context action to define global context
+
+Most global context should be defined in the {ref}`context <actions_context>` action instead of directly in workflows.
+
+### prevent invalid workflow configurations
+
+When reusable workflows are passed invalid outputs, GitHub will silently fail the workflow; to prevent this, always
+check the outcome of reusable workflow calls.
+
+[gar_link]: https://cloud.google.com/artifact-registry diff --git a/docs/topics/development/index.md b/docs/topics/development/index.md new file mode 100644 index 000000000000..1696b48f37c2 --- /dev/null +++ b/docs/topics/development/index.md @@ -0,0 +1,31 @@ +# Development
+
+Welcome to the development section of the **addons-server** documentation. This guide is designed to help developers quickly understand, set up, and manage the project's various services. By leveraging Docker Compose, we ensure modularity, isolation, and efficiency across development and testing environments. Here’s a concise overview of each section to get you up and running fast.
+
+```{toctree}
+:maxdepth: 2
+
+setup_and_configuration
+building_and_running_services
+makefile_commands
+testing_and_quality_assurance
+data_management
+dependency_management
+performance_and_optimization
+localization_and_internationalization
+troubleshooting_and_debugging
+github_actions
+error_pages
+style
+contributing
+branching
+vpn
+acl
+logging
+static-files
+search
+docs
+waffle
+remote_settings
+../../../README.rst
+``` diff --git a/docs/topics/development/index.rst b/docs/topics/development/index.rst deleted file mode 100644 index c67c7e70726e..000000000000 --- a/docs/topics/development/index.rst +++ /dev/null @@ -1,23 +0,0 @@ -===========
-Development
-===========
-
-.. toctree::
-   :maxdepth: 2
-
-   tests
-   debugging
-   dependencies
-   error_pages
-   testing
-   style
-   contributing
-   branching
-   vpn
-   acl
-   logging
-   translations
-   search
-   docs
-   waffle
-   ../../../README.rst diff --git a/docs/topics/development/localization_and_internationalization.md b/docs/topics/development/localization_and_internationalization.md new file mode 100644 index 000000000000..e53db67b719e --- /dev/null +++ b/docs/topics/development/localization_and_internationalization.md @@ -0,0 +1,111 @@ +# Localization and Internationalization
+
+Localization and internationalization are important aspects of the **addons-server** project, ensuring that the application can support multiple languages and locales. This section covers the key concepts and processes for managing localization and internationalization.
+
+## Locale Management
+
+Locale management involves compiling and managing translation files. The **addons-server** project uses a structured approach to handle localization files efficiently.
+
+1. **Compiling Locales**:
+   - The Makefile provides commands to compile locale files, ensuring that translations are up-to-date.
+   - Use the following command to compile locales:
+
+     ```sh
+     make compile_locales
+     ```
+
+2. **Managing Locale Files**:
+   - Locale files are typically stored in the `locale` directory within the project.
+   - The project structure ensures that all locale files are organized and easily accessible for updates and maintenance.
+
+## Adding New Translations
+
+We write the English translations of our strings directly in the code, using the `gettext` function.
For example:
+
+```python
+from django.http import HttpResponse
+from django.utils.translation import gettext_lazy as _
+
+def my_view(request):
+    output = _('Welcome to my site.')
+    return HttpResponse(output)
+```
+
+When developing locally you should not really need to do anything special to see the translations. The `gettext` function will return the string as-is if it can't find a translation for it. In CI, translation strings are automatically extracted and uploaded to Pontoon for translation.
+
+## Translation Management
+
+Translation management involves handling translation strings and merging them as needed. The **addons-server** project follows best practices to ensure that translations are accurate and consistent.
+
+1. **Handling Translation Strings**:
+   - Translation strings are extracted from the source code and stored in `.po` files.
+   - The `.po` file format is used to manage locale strings, providing a standard way to handle translations.
+
+2. **Merging Translation Strings**:
+   - To extract new translatable strings from the codebase, use the following command:
+
+     ```sh
+     make extract_locales
+     ```
+
+   - This command scans the codebase and updates the `.po` files with new or changed translation strings.
+   - After extraction, scripts are used to merge new or updated translation strings into the existing locale files.
+   - This process ensures that all translations are properly integrated and maintained.
+
+## Additional Tools and Practices
+
+1. **Pontoon**:
+   - The **addons-server** project uses Pontoon, Mozilla's localization service, to manage translations.
+   - Pontoon provides an interface for translators to contribute translations and review changes, ensuring high-quality localization.
+
+2. **.po File Format**:
+   - The `.po` file format is a widely used standard for managing translation strings.
+   - It allows for easy editing and updating of translations, facilitating collaboration among translators.
+
+## Translating Fields on Models
+
+The `olympia.translations` app defines an `olympia.translations.models.Translation` model, but for the most part, you shouldn't have to use that directly. When you want to create a foreign key to the `translations` table, use `olympia.translations.fields.TranslatedField`. This subclasses Django's `django.db.models.ForeignKey` to make it work with our special handling of translation rows.
+
+### Minimal Model Example
+
+A minimal model with translations in addons-server would look like this:
+
+```python
+from django.db import models
+
+from olympia.amo.models import ModelBase
+from olympia.translations.fields import TranslatedField, save_signal
+
+class MyModel(ModelBase):
+    description = TranslatedField()
+
+models.signals.pre_save.connect(save_signal,
+                                sender=MyModel,
+                                dispatch_uid='mymodel_translations')
+```
+
+### How It Works Behind the Scenes
+
+A `TranslatedField` is actually a `ForeignKey` to the `translations` table. To support multiple languages, we use a special feature of MySQL allowing a `ForeignKey` to point to multiple rows.
+
+#### When Querying
+
+Our base manager has a `_with_translations()` method that is automatically called when you instantiate a queryset. It does two things:
+
+- Adds an extra `lang=lang` in the query to prevent query caching from returning objects in the wrong language.
+- Calls `olympia.translations.transformers.get_trans()` which builds a custom SQL query to fetch translations in the current language and fallback language.
+This custom query ensures that only the specified languages are considered and uses a double join with `IF`/`ELSE` for each field. The results are fetched using a slave database connection to improve performance.
+
+#### When Setting
+
+Every time you set a translated field to a string value, the `TranslationDescriptor` `__set__` method is called. It determines whether it's a new translation or an update to an existing translation and updates the relevant `Translation` objects accordingly. These objects are queued for saving, which happens on the `pre_save` signal to avoid foreign key constraint errors.
+
+#### When Deleting
+
+Deleting all translations for a field is done using `olympia.translations.models.delete_translation()`, which sets the field to `NULL` and deletes all attached translations. Deleting a specific translation is possible but not recommended due to potential issues with fallback languages and foreign key constraints.
+
+### Ordering by a Translated Field
+
+`olympia.translations.query.order_by_translation` allows you to order a `QuerySet` by a translated field, honoring the current and fallback locales like when querying.
+
+By following these practices, the **addons-server** project ensures that the application can support multiple languages and locales effectively. For more detailed instructions, refer to the project's Makefile and locale management scripts in the repository. diff --git a/docs/topics/development/logging.md b/docs/topics/development/logging.md new file mode 100644 index 000000000000..20a496145dcc --- /dev/null +++ b/docs/topics/development/logging.md @@ -0,0 +1,77 @@ +(logging)=
+
+# Logging
+
+Logging is fun. We all want to be lumberjacks. My muscle-memory wants to put
+`print` statements everywhere, but it's better to use `log.debug` instead.
+`print` statements make mod_wsgi sad, and they're not much use in production.
+Plus, `django-debug-toolbar` can hijack the logger and show all the log
+statements generated during the last request. When `DEBUG = True`, all logs
+will be printed to the development console where you started the server. In
+production, we're piping everything into `mozlog`.
+
+## Configuration
+
+The root logger is set up from `settings_base` in the `src/olympia/lib`
+directory of addons-server. It sets up sensible defaults, but you can tweak them to your liking:
+
+### Log level
+
+There is no unified log level; instead, every logger has its own log level
+because it depends on the context it's used in.
+
+### LOGGING
+
+See PEP 391 for formatting help. Messages will not propagate through a
+logger unless `propagate: True` is set.
+
+```python
+LOGGING = {
+    'loggers': {
+        'caching': {'handlers': ['null']},
+    },
+}
+```
+
+If you want to add more to this do something like this:
+
+```python
+LOGGING['loggers'].update({
+    'z.paypal': {
+        'level': logging.DEBUG,
+    },
+    'z.es': {
+        'handlers': ['null'],
+    },
+})
+```
+
+## Using Loggers
+
+The `olympia.core.logger` package uses global objects to make the same
+logging configuration available to all code loaded in the interpreter. Loggers
+are created in a pseudo-namespace structure, so app-level loggers can inherit
+settings from a root logger. olympia's root namespace is just `"z"`, in the
+interest of brevity.
In the caching package, we create a logger that inherits
+the configuration by naming it `"z.caching"`:
+
+```python
+import olympia.core.logger
+
+log = olympia.core.logger.getLogger('z.caching')
+
+log.debug("I'm in the caching package.")
+```
+
+Logs can be nested as much as you want. Maintaining log namespaces is useful
+because we can turn up the logging output for a particular section of olympia
+without becoming overwhelmed with logging from all other parts.
+
+### olympia.core.logging vs. logging
+
+`olympia.core.logger.getLogger` should be used everywhere. It returns a
+`LoggingAdapter` that inserts the current user's IP address and username into
+the log message. For code that lives outside the request-response cycle, it
+will insert empty values, keeping the message formatting the same.
+
+Complete logging docs: <http://docs.python.org/library/logging.html> diff --git a/docs/topics/development/logging.rst b/docs/topics/development/logging.rst deleted file mode 100644 index e47eebfa1729..000000000000 --- a/docs/topics/development/logging.rst +++ /dev/null @@ -1,81 +0,0 @@ -.. _logging:
-
-=======
-Logging
-=======
-
-Logging is fun. We all want to be lumberjacks. My muscle-memory wants to put
-``print`` statements everywhere, but it's better to use ``log.debug`` instead.
-``print`` statements make mod_wsgi sad, and they're not much use in production.
-Plus, ``django-debug-toolbar`` can hijack the logger and show all the log
-statements generated during the last request. When ``DEBUG = True``, all logs
-will be printed to the development console where you started the server. In
-production, we're piping everything into ``mozlog``.
-
-
-Configuration
--------------
-
-The root logger is set up from ``settings_base`` in the ``src/olympia/lib``
-of addons-server. It sets up sensible defaults, but you can tweak them to your liking:
-
-Log level
-~~~~~~~~~
-There is no unified log level, instead every logger has it's own log level
-because it depends on the context they're used in.
-
-LOGGING
-~~~~~~~
-See PEP 391 for formatting help. Messages will not propagate through a
-logger unless ``propagate: True`` is set.
-
- ::
-
-  LOGGING = {
-      'loggers': {
-          'caching': {'handlers': ['null']},
-      },
-  }
-
-If you want to add more to this do something like this::
-
-  LOGGING['loggers'].update({
-      'z.paypal': {
-          'level': logging.DEBUG,
-      },
-      'z.es': {
-          'handlers': ['null'],
-      },
-  })
-
-
-Using Loggers
--------------
-
-The ``olympia.core.logger`` package uses global objects to make the same
-logging configuration available to all code loaded in the interpreter. Loggers
-are created in a pseudo-namespace structure, so app-level loggers can inherit
-settings from a root logger. olympia's root namespace is just ``"z"``, in the
-interest of brevity. In the caching package, we create a logger that inherits
-the configuration by naming it ``"z.caching"``::
-
-    import olympia.core.logger
-
-    log = olympia.core.logger.getLogger('z.caching')
-
-    log.debug("I'm in the caching package.")
-
-Logs can be nested as much as you want. Maintaining log namespaces is useful
-because we can turn up the logging output for a particular section of olympia
-without becoming overwhelmed with logging from all other parts.
-
-
-olympia.core.logging vs. logging
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-``olympia.core.logger.getLogger`` should be used everywhere. It returns a
-``LoggingAdapter`` that inserts the current user's IP address and username into
-the log message.
For code that lives outside the request-response cycle, it -will insert empty values, keeping the message formatting the same. - -Complete logging docs: http://docs.python.org/library/logging.html diff --git a/docs/topics/development/makefile_commands.md b/docs/topics/development/makefile_commands.md new file mode 100644 index 000000000000..4b5a03e3be95 --- /dev/null +++ b/docs/topics/development/makefile_commands.md @@ -0,0 +1,129 @@ +# Makefile Commands + +The Makefile for the **addons-server** project provides a convenient interface for interacting with the Docker environment and managing common development tasks. This section details the key commands and their purposes. + +## Overview of Makefile + +The Makefile automates various tasks, reducing the need for manual Docker and shell commands. This ensures consistency and streamlines development workflows. + +## Makefile Structure + +The **addons-server** project splits its Makefile configuration into three files to separate concerns between the host operating system and Docker container environments: + +1. **Makefile**: + - Acts as the main entry point and delegates commands to either `Makefile-os` or `Makefile-docker` based on the environment. + +2. **Makefile-os**: + - Contains targets designed to run on the host operating system. + - **Gotcha**: If you run a command specified in `Makefile-os` inside the container (e.g., by running `make shell` and then the command), it might not be available because Make will ignore those commands. + +3. **Makefile-docker**: + - Contains targets designed to run inside the Docker container. + - If you run a target defined in `Makefile-docker` from the host, Make will redirect the command to the running container by prefixing the relevant `docker-compose exec` command. + +A common benefit of using Makefiles in this manner is the ability to coordinate complex behaviors that work in both local and CI environments from a single place. It also helps organize commands meant to be run on the host machine or inside a running container. + +**Note**: We aim to keep the majority of logic defined within the Makefiles themselves. However, if the logic becomes overly complex, it can be defined in a `./scripts/*` file and executed via a Make command. + +## Common Commands + +1. **`setup`**: + - **Purpose**: Initializes the project by creating necessary configuration files, including the `.env` file. + - **Usage**: + + ```sh + make setup + ``` + +2. **`up`**: + - **Purpose**: Builds the Docker image using BuildKit and Bake, and starts the containers as defined in the Docker Compose configuration. + - **Usage**: + + ```sh + make up + ``` + +3. **`down`**: + - **Purpose**: Stops and removes the running containers. + - **Usage**: + + ```sh + make down + ``` + +4. **`djshell`**: + - **Purpose**: Provides access to the Django shell within the `web` container. + - **Usage**: + + ```sh + make djshell + ``` + +5. **`shell`**: + - **Purpose**: Provides access to a shell within the `web` container. + - **Usage**: + + ```sh + make shell + ``` + +6. **`test`**: + - **Purpose**: Executes the entire test suite using pytest. + - **Usage**: + + ```sh + make test + ``` + +7. **`lint`**: + - **Purpose**: Enforces code style and quality standards using various linters. + - **Usage**: + + ```sh + make lint + ``` + +8. **`format`**: + - **Purpose**: Automatically formats the codebase according to predefined style guidelines. + - **Usage**: + + ```sh + make format + ``` + +## Specialized Commands + +1. 
**`data_export` and `data_restore`**:
+   - **Purpose**: Facilitates exporting and restoring data from the MySQL database.
+   - **Usage**:
+
+     ```sh
+     make data_export
+     make data_restore
+     ```
+
+2. **`build_docker_image`**:
+   - **Purpose**: Builds the Docker image using BuildKit and Bake.
+   - **Usage**:
+
+     ```sh
+     make build_docker_image
+     ```
+
+## Forcing a Specific Makefile
+
+You can force Make to run a specific command from a particular Makefile by specifying the file:
+
+```sh
+make -f <makefile> <command>
+```
+
+## Running Commands Inside the Container
+
+If you run a target defined in `Makefile-docker` from the host, Make will redirect the command to the running container. If the containers are not running, this might fail, and you will need to ensure the containers are running by executing:
+
+```sh
+make up
+```
+
+By using these Makefile commands, developers can streamline their workflow, ensuring consistency and efficiency in their development process. For more detailed information on each command, refer to the comments and definitions within the Makefiles themselves. diff --git a/docs/topics/development/performance_and_optimization.md b/docs/topics/development/performance_and_optimization.md new file mode 100644 index 000000000000..8cfc2429b0ca --- /dev/null +++ b/docs/topics/development/performance_and_optimization.md @@ -0,0 +1,52 @@ +# Performance and Optimization
+
+Optimizing performance is essential for maintaining efficient development and deployment workflows. This section covers the key strategies and tools used in the **addons-server** project for performance and optimization.
+
+## Docker Layer Caching
+
+Docker layer caching is a powerful feature that significantly speeds up the build process by reusing unchanged layers. This section explains the benefits and setup for Docker layer caching in the **addons-server** project.
+
+1. **Benefits of Docker Layer Caching**:
+   - **Reduced Build Times**: By caching intermediate layers, Docker can reuse these layers in subsequent builds, reducing the overall build time.
+   - **Efficient Resource Usage**: Caching helps save bandwidth and computational resources by avoiding redundant downloads and computations.
+   - **Consistency**: Ensures that identical builds produce identical layers, promoting consistency across builds.
+
+2. **Setup for Docker Layer Caching**:
+   - **Build Stages**: The Dockerfile uses build stages to isolate dependency installation and other tasks. This ensures that stages are only re-executed when necessary.
+   - **Cache Mounts**: The project uses `--mount=type=cache` in the Dockerfile to cache directories across builds. This is particularly useful for caching Python and npm dependencies, speeding up future builds.
+
+   Example snippet from the Dockerfile:
+
+   ```Dockerfile
+   RUN --mount=type=cache,target=/root/.cache/pip pip install -r requirements/prod.txt
+   RUN --mount=type=cache,target=/root/.npm npm install
+   ```
+
+   - **BuildKit**: Ensure BuildKit is enabled to take advantage of advanced caching features:
+
+   ```sh
+   export DOCKER_BUILDKIT=1
+   ```
+
+   - **GitHub Actions Cache**: The custom action (`./.github/actions/cache-deps`) caches the `/deps` folder, leveraging GitHub Actions cache to improve CI run times.
+
+## Performance Testing
+
+Performance testing is crucial for identifying bottlenecks and optimizing application performance. The **addons-server** project includes various strategies for performance testing and optimization.
+
+1. 
+ +## Performance Testing + +Performance testing is crucial for identifying bottlenecks and optimizing application performance. The **addons-server** project includes various strategies for performance testing and optimization. + +1. **Running Performance Tests**: + - The project uses `pytest` along with plugins like `pytest-split` and `pytest-xdist` to run tests in parallel, significantly reducing test times. + - Performance-specific tests can be run to measure the responsiveness and efficiency of the application. + +2. **Optimization Tips**: + - **Parallel Testing**: Use `pytest-xdist` to run tests in parallel: + + ```sh + pytest -n auto + ``` + + - **Test Splitting**: Use `pytest-split` to distribute tests evenly across multiple processes. + - **Code Profiling**: Use profiling tools to identify slow functions and optimize them. + - **Database Optimization**: Regularly monitor and optimize database queries to ensure efficient data retrieval and storage. + +By implementing these performance and optimization strategies, the **addons-server** project ensures efficient and reliable builds and tests, both locally and in CI environments. For more detailed instructions, refer to the project's Dockerfile, Makefile, and GitHub Actions configurations in the repository. diff --git a/docs/topics/development/remote_settings.md b/docs/topics/development/remote_settings.md new file mode 100644 index 000000000000..3d8b5bcb4ece --- /dev/null +++ b/docs/topics/development/remote_settings.md @@ -0,0 +1,116 @@ +# Remote Settings + +This page explains how to set up [Remote Settings][] locally, which can be +useful when working on the [Blocklist feature](../blocklist.md). AMO must be +running locally first. Make sure it's available at <http://olympia.test>. + +## Configure addons-server + +Add the following configuration variables to the `local_settings.py` file: + +``` +# When using Docker Desktop - `host.docker.internal` is a special host to allow +# containers to get access to the host system, but that won't work on Linux. +REMOTE_SETTINGS_API_URL = "http://host.docker.internal:8888/v1/" +REMOTE_SETTINGS_WRITER_URL = "http://host.docker.internal:8888/v1/" + +# For Linux, you need to find the IP address of the Remote Settings container: +# REMOTE_SETTINGS_API_URL = "http://172.17.0.1:8888/v1/" +# REMOTE_SETTINGS_WRITER_URL = "http://172.17.0.1:8888/v1/" +``` + +Next, reload everything by running `make up`. + +At this point, AMO should be able to find the Remote Settings local server that +we're going to set up next. + +## Set up Remote Settings + +In order to set up Remote Settings, follow these steps: + +1. Clone <https://github.com/mozilla/remote-settings> +2. Run `make start` in the `remote-settings` repository +3. Add `127.0.0.1 autograph` to your `/etc/hosts` file + +Verify that Remote Settings is healthy: + +``` +curl http://127.0.0.1:8888/v1/__heartbeat__ +{ + "storage": true, + "permission": true, + "cache": true, + "attachments": true, + "signer": true +} +``` + +## Configure the user/permissions + +First, we need an `admin` account. We can create one with the Remote Settings +API: + +``` +curl -X PUT -H 'Content-Type: application/json' \ + -d '{"data": {"password": "s3cr3t"}}' \ + http://127.0.0.1:8888/v1/accounts/admin +``` + +Next, we need a user for AMO: + +``` +curl -X PUT -H 'Content-Type: application/json' \ + -d '{"data": {"password": "amo_remote_settings_password"}}' \ + http://127.0.0.1:8888/v1/accounts/amo_remote_settings_username +```
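+ +To double-check the new credentials, you can query the server root as that user; Kinto-based servers like Remote Settings echo back the authenticated user id (an optional sanity check, not a required setup step): + +``` +curl -u amo_remote_settings_username:amo_remote_settings_password \ + http://127.0.0.1:8888/v1/ +``` + +The response should include `"user": {"id": "account:amo_remote_settings_username"}`.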
+ +We then need to give this user _write_ access to the `staging` bucket so that it +can create the `addons-bloomfilters` collection. This is where AMO will write +the new records, which will be propagated to the public bucket/collection +automatically: + +``` +curl -X PUT -H 'Content-Type: application/json' \ + -d '{"permissions": {"write": ["account:amo_remote_settings_username"]}}' \ + -u admin:s3cr3t \ + http://127.0.0.1:8888/v1/buckets/staging +``` + +``` +curl -X PUT -H 'Content-Type: application/json' \ + -u amo_remote_settings_username:amo_remote_settings_password \ + http://127.0.0.1:8888/v1/buckets/staging/collections/addons-bloomfilters +``` + +At this point, AMO should be able to authenticate to Remote Settings. This can +be verified with the following command: + +``` +curl http://olympia.test/services/__heartbeat__ +{ + "cinder": { + "state": true, + "status": "" + }, + "rabbitmq": { + "state": true, + "status": "" + }, + "remotesettings": { + "state": true, + "status": "" + }, + "signer": { + "state": true, + "status": "" + } +} +``` + +After AMO uploads records, the Remote Settings `addons-bloomfilters` collection +will be available at: +<http://127.0.0.1:8888/v1/buckets/blocklists/collections/addons-bloomfilters/records> + +We are done \o/ + +[Remote Settings]: https://remote-settings.readthedocs.io/en/latest/index.html diff --git a/docs/topics/development/search.md b/docs/topics/development/search.md new file mode 100644 index 000000000000..05c53d13b48d --- /dev/null +++ b/docs/topics/development/search.md @@ -0,0 +1,114 @@ +(amo-search-explainer)= + +# How does search on AMO work? + +## High-level overview + +AMO add-ons are indexed in our Elasticsearch cluster. For each search query +someone makes on AMO, we run a custom set of full-text queries against that +cluster. + +Our autocomplete (that you can see when starting to type a few characters in +the search field) uses the exact same implementation as a regular search +underneath. + +### Rules + +For each search query, we apply a number of rules that attempt to find the +search terms in each add-on name, summary and description. Each rule generates +a score that depends on: + +> - The frequency of the terms in the field we're looking at +> - The importance of each term in the overall index (the more common the term is across all add-ons, the less it impacts the score) +> - The length of the field (shorter fields give a higher score as the search term is considered more relevant if they make up a larger part of the field) + +Each rule is also given a specific boost affecting its score, making matches +against the add-on name more important than matches against the summary or +description. + +Add-on names receive special treatment: Partial or misspelled matches are +accepted to some extent while exact matches receive a significantly higher +score. + +### Scoring + +Each score for each rule is combined into a final score which we modify +depending on the add-on popularity on a logarithmic scale. "Recommended" and +"By Firefox" add-ons get an additional, significant boost to their score. + +Finally, results are returned according to their score in descending order. + +## Technical overview + +We store two kinds of data in the _addons_ index: indexed fields that are used for search purposes, and non-indexed fields that are meant to be returned (often as-is with no transformations) by the search API (allowing us to return search results data without hitting the database). The latter is not relevant to this document. + +Our search can be reached either via the API through {ref}`/api/v5/addons/search/ <addon-search>` or {ref}`/api/v5/addons/autocomplete/ <addon-autocomplete>`, which are used by our frontend.
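+ +For illustration, a search like the one the frontend performs can be reproduced with a plain API call (the host and parameters are only an example): + +``` +curl 'https://addons.mozilla.org/api/v5/addons/search/?q=video+downloader&app=firefox' +```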
+ +### Indexing + +The key fields we search against are `name`, `summary` and `description`. Because all can be translated, we index them multiple times: + +> - Once with the translation in the default locale of the add-on, under `{field}`, analyzed with just the `snowball` analyzer for `description` and `summary`, and a custom analyzer for `name` that applies the following filters: `standard`, `word_delimiter` (a custom version with `preserve_original` set to `true`), `lowercase`, `stop`, and `dictionary_decompounder` (with a specific word list) and `unique`. +> - Once for every translation that exists for that field, using Elasticsearch language-specific analyzer if supported, under `{field}_l10n_{analyzer}`. + +**In addition, for the name, we also have:** + +- All fields described above also contain a subfield called `raw` that holds a non-analyzed variant for exact matches in the corresponding language (stored as a `keyword`, with a `lowercase` normalizer). +- A `name.trigram` variant for the field in the default language, which uses a custom analyzer that depends on an `ngram` tokenizer (with `min_gram=3`, `max_gram=3` and `token_chars=["letter", "digit"]`). + +### Flow of a search query through AMO + +Let's assume we search on addons-frontend (not legacy). The search query hits the API and gets handled by `AddonSearchView`, which directly queries ElasticSearch and doesn't involve the database at all. + +There are a few filters that are described in the {ref}`/api/v5/addons/search/ docs <addon-search>` but most of them are not very relevant for text search queries. Examples are filters by guid, platform, category, add-on type or appversion (application version compatibility). Those filters are applied using a `filter` clause and shouldn't affect scoring. + +Much more relevant for text searches (and this is primarily used when you use the search on the frontend) is `SearchQueryFilter`. + +It composes various rules to define a more or less usable ranking: + +#### Primary rules + +These are the ones using the strongest boosts, so they are only applied to the add-on name. + +**Applied rules** (merged via `should`): + +1. A `dis_max` query with `term` matches on `name_l10n_{analyzer}.raw` and `name.raw` if the language of the request matches a known language-specific analyzer, or just a `term` query on `name.raw` (`boost=100.0`) otherwise - our attempt to implement exact matches +2. If we have a matching language-specific analyzer, we add a `match` query to `name_l10n_{analyzer}` (`boost=5.0`, `operator=and`) +3. A `phrase` match on `name` that allows swapped terms (`boost=8.0`, `slop=1`) +4. A `match` on `name`, using the standard text analyzer (`boost=6.0`, `analyzer=standard`, `operator=and`) +5. A `prefix` match on `name` (`boost=3.0`) +6. If a query is \< 20 characters long, a `dis_max` query (`boost=4.0`) composed of a fuzzy match on `name` (`boost=4.0`, `prefix_length=2`, `fuzziness=AUTO`, `minimum_should_match=2<2 3<-25%`) and a `match` query on `name.trigram`, with a `minimum_should_match=66%` to avoid noise + +#### Secondary rules + +These are the ones using the weakest boosts; they are applied to fields containing more text, like description, summary and tags. + +**Applied rules** (merged via `should`): + +1. Look for matches inside the summary (`boost=3.0`, `operator=and`) +2. Look for matches inside the description (`boost=2.0`, `operator=and`) + +If the language of the request matches a known language-specific analyzer, those are made using a `multi_match` query using `summary` or `description` and the corresponding `{field}_l10n_{analyzer}`, similar to how exact name matches are performed above, in order to support potential translations.
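+ +As a rough illustration (not the exact query addons-server generates), such a secondary rule for a request in a language with a supported analyzer could be expressed as a `multi_match` clause like this: + +```json +{ + "multi_match": { + "query": "video downloader", + "fields": ["summary", "summary_l10n_english"], + "operator": "and", + "boost": 3.0 + } +} +```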
+ +#### Scoring + +We combine scores through a `function_score` query that multiplies the score by several factors: + +> - A first multiplier is always applied through the `field_value_factor` function on `average_daily_users` with a `log2p` modifier +> - An additional `4.0` weight is applied if the add-on is public & non-experimental. +> - Finally, `5.0` weight is applied to By Firefox and Recommended add-ons. + +On top of the two sets of rules above, a `rescore` query is applied with a `window_size` of `10`. In production, we have 5 shards, so that should re-adjust the score of the top 50 results returned only. The rules used for rescoring are the same as those used in the secondary rules above, with just one difference: it's using `match_phrase` instead of `match`, with a slop of `10`. + +#### General query flow + +> 1. Fetch current translation +> 2. Fetch locale specific analyzer ([List of analyzers](https://github.com/mozilla/addons-server/blob/f099b20fa0f27989009082c1f58da0f1d0a341a3/src/olympia/constants/search.py#L13-L52)) +> 3. Apply primary and secondary *should* rules +> 4. Determine the score +> 5. Rescore the top 10 results per shard + +#### See also + +> - [addons-server search ranking tests](https://github.com/mozilla/addons-server/blob/master/src/olympia/search/tests/test_search_ranking.py) +> - [Elasticsearch relevancy algorithm](https://www.elastic.co/blog/practical-bm25-part-2-the-bm25-algorithm-and-its-variables) diff --git a/docs/topics/development/search.rst b/docs/topics/development/search.rst deleted file mode 100644 index 61f53731e1cf..000000000000 --- a/docs/topics/development/search.rst +++ /dev/null @@ -1,87 +0,0 @@ -.. _amo_search_explainer: - -============================ -How does search on AMO work? -============================ - -.. note:: - - This is documenting our current state of how search is implemented in addons-server. - We will be using this to plan future improvements so please note that we are - aware that the process written below is not perfect and has bugs here and there. - - Please see https://github.com/orgs/mozilla/projects/17#card-10287357 for more planning. - - -General structure -================= - -Our Elasticsearch cluster contains Add-ons (``addons`` index) and statistics data. The purpose of that document is to describe the add-ons part only though. We store two kinds of data for add-ons: indexed fields that are used for search purposes, and non-indexed fields that are meant to be returned (often as-is with no transformations) by the search API (allowing us to return search results data without hitting the database). The latter is not relevant to this document. - -Our search can be reached either via the API through :ref:`/api/v4/addons/search/ ` or :ref:`/api/v4/addons/autocomplete/ ` which are used by our addons-frontend as well as via our legacy pages (which are going away and off-topic here). - - -Indexing -======== - -The key fields we search against are ``name``, ``summary`` and ``description``. 
Because all can be translated, we index twice: -- Once with the translation in the language-specific analyzer if supported, under ``{field}_l10n_{analyzer}`` -- Once with the translation in the default locale of the add-on, under ``{field}``, analyzed with just the ``snowball`` analyzer for ``description`` and ``summary``, and a custom analyzer for ``name`` that applies the following filters: ``standard``, ``word_delimiter`` (a custom version with ``preserve_original`` set to ``true``), ``lowercase``, ``stop``, and ``dictionary_decompounder`` (with a specific word list) and ``unique``. - -In addition, for the name, both fields also contains a subfield called ``raw`` that holds a non-analyzed variant for exact matches in the corresponding language (stored as a ``keyword``, with a ``lowercase`` normalizer). We also have a ``name.trigram`` variant for the field in the default language, which is using a custom analyzer that depends on a ``ngram`` tokenizer (with ``min_gram=3``, ``max_gram=3`` and ``token_chars=["letter", "digit"]``) - - -Flow of a search query through AMO -================================== - -Let's assume we search on addons-frontend (not legacy) the search query hits the API and gets handled by ``AddonSearchView``, which directly queries ElasticSearch and doesn't involve the database at all. - -There are a few filters that are described in the :ref:`/api/v4/addons/search/ docs ` but most of them are not very relevant for raw search queries. Examples are filters by guid, platform, category, add-on type or appversion (application version compatibility). Those filters are applied using a ``filter`` clause and shouldn't affect scoring. - -Much more relevant for raw add-on searches (and this is primarily used when you use the search on the frontend) is ``SearchQueryFilter``. - -It composes various rules to define a more or less usable ranking: - -Primary rules -------------- - -These are the ones using the strongest boosts, so they are only applied to the add-on name. - -**Applied rules** (merged via ``should``): - -1. A ``dis_max`` query with ``term`` matches on ``name_l10n_{analyzer}.raw`` and ``name.raw`` if the language of the request matches a known language-specific analyzer, or just a ``term`` query on ``name.raw`` (``boost=100.0``) otherwise - our attempt to implement exact matches -2. If we have a matching language-specific analyzer, we add a ``match`` query to ``name_l10n_{analyzer}`` (``boost=5.0``, ``operator=and``) -3. A ``phrase`` match on ``name`` that allows swapped terms (``boost=8.0``, ``slop=1``) -4. A ``match`` on ``name``, using the standard text analyzer (``boost=6.0``, ``analyzer=standard``, ``operator=and``) -5. A ``prefix`` match on ``name`` (``boost=3.0``) -6. If a query is < 20 characters long, a ``dis_max`` query (``boost=4.0``) composed of a fuzzy match on ``name`` (``boost=4.0``, ``prefix_length=2``, ``fuzziness=AUTO``, ``minimum_should_match=2<2 3<-25%``) and a ``match`` query on ``name.trigram``, with a ``minimum_should_match=66%`` to avoid noise. - - -Secondary rules ---------------- - -These are the ones using the weakest boosts, they are applied to fields containing more text like description, summary and tags. - -**Applied rules** (merged via ``should``): - -1. Look for matches inside the summary (``boost=3.0``, ``operator=and``) -2. 
Look for matches inside the description (``boost=2.0``, ``operator=and``) - -If the language of the request matches a known language-specific analyzer, those are made using a ``multi_match`` query using ``summary`` or ``description`` and the corresponding ``{field}_l10n_{analyzer}``, similar to how exact name matches are performed above, in order to support potential translations. - - -Rescoring rules ---------------- - -On top of the two sets of rules above, a ``rescore`` query is applied with a ``window_size`` of ``10``. In production, we have 5 shards, so that -should re-adjust the score of the top 50 results returned only. The rules used for rescoring are the same used in the secondary rules above, with just one difference: it's using ``match_phrase`` instead of ``match``, with a slop of ``10``. - - -General query flow: ------------------- - - 1. Fetch current translation - 2. Fetch locale specific analyzer (`List of analyzers `_) - 3. Merge primary and secondary *should* rules - 4. Create a ``function_score`` query that uses a ``field_value_factor`` function on ``average_daily_users`` with a ``log2p`` modifier, as well as a ``4.0`` weight if the add-on is public & non-experimental. - 5. Add the ``rescore`` query to the mix diff --git a/docs/topics/development/setup_and_configuration.md b/docs/topics/development/setup_and_configuration.md new file mode 100644 index 000000000000..500f8259f00a --- /dev/null +++ b/docs/topics/development/setup_and_configuration.md @@ -0,0 +1,394 @@ +# Setup and Configuration + +This section covers how to run _addons-server_ locally. See [github actions](./github_actions.md) for running in CI. +This should be where you start if you are running _addons-server_ for the first time. +Setting up the local development environment for **addons-server** involves configuring Docker Compose to run the necessary services. +Follow these steps to get started: + +## Prerequisites + +- Ensure Docker and Docker Compose are installed on your system. +- Clone the **addons-server** repository from GitHub: + + ```sh + git clone https://github.com/mozilla/addons-server + cd addons-server + ``` + +## Running the docker compose project + +> TLDR; Just run `make up`. + +The _make up_ command ensures all necessary files are created on the host and starts the Docker Compose project, +including volumes, containers, networks, databases and indexes. +It is meant to be run frequently whenever you want to bring your environment "up". + +Here's a high-level overview of what _make up_ does: + +```make +.PHONY: up +up: setup docker_pull_or_build docker_compose_up docker_clean_images docker_clean_volumes data +``` + +- **setup**: Creates configuration files such as `.env` and `version.json`. +- **docker_pull_or_build**: Pulls or builds the Docker image based on the image version. +- **docker_compose_up**: Starts the Docker containers defined in [docker-compose.yml][docker-compose]. +- **docker_clean_images** and **docker_clean_volumes**: Cleans up unused Docker images and volumes. +- **data**: Ensures the database, seed, and index are created. + +What happens if you run `make up` when your environment is already running? +Well, that depends on what has changed since the last time you ran it. +Because `make up` is {ref}`idempotent <idempotence>` it will only run the commands that are necessary to bring your environment up to date. +If nothing has changed, nothing will happen because your environment is already in the desired state.
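+ +To see what state the services ended up in after `make up`, you can list them with standard Docker tooling (not a project-specific command): + +```sh +docker compose ps +```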
+ +## Shutting down your environment + +> TLDR; just run `make down` + +The `make down` command does almost the complete opposite of `make up`. +It stops all docker services and removes locally built images and any used volumes. + +Running `make down` will free up resources on your machine and can help if your environment gets stuck in a difficult-to-debug state. + +A common solution to many problems is to run `make down && make up`. + +> NOTE: When you run `make down`, it will clear all volumes except the data_mysqld volume. +> This is where your database and other persisted data is stored. +> If you want to start fresh, you can delete the data_mysqld volume. + +```sh +make down +make docker_mysqld_volume_remove # Remove the mysql database volume +make up +``` + +If you want to completely nuke your environment and start over as if you had just cloned the repo, +you can run `make clean_docker`. This will `make down` and remove all docker resources taking space on the host machine. + +### Accessing the Development App + +- Add the following entry to your `/etc/hosts` file to access **addons-server** via a local domain: + + ```sh + 127.0.0.1 olympia.test + ``` + +- The web application should now be accessible at `http://olympia.test`. +- You can access the web container for debugging and development: + + ```sh + make shell + ``` + +- To access the Django shell within the container: + + ```sh + make djshell + ``` + +## Configuring your environment + +Addons-server runs via [docker-compose](./building_and_running_services.md) and can be run in a local environment or on CI. +It is highly configurable to meet the requirements for different environments and use cases. +Here are some practical ways you can configure how _addons-server_ runs. + +### Build vs Pull + +By default, _addons-server_ builds a [docker image](./building_and_running_services.md) tagged _local_ before running the containers as a part of `make up`. +To run _addons-server_ with the _local_ image, just run `make up` like you normally would. It is the default. + +Instead of building, you can configure your environment to run a pulled image instead. To run a pulled image, +specify a {ref}`version or digest <version-vs-digest>` when calling `make up`. E.g. `make up DOCKER_VERSION=latest` to run +the latest published version of `addons-server`. + +For typical development it is recommended to use the default built image. It is aggressively cached and most closely +reflects the current state of your local repository. Pulling a published image can be useful if you have limited CPU +or if you want to run a very specific version of addons-server for testing a Pull request +or debugging a currently deployed version. + +(version-vs-digest)= +### Version vs Digest + +The default behavior is to build the docker image locally, but if you want to run addons-server with a remote image +you can specify a docker image version to pull with: + +```bash +make up DOCKER_VERSION=<version> +``` + +Version is the published tag of addons-server and corresponds to `mozilla/addons-server:<version>` in [dockerhub][addons-server-tags]. + +Specifying a version will configure docker compose to set the pull policy to _always_ and specify the _image_ property +in the docker compose config to pull the latest build of the specified `version`. Once you've specified a version, +subsequent calls to `make up` will pull the same version consistently ({ref}`see idempotence <idempotence>` for more details).
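+ +Conceptually, the effect on the compose configuration is equivalent to something like this (a simplified sketch, not the project's actual compose file): + +```yaml +services: +  web: +    image: mozilla/addons-server:latest +    pull_policy: always +```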
+ +What if you want to run an exact build of `addons-server`, +without fetching later versions that might subsequently get published to the same tag? + +You can specify a `DOCKER_DIGEST` to pull a specific build of addons-server. This can be very useful if you want +to guarantee the exact state of the image you are running. This is used in our own CI environments to ensure each job +runs with the exact same image built in the run. + +```bash +make up DOCKER_DIGEST=sha256:abc123 +``` + +A docker [build digest][docker-image-digest] corresponds to the precise state of a docker image. +Think of it like a content hash, though it's a bit more complicated than that. +Specifying a build digest means you will always run the exact same version +of the image and it will not change the contents of the image. + +Our [CI][ci-workflow] workflow builds and pushes a docker image on each run. To run the exact image built during a CI run, +copy the image digest from the _build_ job logs. Look for a log line like this: + +```shell +#36 pushing manifest for docker.io/mozilla/addons-server:pr-22395-ci@sha256:8464804ed645e429ccb3585a50c6003fafd81bd43407d8d4ab575adb8391537d +``` + +The version for the above image is `pr-22395-ci` and the digest is `sha256:8464804ed645e429ccb3585a50c6003fafd81bd43407d8d4ab575adb8391537d`. +To run the specific build of the exact run for `pr-22395` you would run: + +```bash + make up DOCKER_VERSION=pr-22395-ci +``` + +And to run exactly the version built in this run, even if it is not the latest version, you would run: + +```bash + make up DOCKER_DIGEST=sha256:8464804ed645e429ccb3585a50c6003fafd81bd43407d8d4ab575adb8391537d +``` + +If you specify both a version and a digest, the digest, being the more specific attribute, takes precedence. + +(idempotence)= +### Idempotence + +The `make up` command and all of its sub-commands are idempotent. +That means if the command is repeated with the same inputs you will always get the same result. +If you run + +```bash + make up DOCKER_VERSION=banana +``` + +and then run `make up` again, the .env file will have a docker tag specifying the version `banana`. +This prevents you from needing to constantly specify parameters over and over. +But it also means you have to remember what values you have set for different properties as they can have huge +impacts on what is actually running in your environment. + +`make up` logs the current environment specifications to the terminal as it is running so you should always know +what exactly is happening in your environment at any given time. + +Additionally, by defining all of the critical docker compose variables in a .env file, it means that the behaviour +of running commands via `make` or running the same command directly via the docker CLI should produce the same result. + +Though it is **highly recommended to use the make commands** instead of directly calling docker in your terminal. + +### Docker Compose Files + +- **[docker-compose.yml][docker-compose]**: The primary Docker Compose file defining services, networks, and volumes for local and CI environments. +- **[docker-compose.private.yml][docker-compose-private]**: Runs addons-server with the _customs_ service that is only available to Mozilla employees. + +Our docker compose files rely on substituted values, all of which are included in our .env file for direct CLI compatibility. +Any referenced _${VARIABLE}_ in the docker-compose files will be replaced with the value from the .env file. We have tests +that ensure any references are included in the .env file with valid values.
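+ +As a hypothetical illustration of how this substitution works (the variable name is invented for the example): + +```yaml +# docker-compose.yml +services: +  web: +    image: ${DOCKER_TAG} +``` + +With `DOCKER_TAG=mozilla/addons-server:local` in the .env file, `docker compose config` resolves the service image to `mozilla/addons-server:local`.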
+ +This means when you run `make docker_compose_up`, the output on your machine will be exactly the same as if you ran +`docker compose up -d --wait --remove-orphans --force-recreate --quiet-pull` directly. You **should** use make commands, +but sometimes you need to debug further what a command is running on the terminal and this architecture allows you to do that. + +By following these steps, you can set up your local development environment and understand the existing CI workflows for the **addons-server** project. For more details on specific commands and configurations, refer to the upcoming sections in this documentation. + +## Gotchas + +Here's a list of a few of the issues you might face when setting up your development environment: + +### Can't access the web server? + +Check you've created a hosts file entry pointing `olympia.test` to the relevant IP address. + +If containers are failing to start, use `docker compose ps` to check their running status. + +Another way to find out what's wrong is to run `docker compose logs`. + +### Getting "Programming error [table] doesn't exist"? + +Make sure you've run `make up`. + +### ConnectionError during initialize (elasticsearch container fails to start) + +When running `make up` without a working elasticsearch container, you'll get a ConnectionError. Check the logs with `docker compose logs`. If elasticsearch is complaining about `vm.max_map_count`, run this command on your computer or your docker-machine VM: + +```sh + sudo sysctl -w vm.max_map_count=262144 +``` + +This allows processes to allocate more [memory map areas](https://stackoverflow.com/a/11685165/4496684). + +### Connection to elasticsearch timed out (elasticsearch container exits with code 137) + +`docker compose up -d` brings up all containers, but running `make up` causes the elasticsearch container to go down. Running `docker compose ps` shows _Exited (137)_ against it. + +Update default settings in Docker Desktop - we suggest increasing the RAM limit to at least 4 GB in the Resources/Advanced section and clicking "Apply and Restart". + +### Port collisions (nginx container fails to start) + +If you're already running a service on port 80 or 8000 on your host machine, the _nginx_ container will fail to start. This is because the `docker-compose.override.yml` file tells _nginx_ to listen on port 80 and the web service to listen on port 8000 by default. + +This problem will manifest itself by the services failing to start. Here's an example for the most common case of _nginx_ not starting due to a collision on port 80: + +```shell + ERROR: for nginx Cannot start service nginx:..... + ...Error starting userland proxy: Bind for 0.0.0.0:80: unexpected error (Failure EADDRINUSE) + ERROR: Encountered errors while bringing up the project. +``` + +You can check what's running on that port by using (sudo is required if you're looking at port < 1024): + +```sh + sudo lsof -i :80 +``` + +We specify the ports _nginx_ listens on in the `docker-compose.override.yml` file. If you wish to override the ports you can do so by creating a new _docker-compose_ config and starting the containers using that config alongside the default config.
+ +For example, if you create a file called `docker-compose-ports.yml`: + +```yaml + services: +   nginx: +     ports: +       - 8880:80 +``` + +Next, you would stop and start the containers with the following: + +```sh + docker compose stop # only needed if running + docker compose -f docker-compose.yml -f docker-compose-ports.yml up -d +``` + +Now the container _nginx_ is listening on 8880 on the host. You can now proxy to the container _nginx_ from the host _nginx_ with the following _nginx_ config: + +```nginx + server { + listen 80; + server_name olympia.test; + location / { + proxy_pass http://olympia.test:8880; + } + } +``` + +### returned Internal Server Error for API route and version + +This can occur if the docker daemon has crashed. Running docker commands will return errors as the CLI cannot communicate +with the daemon. The best thing to do is to restart docker and to check your docker memory usage. The most likely cause +is limited memory. You can check the make commands to see how you can free up space on your machine. + +```bash + docker volume create addons-server_data_mysqld + request returned Internal Server Error for API route and version http://%2FUsers%2Fwilliam%2F.docker%2Frun%2Fdocker.sock/v1.45/volumes/create, check if the server supports the requested API version + make: *** [docker_mysqld_volume_create] Error 1 +``` + +### Mysqld failing to start + +Our mysqld service relies on a persistent data volume in order to save the database even after containers are removed. +It is possible that the volume is in an incorrect state during startup, which can lead to errors like the following: + +```bash + mysqld-1 | 2024-06-14T13:50:33.169411Z 0 [ERROR] [MY-010457] [Server] --initialize specified but the data directory has files in it. Aborting. + mysqld-1 | 2024-06-14T13:50:33.169416Z 0 [ERROR] [MY-013236] [Server] The designated data directory /var/lib/mysql/ is unusable. You can remove all files that the server added to it. +``` + +The best way around this is to run `make down && make up`. This will prune volumes and restart addons-server. + +### stat /Users/kmeinhardt/src/mozilla/addons-server/env: no such file or directory + +If you run into this issue, it is likely due to an invalid `.env` file, probably created by running tests for our Makefile +and docker-compose.yml file locally. + +```bash + docker compose up -d --wait --remove-orphans --force-recreate --quiet-pull + stat /Users/kmeinhardt/src/mozilla/addons-server/env: no such file or directory + make: *** [docker_compose_up] Error 14 +``` + +To fix this error, run `rm -f .env` to remove your .env and `make up` to restart the containers. + +[docker-compose]: ../../../docker-compose.yml +[docker-compose-private]: ../../../docker-compose.private.yml +[docker-image-digest]: https://github.com/opencontainers/.github/blob/main/docs/docs/introduction/digests.md +[addons-server-tags]: https://hub.docker.com/r/mozilla/addons-server/tags +[ci-workflow]: https://github.com/mozilla/addons-server/actions/workflows/ci.yml + +### 401 during docker build step in CI + +If the `build-docker` action is run, it requires repository secrets and permissions to be set correctly.
If you see the below error: + +```bash +Error: buildx bake failed with: ERROR: failed to solve: failed to push mozilla/addons-server:pr-22446-ci: failed to authorize: failed to fetch oauth token: unexpected status from GET request to https://auth.docker.io/token?scope=repository%3Amozilla%2Faddons-server%3Apull%2Cpush&service=registry.docker.io: 401 Unauthorized +``` + +See the [workflow example](./github_actions.md) for correct usage. + +### Invalid pull_policy + +If you run docker compose commands directly in the terminal, it is critical that your `.env` file exists and is up to date. This is handled automatically using make commands +but if you run `docker compose pull` without a .env file, you may encounter validation errors. That is because our docker-compose file itself uses variable substitution +for certain properties. This allows us to modify the behaviour of docker at runtime. + +```bash +validating /Users/user/mozilla/addons-server/docker-compose.yml: services.worker.pull_policy services.worker.pull_policy must be one of the following: "always", "never", "if_not_present", "build", "missing" +``` + +To fix this error, run `make setup` to ensure you have an up-to-date .env file locally. + +### Invalid docker context + +We have in the past used custom docker build contexts to build and run addons-server. +We currently use the `default` builder context, so if you get this error running `make up`: + +```bash +ERROR: run `docker context use default` to switch to default context +18306 v0.16.1-desktop.1 /Users/awagner/.docker/cli-plugins/docker-buildx buildx use default +github.com/docker/buildx/commands.runUse + github.com/docker/buildx/commands/use.go:31 +github.com/docker/buildx/commands.useCmd.func1 + github.com/docker/buildx/commands/use.go:73 +github.com/docker/cli/cli-plugins/plugin.RunPlugin.func1.1.2 + github.com/docker/cli@v27.0.3+incompatible/cli-plugins/plugin/plugin.go:64 +github.com/spf13/cobra.(*Command).execute + github.com/spf13/cobra@v1.8.1/command.go:985 +github.com/spf13/cobra.(*Command).ExecuteC + github.com/spf13/cobra@v1.8.1/command.go:1117 +github.com/spf13/cobra.(*Command).Execute + github.com/spf13/cobra@v1.8.1/command.go:1041 +github.com/docker/cli/cli-plugins/plugin.RunPlugin + github.com/docker/cli@v27.0.3+incompatible/cli-plugins/plugin/plugin.go:79 +main.runPlugin + github.com/docker/buildx/cmd/buildx/main.go:67 +main.main + github.com/docker/buildx/cmd/buildx/main.go:84 +runtime.main + runtime/proc.go:271 +runtime.goexit + runtime/asm_arm64.s:1222 + +make[1]: *** [docker_use_builder] Error 1 +make: *** [docker_pull_or_build] Error 2 +``` + +To fix this error, run `docker context use default` to switch to the default builder context. + +### Failing make up due to invalid or failing migrations + +Every time you run `make up` it will run migrations. If you have failing migrations, +this will cause the make command to fail. However, if migrations are running, it means the containers are already up. + +You can inspect and fix the migration and then run `make up` again to re-start the containers. + +Inspecting the database can be done via: + +```bash +make dbshell +```
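+ +If you want to apply a fixed migration yourself rather than re-running the whole `make up` flow, standard Django tooling works from inside the container (assuming the containers are up): + +```bash +make shell +./manage.py migrate +```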
diff --git a/docs/topics/development/static-files.md b/docs/topics/development/static-files.md new file mode 100644 index 000000000000..46a4e3ffc377 --- /dev/null +++ b/docs/topics/development/static-files.md @@ -0,0 +1,87 @@ +# Static Files in addons-server + +This document explains how static files are served in the addons-server project during local development. In production, +static files are served directly from a CDN. + +## Overview + +addons-server uses a combination of nginx and Django's built-in static file serving capabilities to efficiently serve static files. +These files come from multiple sources: + +1. The `./static` folder in the project +2. Python dependencies +3. npm dependencies +4. Compressed/minified files built by `update_assets` + +## Static File Servers + +We use a combination of servers to serve static files: + +1. Nginx +2. Django's built-in development server + +In development, the nginx server will attempt to serve static files from the `./static` directory mounted into the nginx container. +If the file cannot be found there, the request is forwarded to django. +Nginx serves our own static files quickly, and any vendor files can be fetched from django directly during development. + +In production mode, we mount a data volume both to the `web` and `nginx` containers. +The `web` container exposes the `site-static` directory, which includes the collected static files, to nginx. + +> In actual production environments, we upload the static files to a cloud bucket and serve them directly from the static path. + +## Static File Sources + +The rendering path for static files is as follows: + +1. Nginx tries to serve the file if it is available in the `./static` directory. +2. If the file is not found, the request is forwarded to django and served by the static file server. + +The static file server uses our defined `STATICFILES_STORAGE` setting to determine the URL for static files as well as their underlying source file. +During development, we use the `StaticFilesStorage` class, which does not map the hashed file names back to their original file names. +Otherwise we use the same `ManifestStaticFilesStorage` class that is used in production, expecting to serve the files from the `STATIC_ROOT` directory. + +This allows us to skip `update_assets` in dev mode, speeding up the development process, while still enabling production-like behavior +when configured to do so. The long-term goal is to run CI in production mode always to ensure all tests verify against the production +static file build. + +To better visualize the impact of the various settings, here is a reference: + +Given a static file `js/devhub/my-file.js`: + +In `DEV_MODE` the url will look like `/static/js/devhub/my-file.js` no matter what. +However, in production, if `DEBUG` is `False`, the url will append the content hash like this, +`/static/js/devhub/my-file.1234567890.js`. Finally, if `DEBUG` is `True`, this file will be minified and concatenated with other files and probably look something like this: `/static/js/devhub-all.min.1234567890.js`. + +The true `production` mode is then when `DEBUG` is `False` and `DEV_MODE` is `False`. But it makes sense +to make these individually toggleable so you can better "debug" js files from a production image. + +### Project Static Files + +Static files specific to the addons-server project are stored in the `./static` directory. These include CSS, JavaScript, images, and other assets used by the application. + +In reality there are 3 static directories in our docker compose container: + +- `/data/olympia/static`: Contains static files that are mounted directly from the host. +- `/data/olympia/static-build`: Contains static files that are built by `compress_assets`. +- `/data/olympia/site-static`: Contains static files that are collected by the `collectstatic` command.
+ +The only one of these directories that is exposed to your host is the `./static` directory. + +### Compressing Static Files + +We currently use a `ducktape` script to compress our static files. +Ideally we would migrate to a modern tool to replace manual scripting, but for now this works. + +Assets are compressed automatically during the docker build, but if you need to manually update files while developing, +the easiest way is to run `make update_assets`, which will compress and concatenate static assets as well as collect all static files +to the `site-static` directory. + +### Python Dependencies + +Some Python packages include their own static files. These assets are collected by the `collectstatic` command and included in the final static files directory. +During development, they are served by the django development server. + +### npm Dependencies + +We have a (complex) set of npm static assets that are built by the `compress_assets` management command. +During development, these assets are served directly from the `node_modules` directory using a custom static finder. diff --git a/docs/topics/development/style.rst b/docs/topics/development/style.md similarity index 52% rename from docs/topics/development/style.rst rename to docs/topics/development/style.md index db94d5b2b8b7..309e7cafab72 100644 --- a/docs/topics/development/style.rst +++ b/docs/topics/development/style.md @@ -1,52 +1,51 @@ -.. _style: +(style)= -=================== -Style Guide -=================== +# Style Guide Writing code for olympia? Awesome! Please help keep our code readable by, whenever possible, adhering to these style conventions. +## Python -Python ------- -- see https://wiki.mozilla.org/Webdev:Python +- see <https://wiki.mozilla.org/Webdev:Python> +## Markup -Markup ------- -- ``<!DOCTYPE html>`` +- `<!DOCTYPE html>` - double-quote attributes - Soft tab (2 space) indentation -- Title-Case ``