diff --git a/news/appdirs.vendor b/news/appdirs.vendor
new file mode 100644
index 00000000000..4e4ebd7278a
--- /dev/null
+++ b/news/appdirs.vendor
@@ -0,0 +1 @@
+Upgrade appdirs to 1.4.4
diff --git a/news/certifi.vendor b/news/certifi.vendor
new file mode 100644
index 00000000000..ddd125054b1
--- /dev/null
+++ b/news/certifi.vendor
@@ -0,0 +1 @@
+Upgrade certifi to 2020.6.20
diff --git a/news/distlib.vendor b/news/distlib.vendor
new file mode 100644
index 00000000000..ba8d7633c07
--- /dev/null
+++ b/news/distlib.vendor
@@ -0,0 +1 @@
+Upgrade distlib to 0.3.1
diff --git a/news/html5lib.vendor b/news/html5lib.vendor
new file mode 100644
index 00000000000..ed774270d45
--- /dev/null
+++ b/news/html5lib.vendor
@@ -0,0 +1 @@
+Upgrade html5lib to 1.1
diff --git a/news/idna.vendor b/news/idna.vendor
new file mode 100644
index 00000000000..b1bce37afe0
--- /dev/null
+++ b/news/idna.vendor
@@ -0,0 +1 @@
+Upgrade idna to 2.10
diff --git a/news/packaging.vendor b/news/packaging.vendor
new file mode 100644
index 00000000000..1c69173a95e
--- /dev/null
+++ b/news/packaging.vendor
@@ -0,0 +1 @@
+Upgrade packaging to 20.4
diff --git a/news/requests.vendor b/news/requests.vendor
new file mode 100644
index 00000000000..4e61b1974df
--- /dev/null
+++ b/news/requests.vendor
@@ -0,0 +1 @@
+Upgrade requests to 2.24.0
diff --git a/news/six.vendor b/news/six.vendor
new file mode 100644
index 00000000000..6c9e24900c8
--- /dev/null
+++ b/news/six.vendor
@@ -0,0 +1 @@
+Upgrade six to 1.15.0
diff --git a/news/toml.vendor b/news/toml.vendor
new file mode 100644
index 00000000000..401ae7a8361
--- /dev/null
+++ b/news/toml.vendor
@@ -0,0 +1 @@
+Upgrade toml to 0.10.1
diff --git a/news/urllib3.vendor b/news/urllib3.vendor
new file mode 100644
index 00000000000..f80766b0d06
--- /dev/null
+++ b/news/urllib3.vendor
@@ -0,0 +1 @@
+Upgrade urllib3 to 1.25.9
diff --git a/src/pip/_vendor/appdirs.py b/src/pip/_vendor/appdirs.py
index 8bd9c9ca0b8..33a3b77410c 100644
--- a/src/pip/_vendor/appdirs.py
+++ b/src/pip/_vendor/appdirs.py
@@ -13,8 +13,8 @@
# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
-__version_info__ = (1, 4, 3)
-__version__ = '.'.join(map(str, __version_info__))
+__version__ = "1.4.4"
+__version_info__ = tuple(int(segment) for segment in __version__.split("."))
import sys
diff --git a/src/pip/_vendor/certifi/__init__.py b/src/pip/_vendor/certifi/__init__.py
index 1e2dfac7dbe..5d52a62e7f4 100644
--- a/src/pip/_vendor/certifi/__init__.py
+++ b/src/pip/_vendor/certifi/__init__.py
@@ -1,3 +1,3 @@
from .core import contents, where
-__version__ = "2020.04.05.1"
+__version__ = "2020.06.20"
diff --git a/src/pip/_vendor/certifi/cacert.pem b/src/pip/_vendor/certifi/cacert.pem
index ece147c9dc8..0fd855f4646 100644
--- a/src/pip/_vendor/certifi/cacert.pem
+++ b/src/pip/_vendor/certifi/cacert.pem
@@ -58,38 +58,6 @@ AfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7
TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg==
-----END CERTIFICATE-----
-# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only
-# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only
-# Label: "Verisign Class 3 Public Primary Certification Authority - G3"
-# Serial: 206684696279472310254277870180966723415
-# MD5 Fingerprint: cd:68:b6:a7:c7:c4:ce:75:e0:1d:4f:57:44:61:92:09
-# SHA1 Fingerprint: 13:2d:0d:45:53:4b:69:97:cd:b2:d5:c3:39:e2:55:76:60:9b:5c:c6
-# SHA256 Fingerprint: eb:04:cf:5e:b1:f3:9a:fa:76:2f:2b:b1:20:f2:96:cb:a5:20:c1:b9:7d:b1:58:95:65:b8:1c:b9:a1:7b:72:44
------BEGIN CERTIFICATE-----
-MIIEGjCCAwICEQCbfgZJoz5iudXukEhxKe9XMA0GCSqGSIb3DQEBBQUAMIHKMQsw
-CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZl
-cmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWdu
-LCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlT
-aWduIENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3Jp
-dHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQswCQYD
-VQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlT
-aWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJ
-bmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWdu
-IENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg
-LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMu6nFL8eB8aHm8b
-N3O9+MlrlBIwT/A2R/XQkQr1F8ilYcEWQE37imGQ5XYgwREGfassbqb1EUGO+i2t
-KmFZpGcmTNDovFJbcCAEWNF6yaRpvIMXZK0Fi7zQWM6NjPXr8EJJC52XJ2cybuGu
-kxUccLwgTS8Y3pKI6GyFVxEa6X7jJhFUokWWVYPKMIno3Nij7SqAP395ZVc+FSBm
-CC+Vk7+qRy+oRpfwEuL+wgorUeZ25rdGt+INpsyow0xZVYnm6FNcHOqd8GIWC6fJ
-Xwzw3sJ2zq/3avL6QaaiMxTJ5Xpj055iN9WFZZ4O5lMkdBteHRJTW8cs54NJOxWu
-imi5V5cCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAERSWwauSCPc/L8my/uRan2Te
-2yFPhpk0djZX3dAVL8WtfxUfN2JzPtTnX84XA9s1+ivbrmAJXx5fj267Cz3qWhMe
-DGBvtcC1IyIuBwvLqXTLR7sdwdela8wv0kL9Sd2nic9TutoAWii/gt/4uhMdUIaC
-/Y4wjylGsB49Ndo4YhYYSq3mtlFs3q9i6wHQHiT+eo8SGhJouPtmmRQURVyu565p
-F4ErWjfJXir0xuKhXFSbplQAz/DxwceYMBo7Nhbbo27q/a2ywtrvAkcTisDxszGt
-TxzhT5yvDwyd93gN2PQ1VoDat20Xj50egWTh/sVFuq1ruQp6Tk9LhO5L8X3dEQ==
------END CERTIFICATE-----
-
# Issuer: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited
# Subject: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited
# Label: "Entrust.net Premium 2048 Secure Server CA"
@@ -152,39 +120,6 @@ ksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9HRCwBXbsdtTLS
R9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp
-----END CERTIFICATE-----
-# Issuer: CN=AddTrust External CA Root O=AddTrust AB OU=AddTrust External TTP Network
-# Subject: CN=AddTrust External CA Root O=AddTrust AB OU=AddTrust External TTP Network
-# Label: "AddTrust External Root"
-# Serial: 1
-# MD5 Fingerprint: 1d:35:54:04:85:78:b0:3f:42:42:4d:bf:20:73:0a:3f
-# SHA1 Fingerprint: 02:fa:f3:e2:91:43:54:68:60:78:57:69:4d:f5:e4:5b:68:85:18:68
-# SHA256 Fingerprint: 68:7f:a4:51:38:22:78:ff:f0:c8:b1:1f:8d:43:d5:76:67:1c:6e:b2:bc:ea:b4:13:fb:83:d9:65:d0:6d:2f:f2
------BEGIN CERTIFICATE-----
-MIIENjCCAx6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBvMQswCQYDVQQGEwJTRTEU
-MBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0IEV4dGVybmFs
-IFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBFeHRlcm5hbCBDQSBSb290
-MB4XDTAwMDUzMDEwNDgzOFoXDTIwMDUzMDEwNDgzOFowbzELMAkGA1UEBhMCU0Ux
-FDASBgNVBAoTC0FkZFRydXN0IEFCMSYwJAYDVQQLEx1BZGRUcnVzdCBFeHRlcm5h
-bCBUVFAgTmV0d29yazEiMCAGA1UEAxMZQWRkVHJ1c3QgRXh0ZXJuYWwgQ0EgUm9v
-dDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALf3GjPm8gAELTngTlvt
-H7xsD821+iO2zt6bETOXpClMfZOfvUq8k+0DGuOPz+VtUFrWlymUWoCwSXrbLpX9
-uMq/NzgtHj6RQa1wVsfwTz/oMp50ysiQVOnGXw94nZpAPA6sYapeFI+eh6FqUNzX
-mk6vBbOmcZSccbNQYArHE504B4YCqOmoaSYYkKtMsE8jqzpPhNjfzp/haW+710LX
-a0Tkx63ubUFfclpxCDezeWWkWaCUN/cALw3CknLa0Dhy2xSoRcRdKn23tNbE7qzN
-E0S3ySvdQwAl+mG5aWpYIxG3pzOPVnVZ9c0p10a3CitlttNCbxWyuHv77+ldU9U0
-WicCAwEAAaOB3DCB2TAdBgNVHQ4EFgQUrb2YejS0Jvf6xCZU7wO94CTLVBowCwYD
-VR0PBAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wgZkGA1UdIwSBkTCBjoAUrb2YejS0
-Jvf6xCZU7wO94CTLVBqhc6RxMG8xCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRU
-cnVzdCBBQjEmMCQGA1UECxMdQWRkVHJ1c3QgRXh0ZXJuYWwgVFRQIE5ldHdvcmsx
-IjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENBIFJvb3SCAQEwDQYJKoZIhvcN
-AQEFBQADggEBALCb4IUlwtYj4g+WBpKdQZic2YR5gdkeWxQHIzZlj7DYd7usQWxH
-YINRsPkyPef89iYTx4AWpb9a/IfPeHmJIZriTAcKhjW88t5RxNKWt9x+Tu5w/Rw5
-6wwCURQtjr0W4MHfRnXnJK3s9EK0hZNwEGe6nQY1ShjTK3rMUUKhemPR5ruhxSvC
-Nr4TDea9Y355e6cJDUCrat2PisP29owaQgVR1EX1n6diIWgVIEM8med8vSTYqZEX
-c4g/VhsxOBi0cQ+azcgOno4uG+GMmIPLHzHxREzGBHNJdmAPx/i9F4BrLunMTA5a
-mnkPIAou1Z5jJh5VkpTYghdae9C8x49OhgQ=
------END CERTIFICATE-----
-
# Issuer: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc.
# Subject: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc.
# Label: "Entrust Root Certification Authority"
@@ -1499,47 +1434,6 @@ uLjbvrW5KfnaNwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2
XjG4Kvte9nHfRCaexOYNkbQudZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E=
-----END CERTIFICATE-----
-# Issuer: CN=Staat der Nederlanden Root CA - G2 O=Staat der Nederlanden
-# Subject: CN=Staat der Nederlanden Root CA - G2 O=Staat der Nederlanden
-# Label: "Staat der Nederlanden Root CA - G2"
-# Serial: 10000012
-# MD5 Fingerprint: 7c:a5:0f:f8:5b:9a:7d:6d:30:ae:54:5a:e3:42:a2:8a
-# SHA1 Fingerprint: 59:af:82:79:91:86:c7:b4:75:07:cb:cf:03:57:46:eb:04:dd:b7:16
-# SHA256 Fingerprint: 66:8c:83:94:7d:a6:3b:72:4b:ec:e1:74:3c:31:a0:e6:ae:d0:db:8e:c5:b3:1b:e3:77:bb:78:4f:91:b6:71:6f
------BEGIN CERTIFICATE-----
-MIIFyjCCA7KgAwIBAgIEAJiWjDANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJO
-TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFh
-dCBkZXIgTmVkZXJsYW5kZW4gUm9vdCBDQSAtIEcyMB4XDTA4MDMyNjExMTgxN1oX
-DTIwMDMyNTExMDMxMFowWjELMAkGA1UEBhMCTkwxHjAcBgNVBAoMFVN0YWF0IGRl
-ciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5lZGVybGFuZGVuIFJv
-b3QgQ0EgLSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMVZ5291
-qj5LnLW4rJ4L5PnZyqtdj7U5EILXr1HgO+EASGrP2uEGQxGZqhQlEq0i6ABtQ8Sp
-uOUfiUtnvWFI7/3S4GCI5bkYYCjDdyutsDeqN95kWSpGV+RLufg3fNU254DBtvPU
-Z5uW6M7XxgpT0GtJlvOjCwV3SPcl5XCsMBQgJeN/dVrlSPhOewMHBPqCYYdu8DvE
-pMfQ9XQ+pV0aCPKbJdL2rAQmPlU6Yiile7Iwr/g3wtG61jj99O9JMDeZJiFIhQGp
-5Rbn3JBV3w/oOM2ZNyFPXfUib2rFEhZgF1XyZWampzCROME4HYYEhLoaJXhena/M
-UGDWE4dS7WMfbWV9whUYdMrhfmQpjHLYFhN9C0lK8SgbIHRrxT3dsKpICT0ugpTN
-GmXZK4iambwYfp/ufWZ8Pr2UuIHOzZgweMFvZ9C+X+Bo7d7iscksWXiSqt8rYGPy
-5V6548r6f1CGPqI0GAwJaCgRHOThuVw+R7oyPxjMW4T182t0xHJ04eOLoEq9jWYv
-6q012iDTiIJh8BIitrzQ1aTsr1SIJSQ8p22xcik/Plemf1WvbibG/ufMQFxRRIEK
-eN5KzlW/HdXZt1bv8Hb/C3m1r737qWmRRpdogBQ2HbN/uymYNqUg+oJgYjOk7Na6
-B6duxc8UpufWkjTYgfX8HV2qXB72o007uPc5AgMBAAGjgZcwgZQwDwYDVR0TAQH/
-BAUwAwEB/zBSBgNVHSAESzBJMEcGBFUdIAAwPzA9BggrBgEFBQcCARYxaHR0cDov
-L3d3dy5wa2lvdmVyaGVpZC5ubC9wb2xpY2llcy9yb290LXBvbGljeS1HMjAOBgNV
-HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJFoMocVHYnitfGsNig0jQt8YojrMA0GCSqG
-SIb3DQEBCwUAA4ICAQCoQUpnKpKBglBu4dfYszk78wIVCVBR7y29JHuIhjv5tLyS
-CZa59sCrI2AGeYwRTlHSeYAz+51IvuxBQ4EffkdAHOV6CMqqi3WtFMTC6GY8ggen
-5ieCWxjmD27ZUD6KQhgpxrRW/FYQoAUXvQwjf/ST7ZwaUb7dRUG/kSS0H4zpX897
-IZmflZ85OkYcbPnNe5yQzSipx6lVu6xiNGI1E0sUOlWDuYaNkqbG9AclVMwWVxJK
-gnjIFNkXgiYtXSAfea7+1HAWFpWD2DU5/1JddRwWxRNVz0fMdWVSSt7wsKfkCpYL
-+63C4iWEst3kvX5ZbJvw8NjnyvLplzh+ib7M+zkXYT9y2zqR2GUBGR2tUKRXCnxL
-vJxxcypFURmFzI79R6d0lR2o0a9OF7FpJsKqeFdbxU2n5Z4FF5TKsl+gSRiNNOkm
-bEgeqmiSBeGCc1qb3AdbCG19ndeNIdn8FCCqwkXfP+cAslHkwvgFuXkajDTznlvk
-N1trSt8sV4pAWja63XVECDdCcAz+3F4hoKOKwJCcaNpQ5kUQR3i2TtJlycM33+FC
-Y7BXN0Ute4qcvwXqZVUz9zkQxSgqIXobisQk+T8VyJoVIPVVYpbtbZNQvOSqeK3Z
-ywplh6ZmwcSBo3c6WB4L7oOLnR7SUqTMHW+wmG2UMbX4cQrcufx9MmDm66+KAQ==
------END CERTIFICATE-----
-
# Issuer: CN=Hongkong Post Root CA 1 O=Hongkong Post
# Subject: CN=Hongkong Post Root CA 1 O=Hongkong Post
# Label: "Hongkong Post Root CA 1"
@@ -3788,47 +3682,6 @@ CkcO8DdZEv8tmZQoTipPNU0zWgIxAOp1AE47xDqUEpHJWEadIRNyp4iciuRMStuW
1KyLa2tJElMzrdfkviT8tQp21KW8EA==
-----END CERTIFICATE-----
-# Issuer: CN=LuxTrust Global Root 2 O=LuxTrust S.A.
-# Subject: CN=LuxTrust Global Root 2 O=LuxTrust S.A.
-# Label: "LuxTrust Global Root 2"
-# Serial: 59914338225734147123941058376788110305822489521
-# MD5 Fingerprint: b2:e1:09:00:61:af:f7:f1:91:6f:c4:ad:8d:5e:3b:7c
-# SHA1 Fingerprint: 1e:0e:56:19:0a:d1:8b:25:98:b2:04:44:ff:66:8a:04:17:99:5f:3f
-# SHA256 Fingerprint: 54:45:5f:71:29:c2:0b:14:47:c4:18:f9:97:16:8f:24:c5:8f:c5:02:3b:f5:da:5b:e2:eb:6e:1d:d8:90:2e:d5
------BEGIN CERTIFICATE-----
-MIIFwzCCA6ugAwIBAgIUCn6m30tEntpqJIWe5rgV0xZ/u7EwDQYJKoZIhvcNAQEL
-BQAwRjELMAkGA1UEBhMCTFUxFjAUBgNVBAoMDUx1eFRydXN0IFMuQS4xHzAdBgNV
-BAMMFkx1eFRydXN0IEdsb2JhbCBSb290IDIwHhcNMTUwMzA1MTMyMTU3WhcNMzUw
-MzA1MTMyMTU3WjBGMQswCQYDVQQGEwJMVTEWMBQGA1UECgwNTHV4VHJ1c3QgUy5B
-LjEfMB0GA1UEAwwWTHV4VHJ1c3QgR2xvYmFsIFJvb3QgMjCCAiIwDQYJKoZIhvcN
-AQEBBQADggIPADCCAgoCggIBANeFl78RmOnwYoNMPIf5U2o3C/IPPIfOb9wmKb3F
-ibrJgz337spbxm1Jc7TJRqMbNBM/wYlFV/TZsfs2ZUv7COJIcRHIbjuend+JZTem
-hfY7RBi2xjcwYkSSl2l9QjAk5A0MiWtj3sXh306pFGxT4GHO9hcvHTy95iJMHZP1
-EMShduxq3sVs35a0VkBCwGKSMKEtFZSg0iAGCW5qbeXrt77U8PEVfIvmTroTzEsn
-Xpk8F12PgX8zPU/TPxvsXD/wPEx1bvKm1Z3aLQdjAsZy6ZS8TEmVT4hSyNvoaYL4
-zDRbIvCGp4m9SAptZoFtyMhk+wHh9OHe2Z7d21vUKpkmFRseTJIpgp7VkoGSQXAZ
-96Tlk0u8d2cx3Rz9MXANF5kM+Qw5GSoXtTBxVdUPrljhPS80m8+f9niFwpN6cj5m
-j5wWEWCPnolvZ77gR1o7DJpni89Gxq44o/KnvObWhWszJHAiS8sIm7vI+AIpHb4g
-DEa/a4ebsypmQjVGbKq6rfmYe+lQVRQxv7HaLe2ArWgk+2mr2HETMOZns4dA/Yl+
-8kPREd8vZS9kzl8UubG/Mb2HeFpZZYiq/FkySIbWTLkpS5XTdvN3JW1CHDiDTf2j
-X5t/Lax5Gw5CMZdjpPuKadUiDTSQMC6otOBttpSsvItO13D8xTiOZCXhTTmQzsmH
-hFhxAgMBAAGjgagwgaUwDwYDVR0TAQH/BAUwAwEB/zBCBgNVHSAEOzA5MDcGByuB
-KwEBAQowLDAqBggrBgEFBQcCARYeaHR0cHM6Ly9yZXBvc2l0b3J5Lmx1eHRydXN0
-Lmx1MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBT/GCh2+UgFLKGu8SsbK7JT
-+Et8szAdBgNVHQ4EFgQU/xgodvlIBSyhrvErGyuyU/hLfLMwDQYJKoZIhvcNAQEL
-BQADggIBAGoZFO1uecEsh9QNcH7X9njJCwROxLHOk3D+sFTAMs2ZMGQXvw/l4jP9
-BzZAcg4atmpZ1gDlaCDdLnINH2pkMSCEfUmmWjfrRcmF9dTHF5kH5ptV5AzoqbTO
-jFu1EVzPig4N1qx3gf4ynCSecs5U89BvolbW7MM3LGVYvlcAGvI1+ut7MV3CwRI9
-loGIlonBWVx65n9wNOeD4rHh4bhY79SV5GCc8JaXcozrhAIuZY+kt9J/Z93I055c
-qqmkoCUUBpvsT34tC38ddfEz2O3OuHVtPlu5mB0xDVbYQw8wkbIEa91WvpWAVWe+
-2M2D2RjuLg+GLZKecBPs3lHJQ3gCpU3I+V/EkVhGFndadKpAvAefMLmx9xIX3eP/
-JEAdemrRTxgKqpAd60Ae36EeRJIQmvKN4dFLRp7oRUKX6kWZ8+xm1QL68qZKJKre
-zrnK+T+Tb/mjuuqlPpmt/f97mfVl7vBZKGfXkJWkE4SphMHozs51k2MavDzq1WQf
-LSoSOcbDWjLtR5EWDrw4wVDej8oqkDQc7kGUnF4ZLvhFSZl0kbAEb+MEWrGrKqv+
-x9CWttrhSmQGbmBNvUJO/3jaJMobtNeWOWyu8Q6qp31IiyBMz2TWuJdGsE7RKlY6
-oJO9r4Ak4Ap+58rVyuiFVdw2KuGUaJPHZnJED4AhMmwlxyOAgwrr
------END CERTIFICATE-----
-
# Issuer: CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK OU=Kamu Sertifikasyon Merkezi - Kamu SM
# Subject: CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK OU=Kamu Sertifikasyon Merkezi - Kamu SM
# Label: "TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1"
@@ -4639,3 +4492,129 @@ IQ6SwJAfzyBfyjs4x7dtOvPmRLgOMWuIjnDrnBdSqEGULoe256YSxXXfW8AKbnuk
5F6G+TaU33fD6Q3AOfF5u0aOq0NZJ7cguyPpVkAh7DE9ZapD8j3fcEThuk0mEDuY
n/PIjhs4ViFqUZPTkcpG2om3PVODLAgfi49T3f+sHw==
-----END CERTIFICATE-----
+
+# Issuer: CN=Microsoft ECC Root Certificate Authority 2017 O=Microsoft Corporation
+# Subject: CN=Microsoft ECC Root Certificate Authority 2017 O=Microsoft Corporation
+# Label: "Microsoft ECC Root Certificate Authority 2017"
+# Serial: 136839042543790627607696632466672567020
+# MD5 Fingerprint: dd:a1:03:e6:4a:93:10:d1:bf:f0:19:42:cb:fe:ed:67
+# SHA1 Fingerprint: 99:9a:64:c3:7f:f4:7d:9f:ab:95:f1:47:69:89:14:60:ee:c4:c3:c5
+# SHA256 Fingerprint: 35:8d:f3:9d:76:4a:f9:e1:b7:66:e9:c9:72:df:35:2e:e1:5c:fa:c2:27:af:6a:d1:d7:0e:8e:4a:6e:dc:ba:02
+-----BEGIN CERTIFICATE-----
+MIICWTCCAd+gAwIBAgIQZvI9r4fei7FK6gxXMQHC7DAKBggqhkjOPQQDAzBlMQsw
+CQYDVQQGEwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYwNAYD
+VQQDEy1NaWNyb3NvZnQgRUNDIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIw
+MTcwHhcNMTkxMjE4MjMwNjQ1WhcNNDIwNzE4MjMxNjA0WjBlMQswCQYDVQQGEwJV
+UzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYwNAYDVQQDEy1NaWNy
+b3NvZnQgRUNDIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIwMTcwdjAQBgcq
+hkjOPQIBBgUrgQQAIgNiAATUvD0CQnVBEyPNgASGAlEvaqiBYgtlzPbKnR5vSmZR
+ogPZnZH6thaxjG7efM3beaYvzrvOcS/lpaso7GMEZpn4+vKTEAXhgShC48Zo9OYb
+hGBKia/teQ87zvH2RPUBeMCjVDBSMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8E
+BTADAQH/MB0GA1UdDgQWBBTIy5lycFIM+Oa+sgRXKSrPQhDtNTAQBgkrBgEEAYI3
+FQEEAwIBADAKBggqhkjOPQQDAwNoADBlAjBY8k3qDPlfXu5gKcs68tvWMoQZP3zV
+L8KxzJOuULsJMsbG7X7JNpQS5GiFBqIb0C8CMQCZ6Ra0DvpWSNSkMBaReNtUjGUB
+iudQZsIxtzm6uBoiB078a1QWIP8rtedMDE2mT3M=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Microsoft RSA Root Certificate Authority 2017 O=Microsoft Corporation
+# Subject: CN=Microsoft RSA Root Certificate Authority 2017 O=Microsoft Corporation
+# Label: "Microsoft RSA Root Certificate Authority 2017"
+# Serial: 40975477897264996090493496164228220339
+# MD5 Fingerprint: 10:ff:00:ff:cf:c9:f8:c7:7a:c0:ee:35:8e:c9:0f:47
+# SHA1 Fingerprint: 73:a5:e6:4a:3b:ff:83:16:ff:0e:dc:cc:61:8a:90:6e:4e:ae:4d:74
+# SHA256 Fingerprint: c7:41:f7:0f:4b:2a:8d:88:bf:2e:71:c1:41:22:ef:53:ef:10:eb:a0:cf:a5:e6:4c:fa:20:f4:18:85:30:73:e0
+-----BEGIN CERTIFICATE-----
+MIIFqDCCA5CgAwIBAgIQHtOXCV/YtLNHcB6qvn9FszANBgkqhkiG9w0BAQwFADBl
+MQswCQYDVQQGEwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYw
+NAYDVQQDEy1NaWNyb3NvZnQgUlNBIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5
+IDIwMTcwHhcNMTkxMjE4MjI1MTIyWhcNNDIwNzE4MjMwMDIzWjBlMQswCQYDVQQG
+EwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYwNAYDVQQDEy1N
+aWNyb3NvZnQgUlNBIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIwMTcwggIi
+MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKW76UM4wplZEWCpW9R2LBifOZ
+Nt9GkMml7Xhqb0eRaPgnZ1AzHaGm++DlQ6OEAlcBXZxIQIJTELy/xztokLaCLeX0
+ZdDMbRnMlfl7rEqUrQ7eS0MdhweSE5CAg2Q1OQT85elss7YfUJQ4ZVBcF0a5toW1
+HLUX6NZFndiyJrDKxHBKrmCk3bPZ7Pw71VdyvD/IybLeS2v4I2wDwAW9lcfNcztm
+gGTjGqwu+UcF8ga2m3P1eDNbx6H7JyqhtJqRjJHTOoI+dkC0zVJhUXAoP8XFWvLJ
+jEm7FFtNyP9nTUwSlq31/niol4fX/V4ggNyhSyL71Imtus5Hl0dVe49FyGcohJUc
+aDDv70ngNXtk55iwlNpNhTs+VcQor1fznhPbRiefHqJeRIOkpcrVE7NLP8TjwuaG
+YaRSMLl6IE9vDzhTyzMMEyuP1pq9KsgtsRx9S1HKR9FIJ3Jdh+vVReZIZZ2vUpC6
+W6IYZVcSn2i51BVrlMRpIpj0M+Dt+VGOQVDJNE92kKz8OMHY4Xu54+OU4UZpyw4K
+UGsTuqwPN1q3ErWQgR5WrlcihtnJ0tHXUeOrO8ZV/R4O03QK0dqq6mm4lyiPSMQH
++FJDOvTKVTUssKZqwJz58oHhEmrARdlns87/I6KJClTUFLkqqNfs+avNJVgyeY+Q
+W5g5xAgGwax/Dj0ApQIDAQABo1QwUjAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/
+BAUwAwEB/zAdBgNVHQ4EFgQUCctZf4aycI8awznjwNnpv7tNsiMwEAYJKwYBBAGC
+NxUBBAMCAQAwDQYJKoZIhvcNAQEMBQADggIBAKyvPl3CEZaJjqPnktaXFbgToqZC
+LgLNFgVZJ8og6Lq46BrsTaiXVq5lQ7GPAJtSzVXNUzltYkyLDVt8LkS/gxCP81OC
+gMNPOsduET/m4xaRhPtthH80dK2Jp86519efhGSSvpWhrQlTM93uCupKUY5vVau6
+tZRGrox/2KJQJWVggEbbMwSubLWYdFQl3JPk+ONVFT24bcMKpBLBaYVu32TxU5nh
+SnUgnZUP5NbcA/FZGOhHibJXWpS2qdgXKxdJ5XbLwVaZOjex/2kskZGT4d9Mozd2
+TaGf+G0eHdP67Pv0RR0Tbc/3WeUiJ3IrhvNXuzDtJE3cfVa7o7P4NHmJweDyAmH3
+pvwPuxwXC65B2Xy9J6P9LjrRk5Sxcx0ki69bIImtt2dmefU6xqaWM/5TkshGsRGR
+xpl/j8nWZjEgQRCHLQzWwa80mMpkg/sTV9HB8Dx6jKXB/ZUhoHHBk2dxEuqPiApp
+GWSZI1b7rCoucL5mxAyE7+WL85MB+GqQk2dLsmijtWKP6T+MejteD+eMuMZ87zf9
+dOLITzNy4ZQ5bb0Sr74MTnB8G2+NszKTc0QWbej09+CVgI+WXTik9KveCjCHk9hN
+AHFiRSdLOkKEW39lt2c0Ui2cFmuqqNh7o0JMcccMyj6D5KbvtwEwXlGjefVwaaZB
+RA+GsCyRxj3qrg+E
+-----END CERTIFICATE-----
+
+# Issuer: CN=e-Szigno Root CA 2017 O=Microsec Ltd.
+# Subject: CN=e-Szigno Root CA 2017 O=Microsec Ltd.
+# Label: "e-Szigno Root CA 2017"
+# Serial: 411379200276854331539784714
+# MD5 Fingerprint: de:1f:f6:9e:84:ae:a7:b4:21:ce:1e:58:7d:d1:84:98
+# SHA1 Fingerprint: 89:d4:83:03:4f:9e:9a:48:80:5f:72:37:d4:a9:a6:ef:cb:7c:1f:d1
+# SHA256 Fingerprint: be:b0:0b:30:83:9b:9b:c3:2c:32:e4:44:79:05:95:06:41:f2:64:21:b1:5e:d0:89:19:8b:51:8a:e2:ea:1b:99
+-----BEGIN CERTIFICATE-----
+MIICQDCCAeWgAwIBAgIMAVRI7yH9l1kN9QQKMAoGCCqGSM49BAMCMHExCzAJBgNV
+BAYTAkhVMREwDwYDVQQHDAhCdWRhcGVzdDEWMBQGA1UECgwNTWljcm9zZWMgTHRk
+LjEXMBUGA1UEYQwOVkFUSFUtMjM1ODQ0OTcxHjAcBgNVBAMMFWUtU3ppZ25vIFJv
+b3QgQ0EgMjAxNzAeFw0xNzA4MjIxMjA3MDZaFw00MjA4MjIxMjA3MDZaMHExCzAJ
+BgNVBAYTAkhVMREwDwYDVQQHDAhCdWRhcGVzdDEWMBQGA1UECgwNTWljcm9zZWMg
+THRkLjEXMBUGA1UEYQwOVkFUSFUtMjM1ODQ0OTcxHjAcBgNVBAMMFWUtU3ppZ25v
+IFJvb3QgQ0EgMjAxNzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABJbcPYrYsHtv
+xie+RJCxs1YVe45DJH0ahFnuY2iyxl6H0BVIHqiQrb1TotreOpCmYF9oMrWGQd+H
+Wyx7xf58etqjYzBhMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G
+A1UdDgQWBBSHERUI0arBeAyxr87GyZDvvzAEwDAfBgNVHSMEGDAWgBSHERUI0arB
+eAyxr87GyZDvvzAEwDAKBggqhkjOPQQDAgNJADBGAiEAtVfd14pVCzbhhkT61Nlo
+jbjcI4qKDdQvfepz7L9NbKgCIQDLpbQS+ue16M9+k/zzNY9vTlp8tLxOsvxyqltZ
++efcMQ==
+-----END CERTIFICATE-----
+
+# Issuer: O=CERTSIGN SA OU=certSIGN ROOT CA G2
+# Subject: O=CERTSIGN SA OU=certSIGN ROOT CA G2
+# Label: "certSIGN Root CA G2"
+# Serial: 313609486401300475190
+# MD5 Fingerprint: 8c:f1:75:8a:c6:19:cf:94:b7:f7:65:20:87:c3:97:c7
+# SHA1 Fingerprint: 26:f9:93:b4:ed:3d:28:27:b0:b9:4b:a7:e9:15:1d:a3:8d:92:e5:32
+# SHA256 Fingerprint: 65:7c:fe:2f:a7:3f:aa:38:46:25:71:f3:32:a2:36:3a:46:fc:e7:02:09:51:71:07:02:cd:fb:b6:ee:da:33:05
+-----BEGIN CERTIFICATE-----
+MIIFRzCCAy+gAwIBAgIJEQA0tk7GNi02MA0GCSqGSIb3DQEBCwUAMEExCzAJBgNV
+BAYTAlJPMRQwEgYDVQQKEwtDRVJUU0lHTiBTQTEcMBoGA1UECxMTY2VydFNJR04g
+Uk9PVCBDQSBHMjAeFw0xNzAyMDYwOTI3MzVaFw00MjAyMDYwOTI3MzVaMEExCzAJ
+BgNVBAYTAlJPMRQwEgYDVQQKEwtDRVJUU0lHTiBTQTEcMBoGA1UECxMTY2VydFNJ
+R04gUk9PVCBDQSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMDF
+dRmRfUR0dIf+DjuW3NgBFszuY5HnC2/OOwppGnzC46+CjobXXo9X69MhWf05N0Iw
+vlDqtg+piNguLWkh59E3GE59kdUWX2tbAMI5Qw02hVK5U2UPHULlj88F0+7cDBrZ
+uIt4ImfkabBoxTzkbFpG583H+u/E7Eu9aqSs/cwoUe+StCmrqzWaTOTECMYmzPhp
+n+Sc8CnTXPnGFiWeI8MgwT0PPzhAsP6CRDiqWhqKa2NYOLQV07YRaXseVO6MGiKs
+cpc/I1mbySKEwQdPzH/iV8oScLumZfNpdWO9lfsbl83kqK/20U6o2YpxJM02PbyW
+xPFsqa7lzw1uKA2wDrXKUXt4FMMgL3/7FFXhEZn91QqhngLjYl/rNUssuHLoPj1P
+rCy7Lobio3aP5ZMqz6WryFyNSwb/EkaseMsUBzXgqd+L6a8VTxaJW732jcZZroiF
+DsGJ6x9nxUWO/203Nit4ZoORUSs9/1F3dmKh7Gc+PoGD4FapUB8fepmrY7+EF3fx
+DTvf95xhszWYijqy7DwaNz9+j5LP2RIUZNoQAhVB/0/E6xyjyfqZ90bp4RjZsbgy
+LcsUDFDYg2WD7rlcz8sFWkz6GZdr1l0T08JcVLwyc6B49fFtHsufpaafItzRUZ6C
+eWRgKRM+o/1Pcmqr4tTluCRVLERLiohEnMqE0yo7AgMBAAGjQjBAMA8GA1UdEwEB
+/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBSCIS1mxteg4BXrzkwJ
+d8RgnlRuAzANBgkqhkiG9w0BAQsFAAOCAgEAYN4auOfyYILVAzOBywaK8SJJ6ejq
+kX/GM15oGQOGO0MBzwdw5AgeZYWR5hEit/UCI46uuR59H35s5r0l1ZUa8gWmr4UC
+b6741jH/JclKyMeKqdmfS0mbEVeZkkMR3rYzpMzXjWR91M08KCy0mpbqTfXERMQl
+qiCA2ClV9+BB/AYm/7k29UMUA2Z44RGx2iBfRgB4ACGlHgAoYXhvqAEBj500mv/0
+OJD7uNGzcgbJceaBxXntC6Z58hMLnPddDnskk7RI24Zf3lCGeOdA5jGokHZwYa+c
+NywRtYK3qq4kNFtyDGkNzVmf9nGvnAvRCjj5BiKDUyUM/FHE5r7iOZULJK2v0ZXk
+ltd0ZGtxTgI8qoXzIKNDOXZbbFD+mpwUHmUUihW9o4JFWklWatKcsWMy5WHgUyIO
+pwpJ6st+H6jiYoD2EEVSmAYY3qXNL3+q1Ok+CHLsIwMCPKaq2LxndD0UF/tUSxfj
+03k9bWtJySgOLnRQvwzZRjoQhsmnP+mg7H/rpXdYaXHmgwo38oZJar55CJD2AhZk
+PuXaTH4MNMn5X7azKFGnpyuqSfqNZSlO42sTp5SjLVFteAxEy9/eCG/Oo2Sr05WE
+1LlSVHJ7liXMvGnjSG4N0MedJ5qq+BOS3R7fY581qRY27Iy4g/Q9iY/NtBde17MX
+QRBdJ3NghVdJIgc=
+-----END CERTIFICATE-----
diff --git a/src/pip/_vendor/certifi/core.py b/src/pip/_vendor/certifi/core.py
index 56b52a3c8f4..8987449f6b5 100644
--- a/src/pip/_vendor/certifi/core.py
+++ b/src/pip/_vendor/certifi/core.py
@@ -9,7 +9,36 @@
import os
try:
- from importlib.resources import read_text
+ from importlib.resources import path as get_path, read_text
+
+ _CACERT_CTX = None
+ _CACERT_PATH = None
+
+ def where():
+ # This is slightly terrible, but we want to delay extracting the file
+ # in cases where we're inside of a zipimport situation until someone
+ # actually calls where(), but we don't want to re-extract the file
+ # on every call of where(), so we'll do it once then store it in a
+ # global variable.
+ global _CACERT_CTX
+ global _CACERT_PATH
+ if _CACERT_PATH is None:
+ # This is slightly janky, the importlib.resources API wants you to
+ # manage the cleanup of this file, so it doesn't actually return a
+ # path, it returns a context manager that will give you the path
+ # when you enter it and will do any cleanup when you leave it. In
+ # the common case of not needing a temporary file, it will just
+ # return the file system location and the __exit__() is a no-op.
+ #
+ # We also have to hold onto the actual context manager, because
+ # it will do the cleanup whenever it gets garbage collected, so
+ # we will also store that at the global level as well.
+ _CACERT_CTX = get_path("pip._vendor.certifi", "cacert.pem")
+ _CACERT_PATH = str(_CACERT_CTX.__enter__())
+
+ return _CACERT_PATH
+
+
except ImportError:
# This fallback will work for Python versions prior to 3.7 that lack the
# importlib.resources module but relies on the existing `where` function
@@ -19,11 +48,12 @@ def read_text(_module, _path, encoding="ascii"):
with open(where(), "r", encoding=encoding) as data:
return data.read()
+ # If we don't have importlib.resources, then we will just do the old logic
+ # of assuming we're on the filesystem and munge the path directly.
+ def where():
+ f = os.path.dirname(__file__)
-def where():
- f = os.path.dirname(__file__)
-
- return os.path.join(f, "cacert.pem")
+ return os.path.join(f, "cacert.pem")
def contents():
diff --git a/src/pip/_vendor/distlib/__init__.py b/src/pip/_vendor/distlib/__init__.py
index e19aebdc4cc..63d916e345b 100644
--- a/src/pip/_vendor/distlib/__init__.py
+++ b/src/pip/_vendor/distlib/__init__.py
@@ -6,7 +6,7 @@
#
import logging
-__version__ = '0.3.0'
+__version__ = '0.3.1'
class DistlibException(Exception):
pass
diff --git a/src/pip/_vendor/distlib/_backport/shutil.py b/src/pip/_vendor/distlib/_backport/shutil.py
index 159e49ee8c2..10ed3625397 100644
--- a/src/pip/_vendor/distlib/_backport/shutil.py
+++ b/src/pip/_vendor/distlib/_backport/shutil.py
@@ -14,7 +14,10 @@
import stat
from os.path import abspath
import fnmatch
-import collections
+try:
+ from collections.abc import Callable
+except ImportError:
+ from collections import Callable
import errno
from . import tarfile
@@ -528,7 +531,7 @@ def register_archive_format(name, function, extra_args=None, description=''):
"""
if extra_args is None:
extra_args = []
- if not isinstance(function, collections.Callable):
+ if not isinstance(function, Callable):
raise TypeError('The %s object is not callable' % function)
if not isinstance(extra_args, (tuple, list)):
raise TypeError('extra_args needs to be a sequence')
@@ -621,7 +624,7 @@ def _check_unpack_options(extensions, function, extra_args):
raise RegistryError(msg % (extension,
existing_extensions[extension]))
- if not isinstance(function, collections.Callable):
+ if not isinstance(function, Callable):
raise TypeError('The registered function must be a callable')
diff --git a/src/pip/_vendor/distlib/compat.py b/src/pip/_vendor/distlib/compat.py
index ff328c8ee49..c316fd973ad 100644
--- a/src/pip/_vendor/distlib/compat.py
+++ b/src/pip/_vendor/distlib/compat.py
@@ -319,7 +319,7 @@ def python_implementation():
try:
callable = callable
except NameError: # pragma: no cover
- from collections import Callable
+ from collections.abc import Callable
def callable(obj):
return isinstance(obj, Callable)
diff --git a/src/pip/_vendor/distlib/database.py b/src/pip/_vendor/distlib/database.py
index c16c0c8d9ed..0a90c300ba8 100644
--- a/src/pip/_vendor/distlib/database.py
+++ b/src/pip/_vendor/distlib/database.py
@@ -550,7 +550,7 @@ def __init__(self, path, metadata=None, env=None):
r = finder.find(WHEEL_METADATA_FILENAME)
# Temporary - for legacy support
if r is None:
- r = finder.find('METADATA')
+ r = finder.find(LEGACY_METADATA_FILENAME)
if r is None:
raise ValueError('no %s found in %s' % (METADATA_FILENAME,
path))
diff --git a/src/pip/_vendor/distlib/metadata.py b/src/pip/_vendor/distlib/metadata.py
index 2d61378e994..6d5e236090d 100644
--- a/src/pip/_vendor/distlib/metadata.py
+++ b/src/pip/_vendor/distlib/metadata.py
@@ -5,7 +5,7 @@
#
"""Implementation of the Metadata for Python packages PEPs.
-Supports all metadata formats (1.0, 1.1, 1.2, and 2.0 experimental).
+Supports all metadata formats (1.0, 1.1, 1.2, 1.3/2.1 and withdrawn 2.0).
"""
from __future__ import unicode_literals
@@ -194,38 +194,12 @@ def _has_marker(keys, markers):
return '2.0'
+# This follows the rules about transforming keys as described in
+# https://www.python.org/dev/peps/pep-0566/#id17
_ATTR2FIELD = {
- 'metadata_version': 'Metadata-Version',
- 'name': 'Name',
- 'version': 'Version',
- 'platform': 'Platform',
- 'supported_platform': 'Supported-Platform',
- 'summary': 'Summary',
- 'description': 'Description',
- 'keywords': 'Keywords',
- 'home_page': 'Home-page',
- 'author': 'Author',
- 'author_email': 'Author-email',
- 'maintainer': 'Maintainer',
- 'maintainer_email': 'Maintainer-email',
- 'license': 'License',
- 'classifier': 'Classifier',
- 'download_url': 'Download-URL',
- 'obsoletes_dist': 'Obsoletes-Dist',
- 'provides_dist': 'Provides-Dist',
- 'requires_dist': 'Requires-Dist',
- 'setup_requires_dist': 'Setup-Requires-Dist',
- 'requires_python': 'Requires-Python',
- 'requires_external': 'Requires-External',
- 'requires': 'Requires',
- 'provides': 'Provides',
- 'obsoletes': 'Obsoletes',
- 'project_url': 'Project-URL',
- 'private_version': 'Private-Version',
- 'obsoleted_by': 'Obsoleted-By',
- 'extension': 'Extension',
- 'provides_extra': 'Provides-Extra',
+ name.lower().replace("-", "_"): name for name in _ALL_FIELDS
}
+_FIELD2ATTR = {field: attr for attr, field in _ATTR2FIELD.items()}
_PREDICATE_FIELDS = ('Requires-Dist', 'Obsoletes-Dist', 'Provides-Dist')
_VERSIONS_FIELDS = ('Requires-Python',)
@@ -262,7 +236,7 @@ def _get_name_and_version(name, version, for_filename=False):
class LegacyMetadata(object):
"""The legacy metadata of a release.
- Supports versions 1.0, 1.1 and 1.2 (auto-detected). You can
+ Supports versions 1.0, 1.1, 1.2, 2.0 and 1.3/2.1 (auto-detected). You can
instantiate the class with one of these arguments (or none):
- *path*, the path to a metadata file
- *fileobj* give a file-like object with metadata as content
@@ -381,6 +355,11 @@ def read_file(self, fileob):
value = msg[field]
if value is not None and value != 'UNKNOWN':
self.set(field, value)
+
+ # PEP 566 specifies that the body be used for the description, if
+ # available
+ body = msg.get_payload()
+ self["Description"] = body if body else self["Description"]
# logger.debug('Attempting to set metadata for %s', self)
# self.set_metadata_version()
@@ -567,57 +546,21 @@ def todict(self, skip_missing=False):
Field names will be converted to use the underscore-lowercase style
instead of hyphen-mixed case (i.e. home_page instead of Home-page).
+ This is as per https://www.python.org/dev/peps/pep-0566/#id17.
"""
self.set_metadata_version()
- mapping_1_0 = (
- ('metadata_version', 'Metadata-Version'),
- ('name', 'Name'),
- ('version', 'Version'),
- ('summary', 'Summary'),
- ('home_page', 'Home-page'),
- ('author', 'Author'),
- ('author_email', 'Author-email'),
- ('license', 'License'),
- ('description', 'Description'),
- ('keywords', 'Keywords'),
- ('platform', 'Platform'),
- ('classifiers', 'Classifier'),
- ('download_url', 'Download-URL'),
- )
+ fields = _version2fieldlist(self['Metadata-Version'])
data = {}
- for key, field_name in mapping_1_0:
+
+ for field_name in fields:
if not skip_missing or field_name in self._fields:
- data[key] = self[field_name]
-
- if self['Metadata-Version'] == '1.2':
- mapping_1_2 = (
- ('requires_dist', 'Requires-Dist'),
- ('requires_python', 'Requires-Python'),
- ('requires_external', 'Requires-External'),
- ('provides_dist', 'Provides-Dist'),
- ('obsoletes_dist', 'Obsoletes-Dist'),
- ('project_url', 'Project-URL'),
- ('maintainer', 'Maintainer'),
- ('maintainer_email', 'Maintainer-email'),
- )
- for key, field_name in mapping_1_2:
- if not skip_missing or field_name in self._fields:
- if key != 'project_url':
- data[key] = self[field_name]
- else:
- data[key] = [','.join(u) for u in self[field_name]]
-
- elif self['Metadata-Version'] == '1.1':
- mapping_1_1 = (
- ('provides', 'Provides'),
- ('requires', 'Requires'),
- ('obsoletes', 'Obsoletes'),
- )
- for key, field_name in mapping_1_1:
- if not skip_missing or field_name in self._fields:
+ key = _FIELD2ATTR[field_name]
+ if key != 'project_url':
data[key] = self[field_name]
+ else:
+ data[key] = [','.join(u) for u in self[field_name]]
return data
@@ -1003,10 +946,14 @@ def _from_legacy(self):
LEGACY_MAPPING = {
'name': 'Name',
'version': 'Version',
- 'license': 'License',
+ ('extensions', 'python.details', 'license'): 'License',
'summary': 'Summary',
'description': 'Description',
- 'classifiers': 'Classifier',
+ ('extensions', 'python.project', 'project_urls', 'Home'): 'Home-page',
+ ('extensions', 'python.project', 'contacts', 0, 'name'): 'Author',
+ ('extensions', 'python.project', 'contacts', 0, 'email'): 'Author-email',
+ 'source_url': 'Download-URL',
+ ('extensions', 'python.details', 'classifiers'): 'Classifier',
}
def _to_legacy(self):
@@ -1034,16 +981,29 @@ def process_entries(entries):
assert self._data and not self._legacy
result = LegacyMetadata()
nmd = self._data
+ # import pdb; pdb.set_trace()
for nk, ok in self.LEGACY_MAPPING.items():
- if nk in nmd:
- result[ok] = nmd[nk]
+ if not isinstance(nk, tuple):
+ if nk in nmd:
+ result[ok] = nmd[nk]
+ else:
+ d = nmd
+ found = True
+ for k in nk:
+ try:
+ d = d[k]
+ except (KeyError, IndexError):
+ found = False
+ break
+ if found:
+ result[ok] = d
r1 = process_entries(self.run_requires + self.meta_requires)
r2 = process_entries(self.build_requires + self.dev_requires)
if self.extras:
result['Provides-Extra'] = sorted(self.extras)
result['Requires-Dist'] = sorted(r1)
result['Setup-Requires-Dist'] = sorted(r2)
- # TODO: other fields such as contacts
+ # TODO: any other fields wanted
return result
def write(self, path=None, fileobj=None, legacy=False, skip_unknown=True):
diff --git a/src/pip/_vendor/distlib/scripts.py b/src/pip/_vendor/distlib/scripts.py
index 51859741867..03f8f21e0ff 100644
--- a/src/pip/_vendor/distlib/scripts.py
+++ b/src/pip/_vendor/distlib/scripts.py
@@ -48,7 +48,7 @@
'''
-def _enquote_executable(executable):
+def enquote_executable(executable):
if ' ' in executable:
# make sure we quote only the executable in case of env
# for example /usr/bin/env "/dir with spaces/bin/jython"
@@ -63,6 +63,8 @@ def _enquote_executable(executable):
executable = '"%s"' % executable
return executable
+# Keep the old name around (for now), as there is at least one project using it!
+_enquote_executable = enquote_executable
class ScriptMaker(object):
"""
@@ -88,6 +90,7 @@ def __init__(self, source_dir, target_dir, add_launchers=True,
self._is_nt = os.name == 'nt' or (
os.name == 'java' and os._name == 'nt')
+ self.version_info = sys.version_info
def _get_alternate_executable(self, executable, options):
if options.get('gui', False) and self._is_nt: # pragma: no cover
@@ -185,7 +188,7 @@ def _get_shebang(self, encoding, post_interp=b'', options=None):
# If the user didn't specify an executable, it may be necessary to
# cater for executable paths with spaces (not uncommon on Windows)
if enquote:
- executable = _enquote_executable(executable)
+ executable = enquote_executable(executable)
# Issue #51: don't use fsencode, since we later try to
# check that the shebang is decodable using utf-8.
executable = executable.encode('utf-8')
@@ -293,10 +296,10 @@ def _make_script(self, entry, filenames, options=None):
if '' in self.variants:
scriptnames.add(name)
if 'X' in self.variants:
- scriptnames.add('%s%s' % (name, sys.version_info[0]))
+ scriptnames.add('%s%s' % (name, self.version_info[0]))
if 'X.Y' in self.variants:
- scriptnames.add('%s-%s.%s' % (name, sys.version_info[0],
- sys.version_info[1]))
+ scriptnames.add('%s-%s.%s' % (name, self.version_info[0],
+ self.version_info[1]))
if options and options.get('gui', False):
ext = 'pyw'
else:
diff --git a/src/pip/_vendor/distlib/wheel.py b/src/pip/_vendor/distlib/wheel.py
index bd179383ac9..1e2c7a020c9 100644
--- a/src/pip/_vendor/distlib/wheel.py
+++ b/src/pip/_vendor/distlib/wheel.py
@@ -26,7 +26,8 @@
from . import __version__, DistlibException
from .compat import sysconfig, ZipFile, fsdecode, text_type, filter
from .database import InstalledDistribution
-from .metadata import Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME
+from .metadata import (Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME,
+ LEGACY_METADATA_FILENAME)
from .util import (FileOperator, convert_path, CSVReader, CSVWriter, Cache,
cached_property, get_cache_base, read_exports, tempdir)
from .version import NormalizedVersion, UnsupportedVersionError
@@ -221,10 +222,12 @@ def metadata(self):
wheel_metadata = self.get_wheel_metadata(zf)
wv = wheel_metadata['Wheel-Version'].split('.', 1)
file_version = tuple([int(i) for i in wv])
- if file_version < (1, 1):
- fns = [WHEEL_METADATA_FILENAME, METADATA_FILENAME, 'METADATA']
- else:
- fns = [WHEEL_METADATA_FILENAME, METADATA_FILENAME]
+ # if file_version < (1, 1):
+ # fns = [WHEEL_METADATA_FILENAME, METADATA_FILENAME,
+ # LEGACY_METADATA_FILENAME]
+ # else:
+ # fns = [WHEEL_METADATA_FILENAME, METADATA_FILENAME]
+ fns = [WHEEL_METADATA_FILENAME, LEGACY_METADATA_FILENAME]
result = None
for fn in fns:
try:
@@ -299,10 +302,9 @@ def get_hash(self, data, hash_kind=None):
return hash_kind, result
def write_record(self, records, record_path, base):
- records = list(records) # make a copy for sorting
+ records = list(records) # make a copy, as mutated
p = to_posix(os.path.relpath(record_path, base))
records.append((p, '', ''))
- records.sort()
with CSVWriter(record_path) as writer:
for row in records:
writer.writerow(row)
@@ -425,6 +427,18 @@ def build(self, paths, tags=None, wheel_version=None):
ap = to_posix(os.path.join(info_dir, 'WHEEL'))
archive_paths.append((ap, p))
+ # sort the entries by archive path. Not needed by any spec, but it
+ # keeps the archive listing and RECORD tidier than they would otherwise
+ # be. Use the number of path segments to keep directory entries together,
+ # and keep the dist-info stuff at the end.
+ def sorter(t):
+ ap = t[0]
+ n = ap.count('/')
+ if '.dist-info' in ap:
+ n += 10000
+ return (n, ap)
+ archive_paths = sorted(archive_paths, key=sorter)
+
# Now, at last, RECORD.
# Paths in here are archive paths - nothing else makes sense.
self.write_records((distinfo, info_dir), libdir, archive_paths)
@@ -476,7 +490,7 @@ def install(self, paths, maker, **kwargs):
data_dir = '%s.data' % name_ver
info_dir = '%s.dist-info' % name_ver
- metadata_name = posixpath.join(info_dir, METADATA_FILENAME)
+ metadata_name = posixpath.join(info_dir, LEGACY_METADATA_FILENAME)
wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
record_name = posixpath.join(info_dir, 'RECORD')
@@ -619,7 +633,7 @@ def install(self, paths, maker, **kwargs):
for v in epdata[k].values():
s = '%s:%s' % (v.prefix, v.suffix)
if v.flags:
- s += ' %s' % v.flags
+ s += ' [%s]' % ','.join(v.flags)
d[v.name] = s
except Exception:
logger.warning('Unable to read legacy script '
@@ -773,7 +787,7 @@ def verify(self):
data_dir = '%s.data' % name_ver
info_dir = '%s.dist-info' % name_ver
- metadata_name = posixpath.join(info_dir, METADATA_FILENAME)
+ metadata_name = posixpath.join(info_dir, LEGACY_METADATA_FILENAME)
wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
record_name = posixpath.join(info_dir, 'RECORD')
@@ -842,7 +856,7 @@ def update(self, modifier, dest_dir=None, **kwargs):
def get_version(path_map, info_dir):
version = path = None
- key = '%s/%s' % (info_dir, METADATA_FILENAME)
+ key = '%s/%s' % (info_dir, LEGACY_METADATA_FILENAME)
if key not in path_map:
key = '%s/PKG-INFO' % info_dir
if key in path_map:
@@ -868,7 +882,7 @@ def update_version(version, path):
if updated:
md = Metadata(path=path)
md.version = updated
- legacy = not path.endswith(METADATA_FILENAME)
+ legacy = path.endswith(LEGACY_METADATA_FILENAME)
md.write(path=path, legacy=legacy)
logger.debug('Version updated from %r to %r', version,
updated)
diff --git a/src/pip/_vendor/html5lib/__init__.py b/src/pip/_vendor/html5lib/__init__.py
index 049123492e2..d1d82f157f8 100644
--- a/src/pip/_vendor/html5lib/__init__.py
+++ b/src/pip/_vendor/html5lib/__init__.py
@@ -32,4 +32,4 @@
# this has to be at the top level, see how setup.py parses this
#: Distribution version number.
-__version__ = "1.0.1"
+__version__ = "1.1"
diff --git a/src/pip/_vendor/html5lib/_ihatexml.py b/src/pip/_vendor/html5lib/_ihatexml.py
index 4c77717bbc0..3ff803c1952 100644
--- a/src/pip/_vendor/html5lib/_ihatexml.py
+++ b/src/pip/_vendor/html5lib/_ihatexml.py
@@ -136,6 +136,7 @@ def normaliseCharList(charList):
i += j
return rv
+
# We don't really support characters above the BMP :(
max_unicode = int("FFFF", 16)
@@ -254,7 +255,7 @@ def toXmlName(self, name):
nameRest = name[1:]
m = nonXmlNameFirstBMPRegexp.match(nameFirst)
if m:
- warnings.warn("Coercing non-XML name", DataLossWarning)
+ warnings.warn("Coercing non-XML name: %s" % name, DataLossWarning)
nameFirstOutput = self.getReplacementCharacter(nameFirst)
else:
nameFirstOutput = nameFirst
@@ -262,7 +263,7 @@ def toXmlName(self, name):
nameRestOutput = nameRest
replaceChars = set(nonXmlNameBMPRegexp.findall(nameRest))
for char in replaceChars:
- warnings.warn("Coercing non-XML name", DataLossWarning)
+ warnings.warn("Coercing non-XML name: %s" % name, DataLossWarning)
replacement = self.getReplacementCharacter(char)
nameRestOutput = nameRestOutput.replace(char, replacement)
return nameFirstOutput + nameRestOutput
diff --git a/src/pip/_vendor/html5lib/_inputstream.py b/src/pip/_vendor/html5lib/_inputstream.py
index a65e55f64bf..e0bb37602c8 100644
--- a/src/pip/_vendor/html5lib/_inputstream.py
+++ b/src/pip/_vendor/html5lib/_inputstream.py
@@ -1,10 +1,11 @@
from __future__ import absolute_import, division, unicode_literals
-from pip._vendor.six import text_type, binary_type
+from pip._vendor.six import text_type
from pip._vendor.six.moves import http_client, urllib
import codecs
import re
+from io import BytesIO, StringIO
from pip._vendor import webencodings
@@ -12,13 +13,6 @@
from .constants import _ReparseException
from . import _utils
-from io import StringIO
-
-try:
- from io import BytesIO
-except ImportError:
- BytesIO = StringIO
-
# Non-unicode versions of constants for use in the pre-parser
spaceCharactersBytes = frozenset([item.encode("ascii") for item in spaceCharacters])
asciiLettersBytes = frozenset([item.encode("ascii") for item in asciiLetters])
@@ -40,13 +34,13 @@
else:
invalid_unicode_re = re.compile(invalid_unicode_no_surrogate)
-non_bmp_invalid_codepoints = set([0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
- 0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF,
- 0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE,
- 0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF,
- 0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
- 0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF,
- 0x10FFFE, 0x10FFFF])
+non_bmp_invalid_codepoints = {0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
+ 0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF,
+ 0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE,
+ 0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF,
+ 0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
+ 0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF,
+ 0x10FFFE, 0x10FFFF}
ascii_punctuation_re = re.compile("[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005C\u005B-\u0060\u007B-\u007E]")
@@ -367,7 +361,7 @@ def charsUntil(self, characters, opposite=False):
def unget(self, char):
# Only one character is allowed to be ungotten at once - it must
# be consumed again before any further call to unget
- if char is not None:
+ if char is not EOF:
if self.chunkOffset == 0:
# unget is called quite rarely, so it's a good idea to do
# more work here if it saves a bit of work in the frequently
@@ -449,7 +443,7 @@ def openStream(self, source):
try:
stream.seek(stream.tell())
- except: # pylint:disable=bare-except
+ except Exception:
stream = BufferedStream(stream)
return stream
@@ -461,7 +455,7 @@ def determineEncoding(self, chardet=True):
if charEncoding[0] is not None:
return charEncoding
- # If we've been overriden, we've been overriden
+ # If we've been overridden, we've been overridden
charEncoding = lookupEncoding(self.override_encoding), "certain"
if charEncoding[0] is not None:
return charEncoding
@@ -664,9 +658,7 @@ def matchBytes(self, bytes):
"""Look for a sequence of bytes at the start of a string. If the bytes
are found return True and advance the position to the byte after the
match. Otherwise return False and leave the position alone"""
- p = self.position
- data = self[p:p + len(bytes)]
- rv = data.startswith(bytes)
+ rv = self.startswith(bytes, self.position)
if rv:
self.position += len(bytes)
return rv
@@ -674,15 +666,11 @@ def matchBytes(self, bytes):
def jumpTo(self, bytes):
"""Look for the next sequence of bytes matching a given sequence. If
a match is found advance the position to the last byte of the match"""
- newPosition = self[self.position:].find(bytes)
- if newPosition > -1:
- # XXX: This is ugly, but I can't see a nicer way to fix this.
- if self._position == -1:
- self._position = 0
- self._position += (newPosition + len(bytes) - 1)
- return True
- else:
+ try:
+ self._position = self.index(bytes, self.position) + len(bytes) - 1
+ except ValueError:
raise StopIteration
+ return True
class EncodingParser(object):
@@ -694,6 +682,9 @@ def __init__(self, data):
self.encoding = None
def getEncoding(self):
+ if b"= (3, 7):
+ attributeMap = dict
+else:
+ attributeMap = OrderedDict
+
class HTMLTokenizer(object):
""" This class takes care of tokenizing HTML.
@@ -228,6 +234,14 @@ def emitCurrentToken(self):
# Add token to the queue to be yielded
if (token["type"] in tagTokenTypes):
token["name"] = token["name"].translate(asciiUpper2Lower)
+ if token["type"] == tokenTypes["StartTag"]:
+ raw = token["data"]
+ data = attributeMap(raw)
+ if len(raw) > len(data):
+ # we had some duplicated attribute, fix so first wins
+ data.update(raw[::-1])
+ token["data"] = data
+
if token["type"] == tokenTypes["EndTag"]:
if token["data"]:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
diff --git a/src/pip/_vendor/html5lib/_trie/__init__.py b/src/pip/_vendor/html5lib/_trie/__init__.py
index a5ba4bf123a..07bad5d31c1 100644
--- a/src/pip/_vendor/html5lib/_trie/__init__.py
+++ b/src/pip/_vendor/html5lib/_trie/__init__.py
@@ -1,14 +1,5 @@
from __future__ import absolute_import, division, unicode_literals
-from .py import Trie as PyTrie
+from .py import Trie
-Trie = PyTrie
-
-# pylint:disable=wrong-import-position
-try:
- from .datrie import Trie as DATrie
-except ImportError:
- pass
-else:
- Trie = DATrie
-# pylint:enable=wrong-import-position
+__all__ = ["Trie"]
diff --git a/src/pip/_vendor/html5lib/_trie/datrie.py b/src/pip/_vendor/html5lib/_trie/datrie.py
deleted file mode 100644
index e2e5f86621c..00000000000
--- a/src/pip/_vendor/html5lib/_trie/datrie.py
+++ /dev/null
@@ -1,44 +0,0 @@
-from __future__ import absolute_import, division, unicode_literals
-
-from datrie import Trie as DATrie
-from pip._vendor.six import text_type
-
-from ._base import Trie as ABCTrie
-
-
-class Trie(ABCTrie):
- def __init__(self, data):
- chars = set()
- for key in data.keys():
- if not isinstance(key, text_type):
- raise TypeError("All keys must be strings")
- for char in key:
- chars.add(char)
-
- self._data = DATrie("".join(chars))
- for key, value in data.items():
- self._data[key] = value
-
- def __contains__(self, key):
- return key in self._data
-
- def __len__(self):
- return len(self._data)
-
- def __iter__(self):
- raise NotImplementedError()
-
- def __getitem__(self, key):
- return self._data[key]
-
- def keys(self, prefix=None):
- return self._data.keys(prefix)
-
- def has_keys_with_prefix(self, prefix):
- return self._data.has_keys_with_prefix(prefix)
-
- def longest_prefix(self, prefix):
- return self._data.longest_prefix(prefix)
-
- def longest_prefix_item(self, prefix):
- return self._data.longest_prefix_item(prefix)
diff --git a/src/pip/_vendor/html5lib/_utils.py b/src/pip/_vendor/html5lib/_utils.py
index 96eb17b2c17..d7c4926afce 100644
--- a/src/pip/_vendor/html5lib/_utils.py
+++ b/src/pip/_vendor/html5lib/_utils.py
@@ -2,6 +2,11 @@
from types import ModuleType
+try:
+ from collections.abc import Mapping
+except ImportError:
+ from collections import Mapping
+
from pip._vendor.six import text_type, PY3
if PY3:
@@ -30,7 +35,7 @@
# We need this with u"" because of http://bugs.jython.org/issue2039
_x = eval('u"\\uD800"') # pylint:disable=eval-used
assert isinstance(_x, text_type)
-except: # pylint:disable=bare-except
+except Exception:
supports_lone_surrogates = False
else:
supports_lone_surrogates = True
@@ -50,9 +55,6 @@ class MethodDispatcher(dict):
"""
def __init__(self, items=()):
- # Using _dictEntries instead of directly assigning to self is about
- # twice as fast. Please do careful performance testing before changing
- # anything here.
_dictEntries = []
for name, value in items:
if isinstance(name, (list, tuple, frozenset, set)):
@@ -67,6 +69,36 @@ def __init__(self, items=()):
def __getitem__(self, key):
return dict.get(self, key, self.default)
+ def __get__(self, instance, owner=None):
+ return BoundMethodDispatcher(instance, self)
+
+
+class BoundMethodDispatcher(Mapping):
+ """Wraps a MethodDispatcher, binding its return values to `instance`"""
+ def __init__(self, instance, dispatcher):
+ self.instance = instance
+ self.dispatcher = dispatcher
+
+ def __getitem__(self, key):
+ # see https://docs.python.org/3/reference/datamodel.html#object.__get__
+ # on a function, __get__ is used to bind a function to an instance as a bound method
+ return self.dispatcher[key].__get__(self.instance)
+
+ def get(self, key, default):
+ if key in self.dispatcher:
+ return self[key]
+ else:
+ return default
+
+ def __iter__(self):
+ return iter(self.dispatcher)
+
+ def __len__(self):
+ return len(self.dispatcher)
+
+ def __contains__(self, key):
+ return key in self.dispatcher
+
# Some utility functions to deal with weirdness around UCS2 vs UCS4
# python builds
diff --git a/src/pip/_vendor/html5lib/constants.py b/src/pip/_vendor/html5lib/constants.py
index 1ff804190cd..fe3e237cd8a 100644
--- a/src/pip/_vendor/html5lib/constants.py
+++ b/src/pip/_vendor/html5lib/constants.py
@@ -519,8 +519,8 @@
"xmlns:xlink": ("xmlns", "xlink", namespaces["xmlns"])
}
-unadjustForeignAttributes = dict([((ns, local), qname) for qname, (prefix, local, ns) in
- adjustForeignAttributes.items()])
+unadjustForeignAttributes = {(ns, local): qname for qname, (prefix, local, ns) in
+ adjustForeignAttributes.items()}
spaceCharacters = frozenset([
"\t",
@@ -544,8 +544,7 @@
digits = frozenset(string.digits)
hexDigits = frozenset(string.hexdigits)
-asciiUpper2Lower = dict([(ord(c), ord(c.lower()))
- for c in string.ascii_uppercase])
+asciiUpper2Lower = {ord(c): ord(c.lower()) for c in string.ascii_uppercase}
# Heading elements need to be ordered
headingElements = (
@@ -2934,7 +2933,7 @@
tokenTypes["EmptyTag"]])
-prefixes = dict([(v, k) for k, v in namespaces.items()])
+prefixes = {v: k for k, v in namespaces.items()}
prefixes["http://www.w3.org/1998/Math/MathML"] = "math"
diff --git a/src/pip/_vendor/html5lib/filters/sanitizer.py b/src/pip/_vendor/html5lib/filters/sanitizer.py
index af8e77b81e3..aa7431d1312 100644
--- a/src/pip/_vendor/html5lib/filters/sanitizer.py
+++ b/src/pip/_vendor/html5lib/filters/sanitizer.py
@@ -1,6 +1,15 @@
+"""Deprecated from html5lib 1.1.
+
+See `here `_ for
+information about its deprecation; `Bleach `_
+is recommended as a replacement. Please let us know in the aforementioned issue
+if Bleach is unsuitable for your needs.
+
+"""
from __future__ import absolute_import, division, unicode_literals
import re
+import warnings
from xml.sax.saxutils import escape, unescape
from pip._vendor.six.moves import urllib_parse as urlparse
@@ -11,6 +20,14 @@
__all__ = ["Filter"]
+_deprecation_msg = (
+ "html5lib's sanitizer is deprecated; see " +
+ "https://github.com/html5lib/html5lib-python/issues/443 and please let " +
+ "us know if Bleach is unsuitable for your needs"
+)
+
+warnings.warn(_deprecation_msg, DeprecationWarning)
+
allowed_elements = frozenset((
(namespaces['html'], 'a'),
(namespaces['html'], 'abbr'),
@@ -750,6 +767,9 @@ def __init__(self,
"""
super(Filter, self).__init__(source)
+
+ warnings.warn(_deprecation_msg, DeprecationWarning)
+
self.allowed_elements = allowed_elements
self.allowed_attributes = allowed_attributes
self.allowed_css_properties = allowed_css_properties
diff --git a/src/pip/_vendor/html5lib/html5parser.py b/src/pip/_vendor/html5lib/html5parser.py
index ae41a133761..d06784f3d25 100644
--- a/src/pip/_vendor/html5lib/html5parser.py
+++ b/src/pip/_vendor/html5lib/html5parser.py
@@ -2,7 +2,6 @@
from pip._vendor.six import with_metaclass, viewkeys
import types
-from collections import OrderedDict
from . import _inputstream
from . import _tokenizer
@@ -119,8 +118,8 @@ def __init__(self, tree=None, strict=False, namespaceHTMLElements=True, debug=Fa
self.tree = tree(namespaceHTMLElements)
self.errors = []
- self.phases = dict([(name, cls(self, self.tree)) for name, cls in
- getPhases(debug).items()])
+ self.phases = {name: cls(self, self.tree) for name, cls in
+ getPhases(debug).items()}
def _parse(self, stream, innerHTML=False, container="div", scripting=False, **kwargs):
@@ -202,7 +201,7 @@ def mainLoop(self):
DoctypeToken = tokenTypes["Doctype"]
ParseErrorToken = tokenTypes["ParseError"]
- for token in self.normalizedTokens():
+ for token in self.tokenizer:
prev_token = None
new_token = token
while new_token is not None:
@@ -260,10 +259,6 @@ def mainLoop(self):
if reprocess:
assert self.phase not in phases
- def normalizedTokens(self):
- for token in self.tokenizer:
- yield self.normalizeToken(token)
-
def parse(self, stream, *args, **kwargs):
"""Parse a HTML document into a well-formed tree
@@ -325,17 +320,6 @@ def parseError(self, errorcode="XXX-undefined-error", datavars=None):
if self.strict:
raise ParseError(E[errorcode] % datavars)
- def normalizeToken(self, token):
- # HTML5 specific normalizations to the token stream
- if token["type"] == tokenTypes["StartTag"]:
- raw = token["data"]
- token["data"] = OrderedDict(raw)
- if len(raw) > len(token["data"]):
- # we had some duplicated attribute, fix so first wins
- token["data"].update(raw[::-1])
-
- return token
-
def adjustMathMLAttributes(self, token):
adjust_attributes(token, adjustMathMLAttributes)
@@ -413,16 +397,12 @@ def parseRCDataRawtext(self, token, contentType):
def getPhases(debug):
def log(function):
"""Logger that records which phase processes each token"""
- type_names = dict((value, key) for key, value in
- tokenTypes.items())
+ type_names = {value: key for key, value in tokenTypes.items()}
def wrapped(self, *args, **kwargs):
if function.__name__.startswith("process") and len(args) > 0:
token = args[0]
- try:
- info = {"type": type_names[token['type']]}
- except:
- raise
+ info = {"type": type_names[token['type']]}
if token['type'] in tagTokenTypes:
info["name"] = token['name']
@@ -446,10 +426,13 @@ def getMetaclass(use_metaclass, metaclass_func):
class Phase(with_metaclass(getMetaclass(debug, log))):
"""Base class for helper object that implements each phase of processing
"""
+ __slots__ = ("parser", "tree", "__startTagCache", "__endTagCache")
def __init__(self, parser, tree):
self.parser = parser
self.tree = tree
+ self.__startTagCache = {}
+ self.__endTagCache = {}
def processEOF(self):
raise NotImplementedError
@@ -469,7 +452,21 @@ def processSpaceCharacters(self, token):
self.tree.insertText(token["data"])
def processStartTag(self, token):
- return self.startTagHandler[token["name"]](token)
+ # Note the caching is done here rather than BoundMethodDispatcher as doing it there
+ # requires a circular reference to the Phase, and this ends up with a significant
+ # (CPython 2.7, 3.8) GC cost when parsing many short inputs
+ name = token["name"]
+ # In Py2, using `in` is quicker in general than try/except KeyError
+ # In Py3, `in` is quicker when there are few cache hits (typically short inputs)
+ if name in self.__startTagCache:
+ func = self.__startTagCache[name]
+ else:
+ func = self.__startTagCache[name] = self.startTagHandler[name]
+ # bound the cache size in case we get loads of unknown tags
+ while len(self.__startTagCache) > len(self.startTagHandler) * 1.1:
+ # this makes the eviction policy random on Py < 3.7 and FIFO >= 3.7
+ self.__startTagCache.pop(next(iter(self.__startTagCache)))
+ return func(token)
def startTagHtml(self, token):
if not self.parser.firstStartTag and token["name"] == "html":
@@ -482,9 +479,25 @@ def startTagHtml(self, token):
self.parser.firstStartTag = False
def processEndTag(self, token):
- return self.endTagHandler[token["name"]](token)
+ # Note the caching is done here rather than BoundMethodDispatcher as doing it there
+ # requires a circular reference to the Phase, and this ends up with a significant
+ # (CPython 2.7, 3.8) GC cost when parsing many short inputs
+ name = token["name"]
+ # In Py2, using `in` is quicker in general than try/except KeyError
+ # In Py3, `in` is quicker when there are few cache hits (typically short inputs)
+ if name in self.__endTagCache:
+ func = self.__endTagCache[name]
+ else:
+ func = self.__endTagCache[name] = self.endTagHandler[name]
+ # bound the cache size in case we get loads of unknown tags
+ while len(self.__endTagCache) > len(self.endTagHandler) * 1.1:
+ # this makes the eviction policy random on Py < 3.7 and FIFO >= 3.7
+ self.__endTagCache.pop(next(iter(self.__endTagCache)))
+ return func(token)
class InitialPhase(Phase):
+ __slots__ = tuple()
+
def processSpaceCharacters(self, token):
pass
@@ -613,6 +626,8 @@ def processEOF(self):
return True
class BeforeHtmlPhase(Phase):
+ __slots__ = tuple()
+
# helper methods
def insertHtmlElement(self):
self.tree.insertRoot(impliedTagToken("html", "StartTag"))
@@ -648,19 +663,7 @@ def processEndTag(self, token):
return token
class BeforeHeadPhase(Phase):
- def __init__(self, parser, tree):
- Phase.__init__(self, parser, tree)
-
- self.startTagHandler = _utils.MethodDispatcher([
- ("html", self.startTagHtml),
- ("head", self.startTagHead)
- ])
- self.startTagHandler.default = self.startTagOther
-
- self.endTagHandler = _utils.MethodDispatcher([
- (("head", "body", "html", "br"), self.endTagImplyHead)
- ])
- self.endTagHandler.default = self.endTagOther
+ __slots__ = tuple()
def processEOF(self):
self.startTagHead(impliedTagToken("head", "StartTag"))
@@ -693,28 +696,19 @@ def endTagOther(self, token):
self.parser.parseError("end-tag-after-implied-root",
{"name": token["name"]})
+ startTagHandler = _utils.MethodDispatcher([
+ ("html", startTagHtml),
+ ("head", startTagHead)
+ ])
+ startTagHandler.default = startTagOther
+
+ endTagHandler = _utils.MethodDispatcher([
+ (("head", "body", "html", "br"), endTagImplyHead)
+ ])
+ endTagHandler.default = endTagOther
+
class InHeadPhase(Phase):
- def __init__(self, parser, tree):
- Phase.__init__(self, parser, tree)
-
- self.startTagHandler = _utils.MethodDispatcher([
- ("html", self.startTagHtml),
- ("title", self.startTagTitle),
- (("noframes", "style"), self.startTagNoFramesStyle),
- ("noscript", self.startTagNoscript),
- ("script", self.startTagScript),
- (("base", "basefont", "bgsound", "command", "link"),
- self.startTagBaseLinkCommand),
- ("meta", self.startTagMeta),
- ("head", self.startTagHead)
- ])
- self.startTagHandler.default = self.startTagOther
-
- self.endTagHandler = _utils.MethodDispatcher([
- ("head", self.endTagHead),
- (("br", "html", "body"), self.endTagHtmlBodyBr)
- ])
- self.endTagHandler.default = self.endTagOther
+ __slots__ = tuple()
# the real thing
def processEOF(self):
@@ -796,22 +790,27 @@ def endTagOther(self, token):
def anythingElse(self):
self.endTagHead(impliedTagToken("head"))
- class InHeadNoscriptPhase(Phase):
- def __init__(self, parser, tree):
- Phase.__init__(self, parser, tree)
+ startTagHandler = _utils.MethodDispatcher([
+ ("html", startTagHtml),
+ ("title", startTagTitle),
+ (("noframes", "style"), startTagNoFramesStyle),
+ ("noscript", startTagNoscript),
+ ("script", startTagScript),
+ (("base", "basefont", "bgsound", "command", "link"),
+ startTagBaseLinkCommand),
+ ("meta", startTagMeta),
+ ("head", startTagHead)
+ ])
+ startTagHandler.default = startTagOther
+
+ endTagHandler = _utils.MethodDispatcher([
+ ("head", endTagHead),
+ (("br", "html", "body"), endTagHtmlBodyBr)
+ ])
+ endTagHandler.default = endTagOther
- self.startTagHandler = _utils.MethodDispatcher([
- ("html", self.startTagHtml),
- (("basefont", "bgsound", "link", "meta", "noframes", "style"), self.startTagBaseLinkCommand),
- (("head", "noscript"), self.startTagHeadNoscript),
- ])
- self.startTagHandler.default = self.startTagOther
-
- self.endTagHandler = _utils.MethodDispatcher([
- ("noscript", self.endTagNoscript),
- ("br", self.endTagBr),
- ])
- self.endTagHandler.default = self.endTagOther
+ class InHeadNoscriptPhase(Phase):
+ __slots__ = tuple()
def processEOF(self):
self.parser.parseError("eof-in-head-noscript")
@@ -860,23 +859,21 @@ def anythingElse(self):
# Caller must raise parse error first!
self.endTagNoscript(impliedTagToken("noscript"))
+ startTagHandler = _utils.MethodDispatcher([
+ ("html", startTagHtml),
+ (("basefont", "bgsound", "link", "meta", "noframes", "style"), startTagBaseLinkCommand),
+ (("head", "noscript"), startTagHeadNoscript),
+ ])
+ startTagHandler.default = startTagOther
+
+ endTagHandler = _utils.MethodDispatcher([
+ ("noscript", endTagNoscript),
+ ("br", endTagBr),
+ ])
+ endTagHandler.default = endTagOther
+
class AfterHeadPhase(Phase):
- def __init__(self, parser, tree):
- Phase.__init__(self, parser, tree)
-
- self.startTagHandler = _utils.MethodDispatcher([
- ("html", self.startTagHtml),
- ("body", self.startTagBody),
- ("frameset", self.startTagFrameset),
- (("base", "basefont", "bgsound", "link", "meta", "noframes", "script",
- "style", "title"),
- self.startTagFromHead),
- ("head", self.startTagHead)
- ])
- self.startTagHandler.default = self.startTagOther
- self.endTagHandler = _utils.MethodDispatcher([(("body", "html", "br"),
- self.endTagHtmlBodyBr)])
- self.endTagHandler.default = self.endTagOther
+ __slots__ = tuple()
def processEOF(self):
self.anythingElse()
@@ -927,80 +924,30 @@ def anythingElse(self):
self.parser.phase = self.parser.phases["inBody"]
self.parser.framesetOK = True
+ startTagHandler = _utils.MethodDispatcher([
+ ("html", startTagHtml),
+ ("body", startTagBody),
+ ("frameset", startTagFrameset),
+ (("base", "basefont", "bgsound", "link", "meta", "noframes", "script",
+ "style", "title"),
+ startTagFromHead),
+ ("head", startTagHead)
+ ])
+ startTagHandler.default = startTagOther
+ endTagHandler = _utils.MethodDispatcher([(("body", "html", "br"),
+ endTagHtmlBodyBr)])
+ endTagHandler.default = endTagOther
+
class InBodyPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#parsing-main-inbody
# the really-really-really-very crazy mode
- def __init__(self, parser, tree):
- Phase.__init__(self, parser, tree)
+ __slots__ = ("processSpaceCharacters",)
+ def __init__(self, *args, **kwargs):
+ super(InBodyPhase, self).__init__(*args, **kwargs)
# Set this to the default handler
self.processSpaceCharacters = self.processSpaceCharactersNonPre
- self.startTagHandler = _utils.MethodDispatcher([
- ("html", self.startTagHtml),
- (("base", "basefont", "bgsound", "command", "link", "meta",
- "script", "style", "title"),
- self.startTagProcessInHead),
- ("body", self.startTagBody),
- ("frameset", self.startTagFrameset),
- (("address", "article", "aside", "blockquote", "center", "details",
- "dir", "div", "dl", "fieldset", "figcaption", "figure",
- "footer", "header", "hgroup", "main", "menu", "nav", "ol", "p",
- "section", "summary", "ul"),
- self.startTagCloseP),
- (headingElements, self.startTagHeading),
- (("pre", "listing"), self.startTagPreListing),
- ("form", self.startTagForm),
- (("li", "dd", "dt"), self.startTagListItem),
- ("plaintext", self.startTagPlaintext),
- ("a", self.startTagA),
- (("b", "big", "code", "em", "font", "i", "s", "small", "strike",
- "strong", "tt", "u"), self.startTagFormatting),
- ("nobr", self.startTagNobr),
- ("button", self.startTagButton),
- (("applet", "marquee", "object"), self.startTagAppletMarqueeObject),
- ("xmp", self.startTagXmp),
- ("table", self.startTagTable),
- (("area", "br", "embed", "img", "keygen", "wbr"),
- self.startTagVoidFormatting),
- (("param", "source", "track"), self.startTagParamSource),
- ("input", self.startTagInput),
- ("hr", self.startTagHr),
- ("image", self.startTagImage),
- ("isindex", self.startTagIsIndex),
- ("textarea", self.startTagTextarea),
- ("iframe", self.startTagIFrame),
- ("noscript", self.startTagNoscript),
- (("noembed", "noframes"), self.startTagRawtext),
- ("select", self.startTagSelect),
- (("rp", "rt"), self.startTagRpRt),
- (("option", "optgroup"), self.startTagOpt),
- (("math"), self.startTagMath),
- (("svg"), self.startTagSvg),
- (("caption", "col", "colgroup", "frame", "head",
- "tbody", "td", "tfoot", "th", "thead",
- "tr"), self.startTagMisplaced)
- ])
- self.startTagHandler.default = self.startTagOther
-
- self.endTagHandler = _utils.MethodDispatcher([
- ("body", self.endTagBody),
- ("html", self.endTagHtml),
- (("address", "article", "aside", "blockquote", "button", "center",
- "details", "dialog", "dir", "div", "dl", "fieldset", "figcaption", "figure",
- "footer", "header", "hgroup", "listing", "main", "menu", "nav", "ol", "pre",
- "section", "summary", "ul"), self.endTagBlock),
- ("form", self.endTagForm),
- ("p", self.endTagP),
- (("dd", "dt", "li"), self.endTagListItem),
- (headingElements, self.endTagHeading),
- (("a", "b", "big", "code", "em", "font", "i", "nobr", "s", "small",
- "strike", "strong", "tt", "u"), self.endTagFormatting),
- (("applet", "marquee", "object"), self.endTagAppletMarqueeObject),
- ("br", self.endTagBr),
- ])
- self.endTagHandler.default = self.endTagOther
-
def isMatchingFormattingElement(self, node1, node2):
return (node1.name == node2.name and
node1.namespace == node2.namespace and
@@ -1650,14 +1597,73 @@ def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
break
+ startTagHandler = _utils.MethodDispatcher([
+ ("html", Phase.startTagHtml),
+ (("base", "basefont", "bgsound", "command", "link", "meta",
+ "script", "style", "title"),
+ startTagProcessInHead),
+ ("body", startTagBody),
+ ("frameset", startTagFrameset),
+ (("address", "article", "aside", "blockquote", "center", "details",
+ "dir", "div", "dl", "fieldset", "figcaption", "figure",
+ "footer", "header", "hgroup", "main", "menu", "nav", "ol", "p",
+ "section", "summary", "ul"),
+ startTagCloseP),
+ (headingElements, startTagHeading),
+ (("pre", "listing"), startTagPreListing),
+ ("form", startTagForm),
+ (("li", "dd", "dt"), startTagListItem),
+ ("plaintext", startTagPlaintext),
+ ("a", startTagA),
+ (("b", "big", "code", "em", "font", "i", "s", "small", "strike",
+ "strong", "tt", "u"), startTagFormatting),
+ ("nobr", startTagNobr),
+ ("button", startTagButton),
+ (("applet", "marquee", "object"), startTagAppletMarqueeObject),
+ ("xmp", startTagXmp),
+ ("table", startTagTable),
+ (("area", "br", "embed", "img", "keygen", "wbr"),
+ startTagVoidFormatting),
+ (("param", "source", "track"), startTagParamSource),
+ ("input", startTagInput),
+ ("hr", startTagHr),
+ ("image", startTagImage),
+ ("isindex", startTagIsIndex),
+ ("textarea", startTagTextarea),
+ ("iframe", startTagIFrame),
+ ("noscript", startTagNoscript),
+ (("noembed", "noframes"), startTagRawtext),
+ ("select", startTagSelect),
+ (("rp", "rt"), startTagRpRt),
+ (("option", "optgroup"), startTagOpt),
+ (("math"), startTagMath),
+ (("svg"), startTagSvg),
+ (("caption", "col", "colgroup", "frame", "head",
+ "tbody", "td", "tfoot", "th", "thead",
+ "tr"), startTagMisplaced)
+ ])
+ startTagHandler.default = startTagOther
+
+ endTagHandler = _utils.MethodDispatcher([
+ ("body", endTagBody),
+ ("html", endTagHtml),
+ (("address", "article", "aside", "blockquote", "button", "center",
+ "details", "dialog", "dir", "div", "dl", "fieldset", "figcaption", "figure",
+ "footer", "header", "hgroup", "listing", "main", "menu", "nav", "ol", "pre",
+ "section", "summary", "ul"), endTagBlock),
+ ("form", endTagForm),
+ ("p", endTagP),
+ (("dd", "dt", "li"), endTagListItem),
+ (headingElements, endTagHeading),
+ (("a", "b", "big", "code", "em", "font", "i", "nobr", "s", "small",
+ "strike", "strong", "tt", "u"), endTagFormatting),
+ (("applet", "marquee", "object"), endTagAppletMarqueeObject),
+ ("br", endTagBr),
+ ])
+ endTagHandler.default = endTagOther
+
class TextPhase(Phase):
- def __init__(self, parser, tree):
- Phase.__init__(self, parser, tree)
- self.startTagHandler = _utils.MethodDispatcher([])
- self.startTagHandler.default = self.startTagOther
- self.endTagHandler = _utils.MethodDispatcher([
- ("script", self.endTagScript)])
- self.endTagHandler.default = self.endTagOther
+ __slots__ = tuple()
def processCharacters(self, token):
self.tree.insertText(token["data"])
@@ -1683,30 +1689,15 @@ def endTagOther(self, token):
self.tree.openElements.pop()
self.parser.phase = self.parser.originalPhase
+ startTagHandler = _utils.MethodDispatcher([])
+ startTagHandler.default = startTagOther
+ endTagHandler = _utils.MethodDispatcher([
+ ("script", endTagScript)])
+ endTagHandler.default = endTagOther
+
class InTablePhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-table
- def __init__(self, parser, tree):
- Phase.__init__(self, parser, tree)
- self.startTagHandler = _utils.MethodDispatcher([
- ("html", self.startTagHtml),
- ("caption", self.startTagCaption),
- ("colgroup", self.startTagColgroup),
- ("col", self.startTagCol),
- (("tbody", "tfoot", "thead"), self.startTagRowGroup),
- (("td", "th", "tr"), self.startTagImplyTbody),
- ("table", self.startTagTable),
- (("style", "script"), self.startTagStyleScript),
- ("input", self.startTagInput),
- ("form", self.startTagForm)
- ])
- self.startTagHandler.default = self.startTagOther
-
- self.endTagHandler = _utils.MethodDispatcher([
- ("table", self.endTagTable),
- (("body", "caption", "col", "colgroup", "html", "tbody", "td",
- "tfoot", "th", "thead", "tr"), self.endTagIgnore)
- ])
- self.endTagHandler.default = self.endTagOther
+ __slots__ = tuple()
# helper methods
def clearStackToTableContext(self):
@@ -1828,9 +1819,32 @@ def endTagOther(self, token):
self.parser.phases["inBody"].processEndTag(token)
self.tree.insertFromTable = False
+ startTagHandler = _utils.MethodDispatcher([
+ ("html", Phase.startTagHtml),
+ ("caption", startTagCaption),
+ ("colgroup", startTagColgroup),
+ ("col", startTagCol),
+ (("tbody", "tfoot", "thead"), startTagRowGroup),
+ (("td", "th", "tr"), startTagImplyTbody),
+ ("table", startTagTable),
+ (("style", "script"), startTagStyleScript),
+ ("input", startTagInput),
+ ("form", startTagForm)
+ ])
+ startTagHandler.default = startTagOther
+
+ endTagHandler = _utils.MethodDispatcher([
+ ("table", endTagTable),
+ (("body", "caption", "col", "colgroup", "html", "tbody", "td",
+ "tfoot", "th", "thead", "tr"), endTagIgnore)
+ ])
+ endTagHandler.default = endTagOther
+
class InTableTextPhase(Phase):
- def __init__(self, parser, tree):
- Phase.__init__(self, parser, tree)
+ __slots__ = ("originalPhase", "characterTokens")
+
+ def __init__(self, *args, **kwargs):
+ super(InTableTextPhase, self).__init__(*args, **kwargs)
self.originalPhase = None
self.characterTokens = []
@@ -1875,23 +1889,7 @@ def processEndTag(self, token):
class InCaptionPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-caption
- def __init__(self, parser, tree):
- Phase.__init__(self, parser, tree)
-
- self.startTagHandler = _utils.MethodDispatcher([
- ("html", self.startTagHtml),
- (("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
- "thead", "tr"), self.startTagTableElement)
- ])
- self.startTagHandler.default = self.startTagOther
-
- self.endTagHandler = _utils.MethodDispatcher([
- ("caption", self.endTagCaption),
- ("table", self.endTagTable),
- (("body", "col", "colgroup", "html", "tbody", "td", "tfoot", "th",
- "thead", "tr"), self.endTagIgnore)
- ])
- self.endTagHandler.default = self.endTagOther
+ __slots__ = tuple()
def ignoreEndTagCaption(self):
return not self.tree.elementInScope("caption", variant="table")
@@ -1944,23 +1942,24 @@ def endTagIgnore(self, token):
def endTagOther(self, token):
return self.parser.phases["inBody"].processEndTag(token)
+ startTagHandler = _utils.MethodDispatcher([
+ ("html", Phase.startTagHtml),
+ (("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
+ "thead", "tr"), startTagTableElement)
+ ])
+ startTagHandler.default = startTagOther
+
+ endTagHandler = _utils.MethodDispatcher([
+ ("caption", endTagCaption),
+ ("table", endTagTable),
+ (("body", "col", "colgroup", "html", "tbody", "td", "tfoot", "th",
+ "thead", "tr"), endTagIgnore)
+ ])
+ endTagHandler.default = endTagOther
+
class InColumnGroupPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-column
-
- def __init__(self, parser, tree):
- Phase.__init__(self, parser, tree)
-
- self.startTagHandler = _utils.MethodDispatcher([
- ("html", self.startTagHtml),
- ("col", self.startTagCol)
- ])
- self.startTagHandler.default = self.startTagOther
-
- self.endTagHandler = _utils.MethodDispatcher([
- ("colgroup", self.endTagColgroup),
- ("col", self.endTagCol)
- ])
- self.endTagHandler.default = self.endTagOther
+ __slots__ = tuple()
def ignoreEndTagColgroup(self):
return self.tree.openElements[-1].name == "html"
@@ -2010,26 +2009,21 @@ def endTagOther(self, token):
if not ignoreEndTag:
return token
+ startTagHandler = _utils.MethodDispatcher([
+ ("html", Phase.startTagHtml),
+ ("col", startTagCol)
+ ])
+ startTagHandler.default = startTagOther
+
+ endTagHandler = _utils.MethodDispatcher([
+ ("colgroup", endTagColgroup),
+ ("col", endTagCol)
+ ])
+ endTagHandler.default = endTagOther
+
class InTableBodyPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-table0
- def __init__(self, parser, tree):
- Phase.__init__(self, parser, tree)
- self.startTagHandler = _utils.MethodDispatcher([
- ("html", self.startTagHtml),
- ("tr", self.startTagTr),
- (("td", "th"), self.startTagTableCell),
- (("caption", "col", "colgroup", "tbody", "tfoot", "thead"),
- self.startTagTableOther)
- ])
- self.startTagHandler.default = self.startTagOther
-
- self.endTagHandler = _utils.MethodDispatcher([
- (("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
- ("table", self.endTagTable),
- (("body", "caption", "col", "colgroup", "html", "td", "th",
- "tr"), self.endTagIgnore)
- ])
- self.endTagHandler.default = self.endTagOther
+ __slots__ = tuple()
# helper methods
def clearStackToTableBodyContext(self):
@@ -2108,26 +2102,26 @@ def endTagIgnore(self, token):
def endTagOther(self, token):
return self.parser.phases["inTable"].processEndTag(token)
+ startTagHandler = _utils.MethodDispatcher([
+ ("html", Phase.startTagHtml),
+ ("tr", startTagTr),
+ (("td", "th"), startTagTableCell),
+ (("caption", "col", "colgroup", "tbody", "tfoot", "thead"),
+ startTagTableOther)
+ ])
+ startTagHandler.default = startTagOther
+
+ endTagHandler = _utils.MethodDispatcher([
+ (("tbody", "tfoot", "thead"), endTagTableRowGroup),
+ ("table", endTagTable),
+ (("body", "caption", "col", "colgroup", "html", "td", "th",
+ "tr"), endTagIgnore)
+ ])
+ endTagHandler.default = endTagOther
+
class InRowPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-row
- def __init__(self, parser, tree):
- Phase.__init__(self, parser, tree)
- self.startTagHandler = _utils.MethodDispatcher([
- ("html", self.startTagHtml),
- (("td", "th"), self.startTagTableCell),
- (("caption", "col", "colgroup", "tbody", "tfoot", "thead",
- "tr"), self.startTagTableOther)
- ])
- self.startTagHandler.default = self.startTagOther
-
- self.endTagHandler = _utils.MethodDispatcher([
- ("tr", self.endTagTr),
- ("table", self.endTagTable),
- (("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
- (("body", "caption", "col", "colgroup", "html", "td", "th"),
- self.endTagIgnore)
- ])
- self.endTagHandler.default = self.endTagOther
+ __slots__ = tuple()
# helper methods (XXX unify this with other table helper methods)
def clearStackToTableRowContext(self):
@@ -2197,23 +2191,26 @@ def endTagIgnore(self, token):
def endTagOther(self, token):
return self.parser.phases["inTable"].processEndTag(token)
+ startTagHandler = _utils.MethodDispatcher([
+ ("html", Phase.startTagHtml),
+ (("td", "th"), startTagTableCell),
+ (("caption", "col", "colgroup", "tbody", "tfoot", "thead",
+ "tr"), startTagTableOther)
+ ])
+ startTagHandler.default = startTagOther
+
+ endTagHandler = _utils.MethodDispatcher([
+ ("tr", endTagTr),
+ ("table", endTagTable),
+ (("tbody", "tfoot", "thead"), endTagTableRowGroup),
+ (("body", "caption", "col", "colgroup", "html", "td", "th"),
+ endTagIgnore)
+ ])
+ endTagHandler.default = endTagOther
+
class InCellPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-cell
- def __init__(self, parser, tree):
- Phase.__init__(self, parser, tree)
- self.startTagHandler = _utils.MethodDispatcher([
- ("html", self.startTagHtml),
- (("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
- "thead", "tr"), self.startTagTableOther)
- ])
- self.startTagHandler.default = self.startTagOther
-
- self.endTagHandler = _utils.MethodDispatcher([
- (("td", "th"), self.endTagTableCell),
- (("body", "caption", "col", "colgroup", "html"), self.endTagIgnore),
- (("table", "tbody", "tfoot", "thead", "tr"), self.endTagImply)
- ])
- self.endTagHandler.default = self.endTagOther
+ __slots__ = tuple()
# helper
def closeCell(self):
@@ -2273,26 +2270,22 @@ def endTagImply(self, token):
def endTagOther(self, token):
return self.parser.phases["inBody"].processEndTag(token)
+ startTagHandler = _utils.MethodDispatcher([
+ ("html", Phase.startTagHtml),
+ (("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
+ "thead", "tr"), startTagTableOther)
+ ])
+ startTagHandler.default = startTagOther
+
+ endTagHandler = _utils.MethodDispatcher([
+ (("td", "th"), endTagTableCell),
+ (("body", "caption", "col", "colgroup", "html"), endTagIgnore),
+ (("table", "tbody", "tfoot", "thead", "tr"), endTagImply)
+ ])
+ endTagHandler.default = endTagOther
+
class InSelectPhase(Phase):
- def __init__(self, parser, tree):
- Phase.__init__(self, parser, tree)
-
- self.startTagHandler = _utils.MethodDispatcher([
- ("html", self.startTagHtml),
- ("option", self.startTagOption),
- ("optgroup", self.startTagOptgroup),
- ("select", self.startTagSelect),
- (("input", "keygen", "textarea"), self.startTagInput),
- ("script", self.startTagScript)
- ])
- self.startTagHandler.default = self.startTagOther
-
- self.endTagHandler = _utils.MethodDispatcher([
- ("option", self.endTagOption),
- ("optgroup", self.endTagOptgroup),
- ("select", self.endTagSelect)
- ])
- self.endTagHandler.default = self.endTagOther
+ __slots__ = tuple()
# http://www.whatwg.org/specs/web-apps/current-work/#in-select
def processEOF(self):
@@ -2373,21 +2366,25 @@ def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-in-select",
{"name": token["name"]})
- class InSelectInTablePhase(Phase):
- def __init__(self, parser, tree):
- Phase.__init__(self, parser, tree)
-
- self.startTagHandler = _utils.MethodDispatcher([
- (("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
- self.startTagTable)
- ])
- self.startTagHandler.default = self.startTagOther
+ startTagHandler = _utils.MethodDispatcher([
+ ("html", Phase.startTagHtml),
+ ("option", startTagOption),
+ ("optgroup", startTagOptgroup),
+ ("select", startTagSelect),
+ (("input", "keygen", "textarea"), startTagInput),
+ ("script", startTagScript)
+ ])
+ startTagHandler.default = startTagOther
+
+ endTagHandler = _utils.MethodDispatcher([
+ ("option", endTagOption),
+ ("optgroup", endTagOptgroup),
+ ("select", endTagSelect)
+ ])
+ endTagHandler.default = endTagOther
- self.endTagHandler = _utils.MethodDispatcher([
- (("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
- self.endTagTable)
- ])
- self.endTagHandler.default = self.endTagOther
+ class InSelectInTablePhase(Phase):
+ __slots__ = tuple()
def processEOF(self):
self.parser.phases["inSelect"].processEOF()
@@ -2412,7 +2409,21 @@ def endTagTable(self, token):
def endTagOther(self, token):
return self.parser.phases["inSelect"].processEndTag(token)
+ startTagHandler = _utils.MethodDispatcher([
+ (("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
+ startTagTable)
+ ])
+ startTagHandler.default = startTagOther
+
+ endTagHandler = _utils.MethodDispatcher([
+ (("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
+ endTagTable)
+ ])
+ endTagHandler.default = endTagOther
+
class InForeignContentPhase(Phase):
+ __slots__ = tuple()
+
breakoutElements = frozenset(["b", "big", "blockquote", "body", "br",
"center", "code", "dd", "div", "dl", "dt",
"em", "embed", "h1", "h2", "h3",
@@ -2422,9 +2433,6 @@ class InForeignContentPhase(Phase):
"span", "strong", "strike", "sub", "sup",
"table", "tt", "u", "ul", "var"])
- def __init__(self, parser, tree):
- Phase.__init__(self, parser, tree)
-
def adjustSVGTagNames(self, token):
replacements = {"altglyph": "altGlyph",
"altglyphdef": "altGlyphDef",
@@ -2478,7 +2486,7 @@ def processStartTag(self, token):
currentNode = self.tree.openElements[-1]
if (token["name"] in self.breakoutElements or
(token["name"] == "font" and
- set(token["data"].keys()) & set(["color", "face", "size"]))):
+ set(token["data"].keys()) & {"color", "face", "size"})):
self.parser.parseError("unexpected-html-element-in-foreign-content",
{"name": token["name"]})
while (self.tree.openElements[-1].namespace !=
@@ -2528,16 +2536,7 @@ def processEndTag(self, token):
return new_token
class AfterBodyPhase(Phase):
- def __init__(self, parser, tree):
- Phase.__init__(self, parser, tree)
-
- self.startTagHandler = _utils.MethodDispatcher([
- ("html", self.startTagHtml)
- ])
- self.startTagHandler.default = self.startTagOther
-
- self.endTagHandler = _utils.MethodDispatcher([("html", self.endTagHtml)])
- self.endTagHandler.default = self.endTagOther
+ __slots__ = tuple()
def processEOF(self):
# Stop parsing
@@ -2574,23 +2573,17 @@ def endTagOther(self, token):
self.parser.phase = self.parser.phases["inBody"]
return token
- class InFramesetPhase(Phase):
- # http://www.whatwg.org/specs/web-apps/current-work/#in-frameset
- def __init__(self, parser, tree):
- Phase.__init__(self, parser, tree)
+ startTagHandler = _utils.MethodDispatcher([
+ ("html", startTagHtml)
+ ])
+ startTagHandler.default = startTagOther
- self.startTagHandler = _utils.MethodDispatcher([
- ("html", self.startTagHtml),
- ("frameset", self.startTagFrameset),
- ("frame", self.startTagFrame),
- ("noframes", self.startTagNoframes)
- ])
- self.startTagHandler.default = self.startTagOther
+ endTagHandler = _utils.MethodDispatcher([("html", endTagHtml)])
+ endTagHandler.default = endTagOther
- self.endTagHandler = _utils.MethodDispatcher([
- ("frameset", self.endTagFrameset)
- ])
- self.endTagHandler.default = self.endTagOther
+ class InFramesetPhase(Phase):
+ # http://www.whatwg.org/specs/web-apps/current-work/#in-frameset
+ __slots__ = tuple()
def processEOF(self):
if self.tree.openElements[-1].name != "html":
@@ -2631,21 +2624,22 @@ def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-in-frameset",
{"name": token["name"]})
- class AfterFramesetPhase(Phase):
- # http://www.whatwg.org/specs/web-apps/current-work/#after3
- def __init__(self, parser, tree):
- Phase.__init__(self, parser, tree)
+ startTagHandler = _utils.MethodDispatcher([
+ ("html", Phase.startTagHtml),
+ ("frameset", startTagFrameset),
+ ("frame", startTagFrame),
+ ("noframes", startTagNoframes)
+ ])
+ startTagHandler.default = startTagOther
- self.startTagHandler = _utils.MethodDispatcher([
- ("html", self.startTagHtml),
- ("noframes", self.startTagNoframes)
- ])
- self.startTagHandler.default = self.startTagOther
+ endTagHandler = _utils.MethodDispatcher([
+ ("frameset", endTagFrameset)
+ ])
+ endTagHandler.default = endTagOther
- self.endTagHandler = _utils.MethodDispatcher([
- ("html", self.endTagHtml)
- ])
- self.endTagHandler.default = self.endTagOther
+ class AfterFramesetPhase(Phase):
+ # http://www.whatwg.org/specs/web-apps/current-work/#after3
+ __slots__ = tuple()
def processEOF(self):
# Stop parsing
@@ -2668,14 +2662,19 @@ def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-after-frameset",
{"name": token["name"]})
- class AfterAfterBodyPhase(Phase):
- def __init__(self, parser, tree):
- Phase.__init__(self, parser, tree)
+ startTagHandler = _utils.MethodDispatcher([
+ ("html", Phase.startTagHtml),
+ ("noframes", startTagNoframes)
+ ])
+ startTagHandler.default = startTagOther
- self.startTagHandler = _utils.MethodDispatcher([
- ("html", self.startTagHtml)
- ])
- self.startTagHandler.default = self.startTagOther
+ endTagHandler = _utils.MethodDispatcher([
+ ("html", endTagHtml)
+ ])
+ endTagHandler.default = endTagOther
+
+ class AfterAfterBodyPhase(Phase):
+ __slots__ = tuple()
def processEOF(self):
pass
@@ -2706,15 +2705,13 @@ def processEndTag(self, token):
self.parser.phase = self.parser.phases["inBody"]
return token
- class AfterAfterFramesetPhase(Phase):
- def __init__(self, parser, tree):
- Phase.__init__(self, parser, tree)
+ startTagHandler = _utils.MethodDispatcher([
+ ("html", startTagHtml)
+ ])
+ startTagHandler.default = startTagOther
- self.startTagHandler = _utils.MethodDispatcher([
- ("html", self.startTagHtml),
- ("noframes", self.startTagNoFrames)
- ])
- self.startTagHandler.default = self.startTagOther
+ class AfterAfterFramesetPhase(Phase):
+ __slots__ = tuple()
def processEOF(self):
pass
@@ -2741,6 +2738,13 @@ def startTagOther(self, token):
def processEndTag(self, token):
self.parser.parseError("expected-eof-but-got-end-tag",
{"name": token["name"]})
+
+ startTagHandler = _utils.MethodDispatcher([
+ ("html", startTagHtml),
+ ("noframes", startTagNoFrames)
+ ])
+ startTagHandler.default = startTagOther
+
# pylint:enable=unused-argument
return {
@@ -2774,8 +2778,8 @@ def processEndTag(self, token):
def adjust_attributes(token, replacements):
needs_adjustment = viewkeys(token['data']) & viewkeys(replacements)
if needs_adjustment:
- token['data'] = OrderedDict((replacements.get(k, k), v)
- for k, v in token['data'].items())
+ token['data'] = type(token['data'])((replacements.get(k, k), v)
+ for k, v in token['data'].items())
def impliedTagToken(name, type="EndTag", attributes=None,
diff --git a/src/pip/_vendor/html5lib/serializer.py b/src/pip/_vendor/html5lib/serializer.py
index 53f4d44c397..d5669d8c149 100644
--- a/src/pip/_vendor/html5lib/serializer.py
+++ b/src/pip/_vendor/html5lib/serializer.py
@@ -274,7 +274,7 @@ def serialize(self, treewalker, encoding=None):
if token["systemId"]:
if token["systemId"].find('"') >= 0:
if token["systemId"].find("'") >= 0:
- self.serializeError("System identifer contains both single and double quote characters")
+ self.serializeError("System identifier contains both single and double quote characters")
quote_char = "'"
else:
quote_char = '"'
diff --git a/src/pip/_vendor/html5lib/treebuilders/base.py b/src/pip/_vendor/html5lib/treebuilders/base.py
index 73973db51b8..965fce29d3b 100644
--- a/src/pip/_vendor/html5lib/treebuilders/base.py
+++ b/src/pip/_vendor/html5lib/treebuilders/base.py
@@ -10,9 +10,9 @@
listElementsMap = {
None: (frozenset(scopingElements), False),
- "button": (frozenset(scopingElements | set([(namespaces["html"], "button")])), False),
- "list": (frozenset(scopingElements | set([(namespaces["html"], "ol"),
- (namespaces["html"], "ul")])), False),
+ "button": (frozenset(scopingElements | {(namespaces["html"], "button")}), False),
+ "list": (frozenset(scopingElements | {(namespaces["html"], "ol"),
+ (namespaces["html"], "ul")}), False),
"table": (frozenset([(namespaces["html"], "html"),
(namespaces["html"], "table")]), False),
"select": (frozenset([(namespaces["html"], "optgroup"),
@@ -28,7 +28,7 @@ def __init__(self, name):
:arg name: The tag name associated with the node
"""
- # The tag name assocaited with the node
+ # The tag name associated with the node
self.name = name
# The parent of the current node (or None for the document node)
self.parent = None
diff --git a/src/pip/_vendor/html5lib/treebuilders/etree.py b/src/pip/_vendor/html5lib/treebuilders/etree.py
index 0dedf441643..ea92dc301fe 100644
--- a/src/pip/_vendor/html5lib/treebuilders/etree.py
+++ b/src/pip/_vendor/html5lib/treebuilders/etree.py
@@ -5,6 +5,8 @@
import re
+from copy import copy
+
from . import base
from .. import _ihatexml
from .. import constants
@@ -61,16 +63,17 @@ def _getAttributes(self):
return self._element.attrib
def _setAttributes(self, attributes):
- # Delete existing attributes first
- # XXX - there may be a better way to do this...
- for key in list(self._element.attrib.keys()):
- del self._element.attrib[key]
- for key, value in attributes.items():
- if isinstance(key, tuple):
- name = "{%s}%s" % (key[2], key[1])
- else:
- name = key
- self._element.set(name, value)
+ el_attrib = self._element.attrib
+ el_attrib.clear()
+ if attributes:
+ # calling .items _always_ allocates, and the above truthy check is cheaper than the
+ # allocation on average
+ for key, value in attributes.items():
+ if isinstance(key, tuple):
+ name = "{%s}%s" % (key[2], key[1])
+ else:
+ name = key
+ el_attrib[name] = value
attributes = property(_getAttributes, _setAttributes)
@@ -129,8 +132,8 @@ def insertText(self, data, insertBefore=None):
def cloneNode(self):
element = type(self)(self.name, self.namespace)
- for name, value in self.attributes.items():
- element.attributes[name] = value
+ if self._element.attrib:
+ element._element.attrib = copy(self._element.attrib)
return element
def reparentChildren(self, newParent):
diff --git a/src/pip/_vendor/html5lib/treebuilders/etree_lxml.py b/src/pip/_vendor/html5lib/treebuilders/etree_lxml.py
index ca12a99cccf..f037759f42e 100644
--- a/src/pip/_vendor/html5lib/treebuilders/etree_lxml.py
+++ b/src/pip/_vendor/html5lib/treebuilders/etree_lxml.py
@@ -16,6 +16,11 @@
import re
import sys
+try:
+ from collections.abc import MutableMapping
+except ImportError:
+ from collections import MutableMapping
+
from . import base
from ..constants import DataLossWarning
from .. import constants
@@ -23,6 +28,7 @@
from .. import _ihatexml
import lxml.etree as etree
+from pip._vendor.six import PY3, binary_type
fullTree = True
@@ -44,7 +50,11 @@ def __init__(self):
self._childNodes = []
def appendChild(self, element):
- self._elementTree.getroot().addnext(element._element)
+ last = self._elementTree.getroot()
+ for last in self._elementTree.getroot().itersiblings():
+ pass
+
+ last.addnext(element._element)
def _getChildNodes(self):
return self._childNodes
@@ -185,26 +195,37 @@ def __init__(self, namespaceHTMLElements, fullTree=False):
infosetFilter = self.infosetFilter = _ihatexml.InfosetFilter(preventDoubleDashComments=True)
self.namespaceHTMLElements = namespaceHTMLElements
- class Attributes(dict):
- def __init__(self, element, value=None):
- if value is None:
- value = {}
+ class Attributes(MutableMapping):
+ def __init__(self, element):
self._element = element
- dict.__init__(self, value) # pylint:disable=non-parent-init-called
- for key, value in self.items():
- if isinstance(key, tuple):
- name = "{%s}%s" % (key[2], infosetFilter.coerceAttribute(key[1]))
- else:
- name = infosetFilter.coerceAttribute(key)
- self._element._element.attrib[name] = value
- def __setitem__(self, key, value):
- dict.__setitem__(self, key, value)
+ def _coerceKey(self, key):
if isinstance(key, tuple):
name = "{%s}%s" % (key[2], infosetFilter.coerceAttribute(key[1]))
else:
name = infosetFilter.coerceAttribute(key)
- self._element._element.attrib[name] = value
+ return name
+
+ def __getitem__(self, key):
+ value = self._element._element.attrib[self._coerceKey(key)]
+ if not PY3 and isinstance(value, binary_type):
+ value = value.decode("ascii")
+ return value
+
+ def __setitem__(self, key, value):
+ self._element._element.attrib[self._coerceKey(key)] = value
+
+ def __delitem__(self, key):
+ del self._element._element.attrib[self._coerceKey(key)]
+
+ def __iter__(self):
+ return iter(self._element._element.attrib)
+
+ def __len__(self):
+ return len(self._element._element.attrib)
+
+ def clear(self):
+ return self._element._element.attrib.clear()
class Element(builder.Element):
def __init__(self, name, namespace):
@@ -225,8 +246,10 @@ def _getName(self):
def _getAttributes(self):
return self._attributes
- def _setAttributes(self, attributes):
- self._attributes = Attributes(self, attributes)
+ def _setAttributes(self, value):
+ attributes = self.attributes
+ attributes.clear()
+ attributes.update(value)
attributes = property(_getAttributes, _setAttributes)
@@ -234,8 +257,11 @@ def insertText(self, data, insertBefore=None):
data = infosetFilter.coerceCharacters(data)
builder.Element.insertText(self, data, insertBefore)
- def appendChild(self, child):
- builder.Element.appendChild(self, child)
+ def cloneNode(self):
+ element = type(self)(self.name, self.namespace)
+ if self._element.attrib:
+ element._element.attrib.update(self._element.attrib)
+ return element
class Comment(builder.Comment):
def __init__(self, data):
diff --git a/src/pip/_vendor/html5lib/treewalkers/__init__.py b/src/pip/_vendor/html5lib/treewalkers/__init__.py
index 9bec2076f3f..b2d3aac3137 100644
--- a/src/pip/_vendor/html5lib/treewalkers/__init__.py
+++ b/src/pip/_vendor/html5lib/treewalkers/__init__.py
@@ -2,10 +2,10 @@
tree, generating tokens identical to those produced by the tokenizer
module.
-To create a tree walker for a new type of tree, you need to do
+To create a tree walker for a new type of tree, you need to
implement a tree walker object (called TreeWalker by convention) that
-implements a 'serialize' method taking a tree as sole argument and
-returning an iterator generating tokens.
+implements a 'serialize' method which takes a tree as sole argument and
+returns an iterator which generates tokens.
"""
from __future__ import absolute_import, division, unicode_literals
diff --git a/src/pip/_vendor/html5lib/treewalkers/etree.py b/src/pip/_vendor/html5lib/treewalkers/etree.py
index 95fc0c17030..837b27ec486 100644
--- a/src/pip/_vendor/html5lib/treewalkers/etree.py
+++ b/src/pip/_vendor/html5lib/treewalkers/etree.py
@@ -127,4 +127,5 @@ def getParentNode(self, node):
return locals()
+
getETreeModule = moduleFactoryFactory(getETreeBuilder)
diff --git a/src/pip/_vendor/html5lib/treewalkers/etree_lxml.py b/src/pip/_vendor/html5lib/treewalkers/etree_lxml.py
index e81ddf33b2e..c56af390fe2 100644
--- a/src/pip/_vendor/html5lib/treewalkers/etree_lxml.py
+++ b/src/pip/_vendor/html5lib/treewalkers/etree_lxml.py
@@ -1,6 +1,8 @@
from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
+from collections import OrderedDict
+
from lxml import etree
from ..treebuilders.etree import tag_regexp
@@ -163,7 +165,7 @@ def getNodeDetails(self, node):
else:
namespace = None
tag = ensure_str(node.tag)
- attrs = {}
+ attrs = OrderedDict()
for name, value in list(node.attrib.items()):
name = ensure_str(name)
value = ensure_str(value)
diff --git a/src/pip/_vendor/idna/core.py b/src/pip/_vendor/idna/core.py
index 9c3bba2ad7d..41ec5c711d1 100644
--- a/src/pip/_vendor/idna/core.py
+++ b/src/pip/_vendor/idna/core.py
@@ -300,6 +300,8 @@ def ulabel(label):
label = label.lower()
if label.startswith(_alabel_prefix):
label = label[len(_alabel_prefix):]
+ if not label:
+ raise IDNAError('Malformed A-label, no Punycode eligible content found')
if label.decode('ascii')[-1] == '-':
raise IDNAError('A-label must not end with a hyphen')
else:
diff --git a/src/pip/_vendor/idna/idnadata.py b/src/pip/_vendor/idna/idnadata.py
index 2b81c522cf5..a284e4c84ac 100644
--- a/src/pip/_vendor/idna/idnadata.py
+++ b/src/pip/_vendor/idna/idnadata.py
@@ -1,6 +1,6 @@
# This file is automatically generated by tools/idna-data
-__version__ = "12.1.0"
+__version__ = "13.0.0"
scripts = {
'Greek': (
0x37000000374,
@@ -48,16 +48,18 @@
0x300700003008,
0x30210000302a,
0x30380000303c,
- 0x340000004db6,
- 0x4e0000009ff0,
+ 0x340000004dc0,
+ 0x4e0000009ffd,
0xf9000000fa6e,
0xfa700000fada,
- 0x200000002a6d7,
+ 0x16ff000016ff2,
+ 0x200000002a6de,
0x2a7000002b735,
0x2b7400002b81e,
0x2b8200002cea2,
0x2ceb00002ebe1,
0x2f8000002fa1e,
+ 0x300000003134b,
),
'Hebrew': (
0x591000005c8,
@@ -389,9 +391,9 @@
0x853: 68,
0x854: 82,
0x855: 68,
- 0x856: 85,
- 0x857: 85,
- 0x858: 85,
+ 0x856: 82,
+ 0x857: 82,
+ 0x858: 82,
0x860: 68,
0x861: 85,
0x862: 68,
@@ -432,6 +434,16 @@
0x8bb: 68,
0x8bc: 68,
0x8bd: 68,
+ 0x8be: 68,
+ 0x8bf: 68,
+ 0x8c0: 68,
+ 0x8c1: 68,
+ 0x8c2: 68,
+ 0x8c3: 68,
+ 0x8c4: 68,
+ 0x8c5: 68,
+ 0x8c6: 68,
+ 0x8c7: 68,
0x8e2: 85,
0x1806: 85,
0x1807: 68,
@@ -756,6 +768,34 @@
0x10f52: 68,
0x10f53: 68,
0x10f54: 82,
+ 0x10fb0: 68,
+ 0x10fb1: 85,
+ 0x10fb2: 68,
+ 0x10fb3: 68,
+ 0x10fb4: 82,
+ 0x10fb5: 82,
+ 0x10fb6: 82,
+ 0x10fb7: 85,
+ 0x10fb8: 68,
+ 0x10fb9: 82,
+ 0x10fba: 82,
+ 0x10fbb: 68,
+ 0x10fbc: 68,
+ 0x10fbd: 82,
+ 0x10fbe: 68,
+ 0x10fbf: 68,
+ 0x10fc0: 85,
+ 0x10fc1: 68,
+ 0x10fc2: 82,
+ 0x10fc3: 82,
+ 0x10fc4: 68,
+ 0x10fc5: 85,
+ 0x10fc6: 85,
+ 0x10fc7: 85,
+ 0x10fc8: 85,
+ 0x10fc9: 82,
+ 0x10fca: 68,
+ 0x10fcb: 76,
0x110bd: 85,
0x110cd: 85,
0x1e900: 68,
@@ -1129,7 +1169,7 @@
0x8400000085c,
0x8600000086b,
0x8a0000008b5,
- 0x8b6000008be,
+ 0x8b6000008c8,
0x8d3000008e2,
0x8e300000958,
0x96000000964,
@@ -1188,7 +1228,7 @@
0xb3c00000b45,
0xb4700000b49,
0xb4b00000b4e,
- 0xb5600000b58,
+ 0xb5500000b58,
0xb5f00000b64,
0xb6600000b70,
0xb7100000b72,
@@ -1233,8 +1273,7 @@
0xce000000ce4,
0xce600000cf0,
0xcf100000cf3,
- 0xd0000000d04,
- 0xd0500000d0d,
+ 0xd0000000d0d,
0xd0e00000d11,
0xd1200000d45,
0xd4600000d49,
@@ -1243,7 +1282,7 @@
0xd5f00000d64,
0xd6600000d70,
0xd7a00000d80,
- 0xd8200000d84,
+ 0xd8100000d84,
0xd8500000d97,
0xd9a00000db2,
0xdb300000dbc,
@@ -1358,6 +1397,7 @@
0x1a9000001a9a,
0x1aa700001aa8,
0x1ab000001abe,
+ 0x1abf00001ac1,
0x1b0000001b4c,
0x1b5000001b5a,
0x1b6b00001b74,
@@ -1609,10 +1649,10 @@
0x30a1000030fb,
0x30fc000030ff,
0x310500003130,
- 0x31a0000031bb,
+ 0x31a0000031c0,
0x31f000003200,
- 0x340000004db6,
- 0x4e0000009ff0,
+ 0x340000004dc0,
+ 0x4e0000009ffd,
0xa0000000a48d,
0xa4d00000a4fe,
0xa5000000a60d,
@@ -1727,8 +1767,11 @@
0xa7bd0000a7be,
0xa7bf0000a7c0,
0xa7c30000a7c4,
- 0xa7f70000a7f8,
+ 0xa7c80000a7c9,
+ 0xa7ca0000a7cb,
+ 0xa7f60000a7f8,
0xa7fa0000a828,
+ 0xa82c0000a82d,
0xa8400000a874,
0xa8800000a8c6,
0xa8d00000a8da,
@@ -1753,7 +1796,7 @@
0xab200000ab27,
0xab280000ab2f,
0xab300000ab5b,
- 0xab600000ab68,
+ 0xab600000ab6a,
0xabc00000abeb,
0xabec0000abee,
0xabf00000abfa,
@@ -1827,9 +1870,13 @@
0x10cc000010cf3,
0x10d0000010d28,
0x10d3000010d3a,
+ 0x10e8000010eaa,
+ 0x10eab00010ead,
+ 0x10eb000010eb2,
0x10f0000010f1d,
0x10f2700010f28,
0x10f3000010f51,
+ 0x10fb000010fc5,
0x10fe000010ff7,
0x1100000011047,
0x1106600011070,
@@ -1838,12 +1885,12 @@
0x110f0000110fa,
0x1110000011135,
0x1113600011140,
- 0x1114400011147,
+ 0x1114400011148,
0x1115000011174,
0x1117600011177,
0x11180000111c5,
0x111c9000111cd,
- 0x111d0000111db,
+ 0x111ce000111db,
0x111dc000111dd,
0x1120000011212,
0x1121300011238,
@@ -1872,7 +1919,7 @@
0x1137000011375,
0x114000001144b,
0x114500001145a,
- 0x1145e00011460,
+ 0x1145e00011462,
0x11480000114c6,
0x114c7000114c8,
0x114d0000114da,
@@ -1889,7 +1936,14 @@
0x117300001173a,
0x118000001183b,
0x118c0000118ea,
- 0x118ff00011900,
+ 0x118ff00011907,
+ 0x119090001190a,
+ 0x1190c00011914,
+ 0x1191500011917,
+ 0x1191800011936,
+ 0x1193700011939,
+ 0x1193b00011944,
+ 0x119500001195a,
0x119a0000119a8,
0x119aa000119d8,
0x119da000119e2,
@@ -1920,6 +1974,7 @@
0x11d9300011d99,
0x11da000011daa,
0x11ee000011ef7,
+ 0x11fb000011fb1,
0x120000001239a,
0x1248000012544,
0x130000001342f,
@@ -1939,9 +1994,11 @@
0x16f4f00016f88,
0x16f8f00016fa0,
0x16fe000016fe2,
- 0x16fe300016fe4,
+ 0x16fe300016fe5,
+ 0x16ff000016ff2,
0x17000000187f8,
- 0x1880000018af3,
+ 0x1880000018cd6,
+ 0x18d0000018d09,
0x1b0000001b11f,
0x1b1500001b153,
0x1b1640001b168,
@@ -1971,11 +2028,13 @@
0x1e8d00001e8d7,
0x1e9220001e94c,
0x1e9500001e95a,
- 0x200000002a6d7,
+ 0x1fbf00001fbfa,
+ 0x200000002a6de,
0x2a7000002b735,
0x2b7400002b81e,
0x2b8200002cea2,
0x2ceb00002ebe1,
+ 0x300000003134b,
),
'CONTEXTJ': (
0x200c0000200e,
diff --git a/src/pip/_vendor/idna/package_data.py b/src/pip/_vendor/idna/package_data.py
index b5d8216558a..ce1c521d23a 100644
--- a/src/pip/_vendor/idna/package_data.py
+++ b/src/pip/_vendor/idna/package_data.py
@@ -1,2 +1,2 @@
-__version__ = '2.9'
+__version__ = '2.10'
diff --git a/src/pip/_vendor/idna/uts46data.py b/src/pip/_vendor/idna/uts46data.py
index 2711136d7d2..3766dd49f6d 100644
--- a/src/pip/_vendor/idna/uts46data.py
+++ b/src/pip/_vendor/idna/uts46data.py
@@ -4,7 +4,7 @@
"""IDNA Mapping Table from UTS46."""
-__version__ = "12.1.0"
+__version__ = "13.0.0"
def _seg_0():
return [
(0x0, '3'),
@@ -1074,7 +1074,7 @@ def _seg_10():
(0x8A0, 'V'),
(0x8B5, 'X'),
(0x8B6, 'V'),
- (0x8BE, 'X'),
+ (0x8C8, 'X'),
(0x8D3, 'V'),
(0x8E2, 'X'),
(0x8E3, 'V'),
@@ -1205,7 +1205,7 @@ def _seg_11():
(0xB49, 'X'),
(0xB4B, 'V'),
(0xB4E, 'X'),
- (0xB56, 'V'),
+ (0xB55, 'V'),
(0xB58, 'X'),
(0xB5C, 'M', u'ଡ଼'),
(0xB5D, 'M', u'ଢ଼'),
@@ -1299,8 +1299,6 @@ def _seg_12():
(0xCF1, 'V'),
(0xCF3, 'X'),
(0xD00, 'V'),
- (0xD04, 'X'),
- (0xD05, 'V'),
(0xD0D, 'X'),
(0xD0E, 'V'),
(0xD11, 'X'),
@@ -1314,7 +1312,7 @@ def _seg_12():
(0xD64, 'X'),
(0xD66, 'V'),
(0xD80, 'X'),
- (0xD82, 'V'),
+ (0xD81, 'V'),
(0xD84, 'X'),
(0xD85, 'V'),
(0xD97, 'X'),
@@ -1355,12 +1353,12 @@ def _seg_12():
(0xEA5, 'V'),
(0xEA6, 'X'),
(0xEA7, 'V'),
+ (0xEB3, 'M', u'ໍາ'),
+ (0xEB4, 'V'),
]
def _seg_13():
return [
- (0xEB3, 'M', u'ໍາ'),
- (0xEB4, 'V'),
(0xEBE, 'X'),
(0xEC0, 'V'),
(0xEC5, 'X'),
@@ -1459,12 +1457,12 @@ def _seg_13():
(0x12C8, 'V'),
(0x12D7, 'X'),
(0x12D8, 'V'),
+ (0x1311, 'X'),
+ (0x1312, 'V'),
]
def _seg_14():
return [
- (0x1311, 'X'),
- (0x1312, 'V'),
(0x1316, 'X'),
(0x1318, 'V'),
(0x135B, 'X'),
@@ -1553,7 +1551,7 @@ def _seg_14():
(0x1AA0, 'V'),
(0x1AAE, 'X'),
(0x1AB0, 'V'),
- (0x1ABF, 'X'),
+ (0x1AC1, 'X'),
(0x1B00, 'V'),
(0x1B4C, 'X'),
(0x1B50, 'V'),
@@ -1563,12 +1561,12 @@ def _seg_14():
(0x1BFC, 'V'),
(0x1C38, 'X'),
(0x1C3B, 'V'),
+ (0x1C4A, 'X'),
+ (0x1C4D, 'V'),
]
def _seg_15():
return [
- (0x1C4A, 'X'),
- (0x1C4D, 'V'),
(0x1C80, 'M', u'в'),
(0x1C81, 'M', u'д'),
(0x1C82, 'M', u'о'),
@@ -1667,12 +1665,12 @@ def _seg_15():
(0x1D4E, 'V'),
(0x1D4F, 'M', u'k'),
(0x1D50, 'M', u'm'),
+ (0x1D51, 'M', u'ŋ'),
+ (0x1D52, 'M', u'o'),
]
def _seg_16():
return [
- (0x1D51, 'M', u'ŋ'),
- (0x1D52, 'M', u'o'),
(0x1D53, 'M', u'ɔ'),
(0x1D54, 'M', u'ᴖ'),
(0x1D55, 'M', u'ᴗ'),
@@ -1771,12 +1769,12 @@ def _seg_16():
(0x1E1C, 'M', u'ḝ'),
(0x1E1D, 'V'),
(0x1E1E, 'M', u'ḟ'),
+ (0x1E1F, 'V'),
+ (0x1E20, 'M', u'ḡ'),
]
def _seg_17():
return [
- (0x1E1F, 'V'),
- (0x1E20, 'M', u'ḡ'),
(0x1E21, 'V'),
(0x1E22, 'M', u'ḣ'),
(0x1E23, 'V'),
@@ -1875,12 +1873,12 @@ def _seg_17():
(0x1E80, 'M', u'ẁ'),
(0x1E81, 'V'),
(0x1E82, 'M', u'ẃ'),
+ (0x1E83, 'V'),
+ (0x1E84, 'M', u'ẅ'),
]
def _seg_18():
return [
- (0x1E83, 'V'),
- (0x1E84, 'M', u'ẅ'),
(0x1E85, 'V'),
(0x1E86, 'M', u'ẇ'),
(0x1E87, 'V'),
@@ -1979,12 +1977,12 @@ def _seg_18():
(0x1EE9, 'V'),
(0x1EEA, 'M', u'ừ'),
(0x1EEB, 'V'),
+ (0x1EEC, 'M', u'ử'),
+ (0x1EED, 'V'),
]
def _seg_19():
return [
- (0x1EEC, 'M', u'ử'),
- (0x1EED, 'V'),
(0x1EEE, 'M', u'ữ'),
(0x1EEF, 'V'),
(0x1EF0, 'M', u'ự'),
@@ -2083,12 +2081,12 @@ def _seg_19():
(0x1F80, 'M', u'ἀι'),
(0x1F81, 'M', u'ἁι'),
(0x1F82, 'M', u'ἂι'),
+ (0x1F83, 'M', u'ἃι'),
+ (0x1F84, 'M', u'ἄι'),
]
def _seg_20():
return [
- (0x1F83, 'M', u'ἃι'),
- (0x1F84, 'M', u'ἄι'),
(0x1F85, 'M', u'ἅι'),
(0x1F86, 'M', u'ἆι'),
(0x1F87, 'M', u'ἇι'),
@@ -2187,12 +2185,12 @@ def _seg_20():
(0x1FEE, '3', u' ̈́'),
(0x1FEF, '3', u'`'),
(0x1FF0, 'X'),
+ (0x1FF2, 'M', u'ὼι'),
+ (0x1FF3, 'M', u'ωι'),
]
def _seg_21():
return [
- (0x1FF2, 'M', u'ὼι'),
- (0x1FF3, 'M', u'ωι'),
(0x1FF4, 'M', u'ώι'),
(0x1FF5, 'X'),
(0x1FF6, 'V'),
@@ -2291,12 +2289,12 @@ def _seg_21():
(0x20C0, 'X'),
(0x20D0, 'V'),
(0x20F1, 'X'),
+ (0x2100, '3', u'a/c'),
+ (0x2101, '3', u'a/s'),
]
def _seg_22():
return [
- (0x2100, '3', u'a/c'),
- (0x2101, '3', u'a/s'),
(0x2102, 'M', u'c'),
(0x2103, 'M', u'°c'),
(0x2104, 'V'),
@@ -2395,12 +2393,12 @@ def _seg_22():
(0x2175, 'M', u'vi'),
(0x2176, 'M', u'vii'),
(0x2177, 'M', u'viii'),
+ (0x2178, 'M', u'ix'),
+ (0x2179, 'M', u'x'),
]
def _seg_23():
return [
- (0x2178, 'M', u'ix'),
- (0x2179, 'M', u'x'),
(0x217A, 'M', u'xi'),
(0x217B, 'M', u'xii'),
(0x217C, 'M', u'l'),
@@ -2499,12 +2497,12 @@ def _seg_23():
(0x24B5, '3', u'(z)'),
(0x24B6, 'M', u'a'),
(0x24B7, 'M', u'b'),
+ (0x24B8, 'M', u'c'),
+ (0x24B9, 'M', u'd'),
]
def _seg_24():
return [
- (0x24B8, 'M', u'c'),
- (0x24B9, 'M', u'd'),
(0x24BA, 'M', u'e'),
(0x24BB, 'M', u'f'),
(0x24BC, 'M', u'g'),
@@ -2566,7 +2564,7 @@ def _seg_24():
(0x2B74, 'X'),
(0x2B76, 'V'),
(0x2B96, 'X'),
- (0x2B98, 'V'),
+ (0x2B97, 'V'),
(0x2C00, 'M', u'ⰰ'),
(0x2C01, 'M', u'ⰱ'),
(0x2C02, 'M', u'ⰲ'),
@@ -2603,12 +2601,12 @@ def _seg_24():
(0x2C21, 'M', u'ⱑ'),
(0x2C22, 'M', u'ⱒ'),
(0x2C23, 'M', u'ⱓ'),
+ (0x2C24, 'M', u'ⱔ'),
+ (0x2C25, 'M', u'ⱕ'),
]
def _seg_25():
return [
- (0x2C24, 'M', u'ⱔ'),
- (0x2C25, 'M', u'ⱕ'),
(0x2C26, 'M', u'ⱖ'),
(0x2C27, 'M', u'ⱗ'),
(0x2C28, 'M', u'ⱘ'),
@@ -2707,12 +2705,12 @@ def _seg_25():
(0x2CBA, 'M', u'ⲻ'),
(0x2CBB, 'V'),
(0x2CBC, 'M', u'ⲽ'),
+ (0x2CBD, 'V'),
+ (0x2CBE, 'M', u'ⲿ'),
]
def _seg_26():
return [
- (0x2CBD, 'V'),
- (0x2CBE, 'M', u'ⲿ'),
(0x2CBF, 'V'),
(0x2CC0, 'M', u'ⳁ'),
(0x2CC1, 'V'),
@@ -2787,7 +2785,7 @@ def _seg_26():
(0x2DD8, 'V'),
(0x2DDF, 'X'),
(0x2DE0, 'V'),
- (0x2E50, 'X'),
+ (0x2E53, 'X'),
(0x2E80, 'V'),
(0x2E9A, 'X'),
(0x2E9B, 'V'),
@@ -2811,12 +2809,12 @@ def _seg_26():
(0x2F0D, 'M', u'冖'),
(0x2F0E, 'M', u'冫'),
(0x2F0F, 'M', u'几'),
+ (0x2F10, 'M', u'凵'),
+ (0x2F11, 'M', u'刀'),
]
def _seg_27():
return [
- (0x2F10, 'M', u'凵'),
- (0x2F11, 'M', u'刀'),
(0x2F12, 'M', u'力'),
(0x2F13, 'M', u'勹'),
(0x2F14, 'M', u'匕'),
@@ -2915,12 +2913,12 @@ def _seg_27():
(0x2F71, 'M', u'禸'),
(0x2F72, 'M', u'禾'),
(0x2F73, 'M', u'穴'),
+ (0x2F74, 'M', u'立'),
+ (0x2F75, 'M', u'竹'),
]
def _seg_28():
return [
- (0x2F74, 'M', u'立'),
- (0x2F75, 'M', u'竹'),
(0x2F76, 'M', u'米'),
(0x2F77, 'M', u'糸'),
(0x2F78, 'M', u'缶'),
@@ -3019,12 +3017,12 @@ def _seg_28():
(0x2FD5, 'M', u'龠'),
(0x2FD6, 'X'),
(0x3000, '3', u' '),
+ (0x3001, 'V'),
+ (0x3002, 'M', u'.'),
]
def _seg_29():
return [
- (0x3001, 'V'),
- (0x3002, 'M', u'.'),
(0x3003, 'V'),
(0x3036, 'M', u'〒'),
(0x3037, 'V'),
@@ -3123,12 +3121,12 @@ def _seg_29():
(0x317C, 'M', u'ᄯ'),
(0x317D, 'M', u'ᄲ'),
(0x317E, 'M', u'ᄶ'),
+ (0x317F, 'M', u'ᅀ'),
+ (0x3180, 'M', u'ᅇ'),
]
def _seg_30():
return [
- (0x317F, 'M', u'ᅀ'),
- (0x3180, 'M', u'ᅇ'),
(0x3181, 'M', u'ᅌ'),
(0x3182, 'M', u'ᇱ'),
(0x3183, 'M', u'ᇲ'),
@@ -3160,8 +3158,6 @@ def _seg_30():
(0x319E, 'M', u'地'),
(0x319F, 'M', u'人'),
(0x31A0, 'V'),
- (0x31BB, 'X'),
- (0x31C0, 'V'),
(0x31E4, 'X'),
(0x31F0, 'V'),
(0x3200, '3', u'(ᄀ)'),
@@ -3227,14 +3223,14 @@ def _seg_30():
(0x323C, '3', u'(監)'),
(0x323D, '3', u'(企)'),
(0x323E, '3', u'(資)'),
- ]
-
-def _seg_31():
- return [
(0x323F, '3', u'(協)'),
(0x3240, '3', u'(祭)'),
(0x3241, '3', u'(休)'),
(0x3242, '3', u'(自)'),
+ ]
+
+def _seg_31():
+ return [
(0x3243, '3', u'(至)'),
(0x3244, 'M', u'問'),
(0x3245, 'M', u'幼'),
@@ -3331,14 +3327,14 @@ def _seg_31():
(0x32A7, 'M', u'左'),
(0x32A8, 'M', u'右'),
(0x32A9, 'M', u'医'),
- ]
-
-def _seg_32():
- return [
(0x32AA, 'M', u'宗'),
(0x32AB, 'M', u'学'),
(0x32AC, 'M', u'監'),
(0x32AD, 'M', u'企'),
+ ]
+
+def _seg_32():
+ return [
(0x32AE, 'M', u'資'),
(0x32AF, 'M', u'協'),
(0x32B0, 'M', u'夜'),
@@ -3435,14 +3431,14 @@ def _seg_32():
(0x330B, 'M', u'カイリ'),
(0x330C, 'M', u'カラット'),
(0x330D, 'M', u'カロリー'),
- ]
-
-def _seg_33():
- return [
(0x330E, 'M', u'ガロン'),
(0x330F, 'M', u'ガンマ'),
(0x3310, 'M', u'ギガ'),
(0x3311, 'M', u'ギニー'),
+ ]
+
+def _seg_33():
+ return [
(0x3312, 'M', u'キュリー'),
(0x3313, 'M', u'ギルダー'),
(0x3314, 'M', u'キロ'),
@@ -3539,14 +3535,14 @@ def _seg_33():
(0x336F, 'M', u'23点'),
(0x3370, 'M', u'24点'),
(0x3371, 'M', u'hpa'),
- ]
-
-def _seg_34():
- return [
(0x3372, 'M', u'da'),
(0x3373, 'M', u'au'),
(0x3374, 'M', u'bar'),
(0x3375, 'M', u'ov'),
+ ]
+
+def _seg_34():
+ return [
(0x3376, 'M', u'pc'),
(0x3377, 'M', u'dm'),
(0x3378, 'M', u'dm2'),
@@ -3643,14 +3639,14 @@ def _seg_34():
(0x33D3, 'M', u'lx'),
(0x33D4, 'M', u'mb'),
(0x33D5, 'M', u'mil'),
- ]
-
-def _seg_35():
- return [
(0x33D6, 'M', u'mol'),
(0x33D7, 'M', u'ph'),
(0x33D8, 'X'),
(0x33D9, 'M', u'ppm'),
+ ]
+
+def _seg_35():
+ return [
(0x33DA, 'M', u'pr'),
(0x33DB, 'M', u'sr'),
(0x33DC, 'M', u'sv'),
@@ -3690,9 +3686,7 @@ def _seg_35():
(0x33FE, 'M', u'31日'),
(0x33FF, 'M', u'gal'),
(0x3400, 'V'),
- (0x4DB6, 'X'),
- (0x4DC0, 'V'),
- (0x9FF0, 'X'),
+ (0x9FFD, 'X'),
(0xA000, 'V'),
(0xA48D, 'X'),
(0xA490, 'V'),
@@ -3747,16 +3741,16 @@ def _seg_35():
(0xA66D, 'V'),
(0xA680, 'M', u'ꚁ'),
(0xA681, 'V'),
- ]
-
-def _seg_36():
- return [
(0xA682, 'M', u'ꚃ'),
(0xA683, 'V'),
(0xA684, 'M', u'ꚅ'),
(0xA685, 'V'),
(0xA686, 'M', u'ꚇ'),
(0xA687, 'V'),
+ ]
+
+def _seg_36():
+ return [
(0xA688, 'M', u'ꚉ'),
(0xA689, 'V'),
(0xA68A, 'M', u'ꚋ'),
@@ -3851,16 +3845,16 @@ def _seg_36():
(0xA766, 'M', u'ꝧ'),
(0xA767, 'V'),
(0xA768, 'M', u'ꝩ'),
- ]
-
-def _seg_37():
- return [
(0xA769, 'V'),
(0xA76A, 'M', u'ꝫ'),
(0xA76B, 'V'),
(0xA76C, 'M', u'ꝭ'),
(0xA76D, 'V'),
(0xA76E, 'M', u'ꝯ'),
+ ]
+
+def _seg_37():
+ return [
(0xA76F, 'V'),
(0xA770, 'M', u'ꝯ'),
(0xA771, 'V'),
@@ -3935,12 +3929,17 @@ def _seg_37():
(0xA7C4, 'M', u'ꞔ'),
(0xA7C5, 'M', u'ʂ'),
(0xA7C6, 'M', u'ᶎ'),
- (0xA7C7, 'X'),
- (0xA7F7, 'V'),
+ (0xA7C7, 'M', u'ꟈ'),
+ (0xA7C8, 'V'),
+ (0xA7C9, 'M', u'ꟊ'),
+ (0xA7CA, 'V'),
+ (0xA7CB, 'X'),
+ (0xA7F5, 'M', u'ꟶ'),
+ (0xA7F6, 'V'),
(0xA7F8, 'M', u'ħ'),
(0xA7F9, 'M', u'œ'),
(0xA7FA, 'V'),
- (0xA82C, 'X'),
+ (0xA82D, 'X'),
(0xA830, 'V'),
(0xA83A, 'X'),
(0xA840, 'V'),
@@ -3955,11 +3954,11 @@ def _seg_37():
(0xA97D, 'X'),
(0xA980, 'V'),
(0xA9CE, 'X'),
+ (0xA9CF, 'V'),
]
def _seg_38():
return [
- (0xA9CF, 'V'),
(0xA9DA, 'X'),
(0xA9DE, 'V'),
(0xA9FF, 'X'),
@@ -3989,7 +3988,9 @@ def _seg_38():
(0xAB5E, 'M', u'ɫ'),
(0xAB5F, 'M', u'ꭒ'),
(0xAB60, 'V'),
- (0xAB68, 'X'),
+ (0xAB69, 'M', u'ʍ'),
+ (0xAB6A, 'V'),
+ (0xAB6C, 'X'),
(0xAB70, 'M', u'Ꭰ'),
(0xAB71, 'M', u'Ꭱ'),
(0xAB72, 'M', u'Ꭲ'),
@@ -4058,11 +4059,11 @@ def _seg_38():
(0xABB1, 'M', u'Ꮱ'),
(0xABB2, 'M', u'Ꮲ'),
(0xABB3, 'M', u'Ꮳ'),
- (0xABB4, 'M', u'Ꮴ'),
]
def _seg_39():
return [
+ (0xABB4, 'M', u'Ꮴ'),
(0xABB5, 'M', u'Ꮵ'),
(0xABB6, 'M', u'Ꮶ'),
(0xABB7, 'M', u'Ꮷ'),
@@ -4162,11 +4163,11 @@ def _seg_39():
(0xF94C, 'M', u'樓'),
(0xF94D, 'M', u'淚'),
(0xF94E, 'M', u'漏'),
- (0xF94F, 'M', u'累'),
]
def _seg_40():
return [
+ (0xF94F, 'M', u'累'),
(0xF950, 'M', u'縷'),
(0xF951, 'M', u'陋'),
(0xF952, 'M', u'勒'),
@@ -4266,11 +4267,11 @@ def _seg_40():
(0xF9B0, 'M', u'聆'),
(0xF9B1, 'M', u'鈴'),
(0xF9B2, 'M', u'零'),
- (0xF9B3, 'M', u'靈'),
]
def _seg_41():
return [
+ (0xF9B3, 'M', u'靈'),
(0xF9B4, 'M', u'領'),
(0xF9B5, 'M', u'例'),
(0xF9B6, 'M', u'禮'),
@@ -4370,11 +4371,11 @@ def _seg_41():
(0xFA16, 'M', u'猪'),
(0xFA17, 'M', u'益'),
(0xFA18, 'M', u'礼'),
- (0xFA19, 'M', u'神'),
]
def _seg_42():
return [
+ (0xFA19, 'M', u'神'),
(0xFA1A, 'M', u'祥'),
(0xFA1B, 'M', u'福'),
(0xFA1C, 'M', u'靖'),
@@ -4474,11 +4475,11 @@ def _seg_42():
(0xFA7F, 'M', u'奔'),
(0xFA80, 'M', u'婢'),
(0xFA81, 'M', u'嬨'),
- (0xFA82, 'M', u'廒'),
]
def _seg_43():
return [
+ (0xFA82, 'M', u'廒'),
(0xFA83, 'M', u'廙'),
(0xFA84, 'M', u'彩'),
(0xFA85, 'M', u'徭'),
@@ -4578,11 +4579,11 @@ def _seg_43():
(0xFB14, 'M', u'մե'),
(0xFB15, 'M', u'մի'),
(0xFB16, 'M', u'վն'),
- (0xFB17, 'M', u'մխ'),
]
def _seg_44():
return [
+ (0xFB17, 'M', u'մխ'),
(0xFB18, 'X'),
(0xFB1D, 'M', u'יִ'),
(0xFB1E, 'V'),
@@ -4682,11 +4683,11 @@ def _seg_44():
(0xFBEE, 'M', u'ئو'),
(0xFBF0, 'M', u'ئۇ'),
(0xFBF2, 'M', u'ئۆ'),
- (0xFBF4, 'M', u'ئۈ'),
]
def _seg_45():
return [
+ (0xFBF4, 'M', u'ئۈ'),
(0xFBF6, 'M', u'ئې'),
(0xFBF9, 'M', u'ئى'),
(0xFBFC, 'M', u'ی'),
@@ -4786,11 +4787,11 @@ def _seg_45():
(0xFC5D, 'M', u'ىٰ'),
(0xFC5E, '3', u' ٌّ'),
(0xFC5F, '3', u' ٍّ'),
- (0xFC60, '3', u' َّ'),
]
def _seg_46():
return [
+ (0xFC60, '3', u' َّ'),
(0xFC61, '3', u' ُّ'),
(0xFC62, '3', u' ِّ'),
(0xFC63, '3', u' ّٰ'),
@@ -4890,11 +4891,11 @@ def _seg_46():
(0xFCC1, 'M', u'فم'),
(0xFCC2, 'M', u'قح'),
(0xFCC3, 'M', u'قم'),
- (0xFCC4, 'M', u'كج'),
]
def _seg_47():
return [
+ (0xFCC4, 'M', u'كج'),
(0xFCC5, 'M', u'كح'),
(0xFCC6, 'M', u'كخ'),
(0xFCC7, 'M', u'كل'),
@@ -4994,11 +4995,11 @@ def _seg_47():
(0xFD25, 'M', u'شج'),
(0xFD26, 'M', u'شح'),
(0xFD27, 'M', u'شخ'),
- (0xFD28, 'M', u'شم'),
]
def _seg_48():
return [
+ (0xFD28, 'M', u'شم'),
(0xFD29, 'M', u'شر'),
(0xFD2A, 'M', u'سر'),
(0xFD2B, 'M', u'صر'),
@@ -5098,11 +5099,11 @@ def _seg_48():
(0xFDAC, 'M', u'لجي'),
(0xFDAD, 'M', u'لمي'),
(0xFDAE, 'M', u'يحي'),
- (0xFDAF, 'M', u'يجي'),
]
def _seg_49():
return [
+ (0xFDAF, 'M', u'يجي'),
(0xFDB0, 'M', u'يمي'),
(0xFDB1, 'M', u'ممي'),
(0xFDB2, 'M', u'قمي'),
@@ -5202,11 +5203,11 @@ def _seg_49():
(0xFE64, '3', u'<'),
(0xFE65, '3', u'>'),
(0xFE66, '3', u'='),
- (0xFE67, 'X'),
]
def _seg_50():
return [
+ (0xFE67, 'X'),
(0xFE68, '3', u'\\'),
(0xFE69, '3', u'$'),
(0xFE6A, '3', u'%'),
@@ -5306,11 +5307,11 @@ def _seg_50():
(0xFF21, 'M', u'a'),
(0xFF22, 'M', u'b'),
(0xFF23, 'M', u'c'),
- (0xFF24, 'M', u'd'),
]
def _seg_51():
return [
+ (0xFF24, 'M', u'd'),
(0xFF25, 'M', u'e'),
(0xFF26, 'M', u'f'),
(0xFF27, 'M', u'g'),
@@ -5410,11 +5411,11 @@ def _seg_51():
(0xFF85, 'M', u'ナ'),
(0xFF86, 'M', u'ニ'),
(0xFF87, 'M', u'ヌ'),
- (0xFF88, 'M', u'ネ'),
]
def _seg_52():
return [
+ (0xFF88, 'M', u'ネ'),
(0xFF89, 'M', u'ノ'),
(0xFF8A, 'M', u'ハ'),
(0xFF8B, 'M', u'ヒ'),
@@ -5514,11 +5515,11 @@ def _seg_52():
(0x10000, 'V'),
(0x1000C, 'X'),
(0x1000D, 'V'),
- (0x10027, 'X'),
]
def _seg_53():
return [
+ (0x10027, 'X'),
(0x10028, 'V'),
(0x1003B, 'X'),
(0x1003C, 'V'),
@@ -5536,7 +5537,7 @@ def _seg_53():
(0x10137, 'V'),
(0x1018F, 'X'),
(0x10190, 'V'),
- (0x1019C, 'X'),
+ (0x1019D, 'X'),
(0x101A0, 'V'),
(0x101A1, 'X'),
(0x101D0, 'V'),
@@ -5618,11 +5619,11 @@ def _seg_53():
(0x104BC, 'M', u'𐓤'),
(0x104BD, 'M', u'𐓥'),
(0x104BE, 'M', u'𐓦'),
- (0x104BF, 'M', u'𐓧'),
]
def _seg_54():
return [
+ (0x104BF, 'M', u'𐓧'),
(0x104C0, 'M', u'𐓨'),
(0x104C1, 'M', u'𐓩'),
(0x104C2, 'M', u'𐓪'),
@@ -5722,11 +5723,11 @@ def _seg_54():
(0x10B9D, 'X'),
(0x10BA9, 'V'),
(0x10BB0, 'X'),
- (0x10C00, 'V'),
]
def _seg_55():
return [
+ (0x10C00, 'V'),
(0x10C49, 'X'),
(0x10C80, 'M', u'𐳀'),
(0x10C81, 'M', u'𐳁'),
@@ -5788,10 +5789,18 @@ def _seg_55():
(0x10D3A, 'X'),
(0x10E60, 'V'),
(0x10E7F, 'X'),
+ (0x10E80, 'V'),
+ (0x10EAA, 'X'),
+ (0x10EAB, 'V'),
+ (0x10EAE, 'X'),
+ (0x10EB0, 'V'),
+ (0x10EB2, 'X'),
(0x10F00, 'V'),
(0x10F28, 'X'),
(0x10F30, 'V'),
(0x10F5A, 'X'),
+ (0x10FB0, 'V'),
+ (0x10FCC, 'X'),
(0x10FE0, 'V'),
(0x10FF7, 'X'),
(0x11000, 'V'),
@@ -5809,17 +5818,19 @@ def _seg_55():
(0x11100, 'V'),
(0x11135, 'X'),
(0x11136, 'V'),
- (0x11147, 'X'),
+ (0x11148, 'X'),
(0x11150, 'V'),
(0x11177, 'X'),
(0x11180, 'V'),
- (0x111CE, 'X'),
- (0x111D0, 'V'),
(0x111E0, 'X'),
(0x111E1, 'V'),
(0x111F5, 'X'),
(0x11200, 'V'),
(0x11212, 'X'),
+ ]
+
+def _seg_56():
+ return [
(0x11213, 'V'),
(0x1123F, 'X'),
(0x11280, 'V'),
@@ -5827,10 +5838,6 @@ def _seg_55():
(0x11288, 'V'),
(0x11289, 'X'),
(0x1128A, 'V'),
- ]
-
-def _seg_56():
- return [
(0x1128E, 'X'),
(0x1128F, 'V'),
(0x1129E, 'X'),
@@ -5871,11 +5878,9 @@ def _seg_56():
(0x11370, 'V'),
(0x11375, 'X'),
(0x11400, 'V'),
- (0x1145A, 'X'),
- (0x1145B, 'V'),
(0x1145C, 'X'),
(0x1145D, 'V'),
- (0x11460, 'X'),
+ (0x11462, 'X'),
(0x11480, 'V'),
(0x114C8, 'X'),
(0x114D0, 'V'),
@@ -5926,22 +5931,36 @@ def _seg_56():
(0x118B5, 'M', u'𑣕'),
(0x118B6, 'M', u'𑣖'),
(0x118B7, 'M', u'𑣗'),
+ ]
+
+def _seg_57():
+ return [
(0x118B8, 'M', u'𑣘'),
(0x118B9, 'M', u'𑣙'),
(0x118BA, 'M', u'𑣚'),
(0x118BB, 'M', u'𑣛'),
(0x118BC, 'M', u'𑣜'),
- ]
-
-def _seg_57():
- return [
(0x118BD, 'M', u'𑣝'),
(0x118BE, 'M', u'𑣞'),
(0x118BF, 'M', u'𑣟'),
(0x118C0, 'V'),
(0x118F3, 'X'),
(0x118FF, 'V'),
- (0x11900, 'X'),
+ (0x11907, 'X'),
+ (0x11909, 'V'),
+ (0x1190A, 'X'),
+ (0x1190C, 'V'),
+ (0x11914, 'X'),
+ (0x11915, 'V'),
+ (0x11917, 'X'),
+ (0x11918, 'V'),
+ (0x11936, 'X'),
+ (0x11937, 'V'),
+ (0x11939, 'X'),
+ (0x1193B, 'V'),
+ (0x11947, 'X'),
+ (0x11950, 'V'),
+ (0x1195A, 'X'),
(0x119A0, 'V'),
(0x119A8, 'X'),
(0x119AA, 'V'),
@@ -5996,6 +6015,8 @@ def _seg_57():
(0x11DAA, 'X'),
(0x11EE0, 'V'),
(0x11EF9, 'X'),
+ (0x11FB0, 'V'),
+ (0x11FB1, 'X'),
(0x11FC0, 'V'),
(0x11FF2, 'X'),
(0x11FFF, 'V'),
@@ -6014,6 +6035,10 @@ def _seg_57():
(0x16A39, 'X'),
(0x16A40, 'V'),
(0x16A5F, 'X'),
+ ]
+
+def _seg_58():
+ return [
(0x16A60, 'V'),
(0x16A6A, 'X'),
(0x16A6E, 'V'),
@@ -6035,10 +6060,6 @@ def _seg_57():
(0x16E40, 'M', u'𖹠'),
(0x16E41, 'M', u'𖹡'),
(0x16E42, 'M', u'𖹢'),
- ]
-
-def _seg_58():
- return [
(0x16E43, 'M', u'𖹣'),
(0x16E44, 'M', u'𖹤'),
(0x16E45, 'M', u'𖹥'),
@@ -6077,11 +6098,15 @@ def _seg_58():
(0x16F8F, 'V'),
(0x16FA0, 'X'),
(0x16FE0, 'V'),
- (0x16FE4, 'X'),
+ (0x16FE5, 'X'),
+ (0x16FF0, 'V'),
+ (0x16FF2, 'X'),
(0x17000, 'V'),
(0x187F8, 'X'),
(0x18800, 'V'),
- (0x18AF3, 'X'),
+ (0x18CD6, 'X'),
+ (0x18D00, 'V'),
+ (0x18D09, 'X'),
(0x1B000, 'V'),
(0x1B11F, 'X'),
(0x1B150, 'V'),
@@ -6114,6 +6139,10 @@ def _seg_58():
(0x1D163, 'M', u'𝅘𝅥𝅱'),
(0x1D164, 'M', u'𝅘𝅥𝅲'),
(0x1D165, 'V'),
+ ]
+
+def _seg_59():
+ return [
(0x1D173, 'X'),
(0x1D17B, 'V'),
(0x1D1BB, 'M', u'𝆹𝅥'),
@@ -6139,10 +6168,6 @@ def _seg_58():
(0x1D404, 'M', u'e'),
(0x1D405, 'M', u'f'),
(0x1D406, 'M', u'g'),
- ]
-
-def _seg_59():
- return [
(0x1D407, 'M', u'h'),
(0x1D408, 'M', u'i'),
(0x1D409, 'M', u'j'),
@@ -6218,6 +6243,10 @@ def _seg_59():
(0x1D44F, 'M', u'b'),
(0x1D450, 'M', u'c'),
(0x1D451, 'M', u'd'),
+ ]
+
+def _seg_60():
+ return [
(0x1D452, 'M', u'e'),
(0x1D453, 'M', u'f'),
(0x1D454, 'M', u'g'),
@@ -6243,10 +6272,6 @@ def _seg_59():
(0x1D468, 'M', u'a'),
(0x1D469, 'M', u'b'),
(0x1D46A, 'M', u'c'),
- ]
-
-def _seg_60():
- return [
(0x1D46B, 'M', u'd'),
(0x1D46C, 'M', u'e'),
(0x1D46D, 'M', u'f'),
@@ -6322,6 +6347,10 @@ def _seg_60():
(0x1D4B6, 'M', u'a'),
(0x1D4B7, 'M', u'b'),
(0x1D4B8, 'M', u'c'),
+ ]
+
+def _seg_61():
+ return [
(0x1D4B9, 'M', u'd'),
(0x1D4BA, 'X'),
(0x1D4BB, 'M', u'f'),
@@ -6347,10 +6376,6 @@ def _seg_60():
(0x1D4CF, 'M', u'z'),
(0x1D4D0, 'M', u'a'),
(0x1D4D1, 'M', u'b'),
- ]
-
-def _seg_61():
- return [
(0x1D4D2, 'M', u'c'),
(0x1D4D3, 'M', u'd'),
(0x1D4D4, 'M', u'e'),
@@ -6426,6 +6451,10 @@ def _seg_61():
(0x1D51B, 'M', u'x'),
(0x1D51C, 'M', u'y'),
(0x1D51D, 'X'),
+ ]
+
+def _seg_62():
+ return [
(0x1D51E, 'M', u'a'),
(0x1D51F, 'M', u'b'),
(0x1D520, 'M', u'c'),
@@ -6451,10 +6480,6 @@ def _seg_61():
(0x1D534, 'M', u'w'),
(0x1D535, 'M', u'x'),
(0x1D536, 'M', u'y'),
- ]
-
-def _seg_62():
- return [
(0x1D537, 'M', u'z'),
(0x1D538, 'M', u'a'),
(0x1D539, 'M', u'b'),
@@ -6530,6 +6555,10 @@ def _seg_62():
(0x1D581, 'M', u'v'),
(0x1D582, 'M', u'w'),
(0x1D583, 'M', u'x'),
+ ]
+
+def _seg_63():
+ return [
(0x1D584, 'M', u'y'),
(0x1D585, 'M', u'z'),
(0x1D586, 'M', u'a'),
@@ -6555,10 +6584,6 @@ def _seg_62():
(0x1D59A, 'M', u'u'),
(0x1D59B, 'M', u'v'),
(0x1D59C, 'M', u'w'),
- ]
-
-def _seg_63():
- return [
(0x1D59D, 'M', u'x'),
(0x1D59E, 'M', u'y'),
(0x1D59F, 'M', u'z'),
@@ -6634,6 +6659,10 @@ def _seg_63():
(0x1D5E5, 'M', u'r'),
(0x1D5E6, 'M', u's'),
(0x1D5E7, 'M', u't'),
+ ]
+
+def _seg_64():
+ return [
(0x1D5E8, 'M', u'u'),
(0x1D5E9, 'M', u'v'),
(0x1D5EA, 'M', u'w'),
@@ -6659,10 +6688,6 @@ def _seg_63():
(0x1D5FE, 'M', u'q'),
(0x1D5FF, 'M', u'r'),
(0x1D600, 'M', u's'),
- ]
-
-def _seg_64():
- return [
(0x1D601, 'M', u't'),
(0x1D602, 'M', u'u'),
(0x1D603, 'M', u'v'),
@@ -6738,6 +6763,10 @@ def _seg_64():
(0x1D649, 'M', u'n'),
(0x1D64A, 'M', u'o'),
(0x1D64B, 'M', u'p'),
+ ]
+
+def _seg_65():
+ return [
(0x1D64C, 'M', u'q'),
(0x1D64D, 'M', u'r'),
(0x1D64E, 'M', u's'),
@@ -6763,10 +6792,6 @@ def _seg_64():
(0x1D662, 'M', u'm'),
(0x1D663, 'M', u'n'),
(0x1D664, 'M', u'o'),
- ]
-
-def _seg_65():
- return [
(0x1D665, 'M', u'p'),
(0x1D666, 'M', u'q'),
(0x1D667, 'M', u'r'),
@@ -6842,6 +6867,10 @@ def _seg_65():
(0x1D6AE, 'M', u'η'),
(0x1D6AF, 'M', u'θ'),
(0x1D6B0, 'M', u'ι'),
+ ]
+
+def _seg_66():
+ return [
(0x1D6B1, 'M', u'κ'),
(0x1D6B2, 'M', u'λ'),
(0x1D6B3, 'M', u'μ'),
@@ -6867,10 +6896,6 @@ def _seg_65():
(0x1D6C7, 'M', u'ζ'),
(0x1D6C8, 'M', u'η'),
(0x1D6C9, 'M', u'θ'),
- ]
-
-def _seg_66():
- return [
(0x1D6CA, 'M', u'ι'),
(0x1D6CB, 'M', u'κ'),
(0x1D6CC, 'M', u'λ'),
@@ -6946,6 +6971,10 @@ def _seg_66():
(0x1D714, 'M', u'ω'),
(0x1D715, 'M', u'∂'),
(0x1D716, 'M', u'ε'),
+ ]
+
+def _seg_67():
+ return [
(0x1D717, 'M', u'θ'),
(0x1D718, 'M', u'κ'),
(0x1D719, 'M', u'φ'),
@@ -6971,10 +7000,6 @@ def _seg_66():
(0x1D72D, 'M', u'θ'),
(0x1D72E, 'M', u'σ'),
(0x1D72F, 'M', u'τ'),
- ]
-
-def _seg_67():
- return [
(0x1D730, 'M', u'υ'),
(0x1D731, 'M', u'φ'),
(0x1D732, 'M', u'χ'),
@@ -7050,6 +7075,10 @@ def _seg_67():
(0x1D779, 'M', u'κ'),
(0x1D77A, 'M', u'λ'),
(0x1D77B, 'M', u'μ'),
+ ]
+
+def _seg_68():
+ return [
(0x1D77C, 'M', u'ν'),
(0x1D77D, 'M', u'ξ'),
(0x1D77E, 'M', u'ο'),
@@ -7075,10 +7104,6 @@ def _seg_67():
(0x1D793, 'M', u'δ'),
(0x1D794, 'M', u'ε'),
(0x1D795, 'M', u'ζ'),
- ]
-
-def _seg_68():
- return [
(0x1D796, 'M', u'η'),
(0x1D797, 'M', u'θ'),
(0x1D798, 'M', u'ι'),
@@ -7154,6 +7179,10 @@ def _seg_68():
(0x1D7E1, 'M', u'9'),
(0x1D7E2, 'M', u'0'),
(0x1D7E3, 'M', u'1'),
+ ]
+
+def _seg_69():
+ return [
(0x1D7E4, 'M', u'2'),
(0x1D7E5, 'M', u'3'),
(0x1D7E6, 'M', u'4'),
@@ -7179,10 +7208,6 @@ def _seg_68():
(0x1D7FA, 'M', u'4'),
(0x1D7FB, 'M', u'5'),
(0x1D7FC, 'M', u'6'),
- ]
-
-def _seg_69():
- return [
(0x1D7FD, 'M', u'7'),
(0x1D7FE, 'M', u'8'),
(0x1D7FF, 'M', u'9'),
@@ -7258,6 +7283,10 @@ def _seg_69():
(0x1E95A, 'X'),
(0x1E95E, 'V'),
(0x1E960, 'X'),
+ ]
+
+def _seg_70():
+ return [
(0x1EC71, 'V'),
(0x1ECB5, 'X'),
(0x1ED01, 'V'),
@@ -7283,10 +7312,6 @@ def _seg_69():
(0x1EE12, 'M', u'ق'),
(0x1EE13, 'M', u'ر'),
(0x1EE14, 'M', u'ش'),
- ]
-
-def _seg_70():
- return [
(0x1EE15, 'M', u'ت'),
(0x1EE16, 'M', u'ث'),
(0x1EE17, 'M', u'خ'),
@@ -7362,6 +7387,10 @@ def _seg_70():
(0x1EE68, 'M', u'ط'),
(0x1EE69, 'M', u'ي'),
(0x1EE6A, 'M', u'ك'),
+ ]
+
+def _seg_71():
+ return [
(0x1EE6B, 'X'),
(0x1EE6C, 'M', u'م'),
(0x1EE6D, 'M', u'ن'),
@@ -7387,10 +7416,6 @@ def _seg_70():
(0x1EE81, 'M', u'ب'),
(0x1EE82, 'M', u'ج'),
(0x1EE83, 'M', u'د'),
- ]
-
-def _seg_71():
- return [
(0x1EE84, 'M', u'ه'),
(0x1EE85, 'M', u'و'),
(0x1EE86, 'M', u'ز'),
@@ -7466,10 +7491,13 @@ def _seg_71():
(0x1F106, '3', u'5,'),
(0x1F107, '3', u'6,'),
(0x1F108, '3', u'7,'),
+ ]
+
+def _seg_72():
+ return [
(0x1F109, '3', u'8,'),
(0x1F10A, '3', u'9,'),
(0x1F10B, 'V'),
- (0x1F10D, 'X'),
(0x1F110, '3', u'(a)'),
(0x1F111, '3', u'(b)'),
(0x1F112, '3', u'(c)'),
@@ -7491,10 +7519,6 @@ def _seg_71():
(0x1F122, '3', u'(s)'),
(0x1F123, '3', u'(t)'),
(0x1F124, '3', u'(u)'),
- ]
-
-def _seg_72():
- return [
(0x1F125, '3', u'(v)'),
(0x1F126, '3', u'(w)'),
(0x1F127, '3', u'(x)'),
@@ -7542,11 +7566,10 @@ def _seg_72():
(0x1F16A, 'M', u'mc'),
(0x1F16B, 'M', u'md'),
(0x1F16C, 'M', u'mr'),
- (0x1F16D, 'X'),
- (0x1F170, 'V'),
+ (0x1F16D, 'V'),
(0x1F190, 'M', u'dj'),
(0x1F191, 'V'),
- (0x1F1AD, 'X'),
+ (0x1F1AE, 'X'),
(0x1F1E6, 'V'),
(0x1F200, 'M', u'ほか'),
(0x1F201, 'M', u'ココ'),
@@ -7572,6 +7595,10 @@ def _seg_72():
(0x1F221, 'M', u'終'),
(0x1F222, 'M', u'生'),
(0x1F223, 'M', u'販'),
+ ]
+
+def _seg_73():
+ return [
(0x1F224, 'M', u'声'),
(0x1F225, 'M', u'吹'),
(0x1F226, 'M', u'演'),
@@ -7595,10 +7622,6 @@ def _seg_72():
(0x1F238, 'M', u'申'),
(0x1F239, 'M', u'割'),
(0x1F23A, 'M', u'営'),
- ]
-
-def _seg_73():
- return [
(0x1F23B, 'M', u'配'),
(0x1F23C, 'X'),
(0x1F240, 'M', u'〔本〕'),
@@ -7617,11 +7640,11 @@ def _seg_73():
(0x1F260, 'V'),
(0x1F266, 'X'),
(0x1F300, 'V'),
- (0x1F6D6, 'X'),
+ (0x1F6D8, 'X'),
(0x1F6E0, 'V'),
(0x1F6ED, 'X'),
(0x1F6F0, 'V'),
- (0x1F6FB, 'X'),
+ (0x1F6FD, 'X'),
(0x1F700, 'V'),
(0x1F774, 'X'),
(0x1F780, 'V'),
@@ -7638,32 +7661,51 @@ def _seg_73():
(0x1F888, 'X'),
(0x1F890, 'V'),
(0x1F8AE, 'X'),
+ (0x1F8B0, 'V'),
+ (0x1F8B2, 'X'),
(0x1F900, 'V'),
- (0x1F90C, 'X'),
- (0x1F90D, 'V'),
- (0x1F972, 'X'),
- (0x1F973, 'V'),
- (0x1F977, 'X'),
+ (0x1F979, 'X'),
(0x1F97A, 'V'),
- (0x1F9A3, 'X'),
- (0x1F9A5, 'V'),
- (0x1F9AB, 'X'),
- (0x1F9AE, 'V'),
- (0x1F9CB, 'X'),
+ (0x1F9CC, 'X'),
(0x1F9CD, 'V'),
(0x1FA54, 'X'),
(0x1FA60, 'V'),
(0x1FA6E, 'X'),
(0x1FA70, 'V'),
- (0x1FA74, 'X'),
+ (0x1FA75, 'X'),
(0x1FA78, 'V'),
(0x1FA7B, 'X'),
(0x1FA80, 'V'),
- (0x1FA83, 'X'),
+ (0x1FA87, 'X'),
(0x1FA90, 'V'),
- (0x1FA96, 'X'),
+ (0x1FAA9, 'X'),
+ (0x1FAB0, 'V'),
+ (0x1FAB7, 'X'),
+ (0x1FAC0, 'V'),
+ (0x1FAC3, 'X'),
+ (0x1FAD0, 'V'),
+ (0x1FAD7, 'X'),
+ (0x1FB00, 'V'),
+ (0x1FB93, 'X'),
+ (0x1FB94, 'V'),
+ (0x1FBCB, 'X'),
+ (0x1FBF0, 'M', u'0'),
+ (0x1FBF1, 'M', u'1'),
+ (0x1FBF2, 'M', u'2'),
+ (0x1FBF3, 'M', u'3'),
+ (0x1FBF4, 'M', u'4'),
+ (0x1FBF5, 'M', u'5'),
+ (0x1FBF6, 'M', u'6'),
+ (0x1FBF7, 'M', u'7'),
+ (0x1FBF8, 'M', u'8'),
+ (0x1FBF9, 'M', u'9'),
+ ]
+
+def _seg_74():
+ return [
+ (0x1FBFA, 'X'),
(0x20000, 'V'),
- (0x2A6D7, 'X'),
+ (0x2A6DE, 'X'),
(0x2A700, 'V'),
(0x2B735, 'X'),
(0x2B740, 'V'),
@@ -7699,10 +7741,6 @@ def _seg_73():
(0x2F818, 'M', u'冤'),
(0x2F819, 'M', u'仌'),
(0x2F81A, 'M', u'冬'),
- ]
-
-def _seg_74():
- return [
(0x2F81B, 'M', u'况'),
(0x2F81C, 'M', u'𩇟'),
(0x2F81D, 'M', u'凵'),
@@ -7765,6 +7803,10 @@ def _seg_74():
(0x2F859, 'M', u'𡓤'),
(0x2F85A, 'M', u'売'),
(0x2F85B, 'M', u'壷'),
+ ]
+
+def _seg_75():
+ return [
(0x2F85C, 'M', u'夆'),
(0x2F85D, 'M', u'多'),
(0x2F85E, 'M', u'夢'),
@@ -7803,10 +7845,6 @@ def _seg_74():
(0x2F880, 'M', u'嵼'),
(0x2F881, 'M', u'巡'),
(0x2F882, 'M', u'巢'),
- ]
-
-def _seg_75():
- return [
(0x2F883, 'M', u'㠯'),
(0x2F884, 'M', u'巽'),
(0x2F885, 'M', u'帨'),
@@ -7869,6 +7907,10 @@ def _seg_75():
(0x2F8C0, 'M', u'揅'),
(0x2F8C1, 'M', u'掩'),
(0x2F8C2, 'M', u'㨮'),
+ ]
+
+def _seg_76():
+ return [
(0x2F8C3, 'M', u'摩'),
(0x2F8C4, 'M', u'摾'),
(0x2F8C5, 'M', u'撝'),
@@ -7907,10 +7949,6 @@ def _seg_75():
(0x2F8E6, 'M', u'椔'),
(0x2F8E7, 'M', u'㮝'),
(0x2F8E8, 'M', u'楂'),
- ]
-
-def _seg_76():
- return [
(0x2F8E9, 'M', u'榣'),
(0x2F8EA, 'M', u'槪'),
(0x2F8EB, 'M', u'檨'),
@@ -7973,6 +8011,10 @@ def _seg_76():
(0x2F924, 'M', u'犀'),
(0x2F925, 'M', u'犕'),
(0x2F926, 'M', u'𤜵'),
+ ]
+
+def _seg_77():
+ return [
(0x2F927, 'M', u'𤠔'),
(0x2F928, 'M', u'獺'),
(0x2F929, 'M', u'王'),
@@ -8011,10 +8053,6 @@ def _seg_76():
(0x2F94C, 'M', u'䂖'),
(0x2F94D, 'M', u'𥐝'),
(0x2F94E, 'M', u'硎'),
- ]
-
-def _seg_77():
- return [
(0x2F94F, 'M', u'碌'),
(0x2F950, 'M', u'磌'),
(0x2F951, 'M', u'䃣'),
@@ -8077,6 +8115,10 @@ def _seg_77():
(0x2F98B, 'M', u'舁'),
(0x2F98C, 'M', u'舄'),
(0x2F98D, 'M', u'辞'),
+ ]
+
+def _seg_78():
+ return [
(0x2F98E, 'M', u'䑫'),
(0x2F98F, 'M', u'芑'),
(0x2F990, 'M', u'芋'),
@@ -8115,10 +8157,6 @@ def _seg_77():
(0x2F9B1, 'M', u'𧃒'),
(0x2F9B2, 'M', u'䕫'),
(0x2F9B3, 'M', u'虐'),
- ]
-
-def _seg_78():
- return [
(0x2F9B4, 'M', u'虜'),
(0x2F9B5, 'M', u'虧'),
(0x2F9B6, 'M', u'虩'),
@@ -8181,6 +8219,10 @@ def _seg_78():
(0x2F9EF, 'M', u'䦕'),
(0x2F9F0, 'M', u'閷'),
(0x2F9F1, 'M', u'𨵷'),
+ ]
+
+def _seg_79():
+ return [
(0x2F9F2, 'M', u'䧦'),
(0x2F9F3, 'M', u'雃'),
(0x2F9F4, 'M', u'嶲'),
@@ -8219,16 +8261,14 @@ def _seg_78():
(0x2FA16, 'M', u'䵖'),
(0x2FA17, 'M', u'黹'),
(0x2FA18, 'M', u'黾'),
- ]
-
-def _seg_79():
- return [
(0x2FA19, 'M', u'鼅'),
(0x2FA1A, 'M', u'鼏'),
(0x2FA1B, 'M', u'鼖'),
(0x2FA1C, 'M', u'鼻'),
(0x2FA1D, 'M', u'𪘀'),
(0x2FA1E, 'X'),
+ (0x30000, 'V'),
+ (0x3134B, 'X'),
(0xE0100, 'I'),
(0xE01F0, 'X'),
]
diff --git a/src/pip/_vendor/packaging/__about__.py b/src/pip/_vendor/packaging/__about__.py
index 5161d141be7..4d998578d7b 100644
--- a/src/pip/_vendor/packaging/__about__.py
+++ b/src/pip/_vendor/packaging/__about__.py
@@ -18,10 +18,10 @@
__summary__ = "Core utilities for Python packages"
__uri__ = "https://github.com/pypa/packaging"
-__version__ = "20.3"
+__version__ = "20.4"
__author__ = "Donald Stufft and individual contributors"
__email__ = "donald@stufft.io"
-__license__ = "BSD or Apache License, Version 2.0"
+__license__ = "BSD-2-Clause or Apache-2.0"
__copyright__ = "Copyright 2014-2019 %s" % __author__
diff --git a/src/pip/_vendor/packaging/_compat.py b/src/pip/_vendor/packaging/_compat.py
index a145f7eeb39..e54bd4ede87 100644
--- a/src/pip/_vendor/packaging/_compat.py
+++ b/src/pip/_vendor/packaging/_compat.py
@@ -5,9 +5,9 @@
import sys
-from ._typing import MYPY_CHECK_RUNNING
+from ._typing import TYPE_CHECKING
-if MYPY_CHECK_RUNNING: # pragma: no cover
+if TYPE_CHECKING: # pragma: no cover
from typing import Any, Dict, Tuple, Type
diff --git a/src/pip/_vendor/packaging/_typing.py b/src/pip/_vendor/packaging/_typing.py
index 945b39c30a0..2846133bd8d 100644
--- a/src/pip/_vendor/packaging/_typing.py
+++ b/src/pip/_vendor/packaging/_typing.py
@@ -18,22 +18,31 @@
In packaging, all static-typing related imports should be guarded as follows:
- from pip._vendor.packaging._typing import MYPY_CHECK_RUNNING
+ from pip._vendor.packaging._typing import TYPE_CHECKING
- if MYPY_CHECK_RUNNING:
+ if TYPE_CHECKING:
from typing import ...
Ref: https://github.com/python/mypy/issues/3216
"""
-MYPY_CHECK_RUNNING = False
+__all__ = ["TYPE_CHECKING", "cast"]
-if MYPY_CHECK_RUNNING: # pragma: no cover
- import typing
-
- cast = typing.cast
+# The TYPE_CHECKING constant defined by the typing module is False at runtime
+# but True while type checking.
+if False: # pragma: no cover
+ from typing import TYPE_CHECKING
+else:
+ TYPE_CHECKING = False
+
+# typing's cast syntax requires calling typing.cast at runtime, but we don't
+# want to import typing at runtime. Here, we inform the type checkers that
+# we're importing `typing.cast` as `cast` and re-implement typing.cast's
+# runtime behavior in a block that is ignored by type checkers.
+if TYPE_CHECKING: # pragma: no cover
+ # not executed at runtime
+ from typing import cast
else:
- # typing's cast() is needed at runtime, but we don't want to import typing.
- # Thus, we use a dummy no-op version, which we tell mypy to ignore.
- def cast(type_, value): # type: ignore
+ # executed at runtime
+ def cast(type_, value): # noqa
return value
diff --git a/src/pip/_vendor/packaging/markers.py b/src/pip/_vendor/packaging/markers.py
index b24f8edf934..ed642b01fcc 100644
--- a/src/pip/_vendor/packaging/markers.py
+++ b/src/pip/_vendor/packaging/markers.py
@@ -13,10 +13,10 @@
from pip._vendor.pyparsing import Literal as L # noqa
from ._compat import string_types
-from ._typing import MYPY_CHECK_RUNNING
+from ._typing import TYPE_CHECKING
from .specifiers import Specifier, InvalidSpecifier
-if MYPY_CHECK_RUNNING: # pragma: no cover
+if TYPE_CHECKING: # pragma: no cover
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
Operator = Callable[[str, str], bool]
diff --git a/src/pip/_vendor/packaging/requirements.py b/src/pip/_vendor/packaging/requirements.py
index 1e32a9376ec..5e64101c43d 100644
--- a/src/pip/_vendor/packaging/requirements.py
+++ b/src/pip/_vendor/packaging/requirements.py
@@ -11,11 +11,11 @@
from pip._vendor.pyparsing import Literal as L # noqa
from pip._vendor.six.moves.urllib import parse as urlparse
-from ._typing import MYPY_CHECK_RUNNING
+from ._typing import TYPE_CHECKING
from .markers import MARKER_EXPR, Marker
from .specifiers import LegacySpecifier, Specifier, SpecifierSet
-if MYPY_CHECK_RUNNING: # pragma: no cover
+if TYPE_CHECKING: # pragma: no cover
from typing import List
diff --git a/src/pip/_vendor/packaging/specifiers.py b/src/pip/_vendor/packaging/specifiers.py
index 94987486d4b..fe09bb1dbb2 100644
--- a/src/pip/_vendor/packaging/specifiers.py
+++ b/src/pip/_vendor/packaging/specifiers.py
@@ -9,10 +9,11 @@
import re
from ._compat import string_types, with_metaclass
-from ._typing import MYPY_CHECK_RUNNING
+from ._typing import TYPE_CHECKING
+from .utils import canonicalize_version
from .version import Version, LegacyVersion, parse
-if MYPY_CHECK_RUNNING: # pragma: no cover
+if TYPE_CHECKING: # pragma: no cover
from typing import (
List,
Dict,
@@ -132,9 +133,14 @@ def __str__(self):
# type: () -> str
return "{0}{1}".format(*self._spec)
+ @property
+ def _canonical_spec(self):
+ # type: () -> Tuple[str, Union[Version, str]]
+ return self._spec[0], canonicalize_version(self._spec[1])
+
def __hash__(self):
# type: () -> int
- return hash(self._spec)
+ return hash(self._canonical_spec)
def __eq__(self, other):
# type: (object) -> bool
@@ -146,7 +152,7 @@ def __eq__(self, other):
elif not isinstance(other, self.__class__):
return NotImplemented
- return self._spec == other._spec
+ return self._canonical_spec == other._canonical_spec
def __ne__(self, other):
# type: (object) -> bool
@@ -510,12 +516,20 @@ def _compare_not_equal(self, prospective, spec):
@_require_version_compare
def _compare_less_than_equal(self, prospective, spec):
# type: (ParsedVersion, str) -> bool
- return prospective <= Version(spec)
+
+ # NB: Local version identifiers are NOT permitted in the version
+ # specifier, so local version labels can be universally removed from
+ # the prospective version.
+ return Version(prospective.public) <= Version(spec)
@_require_version_compare
def _compare_greater_than_equal(self, prospective, spec):
# type: (ParsedVersion, str) -> bool
- return prospective >= Version(spec)
+
+ # NB: Local version identifiers are NOT permitted in the version
+ # specifier, so local version labels can be universally removed from
+ # the prospective version.
+ return Version(prospective.public) >= Version(spec)
@_require_version_compare
def _compare_less_than(self, prospective, spec_str):
diff --git a/src/pip/_vendor/packaging/tags.py b/src/pip/_vendor/packaging/tags.py
index 300faab8476..9064910b8ba 100644
--- a/src/pip/_vendor/packaging/tags.py
+++ b/src/pip/_vendor/packaging/tags.py
@@ -22,9 +22,9 @@
import sysconfig
import warnings
-from ._typing import MYPY_CHECK_RUNNING, cast
+from ._typing import TYPE_CHECKING, cast
-if MYPY_CHECK_RUNNING: # pragma: no cover
+if TYPE_CHECKING: # pragma: no cover
from typing import (
Dict,
FrozenSet,
@@ -58,6 +58,12 @@
class Tag(object):
+ """
+ A representation of the tag triple for a wheel.
+
+ Instances are considered immutable and thus are hashable. Equality checking
+ is also supported.
+ """
__slots__ = ["_interpreter", "_abi", "_platform"]
@@ -108,6 +114,12 @@ def __repr__(self):
def parse_tag(tag):
# type: (str) -> FrozenSet[Tag]
+ """
+ Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances.
+
+ Returning a set is required due to the possibility that the tag is a
+ compressed tag set.
+ """
tags = set()
interpreters, abis, platforms = tag.split("-")
for interpreter in interpreters.split("."):
@@ -541,7 +553,7 @@ def __init__(self, file):
def unpack(fmt):
# type: (str) -> int
try:
- result, = struct.unpack(
+ (result,) = struct.unpack(
fmt, file.read(struct.calcsize(fmt))
) # type: (int, )
except struct.error:
diff --git a/src/pip/_vendor/packaging/utils.py b/src/pip/_vendor/packaging/utils.py
index 44f1bf98732..19579c1a0fa 100644
--- a/src/pip/_vendor/packaging/utils.py
+++ b/src/pip/_vendor/packaging/utils.py
@@ -5,19 +5,22 @@
import re
-from ._typing import MYPY_CHECK_RUNNING
+from ._typing import TYPE_CHECKING, cast
from .version import InvalidVersion, Version
-if MYPY_CHECK_RUNNING: # pragma: no cover
- from typing import Union
+if TYPE_CHECKING: # pragma: no cover
+ from typing import NewType, Union
+
+ NormalizedName = NewType("NormalizedName", str)
_canonicalize_regex = re.compile(r"[-_.]+")
def canonicalize_name(name):
- # type: (str) -> str
+ # type: (str) -> NormalizedName
# This is taken from PEP 503.
- return _canonicalize_regex.sub("-", name).lower()
+ value = _canonicalize_regex.sub("-", name).lower()
+ return cast("NormalizedName", value)
def canonicalize_version(_version):
diff --git a/src/pip/_vendor/packaging/version.py b/src/pip/_vendor/packaging/version.py
index f39a2a12a1b..00371e86a87 100644
--- a/src/pip/_vendor/packaging/version.py
+++ b/src/pip/_vendor/packaging/version.py
@@ -8,9 +8,9 @@
import re
from ._structures import Infinity, NegativeInfinity
-from ._typing import MYPY_CHECK_RUNNING
+from ._typing import TYPE_CHECKING
-if MYPY_CHECK_RUNNING: # pragma: no cover
+if TYPE_CHECKING: # pragma: no cover
from typing import Callable, Iterator, List, Optional, SupportsInt, Tuple, Union
from ._structures import InfinityType, NegativeInfinityType
diff --git a/src/pip/_vendor/requests/__init__.py b/src/pip/_vendor/requests/__init__.py
index e47bcb20149..517458b5a25 100644
--- a/src/pip/_vendor/requests/__init__.py
+++ b/src/pip/_vendor/requests/__init__.py
@@ -90,18 +90,29 @@ def _check_cryptography(cryptography_version):
"version!".format(urllib3.__version__, chardet.__version__),
RequestsDependencyWarning)
-# Attempt to enable urllib3's SNI support, if possible
-from pip._internal.utils.compat import WINDOWS
-if not WINDOWS:
+# Attempt to enable urllib3's fallback for SNI support
+# if the standard library doesn't support SNI or the
+# 'ssl' library isn't available.
+try:
+ # Note: This logic prevents upgrading cryptography on Windows, if imported
+ # as part of pip.
+ from pip._internal.utils.compat import WINDOWS
+ if not WINDOWS:
+ raise ImportError("pip internals: don't import cryptography on Windows")
try:
+ import ssl
+ except ImportError:
+ ssl = None
+
+ if not getattr(ssl, "HAS_SNI", False):
from pip._vendor.urllib3.contrib import pyopenssl
pyopenssl.inject_into_urllib3()
# Check cryptography version
from cryptography import __version__ as cryptography_version
_check_cryptography(cryptography_version)
- except ImportError:
- pass
+except ImportError:
+ pass
# urllib3's DependencyWarnings should be silenced.
from pip._vendor.urllib3.exceptions import DependencyWarning
diff --git a/src/pip/_vendor/requests/__version__.py b/src/pip/_vendor/requests/__version__.py
index b9e7df4881a..531e26ceb24 100644
--- a/src/pip/_vendor/requests/__version__.py
+++ b/src/pip/_vendor/requests/__version__.py
@@ -5,8 +5,8 @@
__title__ = 'requests'
__description__ = 'Python HTTP for Humans.'
__url__ = 'https://requests.readthedocs.io'
-__version__ = '2.23.0'
-__build__ = 0x022300
+__version__ = '2.24.0'
+__build__ = 0x022400
__author__ = 'Kenneth Reitz'
__author_email__ = 'me@kennethreitz.org'
__license__ = 'Apache 2.0'
diff --git a/src/pip/_vendor/requests/exceptions.py b/src/pip/_vendor/requests/exceptions.py
index a91e1fd114e..9ef9e6e97b8 100644
--- a/src/pip/_vendor/requests/exceptions.py
+++ b/src/pip/_vendor/requests/exceptions.py
@@ -94,11 +94,11 @@ class ChunkedEncodingError(RequestException):
class ContentDecodingError(RequestException, BaseHTTPError):
- """Failed to decode response content"""
+ """Failed to decode response content."""
class StreamConsumedError(RequestException, TypeError):
- """The content for this response was already consumed"""
+ """The content for this response was already consumed."""
class RetryError(RequestException):
@@ -106,21 +106,18 @@ class RetryError(RequestException):
class UnrewindableBodyError(RequestException):
- """Requests encountered an error when trying to rewind a body"""
+ """Requests encountered an error when trying to rewind a body."""
# Warnings
class RequestsWarning(Warning):
"""Base warning for Requests."""
- pass
class FileModeWarning(RequestsWarning, DeprecationWarning):
"""A file was opened in text mode, but Requests determined its binary length."""
- pass
class RequestsDependencyWarning(RequestsWarning):
"""An imported dependency doesn't match the expected version range."""
- pass
diff --git a/src/pip/_vendor/requests/models.py b/src/pip/_vendor/requests/models.py
index 8a3085d3783..015e715dad3 100644
--- a/src/pip/_vendor/requests/models.py
+++ b/src/pip/_vendor/requests/models.py
@@ -473,12 +473,12 @@ def prepare_body(self, data, files, json=None):
not isinstance(data, (basestring, list, tuple, Mapping))
])
- try:
- length = super_len(data)
- except (TypeError, AttributeError, UnsupportedOperation):
- length = None
-
if is_stream:
+ try:
+ length = super_len(data)
+ except (TypeError, AttributeError, UnsupportedOperation):
+ length = None
+
body = data
if getattr(body, 'tell', None) is not None:
@@ -916,7 +916,7 @@ def links(self):
return l
def raise_for_status(self):
- """Raises stored :class:`HTTPError`, if one occurred."""
+ """Raises :class:`HTTPError`, if one occurred."""
http_error_msg = ''
if isinstance(self.reason, bytes):
diff --git a/src/pip/_vendor/requests/sessions.py b/src/pip/_vendor/requests/sessions.py
index 2845880bf41..e8e2d609a78 100644
--- a/src/pip/_vendor/requests/sessions.py
+++ b/src/pip/_vendor/requests/sessions.py
@@ -658,11 +658,13 @@ def send(self, request, **kwargs):
extract_cookies_to_jar(self.cookies, request, r.raw)
- # Redirect resolving generator.
- gen = self.resolve_redirects(r, request, **kwargs)
-
# Resolve redirects if allowed.
- history = [resp for resp in gen] if allow_redirects else []
+ if allow_redirects:
+ # Redirect resolving generator.
+ gen = self.resolve_redirects(r, request, **kwargs)
+ history = [resp for resp in gen]
+ else:
+ history = []
# Shuffle things around if there's history.
if history:
diff --git a/src/pip/_vendor/six.py b/src/pip/_vendor/six.py
index 5fe9f8e141e..83f69783d1a 100644
--- a/src/pip/_vendor/six.py
+++ b/src/pip/_vendor/six.py
@@ -29,7 +29,7 @@
import types
__author__ = "Benjamin Peterson "
-__version__ = "1.14.0"
+__version__ = "1.15.0"
# Useful for very coarse version differentiation.
@@ -890,12 +890,11 @@ def ensure_binary(s, encoding='utf-8', errors='strict'):
- `str` -> encoded to `bytes`
- `bytes` -> `bytes`
"""
+ if isinstance(s, binary_type):
+ return s
if isinstance(s, text_type):
return s.encode(encoding, errors)
- elif isinstance(s, binary_type):
- return s
- else:
- raise TypeError("not expecting type '%s'" % type(s))
+ raise TypeError("not expecting type '%s'" % type(s))
def ensure_str(s, encoding='utf-8', errors='strict'):
@@ -909,12 +908,15 @@ def ensure_str(s, encoding='utf-8', errors='strict'):
- `str` -> `str`
- `bytes` -> decoded to `str`
"""
- if not isinstance(s, (text_type, binary_type)):
- raise TypeError("not expecting type '%s'" % type(s))
+ # Optimization: Fast return for the common case.
+ if type(s) is str:
+ return s
if PY2 and isinstance(s, text_type):
- s = s.encode(encoding, errors)
+ return s.encode(encoding, errors)
elif PY3 and isinstance(s, binary_type):
- s = s.decode(encoding, errors)
+ return s.decode(encoding, errors)
+ elif not isinstance(s, (text_type, binary_type)):
+ raise TypeError("not expecting type '%s'" % type(s))
return s
diff --git a/src/pip/_vendor/toml.py b/src/pip/_vendor/toml.py
deleted file mode 100644
index dac398837b3..00000000000
--- a/src/pip/_vendor/toml.py
+++ /dev/null
@@ -1,1039 +0,0 @@
-"""Python module which parses and emits TOML.
-
-Released under the MIT license.
-"""
-import re
-import io
-import datetime
-from os import linesep
-import sys
-
-__version__ = "0.9.6"
-_spec_ = "0.4.0"
-
-
-class TomlDecodeError(Exception):
- """Base toml Exception / Error."""
- pass
-
-
-class TomlTz(datetime.tzinfo):
- def __init__(self, toml_offset):
- if toml_offset == "Z":
- self._raw_offset = "+00:00"
- else:
- self._raw_offset = toml_offset
- self._sign = -1 if self._raw_offset[0] == '-' else 1
- self._hours = int(self._raw_offset[1:3])
- self._minutes = int(self._raw_offset[4:6])
-
- def tzname(self, dt):
- return "UTC" + self._raw_offset
-
- def utcoffset(self, dt):
- return self._sign * datetime.timedelta(hours=self._hours,
- minutes=self._minutes)
-
- def dst(self, dt):
- return datetime.timedelta(0)
-
-
-class InlineTableDict(object):
- """Sentinel subclass of dict for inline tables."""
-
-
-def _get_empty_inline_table(_dict):
- class DynamicInlineTableDict(_dict, InlineTableDict):
- """Concrete sentinel subclass for inline tables.
- It is a subclass of _dict which is passed in dynamically at load time
- It is also a subclass of InlineTableDict
- """
-
- return DynamicInlineTableDict()
-
-
-try:
- _range = xrange
-except NameError:
- unicode = str
- _range = range
- basestring = str
- unichr = chr
-
-try:
- FNFError = FileNotFoundError
-except NameError:
- FNFError = IOError
-
-
-def load(f, _dict=dict):
- """Parses named file or files as toml and returns a dictionary
-
- Args:
- f: Path to the file to open, array of files to read into single dict
- or a file descriptor
- _dict: (optional) Specifies the class of the returned toml dictionary
-
- Returns:
- Parsed toml file represented as a dictionary
-
- Raises:
- TypeError -- When f is invalid type
- TomlDecodeError: Error while decoding toml
- IOError / FileNotFoundError -- When an array with no valid (existing)
- (Python 2 / Python 3) file paths is passed
- """
-
- if isinstance(f, basestring):
- with io.open(f, encoding='utf-8') as ffile:
- return loads(ffile.read(), _dict)
- elif isinstance(f, list):
- from os import path as op
- from warnings import warn
- if not [path for path in f if op.exists(path)]:
- error_msg = "Load expects a list to contain filenames only."
- error_msg += linesep
- error_msg += ("The list needs to contain the path of at least one "
- "existing file.")
- raise FNFError(error_msg)
- d = _dict()
- for l in f:
- if op.exists(l):
- d.update(load(l))
- else:
- warn("Non-existent filename in list with at least one valid "
- "filename")
- return d
- else:
- try:
- return loads(f.read(), _dict)
- except AttributeError:
- raise TypeError("You can only load a file descriptor, filename or "
- "list")
-
-
-_groupname_re = re.compile(r'^[A-Za-z0-9_-]+$')
-
-
-def loads(s, _dict=dict):
- """Parses string as toml
-
- Args:
- s: String to be parsed
- _dict: (optional) Specifies the class of the returned toml dictionary
-
- Returns:
- Parsed toml file represented as a dictionary
-
- Raises:
- TypeError: When a non-string is passed
- TomlDecodeError: Error while decoding toml
- """
-
- implicitgroups = []
- retval = _dict()
- currentlevel = retval
- if not isinstance(s, basestring):
- raise TypeError("Expecting something like a string")
-
- if not isinstance(s, unicode):
- s = s.decode('utf8')
-
- sl = list(s)
- openarr = 0
- openstring = False
- openstrchar = ""
- multilinestr = False
- arrayoftables = False
- beginline = True
- keygroup = False
- keyname = 0
- for i, item in enumerate(sl):
- if item == '\r' and sl[i + 1] == '\n':
- sl[i] = ' '
- continue
- if keyname:
- if item == '\n':
- raise TomlDecodeError("Key name found without value."
- " Reached end of line.")
- if openstring:
- if item == openstrchar:
- keyname = 2
- openstring = False
- openstrchar = ""
- continue
- elif keyname == 1:
- if item.isspace():
- keyname = 2
- continue
- elif item.isalnum() or item == '_' or item == '-':
- continue
- elif keyname == 2 and item.isspace():
- continue
- if item == '=':
- keyname = 0
- else:
- raise TomlDecodeError("Found invalid character in key name: '" +
- item + "'. Try quoting the key name.")
- if item == "'" and openstrchar != '"':
- k = 1
- try:
- while sl[i - k] == "'":
- k += 1
- if k == 3:
- break
- except IndexError:
- pass
- if k == 3:
- multilinestr = not multilinestr
- openstring = multilinestr
- else:
- openstring = not openstring
- if openstring:
- openstrchar = "'"
- else:
- openstrchar = ""
- if item == '"' and openstrchar != "'":
- oddbackslash = False
- k = 1
- tripquote = False
- try:
- while sl[i - k] == '"':
- k += 1
- if k == 3:
- tripquote = True
- break
- if k == 1 or (k == 3 and tripquote):
- while sl[i - k] == '\\':
- oddbackslash = not oddbackslash
- k += 1
- except IndexError:
- pass
- if not oddbackslash:
- if tripquote:
- multilinestr = not multilinestr
- openstring = multilinestr
- else:
- openstring = not openstring
- if openstring:
- openstrchar = '"'
- else:
- openstrchar = ""
- if item == '#' and (not openstring and not keygroup and
- not arrayoftables):
- j = i
- try:
- while sl[j] != '\n':
- sl[j] = ' '
- j += 1
- except IndexError:
- break
- if item == '[' and (not openstring and not keygroup and
- not arrayoftables):
- if beginline:
- if len(sl) > i + 1 and sl[i + 1] == '[':
- arrayoftables = True
- else:
- keygroup = True
- else:
- openarr += 1
- if item == ']' and not openstring:
- if keygroup:
- keygroup = False
- elif arrayoftables:
- if sl[i - 1] == ']':
- arrayoftables = False
- else:
- openarr -= 1
- if item == '\n':
- if openstring or multilinestr:
- if not multilinestr:
- raise TomlDecodeError("Unbalanced quotes")
- if ((sl[i - 1] == "'" or sl[i - 1] == '"') and (
- sl[i - 2] == sl[i - 1])):
- sl[i] = sl[i - 1]
- if sl[i - 3] == sl[i - 1]:
- sl[i - 3] = ' '
- elif openarr:
- sl[i] = ' '
- else:
- beginline = True
- elif beginline and sl[i] != ' ' and sl[i] != '\t':
- beginline = False
- if not keygroup and not arrayoftables:
- if sl[i] == '=':
- raise TomlDecodeError("Found empty keyname. ")
- keyname = 1
- s = ''.join(sl)
- s = s.split('\n')
- multikey = None
- multilinestr = ""
- multibackslash = False
- for line in s:
- if not multilinestr or multibackslash or '\n' not in multilinestr:
- line = line.strip()
- if line == "" and (not multikey or multibackslash):
- continue
- if multikey:
- if multibackslash:
- multilinestr += line
- else:
- multilinestr += line
- multibackslash = False
- if len(line) > 2 and (line[-1] == multilinestr[0] and
- line[-2] == multilinestr[0] and
- line[-3] == multilinestr[0]):
- try:
- value, vtype = _load_value(multilinestr, _dict)
- except ValueError as err:
- raise TomlDecodeError(str(err))
- currentlevel[multikey] = value
- multikey = None
- multilinestr = ""
- else:
- k = len(multilinestr) - 1
- while k > -1 and multilinestr[k] == '\\':
- multibackslash = not multibackslash
- k -= 1
- if multibackslash:
- multilinestr = multilinestr[:-1]
- else:
- multilinestr += "\n"
- continue
- if line[0] == '[':
- arrayoftables = False
- if len(line) == 1:
- raise TomlDecodeError("Opening key group bracket on line by "
- "itself.")
- if line[1] == '[':
- arrayoftables = True
- line = line[2:]
- splitstr = ']]'
- else:
- line = line[1:]
- splitstr = ']'
- i = 1
- quotesplits = _get_split_on_quotes(line)
- quoted = False
- for quotesplit in quotesplits:
- if not quoted and splitstr in quotesplit:
- break
- i += quotesplit.count(splitstr)
- quoted = not quoted
- line = line.split(splitstr, i)
- if len(line) < i + 1 or line[-1].strip() != "":
- raise TomlDecodeError("Key group not on a line by itself.")
- groups = splitstr.join(line[:-1]).split('.')
- i = 0
- while i < len(groups):
- groups[i] = groups[i].strip()
- if len(groups[i]) > 0 and (groups[i][0] == '"' or
- groups[i][0] == "'"):
- groupstr = groups[i]
- j = i + 1
- while not groupstr[0] == groupstr[-1]:
- j += 1
- if j > len(groups) + 2:
- raise TomlDecodeError("Invalid group name '" +
- groupstr + "' Something " +
- "went wrong.")
- groupstr = '.'.join(groups[i:j]).strip()
- groups[i] = groupstr[1:-1]
- groups[i + 1:j] = []
- else:
- if not _groupname_re.match(groups[i]):
- raise TomlDecodeError("Invalid group name '" +
- groups[i] + "'. Try quoting it.")
- i += 1
- currentlevel = retval
- for i in _range(len(groups)):
- group = groups[i]
- if group == "":
- raise TomlDecodeError("Can't have a keygroup with an empty "
- "name")
- try:
- currentlevel[group]
- if i == len(groups) - 1:
- if group in implicitgroups:
- implicitgroups.remove(group)
- if arrayoftables:
- raise TomlDecodeError("An implicitly defined "
- "table can't be an array")
- elif arrayoftables:
- currentlevel[group].append(_dict())
- else:
- raise TomlDecodeError("What? " + group +
- " already exists?" +
- str(currentlevel))
- except TypeError:
- currentlevel = currentlevel[-1]
- try:
- currentlevel[group]
- except KeyError:
- currentlevel[group] = _dict()
- if i == len(groups) - 1 and arrayoftables:
- currentlevel[group] = [_dict()]
- except KeyError:
- if i != len(groups) - 1:
- implicitgroups.append(group)
- currentlevel[group] = _dict()
- if i == len(groups) - 1 and arrayoftables:
- currentlevel[group] = [_dict()]
- currentlevel = currentlevel[group]
- if arrayoftables:
- try:
- currentlevel = currentlevel[-1]
- except KeyError:
- pass
- elif line[0] == "{":
- if line[-1] != "}":
- raise TomlDecodeError("Line breaks are not allowed in inline"
- "objects")
- try:
- _load_inline_object(line, currentlevel, _dict, multikey,
- multibackslash)
- except ValueError as err:
- raise TomlDecodeError(str(err))
- elif "=" in line:
- try:
- ret = _load_line(line, currentlevel, _dict, multikey,
- multibackslash)
- except ValueError as err:
- raise TomlDecodeError(str(err))
- if ret is not None:
- multikey, multilinestr, multibackslash = ret
- return retval
-
-
-def _load_inline_object(line, currentlevel, _dict, multikey=False,
- multibackslash=False):
- candidate_groups = line[1:-1].split(",")
- groups = []
- if len(candidate_groups) == 1 and not candidate_groups[0].strip():
- candidate_groups.pop()
- while len(candidate_groups) > 0:
- candidate_group = candidate_groups.pop(0)
- try:
- _, value = candidate_group.split('=', 1)
- except ValueError:
- raise ValueError("Invalid inline table encountered")
- value = value.strip()
- if ((value[0] == value[-1] and value[0] in ('"', "'")) or (
- value[0] in '-0123456789' or
- value in ('true', 'false') or
- (value[0] == "[" and value[-1] == "]") or
- (value[0] == '{' and value[-1] == '}'))):
- groups.append(candidate_group)
- elif len(candidate_groups) > 0:
- candidate_groups[0] = candidate_group + "," + candidate_groups[0]
- else:
- raise ValueError("Invalid inline table value encountered")
- for group in groups:
- status = _load_line(group, currentlevel, _dict, multikey,
- multibackslash)
- if status is not None:
- break
-
-
-# Matches a TOML number, which allows underscores for readability
-_number_with_underscores = re.compile('([0-9])(_([0-9]))*')
-
-
-def _strictly_valid_num(n):
- n = n.strip()
- if not n:
- return False
- if n[0] == '_':
- return False
- if n[-1] == '_':
- return False
- if "_." in n or "._" in n:
- return False
- if len(n) == 1:
- return True
- if n[0] == '0' and n[1] != '.':
- return False
- if n[0] == '+' or n[0] == '-':
- n = n[1:]
- if n[0] == '0' and n[1] != '.':
- return False
- if '__' in n:
- return False
- return True
-
-
-def _get_split_on_quotes(line):
- doublequotesplits = line.split('"')
- quoted = False
- quotesplits = []
- if len(doublequotesplits) > 1 and "'" in doublequotesplits[0]:
- singlequotesplits = doublequotesplits[0].split("'")
- doublequotesplits = doublequotesplits[1:]
- while len(singlequotesplits) % 2 == 0 and len(doublequotesplits):
- singlequotesplits[-1] += '"' + doublequotesplits[0]
- doublequotesplits = doublequotesplits[1:]
- if "'" in singlequotesplits[-1]:
- singlequotesplits = (singlequotesplits[:-1] +
- singlequotesplits[-1].split("'"))
- quotesplits += singlequotesplits
- for doublequotesplit in doublequotesplits:
- if quoted:
- quotesplits.append(doublequotesplit)
- else:
- quotesplits += doublequotesplit.split("'")
- quoted = not quoted
- return quotesplits
-
-
-def _load_line(line, currentlevel, _dict, multikey, multibackslash):
- i = 1
- quotesplits = _get_split_on_quotes(line)
- quoted = False
- for quotesplit in quotesplits:
- if not quoted and '=' in quotesplit:
- break
- i += quotesplit.count('=')
- quoted = not quoted
- pair = line.split('=', i)
- strictly_valid = _strictly_valid_num(pair[-1])
- if _number_with_underscores.match(pair[-1]):
- pair[-1] = pair[-1].replace('_', '')
- while len(pair[-1]) and (pair[-1][0] != ' ' and pair[-1][0] != '\t' and
- pair[-1][0] != "'" and pair[-1][0] != '"' and
- pair[-1][0] != '[' and pair[-1][0] != '{' and
- pair[-1] != 'true' and pair[-1] != 'false'):
- try:
- float(pair[-1])
- break
- except ValueError:
- pass
- if _load_date(pair[-1]) is not None:
- break
- i += 1
- prev_val = pair[-1]
- pair = line.split('=', i)
- if prev_val == pair[-1]:
- raise ValueError("Invalid date or number")
- if strictly_valid:
- strictly_valid = _strictly_valid_num(pair[-1])
- pair = ['='.join(pair[:-1]).strip(), pair[-1].strip()]
- if (pair[0][0] == '"' or pair[0][0] == "'") and \
- (pair[0][-1] == '"' or pair[0][-1] == "'"):
- pair[0] = pair[0][1:-1]
- if len(pair[1]) > 2 and ((pair[1][0] == '"' or pair[1][0] == "'") and
- pair[1][1] == pair[1][0] and
- pair[1][2] == pair[1][0] and
- not (len(pair[1]) > 5 and
- pair[1][-1] == pair[1][0] and
- pair[1][-2] == pair[1][0] and
- pair[1][-3] == pair[1][0])):
- k = len(pair[1]) - 1
- while k > -1 and pair[1][k] == '\\':
- multibackslash = not multibackslash
- k -= 1
- if multibackslash:
- multilinestr = pair[1][:-1]
- else:
- multilinestr = pair[1] + "\n"
- multikey = pair[0]
- else:
- value, vtype = _load_value(pair[1], _dict, strictly_valid)
- try:
- currentlevel[pair[0]]
- raise ValueError("Duplicate keys!")
- except KeyError:
- if multikey:
- return multikey, multilinestr, multibackslash
- else:
- currentlevel[pair[0]] = value
-
-
-def _load_date(val):
- microsecond = 0
- tz = None
- try:
- if len(val) > 19:
- if val[19] == '.':
- if val[-1].upper() == 'Z':
- subsecondval = val[20:-1]
- tzval = "Z"
- else:
- subsecondvalandtz = val[20:]
- if '+' in subsecondvalandtz:
- splitpoint = subsecondvalandtz.index('+')
- subsecondval = subsecondvalandtz[:splitpoint]
- tzval = subsecondvalandtz[splitpoint:]
- elif '-' in subsecondvalandtz:
- splitpoint = subsecondvalandtz.index('-')
- subsecondval = subsecondvalandtz[:splitpoint]
- tzval = subsecondvalandtz[splitpoint:]
- tz = TomlTz(tzval)
- microsecond = int(int(subsecondval) *
- (10 ** (6 - len(subsecondval))))
- else:
- tz = TomlTz(val[19:])
- except ValueError:
- tz = None
- if "-" not in val[1:]:
- return None
- try:
- d = datetime.datetime(
- int(val[:4]), int(val[5:7]),
- int(val[8:10]), int(val[11:13]),
- int(val[14:16]), int(val[17:19]), microsecond, tz)
- except ValueError:
- return None
- return d
-
-
-def _load_unicode_escapes(v, hexbytes, prefix):
- skip = False
- i = len(v) - 1
- while i > -1 and v[i] == '\\':
- skip = not skip
- i -= 1
- for hx in hexbytes:
- if skip:
- skip = False
- i = len(hx) - 1
- while i > -1 and hx[i] == '\\':
- skip = not skip
- i -= 1
- v += prefix
- v += hx
- continue
- hxb = ""
- i = 0
- hxblen = 4
- if prefix == "\\U":
- hxblen = 8
- hxb = ''.join(hx[i:i + hxblen]).lower()
- if hxb.strip('0123456789abcdef'):
- raise ValueError("Invalid escape sequence: " + hxb)
- if hxb[0] == "d" and hxb[1].strip('01234567'):
- raise ValueError("Invalid escape sequence: " + hxb +
- ". Only scalar unicode points are allowed.")
- v += unichr(int(hxb, 16))
- v += unicode(hx[len(hxb):])
- return v
-
-
-# Unescape TOML string values.
-
-# content after the \
-_escapes = ['0', 'b', 'f', 'n', 'r', 't', '"']
-# What it should be replaced by
-_escapedchars = ['\0', '\b', '\f', '\n', '\r', '\t', '\"']
-# Used for substitution
-_escape_to_escapedchars = dict(zip(_escapes, _escapedchars))
-
-
-def _unescape(v):
- """Unescape characters in a TOML string."""
- i = 0
- backslash = False
- while i < len(v):
- if backslash:
- backslash = False
- if v[i] in _escapes:
- v = v[:i - 1] + _escape_to_escapedchars[v[i]] + v[i + 1:]
- elif v[i] == '\\':
- v = v[:i - 1] + v[i:]
- elif v[i] == 'u' or v[i] == 'U':
- i += 1
- else:
- raise ValueError("Reserved escape sequence used")
- continue
- elif v[i] == '\\':
- backslash = True
- i += 1
- return v
-
-
-def _load_value(v, _dict, strictly_valid=True):
- if not v:
- raise ValueError("Empty value is invalid")
- if v == 'true':
- return (True, "bool")
- elif v == 'false':
- return (False, "bool")
- elif v[0] == '"':
- testv = v[1:].split('"')
- triplequote = False
- triplequotecount = 0
- if len(testv) > 1 and testv[0] == '' and testv[1] == '':
- testv = testv[2:]
- triplequote = True
- closed = False
- for tv in testv:
- if tv == '':
- if triplequote:
- triplequotecount += 1
- else:
- closed = True
- else:
- oddbackslash = False
- try:
- i = -1
- j = tv[i]
- while j == '\\':
- oddbackslash = not oddbackslash
- i -= 1
- j = tv[i]
- except IndexError:
- pass
- if not oddbackslash:
- if closed:
- raise ValueError("Stuff after closed string. WTF?")
- else:
- if not triplequote or triplequotecount > 1:
- closed = True
- else:
- triplequotecount = 0
- escapeseqs = v.split('\\')[1:]
- backslash = False
- for i in escapeseqs:
- if i == '':
- backslash = not backslash
- else:
- if i[0] not in _escapes and (i[0] != 'u' and i[0] != 'U' and
- not backslash):
- raise ValueError("Reserved escape sequence used")
- if backslash:
- backslash = False
- for prefix in ["\\u", "\\U"]:
- if prefix in v:
- hexbytes = v.split(prefix)
- v = _load_unicode_escapes(hexbytes[0], hexbytes[1:], prefix)
- v = _unescape(v)
- if len(v) > 1 and v[1] == '"' and (len(v) < 3 or v[1] == v[2]):
- v = v[2:-2]
- return (v[1:-1], "str")
- elif v[0] == "'":
- if v[1] == "'" and (len(v) < 3 or v[1] == v[2]):
- v = v[2:-2]
- return (v[1:-1], "str")
- elif v[0] == '[':
- return (_load_array(v, _dict), "array")
- elif v[0] == '{':
- inline_object = _get_empty_inline_table(_dict)
- _load_inline_object(v, inline_object, _dict)
- return (inline_object, "inline_object")
- else:
- parsed_date = _load_date(v)
- if parsed_date is not None:
- return (parsed_date, "date")
- if not strictly_valid:
- raise ValueError("Weirdness with leading zeroes or "
- "underscores in your number.")
- itype = "int"
- neg = False
- if v[0] == '-':
- neg = True
- v = v[1:]
- elif v[0] == '+':
- v = v[1:]
- v = v.replace('_', '')
- if '.' in v or 'e' in v or 'E' in v:
- if '.' in v and v.split('.', 1)[1] == '':
- raise ValueError("This float is missing digits after "
- "the point")
- if v[0] not in '0123456789':
- raise ValueError("This float doesn't have a leading digit")
- v = float(v)
- itype = "float"
- else:
- v = int(v)
- if neg:
- return (0 - v, itype)
- return (v, itype)
-
-
-def _bounded_string(s):
- if len(s) == 0:
- return True
- if s[-1] != s[0]:
- return False
- i = -2
- backslash = False
- while len(s) + i > 0:
- if s[i] == "\\":
- backslash = not backslash
- i -= 1
- else:
- break
- return not backslash
-
-
-def _load_array(a, _dict):
- atype = None
- retval = []
- a = a.strip()
- if '[' not in a[1:-1] or "" != a[1:-1].split('[')[0].strip():
- strarray = False
- tmpa = a[1:-1].strip()
- if tmpa != '' and (tmpa[0] == '"' or tmpa[0] == "'"):
- strarray = True
- if not a[1:-1].strip().startswith('{'):
- a = a[1:-1].split(',')
- else:
- # a is an inline object, we must find the matching parenthesis
- # to define groups
- new_a = []
- start_group_index = 1
- end_group_index = 2
- in_str = False
- while end_group_index < len(a[1:]):
- if a[end_group_index] == '"' or a[end_group_index] == "'":
- if in_str:
- backslash_index = end_group_index - 1
- while (backslash_index > -1 and
- a[backslash_index] == '\\'):
- in_str = not in_str
- backslash_index -= 1
- in_str = not in_str
- if in_str or a[end_group_index] != '}':
- end_group_index += 1
- continue
-
- # Increase end_group_index by 1 to get the closing bracket
- end_group_index += 1
- new_a.append(a[start_group_index:end_group_index])
-
- # The next start index is at least after the closing bracket, a
- # closing bracket can be followed by a comma since we are in
- # an array.
- start_group_index = end_group_index + 1
- while (start_group_index < len(a[1:]) and
- a[start_group_index] != '{'):
- start_group_index += 1
- end_group_index = start_group_index + 1
- a = new_a
- b = 0
- if strarray:
- while b < len(a) - 1:
- ab = a[b].strip()
- while (not _bounded_string(ab) or
- (len(ab) > 2 and
- ab[0] == ab[1] == ab[2] and
- ab[-2] != ab[0] and
- ab[-3] != ab[0])):
- a[b] = a[b] + ',' + a[b + 1]
- ab = a[b].strip()
- if b < len(a) - 2:
- a = a[:b + 1] + a[b + 2:]
- else:
- a = a[:b + 1]
- b += 1
- else:
- al = list(a[1:-1])
- a = []
- openarr = 0
- j = 0
- for i in _range(len(al)):
- if al[i] == '[':
- openarr += 1
- elif al[i] == ']':
- openarr -= 1
- elif al[i] == ',' and not openarr:
- a.append(''.join(al[j:i]))
- j = i + 1
- a.append(''.join(al[j:]))
- for i in _range(len(a)):
- a[i] = a[i].strip()
- if a[i] != '':
- nval, ntype = _load_value(a[i], _dict)
- if atype:
- if ntype != atype:
- raise ValueError("Not a homogeneous array")
- else:
- atype = ntype
- retval.append(nval)
- return retval
-
-
-def dump(o, f):
- """Writes out dict as toml to a file
-
- Args:
- o: Object to dump into toml
- f: File descriptor where the toml should be stored
-
- Returns:
- String containing the toml corresponding to dictionary
-
- Raises:
- TypeError: When anything other than file descriptor is passed
- """
-
- if not f.write:
- raise TypeError("You can only dump an object to a file descriptor")
- d = dumps(o)
- f.write(d)
- return d
-
-
-def dumps(o, preserve=False):
- """Stringifies input dict as toml
-
- Args:
- o: Object to dump into toml
-
- preserve: Boolean parameter. If true, preserve inline tables.
-
- Returns:
- String containing the toml corresponding to dict
- """
-
- retval = ""
- addtoretval, sections = _dump_sections(o, "")
- retval += addtoretval
- while sections != {}:
- newsections = {}
- for section in sections:
- addtoretval, addtosections = _dump_sections(sections[section],
- section, preserve)
- if addtoretval or (not addtoretval and not addtosections):
- if retval and retval[-2:] != "\n\n":
- retval += "\n"
- retval += "[" + section + "]\n"
- if addtoretval:
- retval += addtoretval
- for s in addtosections:
- newsections[section + "." + s] = addtosections[s]
- sections = newsections
- return retval
-
-
-def _dump_sections(o, sup, preserve=False):
- retstr = ""
- if sup != "" and sup[-1] != ".":
- sup += '.'
- retdict = o.__class__()
- arraystr = ""
- for section in o:
- section = unicode(section)
- qsection = section
- if not re.match(r'^[A-Za-z0-9_-]+$', section):
- if '"' in section:
- qsection = "'" + section + "'"
- else:
- qsection = '"' + section + '"'
- if not isinstance(o[section], dict):
- arrayoftables = False
- if isinstance(o[section], list):
- for a in o[section]:
- if isinstance(a, dict):
- arrayoftables = True
- if arrayoftables:
- for a in o[section]:
- arraytabstr = "\n"
- arraystr += "[[" + sup + qsection + "]]\n"
- s, d = _dump_sections(a, sup + qsection)
- if s:
- if s[0] == "[":
- arraytabstr += s
- else:
- arraystr += s
- while d != {}:
- newd = {}
- for dsec in d:
- s1, d1 = _dump_sections(d[dsec], sup + qsection +
- "." + dsec)
- if s1:
- arraytabstr += ("[" + sup + qsection + "." +
- dsec + "]\n")
- arraytabstr += s1
- for s1 in d1:
- newd[dsec + "." + s1] = d1[s1]
- d = newd
- arraystr += arraytabstr
- else:
- if o[section] is not None:
- retstr += (qsection + " = " +
- unicode(_dump_value(o[section])) + '\n')
- elif preserve and isinstance(o[section], InlineTableDict):
- retstr += (qsection + " = " + _dump_inline_table(o[section]))
- else:
- retdict[qsection] = o[section]
- retstr += arraystr
- return (retstr, retdict)
-
-
-def _dump_inline_table(section):
- """Preserve inline table in its compact syntax instead of expanding
- into subsection.
-
- https://github.com/toml-lang/toml#user-content-inline-table
- """
- retval = ""
- if isinstance(section, dict):
- val_list = []
- for k, v in section.items():
- val = _dump_inline_table(v)
- val_list.append(k + " = " + val)
- retval += "{ " + ", ".join(val_list) + " }\n"
- return retval
- else:
- return unicode(_dump_value(section))
-
-
-def _dump_value(v):
- dump_funcs = {
- str: _dump_str,
- unicode: _dump_str,
- list: _dump_list,
- int: lambda v: v,
- bool: lambda v: unicode(v).lower(),
- float: _dump_float,
- datetime.datetime: lambda v: v.isoformat().replace('+00:00', 'Z'),
- }
- # Lookup function corresponding to v's type
- dump_fn = dump_funcs.get(type(v))
- if dump_fn is None and hasattr(v, '__iter__'):
- dump_fn = dump_funcs[list]
- # Evaluate function (if it exists) else return v
- return dump_fn(v) if dump_fn is not None else dump_funcs[str](v)
-
-
-def _dump_str(v):
- if sys.version_info < (3,) and hasattr(v, 'decode') and isinstance(v, str):
- v = v.decode('utf-8')
- v = "%r" % v
- if v[0] == 'u':
- v = v[1:]
- singlequote = v.startswith("'")
- if singlequote or v.startswith('"'):
- v = v[1:-1]
- if singlequote:
- v = v.replace("\\'", "'")
- v = v.replace('"', '\\"')
- v = v.split("\\x")
- while len(v) > 1:
- i = -1
- if not v[0]:
- v = v[1:]
- v[0] = v[0].replace("\\\\", "\\")
- # No, I don't know why != works and == breaks
- joinx = v[0][i] != "\\"
- while v[0][:i] and v[0][i] == "\\":
- joinx = not joinx
- i -= 1
- if joinx:
- joiner = "x"
- else:
- joiner = "u00"
- v = [v[0] + joiner + v[1]] + v[2:]
- return unicode('"' + v[0] + '"')
-
-
-def _dump_list(v):
- retval = "["
- for u in v:
- retval += " " + unicode(_dump_value(u)) + ","
- retval += "]"
- return retval
-
-
-def _dump_float(v):
- return "{0:.16}".format(v).replace("e+0", "e+").replace("e-0", "e-")
diff --git a/src/pip/_vendor/toml/LICENSE b/src/pip/_vendor/toml/LICENSE
index 08e981ffacf..5010e3075e6 100644
--- a/src/pip/_vendor/toml/LICENSE
+++ b/src/pip/_vendor/toml/LICENSE
@@ -1,11 +1,12 @@
The MIT License
-Copyright 2013-2018 William Pearson
+Copyright 2013-2019 William Pearson
Copyright 2015-2016 Julien Enselme
Copyright 2016 Google Inc.
Copyright 2017 Samuel Vasko
Copyright 2017 Nate Prewitt
Copyright 2017 Jack Evans
+Copyright 2019 Filippo Broggini
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/src/pip/_vendor/toml/__init__.py b/src/pip/_vendor/toml/__init__.py
index 015d73cbe4e..7a08fe72540 100644
--- a/src/pip/_vendor/toml/__init__.py
+++ b/src/pip/_vendor/toml/__init__.py
@@ -6,16 +6,20 @@
from pip._vendor.toml import encoder
from pip._vendor.toml import decoder
-__version__ = "0.10.0"
+__version__ = "0.10.1"
_spec_ = "0.5.0"
load = decoder.load
loads = decoder.loads
TomlDecoder = decoder.TomlDecoder
TomlDecodeError = decoder.TomlDecodeError
+TomlPreserveCommentDecoder = decoder.TomlPreserveCommentDecoder
dump = encoder.dump
dumps = encoder.dumps
TomlEncoder = encoder.TomlEncoder
TomlArraySeparatorEncoder = encoder.TomlArraySeparatorEncoder
TomlPreserveInlineDictEncoder = encoder.TomlPreserveInlineDictEncoder
+TomlNumpyEncoder = encoder.TomlNumpyEncoder
+TomlPreserveCommentEncoder = encoder.TomlPreserveCommentEncoder
+TomlPathlibEncoder = encoder.TomlPathlibEncoder
diff --git a/src/pip/_vendor/toml/common.py b/src/pip/_vendor/toml/common.py
new file mode 100644
index 00000000000..a5d673dac5f
--- /dev/null
+++ b/src/pip/_vendor/toml/common.py
@@ -0,0 +1,6 @@
+# content after the \
+escapes = ['0', 'b', 'f', 'n', 'r', 't', '"']
+# What it should be replaced by
+escapedchars = ['\0', '\b', '\f', '\n', '\r', '\t', '\"']
+# Used for substitution
+escape_to_escapedchars = dict(zip(escapes, escapedchars))
diff --git a/src/pip/_vendor/toml/decoder.py b/src/pip/_vendor/toml/decoder.py
index 20be459122d..e4887770c3b 100644
--- a/src/pip/_vendor/toml/decoder.py
+++ b/src/pip/_vendor/toml/decoder.py
@@ -24,7 +24,7 @@ def _detect_pathlib_path(p):
def _ispath(p):
- if isinstance(p, basestring):
+ if isinstance(p, (bytes, basestring)):
return True
return _detect_pathlib_path(p)
@@ -44,7 +44,7 @@ def _getpath(p):
FNFError = IOError
-TIME_RE = re.compile("([0-9]{2}):([0-9]{2}):([0-9]{2})(\.([0-9]{3,6}))?")
+TIME_RE = re.compile(r"([0-9]{2}):([0-9]{2}):([0-9]{2})(\.([0-9]{3,6}))?")
class TomlDecodeError(ValueError):
@@ -66,6 +66,27 @@ def __init__(self, msg, doc, pos):
_number_with_underscores = re.compile('([0-9])(_([0-9]))*')
+class CommentValue(object):
+ def __init__(self, val, comment, beginline, _dict):
+ self.val = val
+ separator = "\n" if beginline else " "
+ self.comment = separator + comment
+ self._dict = _dict
+
+ def __getitem__(self, key):
+ return self.val[key]
+
+ def __setitem__(self, key, value):
+ self.val[key] = value
+
+ def dump(self, dump_value_func):
+ retstr = dump_value_func(self.val)
+ if isinstance(self.val, self._dict):
+ return self.comment + "\n" + unicode(retstr)
+ else:
+ return unicode(retstr) + self.comment
+
+
def _strictly_valid_num(n):
n = n.strip()
if not n:
@@ -96,6 +117,7 @@ def load(f, _dict=dict, decoder=None):
f: Path to the file to open, array of files to read into single dict
or a file descriptor
_dict: (optional) Specifies the class of the returned toml dictionary
+ decoder: The decoder to use
Returns:
Parsed toml file represented as a dictionary
@@ -120,9 +142,9 @@ def load(f, _dict=dict, decoder=None):
"existing file.")
raise FNFError(error_msg)
if decoder is None:
- decoder = TomlDecoder()
+ decoder = TomlDecoder(_dict)
d = decoder.get_empty_table()
- for l in f:
+ for l in f: # noqa: E741
if op.exists(l):
d.update(load(l, _dict, decoder))
else:
@@ -177,19 +199,30 @@ def loads(s, _dict=dict, decoder=None):
keygroup = False
dottedkey = False
keyname = 0
+ key = ''
+ prev_key = ''
+ line_no = 1
+
for i, item in enumerate(sl):
if item == '\r' and sl[i + 1] == '\n':
sl[i] = ' '
continue
if keyname:
+ key += item
if item == '\n':
raise TomlDecodeError("Key name found without value."
" Reached end of line.", original, i)
if openstring:
if item == openstrchar:
- keyname = 2
- openstring = False
- openstrchar = ""
+ oddbackslash = False
+ k = 1
+ while i >= k and sl[i - k] == '\\':
+ oddbackslash = not oddbackslash
+ k += 1
+ if not oddbackslash:
+ keyname = 2
+ openstring = False
+ openstrchar = ""
continue
elif keyname == 1:
if item.isspace():
@@ -220,6 +253,8 @@ def loads(s, _dict=dict, decoder=None):
continue
if item == '=':
keyname = 0
+ prev_key = key[:-1].rstrip()
+ key = ''
dottedkey = False
else:
raise TomlDecodeError("Found invalid character in key name: '" +
@@ -272,12 +307,16 @@ def loads(s, _dict=dict, decoder=None):
if item == '#' and (not openstring and not keygroup and
not arrayoftables):
j = i
+ comment = ""
try:
while sl[j] != '\n':
+ comment += s[j]
sl[j] = ' '
j += 1
except IndexError:
break
+ if not openarr:
+ decoder.preserve_comment(line_no, prev_key, comment, beginline)
if item == '[' and (not openstring and not keygroup and
not arrayoftables):
if beginline:
@@ -308,12 +347,20 @@ def loads(s, _dict=dict, decoder=None):
sl[i] = ' '
else:
beginline = True
+ line_no += 1
elif beginline and sl[i] != ' ' and sl[i] != '\t':
beginline = False
if not keygroup and not arrayoftables:
if sl[i] == '=':
raise TomlDecodeError("Found empty keyname. ", original, i)
keyname = 1
+ key += item
+ if keyname:
+ raise TomlDecodeError("Key name found without value."
+ " Reached end of file.", original, len(s))
+ if openstring: # reached EOF and have an unterminated string
+ raise TomlDecodeError("Unterminated string found."
+ " Reached end of file.", original, len(s))
s = ''.join(sl)
s = s.split('\n')
multikey = None
@@ -323,6 +370,9 @@ def loads(s, _dict=dict, decoder=None):
for idx, line in enumerate(s):
if idx > 0:
pos += len(s[idx - 1]) + 1
+
+ decoder.embed_comments(idx, currentlevel)
+
if not multilinestr or multibackslash or '\n' not in multilinestr:
line = line.strip()
if line == "" and (not multikey or multibackslash):
@@ -333,9 +383,14 @@ def loads(s, _dict=dict, decoder=None):
else:
multilinestr += line
multibackslash = False
- if len(line) > 2 and (line[-1] == multilinestr[0] and
- line[-2] == multilinestr[0] and
- line[-3] == multilinestr[0]):
+ closed = False
+ if multilinestr[0] == '[':
+ closed = line[-1] == ']'
+ elif len(line) > 2:
+ closed = (line[-1] == multilinestr[0] and
+ line[-2] == multilinestr[0] and
+ line[-3] == multilinestr[0])
+ if closed:
try:
value, vtype = decoder.load_value(multilinestr)
except ValueError as err:
@@ -663,7 +718,8 @@ def load_line(self, line, currentlevel, multikey, multibackslash):
while len(pair[-1]) and (pair[-1][0] != ' ' and pair[-1][0] != '\t' and
pair[-1][0] != "'" and pair[-1][0] != '"' and
pair[-1][0] != '[' and pair[-1][0] != '{' and
- pair[-1] != 'true' and pair[-1] != 'false'):
+ pair[-1].strip() != 'true' and
+ pair[-1].strip() != 'false'):
try:
float(pair[-1])
break
@@ -671,6 +727,8 @@ def load_line(self, line, currentlevel, multikey, multibackslash):
pass
if _load_date(pair[-1]) is not None:
break
+ if TIME_RE.match(pair[-1]):
+ break
i += 1
prev_val = pair[-1]
pair = line.split('=', i)
@@ -704,16 +762,10 @@ def load_line(self, line, currentlevel, multikey, multibackslash):
pair[0] = levels[-1].strip()
elif (pair[0][0] == '"' or pair[0][0] == "'") and \
(pair[0][-1] == pair[0][0]):
- pair[0] = pair[0][1:-1]
- if len(pair[1]) > 2 and ((pair[1][0] == '"' or pair[1][0] == "'") and
- pair[1][1] == pair[1][0] and
- pair[1][2] == pair[1][0] and
- not (len(pair[1]) > 5 and
- pair[1][-1] == pair[1][0] and
- pair[1][-2] == pair[1][0] and
- pair[1][-3] == pair[1][0])):
- k = len(pair[1]) - 1
- while k > -1 and pair[1][k] == '\\':
+ pair[0] = _unescape(pair[0][1:-1])
+ k, koffset = self._load_line_multiline_str(pair[1])
+ if k > -1:
+ while k > -1 and pair[1][k + koffset] == '\\':
multibackslash = not multibackslash
k -= 1
if multibackslash:
@@ -734,6 +786,26 @@ def load_line(self, line, currentlevel, multikey, multibackslash):
else:
currentlevel[pair[0]] = value
+ def _load_line_multiline_str(self, p):
+ poffset = 0
+ if len(p) < 3:
+ return -1, poffset
+ if p[0] == '[' and (p.strip()[-1] != ']' and
+ self._load_array_isstrarray(p)):
+ newp = p[1:].strip().split(',')
+ while len(newp) > 1 and newp[-1][0] != '"' and newp[-1][0] != "'":
+ newp = newp[:-2] + [newp[-2] + ',' + newp[-1]]
+ newp = newp[-1]
+ poffset = len(p) - len(newp)
+ p = newp
+ if p[0] != '"' and p[0] != "'":
+ return -1, poffset
+ if p[1] != p[0] or p[2] != p[0]:
+ return -1, poffset
+ if len(p) > 5 and p[-1] == p[0] and p[-2] == p[0] and p[-3] == p[0]:
+ return -1, poffset
+ return len(p) - 1, poffset
+
def load_value(self, v, strictly_valid=True):
if not v:
raise ValueError("Empty value is invalid")
@@ -769,7 +841,8 @@ def load_value(self, v, strictly_valid=True):
pass
if not oddbackslash:
if closed:
- raise ValueError("Stuff after closed string. WTF?")
+ raise ValueError("Found tokens after a closed " +
+ "string. Invalid TOML.")
else:
if not triplequote or triplequotecount > 1:
closed = True
@@ -857,15 +930,18 @@ def bounded_string(self, s):
break
return not backslash
+ def _load_array_isstrarray(self, a):
+ a = a[1:-1].strip()
+ if a != '' and (a[0] == '"' or a[0] == "'"):
+ return True
+ return False
+
def load_array(self, a):
atype = None
retval = []
a = a.strip()
if '[' not in a[1:-1] or "" != a[1:-1].split('[')[0].strip():
- strarray = False
- tmpa = a[1:-1].strip()
- if tmpa != '' and (tmpa[0] == '"' or tmpa[0] == "'"):
- strarray = True
+ strarray = self._load_array_isstrarray(a)
if not a[1:-1].strip().startswith('{'):
a = a[1:-1].split(',')
else:
@@ -874,6 +950,7 @@ def load_array(self, a):
new_a = []
start_group_index = 1
end_group_index = 2
+ open_bracket_count = 1 if a[start_group_index] == '{' else 0
in_str = False
while end_group_index < len(a[1:]):
if a[end_group_index] == '"' or a[end_group_index] == "'":
@@ -884,9 +961,15 @@ def load_array(self, a):
in_str = not in_str
backslash_index -= 1
in_str = not in_str
+ if not in_str and a[end_group_index] == '{':
+ open_bracket_count += 1
if in_str or a[end_group_index] != '}':
end_group_index += 1
continue
+ elif a[end_group_index] == '}' and open_bracket_count > 1:
+ open_bracket_count -= 1
+ end_group_index += 1
+ continue
# Increase end_group_index by 1 to get the closing bracket
end_group_index += 1
@@ -943,3 +1026,27 @@ def load_array(self, a):
atype = ntype
retval.append(nval)
return retval
+
+ def preserve_comment(self, line_no, key, comment, beginline):
+ pass
+
+ def embed_comments(self, idx, currentlevel):
+ pass
+
+
+class TomlPreserveCommentDecoder(TomlDecoder):
+
+ def __init__(self, _dict=dict):
+ self.saved_comments = {}
+ super(TomlPreserveCommentDecoder, self).__init__(_dict)
+
+ def preserve_comment(self, line_no, key, comment, beginline):
+ self.saved_comments[line_no] = (key, comment, beginline)
+
+ def embed_comments(self, idx, currentlevel):
+ if idx not in self.saved_comments:
+ return
+
+ key, comment, beginline = self.saved_comments[idx]
+ currentlevel[key] = CommentValue(currentlevel[key], comment, beginline,
+ self._dict)
diff --git a/src/pip/_vendor/toml/encoder.py b/src/pip/_vendor/toml/encoder.py
index 53b0bd5ace5..a8b03c7bea8 100644
--- a/src/pip/_vendor/toml/encoder.py
+++ b/src/pip/_vendor/toml/encoder.py
@@ -1,6 +1,7 @@
import datetime
import re
import sys
+from decimal import Decimal
from pip._vendor.toml.decoder import InlineTableDict
@@ -8,12 +9,13 @@
unicode = str
-def dump(o, f):
+def dump(o, f, encoder=None):
"""Writes out dict as toml to a file
Args:
o: Object to dump into toml
f: File descriptor where the toml should be stored
+ encoder: The ``TomlEncoder`` to use for constructing the output string
Returns:
String containing the toml corresponding to dictionary
@@ -24,7 +26,7 @@ def dump(o, f):
if not f.write:
raise TypeError("You can only dump an object to a file descriptor")
- d = dumps(o)
+ d = dumps(o, encoder=encoder)
f.write(d)
return d
@@ -34,11 +36,22 @@ def dumps(o, encoder=None):
Args:
o: Object to dump into toml
-
- preserve: Boolean parameter. If true, preserve inline tables.
+ encoder: The ``TomlEncoder`` to use for constructing the output string
Returns:
String containing the toml corresponding to dict
+
+ Examples:
+ ```python
+ >>> import toml
+ >>> output = {
+ ... 'a': "I'm a string",
+ ... 'b': ["I'm", "a", "list"],
+ ... 'c': 2400
+ ... }
+ >>> toml.dumps(output)
+ 'a = "I\'m a string"\nb = [ "I\'m", "a", "list",]\nc = 2400\n'
+ ```
"""
retval = ""
@@ -46,7 +59,13 @@ def dumps(o, encoder=None):
encoder = TomlEncoder(o.__class__)
addtoretval, sections = encoder.dump_sections(o, "")
retval += addtoretval
+ outer_objs = [id(o)]
while sections:
+ section_ids = [id(section) for section in sections]
+ for outer_obj in outer_objs:
+ if outer_obj in section_ids:
+ raise ValueError("Circular reference detected")
+ outer_objs += section_ids
newsections = encoder.get_empty_table()
for section in sections:
addtoretval, addtosections = encoder.dump_sections(
@@ -96,7 +115,7 @@ def _dump_str(v):
def _dump_float(v):
- return "{0:.16}".format(v).replace("e+0", "e+").replace("e-0", "e-")
+ return "{}".format(v).replace("e+0", "e+").replace("e-0", "e-")
def _dump_time(v):
@@ -119,6 +138,7 @@ def __init__(self, _dict=dict, preserve=False):
bool: lambda v: unicode(v).lower(),
int: lambda v: v,
float: _dump_float,
+ Decimal: _dump_float,
datetime.datetime: lambda v: v.isoformat().replace('+00:00', 'Z'),
datetime.time: _dump_time,
datetime.date: lambda v: v.isoformat()
@@ -169,10 +189,7 @@ def dump_sections(self, o, sup):
section = unicode(section)
qsection = section
if not re.match(r'^[A-Za-z0-9_-]+$', section):
- if '"' in section:
- qsection = "'" + section + "'"
- else:
- qsection = '"' + section + '"'
+ qsection = _dump_str(section)
if not isinstance(o[section], dict):
arrayoftables = False
if isinstance(o[section], list):
@@ -248,3 +265,40 @@ def dump_list(self, v):
t = s
retval += "]"
return retval
+
+
+class TomlNumpyEncoder(TomlEncoder):
+
+ def __init__(self, _dict=dict, preserve=False):
+ import numpy as np
+ super(TomlNumpyEncoder, self).__init__(_dict, preserve)
+ self.dump_funcs[np.float16] = _dump_float
+ self.dump_funcs[np.float32] = _dump_float
+ self.dump_funcs[np.float64] = _dump_float
+ self.dump_funcs[np.int16] = self._dump_int
+ self.dump_funcs[np.int32] = self._dump_int
+ self.dump_funcs[np.int64] = self._dump_int
+
+ def _dump_int(self, v):
+ return "{}".format(int(v))
+
+
+class TomlPreserveCommentEncoder(TomlEncoder):
+
+ def __init__(self, _dict=dict, preserve=False):
+ from pip._vendor.toml.decoder import CommentValue
+ super(TomlPreserveCommentEncoder, self).__init__(_dict, preserve)
+ self.dump_funcs[CommentValue] = lambda v: v.dump(self.dump_value)
+
+
+class TomlPathlibEncoder(TomlEncoder):
+
+ def _dump_pathlib_path(self, v):
+ return _dump_str(str(v))
+
+ def dump_value(self, v):
+ if (3, 4) <= sys.version_info:
+ import pathlib
+ if isinstance(v, pathlib.PurePath):
+ v = str(v)
+ return super(TomlPathlibEncoder, self).dump_value(v)
diff --git a/src/pip/_vendor/urllib3/__init__.py b/src/pip/_vendor/urllib3/__init__.py
index 9bd8323f91e..667e9bce9e3 100644
--- a/src/pip/_vendor/urllib3/__init__.py
+++ b/src/pip/_vendor/urllib3/__init__.py
@@ -22,7 +22,7 @@
__author__ = "Andrey Petrov (andrey.petrov@shazow.net)"
__license__ = "MIT"
-__version__ = "1.25.8"
+__version__ = "1.25.9"
__all__ = (
"HTTPConnectionPool",
diff --git a/src/pip/_vendor/urllib3/connection.py b/src/pip/_vendor/urllib3/connection.py
index 71e6790b1b9..6da1cf4b6dc 100644
--- a/src/pip/_vendor/urllib3/connection.py
+++ b/src/pip/_vendor/urllib3/connection.py
@@ -1,4 +1,5 @@
from __future__ import absolute_import
+import re
import datetime
import logging
import os
@@ -58,6 +59,8 @@ class ConnectionError(Exception):
# (ie test_recent_date is failing) update it to ~6 months before the current date.
RECENT_DATE = datetime.date(2019, 1, 1)
+_CONTAINS_CONTROL_CHAR_RE = re.compile(r"[^-!#$%&'*+.^_`|~0-9a-zA-Z]")
+
class DummyConnection(object):
"""Used to detect a failed ConnectionCls import."""
@@ -184,6 +187,17 @@ def connect(self):
conn = self._new_conn()
self._prepare_conn(conn)
+ def putrequest(self, method, url, *args, **kwargs):
+ """Send a request to the server"""
+ match = _CONTAINS_CONTROL_CHAR_RE.search(method)
+ if match:
+ raise ValueError(
+ "Method cannot contain non-token characters %r (found at least %r)"
+ % (method, match.group())
+ )
+
+ return _HTTPConnection.putrequest(self, method, url, *args, **kwargs)
+
def request_chunked(self, method, url, body=None, headers=None):
"""
Alternative to the common request method, which sends the
@@ -223,7 +237,12 @@ def request_chunked(self, method, url, body=None, headers=None):
class HTTPSConnection(HTTPConnection):
default_port = port_by_scheme["https"]
+ cert_reqs = None
+ ca_certs = None
+ ca_cert_dir = None
+ ca_cert_data = None
ssl_version = None
+ assert_fingerprint = None
def __init__(
self,
@@ -251,19 +270,6 @@ def __init__(
# HTTPS requests to go out as HTTP. (See Issue #356)
self._protocol = "https"
-
-class VerifiedHTTPSConnection(HTTPSConnection):
- """
- Based on httplib.HTTPSConnection but wraps the socket with
- SSL certification.
- """
-
- cert_reqs = None
- ca_certs = None
- ca_cert_dir = None
- ssl_version = None
- assert_fingerprint = None
-
def set_cert(
self,
key_file=None,
@@ -274,6 +280,7 @@ def set_cert(
assert_hostname=None,
assert_fingerprint=None,
ca_cert_dir=None,
+ ca_cert_data=None,
):
"""
This method should only be called once, before the connection is used.
@@ -294,6 +301,7 @@ def set_cert(
self.assert_fingerprint = assert_fingerprint
self.ca_certs = ca_certs and os.path.expanduser(ca_certs)
self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir)
+ self.ca_cert_data = ca_cert_data
def connect(self):
# Add certificate verification
@@ -344,6 +352,7 @@ def connect(self):
if (
not self.ca_certs
and not self.ca_cert_dir
+ and not self.ca_cert_data
and default_ssl_context
and hasattr(context, "load_default_certs")
):
@@ -356,6 +365,7 @@ def connect(self):
key_password=self.key_password,
ca_certs=self.ca_certs,
ca_cert_dir=self.ca_cert_dir,
+ ca_cert_data=self.ca_cert_data,
server_hostname=server_hostname,
ssl_context=context,
)
@@ -406,9 +416,8 @@ def _match_hostname(cert, asserted_hostname):
raise
-if ssl:
- # Make a copy for testing.
- UnverifiedHTTPSConnection = HTTPSConnection
- HTTPSConnection = VerifiedHTTPSConnection
-else:
- HTTPSConnection = DummyConnection
+if not ssl:
+ HTTPSConnection = DummyConnection # noqa: F811
+
+
+VerifiedHTTPSConnection = HTTPSConnection
diff --git a/src/pip/_vendor/urllib3/connectionpool.py b/src/pip/_vendor/urllib3/connectionpool.py
index d42eb7be673..5f044dbd90f 100644
--- a/src/pip/_vendor/urllib3/connectionpool.py
+++ b/src/pip/_vendor/urllib3/connectionpool.py
@@ -65,6 +65,11 @@ class ConnectionPool(object):
"""
Base class for all connection pools, such as
:class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
+
+ .. note::
+ ConnectionPool.urlopen() does not normalize or percent-encode target URIs
+ which is useful if your target server doesn't support percent-encoded
+ target URIs.
"""
scheme = None
@@ -760,21 +765,6 @@ def urlopen(
**response_kw
)
- def drain_and_release_conn(response):
- try:
- # discard any remaining response body, the connection will be
- # released back to the pool once the entire response is read
- response.read()
- except (
- TimeoutError,
- HTTPException,
- SocketError,
- ProtocolError,
- BaseSSLError,
- SSLError,
- ):
- pass
-
# Handle redirect?
redirect_location = redirect and response.get_redirect_location()
if redirect_location:
@@ -785,15 +775,11 @@ def drain_and_release_conn(response):
retries = retries.increment(method, url, response=response, _pool=self)
except MaxRetryError:
if retries.raise_on_redirect:
- # Drain and release the connection for this response, since
- # we're not returning it to be released manually.
- drain_and_release_conn(response)
+ response.drain_conn()
raise
return response
- # drain and return the connection to the pool before recursing
- drain_and_release_conn(response)
-
+ response.drain_conn()
retries.sleep_for_retry(response)
log.debug("Redirecting %s -> %s", url, redirect_location)
return self.urlopen(
@@ -819,15 +805,11 @@ def drain_and_release_conn(response):
retries = retries.increment(method, url, response=response, _pool=self)
except MaxRetryError:
if retries.raise_on_status:
- # Drain and release the connection for this response, since
- # we're not returning it to be released manually.
- drain_and_release_conn(response)
+ response.drain_conn()
raise
return response
- # drain and return the connection to the pool before recursing
- drain_and_release_conn(response)
-
+ response.drain_conn()
retries.sleep(response)
log.debug("Retry: %s", url)
return self.urlopen(
diff --git a/src/pip/_vendor/urllib3/contrib/pyopenssl.py b/src/pip/_vendor/urllib3/contrib/pyopenssl.py
index fc99d34bd4c..d8fe0629c42 100644
--- a/src/pip/_vendor/urllib3/contrib/pyopenssl.py
+++ b/src/pip/_vendor/urllib3/contrib/pyopenssl.py
@@ -450,9 +450,12 @@ def load_verify_locations(self, cafile=None, capath=None, cadata=None):
cafile = cafile.encode("utf-8")
if capath is not None:
capath = capath.encode("utf-8")
- self._ctx.load_verify_locations(cafile, capath)
- if cadata is not None:
- self._ctx.load_verify_locations(BytesIO(cadata))
+ try:
+ self._ctx.load_verify_locations(cafile, capath)
+ if cadata is not None:
+ self._ctx.load_verify_locations(BytesIO(cadata))
+ except OpenSSL.SSL.Error as e:
+ raise ssl.SSLError("unable to load trusted certificates: %r" % e)
def load_cert_chain(self, certfile, keyfile=None, password=None):
self._ctx.use_certificate_chain_file(certfile)
diff --git a/src/pip/_vendor/urllib3/contrib/securetransport.py b/src/pip/_vendor/urllib3/contrib/securetransport.py
index 87d844afa78..a6b7e94ade5 100644
--- a/src/pip/_vendor/urllib3/contrib/securetransport.py
+++ b/src/pip/_vendor/urllib3/contrib/securetransport.py
@@ -819,6 +819,11 @@ def load_verify_locations(self, cafile=None, capath=None, cadata=None):
if capath is not None:
raise ValueError("SecureTransport does not support cert directories")
+ # Raise if cafile does not exist.
+ if cafile is not None:
+ with open(cafile):
+ pass
+
self._trust_bundle = cafile or cadata
def load_cert_chain(self, certfile, keyfile=None, password=None):
diff --git a/src/pip/_vendor/urllib3/exceptions.py b/src/pip/_vendor/urllib3/exceptions.py
index 0a74c79b5ea..5cc4d8a4f17 100644
--- a/src/pip/_vendor/urllib3/exceptions.py
+++ b/src/pip/_vendor/urllib3/exceptions.py
@@ -45,7 +45,10 @@ class SSLError(HTTPError):
class ProxyError(HTTPError):
"Raised when the connection to a proxy fails."
- pass
+
+ def __init__(self, message, error, *args):
+ super(ProxyError, self).__init__(message, error, *args)
+ self.original_error = error
class DecodeError(HTTPError):
@@ -195,6 +198,20 @@ class DependencyWarning(HTTPWarning):
pass
+class InvalidProxyConfigurationWarning(HTTPWarning):
+ """
+ Warned when using an HTTPS proxy and an HTTPS URL. Currently
+ urllib3 doesn't support HTTPS proxies and the proxy will be
+ contacted via HTTP instead. This warning can be fixed by
+ changing your HTTPS proxy URL into an HTTP proxy URL.
+
+ If you encounter this warning read this:
+ https://github.com/urllib3/urllib3/issues/1850
+ """
+
+ pass
+
+
class ResponseNotChunked(ProtocolError, ValueError):
"Response needs to be chunked in order to read it as chunks."
pass
diff --git a/src/pip/_vendor/urllib3/poolmanager.py b/src/pip/_vendor/urllib3/poolmanager.py
index 242a2f8203f..e2bd3bd8dba 100644
--- a/src/pip/_vendor/urllib3/poolmanager.py
+++ b/src/pip/_vendor/urllib3/poolmanager.py
@@ -2,11 +2,17 @@
import collections
import functools
import logging
+import warnings
from ._collections import RecentlyUsedContainer
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool
from .connectionpool import port_by_scheme
-from .exceptions import LocationValueError, MaxRetryError, ProxySchemeUnknown
+from .exceptions import (
+ LocationValueError,
+ MaxRetryError,
+ ProxySchemeUnknown,
+ InvalidProxyConfigurationWarning,
+)
from .packages import six
from .packages.six.moves.urllib.parse import urljoin
from .request import RequestMethods
@@ -359,6 +365,7 @@ def urlopen(self, method, url, redirect=True, **kw):
retries = retries.increment(method, url, response=response, _pool=conn)
except MaxRetryError:
if retries.raise_on_redirect:
+ response.drain_conn()
raise
return response
@@ -366,6 +373,8 @@ def urlopen(self, method, url, redirect=True, **kw):
kw["redirect"] = redirect
log.info("Redirecting %s -> %s", url, redirect_location)
+
+ response.drain_conn()
return self.urlopen(method, redirect_location, **kw)
@@ -452,9 +461,22 @@ def _set_proxy_headers(self, url, headers=None):
headers_.update(headers)
return headers_
+ def _validate_proxy_scheme_url_selection(self, url_scheme):
+ if url_scheme == "https" and self.proxy.scheme == "https":
+ warnings.warn(
+ "Your proxy configuration specified an HTTPS scheme for the proxy. "
+ "Are you sure you want to use HTTPS to contact the proxy? "
+ "This most likely indicates an error in your configuration. "
+ "Read this issue for more info: "
+ "https://github.com/urllib3/urllib3/issues/1850",
+ InvalidProxyConfigurationWarning,
+ stacklevel=3,
+ )
+
def urlopen(self, method, url, redirect=True, **kw):
"Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
u = parse_url(url)
+ self._validate_proxy_scheme_url_selection(u.scheme)
if u.scheme == "http":
# For proxied HTTPS requests, httplib sets the necessary headers
diff --git a/src/pip/_vendor/urllib3/response.py b/src/pip/_vendor/urllib3/response.py
index 6090a7350f9..7dc9b93caef 100644
--- a/src/pip/_vendor/urllib3/response.py
+++ b/src/pip/_vendor/urllib3/response.py
@@ -20,6 +20,7 @@
ResponseNotChunked,
IncompleteRead,
InvalidHeader,
+ HTTPError,
)
from .packages.six import string_types as basestring, PY3
from .packages.six.moves import http_client as httplib
@@ -277,6 +278,17 @@ def release_conn(self):
self._pool._put_conn(self._connection)
self._connection = None
+ def drain_conn(self):
+ """
+ Read and discard any remaining HTTP response data in the response connection.
+
+ Unread data in the HTTPResponse connection blocks the connection from being released back to the pool.
+ """
+ try:
+ self.read()
+ except (HTTPError, SocketError, BaseSSLError, HTTPException):
+ pass
+
@property
def data(self):
# For backwords-compat with earlier urllib3 0.4 and earlier.
diff --git a/src/pip/_vendor/urllib3/util/retry.py b/src/pip/_vendor/urllib3/util/retry.py
index 5a049fe65e0..ee30c91b147 100644
--- a/src/pip/_vendor/urllib3/util/retry.py
+++ b/src/pip/_vendor/urllib3/util/retry.py
@@ -13,6 +13,7 @@
ReadTimeoutError,
ResponseError,
InvalidHeader,
+ ProxyError,
)
from ..packages import six
@@ -306,6 +307,8 @@ def _is_connection_error(self, err):
""" Errors when we're fairly sure that the server did not receive the
request, so it should be safe to retry.
"""
+ if isinstance(err, ProxyError):
+ err = err.original_error
return isinstance(err, ConnectTimeoutError)
def _is_read_error(self, err):
diff --git a/src/pip/_vendor/urllib3/util/ssl_.py b/src/pip/_vendor/urllib3/util/ssl_.py
index 3f78296f656..d3b463d49f5 100644
--- a/src/pip/_vendor/urllib3/util/ssl_.py
+++ b/src/pip/_vendor/urllib3/util/ssl_.py
@@ -119,12 +119,15 @@ def load_cert_chain(self, certfile, keyfile):
self.certfile = certfile
self.keyfile = keyfile
- def load_verify_locations(self, cafile=None, capath=None):
+ def load_verify_locations(self, cafile=None, capath=None, cadata=None):
self.ca_certs = cafile
if capath is not None:
raise SSLError("CA directories not supported in older Pythons")
+ if cadata is not None:
+ raise SSLError("CA data not supported in older Pythons")
+
def set_ciphers(self, cipher_suite):
self.ciphers = cipher_suite
@@ -305,6 +308,7 @@ def ssl_wrap_socket(
ssl_context=None,
ca_cert_dir=None,
key_password=None,
+ ca_cert_data=None,
):
"""
All arguments except for server_hostname, ssl_context, and ca_cert_dir have
@@ -323,6 +327,9 @@ def ssl_wrap_socket(
SSLContext.load_verify_locations().
:param key_password:
Optional password if the keyfile is encrypted.
+ :param ca_cert_data:
+ Optional string containing CA certificates in PEM format suitable for
+ passing as the cadata parameter to SSLContext.load_verify_locations()
"""
context = ssl_context
if context is None:
@@ -331,9 +338,9 @@ def ssl_wrap_socket(
# this code.
context = create_urllib3_context(ssl_version, cert_reqs, ciphers=ciphers)
- if ca_certs or ca_cert_dir:
+ if ca_certs or ca_cert_dir or ca_cert_data:
try:
- context.load_verify_locations(ca_certs, ca_cert_dir)
+ context.load_verify_locations(ca_certs, ca_cert_dir, ca_cert_data)
except IOError as e: # Platform-specific: Python 2.7
raise SSLError(e)
# Py33 raises FileNotFoundError which subclasses OSError
diff --git a/src/pip/_vendor/urllib3/util/timeout.py b/src/pip/_vendor/urllib3/util/timeout.py
index 9883700556e..b61fea75c50 100644
--- a/src/pip/_vendor/urllib3/util/timeout.py
+++ b/src/pip/_vendor/urllib3/util/timeout.py
@@ -98,7 +98,7 @@ def __init__(self, total=None, connect=_Default, read=_Default):
self.total = self._validate_timeout(total, "total")
self._start_connect = None
- def __str__(self):
+ def __repr__(self):
return "%s(connect=%r, read=%r, total=%r)" % (
type(self).__name__,
self._connect,
@@ -106,6 +106,9 @@ def __str__(self):
self.total,
)
+ # __str__ provided for backwards compatibility
+ __str__ = __repr__
+
@classmethod
def _validate_timeout(cls, value, name):
""" Check that a timeout attribute is valid.
diff --git a/src/pip/_vendor/urllib3/util/url.py b/src/pip/_vendor/urllib3/util/url.py
index 5f8aee629a7..0eb0b6a8cc5 100644
--- a/src/pip/_vendor/urllib3/util/url.py
+++ b/src/pip/_vendor/urllib3/util/url.py
@@ -18,7 +18,7 @@
SCHEME_RE = re.compile(r"^(?:[a-zA-Z][a-zA-Z0-9+-]*:|/)")
URI_RE = re.compile(
r"^(?:([a-zA-Z][a-zA-Z0-9+.-]*):)?"
- r"(?://([^/?#]*))?"
+ r"(?://([^\\/?#]*))?"
r"([^?#]*)"
r"(?:\?([^#]*))?"
r"(?:#(.*))?$",
diff --git a/src/pip/_vendor/vendor.txt b/src/pip/_vendor/vendor.txt
index e032f5f732a..06fa1358f00 100644
--- a/src/pip/_vendor/vendor.txt
+++ b/src/pip/_vendor/vendor.txt
@@ -1,24 +1,24 @@
-appdirs==1.4.3
+appdirs==1.4.4
CacheControl==0.12.6
colorama==0.4.3
contextlib2==0.6.0.post1
-distlib==0.3.0
+distlib==0.3.1
distro==1.5.0
-html5lib==1.0.1
+html5lib==1.1
ipaddress==1.0.23 # Only needed on 2.6 and 2.7
msgpack==1.0.0
-packaging==20.3
+packaging==20.4
pep517==0.8.2
progress==1.5
pyparsing==2.4.7
-requests==2.23.0
- certifi==2020.04.05.1
+requests==2.24.0
+ certifi==2020.06.20
chardet==3.0.4
- idna==2.9
- urllib3==1.25.8
+ idna==2.10
+ urllib3==1.25.9
resolvelib==0.4.0
retrying==1.3.3
setuptools==44.0.0
-six==1.14.0
-toml==0.10.0
+six==1.15.0
+toml==0.10.1
webencodings==0.5.1
diff --git a/tests/functional/test_debug.py b/tests/functional/test_debug.py
index cf7f71729c1..f309604df58 100644
--- a/tests/functional/test_debug.py
+++ b/tests/functional/test_debug.py
@@ -36,10 +36,11 @@ def test_debug__library_versions(script):
"""
args = ['debug']
result = script.pip(*args, allow_stderr_warning=True)
- stdout = result.stdout
+ print(result.stdout)
+
vendored_versions = create_vendor_txt_map()
for name, value in vendored_versions.items():
- assert '{}=={}'.format(name, value) in stdout
+ assert '{}=={}'.format(name, value) in result.stdout
@pytest.mark.parametrize(
diff --git a/tools/automation/vendoring/patches/certifi.patch b/tools/automation/vendoring/patches/certifi.patch
new file mode 100644
index 00000000000..9d5395a7b6b
--- /dev/null
+++ b/tools/automation/vendoring/patches/certifi.patch
@@ -0,0 +1,13 @@
+diff --git a/src/pip/_vendor/certifi/core.py b/src/pip/_vendor/certifi/core.py
+index 5d2b8cd32..8987449f6 100644
+--- a/src/pip/_vendor/certifi/core.py
++++ b/src/pip/_vendor/certifi/core.py
+@@ -33,7 +33,7 @@ try:
+ # We also have to hold onto the actual context manager, because
+ # it will do the cleanup whenever it gets garbage collected, so
+ # we will also store that at the global level as well.
+- _CACERT_CTX = get_path("certifi", "cacert.pem")
++ _CACERT_CTX = get_path("pip._vendor.certifi", "cacert.pem")
+ _CACERT_PATH = str(_CACERT_CTX.__enter__())
+
+ return _CACERT_PATH
diff --git a/tools/automation/vendoring/patches/html5lib.patch b/tools/automation/vendoring/patches/html5lib.patch
deleted file mode 100644
index ae9cafe2d8e..00000000000
--- a/tools/automation/vendoring/patches/html5lib.patch
+++ /dev/null
@@ -1,55 +0,0 @@
-diff --git a/src/pip/_vendor/html5lib/_trie/_base.py b/src/pip/_vendor/html5lib/_trie/_base.py
-index a1158bbb..6b71975f 100644
---- a/src/pip/_vendor/html5lib/_trie/_base.py
-+++ b/src/pip/_vendor/html5lib/_trie/_base.py
-@@ -1,6 +1,9 @@
- from __future__ import absolute_import, division, unicode_literals
-
--from collections import Mapping
-+try:
-+ from collections.abc import Mapping
-+except ImportError: # Python 2.7
-+ from collections import Mapping
-
-
- class Trie(Mapping):
-diff --git a/src/pip/_vendor/html5lib/treebuilders/dom.py b/src/pip/_vendor/html5lib/treebuilders/dom.py
-index dcfac220..d8b53004 100644
---- a/src/pip/_vendor/html5lib/treebuilders/dom.py
-+++ b/src/pip/_vendor/html5lib/treebuilders/dom.py
-@@ -1,7 +1,10 @@
- from __future__ import absolute_import, division, unicode_literals
-
-
--from collections import MutableMapping
-+try:
-+ from collections.abc import MutableMapping
-+except ImportError: # Python 2.7
-+ from collections import MutableMapping
- from xml.dom import minidom, Node
- import weakref
-
-diff --git a/src/pip/_vendor/html5lib/_utils.py b/src/pip/_vendor/html5lib/_utils.py
-index 0703afb3..96eb17b2 100644
---- a/src/pip/_vendor/html5lib/_utils.py
-+++ b/src/pip/_vendor/html5lib/_utils.py
-@@ -2,12 +2,15 @@ from __future__ import absolute_import, division, unicode_literals
-
- from types import ModuleType
-
--from pip._vendor.six import text_type
-+from pip._vendor.six import text_type, PY3
-
--try:
-- import xml.etree.cElementTree as default_etree
--except ImportError:
-+if PY3:
- import xml.etree.ElementTree as default_etree
-+else:
-+ try:
-+ import xml.etree.cElementTree as default_etree
-+ except ImportError:
-+ import xml.etree.ElementTree as default_etree
-
-
- __all__ = ["default_etree", "MethodDispatcher", "isSurrogatePair",
diff --git a/tools/automation/vendoring/patches/requests.patch b/tools/automation/vendoring/patches/requests.patch
index 75bf25ab284..08795ad3a3b 100644
--- a/tools/automation/vendoring/patches/requests.patch
+++ b/tools/automation/vendoring/patches/requests.patch
@@ -21,35 +21,22 @@ index 6336a07d..9582fa73 100644
# Kinda cool, though, right?
diff --git a/src/pip/_vendor/requests/__init__.py b/src/pip/_vendor/requests/__init__.py
-index 9c3b769..36a4ef40 100644
+index dc83261a8..517458b5a 100644
--- a/src/pip/_vendor/requests/__init__.py
+++ b/src/pip/_vendor/requests/__init__.py
-@@ -80,13 +80,15 @@ except (AssertionError, ValueError):
- RequestsDependencyWarning)
+@@ -94,6 +94,11 @@ except (AssertionError, ValueError):
+ # if the standard library doesn't support SNI or the
+ # 'ssl' library isn't available.
+ try:
++ # Note: This logic prevents upgrading cryptography on Windows, if imported
++ # as part of pip.
++ from pip._internal.utils.compat import WINDOWS
++ if not WINDOWS:
++ raise ImportError("pip internals: don't import cryptography on Windows")
+ try:
+ import ssl
+ except ImportError:
- # Attempt to enable urllib3's SNI support, if possible
--try:
-- from pip._vendor.urllib3.contrib import pyopenssl
-- pyopenssl.inject_into_urllib3()
--
-- # Check cryptography version
-- from cryptography import __version__ as cryptography_version
-- _check_cryptography(cryptography_version)
--except ImportError:
-- pass
-+from pip._internal.utils.compat import WINDOWS
-+if not WINDOWS:
-+ try:
-+ from pip._vendor.urllib3.contrib import pyopenssl
-+ pyopenssl.inject_into_urllib3()
-+
-+ # Check cryptography version
-+ from cryptography import __version__ as cryptography_version
-+ _check_cryptography(cryptography_version)
-+ except ImportError:
-+ pass
-
- # urllib3's DependencyWarnings should be silenced.
diff --git a/src/pip/_vendor/requests/compat.py b/src/pip/_vendor/requests/compat.py
index eb6530d..353ec29 100644
--- a/src/pip/_vendor/requests/compat.py