diff --git a/.gitignore b/.gitignore index 0cb035e..7cd6cd7 100644 --- a/.gitignore +++ b/.gitignore @@ -1,13 +1,5 @@ +# Hide cache __pycache__/ -# Pyinstaller build files -dist/ -build/ -*.spec - -# Installer data -package.zip -.installer_cache - # Hide x2 testing files -*.x2 +*.xpp diff --git a/main.py b/main.py deleted file mode 100644 index ff4fe6a..0000000 --- a/main.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright 2022-2023 iiPython - -# Modules -import os -import sys -from xpp import ( - load_sections, load_cli, config, - Interpreter -) - -# Initialization -cli = load_cli() # Render CLI - -# Load filepath -filepath = cli.filepath -if filepath is None: - cli.show_help() - -elif filepath == ".": - filepath = config.get("main", "main.xpp") - -if not os.path.isfile(filepath): - sys.exit("X++ Exception: no such file") - -# Load file content -with open(filepath, "r") as f: - data = f.read() - -# Run file -sections = load_sections(data, filepath) -interpreter = Interpreter( - filepath, sections, - config = config, - cli_vals = cli.vals -) -interpreter.run_section("main") diff --git a/main.x2 b/main.x2 deleted file mode 100644 index 3632bed..0000000 --- a/main.x2 +++ /dev/null @@ -1 +0,0 @@ -prt "Hello, world!" diff --git a/main.xpp b/main.xpp index cf51a96..b9dc89d 100644 --- a/main.xpp +++ b/main.xpp @@ -1,4 +1,6 @@ -var woah "hello world" -:: starting from 0 -prt (chr woah 2) (chr woah 8) -prt (chr woah 2 8) +var y 1 +rep 5 loop y ?y +prt y + +:loop x + ret (add x 1) diff --git a/md/documents.md b/md/documents.md index 0ab3043..f5216da 100644 --- a/md/documents.md +++ b/md/documents.md @@ -1,4 +1,4 @@ -# [x2](../README.md) / Documents +# [x++](../README.md) / Documents ## Table of Contents @@ -21,9 +21,9 @@ ## About -Welcome to the documents page of the official x2 documentation. In this section, you will be learning about the individual components that make up x2. +Welcome to the documents page of the official x++ documentation. In this section, you will be learning about the individual components that make up x++. -If you are looking for a beginner's guide for x2, check out the [tutorials](./tutorials.md) instead. +If you are looking for a beginner's guide for x++, check out the [tutorials](./tutorials.md) instead. 
## Documents @@ -40,4 +40,4 @@ If you are looking for a beginner's guide for x2, check out the [tutorials](./tu Last Updated: February 6th, 2022 by Dm123321_31mD -[↑ Go To Top](#x2--documents) \ No newline at end of file +[↑ Go To Top](#x--documents) \ No newline at end of file diff --git a/md/documents/comments.md b/md/documents/comments.md index 3ebdaa5..de157e8 100644 --- a/md/documents/comments.md +++ b/md/documents/comments.md @@ -1,4 +1,4 @@ -# [x2](../../README.md) / [Documents](../documents.md) / Comments +# [x++](../../README.md) / [Documents](../documents.md) / Comments ## Table of Contents @@ -37,4 +37,4 @@ out 5 :: This is a comment Last Updated: February 6th, 2022 by Dm123321_31mD -[↑ Go To Top](#x2--documents--comments) \ No newline at end of file +[↑ Go To Top](#x--documents--comments) \ No newline at end of file diff --git a/md/documents/comparators.md b/md/documents/comparators.md index 9d7767d..2e55e5b 100644 --- a/md/documents/comparators.md +++ b/md/documents/comparators.md @@ -1,4 +1,4 @@ -# [x2](../../README.md) / [Documents](../documents.md) / Comparators +# [x++](../../README.md) / [Documents](../documents.md) / Comparators ## Table of Contents @@ -55,9 +55,9 @@ ## About -Comparators are symbols or keywords that compare two different variables or values and trigger a branch in the x2 thread. Each comparator has its functionality. +Comparators are symbols or keywords that compare two different variables or values and trigger a branch in the x++ thread. Each comparator has its functionality. -Expressions are segments in the code that creates a branch in the x2 thread. They are always made up of three components: the `source`, the `comparator`, and the `target`, and they are arranged like so: +Expressions are segments in the code that creates a branch in the x++ thread. They are always made up of three components: the `source`, the `comparator`, and the `target`, and they are arranged like so: ```xt @@ -302,4 +302,4 @@ cmp 5 from int "out \"true\"" Last Updated: February 6th, 2022 by Dm123321_31mD -[↑ Go To Top](#x2--documents--comparators) \ No newline at end of file +[↑ Go To Top](#x--documents--comparators) \ No newline at end of file diff --git a/md/documents/configurations.md b/md/documents/configurations.md index 5d97031..b22da5a 100644 --- a/md/documents/configurations.md +++ b/md/documents/configurations.md @@ -1,4 +1,4 @@ -# [x2](../../README.md) / [Documents](../documents.md) / Configurations +# [x++](../../README.md) / [Documents](../documents.md) / Configurations ## Table of Contents @@ -62,9 +62,9 @@ The configuration file can also contain non-essential information, such as the a "contributors": [ "contributor A", "contributor B" ], - "description": "This is my x2 project", + "description": "This is my x++ project", "main": "main.xt", - "name": "my-x2-project", + "name": "my-x++-project", "version": "1.0.0" } ``` @@ -105,4 +105,4 @@ Throws errors silently. Last Updated: February 6th, 2022 by Dm123321_31mD -[↑ Go To Top](#x2--documents--configurations) \ No newline at end of file +[↑ Go To Top](#x--documents--configurations) \ No newline at end of file diff --git a/md/documents/dataTypes.md b/md/documents/dataTypes.md index 4490f34..b00bbd1 100644 --- a/md/documents/dataTypes.md +++ b/md/documents/dataTypes.md @@ -1,4 +1,4 @@ -# [x2](../../README.md) / [Documents](../documents.md) / Data Types +# [x++](../../README.md) / [Documents](../documents.md) / Data Types ## Table of Contents @@ -50,9 +50,9 @@ Data types are classifications of variables or values. 
Each data type can only store a specific type of value. For example, a string cannot contain an integer value, or a boolean cannot contain a string value. -Because x2 is a dynamic programming language, you can change the data type of a variable or a value during run-time. +Because x++ is a dynamic programming language, you can change the data type of a variable or a value during run-time. -x2 is also a weakly-typed programming language. You do not (and cannot) directly specify what the data type of a variable or value is. The interpreter will read the value and determine the data type during run-time. +x++ is also a weakly-typed programming language. You do not (and cannot) directly specify what the data type of a variable or value is. The interpreter will read the value and determine the data type during run-time. Strongly-typed language, such as [Java](https://en.wikipedia.org/wiki/Java_(programming_language)): @@ -66,7 +66,7 @@ Weakly-typed language, such as [JavaScript](https://en.wikipedia.org/wiki/JavaSc let myInteger = 5; ``` -Similarly, in x2, you define a variable with a data type of an integer like so: +Similarly, in x++, you define a variable with a data type of an integer like so: ```xt psh 5 myInteger @@ -78,7 +78,7 @@ psh 5 myInteger A boolean value represents either `true` or `false`. -Currently, it is impossible to get a boolean value within vanilla x2. Because of that, integers are used instead to represent a true or false value, where `1` represents `true` and `0` represents `false`. +Currently, it is impossible to get a boolean value within vanilla x++. Because of that, integers are used instead to represent a true or false value, where `1` represents `true` and `0` represents `false`. --- @@ -130,7 +130,7 @@ psh 05 myInteger out myInteger ``` -In vanilla x2, it can also be used as a boolean value, where `1` represents `true` and `0` represents `false`: +In vanilla x++, it can also be used as a boolean value, where `1` represents `true` and `0` represents `false`: ```xt psh 0 hamburgerIsEaten @@ -143,7 +143,7 @@ evl hamburgerIsEaten == 1 "out \"Someone ate my hamburger\"" Null is a data type that represents nothing. -It cannot be defined normally in x2 and acts as the default value to undefined variables: +It cannot be defined normally in x++ and acts as the default value to undefined variables: ```xt out nonexistingVariable @@ -207,7 +207,7 @@ Below is a list of all the escape codes for reference: > Source: [Python DS - Python 3 Escape Sequences](https://www.python-ds.com/python-3-escape-sequences) -You can also string interpolation within an x2 string. All string interpolations must be a valid x2 statement and be surrounded by `$()`: +You can also string interpolation within an x++ string. All string interpolations must be a valid x++ statement and be surrounded by `$()`: ```xt psh 5 myInteger @@ -226,4 +226,4 @@ cmp "abc" < "cba" "out \"true\"" Last Updated: February 6th, 2022 by Dm123321_31mD -[↑ Go To Top](#x2--documents--data-types) \ No newline at end of file +[↑ Go To Top](#x--documents--data-types) \ No newline at end of file diff --git a/md/documents/operators.md b/md/documents/operators.md index 8511d5b..208be41 100644 --- a/md/documents/operators.md +++ b/md/documents/operators.md @@ -1,4 +1,4 @@ -# [x2](../../README.md) / [Documents](../documents.md) / Operators +# [x++](../../README.md) / [Documents](../documents.md) / Operators ## Table of Contents @@ -125,19 +125,19 @@ Operators are keywords that process arguments. 
A statement is a line of code that tells the interpreter what to do. They are always made up of two components: the `operator` and the `arguments`, and they are arranged like so: -```xt +```xpp [...arguments] ``` For example: -```xt +```xpp out "Hello, world!" ``` Or: -```xt +```xpp add 1 2 sum ``` @@ -145,7 +145,7 @@ A statement can also be wrapped around parentheses (`()`) to group them. Some statements return values, which can be stored in a variable: -```xt +```xpp psh (add 1 2) sum ``` @@ -153,7 +153,7 @@ psh (add 1 2) sum ### Add -```xt +```xpp add [sum] ``` @@ -169,7 +169,7 @@ Returns: [Float](./dataTypes.md#float) or [Integer](./dataTypes.md#integer) or [ Example: -```xt +```xpp add 5 10 sum out sum ``` @@ -178,7 +178,7 @@ out sum ### Call -```xt +```xpp call
[...arguments] ``` @@ -194,7 +194,7 @@ Returns: Any Example: -```xt +```xpp call mySection 5 10 output out output @@ -207,7 +207,7 @@ out output ### Character -```xt +```xpp char [output] ``` @@ -223,7 +223,7 @@ Returns: [String](./dataTypes.md#string) Example: -```xt +```xpp char 3 "Hello, world!" output out output ``` @@ -232,7 +232,7 @@ out output ### Clear -```xt +```xpp cls ``` @@ -242,7 +242,7 @@ Returns: [Null](./dataTypes.md#null) Example: -```xt +```xpp out "Hello, world!" cls out "Hello, world!" @@ -252,7 +252,7 @@ out "Hello, world!" ### Compare -```xt +```xpp cmp [false branch] ``` @@ -268,7 +268,7 @@ Returns: [Null](./dataTypes.md#null) Example: -```xt +```xpp cmp 5 == 5 "jmp true" "jmp false" :true @@ -282,7 +282,7 @@ cmp 5 == 5 "jmp true" "jmp false" ### Constant -```xt +```xpp cnst ``` @@ -297,7 +297,7 @@ Returns: [Null](./dataTypes.md#null) Example: -```xt +```xpp cnst 5 myInteger out myInteger ``` @@ -306,7 +306,7 @@ out myInteger ### Decrement -```xt +```xpp dec [output] ``` @@ -323,7 +323,7 @@ Returns: [Float](./dataTypes.md#float) or [Integer](./dataTypes.md#integer) Example: -```xt +```xpp psh 5 myInteger dec myInteger out myInteger @@ -333,7 +333,7 @@ out myInteger ### Divide -```xt +```xpp div [quotient] ``` @@ -349,7 +349,7 @@ Returns: [Float](./dataTypes.md#float) or [Integer](./dataTypes.md#integer) Example: -```xt +```xpp div 10 5 quotient out quotient ``` @@ -358,7 +358,7 @@ out quotient ### End -```xt +```xpp end ``` @@ -368,7 +368,7 @@ Returns: [Null](./dataTypes.md#null) Example: -```xt +```xpp :main out "Hello, world!" end @@ -379,7 +379,7 @@ Example: ### Evaluate -```xt +```xpp evl ``` @@ -393,7 +393,7 @@ Returns: [Null](./dataTypes.md#null) Example: -```xt +```xpp evl "print('Hello, world!')" ``` @@ -401,8 +401,8 @@ evl "print('Hello, world!')" ### Exit -```xt +```xpp ext ``` Exits the process. @@ -411,9 +411,9 @@ Returns: [Null](./dataTypes.md#null) Example: -```xt +```xpp out "Hello, world!" ext out "Hello, world!" ``` @@ -421,7 +421,7 @@ out "Hello, world!" ### Float -```xt +```xpp flt [output] ``` @@ -438,7 +438,7 @@ Returns: [Float](./dataTypes.md#float) Example: -```xt +```xpp flt "5" myFloat out myFloat ``` @@ -447,7 +447,7 @@ out myFloat ### Import -```xt +```xpp imp [as ] ``` @@ -464,15 +464,15 @@ Returns: [Null](./dataTypes.md#null) Example: -```xt -imp "myFile.xt" as "myPackage" +```xpp +imp "myFile.xpp" as "myPackage" ``` --- ### Index -```xt +```xpp idx [output] ``` @@ -488,7 +488,7 @@ Returns: [Integer](./dataTypes.md#integer) Example: -```xt +```xpp idx "ello" "Hello, world!" output out output ``` @@ -497,7 +497,7 @@ out output ### Increment -```xt +```xpp inc [output] ``` @@ -514,7 +514,7 @@ Returns: [Float](./dataTypes.md#float) or [Integer](./dataTypes.md#integer) Example: -```xt +```xpp psh 5 myInteger inc myInteger out myInteger @@ -524,7 +524,7 @@ out myInteger ### Is A Number -```xt +```xpp inm [output] ``` @@ -539,7 +539,7 @@ Returns: [Integer](./dataTypes.md#integer) Example: -```xt +```xpp inm 5 output out output ``` @@ -548,7 +548,7 @@ out output ### Is Any Number -```xt +```xpp inms [output] ``` @@ -563,7 +563,7 @@ Returns: [Integer](./dataTypes.md#integer) Example: -```xt +```xpp inms "5" output out output ``` @@ -572,7 +572,7 @@ out output ### Jump -```xt +```xpp jmp
``` @@ -586,7 +586,7 @@ Returns: [Null](./dataTypes.md#null) Example: -```xt +```xpp jmp mySection :mySection @@ -597,7 +597,7 @@ jmp mySection ### Length -```xt +```xpp len [output] ``` @@ -612,7 +612,7 @@ Returns: [Integer](./dataTypes.md#integer) Example: -```xt +```xpp len "Hello, world!" output out output ``` @@ -621,7 +621,7 @@ out output ### Load -```xt +```xpp load [output] ``` @@ -636,17 +636,17 @@ Returns: [String](./dataTypes.md#string) Example: -In `file.xt`: +In `file.xpp`: -```xt +```xpp :mySection out "Hello, world!" ``` -In `main.xt`: +In `main.xpp`: -```xt -load "file.xt" output +```xpp +load "file.xpp" output out output ``` @@ -654,7 +654,7 @@ out output ### Lowercase -```xt +```xpp lwr [output] ``` @@ -671,7 +671,7 @@ Returns: [String](./dataTypes.md#string) Example: -```xt +```xpp lwr "Hello, world!" output out output ``` @@ -680,7 +680,7 @@ out output ### Multiply -```xt +```xpp mul [product] ``` @@ -696,7 +696,7 @@ Returns: [Float](./dataTypes.md#float) or [Integer](./dataTypes.md#integer) or [ Example: -```xt +```xpp mul 5 10 product out product ``` @@ -705,7 +705,7 @@ out product ### Number -```xt +```xpp num [output] ``` @@ -722,7 +722,7 @@ Returns: [Float](./dataTypes.md#float) or [Integer](./dataTypes.md#integer) Example: -```xt +```xpp num "5" myInteger out myInteger ``` @@ -731,7 +731,7 @@ out myInteger ### Output -```xt +```xpp out [...value] ``` @@ -745,7 +745,7 @@ Returns: [Null](./dataTypes.md#null) Example: -```xt +```xpp out "Hello, world!" ``` @@ -753,7 +753,7 @@ out "Hello, world!" ### Pop -```xt +```xpp pop [output] ``` @@ -768,7 +768,7 @@ Returns: Any Example: -```xt +```xpp psh 5 myInteger pop "myInterger" output out output @@ -778,7 +778,7 @@ out output ### Provoke -```xt +```xpp pvk
[...arguments] ``` @@ -793,7 +793,7 @@ Returns: Any Example: -```xt +```xpp pvk mySection 5 10 :mySection a b @@ -805,7 +805,7 @@ pvk mySection 5 10 ### Push -```xt +```xpp psh ``` @@ -820,7 +820,7 @@ Returns: [Null](./dataTypes.md#null) Example: -```xt +```xpp psh 5 myInteger out myInteger ``` @@ -829,7 +829,7 @@ out myInteger ### Random Number Generator -```xt +```xpp rng [output] ``` @@ -845,7 +845,7 @@ Returns: [Integer](./dataTypes.md#integer) Example: -```xt +```xpp rng 0 5 myInteger out myInteger ``` @@ -854,7 +854,7 @@ out myInteger ### Read -```xt +```xpp read [prompt] [output] ``` @@ -869,7 +869,7 @@ Returns: [Null](./dataTypes.md#null) Example: -```xt +```xpp read "What is your name: " name out name ``` @@ -878,7 +878,7 @@ out name ### Remove -```xt +```xpp rem ``` @@ -892,7 +892,7 @@ Returns: [Null](./dataTypes.md#null) Example: -```xt +```xpp psh 5 myInteger out myInteger rem myInteger @@ -903,7 +903,7 @@ out myInteger ### Repeat -```xt +```xpp rep ``` @@ -918,7 +918,7 @@ Returns: [Null](./dataTypes.md#null) Example: -```xt +```xpp rep 5 "out \"Hello, world!\"" ``` @@ -926,7 +926,7 @@ rep 5 "out \"Hello, world!\"" ### Return -```xt +```xpp ret ``` @@ -940,7 +940,7 @@ Returns: [Null](./dataTypes.md#null) Example: -```xt +```xpp :mySection ret 5 ``` @@ -949,7 +949,7 @@ Example: ### Round -```xt +```xpp rnd [precision] ``` @@ -966,7 +966,7 @@ Returns: [Float](./dataTypes.md#float) or [Integer](./dataTypes.md#integer) Example: -```xt +```xpp psh 5.5 myInteger rnd myInteger out myInteger @@ -976,7 +976,7 @@ out myInteger ### Save -```xt +```xpp save [output] ``` @@ -991,15 +991,15 @@ Returns: [Null](./dataTypes.md#null) Example: -```xt -save "file.xt" "Hello, world!" +```xpp +save "file.xpp" "Hello, world!" ``` --- ### Skip -```xt +```xpp skp ``` @@ -1009,7 +1009,7 @@ Returns: [Null](./dataTypes.md#null) Example: -```xt +```xpp skp ``` @@ -1017,7 +1017,7 @@ skp ### Slice -```xt +```xpp slc [output] ``` @@ -1034,7 +1034,7 @@ Returns: [String](./dataTypes.md#string) Example: -```xt +```xpp slc 0 5 "Hello, world!" output out output ``` @@ -1043,7 +1043,7 @@ out output ### String -```xt +```xpp str [output] ``` @@ -1060,7 +1060,7 @@ Returns: [String](./dataTypes.md#string) Example: -```xt +```xpp str 5 myString out myString ``` @@ -1069,7 +1069,7 @@ out myString ### Subtract -```xt +```xpp sub [difference] ``` @@ -1085,7 +1085,7 @@ Returns: [Float](./dataTypes.md#float) or [Integer](./dataTypes.md#integer) Example: -```xt +```xpp sub 10 5 difference out difference ``` @@ -1094,7 +1094,7 @@ out difference ### Throw -```xt +```xpp thrw ``` @@ -1110,7 +1110,7 @@ Returns: [Null](./dataTypes.md#null) Example: -```xt +```xpp out "Hello, world!" thrw "Error message!" out "Hello, world!" @@ -1120,7 +1120,7 @@ out "Hello, world!" ### Try -```xt +```xpp try [error statement] ``` @@ -1135,7 +1135,7 @@ Returns: [Null](./dataTypes.md#null) Example: -```xt +```xpp try "thrw \"Error message!\"" "out \"An error has occurred\"" ``` @@ -1143,7 +1143,7 @@ try "thrw \"Error message!\"" "out \"An error has occurred\"" ### Uppercase -```xt +```xpp upr [output] ``` @@ -1160,7 +1160,7 @@ Returns: [String](./dataTypes.md#string) Example: -```xt +```xpp upr "Hello, world!" output out output ``` @@ -1169,11 +1169,11 @@ out output ### Wait -```xt +```xpp wait ``` Waits a certain number of seconds before executing the next statement. 
| Parameter | Type | Optional | Description | | :-: | :-: | :-: | :-: | @@ -1183,7 +1183,7 @@ Returns: [Null](./dataTypes.md#null) Example: -```xt +```xpp out "Hello, world!" wait 5 out "Hello, world!" @@ -1193,7 +1193,7 @@ out "Hello, world!" ### While -```xt +```xpp whl ``` @@ -1208,7 +1208,7 @@ Returns: [Null](./dataTypes.md#null) Example: -```xt +```xpp psh 0 @myInteger whl @myInteger < 5 "jmp mySection" @@ -1221,4 +1221,4 @@ whl @myInteger < 5 "jmp mySection" Last Updated: February 6th, 2022 by Dm123321_31mD -[↑ Go To Top](#x2--documents--operators) \ No newline at end of file +[↑ Go To Top](#x--documents--operators) \ No newline at end of file diff --git a/md/documents/packages.md b/md/documents/packages.md index 0e1eafd..634254c 100644 --- a/md/documents/packages.md +++ b/md/documents/packages.md @@ -1,4 +1,4 @@ -# [x2](../../README.md) / [Documents](../documents.md) / Packages +# [x++](../../README.md) / [Documents](../documents.md) / Packages ## Table of Contents @@ -23,13 +23,13 @@ ## About -Packages are x2 files written by other people that can be imported to your x2 project using the `imp` operator: +Packages are x++ files written by other people that can be imported to your x++ project using the `imp` operator: ```xt imp "examplePackage" ``` -All packages are located in the `pkg/` folder. If one is not found, the interpreter will automatically create one. When attempting to import a package, the interpreter will look for the `pkg//main.xt` file. If one is not found, an error is thrown. +All packages are located in the `pkg/` folder. If one is not found, the interpreter will automatically create one. When attempting to import a package, the interpreter will look for the `pkg//main.xpp` file. If one is not found, an error is thrown. Imported packages are only available in that one specific file. When attempting to use the package elsewhere, an error is thrown. @@ -43,12 +43,12 @@ examplePackage.mySection The same applies to importing files. -To import a file from the x2 project, the path must contain the `.xt` extension. The interpreter will then attempt to find the file from the current working directory. If one is not found, an error is thrown. +To import a file from the x++ project, the path must contain the `.xpp` extension. The interpreter will then attempt to find the file from the current working directory. If one is not found, an error is thrown. -Currently, the [standard library](../standardLibrary.md) is built into the x2 interpreter. You can install more packages at the [official website](https://x2.iipython.cf/). +Currently, the [standard library](../standardLibrary.md) is built into the x++ interpreter. You can install more packages from GitHub. --- Last Updated: February 6th, 2022 by Dm123321_31mD -[↑ Go To Top](#x2--documents--packages) \ No newline at end of file +[↑ Go To Top](#x--documents--packages) \ No newline at end of file diff --git a/md/documents/sections.md b/md/documents/sections.md index 90ae0ff..468c040 100644 --- a/md/documents/sections.md +++ b/md/documents/sections.md @@ -1,4 +1,4 @@ -# [x2](../../README.md) / [Documents](../documents.md) / Sections +# [x++](../../README.md) / [Documents](../documents.md) / Sections ## Table of Contents @@ -27,13 +27,13 @@ A section is a set of statements that get executed when using the operators `cal Sections always start with a colon `:`, and their name can only contain alphabetical letters: -```xt +```xpp :mySection ``` By default, a section is public, which means other files can import and use the section. 
To define a private section, append an at symbol (`@`) at the beginning of the section name. A private section can only be used by other sections within the file: -```xt +```xpp :@mySection ``` @@ -41,17 +41,17 @@ Statements that are not within a section are located in the global section prede A section can also take arguments. Any variables separated by space and placed directly after the section name are considered arguments: -```xt +```xpp :myMethod argument0 argument1 ``` > All arguments of a method or a function must be passed. An error is thrown if an argument is missing. -Three types of sections exist in x2. +Three types of sections exist in x++. A section that does not take any additional arguments nor return a value is considered a `section` and generally invoked using the `jmp` operator: -```xt +```xpp jmp mySection :mySection @@ -60,7 +60,7 @@ jmp mySection A section that does take additional arguments but does not return a value is considered a `method` and generally invoked using the `pvk` operator. -```xt +```xpp pvk myMethod :myMethod a b @@ -70,7 +70,7 @@ pvk myMethod A section that does take additional arguments and returns a value is considered a `function` and can be invoked using either the `pvk` or `call` operator. The `pvk` operator is used when the returned value is not wanted and the opposite is true for `call`: -```xt +```xpp call myFunction a b output out output @@ -83,4 +83,4 @@ out output Last Updated: February 6th, 2022 by Dm123321_31mD -[↑ Go To Top](#x2--documents--sections) \ No newline at end of file +[↑ Go To Top](#x--documents--sections) \ No newline at end of file diff --git a/md/documents/variables.md b/md/documents/variables.md index 85fa299..0621e22 100644 --- a/md/documents/variables.md +++ b/md/documents/variables.md @@ -1,4 +1,4 @@ -# [x2](../../README.md) / [Documents](../documents.md) / Variables +# [x++](../../README.md) / [Documents](../documents.md) / Variables ## Table of Contents @@ -27,13 +27,13 @@ A variable is a label that stores a value that can be referenced and modified at Even though a variable can contain any character besides spaces, it is recommended to use only alphabetical letters. A variable is defined using the `psh` operator: -```xt +```xpp psh 5 myInteger ``` A variable can then be referenced using the variable name: -```xt +```xpp out myInteger ``` @@ -43,31 +43,31 @@ There are two other types of variables, `file variable` and `global variable`. A A file variable is defined by appending an at symbol (`@`) in front of the variable name: -```xt +```xpp psh 5 @myInteger ``` A global variable is defined by appending a hashtag (`#`) in front of the variable name: -```xt +```xpp psh 5 #myInteger ``` Both file and global are not garbage collected and must be manually deleted from the memory using the `rem` operator: -```xt +```xpp rem 5 @myInteger ``` Or: -```xt +```xpp rem 5 #myInteger ``` A variable can also be constant, which means it cannot be deleted using the `rem` operator. A constant variable can be defined using the `cnst` operator: -```xt +```xpp cnst 5 myInteger ``` @@ -77,4 +77,4 @@ Attempting to delete the variable will cause an error to be thrown. 
Note that ev Last Updated: February 6th, 2022 by Dm123321_31mD -[↑ Go To Top](#x2--documents--variables) \ No newline at end of file +[↑ Go To Top](#x--documents--variables) \ No newline at end of file diff --git a/md/tutorials.md b/md/tutorials.md index 7b3c929..4a39885 100644 --- a/md/tutorials.md +++ b/md/tutorials.md @@ -1,4 +1,4 @@ -# [x2](../README.md) / Tutorials +# [x++](../README.md) / Tutorials ## Table of Contents @@ -17,9 +17,9 @@ ## About -Welcome to the tutorials of the official x2 documentation. In this section, you will be learning all the basics within x2, including familiaring the syntax of x2 as well as creating your first x2 project! +Welcome to the tutorials of the official x++ documentation. In this section, you will be learning all the basics within x++, including familiaring the syntax of x++ as well as creating your first x++ project! -If you are looking for information for a specific component of x2, check out the [documents](./documents.md) instead. +If you are looking for information for a specific component of x++, check out the [documents](./documents.md) instead. ## Tutorials @@ -32,4 +32,4 @@ If you are looking for information for a specific component of x2, check out the Last Updated: February 6th, 2022 by Dm123321_31mD -[↑ Go To Top](#x2--tutorials) \ No newline at end of file +[↑ Go To Top](#x--tutorials) \ No newline at end of file diff --git a/md/tutorials/1helloWorld.md b/md/tutorials/1helloWorld.md index 5502a3d..d87fb3f 100644 --- a/md/tutorials/1helloWorld.md +++ b/md/tutorials/1helloWorld.md @@ -1,4 +1,4 @@ -# [x2](../../README.md) / [Tutorials](../tutorials.md) / 1. Hello, world! +# [x++](../../README.md) / [Tutorials](../tutorials.md) / 1. Hello, world! ## Table of Contents @@ -19,9 +19,9 @@ ## Lesson -Welcome to the first lesson of the tutorials! In this lesson, you will be learning how to write your first x2 project, how to define and use a variable, and how to print a value into the terminal. +Welcome to the first lesson of the tutorials! In this lesson, you will be learning how to write your first x++ project, how to define and use a variable, and how to print a value into the terminal. -First, make sure you properly installed x2 to your device if you haven't done so already. You can follow the guide in the [getting started](../../README.md#getting-started) section for additional help. +First, make sure you properly installed x++ to your device if you haven't done so already. You can follow the guide in the [getting started](../../README.md#getting-started) section for additional help. Once you are done, open up the `main.xt` file. Inside, you should find something like this: @@ -31,7 +31,7 @@ Once you are done, open up the `main.xt` file. Inside, you should find something out "Hello, world!" ``` -This is the default template when you first installed x2. It is important for you to know what the code is doing. Let's break it down. +This is the default template when you first installed x++. It is important for you to know what the code is doing. Let's break it down. The first line in the file is a comment. A comment is a text that provides information to the developer, such as you. It is ignored by the interpreter as it is intended for only developers to read. @@ -39,7 +39,7 @@ The second line defines a section. A section is a region of code that will be ex The third line prints out a string with the value `"Hello, world!"`. A string is a series of characters wrapped around by double-quotes (`"`). 
You can think of it like a character, word, phrase, or a sentence. The `out` is an operator, which means it takes arguments, processes them, and does a specific action based on what the arguments are. In the case, the `out` operator simply takes the arguments and prints them in the terminal. -Although indentations in x2 aren't necessary, it is a good practice to use indentation to group the statements, so you or other developers won't get confused. +Although indentations in x++ aren't necessary, it is a good practice to use indentation to group the statements, so you or other developers won't get confused. Now that you understand what your code is doing, let's try it out. Using what you've learned, try printing out your name and age in the format `My name is and I am years old` in the terminal. @@ -87,7 +87,7 @@ Let's try it! out "My name is $(pop name) and I am $(pop age) years old" ``` -You did it! You made your first ever x2 project. +You did it! You made your first ever x++ project. In the next lesson, you will learn how to get user inputs and how to manupulate strings and numbers. @@ -95,4 +95,4 @@ In the next lesson, you will learn how to get user inputs and how to manupulate Last Updated: February 6th, 2022 by Dm123321_31mD -[↑ Go To Top](#x2--tutorials--1-hello-world) \ No newline at end of file +[↑ Go To Top](#x--tutorials--1-hello-world) \ No newline at end of file diff --git a/md/tutorials/2userInput.md b/md/tutorials/2userInput.md index d23d1c8..bed2e54 100644 --- a/md/tutorials/2userInput.md +++ b/md/tutorials/2userInput.md @@ -1,4 +1,4 @@ -# [x2](../../README.md) / [Tutorials](../tutorials.md) / 2. User Input +# [x++](../../README.md) / [Tutorials](../tutorials.md) / 2. User Input ## Table of Contents @@ -19,9 +19,9 @@ ## Lesson -In the previous lesson, you learned how to write your first x2 project, define and use a variable, and output a value into the terminal. In this lesson, you will learn how to get a user input and how to manipulate strings and numbers. +In the previous lesson, you learned how to write your first x++ project, define and use a variable, and output a value into the terminal. In this lesson, you will learn how to get a user input and how to manipulate strings and numbers. -So far, you should've gotten this in your `main.xt` file: +So far, you should've gotten this in your `main.xpp` file: ```xt :: Main @@ -104,4 +104,4 @@ In the next lesson, you will learn how to make branches using the `cmp` operator Last Updated: February 6th, 2022 by Dm123321_31mD -[↑ Go To Top](#x2--tutorials--2-user-input) \ No newline at end of file +[↑ Go To Top](#x--tutorials--2-user-input) \ No newline at end of file diff --git a/md/tutorials/3branches.md b/md/tutorials/3branches.md index ad70cc4..559e439 100644 --- a/md/tutorials/3branches.md +++ b/md/tutorials/3branches.md @@ -1,4 +1,4 @@ -# [x2](../../README.md) / [Tutorials](../tutorials.md) / 3. Branches +# [x++](../../README.md) / [Tutorials](../tutorials.md) / 3. Branches ## Table of Contents @@ -87,10 +87,10 @@ Did you get something like this: cmp age > 16 "out \"You can also drive a car\"" ``` -In the next lesson, you will be learning how to make a calculator using x2. +In the next lesson, you will be learning how to make a calculator using x++. 
--- Last Updated: February 6th, 2022 by Dm123321_31mD -[↑ Go To Top](#x2--tutorials--3-branches) \ No newline at end of file +[↑ Go To Top](#x--tutorials--3-branches) \ No newline at end of file diff --git a/md/tutorials/4calculator.md b/md/tutorials/4calculator.md index c3600bf..caed468 100644 --- a/md/tutorials/4calculator.md +++ b/md/tutorials/4calculator.md @@ -1,4 +1,4 @@ -# [x2](../../README.md) / [Tutorials](../tutorials.md) / 4. Calculator +# [x++](../../README.md) / [Tutorials](../tutorials.md) / 4. Calculator ## Table of Contents @@ -19,14 +19,14 @@ ## Lesson -From the last three lessons, you learned how to print values into the terminal, get user inputs, modify strings and numbers, and create branches. In this lesson, you will be using all of this knowledge to construct a calculator using x2. Even though it may seem difficult at first, it is just made up of simple components. Let's break it down. +From the last three lessons, you learned how to print values into the terminal, get user inputs, modify strings and numbers, and create branches. In this lesson, you will be using all of this knowledge to construct a calculator using x++. Even though it may seem difficult at first, it is just made up of simple components. Let's break it down. Let's introduce the program and get some inputs from the user using the `out` and `read` operators: ```xt :: Main :main - out "Welcome to the x2 calculator!" + out "Welcome to the x++ calculator!" out "-----" read "Please enter your first number: " a read "Please enter your second number: " b @@ -38,7 +38,7 @@ You can also use escape codes to format your texts, such as `\t` to indent your ```xt :: Main :main - out "Welcome to the x2 calculator!" + out "Welcome to the x++ calculator!" out "-----" read "Please enter your first number: " a read "Please enter your second number: " b @@ -57,7 +57,7 @@ Currently, the numbers you got from the user are a string. You need to parse it ```xt :: Main :main - out "Welcome to the x2 calculator!" + out "Welcome to the x++ calculator!" out "-----" read "Please enter your first number: " a read "Please enter your second number: " b @@ -79,7 +79,7 @@ Now we can use the `cmp` operator to check what operator the user selected and a ```xt :: Main :main - out "Welcome to the x2 calculator!" + out "Welcome to the x++ calculator!" out "-----" read "Please enter your first number: " a read "Please enter your second number: " b @@ -105,7 +105,7 @@ Since you defined `c` as the answer to the equation, you can simply output it to ```xt :: Main :main - out "Welcome to the x2 calculator!" + out "Welcome to the x++ calculator!" 
out "-----" read "Please enter your first number: " a read "Please enter your second number: " b @@ -135,4 +135,4 @@ In the next lesson, you will be learning how to define and jump into another sec Last Updated: February 6th, 2022 by Dm123321_31mD -[↑ Go To Top](#x2--tutorials--4-calculator) \ No newline at end of file +[↑ Go To Top](#x--tutorials--4-calculator) \ No newline at end of file diff --git a/python/DLLs/_asyncio.pyd b/python/DLLs/_asyncio.pyd deleted file mode 100644 index 4120f51..0000000 Binary files a/python/DLLs/_asyncio.pyd and /dev/null differ diff --git a/python/DLLs/_bz2.pyd b/python/DLLs/_bz2.pyd deleted file mode 100644 index b42a0bf..0000000 Binary files a/python/DLLs/_bz2.pyd and /dev/null differ diff --git a/python/DLLs/_ctypes.pyd b/python/DLLs/_ctypes.pyd deleted file mode 100644 index 4a7e5fd..0000000 Binary files a/python/DLLs/_ctypes.pyd and /dev/null differ diff --git a/python/DLLs/_ctypes_test.pyd b/python/DLLs/_ctypes_test.pyd deleted file mode 100644 index aedc178..0000000 Binary files a/python/DLLs/_ctypes_test.pyd and /dev/null differ diff --git a/python/DLLs/_decimal.pyd b/python/DLLs/_decimal.pyd deleted file mode 100644 index 8569ecd..0000000 Binary files a/python/DLLs/_decimal.pyd and /dev/null differ diff --git a/python/DLLs/_elementtree.pyd b/python/DLLs/_elementtree.pyd deleted file mode 100644 index 3d46cf6..0000000 Binary files a/python/DLLs/_elementtree.pyd and /dev/null differ diff --git a/python/DLLs/_hashlib.pyd b/python/DLLs/_hashlib.pyd deleted file mode 100644 index ea40909..0000000 Binary files a/python/DLLs/_hashlib.pyd and /dev/null differ diff --git a/python/DLLs/_lzma.pyd b/python/DLLs/_lzma.pyd deleted file mode 100644 index 37a32ae..0000000 Binary files a/python/DLLs/_lzma.pyd and /dev/null differ diff --git a/python/DLLs/_multiprocessing.pyd b/python/DLLs/_multiprocessing.pyd deleted file mode 100644 index 39a1465..0000000 Binary files a/python/DLLs/_multiprocessing.pyd and /dev/null differ diff --git a/python/DLLs/_overlapped.pyd b/python/DLLs/_overlapped.pyd deleted file mode 100644 index 5ba32c1..0000000 Binary files a/python/DLLs/_overlapped.pyd and /dev/null differ diff --git a/python/DLLs/_queue.pyd b/python/DLLs/_queue.pyd deleted file mode 100644 index 0b8978a..0000000 Binary files a/python/DLLs/_queue.pyd and /dev/null differ diff --git a/python/DLLs/_socket.pyd b/python/DLLs/_socket.pyd deleted file mode 100644 index 85f25a6..0000000 Binary files a/python/DLLs/_socket.pyd and /dev/null differ diff --git a/python/DLLs/_ssl.pyd b/python/DLLs/_ssl.pyd deleted file mode 100644 index 8ed155c..0000000 Binary files a/python/DLLs/_ssl.pyd and /dev/null differ diff --git a/python/DLLs/_uuid.pyd b/python/DLLs/_uuid.pyd deleted file mode 100644 index ed8d34e..0000000 Binary files a/python/DLLs/_uuid.pyd and /dev/null differ diff --git a/python/DLLs/_zoneinfo.pyd b/python/DLLs/_zoneinfo.pyd deleted file mode 100644 index b2b9e85..0000000 Binary files a/python/DLLs/_zoneinfo.pyd and /dev/null differ diff --git a/python/DLLs/libcrypto-1_1.dll b/python/DLLs/libcrypto-1_1.dll deleted file mode 100644 index 6610e7b..0000000 Binary files a/python/DLLs/libcrypto-1_1.dll and /dev/null differ diff --git a/python/DLLs/libffi-8.dll b/python/DLLs/libffi-8.dll deleted file mode 100644 index 216de69..0000000 Binary files a/python/DLLs/libffi-8.dll and /dev/null differ diff --git a/python/DLLs/libssl-1_1.dll b/python/DLLs/libssl-1_1.dll deleted file mode 100644 index 5c74668..0000000 Binary files a/python/DLLs/libssl-1_1.dll and /dev/null differ 
diff --git a/python/DLLs/pyexpat.pyd b/python/DLLs/pyexpat.pyd deleted file mode 100644 index 83393a1..0000000 Binary files a/python/DLLs/pyexpat.pyd and /dev/null differ diff --git a/python/DLLs/python_lib.cat b/python/DLLs/python_lib.cat deleted file mode 100644 index dcf7d4f..0000000 Binary files a/python/DLLs/python_lib.cat and /dev/null differ diff --git a/python/DLLs/python_tools.cat b/python/DLLs/python_tools.cat deleted file mode 100644 index 1dd2b57..0000000 Binary files a/python/DLLs/python_tools.cat and /dev/null differ diff --git a/python/DLLs/select.pyd b/python/DLLs/select.pyd deleted file mode 100644 index de0f24a..0000000 Binary files a/python/DLLs/select.pyd and /dev/null differ diff --git a/python/DLLs/unicodedata.pyd b/python/DLLs/unicodedata.pyd deleted file mode 100644 index 0becb42..0000000 Binary files a/python/DLLs/unicodedata.pyd and /dev/null differ diff --git a/python/LICENSE.txt b/python/LICENSE.txt deleted file mode 100644 index 0e704cb..0000000 --- a/python/LICENSE.txt +++ /dev/null @@ -1,650 +0,0 @@ -A. HISTORY OF THE SOFTWARE -========================== - -Python was created in the early 1990s by Guido van Rossum at Stichting -Mathematisch Centrum (CWI, see https://www.cwi.nl) in the Netherlands -as a successor of a language called ABC. Guido remains Python's -principal author, although it includes many contributions from others. - -In 1995, Guido continued his work on Python at the Corporation for -National Research Initiatives (CNRI, see https://www.cnri.reston.va.us) -in Reston, Virginia where he released several versions of the -software. - -In May 2000, Guido and the Python core development team moved to -BeOpen.com to form the BeOpen PythonLabs team. In October of the same -year, the PythonLabs team moved to Digital Creations, which became -Zope Corporation. In 2001, the Python Software Foundation (PSF, see -https://www.python.org/psf/) was formed, a non-profit organization -created specifically to own Python-related Intellectual Property. -Zope Corporation was a sponsoring member of the PSF. - -All Python releases are Open Source (see https://opensource.org for -the Open Source Definition). Historically, most, but not all, Python -releases have also been GPL-compatible; the table below summarizes -the various releases. - - Release Derived Year Owner GPL- - from compatible? (1) - - 0.9.0 thru 1.2 1991-1995 CWI yes - 1.3 thru 1.5.2 1.2 1995-1999 CNRI yes - 1.6 1.5.2 2000 CNRI no - 2.0 1.6 2000 BeOpen.com no - 1.6.1 1.6 2001 CNRI yes (2) - 2.1 2.0+1.6.1 2001 PSF no - 2.0.1 2.0+1.6.1 2001 PSF yes - 2.1.1 2.1+2.0.1 2001 PSF yes - 2.1.2 2.1.1 2002 PSF yes - 2.1.3 2.1.2 2002 PSF yes - 2.2 and above 2.1.1 2001-now PSF yes - -Footnotes: - -(1) GPL-compatible doesn't mean that we're distributing Python under - the GPL. All Python licenses, unlike the GPL, let you distribute - a modified version without making your changes open source. The - GPL-compatible licenses make it possible to combine Python with - other software that is released under the GPL; the others don't. - -(2) According to Richard Stallman, 1.6.1 is not GPL-compatible, - because its license has a choice of law clause. According to - CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1 - is "not incompatible" with the GPL. - -Thanks to the many outside volunteers who have worked under Guido's -direction to make these releases possible. - - -B. 
TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON -=============================================================== - -Python software and documentation are licensed under the -Python Software Foundation License Version 2. - -Starting with Python 3.8.6, examples, recipes, and other code in -the documentation are dual licensed under the PSF License Version 2 -and the Zero-Clause BSD license. - -Some software incorporated into Python is under different licenses. -The licenses are listed with code falling under that license. - - -PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 --------------------------------------------- - -1. This LICENSE AGREEMENT is between the Python Software Foundation -("PSF"), and the Individual or Organization ("Licensee") accessing and -otherwise using this software ("Python") in source or binary form and -its associated documentation. - -2. Subject to the terms and conditions of this License Agreement, PSF hereby -grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, -analyze, test, perform and/or display publicly, prepare derivative works, -distribute, and otherwise use Python alone or in any derivative version, -provided, however, that PSF's License Agreement and PSF's notice of copyright, -i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, -2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022 Python Software Foundation; -All Rights Reserved" are retained in Python alone or in any derivative version -prepared by Licensee. - -3. In the event Licensee prepares a derivative work that is based on -or incorporates Python or any part thereof, and wants to make -the derivative work available to others as provided herein, then -Licensee hereby agrees to include in any such work a brief summary of -the changes made to Python. - -4. PSF is making Python available to Licensee on an "AS IS" -basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR -IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND -DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS -FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT -INFRINGE ANY THIRD PARTY RIGHTS. - -5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON -FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS -A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, -OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. - -6. This License Agreement will automatically terminate upon a material -breach of its terms and conditions. - -7. Nothing in this License Agreement shall be deemed to create any -relationship of agency, partnership, or joint venture between PSF and -Licensee. This License Agreement does not grant permission to use PSF -trademarks or trade name in a trademark sense to endorse or promote -products or services of Licensee, or any third party. - -8. By copying, installing or otherwise using Python, Licensee -agrees to be bound by the terms and conditions of this License -Agreement. - - -BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0 -------------------------------------------- - -BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1 - -1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an -office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the -Individual or Organization ("Licensee") accessing and otherwise using -this software in source or binary form and its associated -documentation ("the Software"). - -2. 
Subject to the terms and conditions of this BeOpen Python License -Agreement, BeOpen hereby grants Licensee a non-exclusive, -royalty-free, world-wide license to reproduce, analyze, test, perform -and/or display publicly, prepare derivative works, distribute, and -otherwise use the Software alone or in any derivative version, -provided, however, that the BeOpen Python License is retained in the -Software, alone or in any derivative version prepared by Licensee. - -3. BeOpen is making the Software available to Licensee on an "AS IS" -basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR -IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND -DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS -FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT -INFRINGE ANY THIRD PARTY RIGHTS. - -4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE -SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS -AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY -DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. - -5. This License Agreement will automatically terminate upon a material -breach of its terms and conditions. - -6. This License Agreement shall be governed by and interpreted in all -respects by the law of the State of California, excluding conflict of -law provisions. Nothing in this License Agreement shall be deemed to -create any relationship of agency, partnership, or joint venture -between BeOpen and Licensee. This License Agreement does not grant -permission to use BeOpen trademarks or trade names in a trademark -sense to endorse or promote products or services of Licensee, or any -third party. As an exception, the "BeOpen Python" logos available at -http://www.pythonlabs.com/logos.html may be used according to the -permissions granted on that web page. - -7. By copying, installing or otherwise using the software, Licensee -agrees to be bound by the terms and conditions of this License -Agreement. - - -CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1 ---------------------------------------- - -1. This LICENSE AGREEMENT is between the Corporation for National -Research Initiatives, having an office at 1895 Preston White Drive, -Reston, VA 20191 ("CNRI"), and the Individual or Organization -("Licensee") accessing and otherwise using Python 1.6.1 software in -source or binary form and its associated documentation. - -2. Subject to the terms and conditions of this License Agreement, CNRI -hereby grants Licensee a nonexclusive, royalty-free, world-wide -license to reproduce, analyze, test, perform and/or display publicly, -prepare derivative works, distribute, and otherwise use Python 1.6.1 -alone or in any derivative version, provided, however, that CNRI's -License Agreement and CNRI's notice of copyright, i.e., "Copyright (c) -1995-2001 Corporation for National Research Initiatives; All Rights -Reserved" are retained in Python 1.6.1 alone or in any derivative -version prepared by Licensee. Alternately, in lieu of CNRI's License -Agreement, Licensee may substitute the following text (omitting the -quotes): "Python 1.6.1 is made available subject to the terms and -conditions in CNRI's License Agreement. This Agreement together with -Python 1.6.1 may be located on the internet using the following -unique, persistent identifier (known as a handle): 1895.22/1013. 
This -Agreement may also be obtained from a proxy server on the internet -using the following URL: http://hdl.handle.net/1895.22/1013". - -3. In the event Licensee prepares a derivative work that is based on -or incorporates Python 1.6.1 or any part thereof, and wants to make -the derivative work available to others as provided herein, then -Licensee hereby agrees to include in any such work a brief summary of -the changes made to Python 1.6.1. - -4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS" -basis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR -IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND -DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS -FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT -INFRINGE ANY THIRD PARTY RIGHTS. - -5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON -1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS -A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1, -OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. - -6. This License Agreement will automatically terminate upon a material -breach of its terms and conditions. - -7. This License Agreement shall be governed by the federal -intellectual property law of the United States, including without -limitation the federal copyright law, and, to the extent such -U.S. federal law does not apply, by the law of the Commonwealth of -Virginia, excluding Virginia's conflict of law provisions. -Notwithstanding the foregoing, with regard to derivative works based -on Python 1.6.1 that incorporate non-separable material that was -previously distributed under the GNU General Public License (GPL), the -law of the Commonwealth of Virginia shall govern this License -Agreement only as to issues arising under or with respect to -Paragraphs 4, 5, and 7 of this License Agreement. Nothing in this -License Agreement shall be deemed to create any relationship of -agency, partnership, or joint venture between CNRI and Licensee. This -License Agreement does not grant permission to use CNRI trademarks or -trade name in a trademark sense to endorse or promote products or -services of Licensee, or any third party. - -8. By clicking on the "ACCEPT" button where indicated, or by copying, -installing or otherwise using Python 1.6.1, Licensee agrees to be -bound by the terms and conditions of this License Agreement. - - ACCEPT - - -CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2 --------------------------------------------------- - -Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam, -The Netherlands. All rights reserved. - -Permission to use, copy, modify, and distribute this software and its -documentation for any purpose and without fee is hereby granted, -provided that the above copyright notice appear in all copies and that -both that copyright notice and this permission notice appear in -supporting documentation, and that the name of Stichting Mathematisch -Centrum or CWI not be used in advertising or publicity pertaining to -distribution of the software without specific, written prior -permission. 
- -STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO -THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND -FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE -FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -ZERO-CLAUSE BSD LICENSE FOR CODE IN THE PYTHON DOCUMENTATION ----------------------------------------------------------------------- - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH -REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY -AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, -INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM -LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR -OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR -PERFORMANCE OF THIS SOFTWARE. - - - -Additional Conditions for this Windows binary build ---------------------------------------------------- - -This program is linked with and uses Microsoft Distributable Code, -copyrighted by Microsoft Corporation. The Microsoft Distributable Code -is embedded in each .exe, .dll and .pyd file as a result of running -the code through a linker. - -If you further distribute programs that include the Microsoft -Distributable Code, you must comply with the restrictions on -distribution specified by Microsoft. In particular, you must require -distributors and external end users to agree to terms that protect the -Microsoft Distributable Code at least as much as Microsoft's own -requirements for the Distributable Code. See Microsoft's documentation -(included in its developer tools and on its website at microsoft.com) -for specific details. - -Redistribution of the Windows binary build of the Python interpreter -complies with this agreement, provided that you do not: - -- alter any copyright, trademark or patent notice in Microsoft's -Distributable Code; - -- use Microsoft's trademarks in your programs' names or in a way that -suggests your programs come from or are endorsed by Microsoft; - -- distribute Microsoft's Distributable Code to run on a platform other -than Microsoft operating systems, run-time technologies or application -platforms; or - -- include Microsoft Distributable Code in malicious, deceptive or -unlawful programs. - -These restrictions apply only to the Microsoft Distributable Code as -defined above, not to Python itself or any programs running on the -Python interpreter. The redistribution of the Python interpreter and -libraries is governed by the Python Software License included with this -file, or by other licenses as marked. - - - --------------------------------------------------------------------------- - -This program, "bzip2", the associated library "libbzip2", and all -documentation, are copyright (C) 1996-2019 Julian R Seward. All -rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -2. 
The origin of this software must not be misrepresented; you must - not claim that you wrote the original software. If you use this - software in a product, an acknowledgment in the product - documentation would be appreciated but is not required. - -3. Altered source versions must be plainly marked as such, and must - not be misrepresented as being the original software. - -4. The name of the author may not be used to endorse or promote - products derived from this software without specific prior written - permission. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS -OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE -GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -Julian Seward, jseward@acm.org -bzip2/libbzip2 version 1.0.8 of 13 July 2019 - --------------------------------------------------------------------------- - - - LICENSE ISSUES - ============== - - The OpenSSL toolkit stays under a double license, i.e. both the conditions of - the OpenSSL License and the original SSLeay license apply to the toolkit. - See below for the actual license texts. - - OpenSSL License - --------------- - -/* ==================================================================== - * Copyright (c) 1998-2019 The OpenSSL Project. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * 3. All advertising materials mentioning features or use of this - * software must display the following acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" - * - * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to - * endorse or promote products derived from this software without - * prior written permission. For written permission, please contact - * openssl-core@openssl.org. - * - * 5. Products derived from this software may not be called "OpenSSL" - * nor may "OpenSSL" appear in their names without prior written - * permission of the OpenSSL Project. - * - * 6. Redistributions of any form whatsoever must retain the following - * acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit (http://www.openssl.org/)" - * - * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY - * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE OpenSSL PROJECT OR - * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED - * OF THE POSSIBILITY OF SUCH DAMAGE. - * ==================================================================== - * - * This product includes cryptographic software written by Eric Young - * (eay@cryptsoft.com). This product includes software written by Tim - * Hudson (tjh@cryptsoft.com). - * - */ - - Original SSLeay License - ----------------------- - -/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) - * All rights reserved. - * - * This package is an SSL implementation written - * by Eric Young (eay@cryptsoft.com). - * The implementation was written so as to conform with Netscapes SSL. - * - * This library is free for commercial and non-commercial use as long as - * the following conditions are aheared to. The following conditions - * apply to all code found in this distribution, be it the RC4, RSA, - * lhash, DES, etc., code; not just the SSL code. The SSL documentation - * included with this distribution is covered by the same copyright terms - * except that the holder is Tim Hudson (tjh@cryptsoft.com). - * - * Copyright remains Eric Young's, and as such any Copyright notices in - * the code are not to be removed. - * If this package is used in a product, Eric Young should be given attribution - * as the author of the parts of the library used. - * This can be in the form of a textual message at program startup or - * in documentation (online or textual) provided with the package. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * "This product includes cryptographic software written by - * Eric Young (eay@cryptsoft.com)" - * The word 'cryptographic' can be left out if the rouines from the library - * being used are not cryptographic related :-). - * 4. If you include any Windows specific code (or a derivative thereof) from - * the apps directory (application code) you must include an acknowledgement: - * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" - * - * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * The licence and distribution terms for any publically available version or - * derivative of this code cannot be changed. i.e. this code cannot simply be - * copied and put under another distribution licence - * [including the GNU Public Licence.] - */ - - -libffi - Copyright (c) 1996-2022 Anthony Green, Red Hat, Inc and others. -See source files for details. - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -``Software''), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -This software is copyrighted by the Regents of the University of -California, Sun Microsystems, Inc., Scriptics Corporation, ActiveState -Corporation and other parties. The following terms apply to all files -associated with the software unless explicitly disclaimed in -individual files. - -The authors hereby grant permission to use, copy, modify, distribute, -and license this software and its documentation for any purpose, provided -that existing copyright notices are retained in all copies and that this -notice is included verbatim in any distributions. No written agreement, -license, or royalty fee is required for any of the authorized uses. -Modifications to this software may be copyrighted by their authors -and need not follow the licensing terms described here, provided that -the new terms are clearly indicated on the first page of each file where -they apply. - -IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY -FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES -ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY -DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. - -THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, -INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE -IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE -NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR -MODIFICATIONS. 
- -GOVERNMENT USE: If you are acquiring this software on behalf of the -U.S. government, the Government shall have only "Restricted Rights" -in the software and related documentation as defined in the Federal -Acquisition Regulations (FARs) in Clause 52.227.19 (c) (2). If you -are acquiring the software on behalf of the Department of Defense, the -software shall be classified as "Commercial Computer Software" and the -Government shall have only "Restricted Rights" as defined in Clause -252.227-7014 (b) (3) of DFARs. Notwithstanding the foregoing, the -authors grant the U.S. Government and others acting in its behalf -permission to use and distribute the software in accordance with the -terms specified in this license. - -This software is copyrighted by the Regents of the University of -California, Sun Microsystems, Inc., Scriptics Corporation, ActiveState -Corporation, Apple Inc. and other parties. The following terms apply to -all files associated with the software unless explicitly disclaimed in -individual files. - -The authors hereby grant permission to use, copy, modify, distribute, -and license this software and its documentation for any purpose, provided -that existing copyright notices are retained in all copies and that this -notice is included verbatim in any distributions. No written agreement, -license, or royalty fee is required for any of the authorized uses. -Modifications to this software may be copyrighted by their authors -and need not follow the licensing terms described here, provided that -the new terms are clearly indicated on the first page of each file where -they apply. - -IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY -FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES -ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY -DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. - -THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, -INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE -IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE -NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR -MODIFICATIONS. - -GOVERNMENT USE: If you are acquiring this software on behalf of the -U.S. government, the Government shall have only "Restricted Rights" -in the software and related documentation as defined in the Federal -Acquisition Regulations (FARs) in Clause 52.227.19 (c) (2). If you -are acquiring the software on behalf of the Department of Defense, the -software shall be classified as "Commercial Computer Software" and the -Government shall have only "Restricted Rights" as defined in Clause -252.227-7013 (b) (3) of DFARs. Notwithstanding the foregoing, the -authors grant the U.S. Government and others acting in its behalf -permission to use and distribute the software in accordance with the -terms specified in this license. - -Copyright (c) 1993-1999 Ioi Kim Lam. -Copyright (c) 2000-2001 Tix Project Group. -Copyright (c) 2004 ActiveState - -This software is copyrighted by the above entities -and other parties. The following terms apply to all files associated -with the software unless explicitly disclaimed in individual files. 
- -The authors hereby grant permission to use, copy, modify, distribute, -and license this software and its documentation for any purpose, provided -that existing copyright notices are retained in all copies and that this -notice is included verbatim in any distributions. No written agreement, -license, or royalty fee is required for any of the authorized uses. -Modifications to this software may be copyrighted by their authors -and need not follow the licensing terms described here, provided that -the new terms are clearly indicated on the first page of each file where -they apply. - -IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY -FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES -ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY -DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. - -THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, -INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE -IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE -NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR -MODIFICATIONS. - -GOVERNMENT USE: If you are acquiring this software on behalf of the -U.S. government, the Government shall have only "Restricted Rights" -in the software and related documentation as defined in the Federal -Acquisition Regulations (FARs) in Clause 52.227.19 (c) (2). If you -are acquiring the software on behalf of the Department of Defense, the -software shall be classified as "Commercial Computer Software" and the -Government shall have only "Restricted Rights" as defined in Clause -252.227-7013 (c) (1) of DFARs. Notwithstanding the foregoing, the -authors grant the U.S. Government and others acting in its behalf -permission to use and distribute the software in accordance with the -terms specified in this license. - ----------------------------------------------------------------------- - -Parts of this software are based on the Tcl/Tk software copyrighted by -the Regents of the University of California, Sun Microsystems, Inc., -and other parties. The original license terms of the Tcl/Tk software -distribution is included in the file docs/license.tcltk. - -Parts of this software are based on the HTML Library software -copyrighted by Sun Microsystems, Inc. The original license terms of -the HTML Library software distribution is included in the file -docs/license.html_lib. - diff --git a/python/Lib/__future__.py b/python/Lib/__future__.py deleted file mode 100644 index c339770..0000000 --- a/python/Lib/__future__.py +++ /dev/null @@ -1,147 +0,0 @@ -"""Record of phased-in incompatible language changes. - -Each line is of the form: - - FeatureName = "_Feature(" OptionalRelease "," MandatoryRelease "," - CompilerFlag ")" - -where, normally, OptionalRelease < MandatoryRelease, and both are 5-tuples -of the same form as sys.version_info: - - (PY_MAJOR_VERSION, # the 2 in 2.1.0a3; an int - PY_MINOR_VERSION, # the 1; an int - PY_MICRO_VERSION, # the 0; an int - PY_RELEASE_LEVEL, # "alpha", "beta", "candidate" or "final"; string - PY_RELEASE_SERIAL # the 3; an int - ) - -OptionalRelease records the first release in which - - from __future__ import FeatureName - -was accepted. - -In the case of MandatoryReleases that have not yet occurred, -MandatoryRelease predicts the release in which the feature will become part -of the language. 
- -Else MandatoryRelease records when the feature became part of the language; -in releases at or after that, modules no longer need - - from __future__ import FeatureName - -to use the feature in question, but may continue to use such imports. - -MandatoryRelease may also be None, meaning that a planned feature got -dropped or that the release version is undetermined. - -Instances of class _Feature have two corresponding methods, -.getOptionalRelease() and .getMandatoryRelease(). - -CompilerFlag is the (bitfield) flag that should be passed in the fourth -argument to the builtin function compile() to enable the feature in -dynamically compiled code. This flag is stored in the .compiler_flag -attribute on _Future instances. These values must match the appropriate -#defines of CO_xxx flags in Include/cpython/compile.h. - -No feature line is ever to be deleted from this file. -""" - -all_feature_names = [ - "nested_scopes", - "generators", - "division", - "absolute_import", - "with_statement", - "print_function", - "unicode_literals", - "barry_as_FLUFL", - "generator_stop", - "annotations", -] - -__all__ = ["all_feature_names"] + all_feature_names - -# The CO_xxx symbols are defined here under the same names defined in -# code.h and used by compile.h, so that an editor search will find them here. -# However, they're not exported in __all__, because they don't really belong to -# this module. -CO_NESTED = 0x0010 # nested_scopes -CO_GENERATOR_ALLOWED = 0 # generators (obsolete, was 0x1000) -CO_FUTURE_DIVISION = 0x20000 # division -CO_FUTURE_ABSOLUTE_IMPORT = 0x40000 # perform absolute imports by default -CO_FUTURE_WITH_STATEMENT = 0x80000 # with statement -CO_FUTURE_PRINT_FUNCTION = 0x100000 # print function -CO_FUTURE_UNICODE_LITERALS = 0x200000 # unicode string literals -CO_FUTURE_BARRY_AS_BDFL = 0x400000 -CO_FUTURE_GENERATOR_STOP = 0x800000 # StopIteration becomes RuntimeError in generators -CO_FUTURE_ANNOTATIONS = 0x1000000 # annotations become strings at runtime - - -class _Feature: - - def __init__(self, optionalRelease, mandatoryRelease, compiler_flag): - self.optional = optionalRelease - self.mandatory = mandatoryRelease - self.compiler_flag = compiler_flag - - def getOptionalRelease(self): - """Return first release in which this feature was recognized. - - This is a 5-tuple, of the same form as sys.version_info. - """ - return self.optional - - def getMandatoryRelease(self): - """Return release in which this feature will become mandatory. - - This is a 5-tuple, of the same form as sys.version_info, or, if - the feature was dropped, or the release date is undetermined, is None. 
- """ - return self.mandatory - - def __repr__(self): - return "_Feature" + repr((self.optional, - self.mandatory, - self.compiler_flag)) - - -nested_scopes = _Feature((2, 1, 0, "beta", 1), - (2, 2, 0, "alpha", 0), - CO_NESTED) - -generators = _Feature((2, 2, 0, "alpha", 1), - (2, 3, 0, "final", 0), - CO_GENERATOR_ALLOWED) - -division = _Feature((2, 2, 0, "alpha", 2), - (3, 0, 0, "alpha", 0), - CO_FUTURE_DIVISION) - -absolute_import = _Feature((2, 5, 0, "alpha", 1), - (3, 0, 0, "alpha", 0), - CO_FUTURE_ABSOLUTE_IMPORT) - -with_statement = _Feature((2, 5, 0, "alpha", 1), - (2, 6, 0, "alpha", 0), - CO_FUTURE_WITH_STATEMENT) - -print_function = _Feature((2, 6, 0, "alpha", 2), - (3, 0, 0, "alpha", 0), - CO_FUTURE_PRINT_FUNCTION) - -unicode_literals = _Feature((2, 6, 0, "alpha", 2), - (3, 0, 0, "alpha", 0), - CO_FUTURE_UNICODE_LITERALS) - -barry_as_FLUFL = _Feature((3, 1, 0, "alpha", 2), - (4, 0, 0, "alpha", 0), - CO_FUTURE_BARRY_AS_BDFL) - -generator_stop = _Feature((3, 5, 0, "beta", 1), - (3, 7, 0, "alpha", 0), - CO_FUTURE_GENERATOR_STOP) - -annotations = _Feature((3, 7, 0, "beta", 1), - None, - CO_FUTURE_ANNOTATIONS) diff --git a/python/Lib/__hello__.py b/python/Lib/__hello__.py deleted file mode 100644 index 06b4f14..0000000 --- a/python/Lib/__hello__.py +++ /dev/null @@ -1,16 +0,0 @@ -initialized = True - -class TestFrozenUtf8_1: - """\u00b6""" - -class TestFrozenUtf8_2: - """\u03c0""" - -class TestFrozenUtf8_4: - """\U0001f600""" - -def main(): - print("Hello world!") - -if __name__ == '__main__': - main() diff --git a/python/Lib/_aix_support.py b/python/Lib/_aix_support.py deleted file mode 100644 index fa3f30a..0000000 --- a/python/Lib/_aix_support.py +++ /dev/null @@ -1,91 +0,0 @@ -"""Shared AIX support functions.""" - -import sys -import sysconfig - -try: - import subprocess -except ImportError: # pragma: no cover - # _aix_support is used in distutils by setup.py to build C extensions, - # before subprocess dependencies like _posixsubprocess are available. - import _bootsubprocess as subprocess - - -def _aix_tag(vrtl, bd): - # type: (List[int], int) -> str - # Infer the ABI bitwidth from maxsize (assuming 64 bit as the default) - _sz = 32 if sys.maxsize == (2**31-1) else 64 - _bd = bd if bd != 0 else 9988 - # vrtl[version, release, technology_level] - return "aix-{:1x}{:1d}{:02d}-{:04d}-{}".format(vrtl[0], vrtl[1], vrtl[2], _bd, _sz) - - -# extract version, release and technology level from a VRMF string -def _aix_vrtl(vrmf): - # type: (str) -> List[int] - v, r, tl = vrmf.split(".")[:3] - return [int(v[-1]), int(r), int(tl)] - - -def _aix_bos_rte(): - # type: () -> Tuple[str, int] - """ - Return a Tuple[str, int] e.g., ['7.1.4.34', 1806] - The fileset bos.rte represents the current AIX run-time level. It's VRMF and - builddate reflect the current ABI levels of the runtime environment. - If no builddate is found give a value that will satisfy pep425 related queries - """ - # All AIX systems to have lslpp installed in this location - out = subprocess.check_output(["/usr/bin/lslpp", "-Lqc", "bos.rte"]) - out = out.decode("utf-8") - out = out.strip().split(":") # type: ignore - _bd = int(out[-1]) if out[-1] != '' else 9988 - return (str(out[2]), _bd) - - -def aix_platform(): - # type: () -> str - """ - AIX filesets are identified by four decimal values: V.R.M.F. - V (version) and R (release) can be retrieved using ``uname`` - Since 2007, starting with AIX 5.3 TL7, the M value has been - included with the fileset bos.rte and represents the Technology - Level (TL) of AIX. 
The F (Fix) value also increases, but is not - relevant for comparing releases and binary compatibility. - For binary compatibility the so-called builddate is needed. - Again, the builddate of an AIX release is associated with bos.rte. - AIX ABI compatibility is described as guaranteed at: https://www.ibm.com/\ - support/knowledgecenter/en/ssw_aix_72/install/binary_compatability.html - - For pep425 purposes the AIX platform tag becomes: - "aix-{:1x}{:1d}{:02d}-{:04d}-{}".format(v, r, tl, builddate, bitsize) - e.g., "aix-6107-1415-32" for AIX 6.1 TL7 bd 1415, 32-bit - and, "aix-6107-1415-64" for AIX 6.1 TL7 bd 1415, 64-bit - """ - vrmf, bd = _aix_bos_rte() - return _aix_tag(_aix_vrtl(vrmf), bd) - - -# extract vrtl from the BUILD_GNU_TYPE as an int -def _aix_bgt(): - # type: () -> List[int] - gnu_type = sysconfig.get_config_var("BUILD_GNU_TYPE") - if not gnu_type: - raise ValueError("BUILD_GNU_TYPE is not defined") - return _aix_vrtl(vrmf=gnu_type) - - -def aix_buildtag(): - # type: () -> str - """ - Return the platform_tag of the system Python was built on. - """ - # AIX_BUILDDATE is defined by configure with: - # lslpp -Lcq bos.rte | awk -F: '{ print $NF }' - build_date = sysconfig.get_config_var("AIX_BUILDDATE") - try: - build_date = int(build_date) - except (ValueError, TypeError): - raise ValueError(f"AIX_BUILDDATE is not defined or invalid: " - f"{build_date!r}") - return _aix_tag(_aix_bgt(), build_date) diff --git a/python/Lib/_bootsubprocess.py b/python/Lib/_bootsubprocess.py deleted file mode 100644 index 377d166..0000000 --- a/python/Lib/_bootsubprocess.py +++ /dev/null @@ -1,97 +0,0 @@ -""" -Basic subprocess implementation for POSIX which only uses os functions. Only -implement features required by setup.py to build C extension modules when -subprocess is unavailable. setup.py is not used on Windows. -""" -import os - - -# distutils.spawn used by distutils.command.build_ext -# calls subprocess.Popen().wait() -class Popen: - def __init__(self, cmd, env=None): - self._cmd = cmd - self._env = env - self.returncode = None - - def wait(self): - pid = os.fork() - if pid == 0: - # Child process - try: - if self._env is not None: - os.execve(self._cmd[0], self._cmd, self._env) - else: - os.execv(self._cmd[0], self._cmd) - finally: - os._exit(1) - else: - # Parent process - _, status = os.waitpid(pid, 0) - self.returncode = os.waitstatus_to_exitcode(status) - - return self.returncode - - -def _check_cmd(cmd): - # Use regex [a-zA-Z0-9./-]+: reject empty string, space, etc. 
- safe_chars = [] - for first, last in (("a", "z"), ("A", "Z"), ("0", "9")): - for ch in range(ord(first), ord(last) + 1): - safe_chars.append(chr(ch)) - safe_chars.append("./-") - safe_chars = ''.join(safe_chars) - - if isinstance(cmd, (tuple, list)): - check_strs = cmd - elif isinstance(cmd, str): - check_strs = [cmd] - else: - return False - - for arg in check_strs: - if not isinstance(arg, str): - return False - if not arg: - # reject empty string - return False - for ch in arg: - if ch not in safe_chars: - return False - - return True - - -# _aix_support used by distutil.util calls subprocess.check_output() -def check_output(cmd, **kwargs): - if kwargs: - raise NotImplementedError(repr(kwargs)) - - if not _check_cmd(cmd): - raise ValueError(f"unsupported command: {cmd!r}") - - tmp_filename = "check_output.tmp" - if not isinstance(cmd, str): - cmd = " ".join(cmd) - cmd = f"{cmd} >{tmp_filename}" - - try: - # system() spawns a shell - status = os.system(cmd) - exitcode = os.waitstatus_to_exitcode(status) - if exitcode: - raise ValueError(f"Command {cmd!r} returned non-zero " - f"exit status {exitcode!r}") - - try: - with open(tmp_filename, "rb") as fp: - stdout = fp.read() - except FileNotFoundError: - stdout = b'' - finally: - try: - os.unlink(tmp_filename) - except OSError: - pass - - return stdout diff --git a/python/Lib/_collections_abc.py b/python/Lib/_collections_abc.py deleted file mode 100644 index 13fe137..0000000 --- a/python/Lib/_collections_abc.py +++ /dev/null @@ -1,1121 +0,0 @@ -# Copyright 2007 Google, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Abstract Base Classes (ABCs) for collections, according to PEP 3119. - -Unit tests are in test_collections. -""" - -from abc import ABCMeta, abstractmethod -import sys - -GenericAlias = type(list[int]) -EllipsisType = type(...) -def _f(): pass -FunctionType = type(_f) -del _f - -__all__ = ["Awaitable", "Coroutine", - "AsyncIterable", "AsyncIterator", "AsyncGenerator", - "Hashable", "Iterable", "Iterator", "Generator", "Reversible", - "Sized", "Container", "Callable", "Collection", - "Set", "MutableSet", - "Mapping", "MutableMapping", - "MappingView", "KeysView", "ItemsView", "ValuesView", - "Sequence", "MutableSequence", - "ByteString", - ] - -# This module has been renamed from collections.abc to _collections_abc to -# speed up interpreter startup. Some of the types such as MutableMapping are -# required early but collections module imports a lot of other modules. -# See issue #19218 -__name__ = "collections.abc" - -# Private list of types that we want to register with the various ABCs -# so that they will pass tests like: -# it = iter(somebytearray) -# assert isinstance(it, Iterable) -# Note: in other implementations, these types might not be distinct -# and they may have their own implementation specific types that -# are not included on this list. -bytes_iterator = type(iter(b'')) -bytearray_iterator = type(iter(bytearray())) -#callable_iterator = ??? 
-dict_keyiterator = type(iter({}.keys())) -dict_valueiterator = type(iter({}.values())) -dict_itemiterator = type(iter({}.items())) -list_iterator = type(iter([])) -list_reverseiterator = type(iter(reversed([]))) -range_iterator = type(iter(range(0))) -longrange_iterator = type(iter(range(1 << 1000))) -set_iterator = type(iter(set())) -str_iterator = type(iter("")) -tuple_iterator = type(iter(())) -zip_iterator = type(iter(zip())) -## views ## -dict_keys = type({}.keys()) -dict_values = type({}.values()) -dict_items = type({}.items()) -## misc ## -mappingproxy = type(type.__dict__) -generator = type((lambda: (yield))()) -## coroutine ## -async def _coro(): pass -_coro = _coro() -coroutine = type(_coro) -_coro.close() # Prevent ResourceWarning -del _coro -## asynchronous generator ## -async def _ag(): yield -_ag = _ag() -async_generator = type(_ag) -del _ag - - -### ONE-TRICK PONIES ### - -def _check_methods(C, *methods): - mro = C.__mro__ - for method in methods: - for B in mro: - if method in B.__dict__: - if B.__dict__[method] is None: - return NotImplemented - break - else: - return NotImplemented - return True - -class Hashable(metaclass=ABCMeta): - - __slots__ = () - - @abstractmethod - def __hash__(self): - return 0 - - @classmethod - def __subclasshook__(cls, C): - if cls is Hashable: - return _check_methods(C, "__hash__") - return NotImplemented - - -class Awaitable(metaclass=ABCMeta): - - __slots__ = () - - @abstractmethod - def __await__(self): - yield - - @classmethod - def __subclasshook__(cls, C): - if cls is Awaitable: - return _check_methods(C, "__await__") - return NotImplemented - - __class_getitem__ = classmethod(GenericAlias) - - -class Coroutine(Awaitable): - - __slots__ = () - - @abstractmethod - def send(self, value): - """Send a value into the coroutine. - Return next yielded value or raise StopIteration. - """ - raise StopIteration - - @abstractmethod - def throw(self, typ, val=None, tb=None): - """Raise an exception in the coroutine. - Return next yielded value or raise StopIteration. - """ - if val is None: - if tb is None: - raise typ - val = typ() - if tb is not None: - val = val.with_traceback(tb) - raise val - - def close(self): - """Raise GeneratorExit inside coroutine. - """ - try: - self.throw(GeneratorExit) - except (GeneratorExit, StopIteration): - pass - else: - raise RuntimeError("coroutine ignored GeneratorExit") - - @classmethod - def __subclasshook__(cls, C): - if cls is Coroutine: - return _check_methods(C, '__await__', 'send', 'throw', 'close') - return NotImplemented - - -Coroutine.register(coroutine) - - -class AsyncIterable(metaclass=ABCMeta): - - __slots__ = () - - @abstractmethod - def __aiter__(self): - return AsyncIterator() - - @classmethod - def __subclasshook__(cls, C): - if cls is AsyncIterable: - return _check_methods(C, "__aiter__") - return NotImplemented - - __class_getitem__ = classmethod(GenericAlias) - - -class AsyncIterator(AsyncIterable): - - __slots__ = () - - @abstractmethod - async def __anext__(self): - """Return the next item or raise StopAsyncIteration when exhausted.""" - raise StopAsyncIteration - - def __aiter__(self): - return self - - @classmethod - def __subclasshook__(cls, C): - if cls is AsyncIterator: - return _check_methods(C, "__anext__", "__aiter__") - return NotImplemented - - -class AsyncGenerator(AsyncIterator): - - __slots__ = () - - async def __anext__(self): - """Return the next item from the asynchronous generator. - When exhausted, raise StopAsyncIteration. 
- """ - return await self.asend(None) - - @abstractmethod - async def asend(self, value): - """Send a value into the asynchronous generator. - Return next yielded value or raise StopAsyncIteration. - """ - raise StopAsyncIteration - - @abstractmethod - async def athrow(self, typ, val=None, tb=None): - """Raise an exception in the asynchronous generator. - Return next yielded value or raise StopAsyncIteration. - """ - if val is None: - if tb is None: - raise typ - val = typ() - if tb is not None: - val = val.with_traceback(tb) - raise val - - async def aclose(self): - """Raise GeneratorExit inside coroutine. - """ - try: - await self.athrow(GeneratorExit) - except (GeneratorExit, StopAsyncIteration): - pass - else: - raise RuntimeError("asynchronous generator ignored GeneratorExit") - - @classmethod - def __subclasshook__(cls, C): - if cls is AsyncGenerator: - return _check_methods(C, '__aiter__', '__anext__', - 'asend', 'athrow', 'aclose') - return NotImplemented - - -AsyncGenerator.register(async_generator) - - -class Iterable(metaclass=ABCMeta): - - __slots__ = () - - @abstractmethod - def __iter__(self): - while False: - yield None - - @classmethod - def __subclasshook__(cls, C): - if cls is Iterable: - return _check_methods(C, "__iter__") - return NotImplemented - - __class_getitem__ = classmethod(GenericAlias) - - -class Iterator(Iterable): - - __slots__ = () - - @abstractmethod - def __next__(self): - 'Return the next item from the iterator. When exhausted, raise StopIteration' - raise StopIteration - - def __iter__(self): - return self - - @classmethod - def __subclasshook__(cls, C): - if cls is Iterator: - return _check_methods(C, '__iter__', '__next__') - return NotImplemented - - -Iterator.register(bytes_iterator) -Iterator.register(bytearray_iterator) -#Iterator.register(callable_iterator) -Iterator.register(dict_keyiterator) -Iterator.register(dict_valueiterator) -Iterator.register(dict_itemiterator) -Iterator.register(list_iterator) -Iterator.register(list_reverseiterator) -Iterator.register(range_iterator) -Iterator.register(longrange_iterator) -Iterator.register(set_iterator) -Iterator.register(str_iterator) -Iterator.register(tuple_iterator) -Iterator.register(zip_iterator) - - -class Reversible(Iterable): - - __slots__ = () - - @abstractmethod - def __reversed__(self): - while False: - yield None - - @classmethod - def __subclasshook__(cls, C): - if cls is Reversible: - return _check_methods(C, "__reversed__", "__iter__") - return NotImplemented - - -class Generator(Iterator): - - __slots__ = () - - def __next__(self): - """Return the next item from the generator. - When exhausted, raise StopIteration. - """ - return self.send(None) - - @abstractmethod - def send(self, value): - """Send a value into the generator. - Return next yielded value or raise StopIteration. - """ - raise StopIteration - - @abstractmethod - def throw(self, typ, val=None, tb=None): - """Raise an exception in the generator. - Return next yielded value or raise StopIteration. - """ - if val is None: - if tb is None: - raise typ - val = typ() - if tb is not None: - val = val.with_traceback(tb) - raise val - - def close(self): - """Raise GeneratorExit inside generator. 
- """ - try: - self.throw(GeneratorExit) - except (GeneratorExit, StopIteration): - pass - else: - raise RuntimeError("generator ignored GeneratorExit") - - @classmethod - def __subclasshook__(cls, C): - if cls is Generator: - return _check_methods(C, '__iter__', '__next__', - 'send', 'throw', 'close') - return NotImplemented - - -Generator.register(generator) - - -class Sized(metaclass=ABCMeta): - - __slots__ = () - - @abstractmethod - def __len__(self): - return 0 - - @classmethod - def __subclasshook__(cls, C): - if cls is Sized: - return _check_methods(C, "__len__") - return NotImplemented - - -class Container(metaclass=ABCMeta): - - __slots__ = () - - @abstractmethod - def __contains__(self, x): - return False - - @classmethod - def __subclasshook__(cls, C): - if cls is Container: - return _check_methods(C, "__contains__") - return NotImplemented - - __class_getitem__ = classmethod(GenericAlias) - - -class Collection(Sized, Iterable, Container): - - __slots__ = () - - @classmethod - def __subclasshook__(cls, C): - if cls is Collection: - return _check_methods(C, "__len__", "__iter__", "__contains__") - return NotImplemented - - -class _CallableGenericAlias(GenericAlias): - """ Represent `Callable[argtypes, resulttype]`. - - This sets ``__args__`` to a tuple containing the flattened ``argtypes`` - followed by ``resulttype``. - - Example: ``Callable[[int, str], float]`` sets ``__args__`` to - ``(int, str, float)``. - """ - - __slots__ = () - - def __new__(cls, origin, args): - if not (isinstance(args, tuple) and len(args) == 2): - raise TypeError( - "Callable must be used as Callable[[arg, ...], result].") - t_args, t_result = args - if isinstance(t_args, (tuple, list)): - args = (*t_args, t_result) - elif not _is_param_expr(t_args): - raise TypeError(f"Expected a list of types, an ellipsis, " - f"ParamSpec, or Concatenate. Got {t_args}") - return super().__new__(cls, origin, args) - - def __repr__(self): - if len(self.__args__) == 2 and _is_param_expr(self.__args__[0]): - return super().__repr__() - return (f'collections.abc.Callable' - f'[[{", ".join([_type_repr(a) for a in self.__args__[:-1]])}], ' - f'{_type_repr(self.__args__[-1])}]') - - def __reduce__(self): - args = self.__args__ - if not (len(args) == 2 and _is_param_expr(args[0])): - args = list(args[:-1]), args[-1] - return _CallableGenericAlias, (Callable, args) - - def __getitem__(self, item): - # Called during TypeVar substitution, returns the custom subclass - # rather than the default types.GenericAlias object. Most of the - # code is copied from typing's _GenericAlias and the builtin - # types.GenericAlias. - - if not isinstance(item, tuple): - item = (item,) - # A special case in PEP 612 where if X = Callable[P, int], - # then X[int, str] == X[[int, str]]. 
- if (len(self.__parameters__) == 1 - and _is_param_expr(self.__parameters__[0]) - and item and not _is_param_expr(item[0])): - item = (item,) - - new_args = super().__getitem__(item).__args__ - - # args[0] occurs due to things like Z[[int, str, bool]] from PEP 612 - if not isinstance(new_args[0], (tuple, list)): - t_result = new_args[-1] - t_args = new_args[:-1] - new_args = (t_args, t_result) - return _CallableGenericAlias(Callable, tuple(new_args)) - -def _is_param_expr(obj): - """Checks if obj matches either a list of types, ``...``, ``ParamSpec`` or - ``_ConcatenateGenericAlias`` from typing.py - """ - if obj is Ellipsis: - return True - if isinstance(obj, list): - return True - obj = type(obj) - names = ('ParamSpec', '_ConcatenateGenericAlias') - return obj.__module__ == 'typing' and any(obj.__name__ == name for name in names) - -def _type_repr(obj): - """Return the repr() of an object, special-casing types (internal helper). - - Copied from :mod:`typing` since collections.abc - shouldn't depend on that module. - """ - if isinstance(obj, GenericAlias): - return repr(obj) - if isinstance(obj, type): - if obj.__module__ == 'builtins': - return obj.__qualname__ - return f'{obj.__module__}.{obj.__qualname__}' - if obj is Ellipsis: - return '...' - if isinstance(obj, FunctionType): - return obj.__name__ - return repr(obj) - - -class Callable(metaclass=ABCMeta): - - __slots__ = () - - @abstractmethod - def __call__(self, *args, **kwds): - return False - - @classmethod - def __subclasshook__(cls, C): - if cls is Callable: - return _check_methods(C, "__call__") - return NotImplemented - - __class_getitem__ = classmethod(_CallableGenericAlias) - - -### SETS ### - - -class Set(Collection): - """A set is a finite, iterable container. - - This class provides concrete generic implementations of all - methods except for __contains__, __iter__ and __len__. - - To override the comparisons (presumably for speed, as the - semantics are fixed), redefine __le__ and __ge__, - then the other operations will automatically follow suit. - """ - - __slots__ = () - - def __le__(self, other): - if not isinstance(other, Set): - return NotImplemented - if len(self) > len(other): - return False - for elem in self: - if elem not in other: - return False - return True - - def __lt__(self, other): - if not isinstance(other, Set): - return NotImplemented - return len(self) < len(other) and self.__le__(other) - - def __gt__(self, other): - if not isinstance(other, Set): - return NotImplemented - return len(self) > len(other) and self.__ge__(other) - - def __ge__(self, other): - if not isinstance(other, Set): - return NotImplemented - if len(self) < len(other): - return False - for elem in other: - if elem not in self: - return False - return True - - def __eq__(self, other): - if not isinstance(other, Set): - return NotImplemented - return len(self) == len(other) and self.__le__(other) - - @classmethod - def _from_iterable(cls, it): - '''Construct an instance of the class from any iterable input. - - Must override this method if the class constructor signature - does not accept an iterable for an input. - ''' - return cls(it) - - def __and__(self, other): - if not isinstance(other, Iterable): - return NotImplemented - return self._from_iterable(value for value in other if value in self) - - __rand__ = __and__ - - def isdisjoint(self, other): - 'Return True if two sets have a null intersection.' 
- for value in other: - if value in self: - return False - return True - - def __or__(self, other): - if not isinstance(other, Iterable): - return NotImplemented - chain = (e for s in (self, other) for e in s) - return self._from_iterable(chain) - - __ror__ = __or__ - - def __sub__(self, other): - if not isinstance(other, Set): - if not isinstance(other, Iterable): - return NotImplemented - other = self._from_iterable(other) - return self._from_iterable(value for value in self - if value not in other) - - def __rsub__(self, other): - if not isinstance(other, Set): - if not isinstance(other, Iterable): - return NotImplemented - other = self._from_iterable(other) - return self._from_iterable(value for value in other - if value not in self) - - def __xor__(self, other): - if not isinstance(other, Set): - if not isinstance(other, Iterable): - return NotImplemented - other = self._from_iterable(other) - return (self - other) | (other - self) - - __rxor__ = __xor__ - - def _hash(self): - """Compute the hash value of a set. - - Note that we don't define __hash__: not all sets are hashable. - But if you define a hashable set type, its __hash__ should - call this function. - - This must be compatible __eq__. - - All sets ought to compare equal if they contain the same - elements, regardless of how they are implemented, and - regardless of the order of the elements; so there's not much - freedom for __eq__ or __hash__. We match the algorithm used - by the built-in frozenset type. - """ - MAX = sys.maxsize - MASK = 2 * MAX + 1 - n = len(self) - h = 1927868237 * (n + 1) - h &= MASK - for x in self: - hx = hash(x) - h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167 - h &= MASK - h ^= (h >> 11) ^ (h >> 25) - h = h * 69069 + 907133923 - h &= MASK - if h > MAX: - h -= MASK + 1 - if h == -1: - h = 590923713 - return h - - -Set.register(frozenset) - - -class MutableSet(Set): - """A mutable set is a finite, iterable container. - - This class provides concrete generic implementations of all - methods except for __contains__, __iter__, __len__, - add(), and discard(). - - To override the comparisons (presumably for speed, as the - semantics are fixed), all you have to do is redefine __le__ and - then the other operations will automatically follow suit. - """ - - __slots__ = () - - @abstractmethod - def add(self, value): - """Add an element.""" - raise NotImplementedError - - @abstractmethod - def discard(self, value): - """Remove an element. Do not raise an exception if absent.""" - raise NotImplementedError - - def remove(self, value): - """Remove an element. If not a member, raise a KeyError.""" - if value not in self: - raise KeyError(value) - self.discard(value) - - def pop(self): - """Return the popped value. Raise KeyError if empty.""" - it = iter(self) - try: - value = next(it) - except StopIteration: - raise KeyError from None - self.discard(value) - return value - - def clear(self): - """This is slow (creates N new iterators!) 
but effective.""" - try: - while True: - self.pop() - except KeyError: - pass - - def __ior__(self, it): - for value in it: - self.add(value) - return self - - def __iand__(self, it): - for value in (self - it): - self.discard(value) - return self - - def __ixor__(self, it): - if it is self: - self.clear() - else: - if not isinstance(it, Set): - it = self._from_iterable(it) - for value in it: - if value in self: - self.discard(value) - else: - self.add(value) - return self - - def __isub__(self, it): - if it is self: - self.clear() - else: - for value in it: - self.discard(value) - return self - - -MutableSet.register(set) - - -### MAPPINGS ### - -class Mapping(Collection): - """A Mapping is a generic container for associating key/value - pairs. - - This class provides concrete generic implementations of all - methods except for __getitem__, __iter__, and __len__. - """ - - __slots__ = () - - # Tell ABCMeta.__new__ that this class should have TPFLAGS_MAPPING set. - __abc_tpflags__ = 1 << 6 # Py_TPFLAGS_MAPPING - - @abstractmethod - def __getitem__(self, key): - raise KeyError - - def get(self, key, default=None): - 'D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.' - try: - return self[key] - except KeyError: - return default - - def __contains__(self, key): - try: - self[key] - except KeyError: - return False - else: - return True - - def keys(self): - "D.keys() -> a set-like object providing a view on D's keys" - return KeysView(self) - - def items(self): - "D.items() -> a set-like object providing a view on D's items" - return ItemsView(self) - - def values(self): - "D.values() -> an object providing a view on D's values" - return ValuesView(self) - - def __eq__(self, other): - if not isinstance(other, Mapping): - return NotImplemented - return dict(self.items()) == dict(other.items()) - - __reversed__ = None - -Mapping.register(mappingproxy) - - -class MappingView(Sized): - - __slots__ = '_mapping', - - def __init__(self, mapping): - self._mapping = mapping - - def __len__(self): - return len(self._mapping) - - def __repr__(self): - return '{0.__class__.__name__}({0._mapping!r})'.format(self) - - __class_getitem__ = classmethod(GenericAlias) - - -class KeysView(MappingView, Set): - - __slots__ = () - - @classmethod - def _from_iterable(cls, it): - return set(it) - - def __contains__(self, key): - return key in self._mapping - - def __iter__(self): - yield from self._mapping - - -KeysView.register(dict_keys) - - -class ItemsView(MappingView, Set): - - __slots__ = () - - @classmethod - def _from_iterable(cls, it): - return set(it) - - def __contains__(self, item): - key, value = item - try: - v = self._mapping[key] - except KeyError: - return False - else: - return v is value or v == value - - def __iter__(self): - for key in self._mapping: - yield (key, self._mapping[key]) - - -ItemsView.register(dict_items) - - -class ValuesView(MappingView, Collection): - - __slots__ = () - - def __contains__(self, value): - for key in self._mapping: - v = self._mapping[key] - if v is value or v == value: - return True - return False - - def __iter__(self): - for key in self._mapping: - yield self._mapping[key] - - -ValuesView.register(dict_values) - - -class MutableMapping(Mapping): - """A MutableMapping is a generic container for associating - key/value pairs. - - This class provides concrete generic implementations of all - methods except for __getitem__, __setitem__, __delitem__, - __iter__, and __len__. 
- """ - - __slots__ = () - - @abstractmethod - def __setitem__(self, key, value): - raise KeyError - - @abstractmethod - def __delitem__(self, key): - raise KeyError - - __marker = object() - - def pop(self, key, default=__marker): - '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value. - If key is not found, d is returned if given, otherwise KeyError is raised. - ''' - try: - value = self[key] - except KeyError: - if default is self.__marker: - raise - return default - else: - del self[key] - return value - - def popitem(self): - '''D.popitem() -> (k, v), remove and return some (key, value) pair - as a 2-tuple; but raise KeyError if D is empty. - ''' - try: - key = next(iter(self)) - except StopIteration: - raise KeyError from None - value = self[key] - del self[key] - return key, value - - def clear(self): - 'D.clear() -> None. Remove all items from D.' - try: - while True: - self.popitem() - except KeyError: - pass - - def update(self, other=(), /, **kwds): - ''' D.update([E, ]**F) -> None. Update D from mapping/iterable E and F. - If E present and has a .keys() method, does: for k in E: D[k] = E[k] - If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v - In either case, this is followed by: for k, v in F.items(): D[k] = v - ''' - if isinstance(other, Mapping): - for key in other: - self[key] = other[key] - elif hasattr(other, "keys"): - for key in other.keys(): - self[key] = other[key] - else: - for key, value in other: - self[key] = value - for key, value in kwds.items(): - self[key] = value - - def setdefault(self, key, default=None): - 'D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D' - try: - return self[key] - except KeyError: - self[key] = default - return default - - -MutableMapping.register(dict) - - -### SEQUENCES ### - -class Sequence(Reversible, Collection): - """All the operations on a read-only sequence. - - Concrete subclasses must override __new__ or __init__, - __getitem__, and __len__. - """ - - __slots__ = () - - # Tell ABCMeta.__new__ that this class should have TPFLAGS_SEQUENCE set. - __abc_tpflags__ = 1 << 5 # Py_TPFLAGS_SEQUENCE - - @abstractmethod - def __getitem__(self, index): - raise IndexError - - def __iter__(self): - i = 0 - try: - while True: - v = self[i] - yield v - i += 1 - except IndexError: - return - - def __contains__(self, value): - for v in self: - if v is value or v == value: - return True - return False - - def __reversed__(self): - for i in reversed(range(len(self))): - yield self[i] - - def index(self, value, start=0, stop=None): - '''S.index(value, [start, [stop]]) -> integer -- return first index of value. - Raises ValueError if the value is not present. - - Supporting start and stop arguments is optional, but - recommended. - ''' - if start is not None and start < 0: - start = max(len(self) + start, 0) - if stop is not None and stop < 0: - stop += len(self) - - i = start - while stop is None or i < stop: - try: - v = self[i] - except IndexError: - break - if v is value or v == value: - return i - i += 1 - raise ValueError - - def count(self, value): - 'S.count(value) -> integer -- return number of occurrences of value' - return sum(1 for v in self if v is value or v == value) - -Sequence.register(tuple) -Sequence.register(str) -Sequence.register(range) -Sequence.register(memoryview) - - -class ByteString(Sequence): - """This unifies bytes and bytearray. - - XXX Should add all their methods. 
- """ - - __slots__ = () - -ByteString.register(bytes) -ByteString.register(bytearray) - - -class MutableSequence(Sequence): - """All the operations on a read-write sequence. - - Concrete subclasses must provide __new__ or __init__, - __getitem__, __setitem__, __delitem__, __len__, and insert(). - """ - - __slots__ = () - - @abstractmethod - def __setitem__(self, index, value): - raise IndexError - - @abstractmethod - def __delitem__(self, index): - raise IndexError - - @abstractmethod - def insert(self, index, value): - 'S.insert(index, value) -- insert value before index' - raise IndexError - - def append(self, value): - 'S.append(value) -- append value to the end of the sequence' - self.insert(len(self), value) - - def clear(self): - 'S.clear() -> None -- remove all items from S' - try: - while True: - self.pop() - except IndexError: - pass - - def reverse(self): - 'S.reverse() -- reverse *IN PLACE*' - n = len(self) - for i in range(n//2): - self[i], self[n-i-1] = self[n-i-1], self[i] - - def extend(self, values): - 'S.extend(iterable) -- extend sequence by appending elements from the iterable' - if values is self: - values = list(values) - for v in values: - self.append(v) - - def pop(self, index=-1): - '''S.pop([index]) -> item -- remove and return item at index (default last). - Raise IndexError if list is empty or index is out of range. - ''' - v = self[index] - del self[index] - return v - - def remove(self, value): - '''S.remove(value) -- remove first occurrence of value. - Raise ValueError if the value is not present. - ''' - del self[self.index(value)] - - def __iadd__(self, values): - self.extend(values) - return self - - -MutableSequence.register(list) -MutableSequence.register(bytearray) # Multiply inheriting, see ByteString diff --git a/python/Lib/_compat_pickle.py b/python/Lib/_compat_pickle.py deleted file mode 100644 index 9b6115f..0000000 --- a/python/Lib/_compat_pickle.py +++ /dev/null @@ -1,252 +0,0 @@ -# This module is used to map the old Python 2 names to the new names used in -# Python 3 for the pickle module. This needed to make pickle streams -# generated with Python 2 loadable by Python 3. - -# This is a copy of lib2to3.fixes.fix_imports.MAPPING. We cannot import -# lib2to3 and use the mapping defined there, because lib2to3 uses pickle. -# Thus, this could cause the module to be imported recursively. 
-IMPORT_MAPPING = { - '__builtin__' : 'builtins', - 'copy_reg': 'copyreg', - 'Queue': 'queue', - 'SocketServer': 'socketserver', - 'ConfigParser': 'configparser', - 'repr': 'reprlib', - 'tkFileDialog': 'tkinter.filedialog', - 'tkSimpleDialog': 'tkinter.simpledialog', - 'tkColorChooser': 'tkinter.colorchooser', - 'tkCommonDialog': 'tkinter.commondialog', - 'Dialog': 'tkinter.dialog', - 'Tkdnd': 'tkinter.dnd', - 'tkFont': 'tkinter.font', - 'tkMessageBox': 'tkinter.messagebox', - 'ScrolledText': 'tkinter.scrolledtext', - 'Tkconstants': 'tkinter.constants', - 'Tix': 'tkinter.tix', - 'ttk': 'tkinter.ttk', - 'Tkinter': 'tkinter', - 'markupbase': '_markupbase', - '_winreg': 'winreg', - 'thread': '_thread', - 'dummy_thread': '_dummy_thread', - 'dbhash': 'dbm.bsd', - 'dumbdbm': 'dbm.dumb', - 'dbm': 'dbm.ndbm', - 'gdbm': 'dbm.gnu', - 'xmlrpclib': 'xmlrpc.client', - 'SimpleXMLRPCServer': 'xmlrpc.server', - 'httplib': 'http.client', - 'htmlentitydefs' : 'html.entities', - 'HTMLParser' : 'html.parser', - 'Cookie': 'http.cookies', - 'cookielib': 'http.cookiejar', - 'BaseHTTPServer': 'http.server', - 'test.test_support': 'test.support', - 'commands': 'subprocess', - 'urlparse' : 'urllib.parse', - 'robotparser' : 'urllib.robotparser', - 'urllib2': 'urllib.request', - 'anydbm': 'dbm', - '_abcoll' : 'collections.abc', -} - - -# This contains rename rules that are easy to handle. We ignore the more -# complex stuff (e.g. mapping the names in the urllib and types modules). -# These rules should be run before import names are fixed. -NAME_MAPPING = { - ('__builtin__', 'xrange'): ('builtins', 'range'), - ('__builtin__', 'reduce'): ('functools', 'reduce'), - ('__builtin__', 'intern'): ('sys', 'intern'), - ('__builtin__', 'unichr'): ('builtins', 'chr'), - ('__builtin__', 'unicode'): ('builtins', 'str'), - ('__builtin__', 'long'): ('builtins', 'int'), - ('itertools', 'izip'): ('builtins', 'zip'), - ('itertools', 'imap'): ('builtins', 'map'), - ('itertools', 'ifilter'): ('builtins', 'filter'), - ('itertools', 'ifilterfalse'): ('itertools', 'filterfalse'), - ('itertools', 'izip_longest'): ('itertools', 'zip_longest'), - ('UserDict', 'IterableUserDict'): ('collections', 'UserDict'), - ('UserList', 'UserList'): ('collections', 'UserList'), - ('UserString', 'UserString'): ('collections', 'UserString'), - ('whichdb', 'whichdb'): ('dbm', 'whichdb'), - ('_socket', 'fromfd'): ('socket', 'fromfd'), - ('_multiprocessing', 'Connection'): ('multiprocessing.connection', 'Connection'), - ('multiprocessing.process', 'Process'): ('multiprocessing.context', 'Process'), - ('multiprocessing.forking', 'Popen'): ('multiprocessing.popen_fork', 'Popen'), - ('urllib', 'ContentTooShortError'): ('urllib.error', 'ContentTooShortError'), - ('urllib', 'getproxies'): ('urllib.request', 'getproxies'), - ('urllib', 'pathname2url'): ('urllib.request', 'pathname2url'), - ('urllib', 'quote_plus'): ('urllib.parse', 'quote_plus'), - ('urllib', 'quote'): ('urllib.parse', 'quote'), - ('urllib', 'unquote_plus'): ('urllib.parse', 'unquote_plus'), - ('urllib', 'unquote'): ('urllib.parse', 'unquote'), - ('urllib', 'url2pathname'): ('urllib.request', 'url2pathname'), - ('urllib', 'urlcleanup'): ('urllib.request', 'urlcleanup'), - ('urllib', 'urlencode'): ('urllib.parse', 'urlencode'), - ('urllib', 'urlopen'): ('urllib.request', 'urlopen'), - ('urllib', 'urlretrieve'): ('urllib.request', 'urlretrieve'), - ('urllib2', 'HTTPError'): ('urllib.error', 'HTTPError'), - ('urllib2', 'URLError'): ('urllib.error', 'URLError'), -} - -PYTHON2_EXCEPTIONS = ( - 
"ArithmeticError", - "AssertionError", - "AttributeError", - "BaseException", - "BufferError", - "BytesWarning", - "DeprecationWarning", - "EOFError", - "EnvironmentError", - "Exception", - "FloatingPointError", - "FutureWarning", - "GeneratorExit", - "IOError", - "ImportError", - "ImportWarning", - "IndentationError", - "IndexError", - "KeyError", - "KeyboardInterrupt", - "LookupError", - "MemoryError", - "NameError", - "NotImplementedError", - "OSError", - "OverflowError", - "PendingDeprecationWarning", - "ReferenceError", - "RuntimeError", - "RuntimeWarning", - # StandardError is gone in Python 3, so we map it to Exception - "StopIteration", - "SyntaxError", - "SyntaxWarning", - "SystemError", - "SystemExit", - "TabError", - "TypeError", - "UnboundLocalError", - "UnicodeDecodeError", - "UnicodeEncodeError", - "UnicodeError", - "UnicodeTranslateError", - "UnicodeWarning", - "UserWarning", - "ValueError", - "Warning", - "ZeroDivisionError", -) - -try: - WindowsError -except NameError: - pass -else: - PYTHON2_EXCEPTIONS += ("WindowsError",) - -for excname in PYTHON2_EXCEPTIONS: - NAME_MAPPING[("exceptions", excname)] = ("builtins", excname) - -MULTIPROCESSING_EXCEPTIONS = ( - 'AuthenticationError', - 'BufferTooShort', - 'ProcessError', - 'TimeoutError', -) - -for excname in MULTIPROCESSING_EXCEPTIONS: - NAME_MAPPING[("multiprocessing", excname)] = ("multiprocessing.context", excname) - -# Same, but for 3.x to 2.x -REVERSE_IMPORT_MAPPING = dict((v, k) for (k, v) in IMPORT_MAPPING.items()) -assert len(REVERSE_IMPORT_MAPPING) == len(IMPORT_MAPPING) -REVERSE_NAME_MAPPING = dict((v, k) for (k, v) in NAME_MAPPING.items()) -assert len(REVERSE_NAME_MAPPING) == len(NAME_MAPPING) - -# Non-mutual mappings. - -IMPORT_MAPPING.update({ - 'cPickle': 'pickle', - '_elementtree': 'xml.etree.ElementTree', - 'FileDialog': 'tkinter.filedialog', - 'SimpleDialog': 'tkinter.simpledialog', - 'DocXMLRPCServer': 'xmlrpc.server', - 'SimpleHTTPServer': 'http.server', - 'CGIHTTPServer': 'http.server', - # For compatibility with broken pickles saved in old Python 3 versions - 'UserDict': 'collections', - 'UserList': 'collections', - 'UserString': 'collections', - 'whichdb': 'dbm', - 'StringIO': 'io', - 'cStringIO': 'io', -}) - -REVERSE_IMPORT_MAPPING.update({ - '_bz2': 'bz2', - '_dbm': 'dbm', - '_functools': 'functools', - '_gdbm': 'gdbm', - '_pickle': 'pickle', -}) - -NAME_MAPPING.update({ - ('__builtin__', 'basestring'): ('builtins', 'str'), - ('exceptions', 'StandardError'): ('builtins', 'Exception'), - ('UserDict', 'UserDict'): ('collections', 'UserDict'), - ('socket', '_socketobject'): ('socket', 'SocketType'), -}) - -REVERSE_NAME_MAPPING.update({ - ('_functools', 'reduce'): ('__builtin__', 'reduce'), - ('tkinter.filedialog', 'FileDialog'): ('FileDialog', 'FileDialog'), - ('tkinter.filedialog', 'LoadFileDialog'): ('FileDialog', 'LoadFileDialog'), - ('tkinter.filedialog', 'SaveFileDialog'): ('FileDialog', 'SaveFileDialog'), - ('tkinter.simpledialog', 'SimpleDialog'): ('SimpleDialog', 'SimpleDialog'), - ('xmlrpc.server', 'ServerHTMLDoc'): ('DocXMLRPCServer', 'ServerHTMLDoc'), - ('xmlrpc.server', 'XMLRPCDocGenerator'): - ('DocXMLRPCServer', 'XMLRPCDocGenerator'), - ('xmlrpc.server', 'DocXMLRPCRequestHandler'): - ('DocXMLRPCServer', 'DocXMLRPCRequestHandler'), - ('xmlrpc.server', 'DocXMLRPCServer'): - ('DocXMLRPCServer', 'DocXMLRPCServer'), - ('xmlrpc.server', 'DocCGIXMLRPCRequestHandler'): - ('DocXMLRPCServer', 'DocCGIXMLRPCRequestHandler'), - ('http.server', 'SimpleHTTPRequestHandler'): - ('SimpleHTTPServer', 
'SimpleHTTPRequestHandler'), - ('http.server', 'CGIHTTPRequestHandler'): - ('CGIHTTPServer', 'CGIHTTPRequestHandler'), - ('_socket', 'socket'): ('socket', '_socketobject'), -}) - -PYTHON3_OSERROR_EXCEPTIONS = ( - 'BrokenPipeError', - 'ChildProcessError', - 'ConnectionAbortedError', - 'ConnectionError', - 'ConnectionRefusedError', - 'ConnectionResetError', - 'FileExistsError', - 'FileNotFoundError', - 'InterruptedError', - 'IsADirectoryError', - 'NotADirectoryError', - 'PermissionError', - 'ProcessLookupError', - 'TimeoutError', -) - -for excname in PYTHON3_OSERROR_EXCEPTIONS: - REVERSE_NAME_MAPPING[('builtins', excname)] = ('exceptions', 'OSError') - -PYTHON3_IMPORTERROR_EXCEPTIONS = ( - 'ModuleNotFoundError', -) - -for excname in PYTHON3_IMPORTERROR_EXCEPTIONS: - REVERSE_NAME_MAPPING[('builtins', excname)] = ('exceptions', 'ImportError') -del excname diff --git a/python/Lib/_compression.py b/python/Lib/_compression.py deleted file mode 100644 index 60b6a6e..0000000 --- a/python/Lib/_compression.py +++ /dev/null @@ -1,162 +0,0 @@ -"""Internal classes used by the gzip, lzma and bz2 modules""" - -import io -import sys - -BUFFER_SIZE = io.DEFAULT_BUFFER_SIZE # Compressed data read chunk size - - -class BaseStream(io.BufferedIOBase): - """Mode-checking helper functions.""" - - def _check_not_closed(self): - if self.closed: - raise ValueError("I/O operation on closed file") - - def _check_can_read(self): - if not self.readable(): - raise io.UnsupportedOperation("File not open for reading") - - def _check_can_write(self): - if not self.writable(): - raise io.UnsupportedOperation("File not open for writing") - - def _check_can_seek(self): - if not self.readable(): - raise io.UnsupportedOperation("Seeking is only supported " - "on files open for reading") - if not self.seekable(): - raise io.UnsupportedOperation("The underlying file object " - "does not support seeking") - - -class DecompressReader(io.RawIOBase): - """Adapts the decompressor API to a RawIOBase reader API""" - - def readable(self): - return True - - def __init__(self, fp, decomp_factory, trailing_error=(), **decomp_args): - self._fp = fp - self._eof = False - self._pos = 0 # Current offset in decompressed stream - - # Set to size of decompressed stream once it is known, for SEEK_END - self._size = -1 - - # Save the decompressor factory and arguments. - # If the file contains multiple compressed streams, each - # stream will need a separate decompressor object. A new decompressor - # object is also needed when implementing a backwards seek(). - self._decomp_factory = decomp_factory - self._decomp_args = decomp_args - self._decompressor = self._decomp_factory(**self._decomp_args) - - # Exception class to catch from decompressor signifying invalid - # trailing data to ignore - self._trailing_error = trailing_error - - def close(self): - self._decompressor = None - return super().close() - - def seekable(self): - return self._fp.seekable() - - def readinto(self, b): - with memoryview(b) as view, view.cast("B") as byte_view: - data = self.read(len(byte_view)) - byte_view[:len(data)] = data - return len(data) - - def read(self, size=-1): - if size < 0: - return self.readall() - - if not size or self._eof: - return b"" - data = None # Default if EOF is encountered - # Depending on the input data, our call to the decompressor may not - # return any data. In this case, try again after reading another block. 
- while True: - if self._decompressor.eof: - rawblock = (self._decompressor.unused_data or - self._fp.read(BUFFER_SIZE)) - if not rawblock: - break - # Continue to next stream. - self._decompressor = self._decomp_factory( - **self._decomp_args) - try: - data = self._decompressor.decompress(rawblock, size) - except self._trailing_error: - # Trailing data isn't a valid compressed stream; ignore it. - break - else: - if self._decompressor.needs_input: - rawblock = self._fp.read(BUFFER_SIZE) - if not rawblock: - raise EOFError("Compressed file ended before the " - "end-of-stream marker was reached") - else: - rawblock = b"" - data = self._decompressor.decompress(rawblock, size) - if data: - break - if not data: - self._eof = True - self._size = self._pos - return b"" - self._pos += len(data) - return data - - def readall(self): - chunks = [] - # sys.maxsize means the max length of output buffer is unlimited, - # so that the whole input buffer can be decompressed within one - # .decompress() call. - while data := self.read(sys.maxsize): - chunks.append(data) - - return b"".join(chunks) - - # Rewind the file to the beginning of the data stream. - def _rewind(self): - self._fp.seek(0) - self._eof = False - self._pos = 0 - self._decompressor = self._decomp_factory(**self._decomp_args) - - def seek(self, offset, whence=io.SEEK_SET): - # Recalculate offset as an absolute file position. - if whence == io.SEEK_SET: - pass - elif whence == io.SEEK_CUR: - offset = self._pos + offset - elif whence == io.SEEK_END: - # Seeking relative to EOF - we need to know the file's size. - if self._size < 0: - while self.read(io.DEFAULT_BUFFER_SIZE): - pass - offset = self._size + offset - else: - raise ValueError("Invalid value for whence: {}".format(whence)) - - # Make it so that offset is the number of bytes to skip forward. - if offset < self._pos: - self._rewind() - else: - offset -= self._pos - - # Read and discard data until we reach the desired position. - while offset > 0: - data = self.read(min(io.DEFAULT_BUFFER_SIZE, offset)) - if not data: - break - offset -= len(data) - - return self._pos - - def tell(self): - """Return the current file position.""" - return self._pos diff --git a/python/Lib/_markupbase.py b/python/Lib/_markupbase.py deleted file mode 100644 index 592d53d..0000000 --- a/python/Lib/_markupbase.py +++ /dev/null @@ -1,396 +0,0 @@ -"""Shared support for scanning document type declarations in HTML and XHTML. - -This module is used as a foundation for the html.parser module. It has no -documented public API and should not be used directly. - -""" - -import re - -_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9]*\s*').match -_declstringlit_match = re.compile(r'(\'[^\']*\'|"[^"]*")\s*').match -_commentclose = re.compile(r'--\s*>') -_markedsectionclose = re.compile(r']\s*]\s*>') - -# An analysis of the MS-Word extensions is available at -# http://www.planetpublish.com/xmlarena/xap/Thursday/WordtoXML.pdf - -_msmarkedsectionclose = re.compile(r']\s*>') - -del re - - -class ParserBase: - """Parser base class which provides some common support methods used - by the SGML/HTML and XHTML parsers.""" - - def __init__(self): - if self.__class__ is ParserBase: - raise RuntimeError( - "_markupbase.ParserBase must be subclassed") - - def reset(self): - self.lineno = 1 - self.offset = 0 - - def getpos(self): - """Return current line number and offset.""" - return self.lineno, self.offset - - # Internal -- update line number and offset. 
This should be - # called for each piece of data exactly once, in order -- in other - # words the concatenation of all the input strings to this - # function should be exactly the entire input. - def updatepos(self, i, j): - if i >= j: - return j - rawdata = self.rawdata - nlines = rawdata.count("\n", i, j) - if nlines: - self.lineno = self.lineno + nlines - pos = rawdata.rindex("\n", i, j) # Should not fail - self.offset = j-(pos+1) - else: - self.offset = self.offset + j-i - return j - - _decl_otherchars = '' - - # Internal -- parse declaration (for use by subclasses). - def parse_declaration(self, i): - # This is some sort of declaration; in "HTML as - # deployed," this should only be the document type - # declaration (""). - # ISO 8879:1986, however, has more complex - # declaration syntax for elements in , including: - # --comment-- - # [marked section] - # name in the following list: ENTITY, DOCTYPE, ELEMENT, - # ATTLIST, NOTATION, SHORTREF, USEMAP, - # LINKTYPE, LINK, IDLINK, USELINK, SYSTEM - rawdata = self.rawdata - j = i + 2 - assert rawdata[i:j] == "": - # the empty comment - return j + 1 - if rawdata[j:j+1] in ("-", ""): - # Start of comment followed by buffer boundary, - # or just a buffer boundary. - return -1 - # A simple, practical version could look like: ((name|stringlit) S*) + '>' - n = len(rawdata) - if rawdata[j:j+2] == '--': #comment - # Locate --.*-- as the body of the comment - return self.parse_comment(i) - elif rawdata[j] == '[': #marked section - # Locate [statusWord [...arbitrary SGML...]] as the body of the marked section - # Where statusWord is one of TEMP, CDATA, IGNORE, INCLUDE, RCDATA - # Note that this is extended by Microsoft Office "Save as Web" function - # to include [if...] and [endif]. - return self.parse_marked_section(i) - else: #all other declaration elements - decltype, j = self._scan_name(j, i) - if j < 0: - return j - if decltype == "doctype": - self._decl_otherchars = '' - while j < n: - c = rawdata[j] - if c == ">": - # end of declaration syntax - data = rawdata[i+2:j] - if decltype == "doctype": - self.handle_decl(data) - else: - # According to the HTML5 specs sections "8.2.4.44 Bogus - # comment state" and "8.2.4.45 Markup declaration open - # state", a comment token should be emitted. - # Calling unknown_decl provides more flexibility though. 
- self.unknown_decl(data) - return j + 1 - if c in "\"'": - m = _declstringlit_match(rawdata, j) - if not m: - return -1 # incomplete - j = m.end() - elif c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ": - name, j = self._scan_name(j, i) - elif c in self._decl_otherchars: - j = j + 1 - elif c == "[": - # this could be handled in a separate doctype parser - if decltype == "doctype": - j = self._parse_doctype_subset(j + 1, i) - elif decltype in {"attlist", "linktype", "link", "element"}: - # must tolerate []'d groups in a content model in an element declaration - # also in data attribute specifications of attlist declaration - # also link type declaration subsets in linktype declarations - # also link attribute specification lists in link declarations - raise AssertionError("unsupported '[' char in %s declaration" % decltype) - else: - raise AssertionError("unexpected '[' char in declaration") - else: - raise AssertionError("unexpected %r char in declaration" % rawdata[j]) - if j < 0: - return j - return -1 # incomplete - - # Internal -- parse a marked section - # Override this to handle MS-word extension syntax content - def parse_marked_section(self, i, report=1): - rawdata= self.rawdata - assert rawdata[i:i+3] == ' ending - match= _markedsectionclose.search(rawdata, i+3) - elif sectName in {"if", "else", "endif"}: - # look for MS Office ]> ending - match= _msmarkedsectionclose.search(rawdata, i+3) - else: - raise AssertionError( - 'unknown status keyword %r in marked section' % rawdata[i+3:j] - ) - if not match: - return -1 - if report: - j = match.start(0) - self.unknown_decl(rawdata[i+3: j]) - return match.end(0) - - # Internal -- parse comment, return length or -1 if not terminated - def parse_comment(self, i, report=1): - rawdata = self.rawdata - if rawdata[i:i+4] != ' a, b, c - abc_to_rgb(a, b, c) --> r, g, b - -All inputs and outputs are triples of floats in the range [0.0...1.0] -(with the exception of I and Q, which covers a slightly larger range). -Inputs outside the valid range may cause exceptions or invalid outputs. - -Supported color systems: -RGB: Red, Green, Blue components -YIQ: Luminance, Chrominance (used by composite video signals) -HLS: Hue, Luminance, Saturation -HSV: Hue, Saturation, Value -""" - -# References: -# http://en.wikipedia.org/wiki/YIQ -# http://en.wikipedia.org/wiki/HLS_color_space -# http://en.wikipedia.org/wiki/HSV_color_space - -__all__ = ["rgb_to_yiq","yiq_to_rgb","rgb_to_hls","hls_to_rgb", - "rgb_to_hsv","hsv_to_rgb"] - -# Some floating point constants - -ONE_THIRD = 1.0/3.0 -ONE_SIXTH = 1.0/6.0 -TWO_THIRD = 2.0/3.0 - -# YIQ: used by composite video signals (linear combinations of RGB) -# Y: perceived grey level (0.0 == black, 1.0 == white) -# I, Q: color components -# -# There are a great many versions of the constants used in these formulae. -# The ones in this library uses constants from the FCC version of NTSC. 
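The file being removed here mirrors CPython's own colorsys module, so the rgb_to_abc / abc_to_rgb pairs its docstring describes stay available from the standard library after this deletion. A minimal round-trip sketch, with illustrative colour values that are not taken from this repository:

```python
import colorsys

# All components are floats in [0.0, 1.0], as the module docstring notes.
r, g, b = 0.2, 0.4, 0.4

# RGB -> HSV and back should reproduce the input (up to floating-point error).
h, s, v = colorsys.rgb_to_hsv(r, g, b)
r2, g2, b2 = colorsys.hsv_to_rgb(h, s, v)
assert all(abs(x - y) < 1e-9 for x, y in zip((r, g, b), (r2, g2, b2)))

# YIQ is the one system whose I and Q components may fall outside [0.0, 1.0].
y, i, q = colorsys.rgb_to_yiq(r, g, b)
print((h, s, v), (y, i, q))
```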
- -def rgb_to_yiq(r, g, b): - y = 0.30*r + 0.59*g + 0.11*b - i = 0.74*(r-y) - 0.27*(b-y) - q = 0.48*(r-y) + 0.41*(b-y) - return (y, i, q) - -def yiq_to_rgb(y, i, q): - # r = y + (0.27*q + 0.41*i) / (0.74*0.41 + 0.27*0.48) - # b = y + (0.74*q - 0.48*i) / (0.74*0.41 + 0.27*0.48) - # g = y - (0.30*(r-y) + 0.11*(b-y)) / 0.59 - - r = y + 0.9468822170900693*i + 0.6235565819861433*q - g = y - 0.27478764629897834*i - 0.6356910791873801*q - b = y - 1.1085450346420322*i + 1.7090069284064666*q - - if r < 0.0: - r = 0.0 - if g < 0.0: - g = 0.0 - if b < 0.0: - b = 0.0 - if r > 1.0: - r = 1.0 - if g > 1.0: - g = 1.0 - if b > 1.0: - b = 1.0 - return (r, g, b) - - -# HLS: Hue, Luminance, Saturation -# H: position in the spectrum -# L: color lightness -# S: color saturation - -def rgb_to_hls(r, g, b): - maxc = max(r, g, b) - minc = min(r, g, b) - sumc = (maxc+minc) - rangec = (maxc-minc) - l = sumc/2.0 - if minc == maxc: - return 0.0, l, 0.0 - if l <= 0.5: - s = rangec / sumc - else: - s = rangec / (2.0-sumc) - rc = (maxc-r) / rangec - gc = (maxc-g) / rangec - bc = (maxc-b) / rangec - if r == maxc: - h = bc-gc - elif g == maxc: - h = 2.0+rc-bc - else: - h = 4.0+gc-rc - h = (h/6.0) % 1.0 - return h, l, s - -def hls_to_rgb(h, l, s): - if s == 0.0: - return l, l, l - if l <= 0.5: - m2 = l * (1.0+s) - else: - m2 = l+s-(l*s) - m1 = 2.0*l - m2 - return (_v(m1, m2, h+ONE_THIRD), _v(m1, m2, h), _v(m1, m2, h-ONE_THIRD)) - -def _v(m1, m2, hue): - hue = hue % 1.0 - if hue < ONE_SIXTH: - return m1 + (m2-m1)*hue*6.0 - if hue < 0.5: - return m2 - if hue < TWO_THIRD: - return m1 + (m2-m1)*(TWO_THIRD-hue)*6.0 - return m1 - - -# HSV: Hue, Saturation, Value -# H: position in the spectrum -# S: color saturation ("purity") -# V: color brightness - -def rgb_to_hsv(r, g, b): - maxc = max(r, g, b) - minc = min(r, g, b) - rangec = (maxc-minc) - v = maxc - if minc == maxc: - return 0.0, 0.0, v - s = rangec / maxc - rc = (maxc-r) / rangec - gc = (maxc-g) / rangec - bc = (maxc-b) / rangec - if r == maxc: - h = bc-gc - elif g == maxc: - h = 2.0+rc-bc - else: - h = 4.0+gc-rc - h = (h/6.0) % 1.0 - return h, s, v - -def hsv_to_rgb(h, s, v): - if s == 0.0: - return v, v, v - i = int(h*6.0) # XXX assume int() truncates! - f = (h*6.0) - i - p = v*(1.0 - s) - q = v*(1.0 - s*f) - t = v*(1.0 - s*(1.0-f)) - i = i%6 - if i == 0: - return v, t, p - if i == 1: - return q, v, p - if i == 2: - return p, v, t - if i == 3: - return p, q, v - if i == 4: - return t, p, v - if i == 5: - return v, p, q - # Cannot get here diff --git a/python/Lib/compileall.py b/python/Lib/compileall.py deleted file mode 100644 index abd83a7..0000000 --- a/python/Lib/compileall.py +++ /dev/null @@ -1,463 +0,0 @@ -"""Module/script to byte-compile all .py files to .pyc files. - -When called as a script with arguments, this compiles the directories -given as arguments recursively; the -l option prevents it from -recursing into directories. - -Without arguments, it compiles all modules on sys.path, without -recursing into subdirectories. (Even though it should do so for -packages -- for now, you'll have to deal with packages separately.) - -See module py_compile for details of the actual byte-compilation. 
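The module described by this docstring is the stock compileall script, so the same byte-compilation entry points remain reachable through the standard library once this copy is gone. A minimal sketch, assuming a ./src directory of sources as a placeholder:

```python
import compileall

# Recursively byte-compile every .py file under ./src, reporting errors only.
# compile_dir() returns a truthy value when every file compiled successfully.
ok = compileall.compile_dir("src", quiet=1)
print("all compiled" if ok else "some files failed to compile")
```

The command-line equivalent is `python -m compileall src -q`.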
-""" -import os -import sys -import importlib.util -import py_compile -import struct -import filecmp - -from functools import partial -from pathlib import Path - -__all__ = ["compile_dir","compile_file","compile_path"] - -def _walk_dir(dir, maxlevels, quiet=0): - if quiet < 2 and isinstance(dir, os.PathLike): - dir = os.fspath(dir) - if not quiet: - print('Listing {!r}...'.format(dir)) - try: - names = os.listdir(dir) - except OSError: - if quiet < 2: - print("Can't list {!r}".format(dir)) - names = [] - names.sort() - for name in names: - if name == '__pycache__': - continue - fullname = os.path.join(dir, name) - if not os.path.isdir(fullname): - yield fullname - elif (maxlevels > 0 and name != os.curdir and name != os.pardir and - os.path.isdir(fullname) and not os.path.islink(fullname)): - yield from _walk_dir(fullname, maxlevels=maxlevels - 1, - quiet=quiet) - -def compile_dir(dir, maxlevels=None, ddir=None, force=False, - rx=None, quiet=0, legacy=False, optimize=-1, workers=1, - invalidation_mode=None, *, stripdir=None, - prependdir=None, limit_sl_dest=None, hardlink_dupes=False): - """Byte-compile all modules in the given directory tree. - - Arguments (only dir is required): - - dir: the directory to byte-compile - maxlevels: maximum recursion level (default `sys.getrecursionlimit()`) - ddir: the directory that will be prepended to the path to the - file as it is compiled into each byte-code file. - force: if True, force compilation, even if timestamps are up-to-date - quiet: full output with False or 0, errors only with 1, - no output with 2 - legacy: if True, produce legacy pyc paths instead of PEP 3147 paths - optimize: int or list of optimization levels or -1 for level of - the interpreter. Multiple levels leads to multiple compiled - files each with one optimization level. - workers: maximum number of parallel workers - invalidation_mode: how the up-to-dateness of the pyc will be checked - stripdir: part of path to left-strip from source file path - prependdir: path to prepend to beginning of original file path, applied - after stripdir - limit_sl_dest: ignore symlinks if they are pointing outside of - the defined path - hardlink_dupes: hardlink duplicated pyc files - """ - ProcessPoolExecutor = None - if ddir is not None and (stripdir is not None or prependdir is not None): - raise ValueError(("Destination dir (ddir) cannot be used " - "in combination with stripdir or prependdir")) - if ddir is not None: - stripdir = dir - prependdir = ddir - ddir = None - if workers < 0: - raise ValueError('workers must be greater or equal to 0') - if workers != 1: - # Check if this is a system where ProcessPoolExecutor can function. 
- from concurrent.futures.process import _check_system_limits - try: - _check_system_limits() - except NotImplementedError: - workers = 1 - else: - from concurrent.futures import ProcessPoolExecutor - if maxlevels is None: - maxlevels = sys.getrecursionlimit() - files = _walk_dir(dir, quiet=quiet, maxlevels=maxlevels) - success = True - if workers != 1 and ProcessPoolExecutor is not None: - # If workers == 0, let ProcessPoolExecutor choose - workers = workers or None - with ProcessPoolExecutor(max_workers=workers) as executor: - results = executor.map(partial(compile_file, - ddir=ddir, force=force, - rx=rx, quiet=quiet, - legacy=legacy, - optimize=optimize, - invalidation_mode=invalidation_mode, - stripdir=stripdir, - prependdir=prependdir, - limit_sl_dest=limit_sl_dest, - hardlink_dupes=hardlink_dupes), - files) - success = min(results, default=True) - else: - for file in files: - if not compile_file(file, ddir, force, rx, quiet, - legacy, optimize, invalidation_mode, - stripdir=stripdir, prependdir=prependdir, - limit_sl_dest=limit_sl_dest, - hardlink_dupes=hardlink_dupes): - success = False - return success - -def compile_file(fullname, ddir=None, force=False, rx=None, quiet=0, - legacy=False, optimize=-1, - invalidation_mode=None, *, stripdir=None, prependdir=None, - limit_sl_dest=None, hardlink_dupes=False): - """Byte-compile one file. - - Arguments (only fullname is required): - - fullname: the file to byte-compile - ddir: if given, the directory name compiled in to the - byte-code file. - force: if True, force compilation, even if timestamps are up-to-date - quiet: full output with False or 0, errors only with 1, - no output with 2 - legacy: if True, produce legacy pyc paths instead of PEP 3147 paths - optimize: int or list of optimization levels or -1 for level of - the interpreter. Multiple levels leads to multiple compiled - files each with one optimization level. - invalidation_mode: how the up-to-dateness of the pyc will be checked - stripdir: part of path to left-strip from source file path - prependdir: path to prepend to beginning of original file path, applied - after stripdir - limit_sl_dest: ignore symlinks if they are pointing outside of - the defined path. - hardlink_dupes: hardlink duplicated pyc files - """ - - if ddir is not None and (stripdir is not None or prependdir is not None): - raise ValueError(("Destination dir (ddir) cannot be used " - "in combination with stripdir or prependdir")) - - success = True - if quiet < 2 and isinstance(fullname, os.PathLike): - fullname = os.fspath(fullname) - name = os.path.basename(fullname) - - dfile = None - - if ddir is not None: - dfile = os.path.join(ddir, name) - - if stripdir is not None: - fullname_parts = fullname.split(os.path.sep) - stripdir_parts = stripdir.split(os.path.sep) - ddir_parts = list(fullname_parts) - - for spart, opart in zip(stripdir_parts, fullname_parts): - if spart == opart: - ddir_parts.remove(spart) - - dfile = os.path.join(*ddir_parts) - - if prependdir is not None: - if dfile is None: - dfile = os.path.join(prependdir, fullname) - else: - dfile = os.path.join(prependdir, dfile) - - if isinstance(optimize, int): - optimize = [optimize] - - # Use set() to remove duplicates. - # Use sorted() to create pyc files in a deterministic order. 
- optimize = sorted(set(optimize)) - - if hardlink_dupes and len(optimize) < 2: - raise ValueError("Hardlinking of duplicated bytecode makes sense " - "only for more than one optimization level") - - if rx is not None: - mo = rx.search(fullname) - if mo: - return success - - if limit_sl_dest is not None and os.path.islink(fullname): - if Path(limit_sl_dest).resolve() not in Path(fullname).resolve().parents: - return success - - opt_cfiles = {} - - if os.path.isfile(fullname): - for opt_level in optimize: - if legacy: - opt_cfiles[opt_level] = fullname + 'c' - else: - if opt_level >= 0: - opt = opt_level if opt_level >= 1 else '' - cfile = (importlib.util.cache_from_source( - fullname, optimization=opt)) - opt_cfiles[opt_level] = cfile - else: - cfile = importlib.util.cache_from_source(fullname) - opt_cfiles[opt_level] = cfile - - head, tail = name[:-3], name[-3:] - if tail == '.py': - if not force: - try: - mtime = int(os.stat(fullname).st_mtime) - expect = struct.pack('<4sLL', importlib.util.MAGIC_NUMBER, - 0, mtime & 0xFFFF_FFFF) - for cfile in opt_cfiles.values(): - with open(cfile, 'rb') as chandle: - actual = chandle.read(12) - if expect != actual: - break - else: - return success - except OSError: - pass - if not quiet: - print('Compiling {!r}...'.format(fullname)) - try: - for index, opt_level in enumerate(optimize): - cfile = opt_cfiles[opt_level] - ok = py_compile.compile(fullname, cfile, dfile, True, - optimize=opt_level, - invalidation_mode=invalidation_mode) - if index > 0 and hardlink_dupes: - previous_cfile = opt_cfiles[optimize[index - 1]] - if filecmp.cmp(cfile, previous_cfile, shallow=False): - os.unlink(cfile) - os.link(previous_cfile, cfile) - except py_compile.PyCompileError as err: - success = False - if quiet >= 2: - return success - elif quiet: - print('*** Error compiling {!r}...'.format(fullname)) - else: - print('*** ', end='') - # escape non-printable characters in msg - encoding = sys.stdout.encoding or sys.getdefaultencoding() - msg = err.msg.encode(encoding, errors='backslashreplace').decode(encoding) - print(msg) - except (SyntaxError, UnicodeError, OSError) as e: - success = False - if quiet >= 2: - return success - elif quiet: - print('*** Error compiling {!r}...'.format(fullname)) - else: - print('*** ', end='') - print(e.__class__.__name__ + ':', e) - else: - if ok == 0: - success = False - return success - -def compile_path(skip_curdir=1, maxlevels=0, force=False, quiet=0, - legacy=False, optimize=-1, - invalidation_mode=None): - """Byte-compile all module on sys.path. 
- - Arguments (all optional): - - skip_curdir: if true, skip current directory (default True) - maxlevels: max recursion level (default 0) - force: as for compile_dir() (default False) - quiet: as for compile_dir() (default 0) - legacy: as for compile_dir() (default False) - optimize: as for compile_dir() (default -1) - invalidation_mode: as for compiler_dir() - """ - success = True - for dir in sys.path: - if (not dir or dir == os.curdir) and skip_curdir: - if quiet < 2: - print('Skipping current directory') - else: - success = success and compile_dir( - dir, - maxlevels, - None, - force, - quiet=quiet, - legacy=legacy, - optimize=optimize, - invalidation_mode=invalidation_mode, - ) - return success - - -def main(): - """Script main program.""" - import argparse - - parser = argparse.ArgumentParser( - description='Utilities to support installing Python libraries.') - parser.add_argument('-l', action='store_const', const=0, - default=None, dest='maxlevels', - help="don't recurse into subdirectories") - parser.add_argument('-r', type=int, dest='recursion', - help=('control the maximum recursion level. ' - 'if `-l` and `-r` options are specified, ' - 'then `-r` takes precedence.')) - parser.add_argument('-f', action='store_true', dest='force', - help='force rebuild even if timestamps are up to date') - parser.add_argument('-q', action='count', dest='quiet', default=0, - help='output only error messages; -qq will suppress ' - 'the error messages as well.') - parser.add_argument('-b', action='store_true', dest='legacy', - help='use legacy (pre-PEP3147) compiled file locations') - parser.add_argument('-d', metavar='DESTDIR', dest='ddir', default=None, - help=('directory to prepend to file paths for use in ' - 'compile-time tracebacks and in runtime ' - 'tracebacks in cases where the source file is ' - 'unavailable')) - parser.add_argument('-s', metavar='STRIPDIR', dest='stripdir', - default=None, - help=('part of path to left-strip from path ' - 'to source file - for example buildroot. ' - '`-d` and `-s` options cannot be ' - 'specified together.')) - parser.add_argument('-p', metavar='PREPENDDIR', dest='prependdir', - default=None, - help=('path to add as prefix to path ' - 'to source file - for example / to make ' - 'it absolute when some part is removed ' - 'by `-s` option. 
' - '`-d` and `-p` options cannot be ' - 'specified together.')) - parser.add_argument('-x', metavar='REGEXP', dest='rx', default=None, - help=('skip files matching the regular expression; ' - 'the regexp is searched for in the full path ' - 'of each file considered for compilation')) - parser.add_argument('-i', metavar='FILE', dest='flist', - help=('add all the files and directories listed in ' - 'FILE to the list considered for compilation; ' - 'if "-", names are read from stdin')) - parser.add_argument('compile_dest', metavar='FILE|DIR', nargs='*', - help=('zero or more file and directory names ' - 'to compile; if no arguments given, defaults ' - 'to the equivalent of -l sys.path')) - parser.add_argument('-j', '--workers', default=1, - type=int, help='Run compileall concurrently') - invalidation_modes = [mode.name.lower().replace('_', '-') - for mode in py_compile.PycInvalidationMode] - parser.add_argument('--invalidation-mode', - choices=sorted(invalidation_modes), - help=('set .pyc invalidation mode; defaults to ' - '"checked-hash" if the SOURCE_DATE_EPOCH ' - 'environment variable is set, and ' - '"timestamp" otherwise.')) - parser.add_argument('-o', action='append', type=int, dest='opt_levels', - help=('Optimization levels to run compilation with. ' - 'Default is -1 which uses the optimization level ' - 'of the Python interpreter itself (see -O).')) - parser.add_argument('-e', metavar='DIR', dest='limit_sl_dest', - help='Ignore symlinks pointing outsite of the DIR') - parser.add_argument('--hardlink-dupes', action='store_true', - dest='hardlink_dupes', - help='Hardlink duplicated pyc files') - - args = parser.parse_args() - compile_dests = args.compile_dest - - if args.rx: - import re - args.rx = re.compile(args.rx) - - if args.limit_sl_dest == "": - args.limit_sl_dest = None - - if args.recursion is not None: - maxlevels = args.recursion - else: - maxlevels = args.maxlevels - - if args.opt_levels is None: - args.opt_levels = [-1] - - if len(args.opt_levels) == 1 and args.hardlink_dupes: - parser.error(("Hardlinking of duplicated bytecode makes sense " - "only for more than one optimization level.")) - - if args.ddir is not None and ( - args.stripdir is not None or args.prependdir is not None - ): - parser.error("-d cannot be used in combination with -s or -p") - - # if flist is provided then load it - if args.flist: - try: - with (sys.stdin if args.flist=='-' else - open(args.flist, encoding="utf-8")) as f: - for line in f: - compile_dests.append(line.strip()) - except OSError: - if args.quiet < 2: - print("Error reading file list {}".format(args.flist)) - return False - - if args.invalidation_mode: - ivl_mode = args.invalidation_mode.replace('-', '_').upper() - invalidation_mode = py_compile.PycInvalidationMode[ivl_mode] - else: - invalidation_mode = None - - success = True - try: - if compile_dests: - for dest in compile_dests: - if os.path.isfile(dest): - if not compile_file(dest, args.ddir, args.force, args.rx, - args.quiet, args.legacy, - invalidation_mode=invalidation_mode, - stripdir=args.stripdir, - prependdir=args.prependdir, - optimize=args.opt_levels, - limit_sl_dest=args.limit_sl_dest, - hardlink_dupes=args.hardlink_dupes): - success = False - else: - if not compile_dir(dest, maxlevels, args.ddir, - args.force, args.rx, args.quiet, - args.legacy, workers=args.workers, - invalidation_mode=invalidation_mode, - stripdir=args.stripdir, - prependdir=args.prependdir, - optimize=args.opt_levels, - limit_sl_dest=args.limit_sl_dest, - hardlink_dupes=args.hardlink_dupes): - 
success = False - return success - else: - return compile_path(legacy=args.legacy, force=args.force, - quiet=args.quiet, - invalidation_mode=invalidation_mode) - except KeyboardInterrupt: - if args.quiet < 2: - print("\n[interrupted]") - return False - return True - - -if __name__ == '__main__': - exit_status = int(not main()) - sys.exit(exit_status) diff --git a/python/Lib/concurrent/__init__.py b/python/Lib/concurrent/__init__.py deleted file mode 100644 index de1ac25..0000000 --- a/python/Lib/concurrent/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# This directory is a Python package. diff --git a/python/Lib/concurrent/futures/__init__.py b/python/Lib/concurrent/futures/__init__.py deleted file mode 100644 index 1d5cc07..0000000 --- a/python/Lib/concurrent/futures/__init__.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright 2009 Brian Quinlan. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Execute computations asynchronously using threads or processes.""" - -__author__ = 'Brian Quinlan (brian@sweetapp.com)' - -from concurrent.futures._base import (FIRST_COMPLETED, - FIRST_EXCEPTION, - ALL_COMPLETED, - CancelledError, - TimeoutError, - InvalidStateError, - BrokenExecutor, - Future, - Executor, - wait, - as_completed) - -__all__ = ( - 'FIRST_COMPLETED', - 'FIRST_EXCEPTION', - 'ALL_COMPLETED', - 'CancelledError', - 'TimeoutError', - 'BrokenExecutor', - 'Future', - 'Executor', - 'wait', - 'as_completed', - 'ProcessPoolExecutor', - 'ThreadPoolExecutor', -) - - -def __dir__(): - return __all__ + ('__author__', '__doc__') - - -def __getattr__(name): - global ProcessPoolExecutor, ThreadPoolExecutor - - if name == 'ProcessPoolExecutor': - from .process import ProcessPoolExecutor as pe - ProcessPoolExecutor = pe - return pe - - if name == 'ThreadPoolExecutor': - from .thread import ThreadPoolExecutor as te - ThreadPoolExecutor = te - return te - - raise AttributeError(f"module {__name__!r} has no attribute {name!r}") diff --git a/python/Lib/concurrent/futures/_base.py b/python/Lib/concurrent/futures/_base.py deleted file mode 100644 index 578201f..0000000 --- a/python/Lib/concurrent/futures/_base.py +++ /dev/null @@ -1,654 +0,0 @@ -# Copyright 2009 Brian Quinlan. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -__author__ = 'Brian Quinlan (brian@sweetapp.com)' - -import collections -import logging -import threading -import time -import types - -FIRST_COMPLETED = 'FIRST_COMPLETED' -FIRST_EXCEPTION = 'FIRST_EXCEPTION' -ALL_COMPLETED = 'ALL_COMPLETED' -_AS_COMPLETED = '_AS_COMPLETED' - -# Possible future states (for internal use by the futures package). -PENDING = 'PENDING' -RUNNING = 'RUNNING' -# The future was cancelled by the user... -CANCELLED = 'CANCELLED' -# ...and _Waiter.add_cancelled() was called by a worker. -CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED' -FINISHED = 'FINISHED' - -_FUTURE_STATES = [ - PENDING, - RUNNING, - CANCELLED, - CANCELLED_AND_NOTIFIED, - FINISHED -] - -_STATE_TO_DESCRIPTION_MAP = { - PENDING: "pending", - RUNNING: "running", - CANCELLED: "cancelled", - CANCELLED_AND_NOTIFIED: "cancelled", - FINISHED: "finished" -} - -# Logger for internal use by the futures package. 
-LOGGER = logging.getLogger("concurrent.futures") - -class Error(Exception): - """Base class for all future-related exceptions.""" - pass - -class CancelledError(Error): - """The Future was cancelled.""" - pass - -TimeoutError = TimeoutError # make local alias for the standard exception - -class InvalidStateError(Error): - """The operation is not allowed in this state.""" - pass - -class _Waiter(object): - """Provides the event that wait() and as_completed() block on.""" - def __init__(self): - self.event = threading.Event() - self.finished_futures = [] - - def add_result(self, future): - self.finished_futures.append(future) - - def add_exception(self, future): - self.finished_futures.append(future) - - def add_cancelled(self, future): - self.finished_futures.append(future) - -class _AsCompletedWaiter(_Waiter): - """Used by as_completed().""" - - def __init__(self): - super(_AsCompletedWaiter, self).__init__() - self.lock = threading.Lock() - - def add_result(self, future): - with self.lock: - super(_AsCompletedWaiter, self).add_result(future) - self.event.set() - - def add_exception(self, future): - with self.lock: - super(_AsCompletedWaiter, self).add_exception(future) - self.event.set() - - def add_cancelled(self, future): - with self.lock: - super(_AsCompletedWaiter, self).add_cancelled(future) - self.event.set() - -class _FirstCompletedWaiter(_Waiter): - """Used by wait(return_when=FIRST_COMPLETED).""" - - def add_result(self, future): - super().add_result(future) - self.event.set() - - def add_exception(self, future): - super().add_exception(future) - self.event.set() - - def add_cancelled(self, future): - super().add_cancelled(future) - self.event.set() - -class _AllCompletedWaiter(_Waiter): - """Used by wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED).""" - - def __init__(self, num_pending_calls, stop_on_exception): - self.num_pending_calls = num_pending_calls - self.stop_on_exception = stop_on_exception - self.lock = threading.Lock() - super().__init__() - - def _decrement_pending_calls(self): - with self.lock: - self.num_pending_calls -= 1 - if not self.num_pending_calls: - self.event.set() - - def add_result(self, future): - super().add_result(future) - self._decrement_pending_calls() - - def add_exception(self, future): - super().add_exception(future) - if self.stop_on_exception: - self.event.set() - else: - self._decrement_pending_calls() - - def add_cancelled(self, future): - super().add_cancelled(future) - self._decrement_pending_calls() - -class _AcquireFutures(object): - """A context manager that does an ordered acquire of Future conditions.""" - - def __init__(self, futures): - self.futures = sorted(futures, key=id) - - def __enter__(self): - for future in self.futures: - future._condition.acquire() - - def __exit__(self, *args): - for future in self.futures: - future._condition.release() - -def _create_and_install_waiters(fs, return_when): - if return_when == _AS_COMPLETED: - waiter = _AsCompletedWaiter() - elif return_when == FIRST_COMPLETED: - waiter = _FirstCompletedWaiter() - else: - pending_count = sum( - f._state not in [CANCELLED_AND_NOTIFIED, FINISHED] for f in fs) - - if return_when == FIRST_EXCEPTION: - waiter = _AllCompletedWaiter(pending_count, stop_on_exception=True) - elif return_when == ALL_COMPLETED: - waiter = _AllCompletedWaiter(pending_count, stop_on_exception=False) - else: - raise ValueError("Invalid return condition: %r" % return_when) - - for f in fs: - f._waiters.append(waiter) - - return waiter - - -def _yield_finished_futures(fs, waiter, 
ref_collect): - """ - Iterate on the list *fs*, yielding finished futures one by one in - reverse order. - Before yielding a future, *waiter* is removed from its waiters - and the future is removed from each set in the collection of sets - *ref_collect*. - - The aim of this function is to avoid keeping stale references after - the future is yielded and before the iterator resumes. - """ - while fs: - f = fs[-1] - for futures_set in ref_collect: - futures_set.remove(f) - with f._condition: - f._waiters.remove(waiter) - del f - # Careful not to keep a reference to the popped value - yield fs.pop() - - -def as_completed(fs, timeout=None): - """An iterator over the given futures that yields each as it completes. - - Args: - fs: The sequence of Futures (possibly created by different Executors) to - iterate over. - timeout: The maximum number of seconds to wait. If None, then there - is no limit on the wait time. - - Returns: - An iterator that yields the given Futures as they complete (finished or - cancelled). If any given Futures are duplicated, they will be returned - once. - - Raises: - TimeoutError: If the entire result iterator could not be generated - before the given timeout. - """ - if timeout is not None: - end_time = timeout + time.monotonic() - - fs = set(fs) - total_futures = len(fs) - with _AcquireFutures(fs): - finished = set( - f for f in fs - if f._state in [CANCELLED_AND_NOTIFIED, FINISHED]) - pending = fs - finished - waiter = _create_and_install_waiters(fs, _AS_COMPLETED) - finished = list(finished) - try: - yield from _yield_finished_futures(finished, waiter, - ref_collect=(fs,)) - - while pending: - if timeout is None: - wait_timeout = None - else: - wait_timeout = end_time - time.monotonic() - if wait_timeout < 0: - raise TimeoutError( - '%d (of %d) futures unfinished' % ( - len(pending), total_futures)) - - waiter.event.wait(wait_timeout) - - with waiter.lock: - finished = waiter.finished_futures - waiter.finished_futures = [] - waiter.event.clear() - - # reverse to keep finishing order - finished.reverse() - yield from _yield_finished_futures(finished, waiter, - ref_collect=(fs, pending)) - - finally: - # Remove waiter from unfinished futures - for f in fs: - with f._condition: - f._waiters.remove(waiter) - -DoneAndNotDoneFutures = collections.namedtuple( - 'DoneAndNotDoneFutures', 'done not_done') -def wait(fs, timeout=None, return_when=ALL_COMPLETED): - """Wait for the futures in the given sequence to complete. - - Args: - fs: The sequence of Futures (possibly created by different Executors) to - wait upon. - timeout: The maximum number of seconds to wait. If None, then there - is no limit on the wait time. - return_when: Indicates when this function should return. The options - are: - - FIRST_COMPLETED - Return when any future finishes or is - cancelled. - FIRST_EXCEPTION - Return when any future finishes by raising an - exception. If no future raises an exception - then it is equivalent to ALL_COMPLETED. - ALL_COMPLETED - Return when all futures finish or are cancelled. - - Returns: - A named 2-tuple of sets. The first set, named 'done', contains the - futures that completed (is finished or cancelled) before the wait - completed. The second set, named 'not_done', contains uncompleted - futures. Duplicate futures given to *fs* are removed and will be - returned only once. 
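wait() and as_completed() are normally reached through the public concurrent.futures package rather than this internal _base module. A minimal sketch with ThreadPoolExecutor, where the worker function and inputs are placeholders:

```python
from concurrent.futures import (
    ThreadPoolExecutor, FIRST_COMPLETED, as_completed, wait,
)

def square(n):
    return n * n

with ThreadPoolExecutor(max_workers=4) as pool:
    futures = [pool.submit(square, n) for n in range(8)]

    # Block until at least one future has finished or been cancelled.
    done, not_done = wait(futures, return_when=FIRST_COMPLETED)

    # Drain all results in completion order; duplicate futures are yielded once.
    for fut in as_completed(futures):
        print(fut.result())
```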
- """ - fs = set(fs) - with _AcquireFutures(fs): - done = {f for f in fs - if f._state in [CANCELLED_AND_NOTIFIED, FINISHED]} - not_done = fs - done - if (return_when == FIRST_COMPLETED) and done: - return DoneAndNotDoneFutures(done, not_done) - elif (return_when == FIRST_EXCEPTION) and done: - if any(f for f in done - if not f.cancelled() and f.exception() is not None): - return DoneAndNotDoneFutures(done, not_done) - - if len(done) == len(fs): - return DoneAndNotDoneFutures(done, not_done) - - waiter = _create_and_install_waiters(fs, return_when) - - waiter.event.wait(timeout) - for f in fs: - with f._condition: - f._waiters.remove(waiter) - - done.update(waiter.finished_futures) - return DoneAndNotDoneFutures(done, fs - done) - - -def _result_or_cancel(fut, timeout=None): - try: - try: - return fut.result(timeout) - finally: - fut.cancel() - finally: - # Break a reference cycle with the exception in self._exception - del fut - - -class Future(object): - """Represents the result of an asynchronous computation.""" - - def __init__(self): - """Initializes the future. Should not be called by clients.""" - self._condition = threading.Condition() - self._state = PENDING - self._result = None - self._exception = None - self._waiters = [] - self._done_callbacks = [] - - def _invoke_callbacks(self): - for callback in self._done_callbacks: - try: - callback(self) - except Exception: - LOGGER.exception('exception calling callback for %r', self) - - def __repr__(self): - with self._condition: - if self._state == FINISHED: - if self._exception: - return '<%s at %#x state=%s raised %s>' % ( - self.__class__.__name__, - id(self), - _STATE_TO_DESCRIPTION_MAP[self._state], - self._exception.__class__.__name__) - else: - return '<%s at %#x state=%s returned %s>' % ( - self.__class__.__name__, - id(self), - _STATE_TO_DESCRIPTION_MAP[self._state], - self._result.__class__.__name__) - return '<%s at %#x state=%s>' % ( - self.__class__.__name__, - id(self), - _STATE_TO_DESCRIPTION_MAP[self._state]) - - def cancel(self): - """Cancel the future if possible. - - Returns True if the future was cancelled, False otherwise. A future - cannot be cancelled if it is running or has already completed. - """ - with self._condition: - if self._state in [RUNNING, FINISHED]: - return False - - if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: - return True - - self._state = CANCELLED - self._condition.notify_all() - - self._invoke_callbacks() - return True - - def cancelled(self): - """Return True if the future was cancelled.""" - with self._condition: - return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED] - - def running(self): - """Return True if the future is currently executing.""" - with self._condition: - return self._state == RUNNING - - def done(self): - """Return True if the future was cancelled or finished executing.""" - with self._condition: - return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED] - - def __get_result(self): - if self._exception: - try: - raise self._exception - finally: - # Break a reference cycle with the exception in self._exception - self = None - else: - return self._result - - def add_done_callback(self, fn): - """Attaches a callable that will be called when the future finishes. - - Args: - fn: A callable that will be called with this future as its only - argument when the future completes or is cancelled. The callable - will always be called by a thread in the same process in which - it was added. 
If the future has already completed or been - cancelled then the callable will be called immediately. These - callables are called in the order that they were added. - """ - with self._condition: - if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]: - self._done_callbacks.append(fn) - return - try: - fn(self) - except Exception: - LOGGER.exception('exception calling callback for %r', self) - - def result(self, timeout=None): - """Return the result of the call that the future represents. - - Args: - timeout: The number of seconds to wait for the result if the future - isn't done. If None, then there is no limit on the wait time. - - Returns: - The result of the call that the future represents. - - Raises: - CancelledError: If the future was cancelled. - TimeoutError: If the future didn't finish executing before the given - timeout. - Exception: If the call raised then that exception will be raised. - """ - try: - with self._condition: - if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: - raise CancelledError() - elif self._state == FINISHED: - return self.__get_result() - - self._condition.wait(timeout) - - if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: - raise CancelledError() - elif self._state == FINISHED: - return self.__get_result() - else: - raise TimeoutError() - finally: - # Break a reference cycle with the exception in self._exception - self = None - - def exception(self, timeout=None): - """Return the exception raised by the call that the future represents. - - Args: - timeout: The number of seconds to wait for the exception if the - future isn't done. If None, then there is no limit on the wait - time. - - Returns: - The exception raised by the call that the future represents or None - if the call completed without raising. - - Raises: - CancelledError: If the future was cancelled. - TimeoutError: If the future didn't finish executing before the given - timeout. - """ - - with self._condition: - if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: - raise CancelledError() - elif self._state == FINISHED: - return self._exception - - self._condition.wait(timeout) - - if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: - raise CancelledError() - elif self._state == FINISHED: - return self._exception - else: - raise TimeoutError() - - # The following methods should only be used by Executors and in tests. - def set_running_or_notify_cancel(self): - """Mark the future as running or process any cancel notifications. - - Should only be used by Executor implementations and unit tests. - - If the future has been cancelled (cancel() was called and returned - True) then any threads waiting on the future completing (though calls - to as_completed() or wait()) are notified and False is returned. - - If the future was not cancelled then it is put in the running state - (future calls to running() will return True) and True is returned. - - This method should be called by Executor implementations before - executing the work associated with this future. If this method returns - False then the work should not be executed. - - Returns: - False if the Future was cancelled, True otherwise. - - Raises: - RuntimeError: if this method was already called or if set_result() - or set_exception() was called. - """ - with self._condition: - if self._state == CANCELLED: - self._state = CANCELLED_AND_NOTIFIED - for waiter in self._waiters: - waiter.add_cancelled(self) - # self._condition.notify_all() is not necessary because - # self.cancel() triggers a notification. 
- return False - elif self._state == PENDING: - self._state = RUNNING - return True - else: - LOGGER.critical('Future %s in unexpected state: %s', - id(self), - self._state) - raise RuntimeError('Future in unexpected state') - - def set_result(self, result): - """Sets the return value of work associated with the future. - - Should only be used by Executor implementations and unit tests. - """ - with self._condition: - if self._state in {CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED}: - raise InvalidStateError('{}: {!r}'.format(self._state, self)) - self._result = result - self._state = FINISHED - for waiter in self._waiters: - waiter.add_result(self) - self._condition.notify_all() - self._invoke_callbacks() - - def set_exception(self, exception): - """Sets the result of the future as being the given exception. - - Should only be used by Executor implementations and unit tests. - """ - with self._condition: - if self._state in {CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED}: - raise InvalidStateError('{}: {!r}'.format(self._state, self)) - self._exception = exception - self._state = FINISHED - for waiter in self._waiters: - waiter.add_exception(self) - self._condition.notify_all() - self._invoke_callbacks() - - __class_getitem__ = classmethod(types.GenericAlias) - -class Executor(object): - """This is an abstract base class for concrete asynchronous executors.""" - - def submit(self, fn, /, *args, **kwargs): - """Submits a callable to be executed with the given arguments. - - Schedules the callable to be executed as fn(*args, **kwargs) and returns - a Future instance representing the execution of the callable. - - Returns: - A Future representing the given call. - """ - raise NotImplementedError() - - def map(self, fn, *iterables, timeout=None, chunksize=1): - """Returns an iterator equivalent to map(fn, iter). - - Args: - fn: A callable that will take as many arguments as there are - passed iterables. - timeout: The maximum number of seconds to wait. If None, then there - is no limit on the wait time. - chunksize: The size of the chunks the iterable will be broken into - before being passed to a child process. This argument is only - used by ProcessPoolExecutor; it is ignored by - ThreadPoolExecutor. - - Returns: - An iterator equivalent to: map(func, *iterables) but the calls may - be evaluated out-of-order. - - Raises: - TimeoutError: If the entire result iterator could not be generated - before the given timeout. - Exception: If fn(*args) raises for any values. - """ - if timeout is not None: - end_time = timeout + time.monotonic() - - fs = [self.submit(fn, *args) for args in zip(*iterables)] - - # Yield must be hidden in closure so that the futures are submitted - # before the first iterator value is required. - def result_iterator(): - try: - # reverse to keep finishing order - fs.reverse() - while fs: - # Careful not to keep a reference to the popped future - if timeout is None: - yield _result_or_cancel(fs.pop()) - else: - yield _result_or_cancel(fs.pop(), end_time - time.monotonic()) - finally: - for future in fs: - future.cancel() - return result_iterator() - - def shutdown(self, wait=True, *, cancel_futures=False): - """Clean-up the resources associated with the Executor. - - It is safe to call this method several times. Otherwise, no other - methods can be called after this one. - - Args: - wait: If True then shutdown will not return until all running - futures have finished executing and the resources used by the - executor have been reclaimed. 
- cancel_futures: If True then shutdown will cancel all pending - futures. Futures that are completed or running will not be - cancelled. - """ - pass - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.shutdown(wait=True) - return False - - -class BrokenExecutor(RuntimeError): - """ - Raised when a executor has become non-functional after a severe failure. - """ diff --git a/python/Lib/concurrent/futures/process.py b/python/Lib/concurrent/futures/process.py deleted file mode 100644 index f0ee00a..0000000 --- a/python/Lib/concurrent/futures/process.py +++ /dev/null @@ -1,836 +0,0 @@ -# Copyright 2009 Brian Quinlan. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Implements ProcessPoolExecutor. - -The following diagram and text describe the data-flow through the system: - -|======================= In-process =====================|== Out-of-process ==| - -+----------+ +----------+ +--------+ +-----------+ +---------+ -| | => | Work Ids | | | | Call Q | | Process | -| | +----------+ | | +-----------+ | Pool | -| | | ... | | | | ... | +---------+ -| | | 6 | => | | => | 5, call() | => | | -| | | 7 | | | | ... | | | -| Process | | ... | | Local | +-----------+ | Process | -| Pool | +----------+ | Worker | | #1..n | -| Executor | | Thread | | | -| | +----------- + | | +-----------+ | | -| | <=> | Work Items | <=> | | <= | Result Q | <= | | -| | +------------+ | | +-----------+ | | -| | | 6: call() | | | | ... | | | -| | | future | | | | 4, result | | | -| | | ... | | | | 3, except | | | -+----------+ +------------+ +--------+ +-----------+ +---------+ - -Executor.submit() called: -- creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict -- adds the id of the _WorkItem to the "Work Ids" queue - -Local worker thread: -- reads work ids from the "Work Ids" queue and looks up the corresponding - WorkItem from the "Work Items" dict: if the work item has been cancelled then - it is simply removed from the dict, otherwise it is repackaged as a - _CallItem and put in the "Call Q". New _CallItems are put in the "Call Q" - until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because - calls placed in the "Call Q" can no longer be cancelled with Future.cancel(). 
-- reads _ResultItems from "Result Q", updates the future stored in the - "Work Items" dict and deletes the dict entry - -Process #1..n: -- reads _CallItems from "Call Q", executes the calls, and puts the resulting - _ResultItems in "Result Q" -""" - -__author__ = 'Brian Quinlan (brian@sweetapp.com)' - -import os -from concurrent.futures import _base -import queue -import multiprocessing as mp -import multiprocessing.connection -from multiprocessing.queues import Queue -import threading -import weakref -from functools import partial -import itertools -import sys -from traceback import format_exception - - -_threads_wakeups = weakref.WeakKeyDictionary() -_global_shutdown = False - - -class _ThreadWakeup: - def __init__(self): - self._closed = False - self._reader, self._writer = mp.Pipe(duplex=False) - - def close(self): - if not self._closed: - self._closed = True - self._writer.close() - self._reader.close() - - def wakeup(self): - if not self._closed: - self._writer.send_bytes(b"") - - def clear(self): - if not self._closed: - while self._reader.poll(): - self._reader.recv_bytes() - - -def _python_exit(): - global _global_shutdown - _global_shutdown = True - items = list(_threads_wakeups.items()) - for _, thread_wakeup in items: - # call not protected by ProcessPoolExecutor._shutdown_lock - thread_wakeup.wakeup() - for t, _ in items: - t.join() - -# Register for `_python_exit()` to be called just before joining all -# non-daemon threads. This is used instead of `atexit.register()` for -# compatibility with subinterpreters, which no longer support daemon threads. -# See bpo-39812 for context. -threading._register_atexit(_python_exit) - -# Controls how many more calls than processes will be queued in the call queue. -# A smaller number will mean that processes spend more time idle waiting for -# work while a larger number will make Future.cancel() succeed less frequently -# (Futures in the call queue cannot be cancelled). -EXTRA_QUEUED_CALLS = 1 - - -# On Windows, WaitForMultipleObjects is used to wait for processes to finish. -# It can wait on, at most, 63 objects. 
There is an overhead of two objects: -# - the result queue reader -# - the thread wakeup reader -_MAX_WINDOWS_WORKERS = 63 - 2 - -# Hack to embed stringification of remote traceback in local traceback - -class _RemoteTraceback(Exception): - def __init__(self, tb): - self.tb = tb - def __str__(self): - return self.tb - -class _ExceptionWithTraceback: - def __init__(self, exc, tb): - tb = ''.join(format_exception(type(exc), exc, tb)) - self.exc = exc - # Traceback object needs to be garbage-collected as its frames - # contain references to all the objects in the exception scope - self.exc.__traceback__ = None - self.tb = '\n"""\n%s"""' % tb - def __reduce__(self): - return _rebuild_exc, (self.exc, self.tb) - -def _rebuild_exc(exc, tb): - exc.__cause__ = _RemoteTraceback(tb) - return exc - -class _WorkItem(object): - def __init__(self, future, fn, args, kwargs): - self.future = future - self.fn = fn - self.args = args - self.kwargs = kwargs - -class _ResultItem(object): - def __init__(self, work_id, exception=None, result=None, exit_pid=None): - self.work_id = work_id - self.exception = exception - self.result = result - self.exit_pid = exit_pid - -class _CallItem(object): - def __init__(self, work_id, fn, args, kwargs): - self.work_id = work_id - self.fn = fn - self.args = args - self.kwargs = kwargs - - -class _SafeQueue(Queue): - """Safe Queue set exception to the future object linked to a job""" - def __init__(self, max_size=0, *, ctx, pending_work_items, shutdown_lock, - thread_wakeup): - self.pending_work_items = pending_work_items - self.shutdown_lock = shutdown_lock - self.thread_wakeup = thread_wakeup - super().__init__(max_size, ctx=ctx) - - def _on_queue_feeder_error(self, e, obj): - if isinstance(obj, _CallItem): - tb = format_exception(type(e), e, e.__traceback__) - e.__cause__ = _RemoteTraceback('\n"""\n{}"""'.format(''.join(tb))) - work_item = self.pending_work_items.pop(obj.work_id, None) - with self.shutdown_lock: - self.thread_wakeup.wakeup() - # work_item can be None if another process terminated. In this - # case, the executor_manager_thread fails all work_items - # with BrokenProcessPool - if work_item is not None: - work_item.future.set_exception(e) - else: - super()._on_queue_feeder_error(e, obj) - - -def _get_chunks(*iterables, chunksize): - """ Iterates over zip()ed iterables in chunks. """ - it = zip(*iterables) - while True: - chunk = tuple(itertools.islice(it, chunksize)) - if not chunk: - return - yield chunk - - -def _process_chunk(fn, chunk): - """ Processes a chunk of an iterable passed to map. - - Runs the function passed to map() on a chunk of the - iterable passed to map. - - This function is run in a separate process. - - """ - return [fn(*args) for args in chunk] - - -def _sendback_result(result_queue, work_id, result=None, exception=None, - exit_pid=None): - """Safely send back the given result or exception""" - try: - result_queue.put(_ResultItem(work_id, result=result, - exception=exception, exit_pid=exit_pid)) - except BaseException as e: - exc = _ExceptionWithTraceback(e, e.__traceback__) - result_queue.put(_ResultItem(work_id, exception=exc, - exit_pid=exit_pid)) - - -def _process_worker(call_queue, result_queue, initializer, initargs, max_tasks=None): - """Evaluates calls from call_queue and places the results in result_queue. - - This worker is run in a separate process. - - Args: - call_queue: A ctx.Queue of _CallItems that will be read and - evaluated by the worker. - result_queue: A ctx.Queue of _ResultItems that will written - to by the worker. 
- initializer: A callable initializer, or None - initargs: A tuple of args for the initializer - """ - if initializer is not None: - try: - initializer(*initargs) - except BaseException: - _base.LOGGER.critical('Exception in initializer:', exc_info=True) - # The parent will notice that the process stopped and - # mark the pool broken - return - num_tasks = 0 - exit_pid = None - while True: - call_item = call_queue.get(block=True) - if call_item is None: - # Wake up queue management thread - result_queue.put(os.getpid()) - return - - if max_tasks is not None: - num_tasks += 1 - if num_tasks >= max_tasks: - exit_pid = os.getpid() - - try: - r = call_item.fn(*call_item.args, **call_item.kwargs) - except BaseException as e: - exc = _ExceptionWithTraceback(e, e.__traceback__) - _sendback_result(result_queue, call_item.work_id, exception=exc, - exit_pid=exit_pid) - else: - _sendback_result(result_queue, call_item.work_id, result=r, - exit_pid=exit_pid) - del r - - # Liberate the resource as soon as possible, to avoid holding onto - # open files or shared memory that is not needed anymore - del call_item - - if exit_pid is not None: - return - - -class _ExecutorManagerThread(threading.Thread): - """Manages the communication between this process and the worker processes. - - The manager is run in a local thread. - - Args: - executor: A reference to the ProcessPoolExecutor that owns - this thread. A weakref will be own by the manager as well as - references to internal objects used to introspect the state of - the executor. - """ - - def __init__(self, executor): - # Store references to necessary internals of the executor. - - # A _ThreadWakeup to allow waking up the queue_manager_thread from the - # main Thread and avoid deadlocks caused by permanently locked queues. - self.thread_wakeup = executor._executor_manager_thread_wakeup - self.shutdown_lock = executor._shutdown_lock - - # A weakref.ref to the ProcessPoolExecutor that owns this thread. Used - # to determine if the ProcessPoolExecutor has been garbage collected - # and that the manager can exit. - # When the executor gets garbage collected, the weakref callback - # will wake up the queue management thread so that it can terminate - # if there is no pending work item. - def weakref_cb(_, - thread_wakeup=self.thread_wakeup, - shutdown_lock=self.shutdown_lock): - mp.util.debug('Executor collected: triggering callback for' - ' QueueManager wakeup') - with shutdown_lock: - thread_wakeup.wakeup() - - self.executor_reference = weakref.ref(executor, weakref_cb) - - # A list of the ctx.Process instances used as workers. - self.processes = executor._processes - - # A ctx.Queue that will be filled with _CallItems derived from - # _WorkItems for processing by the process workers. - self.call_queue = executor._call_queue - - # A ctx.SimpleQueue of _ResultItems generated by the process workers. - self.result_queue = executor._result_queue - - # A queue.Queue of work ids e.g. Queue([5, 6, ...]). - self.work_ids_queue = executor._work_ids - - # Maximum number of tasks a worker process can execute before - # exiting safely - self.max_tasks_per_child = executor._max_tasks_per_child - - # A dict mapping work ids to _WorkItems e.g. - # {5: <_WorkItem...>, 6: <_WorkItem...>, ...} - self.pending_work_items = executor._pending_work_items - - super().__init__() - - def run(self): - # Main loop for the executor manager thread. 
- - while True: - self.add_call_item_to_queue() - - result_item, is_broken, cause = self.wait_result_broken_or_wakeup() - - if is_broken: - self.terminate_broken(cause) - return - if result_item is not None: - self.process_result_item(result_item) - - process_exited = result_item.exit_pid is not None - if process_exited: - p = self.processes.pop(result_item.exit_pid) - p.join() - - # Delete reference to result_item to avoid keeping references - # while waiting on new results. - del result_item - - if executor := self.executor_reference(): - if process_exited: - with self.shutdown_lock: - executor._adjust_process_count() - else: - executor._idle_worker_semaphore.release() - del executor - - if self.is_shutting_down(): - self.flag_executor_shutting_down() - - # Since no new work items can be added, it is safe to shutdown - # this thread if there are no pending work items. - if not self.pending_work_items: - self.join_executor_internals() - return - - def add_call_item_to_queue(self): - # Fills call_queue with _WorkItems from pending_work_items. - # This function never blocks. - while True: - if self.call_queue.full(): - return - try: - work_id = self.work_ids_queue.get(block=False) - except queue.Empty: - return - else: - work_item = self.pending_work_items[work_id] - - if work_item.future.set_running_or_notify_cancel(): - self.call_queue.put(_CallItem(work_id, - work_item.fn, - work_item.args, - work_item.kwargs), - block=True) - else: - del self.pending_work_items[work_id] - continue - - def wait_result_broken_or_wakeup(self): - # Wait for a result to be ready in the result_queue while checking - # that all worker processes are still running, or for a wake up - # signal send. The wake up signals come either from new tasks being - # submitted, from the executor being shutdown/gc-ed, or from the - # shutdown of the python interpreter. - result_reader = self.result_queue._reader - assert not self.thread_wakeup._closed - wakeup_reader = self.thread_wakeup._reader - readers = [result_reader, wakeup_reader] - worker_sentinels = [p.sentinel for p in list(self.processes.values())] - ready = mp.connection.wait(readers + worker_sentinels) - - cause = None - is_broken = True - result_item = None - if result_reader in ready: - try: - result_item = result_reader.recv() - is_broken = False - except BaseException as e: - cause = format_exception(type(e), e, e.__traceback__) - - elif wakeup_reader in ready: - is_broken = False - - with self.shutdown_lock: - self.thread_wakeup.clear() - - return result_item, is_broken, cause - - def process_result_item(self, result_item): - # Process the received a result_item. This can be either the PID of a - # worker that exited gracefully or a _ResultItem - - if isinstance(result_item, int): - # Clean shutdown of a worker using its PID - # (avoids marking the executor broken) - assert self.is_shutting_down() - p = self.processes.pop(result_item) - p.join() - if not self.processes: - self.join_executor_internals() - return - else: - # Received a _ResultItem so mark the future as completed. - work_item = self.pending_work_items.pop(result_item.work_id, None) - # work_item can be None if another process terminated (see above) - if work_item is not None: - if result_item.exception: - work_item.future.set_exception(result_item.exception) - else: - work_item.future.set_result(result_item.result) - - def is_shutting_down(self): - # Check whether we should start shutting down the executor. 
- executor = self.executor_reference() - # No more work items can be added if: - # - The interpreter is shutting down OR - # - The executor that owns this worker has been collected OR - # - The executor that owns this worker has been shutdown. - return (_global_shutdown or executor is None - or executor._shutdown_thread) - - def terminate_broken(self, cause): - # Terminate the executor because it is in a broken state. The cause - # argument can be used to display more information on the error that - # lead the executor into becoming broken. - - # Mark the process pool broken so that submits fail right now. - executor = self.executor_reference() - if executor is not None: - executor._broken = ('A child process terminated ' - 'abruptly, the process pool is not ' - 'usable anymore') - executor._shutdown_thread = True - executor = None - - # All pending tasks are to be marked failed with the following - # BrokenProcessPool error - bpe = BrokenProcessPool("A process in the process pool was " - "terminated abruptly while the future was " - "running or pending.") - if cause is not None: - bpe.__cause__ = _RemoteTraceback( - f"\n'''\n{''.join(cause)}'''") - - # Mark pending tasks as failed. - for work_id, work_item in self.pending_work_items.items(): - work_item.future.set_exception(bpe) - # Delete references to object. See issue16284 - del work_item - self.pending_work_items.clear() - - # Terminate remaining workers forcibly: the queues or their - # locks may be in a dirty state and block forever. - for p in self.processes.values(): - p.terminate() - - # clean up resources - self.join_executor_internals() - - def flag_executor_shutting_down(self): - # Flag the executor as shutting down and cancel remaining tasks if - # requested as early as possible if it is not gc-ed yet. - executor = self.executor_reference() - if executor is not None: - executor._shutdown_thread = True - # Cancel pending work items if requested. - if executor._cancel_pending_futures: - # Cancel all pending futures and update pending_work_items - # to only have futures that are currently running. - new_pending_work_items = {} - for work_id, work_item in self.pending_work_items.items(): - if not work_item.future.cancel(): - new_pending_work_items[work_id] = work_item - self.pending_work_items = new_pending_work_items - # Drain work_ids_queue since we no longer need to - # add items to the call queue. - while True: - try: - self.work_ids_queue.get_nowait() - except queue.Empty: - break - # Make sure we do this only once to not waste time looping - # on running processes over and over. - executor._cancel_pending_futures = False - - def shutdown_workers(self): - n_children_to_stop = self.get_n_children_alive() - n_sentinels_sent = 0 - # Send the right number of sentinels, to make sure all children are - # properly terminated. - while (n_sentinels_sent < n_children_to_stop - and self.get_n_children_alive() > 0): - for i in range(n_children_to_stop - n_sentinels_sent): - try: - self.call_queue.put_nowait(None) - n_sentinels_sent += 1 - except queue.Full: - break - - def join_executor_internals(self): - self.shutdown_workers() - # Release the queue's resources as soon as possible. - self.call_queue.close() - self.call_queue.join_thread() - with self.shutdown_lock: - self.thread_wakeup.close() - # If .join() is not called on the created processes then - # some ctx.Queue methods may deadlock on Mac OS X. 
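The `terminate_broken` path above is what a caller ultimately observes as `BrokenProcessPool`. A minimal sketch of that behaviour, assuming a worker function that deliberately kills its own process (the function name and exit code are illustrative):

```python
import os
from concurrent.futures import ProcessPoolExecutor
from concurrent.futures.process import BrokenProcessPool

def crash():
    # Simulate a worker that dies without ever sending a result back.
    os._exit(1)

if __name__ == "__main__":
    with ProcessPoolExecutor(max_workers=1) as executor:
        future = executor.submit(crash)
        try:
            future.result()
        except BrokenProcessPool as exc:
            # The manager thread noticed the dead child and failed all
            # pending futures with BrokenProcessPool.
            print("pool marked broken:", exc)
```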
- for p in self.processes.values(): - p.join() - - def get_n_children_alive(self): - # This is an upper bound on the number of children alive. - return sum(p.is_alive() for p in self.processes.values()) - - -_system_limits_checked = False -_system_limited = None - - -def _check_system_limits(): - global _system_limits_checked, _system_limited - if _system_limits_checked: - if _system_limited: - raise NotImplementedError(_system_limited) - _system_limits_checked = True - try: - import multiprocessing.synchronize - except ImportError: - _system_limited = ( - "This Python build lacks multiprocessing.synchronize, usually due " - "to named semaphores being unavailable on this platform." - ) - raise NotImplementedError(_system_limited) - try: - nsems_max = os.sysconf("SC_SEM_NSEMS_MAX") - except (AttributeError, ValueError): - # sysconf not available or setting not available - return - if nsems_max == -1: - # indetermined limit, assume that limit is determined - # by available memory only - return - if nsems_max >= 256: - # minimum number of semaphores available - # according to POSIX - return - _system_limited = ("system provides too few semaphores (%d" - " available, 256 necessary)" % nsems_max) - raise NotImplementedError(_system_limited) - - -def _chain_from_iterable_of_lists(iterable): - """ - Specialized implementation of itertools.chain.from_iterable. - Each item in *iterable* should be a list. This function is - careful not to keep references to yielded objects. - """ - for element in iterable: - element.reverse() - while element: - yield element.pop() - - -class BrokenProcessPool(_base.BrokenExecutor): - """ - Raised when a process in a ProcessPoolExecutor terminated abruptly - while a future was in the running state. - """ - - -class ProcessPoolExecutor(_base.Executor): - def __init__(self, max_workers=None, mp_context=None, - initializer=None, initargs=(), *, max_tasks_per_child=None): - """Initializes a new ProcessPoolExecutor instance. - - Args: - max_workers: The maximum number of processes that can be used to - execute the given calls. If None or not given then as many - worker processes will be created as the machine has processors. - mp_context: A multiprocessing context to launch the workers. This - object should provide SimpleQueue, Queue and Process. Useful - to allow specific multiprocessing start methods. - initializer: A callable used to initialize worker processes. - initargs: A tuple of arguments to pass to the initializer. - max_tasks_per_child: The maximum number of tasks a worker process - can complete before it will exit and be replaced with a fresh - worker process. The default of None means worker process will - live as long as the executor. Requires a non-'fork' mp_context - start method. When given, we default to using 'spawn' if no - mp_context is supplied. 
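For reference, a minimal usage sketch of the constructor arguments documented above; the worker function and initializer names are assumptions, and `max_tasks_per_child` assumes Python 3.11 or newer:

```python
import multiprocessing as mp
from concurrent.futures import ProcessPoolExecutor

def _init_worker(level):
    # Hypothetical per-process setup, e.g. configuring logging.
    print(f"worker starting with level={level}")

def square(x):
    return x * x

if __name__ == "__main__":
    # 'spawn' is also what the executor defaults to when
    # max_tasks_per_child is given without an explicit mp_context.
    ctx = mp.get_context("spawn")
    with ProcessPoolExecutor(max_workers=4,
                             mp_context=ctx,
                             initializer=_init_worker,
                             initargs=("INFO",),
                             max_tasks_per_child=10) as executor:
        future = executor.submit(square, 7)
        print(future.result())  # 49
```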
- """ - _check_system_limits() - - if max_workers is None: - self._max_workers = os.cpu_count() or 1 - if sys.platform == 'win32': - self._max_workers = min(_MAX_WINDOWS_WORKERS, - self._max_workers) - else: - if max_workers <= 0: - raise ValueError("max_workers must be greater than 0") - elif (sys.platform == 'win32' and - max_workers > _MAX_WINDOWS_WORKERS): - raise ValueError( - f"max_workers must be <= {_MAX_WINDOWS_WORKERS}") - - self._max_workers = max_workers - - if mp_context is None: - if max_tasks_per_child is not None: - mp_context = mp.get_context("spawn") - else: - mp_context = mp.get_context() - self._mp_context = mp_context - - # https://github.com/python/cpython/issues/90622 - self._safe_to_dynamically_spawn_children = ( - self._mp_context.get_start_method(allow_none=False) != "fork") - - if initializer is not None and not callable(initializer): - raise TypeError("initializer must be a callable") - self._initializer = initializer - self._initargs = initargs - - if max_tasks_per_child is not None: - if not isinstance(max_tasks_per_child, int): - raise TypeError("max_tasks_per_child must be an integer") - elif max_tasks_per_child <= 0: - raise ValueError("max_tasks_per_child must be >= 1") - if self._mp_context.get_start_method(allow_none=False) == "fork": - # https://github.com/python/cpython/issues/90622 - raise ValueError("max_tasks_per_child is incompatible with" - " the 'fork' multiprocessing start method;" - " supply a different mp_context.") - self._max_tasks_per_child = max_tasks_per_child - - # Management thread - self._executor_manager_thread = None - - # Map of pids to processes - self._processes = {} - - # Shutdown is a two-step process. - self._shutdown_thread = False - self._shutdown_lock = threading.Lock() - self._idle_worker_semaphore = threading.Semaphore(0) - self._broken = False - self._queue_count = 0 - self._pending_work_items = {} - self._cancel_pending_futures = False - - # _ThreadWakeup is a communication channel used to interrupt the wait - # of the main loop of executor_manager_thread from another thread (e.g. - # when calling executor.submit or executor.shutdown). We do not use the - # _result_queue to send wakeup signals to the executor_manager_thread - # as it could result in a deadlock if a worker process dies with the - # _result_queue write lock still acquired. - # - # _shutdown_lock must be locked to access _ThreadWakeup. - self._executor_manager_thread_wakeup = _ThreadWakeup() - - # Create communication channels for the executor - # Make the call queue slightly larger than the number of processes to - # prevent the worker processes from idling. But don't make it too big - # because futures in the call queue cannot be cancelled. - queue_size = self._max_workers + EXTRA_QUEUED_CALLS - self._call_queue = _SafeQueue( - max_size=queue_size, ctx=self._mp_context, - pending_work_items=self._pending_work_items, - shutdown_lock=self._shutdown_lock, - thread_wakeup=self._executor_manager_thread_wakeup) - # Killed worker processes can produce spurious "broken pipe" - # tracebacks in the queue's own worker thread. But we detect killed - # processes anyway, so silence the tracebacks. - self._call_queue._ignore_epipe = True - self._result_queue = mp_context.SimpleQueue() - self._work_ids = queue.Queue() - - def _start_executor_manager_thread(self): - if self._executor_manager_thread is None: - # Start the processes so that their sentinels are known. - if not self._safe_to_dynamically_spawn_children: # ie, using fork. 
- self._launch_processes() - self._executor_manager_thread = _ExecutorManagerThread(self) - self._executor_manager_thread.start() - _threads_wakeups[self._executor_manager_thread] = \ - self._executor_manager_thread_wakeup - - def _adjust_process_count(self): - # if there's an idle process, we don't need to spawn a new one. - if self._idle_worker_semaphore.acquire(blocking=False): - return - - process_count = len(self._processes) - if process_count < self._max_workers: - # Assertion disabled as this codepath is also used to replace a - # worker that unexpectedly dies, even when using the 'fork' start - # method. That means there is still a potential deadlock bug. If a - # 'fork' mp_context worker dies, we'll be forking a new one when - # we know a thread is running (self._executor_manager_thread). - #assert self._safe_to_dynamically_spawn_children or not self._executor_manager_thread, 'https://github.com/python/cpython/issues/90622' - self._spawn_process() - - def _launch_processes(self): - # https://github.com/python/cpython/issues/90622 - assert not self._executor_manager_thread, ( - 'Processes cannot be fork()ed after the thread has started, ' - 'deadlock in the child processes could result.') - for _ in range(len(self._processes), self._max_workers): - self._spawn_process() - - def _spawn_process(self): - p = self._mp_context.Process( - target=_process_worker, - args=(self._call_queue, - self._result_queue, - self._initializer, - self._initargs, - self._max_tasks_per_child)) - p.start() - self._processes[p.pid] = p - - def submit(self, fn, /, *args, **kwargs): - with self._shutdown_lock: - if self._broken: - raise BrokenProcessPool(self._broken) - if self._shutdown_thread: - raise RuntimeError('cannot schedule new futures after shutdown') - if _global_shutdown: - raise RuntimeError('cannot schedule new futures after ' - 'interpreter shutdown') - - f = _base.Future() - w = _WorkItem(f, fn, args, kwargs) - - self._pending_work_items[self._queue_count] = w - self._work_ids.put(self._queue_count) - self._queue_count += 1 - # Wake up queue management thread - self._executor_manager_thread_wakeup.wakeup() - - if self._safe_to_dynamically_spawn_children: - self._adjust_process_count() - self._start_executor_manager_thread() - return f - submit.__doc__ = _base.Executor.submit.__doc__ - - def map(self, fn, *iterables, timeout=None, chunksize=1): - """Returns an iterator equivalent to map(fn, iter). - - Args: - fn: A callable that will take as many arguments as there are - passed iterables. - timeout: The maximum number of seconds to wait. If None, then there - is no limit on the wait time. - chunksize: If greater than one, the iterables will be chopped into - chunks of size chunksize and submitted to the process pool. - If set to one, the items in the list will be sent one at a time. - - Returns: - An iterator equivalent to: map(func, *iterables) but the calls may - be evaluated out-of-order. - - Raises: - TimeoutError: If the entire result iterator could not be generated - before the given timeout. - Exception: If fn(*args) raises for any values. 
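A short sketch of the `map()` semantics described above; the function and inputs are illustrative, and `chunksize` only controls how many work items are batched per inter-process transfer, not the order of the returned results:

```python
from concurrent.futures import ProcessPoolExecutor

def add(a, b):
    return a + b

if __name__ == "__main__":
    with ProcessPoolExecutor() as executor:
        # Two iterables, so fn is called as add(a, b); chunksize batches
        # the pickled work items sent to each worker process.
        results = executor.map(add, range(5), range(5, 10), chunksize=2)
        print(list(results))  # [5, 7, 9, 11, 13]
```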
- """ - if chunksize < 1: - raise ValueError("chunksize must be >= 1.") - - results = super().map(partial(_process_chunk, fn), - _get_chunks(*iterables, chunksize=chunksize), - timeout=timeout) - return _chain_from_iterable_of_lists(results) - - def shutdown(self, wait=True, *, cancel_futures=False): - with self._shutdown_lock: - self._cancel_pending_futures = cancel_futures - self._shutdown_thread = True - if self._executor_manager_thread_wakeup is not None: - # Wake up queue management thread - self._executor_manager_thread_wakeup.wakeup() - - if self._executor_manager_thread is not None and wait: - self._executor_manager_thread.join() - # To reduce the risk of opening too many files, remove references to - # objects that use file descriptors. - self._executor_manager_thread = None - self._call_queue = None - if self._result_queue is not None and wait: - self._result_queue.close() - self._result_queue = None - self._processes = None - self._executor_manager_thread_wakeup = None - - shutdown.__doc__ = _base.Executor.shutdown.__doc__ diff --git a/python/Lib/concurrent/futures/thread.py b/python/Lib/concurrent/futures/thread.py deleted file mode 100644 index fdda936..0000000 --- a/python/Lib/concurrent/futures/thread.py +++ /dev/null @@ -1,236 +0,0 @@ -# Copyright 2009 Brian Quinlan. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Implements ThreadPoolExecutor.""" - -__author__ = 'Brian Quinlan (brian@sweetapp.com)' - -from concurrent.futures import _base -import itertools -import queue -import threading -import types -import weakref -import os - - -_threads_queues = weakref.WeakKeyDictionary() -_shutdown = False -# Lock that ensures that new workers are not created while the interpreter is -# shutting down. Must be held while mutating _threads_queues and _shutdown. -_global_shutdown_lock = threading.Lock() - -def _python_exit(): - global _shutdown - with _global_shutdown_lock: - _shutdown = True - items = list(_threads_queues.items()) - for t, q in items: - q.put(None) - for t, q in items: - t.join() - -# Register for `_python_exit()` to be called just before joining all -# non-daemon threads. This is used instead of `atexit.register()` for -# compatibility with subinterpreters, which no longer support daemon threads. -# See bpo-39812 for context. 
-threading._register_atexit(_python_exit) - -# At fork, reinitialize the `_global_shutdown_lock` lock in the child process -if hasattr(os, 'register_at_fork'): - os.register_at_fork(before=_global_shutdown_lock.acquire, - after_in_child=_global_shutdown_lock._at_fork_reinit, - after_in_parent=_global_shutdown_lock.release) - - -class _WorkItem(object): - def __init__(self, future, fn, args, kwargs): - self.future = future - self.fn = fn - self.args = args - self.kwargs = kwargs - - def run(self): - if not self.future.set_running_or_notify_cancel(): - return - - try: - result = self.fn(*self.args, **self.kwargs) - except BaseException as exc: - self.future.set_exception(exc) - # Break a reference cycle with the exception 'exc' - self = None - else: - self.future.set_result(result) - - __class_getitem__ = classmethod(types.GenericAlias) - - -def _worker(executor_reference, work_queue, initializer, initargs): - if initializer is not None: - try: - initializer(*initargs) - except BaseException: - _base.LOGGER.critical('Exception in initializer:', exc_info=True) - executor = executor_reference() - if executor is not None: - executor._initializer_failed() - return - try: - while True: - work_item = work_queue.get(block=True) - if work_item is not None: - work_item.run() - # Delete references to object. See issue16284 - del work_item - - # attempt to increment idle count - executor = executor_reference() - if executor is not None: - executor._idle_semaphore.release() - del executor - continue - - executor = executor_reference() - # Exit if: - # - The interpreter is shutting down OR - # - The executor that owns the worker has been collected OR - # - The executor that owns the worker has been shutdown. - if _shutdown or executor is None or executor._shutdown: - # Flag the executor as shutting down as early as possible if it - # is not gc-ed yet. - if executor is not None: - executor._shutdown = True - # Notice other workers - work_queue.put(None) - return - del executor - except BaseException: - _base.LOGGER.critical('Exception in worker', exc_info=True) - - -class BrokenThreadPool(_base.BrokenExecutor): - """ - Raised when a worker thread in a ThreadPoolExecutor failed initializing. - """ - - -class ThreadPoolExecutor(_base.Executor): - - # Used to assign unique thread names when thread_name_prefix is not supplied. - _counter = itertools.count().__next__ - - def __init__(self, max_workers=None, thread_name_prefix='', - initializer=None, initargs=()): - """Initializes a new ThreadPoolExecutor instance. - - Args: - max_workers: The maximum number of threads that can be used to - execute the given calls. - thread_name_prefix: An optional name prefix to give our threads. - initializer: A callable used to initialize worker threads. - initargs: A tuple of arguments to pass to the initializer. - """ - if max_workers is None: - # ThreadPoolExecutor is often used to: - # * CPU bound task which releases GIL - # * I/O bound task (which releases GIL, of course) - # - # We use cpu_count + 4 for both types of tasks. - # But we limit it to 32 to avoid consuming surprisingly large resource - # on many core machine. 
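For reference, a minimal usage sketch of the `ThreadPoolExecutor` constructor documented above; the initializer, its argument, and the worker function are assumptions:

```python
import threading
from concurrent.futures import ThreadPoolExecutor

def _init_thread(tag):
    # Hypothetical per-thread setup, e.g. seeding thread-local state.
    print(f"{threading.current_thread().name} initialized with {tag}")

def work(n):
    return n * 2

with ThreadPoolExecutor(max_workers=8,
                        thread_name_prefix="demo",
                        initializer=_init_thread,
                        initargs=("cfg",)) as executor:
    futures = [executor.submit(work, i) for i in range(4)]
    print([f.result() for f in futures])  # [0, 2, 4, 6]
```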
- max_workers = min(32, (os.cpu_count() or 1) + 4) - if max_workers <= 0: - raise ValueError("max_workers must be greater than 0") - - if initializer is not None and not callable(initializer): - raise TypeError("initializer must be a callable") - - self._max_workers = max_workers - self._work_queue = queue.SimpleQueue() - self._idle_semaphore = threading.Semaphore(0) - self._threads = set() - self._broken = False - self._shutdown = False - self._shutdown_lock = threading.Lock() - self._thread_name_prefix = (thread_name_prefix or - ("ThreadPoolExecutor-%d" % self._counter())) - self._initializer = initializer - self._initargs = initargs - - def submit(self, fn, /, *args, **kwargs): - with self._shutdown_lock, _global_shutdown_lock: - if self._broken: - raise BrokenThreadPool(self._broken) - - if self._shutdown: - raise RuntimeError('cannot schedule new futures after shutdown') - if _shutdown: - raise RuntimeError('cannot schedule new futures after ' - 'interpreter shutdown') - - f = _base.Future() - w = _WorkItem(f, fn, args, kwargs) - - self._work_queue.put(w) - self._adjust_thread_count() - return f - submit.__doc__ = _base.Executor.submit.__doc__ - - def _adjust_thread_count(self): - # if idle threads are available, don't spin new threads - if self._idle_semaphore.acquire(timeout=0): - return - - # When the executor gets lost, the weakref callback will wake up - # the worker threads. - def weakref_cb(_, q=self._work_queue): - q.put(None) - - num_threads = len(self._threads) - if num_threads < self._max_workers: - thread_name = '%s_%d' % (self._thread_name_prefix or self, - num_threads) - t = threading.Thread(name=thread_name, target=_worker, - args=(weakref.ref(self, weakref_cb), - self._work_queue, - self._initializer, - self._initargs)) - t.start() - self._threads.add(t) - _threads_queues[t] = self._work_queue - - def _initializer_failed(self): - with self._shutdown_lock: - self._broken = ('A thread initializer failed, the thread pool ' - 'is not usable anymore') - # Drain work queue and mark pending futures failed - while True: - try: - work_item = self._work_queue.get_nowait() - except queue.Empty: - break - if work_item is not None: - work_item.future.set_exception(BrokenThreadPool(self._broken)) - - def shutdown(self, wait=True, *, cancel_futures=False): - with self._shutdown_lock: - self._shutdown = True - if cancel_futures: - # Drain all work items from the queue, and then cancel their - # associated futures. - while True: - try: - work_item = self._work_queue.get_nowait() - except queue.Empty: - break - if work_item is not None: - work_item.future.cancel() - - # Send a wake-up to prevent threads calling - # _work_queue.get(block=True) from permanently blocking. - self._work_queue.put(None) - if wait: - for t in self._threads: - t.join() - shutdown.__doc__ = _base.Executor.shutdown.__doc__ diff --git a/python/Lib/configparser.py b/python/Lib/configparser.py deleted file mode 100644 index 5e72ad1..0000000 --- a/python/Lib/configparser.py +++ /dev/null @@ -1,1381 +0,0 @@ -"""Configuration file parser. - -A configuration file consists of sections, lead by a "[section]" header, -and followed by "name: value" entries, with continuations and such in -the style of RFC 822. - -Intrinsic defaults can be specified by passing them into the -ConfigParser constructor as a dictionary. - -class: - -ConfigParser -- responsible for parsing a list of - configuration files, and managing the parsed database. 
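As a quick orientation for the parser described in this docstring, a minimal round-trip sketch with illustrative section and option names:

```python
import configparser

parser = configparser.ConfigParser()
parser.read_string("""
[server]
host = example.org
port = 8080
""")

print(parser.sections())                 # ['server']
print(parser["server"]["host"])          # example.org
print(parser.getint("server", "port"))   # 8080
```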
- - methods: - - __init__(defaults=None, dict_type=_default_dict, allow_no_value=False, - delimiters=('=', ':'), comment_prefixes=('#', ';'), - inline_comment_prefixes=None, strict=True, - empty_lines_in_values=True, default_section='DEFAULT', - interpolation=, converters=): - Create the parser. When `defaults' is given, it is initialized into the - dictionary or intrinsic defaults. The keys must be strings, the values - must be appropriate for %()s string interpolation. - - When `dict_type' is given, it will be used to create the dictionary - objects for the list of sections, for the options within a section, and - for the default values. - - When `delimiters' is given, it will be used as the set of substrings - that divide keys from values. - - When `comment_prefixes' is given, it will be used as the set of - substrings that prefix comments in empty lines. Comments can be - indented. - - When `inline_comment_prefixes' is given, it will be used as the set of - substrings that prefix comments in non-empty lines. - - When `strict` is True, the parser won't allow for any section or option - duplicates while reading from a single source (file, string or - dictionary). Default is True. - - When `empty_lines_in_values' is False (default: True), each empty line - marks the end of an option. Otherwise, internal empty lines of - a multiline option are kept as part of the value. - - When `allow_no_value' is True (default: False), options without - values are accepted; the value presented for these is None. - - When `default_section' is given, the name of the special section is - named accordingly. By default it is called ``"DEFAULT"`` but this can - be customized to point to any other valid section name. Its current - value can be retrieved using the ``parser_instance.default_section`` - attribute and may be modified at runtime. - - When `interpolation` is given, it should be an Interpolation subclass - instance. It will be used as the handler for option value - pre-processing when using getters. RawConfigParser objects don't do - any sort of interpolation, whereas ConfigParser uses an instance of - BasicInterpolation. The library also provides a ``zc.buildbot`` - inspired ExtendedInterpolation implementation. - - When `converters` is given, it should be a dictionary where each key - represents the name of a type converter and each value is a callable - implementing the conversion from string to the desired datatype. Every - converter gets its corresponding get*() method on the parser object and - section proxies. - - sections() - Return all the configuration section names, sans DEFAULT. - - has_section(section) - Return whether the given section exists. - - has_option(section, option) - Return whether the given option exists in the given section. - - options(section) - Return list of configuration options for the named section. - - read(filenames, encoding=None) - Read and parse the iterable of named configuration files, given by - name. A single filename is also allowed. Non-existing files - are ignored. Return list of successfully read files. - - read_file(f, filename=None) - Read and parse one configuration file, given as a file object. - The filename defaults to f.name; it is only used in error - messages (if f has no `name' attribute, the string `' is used). - - read_string(string) - Read configuration from a given string. - - read_dict(dictionary) - Read configuration from a dictionary. 
Keys are section names, - values are dictionaries with keys and values that should be present - in the section. If the used dictionary type preserves order, sections - and their keys will be added in order. Values are automatically - converted to strings. - - get(section, option, raw=False, vars=None, fallback=_UNSET) - Return a string value for the named option. All % interpolations are - expanded in the return values, based on the defaults passed into the - constructor and the DEFAULT section. Additional substitutions may be - provided using the `vars' argument, which must be a dictionary whose - contents override any pre-existing defaults. If `option' is a key in - `vars', the value from `vars' is used. - - getint(section, options, raw=False, vars=None, fallback=_UNSET) - Like get(), but convert value to an integer. - - getfloat(section, options, raw=False, vars=None, fallback=_UNSET) - Like get(), but convert value to a float. - - getboolean(section, options, raw=False, vars=None, fallback=_UNSET) - Like get(), but convert value to a boolean (currently case - insensitively defined as 0, false, no, off for False, and 1, true, - yes, on for True). Returns False or True. - - items(section=_UNSET, raw=False, vars=None) - If section is given, return a list of tuples with (name, value) for - each option in the section. Otherwise, return a list of tuples with - (section_name, section_proxy) for each section, including DEFAULTSECT. - - remove_section(section) - Remove the given file section and all its options. - - remove_option(section, option) - Remove the given option from the given section. - - set(section, option, value) - Set the given option. - - write(fp, space_around_delimiters=True) - Write the configuration state in .ini format. If - `space_around_delimiters' is True (the default), delimiters - between keys and values are surrounded by spaces. -""" - -from collections.abc import MutableMapping -from collections import ChainMap as _ChainMap -import functools -import io -import itertools -import os -import re -import sys -import warnings - -__all__ = ["NoSectionError", "DuplicateOptionError", "DuplicateSectionError", - "NoOptionError", "InterpolationError", "InterpolationDepthError", - "InterpolationMissingOptionError", "InterpolationSyntaxError", - "ParsingError", "MissingSectionHeaderError", - "ConfigParser", "SafeConfigParser", "RawConfigParser", - "Interpolation", "BasicInterpolation", "ExtendedInterpolation", - "LegacyInterpolation", "SectionProxy", "ConverterMapping", - "DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"] - -_default_dict = dict -DEFAULTSECT = "DEFAULT" - -MAX_INTERPOLATION_DEPTH = 10 - - - -# exception classes -class Error(Exception): - """Base class for ConfigParser exceptions.""" - - def __init__(self, msg=''): - self.message = msg - Exception.__init__(self, msg) - - def __repr__(self): - return self.message - - __str__ = __repr__ - - -class NoSectionError(Error): - """Raised when no section matches a requested option.""" - - def __init__(self, section): - Error.__init__(self, 'No section: %r' % (section,)) - self.section = section - self.args = (section, ) - - -class DuplicateSectionError(Error): - """Raised when a section is repeated in an input source. - - Possible repetitions that raise this exception are: multiple creation - using the API or in strict parsers when a section is found more than once - in a single input file, string or dictionary. 
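A small sketch of the strict-mode behaviour just described, using an illustrative input with a repeated section:

```python
import configparser

parser = configparser.ConfigParser(strict=True)  # strict is the default
try:
    parser.read_string("[db]\nuser = a\n\n[db]\nuser = b\n")
except configparser.DuplicateSectionError as exc:
    print(exc)  # reports that section 'db' already exists in this source
```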
- """ - - def __init__(self, section, source=None, lineno=None): - msg = [repr(section), " already exists"] - if source is not None: - message = ["While reading from ", repr(source)] - if lineno is not None: - message.append(" [line {0:2d}]".format(lineno)) - message.append(": section ") - message.extend(msg) - msg = message - else: - msg.insert(0, "Section ") - Error.__init__(self, "".join(msg)) - self.section = section - self.source = source - self.lineno = lineno - self.args = (section, source, lineno) - - -class DuplicateOptionError(Error): - """Raised by strict parsers when an option is repeated in an input source. - - Current implementation raises this exception only when an option is found - more than once in a single file, string or dictionary. - """ - - def __init__(self, section, option, source=None, lineno=None): - msg = [repr(option), " in section ", repr(section), - " already exists"] - if source is not None: - message = ["While reading from ", repr(source)] - if lineno is not None: - message.append(" [line {0:2d}]".format(lineno)) - message.append(": option ") - message.extend(msg) - msg = message - else: - msg.insert(0, "Option ") - Error.__init__(self, "".join(msg)) - self.section = section - self.option = option - self.source = source - self.lineno = lineno - self.args = (section, option, source, lineno) - - -class NoOptionError(Error): - """A requested option was not found.""" - - def __init__(self, option, section): - Error.__init__(self, "No option %r in section: %r" % - (option, section)) - self.option = option - self.section = section - self.args = (option, section) - - -class InterpolationError(Error): - """Base class for interpolation-related exceptions.""" - - def __init__(self, option, section, msg): - Error.__init__(self, msg) - self.option = option - self.section = section - self.args = (option, section, msg) - - -class InterpolationMissingOptionError(InterpolationError): - """A string substitution required a setting which was not available.""" - - def __init__(self, option, section, rawval, reference): - msg = ("Bad value substitution: option {!r} in section {!r} contains " - "an interpolation key {!r} which is not a valid option name. " - "Raw value: {!r}".format(option, section, reference, rawval)) - InterpolationError.__init__(self, option, section, msg) - self.reference = reference - self.args = (option, section, rawval, reference) - - -class InterpolationSyntaxError(InterpolationError): - """Raised when the source text contains invalid syntax. - - Current implementation raises this exception when the source text into - which substitutions are made does not conform to the required syntax. - """ - - -class InterpolationDepthError(InterpolationError): - """Raised when substitutions are nested too deeply.""" - - def __init__(self, option, section, rawval): - msg = ("Recursion limit exceeded in value substitution: option {!r} " - "in section {!r} contains an interpolation key which " - "cannot be substituted in {} steps. Raw value: {!r}" - "".format(option, section, MAX_INTERPOLATION_DEPTH, - rawval)) - InterpolationError.__init__(self, option, section, msg) - self.args = (option, section, rawval) - - -class ParsingError(Error): - """Raised when a configuration file does not follow legal syntax.""" - - def __init__(self, source=None, filename=None): - # Exactly one of `source'/`filename' arguments has to be given. - # `filename' kept for compatibility. - if filename and source: - raise ValueError("Cannot specify both `filename' and `source'. 
" - "Use `source'.") - elif not filename and not source: - raise ValueError("Required argument `source' not given.") - elif filename: - source = filename - Error.__init__(self, 'Source contains parsing errors: %r' % source) - self.source = source - self.errors = [] - self.args = (source, ) - - @property - def filename(self): - """Deprecated, use `source'.""" - warnings.warn( - "The 'filename' attribute will be removed in Python 3.12. " - "Use 'source' instead.", - DeprecationWarning, stacklevel=2 - ) - return self.source - - @filename.setter - def filename(self, value): - """Deprecated, user `source'.""" - warnings.warn( - "The 'filename' attribute will be removed in Python 3.12. " - "Use 'source' instead.", - DeprecationWarning, stacklevel=2 - ) - self.source = value - - def append(self, lineno, line): - self.errors.append((lineno, line)) - self.message += '\n\t[line %2d]: %s' % (lineno, line) - - -class MissingSectionHeaderError(ParsingError): - """Raised when a key-value pair is found before any section header.""" - - def __init__(self, filename, lineno, line): - Error.__init__( - self, - 'File contains no section headers.\nfile: %r, line: %d\n%r' % - (filename, lineno, line)) - self.source = filename - self.lineno = lineno - self.line = line - self.args = (filename, lineno, line) - - -# Used in parser getters to indicate the default behaviour when a specific -# option is not found it to raise an exception. Created to enable `None' as -# a valid fallback value. -_UNSET = object() - - -class Interpolation: - """Dummy interpolation that passes the value through with no changes.""" - - def before_get(self, parser, section, option, value, defaults): - return value - - def before_set(self, parser, section, option, value): - return value - - def before_read(self, parser, section, option, value): - return value - - def before_write(self, parser, section, option, value): - return value - - -class BasicInterpolation(Interpolation): - """Interpolation as implemented in the classic ConfigParser. - - The option values can contain format strings which refer to other values in - the same section, or values in the special default section. - - For example: - - something: %(dir)s/whatever - - would resolve the "%(dir)s" to the value of dir. All reference - expansions are done late, on demand. If a user needs to use a bare % in - a configuration file, she can escape it by writing %%. 
Other % usage - is considered a user error and raises `InterpolationSyntaxError'.""" - - _KEYCRE = re.compile(r"%\(([^)]+)\)s") - - def before_get(self, parser, section, option, value, defaults): - L = [] - self._interpolate_some(parser, option, L, value, section, defaults, 1) - return ''.join(L) - - def before_set(self, parser, section, option, value): - tmp_value = value.replace('%%', '') # escaped percent signs - tmp_value = self._KEYCRE.sub('', tmp_value) # valid syntax - if '%' in tmp_value: - raise ValueError("invalid interpolation syntax in %r at " - "position %d" % (value, tmp_value.find('%'))) - return value - - def _interpolate_some(self, parser, option, accum, rest, section, map, - depth): - rawval = parser.get(section, option, raw=True, fallback=rest) - if depth > MAX_INTERPOLATION_DEPTH: - raise InterpolationDepthError(option, section, rawval) - while rest: - p = rest.find("%") - if p < 0: - accum.append(rest) - return - if p > 0: - accum.append(rest[:p]) - rest = rest[p:] - # p is no longer used - c = rest[1:2] - if c == "%": - accum.append("%") - rest = rest[2:] - elif c == "(": - m = self._KEYCRE.match(rest) - if m is None: - raise InterpolationSyntaxError(option, section, - "bad interpolation variable reference %r" % rest) - var = parser.optionxform(m.group(1)) - rest = rest[m.end():] - try: - v = map[var] - except KeyError: - raise InterpolationMissingOptionError( - option, section, rawval, var) from None - if "%" in v: - self._interpolate_some(parser, option, accum, v, - section, map, depth + 1) - else: - accum.append(v) - else: - raise InterpolationSyntaxError( - option, section, - "'%%' must be followed by '%%' or '(', " - "found: %r" % (rest,)) - - -class ExtendedInterpolation(Interpolation): - """Advanced variant of interpolation, supports the syntax used by - `zc.buildout'. 
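For comparison, a sketch of the `${...}` syntax this class enables, including a cross-section reference; the section and option names are illustrative:

```python
import configparser

parser = configparser.ConfigParser(
    interpolation=configparser.ExtendedInterpolation())
parser.read_string("""
[common]
root = /srv/app

[paths]
logs = ${common:root}/logs
""")

print(parser["paths"]["logs"])  # /srv/app/logs
```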
Enables interpolation between sections.""" - - _KEYCRE = re.compile(r"\$\{([^}]+)\}") - - def before_get(self, parser, section, option, value, defaults): - L = [] - self._interpolate_some(parser, option, L, value, section, defaults, 1) - return ''.join(L) - - def before_set(self, parser, section, option, value): - tmp_value = value.replace('$$', '') # escaped dollar signs - tmp_value = self._KEYCRE.sub('', tmp_value) # valid syntax - if '$' in tmp_value: - raise ValueError("invalid interpolation syntax in %r at " - "position %d" % (value, tmp_value.find('$'))) - return value - - def _interpolate_some(self, parser, option, accum, rest, section, map, - depth): - rawval = parser.get(section, option, raw=True, fallback=rest) - if depth > MAX_INTERPOLATION_DEPTH: - raise InterpolationDepthError(option, section, rawval) - while rest: - p = rest.find("$") - if p < 0: - accum.append(rest) - return - if p > 0: - accum.append(rest[:p]) - rest = rest[p:] - # p is no longer used - c = rest[1:2] - if c == "$": - accum.append("$") - rest = rest[2:] - elif c == "{": - m = self._KEYCRE.match(rest) - if m is None: - raise InterpolationSyntaxError(option, section, - "bad interpolation variable reference %r" % rest) - path = m.group(1).split(':') - rest = rest[m.end():] - sect = section - opt = option - try: - if len(path) == 1: - opt = parser.optionxform(path[0]) - v = map[opt] - elif len(path) == 2: - sect = path[0] - opt = parser.optionxform(path[1]) - v = parser.get(sect, opt, raw=True) - else: - raise InterpolationSyntaxError( - option, section, - "More than one ':' found: %r" % (rest,)) - except (KeyError, NoSectionError, NoOptionError): - raise InterpolationMissingOptionError( - option, section, rawval, ":".join(path)) from None - if "$" in v: - self._interpolate_some(parser, opt, accum, v, sect, - dict(parser.items(sect, raw=True)), - depth + 1) - else: - accum.append(v) - else: - raise InterpolationSyntaxError( - option, section, - "'$' must be followed by '$' or '{', " - "found: %r" % (rest,)) - - -class LegacyInterpolation(Interpolation): - """Deprecated interpolation used in old versions of ConfigParser. - Use BasicInterpolation or ExtendedInterpolation instead.""" - - _KEYCRE = re.compile(r"%\(([^)]*)\)s|.") - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - warnings.warn( - "LegacyInterpolation has been deprecated since Python 3.2 " - "and will be removed from the configparser module in Python 3.13. 
" - "Use BasicInterpolation or ExtendedInterpolation instead.", - DeprecationWarning, stacklevel=2 - ) - - def before_get(self, parser, section, option, value, vars): - rawval = value - depth = MAX_INTERPOLATION_DEPTH - while depth: # Loop through this until it's done - depth -= 1 - if value and "%(" in value: - replace = functools.partial(self._interpolation_replace, - parser=parser) - value = self._KEYCRE.sub(replace, value) - try: - value = value % vars - except KeyError as e: - raise InterpolationMissingOptionError( - option, section, rawval, e.args[0]) from None - else: - break - if value and "%(" in value: - raise InterpolationDepthError(option, section, rawval) - return value - - def before_set(self, parser, section, option, value): - return value - - @staticmethod - def _interpolation_replace(match, parser): - s = match.group(1) - if s is None: - return match.group() - else: - return "%%(%s)s" % parser.optionxform(s) - - -class RawConfigParser(MutableMapping): - """ConfigParser that does not do interpolation.""" - - # Regular expressions for parsing section headers and options - _SECT_TMPL = r""" - \[ # [ - (?P
<header>.+) # very permissive! - \] # ] - """ - _OPT_TMPL = r""" - (?P