diff --git a/.coverage b/.coverage
new file mode 100644
index 00000000..6d614e1b
--- /dev/null
+++ b/.coverage
@@ -0,0 +1 @@
+!coverage.py: This is a private format, don't read it directly!{"lines":{"C:\\Users\\Maggi.DESKTOP-90M23NT\\OneDrive\\Desktop\\ADC2\\STORE-MANAGER-API\\app\\api\\v1\\views.py":[1,2,3,6,7,9,10,11,35,39,40,45,47,60,63,65,12,13,14,15,16,17,19,20,22,24,26,28,30,33,36,41,42,43,48,49,50,51,52,54,56,57,61,66,67]}}
\ No newline at end of file
diff --git a/.env b/.env
new file mode 100644
index 00000000..59c1f89e
--- /dev/null
+++ b/.env
@@ -0,0 +1,6 @@
+source venv/scripts/activate
+export FLASK_APP="run.py"
+export FLASK_ENV="development"
+export APP_SETTINGS="development"
+
+
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..a20b38fb
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,11 @@
+!.codeclimate.yml
+!.coveragerc
+!.gitignore
+!.pylintrc
+!.travis*
+!.env
+*.log
+*.pyc
+
+__pycache__/
+venv/*
\ No newline at end of file
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 00000000..bbfc18f3
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,13 @@
+language: python
+python:
+  - "3.6"
+services:
+  - postgresql
+install:
+  - pip install -r requirements.txt
+  - pip install coveralls
+script:
+  - pytest
+  - coverage run --source=app.api.v1.views -m pytest && coverage report
+after_success:
+  - coveralls
\ No newline at end of file
diff --git a/.vscode/settings.json b/.vscode/settings.json
new file mode 100644
index 00000000..09102c24
--- /dev/null
+++ b/.vscode/settings.json
@@ -0,0 +1,3 @@
+{
+    "python.pythonPath": "venv\\Scripts\\python.exe"
+}
\ No newline at end of file
diff --git a/Procfile b/Procfile
new file mode 100644
index 00000000..80a980e8
--- /dev/null
+++ b/Procfile
@@ -0,0 +1 @@
+web: gunicorn run:app
\ No newline at end of file
diff --git a/README.md b/README.md
index e738bd8c..cb8c9df7 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,43 @@
 # STORE-MANAGER-API
 Api endpoints for inventory management in store manager
+
+[![Build Status](https://travis-ci.com/MaggieKimani1/STORE-MANAGER-API.svg?branch=ch-add-travis-161340662)](https://travis-ci.com/MaggieKimani1/STORE-MANAGER-API)
+[![Coverage Status](https://coveralls.io/repos/github/MaggieKimani1/STORE-MANAGER-API/badge.svg?branch=ch-add-travis-161340662)](https://coveralls.io/github/MaggieKimani1/STORE-MANAGER-API?branch=ch-add-travis-161340662)
+[![Maintainability](https://api.codeclimate.com/v1/badges/a99a88d28ad37a79dbf6/maintainability)](https://codeclimate.com/github/codeclimate/codeclimate/maintainability)
+
+## Features
+- Admin can add a product
+- Admin/store attendant can get all products
+- Admin/store attendant can get a specific product
+- Store attendant can add a sale order
+- Admin can get all sale order records
+
+### Endpoints
+
+| HTTP Method | Endpoint                            | Function            |
+| ----------- | ----------------------------------- | ------------------- |
+| GET         | /api/v1/products                    | Get all products    |
+| GET         | /api/v1/products/<product_id>       | Get one product     |
+| GET         | /api/v1/sales                       | Get all sales       |
+| GET         | /api/v1/sales/<sale_id>             | Get one sale record |
+| POST        | /api/v1/products                    | Create a new product |
+| POST        | /api/v1/products/<product_id>/sales | Create a new sale   |
+
+### Pivotal Tracker Board
+https://www.pivotaltracker.com/n/projects/2204195
+
+### Heroku Link
+https://maggie1.herokuapp.com/
+
+### Prerequisites
+- pip
+- virtualenv
+- Python 3 or Python 2.7
+
+## Setting Up Locally
+- Clone the repo: `git clone https://github.com/MaggieKimani1/STORE-MANAGER-API`
+- Create a virtual environment and activate it: `virtualenv venv`
+- Install the dependencies: `pip install -r requirements.txt`
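For quick manual verification of the endpoints documented in the README above, a small client sketch can help. This is a minimal example, assuming the API is running locally on Flask's default port (`http://127.0.0.1:5000`); the `requests` package it uses is already pinned in requirements.txt, and the payload fields simply mirror the table and the views added in this diff.

```python
import requests

BASE = "http://127.0.0.1:5000/api/v1"  # assumes a local `python run.py`

# Create a product (POST /api/v1/products)
new_product = {"product_name": "nivea", "price": 5000,
               "quantity": 40, "category": "body lotion"}
resp = requests.post(BASE + "/products", json=new_product)
print(resp.status_code, resp.json())   # expected: 201 and {"message": "success", ...}

# List all products (GET /api/v1/products)
print(requests.get(BASE + "/products").json())

# Fetch a single product by id (GET /api/v1/products/<product_id>)
print(requests.get(BASE + "/products/1").json())
```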
diff --git a/__init__.py b/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/__pycache__/__init__.cpython-37.pyc b/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 00000000..4b77ff5e
Binary files /dev/null and b/__pycache__/__init__.cpython-37.pyc differ
diff --git a/app/__init__.py b/app/__init__.py
new file mode 100644
index 00000000..13149681
--- /dev/null
+++ b/app/__init__.py
@@ -0,0 +1,17 @@
+from flask import Flask, Blueprint
+from flask_restful import Api, Resource
+from instance.config import app_config
+
+
+def create_app(config_name):
+    '''Load the right configuration from config.py given a config name.'''
+    app = Flask(__name__, instance_relative_config=True)
+
+    app.config.from_object(app_config[config_name])
+
+    from .api.v1 import api_v1 as v1
+    app.register_blueprint(v1)
+
+    return app
\ No newline at end of file
diff --git a/app/__pycache__/__init__.cpython-37.pyc b/app/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 00000000..7b4a93ab
Binary files /dev/null and b/app/__pycache__/__init__.cpython-37.pyc differ
diff --git a/app/api/__init__.py b/app/api/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/app/api/__pycache__/__init__.cpython-37.pyc b/app/api/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 00000000..5e1b17e5
Binary files /dev/null and b/app/api/__pycache__/__init__.cpython-37.pyc differ
diff --git a/app/api/v1/__init__.py b/app/api/v1/__init__.py
new file mode 100644
index 00000000..1ccf3431
--- /dev/null
+++ b/app/api/v1/__init__.py
@@ -0,0 +1,14 @@
+from flask import Blueprint
+from flask_restful import Api, Resource
+from .views import All_Products_Endpoint, One_Product_Endpoint, All_Sales_Endpoint, One_Sale_Endpoint
+
+
+api_v1 = Blueprint('api_v1', __name__, url_prefix='/api/v1')
+api = Api(api_v1)
+
+api.add_resource(All_Products_Endpoint, '/products')
+api.add_resource(One_Product_Endpoint, '/products/<int:product_id>')
+api.add_resource(All_Sales_Endpoint, '/products/<int:product_id>/sales')
+api.add_resource(One_Sale_Endpoint, '/sales/<int:sale_id>')
diff --git a/app/api/v1/__pycache__/__init__.cpython-37.pyc b/app/api/v1/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 00000000..023498b0
Binary files /dev/null and b/app/api/v1/__pycache__/__init__.cpython-37.pyc differ
diff --git a/app/api/v1/__pycache__/models.cpython-37.pyc b/app/api/v1/__pycache__/models.cpython-37.pyc
new file mode 100644
index 00000000..7a468fcc
Binary files /dev/null and b/app/api/v1/__pycache__/models.cpython-37.pyc differ
diff --git a/app/api/v1/__pycache__/views.cpython-37.pyc b/app/api/v1/__pycache__/views.cpython-37.pyc
new file mode 100644
index 00000000..61bb9a61
Binary files /dev/null and b/app/api/v1/__pycache__/views.cpython-37.pyc differ
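The application factory in app/__init__.py plus the blueprint in app/api/v1/__init__.py is all that is needed to stand the API up in code. The sketch below is one way to exercise that wiring without starting a real server, using Flask's built-in test client; it only assumes the package layout shown in this diff and the `'development'` key defined in instance/config.py.

```python
from app import create_app

# Build the app from the factory and use Flask's test client
# instead of running a server.
app = create_app('development')

with app.test_client() as client:
    # The blueprint is mounted under /api/v1, so resources live there.
    response = client.get('/api/v1/products')
    print(response.status_code)   # 200 from All_Products_Endpoint.get()
    print(response.get_json())    # {"message": "success", "products": []} on a fresh start
```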
diff --git a/app/api/v1/models.py b/app/api/v1/models.py
new file mode 100644
index 00000000..1ae3f11a
--- /dev/null
+++ b/app/api/v1/models.py
@@ -0,0 +1,103 @@
+all_Products = []
+all_Sales = []
+
+All_Users = {}
+user_details = {}
+
+
+class Products(object):
+    all_Products = []
+
+    def __init__(self, product_name, price, quantity, category):
+        '''constructor method that is called when an object is created'''
+        self.product_name = product_name
+        self.product_id = len(Products.all_Products) + 1
+        self.price = price
+        self.quantity = quantity
+        self.category = category
+
+    def create_new_product(self, product_name, product_price, quantity, category):
+        for product in Products.all_Products:
+            if self.product_name == product["product_name"]:
+                return "message The product you entered already exists"
+
+        product_Description = {}
+        product_Description["product_id"] = self.product_id
+        product_Description["product_name"] = product_name
+        product_Description["price"] = product_price
+        product_Description["quantity"] = quantity
+        product_Description["category"] = category
+
+        Products.all_Products.append(product_Description)
+        return Products.all_Products
+
+    @classmethod
+    def get_all_Products(cls):
+        return cls.all_Products
+
+
+class Sales(object):
+    all_Sales = []
+
+    def __init__(self, product_name, product_price):
+        self.product_name = product_name
+        self.product_price = product_price
+        self.sale_id = len(Sales.all_Sales) + 1
+
+    def create_new_sale_record(self, product_name, product_price, product_id):
+        new_Sale = {}
+        new_Sale["product_id"] = product_id
+        new_Sale["product_name"] = product_name
+        new_Sale["price"] = product_price
+        new_Sale["sale_id"] = self.sale_id
+
+        Sales.all_Sales.append(new_Sale)
+        return Sales.all_Sales
+
+    # for prod in all_Products:
+    #     if self.product_id == prod["product_id"]:
+    #         new_Sale = {
+    #             self.sale_id: prod
+    #         }
+    #         all_Sales.append(new_Sale)
+    #         return "success"
+
+    def get_all_Sales(self):
+        return Sales.all_Sales
+
+    def get_one_sale(self, sale_id):
+        for sale in Sales.all_Sales:
+            if sale_id == sale["sale_id"]:
+                return sale
+        return "message The sale doesn't exist"
+
+# class Users(object):
+#     '''class for handling user data'''
+
+#     def __init__(self, username, email, password):
+#         self.username = username
+#         self.email = email
+#         self.password = password
+
+#     def register(self):
+#         if email in All_Users:
+#             return "message already exists"
+#         else:
+#             user_details["username"] = self.username
+#             user_details["email"] = self.email
+#             user_details["password"] = self.password
+
+#             All_Users.append(user_details)
+#             return "message successfully registered"
+
+#     def get_one_user(self):
+#         for user in All_Users:
+#             if email in All_Users:
+#                 return All_Users[email]
+#         return "user not found"
+
+#     def get_all_users(self):
+#         return All_Users
\ No newline at end of file
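Because storage is just the in-memory class attributes in models.py, the model layer can be exercised on its own, without Flask. A short sketch, using only the names defined in models.py as written in this diff; the ids shown assume a fresh interpreter, since the lists live only for the lifetime of the process.

```python
from app.api.v1.models import Products, Sales

# Products are kept in the Products.all_Products class-level list.
product = Products("nivea", 5000, 40, "body lotion")
catalogue = product.create_new_product("nivea", 5000, 40, "body lotion")
print(catalogue)
# [{'product_id': 1, 'product_name': 'nivea', 'price': 5000,
#   'quantity': 40, 'category': 'body lotion'}]

# Sales reference an existing product_id and live in Sales.all_Sales.
sale = Sales("nivea", 5000)
records = sale.create_new_sale_record("nivea", 5000, product_id=1)
print(records)
# [{'product_id': 1, 'product_name': 'nivea', 'price': 5000, 'sale_id': 1}]
```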
diff --git a/app/api/v1/views.py b/app/api/v1/views.py
new file mode 100644
index 00000000..097a4356
--- /dev/null
+++ b/app/api/v1/views.py
@@ -0,0 +1,106 @@
+from flask import Flask, Blueprint, request, jsonify, make_response
+from flask_restful import Api, Resource
+from .models import all_Products, all_Sales, Products, Sales
+
+
+app = Flask(__name__)
+api = Api(app)
+
+class All_Products_Endpoint(Resource):
+
+    def post(self):
+        '''Creating a new product'''
+        try:
+            data = request.get_json()
+            # product_id = len(all_Products) + 1
+            product_name = data["product_name"]
+            product_price = data["price"]
+            quantity = data["quantity"]
+            category = data["category"]
+
+            # validate the payload before anything is stored
+            if product_name == "" or not product_name:
+                return {"message": "please enter the product_name"}, 400
+            if quantity == "" or not quantity:
+                return {"message": "Please specify a quantity"}, 400
+            if product_price == "" or not product_price:
+                return {"message": "please enter the price"}, 400
+            if category == "" or not category:
+                return {"message": "please enter the category"}, 400
+            if quantity == 0:
+                return {"message": "invalid quantity"}, 400
+
+            product = Products(product_name, product_price, quantity, category)
+            products = product.create_new_product(product_name, product_price, quantity, category)
+
+            # all_Products.append()
+            return make_response(jsonify({"message": "success", "products": products}), 201)
+
+        except (TypeError, KeyError):
+            return make_response(jsonify({"message": "include all details"}), 400)
+
+    def get(self):
+        '''Fetching all products'''
+        products = Products.get_all_Products()
+        return make_response(jsonify({"message": "success", "products": products}), 200)
+
+
+class One_Product_Endpoint(Resource):
+
+    def get(self, product_id):
+        '''Fetching a product'''
+        products = Products.get_all_Products()
+        for product in products:
+            if product["product_id"] == product_id:
+                return product
+        return make_response(jsonify({"message": "no product found"}), 404)
+
+class All_Sales_Endpoint(Resource):
+
+    def post(self, product_id):
+        '''Creating a new sale'''
+        data = request.get_json()  # retrieve the request payload as json
+        product_name = data["product_name"]
+        product_price = data["price"]
+
+        products = Products.get_all_Products()
+        for prod in products:
+            if product_id == prod["product_id"]:
+                # new_Sale = {
+                #     "sale_id": prod
+                # }
+                # all_Sales.append(new_Sale)
+                sale = Sales(product_name, product_price)
+                sales = sale.create_new_sale_record(product_name, product_price, product_id)
+
+                return make_response(jsonify({"message": "successfully sold", "sales": sales}), 201)
+        return make_response(jsonify({"message": "No product to sell"}), 404)
+
+    def get(self, product_id):
+        '''Fetching all sale records'''
+        return make_response(jsonify({"message": "success", "sales": Sales.all_Sales}), 200)
+
+class One_Sale_Endpoint(Resource):
+
+    def get(self, sale_id):
+        '''Fetching a sale record'''
+        for sale in Sales.all_Sales:
+            if sale["sale_id"] == sale_id:
+                return sale
+        return make_response(jsonify({"message": "no sale record found"}), 404)
\ No newline at end of file
diff --git a/app/tests/__init__.py b/app/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/app/tests/__pycache__/__init__.cpython-37.pyc b/app/tests/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 00000000..517c59e4
Binary files /dev/null and b/app/tests/__pycache__/__init__.cpython-37.pyc differ
diff --git a/app/tests/__pycache__/test_products.cpython-27-PYTEST.pyc b/app/tests/__pycache__/test_products.cpython-27-PYTEST.pyc
new file mode 100644
index 00000000..7865603e
Binary files /dev/null and b/app/tests/__pycache__/test_products.cpython-27-PYTEST.pyc differ
diff --git a/app/tests/__pycache__/test_products.cpython-37-PYTEST.pyc b/app/tests/__pycache__/test_products.cpython-37-PYTEST.pyc
new file mode 100644
index 00000000..fca8d6fb
Binary files /dev/null and b/app/tests/__pycache__/test_products.cpython-37-PYTEST.pyc differ
diff --git a/app/tests/__pycache__/test_sales.cpython-27-PYTEST.pyc b/app/tests/__pycache__/test_sales.cpython-27-PYTEST.pyc
new file mode 100644
index 00000000..519a0006
Binary files /dev/null and b/app/tests/__pycache__/test_sales.cpython-27-PYTEST.pyc differ
diff --git a/app/tests/__pycache__/test_sales.cpython-37-PYTEST.pyc b/app/tests/__pycache__/test_sales.cpython-37-PYTEST.pyc
new file mode 100644
index 00000000..da73e0c8
Binary files /dev/null and b/app/tests/__pycache__/test_sales.cpython-37-PYTEST.pyc differ
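CI runs the two unittest modules added below through `pytest` (see .travis.yml). They can also be run with the standard library runner; a minimal sketch, assuming it is executed from the repository root so that the `app` package is importable:

```python
import unittest

from app.tests import test_products, test_sales

# Build one suite out of the two test modules and run it with the
# standard library text runner (CI uses `pytest` instead).
loader = unittest.TestLoader()
suite = unittest.TestSuite([
    loader.loadTestsFromModule(test_products),
    loader.loadTestsFromModule(test_sales),
])
unittest.TextTestRunner(verbosity=2).run(suite)
```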
diff --git a/app/tests/test_products.py b/app/tests/test_products.py
new file mode 100644
index 00000000..a5250ca9
--- /dev/null
+++ b/app/tests/test_products.py
@@ -0,0 +1,37 @@
+import unittest
+import json
+from app import create_app
+import sys
+
+
+class ProductsTestCase(unittest.TestCase):
+    # this class represents the products test case for the client methods
+
+    def setUp(self):
+        # Define test variables and initialize the app.
+        self.app = create_app(config_name="testing")
+        self.client = self.app.test_client()
+
+    # test that a user can get all products with a GET request
+    def test_get_all_Products(self):
+        response = self.client.get('api/v1/products', content_type="application/json")
+        self.assertEqual(response.status_code, 200)
+
+    def test_create_new_product(self):
+        data = {"product_name": "nivea", "price": 5000, "quantity": 40, "category": "body lotion"}
+        response = self.client.post("/api/v1/products", data=json.dumps(data), content_type="application/json")
+        response_data = json.loads(response.data.decode())
+        self.assertEqual(response_data["message"], "success")
+        self.assertEqual(response.status_code, 201)
+
+    def test_get_one_product(self):
+        response = self.client.get('api/v1/products/1', content_type="application/json")
+        response_data = json.loads(response.data.decode())
+        self.assertEqual(response_data["product_id"], 1)
+        self.assertEqual(response.status_code, 200)
+
+
+if __name__ == '__main__':
+    unittest.main()
\ No newline at end of file
diff --git a/app/tests/test_sales.py b/app/tests/test_sales.py
new file mode 100644
index 00000000..bf4be67b
--- /dev/null
+++ b/app/tests/test_sales.py
@@ -0,0 +1,38 @@
+import unittest
+import json
+from app import create_app
+import sys
+
+
+class SalesTestCase(unittest.TestCase):
+    # this class represents the sales test case for the client methods
+
+    def setUp(self):
+        # Define test variables and initialize the app.
+        self.app = create_app(config_name="testing")
+        self.client = self.app.test_client()
+        self.data = {}
+
+    # test that a user can get all sales with a GET request
+    def test_get_all_Sales(self):
+        response = self.client.get('api/v1/sales', content_type="application/json")
+        self.assertEqual(response.status_code, 200)
+
+    def test_create_new_sale(self):
+        data = {"product_id": 1}
+        response = self.client.post("/api/v1/sales", data=json.dumps(data), content_type="application/json")
+        self.assertEqual(response.status_code, 201)
+
+    def test_get_one_sale(self):
+        response = self.client.get('api/v1/sales/1', content_type="application/json")
+        self.assertEqual(response.status_code, 200)
+
+
+if __name__ == '__main__':
+    unittest.main()
\ No newline at end of file
diff --git a/instance/__init__.py b/instance/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/instance/__pycache__/__init__.cpython-37.pyc b/instance/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 00000000..9838a326
Binary files /dev/null and b/instance/__pycache__/__init__.cpython-37.pyc differ
diff --git a/instance/__pycache__/config.cpython-37.pyc b/instance/__pycache__/config.cpython-37.pyc
new file mode 100644
index 00000000..545686d7
Binary files /dev/null and b/instance/__pycache__/config.cpython-37.pyc differ
diff --git a/instance/config.py b/instance/config.py
new file mode 100644
index 00000000..8e841f9c
--- /dev/null
+++ b/instance/config.py
@@ -0,0 +1,32 @@
+class Config(object):
+    """Parent configuration class."""
+    DEBUG = False
+
+
+class DevelopmentConfig(Config):
+    """Configurations for Development."""
+    DEBUG = True
+
+
+class TestingConfig(Config):
+    """Configurations for Testing."""
+    TESTING = True
+    DEBUG = True
+
+
+class StagingConfig(Config):
+    """Configurations for Staging."""
+    DEBUG = True
+
+
+class ProductionConfig(Config):
+    """Configurations for Production."""
+    DEBUG = False
+    TESTING = False
+
+
+app_config = {
+    'development': DevelopmentConfig,
+    'testing': TestingConfig,
+    'staging': StagingConfig,
+    'production': 
ProductionConfig, +} \ No newline at end of file diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 00000000..177e7c54 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,34 @@ +aniso8601==3.0.2 +astroid==2.0.4 +atomicwrites==1.2.1 +attrs==18.2.0 +certifi==2018.10.15 +chardet==3.0.4 +Click==7.0 +colorama==0.4.0 +coverage==4.5.1 +coveralls==1.5.1 +docopt==0.6.2 +Flask==1.0.2 +Flask-RESTful==0.3.6 +gunicorn==19.9.0 +idna==2.7 +isort==4.3.4 +ItsDangerous==1.0.0 +Jinja2==2.10 +lazy-object-proxy==1.3.1 +MarkupSafe==1.0 +mccabe==0.6.1 +more-itertools==4.3.0 +nose==1.3.7 +pep8==1.7.1 +pluggy==0.8.0 +py==1.7.0 +pylint==2.1.1 +pytest==3.9.2 +pytz==2018.6 +requests==2.20.0 +six==1.11.0 +urllib3==1.24 +Werkzeug==0.14.1 +wrapt==1.10.11 diff --git a/run.py b/run.py new file mode 100644 index 00000000..3bbb775b --- /dev/null +++ b/run.py @@ -0,0 +1,12 @@ +'''this file is the entry point to our app''' + +import os +from app import create_app + +config_name = "development" +app = create_app(config_name) +'''we create the app by running create_app function and passing in the config_name''' + + +if __name__ == "__main__": + app.run() diff --git a/venv/Include/Python-ast.h b/venv/Include/Python-ast.h new file mode 100644 index 00000000..8e0f750a --- /dev/null +++ b/venv/Include/Python-ast.h @@ -0,0 +1,637 @@ +/* File automatically generated by Parser/asdl_c.py. */ + +#include "asdl.h" + +typedef struct _mod *mod_ty; + +typedef struct _stmt *stmt_ty; + +typedef struct _expr *expr_ty; + +typedef enum _expr_context { Load=1, Store=2, Del=3, AugLoad=4, AugStore=5, + Param=6 } expr_context_ty; + +typedef struct _slice *slice_ty; + +typedef enum _boolop { And=1, Or=2 } boolop_ty; + +typedef enum _operator { Add=1, Sub=2, Mult=3, MatMult=4, Div=5, Mod=6, Pow=7, + LShift=8, RShift=9, BitOr=10, BitXor=11, BitAnd=12, + FloorDiv=13 } operator_ty; + +typedef enum _unaryop { Invert=1, Not=2, UAdd=3, USub=4 } unaryop_ty; + +typedef enum _cmpop { Eq=1, NotEq=2, Lt=3, LtE=4, Gt=5, GtE=6, Is=7, IsNot=8, + In=9, NotIn=10 } cmpop_ty; + +typedef struct _comprehension *comprehension_ty; + +typedef struct _excepthandler *excepthandler_ty; + +typedef struct _arguments *arguments_ty; + +typedef struct _arg *arg_ty; + +typedef struct _keyword *keyword_ty; + +typedef struct _alias *alias_ty; + +typedef struct _withitem *withitem_ty; + + +enum _mod_kind {Module_kind=1, Interactive_kind=2, Expression_kind=3, + Suite_kind=4}; +struct _mod { + enum _mod_kind kind; + union { + struct { + asdl_seq *body; + } Module; + + struct { + asdl_seq *body; + } Interactive; + + struct { + expr_ty body; + } Expression; + + struct { + asdl_seq *body; + } Suite; + + } v; +}; + +enum _stmt_kind {FunctionDef_kind=1, AsyncFunctionDef_kind=2, ClassDef_kind=3, + Return_kind=4, Delete_kind=5, Assign_kind=6, + AugAssign_kind=7, AnnAssign_kind=8, For_kind=9, + AsyncFor_kind=10, While_kind=11, If_kind=12, With_kind=13, + AsyncWith_kind=14, Raise_kind=15, Try_kind=16, + Assert_kind=17, Import_kind=18, ImportFrom_kind=19, + Global_kind=20, Nonlocal_kind=21, Expr_kind=22, Pass_kind=23, + Break_kind=24, Continue_kind=25}; +struct _stmt { + enum _stmt_kind kind; + union { + struct { + identifier name; + arguments_ty args; + asdl_seq *body; + asdl_seq *decorator_list; + expr_ty returns; + } FunctionDef; + + struct { + identifier name; + arguments_ty args; + asdl_seq *body; + asdl_seq *decorator_list; + expr_ty returns; + } AsyncFunctionDef; + + struct { + identifier name; + asdl_seq *bases; + asdl_seq *keywords; + asdl_seq *body; + asdl_seq 
*decorator_list; + } ClassDef; + + struct { + expr_ty value; + } Return; + + struct { + asdl_seq *targets; + } Delete; + + struct { + asdl_seq *targets; + expr_ty value; + } Assign; + + struct { + expr_ty target; + operator_ty op; + expr_ty value; + } AugAssign; + + struct { + expr_ty target; + expr_ty annotation; + expr_ty value; + int simple; + } AnnAssign; + + struct { + expr_ty target; + expr_ty iter; + asdl_seq *body; + asdl_seq *orelse; + } For; + + struct { + expr_ty target; + expr_ty iter; + asdl_seq *body; + asdl_seq *orelse; + } AsyncFor; + + struct { + expr_ty test; + asdl_seq *body; + asdl_seq *orelse; + } While; + + struct { + expr_ty test; + asdl_seq *body; + asdl_seq *orelse; + } If; + + struct { + asdl_seq *items; + asdl_seq *body; + } With; + + struct { + asdl_seq *items; + asdl_seq *body; + } AsyncWith; + + struct { + expr_ty exc; + expr_ty cause; + } Raise; + + struct { + asdl_seq *body; + asdl_seq *handlers; + asdl_seq *orelse; + asdl_seq *finalbody; + } Try; + + struct { + expr_ty test; + expr_ty msg; + } Assert; + + struct { + asdl_seq *names; + } Import; + + struct { + identifier module; + asdl_seq *names; + int level; + } ImportFrom; + + struct { + asdl_seq *names; + } Global; + + struct { + asdl_seq *names; + } Nonlocal; + + struct { + expr_ty value; + } Expr; + + } v; + int lineno; + int col_offset; +}; + +enum _expr_kind {BoolOp_kind=1, BinOp_kind=2, UnaryOp_kind=3, Lambda_kind=4, + IfExp_kind=5, Dict_kind=6, Set_kind=7, ListComp_kind=8, + SetComp_kind=9, DictComp_kind=10, GeneratorExp_kind=11, + Await_kind=12, Yield_kind=13, YieldFrom_kind=14, + Compare_kind=15, Call_kind=16, Num_kind=17, Str_kind=18, + FormattedValue_kind=19, JoinedStr_kind=20, Bytes_kind=21, + NameConstant_kind=22, Ellipsis_kind=23, Constant_kind=24, + Attribute_kind=25, Subscript_kind=26, Starred_kind=27, + Name_kind=28, List_kind=29, Tuple_kind=30}; +struct _expr { + enum _expr_kind kind; + union { + struct { + boolop_ty op; + asdl_seq *values; + } BoolOp; + + struct { + expr_ty left; + operator_ty op; + expr_ty right; + } BinOp; + + struct { + unaryop_ty op; + expr_ty operand; + } UnaryOp; + + struct { + arguments_ty args; + expr_ty body; + } Lambda; + + struct { + expr_ty test; + expr_ty body; + expr_ty orelse; + } IfExp; + + struct { + asdl_seq *keys; + asdl_seq *values; + } Dict; + + struct { + asdl_seq *elts; + } Set; + + struct { + expr_ty elt; + asdl_seq *generators; + } ListComp; + + struct { + expr_ty elt; + asdl_seq *generators; + } SetComp; + + struct { + expr_ty key; + expr_ty value; + asdl_seq *generators; + } DictComp; + + struct { + expr_ty elt; + asdl_seq *generators; + } GeneratorExp; + + struct { + expr_ty value; + } Await; + + struct { + expr_ty value; + } Yield; + + struct { + expr_ty value; + } YieldFrom; + + struct { + expr_ty left; + asdl_int_seq *ops; + asdl_seq *comparators; + } Compare; + + struct { + expr_ty func; + asdl_seq *args; + asdl_seq *keywords; + } Call; + + struct { + object n; + } Num; + + struct { + string s; + } Str; + + struct { + expr_ty value; + int conversion; + expr_ty format_spec; + } FormattedValue; + + struct { + asdl_seq *values; + } JoinedStr; + + struct { + bytes s; + } Bytes; + + struct { + singleton value; + } NameConstant; + + struct { + constant value; + } Constant; + + struct { + expr_ty value; + identifier attr; + expr_context_ty ctx; + } Attribute; + + struct { + expr_ty value; + slice_ty slice; + expr_context_ty ctx; + } Subscript; + + struct { + expr_ty value; + expr_context_ty ctx; + } Starred; + + struct { + identifier id; + 
expr_context_ty ctx; + } Name; + + struct { + asdl_seq *elts; + expr_context_ty ctx; + } List; + + struct { + asdl_seq *elts; + expr_context_ty ctx; + } Tuple; + + } v; + int lineno; + int col_offset; +}; + +enum _slice_kind {Slice_kind=1, ExtSlice_kind=2, Index_kind=3}; +struct _slice { + enum _slice_kind kind; + union { + struct { + expr_ty lower; + expr_ty upper; + expr_ty step; + } Slice; + + struct { + asdl_seq *dims; + } ExtSlice; + + struct { + expr_ty value; + } Index; + + } v; +}; + +struct _comprehension { + expr_ty target; + expr_ty iter; + asdl_seq *ifs; + int is_async; +}; + +enum _excepthandler_kind {ExceptHandler_kind=1}; +struct _excepthandler { + enum _excepthandler_kind kind; + union { + struct { + expr_ty type; + identifier name; + asdl_seq *body; + } ExceptHandler; + + } v; + int lineno; + int col_offset; +}; + +struct _arguments { + asdl_seq *args; + arg_ty vararg; + asdl_seq *kwonlyargs; + asdl_seq *kw_defaults; + arg_ty kwarg; + asdl_seq *defaults; +}; + +struct _arg { + identifier arg; + expr_ty annotation; + int lineno; + int col_offset; +}; + +struct _keyword { + identifier arg; + expr_ty value; +}; + +struct _alias { + identifier name; + identifier asname; +}; + +struct _withitem { + expr_ty context_expr; + expr_ty optional_vars; +}; + + +#define Module(a0, a1) _Py_Module(a0, a1) +mod_ty _Py_Module(asdl_seq * body, PyArena *arena); +#define Interactive(a0, a1) _Py_Interactive(a0, a1) +mod_ty _Py_Interactive(asdl_seq * body, PyArena *arena); +#define Expression(a0, a1) _Py_Expression(a0, a1) +mod_ty _Py_Expression(expr_ty body, PyArena *arena); +#define Suite(a0, a1) _Py_Suite(a0, a1) +mod_ty _Py_Suite(asdl_seq * body, PyArena *arena); +#define FunctionDef(a0, a1, a2, a3, a4, a5, a6, a7) _Py_FunctionDef(a0, a1, a2, a3, a4, a5, a6, a7) +stmt_ty _Py_FunctionDef(identifier name, arguments_ty args, asdl_seq * body, + asdl_seq * decorator_list, expr_ty returns, int lineno, + int col_offset, PyArena *arena); +#define AsyncFunctionDef(a0, a1, a2, a3, a4, a5, a6, a7) _Py_AsyncFunctionDef(a0, a1, a2, a3, a4, a5, a6, a7) +stmt_ty _Py_AsyncFunctionDef(identifier name, arguments_ty args, asdl_seq * + body, asdl_seq * decorator_list, expr_ty returns, + int lineno, int col_offset, PyArena *arena); +#define ClassDef(a0, a1, a2, a3, a4, a5, a6, a7) _Py_ClassDef(a0, a1, a2, a3, a4, a5, a6, a7) +stmt_ty _Py_ClassDef(identifier name, asdl_seq * bases, asdl_seq * keywords, + asdl_seq * body, asdl_seq * decorator_list, int lineno, + int col_offset, PyArena *arena); +#define Return(a0, a1, a2, a3) _Py_Return(a0, a1, a2, a3) +stmt_ty _Py_Return(expr_ty value, int lineno, int col_offset, PyArena *arena); +#define Delete(a0, a1, a2, a3) _Py_Delete(a0, a1, a2, a3) +stmt_ty _Py_Delete(asdl_seq * targets, int lineno, int col_offset, PyArena + *arena); +#define Assign(a0, a1, a2, a3, a4) _Py_Assign(a0, a1, a2, a3, a4) +stmt_ty _Py_Assign(asdl_seq * targets, expr_ty value, int lineno, int + col_offset, PyArena *arena); +#define AugAssign(a0, a1, a2, a3, a4, a5) _Py_AugAssign(a0, a1, a2, a3, a4, a5) +stmt_ty _Py_AugAssign(expr_ty target, operator_ty op, expr_ty value, int + lineno, int col_offset, PyArena *arena); +#define AnnAssign(a0, a1, a2, a3, a4, a5, a6) _Py_AnnAssign(a0, a1, a2, a3, a4, a5, a6) +stmt_ty _Py_AnnAssign(expr_ty target, expr_ty annotation, expr_ty value, int + simple, int lineno, int col_offset, PyArena *arena); +#define For(a0, a1, a2, a3, a4, a5, a6) _Py_For(a0, a1, a2, a3, a4, a5, a6) +stmt_ty _Py_For(expr_ty target, expr_ty iter, asdl_seq * body, asdl_seq * + orelse, int 
lineno, int col_offset, PyArena *arena); +#define AsyncFor(a0, a1, a2, a3, a4, a5, a6) _Py_AsyncFor(a0, a1, a2, a3, a4, a5, a6) +stmt_ty _Py_AsyncFor(expr_ty target, expr_ty iter, asdl_seq * body, asdl_seq * + orelse, int lineno, int col_offset, PyArena *arena); +#define While(a0, a1, a2, a3, a4, a5) _Py_While(a0, a1, a2, a3, a4, a5) +stmt_ty _Py_While(expr_ty test, asdl_seq * body, asdl_seq * orelse, int lineno, + int col_offset, PyArena *arena); +#define If(a0, a1, a2, a3, a4, a5) _Py_If(a0, a1, a2, a3, a4, a5) +stmt_ty _Py_If(expr_ty test, asdl_seq * body, asdl_seq * orelse, int lineno, + int col_offset, PyArena *arena); +#define With(a0, a1, a2, a3, a4) _Py_With(a0, a1, a2, a3, a4) +stmt_ty _Py_With(asdl_seq * items, asdl_seq * body, int lineno, int col_offset, + PyArena *arena); +#define AsyncWith(a0, a1, a2, a3, a4) _Py_AsyncWith(a0, a1, a2, a3, a4) +stmt_ty _Py_AsyncWith(asdl_seq * items, asdl_seq * body, int lineno, int + col_offset, PyArena *arena); +#define Raise(a0, a1, a2, a3, a4) _Py_Raise(a0, a1, a2, a3, a4) +stmt_ty _Py_Raise(expr_ty exc, expr_ty cause, int lineno, int col_offset, + PyArena *arena); +#define Try(a0, a1, a2, a3, a4, a5, a6) _Py_Try(a0, a1, a2, a3, a4, a5, a6) +stmt_ty _Py_Try(asdl_seq * body, asdl_seq * handlers, asdl_seq * orelse, + asdl_seq * finalbody, int lineno, int col_offset, PyArena + *arena); +#define Assert(a0, a1, a2, a3, a4) _Py_Assert(a0, a1, a2, a3, a4) +stmt_ty _Py_Assert(expr_ty test, expr_ty msg, int lineno, int col_offset, + PyArena *arena); +#define Import(a0, a1, a2, a3) _Py_Import(a0, a1, a2, a3) +stmt_ty _Py_Import(asdl_seq * names, int lineno, int col_offset, PyArena + *arena); +#define ImportFrom(a0, a1, a2, a3, a4, a5) _Py_ImportFrom(a0, a1, a2, a3, a4, a5) +stmt_ty _Py_ImportFrom(identifier module, asdl_seq * names, int level, int + lineno, int col_offset, PyArena *arena); +#define Global(a0, a1, a2, a3) _Py_Global(a0, a1, a2, a3) +stmt_ty _Py_Global(asdl_seq * names, int lineno, int col_offset, PyArena + *arena); +#define Nonlocal(a0, a1, a2, a3) _Py_Nonlocal(a0, a1, a2, a3) +stmt_ty _Py_Nonlocal(asdl_seq * names, int lineno, int col_offset, PyArena + *arena); +#define Expr(a0, a1, a2, a3) _Py_Expr(a0, a1, a2, a3) +stmt_ty _Py_Expr(expr_ty value, int lineno, int col_offset, PyArena *arena); +#define Pass(a0, a1, a2) _Py_Pass(a0, a1, a2) +stmt_ty _Py_Pass(int lineno, int col_offset, PyArena *arena); +#define Break(a0, a1, a2) _Py_Break(a0, a1, a2) +stmt_ty _Py_Break(int lineno, int col_offset, PyArena *arena); +#define Continue(a0, a1, a2) _Py_Continue(a0, a1, a2) +stmt_ty _Py_Continue(int lineno, int col_offset, PyArena *arena); +#define BoolOp(a0, a1, a2, a3, a4) _Py_BoolOp(a0, a1, a2, a3, a4) +expr_ty _Py_BoolOp(boolop_ty op, asdl_seq * values, int lineno, int col_offset, + PyArena *arena); +#define BinOp(a0, a1, a2, a3, a4, a5) _Py_BinOp(a0, a1, a2, a3, a4, a5) +expr_ty _Py_BinOp(expr_ty left, operator_ty op, expr_ty right, int lineno, int + col_offset, PyArena *arena); +#define UnaryOp(a0, a1, a2, a3, a4) _Py_UnaryOp(a0, a1, a2, a3, a4) +expr_ty _Py_UnaryOp(unaryop_ty op, expr_ty operand, int lineno, int col_offset, + PyArena *arena); +#define Lambda(a0, a1, a2, a3, a4) _Py_Lambda(a0, a1, a2, a3, a4) +expr_ty _Py_Lambda(arguments_ty args, expr_ty body, int lineno, int col_offset, + PyArena *arena); +#define IfExp(a0, a1, a2, a3, a4, a5) _Py_IfExp(a0, a1, a2, a3, a4, a5) +expr_ty _Py_IfExp(expr_ty test, expr_ty body, expr_ty orelse, int lineno, int + col_offset, PyArena *arena); +#define Dict(a0, a1, a2, a3, a4) _Py_Dict(a0, a1, 
a2, a3, a4) +expr_ty _Py_Dict(asdl_seq * keys, asdl_seq * values, int lineno, int + col_offset, PyArena *arena); +#define Set(a0, a1, a2, a3) _Py_Set(a0, a1, a2, a3) +expr_ty _Py_Set(asdl_seq * elts, int lineno, int col_offset, PyArena *arena); +#define ListComp(a0, a1, a2, a3, a4) _Py_ListComp(a0, a1, a2, a3, a4) +expr_ty _Py_ListComp(expr_ty elt, asdl_seq * generators, int lineno, int + col_offset, PyArena *arena); +#define SetComp(a0, a1, a2, a3, a4) _Py_SetComp(a0, a1, a2, a3, a4) +expr_ty _Py_SetComp(expr_ty elt, asdl_seq * generators, int lineno, int + col_offset, PyArena *arena); +#define DictComp(a0, a1, a2, a3, a4, a5) _Py_DictComp(a0, a1, a2, a3, a4, a5) +expr_ty _Py_DictComp(expr_ty key, expr_ty value, asdl_seq * generators, int + lineno, int col_offset, PyArena *arena); +#define GeneratorExp(a0, a1, a2, a3, a4) _Py_GeneratorExp(a0, a1, a2, a3, a4) +expr_ty _Py_GeneratorExp(expr_ty elt, asdl_seq * generators, int lineno, int + col_offset, PyArena *arena); +#define Await(a0, a1, a2, a3) _Py_Await(a0, a1, a2, a3) +expr_ty _Py_Await(expr_ty value, int lineno, int col_offset, PyArena *arena); +#define Yield(a0, a1, a2, a3) _Py_Yield(a0, a1, a2, a3) +expr_ty _Py_Yield(expr_ty value, int lineno, int col_offset, PyArena *arena); +#define YieldFrom(a0, a1, a2, a3) _Py_YieldFrom(a0, a1, a2, a3) +expr_ty _Py_YieldFrom(expr_ty value, int lineno, int col_offset, PyArena + *arena); +#define Compare(a0, a1, a2, a3, a4, a5) _Py_Compare(a0, a1, a2, a3, a4, a5) +expr_ty _Py_Compare(expr_ty left, asdl_int_seq * ops, asdl_seq * comparators, + int lineno, int col_offset, PyArena *arena); +#define Call(a0, a1, a2, a3, a4, a5) _Py_Call(a0, a1, a2, a3, a4, a5) +expr_ty _Py_Call(expr_ty func, asdl_seq * args, asdl_seq * keywords, int + lineno, int col_offset, PyArena *arena); +#define Num(a0, a1, a2, a3) _Py_Num(a0, a1, a2, a3) +expr_ty _Py_Num(object n, int lineno, int col_offset, PyArena *arena); +#define Str(a0, a1, a2, a3) _Py_Str(a0, a1, a2, a3) +expr_ty _Py_Str(string s, int lineno, int col_offset, PyArena *arena); +#define FormattedValue(a0, a1, a2, a3, a4, a5) _Py_FormattedValue(a0, a1, a2, a3, a4, a5) +expr_ty _Py_FormattedValue(expr_ty value, int conversion, expr_ty format_spec, + int lineno, int col_offset, PyArena *arena); +#define JoinedStr(a0, a1, a2, a3) _Py_JoinedStr(a0, a1, a2, a3) +expr_ty _Py_JoinedStr(asdl_seq * values, int lineno, int col_offset, PyArena + *arena); +#define Bytes(a0, a1, a2, a3) _Py_Bytes(a0, a1, a2, a3) +expr_ty _Py_Bytes(bytes s, int lineno, int col_offset, PyArena *arena); +#define NameConstant(a0, a1, a2, a3) _Py_NameConstant(a0, a1, a2, a3) +expr_ty _Py_NameConstant(singleton value, int lineno, int col_offset, PyArena + *arena); +#define Ellipsis(a0, a1, a2) _Py_Ellipsis(a0, a1, a2) +expr_ty _Py_Ellipsis(int lineno, int col_offset, PyArena *arena); +#define Constant(a0, a1, a2, a3) _Py_Constant(a0, a1, a2, a3) +expr_ty _Py_Constant(constant value, int lineno, int col_offset, PyArena + *arena); +#define Attribute(a0, a1, a2, a3, a4, a5) _Py_Attribute(a0, a1, a2, a3, a4, a5) +expr_ty _Py_Attribute(expr_ty value, identifier attr, expr_context_ty ctx, int + lineno, int col_offset, PyArena *arena); +#define Subscript(a0, a1, a2, a3, a4, a5) _Py_Subscript(a0, a1, a2, a3, a4, a5) +expr_ty _Py_Subscript(expr_ty value, slice_ty slice, expr_context_ty ctx, int + lineno, int col_offset, PyArena *arena); +#define Starred(a0, a1, a2, a3, a4) _Py_Starred(a0, a1, a2, a3, a4) +expr_ty _Py_Starred(expr_ty value, expr_context_ty ctx, int lineno, int + col_offset, PyArena 
*arena); +#define Name(a0, a1, a2, a3, a4) _Py_Name(a0, a1, a2, a3, a4) +expr_ty _Py_Name(identifier id, expr_context_ty ctx, int lineno, int + col_offset, PyArena *arena); +#define List(a0, a1, a2, a3, a4) _Py_List(a0, a1, a2, a3, a4) +expr_ty _Py_List(asdl_seq * elts, expr_context_ty ctx, int lineno, int + col_offset, PyArena *arena); +#define Tuple(a0, a1, a2, a3, a4) _Py_Tuple(a0, a1, a2, a3, a4) +expr_ty _Py_Tuple(asdl_seq * elts, expr_context_ty ctx, int lineno, int + col_offset, PyArena *arena); +#define Slice(a0, a1, a2, a3) _Py_Slice(a0, a1, a2, a3) +slice_ty _Py_Slice(expr_ty lower, expr_ty upper, expr_ty step, PyArena *arena); +#define ExtSlice(a0, a1) _Py_ExtSlice(a0, a1) +slice_ty _Py_ExtSlice(asdl_seq * dims, PyArena *arena); +#define Index(a0, a1) _Py_Index(a0, a1) +slice_ty _Py_Index(expr_ty value, PyArena *arena); +#define comprehension(a0, a1, a2, a3, a4) _Py_comprehension(a0, a1, a2, a3, a4) +comprehension_ty _Py_comprehension(expr_ty target, expr_ty iter, asdl_seq * + ifs, int is_async, PyArena *arena); +#define ExceptHandler(a0, a1, a2, a3, a4, a5) _Py_ExceptHandler(a0, a1, a2, a3, a4, a5) +excepthandler_ty _Py_ExceptHandler(expr_ty type, identifier name, asdl_seq * + body, int lineno, int col_offset, PyArena + *arena); +#define arguments(a0, a1, a2, a3, a4, a5, a6) _Py_arguments(a0, a1, a2, a3, a4, a5, a6) +arguments_ty _Py_arguments(asdl_seq * args, arg_ty vararg, asdl_seq * + kwonlyargs, asdl_seq * kw_defaults, arg_ty kwarg, + asdl_seq * defaults, PyArena *arena); +#define arg(a0, a1, a2, a3, a4) _Py_arg(a0, a1, a2, a3, a4) +arg_ty _Py_arg(identifier arg, expr_ty annotation, int lineno, int col_offset, + PyArena *arena); +#define keyword(a0, a1, a2) _Py_keyword(a0, a1, a2) +keyword_ty _Py_keyword(identifier arg, expr_ty value, PyArena *arena); +#define alias(a0, a1, a2) _Py_alias(a0, a1, a2) +alias_ty _Py_alias(identifier name, identifier asname, PyArena *arena); +#define withitem(a0, a1, a2) _Py_withitem(a0, a1, a2) +withitem_ty _Py_withitem(expr_ty context_expr, expr_ty optional_vars, PyArena + *arena); + +PyObject* PyAST_mod2obj(mod_ty t); +mod_ty PyAST_obj2mod(PyObject* ast, PyArena* arena, int mode); +int PyAST_Check(PyObject* obj); diff --git a/venv/Include/Python.h b/venv/Include/Python.h new file mode 100644 index 00000000..1feb1531 --- /dev/null +++ b/venv/Include/Python.h @@ -0,0 +1,140 @@ +#ifndef Py_PYTHON_H +#define Py_PYTHON_H +/* Since this is a "meta-include" file, no #ifdef __cplusplus / extern "C" { */ + +/* Include nearly all Python header files */ + +#include "patchlevel.h" +#include "pyconfig.h" +#include "pymacconfig.h" + +#include + +#ifndef UCHAR_MAX +#error "Something's broken. UCHAR_MAX should be defined in limits.h." +#endif + +#if UCHAR_MAX != 255 +#error "Python's source code assumes C's unsigned char is an 8-bit type." +#endif + +#if defined(__sgi) && !defined(_SGI_MP_SOURCE) +#define _SGI_MP_SOURCE +#endif + +#include +#ifndef NULL +# error "Python.h requires that stdio.h define NULL." +#endif + +#include +#ifdef HAVE_ERRNO_H +#include +#endif +#include +#ifdef HAVE_UNISTD_H +#include +#endif +#ifdef HAVE_CRYPT_H +#include +#endif + +/* For size_t? */ +#ifdef HAVE_STDDEF_H +#include +#endif + +/* CAUTION: Build setups should ensure that NDEBUG is defined on the + * compiler command line when building Python in release mode; else + * assert() calls won't be removed. + */ +#include + +#include "pyport.h" +#include "pymacro.h" + +#include "pyatomic.h" + +/* Debug-mode build with pymalloc implies PYMALLOC_DEBUG. 
+ * PYMALLOC_DEBUG is in error if pymalloc is not in use. + */ +#if defined(Py_DEBUG) && defined(WITH_PYMALLOC) && !defined(PYMALLOC_DEBUG) +#define PYMALLOC_DEBUG +#endif +#if defined(PYMALLOC_DEBUG) && !defined(WITH_PYMALLOC) +#error "PYMALLOC_DEBUG requires WITH_PYMALLOC" +#endif +#include "pymath.h" +#include "pytime.h" +#include "pymem.h" + +#include "object.h" +#include "objimpl.h" +#include "typeslots.h" +#include "pyhash.h" + +#include "pydebug.h" + +#include "bytearrayobject.h" +#include "bytesobject.h" +#include "unicodeobject.h" +#include "longobject.h" +#include "longintrepr.h" +#include "boolobject.h" +#include "floatobject.h" +#include "complexobject.h" +#include "rangeobject.h" +#include "memoryobject.h" +#include "tupleobject.h" +#include "listobject.h" +#include "dictobject.h" +#include "odictobject.h" +#include "enumobject.h" +#include "setobject.h" +#include "methodobject.h" +#include "moduleobject.h" +#include "funcobject.h" +#include "classobject.h" +#include "fileobject.h" +#include "pycapsule.h" +#include "traceback.h" +#include "sliceobject.h" +#include "cellobject.h" +#include "iterobject.h" +#include "genobject.h" +#include "descrobject.h" +#include "warnings.h" +#include "weakrefobject.h" +#include "structseq.h" +#include "namespaceobject.h" + +#include "codecs.h" +#include "pyerrors.h" + +#include "pystate.h" +#include "context.h" + +#include "pyarena.h" +#include "modsupport.h" +#include "compile.h" +#include "pythonrun.h" +#include "pylifecycle.h" +#include "ceval.h" +#include "sysmodule.h" +#include "osmodule.h" +#include "intrcheck.h" +#include "import.h" + +#include "abstract.h" +#include "bltinmodule.h" + +#include "eval.h" + +#include "pyctype.h" +#include "pystrtod.h" +#include "pystrcmp.h" +#include "dtoa.h" +#include "fileutils.h" +#include "pyfpe.h" + +#endif /* !Py_PYTHON_H */ diff --git a/venv/Include/abstract.h b/venv/Include/abstract.h new file mode 100644 index 00000000..4088f75f --- /dev/null +++ b/venv/Include/abstract.h @@ -0,0 +1,1109 @@ +/* Abstract Object Interface (many thanks to Jim Fulton) */ + +#ifndef Py_ABSTRACTOBJECT_H +#define Py_ABSTRACTOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +/* === Object Protocol ================================================== */ + +/* Implemented elsewhere: + + int PyObject_Print(PyObject *o, FILE *fp, int flags); + + Print an object 'o' on file 'fp'. Returns -1 on error. The flags argument + is used to enable certain printing options. The only option currently + supported is Py_Print_RAW. + + (What should be said about Py_Print_RAW?). */ + + +/* Implemented elsewhere: + + int PyObject_HasAttrString(PyObject *o, const char *attr_name); + + Returns 1 if object 'o' has the attribute attr_name, and 0 otherwise. + + This is equivalent to the Python expression: hasattr(o,attr_name). + + This function always succeeds. */ + + +/* Implemented elsewhere: + + PyObject* PyObject_GetAttrString(PyObject *o, const char *attr_name); + + Retrieve an attributed named attr_name form object o. + Returns the attribute value on success, or NULL on failure. + + This is the equivalent of the Python expression: o.attr_name. */ + + +/* Implemented elsewhere: + + int PyObject_HasAttr(PyObject *o, PyObject *attr_name); + + Returns 1 if o has the attribute attr_name, and 0 otherwise. + + This is equivalent to the Python expression: hasattr(o,attr_name). + + This function always succeeds. 
*/ + +/* Implemented elsewhere: + + PyObject* PyObject_GetAttr(PyObject *o, PyObject *attr_name); + + Retrieve an attributed named 'attr_name' form object 'o'. + Returns the attribute value on success, or NULL on failure. + + This is the equivalent of the Python expression: o.attr_name. */ + + +/* Implemented elsewhere: + + int PyObject_SetAttrString(PyObject *o, const char *attr_name, PyObject *v); + + Set the value of the attribute named attr_name, for object 'o', + to the value 'v'. Raise an exception and return -1 on failure; return 0 on + success. + + This is the equivalent of the Python statement o.attr_name=v. */ + + +/* Implemented elsewhere: + + int PyObject_SetAttr(PyObject *o, PyObject *attr_name, PyObject *v); + + Set the value of the attribute named attr_name, for object 'o', to the value + 'v'. an exception and return -1 on failure; return 0 on success. + + This is the equivalent of the Python statement o.attr_name=v. */ + +/* Implemented as a macro: + + int PyObject_DelAttrString(PyObject *o, const char *attr_name); + + Delete attribute named attr_name, for object o. Returns + -1 on failure. + + This is the equivalent of the Python statement: del o.attr_name. */ +#define PyObject_DelAttrString(O,A) PyObject_SetAttrString((O),(A), NULL) + + +/* Implemented as a macro: + + int PyObject_DelAttr(PyObject *o, PyObject *attr_name); + + Delete attribute named attr_name, for object o. Returns -1 + on failure. This is the equivalent of the Python + statement: del o.attr_name. */ +#define PyObject_DelAttr(O,A) PyObject_SetAttr((O),(A), NULL) + + +/* Implemented elsewhere: + + PyObject *PyObject_Repr(PyObject *o); + + Compute the string representation of object 'o'. Returns the + string representation on success, NULL on failure. + + This is the equivalent of the Python expression: repr(o). + + Called by the repr() built-in function. */ + + +/* Implemented elsewhere: + + PyObject *PyObject_Str(PyObject *o); + + Compute the string representation of object, o. Returns the + string representation on success, NULL on failure. + + This is the equivalent of the Python expression: str(o). + + Called by the str() and print() built-in functions. */ + + +/* Declared elsewhere + + PyAPI_FUNC(int) PyCallable_Check(PyObject *o); + + Determine if the object, o, is callable. Return 1 if the object is callable + and 0 otherwise. + + This function always succeeds. */ + + +#ifdef PY_SSIZE_T_CLEAN +# define PyObject_CallFunction _PyObject_CallFunction_SizeT +# define PyObject_CallMethod _PyObject_CallMethod_SizeT +# ifndef Py_LIMITED_API +# define _PyObject_CallMethodId _PyObject_CallMethodId_SizeT +# endif /* !Py_LIMITED_API */ +#endif + + +/* Call a callable Python object 'callable' with arguments given by the + tuple 'args' and keywords arguments given by the dictionary 'kwargs'. + + 'args' must not be *NULL*, use an empty tuple if no arguments are + needed. If no named arguments are needed, 'kwargs' can be NULL. + + This is the equivalent of the Python expression: + callable(*args, **kwargs). */ +PyAPI_FUNC(PyObject *) PyObject_Call(PyObject *callable, + PyObject *args, PyObject *kwargs); + +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject*) _PyStack_AsTuple( + PyObject *const *stack, + Py_ssize_t nargs); + +PyAPI_FUNC(PyObject*) _PyStack_AsTupleSlice( + PyObject *const *stack, + Py_ssize_t nargs, + Py_ssize_t start, + Py_ssize_t end); + +/* Convert keyword arguments from the FASTCALL (stack: C array, kwnames: tuple) + format to a Python dictionary ("kwargs" dict). 
+ + The type of kwnames keys is not checked. The final function getting + arguments is responsible to check if all keys are strings, for example using + PyArg_ParseTupleAndKeywords() or PyArg_ValidateKeywordArguments(). + + Duplicate keys are merged using the last value. If duplicate keys must raise + an exception, the caller is responsible to implement an explicit keys on + kwnames. */ +PyAPI_FUNC(PyObject *) _PyStack_AsDict( + PyObject *const *values, + PyObject *kwnames); + +/* Convert (args, nargs, kwargs: dict) into a (stack, nargs, kwnames: tuple). + + Return 0 on success, raise an exception and return -1 on error. + + Write the new stack into *p_stack. If *p_stack is differen than args, it + must be released by PyMem_Free(). + + The stack uses borrowed references. + + The type of keyword keys is not checked, these checks should be done + later (ex: _PyArg_ParseStackAndKeywords). */ +PyAPI_FUNC(int) _PyStack_UnpackDict( + PyObject *const *args, + Py_ssize_t nargs, + PyObject *kwargs, + PyObject *const **p_stack, + PyObject **p_kwnames); + +/* Suggested size (number of positional arguments) for arrays of PyObject* + allocated on a C stack to avoid allocating memory on the heap memory. Such + array is used to pass positional arguments to call functions of the + _PyObject_FastCall() family. + + The size is chosen to not abuse the C stack and so limit the risk of stack + overflow. The size is also chosen to allow using the small stack for most + function calls of the Python standard library. On 64-bit CPU, it allocates + 40 bytes on the stack. */ +#define _PY_FASTCALL_SMALL_STACK 5 + +/* Return 1 if callable supports FASTCALL calling convention for positional + arguments: see _PyObject_FastCallDict() and _PyObject_FastCallKeywords() */ +PyAPI_FUNC(int) _PyObject_HasFastCall(PyObject *callable); + +/* Call the callable object 'callable' with the "fast call" calling convention: + args is a C array for positional arguments (nargs is the number of + positional arguments), kwargs is a dictionary for keyword arguments. + + If nargs is equal to zero, args can be NULL. kwargs can be NULL. + nargs must be greater or equal to zero. + + Return the result on success. Raise an exception on return NULL on + error. */ +PyAPI_FUNC(PyObject *) _PyObject_FastCallDict( + PyObject *callable, + PyObject *const *args, + Py_ssize_t nargs, + PyObject *kwargs); + +/* Call the callable object 'callable' with the "fast call" calling convention: + args is a C array for positional arguments followed by values of + keyword arguments. Keys of keyword arguments are stored as a tuple + of strings in kwnames. nargs is the number of positional parameters at + the beginning of stack. The size of kwnames gives the number of keyword + values in the stack after positional arguments. + + kwnames must only contains str strings, no subclass, and all keys must + be unique. + + If nargs is equal to zero and there is no keyword argument (kwnames is + NULL or its size is zero), args can be NULL. + + Return the result on success. Raise an exception and return NULL on + error. 
*/ +PyAPI_FUNC(PyObject *) _PyObject_FastCallKeywords( + PyObject *callable, + PyObject *const *args, + Py_ssize_t nargs, + PyObject *kwnames); + +#define _PyObject_FastCall(func, args, nargs) \ + _PyObject_FastCallDict((func), (args), (nargs), NULL) + +#define _PyObject_CallNoArg(func) \ + _PyObject_FastCallDict((func), NULL, 0, NULL) + +PyAPI_FUNC(PyObject *) _PyObject_Call_Prepend( + PyObject *callable, + PyObject *obj, + PyObject *args, + PyObject *kwargs); + +PyAPI_FUNC(PyObject *) _PyObject_FastCall_Prepend( + PyObject *callable, + PyObject *obj, + PyObject *const *args, + Py_ssize_t nargs); + +PyAPI_FUNC(PyObject *) _Py_CheckFunctionResult(PyObject *callable, + PyObject *result, + const char *where); +#endif /* Py_LIMITED_API */ + + +/* Call a callable Python object 'callable', with arguments given by the + tuple 'args'. If no arguments are needed, then 'args' can be *NULL*. + + Returns the result of the call on success, or *NULL* on failure. + + This is the equivalent of the Python expression: + callable(*args). */ +PyAPI_FUNC(PyObject *) PyObject_CallObject(PyObject *callable, + PyObject *args); + +/* Call a callable Python object, callable, with a variable number of C + arguments. The C arguments are described using a mkvalue-style format + string. + + The format may be NULL, indicating that no arguments are provided. + + Returns the result of the call on success, or NULL on failure. + + This is the equivalent of the Python expression: + callable(arg1, arg2, ...). */ +PyAPI_FUNC(PyObject *) PyObject_CallFunction(PyObject *callable, + const char *format, ...); + +/* Call the method named 'name' of object 'obj' with a variable number of + C arguments. The C arguments are described by a mkvalue format string. + + The format can be NULL, indicating that no arguments are provided. + + Returns the result of the call on success, or NULL on failure. + + This is the equivalent of the Python expression: + obj.name(arg1, arg2, ...). */ +PyAPI_FUNC(PyObject *) PyObject_CallMethod(PyObject *obj, + const char *name, + const char *format, ...); + +#ifndef Py_LIMITED_API +/* Like PyObject_CallMethod(), but expect a _Py_Identifier* + as the method name. */ +PyAPI_FUNC(PyObject *) _PyObject_CallMethodId(PyObject *obj, + _Py_Identifier *name, + const char *format, ...); +#endif /* !Py_LIMITED_API */ + +PyAPI_FUNC(PyObject *) _PyObject_CallFunction_SizeT(PyObject *callable, + const char *format, + ...); + +PyAPI_FUNC(PyObject *) _PyObject_CallMethod_SizeT(PyObject *obj, + const char *name, + const char *format, + ...); + +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject *) _PyObject_CallMethodId_SizeT(PyObject *obj, + _Py_Identifier *name, + const char *format, + ...); +#endif /* !Py_LIMITED_API */ + +/* Call a callable Python object 'callable' with a variable number of C + arguments. The C arguments are provided as PyObject* values, terminated + by a NULL. + + Returns the result of the call on success, or NULL on failure. + + This is the equivalent of the Python expression: + callable(arg1, arg2, ...). */ +PyAPI_FUNC(PyObject *) PyObject_CallFunctionObjArgs(PyObject *callable, + ...); + +/* Call the method named 'name' of object 'obj' with a variable number of + C arguments. The C arguments are provided as PyObject* values, terminated + by NULL. + + Returns the result of the call on success, or NULL on failure. + + This is the equivalent of the Python expression: obj.name(*args). 
*/ + +PyAPI_FUNC(PyObject *) PyObject_CallMethodObjArgs( + PyObject *obj, + PyObject *name, + ...); + +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject *) _PyObject_CallMethodIdObjArgs( + PyObject *obj, + struct _Py_Identifier *name, + ...); +#endif /* !Py_LIMITED_API */ + + +/* Implemented elsewhere: + + Py_hash_t PyObject_Hash(PyObject *o); + + Compute and return the hash, hash_value, of an object, o. On + failure, return -1. + + This is the equivalent of the Python expression: hash(o). */ + + +/* Implemented elsewhere: + + int PyObject_IsTrue(PyObject *o); + + Returns 1 if the object, o, is considered to be true, 0 if o is + considered to be false and -1 on failure. + + This is equivalent to the Python expression: not not o. */ + + +/* Implemented elsewhere: + + int PyObject_Not(PyObject *o); + + Returns 0 if the object, o, is considered to be true, 1 if o is + considered to be false and -1 on failure. + + This is equivalent to the Python expression: not o. */ + + +/* Get the type of an object. + + On success, returns a type object corresponding to the object type of object + 'o'. On failure, returns NULL. + + This is equivalent to the Python expression: type(o) */ +PyAPI_FUNC(PyObject *) PyObject_Type(PyObject *o); + + +/* Return the size of object 'o'. If the object 'o' provides both sequence and + mapping protocols, the sequence size is returned. + + On error, -1 is returned. + + This is the equivalent to the Python expression: len(o) */ +PyAPI_FUNC(Py_ssize_t) PyObject_Size(PyObject *o); + + +/* For DLL compatibility */ +#undef PyObject_Length +PyAPI_FUNC(Py_ssize_t) PyObject_Length(PyObject *o); +#define PyObject_Length PyObject_Size + + +#ifndef Py_LIMITED_API +PyAPI_FUNC(int) _PyObject_HasLen(PyObject *o); + +/* Guess the size of object 'o' using len(o) or o.__length_hint__(). + If neither of those return a non-negative value, then return the default + value. If one of the calls fails, this function returns -1. */ +PyAPI_FUNC(Py_ssize_t) PyObject_LengthHint(PyObject *o, Py_ssize_t); +#endif + +/* Return element of 'o' corresponding to the object 'key'. Return NULL + on failure. + + This is the equivalent of the Python expression: o[key] */ +PyAPI_FUNC(PyObject *) PyObject_GetItem(PyObject *o, PyObject *key); + + +/* Map the object 'key' to the value 'v' into 'o'. + + Raise an exception and return -1 on failure; return 0 on success. + + This is the equivalent of the Python statement: o[key]=v. */ +PyAPI_FUNC(int) PyObject_SetItem(PyObject *o, PyObject *key, PyObject *v); + +/* Remove the mapping for the string 'key' from the object 'o'. + Returns -1 on failure. + + This is equivalent to the Python statement: del o[key]. */ +PyAPI_FUNC(int) PyObject_DelItemString(PyObject *o, const char *key); + +/* Delete the mapping for the object 'key' from the object 'o'. + Returns -1 on failure. + + This is the equivalent of the Python statement: del o[key]. */ +PyAPI_FUNC(int) PyObject_DelItem(PyObject *o, PyObject *key); + + +/* === Old Buffer API ============================================ */ + +/* FIXME: usage of these should all be replaced in Python itself + but for backwards compatibility we will implement them. + Their usage without a corresponding "unlock" mechanism + may create issues (but they would already be there). */ + +/* Takes an arbitrary object which must support the (character, single segment) + buffer interface and returns a pointer to a read-only memory location + useable as character based input for subsequent processing. + + Return 0 on success. 
buffer and buffer_len are only set in case no error + occurs. Otherwise, -1 is returned and an exception set. */ +PyAPI_FUNC(int) PyObject_AsCharBuffer(PyObject *obj, + const char **buffer, + Py_ssize_t *buffer_len) + Py_DEPRECATED(3.0); + +/* Checks whether an arbitrary object supports the (character, single segment) + buffer interface. + + Returns 1 on success, 0 on failure. */ +PyAPI_FUNC(int) PyObject_CheckReadBuffer(PyObject *obj) + Py_DEPRECATED(3.0); + +/* Same as PyObject_AsCharBuffer() except that this API expects (readable, + single segment) buffer interface and returns a pointer to a read-only memory + location which can contain arbitrary data. + + 0 is returned on success. buffer and buffer_len are only set in case no + error occurs. Otherwise, -1 is returned and an exception set. */ +PyAPI_FUNC(int) PyObject_AsReadBuffer(PyObject *obj, + const void **buffer, + Py_ssize_t *buffer_len) + Py_DEPRECATED(3.0); + +/* Takes an arbitrary object which must support the (writable, single segment) + buffer interface and returns a pointer to a writable memory location in + buffer of size 'buffer_len'. + + Return 0 on success. buffer and buffer_len are only set in case no error + occurs. Otherwise, -1 is returned and an exception set. */ +PyAPI_FUNC(int) PyObject_AsWriteBuffer(PyObject *obj, + void **buffer, + Py_ssize_t *buffer_len) + Py_DEPRECATED(3.0); + + +/* === New Buffer API ============================================ */ + +#ifndef Py_LIMITED_API + +/* Return 1 if the getbuffer function is available, otherwise return 0. */ +#define PyObject_CheckBuffer(obj) \ + (((obj)->ob_type->tp_as_buffer != NULL) && \ + ((obj)->ob_type->tp_as_buffer->bf_getbuffer != NULL)) + +/* This is a C-API version of the getbuffer function call. It checks + to make sure object has the required function pointer and issues the + call. + + Returns -1 and raises an error on failure and returns 0 on success. */ +PyAPI_FUNC(int) PyObject_GetBuffer(PyObject *obj, Py_buffer *view, + int flags); + +/* Get the memory area pointed to by the indices for the buffer given. + Note that view->ndim is the assumed size of indices. */ +PyAPI_FUNC(void *) PyBuffer_GetPointer(Py_buffer *view, Py_ssize_t *indices); + +/* Return the implied itemsize of the data-format area from a + struct-style description. */ +PyAPI_FUNC(int) PyBuffer_SizeFromFormat(const char *); + +/* Implementation in memoryobject.c */ +PyAPI_FUNC(int) PyBuffer_ToContiguous(void *buf, Py_buffer *view, + Py_ssize_t len, char order); + +PyAPI_FUNC(int) PyBuffer_FromContiguous(Py_buffer *view, void *buf, + Py_ssize_t len, char order); + +/* Copy len bytes of data from the contiguous chunk of memory + pointed to by buf into the buffer exported by obj. Return + 0 on success and return -1 and raise a PyBuffer_Error on + error (i.e. the object does not have a buffer interface or + it is not working). + + If fort is 'F', then if the object is multi-dimensional, + then the data will be copied into the array in + Fortran-style (first dimension varies the fastest). If + fort is 'C', then the data will be copied into the array + in C-style (last dimension varies the fastest). If fort + is 'A', then it does not matter and the copy will be made + in whatever way is more efficient. */ +PyAPI_FUNC(int) PyObject_CopyData(PyObject *dest, PyObject *src); + +/* Copy the data from the src buffer to the buffer of destination. 
*/ +PyAPI_FUNC(int) PyBuffer_IsContiguous(const Py_buffer *view, char fort); + +/*Fill the strides array with byte-strides of a contiguous + (Fortran-style if fort is 'F' or C-style otherwise) + array of the given shape with the given number of bytes + per element. */ +PyAPI_FUNC(void) PyBuffer_FillContiguousStrides(int ndims, + Py_ssize_t *shape, + Py_ssize_t *strides, + int itemsize, + char fort); + +/* Fills in a buffer-info structure correctly for an exporter + that can only share a contiguous chunk of memory of + "unsigned bytes" of the given length. + + Returns 0 on success and -1 (with raising an error) on error. */ +PyAPI_FUNC(int) PyBuffer_FillInfo(Py_buffer *view, PyObject *o, void *buf, + Py_ssize_t len, int readonly, + int flags); + +/* Releases a Py_buffer obtained from getbuffer ParseTuple's "s*". */ +PyAPI_FUNC(void) PyBuffer_Release(Py_buffer *view); + +#endif /* Py_LIMITED_API */ + +/* Takes an arbitrary object and returns the result of calling + obj.__format__(format_spec). */ +PyAPI_FUNC(PyObject *) PyObject_Format(PyObject *obj, + PyObject *format_spec); + + +/* ==== Iterators ================================================ */ + +/* Takes an object and returns an iterator for it. + This is typically a new iterator but if the argument is an iterator, this + returns itself. */ +PyAPI_FUNC(PyObject *) PyObject_GetIter(PyObject *); + +#define PyIter_Check(obj) \ + ((obj)->ob_type->tp_iternext != NULL && \ + (obj)->ob_type->tp_iternext != &_PyObject_NextNotImplemented) + +/* Takes an iterator object and calls its tp_iternext slot, + returning the next value. + + If the iterator is exhausted, this returns NULL without setting an + exception. + + NULL with an exception means an error occurred. */ +PyAPI_FUNC(PyObject *) PyIter_Next(PyObject *); + + +/* === Number Protocol ================================================== */ + +/* Returns 1 if the object 'o' provides numeric protocols, and 0 otherwise. + + This function always succeeds. */ +PyAPI_FUNC(int) PyNumber_Check(PyObject *o); + +/* Returns the result of adding o1 and o2, or NULL on failure. + + This is the equivalent of the Python expression: o1 + o2. */ +PyAPI_FUNC(PyObject *) PyNumber_Add(PyObject *o1, PyObject *o2); + +/* Returns the result of subtracting o2 from o1, or NULL on failure. + + This is the equivalent of the Python expression: o1 - o2. */ +PyAPI_FUNC(PyObject *) PyNumber_Subtract(PyObject *o1, PyObject *o2); + +/* Returns the result of multiplying o1 and o2, or NULL on failure. + + This is the equivalent of the Python expression: o1 * o2. */ +PyAPI_FUNC(PyObject *) PyNumber_Multiply(PyObject *o1, PyObject *o2); + +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03050000 +/* This is the equivalent of the Python expression: o1 @ o2. */ +PyAPI_FUNC(PyObject *) PyNumber_MatrixMultiply(PyObject *o1, PyObject *o2); +#endif + +/* Returns the result of dividing o1 by o2 giving an integral result, + or NULL on failure. + + This is the equivalent of the Python expression: o1 // o2. */ +PyAPI_FUNC(PyObject *) PyNumber_FloorDivide(PyObject *o1, PyObject *o2); + +/* Returns the result of dividing o1 by o2 giving a float result, or NULL on + failure. + + This is the equivalent of the Python expression: o1 / o2. */ +PyAPI_FUNC(PyObject *) PyNumber_TrueDivide(PyObject *o1, PyObject *o2); + +/* Returns the remainder of dividing o1 by o2, or NULL on failure. + + This is the equivalent of the Python expression: o1 % o2. 
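The iterator protocol above is the C mirror of a `for` loop; note that PyIter_Next() returns NULL both at exhaustion and on error, so the two cases have to be told apart with PyErr_Occurred(). A hedged sketch (not part of the committed file, helper name hypothetical):

```c
#include <Python.h>

/* Hypothetical helper: count the items produced by iter(obj). */
static PyObject *
count_items(PyObject *self, PyObject *obj)
{
    PyObject *it = PyObject_GetIter(obj);   /* iter(obj), new reference */
    if (it == NULL) {
        return NULL;
    }

    Py_ssize_t count = 0;
    PyObject *item;
    while ((item = PyIter_Next(it)) != NULL) {
        count++;
        Py_DECREF(item);
    }
    Py_DECREF(it);

    /* NULL from PyIter_Next() means either "exhausted" or "error". */
    if (PyErr_Occurred()) {
        return NULL;
    }
    return PyLong_FromSsize_t(count);
}
```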
*/ +PyAPI_FUNC(PyObject *) PyNumber_Remainder(PyObject *o1, PyObject *o2); + +/* See the built-in function divmod. + + Returns NULL on failure. + + This is the equivalent of the Python expression: divmod(o1, o2). */ +PyAPI_FUNC(PyObject *) PyNumber_Divmod(PyObject *o1, PyObject *o2); + +/* See the built-in function pow. Returns NULL on failure. + + This is the equivalent of the Python expression: pow(o1, o2, o3), + where o3 is optional. */ +PyAPI_FUNC(PyObject *) PyNumber_Power(PyObject *o1, PyObject *o2, + PyObject *o3); + +/* Returns the negation of o on success, or NULL on failure. + + This is the equivalent of the Python expression: -o. */ +PyAPI_FUNC(PyObject *) PyNumber_Negative(PyObject *o); + +/* Returns the positive of o on success, or NULL on failure. + + This is the equivalent of the Python expression: +o. */ +PyAPI_FUNC(PyObject *) PyNumber_Positive(PyObject *o); + +/* Returns the absolute value of 'o', or NULL on failure. + + This is the equivalent of the Python expression: abs(o). */ +PyAPI_FUNC(PyObject *) PyNumber_Absolute(PyObject *o); + +/* Returns the bitwise negation of 'o' on success, or NULL on failure. + + This is the equivalent of the Python expression: ~o. */ +PyAPI_FUNC(PyObject *) PyNumber_Invert(PyObject *o); + +/* Returns the result of left shifting o1 by o2 on success, or NULL on failure. + + This is the equivalent of the Python expression: o1 << o2. */ +PyAPI_FUNC(PyObject *) PyNumber_Lshift(PyObject *o1, PyObject *o2); + +/* Returns the result of right shifting o1 by o2 on success, or NULL on + failure. + + This is the equivalent of the Python expression: o1 >> o2. */ +PyAPI_FUNC(PyObject *) PyNumber_Rshift(PyObject *o1, PyObject *o2); + +/* Returns the result of bitwise and of o1 and o2 on success, or NULL on + failure. + + This is the equivalent of the Python expression: o1 & o2. */ +PyAPI_FUNC(PyObject *) PyNumber_And(PyObject *o1, PyObject *o2); + +/* Returns the bitwise exclusive or of o1 by o2 on success, or NULL on failure. + + This is the equivalent of the Python expression: o1 ^ o2. */ +PyAPI_FUNC(PyObject *) PyNumber_Xor(PyObject *o1, PyObject *o2); + +/* Returns the result of bitwise or on o1 and o2 on success, or NULL on + failure. + + This is the equivalent of the Python expression: o1 | o2. */ +PyAPI_FUNC(PyObject *) PyNumber_Or(PyObject *o1, PyObject *o2); + +#define PyIndex_Check(obj) \ + ((obj)->ob_type->tp_as_number != NULL && \ + (obj)->ob_type->tp_as_number->nb_index != NULL) + +/* Returns the object 'o' converted to a Python int, or NULL with an exception + raised on failure. */ +PyAPI_FUNC(PyObject *) PyNumber_Index(PyObject *o); + +/* Returns the object 'o' converted to Py_ssize_t by going through + PyNumber_Index() first. + + If an overflow error occurs while converting the int to Py_ssize_t, then the + second argument 'exc' is the error-type to return. If it is NULL, then the + overflow error is cleared and the value is clipped. */ +PyAPI_FUNC(Py_ssize_t) PyNumber_AsSsize_t(PyObject *o, PyObject *exc); + +/* Returns the object 'o' converted to an integer object on success, or NULL + on failure. + + This is the equivalent of the Python expression: int(o). */ +PyAPI_FUNC(PyObject *) PyNumber_Long(PyObject *o); + +/* Returns the object 'o' converted to a float object on success, or NULL + on failure. + + This is the equivalent of the Python expression: float(o). 
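PyNumber_Index() and PyNumber_AsSsize_t(), declared above, are the usual route from an arbitrary `__index__`-able object to a C Py_ssize_t; the second argument of PyNumber_AsSsize_t() decides whether overflow raises or clips. Illustrative sketch only, with a hypothetical helper name:

```c
#include <Python.h>

/* Hypothetical helper: convert any object with __index__ to Py_ssize_t,
   raising IndexError (rather than clipping) on overflow. */
static int
as_index(PyObject *obj, Py_ssize_t *out)
{
    /* operator.index(obj): NULL with an exception set on failure. */
    PyObject *idx = PyNumber_Index(obj);
    if (idx == NULL) {
        return -1;
    }

    /* Overflow raises IndexError because PyExc_IndexError is passed;
       with NULL the value would instead be clipped to PY_SSIZE_T_MIN/MAX. */
    Py_ssize_t value = PyNumber_AsSsize_t(idx, PyExc_IndexError);
    Py_DECREF(idx);
    if (value == -1 && PyErr_Occurred()) {
        return -1;
    }
    *out = value;
    return 0;
}
```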
*/ +PyAPI_FUNC(PyObject *) PyNumber_Float(PyObject *o); + + +/* --- In-place variants of (some of) the above number protocol functions -- */ + +/* Returns the result of adding o2 to o1, possibly in-place, or NULL + on failure. + + This is the equivalent of the Python expression: o1 += o2. */ +PyAPI_FUNC(PyObject *) PyNumber_InPlaceAdd(PyObject *o1, PyObject *o2); + +/* Returns the result of subtracting o2 from o1, possibly in-place or + NULL on failure. + + This is the equivalent of the Python expression: o1 -= o2. */ +PyAPI_FUNC(PyObject *) PyNumber_InPlaceSubtract(PyObject *o1, PyObject *o2); + +/* Returns the result of multiplying o1 by o2, possibly in-place, or NULL on + failure. + + This is the equivalent of the Python expression: o1 *= o2. */ +PyAPI_FUNC(PyObject *) PyNumber_InPlaceMultiply(PyObject *o1, PyObject *o2); + +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03050000 +/* This is the equivalent of the Python expression: o1 @= o2. */ +PyAPI_FUNC(PyObject *) PyNumber_InPlaceMatrixMultiply(PyObject *o1, PyObject *o2); +#endif + +/* Returns the result of dividing o1 by o2 giving an integral result, possibly + in-place, or NULL on failure. + + This is the equivalent of the Python expression: o1 /= o2. */ +PyAPI_FUNC(PyObject *) PyNumber_InPlaceFloorDivide(PyObject *o1, + PyObject *o2); + +/* Returns the result of dividing o1 by o2 giving a float result, possibly + in-place, or null on failure. + + This is the equivalent of the Python expression: o1 /= o2. */ +PyAPI_FUNC(PyObject *) PyNumber_InPlaceTrueDivide(PyObject *o1, + PyObject *o2); + +/* Returns the remainder of dividing o1 by o2, possibly in-place, or NULL on + failure. + + This is the equivalent of the Python expression: o1 %= o2. */ +PyAPI_FUNC(PyObject *) PyNumber_InPlaceRemainder(PyObject *o1, PyObject *o2); + +/* Returns the result of raising o1 to the power of o2, possibly in-place, + or NULL on failure. + + This is the equivalent of the Python expression: o1 **= o2, + or o1 = pow(o1, o2, o3) if o3 is present. */ +PyAPI_FUNC(PyObject *) PyNumber_InPlacePower(PyObject *o1, PyObject *o2, + PyObject *o3); + +/* Returns the result of left shifting o1 by o2, possibly in-place, or NULL + on failure. + + This is the equivalent of the Python expression: o1 <<= o2. */ +PyAPI_FUNC(PyObject *) PyNumber_InPlaceLshift(PyObject *o1, PyObject *o2); + +/* Returns the result of right shifting o1 by o2, possibly in-place or NULL + on failure. + + This is the equivalent of the Python expression: o1 >>= o2. */ +PyAPI_FUNC(PyObject *) PyNumber_InPlaceRshift(PyObject *o1, PyObject *o2); + +/* Returns the result of bitwise and of o1 and o2, possibly in-place, or NULL + on failure. + + This is the equivalent of the Python expression: o1 &= o2. */ +PyAPI_FUNC(PyObject *) PyNumber_InPlaceAnd(PyObject *o1, PyObject *o2); + +/* Returns the bitwise exclusive or of o1 by o2, possibly in-place, or NULL + on failure. + + This is the equivalent of the Python expression: o1 ^= o2. */ +PyAPI_FUNC(PyObject *) PyNumber_InPlaceXor(PyObject *o1, PyObject *o2); + +/* Returns the result of bitwise or of o1 and o2, possibly in-place, + or NULL on failure. + + This is the equivalent of the Python expression: o1 |= o2. */ +PyAPI_FUNC(PyObject *) PyNumber_InPlaceOr(PyObject *o1, PyObject *o2); + +/* Returns the integer n converted to a string with a base, with a base + marker of 0b, 0o or 0x prefixed if applicable. + + If n is not an int object, it is converted with PyNumber_Index first. 
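One point worth spelling out for the in-place variants above: "possibly in-place" means the returned object may or may not be the first operand (immutable types such as int always hand back a new object), so the result must always be captured and the old reference dropped. A hedged sketch, not part of the header, helper name hypothetical:

```c
#include <Python.h>

/* Hypothetical helper: the C spelling of "total += value".
   Consumes the caller's reference to 'total' and returns a new
   reference to the updated object, which may or may not be 'total'. */
static PyObject *
accumulate(PyObject *total, PyObject *value)
{
    PyObject *result = PyNumber_InPlaceAdd(total, value);
    if (result == NULL) {
        Py_DECREF(total);
        return NULL;
    }
    Py_DECREF(total);   /* keep only the returned reference */
    return result;
}
```

A caller would then write `total = accumulate(total, value)`, so the rebinding mirrors the Python statement `total += value`.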
*/ +PyAPI_FUNC(PyObject *) PyNumber_ToBase(PyObject *n, int base); + + +/* === Sequence protocol ================================================ */ + +/* Return 1 if the object provides sequence protocol, and zero + otherwise. + + This function always succeeds. */ +PyAPI_FUNC(int) PySequence_Check(PyObject *o); + +/* Return the size of sequence object o, or -1 on failure. */ +PyAPI_FUNC(Py_ssize_t) PySequence_Size(PyObject *o); + +/* For DLL compatibility */ +#undef PySequence_Length +PyAPI_FUNC(Py_ssize_t) PySequence_Length(PyObject *o); +#define PySequence_Length PySequence_Size + + +/* Return the concatenation of o1 and o2 on success, and NULL on failure. + + This is the equivalent of the Python expression: o1 + o2. */ +PyAPI_FUNC(PyObject *) PySequence_Concat(PyObject *o1, PyObject *o2); + +/* Return the result of repeating sequence object 'o' 'count' times, + or NULL on failure. + + This is the equivalent of the Python expression: o * count. */ +PyAPI_FUNC(PyObject *) PySequence_Repeat(PyObject *o, Py_ssize_t count); + +/* Return the ith element of o, or NULL on failure. + + This is the equivalent of the Python expression: o[i]. */ +PyAPI_FUNC(PyObject *) PySequence_GetItem(PyObject *o, Py_ssize_t i); + +/* Return the slice of sequence object o between i1 and i2, or NULL on failure. + + This is the equivalent of the Python expression: o[i1:i2]. */ +PyAPI_FUNC(PyObject *) PySequence_GetSlice(PyObject *o, Py_ssize_t i1, Py_ssize_t i2); + +/* Assign object 'v' to the ith element of the sequence 'o'. Raise an exception + and return -1 on failure; return 0 on success. + + This is the equivalent of the Python statement o[i] = v. */ +PyAPI_FUNC(int) PySequence_SetItem(PyObject *o, Py_ssize_t i, PyObject *v); + +/* Delete the 'i'-th element of the sequence 'v'. Returns -1 on failure. + + This is the equivalent of the Python statement: del o[i]. */ +PyAPI_FUNC(int) PySequence_DelItem(PyObject *o, Py_ssize_t i); + +/* Assign the sequence object 'v' to the slice in sequence object 'o', + from 'i1' to 'i2'. Returns -1 on failure. + + This is the equivalent of the Python statement: o[i1:i2] = v. */ +PyAPI_FUNC(int) PySequence_SetSlice(PyObject *o, Py_ssize_t i1, Py_ssize_t i2, + PyObject *v); + +/* Delete the slice in sequence object 'o' from 'i1' to 'i2'. + Returns -1 on failure. + + This is the equivalent of the Python statement: del o[i1:i2]. */ +PyAPI_FUNC(int) PySequence_DelSlice(PyObject *o, Py_ssize_t i1, Py_ssize_t i2); + +/* Returns the sequence 'o' as a tuple on success, and NULL on failure. + + This is equivalent to the Python expression: tuple(o). */ +PyAPI_FUNC(PyObject *) PySequence_Tuple(PyObject *o); + +/* Returns the sequence 'o' as a list on success, and NULL on failure. + This is equivalent to the Python expression: list(o) */ +PyAPI_FUNC(PyObject *) PySequence_List(PyObject *o); + +/* Return the sequence 'o' as a list, unless it's already a tuple or list. + + Use PySequence_Fast_GET_ITEM to access the members of this list, and + PySequence_Fast_GET_SIZE to get its length. + + Returns NULL on failure. If the object does not support iteration, raises a + TypeError exception with 'm' as the message text. */ +PyAPI_FUNC(PyObject *) PySequence_Fast(PyObject *o, const char* m); + +/* Return the size of the sequence 'o', assuming that 'o' was returned by + PySequence_Fast and is not NULL. */ +#define PySequence_Fast_GET_SIZE(o) \ + (PyList_Check(o) ? 
PyList_GET_SIZE(o) : PyTuple_GET_SIZE(o)) + +/* Return the 'i'-th element of the sequence 'o', assuming that o was returned + by PySequence_Fast, and that i is within bounds. */ +#define PySequence_Fast_GET_ITEM(o, i)\ + (PyList_Check(o) ? PyList_GET_ITEM(o, i) : PyTuple_GET_ITEM(o, i)) + +/* Assume tp_as_sequence and sq_item exist and that 'i' does not + need to be corrected for a negative index. */ +#define PySequence_ITEM(o, i)\ + ( Py_TYPE(o)->tp_as_sequence->sq_item(o, i) ) + +/* Return a pointer to the underlying item array for + an object retured by PySequence_Fast */ +#define PySequence_Fast_ITEMS(sf) \ + (PyList_Check(sf) ? ((PyListObject *)(sf))->ob_item \ + : ((PyTupleObject *)(sf))->ob_item) + +/* Return the number of occurrences on value on 'o', that is, return + the number of keys for which o[key] == value. + + On failure, return -1. This is equivalent to the Python expression: + o.count(value). */ +PyAPI_FUNC(Py_ssize_t) PySequence_Count(PyObject *o, PyObject *value); + +/* Return 1 if 'ob' is in the sequence 'seq'; 0 if 'ob' is not in the sequence + 'seq'; -1 on error. + + Use __contains__ if possible, else _PySequence_IterSearch(). */ +PyAPI_FUNC(int) PySequence_Contains(PyObject *seq, PyObject *ob); + +#ifndef Py_LIMITED_API +#define PY_ITERSEARCH_COUNT 1 +#define PY_ITERSEARCH_INDEX 2 +#define PY_ITERSEARCH_CONTAINS 3 + +/* Iterate over seq. + + Result depends on the operation: + + PY_ITERSEARCH_COUNT: return # of times obj appears in seq; -1 if + error. + PY_ITERSEARCH_INDEX: return 0-based index of first occurrence of + obj in seq; set ValueError and return -1 if none found; + also return -1 on error. + PY_ITERSEARCH_CONTAINS: return 1 if obj in seq, else 0; -1 on + error. */ +PyAPI_FUNC(Py_ssize_t) _PySequence_IterSearch(PyObject *seq, + PyObject *obj, int operation); +#endif + + +/* For DLL-level backwards compatibility */ +#undef PySequence_In +/* Determine if the sequence 'o' contains 'value'. If an item in 'o' is equal + to 'value', return 1, otherwise return 0. On error, return -1. + + This is equivalent to the Python expression: value in o. */ +PyAPI_FUNC(int) PySequence_In(PyObject *o, PyObject *value); + +/* For source-level backwards compatibility */ +#define PySequence_In PySequence_Contains + + +/* Return the first index for which o[i] == value. + On error, return -1. + + This is equivalent to the Python expression: o.index(value). */ +PyAPI_FUNC(Py_ssize_t) PySequence_Index(PyObject *o, PyObject *value); + + +/* --- In-place versions of some of the above Sequence functions --- */ + +/* Append sequence 'o2' to sequence 'o1', in-place when possible. Return the + resulting object, which could be 'o1', or NULL on failure. + + This is the equivalent of the Python expression: o1 += o2. */ +PyAPI_FUNC(PyObject *) PySequence_InPlaceConcat(PyObject *o1, PyObject *o2); + +/* Repeat sequence 'o' by 'count', in-place when possible. Return the resulting + object, which could be 'o', or NULL on failure. + + This is the equivalent of the Python expression: o1 *= count. */ +PyAPI_FUNC(PyObject *) PySequence_InPlaceRepeat(PyObject *o, Py_ssize_t count); + + +/* === Mapping protocol ================================================= */ + +/* Return 1 if the object provides mapping protocol, and 0 otherwise. + + This function always succeeds. */ +PyAPI_FUNC(int) PyMapping_Check(PyObject *o); + +/* Returns the number of keys in mapping object 'o' on success, and -1 on + failure. This is equivalent to the Python expression: len(o). 
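PySequence_Fast() together with the GET_SIZE/GET_ITEM macros above is the standard fast path for walking "any iterable" from C: the iterable is materialised once as a list or tuple, after which the items are borrowed references. Sketch for illustration only (hypothetical helper name):

```c
#include <Python.h>

/* Hypothetical helper: return how many items of an iterable are truthy. */
static PyObject *
count_true(PyObject *self, PyObject *iterable)
{
    /* list(iterable) unless it is already a list/tuple; new reference.
       The message is used for the TypeError if iteration is unsupported. */
    PyObject *fast = PySequence_Fast(iterable, "expected an iterable");
    if (fast == NULL) {
        return NULL;
    }

    Py_ssize_t n = PySequence_Fast_GET_SIZE(fast);
    Py_ssize_t hits = 0;
    for (Py_ssize_t i = 0; i < n; i++) {
        /* Borrowed reference: no DECREF needed. */
        PyObject *item = PySequence_Fast_GET_ITEM(fast, i);
        int truth = PyObject_IsTrue(item);
        if (truth < 0) {
            Py_DECREF(fast);
            return NULL;
        }
        hits += truth;
    }
    Py_DECREF(fast);
    return PyLong_FromSsize_t(hits);
}
```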
*/ +PyAPI_FUNC(Py_ssize_t) PyMapping_Size(PyObject *o); + +/* For DLL compatibility */ +#undef PyMapping_Length +PyAPI_FUNC(Py_ssize_t) PyMapping_Length(PyObject *o); +#define PyMapping_Length PyMapping_Size + + +/* Implemented as a macro: + + int PyMapping_DelItemString(PyObject *o, const char *key); + + Remove the mapping for the string 'key' from the mapping 'o'. Returns -1 on + failure. + + This is equivalent to the Python statement: del o[key]. */ +#define PyMapping_DelItemString(O,K) PyObject_DelItemString((O),(K)) + +/* Implemented as a macro: + + int PyMapping_DelItem(PyObject *o, PyObject *key); + + Remove the mapping for the object 'key' from the mapping object 'o'. + Returns -1 on failure. + + This is equivalent to the Python statement: del o[key]. */ +#define PyMapping_DelItem(O,K) PyObject_DelItem((O),(K)) + +/* On success, return 1 if the mapping object 'o' has the key 'key', + and 0 otherwise. + + This is equivalent to the Python expression: key in o. + + This function always succeeds. */ +PyAPI_FUNC(int) PyMapping_HasKeyString(PyObject *o, const char *key); + +/* Return 1 if the mapping object has the key 'key', and 0 otherwise. + + This is equivalent to the Python expression: key in o. + + This function always succeeds. */ +PyAPI_FUNC(int) PyMapping_HasKey(PyObject *o, PyObject *key); + +/* On success, return a list or tuple of the keys in mapping object 'o'. + On failure, return NULL. */ +PyAPI_FUNC(PyObject *) PyMapping_Keys(PyObject *o); + +/* On success, return a list or tuple of the values in mapping object 'o'. + On failure, return NULL. */ +PyAPI_FUNC(PyObject *) PyMapping_Values(PyObject *o); + +/* On success, return a list or tuple of the items in mapping object 'o', + where each item is a tuple containing a key-value pair. On failure, return + NULL. */ +PyAPI_FUNC(PyObject *) PyMapping_Items(PyObject *o); + +/* Return element of 'o' corresponding to the string 'key' or NULL on failure. + + This is the equivalent of the Python expression: o[key]. */ +PyAPI_FUNC(PyObject *) PyMapping_GetItemString(PyObject *o, + const char *key); + +/* Map the string 'key' to the value 'v' in the mapping 'o'. + Returns -1 on failure. + + This is the equivalent of the Python statement: o[key]=v. */ +PyAPI_FUNC(int) PyMapping_SetItemString(PyObject *o, const char *key, + PyObject *value); + +/* isinstance(object, typeorclass) */ +PyAPI_FUNC(int) PyObject_IsInstance(PyObject *object, PyObject *typeorclass); + +/* issubclass(object, typeorclass) */ +PyAPI_FUNC(int) PyObject_IsSubclass(PyObject *object, PyObject *typeorclass); + + +#ifndef Py_LIMITED_API +PyAPI_FUNC(int) _PyObject_RealIsInstance(PyObject *inst, PyObject *cls); + +PyAPI_FUNC(int) _PyObject_RealIsSubclass(PyObject *derived, PyObject *cls); + +PyAPI_FUNC(char *const *) _PySequence_BytesToCharpArray(PyObject* self); + +PyAPI_FUNC(void) _Py_FreeCharPArray(char *const array[]); + +/* For internal use by buffer API functions */ +PyAPI_FUNC(void) _Py_add_one_to_index_F(int nd, Py_ssize_t *index, + const Py_ssize_t *shape); +PyAPI_FUNC(void) _Py_add_one_to_index_C(int nd, Py_ssize_t *index, + const Py_ssize_t *shape); + +/* Convert Python int to Py_ssize_t. Do nothing if the argument is None. 
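The *String convenience variants above save the caller from building a key object by hand. A hedged sketch (not part of the vendored header, helper name hypothetical) that stores a value under a C string key and reads it back:

```c
#include <Python.h>

/* Hypothetical helper: config["debug"] = True, then return config["debug"]. */
static PyObject *
toggle_debug(PyObject *self, PyObject *config)
{
    /* o["debug"] = True : 0 on success, -1 with an exception on failure. */
    if (PyMapping_SetItemString(config, "debug", Py_True) < 0) {
        return NULL;
    }

    /* "debug" in o : always succeeds, returns 1 or 0 (errors are swallowed). */
    if (!PyMapping_HasKeyString(config, "debug")) {
        Py_RETURN_NONE;   /* not reached for a well-behaved mapping */
    }

    /* o["debug"] : new reference, or NULL with an exception set. */
    return PyMapping_GetItemString(config, "debug");
}
```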
*/ +PyAPI_FUNC(int) _Py_convert_optional_to_ssize_t(PyObject *, void *); +#endif /* !Py_LIMITED_API */ + + +#ifdef __cplusplus +} +#endif +#endif /* Py_ABSTRACTOBJECT_H */ diff --git a/venv/Include/accu.h b/venv/Include/accu.h new file mode 100644 index 00000000..3636ea6c --- /dev/null +++ b/venv/Include/accu.h @@ -0,0 +1,37 @@ +#ifndef Py_LIMITED_API +#ifndef Py_ACCU_H +#define Py_ACCU_H + +/*** This is a private API for use by the interpreter and the stdlib. + *** Its definition may be changed or removed at any moment. + ***/ + +/* + * A two-level accumulator of unicode objects that avoids both the overhead + * of keeping a huge number of small separate objects, and the quadratic + * behaviour of using a naive repeated concatenation scheme. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#undef small /* defined by some Windows headers */ + +typedef struct { + PyObject *large; /* A list of previously accumulated large strings */ + PyObject *small; /* Pending small strings */ +} _PyAccu; + +PyAPI_FUNC(int) _PyAccu_Init(_PyAccu *acc); +PyAPI_FUNC(int) _PyAccu_Accumulate(_PyAccu *acc, PyObject *unicode); +PyAPI_FUNC(PyObject *) _PyAccu_FinishAsList(_PyAccu *acc); +PyAPI_FUNC(PyObject *) _PyAccu_Finish(_PyAccu *acc); +PyAPI_FUNC(void) _PyAccu_Destroy(_PyAccu *acc); + +#ifdef __cplusplus +} +#endif + +#endif /* Py_ACCU_H */ +#endif /* Py_LIMITED_API */ diff --git a/venv/Include/asdl.h b/venv/Include/asdl.h new file mode 100644 index 00000000..35e9fa18 --- /dev/null +++ b/venv/Include/asdl.h @@ -0,0 +1,46 @@ +#ifndef Py_ASDL_H +#define Py_ASDL_H + +typedef PyObject * identifier; +typedef PyObject * string; +typedef PyObject * bytes; +typedef PyObject * object; +typedef PyObject * singleton; +typedef PyObject * constant; + +/* It would be nice if the code generated by asdl_c.py was completely + independent of Python, but it is a goal the requires too much work + at this stage. So, for example, I'll represent identifiers as + interned Python strings. +*/ + +/* XXX A sequence should be typed so that its use can be typechecked. */ + +typedef struct { + Py_ssize_t size; + void *elements[1]; +} asdl_seq; + +typedef struct { + Py_ssize_t size; + int elements[1]; +} asdl_int_seq; + +asdl_seq *_Py_asdl_seq_new(Py_ssize_t size, PyArena *arena); +asdl_int_seq *_Py_asdl_int_seq_new(Py_ssize_t size, PyArena *arena); + +#define asdl_seq_GET(S, I) (S)->elements[(I)] +#define asdl_seq_LEN(S) ((S) == NULL ? 
0 : (S)->size) +#ifdef Py_DEBUG +#define asdl_seq_SET(S, I, V) \ + do { \ + Py_ssize_t _asdl_i = (I); \ + assert((S) != NULL); \ + assert(_asdl_i < (S)->size); \ + (S)->elements[_asdl_i] = (V); \ + } while (0) +#else +#define asdl_seq_SET(S, I, V) (S)->elements[I] = (V) +#endif + +#endif /* !Py_ASDL_H */ diff --git a/venv/Include/ast.h b/venv/Include/ast.h new file mode 100644 index 00000000..5bc2b05b --- /dev/null +++ b/venv/Include/ast.h @@ -0,0 +1,29 @@ +#ifndef Py_AST_H +#define Py_AST_H +#ifdef __cplusplus +extern "C" { +#endif + +PyAPI_FUNC(int) PyAST_Validate(mod_ty); +PyAPI_FUNC(mod_ty) PyAST_FromNode( + const node *n, + PyCompilerFlags *flags, + const char *filename, /* decoded from the filesystem encoding */ + PyArena *arena); +PyAPI_FUNC(mod_ty) PyAST_FromNodeObject( + const node *n, + PyCompilerFlags *flags, + PyObject *filename, + PyArena *arena); + +#ifndef Py_LIMITED_API + +/* _PyAST_ExprAsUnicode is defined in ast_unparse.c */ +PyAPI_FUNC(PyObject *) _PyAST_ExprAsUnicode(expr_ty); + +#endif /* !Py_LIMITED_API */ + +#ifdef __cplusplus +} +#endif +#endif /* !Py_AST_H */ diff --git a/venv/Include/bitset.h b/venv/Include/bitset.h new file mode 100644 index 00000000..b22fa778 --- /dev/null +++ b/venv/Include/bitset.h @@ -0,0 +1,32 @@ + +#ifndef Py_BITSET_H +#define Py_BITSET_H +#ifdef __cplusplus +extern "C" { +#endif + +/* Bitset interface */ + +#define BYTE char + +typedef BYTE *bitset; + +bitset newbitset(int nbits); +void delbitset(bitset bs); +#define testbit(ss, ibit) (((ss)[BIT2BYTE(ibit)] & BIT2MASK(ibit)) != 0) +int addbit(bitset bs, int ibit); /* Returns 0 if already set */ +int samebitset(bitset bs1, bitset bs2, int nbits); +void mergebitset(bitset bs1, bitset bs2, int nbits); + +#define BITSPERBYTE (8*sizeof(BYTE)) +#define NBYTES(nbits) (((nbits) + BITSPERBYTE - 1) / BITSPERBYTE) + +#define BIT2BYTE(ibit) ((ibit) / BITSPERBYTE) +#define BIT2SHIFT(ibit) ((ibit) % BITSPERBYTE) +#define BIT2MASK(ibit) (1 << BIT2SHIFT(ibit)) +#define BYTE2BIT(ibyte) ((ibyte) * BITSPERBYTE) + +#ifdef __cplusplus +} +#endif +#endif /* !Py_BITSET_H */ diff --git a/venv/Include/bltinmodule.h b/venv/Include/bltinmodule.h new file mode 100644 index 00000000..868c9e64 --- /dev/null +++ b/venv/Include/bltinmodule.h @@ -0,0 +1,14 @@ +#ifndef Py_BLTINMODULE_H +#define Py_BLTINMODULE_H +#ifdef __cplusplus +extern "C" { +#endif + +PyAPI_DATA(PyTypeObject) PyFilter_Type; +PyAPI_DATA(PyTypeObject) PyMap_Type; +PyAPI_DATA(PyTypeObject) PyZip_Type; + +#ifdef __cplusplus +} +#endif +#endif /* !Py_BLTINMODULE_H */ diff --git a/venv/Include/boolobject.h b/venv/Include/boolobject.h new file mode 100644 index 00000000..7cc2f1fe --- /dev/null +++ b/venv/Include/boolobject.h @@ -0,0 +1,34 @@ +/* Boolean object interface */ + +#ifndef Py_BOOLOBJECT_H +#define Py_BOOLOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + + +PyAPI_DATA(PyTypeObject) PyBool_Type; + +#define PyBool_Check(x) (Py_TYPE(x) == &PyBool_Type) + +/* Py_False and Py_True are the only two bools in existence. +Don't forget to apply Py_INCREF() when returning either!!! 
*/ + +/* Don't use these directly */ +PyAPI_DATA(struct _longobject) _Py_FalseStruct, _Py_TrueStruct; + +/* Use these macros */ +#define Py_False ((PyObject *) &_Py_FalseStruct) +#define Py_True ((PyObject *) &_Py_TrueStruct) + +/* Macros for returning Py_True or Py_False, respectively */ +#define Py_RETURN_TRUE return Py_INCREF(Py_True), Py_True +#define Py_RETURN_FALSE return Py_INCREF(Py_False), Py_False + +/* Function to return a bool from a C long */ +PyAPI_FUNC(PyObject *) PyBool_FromLong(long); + +#ifdef __cplusplus +} +#endif +#endif /* !Py_BOOLOBJECT_H */ diff --git a/venv/Include/bytearrayobject.h b/venv/Include/bytearrayobject.h new file mode 100644 index 00000000..a757b880 --- /dev/null +++ b/venv/Include/bytearrayobject.h @@ -0,0 +1,62 @@ +/* ByteArray object interface */ + +#ifndef Py_BYTEARRAYOBJECT_H +#define Py_BYTEARRAYOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +#include + +/* Type PyByteArrayObject represents a mutable array of bytes. + * The Python API is that of a sequence; + * the bytes are mapped to ints in [0, 256). + * Bytes are not characters; they may be used to encode characters. + * The only way to go between bytes and str/unicode is via encoding + * and decoding. + * For the convenience of C programmers, the bytes type is considered + * to contain a char pointer, not an unsigned char pointer. + */ + +/* Object layout */ +#ifndef Py_LIMITED_API +typedef struct { + PyObject_VAR_HEAD + Py_ssize_t ob_alloc; /* How many bytes allocated in ob_bytes */ + char *ob_bytes; /* Physical backing buffer */ + char *ob_start; /* Logical start inside ob_bytes */ + /* XXX(nnorwitz): should ob_exports be Py_ssize_t? */ + int ob_exports; /* How many buffer exports */ +} PyByteArrayObject; +#endif + +/* Type object */ +PyAPI_DATA(PyTypeObject) PyByteArray_Type; +PyAPI_DATA(PyTypeObject) PyByteArrayIter_Type; + +/* Type check macros */ +#define PyByteArray_Check(self) PyObject_TypeCheck(self, &PyByteArray_Type) +#define PyByteArray_CheckExact(self) (Py_TYPE(self) == &PyByteArray_Type) + +/* Direct API functions */ +PyAPI_FUNC(PyObject *) PyByteArray_FromObject(PyObject *); +PyAPI_FUNC(PyObject *) PyByteArray_Concat(PyObject *, PyObject *); +PyAPI_FUNC(PyObject *) PyByteArray_FromStringAndSize(const char *, Py_ssize_t); +PyAPI_FUNC(Py_ssize_t) PyByteArray_Size(PyObject *); +PyAPI_FUNC(char *) PyByteArray_AsString(PyObject *); +PyAPI_FUNC(int) PyByteArray_Resize(PyObject *, Py_ssize_t); + +/* Macros, trading safety for speed */ +#ifndef Py_LIMITED_API +#define PyByteArray_AS_STRING(self) \ + (assert(PyByteArray_Check(self)), \ + Py_SIZE(self) ? ((PyByteArrayObject *)(self))->ob_start : _PyByteArray_empty_string) +#define PyByteArray_GET_SIZE(self) (assert(PyByteArray_Check(self)), Py_SIZE(self)) + +PyAPI_DATA(char) _PyByteArray_empty_string[]; +#endif + +#ifdef __cplusplus +} +#endif +#endif /* !Py_BYTEARRAYOBJECT_H */ diff --git a/venv/Include/bytes_methods.h b/venv/Include/bytes_methods.h new file mode 100644 index 00000000..8434a50a --- /dev/null +++ b/venv/Include/bytes_methods.h @@ -0,0 +1,69 @@ +#ifndef Py_LIMITED_API +#ifndef Py_BYTES_CTYPE_H +#define Py_BYTES_CTYPE_H + +/* + * The internal implementation behind PyBytes (bytes) and PyByteArray (bytearray) + * methods of the given names, they operate on ASCII byte strings. 
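Since a bytearray owns a resizable buffer, the usual C-side pattern with the API above is to create it at a known size and then grow or shrink it with PyByteArray_Resize() once the real length is known. Illustrative sketch only, not part of the header, helper name hypothetical:

```c
#include <Python.h>
#include <string.h>

/* Hypothetical helper: build a bytearray from a C string and reserve
   16 extra bytes that a later step might fill in. */
static PyObject *
make_scratch(const char *text)
{
    Py_ssize_t len = (Py_ssize_t)strlen(text);

    PyObject *ba = PyByteArray_FromStringAndSize(text, len);
    if (ba == NULL) {
        return NULL;
    }

    /* Grow the buffer; the existing contents are preserved. */
    if (PyByteArray_Resize(ba, len + 16) < 0) {
        Py_DECREF(ba);
        return NULL;
    }

    /* Zero the new tail through the raw char* view of the buffer. */
    memset(PyByteArray_AsString(ba) + len, 0, 16);
    return ba;
}
```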
+ */ +extern PyObject* _Py_bytes_isspace(const char *cptr, Py_ssize_t len); +extern PyObject* _Py_bytes_isalpha(const char *cptr, Py_ssize_t len); +extern PyObject* _Py_bytes_isalnum(const char *cptr, Py_ssize_t len); +extern PyObject* _Py_bytes_isascii(const char *cptr, Py_ssize_t len); +extern PyObject* _Py_bytes_isdigit(const char *cptr, Py_ssize_t len); +extern PyObject* _Py_bytes_islower(const char *cptr, Py_ssize_t len); +extern PyObject* _Py_bytes_isupper(const char *cptr, Py_ssize_t len); +extern PyObject* _Py_bytes_istitle(const char *cptr, Py_ssize_t len); + +/* These store their len sized answer in the given preallocated *result arg. */ +extern void _Py_bytes_lower(char *result, const char *cptr, Py_ssize_t len); +extern void _Py_bytes_upper(char *result, const char *cptr, Py_ssize_t len); +extern void _Py_bytes_title(char *result, const char *s, Py_ssize_t len); +extern void _Py_bytes_capitalize(char *result, const char *s, Py_ssize_t len); +extern void _Py_bytes_swapcase(char *result, const char *s, Py_ssize_t len); + +extern PyObject *_Py_bytes_find(const char *str, Py_ssize_t len, PyObject *args); +extern PyObject *_Py_bytes_index(const char *str, Py_ssize_t len, PyObject *args); +extern PyObject *_Py_bytes_rfind(const char *str, Py_ssize_t len, PyObject *args); +extern PyObject *_Py_bytes_rindex(const char *str, Py_ssize_t len, PyObject *args); +extern PyObject *_Py_bytes_count(const char *str, Py_ssize_t len, PyObject *args); +extern int _Py_bytes_contains(const char *str, Py_ssize_t len, PyObject *arg); +extern PyObject *_Py_bytes_startswith(const char *str, Py_ssize_t len, PyObject *args); +extern PyObject *_Py_bytes_endswith(const char *str, Py_ssize_t len, PyObject *args); + +/* The maketrans() static method. */ +extern PyObject* _Py_bytes_maketrans(Py_buffer *frm, Py_buffer *to); + +/* Shared __doc__ strings. */ +extern const char _Py_isspace__doc__[]; +extern const char _Py_isalpha__doc__[]; +extern const char _Py_isalnum__doc__[]; +extern const char _Py_isascii__doc__[]; +extern const char _Py_isdigit__doc__[]; +extern const char _Py_islower__doc__[]; +extern const char _Py_isupper__doc__[]; +extern const char _Py_istitle__doc__[]; +extern const char _Py_lower__doc__[]; +extern const char _Py_upper__doc__[]; +extern const char _Py_title__doc__[]; +extern const char _Py_capitalize__doc__[]; +extern const char _Py_swapcase__doc__[]; +extern const char _Py_count__doc__[]; +extern const char _Py_find__doc__[]; +extern const char _Py_index__doc__[]; +extern const char _Py_rfind__doc__[]; +extern const char _Py_rindex__doc__[]; +extern const char _Py_startswith__doc__[]; +extern const char _Py_endswith__doc__[]; +extern const char _Py_maketrans__doc__[]; +extern const char _Py_expandtabs__doc__[]; +extern const char _Py_ljust__doc__[]; +extern const char _Py_rjust__doc__[]; +extern const char _Py_center__doc__[]; +extern const char _Py_zfill__doc__[]; + +/* this is needed because some docs are shared from the .o, not static */ +#define PyDoc_STRVAR_shared(name,str) const char name[] = PyDoc_STR(str) + +#endif /* !Py_BYTES_CTYPE_H */ +#endif /* !Py_LIMITED_API */ diff --git a/venv/Include/bytesobject.h b/venv/Include/bytesobject.h new file mode 100644 index 00000000..3fde4a22 --- /dev/null +++ b/venv/Include/bytesobject.h @@ -0,0 +1,224 @@ + +/* Bytes (String) object interface */ + +#ifndef Py_BYTESOBJECT_H +#define Py_BYTESOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +#include + +/* +Type PyBytesObject represents a character string. 
An extra zero byte is +reserved at the end to ensure it is zero-terminated, but a size is +present so strings with null bytes in them can be represented. This +is an immutable object type. + +There are functions to create new string objects, to test +an object for string-ness, and to get the +string value. The latter function returns a null pointer +if the object is not of the proper type. +There is a variant that takes an explicit size as well as a +variant that assumes a zero-terminated string. Note that none of the +functions should be applied to nil objects. +*/ + +/* Caching the hash (ob_shash) saves recalculation of a string's hash value. + This significantly speeds up dict lookups. */ + +#ifndef Py_LIMITED_API +typedef struct { + PyObject_VAR_HEAD + Py_hash_t ob_shash; + char ob_sval[1]; + + /* Invariants: + * ob_sval contains space for 'ob_size+1' elements. + * ob_sval[ob_size] == 0. + * ob_shash is the hash of the string or -1 if not computed yet. + */ +} PyBytesObject; +#endif + +PyAPI_DATA(PyTypeObject) PyBytes_Type; +PyAPI_DATA(PyTypeObject) PyBytesIter_Type; + +#define PyBytes_Check(op) \ + PyType_FastSubclass(Py_TYPE(op), Py_TPFLAGS_BYTES_SUBCLASS) +#define PyBytes_CheckExact(op) (Py_TYPE(op) == &PyBytes_Type) + +PyAPI_FUNC(PyObject *) PyBytes_FromStringAndSize(const char *, Py_ssize_t); +PyAPI_FUNC(PyObject *) PyBytes_FromString(const char *); +PyAPI_FUNC(PyObject *) PyBytes_FromObject(PyObject *); +PyAPI_FUNC(PyObject *) PyBytes_FromFormatV(const char*, va_list) + Py_GCC_ATTRIBUTE((format(printf, 1, 0))); +PyAPI_FUNC(PyObject *) PyBytes_FromFormat(const char*, ...) + Py_GCC_ATTRIBUTE((format(printf, 1, 2))); +PyAPI_FUNC(Py_ssize_t) PyBytes_Size(PyObject *); +PyAPI_FUNC(char *) PyBytes_AsString(PyObject *); +PyAPI_FUNC(PyObject *) PyBytes_Repr(PyObject *, int); +PyAPI_FUNC(void) PyBytes_Concat(PyObject **, PyObject *); +PyAPI_FUNC(void) PyBytes_ConcatAndDel(PyObject **, PyObject *); +#ifndef Py_LIMITED_API +PyAPI_FUNC(int) _PyBytes_Resize(PyObject **, Py_ssize_t); +PyAPI_FUNC(PyObject*) _PyBytes_FormatEx( + const char *format, + Py_ssize_t format_len, + PyObject *args, + int use_bytearray); +PyAPI_FUNC(PyObject*) _PyBytes_FromHex( + PyObject *string, + int use_bytearray); +#endif +PyAPI_FUNC(PyObject *) PyBytes_DecodeEscape(const char *, Py_ssize_t, + const char *, Py_ssize_t, + const char *); +#ifndef Py_LIMITED_API +/* Helper for PyBytes_DecodeEscape that detects invalid escape chars. */ +PyAPI_FUNC(PyObject *) _PyBytes_DecodeEscape(const char *, Py_ssize_t, + const char *, Py_ssize_t, + const char *, + const char **); +#endif + +/* Macro, trading safety for speed */ +#ifndef Py_LIMITED_API +#define PyBytes_AS_STRING(op) (assert(PyBytes_Check(op)), \ + (((PyBytesObject *)(op))->ob_sval)) +#define PyBytes_GET_SIZE(op) (assert(PyBytes_Check(op)),Py_SIZE(op)) +#endif + +/* _PyBytes_Join(sep, x) is like sep.join(x). sep must be PyBytesObject*, + x must be an iterable object. */ +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject *) _PyBytes_Join(PyObject *sep, PyObject *x); +#endif + +/* Provides access to the internal data buffer and size of a string + object or the default encoded version of a Unicode object. Passing + NULL as *len parameter will force the string buffer to be + 0-terminated (passing a string with embedded NULL characters will + cause an exception). 
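PyBytes_AsStringAndSize(), declared just below, is the safe counterpart of PyBytes_AsString() when embedded NUL bytes are possible, because the length is reported separately instead of being inferred from the terminator. A hedged sketch (illustrative only, hypothetical helper name):

```c
#include <Python.h>

/* Hypothetical helper: report the length of a bytes object and whether
   it contains an embedded NUL byte. */
static PyObject *
describe_bytes(PyObject *self, PyObject *obj)
{
    char *data;
    Py_ssize_t len;

    /* Fails with TypeError if obj is not a bytes object. */
    if (PyBytes_AsStringAndSize(obj, &data, &len) < 0) {
        return NULL;
    }

    int has_nul = 0;
    for (Py_ssize_t i = 0; i < len; i++) {
        if (data[i] == '\0') {
            has_nul = 1;
            break;
        }
    }

    /* printf-style constructor declared above (%zd takes a Py_ssize_t). */
    return PyBytes_FromFormat("len=%zd nul=%d", len, has_nul);
}
```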
*/ +PyAPI_FUNC(int) PyBytes_AsStringAndSize( + PyObject *obj, /* string or Unicode object */ + char **s, /* pointer to buffer variable */ + Py_ssize_t *len /* pointer to length variable or NULL + (only possible for 0-terminated + strings) */ + ); + +/* Using the current locale, insert the thousands grouping + into the string pointed to by buffer. For the argument descriptions, + see Objects/stringlib/localeutil.h */ +#ifndef Py_LIMITED_API +PyAPI_FUNC(Py_ssize_t) _PyBytes_InsertThousandsGroupingLocale(char *buffer, + Py_ssize_t n_buffer, + char *digits, + Py_ssize_t n_digits, + Py_ssize_t min_width); + +/* Using explicit passed-in values, insert the thousands grouping + into the string pointed to by buffer. For the argument descriptions, + see Objects/stringlib/localeutil.h */ +PyAPI_FUNC(Py_ssize_t) _PyBytes_InsertThousandsGrouping(char *buffer, + Py_ssize_t n_buffer, + char *digits, + Py_ssize_t n_digits, + Py_ssize_t min_width, + const char *grouping, + const char *thousands_sep); +#endif + +/* Flags used by string formatting */ +#define F_LJUST (1<<0) +#define F_SIGN (1<<1) +#define F_BLANK (1<<2) +#define F_ALT (1<<3) +#define F_ZERO (1<<4) + +#ifndef Py_LIMITED_API +/* The _PyBytesWriter structure is big: it contains an embedded "stack buffer". + A _PyBytesWriter variable must be declared at the end of variables in a + function to optimize the memory allocation on the stack. */ +typedef struct { + /* bytes, bytearray or NULL (when the small buffer is used) */ + PyObject *buffer; + + /* Number of allocated size. */ + Py_ssize_t allocated; + + /* Minimum number of allocated bytes, + incremented by _PyBytesWriter_Prepare() */ + Py_ssize_t min_size; + + /* If non-zero, use a bytearray instead of a bytes object for buffer. */ + int use_bytearray; + + /* If non-zero, overallocate the buffer (default: 0). + This flag must be zero if use_bytearray is non-zero. */ + int overallocate; + + /* Stack buffer */ + int use_small_buffer; + char small_buffer[512]; +} _PyBytesWriter; + +/* Initialize a bytes writer + + By default, the overallocation is disabled. Set the overallocate attribute + to control the allocation of the buffer. */ +PyAPI_FUNC(void) _PyBytesWriter_Init(_PyBytesWriter *writer); + +/* Get the buffer content and reset the writer. + Return a bytes object, or a bytearray object if use_bytearray is non-zero. + Raise an exception and return NULL on error. */ +PyAPI_FUNC(PyObject *) _PyBytesWriter_Finish(_PyBytesWriter *writer, + void *str); + +/* Deallocate memory of a writer (clear its internal buffer). */ +PyAPI_FUNC(void) _PyBytesWriter_Dealloc(_PyBytesWriter *writer); + +/* Allocate the buffer to write size bytes. + Return the pointer to the beginning of buffer data. + Raise an exception and return NULL on error. */ +PyAPI_FUNC(void*) _PyBytesWriter_Alloc(_PyBytesWriter *writer, + Py_ssize_t size); + +/* Ensure that the buffer is large enough to write *size* bytes. + Add size to the writer minimum size (min_size attribute). + + str is the current pointer inside the buffer. + Return the updated current pointer inside the buffer. + Raise an exception and return NULL on error. */ +PyAPI_FUNC(void*) _PyBytesWriter_Prepare(_PyBytesWriter *writer, + void *str, + Py_ssize_t size); + +/* Resize the buffer to make it larger. + The new buffer may be larger than size bytes because of overallocation. + Return the updated current pointer inside the buffer. + Raise an exception and return NULL on error. + + Note: size must be greater than the number of allocated bytes in the writer. 
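The _PyBytesWriter structure above is a private, CPython-internal helper (hence the leading underscore and the "declare it last" stack-layout advice), but the intended call sequence follows from the declarations: Init, Alloc or Prepare, write through the returned cursor, then Finish or Dealloc. A sketch of that sequence, for illustration only and under the assumption that the private API keeps the documented contract:

```c
#include <Python.h>
#include <string.h>

/* Illustrative use of the private writer API: build b"abc" * n.
   Not intended for use outside CPython itself. */
static PyObject *
repeat_abc(Py_ssize_t n)
{
    _PyBytesWriter writer;
    _PyBytesWriter_Init(&writer);

    /* Reserve the full output size up front; 'p' is the write cursor
       pointing at the beginning of the buffer. */
    char *p = _PyBytesWriter_Alloc(&writer, 3 * n);
    if (p == NULL) {
        return NULL;
    }

    for (Py_ssize_t i = 0; i < n; i++) {
        memcpy(p, "abc", 3);
        p += 3;
    }

    /* Hands back a bytes object (or a bytearray if use_bytearray is set)
       and releases the writer's internal buffer. */
    return _PyBytesWriter_Finish(&writer, p);
}
```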
+ + This function doesn't use the writer minimum size (min_size attribute). + + See also _PyBytesWriter_Prepare(). + */ +PyAPI_FUNC(void*) _PyBytesWriter_Resize(_PyBytesWriter *writer, + void *str, + Py_ssize_t size); + +/* Write bytes. + Raise an exception and return NULL on error. */ +PyAPI_FUNC(void*) _PyBytesWriter_WriteBytes(_PyBytesWriter *writer, + void *str, + const void *bytes, + Py_ssize_t size); +#endif /* Py_LIMITED_API */ + +#ifdef __cplusplus +} +#endif +#endif /* !Py_BYTESOBJECT_H */ diff --git a/venv/Include/cellobject.h b/venv/Include/cellobject.h new file mode 100644 index 00000000..2f9b5b75 --- /dev/null +++ b/venv/Include/cellobject.h @@ -0,0 +1,29 @@ +/* Cell object interface */ +#ifndef Py_LIMITED_API +#ifndef Py_CELLOBJECT_H +#define Py_CELLOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct { + PyObject_HEAD + PyObject *ob_ref; /* Content of the cell or NULL when empty */ +} PyCellObject; + +PyAPI_DATA(PyTypeObject) PyCell_Type; + +#define PyCell_Check(op) (Py_TYPE(op) == &PyCell_Type) + +PyAPI_FUNC(PyObject *) PyCell_New(PyObject *); +PyAPI_FUNC(PyObject *) PyCell_Get(PyObject *); +PyAPI_FUNC(int) PyCell_Set(PyObject *, PyObject *); + +#define PyCell_GET(op) (((PyCellObject *)(op))->ob_ref) +#define PyCell_SET(op, v) (((PyCellObject *)(op))->ob_ref = v) + +#ifdef __cplusplus +} +#endif +#endif /* !Py_TUPLEOBJECT_H */ +#endif /* Py_LIMITED_API */ diff --git a/venv/Include/ceval.h b/venv/Include/ceval.h new file mode 100644 index 00000000..bce8a0be --- /dev/null +++ b/venv/Include/ceval.h @@ -0,0 +1,237 @@ +#ifndef Py_CEVAL_H +#define Py_CEVAL_H +#ifdef __cplusplus +extern "C" { +#endif + + +/* Interface to random parts in ceval.c */ + +/* PyEval_CallObjectWithKeywords(), PyEval_CallObject(), PyEval_CallFunction + * and PyEval_CallMethod are kept for backward compatibility: PyObject_Call(), + * PyObject_CallFunction() and PyObject_CallMethod() are recommended to call + * a callable object. + */ + +PyAPI_FUNC(PyObject *) PyEval_CallObjectWithKeywords( + PyObject *callable, + PyObject *args, + PyObject *kwargs); + +/* Inline this */ +#define PyEval_CallObject(callable, arg) \ + PyEval_CallObjectWithKeywords(callable, arg, (PyObject *)NULL) + +PyAPI_FUNC(PyObject *) PyEval_CallFunction(PyObject *callable, + const char *format, ...); +PyAPI_FUNC(PyObject *) PyEval_CallMethod(PyObject *obj, + const char *name, + const char *format, ...); + +#ifndef Py_LIMITED_API +PyAPI_FUNC(void) PyEval_SetProfile(Py_tracefunc, PyObject *); +PyAPI_FUNC(void) PyEval_SetTrace(Py_tracefunc, PyObject *); +PyAPI_FUNC(void) _PyEval_SetCoroutineOriginTrackingDepth(int new_depth); +PyAPI_FUNC(int) _PyEval_GetCoroutineOriginTrackingDepth(void); +PyAPI_FUNC(void) _PyEval_SetCoroutineWrapper(PyObject *); +PyAPI_FUNC(PyObject *) _PyEval_GetCoroutineWrapper(void); +PyAPI_FUNC(void) _PyEval_SetAsyncGenFirstiter(PyObject *); +PyAPI_FUNC(PyObject *) _PyEval_GetAsyncGenFirstiter(void); +PyAPI_FUNC(void) _PyEval_SetAsyncGenFinalizer(PyObject *); +PyAPI_FUNC(PyObject *) _PyEval_GetAsyncGenFinalizer(void); +#endif + +struct _frame; /* Avoid including frameobject.h */ + +PyAPI_FUNC(PyObject *) PyEval_GetBuiltins(void); +PyAPI_FUNC(PyObject *) PyEval_GetGlobals(void); +PyAPI_FUNC(PyObject *) PyEval_GetLocals(void); +PyAPI_FUNC(struct _frame *) PyEval_GetFrame(void); + +/* Look at the current frame's (if any) code's co_flags, and turn on + the corresponding compiler flags in cf->cf_flags. Return 1 if any + flag was set, else return 0. 
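The cell API declared above behaves like a one-slot mutable container; the GET/SET macros skip both the type check and the reference counting that the functions perform. A hedged sketch (not part of the header, helper name hypothetical):

```c
#include <Python.h>

/* Hypothetical helper: wrap a value in a cell, replace it, read it back. */
static PyObject *
cell_roundtrip(PyObject *initial, PyObject *replacement)
{
    PyObject *cell = PyCell_New(initial);   /* new ref; 'initial' may be NULL */
    if (cell == NULL) {
        return NULL;
    }

    /* PyCell_Set() swaps the contents and adjusts refcounts itself. */
    if (PyCell_Set(cell, replacement) < 0) {
        Py_DECREF(cell);
        return NULL;
    }

    /* New reference to the contents (NULL only if the cell is empty). */
    PyObject *value = PyCell_Get(cell);
    Py_DECREF(cell);
    return value;
}
```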
*/ +#ifndef Py_LIMITED_API +PyAPI_FUNC(int) PyEval_MergeCompilerFlags(PyCompilerFlags *cf); +#endif + +PyAPI_FUNC(int) Py_AddPendingCall(int (*func)(void *), void *arg); +PyAPI_FUNC(void) _PyEval_SignalReceived(void); +PyAPI_FUNC(int) Py_MakePendingCalls(void); + +/* Protection against deeply nested recursive calls + + In Python 3.0, this protection has two levels: + * normal anti-recursion protection is triggered when the recursion level + exceeds the current recursion limit. It raises a RecursionError, and sets + the "overflowed" flag in the thread state structure. This flag + temporarily *disables* the normal protection; this allows cleanup code + to potentially outgrow the recursion limit while processing the + RecursionError. + * "last chance" anti-recursion protection is triggered when the recursion + level exceeds "current recursion limit + 50". By construction, this + protection can only be triggered when the "overflowed" flag is set. It + means the cleanup code has itself gone into an infinite loop, or the + RecursionError has been mistakingly ignored. When this protection is + triggered, the interpreter aborts with a Fatal Error. + + In addition, the "overflowed" flag is automatically reset when the + recursion level drops below "current recursion limit - 50". This heuristic + is meant to ensure that the normal anti-recursion protection doesn't get + disabled too long. + + Please note: this scheme has its own limitations. See: + http://mail.python.org/pipermail/python-dev/2008-August/082106.html + for some observations. +*/ +PyAPI_FUNC(void) Py_SetRecursionLimit(int); +PyAPI_FUNC(int) Py_GetRecursionLimit(void); + +#define Py_EnterRecursiveCall(where) \ + (_Py_MakeRecCheck(PyThreadState_GET()->recursion_depth) && \ + _Py_CheckRecursiveCall(where)) +#define Py_LeaveRecursiveCall() \ + do{ if(_Py_MakeEndRecCheck(PyThreadState_GET()->recursion_depth)) \ + PyThreadState_GET()->overflowed = 0; \ + } while(0) +PyAPI_FUNC(int) _Py_CheckRecursiveCall(const char *where); + +/* Due to the macros in which it's used, _Py_CheckRecursionLimit is in + the stable ABI. It should be removed therefrom when possible. +*/ +PyAPI_DATA(int) _Py_CheckRecursionLimit; + +#ifdef USE_STACKCHECK +/* With USE_STACKCHECK, trigger stack checks in _Py_CheckRecursiveCall() + on every 64th call to Py_EnterRecursiveCall. +*/ +# define _Py_MakeRecCheck(x) \ + (++(x) > _Py_CheckRecursionLimit || \ + ++(PyThreadState_GET()->stackcheck_counter) > 64) +#else +# define _Py_MakeRecCheck(x) (++(x) > _Py_CheckRecursionLimit) +#endif + +/* Compute the "lower-water mark" for a recursion limit. When + * Py_LeaveRecursiveCall() is called with a recursion depth below this mark, + * the overflowed flag is reset to 0. */ +#define _Py_RecursionLimitLowerWaterMark(limit) \ + (((limit) > 200) \ + ? 
((limit) - 50) \ + : (3 * ((limit) >> 2))) + +#define _Py_MakeEndRecCheck(x) \ + (--(x) < _Py_RecursionLimitLowerWaterMark(_Py_CheckRecursionLimit)) + +#define Py_ALLOW_RECURSION \ + do { unsigned char _old = PyThreadState_GET()->recursion_critical;\ + PyThreadState_GET()->recursion_critical = 1; + +#define Py_END_ALLOW_RECURSION \ + PyThreadState_GET()->recursion_critical = _old; \ + } while(0); + +PyAPI_FUNC(const char *) PyEval_GetFuncName(PyObject *); +PyAPI_FUNC(const char *) PyEval_GetFuncDesc(PyObject *); + +PyAPI_FUNC(PyObject *) PyEval_EvalFrame(struct _frame *); +PyAPI_FUNC(PyObject *) PyEval_EvalFrameEx(struct _frame *f, int exc); +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject *) _PyEval_EvalFrameDefault(struct _frame *f, int exc); +#endif + +/* Interface for threads. + + A module that plans to do a blocking system call (or something else + that lasts a long time and doesn't touch Python data) can allow other + threads to run as follows: + + ...preparations here... + Py_BEGIN_ALLOW_THREADS + ...blocking system call here... + Py_END_ALLOW_THREADS + ...interpret result here... + + The Py_BEGIN_ALLOW_THREADS/Py_END_ALLOW_THREADS pair expands to a + {}-surrounded block. + To leave the block in the middle (e.g., with return), you must insert + a line containing Py_BLOCK_THREADS before the return, e.g. + + if (...premature_exit...) { + Py_BLOCK_THREADS + PyErr_SetFromErrno(PyExc_OSError); + return NULL; + } + + An alternative is: + + Py_BLOCK_THREADS + if (...premature_exit...) { + PyErr_SetFromErrno(PyExc_OSError); + return NULL; + } + Py_UNBLOCK_THREADS + + For convenience, that the value of 'errno' is restored across + Py_END_ALLOW_THREADS and Py_BLOCK_THREADS. + + WARNING: NEVER NEST CALLS TO Py_BEGIN_ALLOW_THREADS AND + Py_END_ALLOW_THREADS!!! + + The function PyEval_InitThreads() should be called only from + init_thread() in "_threadmodule.c". + + Note that not yet all candidates have been converted to use this + mechanism! +*/ + +PyAPI_FUNC(PyThreadState *) PyEval_SaveThread(void); +PyAPI_FUNC(void) PyEval_RestoreThread(PyThreadState *); + +PyAPI_FUNC(int) PyEval_ThreadsInitialized(void); +PyAPI_FUNC(void) PyEval_InitThreads(void); +#ifndef Py_LIMITED_API +PyAPI_FUNC(void) _PyEval_FiniThreads(void); +#endif /* !Py_LIMITED_API */ +PyAPI_FUNC(void) PyEval_AcquireLock(void) Py_DEPRECATED(3.2); +PyAPI_FUNC(void) PyEval_ReleaseLock(void) /* Py_DEPRECATED(3.2) */; +PyAPI_FUNC(void) PyEval_AcquireThread(PyThreadState *tstate); +PyAPI_FUNC(void) PyEval_ReleaseThread(PyThreadState *tstate); +PyAPI_FUNC(void) PyEval_ReInitThreads(void); + +#ifndef Py_LIMITED_API +PyAPI_FUNC(void) _PyEval_SetSwitchInterval(unsigned long microseconds); +PyAPI_FUNC(unsigned long) _PyEval_GetSwitchInterval(void); +#endif + +#ifndef Py_LIMITED_API +PyAPI_FUNC(Py_ssize_t) _PyEval_RequestCodeExtraIndex(freefunc); +#endif + +#define Py_BEGIN_ALLOW_THREADS { \ + PyThreadState *_save; \ + _save = PyEval_SaveThread(); +#define Py_BLOCK_THREADS PyEval_RestoreThread(_save); +#define Py_UNBLOCK_THREADS _save = PyEval_SaveThread(); +#define Py_END_ALLOW_THREADS PyEval_RestoreThread(_save); \ + } + +#ifndef Py_LIMITED_API +PyAPI_FUNC(int) _PyEval_SliceIndex(PyObject *, Py_ssize_t *); +PyAPI_FUNC(int) _PyEval_SliceIndexNotNone(PyObject *, Py_ssize_t *); +PyAPI_FUNC(void) _PyEval_SignalAsyncExc(void); +#endif + +/* Masks and values used by FORMAT_VALUE opcode. 
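The Py_BEGIN_ALLOW_THREADS / Py_END_ALLOW_THREADS pair documented above is how extension code releases the GIL around a blocking operation; the macros expand to a block that saves and later restores the thread state. Illustrative sketch only (the sleep call stands in for any long-running I/O; helper name hypothetical):

```c
#include <Python.h>

#ifdef MS_WINDOWS
#  include <windows.h>
#else
#  include <unistd.h>
#endif

/* Hypothetical helper: block for one second without holding the GIL. */
static PyObject *
sleep_one_second(PyObject *self, PyObject *ignored)
{
    Py_BEGIN_ALLOW_THREADS          /* _save = PyEval_SaveThread(); */
#ifdef MS_WINDOWS
    Sleep(1000);                    /* milliseconds */
#else
    sleep(1);                       /* seconds */
#endif
    Py_END_ALLOW_THREADS            /* PyEval_RestoreThread(_save); */

    Py_RETURN_NONE;
}
```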
*/ +#define FVC_MASK 0x3 +#define FVC_NONE 0x0 +#define FVC_STR 0x1 +#define FVC_REPR 0x2 +#define FVC_ASCII 0x3 +#define FVS_MASK 0x4 +#define FVS_HAVE_SPEC 0x4 + +#ifdef __cplusplus +} +#endif +#endif /* !Py_CEVAL_H */ diff --git a/venv/Include/classobject.h b/venv/Include/classobject.h new file mode 100644 index 00000000..209f0f4a --- /dev/null +++ b/venv/Include/classobject.h @@ -0,0 +1,58 @@ +/* Former class object interface -- now only bound methods are here */ + +/* Revealing some structures (not for general use) */ + +#ifndef Py_LIMITED_API +#ifndef Py_CLASSOBJECT_H +#define Py_CLASSOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct { + PyObject_HEAD + PyObject *im_func; /* The callable object implementing the method */ + PyObject *im_self; /* The instance it is bound to */ + PyObject *im_weakreflist; /* List of weak references */ +} PyMethodObject; + +PyAPI_DATA(PyTypeObject) PyMethod_Type; + +#define PyMethod_Check(op) ((op)->ob_type == &PyMethod_Type) + +PyAPI_FUNC(PyObject *) PyMethod_New(PyObject *, PyObject *); + +PyAPI_FUNC(PyObject *) PyMethod_Function(PyObject *); +PyAPI_FUNC(PyObject *) PyMethod_Self(PyObject *); + +/* Macros for direct access to these values. Type checks are *not* + done, so use with care. */ +#define PyMethod_GET_FUNCTION(meth) \ + (((PyMethodObject *)meth) -> im_func) +#define PyMethod_GET_SELF(meth) \ + (((PyMethodObject *)meth) -> im_self) + +PyAPI_FUNC(int) PyMethod_ClearFreeList(void); + +typedef struct { + PyObject_HEAD + PyObject *func; +} PyInstanceMethodObject; + +PyAPI_DATA(PyTypeObject) PyInstanceMethod_Type; + +#define PyInstanceMethod_Check(op) ((op)->ob_type == &PyInstanceMethod_Type) + +PyAPI_FUNC(PyObject *) PyInstanceMethod_New(PyObject *); +PyAPI_FUNC(PyObject *) PyInstanceMethod_Function(PyObject *); + +/* Macros for direct access to these values. Type checks are *not* + done, so use with care. */ +#define PyInstanceMethod_GET_FUNCTION(meth) \ + (((PyInstanceMethodObject *)meth) -> func) + +#ifdef __cplusplus +} +#endif +#endif /* !Py_CLASSOBJECT_H */ +#endif /* Py_LIMITED_API */ diff --git a/venv/Include/code.h b/venv/Include/code.h new file mode 100644 index 00000000..2e661e8b --- /dev/null +++ b/venv/Include/code.h @@ -0,0 +1,157 @@ +/* Definitions for bytecode */ + +#ifndef Py_LIMITED_API +#ifndef Py_CODE_H +#define Py_CODE_H +#ifdef __cplusplus +extern "C" { +#endif + +typedef uint16_t _Py_CODEUNIT; + +#ifdef WORDS_BIGENDIAN +# define _Py_OPCODE(word) ((word) >> 8) +# define _Py_OPARG(word) ((word) & 255) +#else +# define _Py_OPCODE(word) ((word) & 255) +# define _Py_OPARG(word) ((word) >> 8) +#endif + +/* Bytecode object */ +typedef struct { + PyObject_HEAD + int co_argcount; /* #arguments, except *args */ + int co_kwonlyargcount; /* #keyword only arguments */ + int co_nlocals; /* #local variables */ + int co_stacksize; /* #entries needed for evaluation stack */ + int co_flags; /* CO_..., see below */ + int co_firstlineno; /* first source line number */ + PyObject *co_code; /* instruction opcodes */ + PyObject *co_consts; /* list (constants used) */ + PyObject *co_names; /* list of strings (names used) */ + PyObject *co_varnames; /* tuple of strings (local variable names) */ + PyObject *co_freevars; /* tuple of strings (free variable names) */ + PyObject *co_cellvars; /* tuple of strings (cell variable names) */ + /* The rest aren't used in either hash or comparisons, except for co_name, + used in both. 
This is done to preserve the name and line number + for tracebacks and debuggers; otherwise, constant de-duplication + would collapse identical functions/lambdas defined on different lines. + */ + Py_ssize_t *co_cell2arg; /* Maps cell vars which are arguments. */ + PyObject *co_filename; /* unicode (where it was loaded from) */ + PyObject *co_name; /* unicode (name, for reference) */ + PyObject *co_lnotab; /* string (encoding addr<->lineno mapping) See + Objects/lnotab_notes.txt for details. */ + void *co_zombieframe; /* for optimization only (see frameobject.c) */ + PyObject *co_weakreflist; /* to support weakrefs to code objects */ + /* Scratch space for extra data relating to the code object. + Type is a void* to keep the format private in codeobject.c to force + people to go through the proper APIs. */ + void *co_extra; +} PyCodeObject; + +/* Masks for co_flags above */ +#define CO_OPTIMIZED 0x0001 +#define CO_NEWLOCALS 0x0002 +#define CO_VARARGS 0x0004 +#define CO_VARKEYWORDS 0x0008 +#define CO_NESTED 0x0010 +#define CO_GENERATOR 0x0020 +/* The CO_NOFREE flag is set if there are no free or cell variables. + This information is redundant, but it allows a single flag test + to determine whether there is any extra work to be done when the + call frame it setup. +*/ +#define CO_NOFREE 0x0040 + +/* The CO_COROUTINE flag is set for coroutine functions (defined with + ``async def`` keywords) */ +#define CO_COROUTINE 0x0080 +#define CO_ITERABLE_COROUTINE 0x0100 +#define CO_ASYNC_GENERATOR 0x0200 + +/* These are no longer used. */ +#if 0 +#define CO_GENERATOR_ALLOWED 0x1000 +#endif +#define CO_FUTURE_DIVISION 0x2000 +#define CO_FUTURE_ABSOLUTE_IMPORT 0x4000 /* do absolute imports by default */ +#define CO_FUTURE_WITH_STATEMENT 0x8000 +#define CO_FUTURE_PRINT_FUNCTION 0x10000 +#define CO_FUTURE_UNICODE_LITERALS 0x20000 + +#define CO_FUTURE_BARRY_AS_BDFL 0x40000 +#define CO_FUTURE_GENERATOR_STOP 0x80000 +#define CO_FUTURE_ANNOTATIONS 0x100000 + +/* This value is found in the co_cell2arg array when the associated cell + variable does not correspond to an argument. */ +#define CO_CELL_NOT_AN_ARG (-1) + +/* This should be defined if a future statement modifies the syntax. + For example, when a keyword is added. +*/ +#define PY_PARSER_REQUIRES_FUTURE_KEYWORD + +#define CO_MAXBLOCKS 20 /* Max static block nesting within a function */ + +PyAPI_DATA(PyTypeObject) PyCode_Type; + +#define PyCode_Check(op) (Py_TYPE(op) == &PyCode_Type) +#define PyCode_GetNumFree(op) (PyTuple_GET_SIZE((op)->co_freevars)) + +/* Public interface */ +PyAPI_FUNC(PyCodeObject *) PyCode_New( + int, int, int, int, int, PyObject *, PyObject *, + PyObject *, PyObject *, PyObject *, PyObject *, + PyObject *, PyObject *, int, PyObject *); + /* same as struct above */ + +/* Creates a new empty code object with the specified source location. */ +PyAPI_FUNC(PyCodeObject *) +PyCode_NewEmpty(const char *filename, const char *funcname, int firstlineno); + +/* Return the line number associated with the specified bytecode index + in this code object. If you just need the line number of a frame, + use PyFrame_GetLineNumber() instead. */ +PyAPI_FUNC(int) PyCode_Addr2Line(PyCodeObject *, int); + +/* for internal use only */ +typedef struct _addr_pair { + int ap_lower; + int ap_upper; +} PyAddrPair; + +#ifndef Py_LIMITED_API +/* Update *bounds to describe the first and one-past-the-last instructions in the + same line as lasti. Return the number of that line. 
+*/ +PyAPI_FUNC(int) _PyCode_CheckLineNumber(PyCodeObject* co, + int lasti, PyAddrPair *bounds); + +/* Create a comparable key used to compare constants taking in account the + * object type. It is used to make sure types are not coerced (e.g., float and + * complex) _and_ to distinguish 0.0 from -0.0 e.g. on IEEE platforms + * + * Return (type(obj), obj, ...): a tuple with variable size (at least 2 items) + * depending on the type and the value. The type is the first item to not + * compare bytes and str which can raise a BytesWarning exception. */ +PyAPI_FUNC(PyObject*) _PyCode_ConstantKey(PyObject *obj); +#endif + +PyAPI_FUNC(PyObject*) PyCode_Optimize(PyObject *code, PyObject* consts, + PyObject *names, PyObject *lnotab); + + +#ifndef Py_LIMITED_API +PyAPI_FUNC(int) _PyCode_GetExtra(PyObject *code, Py_ssize_t index, + void **extra); +PyAPI_FUNC(int) _PyCode_SetExtra(PyObject *code, Py_ssize_t index, + void *extra); +#endif + +#ifdef __cplusplus +} +#endif +#endif /* !Py_CODE_H */ +#endif /* Py_LIMITED_API */ diff --git a/venv/Include/codecs.h b/venv/Include/codecs.h new file mode 100644 index 00000000..3ad0f2b5 --- /dev/null +++ b/venv/Include/codecs.h @@ -0,0 +1,240 @@ +#ifndef Py_CODECREGISTRY_H +#define Py_CODECREGISTRY_H +#ifdef __cplusplus +extern "C" { +#endif + +/* ------------------------------------------------------------------------ + + Python Codec Registry and support functions + + +Written by Marc-Andre Lemburg (mal@lemburg.com). + +Copyright (c) Corporation for National Research Initiatives. + + ------------------------------------------------------------------------ */ + +/* Register a new codec search function. + + As side effect, this tries to load the encodings package, if not + yet done, to make sure that it is always first in the list of + search functions. + + The search_function's refcount is incremented by this function. */ + +PyAPI_FUNC(int) PyCodec_Register( + PyObject *search_function + ); + +/* Codec registry lookup API. + + Looks up the given encoding and returns a CodecInfo object with + function attributes which implement the different aspects of + processing the encoding. + + The encoding string is looked up converted to all lower-case + characters. This makes encodings looked up through this mechanism + effectively case-insensitive. + + If no codec is found, a KeyError is set and NULL returned. + + As side effect, this tries to load the encodings package, if not + yet done. This is part of the lazy load strategy for the encodings + package. + + */ + +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject *) _PyCodec_Lookup( + const char *encoding + ); + +PyAPI_FUNC(int) _PyCodec_Forget( + const char *encoding + ); +#endif + +/* Codec registry encoding check API. + + Returns 1/0 depending on whether there is a registered codec for + the given encoding. + +*/ + +PyAPI_FUNC(int) PyCodec_KnownEncoding( + const char *encoding + ); + +/* Generic codec based encoding API. + + object is passed through the encoder function found for the given + encoding using the error handling method defined by errors. errors + may be NULL to use the default method defined for the codec. + + Raises a LookupError in case no encoder can be found. + + */ + +PyAPI_FUNC(PyObject *) PyCodec_Encode( + PyObject *object, + const char *encoding, + const char *errors + ); + +/* Generic codec based decoding API. + + object is passed through the decoder function found for the given + encoding using the error handling method defined by errors. 
errors + may be NULL to use the default method defined for the codec. + + Raises a LookupError in case no encoder can be found. + + */ + +PyAPI_FUNC(PyObject *) PyCodec_Decode( + PyObject *object, + const char *encoding, + const char *errors + ); + +#ifndef Py_LIMITED_API +/* Text codec specific encoding and decoding API. + + Checks the encoding against a list of codecs which do not + implement a str<->bytes encoding before attempting the + operation. + + Please note that these APIs are internal and should not + be used in Python C extensions. + + XXX (ncoghlan): should we make these, or something like them, public + in Python 3.5+? + + */ +PyAPI_FUNC(PyObject *) _PyCodec_LookupTextEncoding( + const char *encoding, + const char *alternate_command + ); + +PyAPI_FUNC(PyObject *) _PyCodec_EncodeText( + PyObject *object, + const char *encoding, + const char *errors + ); + +PyAPI_FUNC(PyObject *) _PyCodec_DecodeText( + PyObject *object, + const char *encoding, + const char *errors + ); + +/* These two aren't actually text encoding specific, but _io.TextIOWrapper + * is the only current API consumer. + */ +PyAPI_FUNC(PyObject *) _PyCodecInfo_GetIncrementalDecoder( + PyObject *codec_info, + const char *errors + ); + +PyAPI_FUNC(PyObject *) _PyCodecInfo_GetIncrementalEncoder( + PyObject *codec_info, + const char *errors + ); +#endif + + + +/* --- Codec Lookup APIs -------------------------------------------------- + + All APIs return a codec object with incremented refcount and are + based on _PyCodec_Lookup(). The same comments w/r to the encoding + name also apply to these APIs. + +*/ + +/* Get an encoder function for the given encoding. */ + +PyAPI_FUNC(PyObject *) PyCodec_Encoder( + const char *encoding + ); + +/* Get a decoder function for the given encoding. */ + +PyAPI_FUNC(PyObject *) PyCodec_Decoder( + const char *encoding + ); + +/* Get an IncrementalEncoder object for the given encoding. */ + +PyAPI_FUNC(PyObject *) PyCodec_IncrementalEncoder( + const char *encoding, + const char *errors + ); + +/* Get an IncrementalDecoder object function for the given encoding. */ + +PyAPI_FUNC(PyObject *) PyCodec_IncrementalDecoder( + const char *encoding, + const char *errors + ); + +/* Get a StreamReader factory function for the given encoding. */ + +PyAPI_FUNC(PyObject *) PyCodec_StreamReader( + const char *encoding, + PyObject *stream, + const char *errors + ); + +/* Get a StreamWriter factory function for the given encoding. */ + +PyAPI_FUNC(PyObject *) PyCodec_StreamWriter( + const char *encoding, + PyObject *stream, + const char *errors + ); + +/* Unicode encoding error handling callback registry API */ + +/* Register the error handling callback function error under the given + name. This function will be called by the codec when it encounters + unencodable characters/undecodable bytes and doesn't know the + callback name, when name is specified as the error parameter + in the call to the encode/decode function. + Return 0 on success, -1 on error */ +PyAPI_FUNC(int) PyCodec_RegisterError(const char *name, PyObject *error); + +/* Lookup the error handling callback function registered under the given + name. As a special case NULL can be passed, in which case + the error handling callback for "strict" will be returned. 
*/ +PyAPI_FUNC(PyObject *) PyCodec_LookupError(const char *name); + +/* raise exc as an exception */ +PyAPI_FUNC(PyObject *) PyCodec_StrictErrors(PyObject *exc); + +/* ignore the unicode error, skipping the faulty input */ +PyAPI_FUNC(PyObject *) PyCodec_IgnoreErrors(PyObject *exc); + +/* replace the unicode encode error with ? or U+FFFD */ +PyAPI_FUNC(PyObject *) PyCodec_ReplaceErrors(PyObject *exc); + +/* replace the unicode encode error with XML character references */ +PyAPI_FUNC(PyObject *) PyCodec_XMLCharRefReplaceErrors(PyObject *exc); + +/* replace the unicode encode error with backslash escapes (\x, \u and \U) */ +PyAPI_FUNC(PyObject *) PyCodec_BackslashReplaceErrors(PyObject *exc); + +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03050000 +/* replace the unicode encode error with backslash escapes (\N, \x, \u and \U) */ +PyAPI_FUNC(PyObject *) PyCodec_NameReplaceErrors(PyObject *exc); +#endif + +#ifndef Py_LIMITED_API +PyAPI_DATA(const char *) Py_hexdigits; +#endif + +#ifdef __cplusplus +} +#endif +#endif /* !Py_CODECREGISTRY_H */ diff --git a/venv/Include/compile.h b/venv/Include/compile.h new file mode 100644 index 00000000..edb961f4 --- /dev/null +++ b/venv/Include/compile.h @@ -0,0 +1,93 @@ +#ifndef Py_COMPILE_H +#define Py_COMPILE_H + +#ifndef Py_LIMITED_API +#include "code.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Public interface */ +struct _node; /* Declare the existence of this type */ +PyAPI_FUNC(PyCodeObject *) PyNode_Compile(struct _node *, const char *); +/* XXX (ncoghlan): Unprefixed type name in a public API! */ + +#define PyCF_MASK (CO_FUTURE_DIVISION | CO_FUTURE_ABSOLUTE_IMPORT | \ + CO_FUTURE_WITH_STATEMENT | CO_FUTURE_PRINT_FUNCTION | \ + CO_FUTURE_UNICODE_LITERALS | CO_FUTURE_BARRY_AS_BDFL | \ + CO_FUTURE_GENERATOR_STOP | CO_FUTURE_ANNOTATIONS) +#define PyCF_MASK_OBSOLETE (CO_NESTED) +#define PyCF_SOURCE_IS_UTF8 0x0100 +#define PyCF_DONT_IMPLY_DEDENT 0x0200 +#define PyCF_ONLY_AST 0x0400 +#define PyCF_IGNORE_COOKIE 0x0800 + +#ifndef Py_LIMITED_API +typedef struct { + int cf_flags; /* bitmask of CO_xxx flags relevant to future */ +} PyCompilerFlags; +#endif + +/* Future feature support */ + +typedef struct { + int ff_features; /* flags set by future statements */ + int ff_lineno; /* line number of last future statement */ +} PyFutureFeatures; + +#define FUTURE_NESTED_SCOPES "nested_scopes" +#define FUTURE_GENERATORS "generators" +#define FUTURE_DIVISION "division" +#define FUTURE_ABSOLUTE_IMPORT "absolute_import" +#define FUTURE_WITH_STATEMENT "with_statement" +#define FUTURE_PRINT_FUNCTION "print_function" +#define FUTURE_UNICODE_LITERALS "unicode_literals" +#define FUTURE_BARRY_AS_BDFL "barry_as_FLUFL" +#define FUTURE_GENERATOR_STOP "generator_stop" +#define FUTURE_ANNOTATIONS "annotations" + +struct _mod; /* Declare the existence of this type */ +#define PyAST_Compile(mod, s, f, ar) PyAST_CompileEx(mod, s, f, -1, ar) +PyAPI_FUNC(PyCodeObject *) PyAST_CompileEx( + struct _mod *mod, + const char *filename, /* decoded from the filesystem encoding */ + PyCompilerFlags *flags, + int optimize, + PyArena *arena); +PyAPI_FUNC(PyCodeObject *) PyAST_CompileObject( + struct _mod *mod, + PyObject *filename, + PyCompilerFlags *flags, + int optimize, + PyArena *arena); +PyAPI_FUNC(PyFutureFeatures *) PyFuture_FromAST( + struct _mod * mod, + const char *filename /* decoded from the filesystem encoding */ + ); +PyAPI_FUNC(PyFutureFeatures *) PyFuture_FromASTObject( + struct _mod * mod, + PyObject *filename + ); + +/* _Py_Mangle is defined in compile.c */ 
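As a hedged, editorial sketch of the registry API declared in codecs.h above (not part of the vendored header; the wrapper name and error message are made up), encoding an object through a registered codec might look like:

    #include "Python.h"

    // Sketch: encode a str object to bytes via the codec registry,
    // reporting an explicit LookupError if the codec is unknown.
    static PyObject *
    encode_with_codec(PyObject *text, const char *encoding)
    {
        if (!PyCodec_KnownEncoding(encoding)) {
            PyErr_Format(PyExc_LookupError, "unknown encoding: %s", encoding);
            return NULL;
        }
        // Returns a new reference, or NULL with a LookupError /
        // UnicodeEncodeError set by the codec machinery.
        return PyCodec_Encode(text, encoding, "strict");
    }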
+PyAPI_FUNC(PyObject*) _Py_Mangle(PyObject *p, PyObject *name); + +#define PY_INVALID_STACK_EFFECT INT_MAX +PyAPI_FUNC(int) PyCompile_OpcodeStackEffect(int opcode, int oparg); + +PyAPI_FUNC(int) _PyAST_Optimize(struct _mod *, PyArena *arena, int optimize); + +#ifdef __cplusplus +} +#endif + +#endif /* !Py_LIMITED_API */ + +/* These definitions must match corresponding definitions in graminit.h. + There's code in compile.c that checks that they are the same. */ +#define Py_single_input 256 +#define Py_file_input 257 +#define Py_eval_input 258 + +#endif /* !Py_COMPILE_H */ diff --git a/venv/Include/complexobject.h b/venv/Include/complexobject.h new file mode 100644 index 00000000..cb8c52c5 --- /dev/null +++ b/venv/Include/complexobject.h @@ -0,0 +1,69 @@ +/* Complex number structure */ + +#ifndef Py_COMPLEXOBJECT_H +#define Py_COMPLEXOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_LIMITED_API +typedef struct { + double real; + double imag; +} Py_complex; + +/* Operations on complex numbers from complexmodule.c */ + +PyAPI_FUNC(Py_complex) _Py_c_sum(Py_complex, Py_complex); +PyAPI_FUNC(Py_complex) _Py_c_diff(Py_complex, Py_complex); +PyAPI_FUNC(Py_complex) _Py_c_neg(Py_complex); +PyAPI_FUNC(Py_complex) _Py_c_prod(Py_complex, Py_complex); +PyAPI_FUNC(Py_complex) _Py_c_quot(Py_complex, Py_complex); +PyAPI_FUNC(Py_complex) _Py_c_pow(Py_complex, Py_complex); +PyAPI_FUNC(double) _Py_c_abs(Py_complex); +#endif + +/* Complex object interface */ + +/* +PyComplexObject represents a complex number with double-precision +real and imaginary parts. +*/ +#ifndef Py_LIMITED_API +typedef struct { + PyObject_HEAD + Py_complex cval; +} PyComplexObject; +#endif + +PyAPI_DATA(PyTypeObject) PyComplex_Type; + +#define PyComplex_Check(op) PyObject_TypeCheck(op, &PyComplex_Type) +#define PyComplex_CheckExact(op) (Py_TYPE(op) == &PyComplex_Type) + +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject *) PyComplex_FromCComplex(Py_complex); +#endif +PyAPI_FUNC(PyObject *) PyComplex_FromDoubles(double real, double imag); + +PyAPI_FUNC(double) PyComplex_RealAsDouble(PyObject *op); +PyAPI_FUNC(double) PyComplex_ImagAsDouble(PyObject *op); +#ifndef Py_LIMITED_API +PyAPI_FUNC(Py_complex) PyComplex_AsCComplex(PyObject *op); +#endif + +/* Format the object based on the format_spec, as defined in PEP 3101 + (Advanced String Formatting). 
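An editorial aside, not part of the vendored complexobject.h: assuming a normal (non-limited-API) CPython extension build, the C-level complex helpers declared above might be combined roughly like this; the function name is illustrative only:

    #include "Python.h"

    // Sketch: multiply two Python complex objects at the C level and
    // return the product as a new Python complex object.
    static PyObject *
    complex_product(PyObject *a, PyObject *b)
    {
        Py_complex ca = PyComplex_AsCComplex(a);   // real == -1.0 with an error set on failure
        if (ca.real == -1.0 && PyErr_Occurred())
            return NULL;
        Py_complex cb = PyComplex_AsCComplex(b);
        if (cb.real == -1.0 && PyErr_Occurred())
            return NULL;
        return PyComplex_FromCComplex(_Py_c_prod(ca, cb));
    }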
*/ +#ifndef Py_LIMITED_API +PyAPI_FUNC(int) _PyComplex_FormatAdvancedWriter( + _PyUnicodeWriter *writer, + PyObject *obj, + PyObject *format_spec, + Py_ssize_t start, + Py_ssize_t end); +#endif + +#ifdef __cplusplus +} +#endif +#endif /* !Py_COMPLEXOBJECT_H */ diff --git a/venv/Include/context.h b/venv/Include/context.h new file mode 100644 index 00000000..8b9f1292 --- /dev/null +++ b/venv/Include/context.h @@ -0,0 +1,86 @@ +#ifndef Py_CONTEXT_H +#define Py_CONTEXT_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_LIMITED_API + + +PyAPI_DATA(PyTypeObject) PyContext_Type; +typedef struct _pycontextobject PyContext; + +PyAPI_DATA(PyTypeObject) PyContextVar_Type; +typedef struct _pycontextvarobject PyContextVar; + +PyAPI_DATA(PyTypeObject) PyContextToken_Type; +typedef struct _pycontexttokenobject PyContextToken; + + +#define PyContext_CheckExact(o) (Py_TYPE(o) == &PyContext_Type) +#define PyContextVar_CheckExact(o) (Py_TYPE(o) == &PyContextVar_Type) +#define PyContextToken_CheckExact(o) (Py_TYPE(o) == &PyContextToken_Type) + + +PyAPI_FUNC(PyContext *) PyContext_New(void); +PyAPI_FUNC(PyContext *) PyContext_Copy(PyContext *); +PyAPI_FUNC(PyContext *) PyContext_CopyCurrent(void); + +PyAPI_FUNC(int) PyContext_Enter(PyContext *); +PyAPI_FUNC(int) PyContext_Exit(PyContext *); + + +/* Create a new context variable. + + default_value can be NULL. +*/ +PyAPI_FUNC(PyContextVar *) PyContextVar_New( + const char *name, PyObject *default_value); + + +/* Get a value for the variable. + + Returns -1 if an error occurred during lookup. + + Returns 0 if value either was or was not found. + + If value was found, *value will point to it. + If not, it will point to: + + - default_value, if not NULL; + - the default value of "var", if not NULL; + - NULL. + + '*value' will be a new ref, if not NULL. +*/ +PyAPI_FUNC(int) PyContextVar_Get( + PyContextVar *var, PyObject *default_value, PyObject **value); + + +/* Set a new value for the variable. + Returns NULL if an error occurs. +*/ +PyAPI_FUNC(PyContextToken *) PyContextVar_Set( + PyContextVar *var, PyObject *value); + + +/* Reset a variable to its previous value. + Returns 0 on success, -1 on error. +*/ +PyAPI_FUNC(int) PyContextVar_Reset( + PyContextVar *var, PyContextToken *token); + + +/* This method is exposed only for CPython tests. Don not use it. */ +PyAPI_FUNC(PyObject *) _PyContext_NewHamtForTests(void); + + +PyAPI_FUNC(int) PyContext_ClearFreeList(void); + + +#endif /* !Py_LIMITED_API */ + +#ifdef __cplusplus +} +#endif +#endif /* !Py_CONTEXT_H */ diff --git a/venv/Include/datetime.h b/venv/Include/datetime.h new file mode 100644 index 00000000..059d5ecf --- /dev/null +++ b/venv/Include/datetime.h @@ -0,0 +1,273 @@ +/* datetime.h + */ +#ifndef Py_LIMITED_API +#ifndef DATETIME_H +#define DATETIME_H +#ifdef __cplusplus +extern "C" { +#endif + +/* Fields are packed into successive bytes, each viewed as unsigned and + * big-endian, unless otherwise noted: + * + * byte offset + * 0 year 2 bytes, 1-9999 + * 2 month 1 byte, 1-12 + * 3 day 1 byte, 1-31 + * 4 hour 1 byte, 0-23 + * 5 minute 1 byte, 0-59 + * 6 second 1 byte, 0-59 + * 7 usecond 3 bytes, 0-999999 + * 10 + */ + +/* # of bytes for year, month, and day. */ +#define _PyDateTime_DATE_DATASIZE 4 + +/* # of bytes for hour, minute, second, and usecond. */ +#define _PyDateTime_TIME_DATASIZE 6 + +/* # of bytes for year, month, day, hour, minute, second, and usecond. 
*/ +#define _PyDateTime_DATETIME_DATASIZE 10 + + +typedef struct +{ + PyObject_HEAD + Py_hash_t hashcode; /* -1 when unknown */ + int days; /* -MAX_DELTA_DAYS <= days <= MAX_DELTA_DAYS */ + int seconds; /* 0 <= seconds < 24*3600 is invariant */ + int microseconds; /* 0 <= microseconds < 1000000 is invariant */ +} PyDateTime_Delta; + +typedef struct +{ + PyObject_HEAD /* a pure abstract base class */ +} PyDateTime_TZInfo; + + +/* The datetime and time types have hashcodes, and an optional tzinfo member, + * present if and only if hastzinfo is true. + */ +#define _PyTZINFO_HEAD \ + PyObject_HEAD \ + Py_hash_t hashcode; \ + char hastzinfo; /* boolean flag */ + +/* No _PyDateTime_BaseTZInfo is allocated; it's just to have something + * convenient to cast to, when getting at the hastzinfo member of objects + * starting with _PyTZINFO_HEAD. + */ +typedef struct +{ + _PyTZINFO_HEAD +} _PyDateTime_BaseTZInfo; + +/* All time objects are of PyDateTime_TimeType, but that can be allocated + * in two ways, with or without a tzinfo member. Without is the same as + * tzinfo == None, but consumes less memory. _PyDateTime_BaseTime is an + * internal struct used to allocate the right amount of space for the + * "without" case. + */ +#define _PyDateTime_TIMEHEAD \ + _PyTZINFO_HEAD \ + unsigned char data[_PyDateTime_TIME_DATASIZE]; + +typedef struct +{ + _PyDateTime_TIMEHEAD +} _PyDateTime_BaseTime; /* hastzinfo false */ + +typedef struct +{ + _PyDateTime_TIMEHEAD + unsigned char fold; + PyObject *tzinfo; +} PyDateTime_Time; /* hastzinfo true */ + + +/* All datetime objects are of PyDateTime_DateTimeType, but that can be + * allocated in two ways too, just like for time objects above. In addition, + * the plain date type is a base class for datetime, so it must also have + * a hastzinfo member (although it's unused there). + */ +typedef struct +{ + _PyTZINFO_HEAD + unsigned char data[_PyDateTime_DATE_DATASIZE]; +} PyDateTime_Date; + +#define _PyDateTime_DATETIMEHEAD \ + _PyTZINFO_HEAD \ + unsigned char data[_PyDateTime_DATETIME_DATASIZE]; + +typedef struct +{ + _PyDateTime_DATETIMEHEAD +} _PyDateTime_BaseDateTime; /* hastzinfo false */ + +typedef struct +{ + _PyDateTime_DATETIMEHEAD + unsigned char fold; + PyObject *tzinfo; +} PyDateTime_DateTime; /* hastzinfo true */ + + +/* Apply for date and datetime instances. */ +#define PyDateTime_GET_YEAR(o) ((((PyDateTime_Date*)o)->data[0] << 8) | \ + ((PyDateTime_Date*)o)->data[1]) +#define PyDateTime_GET_MONTH(o) (((PyDateTime_Date*)o)->data[2]) +#define PyDateTime_GET_DAY(o) (((PyDateTime_Date*)o)->data[3]) + +#define PyDateTime_DATE_GET_HOUR(o) (((PyDateTime_DateTime*)o)->data[4]) +#define PyDateTime_DATE_GET_MINUTE(o) (((PyDateTime_DateTime*)o)->data[5]) +#define PyDateTime_DATE_GET_SECOND(o) (((PyDateTime_DateTime*)o)->data[6]) +#define PyDateTime_DATE_GET_MICROSECOND(o) \ + ((((PyDateTime_DateTime*)o)->data[7] << 16) | \ + (((PyDateTime_DateTime*)o)->data[8] << 8) | \ + ((PyDateTime_DateTime*)o)->data[9]) +#define PyDateTime_DATE_GET_FOLD(o) (((PyDateTime_DateTime*)o)->fold) + +/* Apply for time instances. 
*/ +#define PyDateTime_TIME_GET_HOUR(o) (((PyDateTime_Time*)o)->data[0]) +#define PyDateTime_TIME_GET_MINUTE(o) (((PyDateTime_Time*)o)->data[1]) +#define PyDateTime_TIME_GET_SECOND(o) (((PyDateTime_Time*)o)->data[2]) +#define PyDateTime_TIME_GET_MICROSECOND(o) \ + ((((PyDateTime_Time*)o)->data[3] << 16) | \ + (((PyDateTime_Time*)o)->data[4] << 8) | \ + ((PyDateTime_Time*)o)->data[5]) +#define PyDateTime_TIME_GET_FOLD(o) (((PyDateTime_Time*)o)->fold) + +/* Apply for time delta instances */ +#define PyDateTime_DELTA_GET_DAYS(o) (((PyDateTime_Delta*)o)->days) +#define PyDateTime_DELTA_GET_SECONDS(o) (((PyDateTime_Delta*)o)->seconds) +#define PyDateTime_DELTA_GET_MICROSECONDS(o) \ + (((PyDateTime_Delta*)o)->microseconds) + + +/* Define structure for C API. */ +typedef struct { + /* type objects */ + PyTypeObject *DateType; + PyTypeObject *DateTimeType; + PyTypeObject *TimeType; + PyTypeObject *DeltaType; + PyTypeObject *TZInfoType; + + /* singletons */ + PyObject *TimeZone_UTC; + + /* constructors */ + PyObject *(*Date_FromDate)(int, int, int, PyTypeObject*); + PyObject *(*DateTime_FromDateAndTime)(int, int, int, int, int, int, int, + PyObject*, PyTypeObject*); + PyObject *(*Time_FromTime)(int, int, int, int, PyObject*, PyTypeObject*); + PyObject *(*Delta_FromDelta)(int, int, int, int, PyTypeObject*); + PyObject *(*TimeZone_FromTimeZone)(PyObject *offset, PyObject *name); + + /* constructors for the DB API */ + PyObject *(*DateTime_FromTimestamp)(PyObject*, PyObject*, PyObject*); + PyObject *(*Date_FromTimestamp)(PyObject*, PyObject*); + + /* PEP 495 constructors */ + PyObject *(*DateTime_FromDateAndTimeAndFold)(int, int, int, int, int, int, int, + PyObject*, int, PyTypeObject*); + PyObject *(*Time_FromTimeAndFold)(int, int, int, int, PyObject*, int, PyTypeObject*); + +} PyDateTime_CAPI; + +#define PyDateTime_CAPSULE_NAME "datetime.datetime_CAPI" + + +#ifdef Py_BUILD_CORE + +/* Macros for type checking when building the Python core. */ +#define PyDate_Check(op) PyObject_TypeCheck(op, &PyDateTime_DateType) +#define PyDate_CheckExact(op) (Py_TYPE(op) == &PyDateTime_DateType) + +#define PyDateTime_Check(op) PyObject_TypeCheck(op, &PyDateTime_DateTimeType) +#define PyDateTime_CheckExact(op) (Py_TYPE(op) == &PyDateTime_DateTimeType) + +#define PyTime_Check(op) PyObject_TypeCheck(op, &PyDateTime_TimeType) +#define PyTime_CheckExact(op) (Py_TYPE(op) == &PyDateTime_TimeType) + +#define PyDelta_Check(op) PyObject_TypeCheck(op, &PyDateTime_DeltaType) +#define PyDelta_CheckExact(op) (Py_TYPE(op) == &PyDateTime_DeltaType) + +#define PyTZInfo_Check(op) PyObject_TypeCheck(op, &PyDateTime_TZInfoType) +#define PyTZInfo_CheckExact(op) (Py_TYPE(op) == &PyDateTime_TZInfoType) + +#else + +/* Define global variable for the C API and a macro for setting it. */ +static PyDateTime_CAPI *PyDateTimeAPI = NULL; + +#define PyDateTime_IMPORT \ + PyDateTimeAPI = (PyDateTime_CAPI *)PyCapsule_Import(PyDateTime_CAPSULE_NAME, 0) + +/* Macro for access to the UTC singleton */ +#define PyDateTime_TimeZone_UTC PyDateTimeAPI->TimeZone_UTC + +/* Macros for type checking when not building the Python core. 
*/ +#define PyDate_Check(op) PyObject_TypeCheck(op, PyDateTimeAPI->DateType) +#define PyDate_CheckExact(op) (Py_TYPE(op) == PyDateTimeAPI->DateType) + +#define PyDateTime_Check(op) PyObject_TypeCheck(op, PyDateTimeAPI->DateTimeType) +#define PyDateTime_CheckExact(op) (Py_TYPE(op) == PyDateTimeAPI->DateTimeType) + +#define PyTime_Check(op) PyObject_TypeCheck(op, PyDateTimeAPI->TimeType) +#define PyTime_CheckExact(op) (Py_TYPE(op) == PyDateTimeAPI->TimeType) + +#define PyDelta_Check(op) PyObject_TypeCheck(op, PyDateTimeAPI->DeltaType) +#define PyDelta_CheckExact(op) (Py_TYPE(op) == PyDateTimeAPI->DeltaType) + +#define PyTZInfo_Check(op) PyObject_TypeCheck(op, PyDateTimeAPI->TZInfoType) +#define PyTZInfo_CheckExact(op) (Py_TYPE(op) == PyDateTimeAPI->TZInfoType) + +/* Macros for accessing constructors in a simplified fashion. */ +#define PyDate_FromDate(year, month, day) \ + PyDateTimeAPI->Date_FromDate(year, month, day, PyDateTimeAPI->DateType) + +#define PyDateTime_FromDateAndTime(year, month, day, hour, min, sec, usec) \ + PyDateTimeAPI->DateTime_FromDateAndTime(year, month, day, hour, \ + min, sec, usec, Py_None, PyDateTimeAPI->DateTimeType) + +#define PyDateTime_FromDateAndTimeAndFold(year, month, day, hour, min, sec, usec, fold) \ + PyDateTimeAPI->DateTime_FromDateAndTimeAndFold(year, month, day, hour, \ + min, sec, usec, Py_None, fold, PyDateTimeAPI->DateTimeType) + +#define PyTime_FromTime(hour, minute, second, usecond) \ + PyDateTimeAPI->Time_FromTime(hour, minute, second, usecond, \ + Py_None, PyDateTimeAPI->TimeType) + +#define PyTime_FromTimeAndFold(hour, minute, second, usecond, fold) \ + PyDateTimeAPI->Time_FromTimeAndFold(hour, minute, second, usecond, \ + Py_None, fold, PyDateTimeAPI->TimeType) + +#define PyDelta_FromDSU(days, seconds, useconds) \ + PyDateTimeAPI->Delta_FromDelta(days, seconds, useconds, 1, \ + PyDateTimeAPI->DeltaType) + +#define PyTimeZone_FromOffset(offset) \ + PyDateTimeAPI->TimeZone_FromTimeZone(offset, NULL) + +#define PyTimeZone_FromOffsetAndName(offset, name) \ + PyDateTimeAPI->TimeZone_FromTimeZone(offset, name) + +/* Macros supporting the DB API. 
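As an illustrative, editorial sketch only (not part of the vendored datetime.h; the function name is hypothetical), a consuming extension module typically imports the capsule once and then uses the convenience macros defined above:

    #include "Python.h"
    #include "datetime.h"

    // Sketch: build a datetime.date from C ints via the capsule-backed API.
    static PyObject *
    make_release_date(void)
    {
        if (PyDateTimeAPI == NULL) {
            PyDateTime_IMPORT;                 // loads "datetime.datetime_CAPI"
            if (PyDateTimeAPI == NULL)
                return NULL;                   // import error already set
        }
        return PyDate_FromDate(2018, 10, 31);  // new reference
    }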
*/ +#define PyDateTime_FromTimestamp(args) \ + PyDateTimeAPI->DateTime_FromTimestamp( \ + (PyObject*) (PyDateTimeAPI->DateTimeType), args, NULL) + +#define PyDate_FromTimestamp(args) \ + PyDateTimeAPI->Date_FromTimestamp( \ + (PyObject*) (PyDateTimeAPI->DateType), args) + +#endif /* Py_BUILD_CORE */ + +#ifdef __cplusplus +} +#endif +#endif +#endif /* !Py_LIMITED_API */ diff --git a/venv/Include/descrobject.h b/venv/Include/descrobject.h new file mode 100644 index 00000000..73bbb3fe --- /dev/null +++ b/venv/Include/descrobject.h @@ -0,0 +1,110 @@ +/* Descriptors */ +#ifndef Py_DESCROBJECT_H +#define Py_DESCROBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +typedef PyObject *(*getter)(PyObject *, void *); +typedef int (*setter)(PyObject *, PyObject *, void *); + +typedef struct PyGetSetDef { + const char *name; + getter get; + setter set; + const char *doc; + void *closure; +} PyGetSetDef; + +#ifndef Py_LIMITED_API +typedef PyObject *(*wrapperfunc)(PyObject *self, PyObject *args, + void *wrapped); + +typedef PyObject *(*wrapperfunc_kwds)(PyObject *self, PyObject *args, + void *wrapped, PyObject *kwds); + +struct wrapperbase { + const char *name; + int offset; + void *function; + wrapperfunc wrapper; + const char *doc; + int flags; + PyObject *name_strobj; +}; + +/* Flags for above struct */ +#define PyWrapperFlag_KEYWORDS 1 /* wrapper function takes keyword args */ + +/* Various kinds of descriptor objects */ + +typedef struct { + PyObject_HEAD + PyTypeObject *d_type; + PyObject *d_name; + PyObject *d_qualname; +} PyDescrObject; + +#define PyDescr_COMMON PyDescrObject d_common + +#define PyDescr_TYPE(x) (((PyDescrObject *)(x))->d_type) +#define PyDescr_NAME(x) (((PyDescrObject *)(x))->d_name) + +typedef struct { + PyDescr_COMMON; + PyMethodDef *d_method; +} PyMethodDescrObject; + +typedef struct { + PyDescr_COMMON; + struct PyMemberDef *d_member; +} PyMemberDescrObject; + +typedef struct { + PyDescr_COMMON; + PyGetSetDef *d_getset; +} PyGetSetDescrObject; + +typedef struct { + PyDescr_COMMON; + struct wrapperbase *d_base; + void *d_wrapped; /* This can be any function pointer */ +} PyWrapperDescrObject; +#endif /* Py_LIMITED_API */ + +PyAPI_DATA(PyTypeObject) PyClassMethodDescr_Type; +PyAPI_DATA(PyTypeObject) PyGetSetDescr_Type; +PyAPI_DATA(PyTypeObject) PyMemberDescr_Type; +PyAPI_DATA(PyTypeObject) PyMethodDescr_Type; +PyAPI_DATA(PyTypeObject) PyWrapperDescr_Type; +PyAPI_DATA(PyTypeObject) PyDictProxy_Type; +#ifndef Py_LIMITED_API +PyAPI_DATA(PyTypeObject) _PyMethodWrapper_Type; +#endif /* Py_LIMITED_API */ + +PyAPI_FUNC(PyObject *) PyDescr_NewMethod(PyTypeObject *, PyMethodDef *); +PyAPI_FUNC(PyObject *) PyDescr_NewClassMethod(PyTypeObject *, PyMethodDef *); +struct PyMemberDef; /* forward declaration for following prototype */ +PyAPI_FUNC(PyObject *) PyDescr_NewMember(PyTypeObject *, + struct PyMemberDef *); +PyAPI_FUNC(PyObject *) PyDescr_NewGetSet(PyTypeObject *, + struct PyGetSetDef *); +#ifndef Py_LIMITED_API + +PyAPI_FUNC(PyObject *) _PyMethodDescr_FastCallKeywords( + PyObject *descrobj, PyObject *const *stack, Py_ssize_t nargs, PyObject *kwnames); +PyAPI_FUNC(PyObject *) PyDescr_NewWrapper(PyTypeObject *, + struct wrapperbase *, void *); +#define PyDescr_IsData(d) (Py_TYPE(d)->tp_descr_set != NULL) +#endif + +PyAPI_FUNC(PyObject *) PyDictProxy_New(PyObject *); +PyAPI_FUNC(PyObject *) PyWrapper_New(PyObject *, PyObject *); + + +PyAPI_DATA(PyTypeObject) PyProperty_Type; +#ifdef __cplusplus +} +#endif +#endif /* !Py_DESCROBJECT_H */ + diff --git a/venv/Include/dictobject.h 
b/venv/Include/dictobject.h new file mode 100644 index 00000000..28930f43 --- /dev/null +++ b/venv/Include/dictobject.h @@ -0,0 +1,179 @@ +#ifndef Py_DICTOBJECT_H +#define Py_DICTOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + + +/* Dictionary object type -- mapping from hashable object to object */ + +/* The distribution includes a separate file, Objects/dictnotes.txt, + describing explorations into dictionary design and optimization. + It covers typical dictionary use patterns, the parameters for + tuning dictionaries, and several ideas for possible optimizations. +*/ + +#ifndef Py_LIMITED_API + +typedef struct _dictkeysobject PyDictKeysObject; + +/* The ma_values pointer is NULL for a combined table + * or points to an array of PyObject* for a split table + */ +typedef struct { + PyObject_HEAD + + /* Number of items in the dictionary */ + Py_ssize_t ma_used; + + /* Dictionary version: globally unique, value change each time + the dictionary is modified */ + uint64_t ma_version_tag; + + PyDictKeysObject *ma_keys; + + /* If ma_values is NULL, the table is "combined": keys and values + are stored in ma_keys. + + If ma_values is not NULL, the table is splitted: + keys are stored in ma_keys and values are stored in ma_values */ + PyObject **ma_values; +} PyDictObject; + +typedef struct { + PyObject_HEAD + PyDictObject *dv_dict; +} _PyDictViewObject; + +#endif /* Py_LIMITED_API */ + +PyAPI_DATA(PyTypeObject) PyDict_Type; +PyAPI_DATA(PyTypeObject) PyDictIterKey_Type; +PyAPI_DATA(PyTypeObject) PyDictIterValue_Type; +PyAPI_DATA(PyTypeObject) PyDictIterItem_Type; +PyAPI_DATA(PyTypeObject) PyDictKeys_Type; +PyAPI_DATA(PyTypeObject) PyDictItems_Type; +PyAPI_DATA(PyTypeObject) PyDictValues_Type; + +#define PyDict_Check(op) \ + PyType_FastSubclass(Py_TYPE(op), Py_TPFLAGS_DICT_SUBCLASS) +#define PyDict_CheckExact(op) (Py_TYPE(op) == &PyDict_Type) +#define PyDictKeys_Check(op) PyObject_TypeCheck(op, &PyDictKeys_Type) +#define PyDictItems_Check(op) PyObject_TypeCheck(op, &PyDictItems_Type) +#define PyDictValues_Check(op) PyObject_TypeCheck(op, &PyDictValues_Type) +/* This excludes Values, since they are not sets. 
*/ +# define PyDictViewSet_Check(op) \ + (PyDictKeys_Check(op) || PyDictItems_Check(op)) + + +PyAPI_FUNC(PyObject *) PyDict_New(void); +PyAPI_FUNC(PyObject *) PyDict_GetItem(PyObject *mp, PyObject *key); +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject *) _PyDict_GetItem_KnownHash(PyObject *mp, PyObject *key, + Py_hash_t hash); +#endif +PyAPI_FUNC(PyObject *) PyDict_GetItemWithError(PyObject *mp, PyObject *key); +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject *) _PyDict_GetItemIdWithError(PyObject *dp, + struct _Py_Identifier *key); +PyAPI_FUNC(PyObject *) PyDict_SetDefault( + PyObject *mp, PyObject *key, PyObject *defaultobj); +#endif +PyAPI_FUNC(int) PyDict_SetItem(PyObject *mp, PyObject *key, PyObject *item); +#ifndef Py_LIMITED_API +PyAPI_FUNC(int) _PyDict_SetItem_KnownHash(PyObject *mp, PyObject *key, + PyObject *item, Py_hash_t hash); +#endif +PyAPI_FUNC(int) PyDict_DelItem(PyObject *mp, PyObject *key); +#ifndef Py_LIMITED_API +PyAPI_FUNC(int) _PyDict_DelItem_KnownHash(PyObject *mp, PyObject *key, + Py_hash_t hash); +PyAPI_FUNC(int) _PyDict_DelItemIf(PyObject *mp, PyObject *key, + int (*predicate)(PyObject *value)); +#endif +PyAPI_FUNC(void) PyDict_Clear(PyObject *mp); +PyAPI_FUNC(int) PyDict_Next( + PyObject *mp, Py_ssize_t *pos, PyObject **key, PyObject **value); +#ifndef Py_LIMITED_API +PyDictKeysObject *_PyDict_NewKeysForClass(void); +PyAPI_FUNC(PyObject *) PyObject_GenericGetDict(PyObject *, void *); +PyAPI_FUNC(int) _PyDict_Next( + PyObject *mp, Py_ssize_t *pos, PyObject **key, PyObject **value, Py_hash_t *hash); +PyObject *_PyDictView_New(PyObject *, PyTypeObject *); +#endif +PyAPI_FUNC(PyObject *) PyDict_Keys(PyObject *mp); +PyAPI_FUNC(PyObject *) PyDict_Values(PyObject *mp); +PyAPI_FUNC(PyObject *) PyDict_Items(PyObject *mp); +PyAPI_FUNC(Py_ssize_t) PyDict_Size(PyObject *mp); +PyAPI_FUNC(PyObject *) PyDict_Copy(PyObject *mp); +PyAPI_FUNC(int) PyDict_Contains(PyObject *mp, PyObject *key); +#ifndef Py_LIMITED_API +/* Get the number of items of a dictionary. */ +#define PyDict_GET_SIZE(mp) (assert(PyDict_Check(mp)),((PyDictObject *)mp)->ma_used) +PyAPI_FUNC(int) _PyDict_Contains(PyObject *mp, PyObject *key, Py_hash_t hash); +PyAPI_FUNC(PyObject *) _PyDict_NewPresized(Py_ssize_t minused); +PyAPI_FUNC(void) _PyDict_MaybeUntrack(PyObject *mp); +PyAPI_FUNC(int) _PyDict_HasOnlyStringKeys(PyObject *mp); +Py_ssize_t _PyDict_KeysSize(PyDictKeysObject *keys); +PyAPI_FUNC(Py_ssize_t) _PyDict_SizeOf(PyDictObject *); +PyAPI_FUNC(PyObject *) _PyDict_Pop(PyObject *, PyObject *, PyObject *); +PyObject *_PyDict_Pop_KnownHash(PyObject *, PyObject *, Py_hash_t, PyObject *); +PyObject *_PyDict_FromKeys(PyObject *, PyObject *, PyObject *); +#define _PyDict_HasSplitTable(d) ((d)->ma_values != NULL) + +PyAPI_FUNC(int) PyDict_ClearFreeList(void); +#endif + +/* PyDict_Update(mp, other) is equivalent to PyDict_Merge(mp, other, 1). */ +PyAPI_FUNC(int) PyDict_Update(PyObject *mp, PyObject *other); + +/* PyDict_Merge updates/merges from a mapping object (an object that + supports PyMapping_Keys() and PyObject_GetItem()). If override is true, + the last occurrence of a key wins, else the first. The Python + dict.update(other) is equivalent to PyDict_Merge(dict, other, 1). +*/ +PyAPI_FUNC(int) PyDict_Merge(PyObject *mp, + PyObject *other, + int override); + +#ifndef Py_LIMITED_API +/* Like PyDict_Merge, but override can be 0, 1 or 2. 
If override is 0, + the first occurrence of a key wins, if override is 1, the last occurrence + of a key wins, if override is 2, a KeyError with conflicting key as + argument is raised. +*/ +PyAPI_FUNC(int) _PyDict_MergeEx(PyObject *mp, PyObject *other, int override); +PyAPI_FUNC(PyObject *) _PyDictView_Intersect(PyObject* self, PyObject *other); +#endif + +/* PyDict_MergeFromSeq2 updates/merges from an iterable object producing + iterable objects of length 2. If override is true, the last occurrence + of a key wins, else the first. The Python dict constructor dict(seq2) + is equivalent to dict={}; PyDict_MergeFromSeq(dict, seq2, 1). +*/ +PyAPI_FUNC(int) PyDict_MergeFromSeq2(PyObject *d, + PyObject *seq2, + int override); + +PyAPI_FUNC(PyObject *) PyDict_GetItemString(PyObject *dp, const char *key); +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject *) _PyDict_GetItemId(PyObject *dp, struct _Py_Identifier *key); +#endif /* !Py_LIMITED_API */ +PyAPI_FUNC(int) PyDict_SetItemString(PyObject *dp, const char *key, PyObject *item); +#ifndef Py_LIMITED_API +PyAPI_FUNC(int) _PyDict_SetItemId(PyObject *dp, struct _Py_Identifier *key, PyObject *item); +#endif /* !Py_LIMITED_API */ +PyAPI_FUNC(int) PyDict_DelItemString(PyObject *dp, const char *key); + +#ifndef Py_LIMITED_API +PyAPI_FUNC(int) _PyDict_DelItemId(PyObject *mp, struct _Py_Identifier *key); +PyAPI_FUNC(void) _PyDict_DebugMallocStats(FILE *out); + +int _PyObjectDict_SetItem(PyTypeObject *tp, PyObject **dictptr, PyObject *name, PyObject *value); +PyObject *_PyDict_LoadGlobal(PyDictObject *, PyDictObject *, PyObject *); +#endif + +#ifdef __cplusplus +} +#endif +#endif /* !Py_DICTOBJECT_H */ diff --git a/venv/Include/dtoa.h b/venv/Include/dtoa.h new file mode 100644 index 00000000..9bfb6251 --- /dev/null +++ b/venv/Include/dtoa.h @@ -0,0 +1,19 @@ +#ifndef Py_LIMITED_API +#ifndef PY_NO_SHORT_FLOAT_REPR +#ifdef __cplusplus +extern "C" { +#endif + +PyAPI_FUNC(double) _Py_dg_strtod(const char *str, char **ptr); +PyAPI_FUNC(char *) _Py_dg_dtoa(double d, int mode, int ndigits, + int *decpt, int *sign, char **rve); +PyAPI_FUNC(void) _Py_dg_freedtoa(char *s); +PyAPI_FUNC(double) _Py_dg_stdnan(int sign); +PyAPI_FUNC(double) _Py_dg_infinity(int sign); + + +#ifdef __cplusplus +} +#endif +#endif +#endif diff --git a/venv/Include/dynamic_annotations.h b/venv/Include/dynamic_annotations.h new file mode 100644 index 00000000..0bd1a833 --- /dev/null +++ b/venv/Include/dynamic_annotations.h @@ -0,0 +1,499 @@ +/* Copyright (c) 2008-2009, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * --- + * Author: Kostya Serebryany + * Copied to CPython by Jeffrey Yasskin, with all macros renamed to + * start with _Py_ to avoid colliding with users embedding Python, and + * with deprecated macros removed. + */ + +/* This file defines dynamic annotations for use with dynamic analysis + tool such as valgrind, PIN, etc. + + Dynamic annotation is a source code annotation that affects + the generated code (that is, the annotation is not a comment). + Each such annotation is attached to a particular + instruction and/or to a particular object (address) in the program. + + The annotations that should be used by users are macros in all upper-case + (e.g., _Py_ANNOTATE_NEW_MEMORY). + + Actual implementation of these macros may differ depending on the + dynamic analysis tool being used. + + See http://code.google.com/p/data-race-test/ for more information. + + This file supports the following dynamic analysis tools: + - None (DYNAMIC_ANNOTATIONS_ENABLED is not defined or zero). + Macros are defined empty. + - ThreadSanitizer, Helgrind, DRD (DYNAMIC_ANNOTATIONS_ENABLED is 1). + Macros are defined as calls to non-inlinable empty functions + that are intercepted by Valgrind. */ + +#ifndef __DYNAMIC_ANNOTATIONS_H__ +#define __DYNAMIC_ANNOTATIONS_H__ + +#ifndef DYNAMIC_ANNOTATIONS_ENABLED +# define DYNAMIC_ANNOTATIONS_ENABLED 0 +#endif + +#if DYNAMIC_ANNOTATIONS_ENABLED != 0 + + /* ------------------------------------------------------------- + Annotations useful when implementing condition variables such as CondVar, + using conditional critical sections (Await/LockWhen) and when constructing + user-defined synchronization mechanisms. + + The annotations _Py_ANNOTATE_HAPPENS_BEFORE() and + _Py_ANNOTATE_HAPPENS_AFTER() can be used to define happens-before arcs in + user-defined synchronization mechanisms: the race detector will infer an + arc from the former to the latter when they share the same argument + pointer. + + Example 1 (reference counting): + + void Unref() { + _Py_ANNOTATE_HAPPENS_BEFORE(&refcount_); + if (AtomicDecrementByOne(&refcount_) == 0) { + _Py_ANNOTATE_HAPPENS_AFTER(&refcount_); + delete this; + } + } + + Example 2 (message queue): + + void MyQueue::Put(Type *e) { + MutexLock lock(&mu_); + _Py_ANNOTATE_HAPPENS_BEFORE(e); + PutElementIntoMyQueue(e); + } + + Type *MyQueue::Get() { + MutexLock lock(&mu_); + Type *e = GetElementFromMyQueue(); + _Py_ANNOTATE_HAPPENS_AFTER(e); + return e; + } + + Note: when possible, please use the existing reference counting and message + queue implementations instead of inventing new ones. */ + + /* Report that wait on the condition variable at address "cv" has succeeded + and the lock at address "lock" is held. */ +#define _Py_ANNOTATE_CONDVAR_LOCK_WAIT(cv, lock) \ + AnnotateCondVarWait(__FILE__, __LINE__, cv, lock) + + /* Report that wait on the condition variable at "cv" has succeeded. Variant + w/o lock. 
*/ +#define _Py_ANNOTATE_CONDVAR_WAIT(cv) \ + AnnotateCondVarWait(__FILE__, __LINE__, cv, NULL) + + /* Report that we are about to signal on the condition variable at address + "cv". */ +#define _Py_ANNOTATE_CONDVAR_SIGNAL(cv) \ + AnnotateCondVarSignal(__FILE__, __LINE__, cv) + + /* Report that we are about to signal_all on the condition variable at "cv". */ +#define _Py_ANNOTATE_CONDVAR_SIGNAL_ALL(cv) \ + AnnotateCondVarSignalAll(__FILE__, __LINE__, cv) + + /* Annotations for user-defined synchronization mechanisms. */ +#define _Py_ANNOTATE_HAPPENS_BEFORE(obj) _Py_ANNOTATE_CONDVAR_SIGNAL(obj) +#define _Py_ANNOTATE_HAPPENS_AFTER(obj) _Py_ANNOTATE_CONDVAR_WAIT(obj) + + /* Report that the bytes in the range [pointer, pointer+size) are about + to be published safely. The race checker will create a happens-before + arc from the call _Py_ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) to + subsequent accesses to this memory. + Note: this annotation may not work properly if the race detector uses + sampling, i.e. does not observe all memory accesses. + */ +#define _Py_ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) \ + AnnotatePublishMemoryRange(__FILE__, __LINE__, pointer, size) + + /* Instruct the tool to create a happens-before arc between mu->Unlock() and + mu->Lock(). This annotation may slow down the race detector and hide real + races. Normally it is used only when it would be difficult to annotate each + of the mutex's critical sections individually using the annotations above. + This annotation makes sense only for hybrid race detectors. For pure + happens-before detectors this is a no-op. For more details see + http://code.google.com/p/data-race-test/wiki/PureHappensBeforeVsHybrid . */ +#define _Py_ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(mu) \ + AnnotateMutexIsUsedAsCondVar(__FILE__, __LINE__, mu) + + /* ------------------------------------------------------------- + Annotations useful when defining memory allocators, or when memory that + was protected in one way starts to be protected in another. */ + + /* Report that a new memory at "address" of size "size" has been allocated. + This might be used when the memory has been retrieved from a free list and + is about to be reused, or when the locking discipline for a variable + changes. */ +#define _Py_ANNOTATE_NEW_MEMORY(address, size) \ + AnnotateNewMemory(__FILE__, __LINE__, address, size) + + /* ------------------------------------------------------------- + Annotations useful when defining FIFO queues that transfer data between + threads. */ + + /* Report that the producer-consumer queue (such as ProducerConsumerQueue) at + address "pcq" has been created. The _Py_ANNOTATE_PCQ_* annotations should + be used only for FIFO queues. For non-FIFO queues use + _Py_ANNOTATE_HAPPENS_BEFORE (for put) and _Py_ANNOTATE_HAPPENS_AFTER (for + get). */ +#define _Py_ANNOTATE_PCQ_CREATE(pcq) \ + AnnotatePCQCreate(__FILE__, __LINE__, pcq) + + /* Report that the queue at address "pcq" is about to be destroyed. */ +#define _Py_ANNOTATE_PCQ_DESTROY(pcq) \ + AnnotatePCQDestroy(__FILE__, __LINE__, pcq) + + /* Report that we are about to put an element into a FIFO queue at address + "pcq". */ +#define _Py_ANNOTATE_PCQ_PUT(pcq) \ + AnnotatePCQPut(__FILE__, __LINE__, pcq) + + /* Report that we've just got an element from a FIFO queue at address "pcq". */ +#define _Py_ANNOTATE_PCQ_GET(pcq) \ + AnnotatePCQGet(__FILE__, __LINE__, pcq) + + /* ------------------------------------------------------------- + Annotations that suppress errors. 
It is usually better to express the + program's synchronization using the other annotations, but these can + be used when all else fails. */ + + /* Report that we may have a benign race at "pointer", with size + "sizeof(*(pointer))". "pointer" must be a non-void* pointer. Insert at the + point where "pointer" has been allocated, preferably close to the point + where the race happens. See also _Py_ANNOTATE_BENIGN_RACE_STATIC. */ +#define _Py_ANNOTATE_BENIGN_RACE(pointer, description) \ + AnnotateBenignRaceSized(__FILE__, __LINE__, pointer, \ + sizeof(*(pointer)), description) + + /* Same as _Py_ANNOTATE_BENIGN_RACE(address, description), but applies to + the memory range [address, address+size). */ +#define _Py_ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \ + AnnotateBenignRaceSized(__FILE__, __LINE__, address, size, description) + + /* Request the analysis tool to ignore all reads in the current thread + until _Py_ANNOTATE_IGNORE_READS_END is called. + Useful to ignore intentional racey reads, while still checking + other reads and all writes. + See also _Py_ANNOTATE_UNPROTECTED_READ. */ +#define _Py_ANNOTATE_IGNORE_READS_BEGIN() \ + AnnotateIgnoreReadsBegin(__FILE__, __LINE__) + + /* Stop ignoring reads. */ +#define _Py_ANNOTATE_IGNORE_READS_END() \ + AnnotateIgnoreReadsEnd(__FILE__, __LINE__) + + /* Similar to _Py_ANNOTATE_IGNORE_READS_BEGIN, but ignore writes. */ +#define _Py_ANNOTATE_IGNORE_WRITES_BEGIN() \ + AnnotateIgnoreWritesBegin(__FILE__, __LINE__) + + /* Stop ignoring writes. */ +#define _Py_ANNOTATE_IGNORE_WRITES_END() \ + AnnotateIgnoreWritesEnd(__FILE__, __LINE__) + + /* Start ignoring all memory accesses (reads and writes). */ +#define _Py_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \ + do {\ + _Py_ANNOTATE_IGNORE_READS_BEGIN();\ + _Py_ANNOTATE_IGNORE_WRITES_BEGIN();\ + }while(0)\ + + /* Stop ignoring all memory accesses. */ +#define _Py_ANNOTATE_IGNORE_READS_AND_WRITES_END() \ + do {\ + _Py_ANNOTATE_IGNORE_WRITES_END();\ + _Py_ANNOTATE_IGNORE_READS_END();\ + }while(0)\ + + /* Similar to _Py_ANNOTATE_IGNORE_READS_BEGIN, but ignore synchronization events: + RWLOCK* and CONDVAR*. */ +#define _Py_ANNOTATE_IGNORE_SYNC_BEGIN() \ + AnnotateIgnoreSyncBegin(__FILE__, __LINE__) + + /* Stop ignoring sync events. */ +#define _Py_ANNOTATE_IGNORE_SYNC_END() \ + AnnotateIgnoreSyncEnd(__FILE__, __LINE__) + + + /* Enable (enable!=0) or disable (enable==0) race detection for all threads. + This annotation could be useful if you want to skip expensive race analysis + during some period of program execution, e.g. during initialization. */ +#define _Py_ANNOTATE_ENABLE_RACE_DETECTION(enable) \ + AnnotateEnableRaceDetection(__FILE__, __LINE__, enable) + + /* ------------------------------------------------------------- + Annotations useful for debugging. */ + + /* Request to trace every access to "address". */ +#define _Py_ANNOTATE_TRACE_MEMORY(address) \ + AnnotateTraceMemory(__FILE__, __LINE__, address) + + /* Report the current thread name to a race detector. */ +#define _Py_ANNOTATE_THREAD_NAME(name) \ + AnnotateThreadName(__FILE__, __LINE__, name) + + /* ------------------------------------------------------------- + Annotations useful when implementing locks. They are not + normally needed by modules that merely use locks. + The "lock" argument is a pointer to the lock object. */ + + /* Report that a lock has been created at address "lock". 
*/ +#define _Py_ANNOTATE_RWLOCK_CREATE(lock) \ + AnnotateRWLockCreate(__FILE__, __LINE__, lock) + + /* Report that the lock at address "lock" is about to be destroyed. */ +#define _Py_ANNOTATE_RWLOCK_DESTROY(lock) \ + AnnotateRWLockDestroy(__FILE__, __LINE__, lock) + + /* Report that the lock at address "lock" has been acquired. + is_w=1 for writer lock, is_w=0 for reader lock. */ +#define _Py_ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \ + AnnotateRWLockAcquired(__FILE__, __LINE__, lock, is_w) + + /* Report that the lock at address "lock" is about to be released. */ +#define _Py_ANNOTATE_RWLOCK_RELEASED(lock, is_w) \ + AnnotateRWLockReleased(__FILE__, __LINE__, lock, is_w) + + /* ------------------------------------------------------------- + Annotations useful when implementing barriers. They are not + normally needed by modules that merely use barriers. + The "barrier" argument is a pointer to the barrier object. */ + + /* Report that the "barrier" has been initialized with initial "count". + If 'reinitialization_allowed' is true, initialization is allowed to happen + multiple times w/o calling barrier_destroy() */ +#define _Py_ANNOTATE_BARRIER_INIT(barrier, count, reinitialization_allowed) \ + AnnotateBarrierInit(__FILE__, __LINE__, barrier, count, \ + reinitialization_allowed) + + /* Report that we are about to enter barrier_wait("barrier"). */ +#define _Py_ANNOTATE_BARRIER_WAIT_BEFORE(barrier) \ + AnnotateBarrierWaitBefore(__FILE__, __LINE__, barrier) + + /* Report that we just exited barrier_wait("barrier"). */ +#define _Py_ANNOTATE_BARRIER_WAIT_AFTER(barrier) \ + AnnotateBarrierWaitAfter(__FILE__, __LINE__, barrier) + + /* Report that the "barrier" has been destroyed. */ +#define _Py_ANNOTATE_BARRIER_DESTROY(barrier) \ + AnnotateBarrierDestroy(__FILE__, __LINE__, barrier) + + /* ------------------------------------------------------------- + Annotations useful for testing race detectors. */ + + /* Report that we expect a race on the variable at "address". + Use only in unit tests for a race detector. */ +#define _Py_ANNOTATE_EXPECT_RACE(address, description) \ + AnnotateExpectRace(__FILE__, __LINE__, address, description) + + /* A no-op. Insert where you like to test the interceptors. */ +#define _Py_ANNOTATE_NO_OP(arg) \ + AnnotateNoOp(__FILE__, __LINE__, arg) + + /* Force the race detector to flush its state. The actual effect depends on + * the implementation of the detector. 
*/ +#define _Py_ANNOTATE_FLUSH_STATE() \ + AnnotateFlushState(__FILE__, __LINE__) + + +#else /* DYNAMIC_ANNOTATIONS_ENABLED == 0 */ + +#define _Py_ANNOTATE_RWLOCK_CREATE(lock) /* empty */ +#define _Py_ANNOTATE_RWLOCK_DESTROY(lock) /* empty */ +#define _Py_ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) /* empty */ +#define _Py_ANNOTATE_RWLOCK_RELEASED(lock, is_w) /* empty */ +#define _Py_ANNOTATE_BARRIER_INIT(barrier, count, reinitialization_allowed) /* */ +#define _Py_ANNOTATE_BARRIER_WAIT_BEFORE(barrier) /* empty */ +#define _Py_ANNOTATE_BARRIER_WAIT_AFTER(barrier) /* empty */ +#define _Py_ANNOTATE_BARRIER_DESTROY(barrier) /* empty */ +#define _Py_ANNOTATE_CONDVAR_LOCK_WAIT(cv, lock) /* empty */ +#define _Py_ANNOTATE_CONDVAR_WAIT(cv) /* empty */ +#define _Py_ANNOTATE_CONDVAR_SIGNAL(cv) /* empty */ +#define _Py_ANNOTATE_CONDVAR_SIGNAL_ALL(cv) /* empty */ +#define _Py_ANNOTATE_HAPPENS_BEFORE(obj) /* empty */ +#define _Py_ANNOTATE_HAPPENS_AFTER(obj) /* empty */ +#define _Py_ANNOTATE_PUBLISH_MEMORY_RANGE(address, size) /* empty */ +#define _Py_ANNOTATE_UNPUBLISH_MEMORY_RANGE(address, size) /* empty */ +#define _Py_ANNOTATE_SWAP_MEMORY_RANGE(address, size) /* empty */ +#define _Py_ANNOTATE_PCQ_CREATE(pcq) /* empty */ +#define _Py_ANNOTATE_PCQ_DESTROY(pcq) /* empty */ +#define _Py_ANNOTATE_PCQ_PUT(pcq) /* empty */ +#define _Py_ANNOTATE_PCQ_GET(pcq) /* empty */ +#define _Py_ANNOTATE_NEW_MEMORY(address, size) /* empty */ +#define _Py_ANNOTATE_EXPECT_RACE(address, description) /* empty */ +#define _Py_ANNOTATE_BENIGN_RACE(address, description) /* empty */ +#define _Py_ANNOTATE_BENIGN_RACE_SIZED(address, size, description) /* empty */ +#define _Py_ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(mu) /* empty */ +#define _Py_ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(mu) /* empty */ +#define _Py_ANNOTATE_TRACE_MEMORY(arg) /* empty */ +#define _Py_ANNOTATE_THREAD_NAME(name) /* empty */ +#define _Py_ANNOTATE_IGNORE_READS_BEGIN() /* empty */ +#define _Py_ANNOTATE_IGNORE_READS_END() /* empty */ +#define _Py_ANNOTATE_IGNORE_WRITES_BEGIN() /* empty */ +#define _Py_ANNOTATE_IGNORE_WRITES_END() /* empty */ +#define _Py_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() /* empty */ +#define _Py_ANNOTATE_IGNORE_READS_AND_WRITES_END() /* empty */ +#define _Py_ANNOTATE_IGNORE_SYNC_BEGIN() /* empty */ +#define _Py_ANNOTATE_IGNORE_SYNC_END() /* empty */ +#define _Py_ANNOTATE_ENABLE_RACE_DETECTION(enable) /* empty */ +#define _Py_ANNOTATE_NO_OP(arg) /* empty */ +#define _Py_ANNOTATE_FLUSH_STATE() /* empty */ + +#endif /* DYNAMIC_ANNOTATIONS_ENABLED */ + +/* Use the macros above rather than using these functions directly. 
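A short editorial sketch, not part of the vendored header: since these macros are renamed with the _Py_ prefix for use inside CPython itself, a call site in the interpreter's own C code might annotate a deliberately unsynchronized counter like this (the counter and function are hypothetical):

    // Sketch: mark an approximate statistics counter as a benign race and
    // give the thread a readable name in race-detector reports.
    static long hit_count;             // hypothetical counter, updated without a lock

    static void
    worker_init(void)
    {
        _Py_ANNOTATE_THREAD_NAME("worker");
        _Py_ANNOTATE_BENIGN_RACE(&hit_count, "approximate hit counter");
    }

When DYNAMIC_ANNOTATIONS_ENABLED is 0, both macros expand to nothing, so the annotations cost nothing in normal builds.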
*/ +#ifdef __cplusplus +extern "C" { +#endif +void AnnotateRWLockCreate(const char *file, int line, + const volatile void *lock); +void AnnotateRWLockDestroy(const char *file, int line, + const volatile void *lock); +void AnnotateRWLockAcquired(const char *file, int line, + const volatile void *lock, long is_w); +void AnnotateRWLockReleased(const char *file, int line, + const volatile void *lock, long is_w); +void AnnotateBarrierInit(const char *file, int line, + const volatile void *barrier, long count, + long reinitialization_allowed); +void AnnotateBarrierWaitBefore(const char *file, int line, + const volatile void *barrier); +void AnnotateBarrierWaitAfter(const char *file, int line, + const volatile void *barrier); +void AnnotateBarrierDestroy(const char *file, int line, + const volatile void *barrier); +void AnnotateCondVarWait(const char *file, int line, + const volatile void *cv, + const volatile void *lock); +void AnnotateCondVarSignal(const char *file, int line, + const volatile void *cv); +void AnnotateCondVarSignalAll(const char *file, int line, + const volatile void *cv); +void AnnotatePublishMemoryRange(const char *file, int line, + const volatile void *address, + long size); +void AnnotateUnpublishMemoryRange(const char *file, int line, + const volatile void *address, + long size); +void AnnotatePCQCreate(const char *file, int line, + const volatile void *pcq); +void AnnotatePCQDestroy(const char *file, int line, + const volatile void *pcq); +void AnnotatePCQPut(const char *file, int line, + const volatile void *pcq); +void AnnotatePCQGet(const char *file, int line, + const volatile void *pcq); +void AnnotateNewMemory(const char *file, int line, + const volatile void *address, + long size); +void AnnotateExpectRace(const char *file, int line, + const volatile void *address, + const char *description); +void AnnotateBenignRace(const char *file, int line, + const volatile void *address, + const char *description); +void AnnotateBenignRaceSized(const char *file, int line, + const volatile void *address, + long size, + const char *description); +void AnnotateMutexIsUsedAsCondVar(const char *file, int line, + const volatile void *mu); +void AnnotateTraceMemory(const char *file, int line, + const volatile void *arg); +void AnnotateThreadName(const char *file, int line, + const char *name); +void AnnotateIgnoreReadsBegin(const char *file, int line); +void AnnotateIgnoreReadsEnd(const char *file, int line); +void AnnotateIgnoreWritesBegin(const char *file, int line); +void AnnotateIgnoreWritesEnd(const char *file, int line); +void AnnotateEnableRaceDetection(const char *file, int line, int enable); +void AnnotateNoOp(const char *file, int line, + const volatile void *arg); +void AnnotateFlushState(const char *file, int line); + +/* Return non-zero value if running under valgrind. + + If "valgrind.h" is included into dynamic_annotations.c, + the regular valgrind mechanism will be used. + See http://valgrind.org/docs/manual/manual-core-adv.html about + RUNNING_ON_VALGRIND and other valgrind "client requests". + The file "valgrind.h" may be obtained by doing + svn co svn://svn.valgrind.org/valgrind/trunk/include + + If for some reason you can't use "valgrind.h" or want to fake valgrind, + there are two ways to make this function return non-zero: + - Use environment variable: export RUNNING_ON_VALGRIND=1 + - Make your tool intercept the function RunningOnValgrind() and + change its return value. 
+ */ +int RunningOnValgrind(void); + +#ifdef __cplusplus +} +#endif + +#if DYNAMIC_ANNOTATIONS_ENABLED != 0 && defined(__cplusplus) + + /* _Py_ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racey reads. + + Instead of doing + _Py_ANNOTATE_IGNORE_READS_BEGIN(); + ... = x; + _Py_ANNOTATE_IGNORE_READS_END(); + one can use + ... = _Py_ANNOTATE_UNPROTECTED_READ(x); */ + template + inline T _Py_ANNOTATE_UNPROTECTED_READ(const volatile T &x) { + _Py_ANNOTATE_IGNORE_READS_BEGIN(); + T res = x; + _Py_ANNOTATE_IGNORE_READS_END(); + return res; + } + /* Apply _Py_ANNOTATE_BENIGN_RACE_SIZED to a static variable. */ +#define _Py_ANNOTATE_BENIGN_RACE_STATIC(static_var, description) \ + namespace { \ + class static_var ## _annotator { \ + public: \ + static_var ## _annotator() { \ + _Py_ANNOTATE_BENIGN_RACE_SIZED(&static_var, \ + sizeof(static_var), \ + # static_var ": " description); \ + } \ + }; \ + static static_var ## _annotator the ## static_var ## _annotator;\ + } +#else /* DYNAMIC_ANNOTATIONS_ENABLED == 0 */ + +#define _Py_ANNOTATE_UNPROTECTED_READ(x) (x) +#define _Py_ANNOTATE_BENIGN_RACE_STATIC(static_var, description) /* empty */ + +#endif /* DYNAMIC_ANNOTATIONS_ENABLED */ + +#endif /* __DYNAMIC_ANNOTATIONS_H__ */ diff --git a/venv/Include/enumobject.h b/venv/Include/enumobject.h new file mode 100644 index 00000000..c14dbfc8 --- /dev/null +++ b/venv/Include/enumobject.h @@ -0,0 +1,17 @@ +#ifndef Py_ENUMOBJECT_H +#define Py_ENUMOBJECT_H + +/* Enumerate Object */ + +#ifdef __cplusplus +extern "C" { +#endif + +PyAPI_DATA(PyTypeObject) PyEnum_Type; +PyAPI_DATA(PyTypeObject) PyReversed_Type; + +#ifdef __cplusplus +} +#endif + +#endif /* !Py_ENUMOBJECT_H */ diff --git a/venv/Include/errcode.h b/venv/Include/errcode.h new file mode 100644 index 00000000..b37cd261 --- /dev/null +++ b/venv/Include/errcode.h @@ -0,0 +1,38 @@ +#ifndef Py_ERRCODE_H +#define Py_ERRCODE_H +#ifdef __cplusplus +extern "C" { +#endif + + +/* Error codes passed around between file input, tokenizer, parser and + interpreter. This is necessary so we can turn them into Python + exceptions at a higher level. Note that some errors have a + slightly different meaning when passed from the tokenizer to the + parser than when passed from the parser to the interpreter; e.g. + the parser only returns E_EOF when it hits EOF immediately, and it + never returns E_OK. 
*/ + +#define E_OK 10 /* No error */ +#define E_EOF 11 /* End Of File */ +#define E_INTR 12 /* Interrupted */ +#define E_TOKEN 13 /* Bad token */ +#define E_SYNTAX 14 /* Syntax error */ +#define E_NOMEM 15 /* Ran out of memory */ +#define E_DONE 16 /* Parsing complete */ +#define E_ERROR 17 /* Execution error */ +#define E_TABSPACE 18 /* Inconsistent mixing of tabs and spaces */ +#define E_OVERFLOW 19 /* Node had too many children */ +#define E_TOODEEP 20 /* Too many indentation levels */ +#define E_DEDENT 21 /* No matching outer block for dedent */ +#define E_DECODE 22 /* Error in decoding into Unicode */ +#define E_EOFS 23 /* EOF in triple-quoted string */ +#define E_EOLS 24 /* EOL in single-quoted string */ +#define E_LINECONT 25 /* Unexpected characters after a line continuation */ +#define E_IDENTIFIER 26 /* Invalid characters in identifier */ +#define E_BADSINGLE 27 /* Ill-formed single statement input */ + +#ifdef __cplusplus +} +#endif +#endif /* !Py_ERRCODE_H */ diff --git a/venv/Include/eval.h b/venv/Include/eval.h new file mode 100644 index 00000000..2c1c2d05 --- /dev/null +++ b/venv/Include/eval.h @@ -0,0 +1,37 @@ + +/* Interface to execute compiled code */ + +#ifndef Py_EVAL_H +#define Py_EVAL_H +#ifdef __cplusplus +extern "C" { +#endif + +PyAPI_FUNC(PyObject *) PyEval_EvalCode(PyObject *, PyObject *, PyObject *); + +PyAPI_FUNC(PyObject *) PyEval_EvalCodeEx(PyObject *co, + PyObject *globals, + PyObject *locals, + PyObject *const *args, int argc, + PyObject *const *kwds, int kwdc, + PyObject *const *defs, int defc, + PyObject *kwdefs, PyObject *closure); + +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject *) _PyEval_EvalCodeWithName( + PyObject *co, + PyObject *globals, PyObject *locals, + PyObject *const *args, Py_ssize_t argcount, + PyObject *const *kwnames, PyObject *const *kwargs, + Py_ssize_t kwcount, int kwstep, + PyObject *const *defs, Py_ssize_t defcount, + PyObject *kwdefs, PyObject *closure, + PyObject *name, PyObject *qualname); + +PyAPI_FUNC(PyObject *) _PyEval_CallTracing(PyObject *func, PyObject *args); +#endif + +#ifdef __cplusplus +} +#endif +#endif /* !Py_EVAL_H */ diff --git a/venv/Include/fileobject.h b/venv/Include/fileobject.h new file mode 100644 index 00000000..89e8dd6a --- /dev/null +++ b/venv/Include/fileobject.h @@ -0,0 +1,55 @@ +/* File object interface (what's left of it -- see io.py) */ + +#ifndef Py_FILEOBJECT_H +#define Py_FILEOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +#define PY_STDIOTEXTMODE "b" + +PyAPI_FUNC(PyObject *) PyFile_FromFd(int, const char *, const char *, int, + const char *, const char *, + const char *, int); +PyAPI_FUNC(PyObject *) PyFile_GetLine(PyObject *, int); +PyAPI_FUNC(int) PyFile_WriteObject(PyObject *, PyObject *, int); +PyAPI_FUNC(int) PyFile_WriteString(const char *, PyObject *); +PyAPI_FUNC(int) PyObject_AsFileDescriptor(PyObject *); +#ifndef Py_LIMITED_API +PyAPI_FUNC(char *) Py_UniversalNewlineFgets(char *, int, FILE*, PyObject *); +#endif + +/* The default encoding used by the platform file system APIs + If non-NULL, this is different than the default encoding for strings +*/ +PyAPI_DATA(const char *) Py_FileSystemDefaultEncoding; +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03060000 +PyAPI_DATA(const char *) Py_FileSystemDefaultEncodeErrors; +#endif +PyAPI_DATA(int) Py_HasFileSystemDefaultEncoding; + +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03070000 +PyAPI_DATA(int) Py_UTF8Mode; +#endif + +/* Internal API + + The std printer acts as a preliminary sys.stderr until the new io + 
infrastructure is in place. */ +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject *) PyFile_NewStdPrinter(int); +PyAPI_DATA(PyTypeObject) PyStdPrinter_Type; +#endif /* Py_LIMITED_API */ + +/* A routine to check if a file descriptor can be select()-ed. */ +#ifdef _MSC_VER + /* On Windows, any socket fd can be select()-ed, no matter how high */ + #define _PyIsSelectable_fd(FD) (1) +#else + #define _PyIsSelectable_fd(FD) ((unsigned int)(FD) < (unsigned int)FD_SETSIZE) +#endif + +#ifdef __cplusplus +} +#endif +#endif /* !Py_FILEOBJECT_H */ diff --git a/venv/Include/fileutils.h b/venv/Include/fileutils.h new file mode 100644 index 00000000..e4bf6d4d --- /dev/null +++ b/venv/Include/fileutils.h @@ -0,0 +1,177 @@ +#ifndef Py_FILEUTILS_H +#define Py_FILEUTILS_H + +#ifdef __cplusplus +extern "C" { +#endif + +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03050000 +PyAPI_FUNC(wchar_t *) Py_DecodeLocale( + const char *arg, + size_t *size); + +PyAPI_FUNC(char*) Py_EncodeLocale( + const wchar_t *text, + size_t *error_pos); + +PyAPI_FUNC(char*) _Py_EncodeLocaleRaw( + const wchar_t *text, + size_t *error_pos); +#endif + +#ifdef Py_BUILD_CORE +PyAPI_FUNC(int) _Py_DecodeUTF8Ex( + const char *arg, + Py_ssize_t arglen, + wchar_t **wstr, + size_t *wlen, + const char **reason, + int surrogateescape); + +PyAPI_FUNC(int) _Py_EncodeUTF8Ex( + const wchar_t *text, + char **str, + size_t *error_pos, + const char **reason, + int raw_malloc, + int surrogateescape); + +PyAPI_FUNC(wchar_t*) _Py_DecodeUTF8_surrogateescape( + const char *arg, + Py_ssize_t arglen); + +PyAPI_FUNC(int) _Py_DecodeLocaleEx( + const char *arg, + wchar_t **wstr, + size_t *wlen, + const char **reason, + int current_locale, + int surrogateescape); + +PyAPI_FUNC(int) _Py_EncodeLocaleEx( + const wchar_t *text, + char **str, + size_t *error_pos, + const char **reason, + int current_locale, + int surrogateescape); +#endif + +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject *) _Py_device_encoding(int); + +#ifdef MS_WINDOWS +struct _Py_stat_struct { + unsigned long st_dev; + uint64_t st_ino; + unsigned short st_mode; + int st_nlink; + int st_uid; + int st_gid; + unsigned long st_rdev; + __int64 st_size; + time_t st_atime; + int st_atime_nsec; + time_t st_mtime; + int st_mtime_nsec; + time_t st_ctime; + int st_ctime_nsec; + unsigned long st_file_attributes; +}; +#else +# define _Py_stat_struct stat +#endif + +PyAPI_FUNC(int) _Py_fstat( + int fd, + struct _Py_stat_struct *status); + +PyAPI_FUNC(int) _Py_fstat_noraise( + int fd, + struct _Py_stat_struct *status); + +PyAPI_FUNC(int) _Py_stat( + PyObject *path, + struct stat *status); + +PyAPI_FUNC(int) _Py_open( + const char *pathname, + int flags); + +PyAPI_FUNC(int) _Py_open_noraise( + const char *pathname, + int flags); + +PyAPI_FUNC(FILE *) _Py_wfopen( + const wchar_t *path, + const wchar_t *mode); + +PyAPI_FUNC(FILE*) _Py_fopen( + const char *pathname, + const char *mode); + +PyAPI_FUNC(FILE*) _Py_fopen_obj( + PyObject *path, + const char *mode); + +PyAPI_FUNC(Py_ssize_t) _Py_read( + int fd, + void *buf, + size_t count); + +PyAPI_FUNC(Py_ssize_t) _Py_write( + int fd, + const void *buf, + size_t count); + +PyAPI_FUNC(Py_ssize_t) _Py_write_noraise( + int fd, + const void *buf, + size_t count); + +#ifdef HAVE_READLINK +PyAPI_FUNC(int) _Py_wreadlink( + const wchar_t *path, + wchar_t *buf, + size_t bufsiz); +#endif + +#ifdef HAVE_REALPATH +PyAPI_FUNC(wchar_t*) _Py_wrealpath( + const wchar_t *path, + wchar_t *resolved_path, + size_t resolved_path_size); +#endif + +PyAPI_FUNC(wchar_t*) _Py_wgetcwd( + wchar_t 
*buf, + size_t size); + +PyAPI_FUNC(int) _Py_get_inheritable(int fd); + +PyAPI_FUNC(int) _Py_set_inheritable(int fd, int inheritable, + int *atomic_flag_works); + +PyAPI_FUNC(int) _Py_set_inheritable_async_safe(int fd, int inheritable, + int *atomic_flag_works); + +PyAPI_FUNC(int) _Py_dup(int fd); + +#ifndef MS_WINDOWS +PyAPI_FUNC(int) _Py_get_blocking(int fd); + +PyAPI_FUNC(int) _Py_set_blocking(int fd, int blocking); +#endif /* !MS_WINDOWS */ + +PyAPI_FUNC(int) _Py_GetLocaleconvNumeric( + PyObject **decimal_point, + PyObject **thousands_sep, + const char **grouping); + +#endif /* Py_LIMITED_API */ + +#ifdef __cplusplus +} +#endif + +#endif /* !Py_FILEUTILS_H */ diff --git a/venv/Include/floatobject.h b/venv/Include/floatobject.h new file mode 100644 index 00000000..f1044d64 --- /dev/null +++ b/venv/Include/floatobject.h @@ -0,0 +1,130 @@ + +/* Float object interface */ + +/* +PyFloatObject represents a (double precision) floating point number. +*/ + +#ifndef Py_FLOATOBJECT_H +#define Py_FLOATOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_LIMITED_API +typedef struct { + PyObject_HEAD + double ob_fval; +} PyFloatObject; +#endif + +PyAPI_DATA(PyTypeObject) PyFloat_Type; + +#define PyFloat_Check(op) PyObject_TypeCheck(op, &PyFloat_Type) +#define PyFloat_CheckExact(op) (Py_TYPE(op) == &PyFloat_Type) + +#ifdef Py_NAN +#define Py_RETURN_NAN return PyFloat_FromDouble(Py_NAN) +#endif + +#define Py_RETURN_INF(sign) do \ + if (copysign(1., sign) == 1.) { \ + return PyFloat_FromDouble(Py_HUGE_VAL); \ + } else { \ + return PyFloat_FromDouble(-Py_HUGE_VAL); \ + } while(0) + +PyAPI_FUNC(double) PyFloat_GetMax(void); +PyAPI_FUNC(double) PyFloat_GetMin(void); +PyAPI_FUNC(PyObject *) PyFloat_GetInfo(void); + +/* Return Python float from string PyObject. */ +PyAPI_FUNC(PyObject *) PyFloat_FromString(PyObject*); + +/* Return Python float from C double. */ +PyAPI_FUNC(PyObject *) PyFloat_FromDouble(double); + +/* Extract C double from Python float. The macro version trades safety for + speed. */ +PyAPI_FUNC(double) PyFloat_AsDouble(PyObject *); +#ifndef Py_LIMITED_API +#define PyFloat_AS_DOUBLE(op) (((PyFloatObject *)(op))->ob_fval) +#endif + +#ifndef Py_LIMITED_API +/* _PyFloat_{Pack,Unpack}{4,8} + * + * The struct and pickle (at least) modules need an efficient platform- + * independent way to store floating-point values as byte strings. + * The Pack routines produce a string from a C double, and the Unpack + * routines produce a C double from such a string. The suffix (4 or 8) + * specifies the number of bytes in the string. + * + * On platforms that appear to use (see _PyFloat_Init()) IEEE-754 formats + * these functions work by copying bits. On other platforms, the formats the + * 4- byte format is identical to the IEEE-754 single precision format, and + * the 8-byte format to the IEEE-754 double precision format, although the + * packing of INFs and NaNs (if such things exist on the platform) isn't + * handled correctly, and attempting to unpack a string containing an IEEE + * INF or NaN will raise an exception. + * + * On non-IEEE platforms with more precision, or larger dynamic range, than + * 754 supports, not all values can be packed; on non-IEEE platforms with less + * precision, or smaller dynamic range, not all values can be unpacked. What + * happens in such cases is partly accidental (alas). + */ + +/* The pack routines write 2, 4 or 8 bytes, starting at p. 
le is a bool + * argument, true if you want the string in little-endian format (exponent + * last, at p+1, p+3 or p+7), false if you want big-endian format (exponent + * first, at p). + * Return value: 0 if all is OK, -1 if error (and an exception is + * set, most likely OverflowError). + * There are two problems on non-IEEE platforms: + * 1): What this does is undefined if x is a NaN or infinity. + * 2): -0.0 and +0.0 produce the same string. + */ +PyAPI_FUNC(int) _PyFloat_Pack2(double x, unsigned char *p, int le); +PyAPI_FUNC(int) _PyFloat_Pack4(double x, unsigned char *p, int le); +PyAPI_FUNC(int) _PyFloat_Pack8(double x, unsigned char *p, int le); + +/* Needed for the old way for marshal to store a floating point number. + Returns the string length copied into p, -1 on error. + */ +PyAPI_FUNC(int) _PyFloat_Repr(double x, char *p, size_t len); + +/* Used to get the important decimal digits of a double */ +PyAPI_FUNC(int) _PyFloat_Digits(char *buf, double v, int *signum); +PyAPI_FUNC(void) _PyFloat_DigitsInit(void); + +/* The unpack routines read 2, 4 or 8 bytes, starting at p. le is a bool + * argument, true if the string is in little-endian format (exponent + * last, at p+1, p+3 or p+7), false if big-endian (exponent first, at p). + * Return value: The unpacked double. On error, this is -1.0 and + * PyErr_Occurred() is true (and an exception is set, most likely + * OverflowError). Note that on a non-IEEE platform this will refuse + * to unpack a string that represents a NaN or infinity. + */ +PyAPI_FUNC(double) _PyFloat_Unpack2(const unsigned char *p, int le); +PyAPI_FUNC(double) _PyFloat_Unpack4(const unsigned char *p, int le); +PyAPI_FUNC(double) _PyFloat_Unpack8(const unsigned char *p, int le); + +/* free list api */ +PyAPI_FUNC(int) PyFloat_ClearFreeList(void); + +PyAPI_FUNC(void) _PyFloat_DebugMallocStats(FILE* out); + +/* Format the object based on the format_spec, as defined in PEP 3101 + (Advanced String Formatting). */ +PyAPI_FUNC(int) _PyFloat_FormatAdvancedWriter( + _PyUnicodeWriter *writer, + PyObject *obj, + PyObject *format_spec, + Py_ssize_t start, + Py_ssize_t end); +#endif /* Py_LIMITED_API */ + +#ifdef __cplusplus +} +#endif +#endif /* !Py_FLOATOBJECT_H */ diff --git a/venv/Include/frameobject.h b/venv/Include/frameobject.h new file mode 100644 index 00000000..a95baf88 --- /dev/null +++ b/venv/Include/frameobject.h @@ -0,0 +1,93 @@ + +/* Frame object interface */ + +#ifndef Py_LIMITED_API +#ifndef Py_FRAMEOBJECT_H +#define Py_FRAMEOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct { + int b_type; /* what kind of block this is */ + int b_handler; /* where to jump to find handler */ + int b_level; /* value stack level to pop to */ +} PyTryBlock; + +typedef struct _frame { + PyObject_VAR_HEAD + struct _frame *f_back; /* previous frame, or NULL */ + PyCodeObject *f_code; /* code segment */ + PyObject *f_builtins; /* builtin symbol table (PyDictObject) */ + PyObject *f_globals; /* global symbol table (PyDictObject) */ + PyObject *f_locals; /* local symbol table (any mapping) */ + PyObject **f_valuestack; /* points after the last local */ + /* Next free slot in f_valuestack. Frame creation sets to f_valuestack. + Frame evaluation usually NULLs it, but a frame that yields sets it + to the current stack top. */ + PyObject **f_stacktop; + PyObject *f_trace; /* Trace function */ + char f_trace_lines; /* Emit per-line trace events? */ + char f_trace_opcodes; /* Emit per-opcode trace events? 
*/ + + /* Borrowed reference to a generator, or NULL */ + PyObject *f_gen; + + int f_lasti; /* Last instruction if called */ + /* Call PyFrame_GetLineNumber() instead of reading this field + directly. As of 2.3 f_lineno is only valid when tracing is + active (i.e. when f_trace is set). At other times we use + PyCode_Addr2Line to calculate the line from the current + bytecode index. */ + int f_lineno; /* Current line number */ + int f_iblock; /* index in f_blockstack */ + char f_executing; /* whether the frame is still executing */ + PyTryBlock f_blockstack[CO_MAXBLOCKS]; /* for try and loop blocks */ + PyObject *f_localsplus[1]; /* locals+stack, dynamically sized */ +} PyFrameObject; + + +/* Standard object interface */ + +PyAPI_DATA(PyTypeObject) PyFrame_Type; + +#define PyFrame_Check(op) (Py_TYPE(op) == &PyFrame_Type) + +PyAPI_FUNC(PyFrameObject *) PyFrame_New(PyThreadState *, PyCodeObject *, + PyObject *, PyObject *); + +/* only internal use */ +PyFrameObject* _PyFrame_New_NoTrack(PyThreadState *, PyCodeObject *, + PyObject *, PyObject *); + + +/* The rest of the interface is specific for frame objects */ + +/* Block management functions */ + +PyAPI_FUNC(void) PyFrame_BlockSetup(PyFrameObject *, int, int, int); +PyAPI_FUNC(PyTryBlock *) PyFrame_BlockPop(PyFrameObject *); + +/* Extend the value stack */ + +PyAPI_FUNC(PyObject **) PyFrame_ExtendStack(PyFrameObject *, int, int); + +/* Conversions between "fast locals" and locals in dictionary */ + +PyAPI_FUNC(void) PyFrame_LocalsToFast(PyFrameObject *, int); + +PyAPI_FUNC(int) PyFrame_FastToLocalsWithError(PyFrameObject *f); +PyAPI_FUNC(void) PyFrame_FastToLocals(PyFrameObject *); + +PyAPI_FUNC(int) PyFrame_ClearFreeList(void); + +PyAPI_FUNC(void) _PyFrame_DebugMallocStats(FILE *out); + +/* Return the line of code the frame is currently executing. */ +PyAPI_FUNC(int) PyFrame_GetLineNumber(PyFrameObject *); + +#ifdef __cplusplus +} +#endif +#endif /* !Py_FRAMEOBJECT_H */ +#endif /* Py_LIMITED_API */ diff --git a/venv/Include/funcobject.h b/venv/Include/funcobject.h new file mode 100644 index 00000000..86674ac9 --- /dev/null +++ b/venv/Include/funcobject.h @@ -0,0 +1,103 @@ + +/* Function object interface */ +#ifndef Py_LIMITED_API +#ifndef Py_FUNCOBJECT_H +#define Py_FUNCOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +/* Function objects and code objects should not be confused with each other: + * + * Function objects are created by the execution of the 'def' statement. + * They reference a code object in their __code__ attribute, which is a + * purely syntactic object, i.e. nothing more than a compiled version of some + * source code lines. There is one code object per source code "fragment", + * but each code object can be referenced by zero or many function objects + * depending only on how many times the 'def' statement in the source was + * executed so far. 
+ */ + +typedef struct { + PyObject_HEAD + PyObject *func_code; /* A code object, the __code__ attribute */ + PyObject *func_globals; /* A dictionary (other mappings won't do) */ + PyObject *func_defaults; /* NULL or a tuple */ + PyObject *func_kwdefaults; /* NULL or a dict */ + PyObject *func_closure; /* NULL or a tuple of cell objects */ + PyObject *func_doc; /* The __doc__ attribute, can be anything */ + PyObject *func_name; /* The __name__ attribute, a string object */ + PyObject *func_dict; /* The __dict__ attribute, a dict or NULL */ + PyObject *func_weakreflist; /* List of weak references */ + PyObject *func_module; /* The __module__ attribute, can be anything */ + PyObject *func_annotations; /* Annotations, a dict or NULL */ + PyObject *func_qualname; /* The qualified name */ + + /* Invariant: + * func_closure contains the bindings for func_code->co_freevars, so + * PyTuple_Size(func_closure) == PyCode_GetNumFree(func_code) + * (func_closure may be NULL if PyCode_GetNumFree(func_code) == 0). + */ +} PyFunctionObject; + +PyAPI_DATA(PyTypeObject) PyFunction_Type; + +#define PyFunction_Check(op) (Py_TYPE(op) == &PyFunction_Type) + +PyAPI_FUNC(PyObject *) PyFunction_New(PyObject *, PyObject *); +PyAPI_FUNC(PyObject *) PyFunction_NewWithQualName(PyObject *, PyObject *, PyObject *); +PyAPI_FUNC(PyObject *) PyFunction_GetCode(PyObject *); +PyAPI_FUNC(PyObject *) PyFunction_GetGlobals(PyObject *); +PyAPI_FUNC(PyObject *) PyFunction_GetModule(PyObject *); +PyAPI_FUNC(PyObject *) PyFunction_GetDefaults(PyObject *); +PyAPI_FUNC(int) PyFunction_SetDefaults(PyObject *, PyObject *); +PyAPI_FUNC(PyObject *) PyFunction_GetKwDefaults(PyObject *); +PyAPI_FUNC(int) PyFunction_SetKwDefaults(PyObject *, PyObject *); +PyAPI_FUNC(PyObject *) PyFunction_GetClosure(PyObject *); +PyAPI_FUNC(int) PyFunction_SetClosure(PyObject *, PyObject *); +PyAPI_FUNC(PyObject *) PyFunction_GetAnnotations(PyObject *); +PyAPI_FUNC(int) PyFunction_SetAnnotations(PyObject *, PyObject *); + +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject *) _PyFunction_FastCallDict( + PyObject *func, + PyObject *const *args, + Py_ssize_t nargs, + PyObject *kwargs); + +PyAPI_FUNC(PyObject *) _PyFunction_FastCallKeywords( + PyObject *func, + PyObject *const *stack, + Py_ssize_t nargs, + PyObject *kwnames); +#endif + +/* Macros for direct access to these values. Type checks are *not* + done, so use with care. 
*/ +#define PyFunction_GET_CODE(func) \ + (((PyFunctionObject *)func) -> func_code) +#define PyFunction_GET_GLOBALS(func) \ + (((PyFunctionObject *)func) -> func_globals) +#define PyFunction_GET_MODULE(func) \ + (((PyFunctionObject *)func) -> func_module) +#define PyFunction_GET_DEFAULTS(func) \ + (((PyFunctionObject *)func) -> func_defaults) +#define PyFunction_GET_KW_DEFAULTS(func) \ + (((PyFunctionObject *)func) -> func_kwdefaults) +#define PyFunction_GET_CLOSURE(func) \ + (((PyFunctionObject *)func) -> func_closure) +#define PyFunction_GET_ANNOTATIONS(func) \ + (((PyFunctionObject *)func) -> func_annotations) + +/* The classmethod and staticmethod types lives here, too */ +PyAPI_DATA(PyTypeObject) PyClassMethod_Type; +PyAPI_DATA(PyTypeObject) PyStaticMethod_Type; + +PyAPI_FUNC(PyObject *) PyClassMethod_New(PyObject *); +PyAPI_FUNC(PyObject *) PyStaticMethod_New(PyObject *); + +#ifdef __cplusplus +} +#endif +#endif /* !Py_FUNCOBJECT_H */ +#endif /* Py_LIMITED_API */ diff --git a/venv/Include/genobject.h b/venv/Include/genobject.h new file mode 100644 index 00000000..16b98333 --- /dev/null +++ b/venv/Include/genobject.h @@ -0,0 +1,105 @@ + +/* Generator object interface */ + +#ifndef Py_LIMITED_API +#ifndef Py_GENOBJECT_H +#define Py_GENOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +struct _frame; /* Avoid including frameobject.h */ + +/* _PyGenObject_HEAD defines the initial segment of generator + and coroutine objects. */ +#define _PyGenObject_HEAD(prefix) \ + PyObject_HEAD \ + /* Note: gi_frame can be NULL if the generator is "finished" */ \ + struct _frame *prefix##_frame; \ + /* True if generator is being executed. */ \ + char prefix##_running; \ + /* The code object backing the generator */ \ + PyObject *prefix##_code; \ + /* List of weak reference. */ \ + PyObject *prefix##_weakreflist; \ + /* Name of the generator. */ \ + PyObject *prefix##_name; \ + /* Qualified name of the generator. */ \ + PyObject *prefix##_qualname; \ + _PyErr_StackItem prefix##_exc_state; + +typedef struct { + /* The gi_ prefix is intended to remind of generator-iterator. */ + _PyGenObject_HEAD(gi) +} PyGenObject; + +PyAPI_DATA(PyTypeObject) PyGen_Type; + +#define PyGen_Check(op) PyObject_TypeCheck(op, &PyGen_Type) +#define PyGen_CheckExact(op) (Py_TYPE(op) == &PyGen_Type) + +PyAPI_FUNC(PyObject *) PyGen_New(struct _frame *); +PyAPI_FUNC(PyObject *) PyGen_NewWithQualName(struct _frame *, + PyObject *name, PyObject *qualname); +PyAPI_FUNC(int) PyGen_NeedsFinalizing(PyGenObject *); +PyAPI_FUNC(int) _PyGen_SetStopIterationValue(PyObject *); +PyAPI_FUNC(int) _PyGen_FetchStopIterationValue(PyObject **); +PyAPI_FUNC(PyObject *) _PyGen_Send(PyGenObject *, PyObject *); +PyObject *_PyGen_yf(PyGenObject *); +PyAPI_FUNC(void) _PyGen_Finalize(PyObject *self); + +#ifndef Py_LIMITED_API +typedef struct { + _PyGenObject_HEAD(cr) + PyObject *cr_origin; +} PyCoroObject; + +PyAPI_DATA(PyTypeObject) PyCoro_Type; +PyAPI_DATA(PyTypeObject) _PyCoroWrapper_Type; + +PyAPI_DATA(PyTypeObject) _PyAIterWrapper_Type; + +#define PyCoro_CheckExact(op) (Py_TYPE(op) == &PyCoro_Type) +PyObject *_PyCoro_GetAwaitableIter(PyObject *o); +PyAPI_FUNC(PyObject *) PyCoro_New(struct _frame *, + PyObject *name, PyObject *qualname); + +/* Asynchronous Generators */ + +typedef struct { + _PyGenObject_HEAD(ag) + PyObject *ag_finalizer; + + /* Flag is set to 1 when hooks set up by sys.set_asyncgen_hooks + were called on the generator, to avoid calling them more + than once. 
*/ + int ag_hooks_inited; + + /* Flag is set to 1 when aclose() is called for the first time, or + when a StopAsyncIteration exception is raised. */ + int ag_closed; +} PyAsyncGenObject; + +PyAPI_DATA(PyTypeObject) PyAsyncGen_Type; +PyAPI_DATA(PyTypeObject) _PyAsyncGenASend_Type; +PyAPI_DATA(PyTypeObject) _PyAsyncGenWrappedValue_Type; +PyAPI_DATA(PyTypeObject) _PyAsyncGenAThrow_Type; + +PyAPI_FUNC(PyObject *) PyAsyncGen_New(struct _frame *, + PyObject *name, PyObject *qualname); + +#define PyAsyncGen_CheckExact(op) (Py_TYPE(op) == &PyAsyncGen_Type) + +PyObject *_PyAsyncGenValueWrapperNew(PyObject *); + +int PyAsyncGen_ClearFreeLists(void); + +#endif + +#undef _PyGenObject_HEAD + +#ifdef __cplusplus +} +#endif +#endif /* !Py_GENOBJECT_H */ +#endif /* Py_LIMITED_API */ diff --git a/venv/Include/graminit.h b/venv/Include/graminit.h new file mode 100644 index 00000000..bdfe821a --- /dev/null +++ b/venv/Include/graminit.h @@ -0,0 +1,89 @@ +/* Generated by Parser/pgen */ + +#define single_input 256 +#define file_input 257 +#define eval_input 258 +#define decorator 259 +#define decorators 260 +#define decorated 261 +#define async_funcdef 262 +#define funcdef 263 +#define parameters 264 +#define typedargslist 265 +#define tfpdef 266 +#define varargslist 267 +#define vfpdef 268 +#define stmt 269 +#define simple_stmt 270 +#define small_stmt 271 +#define expr_stmt 272 +#define annassign 273 +#define testlist_star_expr 274 +#define augassign 275 +#define del_stmt 276 +#define pass_stmt 277 +#define flow_stmt 278 +#define break_stmt 279 +#define continue_stmt 280 +#define return_stmt 281 +#define yield_stmt 282 +#define raise_stmt 283 +#define import_stmt 284 +#define import_name 285 +#define import_from 286 +#define import_as_name 287 +#define dotted_as_name 288 +#define import_as_names 289 +#define dotted_as_names 290 +#define dotted_name 291 +#define global_stmt 292 +#define nonlocal_stmt 293 +#define assert_stmt 294 +#define compound_stmt 295 +#define async_stmt 296 +#define if_stmt 297 +#define while_stmt 298 +#define for_stmt 299 +#define try_stmt 300 +#define with_stmt 301 +#define with_item 302 +#define except_clause 303 +#define suite 304 +#define test 305 +#define test_nocond 306 +#define lambdef 307 +#define lambdef_nocond 308 +#define or_test 309 +#define and_test 310 +#define not_test 311 +#define comparison 312 +#define comp_op 313 +#define star_expr 314 +#define expr 315 +#define xor_expr 316 +#define and_expr 317 +#define shift_expr 318 +#define arith_expr 319 +#define term 320 +#define factor 321 +#define power 322 +#define atom_expr 323 +#define atom 324 +#define testlist_comp 325 +#define trailer 326 +#define subscriptlist 327 +#define subscript 328 +#define sliceop 329 +#define exprlist 330 +#define testlist 331 +#define dictorsetmaker 332 +#define classdef 333 +#define arglist 334 +#define argument 335 +#define comp_iter 336 +#define sync_comp_for 337 +#define comp_for 338 +#define comp_if 339 +#define encoding_decl 340 +#define yield_expr 341 +#define yield_arg 342 diff --git a/venv/Include/grammar.h b/venv/Include/grammar.h new file mode 100644 index 00000000..e1703f4b --- /dev/null +++ b/venv/Include/grammar.h @@ -0,0 +1,94 @@ + +/* Grammar interface */ + +#ifndef Py_GRAMMAR_H +#define Py_GRAMMAR_H +#ifdef __cplusplus +extern "C" { +#endif + +#include "bitset.h" /* Sigh... 
*/ + +/* A label of an arc */ + +typedef struct { + int lb_type; + char *lb_str; +} label; + +#define EMPTY 0 /* Label number 0 is by definition the empty label */ + +/* A list of labels */ + +typedef struct { + int ll_nlabels; + label *ll_label; +} labellist; + +/* An arc from one state to another */ + +typedef struct { + short a_lbl; /* Label of this arc */ + short a_arrow; /* State where this arc goes to */ +} arc; + +/* A state in a DFA */ + +typedef struct { + int s_narcs; + arc *s_arc; /* Array of arcs */ + + /* Optional accelerators */ + int s_lower; /* Lowest label index */ + int s_upper; /* Highest label index */ + int *s_accel; /* Accelerator */ + int s_accept; /* Nonzero for accepting state */ +} state; + +/* A DFA */ + +typedef struct { + int d_type; /* Non-terminal this represents */ + char *d_name; /* For printing */ + int d_initial; /* Initial state */ + int d_nstates; + state *d_state; /* Array of states */ + bitset d_first; +} dfa; + +/* A grammar */ + +typedef struct { + int g_ndfas; + dfa *g_dfa; /* Array of DFAs */ + labellist g_ll; + int g_start; /* Start symbol of the grammar */ + int g_accel; /* Set if accelerators present */ +} grammar; + +/* FUNCTIONS */ + +grammar *newgrammar(int start); +void freegrammar(grammar *g); +dfa *adddfa(grammar *g, int type, const char *name); +int addstate(dfa *d); +void addarc(dfa *d, int from, int to, int lbl); +dfa *PyGrammar_FindDFA(grammar *g, int type); + +int addlabel(labellist *ll, int type, const char *str); +int findlabel(labellist *ll, int type, const char *str); +const char *PyGrammar_LabelRepr(label *lb); +void translatelabels(grammar *g); + +void addfirstsets(grammar *g); + +void PyGrammar_AddAccelerators(grammar *g); +void PyGrammar_RemoveAccelerators(grammar *); + +void printgrammar(grammar *g, FILE *fp); +void printnonterminals(grammar *g, FILE *fp); + +#ifdef __cplusplus +} +#endif +#endif /* !Py_GRAMMAR_H */ diff --git a/venv/Include/import.h b/venv/Include/import.h new file mode 100644 index 00000000..13f32a10 --- /dev/null +++ b/venv/Include/import.h @@ -0,0 +1,154 @@ + +/* Module definition and import interface */ + +#ifndef Py_IMPORT_H +#define Py_IMPORT_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_LIMITED_API +PyAPI_FUNC(_PyInitError) _PyImportZip_Init(void); + +PyMODINIT_FUNC PyInit__imp(void); +#endif /* !Py_LIMITED_API */ +PyAPI_FUNC(long) PyImport_GetMagicNumber(void); +PyAPI_FUNC(const char *) PyImport_GetMagicTag(void); +PyAPI_FUNC(PyObject *) PyImport_ExecCodeModule( + const char *name, /* UTF-8 encoded string */ + PyObject *co + ); +PyAPI_FUNC(PyObject *) PyImport_ExecCodeModuleEx( + const char *name, /* UTF-8 encoded string */ + PyObject *co, + const char *pathname /* decoded from the filesystem encoding */ + ); +PyAPI_FUNC(PyObject *) PyImport_ExecCodeModuleWithPathnames( + const char *name, /* UTF-8 encoded string */ + PyObject *co, + const char *pathname, /* decoded from the filesystem encoding */ + const char *cpathname /* decoded from the filesystem encoding */ + ); +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03030000 +PyAPI_FUNC(PyObject *) PyImport_ExecCodeModuleObject( + PyObject *name, + PyObject *co, + PyObject *pathname, + PyObject *cpathname + ); +#endif +PyAPI_FUNC(PyObject *) PyImport_GetModuleDict(void); +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03070000 +PyAPI_FUNC(PyObject *) PyImport_GetModule(PyObject *name); +#endif +#ifndef Py_LIMITED_API +PyAPI_FUNC(int) _PyImport_IsInitialized(PyInterpreterState *); +PyAPI_FUNC(PyObject *) 
_PyImport_GetModuleId(struct _Py_Identifier *name); +PyAPI_FUNC(PyObject *) _PyImport_AddModuleObject(PyObject *name, + PyObject *modules); +PyAPI_FUNC(int) _PyImport_SetModule(PyObject *name, PyObject *module); +PyAPI_FUNC(int) _PyImport_SetModuleString(const char *name, PyObject* module); +#endif +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03030000 +PyAPI_FUNC(PyObject *) PyImport_AddModuleObject( + PyObject *name + ); +#endif +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject *) _PyImport_AddModuleObject(PyObject *, PyObject *); +#endif +PyAPI_FUNC(PyObject *) PyImport_AddModule( + const char *name /* UTF-8 encoded string */ + ); +PyAPI_FUNC(PyObject *) PyImport_ImportModule( + const char *name /* UTF-8 encoded string */ + ); +PyAPI_FUNC(PyObject *) PyImport_ImportModuleNoBlock( + const char *name /* UTF-8 encoded string */ + ); +PyAPI_FUNC(PyObject *) PyImport_ImportModuleLevel( + const char *name, /* UTF-8 encoded string */ + PyObject *globals, + PyObject *locals, + PyObject *fromlist, + int level + ); +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03050000 +PyAPI_FUNC(PyObject *) PyImport_ImportModuleLevelObject( + PyObject *name, + PyObject *globals, + PyObject *locals, + PyObject *fromlist, + int level + ); +#endif + +#define PyImport_ImportModuleEx(n, g, l, f) \ + PyImport_ImportModuleLevel(n, g, l, f, 0) + +PyAPI_FUNC(PyObject *) PyImport_GetImporter(PyObject *path); +PyAPI_FUNC(PyObject *) PyImport_Import(PyObject *name); +PyAPI_FUNC(PyObject *) PyImport_ReloadModule(PyObject *m); +PyAPI_FUNC(void) PyImport_Cleanup(void); +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03030000 +PyAPI_FUNC(int) PyImport_ImportFrozenModuleObject( + PyObject *name + ); +#endif +PyAPI_FUNC(int) PyImport_ImportFrozenModule( + const char *name /* UTF-8 encoded string */ + ); + +#ifndef Py_LIMITED_API +PyAPI_FUNC(void) _PyImport_AcquireLock(void); +PyAPI_FUNC(int) _PyImport_ReleaseLock(void); + +PyAPI_FUNC(void) _PyImport_ReInitLock(void); + +PyAPI_FUNC(PyObject *) _PyImport_FindBuiltin( + const char *name, /* UTF-8 encoded string */ + PyObject *modules + ); +PyAPI_FUNC(PyObject *) _PyImport_FindExtensionObject(PyObject *, PyObject *); +PyAPI_FUNC(PyObject *) _PyImport_FindExtensionObjectEx(PyObject *, PyObject *, + PyObject *); +PyAPI_FUNC(int) _PyImport_FixupBuiltin( + PyObject *mod, + const char *name, /* UTF-8 encoded string */ + PyObject *modules + ); +PyAPI_FUNC(int) _PyImport_FixupExtensionObject(PyObject*, PyObject *, + PyObject *, PyObject *); + +struct _inittab { + const char *name; /* ASCII encoded string */ + PyObject* (*initfunc)(void); +}; +PyAPI_DATA(struct _inittab *) PyImport_Inittab; +PyAPI_FUNC(int) PyImport_ExtendInittab(struct _inittab *newtab); +#endif /* Py_LIMITED_API */ + +PyAPI_DATA(PyTypeObject) PyNullImporter_Type; + +PyAPI_FUNC(int) PyImport_AppendInittab( + const char *name, /* ASCII encoded string */ + PyObject* (*initfunc)(void) + ); + +#ifndef Py_LIMITED_API +struct _frozen { + const char *name; /* ASCII encoded string */ + const unsigned char *code; + int size; +}; + +/* Embedding apps may change this pointer to point to their favorite + collection of frozen modules: */ + +PyAPI_DATA(const struct _frozen *) PyImport_FrozenModules; +#endif + +#ifdef __cplusplus +} +#endif +#endif /* !Py_IMPORT_H */ diff --git a/venv/Include/intrcheck.h b/venv/Include/intrcheck.h new file mode 100644 index 00000000..2e17336c --- /dev/null +++ b/venv/Include/intrcheck.h @@ -0,0 +1,33 @@ + +#ifndef Py_INTRCHECK_H +#define Py_INTRCHECK_H +#ifdef __cplusplus +extern "C" 
{ +#endif + +PyAPI_FUNC(int) PyOS_InterruptOccurred(void); +PyAPI_FUNC(void) PyOS_InitInterrupts(void); +#ifdef HAVE_FORK +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03070000 +PyAPI_FUNC(void) PyOS_BeforeFork(void); +PyAPI_FUNC(void) PyOS_AfterFork_Parent(void); +PyAPI_FUNC(void) PyOS_AfterFork_Child(void); +#endif +#endif +/* Deprecated, please use PyOS_AfterFork_Child() instead */ +PyAPI_FUNC(void) PyOS_AfterFork(void) Py_DEPRECATED(3.7); + +#ifndef Py_LIMITED_API +PyAPI_FUNC(int) _PyOS_IsMainThread(void); +PyAPI_FUNC(void) _PySignal_AfterFork(void); + +#ifdef MS_WINDOWS +/* windows.h is not included by Python.h so use void* instead of HANDLE */ +PyAPI_FUNC(void*) _PyOS_SigintEvent(void); +#endif +#endif /* !Py_LIMITED_API */ + +#ifdef __cplusplus +} +#endif +#endif /* !Py_INTRCHECK_H */ diff --git a/venv/Include/iterobject.h b/venv/Include/iterobject.h new file mode 100644 index 00000000..f61726f1 --- /dev/null +++ b/venv/Include/iterobject.h @@ -0,0 +1,25 @@ +#ifndef Py_ITEROBJECT_H +#define Py_ITEROBJECT_H +/* Iterators (the basic kind, over a sequence) */ +#ifdef __cplusplus +extern "C" { +#endif + +PyAPI_DATA(PyTypeObject) PySeqIter_Type; +PyAPI_DATA(PyTypeObject) PyCallIter_Type; +PyAPI_DATA(PyTypeObject) PyCmpWrapper_Type; + +#define PySeqIter_Check(op) (Py_TYPE(op) == &PySeqIter_Type) + +PyAPI_FUNC(PyObject *) PySeqIter_New(PyObject *); + + +#define PyCallIter_Check(op) (Py_TYPE(op) == &PyCallIter_Type) + +PyAPI_FUNC(PyObject *) PyCallIter_New(PyObject *, PyObject *); + +#ifdef __cplusplus +} +#endif +#endif /* !Py_ITEROBJECT_H */ + diff --git a/venv/Include/listobject.h b/venv/Include/listobject.h new file mode 100644 index 00000000..6057279d --- /dev/null +++ b/venv/Include/listobject.h @@ -0,0 +1,81 @@ + +/* List object interface */ + +/* +Another generally useful object type is a list of object pointers. +This is a mutable type: the list items can be changed, and items can be +added or removed. Out-of-range indices or non-list objects are ignored. + +*** WARNING *** PyList_SetItem does not increment the new item's reference +count, but does decrement the reference count of the item it replaces, +if not nil. It does *decrement* the reference count if it is *not* +inserted in the list. Similarly, PyList_GetItem does not increment the +returned item's reference count. +*/ + +#ifndef Py_LISTOBJECT_H +#define Py_LISTOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_LIMITED_API +typedef struct { + PyObject_VAR_HEAD + /* Vector of pointers to list elements. list[0] is ob_item[0], etc. */ + PyObject **ob_item; + + /* ob_item contains space for 'allocated' elements. The number + * currently in use is ob_size. + * Invariants: + * 0 <= ob_size <= allocated + * len(list) == ob_size + * ob_item == NULL implies ob_size == allocated == 0 + * list.sort() temporarily sets allocated to -1 to detect mutations. + * + * Items must normally not be NULL, except during construction when + * the list is not yet visible outside the function that builds it. 
+ */ + Py_ssize_t allocated; +} PyListObject; +#endif + +PyAPI_DATA(PyTypeObject) PyList_Type; +PyAPI_DATA(PyTypeObject) PyListIter_Type; +PyAPI_DATA(PyTypeObject) PyListRevIter_Type; +PyAPI_DATA(PyTypeObject) PySortWrapper_Type; + +#define PyList_Check(op) \ + PyType_FastSubclass(Py_TYPE(op), Py_TPFLAGS_LIST_SUBCLASS) +#define PyList_CheckExact(op) (Py_TYPE(op) == &PyList_Type) + +PyAPI_FUNC(PyObject *) PyList_New(Py_ssize_t size); +PyAPI_FUNC(Py_ssize_t) PyList_Size(PyObject *); +PyAPI_FUNC(PyObject *) PyList_GetItem(PyObject *, Py_ssize_t); +PyAPI_FUNC(int) PyList_SetItem(PyObject *, Py_ssize_t, PyObject *); +PyAPI_FUNC(int) PyList_Insert(PyObject *, Py_ssize_t, PyObject *); +PyAPI_FUNC(int) PyList_Append(PyObject *, PyObject *); +PyAPI_FUNC(PyObject *) PyList_GetSlice(PyObject *, Py_ssize_t, Py_ssize_t); +PyAPI_FUNC(int) PyList_SetSlice(PyObject *, Py_ssize_t, Py_ssize_t, PyObject *); +PyAPI_FUNC(int) PyList_Sort(PyObject *); +PyAPI_FUNC(int) PyList_Reverse(PyObject *); +PyAPI_FUNC(PyObject *) PyList_AsTuple(PyObject *); +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject *) _PyList_Extend(PyListObject *, PyObject *); + +PyAPI_FUNC(int) PyList_ClearFreeList(void); +PyAPI_FUNC(void) _PyList_DebugMallocStats(FILE *out); +#endif + +/* Macro, trading safety for speed */ +#ifndef Py_LIMITED_API +#define PyList_GET_ITEM(op, i) (((PyListObject *)(op))->ob_item[i]) +#define PyList_SET_ITEM(op, i, v) (((PyListObject *)(op))->ob_item[i] = (v)) +#define PyList_GET_SIZE(op) (assert(PyList_Check(op)),Py_SIZE(op)) +#define _PyList_ITEMS(op) (((PyListObject *)(op))->ob_item) +#endif + +#ifdef __cplusplus +} +#endif +#endif /* !Py_LISTOBJECT_H */ diff --git a/venv/Include/longintrepr.h b/venv/Include/longintrepr.h new file mode 100644 index 00000000..ff4155f9 --- /dev/null +++ b/venv/Include/longintrepr.h @@ -0,0 +1,99 @@ +#ifndef Py_LIMITED_API +#ifndef Py_LONGINTREPR_H +#define Py_LONGINTREPR_H +#ifdef __cplusplus +extern "C" { +#endif + + +/* This is published for the benefit of "friends" marshal.c and _decimal.c. */ + +/* Parameters of the integer representation. There are two different + sets of parameters: one set for 30-bit digits, stored in an unsigned 32-bit + integer type, and one set for 15-bit digits with each digit stored in an + unsigned short. The value of PYLONG_BITS_IN_DIGIT, defined either at + configure time or in pyport.h, is used to decide which digit size to use. + + Type 'digit' should be able to hold 2*PyLong_BASE-1, and type 'twodigits' + should be an unsigned integer type able to hold all integers up to + PyLong_BASE*PyLong_BASE-1. x_sub assumes that 'digit' is an unsigned type, + and that overflow is handled by taking the result modulo 2**N for some N > + PyLong_SHIFT. 
The majority of the code doesn't care about the precise + value of PyLong_SHIFT, but there are some notable exceptions: + + - long_pow() requires that PyLong_SHIFT be divisible by 5 + + - PyLong_{As,From}ByteArray require that PyLong_SHIFT be at least 8 + + - long_hash() requires that PyLong_SHIFT is *strictly* less than the number + of bits in an unsigned long, as do the PyLong <-> long (or unsigned long) + conversion functions + + - the Python int <-> size_t/Py_ssize_t conversion functions expect that + PyLong_SHIFT is strictly less than the number of bits in a size_t + + - the marshal code currently expects that PyLong_SHIFT is a multiple of 15 + + - NSMALLNEGINTS and NSMALLPOSINTS should be small enough to fit in a single + digit; with the current values this forces PyLong_SHIFT >= 9 + + The values 15 and 30 should fit all of the above requirements, on any + platform. +*/ + +#if PYLONG_BITS_IN_DIGIT == 30 +typedef uint32_t digit; +typedef int32_t sdigit; /* signed variant of digit */ +typedef uint64_t twodigits; +typedef int64_t stwodigits; /* signed variant of twodigits */ +#define PyLong_SHIFT 30 +#define _PyLong_DECIMAL_SHIFT 9 /* max(e such that 10**e fits in a digit) */ +#define _PyLong_DECIMAL_BASE ((digit)1000000000) /* 10 ** DECIMAL_SHIFT */ +#elif PYLONG_BITS_IN_DIGIT == 15 +typedef unsigned short digit; +typedef short sdigit; /* signed variant of digit */ +typedef unsigned long twodigits; +typedef long stwodigits; /* signed variant of twodigits */ +#define PyLong_SHIFT 15 +#define _PyLong_DECIMAL_SHIFT 4 /* max(e such that 10**e fits in a digit) */ +#define _PyLong_DECIMAL_BASE ((digit)10000) /* 10 ** DECIMAL_SHIFT */ +#else +#error "PYLONG_BITS_IN_DIGIT should be 15 or 30" +#endif +#define PyLong_BASE ((digit)1 << PyLong_SHIFT) +#define PyLong_MASK ((digit)(PyLong_BASE - 1)) + +#if PyLong_SHIFT % 5 != 0 +#error "longobject.c requires that PyLong_SHIFT be divisible by 5" +#endif + +/* Long integer representation. + The absolute value of a number is equal to + SUM(for i=0 through abs(ob_size)-1) ob_digit[i] * 2**(SHIFT*i) + Negative numbers are represented with ob_size < 0; + zero is represented by ob_size == 0. + In a normalized number, ob_digit[abs(ob_size)-1] (the most significant + digit) is never zero. Also, in all cases, for all valid i, + 0 <= ob_digit[i] <= MASK. + The allocation function takes care of allocating extra memory + so that ob_digit[0] ... ob_digit[abs(ob_size)-1] are actually available. + + CAUTION: Generic code manipulating subtypes of PyVarObject has to + aware that ints abuse ob_size's sign bit. +*/ + +struct _longobject { + PyObject_VAR_HEAD + digit ob_digit[1]; +}; + +PyAPI_FUNC(PyLongObject *) _PyLong_New(Py_ssize_t); + +/* Return a copy of src. 
*/ +PyAPI_FUNC(PyObject *) _PyLong_Copy(PyLongObject *src); + +#ifdef __cplusplus +} +#endif +#endif /* !Py_LONGINTREPR_H */ +#endif /* Py_LIMITED_API */ diff --git a/venv/Include/longobject.h b/venv/Include/longobject.h new file mode 100644 index 00000000..7bdd0472 --- /dev/null +++ b/venv/Include/longobject.h @@ -0,0 +1,220 @@ +#ifndef Py_LONGOBJECT_H +#define Py_LONGOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + + +/* Long (arbitrary precision) integer object interface */ + +typedef struct _longobject PyLongObject; /* Revealed in longintrepr.h */ + +PyAPI_DATA(PyTypeObject) PyLong_Type; + +#define PyLong_Check(op) \ + PyType_FastSubclass(Py_TYPE(op), Py_TPFLAGS_LONG_SUBCLASS) +#define PyLong_CheckExact(op) (Py_TYPE(op) == &PyLong_Type) + +PyAPI_FUNC(PyObject *) PyLong_FromLong(long); +PyAPI_FUNC(PyObject *) PyLong_FromUnsignedLong(unsigned long); +PyAPI_FUNC(PyObject *) PyLong_FromSize_t(size_t); +PyAPI_FUNC(PyObject *) PyLong_FromSsize_t(Py_ssize_t); +PyAPI_FUNC(PyObject *) PyLong_FromDouble(double); +PyAPI_FUNC(long) PyLong_AsLong(PyObject *); +PyAPI_FUNC(long) PyLong_AsLongAndOverflow(PyObject *, int *); +PyAPI_FUNC(Py_ssize_t) PyLong_AsSsize_t(PyObject *); +PyAPI_FUNC(size_t) PyLong_AsSize_t(PyObject *); +PyAPI_FUNC(unsigned long) PyLong_AsUnsignedLong(PyObject *); +PyAPI_FUNC(unsigned long) PyLong_AsUnsignedLongMask(PyObject *); +#ifndef Py_LIMITED_API +PyAPI_FUNC(int) _PyLong_AsInt(PyObject *); +#endif +PyAPI_FUNC(PyObject *) PyLong_GetInfo(void); + +/* It may be useful in the future. I've added it in the PyInt -> PyLong + cleanup to keep the extra information. [CH] */ +#define PyLong_AS_LONG(op) PyLong_AsLong(op) + +/* Issue #1983: pid_t can be longer than a C long on some systems */ +#if !defined(SIZEOF_PID_T) || SIZEOF_PID_T == SIZEOF_INT +#define _Py_PARSE_PID "i" +#define PyLong_FromPid PyLong_FromLong +#define PyLong_AsPid PyLong_AsLong +#elif SIZEOF_PID_T == SIZEOF_LONG +#define _Py_PARSE_PID "l" +#define PyLong_FromPid PyLong_FromLong +#define PyLong_AsPid PyLong_AsLong +#elif defined(SIZEOF_LONG_LONG) && SIZEOF_PID_T == SIZEOF_LONG_LONG +#define _Py_PARSE_PID "L" +#define PyLong_FromPid PyLong_FromLongLong +#define PyLong_AsPid PyLong_AsLongLong +#else +#error "sizeof(pid_t) is neither sizeof(int), sizeof(long) or sizeof(long long)" +#endif /* SIZEOF_PID_T */ + +#if SIZEOF_VOID_P == SIZEOF_INT +# define _Py_PARSE_INTPTR "i" +# define _Py_PARSE_UINTPTR "I" +#elif SIZEOF_VOID_P == SIZEOF_LONG +# define _Py_PARSE_INTPTR "l" +# define _Py_PARSE_UINTPTR "k" +#elif defined(SIZEOF_LONG_LONG) && SIZEOF_VOID_P == SIZEOF_LONG_LONG +# define _Py_PARSE_INTPTR "L" +# define _Py_PARSE_UINTPTR "K" +#else +# error "void* different in size from int, long and long long" +#endif /* SIZEOF_VOID_P */ + +/* Used by Python/mystrtoul.c, _PyBytes_FromHex(), + _PyBytes_DecodeEscapeRecode(), etc. */ +#ifndef Py_LIMITED_API +PyAPI_DATA(unsigned char) _PyLong_DigitValue[256]; +#endif + +/* _PyLong_Frexp returns a double x and an exponent e such that the + true value is approximately equal to x * 2**e. e is >= 0. x is + 0.0 if and only if the input is 0 (in which case, e and x are both + zeroes); otherwise, 0.5 <= abs(x) < 1.0. On overflow, which is + possible if the number of bits doesn't fit into a Py_ssize_t, sets + OverflowError and returns -1.0 for x, 0 for e. 
*/ +#ifndef Py_LIMITED_API +PyAPI_FUNC(double) _PyLong_Frexp(PyLongObject *a, Py_ssize_t *e); +#endif + +PyAPI_FUNC(double) PyLong_AsDouble(PyObject *); +PyAPI_FUNC(PyObject *) PyLong_FromVoidPtr(void *); +PyAPI_FUNC(void *) PyLong_AsVoidPtr(PyObject *); + +PyAPI_FUNC(PyObject *) PyLong_FromLongLong(long long); +PyAPI_FUNC(PyObject *) PyLong_FromUnsignedLongLong(unsigned long long); +PyAPI_FUNC(long long) PyLong_AsLongLong(PyObject *); +PyAPI_FUNC(unsigned long long) PyLong_AsUnsignedLongLong(PyObject *); +PyAPI_FUNC(unsigned long long) PyLong_AsUnsignedLongLongMask(PyObject *); +PyAPI_FUNC(long long) PyLong_AsLongLongAndOverflow(PyObject *, int *); + +PyAPI_FUNC(PyObject *) PyLong_FromString(const char *, char **, int); +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject *) PyLong_FromUnicode(Py_UNICODE*, Py_ssize_t, int) Py_DEPRECATED(3.3); +PyAPI_FUNC(PyObject *) PyLong_FromUnicodeObject(PyObject *u, int base); +PyAPI_FUNC(PyObject *) _PyLong_FromBytes(const char *, Py_ssize_t, int); +#endif + +#ifndef Py_LIMITED_API +/* _PyLong_Sign. Return 0 if v is 0, -1 if v < 0, +1 if v > 0. + v must not be NULL, and must be a normalized long. + There are no error cases. +*/ +PyAPI_FUNC(int) _PyLong_Sign(PyObject *v); + + +/* _PyLong_NumBits. Return the number of bits needed to represent the + absolute value of a long. For example, this returns 1 for 1 and -1, 2 + for 2 and -2, and 2 for 3 and -3. It returns 0 for 0. + v must not be NULL, and must be a normalized long. + (size_t)-1 is returned and OverflowError set if the true result doesn't + fit in a size_t. +*/ +PyAPI_FUNC(size_t) _PyLong_NumBits(PyObject *v); + +/* _PyLong_DivmodNear. Given integers a and b, compute the nearest + integer q to the exact quotient a / b, rounding to the nearest even integer + in the case of a tie. Return (q, r), where r = a - q*b. The remainder r + will satisfy abs(r) <= abs(b)/2, with equality possible only if q is + even. +*/ +PyAPI_FUNC(PyObject *) _PyLong_DivmodNear(PyObject *, PyObject *); + +/* _PyLong_FromByteArray: View the n unsigned bytes as a binary integer in + base 256, and return a Python int with the same numeric value. + If n is 0, the integer is 0. Else: + If little_endian is 1/true, bytes[n-1] is the MSB and bytes[0] the LSB; + else (little_endian is 0/false) bytes[0] is the MSB and bytes[n-1] the + LSB. + If is_signed is 0/false, view the bytes as a non-negative integer. + If is_signed is 1/true, view the bytes as a 2's-complement integer, + non-negative if bit 0x80 of the MSB is clear, negative if set. + Error returns: + + Return NULL with the appropriate exception set if there's not + enough memory to create the Python int. +*/ +PyAPI_FUNC(PyObject *) _PyLong_FromByteArray( + const unsigned char* bytes, size_t n, + int little_endian, int is_signed); + +/* _PyLong_AsByteArray: Convert the least-significant 8*n bits of long + v to a base-256 integer, stored in array bytes. Normally return 0, + return -1 on error. + If little_endian is 1/true, store the MSB at bytes[n-1] and the LSB at + bytes[0]; else (little_endian is 0/false) store the MSB at bytes[0] and + the LSB at bytes[n-1]. + If is_signed is 0/false, it's an error if v < 0; else (v >= 0) n bytes + are filled and there's nothing special about bit 0x80 of the MSB. + If is_signed is 1/true, bytes is filled with the 2's-complement + representation of v's value. Bit 0x80 of the MSB is the sign bit. + Error returns (-1): + + is_signed is 0 and v < 0. TypeError is set in this case, and bytes + isn't altered. 
+ + n isn't big enough to hold the full mathematical value of v. For + example, if is_signed is 0 and there are more digits in the v than + fit in n; or if is_signed is 1, v < 0, and n is just 1 bit shy of + being large enough to hold a sign bit. OverflowError is set in this + case, but bytes holds the least-significant n bytes of the true value. +*/ +PyAPI_FUNC(int) _PyLong_AsByteArray(PyLongObject* v, + unsigned char* bytes, size_t n, + int little_endian, int is_signed); + +/* _PyLong_FromNbInt: Convert the given object to a PyLongObject + using the nb_int slot, if available. Raise TypeError if either the + nb_int slot is not available or the result of the call to nb_int + returns something not of type int. +*/ +PyAPI_FUNC(PyLongObject *)_PyLong_FromNbInt(PyObject *); + +/* _PyLong_Format: Convert the long to a string object with given base, + appending a base prefix of 0[box] if base is 2, 8 or 16. */ +PyAPI_FUNC(PyObject *) _PyLong_Format(PyObject *obj, int base); + +PyAPI_FUNC(int) _PyLong_FormatWriter( + _PyUnicodeWriter *writer, + PyObject *obj, + int base, + int alternate); + +PyAPI_FUNC(char*) _PyLong_FormatBytesWriter( + _PyBytesWriter *writer, + char *str, + PyObject *obj, + int base, + int alternate); + +/* Format the object based on the format_spec, as defined in PEP 3101 + (Advanced String Formatting). */ +PyAPI_FUNC(int) _PyLong_FormatAdvancedWriter( + _PyUnicodeWriter *writer, + PyObject *obj, + PyObject *format_spec, + Py_ssize_t start, + Py_ssize_t end); +#endif /* Py_LIMITED_API */ + +/* These aren't really part of the int object, but they're handy. The + functions are in Python/mystrtoul.c. + */ +PyAPI_FUNC(unsigned long) PyOS_strtoul(const char *, char **, int); +PyAPI_FUNC(long) PyOS_strtol(const char *, char **, int); + +#ifndef Py_LIMITED_API +/* For use by the gcd function in mathmodule.c */ +PyAPI_FUNC(PyObject *) _PyLong_GCD(PyObject *, PyObject *); +#endif /* !Py_LIMITED_API */ + +#ifndef Py_LIMITED_API +PyAPI_DATA(PyObject *) _PyLong_Zero; +PyAPI_DATA(PyObject *) _PyLong_One; +#endif + +#ifdef __cplusplus +} +#endif +#endif /* !Py_LONGOBJECT_H */ diff --git a/venv/Include/marshal.h b/venv/Include/marshal.h new file mode 100644 index 00000000..09d9337e --- /dev/null +++ b/venv/Include/marshal.h @@ -0,0 +1,28 @@ + +/* Interface for marshal.c */ + +#ifndef Py_MARSHAL_H +#define Py_MARSHAL_H +#ifdef __cplusplus +extern "C" { +#endif + +#define Py_MARSHAL_VERSION 4 + +PyAPI_FUNC(void) PyMarshal_WriteLongToFile(long, FILE *, int); +PyAPI_FUNC(void) PyMarshal_WriteObjectToFile(PyObject *, FILE *, int); +PyAPI_FUNC(PyObject *) PyMarshal_WriteObjectToString(PyObject *, int); + +#ifndef Py_LIMITED_API +PyAPI_FUNC(long) PyMarshal_ReadLongFromFile(FILE *); +PyAPI_FUNC(int) PyMarshal_ReadShortFromFile(FILE *); +PyAPI_FUNC(PyObject *) PyMarshal_ReadObjectFromFile(FILE *); +PyAPI_FUNC(PyObject *) PyMarshal_ReadLastObjectFromFile(FILE *); +#endif +PyAPI_FUNC(PyObject *) PyMarshal_ReadObjectFromString(const char *, + Py_ssize_t); + +#ifdef __cplusplus +} +#endif +#endif /* !Py_MARSHAL_H */ diff --git a/venv/Include/memoryobject.h b/venv/Include/memoryobject.h new file mode 100644 index 00000000..990a716f --- /dev/null +++ b/venv/Include/memoryobject.h @@ -0,0 +1,72 @@ +/* Memory view object. In Python this is available as "memoryview". 
*/ + +#ifndef Py_MEMORYOBJECT_H +#define Py_MEMORYOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_LIMITED_API +PyAPI_DATA(PyTypeObject) _PyManagedBuffer_Type; +#endif +PyAPI_DATA(PyTypeObject) PyMemoryView_Type; + +#define PyMemoryView_Check(op) (Py_TYPE(op) == &PyMemoryView_Type) + +#ifndef Py_LIMITED_API +/* Get a pointer to the memoryview's private copy of the exporter's buffer. */ +#define PyMemoryView_GET_BUFFER(op) (&((PyMemoryViewObject *)(op))->view) +/* Get a pointer to the exporting object (this may be NULL!). */ +#define PyMemoryView_GET_BASE(op) (((PyMemoryViewObject *)(op))->view.obj) +#endif + +PyAPI_FUNC(PyObject *) PyMemoryView_FromObject(PyObject *base); +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03030000 +PyAPI_FUNC(PyObject *) PyMemoryView_FromMemory(char *mem, Py_ssize_t size, + int flags); +#endif +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject *) PyMemoryView_FromBuffer(Py_buffer *info); +#endif +PyAPI_FUNC(PyObject *) PyMemoryView_GetContiguous(PyObject *base, + int buffertype, + char order); + + +/* The structs are declared here so that macros can work, but they shouldn't + be considered public. Don't access their fields directly, use the macros + and functions instead! */ +#ifndef Py_LIMITED_API +#define _Py_MANAGED_BUFFER_RELEASED 0x001 /* access to exporter blocked */ +#define _Py_MANAGED_BUFFER_FREE_FORMAT 0x002 /* free format */ +typedef struct { + PyObject_HEAD + int flags; /* state flags */ + Py_ssize_t exports; /* number of direct memoryview exports */ + Py_buffer master; /* snapshot buffer obtained from the original exporter */ +} _PyManagedBufferObject; + + +/* memoryview state flags */ +#define _Py_MEMORYVIEW_RELEASED 0x001 /* access to master buffer blocked */ +#define _Py_MEMORYVIEW_C 0x002 /* C-contiguous layout */ +#define _Py_MEMORYVIEW_FORTRAN 0x004 /* Fortran contiguous layout */ +#define _Py_MEMORYVIEW_SCALAR 0x008 /* scalar: ndim = 0 */ +#define _Py_MEMORYVIEW_PIL 0x010 /* PIL-style layout */ + +typedef struct { + PyObject_VAR_HEAD + _PyManagedBufferObject *mbuf; /* managed buffer */ + Py_hash_t hash; /* hash value for read-only views */ + int flags; /* state flags */ + Py_ssize_t exports; /* number of buffer re-exports */ + Py_buffer view; /* private copy of the exporter's view */ + PyObject *weakreflist; + Py_ssize_t ob_array[1]; /* shape, strides, suboffsets */ +} PyMemoryViewObject; +#endif + +#ifdef __cplusplus +} +#endif +#endif /* !Py_MEMORYOBJECT_H */ diff --git a/venv/Include/metagrammar.h b/venv/Include/metagrammar.h new file mode 100644 index 00000000..15c8ef8f --- /dev/null +++ b/venv/Include/metagrammar.h @@ -0,0 +1,18 @@ +#ifndef Py_METAGRAMMAR_H +#define Py_METAGRAMMAR_H +#ifdef __cplusplus +extern "C" { +#endif + + +#define MSTART 256 +#define RULE 257 +#define RHS 258 +#define ALT 259 +#define ITEM 260 +#define ATOM 261 + +#ifdef __cplusplus +} +#endif +#endif /* !Py_METAGRAMMAR_H */ diff --git a/venv/Include/methodobject.h b/venv/Include/methodobject.h new file mode 100644 index 00000000..ea35d86b --- /dev/null +++ b/venv/Include/methodobject.h @@ -0,0 +1,135 @@ + +/* Method object interface */ + +#ifndef Py_METHODOBJECT_H +#define Py_METHODOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +/* This is about the type 'builtin_function_or_method', + not Python methods in user-defined classes. See classobject.h + for the latter. 
*/ + +PyAPI_DATA(PyTypeObject) PyCFunction_Type; + +#define PyCFunction_Check(op) (Py_TYPE(op) == &PyCFunction_Type) + +typedef PyObject *(*PyCFunction)(PyObject *, PyObject *); +typedef PyObject *(*_PyCFunctionFast) (PyObject *, PyObject *const *, Py_ssize_t); +typedef PyObject *(*PyCFunctionWithKeywords)(PyObject *, PyObject *, + PyObject *); +typedef PyObject *(*_PyCFunctionFastWithKeywords) (PyObject *, + PyObject *const *, Py_ssize_t, + PyObject *); +typedef PyObject *(*PyNoArgsFunction)(PyObject *); + +PyAPI_FUNC(PyCFunction) PyCFunction_GetFunction(PyObject *); +PyAPI_FUNC(PyObject *) PyCFunction_GetSelf(PyObject *); +PyAPI_FUNC(int) PyCFunction_GetFlags(PyObject *); + +/* Macros for direct access to these values. Type checks are *not* + done, so use with care. */ +#ifndef Py_LIMITED_API +#define PyCFunction_GET_FUNCTION(func) \ + (((PyCFunctionObject *)func) -> m_ml -> ml_meth) +#define PyCFunction_GET_SELF(func) \ + (((PyCFunctionObject *)func) -> m_ml -> ml_flags & METH_STATIC ? \ + NULL : ((PyCFunctionObject *)func) -> m_self) +#define PyCFunction_GET_FLAGS(func) \ + (((PyCFunctionObject *)func) -> m_ml -> ml_flags) +#endif +PyAPI_FUNC(PyObject *) PyCFunction_Call(PyObject *, PyObject *, PyObject *); + +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject *) _PyCFunction_FastCallDict(PyObject *func, + PyObject *const *args, + Py_ssize_t nargs, + PyObject *kwargs); + +PyAPI_FUNC(PyObject *) _PyCFunction_FastCallKeywords(PyObject *func, + PyObject *const *stack, + Py_ssize_t nargs, + PyObject *kwnames); +#endif + +struct PyMethodDef { + const char *ml_name; /* The name of the built-in function/method */ + PyCFunction ml_meth; /* The C function that implements it */ + int ml_flags; /* Combination of METH_xxx flags, which mostly + describe the args expected by the C func */ + const char *ml_doc; /* The __doc__ attribute, or NULL */ +}; +typedef struct PyMethodDef PyMethodDef; + +#define PyCFunction_New(ML, SELF) PyCFunction_NewEx((ML), (SELF), NULL) +PyAPI_FUNC(PyObject *) PyCFunction_NewEx(PyMethodDef *, PyObject *, + PyObject *); + +/* Flag passed to newmethodobject */ +/* #define METH_OLDARGS 0x0000 -- unsupported now */ +#define METH_VARARGS 0x0001 +#define METH_KEYWORDS 0x0002 +/* METH_NOARGS and METH_O must not be combined with the flags above. */ +#define METH_NOARGS 0x0004 +#define METH_O 0x0008 + +/* METH_CLASS and METH_STATIC are a little different; these control + the construction of methods for a class. These cannot be used for + functions in modules. */ +#define METH_CLASS 0x0010 +#define METH_STATIC 0x0020 + +/* METH_COEXIST allows a method to be entered even though a slot has + already filled the entry. When defined, the flag allows a separate + method, "__contains__" for example, to coexist with a defined + slot like sq_contains. 
*/ + +#define METH_COEXIST 0x0040 + +#ifndef Py_LIMITED_API +#define METH_FASTCALL 0x0080 +#endif + +/* This bit is preserved for Stackless Python */ +#ifdef STACKLESS +#define METH_STACKLESS 0x0100 +#else +#define METH_STACKLESS 0x0000 +#endif + +#ifndef Py_LIMITED_API +typedef struct { + PyObject_HEAD + PyMethodDef *m_ml; /* Description of the C function to call */ + PyObject *m_self; /* Passed as 'self' arg to the C func, can be NULL */ + PyObject *m_module; /* The __module__ attribute, can be anything */ + PyObject *m_weakreflist; /* List of weak references */ +} PyCFunctionObject; + +PyAPI_FUNC(PyObject *) _PyMethodDef_RawFastCallDict( + PyMethodDef *method, + PyObject *self, + PyObject *const *args, + Py_ssize_t nargs, + PyObject *kwargs); + +PyAPI_FUNC(PyObject *) _PyMethodDef_RawFastCallKeywords( + PyMethodDef *method, + PyObject *self, + PyObject *const *args, + Py_ssize_t nargs, + PyObject *kwnames); +#endif + +PyAPI_FUNC(int) PyCFunction_ClearFreeList(void); + +#ifndef Py_LIMITED_API +PyAPI_FUNC(void) _PyCFunction_DebugMallocStats(FILE *out); +PyAPI_FUNC(void) _PyMethod_DebugMallocStats(FILE *out); +#endif + +#ifdef __cplusplus +} +#endif +#endif /* !Py_METHODOBJECT_H */ diff --git a/venv/Include/modsupport.h b/venv/Include/modsupport.h new file mode 100644 index 00000000..a238bef0 --- /dev/null +++ b/venv/Include/modsupport.h @@ -0,0 +1,229 @@ + +#ifndef Py_MODSUPPORT_H +#define Py_MODSUPPORT_H +#ifdef __cplusplus +extern "C" { +#endif + +/* Module support interface */ + +#include + +/* If PY_SSIZE_T_CLEAN is defined, each functions treats #-specifier + to mean Py_ssize_t */ +#ifdef PY_SSIZE_T_CLEAN +#define PyArg_Parse _PyArg_Parse_SizeT +#define PyArg_ParseTuple _PyArg_ParseTuple_SizeT +#define PyArg_ParseTupleAndKeywords _PyArg_ParseTupleAndKeywords_SizeT +#define PyArg_VaParse _PyArg_VaParse_SizeT +#define PyArg_VaParseTupleAndKeywords _PyArg_VaParseTupleAndKeywords_SizeT +#define Py_BuildValue _Py_BuildValue_SizeT +#define Py_VaBuildValue _Py_VaBuildValue_SizeT +#ifndef Py_LIMITED_API +#define _Py_VaBuildStack _Py_VaBuildStack_SizeT +#endif +#else +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject *) _Py_VaBuildValue_SizeT(const char *, va_list); +PyAPI_FUNC(PyObject **) _Py_VaBuildStack_SizeT( + PyObject **small_stack, + Py_ssize_t small_stack_len, + const char *format, + va_list va, + Py_ssize_t *p_nargs); +#endif /* !Py_LIMITED_API */ +#endif + +/* Due to a glitch in 3.2, the _SizeT versions weren't exported from the DLL. 
*/ +#if !defined(PY_SSIZE_T_CLEAN) || !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03030000 +PyAPI_FUNC(int) PyArg_Parse(PyObject *, const char *, ...); +PyAPI_FUNC(int) PyArg_ParseTuple(PyObject *, const char *, ...); +PyAPI_FUNC(int) PyArg_ParseTupleAndKeywords(PyObject *, PyObject *, + const char *, char **, ...); +PyAPI_FUNC(int) PyArg_VaParse(PyObject *, const char *, va_list); +PyAPI_FUNC(int) PyArg_VaParseTupleAndKeywords(PyObject *, PyObject *, + const char *, char **, va_list); +#endif +PyAPI_FUNC(int) PyArg_ValidateKeywordArguments(PyObject *); +PyAPI_FUNC(int) PyArg_UnpackTuple(PyObject *, const char *, Py_ssize_t, Py_ssize_t, ...); +PyAPI_FUNC(PyObject *) Py_BuildValue(const char *, ...); +PyAPI_FUNC(PyObject *) _Py_BuildValue_SizeT(const char *, ...); + + +#ifndef Py_LIMITED_API +PyAPI_FUNC(int) _PyArg_UnpackStack( + PyObject *const *args, + Py_ssize_t nargs, + const char *name, + Py_ssize_t min, + Py_ssize_t max, + ...); + +PyAPI_FUNC(int) _PyArg_NoKeywords(const char *funcname, PyObject *kwargs); +PyAPI_FUNC(int) _PyArg_NoPositional(const char *funcname, PyObject *args); +#define _PyArg_NoKeywords(funcname, kwargs) \ + ((kwargs) == NULL || _PyArg_NoKeywords((funcname), (kwargs))) +#define _PyArg_NoPositional(funcname, args) \ + ((args) == NULL || _PyArg_NoPositional((funcname), (args))) + +#endif + +PyAPI_FUNC(PyObject *) Py_VaBuildValue(const char *, va_list); +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject **) _Py_VaBuildStack( + PyObject **small_stack, + Py_ssize_t small_stack_len, + const char *format, + va_list va, + Py_ssize_t *p_nargs); +#endif + +#ifndef Py_LIMITED_API +typedef struct _PyArg_Parser { + const char *format; + const char * const *keywords; + const char *fname; + const char *custom_msg; + int pos; /* number of positional-only arguments */ + int min; /* minimal number of arguments */ + int max; /* maximal number of positional arguments */ + PyObject *kwtuple; /* tuple of keyword parameter names */ + struct _PyArg_Parser *next; +} _PyArg_Parser; +#ifdef PY_SSIZE_T_CLEAN +#define _PyArg_ParseTupleAndKeywordsFast _PyArg_ParseTupleAndKeywordsFast_SizeT +#define _PyArg_ParseStack _PyArg_ParseStack_SizeT +#define _PyArg_ParseStackAndKeywords _PyArg_ParseStackAndKeywords_SizeT +#define _PyArg_VaParseTupleAndKeywordsFast _PyArg_VaParseTupleAndKeywordsFast_SizeT +#endif +PyAPI_FUNC(int) _PyArg_ParseTupleAndKeywordsFast(PyObject *, PyObject *, + struct _PyArg_Parser *, ...); +PyAPI_FUNC(int) _PyArg_ParseStack( + PyObject *const *args, + Py_ssize_t nargs, + const char *format, + ...); +PyAPI_FUNC(int) _PyArg_ParseStackAndKeywords( + PyObject *const *args, + Py_ssize_t nargs, + PyObject *kwnames, + struct _PyArg_Parser *, + ...); +PyAPI_FUNC(int) _PyArg_VaParseTupleAndKeywordsFast(PyObject *, PyObject *, + struct _PyArg_Parser *, va_list); +void _PyArg_Fini(void); +#endif /* Py_LIMITED_API */ + +PyAPI_FUNC(int) PyModule_AddObject(PyObject *, const char *, PyObject *); +PyAPI_FUNC(int) PyModule_AddIntConstant(PyObject *, const char *, long); +PyAPI_FUNC(int) PyModule_AddStringConstant(PyObject *, const char *, const char *); +#define PyModule_AddIntMacro(m, c) PyModule_AddIntConstant(m, #c, c) +#define PyModule_AddStringMacro(m, c) PyModule_AddStringConstant(m, #c, c) + +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03050000 +/* New in 3.5 */ +PyAPI_FUNC(int) PyModule_SetDocString(PyObject *, const char *); +PyAPI_FUNC(int) PyModule_AddFunctions(PyObject *, PyMethodDef *); +PyAPI_FUNC(int) PyModule_ExecDef(PyObject *module, PyModuleDef *def); +#endif + 
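/* A minimal illustrative sketch (not taken from this header) of how the
   argument-parsing and module-support declarations above are typically
   wired together in an extension module. It assumes <Python.h> is
   included with PY_SSIZE_T_CLEAN defined first, as the #-specifier note
   above suggests; the module name "example", the function example_add
   and the constant ANSWER are hypothetical names used only here.

       #define PY_SSIZE_T_CLEAN
       #include <Python.h>

       static PyObject *
       example_add(PyObject *self, PyObject *args)
       {
           long a, b;
           if (!PyArg_ParseTuple(args, "ll", &a, &b))
               return NULL;                  // parse error already set
           return Py_BuildValue("l", a + b); // new reference to an int
       }

       static PyMethodDef example_methods[] = {
           {"add", example_add, METH_VARARGS, "Add two integers."},
           {NULL, NULL, 0, NULL}             // sentinel
       };

       static struct PyModuleDef example_module = {
           PyModuleDef_HEAD_INIT,
           "example",                        // m_name
           "Toy module for this sketch.",    // m_doc
           -1,                               // m_size: no per-module state
           example_methods,
           NULL, NULL, NULL, NULL            // m_slots, m_traverse, m_clear, m_free
       };

       PyMODINIT_FUNC
       PyInit_example(void)
       {
           PyObject *m = PyModule_Create(&example_module);
           if (m == NULL)
               return NULL;
           if (PyModule_AddIntConstant(m, "ANSWER", 42) < 0) {
               Py_DECREF(m);
               return NULL;
           }
           return m;
       }
*/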
+#define Py_CLEANUP_SUPPORTED 0x20000 + +#define PYTHON_API_VERSION 1013 +#define PYTHON_API_STRING "1013" +/* The API version is maintained (independently from the Python version) + so we can detect mismatches between the interpreter and dynamically + loaded modules. These are diagnosed by an error message but + the module is still loaded (because the mismatch can only be tested + after loading the module). The error message is intended to + explain the core dump a few seconds later. + + The symbol PYTHON_API_STRING defines the same value as a string + literal. *** PLEASE MAKE SURE THE DEFINITIONS MATCH. *** + + Please add a line or two to the top of this log for each API + version change: + + 22-Feb-2006 MvL 1013 PEP 353 - long indices for sequence lengths + + 19-Aug-2002 GvR 1012 Changes to string object struct for + interning changes, saving 3 bytes. + + 17-Jul-2001 GvR 1011 Descr-branch, just to be on the safe side + + 25-Jan-2001 FLD 1010 Parameters added to PyCode_New() and + PyFrame_New(); Python 2.1a2 + + 14-Mar-2000 GvR 1009 Unicode API added + + 3-Jan-1999 GvR 1007 Decided to change back! (Don't reuse 1008!) + + 3-Dec-1998 GvR 1008 Python 1.5.2b1 + + 18-Jan-1997 GvR 1007 string interning and other speedups + + 11-Oct-1996 GvR renamed Py_Ellipses to Py_Ellipsis :-( + + 30-Jul-1996 GvR Slice and ellipses syntax added + + 23-Jul-1996 GvR For 1.4 -- better safe than sorry this time :-) + + 7-Nov-1995 GvR Keyword arguments (should've been done at 1.3 :-( ) + + 10-Jan-1995 GvR Renamed globals to new naming scheme + + 9-Jan-1995 GvR Initial version (incompatible with older API) +*/ + +/* The PYTHON_ABI_VERSION is introduced in PEP 384. For the lifetime of + Python 3, it will stay at the value of 3; changes to the limited API + must be performed in a strictly backwards-compatible manner. */ +#define PYTHON_ABI_VERSION 3 +#define PYTHON_ABI_STRING "3" + +#ifdef Py_TRACE_REFS + /* When we are tracing reference counts, rename module creation functions so + modules compiled with incompatible settings will generate a + link-time error. 
*/ + #define PyModule_Create2 PyModule_Create2TraceRefs + #define PyModule_FromDefAndSpec2 PyModule_FromDefAndSpec2TraceRefs +#endif + +PyAPI_FUNC(PyObject *) PyModule_Create2(struct PyModuleDef*, + int apiver); +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject *) _PyModule_CreateInitialized(struct PyModuleDef*, + int apiver); +#endif + +#ifdef Py_LIMITED_API +#define PyModule_Create(module) \ + PyModule_Create2(module, PYTHON_ABI_VERSION) +#else +#define PyModule_Create(module) \ + PyModule_Create2(module, PYTHON_API_VERSION) +#endif + +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03050000 +/* New in 3.5 */ +PyAPI_FUNC(PyObject *) PyModule_FromDefAndSpec2(PyModuleDef *def, + PyObject *spec, + int module_api_version); + +#ifdef Py_LIMITED_API +#define PyModule_FromDefAndSpec(module, spec) \ + PyModule_FromDefAndSpec2(module, spec, PYTHON_ABI_VERSION) +#else +#define PyModule_FromDefAndSpec(module, spec) \ + PyModule_FromDefAndSpec2(module, spec, PYTHON_API_VERSION) +#endif /* Py_LIMITED_API */ +#endif /* New in 3.5 */ + +#ifndef Py_LIMITED_API +PyAPI_DATA(const char *) _Py_PackageContext; +#endif + +#ifdef __cplusplus +} +#endif +#endif /* !Py_MODSUPPORT_H */ diff --git a/venv/Include/moduleobject.h b/venv/Include/moduleobject.h new file mode 100644 index 00000000..1d8fe46d --- /dev/null +++ b/venv/Include/moduleobject.h @@ -0,0 +1,89 @@ + +/* Module object interface */ + +#ifndef Py_MODULEOBJECT_H +#define Py_MODULEOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +PyAPI_DATA(PyTypeObject) PyModule_Type; + +#define PyModule_Check(op) PyObject_TypeCheck(op, &PyModule_Type) +#define PyModule_CheckExact(op) (Py_TYPE(op) == &PyModule_Type) + +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03030000 +PyAPI_FUNC(PyObject *) PyModule_NewObject( + PyObject *name + ); +#endif +PyAPI_FUNC(PyObject *) PyModule_New( + const char *name /* UTF-8 encoded string */ + ); +PyAPI_FUNC(PyObject *) PyModule_GetDict(PyObject *); +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03030000 +PyAPI_FUNC(PyObject *) PyModule_GetNameObject(PyObject *); +#endif +PyAPI_FUNC(const char *) PyModule_GetName(PyObject *); +PyAPI_FUNC(const char *) PyModule_GetFilename(PyObject *) Py_DEPRECATED(3.2); +PyAPI_FUNC(PyObject *) PyModule_GetFilenameObject(PyObject *); +#ifndef Py_LIMITED_API +PyAPI_FUNC(void) _PyModule_Clear(PyObject *); +PyAPI_FUNC(void) _PyModule_ClearDict(PyObject *); +#endif +PyAPI_FUNC(struct PyModuleDef*) PyModule_GetDef(PyObject*); +PyAPI_FUNC(void*) PyModule_GetState(PyObject*); + +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03050000 +/* New in 3.5 */ +PyAPI_FUNC(PyObject *) PyModuleDef_Init(struct PyModuleDef*); +PyAPI_DATA(PyTypeObject) PyModuleDef_Type; +#endif + +typedef struct PyModuleDef_Base { + PyObject_HEAD + PyObject* (*m_init)(void); + Py_ssize_t m_index; + PyObject* m_copy; +} PyModuleDef_Base; + +#define PyModuleDef_HEAD_INIT { \ + PyObject_HEAD_INIT(NULL) \ + NULL, /* m_init */ \ + 0, /* m_index */ \ + NULL, /* m_copy */ \ + } + +struct PyModuleDef_Slot; +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03050000 +/* New in 3.5 */ +typedef struct PyModuleDef_Slot{ + int slot; + void *value; +} PyModuleDef_Slot; + +#define Py_mod_create 1 +#define Py_mod_exec 2 + +#ifndef Py_LIMITED_API +#define _Py_mod_LAST_SLOT 2 +#endif + +#endif /* New in 3.5 */ + +typedef struct PyModuleDef{ + PyModuleDef_Base m_base; + const char* m_name; + const char* m_doc; + Py_ssize_t m_size; + PyMethodDef *m_methods; + struct PyModuleDef_Slot* m_slots; + traverseproc m_traverse; + 
inquiry m_clear; + freefunc m_free; +} PyModuleDef; + +#ifdef __cplusplus +} +#endif +#endif /* !Py_MODULEOBJECT_H */ diff --git a/venv/Include/namespaceobject.h b/venv/Include/namespaceobject.h new file mode 100644 index 00000000..0c8d95c0 --- /dev/null +++ b/venv/Include/namespaceobject.h @@ -0,0 +1,19 @@ + +/* simple namespace object interface */ + +#ifndef NAMESPACEOBJECT_H +#define NAMESPACEOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_LIMITED_API +PyAPI_DATA(PyTypeObject) _PyNamespace_Type; + +PyAPI_FUNC(PyObject *) _PyNamespace_New(PyObject *kwds); +#endif /* !Py_LIMITED_API */ + +#ifdef __cplusplus +} +#endif +#endif /* !NAMESPACEOBJECT_H */ diff --git a/venv/Include/node.h b/venv/Include/node.h new file mode 100644 index 00000000..40596dfe --- /dev/null +++ b/venv/Include/node.h @@ -0,0 +1,44 @@ + +/* Parse tree node interface */ + +#ifndef Py_NODE_H +#define Py_NODE_H +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct _node { + short n_type; + char *n_str; + int n_lineno; + int n_col_offset; + int n_nchildren; + struct _node *n_child; +} node; + +PyAPI_FUNC(node *) PyNode_New(int type); +PyAPI_FUNC(int) PyNode_AddChild(node *n, int type, + char *str, int lineno, int col_offset); +PyAPI_FUNC(void) PyNode_Free(node *n); +#ifndef Py_LIMITED_API +PyAPI_FUNC(Py_ssize_t) _PyNode_SizeOf(node *n); +#endif + +/* Node access functions */ +#define NCH(n) ((n)->n_nchildren) + +#define CHILD(n, i) (&(n)->n_child[i]) +#define RCHILD(n, i) (CHILD(n, NCH(n) + i)) +#define TYPE(n) ((n)->n_type) +#define STR(n) ((n)->n_str) +#define LINENO(n) ((n)->n_lineno) + +/* Assert that the type of a node is what we expect */ +#define REQ(n, type) assert(TYPE(n) == (type)) + +PyAPI_FUNC(void) PyNode_ListTree(node *); + +#ifdef __cplusplus +} +#endif +#endif /* !Py_NODE_H */ diff --git a/venv/Include/object.h b/venv/Include/object.h new file mode 100644 index 00000000..c772deaf --- /dev/null +++ b/venv/Include/object.h @@ -0,0 +1,1104 @@ +#ifndef Py_OBJECT_H +#define Py_OBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + + +/* Object and type object interface */ + +/* +Objects are structures allocated on the heap. Special rules apply to +the use of objects to ensure they are properly garbage-collected. +Objects are never allocated statically or on the stack; they must be +accessed through special macros and functions only. (Type objects are +exceptions to the first rule; the standard types are represented by +statically initialized type objects, although work on type/class unification +for Python 2.2 made it possible to have heap-allocated type objects too). + +An object has a 'reference count' that is increased or decreased when a +pointer to the object is copied or deleted; when the reference count +reaches zero there are no references to the object left and it can be +removed from the heap. + +An object has a 'type' that determines what it represents and what kind +of data it contains. An object's type is fixed when it is created. +Types themselves are represented as objects; an object contains a +pointer to the corresponding type object. The type itself has a type +pointer pointing to the object representing the type 'type', which +contains a pointer to itself!). + +Objects do not float around in memory; once allocated an object keeps +the same size and address. Objects that must hold variable-size data +can contain pointers to variable-size parts of the object. Not all +objects of the same type have the same size; but the size cannot change +after allocation. 
(These restrictions are made so a reference to an +object can be simply a pointer -- moving an object would require +updating all the pointers, and changing an object's size would require +moving it if there was another object right next to it.) + +Objects are always accessed through pointers of the type 'PyObject *'. +The type 'PyObject' is a structure that only contains the reference count +and the type pointer. The actual memory allocated for an object +contains other data that can only be accessed after casting the pointer +to a pointer to a longer structure type. This longer type must start +with the reference count and type fields; the macro PyObject_HEAD should be +used for this (to accommodate for future changes). The implementation +of a particular object type can cast the object pointer to the proper +type and back. + +A standard interface exists for objects that contain an array of items +whose size is determined when the object is allocated. +*/ + +/* Py_DEBUG implies Py_TRACE_REFS. */ +#if defined(Py_DEBUG) && !defined(Py_TRACE_REFS) +#define Py_TRACE_REFS +#endif + +/* Py_TRACE_REFS implies Py_REF_DEBUG. */ +#if defined(Py_TRACE_REFS) && !defined(Py_REF_DEBUG) +#define Py_REF_DEBUG +#endif + +#if defined(Py_LIMITED_API) && defined(Py_REF_DEBUG) +#error Py_LIMITED_API is incompatible with Py_DEBUG, Py_TRACE_REFS, and Py_REF_DEBUG +#endif + + +#ifdef Py_TRACE_REFS +/* Define pointers to support a doubly-linked list of all live heap objects. */ +#define _PyObject_HEAD_EXTRA \ + struct _object *_ob_next; \ + struct _object *_ob_prev; + +#define _PyObject_EXTRA_INIT 0, 0, + +#else +#define _PyObject_HEAD_EXTRA +#define _PyObject_EXTRA_INIT +#endif + +/* PyObject_HEAD defines the initial segment of every PyObject. */ +#define PyObject_HEAD PyObject ob_base; + +#define PyObject_HEAD_INIT(type) \ + { _PyObject_EXTRA_INIT \ + 1, type }, + +#define PyVarObject_HEAD_INIT(type, size) \ + { PyObject_HEAD_INIT(type) size }, + +/* PyObject_VAR_HEAD defines the initial segment of all variable-size + * container objects. These end with a declaration of an array with 1 + * element, but enough space is malloc'ed so that the array actually + * has room for ob_size elements. Note that ob_size is an element count, + * not necessarily a byte count. + */ +#define PyObject_VAR_HEAD PyVarObject ob_base; +#define Py_INVALID_SIZE (Py_ssize_t)-1 + +/* Nothing is actually declared to be a PyObject, but every pointer to + * a Python object can be cast to a PyObject*. This is inheritance built + * by hand. Similarly every pointer to a variable-size Python object can, + * in addition, be cast to PyVarObject*. + */ +typedef struct _object { + _PyObject_HEAD_EXTRA + Py_ssize_t ob_refcnt; + struct _typeobject *ob_type; +} PyObject; + +typedef struct { + PyObject ob_base; + Py_ssize_t ob_size; /* Number of items in variable part */ +} PyVarObject; + +#define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt) +#define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) +#define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) + +#ifndef Py_LIMITED_API +/********************* String Literals ****************************************/ +/* This structure helps managing static strings. The basic usage goes like this: + Instead of doing + + r = PyObject_CallMethod(o, "foo", "args", ...); + + do + + _Py_IDENTIFIER(foo); + ... + r = _PyObject_CallMethodId(o, &PyId_foo, "args", ...); + + PyId_foo is a static variable, either on block level or file level. On first + usage, the string "foo" is interned, and the structures are linked. 
On interpreter + shutdown, all strings are released (through _PyUnicode_ClearStaticStrings). + + Alternatively, _Py_static_string allows choosing the variable name. + _PyUnicode_FromId returns a borrowed reference to the interned string. + _PyObject_{Get,Set,Has}AttrId are __getattr__ versions using _Py_Identifier*. +*/ +typedef struct _Py_Identifier { + struct _Py_Identifier *next; + const char* string; + PyObject *object; +} _Py_Identifier; + +#define _Py_static_string_init(value) { .next = NULL, .string = value, .object = NULL } +#define _Py_static_string(varname, value) static _Py_Identifier varname = _Py_static_string_init(value) +#define _Py_IDENTIFIER(varname) _Py_static_string(PyId_##varname, #varname) + +#endif /* !Py_LIMITED_API */ + +/* +Type objects contain a string containing the type name (to help somewhat +in debugging), the allocation parameters (see PyObject_New() and +PyObject_NewVar()), +and methods for accessing objects of the type. Methods are optional, a +nil pointer meaning that particular kind of access is not available for +this type. The Py_DECREF() macro uses the tp_dealloc method without +checking for a nil pointer; it should always be implemented except if +the implementation can guarantee that the reference count will never +reach zero (e.g., for statically allocated type objects). + +NB: the methods for certain type groups are now contained in separate +method blocks. +*/ + +typedef PyObject * (*unaryfunc)(PyObject *); +typedef PyObject * (*binaryfunc)(PyObject *, PyObject *); +typedef PyObject * (*ternaryfunc)(PyObject *, PyObject *, PyObject *); +typedef int (*inquiry)(PyObject *); +typedef Py_ssize_t (*lenfunc)(PyObject *); +typedef PyObject *(*ssizeargfunc)(PyObject *, Py_ssize_t); +typedef PyObject *(*ssizessizeargfunc)(PyObject *, Py_ssize_t, Py_ssize_t); +typedef int(*ssizeobjargproc)(PyObject *, Py_ssize_t, PyObject *); +typedef int(*ssizessizeobjargproc)(PyObject *, Py_ssize_t, Py_ssize_t, PyObject *); +typedef int(*objobjargproc)(PyObject *, PyObject *, PyObject *); + +#ifndef Py_LIMITED_API +/* buffer interface */ +typedef struct bufferinfo { + void *buf; + PyObject *obj; /* owned reference */ + Py_ssize_t len; + Py_ssize_t itemsize; /* This is Py_ssize_t so it can be + pointed to by strides in simple case.*/ + int readonly; + int ndim; + char *format; + Py_ssize_t *shape; + Py_ssize_t *strides; + Py_ssize_t *suboffsets; + void *internal; +} Py_buffer; + +typedef int (*getbufferproc)(PyObject *, Py_buffer *, int); +typedef void (*releasebufferproc)(PyObject *, Py_buffer *); + +/* Maximum number of dimensions */ +#define PyBUF_MAX_NDIM 64 + +/* Flags for getting buffers */ +#define PyBUF_SIMPLE 0 +#define PyBUF_WRITABLE 0x0001 +/* we used to include an E, backwards compatible alias */ +#define PyBUF_WRITEABLE PyBUF_WRITABLE +#define PyBUF_FORMAT 0x0004 +#define PyBUF_ND 0x0008 +#define PyBUF_STRIDES (0x0010 | PyBUF_ND) +#define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES) +#define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES) +#define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES) +#define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES) + +#define PyBUF_CONTIG (PyBUF_ND | PyBUF_WRITABLE) +#define PyBUF_CONTIG_RO (PyBUF_ND) + +#define PyBUF_STRIDED (PyBUF_STRIDES | PyBUF_WRITABLE) +#define PyBUF_STRIDED_RO (PyBUF_STRIDES) + +#define PyBUF_RECORDS (PyBUF_STRIDES | PyBUF_WRITABLE | PyBUF_FORMAT) +#define PyBUF_RECORDS_RO (PyBUF_STRIDES | PyBUF_FORMAT) + +#define PyBUF_FULL (PyBUF_INDIRECT | PyBUF_WRITABLE | PyBUF_FORMAT) +#define PyBUF_FULL_RO (PyBUF_INDIRECT | 
PyBUF_FORMAT) + + +#define PyBUF_READ 0x100 +#define PyBUF_WRITE 0x200 + +/* End buffer interface */ +#endif /* Py_LIMITED_API */ + +typedef int (*objobjproc)(PyObject *, PyObject *); +typedef int (*visitproc)(PyObject *, void *); +typedef int (*traverseproc)(PyObject *, visitproc, void *); + +#ifndef Py_LIMITED_API +typedef struct { + /* Number implementations must check *both* + arguments for proper type and implement the necessary conversions + in the slot functions themselves. */ + + binaryfunc nb_add; + binaryfunc nb_subtract; + binaryfunc nb_multiply; + binaryfunc nb_remainder; + binaryfunc nb_divmod; + ternaryfunc nb_power; + unaryfunc nb_negative; + unaryfunc nb_positive; + unaryfunc nb_absolute; + inquiry nb_bool; + unaryfunc nb_invert; + binaryfunc nb_lshift; + binaryfunc nb_rshift; + binaryfunc nb_and; + binaryfunc nb_xor; + binaryfunc nb_or; + unaryfunc nb_int; + void *nb_reserved; /* the slot formerly known as nb_long */ + unaryfunc nb_float; + + binaryfunc nb_inplace_add; + binaryfunc nb_inplace_subtract; + binaryfunc nb_inplace_multiply; + binaryfunc nb_inplace_remainder; + ternaryfunc nb_inplace_power; + binaryfunc nb_inplace_lshift; + binaryfunc nb_inplace_rshift; + binaryfunc nb_inplace_and; + binaryfunc nb_inplace_xor; + binaryfunc nb_inplace_or; + + binaryfunc nb_floor_divide; + binaryfunc nb_true_divide; + binaryfunc nb_inplace_floor_divide; + binaryfunc nb_inplace_true_divide; + + unaryfunc nb_index; + + binaryfunc nb_matrix_multiply; + binaryfunc nb_inplace_matrix_multiply; +} PyNumberMethods; + +typedef struct { + lenfunc sq_length; + binaryfunc sq_concat; + ssizeargfunc sq_repeat; + ssizeargfunc sq_item; + void *was_sq_slice; + ssizeobjargproc sq_ass_item; + void *was_sq_ass_slice; + objobjproc sq_contains; + + binaryfunc sq_inplace_concat; + ssizeargfunc sq_inplace_repeat; +} PySequenceMethods; + +typedef struct { + lenfunc mp_length; + binaryfunc mp_subscript; + objobjargproc mp_ass_subscript; +} PyMappingMethods; + +typedef struct { + unaryfunc am_await; + unaryfunc am_aiter; + unaryfunc am_anext; +} PyAsyncMethods; + +typedef struct { + getbufferproc bf_getbuffer; + releasebufferproc bf_releasebuffer; +} PyBufferProcs; +#endif /* Py_LIMITED_API */ + +typedef void (*freefunc)(void *); +typedef void (*destructor)(PyObject *); +#ifndef Py_LIMITED_API +/* We can't provide a full compile-time check that limited-API + users won't implement tp_print. However, not defining printfunc + and making tp_print of a different function pointer type + should at least cause a warning in most cases. 
*/ +typedef int (*printfunc)(PyObject *, FILE *, int); +#endif +typedef PyObject *(*getattrfunc)(PyObject *, char *); +typedef PyObject *(*getattrofunc)(PyObject *, PyObject *); +typedef int (*setattrfunc)(PyObject *, char *, PyObject *); +typedef int (*setattrofunc)(PyObject *, PyObject *, PyObject *); +typedef PyObject *(*reprfunc)(PyObject *); +typedef Py_hash_t (*hashfunc)(PyObject *); +typedef PyObject *(*richcmpfunc) (PyObject *, PyObject *, int); +typedef PyObject *(*getiterfunc) (PyObject *); +typedef PyObject *(*iternextfunc) (PyObject *); +typedef PyObject *(*descrgetfunc) (PyObject *, PyObject *, PyObject *); +typedef int (*descrsetfunc) (PyObject *, PyObject *, PyObject *); +typedef int (*initproc)(PyObject *, PyObject *, PyObject *); +typedef PyObject *(*newfunc)(struct _typeobject *, PyObject *, PyObject *); +typedef PyObject *(*allocfunc)(struct _typeobject *, Py_ssize_t); + +#ifdef Py_LIMITED_API +typedef struct _typeobject PyTypeObject; /* opaque */ +#else +typedef struct _typeobject { + PyObject_VAR_HEAD + const char *tp_name; /* For printing, in format "<module>.<name>" */ + Py_ssize_t tp_basicsize, tp_itemsize; /* For allocation */ + + /* Methods to implement standard operations */ + + destructor tp_dealloc; + printfunc tp_print; + getattrfunc tp_getattr; + setattrfunc tp_setattr; + PyAsyncMethods *tp_as_async; /* formerly known as tp_compare (Python 2) + or tp_reserved (Python 3) */ + reprfunc tp_repr; + + /* Method suites for standard classes */ + + PyNumberMethods *tp_as_number; + PySequenceMethods *tp_as_sequence; + PyMappingMethods *tp_as_mapping; + + /* More standard operations (here for binary compatibility) */ + + hashfunc tp_hash; + ternaryfunc tp_call; + reprfunc tp_str; + getattrofunc tp_getattro; + setattrofunc tp_setattro; + + /* Functions to access object as input/output buffer */ + PyBufferProcs *tp_as_buffer; + + /* Flags to define presence of optional/expanded features */ + unsigned long tp_flags; + + const char *tp_doc; /* Documentation string */ + + /* Assigned meaning in release 2.0 */ + /* call function for all accessible objects */ + traverseproc tp_traverse; + + /* delete references to contained objects */ + inquiry tp_clear; + + /* Assigned meaning in release 2.1 */ + /* rich comparisons */ + richcmpfunc tp_richcompare; + + /* weak reference enabler */ + Py_ssize_t tp_weaklistoffset; + + /* Iterators */ + getiterfunc tp_iter; + iternextfunc tp_iternext; + + /* Attribute descriptor and subclassing stuff */ + struct PyMethodDef *tp_methods; + struct PyMemberDef *tp_members; + struct PyGetSetDef *tp_getset; + struct _typeobject *tp_base; + PyObject *tp_dict; + descrgetfunc tp_descr_get; + descrsetfunc tp_descr_set; + Py_ssize_t tp_dictoffset; + initproc tp_init; + allocfunc tp_alloc; + newfunc tp_new; + freefunc tp_free; /* Low-level free-memory routine */ + inquiry tp_is_gc; /* For PyObject_IS_GC */ + PyObject *tp_bases; + PyObject *tp_mro; /* method resolution order */ + PyObject *tp_cache; + PyObject *tp_subclasses; + PyObject *tp_weaklist; + destructor tp_del; + + /* Type attribute cache version tag. 
Added in version 2.6 */ + unsigned int tp_version_tag; + + destructor tp_finalize; + +#ifdef COUNT_ALLOCS + /* these must be last and never explicitly initialized */ + Py_ssize_t tp_allocs; + Py_ssize_t tp_frees; + Py_ssize_t tp_maxalloc; + struct _typeobject *tp_prev; + struct _typeobject *tp_next; +#endif +} PyTypeObject; +#endif + +typedef struct{ + int slot; /* slot id, see below */ + void *pfunc; /* function pointer */ +} PyType_Slot; + +typedef struct{ + const char* name; + int basicsize; + int itemsize; + unsigned int flags; + PyType_Slot *slots; /* terminated by slot==0. */ +} PyType_Spec; + +PyAPI_FUNC(PyObject*) PyType_FromSpec(PyType_Spec*); +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03030000 +PyAPI_FUNC(PyObject*) PyType_FromSpecWithBases(PyType_Spec*, PyObject*); +#endif +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03040000 +PyAPI_FUNC(void*) PyType_GetSlot(PyTypeObject*, int); +#endif + +#ifndef Py_LIMITED_API +/* The *real* layout of a type object when allocated on the heap */ +typedef struct _heaptypeobject { + /* Note: there's a dependency on the order of these members + in slotptr() in typeobject.c . */ + PyTypeObject ht_type; + PyAsyncMethods as_async; + PyNumberMethods as_number; + PyMappingMethods as_mapping; + PySequenceMethods as_sequence; /* as_sequence comes after as_mapping, + so that the mapping wins when both + the mapping and the sequence define + a given operator (e.g. __getitem__). + see add_operators() in typeobject.c . */ + PyBufferProcs as_buffer; + PyObject *ht_name, *ht_slots, *ht_qualname; + struct _dictkeysobject *ht_cached_keys; + /* here are optional user slots, followed by the members. */ +} PyHeapTypeObject; + +/* access macro to the members which are floating "behind" the object */ +#define PyHeapType_GET_MEMBERS(etype) \ + ((PyMemberDef *)(((char *)etype) + Py_TYPE(etype)->tp_basicsize)) +#endif + +/* Generic type check */ +PyAPI_FUNC(int) PyType_IsSubtype(PyTypeObject *, PyTypeObject *); +#define PyObject_TypeCheck(ob, tp) \ + (Py_TYPE(ob) == (tp) || PyType_IsSubtype(Py_TYPE(ob), (tp))) + +PyAPI_DATA(PyTypeObject) PyType_Type; /* built-in 'type' */ +PyAPI_DATA(PyTypeObject) PyBaseObject_Type; /* built-in 'object' */ +PyAPI_DATA(PyTypeObject) PySuper_Type; /* built-in 'super' */ + +PyAPI_FUNC(unsigned long) PyType_GetFlags(PyTypeObject*); + +#define PyType_Check(op) \ + PyType_FastSubclass(Py_TYPE(op), Py_TPFLAGS_TYPE_SUBCLASS) +#define PyType_CheckExact(op) (Py_TYPE(op) == &PyType_Type) + +PyAPI_FUNC(int) PyType_Ready(PyTypeObject *); +PyAPI_FUNC(PyObject *) PyType_GenericAlloc(PyTypeObject *, Py_ssize_t); +PyAPI_FUNC(PyObject *) PyType_GenericNew(PyTypeObject *, + PyObject *, PyObject *); +#ifndef Py_LIMITED_API +PyAPI_FUNC(const char *) _PyType_Name(PyTypeObject *); +PyAPI_FUNC(PyObject *) _PyType_Lookup(PyTypeObject *, PyObject *); +PyAPI_FUNC(PyObject *) _PyType_LookupId(PyTypeObject *, _Py_Identifier *); +PyAPI_FUNC(PyObject *) _PyObject_LookupSpecial(PyObject *, _Py_Identifier *); +PyAPI_FUNC(PyTypeObject *) _PyType_CalculateMetaclass(PyTypeObject *, PyObject *); +#endif +PyAPI_FUNC(unsigned int) PyType_ClearCache(void); +PyAPI_FUNC(void) PyType_Modified(PyTypeObject *); + +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject *) _PyType_GetDocFromInternalDoc(const char *, const char *); +PyAPI_FUNC(PyObject *) _PyType_GetTextSignatureFromInternalDoc(const char *, const char *); +#endif + +/* Generic operations on objects */ +#ifndef Py_LIMITED_API +struct _Py_Identifier; +PyAPI_FUNC(int) PyObject_Print(PyObject *, FILE *, 
int); +PyAPI_FUNC(void) _Py_BreakPoint(void); +PyAPI_FUNC(void) _PyObject_Dump(PyObject *); +#endif +PyAPI_FUNC(PyObject *) PyObject_Repr(PyObject *); +PyAPI_FUNC(PyObject *) PyObject_Str(PyObject *); +PyAPI_FUNC(PyObject *) PyObject_ASCII(PyObject *); +PyAPI_FUNC(PyObject *) PyObject_Bytes(PyObject *); +PyAPI_FUNC(PyObject *) PyObject_RichCompare(PyObject *, PyObject *, int); +PyAPI_FUNC(int) PyObject_RichCompareBool(PyObject *, PyObject *, int); +PyAPI_FUNC(PyObject *) PyObject_GetAttrString(PyObject *, const char *); +PyAPI_FUNC(int) PyObject_SetAttrString(PyObject *, const char *, PyObject *); +PyAPI_FUNC(int) PyObject_HasAttrString(PyObject *, const char *); +PyAPI_FUNC(PyObject *) PyObject_GetAttr(PyObject *, PyObject *); +PyAPI_FUNC(int) PyObject_SetAttr(PyObject *, PyObject *, PyObject *); +PyAPI_FUNC(int) PyObject_HasAttr(PyObject *, PyObject *); +#ifndef Py_LIMITED_API +PyAPI_FUNC(int) _PyObject_IsAbstract(PyObject *); +PyAPI_FUNC(PyObject *) _PyObject_GetAttrId(PyObject *, struct _Py_Identifier *); +PyAPI_FUNC(int) _PyObject_SetAttrId(PyObject *, struct _Py_Identifier *, PyObject *); +PyAPI_FUNC(int) _PyObject_HasAttrId(PyObject *, struct _Py_Identifier *); +/* Replacements of PyObject_GetAttr() and _PyObject_GetAttrId() which + don't raise AttributeError. + + Return 1 and set *result != NULL if an attribute is found. + Return 0 and set *result == NULL if an attribute is not found; + an AttributeError is silenced. + Return -1 and set *result == NULL if an error other than AttributeError + is raised. +*/ +PyAPI_FUNC(int) _PyObject_LookupAttr(PyObject *, PyObject *, PyObject **); +PyAPI_FUNC(int) _PyObject_LookupAttrId(PyObject *, struct _Py_Identifier *, PyObject **); +PyAPI_FUNC(PyObject **) _PyObject_GetDictPtr(PyObject *); +#endif +PyAPI_FUNC(PyObject *) PyObject_SelfIter(PyObject *); +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject *) _PyObject_NextNotImplemented(PyObject *); +#endif +PyAPI_FUNC(PyObject *) PyObject_GenericGetAttr(PyObject *, PyObject *); +PyAPI_FUNC(int) PyObject_GenericSetAttr(PyObject *, + PyObject *, PyObject *); +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03030000 +PyAPI_FUNC(int) PyObject_GenericSetDict(PyObject *, PyObject *, void *); +#endif +PyAPI_FUNC(Py_hash_t) PyObject_Hash(PyObject *); +PyAPI_FUNC(Py_hash_t) PyObject_HashNotImplemented(PyObject *); +PyAPI_FUNC(int) PyObject_IsTrue(PyObject *); +PyAPI_FUNC(int) PyObject_Not(PyObject *); +PyAPI_FUNC(int) PyCallable_Check(PyObject *); + +PyAPI_FUNC(void) PyObject_ClearWeakRefs(PyObject *); +#ifndef Py_LIMITED_API +PyAPI_FUNC(void) PyObject_CallFinalizer(PyObject *); +PyAPI_FUNC(int) PyObject_CallFinalizerFromDealloc(PyObject *); +#endif + +#ifndef Py_LIMITED_API +/* Same as PyObject_Generic{Get,Set}Attr, but passing the attributes + dict as the last parameter. */ +PyAPI_FUNC(PyObject *) +_PyObject_GenericGetAttrWithDict(PyObject *, PyObject *, PyObject *, int); +PyAPI_FUNC(int) +_PyObject_GenericSetAttrWithDict(PyObject *, PyObject *, + PyObject *, PyObject *); +#endif /* !Py_LIMITED_API */ + +/* Helper to look up a builtin object */ +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject *) +_PyObject_GetBuiltin(const char *name); +#endif + +/* PyObject_Dir(obj) acts like Python builtins.dir(obj), returning a + list of strings. PyObject_Dir(NULL) is like builtins.dir(), + returning the names of the current locals. In this case, if there are + no current locals, NULL is returned, and PyErr_Occurred() is false. 
+*/ +PyAPI_FUNC(PyObject *) PyObject_Dir(PyObject *); + + +/* Helpers for printing recursive container types */ +PyAPI_FUNC(int) Py_ReprEnter(PyObject *); +PyAPI_FUNC(void) Py_ReprLeave(PyObject *); + +/* Flag bits for printing: */ +#define Py_PRINT_RAW 1 /* No string quotes etc. */ + +/* +`Type flags (tp_flags) + +These flags are used to extend the type structure in a backwards-compatible +fashion. Extensions can use the flags to indicate (and test) when a given +type structure contains a new feature. The Python core will use these when +introducing new functionality between major revisions (to avoid mid-version +changes in the PYTHON_API_VERSION). + +Arbitration of the flag bit positions will need to be coordinated among +all extension writers who publicly release their extensions (this will +be fewer than you might expect!).. + +Most flags were removed as of Python 3.0 to make room for new flags. (Some +flags are not for backwards compatibility but to indicate the presence of an +optional feature; these flags remain of course.) + +Type definitions should use Py_TPFLAGS_DEFAULT for their tp_flags value. + +Code can use PyType_HasFeature(type_ob, flag_value) to test whether the +given type object has a specified feature. +*/ + +/* Set if the type object is dynamically allocated */ +#define Py_TPFLAGS_HEAPTYPE (1UL << 9) + +/* Set if the type allows subclassing */ +#define Py_TPFLAGS_BASETYPE (1UL << 10) + +/* Set if the type is 'ready' -- fully initialized */ +#define Py_TPFLAGS_READY (1UL << 12) + +/* Set while the type is being 'readied', to prevent recursive ready calls */ +#define Py_TPFLAGS_READYING (1UL << 13) + +/* Objects support garbage collection (see objimp.h) */ +#define Py_TPFLAGS_HAVE_GC (1UL << 14) + +/* These two bits are preserved for Stackless Python, next after this is 17 */ +#ifdef STACKLESS +#define Py_TPFLAGS_HAVE_STACKLESS_EXTENSION (3UL << 15) +#else +#define Py_TPFLAGS_HAVE_STACKLESS_EXTENSION 0 +#endif + +/* Objects support type attribute cache */ +#define Py_TPFLAGS_HAVE_VERSION_TAG (1UL << 18) +#define Py_TPFLAGS_VALID_VERSION_TAG (1UL << 19) + +/* Type is abstract and cannot be instantiated */ +#define Py_TPFLAGS_IS_ABSTRACT (1UL << 20) + +/* These flags are used to determine if a type is a subclass. */ +#define Py_TPFLAGS_LONG_SUBCLASS (1UL << 24) +#define Py_TPFLAGS_LIST_SUBCLASS (1UL << 25) +#define Py_TPFLAGS_TUPLE_SUBCLASS (1UL << 26) +#define Py_TPFLAGS_BYTES_SUBCLASS (1UL << 27) +#define Py_TPFLAGS_UNICODE_SUBCLASS (1UL << 28) +#define Py_TPFLAGS_DICT_SUBCLASS (1UL << 29) +#define Py_TPFLAGS_BASE_EXC_SUBCLASS (1UL << 30) +#define Py_TPFLAGS_TYPE_SUBCLASS (1UL << 31) + +#define Py_TPFLAGS_DEFAULT ( \ + Py_TPFLAGS_HAVE_STACKLESS_EXTENSION | \ + Py_TPFLAGS_HAVE_VERSION_TAG | \ + 0) + +/* NOTE: The following flags reuse lower bits (removed as part of the + * Python 3.0 transition). */ + +/* Type structure has tp_finalize member (3.4) */ +#define Py_TPFLAGS_HAVE_FINALIZE (1UL << 0) + +#ifdef Py_LIMITED_API +#define PyType_HasFeature(t,f) ((PyType_GetFlags(t) & (f)) != 0) +#else +#define PyType_HasFeature(t,f) (((t)->tp_flags & (f)) != 0) +#endif +#define PyType_FastSubclass(t,f) PyType_HasFeature(t,f) + + +/* +The macros Py_INCREF(op) and Py_DECREF(op) are used to increment or decrement +reference counts. Py_DECREF calls the object's deallocator function when +the refcount falls to 0; for +objects that don't contain references to other objects or heap memory +this can be the standard function free(). 
Both macros can be used +wherever a void expression is allowed. The argument must not be a +NULL pointer. If it may be NULL, use Py_XINCREF/Py_XDECREF instead. +The macro _Py_NewReference(op) initialize reference counts to 1, and +in special builds (Py_REF_DEBUG, Py_TRACE_REFS) performs additional +bookkeeping appropriate to the special build. + +We assume that the reference count field can never overflow; this can +be proven when the size of the field is the same as the pointer size, so +we ignore the possibility. Provided a C int is at least 32 bits (which +is implicitly assumed in many parts of this code), that's enough for +about 2**31 references to an object. + +XXX The following became out of date in Python 2.2, but I'm not sure +XXX what the full truth is now. Certainly, heap-allocated type objects +XXX can and should be deallocated. +Type objects should never be deallocated; the type pointer in an object +is not considered to be a reference to the type object, to save +complications in the deallocation function. (This is actually a +decision that's up to the implementer of each new type so if you want, +you can count such references to the type object.) +*/ + +/* First define a pile of simple helper macros, one set per special + * build symbol. These either expand to the obvious things, or to + * nothing at all when the special mode isn't in effect. The main + * macros can later be defined just once then, yet expand to different + * things depending on which special build options are and aren't in effect. + * Trust me : while painful, this is 20x easier to understand than, + * e.g, defining _Py_NewReference five different times in a maze of nested + * #ifdefs (we used to do that -- it was impenetrable). + */ +#ifdef Py_REF_DEBUG +PyAPI_DATA(Py_ssize_t) _Py_RefTotal; +PyAPI_FUNC(void) _Py_NegativeRefcount(const char *fname, + int lineno, PyObject *op); +PyAPI_FUNC(Py_ssize_t) _Py_GetRefTotal(void); +#define _Py_INC_REFTOTAL _Py_RefTotal++ +#define _Py_DEC_REFTOTAL _Py_RefTotal-- +#define _Py_REF_DEBUG_COMMA , +#define _Py_CHECK_REFCNT(OP) \ +{ if (((PyObject*)OP)->ob_refcnt < 0) \ + _Py_NegativeRefcount(__FILE__, __LINE__, \ + (PyObject *)(OP)); \ +} +/* Py_REF_DEBUG also controls the display of refcounts and memory block + * allocations at the interactive prompt and at interpreter shutdown + */ +PyAPI_FUNC(void) _PyDebug_PrintTotalRefs(void); +#else +#define _Py_INC_REFTOTAL +#define _Py_DEC_REFTOTAL +#define _Py_REF_DEBUG_COMMA +#define _Py_CHECK_REFCNT(OP) /* a semicolon */; +#endif /* Py_REF_DEBUG */ + +#ifdef COUNT_ALLOCS +PyAPI_FUNC(void) inc_count(PyTypeObject *); +PyAPI_FUNC(void) dec_count(PyTypeObject *); +#define _Py_INC_TPALLOCS(OP) inc_count(Py_TYPE(OP)) +#define _Py_INC_TPFREES(OP) dec_count(Py_TYPE(OP)) +#define _Py_DEC_TPFREES(OP) Py_TYPE(OP)->tp_frees-- +#define _Py_COUNT_ALLOCS_COMMA , +#else +#define _Py_INC_TPALLOCS(OP) +#define _Py_INC_TPFREES(OP) +#define _Py_DEC_TPFREES(OP) +#define _Py_COUNT_ALLOCS_COMMA +#endif /* COUNT_ALLOCS */ + +#ifdef Py_TRACE_REFS +/* Py_TRACE_REFS is such major surgery that we call external routines. */ +PyAPI_FUNC(void) _Py_NewReference(PyObject *); +PyAPI_FUNC(void) _Py_ForgetReference(PyObject *); +PyAPI_FUNC(void) _Py_Dealloc(PyObject *); +PyAPI_FUNC(void) _Py_PrintReferences(FILE *); +PyAPI_FUNC(void) _Py_PrintReferenceAddresses(FILE *); +PyAPI_FUNC(void) _Py_AddToAllObjects(PyObject *, int force); + +#else +/* Without Py_TRACE_REFS, there's little enough to do that we expand code + * inline. 
+ */ +#define _Py_NewReference(op) ( \ + _Py_INC_TPALLOCS(op) _Py_COUNT_ALLOCS_COMMA \ + _Py_INC_REFTOTAL _Py_REF_DEBUG_COMMA \ + Py_REFCNT(op) = 1) + +#define _Py_ForgetReference(op) _Py_INC_TPFREES(op) + +#ifdef Py_LIMITED_API +PyAPI_FUNC(void) _Py_Dealloc(PyObject *); +#else +#define _Py_Dealloc(op) ( \ + _Py_INC_TPFREES(op) _Py_COUNT_ALLOCS_COMMA \ + (*Py_TYPE(op)->tp_dealloc)((PyObject *)(op))) +#endif +#endif /* !Py_TRACE_REFS */ + +#define Py_INCREF(op) ( \ + _Py_INC_REFTOTAL _Py_REF_DEBUG_COMMA \ + ((PyObject *)(op))->ob_refcnt++) + +#define Py_DECREF(op) \ + do { \ + PyObject *_py_decref_tmp = (PyObject *)(op); \ + if (_Py_DEC_REFTOTAL _Py_REF_DEBUG_COMMA \ + --(_py_decref_tmp)->ob_refcnt != 0) \ + _Py_CHECK_REFCNT(_py_decref_tmp) \ + else \ + _Py_Dealloc(_py_decref_tmp); \ + } while (0) + +/* Safely decref `op` and set `op` to NULL, especially useful in tp_clear + * and tp_dealloc implementations. + * + * Note that "the obvious" code can be deadly: + * + * Py_XDECREF(op); + * op = NULL; + * + * Typically, `op` is something like self->containee, and `self` is done + * using its `containee` member. In the code sequence above, suppose + * `containee` is non-NULL with a refcount of 1. Its refcount falls to + * 0 on the first line, which can trigger an arbitrary amount of code, + * possibly including finalizers (like __del__ methods or weakref callbacks) + * coded in Python, which in turn can release the GIL and allow other threads + * to run, etc. Such code may even invoke methods of `self` again, or cause + * cyclic gc to trigger, but-- oops! --self->containee still points to the + * object being torn down, and it may be in an insane state while being torn + * down. This has in fact been a rich historic source of miserable (rare & + * hard-to-diagnose) segfaulting (and other) bugs. + * + * The safe way is: + * + * Py_CLEAR(op); + * + * That arranges to set `op` to NULL _before_ decref'ing, so that any code + * triggered as a side-effect of `op` getting torn down no longer believes + * `op` points to a valid object. + * + * There are cases where it's safe to use the naive code, but they're brittle. + * For example, if `op` points to a Python integer, you know that destroying + * one of those can't cause problems -- but in part that relies on that + * Python integers aren't currently weakly referencable. Best practice is + * to use Py_CLEAR() even if you can't think of a reason for why you need to. + */ +#define Py_CLEAR(op) \ + do { \ + PyObject *_py_tmp = (PyObject *)(op); \ + if (_py_tmp != NULL) { \ + (op) = NULL; \ + Py_DECREF(_py_tmp); \ + } \ + } while (0) + +/* Macros to use in case the object pointer may be NULL: */ +#define Py_XINCREF(op) \ + do { \ + PyObject *_py_xincref_tmp = (PyObject *)(op); \ + if (_py_xincref_tmp != NULL) \ + Py_INCREF(_py_xincref_tmp); \ + } while (0) + +#define Py_XDECREF(op) \ + do { \ + PyObject *_py_xdecref_tmp = (PyObject *)(op); \ + if (_py_xdecref_tmp != NULL) \ + Py_DECREF(_py_xdecref_tmp); \ + } while (0) + +#ifndef Py_LIMITED_API +/* Safely decref `op` and set `op` to `op2`. + * + * As in case of Py_CLEAR "the obvious" code can be deadly: + * + * Py_DECREF(op); + * op = op2; + * + * The safe way is: + * + * Py_SETREF(op, op2); + * + * That arranges to set `op` to `op2` _before_ decref'ing, so that any code + * triggered as a side-effect of `op` getting torn down no longer believes + * `op` points to a valid object. + * + * Py_XSETREF is a variant of Py_SETREF that uses Py_XDECREF instead of + * Py_DECREF. 
+ */ + +#define Py_SETREF(op, op2) \ + do { \ + PyObject *_py_tmp = (PyObject *)(op); \ + (op) = (op2); \ + Py_DECREF(_py_tmp); \ + } while (0) + +#define Py_XSETREF(op, op2) \ + do { \ + PyObject *_py_tmp = (PyObject *)(op); \ + (op) = (op2); \ + Py_XDECREF(_py_tmp); \ + } while (0) + +#endif /* ifndef Py_LIMITED_API */ + +/* +These are provided as conveniences to Python runtime embedders, so that +they can have object code that is not dependent on Python compilation flags. +*/ +PyAPI_FUNC(void) Py_IncRef(PyObject *); +PyAPI_FUNC(void) Py_DecRef(PyObject *); + +#ifndef Py_LIMITED_API +PyAPI_DATA(PyTypeObject) _PyNone_Type; +PyAPI_DATA(PyTypeObject) _PyNotImplemented_Type; +#endif /* !Py_LIMITED_API */ + +/* +_Py_NoneStruct is an object of undefined type which can be used in contexts +where NULL (nil) is not suitable (since NULL often means 'error'). + +Don't forget to apply Py_INCREF() when returning this value!!! +*/ +PyAPI_DATA(PyObject) _Py_NoneStruct; /* Don't use this directly */ +#define Py_None (&_Py_NoneStruct) + +/* Macro for returning Py_None from a function */ +#define Py_RETURN_NONE return Py_INCREF(Py_None), Py_None + +/* +Py_NotImplemented is a singleton used to signal that an operation is +not implemented for a given type combination. +*/ +PyAPI_DATA(PyObject) _Py_NotImplementedStruct; /* Don't use this directly */ +#define Py_NotImplemented (&_Py_NotImplementedStruct) + +/* Macro for returning Py_NotImplemented from a function */ +#define Py_RETURN_NOTIMPLEMENTED \ + return Py_INCREF(Py_NotImplemented), Py_NotImplemented + +/* Rich comparison opcodes */ +#define Py_LT 0 +#define Py_LE 1 +#define Py_EQ 2 +#define Py_NE 3 +#define Py_GT 4 +#define Py_GE 5 + +/* + * Macro for implementing rich comparisons + * + * Needs to be a macro because any C-comparable type can be used. + */ +#define Py_RETURN_RICHCOMPARE(val1, val2, op) \ + do { \ + switch (op) { \ + case Py_EQ: if ((val1) == (val2)) Py_RETURN_TRUE; Py_RETURN_FALSE; \ + case Py_NE: if ((val1) != (val2)) Py_RETURN_TRUE; Py_RETURN_FALSE; \ + case Py_LT: if ((val1) < (val2)) Py_RETURN_TRUE; Py_RETURN_FALSE; \ + case Py_GT: if ((val1) > (val2)) Py_RETURN_TRUE; Py_RETURN_FALSE; \ + case Py_LE: if ((val1) <= (val2)) Py_RETURN_TRUE; Py_RETURN_FALSE; \ + case Py_GE: if ((val1) >= (val2)) Py_RETURN_TRUE; Py_RETURN_FALSE; \ + default: \ + Py_UNREACHABLE(); \ + } \ + } while (0) + +#ifndef Py_LIMITED_API +/* Maps Py_LT to Py_GT, ..., Py_GE to Py_LE. + * Defined in object.c. + */ +PyAPI_DATA(int) _Py_SwappedOp[]; +#endif /* !Py_LIMITED_API */ + + +/* +More conventions +================ + +Argument Checking +----------------- + +Functions that take objects as arguments normally don't check for nil +arguments, but they do check the type of the argument, and return an +error if the function doesn't apply to the type. + +Failure Modes +------------- + +Functions may fail for a variety of reasons, including running out of +memory. This is communicated to the caller in two ways: an error string +is set (see errors.h), and the function result differs: functions that +normally return a pointer return NULL for failure, functions returning +an integer return -1 (which could be a legal return value too!), and +other functions return 0 for success and -1 for failure. +Callers should always check for errors before using the result. If +an error was set, the caller must either explicitly clear it, or pass +the error on to its caller. + +Reference Counts +---------------- + +It takes a while to get used to the proper usage of reference counts. 
+ +Functions that create an object set the reference count to 1; such new +objects must be stored somewhere or destroyed again with Py_DECREF(). +Some functions that 'store' objects, such as PyTuple_SetItem() and +PyList_SetItem(), +don't increment the reference count of the object, since the most +frequent use is to store a fresh object. Functions that 'retrieve' +objects, such as PyTuple_GetItem() and PyDict_GetItemString(), also +don't increment +the reference count, since most frequently the object is only looked at +quickly. Thus, to retrieve an object and store it again, the caller +must call Py_INCREF() explicitly. + +NOTE: functions that 'consume' a reference count, like +PyList_SetItem(), consume the reference even if the object wasn't +successfully stored, to simplify error handling. + +It seems attractive to make other functions that take an object as +argument consume a reference count; however, this may quickly get +confusing (even the current practice is already confusing). Consider +it carefully, it may save lots of calls to Py_INCREF() and Py_DECREF() at +times. +*/ + + +/* Trashcan mechanism, thanks to Christian Tismer. + +When deallocating a container object, it's possible to trigger an unbounded +chain of deallocations, as each Py_DECREF in turn drops the refcount on "the +next" object in the chain to 0. This can easily lead to stack faults, and +especially in threads (which typically have less stack space to work with). + +A container object that participates in cyclic gc can avoid this by +bracketing the body of its tp_dealloc function with a pair of macros: + +static void +mytype_dealloc(mytype *p) +{ + ... declarations go here ... + + PyObject_GC_UnTrack(p); // must untrack first + Py_TRASHCAN_SAFE_BEGIN(p) + ... The body of the deallocator goes here, including all calls ... + ... to Py_DECREF on contained objects. ... + Py_TRASHCAN_SAFE_END(p) +} + +CAUTION: Never return from the middle of the body! If the body needs to +"get out early", put a label immediately before the Py_TRASHCAN_SAFE_END +call, and goto it. Else the call-depth counter (see below) will stay +above 0 forever, and the trashcan will never get emptied. + +How it works: The BEGIN macro increments a call-depth counter. So long +as this counter is small, the body of the deallocator is run directly without +further ado. But if the counter gets large, it instead adds p to a list of +objects to be deallocated later, skips the body of the deallocator, and +resumes execution after the END macro. The tp_dealloc routine then returns +without deallocating anything (and so unbounded call-stack depth is avoided). + +When the call stack finishes unwinding again, code generated by the END macro +notices this, and calls another routine to deallocate all the objects that +may have been added to the list of deferred deallocations. In effect, a +chain of N deallocations is broken into (N-1)/(PyTrash_UNWIND_LEVEL-1) pieces, +with the call stack never exceeding a depth of PyTrash_UNWIND_LEVEL. +*/ + +#ifndef Py_LIMITED_API +/* This is the old private API, invoked by the macros before 3.2.4. + Kept for binary compatibility of extensions using the stable ABI. */ +PyAPI_FUNC(void) _PyTrash_deposit_object(PyObject*); +PyAPI_FUNC(void) _PyTrash_destroy_chain(void); +#endif /* !Py_LIMITED_API */ + +/* The new thread-safe private API, invoked by the macros below. 
*/ +PyAPI_FUNC(void) _PyTrash_thread_deposit_object(PyObject*); +PyAPI_FUNC(void) _PyTrash_thread_destroy_chain(void); + +#define PyTrash_UNWIND_LEVEL 50 + +#define Py_TRASHCAN_SAFE_BEGIN(op) \ + do { \ + PyThreadState *_tstate = PyThreadState_GET(); \ + if (_tstate->trash_delete_nesting < PyTrash_UNWIND_LEVEL) { \ + ++_tstate->trash_delete_nesting; + /* The body of the deallocator is here. */ +#define Py_TRASHCAN_SAFE_END(op) \ + --_tstate->trash_delete_nesting; \ + if (_tstate->trash_delete_later && _tstate->trash_delete_nesting <= 0) \ + _PyTrash_thread_destroy_chain(); \ + } \ + else \ + _PyTrash_thread_deposit_object((PyObject*)op); \ + } while (0); + +#ifndef Py_LIMITED_API +PyAPI_FUNC(void) +_PyDebugAllocatorStats(FILE *out, const char *block_name, int num_blocks, + size_t sizeof_block); +PyAPI_FUNC(void) +_PyObject_DebugTypeStats(FILE *out); +#endif /* ifndef Py_LIMITED_API */ + +#ifdef __cplusplus +} +#endif +#endif /* !Py_OBJECT_H */ diff --git a/venv/Include/objimpl.h b/venv/Include/objimpl.h new file mode 100644 index 00000000..057bb50c --- /dev/null +++ b/venv/Include/objimpl.h @@ -0,0 +1,370 @@ +/* The PyObject_ memory family: high-level object memory interfaces. + See pymem.h for the low-level PyMem_ family. +*/ + +#ifndef Py_OBJIMPL_H +#define Py_OBJIMPL_H + +#include "pymem.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* BEWARE: + + Each interface exports both functions and macros. Extension modules should + use the functions, to ensure binary compatibility across Python versions. + Because the Python implementation is free to change internal details, and + the macros may (or may not) expose details for speed, if you do use the + macros you must recompile your extensions with each Python release. + + Never mix calls to PyObject_ memory functions with calls to the platform + malloc/realloc/ calloc/free, or with calls to PyMem_. +*/ + +/* +Functions and macros for modules that implement new object types. + + - PyObject_New(type, typeobj) allocates memory for a new object of the given + type, and initializes part of it. 'type' must be the C structure type used + to represent the object, and 'typeobj' the address of the corresponding + type object. Reference count and type pointer are filled in; the rest of + the bytes of the object are *undefined*! The resulting expression type is + 'type *'. The size of the object is determined by the tp_basicsize field + of the type object. + + - PyObject_NewVar(type, typeobj, n) is similar but allocates a variable-size + object with room for n items. In addition to the refcount and type pointer + fields, this also fills in the ob_size field. + + - PyObject_Del(op) releases the memory allocated for an object. It does not + run a destructor -- it only frees the memory. PyObject_Free is identical. + + - PyObject_Init(op, typeobj) and PyObject_InitVar(op, typeobj, n) don't + allocate memory. Instead of a 'type' parameter, they take a pointer to a + new object (allocated by an arbitrary allocator), and initialize its object + header fields. + +Note that objects created with PyObject_{New, NewVar} are allocated using the +specialized Python allocator (implemented in obmalloc.c), if WITH_PYMALLOC is +enabled. In addition, a special debugging allocator is used if PYMALLOC_DEBUG +is also #defined. 
+ +In case a specific form of memory management is needed (for example, if you +must use the platform malloc heap(s), or shared memory, or C++ local storage or +operator new), you must first allocate the object with your custom allocator, +then pass its pointer to PyObject_{Init, InitVar} for filling in its Python- +specific fields: reference count, type pointer, possibly others. You should +be aware that Python has no control over these objects because they don't +cooperate with the Python memory manager. Such objects may not be eligible +for automatic garbage collection and you have to make sure that they are +released accordingly whenever their destructor gets called (cf. the specific +form of memory management you're using). + +Unless you have specific memory management requirements, use +PyObject_{New, NewVar, Del}. +*/ + +/* + * Raw object memory interface + * =========================== + */ + +/* Functions to call the same malloc/realloc/free as used by Python's + object allocator. If WITH_PYMALLOC is enabled, these may differ from + the platform malloc/realloc/free. The Python object allocator is + designed for fast, cache-conscious allocation of many "small" objects, + and with low hidden memory overhead. + + PyObject_Malloc(0) returns a unique non-NULL pointer if possible. + + PyObject_Realloc(NULL, n) acts like PyObject_Malloc(n). + PyObject_Realloc(p != NULL, 0) does not return NULL, or free the memory + at p. + + Returned pointers must be checked for NULL explicitly; no action is + performed on failure other than to return NULL (no warning it printed, no + exception is set, etc). + + For allocating objects, use PyObject_{New, NewVar} instead whenever + possible. The PyObject_{Malloc, Realloc, Free} family is exposed + so that you can exploit Python's small-block allocator for non-object + uses. If you must use these routines to allocate object memory, make sure + the object gets initialized via PyObject_{Init, InitVar} after obtaining + the raw memory. +*/ +PyAPI_FUNC(void *) PyObject_Malloc(size_t size); +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03050000 +PyAPI_FUNC(void *) PyObject_Calloc(size_t nelem, size_t elsize); +#endif +PyAPI_FUNC(void *) PyObject_Realloc(void *ptr, size_t new_size); +PyAPI_FUNC(void) PyObject_Free(void *ptr); + +#ifndef Py_LIMITED_API +/* This function returns the number of allocated memory blocks, regardless of size */ +PyAPI_FUNC(Py_ssize_t) _Py_GetAllocatedBlocks(void); +#endif /* !Py_LIMITED_API */ + +/* Macros */ +#ifdef WITH_PYMALLOC +#ifndef Py_LIMITED_API +PyAPI_FUNC(int) _PyObject_DebugMallocStats(FILE *out); +#endif /* #ifndef Py_LIMITED_API */ +#endif + +/* Macros */ +#define PyObject_MALLOC PyObject_Malloc +#define PyObject_REALLOC PyObject_Realloc +#define PyObject_FREE PyObject_Free +#define PyObject_Del PyObject_Free +#define PyObject_DEL PyObject_Free + + +/* + * Generic object allocator interface + * ================================== + */ + +/* Functions */ +PyAPI_FUNC(PyObject *) PyObject_Init(PyObject *, PyTypeObject *); +PyAPI_FUNC(PyVarObject *) PyObject_InitVar(PyVarObject *, + PyTypeObject *, Py_ssize_t); +PyAPI_FUNC(PyObject *) _PyObject_New(PyTypeObject *); +PyAPI_FUNC(PyVarObject *) _PyObject_NewVar(PyTypeObject *, Py_ssize_t); + +#define PyObject_New(type, typeobj) \ + ( (type *) _PyObject_New(typeobj) ) +#define PyObject_NewVar(type, typeobj, n) \ + ( (type *) _PyObject_NewVar((typeobj), (n)) ) + +/* Macros trading binary compatibility for speed. See also pymem.h. 
+ Note that these macros expect non-NULL object pointers.*/ +#define PyObject_INIT(op, typeobj) \ + ( Py_TYPE(op) = (typeobj), _Py_NewReference((PyObject *)(op)), (op) ) +#define PyObject_INIT_VAR(op, typeobj, size) \ + ( Py_SIZE(op) = (size), PyObject_INIT((op), (typeobj)) ) + +#define _PyObject_SIZE(typeobj) ( (typeobj)->tp_basicsize ) + +/* _PyObject_VAR_SIZE returns the number of bytes (as size_t) allocated for a + vrbl-size object with nitems items, exclusive of gc overhead (if any). The + value is rounded up to the closest multiple of sizeof(void *), in order to + ensure that pointer fields at the end of the object are correctly aligned + for the platform (this is of special importance for subclasses of, e.g., + str or int, so that pointers can be stored after the embedded data). + + Note that there's no memory wastage in doing this, as malloc has to + return (at worst) pointer-aligned memory anyway. +*/ +#if ((SIZEOF_VOID_P - 1) & SIZEOF_VOID_P) != 0 +# error "_PyObject_VAR_SIZE requires SIZEOF_VOID_P be a power of 2" +#endif + +#define _PyObject_VAR_SIZE(typeobj, nitems) \ + _Py_SIZE_ROUND_UP((typeobj)->tp_basicsize + \ + (nitems)*(typeobj)->tp_itemsize, \ + SIZEOF_VOID_P) + +#define PyObject_NEW(type, typeobj) \ +( (type *) PyObject_Init( \ + (PyObject *) PyObject_MALLOC( _PyObject_SIZE(typeobj) ), (typeobj)) ) + +#define PyObject_NEW_VAR(type, typeobj, n) \ +( (type *) PyObject_InitVar( \ + (PyVarObject *) PyObject_MALLOC(_PyObject_VAR_SIZE((typeobj),(n)) ),\ + (typeobj), (n)) ) + +/* This example code implements an object constructor with a custom + allocator, where PyObject_New is inlined, and shows the important + distinction between two steps (at least): + 1) the actual allocation of the object storage; + 2) the initialization of the Python specific fields + in this storage with PyObject_{Init, InitVar}. + + PyObject * + YourObject_New(...) + { + PyObject *op; + + op = (PyObject *) Your_Allocator(_PyObject_SIZE(YourTypeStruct)); + if (op == NULL) + return PyErr_NoMemory(); + + PyObject_Init(op, &YourTypeStruct); + + op->ob_field = value; + ... + return op; + } + + Note that in C++, the use of the new operator usually implies that + the 1st step is performed automatically for you, so in a C++ class + constructor you would start directly with PyObject_Init/InitVar +*/ + +#ifndef Py_LIMITED_API +typedef struct { + /* user context passed as the first argument to the 2 functions */ + void *ctx; + + /* allocate an arena of size bytes */ + void* (*alloc) (void *ctx, size_t size); + + /* free an arena */ + void (*free) (void *ctx, void *ptr, size_t size); +} PyObjectArenaAllocator; + +/* Get the arena allocator. */ +PyAPI_FUNC(void) PyObject_GetArenaAllocator(PyObjectArenaAllocator *allocator); + +/* Set the arena allocator. */ +PyAPI_FUNC(void) PyObject_SetArenaAllocator(PyObjectArenaAllocator *allocator); +#endif + + +/* + * Garbage Collection Support + * ========================== + */ + +/* C equivalent of gc.collect() which ignores the state of gc.enabled. 
*/ +PyAPI_FUNC(Py_ssize_t) PyGC_Collect(void); + +#ifndef Py_LIMITED_API +PyAPI_FUNC(Py_ssize_t) _PyGC_CollectNoFail(void); +PyAPI_FUNC(Py_ssize_t) _PyGC_CollectIfEnabled(void); +#endif + +/* Test if a type has a GC head */ +#define PyType_IS_GC(t) PyType_HasFeature((t), Py_TPFLAGS_HAVE_GC) + +/* Test if an object has a GC head */ +#define PyObject_IS_GC(o) (PyType_IS_GC(Py_TYPE(o)) && \ + (Py_TYPE(o)->tp_is_gc == NULL || Py_TYPE(o)->tp_is_gc(o))) + +PyAPI_FUNC(PyVarObject *) _PyObject_GC_Resize(PyVarObject *, Py_ssize_t); +#define PyObject_GC_Resize(type, op, n) \ + ( (type *) _PyObject_GC_Resize((PyVarObject *)(op), (n)) ) + +/* GC information is stored BEFORE the object structure. */ +#ifndef Py_LIMITED_API +typedef union _gc_head { + struct { + union _gc_head *gc_next; + union _gc_head *gc_prev; + Py_ssize_t gc_refs; + } gc; + double dummy; /* force worst-case alignment */ +} PyGC_Head; + +extern PyGC_Head *_PyGC_generation0; + +#define _Py_AS_GC(o) ((PyGC_Head *)(o)-1) + +/* Bit 0 is set when tp_finalize is called */ +#define _PyGC_REFS_MASK_FINALIZED (1 << 0) +/* The (N-1) most significant bits contain the gc state / refcount */ +#define _PyGC_REFS_SHIFT (1) +#define _PyGC_REFS_MASK (((size_t) -1) << _PyGC_REFS_SHIFT) + +#define _PyGCHead_REFS(g) ((g)->gc.gc_refs >> _PyGC_REFS_SHIFT) +#define _PyGCHead_SET_REFS(g, v) do { \ + (g)->gc.gc_refs = ((g)->gc.gc_refs & ~_PyGC_REFS_MASK) \ + | (((size_t)(v)) << _PyGC_REFS_SHIFT); \ + } while (0) +#define _PyGCHead_DECREF(g) ((g)->gc.gc_refs -= 1 << _PyGC_REFS_SHIFT) + +#define _PyGCHead_FINALIZED(g) (((g)->gc.gc_refs & _PyGC_REFS_MASK_FINALIZED) != 0) +#define _PyGCHead_SET_FINALIZED(g, v) do { \ + (g)->gc.gc_refs = ((g)->gc.gc_refs & ~_PyGC_REFS_MASK_FINALIZED) \ + | (v != 0); \ + } while (0) + +#define _PyGC_FINALIZED(o) _PyGCHead_FINALIZED(_Py_AS_GC(o)) +#define _PyGC_SET_FINALIZED(o, v) _PyGCHead_SET_FINALIZED(_Py_AS_GC(o), v) + +#define _PyGC_REFS(o) _PyGCHead_REFS(_Py_AS_GC(o)) + +#define _PyGC_REFS_UNTRACKED (-2) +#define _PyGC_REFS_REACHABLE (-3) +#define _PyGC_REFS_TENTATIVELY_UNREACHABLE (-4) + +/* Tell the GC to track this object. NB: While the object is tracked the + * collector it must be safe to call the ob_traverse method. */ +#define _PyObject_GC_TRACK(o) do { \ + PyGC_Head *g = _Py_AS_GC(o); \ + if (_PyGCHead_REFS(g) != _PyGC_REFS_UNTRACKED) \ + Py_FatalError("GC object already tracked"); \ + _PyGCHead_SET_REFS(g, _PyGC_REFS_REACHABLE); \ + g->gc.gc_next = _PyGC_generation0; \ + g->gc.gc_prev = _PyGC_generation0->gc.gc_prev; \ + g->gc.gc_prev->gc.gc_next = g; \ + _PyGC_generation0->gc.gc_prev = g; \ + } while (0); + +/* Tell the GC to stop tracking this object. + * gc_next doesn't need to be set to NULL, but doing so is a good + * way to provoke memory errors if calling code is confused. + */ +#define _PyObject_GC_UNTRACK(o) do { \ + PyGC_Head *g = _Py_AS_GC(o); \ + assert(_PyGCHead_REFS(g) != _PyGC_REFS_UNTRACKED); \ + _PyGCHead_SET_REFS(g, _PyGC_REFS_UNTRACKED); \ + g->gc.gc_prev->gc.gc_next = g->gc.gc_next; \ + g->gc.gc_next->gc.gc_prev = g->gc.gc_prev; \ + g->gc.gc_next = NULL; \ + } while (0); + +/* True if the object is currently tracked by the GC. */ +#define _PyObject_GC_IS_TRACKED(o) \ + (_PyGC_REFS(o) != _PyGC_REFS_UNTRACKED) + +/* True if the object may be tracked by the GC in the future, or already is. + This can be useful to implement some optimizations. 
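+   For instance (hedged sketch, not upstream code; 'item' and
+   'keep_tracking' are placeholder names), a container deciding whether it
+   can stay untracked may scan its elements:
+
+       if (_PyObject_GC_MAY_BE_TRACKED(item))
+           keep_tracking = 1;   /* 'item' could still join a reference cycle */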
*/ +#define _PyObject_GC_MAY_BE_TRACKED(obj) \ + (PyObject_IS_GC(obj) && \ + (!PyTuple_CheckExact(obj) || _PyObject_GC_IS_TRACKED(obj))) +#endif /* Py_LIMITED_API */ + +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject *) _PyObject_GC_Malloc(size_t size); +PyAPI_FUNC(PyObject *) _PyObject_GC_Calloc(size_t size); +#endif /* !Py_LIMITED_API */ +PyAPI_FUNC(PyObject *) _PyObject_GC_New(PyTypeObject *); +PyAPI_FUNC(PyVarObject *) _PyObject_GC_NewVar(PyTypeObject *, Py_ssize_t); +PyAPI_FUNC(void) PyObject_GC_Track(void *); +PyAPI_FUNC(void) PyObject_GC_UnTrack(void *); +PyAPI_FUNC(void) PyObject_GC_Del(void *); + +#define PyObject_GC_New(type, typeobj) \ + ( (type *) _PyObject_GC_New(typeobj) ) +#define PyObject_GC_NewVar(type, typeobj, n) \ + ( (type *) _PyObject_GC_NewVar((typeobj), (n)) ) + + +/* Utility macro to help write tp_traverse functions. + * To use this macro, the tp_traverse function must name its arguments + * "visit" and "arg". This is intended to keep tp_traverse functions + * looking as much alike as possible. + */ +#define Py_VISIT(op) \ + do { \ + if (op) { \ + int vret = visit((PyObject *)(op), arg); \ + if (vret) \ + return vret; \ + } \ + } while (0) + + +/* Test if a type supports weak references */ +#define PyType_SUPPORTS_WEAKREFS(t) ((t)->tp_weaklistoffset > 0) + +#define PyObject_GET_WEAKREFS_LISTPTR(o) \ + ((PyObject **) (((char *) (o)) + Py_TYPE(o)->tp_weaklistoffset)) + +#ifdef __cplusplus +} +#endif +#endif /* !Py_OBJIMPL_H */ diff --git a/venv/Include/odictobject.h b/venv/Include/odictobject.h new file mode 100644 index 00000000..8378dc4b --- /dev/null +++ b/venv/Include/odictobject.h @@ -0,0 +1,43 @@ +#ifndef Py_ODICTOBJECT_H +#define Py_ODICTOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + + +/* OrderedDict */ +/* This API is optional and mostly redundant. 
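+   A minimal usage sketch of the functions declared below (illustrative
+   only; 'key' and 'value' are placeholders and error handling is partly
+   elided):
+
+       PyObject *od = PyODict_New();
+       if (od != NULL && PyODict_SetItem(od, key, value) == 0) {
+           PyObject *v = PyODict_GetItem(od, key);   /* borrowed reference */
+           /* ... use v ... */
+       }
+       Py_XDECREF(od);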
*/ + +#ifndef Py_LIMITED_API + +typedef struct _odictobject PyODictObject; + +PyAPI_DATA(PyTypeObject) PyODict_Type; +PyAPI_DATA(PyTypeObject) PyODictIter_Type; +PyAPI_DATA(PyTypeObject) PyODictKeys_Type; +PyAPI_DATA(PyTypeObject) PyODictItems_Type; +PyAPI_DATA(PyTypeObject) PyODictValues_Type; + +#define PyODict_Check(op) PyObject_TypeCheck(op, &PyODict_Type) +#define PyODict_CheckExact(op) (Py_TYPE(op) == &PyODict_Type) +#define PyODict_SIZE(op) PyDict_GET_SIZE((op)) + +PyAPI_FUNC(PyObject *) PyODict_New(void); +PyAPI_FUNC(int) PyODict_SetItem(PyObject *od, PyObject *key, PyObject *item); +PyAPI_FUNC(int) PyODict_DelItem(PyObject *od, PyObject *key); + +/* wrappers around PyDict* functions */ +#define PyODict_GetItem(od, key) PyDict_GetItem((PyObject *)od, key) +#define PyODict_GetItemWithError(od, key) \ + PyDict_GetItemWithError((PyObject *)od, key) +#define PyODict_Contains(od, key) PyDict_Contains((PyObject *)od, key) +#define PyODict_Size(od) PyDict_Size((PyObject *)od) +#define PyODict_GetItemString(od, key) \ + PyDict_GetItemString((PyObject *)od, key) + +#endif + +#ifdef __cplusplus +} +#endif +#endif /* !Py_ODICTOBJECT_H */ diff --git a/venv/Include/opcode.h b/venv/Include/opcode.h new file mode 100644 index 00000000..fc6cbf3a --- /dev/null +++ b/venv/Include/opcode.h @@ -0,0 +1,147 @@ +/* Auto-generated by Tools/scripts/generate_opcode_h.py */ +#ifndef Py_OPCODE_H +#define Py_OPCODE_H +#ifdef __cplusplus +extern "C" { +#endif + + + /* Instruction opcodes for compiled code */ +#define POP_TOP 1 +#define ROT_TWO 2 +#define ROT_THREE 3 +#define DUP_TOP 4 +#define DUP_TOP_TWO 5 +#define NOP 9 +#define UNARY_POSITIVE 10 +#define UNARY_NEGATIVE 11 +#define UNARY_NOT 12 +#define UNARY_INVERT 15 +#define BINARY_MATRIX_MULTIPLY 16 +#define INPLACE_MATRIX_MULTIPLY 17 +#define BINARY_POWER 19 +#define BINARY_MULTIPLY 20 +#define BINARY_MODULO 22 +#define BINARY_ADD 23 +#define BINARY_SUBTRACT 24 +#define BINARY_SUBSCR 25 +#define BINARY_FLOOR_DIVIDE 26 +#define BINARY_TRUE_DIVIDE 27 +#define INPLACE_FLOOR_DIVIDE 28 +#define INPLACE_TRUE_DIVIDE 29 +#define GET_AITER 50 +#define GET_ANEXT 51 +#define BEFORE_ASYNC_WITH 52 +#define INPLACE_ADD 55 +#define INPLACE_SUBTRACT 56 +#define INPLACE_MULTIPLY 57 +#define INPLACE_MODULO 59 +#define STORE_SUBSCR 60 +#define DELETE_SUBSCR 61 +#define BINARY_LSHIFT 62 +#define BINARY_RSHIFT 63 +#define BINARY_AND 64 +#define BINARY_XOR 65 +#define BINARY_OR 66 +#define INPLACE_POWER 67 +#define GET_ITER 68 +#define GET_YIELD_FROM_ITER 69 +#define PRINT_EXPR 70 +#define LOAD_BUILD_CLASS 71 +#define YIELD_FROM 72 +#define GET_AWAITABLE 73 +#define INPLACE_LSHIFT 75 +#define INPLACE_RSHIFT 76 +#define INPLACE_AND 77 +#define INPLACE_XOR 78 +#define INPLACE_OR 79 +#define BREAK_LOOP 80 +#define WITH_CLEANUP_START 81 +#define WITH_CLEANUP_FINISH 82 +#define RETURN_VALUE 83 +#define IMPORT_STAR 84 +#define SETUP_ANNOTATIONS 85 +#define YIELD_VALUE 86 +#define POP_BLOCK 87 +#define END_FINALLY 88 +#define POP_EXCEPT 89 +#define HAVE_ARGUMENT 90 +#define STORE_NAME 90 +#define DELETE_NAME 91 +#define UNPACK_SEQUENCE 92 +#define FOR_ITER 93 +#define UNPACK_EX 94 +#define STORE_ATTR 95 +#define DELETE_ATTR 96 +#define STORE_GLOBAL 97 +#define DELETE_GLOBAL 98 +#define LOAD_CONST 100 +#define LOAD_NAME 101 +#define BUILD_TUPLE 102 +#define BUILD_LIST 103 +#define BUILD_SET 104 +#define BUILD_MAP 105 +#define LOAD_ATTR 106 +#define COMPARE_OP 107 +#define IMPORT_NAME 108 +#define IMPORT_FROM 109 +#define JUMP_FORWARD 110 +#define JUMP_IF_FALSE_OR_POP 111 +#define 
JUMP_IF_TRUE_OR_POP 112 +#define JUMP_ABSOLUTE 113 +#define POP_JUMP_IF_FALSE 114 +#define POP_JUMP_IF_TRUE 115 +#define LOAD_GLOBAL 116 +#define CONTINUE_LOOP 119 +#define SETUP_LOOP 120 +#define SETUP_EXCEPT 121 +#define SETUP_FINALLY 122 +#define LOAD_FAST 124 +#define STORE_FAST 125 +#define DELETE_FAST 126 +#define RAISE_VARARGS 130 +#define CALL_FUNCTION 131 +#define MAKE_FUNCTION 132 +#define BUILD_SLICE 133 +#define LOAD_CLOSURE 135 +#define LOAD_DEREF 136 +#define STORE_DEREF 137 +#define DELETE_DEREF 138 +#define CALL_FUNCTION_KW 141 +#define CALL_FUNCTION_EX 142 +#define SETUP_WITH 143 +#define EXTENDED_ARG 144 +#define LIST_APPEND 145 +#define SET_ADD 146 +#define MAP_ADD 147 +#define LOAD_CLASSDEREF 148 +#define BUILD_LIST_UNPACK 149 +#define BUILD_MAP_UNPACK 150 +#define BUILD_MAP_UNPACK_WITH_CALL 151 +#define BUILD_TUPLE_UNPACK 152 +#define BUILD_SET_UNPACK 153 +#define SETUP_ASYNC_WITH 154 +#define FORMAT_VALUE 155 +#define BUILD_CONST_KEY_MAP 156 +#define BUILD_STRING 157 +#define BUILD_TUPLE_UNPACK_WITH_CALL 158 +#define LOAD_METHOD 160 +#define CALL_METHOD 161 + +/* EXCEPT_HANDLER is a special, implicit block type which is created when + entering an except handler. It is not an opcode but we define it here + as we want it to be available to both frameobject.c and ceval.c, while + remaining private.*/ +#define EXCEPT_HANDLER 257 + + +enum cmp_op {PyCmp_LT=Py_LT, PyCmp_LE=Py_LE, PyCmp_EQ=Py_EQ, PyCmp_NE=Py_NE, + PyCmp_GT=Py_GT, PyCmp_GE=Py_GE, PyCmp_IN, PyCmp_NOT_IN, + PyCmp_IS, PyCmp_IS_NOT, PyCmp_EXC_MATCH, PyCmp_BAD}; + +#define HAS_ARG(op) ((op) >= HAVE_ARGUMENT) + +#ifdef __cplusplus +} +#endif +#endif /* !Py_OPCODE_H */ diff --git a/venv/Include/osdefs.h b/venv/Include/osdefs.h new file mode 100644 index 00000000..bd84c1c1 --- /dev/null +++ b/venv/Include/osdefs.h @@ -0,0 +1,47 @@ +#ifndef Py_OSDEFS_H +#define Py_OSDEFS_H +#ifdef __cplusplus +extern "C" { +#endif + + +/* Operating system dependencies */ + +#ifdef MS_WINDOWS +#define SEP L'\\' +#define ALTSEP L'/' +#define MAXPATHLEN 256 +#define DELIM L';' +#endif + +/* Filename separator */ +#ifndef SEP +#define SEP L'/' +#endif + +/* Max pathname length */ +#ifdef __hpux +#include +#include +#ifndef PATH_MAX +#define PATH_MAX MAXPATHLEN +#endif +#endif + +#ifndef MAXPATHLEN +#if defined(PATH_MAX) && PATH_MAX > 1024 +#define MAXPATHLEN PATH_MAX +#else +#define MAXPATHLEN 1024 +#endif +#endif + +/* Search path entry delimiter */ +#ifndef DELIM +#define DELIM L':' +#endif + +#ifdef __cplusplus +} +#endif +#endif /* !Py_OSDEFS_H */ diff --git a/venv/Include/osmodule.h b/venv/Include/osmodule.h new file mode 100644 index 00000000..9095c2fd --- /dev/null +++ b/venv/Include/osmodule.h @@ -0,0 +1,17 @@ + +/* os module interface */ + +#ifndef Py_OSMODULE_H +#define Py_OSMODULE_H +#ifdef __cplusplus +extern "C" { +#endif + +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03060000 +PyAPI_FUNC(PyObject *) PyOS_FSPath(PyObject *path); +#endif + +#ifdef __cplusplus +} +#endif +#endif /* !Py_OSMODULE_H */ diff --git a/venv/Include/parsetok.h b/venv/Include/parsetok.h new file mode 100644 index 00000000..c9407a3f --- /dev/null +++ b/venv/Include/parsetok.h @@ -0,0 +1,108 @@ + +/* Parser-tokenizer link interface */ +#ifndef Py_LIMITED_API +#ifndef Py_PARSETOK_H +#define Py_PARSETOK_H +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct { + int error; +#ifndef PGEN + /* The filename is useless for pgen, see comment in tok_state structure */ + PyObject *filename; +#endif + int lineno; + int offset; + char *text; /* 
UTF-8-encoded string */ + int token; + int expected; +} perrdetail; + +#if 0 +#define PyPARSE_YIELD_IS_KEYWORD 0x0001 +#endif + +#define PyPARSE_DONT_IMPLY_DEDENT 0x0002 + +#if 0 +#define PyPARSE_WITH_IS_KEYWORD 0x0003 +#define PyPARSE_PRINT_IS_FUNCTION 0x0004 +#define PyPARSE_UNICODE_LITERALS 0x0008 +#endif + +#define PyPARSE_IGNORE_COOKIE 0x0010 +#define PyPARSE_BARRY_AS_BDFL 0x0020 + +PyAPI_FUNC(node *) PyParser_ParseString(const char *, grammar *, int, + perrdetail *); +PyAPI_FUNC(node *) PyParser_ParseFile (FILE *, const char *, grammar *, int, + const char *, const char *, + perrdetail *); + +PyAPI_FUNC(node *) PyParser_ParseStringFlags(const char *, grammar *, int, + perrdetail *, int); +PyAPI_FUNC(node *) PyParser_ParseFileFlags( + FILE *fp, + const char *filename, /* decoded from the filesystem encoding */ + const char *enc, + grammar *g, + int start, + const char *ps1, + const char *ps2, + perrdetail *err_ret, + int flags); +PyAPI_FUNC(node *) PyParser_ParseFileFlagsEx( + FILE *fp, + const char *filename, /* decoded from the filesystem encoding */ + const char *enc, + grammar *g, + int start, + const char *ps1, + const char *ps2, + perrdetail *err_ret, + int *flags); +PyAPI_FUNC(node *) PyParser_ParseFileObject( + FILE *fp, + PyObject *filename, + const char *enc, + grammar *g, + int start, + const char *ps1, + const char *ps2, + perrdetail *err_ret, + int *flags); + +PyAPI_FUNC(node *) PyParser_ParseStringFlagsFilename( + const char *s, + const char *filename, /* decoded from the filesystem encoding */ + grammar *g, + int start, + perrdetail *err_ret, + int flags); +PyAPI_FUNC(node *) PyParser_ParseStringFlagsFilenameEx( + const char *s, + const char *filename, /* decoded from the filesystem encoding */ + grammar *g, + int start, + perrdetail *err_ret, + int *flags); +PyAPI_FUNC(node *) PyParser_ParseStringObject( + const char *s, + PyObject *filename, + grammar *g, + int start, + perrdetail *err_ret, + int *flags); + +/* Note that the following functions are defined in pythonrun.c, + not in parsetok.c */ +PyAPI_FUNC(void) PyParser_SetError(perrdetail *); +PyAPI_FUNC(void) PyParser_ClearError(perrdetail *); + +#ifdef __cplusplus +} +#endif +#endif /* !Py_PARSETOK_H */ +#endif /* !Py_LIMITED_API */ diff --git a/venv/Include/patchlevel.h b/venv/Include/patchlevel.h new file mode 100644 index 00000000..13876bdc --- /dev/null +++ b/venv/Include/patchlevel.h @@ -0,0 +1,35 @@ + +/* Python version identification scheme. + + When the major or minor version changes, the VERSION variable in + configure.ac must also be changed. + + There is also (independent) API version information in modsupport.h. +*/ + +/* Values for PY_RELEASE_LEVEL */ +#define PY_RELEASE_LEVEL_ALPHA 0xA +#define PY_RELEASE_LEVEL_BETA 0xB +#define PY_RELEASE_LEVEL_GAMMA 0xC /* For release candidates */ +#define PY_RELEASE_LEVEL_FINAL 0xF /* Serial should be 0 here */ + /* Higher for patch releases */ + +/* Version parsed out into numeric values */ +/*--start constants--*/ +#define PY_MAJOR_VERSION 3 +#define PY_MINOR_VERSION 7 +#define PY_MICRO_VERSION 0 +#define PY_RELEASE_LEVEL PY_RELEASE_LEVEL_FINAL +#define PY_RELEASE_SERIAL 0 + +/* Version as a string */ +#define PY_VERSION "3.7.0" +/*--end constants--*/ + +/* Version as a single 4-byte hex number, e.g. 0x010502B2 == 1.5.2b2. + Use this for numeric comparisons, e.g. #if PY_VERSION_HEX >= ... 
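+   Worked example: with the constants above (3.7.0, final, serial 0) this
+   evaluates to (3<<24)|(7<<16)|(0<<8)|(0xF<<4)|0 == 0x030700F0, so code
+   can test e.g. "#if PY_VERSION_HEX >= 0x03070000" for "3.7 or newer".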
*/ +#define PY_VERSION_HEX ((PY_MAJOR_VERSION << 24) | \ + (PY_MINOR_VERSION << 16) | \ + (PY_MICRO_VERSION << 8) | \ + (PY_RELEASE_LEVEL << 4) | \ + (PY_RELEASE_SERIAL << 0)) diff --git a/venv/Include/pgen.h b/venv/Include/pgen.h new file mode 100644 index 00000000..8a325ed0 --- /dev/null +++ b/venv/Include/pgen.h @@ -0,0 +1,18 @@ +#ifndef Py_PGEN_H +#define Py_PGEN_H +#ifdef __cplusplus +extern "C" { +#endif + + +/* Parser generator interface */ + +extern grammar *meta_grammar(void); + +struct _node; +extern grammar *pgen(struct _node *); + +#ifdef __cplusplus +} +#endif +#endif /* !Py_PGEN_H */ diff --git a/venv/Include/pgenheaders.h b/venv/Include/pgenheaders.h new file mode 100644 index 00000000..dbc5e0a5 --- /dev/null +++ b/venv/Include/pgenheaders.h @@ -0,0 +1,43 @@ +#ifndef Py_PGENHEADERS_H +#define Py_PGENHEADERS_H +#ifdef __cplusplus +extern "C" { +#endif + + +/* Include files and extern declarations used by most of the parser. */ + +#include "Python.h" + +PyAPI_FUNC(void) PySys_WriteStdout(const char *format, ...) + Py_GCC_ATTRIBUTE((format(printf, 1, 2))); +PyAPI_FUNC(void) PySys_WriteStderr(const char *format, ...) + Py_GCC_ATTRIBUTE((format(printf, 1, 2))); + +#define addarc _Py_addarc +#define addbit _Py_addbit +#define adddfa _Py_adddfa +#define addfirstsets _Py_addfirstsets +#define addlabel _Py_addlabel +#define addstate _Py_addstate +#define delbitset _Py_delbitset +#define dumptree _Py_dumptree +#define findlabel _Py_findlabel +#define freegrammar _Py_freegrammar +#define mergebitset _Py_mergebitset +#define meta_grammar _Py_meta_grammar +#define newbitset _Py_newbitset +#define newgrammar _Py_newgrammar +#define pgen _Py_pgen +#define printgrammar _Py_printgrammar +#define printnonterminals _Py_printnonterminals +#define printtree _Py_printtree +#define samebitset _Py_samebitset +#define showtree _Py_showtree +#define tok_dump _Py_tok_dump +#define translatelabels _Py_translatelabels + +#ifdef __cplusplus +} +#endif +#endif /* !Py_PGENHEADERS_H */ diff --git a/venv/Include/py_curses.h b/venv/Include/py_curses.h new file mode 100644 index 00000000..0eebc362 --- /dev/null +++ b/venv/Include/py_curses.h @@ -0,0 +1,159 @@ + +#ifndef Py_CURSES_H +#define Py_CURSES_H + +#ifdef __APPLE__ +/* +** On Mac OS X 10.2 [n]curses.h and stdlib.h use different guards +** against multiple definition of wchar_t. +*/ +#ifdef _BSD_WCHAR_T_DEFINED_ +#define _WCHAR_T +#endif +#endif /* __APPLE__ */ + +/* On FreeBSD, [n]curses.h and stdlib.h/wchar.h use different guards + against multiple definition of wchar_t and wint_t. */ +#if defined(__FreeBSD__) && defined(_XOPEN_SOURCE_EXTENDED) +# ifndef __wchar_t +# define __wchar_t +# endif +# ifndef __wint_t +# define __wint_t +# endif +#endif + +#if !defined(HAVE_CURSES_IS_PAD) && defined(WINDOW_HAS_FLAGS) +/* The following definition is necessary for ncurses 5.7; without it, + some of [n]curses.h set NCURSES_OPAQUE to 1, and then Python + can't get at the WINDOW flags field. */ +#define NCURSES_OPAQUE 0 +#endif + +#ifdef HAVE_NCURSES_H +#include +#else +#include +#endif + +#ifdef HAVE_NCURSES_H +/* configure was checking , but we will + use , which has some or all these features. 
*/ +#if !defined(WINDOW_HAS_FLAGS) && !(NCURSES_OPAQUE+0) +#define WINDOW_HAS_FLAGS 1 +#endif +#if !defined(HAVE_CURSES_IS_PAD) && NCURSES_VERSION_PATCH+0 >= 20090906 +#define HAVE_CURSES_IS_PAD 1 +#endif +#ifndef MVWDELCH_IS_EXPRESSION +#define MVWDELCH_IS_EXPRESSION 1 +#endif +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +#define PyCurses_API_pointers 4 + +/* Type declarations */ + +typedef struct { + PyObject_HEAD + WINDOW *win; + char *encoding; +} PyCursesWindowObject; + +#define PyCursesWindow_Check(v) (Py_TYPE(v) == &PyCursesWindow_Type) + +#define PyCurses_CAPSULE_NAME "_curses._C_API" + + +#ifdef CURSES_MODULE +/* This section is used when compiling _cursesmodule.c */ + +#else +/* This section is used in modules that use the _cursesmodule API */ + +static void **PyCurses_API; + +#define PyCursesWindow_Type (*(PyTypeObject *) PyCurses_API[0]) +#define PyCursesSetupTermCalled {if (! ((int (*)(void))PyCurses_API[1]) () ) return NULL;} +#define PyCursesInitialised {if (! ((int (*)(void))PyCurses_API[2]) () ) return NULL;} +#define PyCursesInitialisedColor {if (! ((int (*)(void))PyCurses_API[3]) () ) return NULL;} + +#define import_curses() \ + PyCurses_API = (void **)PyCapsule_Import(PyCurses_CAPSULE_NAME, 1); + +#endif + +/* general error messages */ +static const char catchall_ERR[] = "curses function returned ERR"; +static const char catchall_NULL[] = "curses function returned NULL"; + +/* Function Prototype Macros - They are ugly but very, very useful. ;-) + + X - function name + TYPE - parameter Type + ERGSTR - format string for construction of the return value + PARSESTR - format string for argument parsing + */ + +#define NoArgNoReturnFunction(X) \ +static PyObject *PyCurses_ ## X (PyObject *self) \ +{ \ + PyCursesInitialised \ + return PyCursesCheckERR(X(), # X); } + +#define NoArgOrFlagNoReturnFunction(X) \ +static PyObject *PyCurses_ ## X (PyObject *self, PyObject *args) \ +{ \ + int flag = 0; \ + PyCursesInitialised \ + switch(PyTuple_Size(args)) { \ + case 0: \ + return PyCursesCheckERR(X(), # X); \ + case 1: \ + if (!PyArg_ParseTuple(args, "i;True(1) or False(0)", &flag)) return NULL; \ + if (flag) return PyCursesCheckERR(X(), # X); \ + else return PyCursesCheckERR(no ## X (), # X); \ + default: \ + PyErr_SetString(PyExc_TypeError, # X " requires 0 or 1 arguments"); \ + return NULL; } } + +#define NoArgReturnIntFunction(X) \ +static PyObject *PyCurses_ ## X (PyObject *self) \ +{ \ + PyCursesInitialised \ + return PyLong_FromLong((long) X()); } + + +#define NoArgReturnStringFunction(X) \ +static PyObject *PyCurses_ ## X (PyObject *self) \ +{ \ + PyCursesInitialised \ + return PyBytes_FromString(X()); } + +#define NoArgTrueFalseFunction(X) \ +static PyObject *PyCurses_ ## X (PyObject *self) \ +{ \ + PyCursesInitialised \ + if (X () == FALSE) { \ + Py_RETURN_FALSE; \ + } \ + Py_RETURN_TRUE; } + +#define NoArgNoReturnVoidFunction(X) \ +static PyObject *PyCurses_ ## X (PyObject *self) \ +{ \ + PyCursesInitialised \ + X(); \ + Py_RETURN_NONE; } + +#ifdef __cplusplus +} +#endif + +#endif /* !defined(Py_CURSES_H) */ + + diff --git a/venv/Include/pyarena.h b/venv/Include/pyarena.h new file mode 100644 index 00000000..db3ad018 --- /dev/null +++ b/venv/Include/pyarena.h @@ -0,0 +1,64 @@ +/* An arena-like memory interface for the compiler. 
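+   Typical lifecycle, as a hedged sketch only (the functions are documented
+   below; error handling elided, 'obj' is a placeholder):
+
+       PyArena *arena = PyArena_New();
+       void *buf = PyArena_Malloc(arena, 128);  /* freed when the arena is freed */
+       PyArena_AddPyObject(arena, obj);         /* DECREFed when the arena is freed */
+       PyArena_Free(arena);                     /* invalidates buf, releases obj */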
+ */ + +#ifndef Py_LIMITED_API +#ifndef Py_PYARENA_H +#define Py_PYARENA_H + +#ifdef __cplusplus +extern "C" { +#endif + + typedef struct _arena PyArena; + + /* PyArena_New() and PyArena_Free() create a new arena and free it, + respectively. Once an arena has been created, it can be used + to allocate memory via PyArena_Malloc(). Pointers to PyObject can + also be registered with the arena via PyArena_AddPyObject(), and the + arena will ensure that the PyObjects stay alive at least until + PyArena_Free() is called. When an arena is freed, all the memory it + allocated is freed, the arena releases internal references to registered + PyObject*, and none of its pointers are valid. + XXX (tim) What does "none of its pointers are valid" mean? Does it + XXX mean that pointers previously obtained via PyArena_Malloc() are + XXX no longer valid? (That's clearly true, but not sure that's what + XXX the text is trying to say.) + + PyArena_New() returns an arena pointer. On error, it + returns a negative number and sets an exception. + XXX (tim): Not true. On error, PyArena_New() actually returns NULL, + XXX and looks like it may or may not set an exception (e.g., if the + XXX internal PyList_New(0) returns NULL, PyArena_New() passes that on + XXX and an exception is set; OTOH, if the internal + XXX block_new(DEFAULT_BLOCK_SIZE) returns NULL, that's passed on but + XXX an exception is not set in that case). + */ + PyAPI_FUNC(PyArena *) PyArena_New(void); + PyAPI_FUNC(void) PyArena_Free(PyArena *); + + /* Mostly like malloc(), return the address of a block of memory spanning + * `size` bytes, or return NULL (without setting an exception) if enough + * new memory can't be obtained. Unlike malloc(0), PyArena_Malloc() with + * size=0 does not guarantee to return a unique pointer (the pointer + * returned may equal one or more other pointers obtained from + * PyArena_Malloc()). + * Note that pointers obtained via PyArena_Malloc() must never be passed to + * the system free() or realloc(), or to any of Python's similar memory- + * management functions. PyArena_Malloc()-obtained pointers remain valid + * until PyArena_Free(ar) is called, at which point all pointers obtained + * from the arena `ar` become invalid simultaneously. + */ + PyAPI_FUNC(void *) PyArena_Malloc(PyArena *, size_t size); + + /* This routine isn't a proper arena allocation routine. It takes + * a PyObject* and records it so that it can be DECREFed when the + * arena is freed. + */ + PyAPI_FUNC(int) PyArena_AddPyObject(PyArena *, PyObject *); + +#ifdef __cplusplus +} +#endif + +#endif /* !Py_PYARENA_H */ +#endif /* Py_LIMITED_API */ diff --git a/venv/Include/pyatomic.h b/venv/Include/pyatomic.h new file mode 100644 index 00000000..9a497a68 --- /dev/null +++ b/venv/Include/pyatomic.h @@ -0,0 +1,535 @@ +#ifndef Py_ATOMIC_H +#define Py_ATOMIC_H +#ifdef Py_BUILD_CORE + +#include "dynamic_annotations.h" + +#include "pyconfig.h" + +#if defined(HAVE_STD_ATOMIC) +#include +#endif + + +#if defined(_MSC_VER) +#include +#include +#endif + +/* This is modeled after the atomics interface from C1x, according to + * the draft at + * http://www.open-std.org/JTC1/SC22/wg14/www/docs/n1425.pdf. + * Operations and types are named the same except with a _Py_ prefix + * and have the same semantics. + * + * Beware, the implementations here are deep magic. 
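+   Hedged usage sketch (core-only: this header is only active under
+   Py_BUILD_CORE; the _relaxed shortcut macros are defined near the end of
+   this file):
+
+       static _Py_atomic_int flag;
+       _Py_atomic_store_relaxed(&flag, 1);
+       if (_Py_atomic_load_relaxed(&flag)) {
+           /* observed the store without holding any lock */
+       }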
+ */ + +#if defined(HAVE_STD_ATOMIC) + +typedef enum _Py_memory_order { + _Py_memory_order_relaxed = memory_order_relaxed, + _Py_memory_order_acquire = memory_order_acquire, + _Py_memory_order_release = memory_order_release, + _Py_memory_order_acq_rel = memory_order_acq_rel, + _Py_memory_order_seq_cst = memory_order_seq_cst +} _Py_memory_order; + +typedef struct _Py_atomic_address { + atomic_uintptr_t _value; +} _Py_atomic_address; + +typedef struct _Py_atomic_int { + atomic_int _value; +} _Py_atomic_int; + +#define _Py_atomic_signal_fence(/*memory_order*/ ORDER) \ + atomic_signal_fence(ORDER) + +#define _Py_atomic_thread_fence(/*memory_order*/ ORDER) \ + atomic_thread_fence(ORDER) + +#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \ + atomic_store_explicit(&(ATOMIC_VAL)->_value, NEW_VAL, ORDER) + +#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \ + atomic_load_explicit(&(ATOMIC_VAL)->_value, ORDER) + +/* Use builtin atomic operations in GCC >= 4.7 */ +#elif defined(HAVE_BUILTIN_ATOMIC) + +typedef enum _Py_memory_order { + _Py_memory_order_relaxed = __ATOMIC_RELAXED, + _Py_memory_order_acquire = __ATOMIC_ACQUIRE, + _Py_memory_order_release = __ATOMIC_RELEASE, + _Py_memory_order_acq_rel = __ATOMIC_ACQ_REL, + _Py_memory_order_seq_cst = __ATOMIC_SEQ_CST +} _Py_memory_order; + +typedef struct _Py_atomic_address { + uintptr_t _value; +} _Py_atomic_address; + +typedef struct _Py_atomic_int { + int _value; +} _Py_atomic_int; + +#define _Py_atomic_signal_fence(/*memory_order*/ ORDER) \ + __atomic_signal_fence(ORDER) + +#define _Py_atomic_thread_fence(/*memory_order*/ ORDER) \ + __atomic_thread_fence(ORDER) + +#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \ + (assert((ORDER) == __ATOMIC_RELAXED \ + || (ORDER) == __ATOMIC_SEQ_CST \ + || (ORDER) == __ATOMIC_RELEASE), \ + __atomic_store_n(&(ATOMIC_VAL)->_value, NEW_VAL, ORDER)) + +#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \ + (assert((ORDER) == __ATOMIC_RELAXED \ + || (ORDER) == __ATOMIC_SEQ_CST \ + || (ORDER) == __ATOMIC_ACQUIRE \ + || (ORDER) == __ATOMIC_CONSUME), \ + __atomic_load_n(&(ATOMIC_VAL)->_value, ORDER)) + +/* Only support GCC (for expression statements) and x86 (for simple + * atomic semantics) and MSVC x86/x64/ARM */ +#elif defined(__GNUC__) && (defined(__i386__) || defined(__amd64)) +typedef enum _Py_memory_order { + _Py_memory_order_relaxed, + _Py_memory_order_acquire, + _Py_memory_order_release, + _Py_memory_order_acq_rel, + _Py_memory_order_seq_cst +} _Py_memory_order; + +typedef struct _Py_atomic_address { + uintptr_t _value; +} _Py_atomic_address; + +typedef struct _Py_atomic_int { + int _value; +} _Py_atomic_int; + + +static __inline__ void +_Py_atomic_signal_fence(_Py_memory_order order) +{ + if (order != _Py_memory_order_relaxed) + __asm__ volatile("":::"memory"); +} + +static __inline__ void +_Py_atomic_thread_fence(_Py_memory_order order) +{ + if (order != _Py_memory_order_relaxed) + __asm__ volatile("mfence":::"memory"); +} + +/* Tell the race checker about this operation's effects. 
*/ +static __inline__ void +_Py_ANNOTATE_MEMORY_ORDER(const volatile void *address, _Py_memory_order order) +{ + (void)address; /* shut up -Wunused-parameter */ + switch(order) { + case _Py_memory_order_release: + case _Py_memory_order_acq_rel: + case _Py_memory_order_seq_cst: + _Py_ANNOTATE_HAPPENS_BEFORE(address); + break; + case _Py_memory_order_relaxed: + case _Py_memory_order_acquire: + break; + } + switch(order) { + case _Py_memory_order_acquire: + case _Py_memory_order_acq_rel: + case _Py_memory_order_seq_cst: + _Py_ANNOTATE_HAPPENS_AFTER(address); + break; + case _Py_memory_order_relaxed: + case _Py_memory_order_release: + break; + } +} + +#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \ + __extension__ ({ \ + __typeof__(ATOMIC_VAL) atomic_val = ATOMIC_VAL; \ + __typeof__(atomic_val->_value) new_val = NEW_VAL;\ + volatile __typeof__(new_val) *volatile_data = &atomic_val->_value; \ + _Py_memory_order order = ORDER; \ + _Py_ANNOTATE_MEMORY_ORDER(atomic_val, order); \ + \ + /* Perform the operation. */ \ + _Py_ANNOTATE_IGNORE_WRITES_BEGIN(); \ + switch(order) { \ + case _Py_memory_order_release: \ + _Py_atomic_signal_fence(_Py_memory_order_release); \ + /* fallthrough */ \ + case _Py_memory_order_relaxed: \ + *volatile_data = new_val; \ + break; \ + \ + case _Py_memory_order_acquire: \ + case _Py_memory_order_acq_rel: \ + case _Py_memory_order_seq_cst: \ + __asm__ volatile("xchg %0, %1" \ + : "+r"(new_val) \ + : "m"(atomic_val->_value) \ + : "memory"); \ + break; \ + } \ + _Py_ANNOTATE_IGNORE_WRITES_END(); \ + }) + +#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \ + __extension__ ({ \ + __typeof__(ATOMIC_VAL) atomic_val = ATOMIC_VAL; \ + __typeof__(atomic_val->_value) result; \ + volatile __typeof__(result) *volatile_data = &atomic_val->_value; \ + _Py_memory_order order = ORDER; \ + _Py_ANNOTATE_MEMORY_ORDER(atomic_val, order); \ + \ + /* Perform the operation. */ \ + _Py_ANNOTATE_IGNORE_READS_BEGIN(); \ + switch(order) { \ + case _Py_memory_order_release: \ + case _Py_memory_order_acq_rel: \ + case _Py_memory_order_seq_cst: \ + /* Loads on x86 are not releases by default, so need a */ \ + /* thread fence. */ \ + _Py_atomic_thread_fence(_Py_memory_order_release); \ + break; \ + default: \ + /* No fence */ \ + break; \ + } \ + result = *volatile_data; \ + switch(order) { \ + case _Py_memory_order_acquire: \ + case _Py_memory_order_acq_rel: \ + case _Py_memory_order_seq_cst: \ + /* Loads on x86 are automatically acquire operations so */ \ + /* can get by with just a compiler fence. */ \ + _Py_atomic_signal_fence(_Py_memory_order_acquire); \ + break; \ + default: \ + /* No fence */ \ + break; \ + } \ + _Py_ANNOTATE_IGNORE_READS_END(); \ + result; \ + }) + +#elif defined(_MSC_VER) +/* _Interlocked* functions provide a full memory barrier and are therefore + enough for acq_rel and seq_cst. If the HLE variants aren't available + in hardware they will fall back to a full memory barrier as well. + + This might affect performance but likely only in some very specific and + hard to meassure scenario. 
+*/ +#if defined(_M_IX86) || defined(_M_X64) +typedef enum _Py_memory_order { + _Py_memory_order_relaxed, + _Py_memory_order_acquire, + _Py_memory_order_release, + _Py_memory_order_acq_rel, + _Py_memory_order_seq_cst +} _Py_memory_order; + +typedef struct _Py_atomic_address { + volatile uintptr_t _value; +} _Py_atomic_address; + +typedef struct _Py_atomic_int { + volatile int _value; +} _Py_atomic_int; + + +#if defined(_M_X64) +#define _Py_atomic_store_64bit(ATOMIC_VAL, NEW_VAL, ORDER) \ + switch (ORDER) { \ + case _Py_memory_order_acquire: \ + _InterlockedExchange64_HLEAcquire((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \ + break; \ + case _Py_memory_order_release: \ + _InterlockedExchange64_HLERelease((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \ + break; \ + default: \ + _InterlockedExchange64((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \ + break; \ + } +#else +#define _Py_atomic_store_64bit(ATOMIC_VAL, NEW_VAL, ORDER) ((void)0); +#endif + +#define _Py_atomic_store_32bit(ATOMIC_VAL, NEW_VAL, ORDER) \ + switch (ORDER) { \ + case _Py_memory_order_acquire: \ + _InterlockedExchange_HLEAcquire((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \ + break; \ + case _Py_memory_order_release: \ + _InterlockedExchange_HLERelease((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \ + break; \ + default: \ + _InterlockedExchange((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \ + break; \ + } + +#if defined(_M_X64) +/* This has to be an intptr_t for now. + gil_created() uses -1 as a sentinel value, if this returns + a uintptr_t it will do an unsigned compare and crash +*/ +inline intptr_t _Py_atomic_load_64bit(volatile uintptr_t* value, int order) { + __int64 old; + switch (order) { + case _Py_memory_order_acquire: + { + do { + old = *value; + } while(_InterlockedCompareExchange64_HLEAcquire((volatile __int64*)value, old, old) != old); + break; + } + case _Py_memory_order_release: + { + do { + old = *value; + } while(_InterlockedCompareExchange64_HLERelease((volatile __int64*)value, old, old) != old); + break; + } + case _Py_memory_order_relaxed: + old = *value; + break; + default: + { + do { + old = *value; + } while(_InterlockedCompareExchange64((volatile __int64*)value, old, old) != old); + break; + } + } + return old; +} + +#else +#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *ATOMIC_VAL +#endif + +inline int _Py_atomic_load_32bit(volatile int* value, int order) { + long old; + switch (order) { + case _Py_memory_order_acquire: + { + do { + old = *value; + } while(_InterlockedCompareExchange_HLEAcquire((volatile long*)value, old, old) != old); + break; + } + case _Py_memory_order_release: + { + do { + old = *value; + } while(_InterlockedCompareExchange_HLERelease((volatile long*)value, old, old) != old); + break; + } + case _Py_memory_order_relaxed: + old = *value; + break; + default: + { + do { + old = *value; + } while(_InterlockedCompareExchange((volatile long*)value, old, old) != old); + break; + } + } + return old; +} + +#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \ + if (sizeof(*ATOMIC_VAL._value) == 8) { \ + _Py_atomic_store_64bit((volatile long long*)ATOMIC_VAL._value, NEW_VAL, ORDER) } else { \ + _Py_atomic_store_32bit((volatile long*)ATOMIC_VAL._value, NEW_VAL, ORDER) } + +#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \ + ( \ + sizeof(*(ATOMIC_VAL._value)) == 8 ? 
\ + _Py_atomic_load_64bit((volatile long long*)ATOMIC_VAL._value, ORDER) : \ + _Py_atomic_load_32bit((volatile long*)ATOMIC_VAL._value, ORDER) \ + ) +#elif defined(_M_ARM) || defined(_M_ARM64) +typedef enum _Py_memory_order { + _Py_memory_order_relaxed, + _Py_memory_order_acquire, + _Py_memory_order_release, + _Py_memory_order_acq_rel, + _Py_memory_order_seq_cst +} _Py_memory_order; + +typedef struct _Py_atomic_address { + volatile uintptr_t _value; +} _Py_atomic_address; + +typedef struct _Py_atomic_int { + volatile int _value; +} _Py_atomic_int; + + +#if defined(_M_ARM64) +#define _Py_atomic_store_64bit(ATOMIC_VAL, NEW_VAL, ORDER) \ + switch (ORDER) { \ + case _Py_memory_order_acquire: \ + _InterlockedExchange64_acq((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \ + break; \ + case _Py_memory_order_release: \ + _InterlockedExchange64_rel((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \ + break; \ + default: \ + _InterlockedExchange64((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \ + break; \ + } +#else +#define _Py_atomic_store_64bit(ATOMIC_VAL, NEW_VAL, ORDER) ((void)0); +#endif + +#define _Py_atomic_store_32bit(ATOMIC_VAL, NEW_VAL, ORDER) \ + switch (ORDER) { \ + case _Py_memory_order_acquire: \ + _InterlockedExchange_acq((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \ + break; \ + case _Py_memory_order_release: \ + _InterlockedExchange_rel((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \ + break; \ + default: \ + _InterlockedExchange((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \ + break; \ + } + +#if defined(_M_ARM64) +/* This has to be an intptr_t for now. + gil_created() uses -1 as a sentinel value, if this returns + a uintptr_t it will do an unsigned compare and crash +*/ +inline intptr_t _Py_atomic_load_64bit(volatile uintptr_t* value, int order) { + uintptr_t old; + switch (order) { + case _Py_memory_order_acquire: + { + do { + old = *value; + } while(_InterlockedCompareExchange64_acq(value, old, old) != old); + break; + } + case _Py_memory_order_release: + { + do { + old = *value; + } while(_InterlockedCompareExchange64_rel(value, old, old) != old); + break; + } + case _Py_memory_order_relaxed: + old = *value; + break; + default: + { + do { + old = *value; + } while(_InterlockedCompareExchange64(value, old, old) != old); + break; + } + } + return old; +} + +#else +#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *ATOMIC_VAL +#endif + +inline int _Py_atomic_load_32bit(volatile int* value, int order) { + int old; + switch (order) { + case _Py_memory_order_acquire: + { + do { + old = *value; + } while(_InterlockedCompareExchange_acq(value, old, old) != old); + break; + } + case _Py_memory_order_release: + { + do { + old = *value; + } while(_InterlockedCompareExchange_rel(value, old, old) != old); + break; + } + case _Py_memory_order_relaxed: + old = *value; + break; + default: + { + do { + old = *value; + } while(_InterlockedCompareExchange(value, old, old) != old); + break; + } + } + return old; +} + +#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \ + if (sizeof(*ATOMIC_VAL._value) == 8) { \ + _Py_atomic_store_64bit(ATOMIC_VAL._value, NEW_VAL, ORDER) } else { \ + _Py_atomic_store_32bit(ATOMIC_VAL._value, NEW_VAL, ORDER) } + +#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \ + ( \ + sizeof(*(ATOMIC_VAL._value)) == 8 ? 
\ + _Py_atomic_load_64bit(ATOMIC_VAL._value, ORDER) : \ + _Py_atomic_load_32bit(ATOMIC_VAL._value, ORDER) \ + ) +#endif +#else /* !gcc x86 !_msc_ver */ +typedef enum _Py_memory_order { + _Py_memory_order_relaxed, + _Py_memory_order_acquire, + _Py_memory_order_release, + _Py_memory_order_acq_rel, + _Py_memory_order_seq_cst +} _Py_memory_order; + +typedef struct _Py_atomic_address { + uintptr_t _value; +} _Py_atomic_address; + +typedef struct _Py_atomic_int { + int _value; +} _Py_atomic_int; +/* Fall back to other compilers and processors by assuming that simple + volatile accesses are atomic. This is false, so people should port + this. */ +#define _Py_atomic_signal_fence(/*memory_order*/ ORDER) ((void)0) +#define _Py_atomic_thread_fence(/*memory_order*/ ORDER) ((void)0) +#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \ + ((ATOMIC_VAL)->_value = NEW_VAL) +#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \ + ((ATOMIC_VAL)->_value) +#endif + +/* Standardized shortcuts. */ +#define _Py_atomic_store(ATOMIC_VAL, NEW_VAL) \ + _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, _Py_memory_order_seq_cst) +#define _Py_atomic_load(ATOMIC_VAL) \ + _Py_atomic_load_explicit(ATOMIC_VAL, _Py_memory_order_seq_cst) + +/* Python-local extensions */ + +#define _Py_atomic_store_relaxed(ATOMIC_VAL, NEW_VAL) \ + _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, _Py_memory_order_relaxed) +#define _Py_atomic_load_relaxed(ATOMIC_VAL) \ + _Py_atomic_load_explicit(ATOMIC_VAL, _Py_memory_order_relaxed) +#endif /* Py_BUILD_CORE */ +#endif /* Py_ATOMIC_H */ diff --git a/venv/Include/pycapsule.h b/venv/Include/pycapsule.h new file mode 100644 index 00000000..d9ecda7a --- /dev/null +++ b/venv/Include/pycapsule.h @@ -0,0 +1,59 @@ + +/* Capsule objects let you wrap a C "void *" pointer in a Python + object. They're a way of passing data through the Python interpreter + without creating your own custom type. + + Capsules are used for communication between extension modules. + They provide a way for an extension module to export a C interface + to other extension modules, so that extension modules can use the + Python import mechanism to link to one another. + + For more information, please see "c-api/capsule.html" in the + documentation. 
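+   A minimal sketch (illustrative only; MyAPI and "mymodule._C_API" are
+   made-up names): the exporting module wraps a pointer and publishes it,
+   and a consumer imports it by name:
+
+       static MyAPI api;
+       PyObject *c = PyCapsule_New(&api, "mymodule._C_API", NULL);
+       /* ...add 'c' to the exporting module's namespace... */
+
+       MyAPI *p = (MyAPI *)PyCapsule_Import("mymodule._C_API", 0);
+       if (p == NULL) { /* exception set by PyCapsule_Import */ }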
+*/ + +#ifndef Py_CAPSULE_H +#define Py_CAPSULE_H +#ifdef __cplusplus +extern "C" { +#endif + +PyAPI_DATA(PyTypeObject) PyCapsule_Type; + +typedef void (*PyCapsule_Destructor)(PyObject *); + +#define PyCapsule_CheckExact(op) (Py_TYPE(op) == &PyCapsule_Type) + + +PyAPI_FUNC(PyObject *) PyCapsule_New( + void *pointer, + const char *name, + PyCapsule_Destructor destructor); + +PyAPI_FUNC(void *) PyCapsule_GetPointer(PyObject *capsule, const char *name); + +PyAPI_FUNC(PyCapsule_Destructor) PyCapsule_GetDestructor(PyObject *capsule); + +PyAPI_FUNC(const char *) PyCapsule_GetName(PyObject *capsule); + +PyAPI_FUNC(void *) PyCapsule_GetContext(PyObject *capsule); + +PyAPI_FUNC(int) PyCapsule_IsValid(PyObject *capsule, const char *name); + +PyAPI_FUNC(int) PyCapsule_SetPointer(PyObject *capsule, void *pointer); + +PyAPI_FUNC(int) PyCapsule_SetDestructor(PyObject *capsule, PyCapsule_Destructor destructor); + +PyAPI_FUNC(int) PyCapsule_SetName(PyObject *capsule, const char *name); + +PyAPI_FUNC(int) PyCapsule_SetContext(PyObject *capsule, void *context); + +PyAPI_FUNC(void *) PyCapsule_Import( + const char *name, /* UTF-8 encoded string */ + int no_block); + + +#ifdef __cplusplus +} +#endif +#endif /* !Py_CAPSULE_H */ diff --git a/venv/Include/pyconfig.h b/venv/Include/pyconfig.h new file mode 100644 index 00000000..d2a3f5dd --- /dev/null +++ b/venv/Include/pyconfig.h @@ -0,0 +1,693 @@ +#ifndef Py_CONFIG_H +#define Py_CONFIG_H + +/* pyconfig.h. NOT Generated automatically by configure. + +This is a manually maintained version used for the Watcom, +Borland and Microsoft Visual C++ compilers. It is a +standard part of the Python distribution. + +WINDOWS DEFINES: +The code specific to Windows should be wrapped around one of +the following #defines + +MS_WIN64 - Code specific to the MS Win64 API +MS_WIN32 - Code specific to the MS Win32 (and Win64) API (obsolete, this covers all supported APIs) +MS_WINDOWS - Code specific to Windows, but all versions. +Py_ENABLE_SHARED - Code if the Python core is built as a DLL. + +Also note that neither "_M_IX86" or "_MSC_VER" should be used for +any purpose other than "Windows Intel x86 specific" and "Microsoft +compiler specific". Therefore, these should be very rare. + + +NOTE: The following symbols are deprecated: +NT, USE_DL_EXPORT, USE_DL_IMPORT, DL_EXPORT, DL_IMPORT +MS_CORE_DLL. + +WIN32 is still required for the locale module. + +*/ + +/* Deprecated USE_DL_EXPORT macro - please use Py_BUILD_CORE */ +#ifdef USE_DL_EXPORT +# define Py_BUILD_CORE +#endif /* USE_DL_EXPORT */ + +/* Visual Studio 2005 introduces deprecation warnings for + "insecure" and POSIX functions. The insecure functions should + be replaced by *_s versions (according to Microsoft); the + POSIX functions by _* versions (which, according to Microsoft, + would be ISO C conforming). Neither renaming is feasible, so + we just silence the warnings. */ + +#ifndef _CRT_SECURE_NO_DEPRECATE +#define _CRT_SECURE_NO_DEPRECATE 1 +#endif +#ifndef _CRT_NONSTDC_NO_DEPRECATE +#define _CRT_NONSTDC_NO_DEPRECATE 1 +#endif + +#define HAVE_IO_H +#define HAVE_SYS_UTIME_H +#define HAVE_TEMPNAM +#define HAVE_TMPFILE +#define HAVE_TMPNAM +#define HAVE_CLOCK +#define HAVE_STRERROR + +#include + +#define HAVE_HYPOT +#define HAVE_STRFTIME +#define DONT_HAVE_SIG_ALARM +#define DONT_HAVE_SIG_PAUSE +#define LONG_BIT 32 +#define WORD_BIT 32 + +#define MS_WIN32 /* only support win32 and greater. 
*/ +#define MS_WINDOWS +#ifndef PYTHONPATH +# define PYTHONPATH L".\\DLLs;.\\lib" +#endif +#define NT_THREADS +#define WITH_THREAD +#ifndef NETSCAPE_PI +#define USE_SOCKET +#endif + + +/* Compiler specific defines */ + +/* ------------------------------------------------------------------------*/ +/* Microsoft C defines _MSC_VER */ +#ifdef _MSC_VER + +/* We want COMPILER to expand to a string containing _MSC_VER's *value*. + * This is horridly tricky, because the stringization operator only works + * on macro arguments, and doesn't evaluate macros passed *as* arguments. + * Attempts simpler than the following appear doomed to produce "_MSC_VER" + * literally in the string. + */ +#define _Py_PASTE_VERSION(SUFFIX) \ + ("[MSC v." _Py_STRINGIZE(_MSC_VER) " " SUFFIX "]") +/* e.g., this produces, after compile-time string catenation, + * ("[MSC v.1200 32 bit (Intel)]") + * + * _Py_STRINGIZE(_MSC_VER) expands to + * _Py_STRINGIZE1((_MSC_VER)) expands to + * _Py_STRINGIZE2(_MSC_VER) but as this call is the result of token-pasting + * it's scanned again for macros and so further expands to (under MSVC 6) + * _Py_STRINGIZE2(1200) which then expands to + * "1200" + */ +#define _Py_STRINGIZE(X) _Py_STRINGIZE1((X)) +#define _Py_STRINGIZE1(X) _Py_STRINGIZE2 ## X +#define _Py_STRINGIZE2(X) #X + +/* MSVC defines _WINxx to differentiate the windows platform types + + Note that for compatibility reasons _WIN32 is defined on Win32 + *and* on Win64. For the same reasons, in Python, MS_WIN32 is + defined on Win32 *and* Win64. Win32 only code must therefore be + guarded as follows: + #if defined(MS_WIN32) && !defined(MS_WIN64) +*/ +#ifdef _WIN64 +#define MS_WIN64 +#endif + +/* set the COMPILER */ +#ifdef MS_WIN64 +#if defined(_M_X64) || defined(_M_AMD64) +#if defined(__INTEL_COMPILER) +#define COMPILER ("[ICC v." _Py_STRINGIZE(__INTEL_COMPILER) " 64 bit (amd64) with MSC v." _Py_STRINGIZE(_MSC_VER) " CRT]") +#else +#define COMPILER _Py_PASTE_VERSION("64 bit (AMD64)") +#endif /* __INTEL_COMPILER */ +#define PYD_PLATFORM_TAG "win_amd64" +#else +#define COMPILER _Py_PASTE_VERSION("64 bit (Unknown)") +#endif +#endif /* MS_WIN64 */ + +/* set the version macros for the windows headers */ +/* Python 3.5+ requires Windows Vista or greater */ +#define Py_WINVER 0x0600 /* _WIN32_WINNT_VISTA */ +#define Py_NTDDI NTDDI_VISTA + +/* We only set these values when building Python - we don't want to force + these values on extensions, as that will affect the prototypes and + structures exposed in the Windows headers. Even when building Python, we + allow a single source file to override this - they may need access to + structures etc so it can optionally use new Windows features if it + determines at runtime they are available. +*/ +#if defined(Py_BUILD_CORE) || defined(Py_BUILD_CORE_BUILTIN) || defined(Py_BUILD_CORE_MODULE) +#ifndef NTDDI_VERSION +#define NTDDI_VERSION Py_NTDDI +#endif +#ifndef WINVER +#define WINVER Py_WINVER +#endif +#ifndef _WIN32_WINNT +#define _WIN32_WINNT Py_WINVER +#endif +#endif + +/* _W64 is not defined for VC6 or eVC4 */ +#ifndef _W64 +#define _W64 +#endif + +/* Define like size_t, omitting the "unsigned" */ +#ifdef MS_WIN64 +typedef __int64 ssize_t; +#else +typedef _W64 int ssize_t; +#endif +#define HAVE_SSIZE_T 1 + +#if defined(MS_WIN32) && !defined(MS_WIN64) +#if defined(_M_IX86) +#if defined(__INTEL_COMPILER) +#define COMPILER ("[ICC v." _Py_STRINGIZE(__INTEL_COMPILER) " 32 bit (Intel) with MSC v." 
_Py_STRINGIZE(_MSC_VER) " CRT]") +#else +#define COMPILER _Py_PASTE_VERSION("32 bit (Intel)") +#endif /* __INTEL_COMPILER */ +#define PYD_PLATFORM_TAG "win32" +#elif defined(_M_ARM) +#define COMPILER _Py_PASTE_VERSION("32 bit (ARM)") +#define PYD_PLATFORM_TAG "win_arm" +#else +#define COMPILER _Py_PASTE_VERSION("32 bit (Unknown)") +#endif +#endif /* MS_WIN32 && !MS_WIN64 */ + +typedef int pid_t; + +#include +#define Py_IS_NAN _isnan +#define Py_IS_INFINITY(X) (!_finite(X) && !_isnan(X)) +#define Py_IS_FINITE(X) _finite(X) +#define copysign _copysign + +/* VS 2010 and above already defines hypot as _hypot */ +#if _MSC_VER < 1600 +#define hypot _hypot +#endif + +/* VS 2015 defines these names with a leading underscore */ +#if _MSC_VER >= 1900 +#define timezone _timezone +#define daylight _daylight +#define tzname _tzname +#endif + +/* Side by Side assemblies supported in VS 2005 and VS 2008 but not 2010*/ +#if _MSC_VER >= 1400 && _MSC_VER < 1600 +#define HAVE_SXS 1 +#endif + +/* define some ANSI types that are not defined in earlier Win headers */ +#if _MSC_VER >= 1200 +/* This file only exists in VC 6.0 or higher */ +#include +#endif + +#endif /* _MSC_VER */ + +/* ------------------------------------------------------------------------*/ +/* egcs/gnu-win32 defines __GNUC__ and _WIN32 */ +#if defined(__GNUC__) && defined(_WIN32) +/* XXX These defines are likely incomplete, but should be easy to fix. + They should be complete enough to build extension modules. */ +/* Suggested by Rene Liebscher to avoid a GCC 2.91.* + bug that requires structure imports. More recent versions of the + compiler don't exhibit this bug. +*/ +#if (__GNUC__==2) && (__GNUC_MINOR__<=91) +#warning "Please use an up-to-date version of gcc! (>2.91 recommended)" +#endif + +#define COMPILER "[gcc]" +#define hypot _hypot +#define PY_LONG_LONG long long +#define PY_LLONG_MIN LLONG_MIN +#define PY_LLONG_MAX LLONG_MAX +#define PY_ULLONG_MAX ULLONG_MAX +#endif /* GNUC */ + +/* ------------------------------------------------------------------------*/ +/* lcc-win32 defines __LCC__ */ +#if defined(__LCC__) +/* XXX These defines are likely incomplete, but should be easy to fix. + They should be complete enough to build extension modules. */ + +#define COMPILER "[lcc-win32]" +typedef int pid_t; +/* __declspec() is supported here too - do nothing to get the defaults */ + +#endif /* LCC */ + +/* ------------------------------------------------------------------------*/ +/* End of compilers - finish up */ + +#ifndef NO_STDIO_H +# include +#endif + +/* 64 bit ints are usually spelt __int64 unless compiler has overridden */ +#ifndef PY_LONG_LONG +# define PY_LONG_LONG __int64 +# define PY_LLONG_MAX _I64_MAX +# define PY_LLONG_MIN _I64_MIN +# define PY_ULLONG_MAX _UI64_MAX +#endif + +/* For Windows the Python core is in a DLL by default. Test +Py_NO_ENABLE_SHARED to find out. Also support MS_NO_COREDLL for b/w compat */ +#if !defined(MS_NO_COREDLL) && !defined(Py_NO_ENABLE_SHARED) +# define Py_ENABLE_SHARED 1 /* standard symbol for shared library */ +# define MS_COREDLL /* deprecated old symbol */ +#endif /* !MS_NO_COREDLL && ... 
*/ + +/* All windows compilers that use this header support __declspec */ +#define HAVE_DECLSPEC_DLL + +/* For an MSVC DLL, we can nominate the .lib files used by extensions */ +#ifdef MS_COREDLL +# if !defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_BUILTIN) + /* not building the core - must be an ext */ +# if defined(_MSC_VER) + /* So MSVC users need not specify the .lib + file in their Makefile (other compilers are + generally taken care of by distutils.) */ +# if defined(_DEBUG) +# pragma comment(lib,"python37_d.lib") +# elif defined(Py_LIMITED_API) +# pragma comment(lib,"python3.lib") +# else +# pragma comment(lib,"python37.lib") +# endif /* _DEBUG */ +# endif /* _MSC_VER */ +# endif /* Py_BUILD_CORE */ +#endif /* MS_COREDLL */ + +#if defined(MS_WIN64) +/* maintain "win32" sys.platform for backward compatibility of Python code, + the Win64 API should be close enough to the Win32 API to make this + preferable */ +# define PLATFORM "win32" +# define SIZEOF_VOID_P 8 +# define SIZEOF_TIME_T 8 +# define SIZEOF_OFF_T 4 +# define SIZEOF_FPOS_T 8 +# define SIZEOF_HKEY 8 +# define SIZEOF_SIZE_T 8 +/* configure.ac defines HAVE_LARGEFILE_SUPPORT iff HAVE_LONG_LONG, + sizeof(off_t) > sizeof(long), and sizeof(PY_LONG_LONG) >= sizeof(off_t). + On Win64 the second condition is not true, but if fpos_t replaces off_t + then this is true. The uses of HAVE_LARGEFILE_SUPPORT imply that Win64 + should define this. */ +# define HAVE_LARGEFILE_SUPPORT +#elif defined(MS_WIN32) +# define PLATFORM "win32" +# define HAVE_LARGEFILE_SUPPORT +# define SIZEOF_VOID_P 4 +# define SIZEOF_OFF_T 4 +# define SIZEOF_FPOS_T 8 +# define SIZEOF_HKEY 4 +# define SIZEOF_SIZE_T 4 + /* MS VS2005 changes time_t to a 64-bit type on all platforms */ +# if defined(_MSC_VER) && _MSC_VER >= 1400 +# define SIZEOF_TIME_T 8 +# else +# define SIZEOF_TIME_T 4 +# endif +#endif + +#ifdef _DEBUG +# define Py_DEBUG +#endif + + +#ifdef MS_WIN32 + +#define SIZEOF_SHORT 2 +#define SIZEOF_INT 4 +#define SIZEOF_LONG 4 +#define SIZEOF_LONG_LONG 8 +#define SIZEOF_DOUBLE 8 +#define SIZEOF_FLOAT 4 + +/* VC 7.1 has them and VC 6.0 does not. VC 6.0 has a version number of 1200. + Microsoft eMbedded Visual C++ 4.0 has a version number of 1201 and doesn't + define these. + If some compiler does not provide them, modify the #if appropriately. */ +#if defined(_MSC_VER) +#if _MSC_VER > 1300 +#define HAVE_UINTPTR_T 1 +#define HAVE_INTPTR_T 1 +#else +/* VC6, VS 2002 and eVC4 don't support the C99 LL suffix for 64-bit integer literals */ +#define Py_LL(x) x##I64 +#endif /* _MSC_VER > 1300 */ +#endif /* _MSC_VER */ + +#endif + +/* define signed and unsigned exact-width 32-bit and 64-bit types, used in the + implementation of Python integers. */ +#define PY_UINT32_T uint32_t +#define PY_UINT64_T uint64_t +#define PY_INT32_T int32_t +#define PY_INT64_T int64_t + +/* Fairly standard from here! */ + +/* Define to 1 if you have the `copysign' function. */ +#define HAVE_COPYSIGN 1 + +/* Define to 1 if you have the `round' function. */ +#if _MSC_VER >= 1800 +#define HAVE_ROUND 1 +#endif + +/* Define to 1 if you have the `isinf' macro. */ +#define HAVE_DECL_ISINF 1 + +/* Define to 1 if you have the `isnan' function. */ +#define HAVE_DECL_ISNAN 1 + +/* Define if on AIX 3. + System headers sometimes define this. + We just want to avoid a redefinition error message. */ +#ifndef _ALL_SOURCE +/* #undef _ALL_SOURCE */ +#endif + +/* Define to empty if the keyword does not work. */ +/* #define const */ + +/* Define to 1 if you have the header file. 
*/ +#define HAVE_CONIO_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_DIRECT_H 1 + +/* Define if you have dirent.h. */ +/* #define DIRENT 1 */ + +/* Define to the type of elements in the array set by `getgroups'. + Usually this is either `int' or `gid_t'. */ +/* #undef GETGROUPS_T */ + +/* Define to `int' if doesn't define. */ +/* #undef gid_t */ + +/* Define if your struct tm has tm_zone. */ +/* #undef HAVE_TM_ZONE */ + +/* Define if you don't have tm_zone but do have the external array + tzname. */ +#define HAVE_TZNAME + +/* Define to `int' if doesn't define. */ +/* #undef mode_t */ + +/* Define if you don't have dirent.h, but have ndir.h. */ +/* #undef NDIR */ + +/* Define to `long' if doesn't define. */ +/* #undef off_t */ + +/* Define to `int' if doesn't define. */ +/* #undef pid_t */ + +/* Define if the system does not provide POSIX.1 features except + with this defined. */ +/* #undef _POSIX_1_SOURCE */ + +/* Define if you need to in order for stat and other things to work. */ +/* #undef _POSIX_SOURCE */ + +/* Define as the return type of signal handlers (int or void). */ +#define RETSIGTYPE void + +/* Define to `unsigned' if doesn't define. */ +/* #undef size_t */ + +/* Define if you have the ANSI C header files. */ +#define STDC_HEADERS 1 + +/* Define if you don't have dirent.h, but have sys/dir.h. */ +/* #undef SYSDIR */ + +/* Define if you don't have dirent.h, but have sys/ndir.h. */ +/* #undef SYSNDIR */ + +/* Define if you can safely include both and . */ +/* #undef TIME_WITH_SYS_TIME */ + +/* Define if your declares struct tm. */ +/* #define TM_IN_SYS_TIME 1 */ + +/* Define to `int' if doesn't define. */ +/* #undef uid_t */ + +/* Define if the closedir function returns void instead of int. */ +/* #undef VOID_CLOSEDIR */ + +/* Define if getpgrp() must be called as getpgrp(0) + and (consequently) setpgrp() as setpgrp(0, 0). */ +/* #undef GETPGRP_HAVE_ARGS */ + +/* Define this if your time.h defines altzone */ +/* #define HAVE_ALTZONE */ + +/* Define if you have the putenv function. */ +#define HAVE_PUTENV + +/* Define if your compiler supports function prototypes */ +#define HAVE_PROTOTYPES + +/* Define if you can safely include both and + (which you can't on SCO ODT 3.0). */ +/* #undef SYS_SELECT_WITH_SYS_TIME */ + +/* Define if you want documentation strings in extension modules */ +#define WITH_DOC_STRINGS 1 + +/* Define if you want to compile in rudimentary thread support */ +/* #undef WITH_THREAD */ + +/* Define if you want to use the GNU readline library */ +/* #define WITH_READLINE 1 */ + +/* Use Python's own small-block memory-allocator. */ +#define WITH_PYMALLOC 1 + +/* Define if you have clock. */ +/* #define HAVE_CLOCK */ + +/* Define when any dynamic module loading is enabled */ +#define HAVE_DYNAMIC_LOADING + +/* Define if you have ftime. */ +#define HAVE_FTIME + +/* Define if you have getpeername. */ +#define HAVE_GETPEERNAME + +/* Define if you have getpgrp. */ +/* #undef HAVE_GETPGRP */ + +/* Define if you have getpid. */ +#define HAVE_GETPID + +/* Define if you have gettimeofday. */ +/* #undef HAVE_GETTIMEOFDAY */ + +/* Define if you have getwd. */ +/* #undef HAVE_GETWD */ + +/* Define if you have lstat. */ +/* #undef HAVE_LSTAT */ + +/* Define if you have the mktime function. */ +#define HAVE_MKTIME + +/* Define if you have nice. */ +/* #undef HAVE_NICE */ + +/* Define if you have readlink. */ +/* #undef HAVE_READLINK */ + +/* Define if you have setpgid. */ +/* #undef HAVE_SETPGID */ + +/* Define if you have setpgrp. 
*/ +/* #undef HAVE_SETPGRP */ + +/* Define if you have setsid. */ +/* #undef HAVE_SETSID */ + +/* Define if you have setvbuf. */ +#define HAVE_SETVBUF + +/* Define if you have siginterrupt. */ +/* #undef HAVE_SIGINTERRUPT */ + +/* Define if you have symlink. */ +/* #undef HAVE_SYMLINK */ + +/* Define if you have tcgetpgrp. */ +/* #undef HAVE_TCGETPGRP */ + +/* Define if you have tcsetpgrp. */ +/* #undef HAVE_TCSETPGRP */ + +/* Define if you have times. */ +/* #undef HAVE_TIMES */ + +/* Define if you have uname. */ +/* #undef HAVE_UNAME */ + +/* Define if you have waitpid. */ +/* #undef HAVE_WAITPID */ + +/* Define to 1 if you have the `wcsftime' function. */ +#if defined(_MSC_VER) && _MSC_VER >= 1310 +#define HAVE_WCSFTIME 1 +#endif + +/* Define to 1 if you have the `wcscoll' function. */ +#define HAVE_WCSCOLL 1 + +/* Define to 1 if you have the `wcsxfrm' function. */ +#define HAVE_WCSXFRM 1 + +/* Define if the zlib library has inflateCopy */ +#define HAVE_ZLIB_COPY 1 + +/* Define if you have the header file. */ +/* #undef HAVE_DLFCN_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_ERRNO_H 1 + +/* Define if you have the header file. */ +#define HAVE_FCNTL_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_PROCESS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SIGNAL_H 1 + +/* Define if you have the prototypes. */ +#define HAVE_STDARG_PROTOTYPES + +/* Define if you have the header file. */ +#define HAVE_STDDEF_H 1 + +/* Define if you have the header file. */ +/* #undef HAVE_SYS_AUDIOIO_H */ + +/* Define if you have the header file. */ +/* #define HAVE_SYS_PARAM_H 1 */ + +/* Define if you have the header file. */ +/* #define HAVE_SYS_SELECT_H 1 */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_STAT_H 1 + +/* Define if you have the header file. */ +/* #define HAVE_SYS_TIME_H 1 */ + +/* Define if you have the header file. */ +/* #define HAVE_SYS_TIMES_H 1 */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TYPES_H 1 + +/* Define if you have the header file. */ +/* #define HAVE_SYS_UN_H 1 */ + +/* Define if you have the header file. */ +/* #define HAVE_SYS_UTIME_H 1 */ + +/* Define if you have the header file. */ +/* #define HAVE_SYS_UTSNAME_H 1 */ + +/* Define if you have the header file. */ +/* #define HAVE_UNISTD_H 1 */ + +/* Define if you have the header file. */ +/* #define HAVE_UTIME_H 1 */ + +/* Define if the compiler provides a wchar.h header file. */ +#define HAVE_WCHAR_H 1 + +/* The size of `wchar_t', as computed by sizeof. */ +#define SIZEOF_WCHAR_T 2 + +/* The size of `_Bool', as computed by sizeof. */ +#define SIZEOF__BOOL 1 + +/* The size of `pid_t', as computed by sizeof. */ +#define SIZEOF_PID_T SIZEOF_INT + +/* Define if you have the dl library (-ldl). */ +/* #undef HAVE_LIBDL */ + +/* Define if you have the mpc library (-lmpc). */ +/* #undef HAVE_LIBMPC */ + +/* Define if you have the nsl library (-lnsl). */ +#define HAVE_LIBNSL 1 + +/* Define if you have the seq library (-lseq). */ +/* #undef HAVE_LIBSEQ */ + +/* Define if you have the socket library (-lsocket). */ +#define HAVE_LIBSOCKET 1 + +/* Define if you have the sun library (-lsun). */ +/* #undef HAVE_LIBSUN */ + +/* Define if you have the termcap library (-ltermcap). */ +/* #undef HAVE_LIBTERMCAP */ + +/* Define if you have the termlib library (-ltermlib). */ +/* #undef HAVE_LIBTERMLIB */ + +/* Define if you have the thread library (-lthread). 
*/ +/* #undef HAVE_LIBTHREAD */ + +/* WinSock does not use a bitmask in select, and uses + socket handles greater than FD_SETSIZE */ +#define Py_SOCKET_FD_CAN_BE_GE_FD_SETSIZE + +/* Define if C doubles are 64-bit IEEE 754 binary format, stored with the + least significant byte first */ +#define DOUBLE_IS_LITTLE_ENDIAN_IEEE754 1 + +/* Define to 1 if you have the `erf' function. */ +#define HAVE_ERF 1 + +/* Define to 1 if you have the `erfc' function. */ +#define HAVE_ERFC 1 + +/* Define if you have the 'inet_pton' function. */ +#define HAVE_INET_PTON 1 + +/* framework name */ +#define _PYTHONFRAMEWORK "" + +/* Define if libssl has X509_VERIFY_PARAM_set1_host and related function */ +#define HAVE_X509_VERIFY_PARAM_SET1_HOST 1 + +#endif /* !Py_CONFIG_H */ diff --git a/venv/Include/pyctype.h b/venv/Include/pyctype.h new file mode 100644 index 00000000..6bce63ee --- /dev/null +++ b/venv/Include/pyctype.h @@ -0,0 +1,33 @@ +#ifndef Py_LIMITED_API +#ifndef PYCTYPE_H +#define PYCTYPE_H + +#define PY_CTF_LOWER 0x01 +#define PY_CTF_UPPER 0x02 +#define PY_CTF_ALPHA (PY_CTF_LOWER|PY_CTF_UPPER) +#define PY_CTF_DIGIT 0x04 +#define PY_CTF_ALNUM (PY_CTF_ALPHA|PY_CTF_DIGIT) +#define PY_CTF_SPACE 0x08 +#define PY_CTF_XDIGIT 0x10 + +PyAPI_DATA(const unsigned int) _Py_ctype_table[256]; + +/* Unlike their C counterparts, the following macros are not meant to + * handle an int with any of the values [EOF, 0-UCHAR_MAX]. The argument + * must be a signed/unsigned char. */ +#define Py_ISLOWER(c) (_Py_ctype_table[Py_CHARMASK(c)] & PY_CTF_LOWER) +#define Py_ISUPPER(c) (_Py_ctype_table[Py_CHARMASK(c)] & PY_CTF_UPPER) +#define Py_ISALPHA(c) (_Py_ctype_table[Py_CHARMASK(c)] & PY_CTF_ALPHA) +#define Py_ISDIGIT(c) (_Py_ctype_table[Py_CHARMASK(c)] & PY_CTF_DIGIT) +#define Py_ISXDIGIT(c) (_Py_ctype_table[Py_CHARMASK(c)] & PY_CTF_XDIGIT) +#define Py_ISALNUM(c) (_Py_ctype_table[Py_CHARMASK(c)] & PY_CTF_ALNUM) +#define Py_ISSPACE(c) (_Py_ctype_table[Py_CHARMASK(c)] & PY_CTF_SPACE) + +PyAPI_DATA(const unsigned char) _Py_ctype_tolower[256]; +PyAPI_DATA(const unsigned char) _Py_ctype_toupper[256]; + +#define Py_TOLOWER(c) (_Py_ctype_tolower[Py_CHARMASK(c)]) +#define Py_TOUPPER(c) (_Py_ctype_toupper[Py_CHARMASK(c)]) + +#endif /* !PYCTYPE_H */ +#endif /* !Py_LIMITED_API */ diff --git a/venv/Include/pydebug.h b/venv/Include/pydebug.h new file mode 100644 index 00000000..bd4aafe3 --- /dev/null +++ b/venv/Include/pydebug.h @@ -0,0 +1,40 @@ +#ifndef Py_LIMITED_API +#ifndef Py_PYDEBUG_H +#define Py_PYDEBUG_H +#ifdef __cplusplus +extern "C" { +#endif + +/* These global variable are defined in pylifecycle.c */ +/* XXX (ncoghlan): move these declarations to pylifecycle.h? */ +PyAPI_DATA(int) Py_DebugFlag; +PyAPI_DATA(int) Py_VerboseFlag; +PyAPI_DATA(int) Py_QuietFlag; +PyAPI_DATA(int) Py_InteractiveFlag; +PyAPI_DATA(int) Py_InspectFlag; +PyAPI_DATA(int) Py_OptimizeFlag; +PyAPI_DATA(int) Py_NoSiteFlag; +PyAPI_DATA(int) Py_BytesWarningFlag; +PyAPI_DATA(int) Py_FrozenFlag; +PyAPI_DATA(int) Py_IgnoreEnvironmentFlag; +PyAPI_DATA(int) Py_DontWriteBytecodeFlag; +PyAPI_DATA(int) Py_NoUserSiteDirectory; +PyAPI_DATA(int) Py_UnbufferedStdioFlag; +PyAPI_DATA(int) Py_HashRandomizationFlag; +PyAPI_DATA(int) Py_IsolatedFlag; + +#ifdef MS_WINDOWS +PyAPI_DATA(int) Py_LegacyWindowsFSEncodingFlag; +PyAPI_DATA(int) Py_LegacyWindowsStdioFlag; +#endif + +/* this is a wrapper around getenv() that pays attention to + Py_IgnoreEnvironmentFlag. 
It should be used for getting variables like + PYTHONPATH and PYTHONHOME from the environment */ +#define Py_GETENV(s) (Py_IgnoreEnvironmentFlag ? NULL : getenv(s)) + +#ifdef __cplusplus +} +#endif +#endif /* !Py_PYDEBUG_H */ +#endif /* Py_LIMITED_API */ diff --git a/venv/Include/pydtrace.h b/venv/Include/pydtrace.h new file mode 100644 index 00000000..037961d4 --- /dev/null +++ b/venv/Include/pydtrace.h @@ -0,0 +1,57 @@ +/* Static DTrace probes interface */ + +#ifndef Py_DTRACE_H +#define Py_DTRACE_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef WITH_DTRACE + +#include "pydtrace_probes.h" + +/* pydtrace_probes.h, on systems with DTrace, is auto-generated to include + `PyDTrace_{PROBE}` and `PyDTrace_{PROBE}_ENABLED()` macros for every probe + defined in pydtrace_provider.d. + + Calling these functions must be guarded by a `PyDTrace_{PROBE}_ENABLED()` + check to minimize performance impact when probing is off. For example: + + if (PyDTrace_FUNCTION_ENTRY_ENABLED()) + PyDTrace_FUNCTION_ENTRY(f); +*/ + +#else + +/* Without DTrace, compile to nothing. */ + +static inline void PyDTrace_LINE(const char *arg0, const char *arg1, int arg2) {} +static inline void PyDTrace_FUNCTION_ENTRY(const char *arg0, const char *arg1, int arg2) {} +static inline void PyDTrace_FUNCTION_RETURN(const char *arg0, const char *arg1, int arg2) {} +static inline void PyDTrace_GC_START(int arg0) {} +static inline void PyDTrace_GC_DONE(int arg0) {} +static inline void PyDTrace_INSTANCE_NEW_START(int arg0) {} +static inline void PyDTrace_INSTANCE_NEW_DONE(int arg0) {} +static inline void PyDTrace_INSTANCE_DELETE_START(int arg0) {} +static inline void PyDTrace_INSTANCE_DELETE_DONE(int arg0) {} +static inline void PyDTrace_IMPORT_FIND_LOAD_START(const char *arg0) {} +static inline void PyDTrace_IMPORT_FIND_LOAD_DONE(const char *arg0, int arg1) {} + +static inline int PyDTrace_LINE_ENABLED(void) { return 0; } +static inline int PyDTrace_FUNCTION_ENTRY_ENABLED(void) { return 0; } +static inline int PyDTrace_FUNCTION_RETURN_ENABLED(void) { return 0; } +static inline int PyDTrace_GC_START_ENABLED(void) { return 0; } +static inline int PyDTrace_GC_DONE_ENABLED(void) { return 0; } +static inline int PyDTrace_INSTANCE_NEW_START_ENABLED(void) { return 0; } +static inline int PyDTrace_INSTANCE_NEW_DONE_ENABLED(void) { return 0; } +static inline int PyDTrace_INSTANCE_DELETE_START_ENABLED(void) { return 0; } +static inline int PyDTrace_INSTANCE_DELETE_DONE_ENABLED(void) { return 0; } +static inline int PyDTrace_IMPORT_FIND_LOAD_START_ENABLED(void) { return 0; } +static inline int PyDTrace_IMPORT_FIND_LOAD_DONE_ENABLED(void) { return 0; } + +#endif /* !WITH_DTRACE */ + +#ifdef __cplusplus +} +#endif +#endif /* !Py_DTRACE_H */ diff --git a/venv/Include/pyerrors.h b/venv/Include/pyerrors.h new file mode 100644 index 00000000..f289471b --- /dev/null +++ b/venv/Include/pyerrors.h @@ -0,0 +1,504 @@ +#ifndef Py_ERRORS_H +#define Py_ERRORS_H +#ifdef __cplusplus +extern "C" { +#endif + +/* Error objects */ + +#ifndef Py_LIMITED_API +/* PyException_HEAD defines the initial segment of every exception class. 
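The value of sharing this prefix is that any exception instance, whatever its concrete type, can be handled through the same leading fields. A minimal sketch of what that allows, using the PyBaseExceptionObject layout defined just below (illustrative internal-style code, not a public API pattern; exc stands for some exception instance):

    PyBaseExceptionObject *base = (PyBaseExceptionObject *)exc;
    PyObject *args = base->args;        /* the shared 'args' slot */
    PyObject *tb   = base->traceback;   /* may be NULL if never set */
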
*/ +#define PyException_HEAD PyObject_HEAD PyObject *dict;\ + PyObject *args; PyObject *traceback;\ + PyObject *context; PyObject *cause;\ + char suppress_context; + +typedef struct { + PyException_HEAD +} PyBaseExceptionObject; + +typedef struct { + PyException_HEAD + PyObject *msg; + PyObject *filename; + PyObject *lineno; + PyObject *offset; + PyObject *text; + PyObject *print_file_and_line; +} PySyntaxErrorObject; + +typedef struct { + PyException_HEAD + PyObject *msg; + PyObject *name; + PyObject *path; +} PyImportErrorObject; + +typedef struct { + PyException_HEAD + PyObject *encoding; + PyObject *object; + Py_ssize_t start; + Py_ssize_t end; + PyObject *reason; +} PyUnicodeErrorObject; + +typedef struct { + PyException_HEAD + PyObject *code; +} PySystemExitObject; + +typedef struct { + PyException_HEAD + PyObject *myerrno; + PyObject *strerror; + PyObject *filename; + PyObject *filename2; +#ifdef MS_WINDOWS + PyObject *winerror; +#endif + Py_ssize_t written; /* only for BlockingIOError, -1 otherwise */ +} PyOSErrorObject; + +typedef struct { + PyException_HEAD + PyObject *value; +} PyStopIterationObject; + +/* Compatibility typedefs */ +typedef PyOSErrorObject PyEnvironmentErrorObject; +#ifdef MS_WINDOWS +typedef PyOSErrorObject PyWindowsErrorObject; +#endif +#endif /* !Py_LIMITED_API */ + +/* Error handling definitions */ + +PyAPI_FUNC(void) PyErr_SetNone(PyObject *); +PyAPI_FUNC(void) PyErr_SetObject(PyObject *, PyObject *); +#ifndef Py_LIMITED_API +PyAPI_FUNC(void) _PyErr_SetKeyError(PyObject *); +_PyErr_StackItem *_PyErr_GetTopmostException(PyThreadState *tstate); +#endif +PyAPI_FUNC(void) PyErr_SetString( + PyObject *exception, + const char *string /* decoded from utf-8 */ + ); +PyAPI_FUNC(PyObject *) PyErr_Occurred(void); +PyAPI_FUNC(void) PyErr_Clear(void); +PyAPI_FUNC(void) PyErr_Fetch(PyObject **, PyObject **, PyObject **); +PyAPI_FUNC(void) PyErr_Restore(PyObject *, PyObject *, PyObject *); +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03030000 +PyAPI_FUNC(void) PyErr_GetExcInfo(PyObject **, PyObject **, PyObject **); +PyAPI_FUNC(void) PyErr_SetExcInfo(PyObject *, PyObject *, PyObject *); +#endif + +#if defined(__clang__) || \ + (defined(__GNUC_MAJOR__) && \ + ((__GNUC_MAJOR__ >= 3) || \ + (__GNUC_MAJOR__ == 2) && (__GNUC_MINOR__ >= 5))) +#define _Py_NO_RETURN __attribute__((__noreturn__)) +#else +#define _Py_NO_RETURN +#endif + +/* Defined in Python/pylifecycle.c */ +PyAPI_FUNC(void) Py_FatalError(const char *message) _Py_NO_RETURN; + +#if defined(Py_DEBUG) || defined(Py_LIMITED_API) +#define _PyErr_OCCURRED() PyErr_Occurred() +#else +#define _PyErr_OCCURRED() (PyThreadState_GET()->curexc_type) +#endif + +/* Error testing and normalization */ +PyAPI_FUNC(int) PyErr_GivenExceptionMatches(PyObject *, PyObject *); +PyAPI_FUNC(int) PyErr_ExceptionMatches(PyObject *); +PyAPI_FUNC(void) PyErr_NormalizeException(PyObject**, PyObject**, PyObject**); + +/* Traceback manipulation (PEP 3134) */ +PyAPI_FUNC(int) PyException_SetTraceback(PyObject *, PyObject *); +PyAPI_FUNC(PyObject *) PyException_GetTraceback(PyObject *); + +/* Cause manipulation (PEP 3134) */ +PyAPI_FUNC(PyObject *) PyException_GetCause(PyObject *); +PyAPI_FUNC(void) PyException_SetCause(PyObject *, PyObject *); + +/* Context manipulation (PEP 3134) */ +PyAPI_FUNC(PyObject *) PyException_GetContext(PyObject *); +PyAPI_FUNC(void) PyException_SetContext(PyObject *, PyObject *); +#ifndef Py_LIMITED_API +PyAPI_FUNC(void) _PyErr_ChainExceptions(PyObject *, PyObject *, PyObject *); +#endif + +/* */ + +#define 
PyExceptionClass_Check(x) \ + (PyType_Check((x)) && \ + PyType_FastSubclass((PyTypeObject*)(x), Py_TPFLAGS_BASE_EXC_SUBCLASS)) + +#define PyExceptionInstance_Check(x) \ + PyType_FastSubclass((x)->ob_type, Py_TPFLAGS_BASE_EXC_SUBCLASS) + +#define PyExceptionClass_Name(x) \ + ((char *)(((PyTypeObject*)(x))->tp_name)) + +#define PyExceptionInstance_Class(x) ((PyObject*)((x)->ob_type)) + + +/* Predefined exceptions */ + +PyAPI_DATA(PyObject *) PyExc_BaseException; +PyAPI_DATA(PyObject *) PyExc_Exception; +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03050000 +PyAPI_DATA(PyObject *) PyExc_StopAsyncIteration; +#endif +PyAPI_DATA(PyObject *) PyExc_StopIteration; +PyAPI_DATA(PyObject *) PyExc_GeneratorExit; +PyAPI_DATA(PyObject *) PyExc_ArithmeticError; +PyAPI_DATA(PyObject *) PyExc_LookupError; + +PyAPI_DATA(PyObject *) PyExc_AssertionError; +PyAPI_DATA(PyObject *) PyExc_AttributeError; +PyAPI_DATA(PyObject *) PyExc_BufferError; +PyAPI_DATA(PyObject *) PyExc_EOFError; +PyAPI_DATA(PyObject *) PyExc_FloatingPointError; +PyAPI_DATA(PyObject *) PyExc_OSError; +PyAPI_DATA(PyObject *) PyExc_ImportError; +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03060000 +PyAPI_DATA(PyObject *) PyExc_ModuleNotFoundError; +#endif +PyAPI_DATA(PyObject *) PyExc_IndexError; +PyAPI_DATA(PyObject *) PyExc_KeyError; +PyAPI_DATA(PyObject *) PyExc_KeyboardInterrupt; +PyAPI_DATA(PyObject *) PyExc_MemoryError; +PyAPI_DATA(PyObject *) PyExc_NameError; +PyAPI_DATA(PyObject *) PyExc_OverflowError; +PyAPI_DATA(PyObject *) PyExc_RuntimeError; +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03050000 +PyAPI_DATA(PyObject *) PyExc_RecursionError; +#endif +PyAPI_DATA(PyObject *) PyExc_NotImplementedError; +PyAPI_DATA(PyObject *) PyExc_SyntaxError; +PyAPI_DATA(PyObject *) PyExc_IndentationError; +PyAPI_DATA(PyObject *) PyExc_TabError; +PyAPI_DATA(PyObject *) PyExc_ReferenceError; +PyAPI_DATA(PyObject *) PyExc_SystemError; +PyAPI_DATA(PyObject *) PyExc_SystemExit; +PyAPI_DATA(PyObject *) PyExc_TypeError; +PyAPI_DATA(PyObject *) PyExc_UnboundLocalError; +PyAPI_DATA(PyObject *) PyExc_UnicodeError; +PyAPI_DATA(PyObject *) PyExc_UnicodeEncodeError; +PyAPI_DATA(PyObject *) PyExc_UnicodeDecodeError; +PyAPI_DATA(PyObject *) PyExc_UnicodeTranslateError; +PyAPI_DATA(PyObject *) PyExc_ValueError; +PyAPI_DATA(PyObject *) PyExc_ZeroDivisionError; + +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03030000 +PyAPI_DATA(PyObject *) PyExc_BlockingIOError; +PyAPI_DATA(PyObject *) PyExc_BrokenPipeError; +PyAPI_DATA(PyObject *) PyExc_ChildProcessError; +PyAPI_DATA(PyObject *) PyExc_ConnectionError; +PyAPI_DATA(PyObject *) PyExc_ConnectionAbortedError; +PyAPI_DATA(PyObject *) PyExc_ConnectionRefusedError; +PyAPI_DATA(PyObject *) PyExc_ConnectionResetError; +PyAPI_DATA(PyObject *) PyExc_FileExistsError; +PyAPI_DATA(PyObject *) PyExc_FileNotFoundError; +PyAPI_DATA(PyObject *) PyExc_InterruptedError; +PyAPI_DATA(PyObject *) PyExc_IsADirectoryError; +PyAPI_DATA(PyObject *) PyExc_NotADirectoryError; +PyAPI_DATA(PyObject *) PyExc_PermissionError; +PyAPI_DATA(PyObject *) PyExc_ProcessLookupError; +PyAPI_DATA(PyObject *) PyExc_TimeoutError; +#endif + + +/* Compatibility aliases */ +PyAPI_DATA(PyObject *) PyExc_EnvironmentError; +PyAPI_DATA(PyObject *) PyExc_IOError; +#ifdef MS_WINDOWS +PyAPI_DATA(PyObject *) PyExc_WindowsError; +#endif + +/* Predefined warning categories */ +PyAPI_DATA(PyObject *) PyExc_Warning; +PyAPI_DATA(PyObject *) PyExc_UserWarning; +PyAPI_DATA(PyObject *) PyExc_DeprecationWarning; +PyAPI_DATA(PyObject *) 
PyExc_PendingDeprecationWarning; +PyAPI_DATA(PyObject *) PyExc_SyntaxWarning; +PyAPI_DATA(PyObject *) PyExc_RuntimeWarning; +PyAPI_DATA(PyObject *) PyExc_FutureWarning; +PyAPI_DATA(PyObject *) PyExc_ImportWarning; +PyAPI_DATA(PyObject *) PyExc_UnicodeWarning; +PyAPI_DATA(PyObject *) PyExc_BytesWarning; +PyAPI_DATA(PyObject *) PyExc_ResourceWarning; + + +/* Convenience functions */ + +PyAPI_FUNC(int) PyErr_BadArgument(void); +PyAPI_FUNC(PyObject *) PyErr_NoMemory(void); +PyAPI_FUNC(PyObject *) PyErr_SetFromErrno(PyObject *); +PyAPI_FUNC(PyObject *) PyErr_SetFromErrnoWithFilenameObject( + PyObject *, PyObject *); +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03040000 +PyAPI_FUNC(PyObject *) PyErr_SetFromErrnoWithFilenameObjects( + PyObject *, PyObject *, PyObject *); +#endif +PyAPI_FUNC(PyObject *) PyErr_SetFromErrnoWithFilename( + PyObject *exc, + const char *filename /* decoded from the filesystem encoding */ + ); +#if defined(MS_WINDOWS) && !defined(Py_LIMITED_API) +PyAPI_FUNC(PyObject *) PyErr_SetFromErrnoWithUnicodeFilename( + PyObject *, const Py_UNICODE *) Py_DEPRECATED(3.3); +#endif /* MS_WINDOWS */ + +PyAPI_FUNC(PyObject *) PyErr_Format( + PyObject *exception, + const char *format, /* ASCII-encoded string */ + ... + ); +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03050000 +PyAPI_FUNC(PyObject *) PyErr_FormatV( + PyObject *exception, + const char *format, + va_list vargs); +#endif + +#ifndef Py_LIMITED_API +/* Like PyErr_Format(), but saves current exception as __context__ and + __cause__. + */ +PyAPI_FUNC(PyObject *) _PyErr_FormatFromCause( + PyObject *exception, + const char *format, /* ASCII-encoded string */ + ... + ); +#endif + +#ifdef MS_WINDOWS +PyAPI_FUNC(PyObject *) PyErr_SetFromWindowsErrWithFilename( + int ierr, + const char *filename /* decoded from the filesystem encoding */ + ); +#ifndef Py_LIMITED_API +/* XXX redeclare to use WSTRING */ +PyAPI_FUNC(PyObject *) PyErr_SetFromWindowsErrWithUnicodeFilename( + int, const Py_UNICODE *) Py_DEPRECATED(3.3); +#endif +PyAPI_FUNC(PyObject *) PyErr_SetFromWindowsErr(int); +PyAPI_FUNC(PyObject *) PyErr_SetExcFromWindowsErrWithFilenameObject( + PyObject *,int, PyObject *); +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03040000 +PyAPI_FUNC(PyObject *) PyErr_SetExcFromWindowsErrWithFilenameObjects( + PyObject *,int, PyObject *, PyObject *); +#endif +PyAPI_FUNC(PyObject *) PyErr_SetExcFromWindowsErrWithFilename( + PyObject *exc, + int ierr, + const char *filename /* decoded from the filesystem encoding */ + ); +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject *) PyErr_SetExcFromWindowsErrWithUnicodeFilename( + PyObject *,int, const Py_UNICODE *) Py_DEPRECATED(3.3); +#endif +PyAPI_FUNC(PyObject *) PyErr_SetExcFromWindowsErr(PyObject *, int); +#endif /* MS_WINDOWS */ + +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03060000 +PyAPI_FUNC(PyObject *) PyErr_SetImportErrorSubclass(PyObject *, PyObject *, + PyObject *, PyObject *); +#endif +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03030000 +PyAPI_FUNC(PyObject *) PyErr_SetImportError(PyObject *, PyObject *, + PyObject *); +#endif + +/* Export the old function so that the existing API remains available: */ +PyAPI_FUNC(void) PyErr_BadInternalCall(void); +PyAPI_FUNC(void) _PyErr_BadInternalCall(const char *filename, int lineno); +/* Mask the old API with a call to the new API for code compiled under + Python 2.0: */ +#define PyErr_BadInternalCall() _PyErr_BadInternalCall(__FILE__, __LINE__) + +/* Function to create a new exception */ 
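The creation functions declared just below are usually paired with the error-raising API above. A rough sketch of the common pattern in an extension module (the module name, exception name, and failure condition are hypothetical):

    static PyObject *SpamError;                      /* hypothetical module exception */

    /* in the module's init function */
    SpamError = PyErr_NewException("spam.SpamError", NULL, NULL);
    if (SpamError == NULL)
        return NULL;                                 /* an exception is already set */

    /* in a module function returning PyObject* */
    if (some_operation_failed) {                     /* hypothetical condition */
        PyErr_SetString(SpamError, "operation failed");
        return NULL;                                 /* NULL signals the pending exception */
    }
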
+PyAPI_FUNC(PyObject *) PyErr_NewException( + const char *name, PyObject *base, PyObject *dict); +PyAPI_FUNC(PyObject *) PyErr_NewExceptionWithDoc( + const char *name, const char *doc, PyObject *base, PyObject *dict); +PyAPI_FUNC(void) PyErr_WriteUnraisable(PyObject *); + +/* In exceptions.c */ +#ifndef Py_LIMITED_API +/* Helper that attempts to replace the current exception with one of the + * same type but with a prefix added to the exception text. The resulting + * exception description looks like: + * + * prefix (exc_type: original_exc_str) + * + * Only some exceptions can be safely replaced. If the function determines + * it isn't safe to perform the replacement, it will leave the original + * unmodified exception in place. + * + * Returns a borrowed reference to the new exception (if any), NULL if the + * existing exception was left in place. + */ +PyAPI_FUNC(PyObject *) _PyErr_TrySetFromCause( + const char *prefix_format, /* ASCII-encoded string */ + ... + ); +#endif + + +/* In signalmodule.c */ +PyAPI_FUNC(int) PyErr_CheckSignals(void); +PyAPI_FUNC(void) PyErr_SetInterrupt(void); + +/* In signalmodule.c */ +#ifndef Py_LIMITED_API +int PySignal_SetWakeupFd(int fd); +#endif + +/* Support for adding program text to SyntaxErrors */ +PyAPI_FUNC(void) PyErr_SyntaxLocation( + const char *filename, /* decoded from the filesystem encoding */ + int lineno); +PyAPI_FUNC(void) PyErr_SyntaxLocationEx( + const char *filename, /* decoded from the filesystem encoding */ + int lineno, + int col_offset); +#ifndef Py_LIMITED_API +PyAPI_FUNC(void) PyErr_SyntaxLocationObject( + PyObject *filename, + int lineno, + int col_offset); +#endif +PyAPI_FUNC(PyObject *) PyErr_ProgramText( + const char *filename, /* decoded from the filesystem encoding */ + int lineno); +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject *) PyErr_ProgramTextObject( + PyObject *filename, + int lineno); +#endif + +/* The following functions are used to create and modify unicode + exceptions from C */ + +/* create a UnicodeDecodeError object */ +PyAPI_FUNC(PyObject *) PyUnicodeDecodeError_Create( + const char *encoding, /* UTF-8 encoded string */ + const char *object, + Py_ssize_t length, + Py_ssize_t start, + Py_ssize_t end, + const char *reason /* UTF-8 encoded string */ + ); + +/* create a UnicodeEncodeError object */ +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject *) PyUnicodeEncodeError_Create( + const char *encoding, /* UTF-8 encoded string */ + const Py_UNICODE *object, + Py_ssize_t length, + Py_ssize_t start, + Py_ssize_t end, + const char *reason /* UTF-8 encoded string */ + ) Py_DEPRECATED(3.3); +#endif + +/* create a UnicodeTranslateError object */ +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject *) PyUnicodeTranslateError_Create( + const Py_UNICODE *object, + Py_ssize_t length, + Py_ssize_t start, + Py_ssize_t end, + const char *reason /* UTF-8 encoded string */ + ) Py_DEPRECATED(3.3); +PyAPI_FUNC(PyObject *) _PyUnicodeTranslateError_Create( + PyObject *object, + Py_ssize_t start, + Py_ssize_t end, + const char *reason /* UTF-8 encoded string */ + ); +#endif + +/* get the encoding attribute */ +PyAPI_FUNC(PyObject *) PyUnicodeEncodeError_GetEncoding(PyObject *); +PyAPI_FUNC(PyObject *) PyUnicodeDecodeError_GetEncoding(PyObject *); + +/* get the object attribute */ +PyAPI_FUNC(PyObject *) PyUnicodeEncodeError_GetObject(PyObject *); +PyAPI_FUNC(PyObject *) PyUnicodeDecodeError_GetObject(PyObject *); +PyAPI_FUNC(PyObject *) PyUnicodeTranslateError_GetObject(PyObject *); + +/* get the value of the start attribute (the int * may not be 
NULL) + return 0 on success, -1 on failure */ +PyAPI_FUNC(int) PyUnicodeEncodeError_GetStart(PyObject *, Py_ssize_t *); +PyAPI_FUNC(int) PyUnicodeDecodeError_GetStart(PyObject *, Py_ssize_t *); +PyAPI_FUNC(int) PyUnicodeTranslateError_GetStart(PyObject *, Py_ssize_t *); + +/* assign a new value to the start attribute + return 0 on success, -1 on failure */ +PyAPI_FUNC(int) PyUnicodeEncodeError_SetStart(PyObject *, Py_ssize_t); +PyAPI_FUNC(int) PyUnicodeDecodeError_SetStart(PyObject *, Py_ssize_t); +PyAPI_FUNC(int) PyUnicodeTranslateError_SetStart(PyObject *, Py_ssize_t); + +/* get the value of the end attribute (the int *may not be NULL) + return 0 on success, -1 on failure */ +PyAPI_FUNC(int) PyUnicodeEncodeError_GetEnd(PyObject *, Py_ssize_t *); +PyAPI_FUNC(int) PyUnicodeDecodeError_GetEnd(PyObject *, Py_ssize_t *); +PyAPI_FUNC(int) PyUnicodeTranslateError_GetEnd(PyObject *, Py_ssize_t *); + +/* assign a new value to the end attribute + return 0 on success, -1 on failure */ +PyAPI_FUNC(int) PyUnicodeEncodeError_SetEnd(PyObject *, Py_ssize_t); +PyAPI_FUNC(int) PyUnicodeDecodeError_SetEnd(PyObject *, Py_ssize_t); +PyAPI_FUNC(int) PyUnicodeTranslateError_SetEnd(PyObject *, Py_ssize_t); + +/* get the value of the reason attribute */ +PyAPI_FUNC(PyObject *) PyUnicodeEncodeError_GetReason(PyObject *); +PyAPI_FUNC(PyObject *) PyUnicodeDecodeError_GetReason(PyObject *); +PyAPI_FUNC(PyObject *) PyUnicodeTranslateError_GetReason(PyObject *); + +/* assign a new value to the reason attribute + return 0 on success, -1 on failure */ +PyAPI_FUNC(int) PyUnicodeEncodeError_SetReason( + PyObject *exc, + const char *reason /* UTF-8 encoded string */ + ); +PyAPI_FUNC(int) PyUnicodeDecodeError_SetReason( + PyObject *exc, + const char *reason /* UTF-8 encoded string */ + ); +PyAPI_FUNC(int) PyUnicodeTranslateError_SetReason( + PyObject *exc, + const char *reason /* UTF-8 encoded string */ + ); + +/* These APIs aren't really part of the error implementation, but + often needed to format error messages; the native C lib APIs are + not available on all platforms, which is why we provide emulations + for those platforms in Python/mysnprintf.c, + WARNING: The return value of snprintf varies across platforms; do + not rely on any particular behavior; eventually the C99 defn may + be reliable. +*/ +#if defined(MS_WIN32) && !defined(HAVE_SNPRINTF) +# define HAVE_SNPRINTF +# define snprintf _snprintf +# define vsnprintf _vsnprintf +#endif + +#include +PyAPI_FUNC(int) PyOS_snprintf(char *str, size_t size, const char *format, ...) + Py_GCC_ATTRIBUTE((format(printf, 3, 4))); +PyAPI_FUNC(int) PyOS_vsnprintf(char *str, size_t size, const char *format, va_list va) + Py_GCC_ATTRIBUTE((format(printf, 3, 0))); + +#ifdef __cplusplus +} +#endif +#endif /* !Py_ERRORS_H */ diff --git a/venv/Include/pyexpat.h b/venv/Include/pyexpat.h new file mode 100644 index 00000000..44259bf6 --- /dev/null +++ b/venv/Include/pyexpat.h @@ -0,0 +1,53 @@ +/* Stuff to export relevant 'expat' entry points from pyexpat to other + * parser modules, such as cElementTree. */ + +/* note: you must import expat.h before importing this module! */ + +#define PyExpat_CAPI_MAGIC "pyexpat.expat_CAPI 1.0" +#define PyExpat_CAPSULE_NAME "pyexpat.expat_CAPI" + +struct PyExpat_CAPI +{ + char* magic; /* set to PyExpat_CAPI_MAGIC */ + int size; /* set to sizeof(struct PyExpat_CAPI) */ + int MAJOR_VERSION; + int MINOR_VERSION; + int MICRO_VERSION; + /* pointers to selected expat functions. 
add new functions at + the end, if needed */ + const XML_LChar * (*ErrorString)(enum XML_Error code); + enum XML_Error (*GetErrorCode)(XML_Parser parser); + XML_Size (*GetErrorColumnNumber)(XML_Parser parser); + XML_Size (*GetErrorLineNumber)(XML_Parser parser); + enum XML_Status (*Parse)( + XML_Parser parser, const char *s, int len, int isFinal); + XML_Parser (*ParserCreate_MM)( + const XML_Char *encoding, const XML_Memory_Handling_Suite *memsuite, + const XML_Char *namespaceSeparator); + void (*ParserFree)(XML_Parser parser); + void (*SetCharacterDataHandler)( + XML_Parser parser, XML_CharacterDataHandler handler); + void (*SetCommentHandler)( + XML_Parser parser, XML_CommentHandler handler); + void (*SetDefaultHandlerExpand)( + XML_Parser parser, XML_DefaultHandler handler); + void (*SetElementHandler)( + XML_Parser parser, XML_StartElementHandler start, + XML_EndElementHandler end); + void (*SetNamespaceDeclHandler)( + XML_Parser parser, XML_StartNamespaceDeclHandler start, + XML_EndNamespaceDeclHandler end); + void (*SetProcessingInstructionHandler)( + XML_Parser parser, XML_ProcessingInstructionHandler handler); + void (*SetUnknownEncodingHandler)( + XML_Parser parser, XML_UnknownEncodingHandler handler, + void *encodingHandlerData); + void (*SetUserData)(XML_Parser parser, void *userData); + void (*SetStartDoctypeDeclHandler)(XML_Parser parser, + XML_StartDoctypeDeclHandler start); + enum XML_Status (*SetEncoding)(XML_Parser parser, const XML_Char *encoding); + int (*DefaultUnknownEncodingHandler)( + void *encodingHandlerData, const XML_Char *name, XML_Encoding *info); + /* always add new stuff to the end! */ +}; + diff --git a/venv/Include/pyfpe.h b/venv/Include/pyfpe.h new file mode 100644 index 00000000..5a99e397 --- /dev/null +++ b/venv/Include/pyfpe.h @@ -0,0 +1,12 @@ +#ifndef Py_PYFPE_H +#define Py_PYFPE_H + +/* These macros used to do something when Python was built with --with-fpectl, + * but support for that was dropped in 3.7. We continue to define them though, + * to avoid breaking API users. + */ + +#define PyFPE_START_PROTECT(err_string, leave_stmt) +#define PyFPE_END_PROTECT(v) + +#endif /* !Py_PYFPE_H */ diff --git a/venv/Include/pyhash.h b/venv/Include/pyhash.h new file mode 100644 index 00000000..9cfd071e --- /dev/null +++ b/venv/Include/pyhash.h @@ -0,0 +1,145 @@ +#ifndef Py_HASH_H + +#define Py_HASH_H +#ifdef __cplusplus +extern "C" { +#endif + +/* Helpers for hash functions */ +#ifndef Py_LIMITED_API +PyAPI_FUNC(Py_hash_t) _Py_HashDouble(double); +PyAPI_FUNC(Py_hash_t) _Py_HashPointer(void*); +PyAPI_FUNC(Py_hash_t) _Py_HashBytes(const void*, Py_ssize_t); +#endif + +/* Prime multiplier used in string and various other hashes. */ +#define _PyHASH_MULTIPLIER 1000003UL /* 0xf4243 */ + +/* Parameters used for the numeric hash implementation. See notes for + _Py_HashDouble in Python/pyhash.c. Numeric hashes are based on + reduction modulo the prime 2**_PyHASH_BITS - 1. */ + +#if SIZEOF_VOID_P >= 8 +# define _PyHASH_BITS 61 +#else +# define _PyHASH_BITS 31 +#endif + +#define _PyHASH_MODULUS (((size_t)1 << _PyHASH_BITS) - 1) +#define _PyHASH_INF 314159 +#define _PyHASH_NAN 0 +#define _PyHASH_IMAG _PyHASH_MULTIPLIER + + +/* hash secret + * + * memory layout on 64 bit systems + * cccccccc cccccccc cccccccc uc -- unsigned char[24] + * pppppppp ssssssss ........ fnv -- two Py_hash_t + * k0k0k0k0 k1k1k1k1 ........ siphash -- two uint64_t + * ........ ........ ssssssss djbx33a -- 16 bytes padding + one Py_hash_t + * ........ ........ 
eeeeeeee pyexpat XML hash salt + * + * memory layout on 32 bit systems + * cccccccc cccccccc cccccccc uc + * ppppssss ........ ........ fnv -- two Py_hash_t + * k0k0k0k0 k1k1k1k1 ........ siphash -- two uint64_t (*) + * ........ ........ ssss.... djbx33a -- 16 bytes padding + one Py_hash_t + * ........ ........ eeee.... pyexpat XML hash salt + * + * (*) The siphash member may not be available on 32 bit platforms without + * an unsigned int64 data type. + */ +#ifndef Py_LIMITED_API +typedef union { + /* ensure 24 bytes */ + unsigned char uc[24]; + /* two Py_hash_t for FNV */ + struct { + Py_hash_t prefix; + Py_hash_t suffix; + } fnv; + /* two uint64 for SipHash24 */ + struct { + uint64_t k0; + uint64_t k1; + } siphash; + /* a different (!) Py_hash_t for small string optimization */ + struct { + unsigned char padding[16]; + Py_hash_t suffix; + } djbx33a; + struct { + unsigned char padding[16]; + Py_hash_t hashsalt; + } expat; +} _Py_HashSecret_t; +PyAPI_DATA(_Py_HashSecret_t) _Py_HashSecret; +#endif + +#ifdef Py_DEBUG +PyAPI_DATA(int) _Py_HashSecret_Initialized; +#endif + + +/* hash function definition */ +#ifndef Py_LIMITED_API +typedef struct { + Py_hash_t (*const hash)(const void *, Py_ssize_t); + const char *name; + const int hash_bits; + const int seed_bits; +} PyHash_FuncDef; + +PyAPI_FUNC(PyHash_FuncDef*) PyHash_GetFuncDef(void); +#endif + + +/* cutoff for small string DJBX33A optimization in range [1, cutoff). + * + * About 50% of the strings in a typical Python application are smaller than + * 6 to 7 chars. However DJBX33A is vulnerable to hash collision attacks. + * NEVER use DJBX33A for long strings! + * + * A Py_HASH_CUTOFF of 0 disables small string optimization. 32 bit platforms + * should use a smaller cutoff because it is easier to create colliding + * strings. A cutoff of 7 on 64bit platforms and 5 on 32bit platforms should + * provide a decent safety margin. + */ +#ifndef Py_HASH_CUTOFF +# define Py_HASH_CUTOFF 0 +#elif (Py_HASH_CUTOFF > 7 || Py_HASH_CUTOFF < 0) +# error Py_HASH_CUTOFF must in range 0...7. +#endif /* Py_HASH_CUTOFF */ + + +/* hash algorithm selection + * + * The values for Py_HASH_SIPHASH24 and Py_HASH_FNV are hard-coded in the + * configure script. + * + * - FNV is available on all platforms and architectures. + * - SIPHASH24 only works on plaforms that don't require aligned memory for integers. + * - With EXTERNAL embedders can provide an alternative implementation with:: + * + * PyHash_FuncDef PyHash_Func = {...}; + * + * XXX: Figure out __declspec() for extern PyHash_FuncDef. 
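To make the Py_HASH_EXTERNAL option described above concrete: the embedder supplies the PyHash_Func definition mentioned in the comment, filling in the PyHash_FuncDef fields declared earlier in this header. A sketch only; the hash routine body and the bit counts are placeholders, not a recommendation:

    static Py_hash_t my_hash(const void *src, Py_ssize_t len)
    {
        /* ... hash 'len' bytes starting at 'src' ... */
        return (Py_hash_t)len;                /* placeholder body only */
    }

    PyHash_FuncDef PyHash_Func = { my_hash, "myhash", 64, 128 };
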
+ */ +#define Py_HASH_EXTERNAL 0 +#define Py_HASH_SIPHASH24 1 +#define Py_HASH_FNV 2 + +#ifndef Py_HASH_ALGORITHM +# ifndef HAVE_ALIGNED_REQUIRED +# define Py_HASH_ALGORITHM Py_HASH_SIPHASH24 +# else +# define Py_HASH_ALGORITHM Py_HASH_FNV +# endif /* uint64_t && uint32_t && aligned */ +#endif /* Py_HASH_ALGORITHM */ + +#ifdef __cplusplus +} +#endif + +#endif /* !Py_HASH_H */ diff --git a/venv/Include/pylifecycle.h b/venv/Include/pylifecycle.h new file mode 100644 index 00000000..659c6df6 --- /dev/null +++ b/venv/Include/pylifecycle.h @@ -0,0 +1,214 @@ + +/* Interfaces to configure, query, create & destroy the Python runtime */ + +#ifndef Py_PYLIFECYCLE_H +#define Py_PYLIFECYCLE_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_LIMITED_API +typedef struct { + const char *prefix; + const char *msg; + int user_err; +} _PyInitError; + +/* Almost all errors causing Python initialization to fail */ +#ifdef _MSC_VER + /* Visual Studio 2015 doesn't implement C99 __func__ in C */ +# define _Py_INIT_GET_FUNC() __FUNCTION__ +#else +# define _Py_INIT_GET_FUNC() __func__ +#endif + +#define _Py_INIT_OK() \ + (_PyInitError){.prefix = NULL, .msg = NULL, .user_err = 0} +#define _Py_INIT_ERR(MSG) \ + (_PyInitError){.prefix = _Py_INIT_GET_FUNC(), .msg = (MSG), .user_err = 0} +/* Error that can be fixed by the user like invalid input parameter. + Don't abort() the process on such error. */ +#define _Py_INIT_USER_ERR(MSG) \ + (_PyInitError){.prefix = _Py_INIT_GET_FUNC(), .msg = (MSG), .user_err = 1} +#define _Py_INIT_NO_MEMORY() _Py_INIT_USER_ERR("memory allocation failed") +#define _Py_INIT_FAILED(err) \ + (err.msg != NULL) + +#endif + + +PyAPI_FUNC(void) Py_SetProgramName(const wchar_t *); +PyAPI_FUNC(wchar_t *) Py_GetProgramName(void); + +PyAPI_FUNC(void) Py_SetPythonHome(const wchar_t *); +PyAPI_FUNC(wchar_t *) Py_GetPythonHome(void); + +#ifndef Py_LIMITED_API +/* Only used by applications that embed the interpreter and need to + * override the standard encoding determination mechanism + */ +PyAPI_FUNC(int) Py_SetStandardStreamEncoding(const char *encoding, + const char *errors); + +/* PEP 432 Multi-phase initialization API (Private while provisional!) 
*/ +PyAPI_FUNC(_PyInitError) _Py_InitializeCore(const _PyCoreConfig *); +PyAPI_FUNC(int) _Py_IsCoreInitialized(void); + +PyAPI_FUNC(_PyInitError) _PyCoreConfig_Read(_PyCoreConfig *); +PyAPI_FUNC(void) _PyCoreConfig_Clear(_PyCoreConfig *); +PyAPI_FUNC(int) _PyCoreConfig_Copy( + _PyCoreConfig *config, + const _PyCoreConfig *config2); + +PyAPI_FUNC(_PyInitError) _PyMainInterpreterConfig_Read( + _PyMainInterpreterConfig *config, + const _PyCoreConfig *core_config); +PyAPI_FUNC(void) _PyMainInterpreterConfig_Clear(_PyMainInterpreterConfig *); +PyAPI_FUNC(int) _PyMainInterpreterConfig_Copy( + _PyMainInterpreterConfig *config, + const _PyMainInterpreterConfig *config2); + +PyAPI_FUNC(_PyInitError) _Py_InitializeMainInterpreter(const _PyMainInterpreterConfig *); +#endif + +/* Initialization and finalization */ +PyAPI_FUNC(void) Py_Initialize(void); +PyAPI_FUNC(void) Py_InitializeEx(int); +#ifndef Py_LIMITED_API +PyAPI_FUNC(_PyInitError) _Py_InitializeEx_Private(int, int); +PyAPI_FUNC(void) _Py_FatalInitError(_PyInitError err) _Py_NO_RETURN; +#endif +PyAPI_FUNC(void) Py_Finalize(void); +PyAPI_FUNC(int) Py_FinalizeEx(void); +PyAPI_FUNC(int) Py_IsInitialized(void); + +/* Subinterpreter support */ +PyAPI_FUNC(PyThreadState *) Py_NewInterpreter(void); +PyAPI_FUNC(void) Py_EndInterpreter(PyThreadState *); + + +/* Py_PyAtExit is for the atexit module, Py_AtExit is for low-level + * exit functions. + */ +#ifndef Py_LIMITED_API +PyAPI_FUNC(void) _Py_PyAtExit(void (*func)(PyObject *), PyObject *); +#endif +PyAPI_FUNC(int) Py_AtExit(void (*func)(void)); + +PyAPI_FUNC(void) Py_Exit(int) _Py_NO_RETURN; + +/* Restore signals that the interpreter has called SIG_IGN on to SIG_DFL. */ +#ifndef Py_LIMITED_API +PyAPI_FUNC(void) _Py_RestoreSignals(void); + +PyAPI_FUNC(int) Py_FdIsInteractive(FILE *, const char *); +#endif + +/* Bootstrap __main__ (defined in Modules/main.c) */ +PyAPI_FUNC(int) Py_Main(int argc, wchar_t **argv); +#ifdef Py_BUILD_CORE +PyAPI_FUNC(int) _Py_UnixMain(int argc, char **argv); +#endif + +/* In getpath.c */ +PyAPI_FUNC(wchar_t *) Py_GetProgramFullPath(void); +PyAPI_FUNC(wchar_t *) Py_GetPrefix(void); +PyAPI_FUNC(wchar_t *) Py_GetExecPrefix(void); +PyAPI_FUNC(wchar_t *) Py_GetPath(void); +#ifdef Py_BUILD_CORE +PyAPI_FUNC(_PyInitError) _PyPathConfig_Init(const _PyCoreConfig *core_config); +PyAPI_FUNC(PyObject*) _PyPathConfig_ComputeArgv0(int argc, wchar_t **argv); +PyAPI_FUNC(int) _Py_FindEnvConfigValue( + FILE *env_file, + const wchar_t *key, + wchar_t *value, + size_t value_size); +#endif +PyAPI_FUNC(void) Py_SetPath(const wchar_t *); +#ifdef MS_WINDOWS +int _Py_CheckPython3(void); +#endif + +/* In their own files */ +PyAPI_FUNC(const char *) Py_GetVersion(void); +PyAPI_FUNC(const char *) Py_GetPlatform(void); +PyAPI_FUNC(const char *) Py_GetCopyright(void); +PyAPI_FUNC(const char *) Py_GetCompiler(void); +PyAPI_FUNC(const char *) Py_GetBuildInfo(void); +#ifndef Py_LIMITED_API +PyAPI_FUNC(const char *) _Py_gitidentifier(void); +PyAPI_FUNC(const char *) _Py_gitversion(void); +#endif + +/* Internal -- various one-time initializations */ +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject *) _PyBuiltin_Init(void); +PyAPI_FUNC(_PyInitError) _PySys_BeginInit(PyObject **sysmod); +PyAPI_FUNC(int) _PySys_EndInit(PyObject *sysdict, _PyMainInterpreterConfig *config); +PyAPI_FUNC(_PyInitError) _PyImport_Init(PyInterpreterState *interp); +PyAPI_FUNC(void) _PyExc_Init(PyObject * bltinmod); +PyAPI_FUNC(_PyInitError) _PyImportHooks_Init(void); +PyAPI_FUNC(int) _PyFrame_Init(void); +PyAPI_FUNC(int) 
_PyFloat_Init(void); +PyAPI_FUNC(int) PyByteArray_Init(void); +PyAPI_FUNC(_PyInitError) _Py_HashRandomization_Init(const _PyCoreConfig *); +#endif +#ifdef Py_BUILD_CORE +PyAPI_FUNC(int) _Py_ReadHashSeed( + const char *seed_text, + int *use_hash_seed, + unsigned long *hash_seed); +#endif + +/* Various internal finalizers */ + +#ifdef Py_BUILD_CORE +PyAPI_FUNC(void) _PyExc_Fini(void); +PyAPI_FUNC(void) _PyImport_Fini(void); +PyAPI_FUNC(void) _PyImport_Fini2(void); +PyAPI_FUNC(void) _PyGC_DumpShutdownStats(void); +PyAPI_FUNC(void) _PyGC_Fini(void); +PyAPI_FUNC(void) _PyType_Fini(void); +PyAPI_FUNC(void) _Py_HashRandomization_Fini(void); +#endif /* Py_BUILD_CORE */ + +#ifndef Py_LIMITED_API +PyAPI_FUNC(void) PyMethod_Fini(void); +PyAPI_FUNC(void) PyFrame_Fini(void); +PyAPI_FUNC(void) PyCFunction_Fini(void); +PyAPI_FUNC(void) PyDict_Fini(void); +PyAPI_FUNC(void) PyTuple_Fini(void); +PyAPI_FUNC(void) PyList_Fini(void); +PyAPI_FUNC(void) PySet_Fini(void); +PyAPI_FUNC(void) PyBytes_Fini(void); +PyAPI_FUNC(void) PyByteArray_Fini(void); +PyAPI_FUNC(void) PyFloat_Fini(void); +PyAPI_FUNC(void) PyOS_FiniInterrupts(void); +PyAPI_FUNC(void) PySlice_Fini(void); +PyAPI_FUNC(void) PyAsyncGen_Fini(void); + +PyAPI_FUNC(int) _Py_IsFinalizing(void); +#endif /* !Py_LIMITED_API */ + +/* Signals */ +typedef void (*PyOS_sighandler_t)(int); +PyAPI_FUNC(PyOS_sighandler_t) PyOS_getsig(int); +PyAPI_FUNC(PyOS_sighandler_t) PyOS_setsig(int, PyOS_sighandler_t); + +#ifndef Py_LIMITED_API +/* Random */ +PyAPI_FUNC(int) _PyOS_URandom(void *buffer, Py_ssize_t size); +PyAPI_FUNC(int) _PyOS_URandomNonblock(void *buffer, Py_ssize_t size); +#endif /* !Py_LIMITED_API */ + +/* Legacy locale support */ +#ifndef Py_LIMITED_API +PyAPI_FUNC(void) _Py_CoerceLegacyLocale(const _PyCoreConfig *config); +PyAPI_FUNC(int) _Py_LegacyLocaleDetected(void); +PyAPI_FUNC(char *) _Py_SetLocaleFromEnv(int category); +#endif + +#ifdef __cplusplus +} +#endif +#endif /* !Py_PYLIFECYCLE_H */ diff --git a/venv/Include/pymacconfig.h b/venv/Include/pymacconfig.h new file mode 100644 index 00000000..9dde11bd --- /dev/null +++ b/venv/Include/pymacconfig.h @@ -0,0 +1,102 @@ +#ifndef PYMACCONFIG_H +#define PYMACCONFIG_H + /* + * This file moves some of the autoconf magic to compile-time + * when building on MacOSX. This is needed for building 4-way + * universal binaries and for 64-bit universal binaries because + * the values redefined below aren't configure-time constant but + * only compile-time constant in these scenarios. 
+ */ + +#if defined(__APPLE__) + +# undef SIZEOF_LONG +# undef SIZEOF_PTHREAD_T +# undef SIZEOF_SIZE_T +# undef SIZEOF_TIME_T +# undef SIZEOF_VOID_P +# undef SIZEOF__BOOL +# undef SIZEOF_UINTPTR_T +# undef SIZEOF_PTHREAD_T +# undef WORDS_BIGENDIAN +# undef DOUBLE_IS_ARM_MIXED_ENDIAN_IEEE754 +# undef DOUBLE_IS_BIG_ENDIAN_IEEE754 +# undef DOUBLE_IS_LITTLE_ENDIAN_IEEE754 +# undef HAVE_GCC_ASM_FOR_X87 + +# undef VA_LIST_IS_ARRAY +# if defined(__LP64__) && defined(__x86_64__) +# define VA_LIST_IS_ARRAY 1 +# endif + +# undef HAVE_LARGEFILE_SUPPORT +# ifndef __LP64__ +# define HAVE_LARGEFILE_SUPPORT 1 +# endif + +# undef SIZEOF_LONG +# ifdef __LP64__ +# define SIZEOF__BOOL 1 +# define SIZEOF__BOOL 1 +# define SIZEOF_LONG 8 +# define SIZEOF_PTHREAD_T 8 +# define SIZEOF_SIZE_T 8 +# define SIZEOF_TIME_T 8 +# define SIZEOF_VOID_P 8 +# define SIZEOF_UINTPTR_T 8 +# define SIZEOF_PTHREAD_T 8 +# else +# ifdef __ppc__ +# define SIZEOF__BOOL 4 +# else +# define SIZEOF__BOOL 1 +# endif +# define SIZEOF_LONG 4 +# define SIZEOF_PTHREAD_T 4 +# define SIZEOF_SIZE_T 4 +# define SIZEOF_TIME_T 4 +# define SIZEOF_VOID_P 4 +# define SIZEOF_UINTPTR_T 4 +# define SIZEOF_PTHREAD_T 4 +# endif + +# if defined(__LP64__) + /* MacOSX 10.4 (the first release to support 64-bit code + * at all) only supports 64-bit in the UNIX layer. + * Therefore suppress the toolbox-glue in 64-bit mode. + */ + + /* In 64-bit mode setpgrp always has no arguments, in 32-bit + * mode that depends on the compilation environment + */ +# undef SETPGRP_HAVE_ARG + +# endif + +#ifdef __BIG_ENDIAN__ +#define WORDS_BIGENDIAN 1 +#define DOUBLE_IS_BIG_ENDIAN_IEEE754 +#else +#define DOUBLE_IS_LITTLE_ENDIAN_IEEE754 +#endif /* __BIG_ENDIAN */ + +#ifdef __i386__ +# define HAVE_GCC_ASM_FOR_X87 +#endif + + /* + * The definition in pyconfig.h is only valid on the OS release + * where configure ran on and not necessarily for all systems where + * the executable can be used on. + * + * Specifically: OSX 10.4 has limited supported for '%zd', while + * 10.5 has full support for '%zd'. A binary built on 10.5 won't + * work properly on 10.4 unless we suppress the definition + * of PY_FORMAT_SIZE_T + */ +#undef PY_FORMAT_SIZE_T + + +#endif /* defined(_APPLE__) */ + +#endif /* PYMACCONFIG_H */ diff --git a/venv/Include/pymacro.h b/venv/Include/pymacro.h new file mode 100644 index 00000000..3f6ddbe9 --- /dev/null +++ b/venv/Include/pymacro.h @@ -0,0 +1,100 @@ +#ifndef Py_PYMACRO_H +#define Py_PYMACRO_H + +/* Minimum value between x and y */ +#define Py_MIN(x, y) (((x) > (y)) ? (y) : (x)) + +/* Maximum value between x and y */ +#define Py_MAX(x, y) (((x) > (y)) ? (x) : (y)) + +/* Absolute value of the number x */ +#define Py_ABS(x) ((x) < 0 ? -(x) : (x)) + +#define _Py_XSTRINGIFY(x) #x + +/* Convert the argument to a string. For example, Py_STRINGIFY(123) is replaced + with "123" by the preprocessor. Defines are also replaced by their value. + For example Py_STRINGIFY(__LINE__) is replaced by the line number, not + by "__LINE__". */ +#define Py_STRINGIFY(x) _Py_XSTRINGIFY(x) + +/* Get the size of a structure member in bytes */ +#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) + +/* Argument must be a char or an int in [-128, 127] or [0, 255]. */ +#define Py_CHARMASK(c) ((unsigned char)((c) & 0xff)) + +/* Assert a build-time dependency, as an expression. + + Your compile will fail if the condition isn't true, or can't be evaluated + by the compiler. This can be used in an expression: its value is 0. 
+ + Example: + + #define foo_to_char(foo) \ + ((char *)(foo) \ + + Py_BUILD_ASSERT_EXPR(offsetof(struct foo, string) == 0)) + + Written by Rusty Russell, public domain, http://ccodearchive.net/ */ +#define Py_BUILD_ASSERT_EXPR(cond) \ + (sizeof(char [1 - 2*!(cond)]) - 1) + +#define Py_BUILD_ASSERT(cond) do { \ + (void)Py_BUILD_ASSERT_EXPR(cond); \ + } while(0) + +/* Get the number of elements in a visible array + + This does not work on pointers, or arrays declared as [], or function + parameters. With correct compiler support, such usage will cause a build + error (see Py_BUILD_ASSERT_EXPR). + + Written by Rusty Russell, public domain, http://ccodearchive.net/ + + Requires at GCC 3.1+ */ +#if (defined(__GNUC__) && !defined(__STRICT_ANSI__) && \ + (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1)) || (__GNUC__ >= 4))) +/* Two gcc extensions. + &a[0] degrades to a pointer: a different type from an array */ +#define Py_ARRAY_LENGTH(array) \ + (sizeof(array) / sizeof((array)[0]) \ + + Py_BUILD_ASSERT_EXPR(!__builtin_types_compatible_p(typeof(array), \ + typeof(&(array)[0])))) +#else +#define Py_ARRAY_LENGTH(array) \ + (sizeof(array) / sizeof((array)[0])) +#endif + + +/* Define macros for inline documentation. */ +#define PyDoc_VAR(name) static char name[] +#define PyDoc_STRVAR(name,str) PyDoc_VAR(name) = PyDoc_STR(str) +#ifdef WITH_DOC_STRINGS +#define PyDoc_STR(str) str +#else +#define PyDoc_STR(str) "" +#endif + +/* Below "a" is a power of 2. */ +/* Round down size "n" to be a multiple of "a". */ +#define _Py_SIZE_ROUND_DOWN(n, a) ((size_t)(n) & ~(size_t)((a) - 1)) +/* Round up size "n" to be a multiple of "a". */ +#define _Py_SIZE_ROUND_UP(n, a) (((size_t)(n) + \ + (size_t)((a) - 1)) & ~(size_t)((a) - 1)) +/* Round pointer "p" down to the closest "a"-aligned address <= "p". */ +#define _Py_ALIGN_DOWN(p, a) ((void *)((uintptr_t)(p) & ~(uintptr_t)((a) - 1))) +/* Round pointer "p" up to the closest "a"-aligned address >= "p". */ +#define _Py_ALIGN_UP(p, a) ((void *)(((uintptr_t)(p) + \ + (uintptr_t)((a) - 1)) & ~(uintptr_t)((a) - 1))) +/* Check if pointer "p" is aligned to "a"-bytes boundary. */ +#define _Py_IS_ALIGNED(p, a) (!((uintptr_t)(p) & (uintptr_t)((a) - 1))) + +#ifdef __GNUC__ +#define Py_UNUSED(name) _unused_ ## name __attribute__((unused)) +#else +#define Py_UNUSED(name) _unused_ ## name +#endif + +#define Py_UNREACHABLE() abort() + +#endif /* Py_PYMACRO_H */ diff --git a/venv/Include/pymath.h b/venv/Include/pymath.h new file mode 100644 index 00000000..6cf69f98 --- /dev/null +++ b/venv/Include/pymath.h @@ -0,0 +1,230 @@ +#ifndef Py_PYMATH_H +#define Py_PYMATH_H + +#include "pyconfig.h" /* include for defines */ + +/************************************************************************** +Symbols and macros to supply platform-independent interfaces to mathematical +functions and constants +**************************************************************************/ + +/* Python provides implementations for copysign, round and hypot in + * Python/pymath.c just in case your math library doesn't provide the + * functions. 
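As a quick illustration of the contract those fallbacks have to honour (expected results shown as comments; a sketch, not a test suite):

    copysign(3.0, -0.0);     /* -3.0: magnitude of the first argument, sign of the second */
    copysign(-3.0, 2.0);     /*  3.0 */
    round(2.5);              /*  3.0: halfway cases round away from zero */
    hypot(3.0, 4.0);         /*  5.0 */
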
+ * + *Note: PC/pyconfig.h defines copysign as _copysign + */ +#ifndef HAVE_COPYSIGN +extern double copysign(double, double); +#endif + +#ifndef HAVE_ROUND +extern double round(double); +#endif + +#ifndef HAVE_HYPOT +extern double hypot(double, double); +#endif + +/* extra declarations */ +#ifndef _MSC_VER +#ifndef __STDC__ +extern double fmod (double, double); +extern double frexp (double, int *); +extern double ldexp (double, int); +extern double modf (double, double *); +extern double pow(double, double); +#endif /* __STDC__ */ +#endif /* _MSC_VER */ + +/* High precision definition of pi and e (Euler) + * The values are taken from libc6's math.h. + */ +#ifndef Py_MATH_PIl +#define Py_MATH_PIl 3.1415926535897932384626433832795029L +#endif +#ifndef Py_MATH_PI +#define Py_MATH_PI 3.14159265358979323846 +#endif + +#ifndef Py_MATH_El +#define Py_MATH_El 2.7182818284590452353602874713526625L +#endif + +#ifndef Py_MATH_E +#define Py_MATH_E 2.7182818284590452354 +#endif + +/* Tau (2pi) to 40 digits, taken from tauday.com/tau-digits. */ +#ifndef Py_MATH_TAU +#define Py_MATH_TAU 6.2831853071795864769252867665590057683943L +#endif + + +/* On x86, Py_FORCE_DOUBLE forces a floating-point number out of an x87 FPU + register and into a 64-bit memory location, rounding from extended + precision to double precision in the process. On other platforms it does + nothing. */ + +/* we take double rounding as evidence of x87 usage */ +#ifndef Py_LIMITED_API +#ifndef Py_FORCE_DOUBLE +# ifdef X87_DOUBLE_ROUNDING +PyAPI_FUNC(double) _Py_force_double(double); +# define Py_FORCE_DOUBLE(X) (_Py_force_double(X)) +# else +# define Py_FORCE_DOUBLE(X) (X) +# endif +#endif +#endif + +#ifndef Py_LIMITED_API +#ifdef HAVE_GCC_ASM_FOR_X87 +PyAPI_FUNC(unsigned short) _Py_get_387controlword(void); +PyAPI_FUNC(void) _Py_set_387controlword(unsigned short); +#endif +#endif + +/* Py_IS_NAN(X) + * Return 1 if float or double arg is a NaN, else 0. + * Caution: + * X is evaluated more than once. + * This may not work on all platforms. Each platform has *some* + * way to spell this, though -- override in pyconfig.h if you have + * a platform where it doesn't work. + * Note: PC/pyconfig.h defines Py_IS_NAN as _isnan + */ +#ifndef Py_IS_NAN +#if defined HAVE_DECL_ISNAN && HAVE_DECL_ISNAN == 1 +#define Py_IS_NAN(X) isnan(X) +#else +#define Py_IS_NAN(X) ((X) != (X)) +#endif +#endif + +/* Py_IS_INFINITY(X) + * Return 1 if float or double arg is an infinity, else 0. + * Caution: + * X is evaluated more than once. + * This implementation may set the underflow flag if |X| is very small; + * it really can't be implemented correctly (& easily) before C99. + * Override in pyconfig.h if you have a better spelling on your platform. + * Py_FORCE_DOUBLE is used to avoid getting false negatives from a + * non-infinite value v sitting in an 80-bit x87 register such that + * v becomes infinite when spilled from the register to 64-bit memory. + * Note: PC/pyconfig.h defines Py_IS_INFINITY as _isinf + */ +#ifndef Py_IS_INFINITY +# if defined HAVE_DECL_ISINF && HAVE_DECL_ISINF == 1 +# define Py_IS_INFINITY(X) isinf(X) +# else +# define Py_IS_INFINITY(X) ((X) && \ + (Py_FORCE_DOUBLE(X)*0.5 == Py_FORCE_DOUBLE(X))) +# endif +#endif + +/* Py_IS_FINITE(X) + * Return 1 if float or double arg is neither infinite nor NAN, else 0. + * Some compilers (e.g. 
VisualStudio) have intrisics for this, so a special + * macro for this particular test is useful + * Note: PC/pyconfig.h defines Py_IS_FINITE as _finite + */ +#ifndef Py_IS_FINITE +#if defined HAVE_DECL_ISFINITE && HAVE_DECL_ISFINITE == 1 +#define Py_IS_FINITE(X) isfinite(X) +#elif defined HAVE_FINITE +#define Py_IS_FINITE(X) finite(X) +#else +#define Py_IS_FINITE(X) (!Py_IS_INFINITY(X) && !Py_IS_NAN(X)) +#endif +#endif + +/* HUGE_VAL is supposed to expand to a positive double infinity. Python + * uses Py_HUGE_VAL instead because some platforms are broken in this + * respect. We used to embed code in pyport.h to try to worm around that, + * but different platforms are broken in conflicting ways. If you're on + * a platform where HUGE_VAL is defined incorrectly, fiddle your Python + * config to #define Py_HUGE_VAL to something that works on your platform. + */ +#ifndef Py_HUGE_VAL +#define Py_HUGE_VAL HUGE_VAL +#endif + +/* Py_NAN + * A value that evaluates to a NaN. On IEEE 754 platforms INF*0 or + * INF/INF works. Define Py_NO_NAN in pyconfig.h if your platform + * doesn't support NaNs. + */ +#if !defined(Py_NAN) && !defined(Py_NO_NAN) +#if !defined(__INTEL_COMPILER) + #define Py_NAN (Py_HUGE_VAL * 0.) +#else /* __INTEL_COMPILER */ + #if defined(ICC_NAN_STRICT) + #pragma float_control(push) + #pragma float_control(precise, on) + #pragma float_control(except, on) + #if defined(_MSC_VER) + __declspec(noinline) + #else /* Linux */ + __attribute__((noinline)) + #endif /* _MSC_VER */ + static double __icc_nan() + { + return sqrt(-1.0); + } + #pragma float_control (pop) + #define Py_NAN __icc_nan() + #else /* ICC_NAN_RELAXED as default for Intel Compiler */ + static const union { unsigned char buf[8]; double __icc_nan; } __nan_store = {0,0,0,0,0,0,0xf8,0x7f}; + #define Py_NAN (__nan_store.__icc_nan) + #endif /* ICC_NAN_STRICT */ +#endif /* __INTEL_COMPILER */ +#endif + +/* Py_OVERFLOWED(X) + * Return 1 iff a libm function overflowed. Set errno to 0 before calling + * a libm function, and invoke this macro after, passing the function + * result. + * Caution: + * This isn't reliable. C99 no longer requires libm to set errno under + * any exceptional condition, but does require +- HUGE_VAL return + * values on overflow. A 754 box *probably* maps HUGE_VAL to a + * double infinity, and we're cool if that's so, unless the input + * was an infinity and an infinity is the expected result. A C89 + * system sets errno to ERANGE, so we check for that too. We're + * out of luck if a C99 754 box doesn't map HUGE_VAL to +Inf, or + * if the returned result is a NaN, or if a C89 box returns HUGE_VAL + * in non-overflow cases. + * X is evaluated more than once. + * Some platforms have better way to spell this, so expect some #ifdef'ery. + * + * OpenBSD uses 'isinf()' because a compiler bug on that platform causes + * the longer macro version to be mis-compiled. This isn't optimal, and + * should be removed once a newer compiler is available on that platform. + * The system that had the failure was running OpenBSD 3.2 on Intel, with + * gcc 2.95.3. + * + * According to Tim's checkin, the FreeBSD systems use isinf() to work + * around a FPE bug on that platform. + */ +#if defined(__FreeBSD__) || defined(__OpenBSD__) +#define Py_OVERFLOWED(X) isinf(X) +#else +#define Py_OVERFLOWED(X) ((X) != 0.0 && (errno == ERANGE || \ + (X) == Py_HUGE_VAL || \ + (X) == -Py_HUGE_VAL)) +#endif + +/* Return whether integral type *type* is signed or not. 
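Two concrete evaluations of the signedness test documented here (and defined just below), shown purely for illustration:

    _Py_IntegralTypeSigned(int);       /* 1: (int)(-1) < 0 */
    _Py_IntegralTypeSigned(size_t);    /* 0: (size_t)(-1) is a huge positive value */
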
*/ +#define _Py_IntegralTypeSigned(type) ((type)(-1) < 0) +/* Return the maximum value of integral type *type*. */ +#define _Py_IntegralTypeMax(type) ((_Py_IntegralTypeSigned(type)) ? (((((type)1 << (sizeof(type)*CHAR_BIT - 2)) - 1) << 1) + 1) : ~(type)0) +/* Return the minimum value of integral type *type*. */ +#define _Py_IntegralTypeMin(type) ((_Py_IntegralTypeSigned(type)) ? -_Py_IntegralTypeMax(type) - 1 : 0) +/* Check whether *v* is in the range of integral type *type*. This is most + * useful if *v* is floating-point, since demoting a floating-point *v* to an + * integral type that cannot represent *v*'s integral part is undefined + * behavior. */ +#define _Py_InIntegralTypeRange(type, v) (_Py_IntegralTypeMin(type) <= v && v <= _Py_IntegralTypeMax(type)) + +#endif /* Py_PYMATH_H */ diff --git a/venv/Include/pymem.h b/venv/Include/pymem.h new file mode 100644 index 00000000..8ee0efdd --- /dev/null +++ b/venv/Include/pymem.h @@ -0,0 +1,244 @@ +/* The PyMem_ family: low-level memory allocation interfaces. + See objimpl.h for the PyObject_ memory family. +*/ + +#ifndef Py_PYMEM_H +#define Py_PYMEM_H + +#include "pyport.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_LIMITED_API +PyAPI_FUNC(void *) PyMem_RawMalloc(size_t size); +PyAPI_FUNC(void *) PyMem_RawCalloc(size_t nelem, size_t elsize); +PyAPI_FUNC(void *) PyMem_RawRealloc(void *ptr, size_t new_size); +PyAPI_FUNC(void) PyMem_RawFree(void *ptr); + +/* Configure the Python memory allocators. Pass NULL to use default + allocators. */ +PyAPI_FUNC(int) _PyMem_SetupAllocators(const char *opt); + +/* Try to get the allocators name set by _PyMem_SetupAllocators(). */ +PyAPI_FUNC(const char*) _PyMem_GetAllocatorsName(void); + +/* Track an allocated memory block in the tracemalloc module. + Return 0 on success, return -1 on error (failed to allocate memory to store + the trace). + + Return -2 if tracemalloc is disabled. + + If memory block is already tracked, update the existing trace. */ +PyAPI_FUNC(int) PyTraceMalloc_Track( + unsigned int domain, + uintptr_t ptr, + size_t size); + +/* Untrack an allocated memory block in the tracemalloc module. + Do nothing if the block was not tracked. + + Return -2 if tracemalloc is disabled, otherwise return 0. */ +PyAPI_FUNC(int) PyTraceMalloc_Untrack( + unsigned int domain, + uintptr_t ptr); + +/* Get the traceback where a memory block was allocated. + + Return a tuple of (filename: str, lineno: int) tuples. + + Return None if the tracemalloc module is disabled or if the memory block + is not tracked by tracemalloc. + + Raise an exception and return NULL on error. */ +PyAPI_FUNC(PyObject*) _PyTraceMalloc_GetTraceback( + unsigned int domain, + uintptr_t ptr); +#endif /* !Py_LIMITED_API */ + + +/* BEWARE: + + Each interface exports both functions and macros. Extension modules should + use the functions, to ensure binary compatibility across Python versions. + Because the Python implementation is free to change internal details, and + the macros may (or may not) expose details for speed, if you do use the + macros you must recompile your extensions with each Python release. + + Never mix calls to PyMem_ with calls to the platform malloc/realloc/ + calloc/free. For example, on Windows different DLLs may end up using + different heaps, and if you use PyMem_Malloc you'll get the memory from the + heap used by the Python DLL; it could be a disaster if you free()'ed that + directly in your own extension. Using PyMem_Free instead ensures Python + can return the memory to the proper heap. 
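A minimal sketch of the matched-family rule just described, with error handling reduced to a comment:

    void *buf = PyMem_Malloc(256);
    if (buf == NULL) {
        /* report the failure, e.g. by returning PyErr_NoMemory() from the caller */
    }
    /* ... use buf ... */
    PyMem_Free(buf);                   /* same PyMem_ family -- never plain free(buf) */
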
As another example, in + PYMALLOC_DEBUG mode, Python wraps all calls to all PyMem_ and PyObject_ + memory functions in special debugging wrappers that add additional + debugging info to dynamic memory blocks. The system routines have no idea + what to do with that stuff, and the Python wrappers have no idea what to do + with raw blocks obtained directly by the system routines then. + + The GIL must be held when using these APIs. +*/ + +/* + * Raw memory interface + * ==================== + */ + +/* Functions + + Functions supplying platform-independent semantics for malloc/realloc/ + free. These functions make sure that allocating 0 bytes returns a distinct + non-NULL pointer (whenever possible -- if we're flat out of memory, NULL + may be returned), even if the platform malloc and realloc don't. + Returned pointers must be checked for NULL explicitly. No action is + performed on failure (no exception is set, no warning is printed, etc). +*/ + +PyAPI_FUNC(void *) PyMem_Malloc(size_t size); +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03050000 +PyAPI_FUNC(void *) PyMem_Calloc(size_t nelem, size_t elsize); +#endif +PyAPI_FUNC(void *) PyMem_Realloc(void *ptr, size_t new_size); +PyAPI_FUNC(void) PyMem_Free(void *ptr); + +#ifndef Py_LIMITED_API +/* strdup() using PyMem_RawMalloc() */ +PyAPI_FUNC(char *) _PyMem_RawStrdup(const char *str); + +/* strdup() using PyMem_Malloc() */ +PyAPI_FUNC(char *) _PyMem_Strdup(const char *str); + +/* wcsdup() using PyMem_RawMalloc() */ +PyAPI_FUNC(wchar_t*) _PyMem_RawWcsdup(const wchar_t *str); +#endif + +/* Macros. */ + +/* PyMem_MALLOC(0) means malloc(1). Some systems would return NULL + for malloc(0), which would be treated as an error. Some platforms + would return a pointer with no memory behind it, which would break + pymalloc. To solve these problems, allocate an extra byte. */ +/* Returns NULL to indicate error if a negative size or size larger than + Py_ssize_t can represent is supplied. Helps prevents security holes. */ +#define PyMem_MALLOC(n) PyMem_Malloc(n) +#define PyMem_REALLOC(p, n) PyMem_Realloc(p, n) +#define PyMem_FREE(p) PyMem_Free(p) + +/* + * Type-oriented memory interface + * ============================== + * + * Allocate memory for n objects of the given type. Returns a new pointer + * or NULL if the request was too large or memory allocation failed. Use + * these macros rather than doing the multiplication yourself so that proper + * overflow checking is always done. + */ + +#define PyMem_New(type, n) \ + ( ((size_t)(n) > PY_SSIZE_T_MAX / sizeof(type)) ? NULL : \ + ( (type *) PyMem_Malloc((n) * sizeof(type)) ) ) +#define PyMem_NEW(type, n) \ + ( ((size_t)(n) > PY_SSIZE_T_MAX / sizeof(type)) ? NULL : \ + ( (type *) PyMem_MALLOC((n) * sizeof(type)) ) ) + +/* + * The value of (p) is always clobbered by this macro regardless of success. + * The caller MUST check if (p) is NULL afterwards and deal with the memory + * error if so. This means the original value of (p) MUST be saved for the + * caller's memory error handler to not lose track of it. + */ +#define PyMem_Resize(p, type, n) \ + ( (p) = ((size_t)(n) > PY_SSIZE_T_MAX / sizeof(type)) ? NULL : \ + (type *) PyMem_Realloc((p), (n) * sizeof(type)) ) +#define PyMem_RESIZE(p, type, n) \ + ( (p) = ((size_t)(n) > PY_SSIZE_T_MAX / sizeof(type)) ? NULL : \ + (type *) PyMem_REALLOC((p), (n) * sizeof(type)) ) + +/* PyMem{Del,DEL} are left over from ancient days, and shouldn't be used + * anymore. They're just confusing aliases for PyMem_{Free,FREE} now. 
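Because PyMem_Resize() overwrites its first argument even when the request fails, the pattern the comment above asks for looks roughly like the sketch below (grow_ints is illustrative only; freeing the old block on failure is just one way to "deal with the memory error").

#include <Python.h>

/* Grow an int array to n elements without losing the old block on failure. */
static int *
grow_ints(int *items, size_t n)
{
    int *saved = items;            /* keep the original pointer... */

    PyMem_Resize(items, int, n);   /* ...because `items` is clobbered (NULL on failure) */
    if (items == NULL) {
        PyMem_Free(saved);         /* the old block is still valid and still ours */
        return NULL;
    }
    return items;                  /* `saved` must not be used after a successful resize */
}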
+ */ +#define PyMem_Del PyMem_Free +#define PyMem_DEL PyMem_FREE + +#ifndef Py_LIMITED_API +typedef enum { + /* PyMem_RawMalloc(), PyMem_RawRealloc() and PyMem_RawFree() */ + PYMEM_DOMAIN_RAW, + + /* PyMem_Malloc(), PyMem_Realloc() and PyMem_Free() */ + PYMEM_DOMAIN_MEM, + + /* PyObject_Malloc(), PyObject_Realloc() and PyObject_Free() */ + PYMEM_DOMAIN_OBJ +} PyMemAllocatorDomain; + +typedef struct { + /* user context passed as the first argument to the 4 functions */ + void *ctx; + + /* allocate a memory block */ + void* (*malloc) (void *ctx, size_t size); + + /* allocate a memory block initialized by zeros */ + void* (*calloc) (void *ctx, size_t nelem, size_t elsize); + + /* allocate or resize a memory block */ + void* (*realloc) (void *ctx, void *ptr, size_t new_size); + + /* release a memory block */ + void (*free) (void *ctx, void *ptr); +} PyMemAllocatorEx; + +/* Get the memory block allocator of the specified domain. */ +PyAPI_FUNC(void) PyMem_GetAllocator(PyMemAllocatorDomain domain, + PyMemAllocatorEx *allocator); + +/* Set the memory block allocator of the specified domain. + + The new allocator must return a distinct non-NULL pointer when requesting + zero bytes. + + For the PYMEM_DOMAIN_RAW domain, the allocator must be thread-safe: the GIL + is not held when the allocator is called. + + If the new allocator is not a hook (don't call the previous allocator), the + PyMem_SetupDebugHooks() function must be called to reinstall the debug hooks + on top on the new allocator. */ +PyAPI_FUNC(void) PyMem_SetAllocator(PyMemAllocatorDomain domain, + PyMemAllocatorEx *allocator); + +/* Setup hooks to detect bugs in the following Python memory allocator + functions: + + - PyMem_RawMalloc(), PyMem_RawRealloc(), PyMem_RawFree() + - PyMem_Malloc(), PyMem_Realloc(), PyMem_Free() + - PyObject_Malloc(), PyObject_Realloc() and PyObject_Free() + + Newly allocated memory is filled with the byte 0xCB, freed memory is filled + with the byte 0xDB. Additional checks: + + - detect API violations, ex: PyObject_Free() called on a buffer allocated + by PyMem_Malloc() + - detect write before the start of the buffer (buffer underflow) + - detect write after the end of the buffer (buffer overflow) + + The function does nothing if Python is not compiled is debug mode. */ +PyAPI_FUNC(void) PyMem_SetupDebugHooks(void); +#endif + +#ifdef Py_BUILD_CORE +/* Set the memory allocator of the specified domain to the default. + Save the old allocator into *old_alloc if it's non-NULL. + Return on success, or return -1 if the domain is unknown. */ +PyAPI_FUNC(int) _PyMem_SetDefaultAllocator( + PyMemAllocatorDomain domain, + PyMemAllocatorEx *old_alloc); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* !Py_PYMEM_H */ diff --git a/venv/Include/pyport.h b/venv/Include/pyport.h new file mode 100644 index 00000000..c1f4c7fb --- /dev/null +++ b/venv/Include/pyport.h @@ -0,0 +1,793 @@ +#ifndef Py_PYPORT_H +#define Py_PYPORT_H + +#include "pyconfig.h" /* include for defines */ + +#include + +/************************************************************************** +Symbols and macros to supply platform-independent interfaces to basic +C language & library operations whose spellings vary across platforms. + +Please try to make documentation here as clear as possible: by definition, +the stuff here is trying to illuminate C's darkest corners. + +Config #defines referenced here: + +SIGNED_RIGHT_SHIFT_ZERO_FILLS +Meaning: To be defined iff i>>j does not extend the sign bit when i is a + signed integral type and i < 0. 
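A sketch of the hook pattern that PyMem_GetAllocator()/PyMem_SetAllocator() enable: save the current PYMEM_DOMAIN_RAW allocator, then install a wrapper that counts calls and forwards everything to the saved one. Because the wrapper delegates to the previous allocator it is a "hook" in the sense used above, so PyMem_SetupDebugHooks() does not need to be re-applied. The names alloc_count and install_counting_allocator are invented for the example, and the counter is deliberately not thread-safe.

#include <Python.h>

static PyMemAllocatorEx orig_raw;   /* allocator being wrapped */
static size_t alloc_count = 0;      /* naive call counter (not thread-safe) */

static void *
counting_malloc(void *ctx, size_t size)
{
    (void)ctx;
    alloc_count++;
    return orig_raw.malloc(orig_raw.ctx, size);
}

static void *
counting_calloc(void *ctx, size_t nelem, size_t elsize)
{
    (void)ctx;
    alloc_count++;
    return orig_raw.calloc(orig_raw.ctx, nelem, elsize);
}

static void *
counting_realloc(void *ctx, void *ptr, size_t new_size)
{
    (void)ctx;
    return orig_raw.realloc(orig_raw.ctx, ptr, new_size);
}

static void
counting_free(void *ctx, void *ptr)
{
    (void)ctx;
    orig_raw.free(orig_raw.ctx, ptr);
}

static void
install_counting_allocator(void)
{
    PyMemAllocatorEx hook = {NULL, counting_malloc, counting_calloc,
                             counting_realloc, counting_free};

    PyMem_GetAllocator(PYMEM_DOMAIN_RAW, &orig_raw);   /* save the current allocator */
    PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &hook);       /* install the wrapper */
}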
+Used in: Py_ARITHMETIC_RIGHT_SHIFT + +Py_DEBUG +Meaning: Extra checks compiled in for debug mode. +Used in: Py_SAFE_DOWNCAST + +**************************************************************************/ + +/* typedefs for some C9X-defined synonyms for integral types. + * + * The names in Python are exactly the same as the C9X names, except with a + * Py_ prefix. Until C9X is universally implemented, this is the only way + * to ensure that Python gets reliable names that don't conflict with names + * in non-Python code that are playing their own tricks to define the C9X + * names. + * + * NOTE: don't go nuts here! Python has no use for *most* of the C9X + * integral synonyms. Only define the ones we actually need. + */ + +/* long long is required. Ensure HAVE_LONG_LONG is defined for compatibility. */ +#ifndef HAVE_LONG_LONG +#define HAVE_LONG_LONG 1 +#endif +#ifndef PY_LONG_LONG +#define PY_LONG_LONG long long +/* If LLONG_MAX is defined in limits.h, use that. */ +#define PY_LLONG_MIN LLONG_MIN +#define PY_LLONG_MAX LLONG_MAX +#define PY_ULLONG_MAX ULLONG_MAX +#endif + +#define PY_UINT32_T uint32_t +#define PY_UINT64_T uint64_t + +/* Signed variants of the above */ +#define PY_INT32_T int32_t +#define PY_INT64_T int64_t + +/* If PYLONG_BITS_IN_DIGIT is not defined then we'll use 30-bit digits if all + the necessary integer types are available, and we're on a 64-bit platform + (as determined by SIZEOF_VOID_P); otherwise we use 15-bit digits. */ + +#ifndef PYLONG_BITS_IN_DIGIT +#if SIZEOF_VOID_P >= 8 +#define PYLONG_BITS_IN_DIGIT 30 +#else +#define PYLONG_BITS_IN_DIGIT 15 +#endif +#endif + +/* uintptr_t is the C9X name for an unsigned integral type such that a + * legitimate void* can be cast to uintptr_t and then back to void* again + * without loss of information. Similarly for intptr_t, wrt a signed + * integral type. + */ +typedef uintptr_t Py_uintptr_t; +typedef intptr_t Py_intptr_t; + +/* Py_ssize_t is a signed integral type such that sizeof(Py_ssize_t) == + * sizeof(size_t). C99 doesn't define such a thing directly (size_t is an + * unsigned integral type). See PEP 353 for details. + */ +#ifdef HAVE_SSIZE_T +typedef ssize_t Py_ssize_t; +#elif SIZEOF_VOID_P == SIZEOF_SIZE_T +typedef Py_intptr_t Py_ssize_t; +#else +# error "Python needs a typedef for Py_ssize_t in pyport.h." +#endif + +/* Py_hash_t is the same size as a pointer. */ +#define SIZEOF_PY_HASH_T SIZEOF_SIZE_T +typedef Py_ssize_t Py_hash_t; +/* Py_uhash_t is the unsigned equivalent needed to calculate numeric hash. */ +#define SIZEOF_PY_UHASH_T SIZEOF_SIZE_T +typedef size_t Py_uhash_t; + +/* Only used for compatibility with code that may not be PY_SSIZE_T_CLEAN. */ +#ifdef PY_SSIZE_T_CLEAN +typedef Py_ssize_t Py_ssize_clean_t; +#else +typedef int Py_ssize_clean_t; +#endif + +/* Largest possible value of size_t. */ +#define PY_SIZE_MAX SIZE_MAX + +/* Largest positive value of type Py_ssize_t. */ +#define PY_SSIZE_T_MAX ((Py_ssize_t)(((size_t)-1)>>1)) +/* Smallest negative value of type Py_ssize_t. */ +#define PY_SSIZE_T_MIN (-PY_SSIZE_T_MAX-1) + +/* PY_FORMAT_SIZE_T is a platform-specific modifier for use in a printf + * format to convert an argument with the width of a size_t or Py_ssize_t. + * C99 introduced "z" for this purpose, but not all platforms support that; + * e.g., MS compilers use "I" instead. 
+ * + * These "high level" Python format functions interpret "z" correctly on + * all platforms (Python interprets the format string itself, and does whatever + * the platform C requires to convert a size_t/Py_ssize_t argument): + * + * PyBytes_FromFormat + * PyErr_Format + * PyBytes_FromFormatV + * PyUnicode_FromFormatV + * + * Lower-level uses require that you interpolate the correct format modifier + * yourself (e.g., calling printf, fprintf, sprintf, PyOS_snprintf); for + * example, + * + * Py_ssize_t index; + * fprintf(stderr, "index %" PY_FORMAT_SIZE_T "d sucks\n", index); + * + * That will expand to %ld, or %Id, or to something else correct for a + * Py_ssize_t on the platform. + */ +#ifndef PY_FORMAT_SIZE_T +# if SIZEOF_SIZE_T == SIZEOF_INT && !defined(__APPLE__) +# define PY_FORMAT_SIZE_T "" +# elif SIZEOF_SIZE_T == SIZEOF_LONG +# define PY_FORMAT_SIZE_T "l" +# elif defined(MS_WINDOWS) +# define PY_FORMAT_SIZE_T "I" +# else +# error "This platform's pyconfig.h needs to define PY_FORMAT_SIZE_T" +# endif +#endif + +/* Py_LOCAL can be used instead of static to get the fastest possible calling + * convention for functions that are local to a given module. + * + * Py_LOCAL_INLINE does the same thing, and also explicitly requests inlining, + * for platforms that support that. + * + * If PY_LOCAL_AGGRESSIVE is defined before python.h is included, more + * "aggressive" inlining/optimization is enabled for the entire module. This + * may lead to code bloat, and may slow things down for those reasons. It may + * also lead to errors, if the code relies on pointer aliasing. Use with + * care. + * + * NOTE: You can only use this for functions that are entirely local to a + * module; functions that are exported via method tables, callbacks, etc, + * should keep using static. 
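Usage of the Py_LOCAL/Py_LOCAL_INLINE machinery described above is just a drop-in replacement for `static` on module-private helpers; clamp_index below is an invented example.

#include <Python.h>

/* Module-local helper: expands to `static __inline ... __fastcall` under
 * MSVC and to `static inline ...` elsewhere. */
Py_LOCAL_INLINE(Py_ssize_t)
clamp_index(Py_ssize_t i, Py_ssize_t len)
{
    if (i < 0)
        i += len;           /* support negative, Python-style indices */
    if (i < 0)
        return 0;
    return (i > len) ? len : i;
}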
+ */ + +#if defined(_MSC_VER) +#if defined(PY_LOCAL_AGGRESSIVE) +/* enable more aggressive optimization for visual studio */ +#pragma optimize("agtw", on) +#endif +/* ignore warnings if the compiler decides not to inline a function */ +#pragma warning(disable: 4710) +/* fastest possible local call under MSVC */ +#define Py_LOCAL(type) static type __fastcall +#define Py_LOCAL_INLINE(type) static __inline type __fastcall +#else +#define Py_LOCAL(type) static type +#define Py_LOCAL_INLINE(type) static inline type +#endif + +/* Py_MEMCPY is kept for backwards compatibility, + * see https://bugs.python.org/issue28126 */ +#define Py_MEMCPY memcpy + +#include + +#ifdef HAVE_IEEEFP_H +#include /* needed for 'finite' declaration on some platforms */ +#endif + +#include /* Moved here from the math section, before extern "C" */ + +/******************************************** + * WRAPPER FOR and/or * + ********************************************/ + +#ifdef TIME_WITH_SYS_TIME +#include +#include +#else /* !TIME_WITH_SYS_TIME */ +#ifdef HAVE_SYS_TIME_H +#include +#else /* !HAVE_SYS_TIME_H */ +#include +#endif /* !HAVE_SYS_TIME_H */ +#endif /* !TIME_WITH_SYS_TIME */ + + +/****************************** + * WRAPPER FOR * + ******************************/ + +/* NB caller must include */ + +#ifdef HAVE_SYS_SELECT_H +#include +#endif /* !HAVE_SYS_SELECT_H */ + +/******************************* + * stat() and fstat() fiddling * + *******************************/ + +#ifdef HAVE_SYS_STAT_H +#include +#elif defined(HAVE_STAT_H) +#include +#endif + +#ifndef S_IFMT +/* VisualAge C/C++ Failed to Define MountType Field in sys/stat.h */ +#define S_IFMT 0170000 +#endif + +#ifndef S_IFLNK +/* Windows doesn't define S_IFLNK but posixmodule.c maps + * IO_REPARSE_TAG_SYMLINK to S_IFLNK */ +# define S_IFLNK 0120000 +#endif + +#ifndef S_ISREG +#define S_ISREG(x) (((x) & S_IFMT) == S_IFREG) +#endif + +#ifndef S_ISDIR +#define S_ISDIR(x) (((x) & S_IFMT) == S_IFDIR) +#endif + +#ifndef S_ISCHR +#define S_ISCHR(x) (((x) & S_IFMT) == S_IFCHR) +#endif + +#ifdef __cplusplus +/* Move this down here since some C++ #include's don't like to be included + inside an extern "C" */ +extern "C" { +#endif + + +/* Py_ARITHMETIC_RIGHT_SHIFT + * C doesn't define whether a right-shift of a signed integer sign-extends + * or zero-fills. Here a macro to force sign extension: + * Py_ARITHMETIC_RIGHT_SHIFT(TYPE, I, J) + * Return I >> J, forcing sign extension. Arithmetically, return the + * floor of I/2**J. + * Requirements: + * I should have signed integer type. In the terminology of C99, this can + * be either one of the five standard signed integer types (signed char, + * short, int, long, long long) or an extended signed integer type. + * J is an integer >= 0 and strictly less than the number of bits in the + * type of I (because C doesn't define what happens for J outside that + * range either). + * TYPE used to specify the type of I, but is now ignored. It's been left + * in for backwards compatibility with versions <= 2.6 or 3.0. + * Caution: + * I may be evaluated more than once. + */ +#ifdef SIGNED_RIGHT_SHIFT_ZERO_FILLS +#define Py_ARITHMETIC_RIGHT_SHIFT(TYPE, I, J) \ + ((I) < 0 ? -1-((-1-(I)) >> (J)) : (I) >> (J)) +#else +#define Py_ARITHMETIC_RIGHT_SHIFT(TYPE, I, J) ((I) >> (J)) +#endif + +/* Py_FORCE_EXPANSION(X) + * "Simply" returns its argument. However, macro expansions within the + * argument are evaluated. This unfortunate trickery is needed to get + * token-pasting to work as desired in some cases. 
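A small sketch of the Py_ARITHMETIC_RIGHT_SHIFT contract spelled out above (I signed, 0 <= J < width of the type, I possibly evaluated twice); floor_div8 is an invented helper.

#include <Python.h>

/* floor(x / 8) for any signed long, even on a platform whose `>>`
 * zero-fills; on such a platform (-1) >> 3 would not be -1. */
static long
floor_div8(long x)
{
    /* The TYPE argument is ignored nowadays but kept for compatibility;
     * passing a plain variable avoids the double-evaluation caveat. */
    return Py_ARITHMETIC_RIGHT_SHIFT(long, x, 3);
}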
+ */ +#define Py_FORCE_EXPANSION(X) X + +/* Py_SAFE_DOWNCAST(VALUE, WIDE, NARROW) + * Cast VALUE to type NARROW from type WIDE. In Py_DEBUG mode, this + * assert-fails if any information is lost. + * Caution: + * VALUE may be evaluated more than once. + */ +#ifdef Py_DEBUG +#define Py_SAFE_DOWNCAST(VALUE, WIDE, NARROW) \ + (assert((WIDE)(NARROW)(VALUE) == (VALUE)), (NARROW)(VALUE)) +#else +#define Py_SAFE_DOWNCAST(VALUE, WIDE, NARROW) (NARROW)(VALUE) +#endif + +/* Py_SET_ERRNO_ON_MATH_ERROR(x) + * If a libm function did not set errno, but it looks like the result + * overflowed or not-a-number, set errno to ERANGE or EDOM. Set errno + * to 0 before calling a libm function, and invoke this macro after, + * passing the function result. + * Caution: + * This isn't reliable. See Py_OVERFLOWED comments. + * X is evaluated more than once. + */ +#if defined(__FreeBSD__) || defined(__OpenBSD__) || (defined(__hpux) && defined(__ia64)) +#define _Py_SET_EDOM_FOR_NAN(X) if (isnan(X)) errno = EDOM; +#else +#define _Py_SET_EDOM_FOR_NAN(X) ; +#endif +#define Py_SET_ERRNO_ON_MATH_ERROR(X) \ + do { \ + if (errno == 0) { \ + if ((X) == Py_HUGE_VAL || (X) == -Py_HUGE_VAL) \ + errno = ERANGE; \ + else _Py_SET_EDOM_FOR_NAN(X) \ + } \ + } while(0) + +/* Py_SET_ERANGE_ON_OVERFLOW(x) + * An alias of Py_SET_ERRNO_ON_MATH_ERROR for backward-compatibility. + */ +#define Py_SET_ERANGE_IF_OVERFLOW(X) Py_SET_ERRNO_ON_MATH_ERROR(X) + +/* Py_ADJUST_ERANGE1(x) + * Py_ADJUST_ERANGE2(x, y) + * Set errno to 0 before calling a libm function, and invoke one of these + * macros after, passing the function result(s) (Py_ADJUST_ERANGE2 is useful + * for functions returning complex results). This makes two kinds of + * adjustments to errno: (A) If it looks like the platform libm set + * errno=ERANGE due to underflow, clear errno. (B) If it looks like the + * platform libm overflowed but didn't set errno, force errno to ERANGE. In + * effect, we're trying to force a useful implementation of C89 errno + * behavior. + * Caution: + * This isn't reliable. See Py_OVERFLOWED comments. + * X and Y may be evaluated more than once. + */ +#define Py_ADJUST_ERANGE1(X) \ + do { \ + if (errno == 0) { \ + if ((X) == Py_HUGE_VAL || (X) == -Py_HUGE_VAL) \ + errno = ERANGE; \ + } \ + else if (errno == ERANGE && (X) == 0.0) \ + errno = 0; \ + } while(0) + +#define Py_ADJUST_ERANGE2(X, Y) \ + do { \ + if ((X) == Py_HUGE_VAL || (X) == -Py_HUGE_VAL || \ + (Y) == Py_HUGE_VAL || (Y) == -Py_HUGE_VAL) { \ + if (errno == 0) \ + errno = ERANGE; \ + } \ + else if (errno == ERANGE) \ + errno = 0; \ + } while(0) + +/* The functions _Py_dg_strtod and _Py_dg_dtoa in Python/dtoa.c (which are + * required to support the short float repr introduced in Python 3.1) require + * that the floating-point unit that's being used for arithmetic operations + * on C doubles is set to use 53-bit precision. It also requires that the + * FPU rounding mode is round-half-to-even, but that's less often an issue. + * + * If your FPU isn't already set to 53-bit precision/round-half-to-even, and + * you want to make use of _Py_dg_strtod and _Py_dg_dtoa, then you should + * + * #define HAVE_PY_SET_53BIT_PRECISION 1 + * + * and also give appropriate definitions for the following three macros: + * + * _PY_SET_53BIT_PRECISION_START : store original FPU settings, and + * set FPU to 53-bit precision/round-half-to-even + * _PY_SET_53BIT_PRECISION_END : restore original FPU settings + * _PY_SET_53BIT_PRECISION_HEADER : any variable declarations needed to + * use the two macros above. 
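Py_SAFE_DOWNCAST, defined just above, is a debug-build backstop rather than input validation: in a Py_DEBUG build the assert fires if the value does not survive the narrowing, and in a release build it is a bare cast. A sketch (as_int_length is illustrative):

#include <Python.h>

/* Narrow a Py_ssize_t length for an API that only accepts int.
 * The caller is expected to have range-checked `len` already. */
static int
as_int_length(Py_ssize_t len)
{
    /* VALUE may be evaluated twice, so pass a plain variable. */
    return Py_SAFE_DOWNCAST(len, Py_ssize_t, int);
}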
+ * + * The macros are designed to be used within a single C function: see + * Python/pystrtod.c for an example of their use. + */ + +/* get and set x87 control word for gcc/x86 */ +#ifdef HAVE_GCC_ASM_FOR_X87 +#define HAVE_PY_SET_53BIT_PRECISION 1 +/* _Py_get/set_387controlword functions are defined in Python/pymath.c */ +#define _Py_SET_53BIT_PRECISION_HEADER \ + unsigned short old_387controlword, new_387controlword +#define _Py_SET_53BIT_PRECISION_START \ + do { \ + old_387controlword = _Py_get_387controlword(); \ + new_387controlword = (old_387controlword & ~0x0f00) | 0x0200; \ + if (new_387controlword != old_387controlword) \ + _Py_set_387controlword(new_387controlword); \ + } while (0) +#define _Py_SET_53BIT_PRECISION_END \ + if (new_387controlword != old_387controlword) \ + _Py_set_387controlword(old_387controlword) +#endif + +/* get and set x87 control word for VisualStudio/x86 */ +#if defined(_MSC_VER) && !defined(_WIN64) /* x87 not supported in 64-bit */ +#define HAVE_PY_SET_53BIT_PRECISION 1 +#define _Py_SET_53BIT_PRECISION_HEADER \ + unsigned int old_387controlword, new_387controlword, out_387controlword +/* We use the __control87_2 function to set only the x87 control word. + The SSE control word is unaffected. */ +#define _Py_SET_53BIT_PRECISION_START \ + do { \ + __control87_2(0, 0, &old_387controlword, NULL); \ + new_387controlword = \ + (old_387controlword & ~(_MCW_PC | _MCW_RC)) | (_PC_53 | _RC_NEAR); \ + if (new_387controlword != old_387controlword) \ + __control87_2(new_387controlword, _MCW_PC | _MCW_RC, \ + &out_387controlword, NULL); \ + } while (0) +#define _Py_SET_53BIT_PRECISION_END \ + do { \ + if (new_387controlword != old_387controlword) \ + __control87_2(old_387controlword, _MCW_PC | _MCW_RC, \ + &out_387controlword, NULL); \ + } while (0) +#endif + +#ifdef HAVE_GCC_ASM_FOR_MC68881 +#define HAVE_PY_SET_53BIT_PRECISION 1 +#define _Py_SET_53BIT_PRECISION_HEADER \ + unsigned int old_fpcr, new_fpcr +#define _Py_SET_53BIT_PRECISION_START \ + do { \ + __asm__ ("fmove.l %%fpcr,%0" : "=g" (old_fpcr)); \ + /* Set double precision / round to nearest. */ \ + new_fpcr = (old_fpcr & ~0xf0) | 0x80; \ + if (new_fpcr != old_fpcr) \ + __asm__ volatile ("fmove.l %0,%%fpcr" : : "g" (new_fpcr)); \ + } while (0) +#define _Py_SET_53BIT_PRECISION_END \ + do { \ + if (new_fpcr != old_fpcr) \ + __asm__ volatile ("fmove.l %0,%%fpcr" : : "g" (old_fpcr)); \ + } while (0) +#endif + +/* default definitions are empty */ +#ifndef HAVE_PY_SET_53BIT_PRECISION +#define _Py_SET_53BIT_PRECISION_HEADER +#define _Py_SET_53BIT_PRECISION_START +#define _Py_SET_53BIT_PRECISION_END +#endif + +/* If we can't guarantee 53-bit precision, don't use the code + in Python/dtoa.c, but fall back to standard code. This + means that repr of a float will be long (17 sig digits). + + Realistically, there are two things that could go wrong: + + (1) doubles aren't IEEE 754 doubles, or + (2) we're on x86 with the rounding precision set to 64-bits + (extended precision), and we don't know how to change + the rounding precision. + */ + +#if !defined(DOUBLE_IS_LITTLE_ENDIAN_IEEE754) && \ + !defined(DOUBLE_IS_BIG_ENDIAN_IEEE754) && \ + !defined(DOUBLE_IS_ARM_MIXED_ENDIAN_IEEE754) +#define PY_NO_SHORT_FLOAT_REPR +#endif + +/* double rounding is symptomatic of use of extended precision on x86. If + we're seeing double rounding, and we don't have any mechanism available for + changing the FPU rounding precision, then don't use Python/dtoa.c. 
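The three _Py_SET_53BIT_PRECISION_* macros are meant to bracket precision-sensitive double arithmetic inside a single function, and they expand to nothing on platforms that need no FPU adjustment. A sketch loosely modelled on that usage; parse_double is invented and strtod merely stands in for the precision-sensitive computation.

#include <Python.h>
#include <stdlib.h>

static double
parse_double(const char *s)
{
    double result;
    _Py_SET_53BIT_PRECISION_HEADER;    /* declares saved control words, if any */

    _Py_SET_53BIT_PRECISION_START;     /* force 53-bit precision / round-half-even */
    result = strtod(s, NULL);          /* stand-in for the precision-sensitive work */
    _Py_SET_53BIT_PRECISION_END;       /* restore the original FPU state */

    return result;
}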
*/ +#if defined(X87_DOUBLE_ROUNDING) && !defined(HAVE_PY_SET_53BIT_PRECISION) +#define PY_NO_SHORT_FLOAT_REPR +#endif + + +/* Py_DEPRECATED(version) + * Declare a variable, type, or function deprecated. + * Usage: + * extern int old_var Py_DEPRECATED(2.3); + * typedef int T1 Py_DEPRECATED(2.4); + * extern int x() Py_DEPRECATED(2.5); + */ +#if defined(__GNUC__) \ + && ((__GNUC__ >= 4) || (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1)) +#define Py_DEPRECATED(VERSION_UNUSED) __attribute__((__deprecated__)) +#else +#define Py_DEPRECATED(VERSION_UNUSED) +#endif + + +/* _Py_HOT_FUNCTION + * The hot attribute on a function is used to inform the compiler that the + * function is a hot spot of the compiled program. The function is optimized + * more aggressively and on many target it is placed into special subsection of + * the text section so all hot functions appears close together improving + * locality. + * + * Usage: + * int _Py_HOT_FUNCTION x(void) { return 3; } + * + * Issue #28618: This attribute must not be abused, otherwise it can have a + * negative effect on performance. Only the functions were Python spend most of + * its time must use it. Use a profiler when running performance benchmark + * suite to find these functions. + */ +#if defined(__GNUC__) \ + && ((__GNUC__ >= 5) || (__GNUC__ == 4) && (__GNUC_MINOR__ >= 3)) +#define _Py_HOT_FUNCTION __attribute__((hot)) +#else +#define _Py_HOT_FUNCTION +#endif + +/* _Py_NO_INLINE + * Disable inlining on a function. For example, it helps to reduce the C stack + * consumption. + * + * Usage: + * int _Py_NO_INLINE x(void) { return 3; } + */ +#if defined(__GNUC__) || defined(__clang__) +# define _Py_NO_INLINE __attribute__((noinline)) +#else +# define _Py_NO_INLINE +#endif + +/************************************************************************** +Prototypes that are missing from the standard include files on some systems +(and possibly only some versions of such systems.) + +Please be conservative with adding new ones, document them and enclose them +in platform-specific #ifdefs. +**************************************************************************/ + +#ifdef SOLARIS +/* Unchecked */ +extern int gethostname(char *, int); +#endif + +#ifdef HAVE__GETPTY +#include /* we need to import mode_t */ +extern char * _getpty(int *, int, mode_t, int); +#endif + +/* On QNX 6, struct termio must be declared by including sys/termio.h + if TCGETA, TCSETA, TCSETAW, or TCSETAF are used. sys/termio.h must + be included before termios.h or it will generate an error. */ +#if defined(HAVE_SYS_TERMIO_H) && !defined(__hpux) +#include +#endif + + +/* On 4.4BSD-descendants, ctype functions serves the whole range of + * wchar_t character set rather than single byte code points only. + * This characteristic can break some operations of string object + * including str.upper() and str.split() on UTF-8 locales. This + * workaround was provided by Tim Robbins of FreeBSD project. + */ + +#if defined(__APPLE__) +# define _PY_PORT_CTYPE_UTF8_ISSUE +#endif + +#ifdef _PY_PORT_CTYPE_UTF8_ISSUE +#ifndef __cplusplus + /* The workaround below is unsafe in C++ because + * the defines these symbols as real functions, + * with a slightly different signature. 
+ * See issue #10910 + */ +#include +#include +#undef isalnum +#define isalnum(c) iswalnum(btowc(c)) +#undef isalpha +#define isalpha(c) iswalpha(btowc(c)) +#undef islower +#define islower(c) iswlower(btowc(c)) +#undef isspace +#define isspace(c) iswspace(btowc(c)) +#undef isupper +#define isupper(c) iswupper(btowc(c)) +#undef tolower +#define tolower(c) towlower(btowc(c)) +#undef toupper +#define toupper(c) towupper(btowc(c)) +#endif +#endif + + +/* Declarations for symbol visibility. + + PyAPI_FUNC(type): Declares a public Python API function and return type + PyAPI_DATA(type): Declares public Python data and its type + PyMODINIT_FUNC: A Python module init function. If these functions are + inside the Python core, they are private to the core. + If in an extension module, it may be declared with + external linkage depending on the platform. + + As a number of platforms support/require "__declspec(dllimport/dllexport)", + we support a HAVE_DECLSPEC_DLL macro to save duplication. +*/ + +/* + All windows ports, except cygwin, are handled in PC/pyconfig.h. + + Cygwin is the only other autoconf platform requiring special + linkage handling and it uses __declspec(). +*/ +#if defined(__CYGWIN__) +# define HAVE_DECLSPEC_DLL +#endif + +/* only get special linkage if built as shared or platform is Cygwin */ +#if defined(Py_ENABLE_SHARED) || defined(__CYGWIN__) +# if defined(HAVE_DECLSPEC_DLL) +# if defined(Py_BUILD_CORE) || defined(Py_BUILD_CORE_BUILTIN) +# define PyAPI_FUNC(RTYPE) __declspec(dllexport) RTYPE +# define PyAPI_DATA(RTYPE) extern __declspec(dllexport) RTYPE + /* module init functions inside the core need no external linkage */ + /* except for Cygwin to handle embedding */ +# if defined(__CYGWIN__) +# define PyMODINIT_FUNC __declspec(dllexport) PyObject* +# else /* __CYGWIN__ */ +# define PyMODINIT_FUNC PyObject* +# endif /* __CYGWIN__ */ +# else /* Py_BUILD_CORE */ + /* Building an extension module, or an embedded situation */ + /* public Python functions and data are imported */ + /* Under Cygwin, auto-import functions to prevent compilation */ + /* failures similar to those described at the bottom of 4.1: */ + /* http://docs.python.org/extending/windows.html#a-cookbook-approach */ +# if !defined(__CYGWIN__) +# define PyAPI_FUNC(RTYPE) __declspec(dllimport) RTYPE +# endif /* !__CYGWIN__ */ +# define PyAPI_DATA(RTYPE) extern __declspec(dllimport) RTYPE + /* module init functions outside the core must be exported */ +# if defined(__cplusplus) +# define PyMODINIT_FUNC extern "C" __declspec(dllexport) PyObject* +# else /* __cplusplus */ +# define PyMODINIT_FUNC __declspec(dllexport) PyObject* +# endif /* __cplusplus */ +# endif /* Py_BUILD_CORE */ +# endif /* HAVE_DECLSPEC_DLL */ +#endif /* Py_ENABLE_SHARED */ + +/* If no external linkage macros defined by now, create defaults */ +#ifndef PyAPI_FUNC +# define PyAPI_FUNC(RTYPE) RTYPE +#endif +#ifndef PyAPI_DATA +# define PyAPI_DATA(RTYPE) extern RTYPE +#endif +#ifndef PyMODINIT_FUNC +# if defined(__cplusplus) +# define PyMODINIT_FUNC extern "C" PyObject* +# else /* __cplusplus */ +# define PyMODINIT_FUNC PyObject* +# endif /* __cplusplus */ +#endif + +/* limits.h constants that may be missing */ + +#ifndef INT_MAX +#define INT_MAX 2147483647 +#endif + +#ifndef LONG_MAX +#if SIZEOF_LONG == 4 +#define LONG_MAX 0X7FFFFFFFL +#elif SIZEOF_LONG == 8 +#define LONG_MAX 0X7FFFFFFFFFFFFFFFL +#else +#error "could not set LONG_MAX in pyport.h" +#endif +#endif + +#ifndef LONG_MIN +#define LONG_MIN (-LONG_MAX-1) +#endif + +#ifndef LONG_BIT +#define 
LONG_BIT (8 * SIZEOF_LONG) +#endif + +#if LONG_BIT != 8 * SIZEOF_LONG +/* 04-Oct-2000 LONG_BIT is apparently (mis)defined as 64 on some recent + * 32-bit platforms using gcc. We try to catch that here at compile-time + * rather than waiting for integer multiplication to trigger bogus + * overflows. + */ +#error "LONG_BIT definition appears wrong for platform (bad gcc/glibc config?)." +#endif + +#ifdef __cplusplus +} +#endif + +/* + * Hide GCC attributes from compilers that don't support them. + */ +#if (!defined(__GNUC__) || __GNUC__ < 2 || \ + (__GNUC__ == 2 && __GNUC_MINOR__ < 7) ) +#define Py_GCC_ATTRIBUTE(x) +#else +#define Py_GCC_ATTRIBUTE(x) __attribute__(x) +#endif + +/* + * Specify alignment on compilers that support it. + */ +#if defined(__GNUC__) && __GNUC__ >= 3 +#define Py_ALIGNED(x) __attribute__((aligned(x))) +#else +#define Py_ALIGNED(x) +#endif + +/* Eliminate end-of-loop code not reached warnings from SunPro C + * when using do{...}while(0) macros + */ +#ifdef __SUNPRO_C +#pragma error_messages (off,E_END_OF_LOOP_CODE_NOT_REACHED) +#endif + +#ifndef Py_LL +#define Py_LL(x) x##LL +#endif + +#ifndef Py_ULL +#define Py_ULL(x) Py_LL(x##U) +#endif + +#define Py_VA_COPY va_copy + +/* + * Convenient macros to deal with endianness of the platform. WORDS_BIGENDIAN is + * detected by configure and defined in pyconfig.h. The code in pyconfig.h + * also takes care of Apple's universal builds. + */ + +#ifdef WORDS_BIGENDIAN +#define PY_BIG_ENDIAN 1 +#define PY_LITTLE_ENDIAN 0 +#else +#define PY_BIG_ENDIAN 0 +#define PY_LITTLE_ENDIAN 1 +#endif + +#if defined(Py_BUILD_CORE) || defined(Py_BUILD_CORE_BUILTIN) +/* + * Macros to protect CRT calls against instant termination when passed an + * invalid parameter (issue23524). + */ +#if defined _MSC_VER && _MSC_VER >= 1900 + +extern _invalid_parameter_handler _Py_silent_invalid_parameter_handler; +#define _Py_BEGIN_SUPPRESS_IPH { _invalid_parameter_handler _Py_old_handler = \ + _set_thread_local_invalid_parameter_handler(_Py_silent_invalid_parameter_handler); +#define _Py_END_SUPPRESS_IPH _set_thread_local_invalid_parameter_handler(_Py_old_handler); } + +#else + +#define _Py_BEGIN_SUPPRESS_IPH +#define _Py_END_SUPPRESS_IPH + +#endif /* _MSC_VER >= 1900 */ +#endif /* Py_BUILD_CORE */ + +#ifdef __ANDROID__ +/* The Android langinfo.h header is not used. */ +#undef HAVE_LANGINFO_H +#undef CODESET +#endif + +/* Maximum value of the Windows DWORD type */ +#define PY_DWORD_MAX 4294967295U + +/* This macro used to tell whether Python was built with multithreading + * enabled. Now multithreading is always enabled, but keep the macro + * for compatibility. + */ +#ifndef WITH_THREAD +#define WITH_THREAD +#endif + +#endif /* Py_PYPORT_H */ diff --git a/venv/Include/pystate.h b/venv/Include/pystate.h new file mode 100644 index 00000000..29d7148b --- /dev/null +++ b/venv/Include/pystate.h @@ -0,0 +1,452 @@ + +/* Thread and interpreter state structures and their interfaces */ + + +#ifndef Py_PYSTATE_H +#define Py_PYSTATE_H +#ifdef __cplusplus +extern "C" { +#endif + +#include "pythread.h" + +/* This limitation is for performance and simplicity. If needed it can be +removed (with effort). */ +#define MAX_CO_EXTRA_USERS 255 + +/* State shared between threads */ + +struct _ts; /* Forward */ +struct _is; /* Forward */ +struct _frame; /* Forward declaration for PyFrameObject. 
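The PY_LITTLE_ENDIAN / PY_BIG_ENDIAN pair defined a few lines up is intended for compile-time branching; a minimal sketch with an invented helper that reads a little-endian 32-bit value from a byte buffer.

#include <Python.h>
#include <stdint.h>
#include <string.h>

static uint32_t
read_le32(const unsigned char *p)
{
#if PY_LITTLE_ENDIAN
    uint32_t v;
    memcpy(&v, p, 4);      /* host order already matches the wire order */
    return v;
#else
    return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
           ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
#endif
}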
*/ + +#ifdef Py_LIMITED_API +typedef struct _is PyInterpreterState; +#else +typedef PyObject* (*_PyFrameEvalFunction)(struct _frame *, int); + + +typedef struct { + int install_signal_handlers; /* Install signal handlers? -1 means unset */ + + int ignore_environment; /* -E, Py_IgnoreEnvironmentFlag */ + int use_hash_seed; /* PYTHONHASHSEED=x */ + unsigned long hash_seed; + const char *allocator; /* Memory allocator: _PyMem_SetupAllocators() */ + int dev_mode; /* PYTHONDEVMODE, -X dev */ + int faulthandler; /* PYTHONFAULTHANDLER, -X faulthandler */ + int tracemalloc; /* PYTHONTRACEMALLOC, -X tracemalloc=N */ + int import_time; /* PYTHONPROFILEIMPORTTIME, -X importtime */ + int show_ref_count; /* -X showrefcount */ + int show_alloc_count; /* -X showalloccount */ + int dump_refs; /* PYTHONDUMPREFS */ + int malloc_stats; /* PYTHONMALLOCSTATS */ + int coerce_c_locale; /* PYTHONCOERCECLOCALE, -1 means unknown */ + int coerce_c_locale_warn; /* PYTHONCOERCECLOCALE=warn */ + int utf8_mode; /* PYTHONUTF8, -X utf8; -1 means unknown */ + + wchar_t *program_name; /* Program name, see also Py_GetProgramName() */ + int argc; /* Number of command line arguments, + -1 means unset */ + wchar_t **argv; /* Command line arguments */ + wchar_t *program; /* argv[0] or "" */ + + int nxoption; /* Number of -X options */ + wchar_t **xoptions; /* -X options */ + + int nwarnoption; /* Number of warnings options */ + wchar_t **warnoptions; /* Warnings options */ + + /* Path configuration inputs */ + wchar_t *module_search_path_env; /* PYTHONPATH environment variable */ + wchar_t *home; /* PYTHONHOME environment variable, + see also Py_SetPythonHome(). */ + + /* Path configuration outputs */ + int nmodule_search_path; /* Number of sys.path paths, + -1 means unset */ + wchar_t **module_search_paths; /* sys.path paths */ + wchar_t *executable; /* sys.executable */ + wchar_t *prefix; /* sys.prefix */ + wchar_t *base_prefix; /* sys.base_prefix */ + wchar_t *exec_prefix; /* sys.exec_prefix */ + wchar_t *base_exec_prefix; /* sys.base_exec_prefix */ + + /* Private fields */ + int _disable_importlib; /* Needed by freeze_importlib */ +} _PyCoreConfig; + +#define _PyCoreConfig_INIT \ + (_PyCoreConfig){ \ + .install_signal_handlers = -1, \ + .use_hash_seed = -1, \ + .coerce_c_locale = -1, \ + .utf8_mode = -1, \ + .argc = -1, \ + .nmodule_search_path = -1} +/* Note: _PyCoreConfig_INIT sets other fields to 0/NULL */ + +/* Placeholders while working on the new configuration API + * + * See PEP 432 for final anticipated contents + */ +typedef struct { + int install_signal_handlers; /* Install signal handlers? 
-1 means unset */ + PyObject *argv; /* sys.argv list, can be NULL */ + PyObject *executable; /* sys.executable str */ + PyObject *prefix; /* sys.prefix str */ + PyObject *base_prefix; /* sys.base_prefix str, can be NULL */ + PyObject *exec_prefix; /* sys.exec_prefix str */ + PyObject *base_exec_prefix; /* sys.base_exec_prefix str, can be NULL */ + PyObject *warnoptions; /* sys.warnoptions list, can be NULL */ + PyObject *xoptions; /* sys._xoptions dict, can be NULL */ + PyObject *module_search_path; /* sys.path list */ +} _PyMainInterpreterConfig; + +#define _PyMainInterpreterConfig_INIT \ + (_PyMainInterpreterConfig){.install_signal_handlers = -1} +/* Note: _PyMainInterpreterConfig_INIT sets other fields to 0/NULL */ + +typedef struct _is { + + struct _is *next; + struct _ts *tstate_head; + + int64_t id; + int64_t id_refcount; + PyThread_type_lock id_mutex; + + PyObject *modules; + PyObject *modules_by_index; + PyObject *sysdict; + PyObject *builtins; + PyObject *importlib; + + /* Used in Python/sysmodule.c. */ + int check_interval; + + /* Used in Modules/_threadmodule.c. */ + long num_threads; + /* Support for runtime thread stack size tuning. + A value of 0 means using the platform's default stack size + or the size specified by the THREAD_STACK_SIZE macro. */ + /* Used in Python/thread.c. */ + size_t pythread_stacksize; + + PyObject *codec_search_path; + PyObject *codec_search_cache; + PyObject *codec_error_registry; + int codecs_initialized; + int fscodec_initialized; + + _PyCoreConfig core_config; + _PyMainInterpreterConfig config; +#ifdef HAVE_DLOPEN + int dlopenflags; +#endif + + PyObject *builtins_copy; + PyObject *import_func; + /* Initialized to PyEval_EvalFrameDefault(). */ + _PyFrameEvalFunction eval_frame; + + Py_ssize_t co_extra_user_count; + freefunc co_extra_freefuncs[MAX_CO_EXTRA_USERS]; + +#ifdef HAVE_FORK + PyObject *before_forkers; + PyObject *after_forkers_parent; + PyObject *after_forkers_child; +#endif + /* AtExit module */ + void (*pyexitfunc)(PyObject *); + PyObject *pyexitmodule; + + uint64_t tstate_next_unique_id; +} PyInterpreterState; +#endif /* !Py_LIMITED_API */ + + +/* State unique per thread */ + +#ifndef Py_LIMITED_API +/* Py_tracefunc return -1 when raising an exception, or 0 for success. */ +typedef int (*Py_tracefunc)(PyObject *, struct _frame *, int, PyObject *); + +/* The following values are used for 'what' for tracefunc functions + * + * To add a new kind of trace event, also update "trace_init" in + * Python/sysmodule.c to define the Python level event name + */ +#define PyTrace_CALL 0 +#define PyTrace_EXCEPTION 1 +#define PyTrace_LINE 2 +#define PyTrace_RETURN 3 +#define PyTrace_C_CALL 4 +#define PyTrace_C_EXCEPTION 5 +#define PyTrace_C_RETURN 6 +#define PyTrace_OPCODE 7 +#endif /* Py_LIMITED_API */ + +#ifdef Py_LIMITED_API +typedef struct _ts PyThreadState; +#else + +typedef struct _err_stackitem { + /* This struct represents an entry on the exception stack, which is a + * per-coroutine state. (Coroutine in the computer science sense, + * including the thread and generators). + * This ensures that the exception state is not impacted by "yields" + * from an except handler. + */ + PyObject *exc_type, *exc_value, *exc_traceback; + + struct _err_stackitem *previous_item; + +} _PyErr_StackItem; + + +typedef struct _ts { + /* See Python/ceval.c for comments explaining most fields */ + + struct _ts *prev; + struct _ts *next; + PyInterpreterState *interp; + + struct _frame *frame; + int recursion_depth; + char overflowed; /* The stack has overflowed. 
Allow 50 more calls + to handle the runtime error. */ + char recursion_critical; /* The current calls must not cause + a stack overflow. */ + int stackcheck_counter; + + /* 'tracing' keeps track of the execution depth when tracing/profiling. + This is to prevent the actual trace/profile code from being recorded in + the trace/profile. */ + int tracing; + int use_tracing; + + Py_tracefunc c_profilefunc; + Py_tracefunc c_tracefunc; + PyObject *c_profileobj; + PyObject *c_traceobj; + + /* The exception currently being raised */ + PyObject *curexc_type; + PyObject *curexc_value; + PyObject *curexc_traceback; + + /* The exception currently being handled, if no coroutines/generators + * are present. Always last element on the stack referred to be exc_info. + */ + _PyErr_StackItem exc_state; + + /* Pointer to the top of the stack of the exceptions currently + * being handled */ + _PyErr_StackItem *exc_info; + + PyObject *dict; /* Stores per-thread state */ + + int gilstate_counter; + + PyObject *async_exc; /* Asynchronous exception to raise */ + unsigned long thread_id; /* Thread id where this tstate was created */ + + int trash_delete_nesting; + PyObject *trash_delete_later; + + /* Called when a thread state is deleted normally, but not when it + * is destroyed after fork(). + * Pain: to prevent rare but fatal shutdown errors (issue 18808), + * Thread.join() must wait for the join'ed thread's tstate to be unlinked + * from the tstate chain. That happens at the end of a thread's life, + * in pystate.c. + * The obvious way doesn't quite work: create a lock which the tstate + * unlinking code releases, and have Thread.join() wait to acquire that + * lock. The problem is that we _are_ at the end of the thread's life: + * if the thread holds the last reference to the lock, decref'ing the + * lock will delete the lock, and that may trigger arbitrary Python code + * if there's a weakref, with a callback, to the lock. But by this time + * _PyThreadState_Current is already NULL, so only the simplest of C code + * can be allowed to run (in particular it must not be possible to + * release the GIL). + * So instead of holding the lock directly, the tstate holds a weakref to + * the lock: that's the value of on_delete_data below. Decref'ing a + * weakref is harmless. + * on_delete points to _threadmodule.c's static release_sentinel() function. + * After the tstate is unlinked, release_sentinel is called with the + * weakref-to-lock (on_delete_data) argument, and release_sentinel releases + * the indirectly held lock. + */ + void (*on_delete)(void *); + void *on_delete_data; + + int coroutine_origin_tracking_depth; + + PyObject *coroutine_wrapper; + int in_coroutine_wrapper; + + PyObject *async_gen_firstiter; + PyObject *async_gen_finalizer; + + PyObject *context; + uint64_t context_ver; + + /* Unique thread state id. 
*/ + uint64_t id; + + /* XXX signal handlers should also be here */ + +} PyThreadState; +#endif /* !Py_LIMITED_API */ + + +PyAPI_FUNC(PyInterpreterState *) PyInterpreterState_New(void); +PyAPI_FUNC(void) PyInterpreterState_Clear(PyInterpreterState *); +PyAPI_FUNC(void) PyInterpreterState_Delete(PyInterpreterState *); +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03070000 +/* New in 3.7 */ +PyAPI_FUNC(int64_t) PyInterpreterState_GetID(PyInterpreterState *); +#endif +#ifndef Py_LIMITED_API +PyAPI_FUNC(int) _PyState_AddModule(PyObject*, struct PyModuleDef*); +#endif /* !Py_LIMITED_API */ +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03030000 +/* New in 3.3 */ +PyAPI_FUNC(int) PyState_AddModule(PyObject*, struct PyModuleDef*); +PyAPI_FUNC(int) PyState_RemoveModule(struct PyModuleDef*); +#endif +PyAPI_FUNC(PyObject*) PyState_FindModule(struct PyModuleDef*); +#ifndef Py_LIMITED_API +PyAPI_FUNC(void) _PyState_ClearModules(void); +#endif + +PyAPI_FUNC(PyThreadState *) PyThreadState_New(PyInterpreterState *); +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyThreadState *) _PyThreadState_Prealloc(PyInterpreterState *); +PyAPI_FUNC(void) _PyThreadState_Init(PyThreadState *); +#endif /* !Py_LIMITED_API */ +PyAPI_FUNC(void) PyThreadState_Clear(PyThreadState *); +PyAPI_FUNC(void) PyThreadState_Delete(PyThreadState *); +#ifndef Py_LIMITED_API +PyAPI_FUNC(void) _PyThreadState_DeleteExcept(PyThreadState *tstate); +#endif /* !Py_LIMITED_API */ +PyAPI_FUNC(void) PyThreadState_DeleteCurrent(void); +#ifndef Py_LIMITED_API +PyAPI_FUNC(void) _PyGILState_Reinit(void); +#endif /* !Py_LIMITED_API */ + +/* Return the current thread state. The global interpreter lock must be held. + * When the current thread state is NULL, this issues a fatal error (so that + * the caller needn't check for NULL). */ +PyAPI_FUNC(PyThreadState *) PyThreadState_Get(void); + +#ifndef Py_LIMITED_API +/* Similar to PyThreadState_Get(), but don't issue a fatal error + * if it is NULL. */ +PyAPI_FUNC(PyThreadState *) _PyThreadState_UncheckedGet(void); +#endif /* !Py_LIMITED_API */ + +PyAPI_FUNC(PyThreadState *) PyThreadState_Swap(PyThreadState *); +PyAPI_FUNC(PyObject *) PyThreadState_GetDict(void); +PyAPI_FUNC(int) PyThreadState_SetAsyncExc(unsigned long, PyObject *); + + +/* Variable and macro for in-line access to current thread state */ + +/* Assuming the current thread holds the GIL, this is the + PyThreadState for the current thread. */ +#ifdef Py_BUILD_CORE +# define _PyThreadState_Current _PyRuntime.gilstate.tstate_current +# define PyThreadState_GET() \ + ((PyThreadState*)_Py_atomic_load_relaxed(&_PyThreadState_Current)) +#else +# define PyThreadState_GET() PyThreadState_Get() +#endif + +typedef + enum {PyGILState_LOCKED, PyGILState_UNLOCKED} + PyGILState_STATE; + + +/* Ensure that the current thread is ready to call the Python + C API, regardless of the current state of Python, or of its + thread lock. This may be called as many times as desired + by a thread so long as each call is matched with a call to + PyGILState_Release(). In general, other thread-state APIs may + be used between _Ensure() and _Release() calls, so long as the + thread-state is restored to its previous state before the Release(). + For example, normal use of the Py_BEGIN_ALLOW_THREADS/ + Py_END_ALLOW_THREADS macros are acceptable. + + The return value is an opaque "handle" to the thread state when + PyGILState_Ensure() was called, and must be passed to + PyGILState_Release() to ensure Python is left in the same state. 
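In practice the GIL-state protocol documented above is a fixed Ensure/Release pair around whatever C-API work the foreign thread needs to do; a sketch, where notify_python and its callback argument are purely illustrative.

#include <Python.h>

/* Called from a thread the embedding application created itself. */
static void
notify_python(PyObject *callback)
{
    PyGILState_STATE gstate = PyGILState_Ensure();   /* take the GIL; creates a thread
                                                        state for this thread if needed */

    PyObject *res = PyObject_CallObject(callback, NULL);
    if (res == NULL)
        PyErr_Print();        /* report and clear the exception while we still hold the GIL */
    else
        Py_DECREF(res);

    PyGILState_Release(gstate);   /* every Ensure() must be matched by a Release() */
}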
Even + though recursive calls are allowed, these handles can *not* be shared - + each unique call to PyGILState_Ensure must save the handle for its + call to PyGILState_Release. + + When the function returns, the current thread will hold the GIL. + + Failure is a fatal error. +*/ +PyAPI_FUNC(PyGILState_STATE) PyGILState_Ensure(void); + +/* Release any resources previously acquired. After this call, Python's + state will be the same as it was prior to the corresponding + PyGILState_Ensure() call (but generally this state will be unknown to + the caller, hence the use of the GILState API.) + + Every call to PyGILState_Ensure must be matched by a call to + PyGILState_Release on the same thread. +*/ +PyAPI_FUNC(void) PyGILState_Release(PyGILState_STATE); + +/* Helper/diagnostic function - get the current thread state for + this thread. May return NULL if no GILState API has been used + on the current thread. Note that the main thread always has such a + thread-state, even if no auto-thread-state call has been made + on the main thread. +*/ +PyAPI_FUNC(PyThreadState *) PyGILState_GetThisThreadState(void); + +#ifndef Py_LIMITED_API +/* Helper/diagnostic function - return 1 if the current thread + currently holds the GIL, 0 otherwise. + + The function returns 1 if _PyGILState_check_enabled is non-zero. */ +PyAPI_FUNC(int) PyGILState_Check(void); + +/* Unsafe function to get the single PyInterpreterState used by this process' + GILState implementation. + + Return NULL before _PyGILState_Init() is called and after _PyGILState_Fini() + is called. */ +PyAPI_FUNC(PyInterpreterState *) _PyGILState_GetInterpreterStateUnsafe(void); +#endif /* !Py_LIMITED_API */ + + +/* The implementation of sys._current_frames() Returns a dict mapping + thread id to that thread's current frame. +*/ +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject *) _PyThread_CurrentFrames(void); +#endif + +/* Routines for advanced debuggers, requested by David Beazley. + Don't use unless you know what you are doing! */ +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyInterpreterState *) PyInterpreterState_Main(void); +PyAPI_FUNC(PyInterpreterState *) PyInterpreterState_Head(void); +PyAPI_FUNC(PyInterpreterState *) PyInterpreterState_Next(PyInterpreterState *); +PyAPI_FUNC(PyThreadState *) PyInterpreterState_ThreadHead(PyInterpreterState *); +PyAPI_FUNC(PyThreadState *) PyThreadState_Next(PyThreadState *); + +typedef struct _frame *(*PyThreadFrameGetter)(PyThreadState *self_); +#endif + +#ifdef __cplusplus +} +#endif +#endif /* !Py_PYSTATE_H */ diff --git a/venv/Include/pystrcmp.h b/venv/Include/pystrcmp.h new file mode 100644 index 00000000..edb12397 --- /dev/null +++ b/venv/Include/pystrcmp.h @@ -0,0 +1,23 @@ +#ifndef Py_STRCMP_H +#define Py_STRCMP_H + +#ifdef __cplusplus +extern "C" { +#endif + +PyAPI_FUNC(int) PyOS_mystrnicmp(const char *, const char *, Py_ssize_t); +PyAPI_FUNC(int) PyOS_mystricmp(const char *, const char *); + +#ifdef MS_WINDOWS +#define PyOS_strnicmp strnicmp +#define PyOS_stricmp stricmp +#else +#define PyOS_strnicmp PyOS_mystrnicmp +#define PyOS_stricmp PyOS_mystricmp +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* !Py_STRCMP_H */ diff --git a/venv/Include/pystrhex.h b/venv/Include/pystrhex.h new file mode 100644 index 00000000..66a30e22 --- /dev/null +++ b/venv/Include/pystrhex.h @@ -0,0 +1,19 @@ +#ifndef Py_STRHEX_H +#define Py_STRHEX_H + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_LIMITED_API +/* Returns a str() containing the hex representation of argbuf. 
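The pystrcmp.h helpers above simply paper over the platform spelling of case-insensitive comparison; for instance (is_utf8_name is an invented helper):

#include <Python.h>

/* Accept "UTF-8", "utf-8", "Utf8", ... */
static int
is_utf8_name(const char *name)
{
    return PyOS_stricmp(name, "utf-8") == 0 ||
           PyOS_stricmp(name, "utf8") == 0;
}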
*/ +PyAPI_FUNC(PyObject*) _Py_strhex(const char* argbuf, const Py_ssize_t arglen); +/* Returns a bytes() containing the ASCII hex representation of argbuf. */ +PyAPI_FUNC(PyObject*) _Py_strhex_bytes(const char* argbuf, const Py_ssize_t arglen); +#endif /* !Py_LIMITED_API */ + +#ifdef __cplusplus +} +#endif + +#endif /* !Py_STRHEX_H */ diff --git a/venv/Include/pystrtod.h b/venv/Include/pystrtod.h new file mode 100644 index 00000000..c1e84de6 --- /dev/null +++ b/venv/Include/pystrtod.h @@ -0,0 +1,45 @@ +#ifndef Py_STRTOD_H +#define Py_STRTOD_H + +#ifdef __cplusplus +extern "C" { +#endif + + +PyAPI_FUNC(double) PyOS_string_to_double(const char *str, + char **endptr, + PyObject *overflow_exception); + +/* The caller is responsible for calling PyMem_Free to free the buffer + that's is returned. */ +PyAPI_FUNC(char *) PyOS_double_to_string(double val, + char format_code, + int precision, + int flags, + int *type); + +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject *) _Py_string_to_number_with_underscores( + const char *str, Py_ssize_t len, const char *what, PyObject *obj, void *arg, + PyObject *(*innerfunc)(const char *, Py_ssize_t, void *)); + +PyAPI_FUNC(double) _Py_parse_inf_or_nan(const char *p, char **endptr); +#endif + + +/* PyOS_double_to_string's "flags" parameter can be set to 0 or more of: */ +#define Py_DTSF_SIGN 0x01 /* always add the sign */ +#define Py_DTSF_ADD_DOT_0 0x02 /* if the result is an integer add ".0" */ +#define Py_DTSF_ALT 0x04 /* "alternate" formatting. it's format_code + specific */ + +/* PyOS_double_to_string's "type", if non-NULL, will be set to one of: */ +#define Py_DTST_FINITE 0 +#define Py_DTST_INFINITE 1 +#define Py_DTST_NAN 2 + +#ifdef __cplusplus +} +#endif + +#endif /* !Py_STRTOD_H */ diff --git a/venv/Include/pythonrun.h b/venv/Include/pythonrun.h new file mode 100644 index 00000000..6f0c6fc6 --- /dev/null +++ b/venv/Include/pythonrun.h @@ -0,0 +1,181 @@ + +/* Interfaces to parse and execute pieces of python code */ + +#ifndef Py_PYTHONRUN_H +#define Py_PYTHONRUN_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_LIMITED_API +PyAPI_FUNC(int) PyRun_SimpleStringFlags(const char *, PyCompilerFlags *); +PyAPI_FUNC(int) PyRun_AnyFileFlags(FILE *, const char *, PyCompilerFlags *); +PyAPI_FUNC(int) PyRun_AnyFileExFlags( + FILE *fp, + const char *filename, /* decoded from the filesystem encoding */ + int closeit, + PyCompilerFlags *flags); +PyAPI_FUNC(int) PyRun_SimpleFileExFlags( + FILE *fp, + const char *filename, /* decoded from the filesystem encoding */ + int closeit, + PyCompilerFlags *flags); +PyAPI_FUNC(int) PyRun_InteractiveOneFlags( + FILE *fp, + const char *filename, /* decoded from the filesystem encoding */ + PyCompilerFlags *flags); +PyAPI_FUNC(int) PyRun_InteractiveOneObject( + FILE *fp, + PyObject *filename, + PyCompilerFlags *flags); +PyAPI_FUNC(int) PyRun_InteractiveLoopFlags( + FILE *fp, + const char *filename, /* decoded from the filesystem encoding */ + PyCompilerFlags *flags); + +PyAPI_FUNC(struct _mod *) PyParser_ASTFromString( + const char *s, + const char *filename, /* decoded from the filesystem encoding */ + int start, + PyCompilerFlags *flags, + PyArena *arena); +PyAPI_FUNC(struct _mod *) PyParser_ASTFromStringObject( + const char *s, + PyObject *filename, + int start, + PyCompilerFlags *flags, + PyArena *arena); +PyAPI_FUNC(struct _mod *) PyParser_ASTFromFile( + FILE *fp, + const char *filename, /* decoded from the filesystem encoding */ + const char* enc, + int start, + const char *ps1, + const char *ps2, + PyCompilerFlags *flags, 
+ int *errcode, + PyArena *arena); +PyAPI_FUNC(struct _mod *) PyParser_ASTFromFileObject( + FILE *fp, + PyObject *filename, + const char* enc, + int start, + const char *ps1, + const char *ps2, + PyCompilerFlags *flags, + int *errcode, + PyArena *arena); +#endif + +#ifndef PyParser_SimpleParseString +#define PyParser_SimpleParseString(S, B) \ + PyParser_SimpleParseStringFlags(S, B, 0) +#define PyParser_SimpleParseFile(FP, S, B) \ + PyParser_SimpleParseFileFlags(FP, S, B, 0) +#endif +PyAPI_FUNC(struct _node *) PyParser_SimpleParseStringFlags(const char *, int, + int); +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03030000 +PyAPI_FUNC(struct _node *) PyParser_SimpleParseStringFlagsFilename(const char *, + const char *, + int, int); +#endif +PyAPI_FUNC(struct _node *) PyParser_SimpleParseFileFlags(FILE *, const char *, + int, int); + +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject *) PyRun_StringFlags(const char *, int, PyObject *, + PyObject *, PyCompilerFlags *); + +PyAPI_FUNC(PyObject *) PyRun_FileExFlags( + FILE *fp, + const char *filename, /* decoded from the filesystem encoding */ + int start, + PyObject *globals, + PyObject *locals, + int closeit, + PyCompilerFlags *flags); +#endif + +#ifdef Py_LIMITED_API +PyAPI_FUNC(PyObject *) Py_CompileString(const char *, const char *, int); +#else +#define Py_CompileString(str, p, s) Py_CompileStringExFlags(str, p, s, NULL, -1) +#define Py_CompileStringFlags(str, p, s, f) Py_CompileStringExFlags(str, p, s, f, -1) +PyAPI_FUNC(PyObject *) Py_CompileStringExFlags( + const char *str, + const char *filename, /* decoded from the filesystem encoding */ + int start, + PyCompilerFlags *flags, + int optimize); +PyAPI_FUNC(PyObject *) Py_CompileStringObject( + const char *str, + PyObject *filename, int start, + PyCompilerFlags *flags, + int optimize); +#endif +PyAPI_FUNC(struct symtable *) Py_SymtableString( + const char *str, + const char *filename, /* decoded from the filesystem encoding */ + int start); +#ifndef Py_LIMITED_API +PyAPI_FUNC(struct symtable *) Py_SymtableStringObject( + const char *str, + PyObject *filename, + int start); +#endif + +PyAPI_FUNC(void) PyErr_Print(void); +PyAPI_FUNC(void) PyErr_PrintEx(int); +PyAPI_FUNC(void) PyErr_Display(PyObject *, PyObject *, PyObject *); + +#ifndef Py_LIMITED_API +/* Use macros for a bunch of old variants */ +#define PyRun_String(str, s, g, l) PyRun_StringFlags(str, s, g, l, NULL) +#define PyRun_AnyFile(fp, name) PyRun_AnyFileExFlags(fp, name, 0, NULL) +#define PyRun_AnyFileEx(fp, name, closeit) \ + PyRun_AnyFileExFlags(fp, name, closeit, NULL) +#define PyRun_AnyFileFlags(fp, name, flags) \ + PyRun_AnyFileExFlags(fp, name, 0, flags) +#define PyRun_SimpleString(s) PyRun_SimpleStringFlags(s, NULL) +#define PyRun_SimpleFile(f, p) PyRun_SimpleFileExFlags(f, p, 0, NULL) +#define PyRun_SimpleFileEx(f, p, c) PyRun_SimpleFileExFlags(f, p, c, NULL) +#define PyRun_InteractiveOne(f, p) PyRun_InteractiveOneFlags(f, p, NULL) +#define PyRun_InteractiveLoop(f, p) PyRun_InteractiveLoopFlags(f, p, NULL) +#define PyRun_File(fp, p, s, g, l) \ + PyRun_FileExFlags(fp, p, s, g, l, 0, NULL) +#define PyRun_FileEx(fp, p, s, g, l, c) \ + PyRun_FileExFlags(fp, p, s, g, l, c, NULL) +#define PyRun_FileFlags(fp, p, s, g, l, flags) \ + PyRun_FileExFlags(fp, p, s, g, l, 0, flags) +#endif + +/* Stuff with no proper home (yet) */ +#ifndef Py_LIMITED_API +PyAPI_FUNC(char *) PyOS_Readline(FILE *, FILE *, const char *); +#endif +PyAPI_DATA(int) (*PyOS_InputHook)(void); +PyAPI_DATA(char) *(*PyOS_ReadlineFunctionPointer)(FILE *, FILE *, 
const char *); +#ifndef Py_LIMITED_API +PyAPI_DATA(PyThreadState*) _PyOS_ReadlineTState; +#endif + +/* Stack size, in "pointers" (so we get extra safety margins + on 64-bit platforms). On a 32-bit platform, this translates + to an 8k margin. */ +#define PYOS_STACK_MARGIN 2048 + +#if defined(WIN32) && !defined(MS_WIN64) && defined(_MSC_VER) && _MSC_VER >= 1300 +/* Enable stack checking under Microsoft C */ +#define USE_STACKCHECK +#endif + +#ifdef USE_STACKCHECK +/* Check that we aren't overflowing our stack */ +PyAPI_FUNC(int) PyOS_CheckStack(void); +#endif + +#ifdef __cplusplus +} +#endif +#endif /* !Py_PYTHONRUN_H */ diff --git a/venv/Include/pythread.h b/venv/Include/pythread.h new file mode 100644 index 00000000..eb61033b --- /dev/null +++ b/venv/Include/pythread.h @@ -0,0 +1,155 @@ + +#ifndef Py_PYTHREAD_H +#define Py_PYTHREAD_H + +typedef void *PyThread_type_lock; +typedef void *PyThread_type_sema; + +#ifdef __cplusplus +extern "C" { +#endif + +/* Return status codes for Python lock acquisition. Chosen for maximum + * backwards compatibility, ie failure -> 0, success -> 1. */ +typedef enum PyLockStatus { + PY_LOCK_FAILURE = 0, + PY_LOCK_ACQUIRED = 1, + PY_LOCK_INTR +} PyLockStatus; + +#ifndef Py_LIMITED_API +#define PYTHREAD_INVALID_THREAD_ID ((unsigned long)-1) +#endif + +PyAPI_FUNC(void) PyThread_init_thread(void); +PyAPI_FUNC(unsigned long) PyThread_start_new_thread(void (*)(void *), void *); +PyAPI_FUNC(void) PyThread_exit_thread(void); +PyAPI_FUNC(unsigned long) PyThread_get_thread_ident(void); + +PyAPI_FUNC(PyThread_type_lock) PyThread_allocate_lock(void); +PyAPI_FUNC(void) PyThread_free_lock(PyThread_type_lock); +PyAPI_FUNC(int) PyThread_acquire_lock(PyThread_type_lock, int); +#define WAIT_LOCK 1 +#define NOWAIT_LOCK 0 + +/* PY_TIMEOUT_T is the integral type used to specify timeouts when waiting + on a lock (see PyThread_acquire_lock_timed() below). + PY_TIMEOUT_MAX is the highest usable value (in microseconds) of that + type, and depends on the system threading API. + + NOTE: this isn't the same value as `_thread.TIMEOUT_MAX`. The _thread + module exposes a higher-level API, with timeouts expressed in seconds + and floating-point numbers allowed. +*/ +#define PY_TIMEOUT_T long long + +#if defined(_POSIX_THREADS) + /* PyThread_acquire_lock_timed() uses _PyTime_FromNanoseconds(us * 1000), + convert microseconds to nanoseconds. */ +# define PY_TIMEOUT_MAX (PY_LLONG_MAX / 1000) +#elif defined (NT_THREADS) + /* In the NT API, the timeout is a DWORD and is expressed in milliseconds */ +# if 0xFFFFFFFFLL * 1000 < PY_LLONG_MAX +# define PY_TIMEOUT_MAX (0xFFFFFFFFLL * 1000) +# else +# define PY_TIMEOUT_MAX PY_LLONG_MAX +# endif +#else +# define PY_TIMEOUT_MAX PY_LLONG_MAX +#endif + + +/* If microseconds == 0, the call is non-blocking: it returns immediately + even when the lock can't be acquired. + If microseconds > 0, the call waits up to the specified duration. + If microseconds < 0, the call waits until success (or abnormal failure) + + microseconds must be less than PY_TIMEOUT_MAX. Behaviour otherwise is + undefined. + + If intr_flag is true and the acquire is interrupted by a signal, then the + call will return PY_LOCK_INTR. The caller may reattempt to acquire the + lock. 
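A sketch of the lock API documented above: the timeout passed to PyThread_acquire_lock_timed() is in microseconds, the result is a PyLockStatus, and a successful acquire must be paired with PyThread_release_lock(). The helper name and the one-second timeout are illustrative.

#include <Python.h>

/* Try to enter a critical section, waiting at most one second. */
static int
run_guarded(PyThread_type_lock lock)
{
    PyLockStatus st = PyThread_acquire_lock_timed(lock,
                                                  1000000 /* us */,
                                                  0 /* intr_flag: no PY_LOCK_INTR */);
    if (st != PY_LOCK_ACQUIRED)
        return 0;              /* PY_LOCK_FAILURE: timed out */

    /* ... critical section ... */

    PyThread_release_lock(lock);
    return 1;
}

/* Typical lifecycle: lock = PyThread_allocate_lock(); ... PyThread_free_lock(lock); */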
+*/ +PyAPI_FUNC(PyLockStatus) PyThread_acquire_lock_timed(PyThread_type_lock, + PY_TIMEOUT_T microseconds, + int intr_flag); + +PyAPI_FUNC(void) PyThread_release_lock(PyThread_type_lock); + +PyAPI_FUNC(size_t) PyThread_get_stacksize(void); +PyAPI_FUNC(int) PyThread_set_stacksize(size_t); + +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03030000 +PyAPI_FUNC(PyObject*) PyThread_GetInfo(void); +#endif + + +/* Thread Local Storage (TLS) API + TLS API is DEPRECATED. Use Thread Specific Storage (TSS) API. + + The existing TLS API has used int to represent TLS keys across all + platforms, but it is not POSIX-compliant. Therefore, the new TSS API uses + opaque data type to represent TSS keys to be compatible (see PEP 539). +*/ +PyAPI_FUNC(int) PyThread_create_key(void) Py_DEPRECATED(3.7); +PyAPI_FUNC(void) PyThread_delete_key(int key) Py_DEPRECATED(3.7); +PyAPI_FUNC(int) PyThread_set_key_value(int key, void *value) Py_DEPRECATED(3.7); +PyAPI_FUNC(void *) PyThread_get_key_value(int key) Py_DEPRECATED(3.7); +PyAPI_FUNC(void) PyThread_delete_key_value(int key) Py_DEPRECATED(3.7); + +/* Cleanup after a fork */ +PyAPI_FUNC(void) PyThread_ReInitTLS(void) Py_DEPRECATED(3.7); + + +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03070000 +/* New in 3.7 */ +/* Thread Specific Storage (TSS) API */ + +typedef struct _Py_tss_t Py_tss_t; /* opaque */ + +#ifndef Py_LIMITED_API +#if defined(_POSIX_THREADS) + /* Darwin needs pthread.h to know type name the pthread_key_t. */ +# include +# define NATIVE_TSS_KEY_T pthread_key_t +#elif defined(NT_THREADS) + /* In Windows, native TSS key type is DWORD, + but hardcode the unsigned long to avoid errors for include directive. + */ +# define NATIVE_TSS_KEY_T unsigned long +#else +# error "Require native threads. See https://bugs.python.org/issue31370" +#endif + +/* When Py_LIMITED_API is not defined, the type layout of Py_tss_t is + exposed to allow static allocation in the API clients. Even in this case, + you must handle TSS keys through API functions due to compatibility. +*/ +struct _Py_tss_t { + int _is_initialized; + NATIVE_TSS_KEY_T _key; +}; + +#undef NATIVE_TSS_KEY_T + +/* When static allocation, you must initialize with Py_tss_NEEDS_INIT. */ +#define Py_tss_NEEDS_INIT {0} +#endif /* !Py_LIMITED_API */ + +PyAPI_FUNC(Py_tss_t *) PyThread_tss_alloc(void); +PyAPI_FUNC(void) PyThread_tss_free(Py_tss_t *key); + +/* The parameter key must not be NULL. */ +PyAPI_FUNC(int) PyThread_tss_is_created(Py_tss_t *key); +PyAPI_FUNC(int) PyThread_tss_create(Py_tss_t *key); +PyAPI_FUNC(void) PyThread_tss_delete(Py_tss_t *key); +PyAPI_FUNC(int) PyThread_tss_set(Py_tss_t *key, void *value); +PyAPI_FUNC(void *) PyThread_tss_get(Py_tss_t *key); +#endif /* New in 3.7 */ + +#ifdef __cplusplus +} +#endif + +#endif /* !Py_PYTHREAD_H */ diff --git a/venv/Include/pytime.h b/venv/Include/pytime.h new file mode 100644 index 00000000..4870a9df --- /dev/null +++ b/venv/Include/pytime.h @@ -0,0 +1,246 @@ +#ifndef Py_LIMITED_API +#ifndef Py_PYTIME_H +#define Py_PYTIME_H + +#include "pyconfig.h" /* include for defines */ +#include "object.h" + +/************************************************************************** +Symbols and macros to supply platform-independent interfaces to time related +functions and constants +**************************************************************************/ +#ifdef __cplusplus +extern "C" { +#endif + +/* _PyTime_t: Python timestamp with subsecond precision. 
It can be used to + store a duration, and so indirectly a date (related to another date, like + UNIX epoch). */ +typedef int64_t _PyTime_t; +#define _PyTime_MIN PY_LLONG_MIN +#define _PyTime_MAX PY_LLONG_MAX + +typedef enum { + /* Round towards minus infinity (-inf). + For example, used to read a clock. */ + _PyTime_ROUND_FLOOR=0, + /* Round towards infinity (+inf). + For example, used for timeout to wait "at least" N seconds. */ + _PyTime_ROUND_CEILING=1, + /* Round to nearest with ties going to nearest even integer. + For example, used to round from a Python float. */ + _PyTime_ROUND_HALF_EVEN=2, + /* Round away from zero + For example, used for timeout. _PyTime_ROUND_CEILING rounds + -1e-9 to 0 milliseconds which causes bpo-31786 issue. + _PyTime_ROUND_UP rounds -1e-9 to -1 millisecond which keeps + the timeout sign as expected. select.poll(timeout) must block + for negative values." */ + _PyTime_ROUND_UP=3, + /* _PyTime_ROUND_TIMEOUT (an alias for _PyTime_ROUND_UP) should be + used for timeouts. */ + _PyTime_ROUND_TIMEOUT = _PyTime_ROUND_UP +} _PyTime_round_t; + + +/* Convert a time_t to a PyLong. */ +PyAPI_FUNC(PyObject *) _PyLong_FromTime_t( + time_t sec); + +/* Convert a PyLong to a time_t. */ +PyAPI_FUNC(time_t) _PyLong_AsTime_t( + PyObject *obj); + +/* Convert a number of seconds, int or float, to time_t. */ +PyAPI_FUNC(int) _PyTime_ObjectToTime_t( + PyObject *obj, + time_t *sec, + _PyTime_round_t); + +/* Convert a number of seconds, int or float, to a timeval structure. + usec is in the range [0; 999999] and rounded towards zero. + For example, -1.2 is converted to (-2, 800000). */ +PyAPI_FUNC(int) _PyTime_ObjectToTimeval( + PyObject *obj, + time_t *sec, + long *usec, + _PyTime_round_t); + +/* Convert a number of seconds, int or float, to a timespec structure. + nsec is in the range [0; 999999999] and rounded towards zero. + For example, -1.2 is converted to (-2, 800000000). */ +PyAPI_FUNC(int) _PyTime_ObjectToTimespec( + PyObject *obj, + time_t *sec, + long *nsec, + _PyTime_round_t); + + +/* Create a timestamp from a number of seconds. */ +PyAPI_FUNC(_PyTime_t) _PyTime_FromSeconds(int seconds); + +/* Macro to create a timestamp from a number of seconds, no integer overflow. + Only use the macro for small values, prefer _PyTime_FromSeconds(). */ +#define _PYTIME_FROMSECONDS(seconds) \ + ((_PyTime_t)(seconds) * (1000 * 1000 * 1000)) + +/* Create a timestamp from a number of nanoseconds. */ +PyAPI_FUNC(_PyTime_t) _PyTime_FromNanoseconds(_PyTime_t ns); + +/* Create a timestamp from nanoseconds (Python int). */ +PyAPI_FUNC(int) _PyTime_FromNanosecondsObject(_PyTime_t *t, + PyObject *obj); + +/* Convert a number of seconds (Python float or int) to a timetamp. + Raise an exception and return -1 on error, return 0 on success. */ +PyAPI_FUNC(int) _PyTime_FromSecondsObject(_PyTime_t *t, + PyObject *obj, + _PyTime_round_t round); + +/* Convert a number of milliseconds (Python float or int, 10^-3) to a timetamp. + Raise an exception and return -1 on error, return 0 on success. */ +PyAPI_FUNC(int) _PyTime_FromMillisecondsObject(_PyTime_t *t, + PyObject *obj, + _PyTime_round_t round); + +/* Convert a timestamp to a number of seconds as a C double. */ +PyAPI_FUNC(double) _PyTime_AsSecondsDouble(_PyTime_t t); + +/* Convert timestamp to a number of milliseconds (10^-3 seconds). */ +PyAPI_FUNC(_PyTime_t) _PyTime_AsMilliseconds(_PyTime_t t, + _PyTime_round_t round); + +/* Convert timestamp to a number of microseconds (10^-6 seconds). 
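+
+   Illustrative sketch (not part of the original header; every function used
+   below is declared in this file): measuring an elapsed duration and then
+   converting it:
+
+       _PyTime_t t0 = _PyTime_GetMonotonicClock();
+       /* ... do some work ... */
+       _PyTime_t dt = _PyTime_GetMonotonicClock() - t0;
+       _PyTime_t us = _PyTime_AsMicroseconds(dt, _PyTime_ROUND_CEILING);
+       double secs = _PyTime_AsSecondsDouble(dt);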
*/ +PyAPI_FUNC(_PyTime_t) _PyTime_AsMicroseconds(_PyTime_t t, + _PyTime_round_t round); + +/* Convert timestamp to a number of nanoseconds (10^-9 seconds) as a Python int + object. */ +PyAPI_FUNC(PyObject *) _PyTime_AsNanosecondsObject(_PyTime_t t); + +/* Create a timestamp from a timeval structure. + Raise an exception and return -1 on overflow, return 0 on success. */ +PyAPI_FUNC(int) _PyTime_FromTimeval(_PyTime_t *tp, struct timeval *tv); + +/* Convert a timestamp to a timeval structure (microsecond resolution). + tv_usec is always positive. + Raise an exception and return -1 if the conversion overflowed, + return 0 on success. */ +PyAPI_FUNC(int) _PyTime_AsTimeval(_PyTime_t t, + struct timeval *tv, + _PyTime_round_t round); + +/* Similar to _PyTime_AsTimeval(), but don't raise an exception on error. */ +PyAPI_FUNC(int) _PyTime_AsTimeval_noraise(_PyTime_t t, + struct timeval *tv, + _PyTime_round_t round); + +/* Convert a timestamp to a number of seconds (secs) and microseconds (us). + us is always positive. This function is similar to _PyTime_AsTimeval() + except that secs is always a time_t type, whereas the timeval structure + uses a C long for tv_sec on Windows. + Raise an exception and return -1 if the conversion overflowed, + return 0 on success. */ +PyAPI_FUNC(int) _PyTime_AsTimevalTime_t( + _PyTime_t t, + time_t *secs, + int *us, + _PyTime_round_t round); + +#if defined(HAVE_CLOCK_GETTIME) || defined(HAVE_KQUEUE) +/* Create a timestamp from a timespec structure. + Raise an exception and return -1 on overflow, return 0 on success. */ +PyAPI_FUNC(int) _PyTime_FromTimespec(_PyTime_t *tp, struct timespec *ts); + +/* Convert a timestamp to a timespec structure (nanosecond resolution). + tv_nsec is always positive. + Raise an exception and return -1 on error, return 0 on success. */ +PyAPI_FUNC(int) _PyTime_AsTimespec(_PyTime_t t, struct timespec *ts); +#endif + +/* Compute ticks * mul / div. + The caller must ensure that ((div - 1) * mul) cannot overflow. */ +PyAPI_FUNC(_PyTime_t) _PyTime_MulDiv(_PyTime_t ticks, + _PyTime_t mul, + _PyTime_t div); + +/* Get the current time from the system clock. + + The function cannot fail. _PyTime_Init() ensures that the system clock + works. */ +PyAPI_FUNC(_PyTime_t) _PyTime_GetSystemClock(void); + +/* Get the time of a monotonic clock, i.e. a clock that cannot go backwards. + The clock is not affected by system clock updates. The reference point of + the returned value is undefined, so that only the difference between the + results of consecutive calls is valid. + + The function cannot fail. _PyTime_Init() ensures that a monotonic clock + is available and works. */ +PyAPI_FUNC(_PyTime_t) _PyTime_GetMonotonicClock(void); + + +/* Structure used by time.get_clock_info() */ +typedef struct { + const char *implementation; + int monotonic; + int adjustable; + double resolution; +} _Py_clock_info_t; + +/* Get the current time from the system clock. + * Fill clock information if info is not NULL. + * Raise an exception and return -1 on error, return 0 on success. + */ +PyAPI_FUNC(int) _PyTime_GetSystemClockWithInfo( + _PyTime_t *t, + _Py_clock_info_t *info); + +/* Get the time of a monotonic clock, i.e. a clock that cannot go backwards. + The clock is not affected by system clock updates. The reference point of + the returned value is undefined, so that only the difference between the + results of consecutive calls is valid. + + Fill info (if set) with information of the function used to get the time. 
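+
+   A hedged usage sketch (not part of the original header):
+
+       _PyTime_t t;
+       _Py_clock_info_t info;
+       if (_PyTime_GetMonotonicClockWithInfo(&t, &info) == 0) {
+           /* info.implementation, info.monotonic, info.adjustable and
+              info.resolution describe the clock that was actually used. */
+       }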
+ + Return 0 on success, raise an exception and return -1 on error. */ +PyAPI_FUNC(int) _PyTime_GetMonotonicClockWithInfo( + _PyTime_t *t, + _Py_clock_info_t *info); + + +/* Initialize time. + Return 0 on success, raise an exception and return -1 on error. */ +PyAPI_FUNC(int) _PyTime_Init(void); + +/* Converts a timestamp to the Gregorian time, using the local time zone. + Return 0 on success, raise an exception and return -1 on error. */ +PyAPI_FUNC(int) _PyTime_localtime(time_t t, struct tm *tm); + +/* Converts a timestamp to the Gregorian time, assuming UTC. + Return 0 on success, raise an exception and return -1 on error. */ +PyAPI_FUNC(int) _PyTime_gmtime(time_t t, struct tm *tm); + +/* Get the performance counter: clock with the highest available resolution to + measure a short duration. + + The function cannot fail. _PyTime_Init() ensures that the system clock + works. */ +PyAPI_FUNC(_PyTime_t) _PyTime_GetPerfCounter(void); + +/* Get the performance counter: clock with the highest available resolution to + measure a short duration. + + Fill info (if set) with information of the function used to get the time. + + Return 0 on success, raise an exception and return -1 on error. */ +PyAPI_FUNC(int) _PyTime_GetPerfCounterWithInfo( + _PyTime_t *t, + _Py_clock_info_t *info); + +#ifdef __cplusplus +} +#endif + +#endif /* Py_PYTIME_H */ +#endif /* Py_LIMITED_API */ diff --git a/venv/Include/rangeobject.h b/venv/Include/rangeobject.h new file mode 100644 index 00000000..7e4dc288 --- /dev/null +++ b/venv/Include/rangeobject.h @@ -0,0 +1,27 @@ + +/* Range object interface */ + +#ifndef Py_RANGEOBJECT_H +#define Py_RANGEOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +/* +A range object represents an integer range. This is an immutable object; +a range cannot change its value after creation. + +Range objects behave like the corresponding tuple objects except that +they are represented by a start, stop, and step datamembers. +*/ + +PyAPI_DATA(PyTypeObject) PyRange_Type; +PyAPI_DATA(PyTypeObject) PyRangeIter_Type; +PyAPI_DATA(PyTypeObject) PyLongRangeIter_Type; + +#define PyRange_Check(op) (Py_TYPE(op) == &PyRange_Type) + +#ifdef __cplusplus +} +#endif +#endif /* !Py_RANGEOBJECT_H */ diff --git a/venv/Include/setobject.h b/venv/Include/setobject.h new file mode 100644 index 00000000..fc0ea839 --- /dev/null +++ b/venv/Include/setobject.h @@ -0,0 +1,108 @@ +/* Set object interface */ + +#ifndef Py_SETOBJECT_H +#define Py_SETOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_LIMITED_API + +/* There are three kinds of entries in the table: + +1. Unused: key == NULL and hash == 0 +2. Dummy: key == dummy and hash == -1 +3. Active: key != NULL and key != dummy and hash != -1 + +The hash field of Unused slots is always zero. + +The hash field of Dummy slots are set to -1 +meaning that dummy entries can be detected by +either entry->key==dummy or by entry->hash==-1. +*/ + +#define PySet_MINSIZE 8 + +typedef struct { + PyObject *key; + Py_hash_t hash; /* Cached hash code of the key */ +} setentry; + +/* The SetObject data structure is shared by set and frozenset objects. + +Invariant for sets: + - hash is -1 + +Invariants for frozensets: + - data is immutable. + - hash is the hash of the frozenset or -1 if not computed yet. + +*/ + +typedef struct { + PyObject_HEAD + + Py_ssize_t fill; /* Number active and dummy entries*/ + Py_ssize_t used; /* Number active entries */ + + /* The table contains mask + 1 slots, and that's a power of 2. 
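+     * (Illustrative note, not in the original header: with mask == 7 the
+     *  table has 8 slots, and probing for a key whose hash is h starts at
+     *  slot h & mask, so no modulo operation is needed.)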
+ * We store the mask instead of the size because the mask is more + * frequently needed. + */ + Py_ssize_t mask; + + /* The table points to a fixed-size smalltable for small tables + * or to additional malloc'ed memory for bigger tables. + * The table pointer is never NULL which saves us from repeated + * runtime null-tests. + */ + setentry *table; + Py_hash_t hash; /* Only used by frozenset objects */ + Py_ssize_t finger; /* Search finger for pop() */ + + setentry smalltable[PySet_MINSIZE]; + PyObject *weakreflist; /* List of weak references */ +} PySetObject; + +#define PySet_GET_SIZE(so) (assert(PyAnySet_Check(so)),(((PySetObject *)(so))->used)) + +PyAPI_DATA(PyObject *) _PySet_Dummy; + +PyAPI_FUNC(int) _PySet_NextEntry(PyObject *set, Py_ssize_t *pos, PyObject **key, Py_hash_t *hash); +PyAPI_FUNC(int) _PySet_Update(PyObject *set, PyObject *iterable); +PyAPI_FUNC(int) PySet_ClearFreeList(void); + +#endif /* Section excluded by Py_LIMITED_API */ + +PyAPI_DATA(PyTypeObject) PySet_Type; +PyAPI_DATA(PyTypeObject) PyFrozenSet_Type; +PyAPI_DATA(PyTypeObject) PySetIter_Type; + +PyAPI_FUNC(PyObject *) PySet_New(PyObject *); +PyAPI_FUNC(PyObject *) PyFrozenSet_New(PyObject *); + +PyAPI_FUNC(int) PySet_Add(PyObject *set, PyObject *key); +PyAPI_FUNC(int) PySet_Clear(PyObject *set); +PyAPI_FUNC(int) PySet_Contains(PyObject *anyset, PyObject *key); +PyAPI_FUNC(int) PySet_Discard(PyObject *set, PyObject *key); +PyAPI_FUNC(PyObject *) PySet_Pop(PyObject *set); +PyAPI_FUNC(Py_ssize_t) PySet_Size(PyObject *anyset); + +#define PyFrozenSet_CheckExact(ob) (Py_TYPE(ob) == &PyFrozenSet_Type) +#define PyAnySet_CheckExact(ob) \ + (Py_TYPE(ob) == &PySet_Type || Py_TYPE(ob) == &PyFrozenSet_Type) +#define PyAnySet_Check(ob) \ + (Py_TYPE(ob) == &PySet_Type || Py_TYPE(ob) == &PyFrozenSet_Type || \ + PyType_IsSubtype(Py_TYPE(ob), &PySet_Type) || \ + PyType_IsSubtype(Py_TYPE(ob), &PyFrozenSet_Type)) +#define PySet_Check(ob) \ + (Py_TYPE(ob) == &PySet_Type || \ + PyType_IsSubtype(Py_TYPE(ob), &PySet_Type)) +#define PyFrozenSet_Check(ob) \ + (Py_TYPE(ob) == &PyFrozenSet_Type || \ + PyType_IsSubtype(Py_TYPE(ob), &PyFrozenSet_Type)) + +#ifdef __cplusplus +} +#endif +#endif /* !Py_SETOBJECT_H */ diff --git a/venv/Include/sliceobject.h b/venv/Include/sliceobject.h new file mode 100644 index 00000000..c238b099 --- /dev/null +++ b/venv/Include/sliceobject.h @@ -0,0 +1,63 @@ +#ifndef Py_SLICEOBJECT_H +#define Py_SLICEOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +/* The unique ellipsis object "..." */ + +PyAPI_DATA(PyObject) _Py_EllipsisObject; /* Don't use this directly */ + +#define Py_Ellipsis (&_Py_EllipsisObject) + +/* Slice object interface */ + +/* + +A slice object containing start, stop, and step data members (the +names are from range). After much talk with Guido, it was decided to +let these be any arbitrary python type. Py_None stands for omitted values. 
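+
+Illustrative sketch (not part of the original header): unpacking a slice and
+clamping it against a sequence of length 100, using the functions declared
+below in this file:
+
+    Py_ssize_t start, stop, step, slicelen;
+    PyObject *s = PySlice_New(NULL, NULL, NULL);    /* the slice [::] */
+    if (s != NULL && PySlice_Unpack(s, &start, &stop, &step) == 0) {
+        slicelen = PySlice_AdjustIndices(100, &start, &stop, step);
+        /* now start == 0, stop == 100, step == 1, slicelen == 100 */
+    }
+    Py_XDECREF(s);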
+*/ +#ifndef Py_LIMITED_API +typedef struct { + PyObject_HEAD + PyObject *start, *stop, *step; /* not NULL */ +} PySliceObject; +#endif + +PyAPI_DATA(PyTypeObject) PySlice_Type; +PyAPI_DATA(PyTypeObject) PyEllipsis_Type; + +#define PySlice_Check(op) (Py_TYPE(op) == &PySlice_Type) + +PyAPI_FUNC(PyObject *) PySlice_New(PyObject* start, PyObject* stop, + PyObject* step); +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject *) _PySlice_FromIndices(Py_ssize_t start, Py_ssize_t stop); +PyAPI_FUNC(int) _PySlice_GetLongIndices(PySliceObject *self, PyObject *length, + PyObject **start_ptr, PyObject **stop_ptr, + PyObject **step_ptr); +#endif +PyAPI_FUNC(int) PySlice_GetIndices(PyObject *r, Py_ssize_t length, + Py_ssize_t *start, Py_ssize_t *stop, Py_ssize_t *step); +PyAPI_FUNC(int) PySlice_GetIndicesEx(PyObject *r, Py_ssize_t length, + Py_ssize_t *start, Py_ssize_t *stop, + Py_ssize_t *step, Py_ssize_t *slicelength) Py_DEPRECATED(3.7); + +#if !defined(Py_LIMITED_API) || (Py_LIMITED_API+0 >= 0x03050400 && Py_LIMITED_API+0 < 0x03060000) || Py_LIMITED_API+0 >= 0x03060100 +#define PySlice_GetIndicesEx(slice, length, start, stop, step, slicelen) ( \ + PySlice_Unpack((slice), (start), (stop), (step)) < 0 ? \ + ((*(slicelen) = 0), -1) : \ + ((*(slicelen) = PySlice_AdjustIndices((length), (start), (stop), *(step))), \ + 0)) +PyAPI_FUNC(int) PySlice_Unpack(PyObject *slice, + Py_ssize_t *start, Py_ssize_t *stop, Py_ssize_t *step); +PyAPI_FUNC(Py_ssize_t) PySlice_AdjustIndices(Py_ssize_t length, + Py_ssize_t *start, Py_ssize_t *stop, + Py_ssize_t step); +#endif + +#ifdef __cplusplus +} +#endif +#endif /* !Py_SLICEOBJECT_H */ diff --git a/venv/Include/structmember.h b/venv/Include/structmember.h new file mode 100644 index 00000000..b54f7081 --- /dev/null +++ b/venv/Include/structmember.h @@ -0,0 +1,74 @@ +#ifndef Py_STRUCTMEMBER_H +#define Py_STRUCTMEMBER_H +#ifdef __cplusplus +extern "C" { +#endif + + +/* Interface to map C struct members to Python object attributes */ + +#include /* For offsetof */ + +/* An array of PyMemberDef structures defines the name, type and offset + of selected members of a C structure. These can be read by + PyMember_GetOne() and set by PyMember_SetOne() (except if their READONLY + flag is set). The array must be terminated with an entry whose name + pointer is NULL. */ + +typedef struct PyMemberDef { + const char *name; + int type; + Py_ssize_t offset; + int flags; + const char *doc; +} PyMemberDef; + +/* Types */ +#define T_SHORT 0 +#define T_INT 1 +#define T_LONG 2 +#define T_FLOAT 3 +#define T_DOUBLE 4 +#define T_STRING 5 +#define T_OBJECT 6 +/* XXX the ordering here is weird for binary compatibility */ +#define T_CHAR 7 /* 1-character string */ +#define T_BYTE 8 /* 8-bit signed int */ +/* unsigned variants: */ +#define T_UBYTE 9 +#define T_USHORT 10 +#define T_UINT 11 +#define T_ULONG 12 + +/* Added by Jack: strings contained in the structure */ +#define T_STRING_INPLACE 13 + +/* Added by Lillo: bools contained in the structure (assumed char) */ +#define T_BOOL 14 + +#define T_OBJECT_EX 16 /* Like T_OBJECT, but raises AttributeError + when the value is NULL, instead of + converting to None. 
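+
+   Illustrative sketch (not part of the original header): a member table for
+   a hypothetical MyObject extension type; MyObject and my_members are
+   placeholder names:
+
+       typedef struct { PyObject_HEAD PyObject *first; int number; } MyObject;
+
+       static PyMemberDef my_members[] = {
+           {"first",  T_OBJECT_EX, offsetof(MyObject, first),  0, "first name"},
+           {"number", T_INT,       offsetof(MyObject, number), 0, "a counter"},
+           {NULL}  /* sentinel */
+       };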
*/ +#define T_LONGLONG 17 +#define T_ULONGLONG 18 + +#define T_PYSSIZET 19 /* Py_ssize_t */ +#define T_NONE 20 /* Value is always None */ + + +/* Flags */ +#define READONLY 1 +#define READ_RESTRICTED 2 +#define PY_WRITE_RESTRICTED 4 +#define RESTRICTED (READ_RESTRICTED | PY_WRITE_RESTRICTED) + + +/* Current API, use this */ +PyAPI_FUNC(PyObject *) PyMember_GetOne(const char *, struct PyMemberDef *); +PyAPI_FUNC(int) PyMember_SetOne(char *, struct PyMemberDef *, PyObject *); + + +#ifdef __cplusplus +} +#endif +#endif /* !Py_STRUCTMEMBER_H */ diff --git a/venv/Include/structseq.h b/venv/Include/structseq.h new file mode 100644 index 00000000..e5e5d5c5 --- /dev/null +++ b/venv/Include/structseq.h @@ -0,0 +1,49 @@ + +/* Named tuple object interface */ + +#ifndef Py_STRUCTSEQ_H +#define Py_STRUCTSEQ_H +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct PyStructSequence_Field { + const char *name; + const char *doc; +} PyStructSequence_Field; + +typedef struct PyStructSequence_Desc { + const char *name; + const char *doc; + struct PyStructSequence_Field *fields; + int n_in_sequence; +} PyStructSequence_Desc; + +extern char* PyStructSequence_UnnamedField; + +#ifndef Py_LIMITED_API +PyAPI_FUNC(void) PyStructSequence_InitType(PyTypeObject *type, + PyStructSequence_Desc *desc); +PyAPI_FUNC(int) PyStructSequence_InitType2(PyTypeObject *type, + PyStructSequence_Desc *desc); +#endif +PyAPI_FUNC(PyTypeObject*) PyStructSequence_NewType(PyStructSequence_Desc *desc); + +PyAPI_FUNC(PyObject *) PyStructSequence_New(PyTypeObject* type); + +#ifndef Py_LIMITED_API +typedef PyTupleObject PyStructSequence; + +/* Macro, *only* to be used to fill in brand new objects */ +#define PyStructSequence_SET_ITEM(op, i, v) PyTuple_SET_ITEM(op, i, v) + +#define PyStructSequence_GET_ITEM(op, i) PyTuple_GET_ITEM(op, i) +#endif + +PyAPI_FUNC(void) PyStructSequence_SetItem(PyObject*, Py_ssize_t, PyObject*); +PyAPI_FUNC(PyObject*) PyStructSequence_GetItem(PyObject*, Py_ssize_t); + +#ifdef __cplusplus +} +#endif +#endif /* !Py_STRUCTSEQ_H */ diff --git a/venv/Include/symtable.h b/venv/Include/symtable.h new file mode 100644 index 00000000..007f88db --- /dev/null +++ b/venv/Include/symtable.h @@ -0,0 +1,118 @@ +#ifndef Py_LIMITED_API +#ifndef Py_SYMTABLE_H +#define Py_SYMTABLE_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* XXX(ncoghlan): This is a weird mix of public names and interpreter internal + * names. + */ + +typedef enum _block_type { FunctionBlock, ClassBlock, ModuleBlock } + _Py_block_ty; + +struct _symtable_entry; + +struct symtable { + PyObject *st_filename; /* name of file being compiled, + decoded from the filesystem encoding */ + struct _symtable_entry *st_cur; /* current symbol table entry */ + struct _symtable_entry *st_top; /* symbol table entry for module */ + PyObject *st_blocks; /* dict: map AST node addresses + * to symbol table entries */ + PyObject *st_stack; /* list: stack of namespace info */ + PyObject *st_global; /* borrowed ref to st_top->ste_symbols */ + int st_nblocks; /* number of blocks used. 
kept for + consistency with the corresponding + compiler structure */ + PyObject *st_private; /* name of current class or NULL */ + PyFutureFeatures *st_future; /* module's future features that affect + the symbol table */ + int recursion_depth; /* current recursion depth */ + int recursion_limit; /* recursion limit */ +}; + +typedef struct _symtable_entry { + PyObject_HEAD + PyObject *ste_id; /* int: key in ste_table->st_blocks */ + PyObject *ste_symbols; /* dict: variable names to flags */ + PyObject *ste_name; /* string: name of current block */ + PyObject *ste_varnames; /* list of function parameters */ + PyObject *ste_children; /* list of child blocks */ + PyObject *ste_directives;/* locations of global and nonlocal statements */ + _Py_block_ty ste_type; /* module, class, or function */ + int ste_nested; /* true if block is nested */ + unsigned ste_free : 1; /* true if block has free variables */ + unsigned ste_child_free : 1; /* true if a child block has free vars, + including free refs to globals */ + unsigned ste_generator : 1; /* true if namespace is a generator */ + unsigned ste_coroutine : 1; /* true if namespace is a coroutine */ + unsigned ste_varargs : 1; /* true if block has varargs */ + unsigned ste_varkeywords : 1; /* true if block has varkeywords */ + unsigned ste_returns_value : 1; /* true if namespace uses return with + an argument */ + unsigned ste_needs_class_closure : 1; /* for class scopes, true if a + closure over __class__ + should be created */ + int ste_lineno; /* first line of block */ + int ste_col_offset; /* offset of first line of block */ + int ste_opt_lineno; /* lineno of last exec or import * */ + int ste_opt_col_offset; /* offset of last exec or import * */ + struct symtable *ste_table; +} PySTEntryObject; + +PyAPI_DATA(PyTypeObject) PySTEntry_Type; + +#define PySTEntry_Check(op) (Py_TYPE(op) == &PySTEntry_Type) + +PyAPI_FUNC(int) PyST_GetScope(PySTEntryObject *, PyObject *); + +PyAPI_FUNC(struct symtable *) PySymtable_Build( + mod_ty mod, + const char *filename, /* decoded from the filesystem encoding */ + PyFutureFeatures *future); +PyAPI_FUNC(struct symtable *) PySymtable_BuildObject( + mod_ty mod, + PyObject *filename, + PyFutureFeatures *future); +PyAPI_FUNC(PySTEntryObject *) PySymtable_Lookup(struct symtable *, void *); + +PyAPI_FUNC(void) PySymtable_Free(struct symtable *); + +/* Flags for def-use information */ + +#define DEF_GLOBAL 1 /* global stmt */ +#define DEF_LOCAL 2 /* assignment in code block */ +#define DEF_PARAM 2<<1 /* formal parameter */ +#define DEF_NONLOCAL 2<<2 /* nonlocal stmt */ +#define USE 2<<3 /* name is used */ +#define DEF_FREE 2<<4 /* name used but not defined in nested block */ +#define DEF_FREE_CLASS 2<<5 /* free variable from class's method */ +#define DEF_IMPORT 2<<6 /* assignment occurred via import */ +#define DEF_ANNOT 2<<7 /* this name is annotated */ + +#define DEF_BOUND (DEF_LOCAL | DEF_PARAM | DEF_IMPORT) + +/* GLOBAL_EXPLICIT and GLOBAL_IMPLICIT are used internally by the symbol + table. GLOBAL is returned from PyST_GetScope() for either of them. + It is stored in ste_symbols at bits 12-15. 
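+
+   Illustrative sketch (not part of the original header): roughly how a
+   per-name flags value taken from ste_symbols is decoded, where `v' is a
+   hypothetical PyLong flags object:
+
+       long flags = PyLong_AS_LONG(v);
+       int scope = (flags >> SCOPE_OFFSET) & SCOPE_MASK;
+       if (scope == GLOBAL_EXPLICIT || scope == GLOBAL_IMPLICIT) {
+           /* the name is global in this block */
+       }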
+*/ +#define SCOPE_OFFSET 11 +#define SCOPE_MASK (DEF_GLOBAL | DEF_LOCAL | DEF_PARAM | DEF_NONLOCAL) + +#define LOCAL 1 +#define GLOBAL_EXPLICIT 2 +#define GLOBAL_IMPLICIT 3 +#define FREE 4 +#define CELL 5 + +#define GENERATOR 1 +#define GENERATOR_EXPRESSION 2 + +#ifdef __cplusplus +} +#endif +#endif /* !Py_SYMTABLE_H */ +#endif /* Py_LIMITED_API */ diff --git a/venv/Include/sysmodule.h b/venv/Include/sysmodule.h new file mode 100644 index 00000000..719ecfcf --- /dev/null +++ b/venv/Include/sysmodule.h @@ -0,0 +1,48 @@ + +/* System module interface */ + +#ifndef Py_SYSMODULE_H +#define Py_SYSMODULE_H +#ifdef __cplusplus +extern "C" { +#endif + +PyAPI_FUNC(PyObject *) PySys_GetObject(const char *); +PyAPI_FUNC(int) PySys_SetObject(const char *, PyObject *); +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject *) _PySys_GetObjectId(_Py_Identifier *key); +PyAPI_FUNC(int) _PySys_SetObjectId(_Py_Identifier *key, PyObject *); +#endif + +PyAPI_FUNC(void) PySys_SetArgv(int, wchar_t **); +PyAPI_FUNC(void) PySys_SetArgvEx(int, wchar_t **, int); +PyAPI_FUNC(void) PySys_SetPath(const wchar_t *); + +PyAPI_FUNC(void) PySys_WriteStdout(const char *format, ...) + Py_GCC_ATTRIBUTE((format(printf, 1, 2))); +PyAPI_FUNC(void) PySys_WriteStderr(const char *format, ...) + Py_GCC_ATTRIBUTE((format(printf, 1, 2))); +PyAPI_FUNC(void) PySys_FormatStdout(const char *format, ...); +PyAPI_FUNC(void) PySys_FormatStderr(const char *format, ...); + +PyAPI_FUNC(void) PySys_ResetWarnOptions(void); +PyAPI_FUNC(void) PySys_AddWarnOption(const wchar_t *); +PyAPI_FUNC(void) PySys_AddWarnOptionUnicode(PyObject *); +PyAPI_FUNC(int) PySys_HasWarnOptions(void); + +PyAPI_FUNC(void) PySys_AddXOption(const wchar_t *); +PyAPI_FUNC(PyObject *) PySys_GetXOptions(void); + +#ifndef Py_LIMITED_API +PyAPI_FUNC(size_t) _PySys_GetSizeOf(PyObject *); +#endif + +#ifdef Py_BUILD_CORE +PyAPI_FUNC(int) _PySys_AddXOptionWithError(const wchar_t *s); +PyAPI_FUNC(int) _PySys_AddWarnOptionWithError(PyObject *option); +#endif + +#ifdef __cplusplus +} +#endif +#endif /* !Py_SYSMODULE_H */ diff --git a/venv/Include/token.h b/venv/Include/token.h new file mode 100644 index 00000000..cd1cd00f --- /dev/null +++ b/venv/Include/token.h @@ -0,0 +1,92 @@ + +/* Token types */ +#ifndef Py_LIMITED_API +#ifndef Py_TOKEN_H +#define Py_TOKEN_H +#ifdef __cplusplus +extern "C" { +#endif + +#undef TILDE /* Prevent clash of our definition with system macro. 
Ex AIX, ioctl.h */ + +#define ENDMARKER 0 +#define NAME 1 +#define NUMBER 2 +#define STRING 3 +#define NEWLINE 4 +#define INDENT 5 +#define DEDENT 6 +#define LPAR 7 +#define RPAR 8 +#define LSQB 9 +#define RSQB 10 +#define COLON 11 +#define COMMA 12 +#define SEMI 13 +#define PLUS 14 +#define MINUS 15 +#define STAR 16 +#define SLASH 17 +#define VBAR 18 +#define AMPER 19 +#define LESS 20 +#define GREATER 21 +#define EQUAL 22 +#define DOT 23 +#define PERCENT 24 +#define LBRACE 25 +#define RBRACE 26 +#define EQEQUAL 27 +#define NOTEQUAL 28 +#define LESSEQUAL 29 +#define GREATEREQUAL 30 +#define TILDE 31 +#define CIRCUMFLEX 32 +#define LEFTSHIFT 33 +#define RIGHTSHIFT 34 +#define DOUBLESTAR 35 +#define PLUSEQUAL 36 +#define MINEQUAL 37 +#define STAREQUAL 38 +#define SLASHEQUAL 39 +#define PERCENTEQUAL 40 +#define AMPEREQUAL 41 +#define VBAREQUAL 42 +#define CIRCUMFLEXEQUAL 43 +#define LEFTSHIFTEQUAL 44 +#define RIGHTSHIFTEQUAL 45 +#define DOUBLESTAREQUAL 46 +#define DOUBLESLASH 47 +#define DOUBLESLASHEQUAL 48 +#define AT 49 +#define ATEQUAL 50 +#define RARROW 51 +#define ELLIPSIS 52 +/* Don't forget to update the table _PyParser_TokenNames in tokenizer.c! */ +#define OP 53 +#define ERRORTOKEN 54 +/* These aren't used by the C tokenizer but are needed for tokenize.py */ +#define COMMENT 55 +#define NL 56 +#define ENCODING 57 +#define N_TOKENS 58 + +/* Special definitions for cooperation with parser */ + +#define NT_OFFSET 256 + +#define ISTERMINAL(x) ((x) < NT_OFFSET) +#define ISNONTERMINAL(x) ((x) >= NT_OFFSET) +#define ISEOF(x) ((x) == ENDMARKER) + + +PyAPI_DATA(const char *) _PyParser_TokenNames[]; /* Token names */ +PyAPI_FUNC(int) PyToken_OneChar(int); +PyAPI_FUNC(int) PyToken_TwoChars(int, int); +PyAPI_FUNC(int) PyToken_ThreeChars(int, int, int); + +#ifdef __cplusplus +} +#endif +#endif /* !Py_TOKEN_H */ +#endif /* Py_LIMITED_API */ diff --git a/venv/Include/traceback.h b/venv/Include/traceback.h new file mode 100644 index 00000000..b5874100 --- /dev/null +++ b/venv/Include/traceback.h @@ -0,0 +1,119 @@ + +#ifndef Py_TRACEBACK_H +#define Py_TRACEBACK_H +#ifdef __cplusplus +extern "C" { +#endif + +#include "pystate.h" + +struct _frame; + +/* Traceback interface */ +#ifndef Py_LIMITED_API +typedef struct _traceback { + PyObject_HEAD + struct _traceback *tb_next; + struct _frame *tb_frame; + int tb_lasti; + int tb_lineno; +} PyTracebackObject; +#endif + +PyAPI_FUNC(int) PyTraceBack_Here(struct _frame *); +PyAPI_FUNC(int) PyTraceBack_Print(PyObject *, PyObject *); +#ifndef Py_LIMITED_API +PyAPI_FUNC(int) _Py_DisplaySourceLine(PyObject *, PyObject *, int, int); +PyAPI_FUNC(void) _PyTraceback_Add(const char *, const char *, int); +#endif + +/* Reveal traceback type so we can typecheck traceback objects */ +PyAPI_DATA(PyTypeObject) PyTraceBack_Type; +#define PyTraceBack_Check(v) (Py_TYPE(v) == &PyTraceBack_Type) + +#ifndef Py_LIMITED_API +/* Write the Python traceback into the file 'fd'. For example: + + Traceback (most recent call first): + File "xxx", line xxx in + File "xxx", line xxx in + ... + File "xxx", line xxx in + + This function is written for debug purpose only, to dump the traceback in + the worst case: after a segmentation fault, at fatal error, etc. That's why, + it is very limited. Strings are truncated to 100 characters and encoded to + ASCII with backslashreplace. It doesn't write the source code, only the + function name, filename and line number of each frame. Write only the first + 100 frames: if the traceback is truncated, write the line " ...". 
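+
+   A hedged usage sketch (not part of the original header); fileno() is
+   POSIX, from <stdio.h>:
+
+       PyThreadState *tstate = PyGILState_GetThisThreadState();
+       _Py_DumpTraceback(fileno(stderr), tstate);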
+ + This function is signal safe. */ + +PyAPI_FUNC(void) _Py_DumpTraceback( + int fd, + PyThreadState *tstate); + +/* Write the traceback of all threads into the file 'fd'. current_thread can be + NULL. + + Return NULL on success, or an error message on error. + + This function is written for debug purpose only. It calls + _Py_DumpTraceback() for each thread, and so has the same limitations. It + only write the traceback of the first 100 threads: write "..." if there are + more threads. + + If current_tstate is NULL, the function tries to get the Python thread state + of the current thread. It is not an error if the function is unable to get + the current Python thread state. + + If interp is NULL, the function tries to get the interpreter state from + the current Python thread state, or from + _PyGILState_GetInterpreterStateUnsafe() in last resort. + + It is better to pass NULL to interp and current_tstate, the function tries + different options to retrieve these informations. + + This function is signal safe. */ + +PyAPI_FUNC(const char*) _Py_DumpTracebackThreads( + int fd, + PyInterpreterState *interp, + PyThreadState *current_tstate); +#endif /* !Py_LIMITED_API */ + +#ifndef Py_LIMITED_API + +/* Write a Unicode object into the file descriptor fd. Encode the string to + ASCII using the backslashreplace error handler. + + Do nothing if text is not a Unicode object. The function accepts Unicode + string which is not ready (PyUnicode_WCHAR_KIND). + + This function is signal safe. */ +PyAPI_FUNC(void) _Py_DumpASCII(int fd, PyObject *text); + +/* Format an integer as decimal into the file descriptor fd. + + This function is signal safe. */ +PyAPI_FUNC(void) _Py_DumpDecimal( + int fd, + unsigned long value); + +/* Format an integer as hexadecimal into the file descriptor fd with at least + width digits. + + The maximum width is sizeof(unsigned long)*2 digits. + + This function is signal safe. */ +PyAPI_FUNC(void) _Py_DumpHexadecimal( + int fd, + unsigned long value, + Py_ssize_t width); + +#endif /* !Py_LIMITED_API */ + +#ifdef __cplusplus +} +#endif +#endif /* !Py_TRACEBACK_H */ diff --git a/venv/Include/tupleobject.h b/venv/Include/tupleobject.h new file mode 100644 index 00000000..72a7d8d5 --- /dev/null +++ b/venv/Include/tupleobject.h @@ -0,0 +1,73 @@ + +/* Tuple object interface */ + +#ifndef Py_TUPLEOBJECT_H +#define Py_TUPLEOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +/* +Another generally useful object type is a tuple of object pointers. +For Python, this is an immutable type. C code can change the tuple items +(but not their number), and even use tuples as general-purpose arrays of +object references, but in general only brand new tuples should be mutated, +not ones that might already have been exposed to Python code. + +*** WARNING *** PyTuple_SetItem does not increment the new item's reference +count, but does decrement the reference count of the item it replaces, +if not nil. It does *decrement* the reference count if it is *not* +inserted in the tuple. Similarly, PyTuple_GetItem does not increment the +returned item's reference count. +*/ + +#ifndef Py_LIMITED_API +typedef struct { + PyObject_VAR_HEAD + PyObject *ob_item[1]; + + /* ob_item contains space for 'ob_size' elements. + * Items must normally not be NULL, except during construction when + * the tuple is not yet visible outside the function that builds it. 
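+
+     * Illustrative sketch (not part of the original header): filling a brand
+     * new tuple with PyTuple_SET_ITEM(), which steals the references
+     * (error checks for PyLong_FromLong() omitted):
+     *
+     *     PyObject *t = PyTuple_New(2);
+     *     if (t != NULL) {
+     *         PyTuple_SET_ITEM(t, 0, PyLong_FromLong(1));
+     *         PyTuple_SET_ITEM(t, 1, PyLong_FromLong(2));
+     *     }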
+ */ +} PyTupleObject; +#endif + +PyAPI_DATA(PyTypeObject) PyTuple_Type; +PyAPI_DATA(PyTypeObject) PyTupleIter_Type; + +#define PyTuple_Check(op) \ + PyType_FastSubclass(Py_TYPE(op), Py_TPFLAGS_TUPLE_SUBCLASS) +#define PyTuple_CheckExact(op) (Py_TYPE(op) == &PyTuple_Type) + +PyAPI_FUNC(PyObject *) PyTuple_New(Py_ssize_t size); +PyAPI_FUNC(Py_ssize_t) PyTuple_Size(PyObject *); +PyAPI_FUNC(PyObject *) PyTuple_GetItem(PyObject *, Py_ssize_t); +PyAPI_FUNC(int) PyTuple_SetItem(PyObject *, Py_ssize_t, PyObject *); +PyAPI_FUNC(PyObject *) PyTuple_GetSlice(PyObject *, Py_ssize_t, Py_ssize_t); +#ifndef Py_LIMITED_API +PyAPI_FUNC(int) _PyTuple_Resize(PyObject **, Py_ssize_t); +#endif +PyAPI_FUNC(PyObject *) PyTuple_Pack(Py_ssize_t, ...); +#ifndef Py_LIMITED_API +PyAPI_FUNC(void) _PyTuple_MaybeUntrack(PyObject *); +#endif + +/* Macro, trading safety for speed */ +#ifndef Py_LIMITED_API +#define PyTuple_GET_ITEM(op, i) (((PyTupleObject *)(op))->ob_item[i]) +#define PyTuple_GET_SIZE(op) (assert(PyTuple_Check(op)),Py_SIZE(op)) + +/* Macro, *only* to be used to fill in brand new tuples */ +#define PyTuple_SET_ITEM(op, i, v) (((PyTupleObject *)(op))->ob_item[i] = v) +#endif + +PyAPI_FUNC(int) PyTuple_ClearFreeList(void); +#ifndef Py_LIMITED_API +PyAPI_FUNC(void) _PyTuple_DebugMallocStats(FILE *out); +#endif /* Py_LIMITED_API */ + +#ifdef __cplusplus +} +#endif +#endif /* !Py_TUPLEOBJECT_H */ diff --git a/venv/Include/typeslots.h b/venv/Include/typeslots.h new file mode 100644 index 00000000..0ce6a377 --- /dev/null +++ b/venv/Include/typeslots.h @@ -0,0 +1,85 @@ +/* Do not renumber the file; these numbers are part of the stable ABI. */ +/* Disabled, see #10181 */ +#undef Py_bf_getbuffer +#undef Py_bf_releasebuffer +#define Py_mp_ass_subscript 3 +#define Py_mp_length 4 +#define Py_mp_subscript 5 +#define Py_nb_absolute 6 +#define Py_nb_add 7 +#define Py_nb_and 8 +#define Py_nb_bool 9 +#define Py_nb_divmod 10 +#define Py_nb_float 11 +#define Py_nb_floor_divide 12 +#define Py_nb_index 13 +#define Py_nb_inplace_add 14 +#define Py_nb_inplace_and 15 +#define Py_nb_inplace_floor_divide 16 +#define Py_nb_inplace_lshift 17 +#define Py_nb_inplace_multiply 18 +#define Py_nb_inplace_or 19 +#define Py_nb_inplace_power 20 +#define Py_nb_inplace_remainder 21 +#define Py_nb_inplace_rshift 22 +#define Py_nb_inplace_subtract 23 +#define Py_nb_inplace_true_divide 24 +#define Py_nb_inplace_xor 25 +#define Py_nb_int 26 +#define Py_nb_invert 27 +#define Py_nb_lshift 28 +#define Py_nb_multiply 29 +#define Py_nb_negative 30 +#define Py_nb_or 31 +#define Py_nb_positive 32 +#define Py_nb_power 33 +#define Py_nb_remainder 34 +#define Py_nb_rshift 35 +#define Py_nb_subtract 36 +#define Py_nb_true_divide 37 +#define Py_nb_xor 38 +#define Py_sq_ass_item 39 +#define Py_sq_concat 40 +#define Py_sq_contains 41 +#define Py_sq_inplace_concat 42 +#define Py_sq_inplace_repeat 43 +#define Py_sq_item 44 +#define Py_sq_length 45 +#define Py_sq_repeat 46 +#define Py_tp_alloc 47 +#define Py_tp_base 48 +#define Py_tp_bases 49 +#define Py_tp_call 50 +#define Py_tp_clear 51 +#define Py_tp_dealloc 52 +#define Py_tp_del 53 +#define Py_tp_descr_get 54 +#define Py_tp_descr_set 55 +#define Py_tp_doc 56 +#define Py_tp_getattr 57 +#define Py_tp_getattro 58 +#define Py_tp_hash 59 +#define Py_tp_init 60 +#define Py_tp_is_gc 61 +#define Py_tp_iter 62 +#define Py_tp_iternext 63 +#define Py_tp_methods 64 +#define Py_tp_new 65 +#define Py_tp_repr 66 +#define Py_tp_richcompare 67 +#define Py_tp_setattr 68 +#define Py_tp_setattro 69 +#define Py_tp_str 70 +#define 
Py_tp_traverse 71 +#define Py_tp_members 72 +#define Py_tp_getset 73 +#define Py_tp_free 74 +#define Py_nb_matrix_multiply 75 +#define Py_nb_inplace_matrix_multiply 76 +#define Py_am_await 77 +#define Py_am_aiter 78 +#define Py_am_anext 79 +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03050000 +/* New in 3.5 */ +#define Py_tp_finalize 80 +#endif diff --git a/venv/Include/ucnhash.h b/venv/Include/ucnhash.h new file mode 100644 index 00000000..45362e99 --- /dev/null +++ b/venv/Include/ucnhash.h @@ -0,0 +1,36 @@ +/* Unicode name database interface */ +#ifndef Py_LIMITED_API +#ifndef Py_UCNHASH_H +#define Py_UCNHASH_H +#ifdef __cplusplus +extern "C" { +#endif + +/* revised ucnhash CAPI interface (exported through a "wrapper") */ + +#define PyUnicodeData_CAPSULE_NAME "unicodedata.ucnhash_CAPI" + +typedef struct { + + /* Size of this struct */ + int size; + + /* Get name for a given character code. Returns non-zero if + success, zero if not. Does not set Python exceptions. + If self is NULL, data come from the default version of the database. + If it is not NULL, it should be a unicodedata.ucd_X_Y_Z object */ + int (*getname)(PyObject *self, Py_UCS4 code, char* buffer, int buflen, + int with_alias_and_seq); + + /* Get character code for a given name. Same error handling + as for getname. */ + int (*getcode)(PyObject *self, const char* name, int namelen, Py_UCS4* code, + int with_named_seq); + +} _PyUnicode_Name_CAPI; + +#ifdef __cplusplus +} +#endif +#endif /* !Py_UCNHASH_H */ +#endif /* !Py_LIMITED_API */ diff --git a/venv/Include/unicodeobject.h b/venv/Include/unicodeobject.h new file mode 100644 index 00000000..0274de67 --- /dev/null +++ b/venv/Include/unicodeobject.h @@ -0,0 +1,2334 @@ +#ifndef Py_UNICODEOBJECT_H +#define Py_UNICODEOBJECT_H + +#include + +/* + +Unicode implementation based on original code by Fredrik Lundh, +modified by Marc-Andre Lemburg (mal@lemburg.com) according to the +Unicode Integration Proposal. (See +http://www.egenix.com/files/python/unicode-proposal.txt). + +Copyright (c) Corporation for National Research Initiatives. + + + Original header: + -------------------------------------------------------------------- + + * Yet another Unicode string type for Python. This type supports the + * 16-bit Basic Multilingual Plane (BMP) only. + * + * Written by Fredrik Lundh, January 1999. + * + * Copyright (c) 1999 by Secret Labs AB. + * Copyright (c) 1999 by Fredrik Lundh. + * + * fredrik@pythonware.com + * http://www.pythonware.com + * + * -------------------------------------------------------------------- + * This Unicode String Type is + * + * Copyright (c) 1999 by Secret Labs AB + * Copyright (c) 1999 by Fredrik Lundh + * + * By obtaining, using, and/or copying this software and/or its + * associated documentation, you agree that you have read, understood, + * and will comply with the following terms and conditions: + * + * Permission to use, copy, modify, and distribute this software and its + * associated documentation for any purpose and without fee is hereby + * granted, provided that the above copyright notice appears in all + * copies, and that both that copyright notice and this permission notice + * appear in supporting documentation, and that the name of Secret Labs + * AB or the author not be used in advertising or publicity pertaining to + * distribution of the software without specific, written prior + * permission. 
+ * + * SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND + * FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * -------------------------------------------------------------------- */ + +#include + +/* === Internal API ======================================================= */ + +/* --- Internal Unicode Format -------------------------------------------- */ + +/* Python 3.x requires unicode */ +#define Py_USING_UNICODE + +#ifndef SIZEOF_WCHAR_T +#error Must define SIZEOF_WCHAR_T +#endif + +#define Py_UNICODE_SIZE SIZEOF_WCHAR_T + +/* If wchar_t can be used for UCS-4 storage, set Py_UNICODE_WIDE. + Otherwise, Unicode strings are stored as UCS-2 (with limited support + for UTF-16) */ + +#if Py_UNICODE_SIZE >= 4 +#define Py_UNICODE_WIDE +#endif + +/* Set these flags if the platform has "wchar.h" and the + wchar_t type is a 16-bit unsigned type */ +/* #define HAVE_WCHAR_H */ +/* #define HAVE_USABLE_WCHAR_T */ + +/* Py_UNICODE was the native Unicode storage format (code unit) used by + Python and represents a single Unicode element in the Unicode type. + With PEP 393, Py_UNICODE is deprecated and replaced with a + typedef to wchar_t. */ + +#ifndef Py_LIMITED_API +#define PY_UNICODE_TYPE wchar_t +typedef wchar_t Py_UNICODE /* Py_DEPRECATED(3.3) */; +#endif + +/* If the compiler provides a wchar_t type we try to support it + through the interface functions PyUnicode_FromWideChar(), + PyUnicode_AsWideChar() and PyUnicode_AsWideCharString(). */ + +#ifdef HAVE_USABLE_WCHAR_T +# ifndef HAVE_WCHAR_H +# define HAVE_WCHAR_H +# endif +#endif + +#ifdef HAVE_WCHAR_H +# include +#endif + +/* Py_UCS4 and Py_UCS2 are typedefs for the respective + unicode representations. */ +typedef uint32_t Py_UCS4; +typedef uint16_t Py_UCS2; +typedef uint8_t Py_UCS1; + +/* --- Internal Unicode Operations ---------------------------------------- */ + +/* Since splitting on whitespace is an important use case, and + whitespace in most situations is solely ASCII whitespace, we + optimize for the common case by using a quick look-up table + _Py_ascii_whitespace (see below) with an inlined check. + + */ +#ifndef Py_LIMITED_API +#define Py_UNICODE_ISSPACE(ch) \ + ((ch) < 128U ? 
_Py_ascii_whitespace[(ch)] : _PyUnicode_IsWhitespace(ch)) + +#define Py_UNICODE_ISLOWER(ch) _PyUnicode_IsLowercase(ch) +#define Py_UNICODE_ISUPPER(ch) _PyUnicode_IsUppercase(ch) +#define Py_UNICODE_ISTITLE(ch) _PyUnicode_IsTitlecase(ch) +#define Py_UNICODE_ISLINEBREAK(ch) _PyUnicode_IsLinebreak(ch) + +#define Py_UNICODE_TOLOWER(ch) _PyUnicode_ToLowercase(ch) +#define Py_UNICODE_TOUPPER(ch) _PyUnicode_ToUppercase(ch) +#define Py_UNICODE_TOTITLE(ch) _PyUnicode_ToTitlecase(ch) + +#define Py_UNICODE_ISDECIMAL(ch) _PyUnicode_IsDecimalDigit(ch) +#define Py_UNICODE_ISDIGIT(ch) _PyUnicode_IsDigit(ch) +#define Py_UNICODE_ISNUMERIC(ch) _PyUnicode_IsNumeric(ch) +#define Py_UNICODE_ISPRINTABLE(ch) _PyUnicode_IsPrintable(ch) + +#define Py_UNICODE_TODECIMAL(ch) _PyUnicode_ToDecimalDigit(ch) +#define Py_UNICODE_TODIGIT(ch) _PyUnicode_ToDigit(ch) +#define Py_UNICODE_TONUMERIC(ch) _PyUnicode_ToNumeric(ch) + +#define Py_UNICODE_ISALPHA(ch) _PyUnicode_IsAlpha(ch) + +#define Py_UNICODE_ISALNUM(ch) \ + (Py_UNICODE_ISALPHA(ch) || \ + Py_UNICODE_ISDECIMAL(ch) || \ + Py_UNICODE_ISDIGIT(ch) || \ + Py_UNICODE_ISNUMERIC(ch)) + +#define Py_UNICODE_COPY(target, source, length) \ + memcpy((target), (source), (length)*sizeof(Py_UNICODE)) + +#define Py_UNICODE_FILL(target, value, length) \ + do {Py_ssize_t i_; Py_UNICODE *t_ = (target); Py_UNICODE v_ = (value);\ + for (i_ = 0; i_ < (length); i_++) t_[i_] = v_;\ + } while (0) + +/* macros to work with surrogates */ +#define Py_UNICODE_IS_SURROGATE(ch) (0xD800 <= (ch) && (ch) <= 0xDFFF) +#define Py_UNICODE_IS_HIGH_SURROGATE(ch) (0xD800 <= (ch) && (ch) <= 0xDBFF) +#define Py_UNICODE_IS_LOW_SURROGATE(ch) (0xDC00 <= (ch) && (ch) <= 0xDFFF) +/* Join two surrogate characters and return a single Py_UCS4 value. */ +#define Py_UNICODE_JOIN_SURROGATES(high, low) \ + (((((Py_UCS4)(high) & 0x03FF) << 10) | \ + ((Py_UCS4)(low) & 0x03FF)) + 0x10000) +/* high surrogate = top 10 bits added to D800 */ +#define Py_UNICODE_HIGH_SURROGATE(ch) (0xD800 - (0x10000 >> 10) + ((ch) >> 10)) +/* low surrogate = bottom 10 bits added to DC00 */ +#define Py_UNICODE_LOW_SURROGATE(ch) (0xDC00 + ((ch) & 0x3FF)) + +/* Check if substring matches at given offset. The offset must be + valid, and the substring must not be empty. */ + +#define Py_UNICODE_MATCH(string, offset, substring) \ + ((*((string)->wstr + (offset)) == *((substring)->wstr)) && \ + ((*((string)->wstr + (offset) + (substring)->wstr_length-1) == *((substring)->wstr + (substring)->wstr_length-1))) && \ + !memcmp((string)->wstr + (offset), (substring)->wstr, (substring)->wstr_length*sizeof(Py_UNICODE))) + +#endif /* Py_LIMITED_API */ + +#ifdef __cplusplus +extern "C" { +#endif + +/* --- Unicode Type ------------------------------------------------------- */ + +#ifndef Py_LIMITED_API + +/* ASCII-only strings created through PyUnicode_New use the PyASCIIObject + structure. state.ascii and state.compact are set, and the data + immediately follow the structure. utf8_length and wstr_length can be found + in the length field; the utf8 pointer is equal to the data pointer. 
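+
+   Illustrative sketch (not part of the original header): for a compact
+   ASCII string the character data sits directly after the PyASCIIObject
+   header, which is what the _PyUnicode_COMPACT_DATA() macro below relies on:
+
+       PyObject *s = PyUnicode_FromString("hello");    /* compact ASCII */
+       if (s != NULL && PyUnicode_IS_COMPACT_ASCII(s)) {
+           const char *data = (const char *)(((PyASCIIObject *)s) + 1);
+           /* data points at "hello\0", PyUnicode_GET_LENGTH(s) == 5 */
+       }
+       Py_XDECREF(s);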
*/ +typedef struct { + /* There are 4 forms of Unicode strings: + + - compact ascii: + + * structure = PyASCIIObject + * test: PyUnicode_IS_COMPACT_ASCII(op) + * kind = PyUnicode_1BYTE_KIND + * compact = 1 + * ascii = 1 + * ready = 1 + * (length is the length of the utf8 and wstr strings) + * (data starts just after the structure) + * (since ASCII is decoded from UTF-8, the utf8 string are the data) + + - compact: + + * structure = PyCompactUnicodeObject + * test: PyUnicode_IS_COMPACT(op) && !PyUnicode_IS_ASCII(op) + * kind = PyUnicode_1BYTE_KIND, PyUnicode_2BYTE_KIND or + PyUnicode_4BYTE_KIND + * compact = 1 + * ready = 1 + * ascii = 0 + * utf8 is not shared with data + * utf8_length = 0 if utf8 is NULL + * wstr is shared with data and wstr_length=length + if kind=PyUnicode_2BYTE_KIND and sizeof(wchar_t)=2 + or if kind=PyUnicode_4BYTE_KIND and sizeof(wchar_t)=4 + * wstr_length = 0 if wstr is NULL + * (data starts just after the structure) + + - legacy string, not ready: + + * structure = PyUnicodeObject + * test: kind == PyUnicode_WCHAR_KIND + * length = 0 (use wstr_length) + * hash = -1 + * kind = PyUnicode_WCHAR_KIND + * compact = 0 + * ascii = 0 + * ready = 0 + * interned = SSTATE_NOT_INTERNED + * wstr is not NULL + * data.any is NULL + * utf8 is NULL + * utf8_length = 0 + + - legacy string, ready: + + * structure = PyUnicodeObject structure + * test: !PyUnicode_IS_COMPACT(op) && kind != PyUnicode_WCHAR_KIND + * kind = PyUnicode_1BYTE_KIND, PyUnicode_2BYTE_KIND or + PyUnicode_4BYTE_KIND + * compact = 0 + * ready = 1 + * data.any is not NULL + * utf8 is shared and utf8_length = length with data.any if ascii = 1 + * utf8_length = 0 if utf8 is NULL + * wstr is shared with data.any and wstr_length = length + if kind=PyUnicode_2BYTE_KIND and sizeof(wchar_t)=2 + or if kind=PyUnicode_4BYTE_KIND and sizeof(wchar_4)=4 + * wstr_length = 0 if wstr is NULL + + Compact strings use only one memory block (structure + characters), + whereas legacy strings use one block for the structure and one block + for characters. + + Legacy strings are created by PyUnicode_FromUnicode() and + PyUnicode_FromStringAndSize(NULL, size) functions. They become ready + when PyUnicode_READY() is called. + + See also _PyUnicode_CheckConsistency(). + */ + PyObject_HEAD + Py_ssize_t length; /* Number of code points in the string */ + Py_hash_t hash; /* Hash value; -1 if not set */ + struct { + /* + SSTATE_NOT_INTERNED (0) + SSTATE_INTERNED_MORTAL (1) + SSTATE_INTERNED_IMMORTAL (2) + + If interned != SSTATE_NOT_INTERNED, the two references from the + dictionary to this object are *not* counted in ob_refcnt. + */ + unsigned int interned:2; + /* Character size: + + - PyUnicode_WCHAR_KIND (0): + + * character type = wchar_t (16 or 32 bits, depending on the + platform) + + - PyUnicode_1BYTE_KIND (1): + + * character type = Py_UCS1 (8 bits, unsigned) + * all characters are in the range U+0000-U+00FF (latin1) + * if ascii is set, all characters are in the range U+0000-U+007F + (ASCII), otherwise at least one character is in the range + U+0080-U+00FF + + - PyUnicode_2BYTE_KIND (2): + + * character type = Py_UCS2 (16 bits, unsigned) + * all characters are in the range U+0000-U+FFFF (BMP) + * at least one character is in the range U+0100-U+FFFF + + - PyUnicode_4BYTE_KIND (4): + + * character type = Py_UCS4 (32 bits, unsigned) + * all characters are in the range U+0000-U+10FFFF + * at least one character is in the range U+10000-U+10FFFF + */ + unsigned int kind:3; + /* Compact is with respect to the allocation scheme. 
Compact unicode + objects only require one memory block while non-compact objects use + one block for the PyUnicodeObject struct and another for its data + buffer. */ + unsigned int compact:1; + /* The string only contains characters in the range U+0000-U+007F (ASCII) + and the kind is PyUnicode_1BYTE_KIND. If ascii is set and compact is + set, use the PyASCIIObject structure. */ + unsigned int ascii:1; + /* The ready flag indicates whether the object layout is initialized + completely. This means that this is either a compact object, or + the data pointer is filled out. The bit is redundant, and helps + to minimize the test in PyUnicode_IS_READY(). */ + unsigned int ready:1; + /* Padding to ensure that PyUnicode_DATA() is always aligned to + 4 bytes (see issue #19537 on m68k). */ + unsigned int :24; + } state; + wchar_t *wstr; /* wchar_t representation (null-terminated) */ +} PyASCIIObject; + +/* Non-ASCII strings allocated through PyUnicode_New use the + PyCompactUnicodeObject structure. state.compact is set, and the data + immediately follow the structure. */ +typedef struct { + PyASCIIObject _base; + Py_ssize_t utf8_length; /* Number of bytes in utf8, excluding the + * terminating \0. */ + char *utf8; /* UTF-8 representation (null-terminated) */ + Py_ssize_t wstr_length; /* Number of code points in wstr, possible + * surrogates count as two code points. */ +} PyCompactUnicodeObject; + +/* Strings allocated through PyUnicode_FromUnicode(NULL, len) use the + PyUnicodeObject structure. The actual string data is initially in the wstr + block, and copied into the data block using _PyUnicode_Ready. */ +typedef struct { + PyCompactUnicodeObject _base; + union { + void *any; + Py_UCS1 *latin1; + Py_UCS2 *ucs2; + Py_UCS4 *ucs4; + } data; /* Canonical, smallest-form Unicode buffer */ +} PyUnicodeObject; +#endif + +PyAPI_DATA(PyTypeObject) PyUnicode_Type; +PyAPI_DATA(PyTypeObject) PyUnicodeIter_Type; + +#define PyUnicode_Check(op) \ + PyType_FastSubclass(Py_TYPE(op), Py_TPFLAGS_UNICODE_SUBCLASS) +#define PyUnicode_CheckExact(op) (Py_TYPE(op) == &PyUnicode_Type) + +/* Fast access macros */ +#ifndef Py_LIMITED_API + +#define PyUnicode_WSTR_LENGTH(op) \ + (PyUnicode_IS_COMPACT_ASCII(op) ? \ + ((PyASCIIObject*)op)->length : \ + ((PyCompactUnicodeObject*)op)->wstr_length) + +/* Returns the deprecated Py_UNICODE representation's size in code units + (this includes surrogate pairs as 2 units). + If the Py_UNICODE representation is not available, it will be computed + on request. Use PyUnicode_GET_LENGTH() for the length in code points. */ + +#define PyUnicode_GET_SIZE(op) \ + (assert(PyUnicode_Check(op)), \ + (((PyASCIIObject *)(op))->wstr) ? \ + PyUnicode_WSTR_LENGTH(op) : \ + ((void)PyUnicode_AsUnicode((PyObject *)(op)), \ + assert(((PyASCIIObject *)(op))->wstr), \ + PyUnicode_WSTR_LENGTH(op))) + /* Py_DEPRECATED(3.3) */ + +#define PyUnicode_GET_DATA_SIZE(op) \ + (PyUnicode_GET_SIZE(op) * Py_UNICODE_SIZE) + /* Py_DEPRECATED(3.3) */ + +/* Alias for PyUnicode_AsUnicode(). This will create a wchar_t/Py_UNICODE + representation on demand. Using this macro is very inefficient now, + try to port your code to use the new PyUnicode_*BYTE_DATA() macros or + use PyUnicode_WRITE() and PyUnicode_READ(). */ + +#define PyUnicode_AS_UNICODE(op) \ + (assert(PyUnicode_Check(op)), \ + (((PyASCIIObject *)(op))->wstr) ? 
(((PyASCIIObject *)(op))->wstr) : \ + PyUnicode_AsUnicode((PyObject *)(op))) + /* Py_DEPRECATED(3.3) */ + +#define PyUnicode_AS_DATA(op) \ + ((const char *)(PyUnicode_AS_UNICODE(op))) + /* Py_DEPRECATED(3.3) */ + + +/* --- Flexible String Representation Helper Macros (PEP 393) -------------- */ + +/* Values for PyASCIIObject.state: */ + +/* Interning state. */ +#define SSTATE_NOT_INTERNED 0 +#define SSTATE_INTERNED_MORTAL 1 +#define SSTATE_INTERNED_IMMORTAL 2 + +/* Return true if the string contains only ASCII characters, or 0 if not. The + string may be compact (PyUnicode_IS_COMPACT_ASCII) or not, but must be + ready. */ +#define PyUnicode_IS_ASCII(op) \ + (assert(PyUnicode_Check(op)), \ + assert(PyUnicode_IS_READY(op)), \ + ((PyASCIIObject*)op)->state.ascii) + +/* Return true if the string is compact or 0 if not. + No type checks or Ready calls are performed. */ +#define PyUnicode_IS_COMPACT(op) \ + (((PyASCIIObject*)(op))->state.compact) + +/* Return true if the string is a compact ASCII string (use PyASCIIObject + structure), or 0 if not. No type checks or Ready calls are performed. */ +#define PyUnicode_IS_COMPACT_ASCII(op) \ + (((PyASCIIObject*)op)->state.ascii && PyUnicode_IS_COMPACT(op)) + +enum PyUnicode_Kind { +/* String contains only wstr byte characters. This is only possible + when the string was created with a legacy API and _PyUnicode_Ready() + has not been called yet. */ + PyUnicode_WCHAR_KIND = 0, +/* Return values of the PyUnicode_KIND() macro: */ + PyUnicode_1BYTE_KIND = 1, + PyUnicode_2BYTE_KIND = 2, + PyUnicode_4BYTE_KIND = 4 +}; + +/* Return pointers to the canonical representation cast to unsigned char, + Py_UCS2, or Py_UCS4 for direct character access. + No checks are performed, use PyUnicode_KIND() before to ensure + these will work correctly. */ + +#define PyUnicode_1BYTE_DATA(op) ((Py_UCS1*)PyUnicode_DATA(op)) +#define PyUnicode_2BYTE_DATA(op) ((Py_UCS2*)PyUnicode_DATA(op)) +#define PyUnicode_4BYTE_DATA(op) ((Py_UCS4*)PyUnicode_DATA(op)) + +/* Return one of the PyUnicode_*_KIND values defined above. */ +#define PyUnicode_KIND(op) \ + (assert(PyUnicode_Check(op)), \ + assert(PyUnicode_IS_READY(op)), \ + ((PyASCIIObject *)(op))->state.kind) + +/* Return a void pointer to the raw unicode buffer. */ +#define _PyUnicode_COMPACT_DATA(op) \ + (PyUnicode_IS_ASCII(op) ? \ + ((void*)((PyASCIIObject*)(op) + 1)) : \ + ((void*)((PyCompactUnicodeObject*)(op) + 1))) + +#define _PyUnicode_NONCOMPACT_DATA(op) \ + (assert(((PyUnicodeObject*)(op))->data.any), \ + ((((PyUnicodeObject *)(op))->data.any))) + +#define PyUnicode_DATA(op) \ + (assert(PyUnicode_Check(op)), \ + PyUnicode_IS_COMPACT(op) ? _PyUnicode_COMPACT_DATA(op) : \ + _PyUnicode_NONCOMPACT_DATA(op)) + +/* In the access macros below, "kind" may be evaluated more than once. + All other macro parameters are evaluated exactly once, so it is safe + to put side effects into them (such as increasing the index). */ + +/* Write into the canonical representation, this macro does not do any sanity + checks and is intended for usage in loops. The caller should cache the + kind and data pointers obtained from other macro calls. + index is the index in the string (starts at 0) and value is the new + code point value which should be written to that location. 
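+
+   Illustrative sketch (not part of the original header); `n', `maxchar' and
+   `codepoints' are hypothetical inputs, with maxchar the true maximum of
+   the code points being written:
+
+       PyObject *s = PyUnicode_New(n, maxchar);
+       if (s != NULL) {
+           int kind = PyUnicode_KIND(s);
+           void *data = PyUnicode_DATA(s);
+           for (Py_ssize_t i = 0; i < n; i++)
+               PyUnicode_WRITE(kind, data, i, codepoints[i]);
+       }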
*/ +#define PyUnicode_WRITE(kind, data, index, value) \ + do { \ + switch ((kind)) { \ + case PyUnicode_1BYTE_KIND: { \ + ((Py_UCS1 *)(data))[(index)] = (Py_UCS1)(value); \ + break; \ + } \ + case PyUnicode_2BYTE_KIND: { \ + ((Py_UCS2 *)(data))[(index)] = (Py_UCS2)(value); \ + break; \ + } \ + default: { \ + assert((kind) == PyUnicode_4BYTE_KIND); \ + ((Py_UCS4 *)(data))[(index)] = (Py_UCS4)(value); \ + } \ + } \ + } while (0) + +/* Read a code point from the string's canonical representation. No checks + or ready calls are performed. */ +#define PyUnicode_READ(kind, data, index) \ + ((Py_UCS4) \ + ((kind) == PyUnicode_1BYTE_KIND ? \ + ((const Py_UCS1 *)(data))[(index)] : \ + ((kind) == PyUnicode_2BYTE_KIND ? \ + ((const Py_UCS2 *)(data))[(index)] : \ + ((const Py_UCS4 *)(data))[(index)] \ + ) \ + )) + +/* PyUnicode_READ_CHAR() is less efficient than PyUnicode_READ() because it + calls PyUnicode_KIND() and might call it twice. For single reads, use + PyUnicode_READ_CHAR, for multiple consecutive reads callers should + cache kind and use PyUnicode_READ instead. */ +#define PyUnicode_READ_CHAR(unicode, index) \ + (assert(PyUnicode_Check(unicode)), \ + assert(PyUnicode_IS_READY(unicode)), \ + (Py_UCS4) \ + (PyUnicode_KIND((unicode)) == PyUnicode_1BYTE_KIND ? \ + ((const Py_UCS1 *)(PyUnicode_DATA((unicode))))[(index)] : \ + (PyUnicode_KIND((unicode)) == PyUnicode_2BYTE_KIND ? \ + ((const Py_UCS2 *)(PyUnicode_DATA((unicode))))[(index)] : \ + ((const Py_UCS4 *)(PyUnicode_DATA((unicode))))[(index)] \ + ) \ + )) + +/* Returns the length of the unicode string. The caller has to make sure that + the string has it's canonical representation set before calling + this macro. Call PyUnicode_(FAST_)Ready to ensure that. */ +#define PyUnicode_GET_LENGTH(op) \ + (assert(PyUnicode_Check(op)), \ + assert(PyUnicode_IS_READY(op)), \ + ((PyASCIIObject *)(op))->length) + + +/* Fast check to determine whether an object is ready. Equivalent to + PyUnicode_IS_COMPACT(op) || ((PyUnicodeObject*)(op))->data.any) */ + +#define PyUnicode_IS_READY(op) (((PyASCIIObject*)op)->state.ready) + +/* PyUnicode_READY() does less work than _PyUnicode_Ready() in the best + case. If the canonical representation is not yet set, it will still call + _PyUnicode_Ready(). + Returns 0 on success and -1 on errors. */ +#define PyUnicode_READY(op) \ + (assert(PyUnicode_Check(op)), \ + (PyUnicode_IS_READY(op) ? \ + 0 : _PyUnicode_Ready((PyObject *)(op)))) + +/* Return a maximum character value which is suitable for creating another + string based on op. This is always an approximation but more efficient + than iterating over the string. */ +#define PyUnicode_MAX_CHAR_VALUE(op) \ + (assert(PyUnicode_IS_READY(op)), \ + (PyUnicode_IS_ASCII(op) ? \ + (0x7f) : \ + (PyUnicode_KIND(op) == PyUnicode_1BYTE_KIND ? \ + (0xffU) : \ + (PyUnicode_KIND(op) == PyUnicode_2BYTE_KIND ? \ + (0xffffU) : \ + (0x10ffffU))))) + +#endif + +/* --- Constants ---------------------------------------------------------- */ + +/* This Unicode character will be used as replacement character during + decoding if the errors argument is set to "replace". Note: the + Unicode character U+FFFD is the official REPLACEMENT CHARACTER in + Unicode 3.0. */ + +#define Py_UNICODE_REPLACEMENT_CHARACTER ((Py_UCS4) 0xFFFD) + +/* === Public API ========================================================= */ + +/* --- Plain Py_UNICODE --------------------------------------------------- */ + +/* With PEP 393, this is the recommended way to allocate a new unicode object. 
+ This function will allocate the object and its buffer in a single memory + block. Objects created using this function are not resizable. */ +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject*) PyUnicode_New( + Py_ssize_t size, /* Number of code points in the new string */ + Py_UCS4 maxchar /* maximum code point value in the string */ + ); +#endif + +/* Initializes the canonical string representation from the deprecated + wstr/Py_UNICODE representation. This function is used to convert Unicode + objects which were created using the old API to the new flexible format + introduced with PEP 393. + + Don't call this function directly, use the public PyUnicode_READY() macro + instead. */ +#ifndef Py_LIMITED_API +PyAPI_FUNC(int) _PyUnicode_Ready( + PyObject *unicode /* Unicode object */ + ); +#endif + +/* Get a copy of a Unicode string. */ +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject*) _PyUnicode_Copy( + PyObject *unicode + ); +#endif + +/* Copy characters from one unicode object into another; this function performs + character conversion when necessary and falls back to memcpy() if possible. + + Fail if to is too small (smaller than *how_many* or smaller than + len(from)-from_start), or if kind(from[from_start:from_start+how_many]) > + kind(to), or if *to* has more than 1 reference. + + Return the number of written characters, or return -1 and raise an exception + on error. + + Pseudo-code: + + how_many = min(how_many, len(from) - from_start) + to[to_start:to_start+how_many] = from[from_start:from_start+how_many] + return how_many + + Note: The function doesn't write a terminating null character. + */ +#ifndef Py_LIMITED_API +PyAPI_FUNC(Py_ssize_t) PyUnicode_CopyCharacters( + PyObject *to, + Py_ssize_t to_start, + PyObject *from, + Py_ssize_t from_start, + Py_ssize_t how_many + ); + +/* Unsafe version of PyUnicode_CopyCharacters(): don't check arguments and so + may crash if parameters are invalid (e.g. if the output string + is too short). */ +PyAPI_FUNC(void) _PyUnicode_FastCopyCharacters( + PyObject *to, + Py_ssize_t to_start, + PyObject *from, + Py_ssize_t from_start, + Py_ssize_t how_many + ); +#endif + +#ifndef Py_LIMITED_API +/* Fill a string with a character: write fill_char into + unicode[start:start+length]. + + Fail if fill_char is bigger than the string maximum character, or if the + string has more than 1 reference. + + Return the number of written characters, or return -1 and raise an exception + on error. */ +PyAPI_FUNC(Py_ssize_t) PyUnicode_Fill( + PyObject *unicode, + Py_ssize_t start, + Py_ssize_t length, + Py_UCS4 fill_char + ); + +/* Unsafe version of PyUnicode_Fill(): don't check arguments and so may crash + if parameters are invalid (e.g. if length is longer than the string). */ +PyAPI_FUNC(void) _PyUnicode_FastFill( + PyObject *unicode, + Py_ssize_t start, + Py_ssize_t length, + Py_UCS4 fill_char + ); +#endif + +/* Create a Unicode Object from the Py_UNICODE buffer u of the given + size. + + u may be NULL which causes the contents to be undefined. It is the + user's responsibility to fill in the needed data afterwards. Note + that modifying the Unicode object contents after construction is + only allowed if u was set to NULL. + + The buffer is copied into the new object. 
*/ + +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject*) PyUnicode_FromUnicode( + const Py_UNICODE *u, /* Unicode buffer */ + Py_ssize_t size /* size of buffer */ + ) /* Py_DEPRECATED(3.3) */; +#endif + +/* Similar to PyUnicode_FromUnicode(), but u points to UTF-8 encoded bytes */ +PyAPI_FUNC(PyObject*) PyUnicode_FromStringAndSize( + const char *u, /* UTF-8 encoded string */ + Py_ssize_t size /* size of buffer */ + ); + +/* Similar to PyUnicode_FromUnicode(), but u points to null-terminated + UTF-8 encoded bytes. The size is determined with strlen(). */ +PyAPI_FUNC(PyObject*) PyUnicode_FromString( + const char *u /* UTF-8 encoded string */ + ); + +#ifndef Py_LIMITED_API +/* Create a new string from a buffer of Py_UCS1, Py_UCS2 or Py_UCS4 characters. + Scan the string to find the maximum character. */ +PyAPI_FUNC(PyObject*) PyUnicode_FromKindAndData( + int kind, + const void *buffer, + Py_ssize_t size); + +/* Create a new string from a buffer of ASCII characters. + WARNING: Don't check if the string contains any non-ASCII character. */ +PyAPI_FUNC(PyObject*) _PyUnicode_FromASCII( + const char *buffer, + Py_ssize_t size); +#endif + +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03030000 +PyAPI_FUNC(PyObject*) PyUnicode_Substring( + PyObject *str, + Py_ssize_t start, + Py_ssize_t end); +#endif + +#ifndef Py_LIMITED_API +/* Compute the maximum character of the substring unicode[start:end]. + Return 127 for an empty string. */ +PyAPI_FUNC(Py_UCS4) _PyUnicode_FindMaxChar ( + PyObject *unicode, + Py_ssize_t start, + Py_ssize_t end); +#endif + +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03030000 +/* Copy the string into a UCS4 buffer including the null character if copy_null + is set. Return NULL and raise an exception on error. Raise a SystemError if + the buffer is smaller than the string. Return buffer on success. + + buflen is the length of the buffer in (Py_UCS4) characters. */ +PyAPI_FUNC(Py_UCS4*) PyUnicode_AsUCS4( + PyObject *unicode, + Py_UCS4* buffer, + Py_ssize_t buflen, + int copy_null); + +/* Copy the string into a UCS4 buffer. A new buffer is allocated using + * PyMem_Malloc; if this fails, NULL is returned with a memory error + exception set. */ +PyAPI_FUNC(Py_UCS4*) PyUnicode_AsUCS4Copy(PyObject *unicode); +#endif + +#ifndef Py_LIMITED_API +/* Return a read-only pointer to the Unicode object's internal + Py_UNICODE buffer. + If the wchar_t/Py_UNICODE representation is not yet available, this + function will calculate it. */ + +PyAPI_FUNC(Py_UNICODE *) PyUnicode_AsUnicode( + PyObject *unicode /* Unicode object */ + ) /* Py_DEPRECATED(3.3) */; + +/* Similar to PyUnicode_AsUnicode(), but raises a ValueError if the string + contains null characters. */ +PyAPI_FUNC(const Py_UNICODE *) _PyUnicode_AsUnicode( + PyObject *unicode /* Unicode object */ + ); + +/* Return a read-only pointer to the Unicode object's internal + Py_UNICODE buffer and save the length at size. + If the wchar_t/Py_UNICODE representation is not yet available, this + function will calculate it. */ + +PyAPI_FUNC(Py_UNICODE *) PyUnicode_AsUnicodeAndSize( + PyObject *unicode, /* Unicode object */ + Py_ssize_t *size /* location where to save the length */ + ) /* Py_DEPRECATED(3.3) */; +#endif + +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03030000 +/* Get the length of the Unicode object. */ + +PyAPI_FUNC(Py_ssize_t) PyUnicode_GetLength( + PyObject *unicode +); +#endif + +/* Get the number of Py_UNICODE units in the + string representation. 
*/ + +PyAPI_FUNC(Py_ssize_t) PyUnicode_GetSize( + PyObject *unicode /* Unicode object */ + ) Py_DEPRECATED(3.3); + +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03030000 +/* Read a character from the string. */ + +PyAPI_FUNC(Py_UCS4) PyUnicode_ReadChar( + PyObject *unicode, + Py_ssize_t index + ); + +/* Write a character to the string. The string must have been created through + PyUnicode_New, must not be shared, and must not have been hashed yet. + + Return 0 on success, -1 on error. */ + +PyAPI_FUNC(int) PyUnicode_WriteChar( + PyObject *unicode, + Py_ssize_t index, + Py_UCS4 character + ); +#endif + +#ifndef Py_LIMITED_API +/* Get the maximum ordinal for a Unicode character. */ +PyAPI_FUNC(Py_UNICODE) PyUnicode_GetMax(void) Py_DEPRECATED(3.3); +#endif + +/* Resize a Unicode object. The length is the number of characters, except + if the kind of the string is PyUnicode_WCHAR_KIND: in this case, the length + is the number of Py_UNICODE characters. + + *unicode is modified to point to the new (resized) object and 0 + returned on success. + + Try to resize the string in place (which is usually faster than allocating + a new string and copy characters), or create a new string. + + Error handling is implemented as follows: an exception is set, -1 + is returned and *unicode left untouched. + + WARNING: The function doesn't check string content, the result may not be a + string in canonical representation. */ + +PyAPI_FUNC(int) PyUnicode_Resize( + PyObject **unicode, /* Pointer to the Unicode object */ + Py_ssize_t length /* New length */ + ); + +/* Decode obj to a Unicode object. + + bytes, bytearray and other bytes-like objects are decoded according to the + given encoding and error handler. The encoding and error handler can be + NULL to have the interface use UTF-8 and "strict". + + All other objects (including Unicode objects) raise an exception. + + The API returns NULL in case of an error. The caller is responsible + for decref'ing the returned objects. + +*/ + +PyAPI_FUNC(PyObject*) PyUnicode_FromEncodedObject( + PyObject *obj, /* Object */ + const char *encoding, /* encoding */ + const char *errors /* error handling */ + ); + +/* Copy an instance of a Unicode subtype to a new true Unicode object if + necessary. If obj is already a true Unicode object (not a subtype), return + the reference with *incremented* refcount. + + The API returns NULL in case of an error. The caller is responsible + for decref'ing the returned objects. + +*/ + +PyAPI_FUNC(PyObject*) PyUnicode_FromObject( + PyObject *obj /* Object */ + ); + +PyAPI_FUNC(PyObject *) PyUnicode_FromFormatV( + const char *format, /* ASCII-encoded string */ + va_list vargs + ); +PyAPI_FUNC(PyObject *) PyUnicode_FromFormat( + const char *format, /* ASCII-encoded string */ + ... + ); + +#ifndef Py_LIMITED_API +typedef struct { + PyObject *buffer; + void *data; + enum PyUnicode_Kind kind; + Py_UCS4 maxchar; + Py_ssize_t size; + Py_ssize_t pos; + + /* minimum number of allocated characters (default: 0) */ + Py_ssize_t min_length; + + /* minimum character (default: 127, ASCII) */ + Py_UCS4 min_char; + + /* If non-zero, overallocate the buffer (default: 0). */ + unsigned char overallocate; + + /* If readonly is 1, buffer is a shared string (cannot be modified) + and size is set to 0. */ + unsigned char readonly; +} _PyUnicodeWriter ; + +/* Initialize a Unicode writer. + * + * By default, the minimum buffer size is 0 character and overallocation is + * disabled. 
Set min_length, min_char and overallocate attributes to control + * the allocation of the buffer. */ +PyAPI_FUNC(void) +_PyUnicodeWriter_Init(_PyUnicodeWriter *writer); + +/* Prepare the buffer to write 'length' characters + with the specified maximum character. + + Return 0 on success, raise an exception and return -1 on error. */ +#define _PyUnicodeWriter_Prepare(WRITER, LENGTH, MAXCHAR) \ + (((MAXCHAR) <= (WRITER)->maxchar \ + && (LENGTH) <= (WRITER)->size - (WRITER)->pos) \ + ? 0 \ + : (((LENGTH) == 0) \ + ? 0 \ + : _PyUnicodeWriter_PrepareInternal((WRITER), (LENGTH), (MAXCHAR)))) + +/* Don't call this function directly, use the _PyUnicodeWriter_Prepare() macro + instead. */ +PyAPI_FUNC(int) +_PyUnicodeWriter_PrepareInternal(_PyUnicodeWriter *writer, + Py_ssize_t length, Py_UCS4 maxchar); + +/* Prepare the buffer to have at least the kind KIND. + For example, kind=PyUnicode_2BYTE_KIND ensures that the writer will + support characters in range U+000-U+FFFF. + + Return 0 on success, raise an exception and return -1 on error. */ +#define _PyUnicodeWriter_PrepareKind(WRITER, KIND) \ + (assert((KIND) != PyUnicode_WCHAR_KIND), \ + (KIND) <= (WRITER)->kind \ + ? 0 \ + : _PyUnicodeWriter_PrepareKindInternal((WRITER), (KIND))) + +/* Don't call this function directly, use the _PyUnicodeWriter_PrepareKind() + macro instead. */ +PyAPI_FUNC(int) +_PyUnicodeWriter_PrepareKindInternal(_PyUnicodeWriter *writer, + enum PyUnicode_Kind kind); + +/* Append a Unicode character. + Return 0 on success, raise an exception and return -1 on error. */ +PyAPI_FUNC(int) +_PyUnicodeWriter_WriteChar(_PyUnicodeWriter *writer, + Py_UCS4 ch + ); + +/* Append a Unicode string. + Return 0 on success, raise an exception and return -1 on error. */ +PyAPI_FUNC(int) +_PyUnicodeWriter_WriteStr(_PyUnicodeWriter *writer, + PyObject *str /* Unicode string */ + ); + +/* Append a substring of a Unicode string. + Return 0 on success, raise an exception and return -1 on error. */ +PyAPI_FUNC(int) +_PyUnicodeWriter_WriteSubstring(_PyUnicodeWriter *writer, + PyObject *str, /* Unicode string */ + Py_ssize_t start, + Py_ssize_t end + ); + +/* Append an ASCII-encoded byte string. + Return 0 on success, raise an exception and return -1 on error. */ +PyAPI_FUNC(int) +_PyUnicodeWriter_WriteASCIIString(_PyUnicodeWriter *writer, + const char *str, /* ASCII-encoded byte string */ + Py_ssize_t len /* number of bytes, or -1 if unknown */ + ); + +/* Append a latin1-encoded byte string. + Return 0 on success, raise an exception and return -1 on error. */ +PyAPI_FUNC(int) +_PyUnicodeWriter_WriteLatin1String(_PyUnicodeWriter *writer, + const char *str, /* latin1-encoded byte string */ + Py_ssize_t len /* length in bytes */ + ); + +/* Get the value of the writer as a Unicode string. Clear the + buffer of the writer. Raise an exception and return NULL + on error. */ +PyAPI_FUNC(PyObject *) +_PyUnicodeWriter_Finish(_PyUnicodeWriter *writer); + +/* Deallocate memory of a writer (clear its internal buffer). */ +PyAPI_FUNC(void) +_PyUnicodeWriter_Dealloc(_PyUnicodeWriter *writer); +#endif + +#ifndef Py_LIMITED_API +/* Format the object based on the format_spec, as defined in PEP 3101 + (Advanced String Formatting). 
*/ +PyAPI_FUNC(int) _PyUnicode_FormatAdvancedWriter( + _PyUnicodeWriter *writer, + PyObject *obj, + PyObject *format_spec, + Py_ssize_t start, + Py_ssize_t end); +#endif + +PyAPI_FUNC(void) PyUnicode_InternInPlace(PyObject **); +PyAPI_FUNC(void) PyUnicode_InternImmortal(PyObject **); +PyAPI_FUNC(PyObject *) PyUnicode_InternFromString( + const char *u /* UTF-8 encoded string */ + ); +#ifndef Py_LIMITED_API +PyAPI_FUNC(void) _Py_ReleaseInternedUnicodeStrings(void); +#endif + +/* Use only if you know it's a string */ +#define PyUnicode_CHECK_INTERNED(op) \ + (((PyASCIIObject *)(op))->state.interned) + +/* --- wchar_t support for platforms which support it --------------------- */ + +#ifdef HAVE_WCHAR_H + +/* Create a Unicode Object from the wchar_t buffer w of the given + size. + + The buffer is copied into the new object. */ + +PyAPI_FUNC(PyObject*) PyUnicode_FromWideChar( + const wchar_t *w, /* wchar_t buffer */ + Py_ssize_t size /* size of buffer */ + ); + +/* Copies the Unicode Object contents into the wchar_t buffer w. At + most size wchar_t characters are copied. + + Note that the resulting wchar_t string may or may not be + 0-terminated. It is the responsibility of the caller to make sure + that the wchar_t string is 0-terminated in case this is required by + the application. + + Returns the number of wchar_t characters copied (excluding a + possibly trailing 0-termination character) or -1 in case of an + error. */ + +PyAPI_FUNC(Py_ssize_t) PyUnicode_AsWideChar( + PyObject *unicode, /* Unicode object */ + wchar_t *w, /* wchar_t buffer */ + Py_ssize_t size /* size of buffer */ + ); + +/* Convert the Unicode object to a wide character string. The output string + always ends with a nul character. If size is not NULL, write the number of + wide characters (excluding the null character) into *size. + + Returns a buffer allocated by PyMem_Malloc() (use PyMem_Free() to free it) + on success. On error, returns NULL, *size is undefined and raises a + MemoryError. */ + +PyAPI_FUNC(wchar_t*) PyUnicode_AsWideCharString( + PyObject *unicode, /* Unicode object */ + Py_ssize_t *size /* number of characters of the result */ + ); + +#ifndef Py_LIMITED_API +PyAPI_FUNC(void*) _PyUnicode_AsKind(PyObject *s, unsigned int kind); +#endif + +#endif + +/* --- Unicode ordinals --------------------------------------------------- */ + +/* Create a Unicode Object from the given Unicode code point ordinal. + + The ordinal must be in range(0x110000). A ValueError is + raised in case it is not. + +*/ + +PyAPI_FUNC(PyObject*) PyUnicode_FromOrdinal(int ordinal); + +/* --- Free-list management ----------------------------------------------- */ + +/* Clear the free list used by the Unicode implementation. + + This can be used to release memory used for objects on the free + list back to the Python memory allocator. + +*/ + +PyAPI_FUNC(int) PyUnicode_ClearFreeList(void); + +/* === Builtin Codecs ===================================================== + + Many of these APIs take two arguments encoding and errors. These + parameters encoding and errors have the same semantics as the ones + of the builtin str() API. + + Setting encoding to NULL causes the default encoding (UTF-8) to be used. + + Error handling is set by errors which may also be set to NULL + meaning to use the default handling defined for the codec. Default + error handling for all builtin codecs is "strict" (ValueErrors are + raised). + + The codecs all use a similar interface. Only deviation from the + generic ones are documented. 
+ +*/ + +/* --- Manage the default encoding ---------------------------------------- */ + +/* Returns a pointer to the default encoding (UTF-8) of the + Unicode object unicode and the size of the encoded representation + in bytes stored in *size. + + In case of an error, no *size is set. + + This function caches the UTF-8 encoded string in the unicodeobject + and subsequent calls will return the same string. The memory is released + when the unicodeobject is deallocated. + + _PyUnicode_AsStringAndSize is a #define for PyUnicode_AsUTF8AndSize to + support the previous internal function with the same behaviour. + + *** This API is for interpreter INTERNAL USE ONLY and will likely + *** be removed or changed in the future. + + *** If you need to access the Unicode object as UTF-8 bytes string, + *** please use PyUnicode_AsUTF8String() instead. +*/ + +#ifndef Py_LIMITED_API +PyAPI_FUNC(const char *) PyUnicode_AsUTF8AndSize( + PyObject *unicode, + Py_ssize_t *size); +#define _PyUnicode_AsStringAndSize PyUnicode_AsUTF8AndSize +#endif + +/* Returns a pointer to the default encoding (UTF-8) of the + Unicode object unicode. + + Like PyUnicode_AsUTF8AndSize(), this also caches the UTF-8 representation + in the unicodeobject. + + _PyUnicode_AsString is a #define for PyUnicode_AsUTF8 to + support the previous internal function with the same behaviour. + + Use of this API is DEPRECATED since no size information can be + extracted from the returned data. + + *** This API is for interpreter INTERNAL USE ONLY and will likely + *** be removed or changed for Python 3.1. + + *** If you need to access the Unicode object as UTF-8 bytes string, + *** please use PyUnicode_AsUTF8String() instead. + +*/ + +#ifndef Py_LIMITED_API +PyAPI_FUNC(const char *) PyUnicode_AsUTF8(PyObject *unicode); +#define _PyUnicode_AsString PyUnicode_AsUTF8 +#endif + +/* Returns "utf-8". */ + +PyAPI_FUNC(const char*) PyUnicode_GetDefaultEncoding(void); + +/* --- Generic Codecs ----------------------------------------------------- */ + +/* Create a Unicode object by decoding the encoded string s of the + given size. */ + +PyAPI_FUNC(PyObject*) PyUnicode_Decode( + const char *s, /* encoded string */ + Py_ssize_t size, /* size of buffer */ + const char *encoding, /* encoding */ + const char *errors /* error handling */ + ); + +/* Decode a Unicode object unicode and return the result as Python + object. + + This API is DEPRECATED. The only supported standard encoding is rot13. + Use PyCodec_Decode() to decode with rot13 and non-standard codecs + that decode from str. */ + +PyAPI_FUNC(PyObject*) PyUnicode_AsDecodedObject( + PyObject *unicode, /* Unicode object */ + const char *encoding, /* encoding */ + const char *errors /* error handling */ + ) Py_DEPRECATED(3.6); + +/* Decode a Unicode object unicode and return the result as Unicode + object. + + This API is DEPRECATED. The only supported standard encoding is rot13. + Use PyCodec_Decode() to decode with rot13 and non-standard codecs + that decode from str to str. */ + +PyAPI_FUNC(PyObject*) PyUnicode_AsDecodedUnicode( + PyObject *unicode, /* Unicode object */ + const char *encoding, /* encoding */ + const char *errors /* error handling */ + ) Py_DEPRECATED(3.6); + +/* Encodes a Py_UNICODE buffer of the given size and returns a + Python string object. 
*/ + +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject*) PyUnicode_Encode( + const Py_UNICODE *s, /* Unicode char buffer */ + Py_ssize_t size, /* number of Py_UNICODE chars to encode */ + const char *encoding, /* encoding */ + const char *errors /* error handling */ + ) Py_DEPRECATED(3.3); +#endif + +/* Encodes a Unicode object and returns the result as Python + object. + + This API is DEPRECATED. It is superseded by PyUnicode_AsEncodedString() + since all standard encodings (except rot13) encode str to bytes. + Use PyCodec_Encode() for encoding with rot13 and non-standard codecs + that encode from str to non-bytes. */ + +PyAPI_FUNC(PyObject*) PyUnicode_AsEncodedObject( + PyObject *unicode, /* Unicode object */ + const char *encoding, /* encoding */ + const char *errors /* error handling */ + ) Py_DEPRECATED(3.6); + +/* Encodes a Unicode object and returns the result as Python string + object. */ + +PyAPI_FUNC(PyObject*) PyUnicode_AsEncodedString( + PyObject *unicode, /* Unicode object */ + const char *encoding, /* encoding */ + const char *errors /* error handling */ + ); + +/* Encodes a Unicode object and returns the result as Unicode + object. + + This API is DEPRECATED. The only supported standard encoding is rot13. + Use PyCodec_Encode() to encode with rot13 and non-standard codecs + that encode from str to str. */ + +PyAPI_FUNC(PyObject*) PyUnicode_AsEncodedUnicode( + PyObject *unicode, /* Unicode object */ + const char *encoding, /* encoding */ + const char *errors /* error handling */ + ) Py_DEPRECATED(3.6); + +/* Build an encoding map. */ + +PyAPI_FUNC(PyObject*) PyUnicode_BuildEncodingMap( + PyObject* string /* 256 character map */ + ); + +/* --- UTF-7 Codecs ------------------------------------------------------- */ + +PyAPI_FUNC(PyObject*) PyUnicode_DecodeUTF7( + const char *string, /* UTF-7 encoded string */ + Py_ssize_t length, /* size of string */ + const char *errors /* error handling */ + ); + +PyAPI_FUNC(PyObject*) PyUnicode_DecodeUTF7Stateful( + const char *string, /* UTF-7 encoded string */ + Py_ssize_t length, /* size of string */ + const char *errors, /* error handling */ + Py_ssize_t *consumed /* bytes consumed */ + ); + +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject*) PyUnicode_EncodeUTF7( + const Py_UNICODE *data, /* Unicode char buffer */ + Py_ssize_t length, /* number of Py_UNICODE chars to encode */ + int base64SetO, /* Encode RFC2152 Set O characters in base64 */ + int base64WhiteSpace, /* Encode whitespace (sp, ht, nl, cr) in base64 */ + const char *errors /* error handling */ + ) Py_DEPRECATED(3.3); +PyAPI_FUNC(PyObject*) _PyUnicode_EncodeUTF7( + PyObject *unicode, /* Unicode object */ + int base64SetO, /* Encode RFC2152 Set O characters in base64 */ + int base64WhiteSpace, /* Encode whitespace (sp, ht, nl, cr) in base64 */ + const char *errors /* error handling */ + ); +#endif + +/* --- UTF-8 Codecs ------------------------------------------------------- */ + +PyAPI_FUNC(PyObject*) PyUnicode_DecodeUTF8( + const char *string, /* UTF-8 encoded string */ + Py_ssize_t length, /* size of string */ + const char *errors /* error handling */ + ); + +PyAPI_FUNC(PyObject*) PyUnicode_DecodeUTF8Stateful( + const char *string, /* UTF-8 encoded string */ + Py_ssize_t length, /* size of string */ + const char *errors, /* error handling */ + Py_ssize_t *consumed /* bytes consumed */ + ); + +PyAPI_FUNC(PyObject*) PyUnicode_AsUTF8String( + PyObject *unicode /* Unicode object */ + ); + +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject*) _PyUnicode_AsUTF8String( + PyObject *unicode, + 
const char *errors); + +PyAPI_FUNC(PyObject*) PyUnicode_EncodeUTF8( + const Py_UNICODE *data, /* Unicode char buffer */ + Py_ssize_t length, /* number of Py_UNICODE chars to encode */ + const char *errors /* error handling */ + ) Py_DEPRECATED(3.3); +#endif + +/* --- UTF-32 Codecs ------------------------------------------------------ */ + +/* Decodes length bytes from a UTF-32 encoded buffer string and returns + the corresponding Unicode object. + + errors (if non-NULL) defines the error handling. It defaults + to "strict". + + If byteorder is non-NULL, the decoder starts decoding using the + given byte order: + + *byteorder == -1: little endian + *byteorder == 0: native order + *byteorder == 1: big endian + + In native mode, the first four bytes of the stream are checked for a + BOM mark. If found, the BOM mark is analysed, the byte order + adjusted and the BOM skipped. In the other modes, no BOM mark + interpretation is done. After completion, *byteorder is set to the + current byte order at the end of input data. + + If byteorder is NULL, the codec starts in native order mode. + +*/ + +PyAPI_FUNC(PyObject*) PyUnicode_DecodeUTF32( + const char *string, /* UTF-32 encoded string */ + Py_ssize_t length, /* size of string */ + const char *errors, /* error handling */ + int *byteorder /* pointer to byteorder to use + 0=native;-1=LE,1=BE; updated on + exit */ + ); + +PyAPI_FUNC(PyObject*) PyUnicode_DecodeUTF32Stateful( + const char *string, /* UTF-32 encoded string */ + Py_ssize_t length, /* size of string */ + const char *errors, /* error handling */ + int *byteorder, /* pointer to byteorder to use + 0=native;-1=LE,1=BE; updated on + exit */ + Py_ssize_t *consumed /* bytes consumed */ + ); + +/* Returns a Python string using the UTF-32 encoding in native byte + order. The string always starts with a BOM mark. */ + +PyAPI_FUNC(PyObject*) PyUnicode_AsUTF32String( + PyObject *unicode /* Unicode object */ + ); + +/* Returns a Python string object holding the UTF-32 encoded value of + the Unicode data. + + If byteorder is not 0, output is written according to the following + byte order: + + byteorder == -1: little endian + byteorder == 0: native byte order (writes a BOM mark) + byteorder == 1: big endian + + If byteorder is 0, the output string will always start with the + Unicode BOM mark (U+FEFF). In the other two modes, no BOM mark is + prepended. + +*/ + +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject*) PyUnicode_EncodeUTF32( + const Py_UNICODE *data, /* Unicode char buffer */ + Py_ssize_t length, /* number of Py_UNICODE chars to encode */ + const char *errors, /* error handling */ + int byteorder /* byteorder to use 0=BOM+native;-1=LE,1=BE */ + ) Py_DEPRECATED(3.3); +PyAPI_FUNC(PyObject*) _PyUnicode_EncodeUTF32( + PyObject *object, /* Unicode object */ + const char *errors, /* error handling */ + int byteorder /* byteorder to use 0=BOM+native;-1=LE,1=BE */ + ); +#endif + +/* --- UTF-16 Codecs ------------------------------------------------------ */ + +/* Decodes length bytes from a UTF-16 encoded buffer string and returns + the corresponding Unicode object. + + errors (if non-NULL) defines the error handling. It defaults + to "strict". + + If byteorder is non-NULL, the decoder starts decoding using the + given byte order: + + *byteorder == -1: little endian + *byteorder == 0: native order + *byteorder == 1: big endian + + In native mode, the first two bytes of the stream are checked for a + BOM mark. If found, the BOM mark is analysed, the byte order + adjusted and the BOM skipped. 
In the other modes, no BOM mark + interpretation is done. After completion, *byteorder is set to the + current byte order at the end of input data. + + If byteorder is NULL, the codec starts in native order mode. + +*/ + +PyAPI_FUNC(PyObject*) PyUnicode_DecodeUTF16( + const char *string, /* UTF-16 encoded string */ + Py_ssize_t length, /* size of string */ + const char *errors, /* error handling */ + int *byteorder /* pointer to byteorder to use + 0=native;-1=LE,1=BE; updated on + exit */ + ); + +PyAPI_FUNC(PyObject*) PyUnicode_DecodeUTF16Stateful( + const char *string, /* UTF-16 encoded string */ + Py_ssize_t length, /* size of string */ + const char *errors, /* error handling */ + int *byteorder, /* pointer to byteorder to use + 0=native;-1=LE,1=BE; updated on + exit */ + Py_ssize_t *consumed /* bytes consumed */ + ); + +/* Returns a Python string using the UTF-16 encoding in native byte + order. The string always starts with a BOM mark. */ + +PyAPI_FUNC(PyObject*) PyUnicode_AsUTF16String( + PyObject *unicode /* Unicode object */ + ); + +/* Returns a Python string object holding the UTF-16 encoded value of + the Unicode data. + + If byteorder is not 0, output is written according to the following + byte order: + + byteorder == -1: little endian + byteorder == 0: native byte order (writes a BOM mark) + byteorder == 1: big endian + + If byteorder is 0, the output string will always start with the + Unicode BOM mark (U+FEFF). In the other two modes, no BOM mark is + prepended. + + Note that Py_UNICODE data is being interpreted as UTF-16 reduced to + UCS-2. This trick makes it possible to add full UTF-16 capabilities + at a later point without compromising the APIs. + +*/ + +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject*) PyUnicode_EncodeUTF16( + const Py_UNICODE *data, /* Unicode char buffer */ + Py_ssize_t length, /* number of Py_UNICODE chars to encode */ + const char *errors, /* error handling */ + int byteorder /* byteorder to use 0=BOM+native;-1=LE,1=BE */ + ) Py_DEPRECATED(3.3); +PyAPI_FUNC(PyObject*) _PyUnicode_EncodeUTF16( + PyObject* unicode, /* Unicode object */ + const char *errors, /* error handling */ + int byteorder /* byteorder to use 0=BOM+native;-1=LE,1=BE */ + ); +#endif + +/* --- Unicode-Escape Codecs ---------------------------------------------- */ + +PyAPI_FUNC(PyObject*) PyUnicode_DecodeUnicodeEscape( + const char *string, /* Unicode-Escape encoded string */ + Py_ssize_t length, /* size of string */ + const char *errors /* error handling */ + ); + +#ifndef Py_LIMITED_API +/* Helper for PyUnicode_DecodeUnicodeEscape that detects invalid escape + chars. */ +PyAPI_FUNC(PyObject*) _PyUnicode_DecodeUnicodeEscape( + const char *string, /* Unicode-Escape encoded string */ + Py_ssize_t length, /* size of string */ + const char *errors, /* error handling */ + const char **first_invalid_escape /* on return, points to first + invalid escaped char in + string. 
*/ +); +#endif + +PyAPI_FUNC(PyObject*) PyUnicode_AsUnicodeEscapeString( + PyObject *unicode /* Unicode object */ + ); + +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject*) PyUnicode_EncodeUnicodeEscape( + const Py_UNICODE *data, /* Unicode char buffer */ + Py_ssize_t length /* Number of Py_UNICODE chars to encode */ + ) Py_DEPRECATED(3.3); +#endif + +/* --- Raw-Unicode-Escape Codecs ------------------------------------------ */ + +PyAPI_FUNC(PyObject*) PyUnicode_DecodeRawUnicodeEscape( + const char *string, /* Raw-Unicode-Escape encoded string */ + Py_ssize_t length, /* size of string */ + const char *errors /* error handling */ + ); + +PyAPI_FUNC(PyObject*) PyUnicode_AsRawUnicodeEscapeString( + PyObject *unicode /* Unicode object */ + ); + +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject*) PyUnicode_EncodeRawUnicodeEscape( + const Py_UNICODE *data, /* Unicode char buffer */ + Py_ssize_t length /* Number of Py_UNICODE chars to encode */ + ) Py_DEPRECATED(3.3); +#endif + +/* --- Unicode Internal Codec --------------------------------------------- + + Only for internal use in _codecsmodule.c */ + +#ifndef Py_LIMITED_API +PyObject *_PyUnicode_DecodeUnicodeInternal( + const char *string, + Py_ssize_t length, + const char *errors + ); +#endif + +/* --- Latin-1 Codecs ----------------------------------------------------- + + Note: Latin-1 corresponds to the first 256 Unicode ordinals. + +*/ + +PyAPI_FUNC(PyObject*) PyUnicode_DecodeLatin1( + const char *string, /* Latin-1 encoded string */ + Py_ssize_t length, /* size of string */ + const char *errors /* error handling */ + ); + +PyAPI_FUNC(PyObject*) PyUnicode_AsLatin1String( + PyObject *unicode /* Unicode object */ + ); + +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject*) _PyUnicode_AsLatin1String( + PyObject* unicode, + const char* errors); + +PyAPI_FUNC(PyObject*) PyUnicode_EncodeLatin1( + const Py_UNICODE *data, /* Unicode char buffer */ + Py_ssize_t length, /* Number of Py_UNICODE chars to encode */ + const char *errors /* error handling */ + ) Py_DEPRECATED(3.3); +#endif + +/* --- ASCII Codecs ------------------------------------------------------- + + Only 7-bit ASCII data is accepted. All other codes generate errors. + +*/ + +PyAPI_FUNC(PyObject*) PyUnicode_DecodeASCII( + const char *string, /* ASCII encoded string */ + Py_ssize_t length, /* size of string */ + const char *errors /* error handling */ + ); + +PyAPI_FUNC(PyObject*) PyUnicode_AsASCIIString( + PyObject *unicode /* Unicode object */ + ); + +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject*) _PyUnicode_AsASCIIString( + PyObject* unicode, + const char* errors); + +PyAPI_FUNC(PyObject*) PyUnicode_EncodeASCII( + const Py_UNICODE *data, /* Unicode char buffer */ + Py_ssize_t length, /* Number of Py_UNICODE chars to encode */ + const char *errors /* error handling */ + ) Py_DEPRECATED(3.3); +#endif + +/* --- Character Map Codecs ----------------------------------------------- + + This codec uses mappings to encode and decode characters. + + Decoding mappings must map byte ordinals (integers in the range from 0 to + 255) to Unicode strings, integers (which are then interpreted as Unicode + ordinals) or None. Unmapped data bytes (ones which cause a LookupError) + as well as mapped to None, 0xFFFE or '\ufffe' are treated as "undefined + mapping" and cause an error. + + Encoding mappings must map Unicode ordinal integers to bytes objects, + integers in the range from 0 to 255 or None. 
Unmapped character + ordinals (ones which cause a LookupError) as well as mapped to + None are treated as "undefined mapping" and cause an error. + +*/ + +PyAPI_FUNC(PyObject*) PyUnicode_DecodeCharmap( + const char *string, /* Encoded string */ + Py_ssize_t length, /* size of string */ + PyObject *mapping, /* decoding mapping */ + const char *errors /* error handling */ + ); + +PyAPI_FUNC(PyObject*) PyUnicode_AsCharmapString( + PyObject *unicode, /* Unicode object */ + PyObject *mapping /* encoding mapping */ + ); + +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject*) PyUnicode_EncodeCharmap( + const Py_UNICODE *data, /* Unicode char buffer */ + Py_ssize_t length, /* Number of Py_UNICODE chars to encode */ + PyObject *mapping, /* encoding mapping */ + const char *errors /* error handling */ + ) Py_DEPRECATED(3.3); +PyAPI_FUNC(PyObject*) _PyUnicode_EncodeCharmap( + PyObject *unicode, /* Unicode object */ + PyObject *mapping, /* encoding mapping */ + const char *errors /* error handling */ + ); +#endif + +/* Translate a Py_UNICODE buffer of the given length by applying a + character mapping table to it and return the resulting Unicode + object. + + The mapping table must map Unicode ordinal integers to Unicode strings, + Unicode ordinal integers or None (causing deletion of the character). + + Mapping tables may be dictionaries or sequences. Unmapped character + ordinals (ones which cause a LookupError) are left untouched and + are copied as-is. + +*/ + +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject *) PyUnicode_TranslateCharmap( + const Py_UNICODE *data, /* Unicode char buffer */ + Py_ssize_t length, /* Number of Py_UNICODE chars to encode */ + PyObject *table, /* Translate table */ + const char *errors /* error handling */ + ) Py_DEPRECATED(3.3); +#endif + +#ifdef MS_WINDOWS + +/* --- MBCS codecs for Windows -------------------------------------------- */ + +PyAPI_FUNC(PyObject*) PyUnicode_DecodeMBCS( + const char *string, /* MBCS encoded string */ + Py_ssize_t length, /* size of string */ + const char *errors /* error handling */ + ); + +PyAPI_FUNC(PyObject*) PyUnicode_DecodeMBCSStateful( + const char *string, /* MBCS encoded string */ + Py_ssize_t length, /* size of string */ + const char *errors, /* error handling */ + Py_ssize_t *consumed /* bytes consumed */ + ); + +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03030000 +PyAPI_FUNC(PyObject*) PyUnicode_DecodeCodePageStateful( + int code_page, /* code page number */ + const char *string, /* encoded string */ + Py_ssize_t length, /* size of string */ + const char *errors, /* error handling */ + Py_ssize_t *consumed /* bytes consumed */ + ); +#endif + +PyAPI_FUNC(PyObject*) PyUnicode_AsMBCSString( + PyObject *unicode /* Unicode object */ + ); + +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject*) PyUnicode_EncodeMBCS( + const Py_UNICODE *data, /* Unicode char buffer */ + Py_ssize_t length, /* number of Py_UNICODE chars to encode */ + const char *errors /* error handling */ + ) Py_DEPRECATED(3.3); +#endif + +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03030000 +PyAPI_FUNC(PyObject*) PyUnicode_EncodeCodePage( + int code_page, /* code page number */ + PyObject *unicode, /* Unicode object */ + const char *errors /* error handling */ + ); +#endif + +#endif /* MS_WINDOWS */ + +#ifndef Py_LIMITED_API +/* --- Decimal Encoder ---------------------------------------------------- */ + +/* Takes a Unicode string holding a decimal value and writes it into + an output buffer using standard ASCII digit codes. 
+ + The output buffer has to provide at least length+1 bytes of storage + area. The output string is 0-terminated. + + The encoder converts whitespace to ' ', decimal characters to their + corresponding ASCII digit and all other Latin-1 characters except + \0 as-is. Characters outside this range (Unicode ordinals 1-256) + are treated as errors. This includes embedded NULL bytes. + + Error handling is defined by the errors argument: + + NULL or "strict": raise a ValueError + "ignore": ignore the wrong characters (these are not copied to the + output buffer) + "replace": replaces illegal characters with '?' + + Returns 0 on success, -1 on failure. + +*/ + +PyAPI_FUNC(int) PyUnicode_EncodeDecimal( + Py_UNICODE *s, /* Unicode buffer */ + Py_ssize_t length, /* Number of Py_UNICODE chars to encode */ + char *output, /* Output buffer; must have size >= length */ + const char *errors /* error handling */ + ) /* Py_DEPRECATED(3.3) */; + +/* Transforms code points that have decimal digit property to the + corresponding ASCII digit code points. + + Returns a new Unicode string on success, NULL on failure. +*/ + +PyAPI_FUNC(PyObject*) PyUnicode_TransformDecimalToASCII( + Py_UNICODE *s, /* Unicode buffer */ + Py_ssize_t length /* Number of Py_UNICODE chars to transform */ + ) /* Py_DEPRECATED(3.3) */; + +/* Converts a Unicode object holding a decimal value to an ASCII string + for use in int, float and complex parsers. + Transforms code points that have decimal digit property to the + corresponding ASCII digit code points. Transforms spaces to ASCII. + Transforms code points starting from the first non-ASCII code point that + is neither a decimal digit nor a space to the end into '?'. */ + +PyAPI_FUNC(PyObject*) _PyUnicode_TransformDecimalAndSpaceToASCII( + PyObject *unicode /* Unicode object */ + ); +#endif + +/* --- Locale encoding --------------------------------------------------- */ + +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03030000 +/* Decode a string from the current locale encoding. The decoder is strict if + *surrogateescape* is equal to zero, otherwise it uses the 'surrogateescape' + error handler (PEP 383) to escape undecodable bytes. If a byte sequence can + be decoded as a surrogate character and *surrogateescape* is not equal to + zero, the byte sequence is escaped using the 'surrogateescape' error handler + instead of being decoded. *str* must end with a null character but cannot + contain embedded null characters. */ + +PyAPI_FUNC(PyObject*) PyUnicode_DecodeLocaleAndSize( + const char *str, + Py_ssize_t len, + const char *errors); + +/* Similar to PyUnicode_DecodeLocaleAndSize(), but compute the string + length using strlen(). */ + +PyAPI_FUNC(PyObject*) PyUnicode_DecodeLocale( + const char *str, + const char *errors); + +/* Encode a Unicode object to the current locale encoding. The encoder is + strict if *surrogateescape* is equal to zero, otherwise the + "surrogateescape" error handler is used. Return a bytes object. The string + cannot contain embedded null characters. */ + +PyAPI_FUNC(PyObject*) PyUnicode_EncodeLocale( + PyObject *unicode, + const char *errors + ); +#endif + +/* --- File system encoding ---------------------------------------------- */ + +/* ParseTuple converter: encode str objects to bytes using + PyUnicode_EncodeFSDefault(); bytes objects are output as-is. 
*/ + +PyAPI_FUNC(int) PyUnicode_FSConverter(PyObject*, void*); + +/* ParseTuple converter: decode bytes objects to unicode using + PyUnicode_DecodeFSDefaultAndSize(); str objects are output as-is. */ + +PyAPI_FUNC(int) PyUnicode_FSDecoder(PyObject*, void*); + +/* Decode a null-terminated string using Py_FileSystemDefaultEncoding + and the "surrogateescape" error handler. + + If Py_FileSystemDefaultEncoding is not set, fall back to the locale + encoding. + + Use PyUnicode_DecodeFSDefaultAndSize() if the string length is known. +*/ + +PyAPI_FUNC(PyObject*) PyUnicode_DecodeFSDefault( + const char *s /* encoded string */ + ); + +/* Decode a string using Py_FileSystemDefaultEncoding + and the "surrogateescape" error handler. + + If Py_FileSystemDefaultEncoding is not set, fall back to the locale + encoding. +*/ + +PyAPI_FUNC(PyObject*) PyUnicode_DecodeFSDefaultAndSize( + const char *s, /* encoded string */ + Py_ssize_t size /* size */ + ); + +/* Encode a Unicode object to Py_FileSystemDefaultEncoding with the + "surrogateescape" error handler, and return bytes. + + If Py_FileSystemDefaultEncoding is not set, fall back to the locale + encoding. +*/ + +PyAPI_FUNC(PyObject*) PyUnicode_EncodeFSDefault( + PyObject *unicode + ); + +/* --- Methods & Slots ---------------------------------------------------- + + These are capable of handling Unicode objects and strings on input + (we refer to them as strings in the descriptions) and return + Unicode objects or integers as appropriate. */ + +/* Concat two strings giving a new Unicode string. */ + +PyAPI_FUNC(PyObject*) PyUnicode_Concat( + PyObject *left, /* Left string */ + PyObject *right /* Right string */ + ); + +/* Concat two strings and put the result in *pleft + (sets *pleft to NULL on error) */ + +PyAPI_FUNC(void) PyUnicode_Append( + PyObject **pleft, /* Pointer to left string */ + PyObject *right /* Right string */ + ); + +/* Concat two strings, put the result in *pleft and drop the right object + (sets *pleft to NULL on error) */ + +PyAPI_FUNC(void) PyUnicode_AppendAndDel( + PyObject **pleft, /* Pointer to left string */ + PyObject *right /* Right string */ + ); + +/* Split a string giving a list of Unicode strings. + + If sep is NULL, splitting will be done at all whitespace + substrings. Otherwise, splits occur at the given separator. + + At most maxsplit splits will be done. If negative, no limit is set. + + Separators are not included in the resulting list. + +*/ + +PyAPI_FUNC(PyObject*) PyUnicode_Split( + PyObject *s, /* String to split */ + PyObject *sep, /* String separator */ + Py_ssize_t maxsplit /* Maxsplit count */ + ); + +/* Dito, but split at line breaks. + + CRLF is considered to be one line break. Line breaks are not + included in the resulting list. */ + +PyAPI_FUNC(PyObject*) PyUnicode_Splitlines( + PyObject *s, /* String to split */ + int keepends /* If true, line end markers are included */ + ); + +/* Partition a string using a given separator. */ + +PyAPI_FUNC(PyObject*) PyUnicode_Partition( + PyObject *s, /* String to partition */ + PyObject *sep /* String separator */ + ); + +/* Partition a string using a given separator, searching from the end of the + string. */ + +PyAPI_FUNC(PyObject*) PyUnicode_RPartition( + PyObject *s, /* String to partition */ + PyObject *sep /* String separator */ + ); + +/* Split a string giving a list of Unicode strings. + + If sep is NULL, splitting will be done at all whitespace + substrings. Otherwise, splits occur at the given separator. + + At most maxsplit splits will be done. 
But unlike PyUnicode_Split + PyUnicode_RSplit splits from the end of the string. If negative, + no limit is set. + + Separators are not included in the resulting list. + +*/ + +PyAPI_FUNC(PyObject*) PyUnicode_RSplit( + PyObject *s, /* String to split */ + PyObject *sep, /* String separator */ + Py_ssize_t maxsplit /* Maxsplit count */ + ); + +/* Translate a string by applying a character mapping table to it and + return the resulting Unicode object. + + The mapping table must map Unicode ordinal integers to Unicode strings, + Unicode ordinal integers or None (causing deletion of the character). + + Mapping tables may be dictionaries or sequences. Unmapped character + ordinals (ones which cause a LookupError) are left untouched and + are copied as-is. + +*/ + +PyAPI_FUNC(PyObject *) PyUnicode_Translate( + PyObject *str, /* String */ + PyObject *table, /* Translate table */ + const char *errors /* error handling */ + ); + +/* Join a sequence of strings using the given separator and return + the resulting Unicode string. */ + +PyAPI_FUNC(PyObject*) PyUnicode_Join( + PyObject *separator, /* Separator string */ + PyObject *seq /* Sequence object */ + ); + +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject *) _PyUnicode_JoinArray( + PyObject *separator, + PyObject *const *items, + Py_ssize_t seqlen + ); +#endif /* Py_LIMITED_API */ + +/* Return 1 if substr matches str[start:end] at the given tail end, 0 + otherwise. */ + +PyAPI_FUNC(Py_ssize_t) PyUnicode_Tailmatch( + PyObject *str, /* String */ + PyObject *substr, /* Prefix or Suffix string */ + Py_ssize_t start, /* Start index */ + Py_ssize_t end, /* Stop index */ + int direction /* Tail end: -1 prefix, +1 suffix */ + ); + +/* Return the first position of substr in str[start:end] using the + given search direction or -1 if not found. -2 is returned in case + an error occurred and an exception is set. */ + +PyAPI_FUNC(Py_ssize_t) PyUnicode_Find( + PyObject *str, /* String */ + PyObject *substr, /* Substring to find */ + Py_ssize_t start, /* Start index */ + Py_ssize_t end, /* Stop index */ + int direction /* Find direction: +1 forward, -1 backward */ + ); + +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03030000 +/* Like PyUnicode_Find, but search for single character only. */ +PyAPI_FUNC(Py_ssize_t) PyUnicode_FindChar( + PyObject *str, + Py_UCS4 ch, + Py_ssize_t start, + Py_ssize_t end, + int direction + ); +#endif + +/* Count the number of occurrences of substr in str[start:end]. */ + +PyAPI_FUNC(Py_ssize_t) PyUnicode_Count( + PyObject *str, /* String */ + PyObject *substr, /* Substring to count */ + Py_ssize_t start, /* Start index */ + Py_ssize_t end /* Stop index */ + ); + +/* Replace at most maxcount occurrences of substr in str with replstr + and return the resulting Unicode object. */ + +PyAPI_FUNC(PyObject *) PyUnicode_Replace( + PyObject *str, /* String */ + PyObject *substr, /* Substring to find */ + PyObject *replstr, /* Substring to replace */ + Py_ssize_t maxcount /* Max. number of replacements to apply; + -1 = all */ + ); + +/* Compare two strings and return -1, 0, 1 for less than, equal, + greater than resp. + Raise an exception and return -1 on error. */ + +PyAPI_FUNC(int) PyUnicode_Compare( + PyObject *left, /* Left string */ + PyObject *right /* Right string */ + ); + +#ifndef Py_LIMITED_API +/* Test whether a unicode is equal to ASCII identifier. Return 1 if true, + 0 otherwise. The right argument must be ASCII identifier. + Any error occurs inside will be cleared before return. 
*/ + +PyAPI_FUNC(int) _PyUnicode_EqualToASCIIId( + PyObject *left, /* Left string */ + _Py_Identifier *right /* Right identifier */ + ); +#endif + +/* Compare a Unicode object with C string and return -1, 0, 1 for less than, + equal, and greater than, respectively. It is best to pass only + ASCII-encoded strings, but the function interprets the input string as + ISO-8859-1 if it contains non-ASCII characters. + This function does not raise exceptions. */ + +PyAPI_FUNC(int) PyUnicode_CompareWithASCIIString( + PyObject *left, + const char *right /* ASCII-encoded string */ + ); + +#ifndef Py_LIMITED_API +/* Test whether a unicode is equal to ASCII string. Return 1 if true, + 0 otherwise. The right argument must be ASCII-encoded string. + Any error occurs inside will be cleared before return. */ + +PyAPI_FUNC(int) _PyUnicode_EqualToASCIIString( + PyObject *left, + const char *right /* ASCII-encoded string */ + ); +#endif + +/* Rich compare two strings and return one of the following: + + - NULL in case an exception was raised + - Py_True or Py_False for successful comparisons + - Py_NotImplemented in case the type combination is unknown + + Possible values for op: + + Py_GT, Py_GE, Py_EQ, Py_NE, Py_LT, Py_LE + +*/ + +PyAPI_FUNC(PyObject *) PyUnicode_RichCompare( + PyObject *left, /* Left string */ + PyObject *right, /* Right string */ + int op /* Operation: Py_EQ, Py_NE, Py_GT, etc. */ + ); + +/* Apply an argument tuple or dictionary to a format string and return + the resulting Unicode string. */ + +PyAPI_FUNC(PyObject *) PyUnicode_Format( + PyObject *format, /* Format string */ + PyObject *args /* Argument tuple or dictionary */ + ); + +/* Checks whether element is contained in container and return 1/0 + accordingly. + + element has to coerce to a one element Unicode string. -1 is + returned in case of an error. */ + +PyAPI_FUNC(int) PyUnicode_Contains( + PyObject *container, /* Container string */ + PyObject *element /* Element string */ + ); + +/* Checks whether argument is a valid identifier. */ + +PyAPI_FUNC(int) PyUnicode_IsIdentifier(PyObject *s); + +#ifndef Py_LIMITED_API +/* Externally visible for str.strip(unicode) */ +PyAPI_FUNC(PyObject *) _PyUnicode_XStrip( + PyObject *self, + int striptype, + PyObject *sepobj + ); +#endif + +/* Using explicit passed-in values, insert the thousands grouping + into the string pointed to by buffer. For the argument descriptions, + see Objects/stringlib/localeutil.h */ +#ifndef Py_LIMITED_API +PyAPI_FUNC(Py_ssize_t) _PyUnicode_InsertThousandsGrouping( + PyObject *unicode, + Py_ssize_t index, + Py_ssize_t n_buffer, + void *digits, + Py_ssize_t n_digits, + Py_ssize_t min_width, + const char *grouping, + PyObject *thousands_sep, + Py_UCS4 *maxchar); +#endif +/* === Characters Type APIs =============================================== */ + +/* Helper array used by Py_UNICODE_ISSPACE(). */ + +#ifndef Py_LIMITED_API +PyAPI_DATA(const unsigned char) _Py_ascii_whitespace[]; + +/* These should not be used directly. Use the Py_UNICODE_IS* and + Py_UNICODE_TO* macros instead. + + These APIs are implemented in Objects/unicodectype.c. 
+ +*/ + +PyAPI_FUNC(int) _PyUnicode_IsLowercase( + Py_UCS4 ch /* Unicode character */ + ); + +PyAPI_FUNC(int) _PyUnicode_IsUppercase( + Py_UCS4 ch /* Unicode character */ + ); + +PyAPI_FUNC(int) _PyUnicode_IsTitlecase( + Py_UCS4 ch /* Unicode character */ + ); + +PyAPI_FUNC(int) _PyUnicode_IsXidStart( + Py_UCS4 ch /* Unicode character */ + ); + +PyAPI_FUNC(int) _PyUnicode_IsXidContinue( + Py_UCS4 ch /* Unicode character */ + ); + +PyAPI_FUNC(int) _PyUnicode_IsWhitespace( + const Py_UCS4 ch /* Unicode character */ + ); + +PyAPI_FUNC(int) _PyUnicode_IsLinebreak( + const Py_UCS4 ch /* Unicode character */ + ); + +PyAPI_FUNC(Py_UCS4) _PyUnicode_ToLowercase( + Py_UCS4 ch /* Unicode character */ + ) /* Py_DEPRECATED(3.3) */; + +PyAPI_FUNC(Py_UCS4) _PyUnicode_ToUppercase( + Py_UCS4 ch /* Unicode character */ + ) /* Py_DEPRECATED(3.3) */; + +PyAPI_FUNC(Py_UCS4) _PyUnicode_ToTitlecase( + Py_UCS4 ch /* Unicode character */ + ) Py_DEPRECATED(3.3); + +PyAPI_FUNC(int) _PyUnicode_ToLowerFull( + Py_UCS4 ch, /* Unicode character */ + Py_UCS4 *res + ); + +PyAPI_FUNC(int) _PyUnicode_ToTitleFull( + Py_UCS4 ch, /* Unicode character */ + Py_UCS4 *res + ); + +PyAPI_FUNC(int) _PyUnicode_ToUpperFull( + Py_UCS4 ch, /* Unicode character */ + Py_UCS4 *res + ); + +PyAPI_FUNC(int) _PyUnicode_ToFoldedFull( + Py_UCS4 ch, /* Unicode character */ + Py_UCS4 *res + ); + +PyAPI_FUNC(int) _PyUnicode_IsCaseIgnorable( + Py_UCS4 ch /* Unicode character */ + ); + +PyAPI_FUNC(int) _PyUnicode_IsCased( + Py_UCS4 ch /* Unicode character */ + ); + +PyAPI_FUNC(int) _PyUnicode_ToDecimalDigit( + Py_UCS4 ch /* Unicode character */ + ); + +PyAPI_FUNC(int) _PyUnicode_ToDigit( + Py_UCS4 ch /* Unicode character */ + ); + +PyAPI_FUNC(double) _PyUnicode_ToNumeric( + Py_UCS4 ch /* Unicode character */ + ); + +PyAPI_FUNC(int) _PyUnicode_IsDecimalDigit( + Py_UCS4 ch /* Unicode character */ + ); + +PyAPI_FUNC(int) _PyUnicode_IsDigit( + Py_UCS4 ch /* Unicode character */ + ); + +PyAPI_FUNC(int) _PyUnicode_IsNumeric( + Py_UCS4 ch /* Unicode character */ + ); + +PyAPI_FUNC(int) _PyUnicode_IsPrintable( + Py_UCS4 ch /* Unicode character */ + ); + +PyAPI_FUNC(int) _PyUnicode_IsAlpha( + Py_UCS4 ch /* Unicode character */ + ); + +PyAPI_FUNC(size_t) Py_UNICODE_strlen( + const Py_UNICODE *u + ) Py_DEPRECATED(3.3); + +PyAPI_FUNC(Py_UNICODE*) Py_UNICODE_strcpy( + Py_UNICODE *s1, + const Py_UNICODE *s2) Py_DEPRECATED(3.3); + +PyAPI_FUNC(Py_UNICODE*) Py_UNICODE_strcat( + Py_UNICODE *s1, const Py_UNICODE *s2) Py_DEPRECATED(3.3); + +PyAPI_FUNC(Py_UNICODE*) Py_UNICODE_strncpy( + Py_UNICODE *s1, + const Py_UNICODE *s2, + size_t n) Py_DEPRECATED(3.3); + +PyAPI_FUNC(int) Py_UNICODE_strcmp( + const Py_UNICODE *s1, + const Py_UNICODE *s2 + ) Py_DEPRECATED(3.3); + +PyAPI_FUNC(int) Py_UNICODE_strncmp( + const Py_UNICODE *s1, + const Py_UNICODE *s2, + size_t n + ) Py_DEPRECATED(3.3); + +PyAPI_FUNC(Py_UNICODE*) Py_UNICODE_strchr( + const Py_UNICODE *s, + Py_UNICODE c + ) Py_DEPRECATED(3.3); + +PyAPI_FUNC(Py_UNICODE*) Py_UNICODE_strrchr( + const Py_UNICODE *s, + Py_UNICODE c + ) Py_DEPRECATED(3.3); + +PyAPI_FUNC(PyObject*) _PyUnicode_FormatLong(PyObject *, int, int, int); + +/* Create a copy of a unicode string ending with a nul character. Return NULL + and raise a MemoryError exception on memory allocation failure, otherwise + return a new allocated buffer (use PyMem_Free() to free the buffer). 
*/ + +PyAPI_FUNC(Py_UNICODE*) PyUnicode_AsUnicodeCopy( + PyObject *unicode + ) Py_DEPRECATED(3.3); +#endif /* Py_LIMITED_API */ + +#if defined(Py_DEBUG) && !defined(Py_LIMITED_API) +PyAPI_FUNC(int) _PyUnicode_CheckConsistency( + PyObject *op, + int check_content); +#elif !defined(NDEBUG) +/* For asserts that call _PyUnicode_CheckConsistency(), which would + * otherwise be a problem when building with asserts but without Py_DEBUG. */ +#define _PyUnicode_CheckConsistency(op, check_content) PyUnicode_Check(op) +#endif + +#ifndef Py_LIMITED_API +/* Return an interned Unicode object for an Identifier; may fail if there is no memory.*/ +PyAPI_FUNC(PyObject*) _PyUnicode_FromId(_Py_Identifier*); +/* Clear all static strings. */ +PyAPI_FUNC(void) _PyUnicode_ClearStaticStrings(void); + +/* Fast equality check when the inputs are known to be exact unicode types + and where the hash values are equal (i.e. a very probable match) */ +PyAPI_FUNC(int) _PyUnicode_EQ(PyObject *, PyObject *); +#endif /* !Py_LIMITED_API */ + +#ifdef __cplusplus +} +#endif +#endif /* !Py_UNICODEOBJECT_H */ diff --git a/venv/Include/warnings.h b/venv/Include/warnings.h new file mode 100644 index 00000000..a675bb5d --- /dev/null +++ b/venv/Include/warnings.h @@ -0,0 +1,67 @@ +#ifndef Py_WARNINGS_H +#define Py_WARNINGS_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject*) _PyWarnings_Init(void); +#endif + +PyAPI_FUNC(int) PyErr_WarnEx( + PyObject *category, + const char *message, /* UTF-8 encoded string */ + Py_ssize_t stack_level); +PyAPI_FUNC(int) PyErr_WarnFormat( + PyObject *category, + Py_ssize_t stack_level, + const char *format, /* ASCII-encoded string */ + ...); + +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03060000 +/* Emit a ResourceWarning warning */ +PyAPI_FUNC(int) PyErr_ResourceWarning( + PyObject *source, + Py_ssize_t stack_level, + const char *format, /* ASCII-encoded string */ + ...); +#endif +#ifndef Py_LIMITED_API +PyAPI_FUNC(int) PyErr_WarnExplicitObject( + PyObject *category, + PyObject *message, + PyObject *filename, + int lineno, + PyObject *module, + PyObject *registry); +#endif +PyAPI_FUNC(int) PyErr_WarnExplicit( + PyObject *category, + const char *message, /* UTF-8 encoded string */ + const char *filename, /* decoded from the filesystem encoding */ + int lineno, + const char *module, /* UTF-8 encoded string */ + PyObject *registry); + +#ifndef Py_LIMITED_API +PyAPI_FUNC(int) +PyErr_WarnExplicitFormat(PyObject *category, + const char *filename, int lineno, + const char *module, PyObject *registry, + const char *format, ...); +#endif + +/* DEPRECATED: Use PyErr_WarnEx() instead. */ +#ifndef Py_LIMITED_API +#define PyErr_Warn(category, msg) PyErr_WarnEx(category, msg, 1) +#endif + +#ifndef Py_LIMITED_API +void _PyErr_WarnUnawaitedCoroutine(PyObject *coro); +#endif + +#ifdef __cplusplus +} +#endif +#endif /* !Py_WARNINGS_H */ + diff --git a/venv/Include/weakrefobject.h b/venv/Include/weakrefobject.h new file mode 100644 index 00000000..17051568 --- /dev/null +++ b/venv/Include/weakrefobject.h @@ -0,0 +1,86 @@ +/* Weak references objects for Python. */ + +#ifndef Py_WEAKREFOBJECT_H +#define Py_WEAKREFOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + + +typedef struct _PyWeakReference PyWeakReference; + +/* PyWeakReference is the base struct for the Python ReferenceType, ProxyType, + * and CallableProxyType. + */ +#ifndef Py_LIMITED_API +struct _PyWeakReference { + PyObject_HEAD + + /* The object to which this is a weak reference, or Py_None if none. 
+ * Note that this is a stealth reference: wr_object's refcount is + * not incremented to reflect this pointer. + */ + PyObject *wr_object; + + /* A callable to invoke when wr_object dies, or NULL if none. */ + PyObject *wr_callback; + + /* A cache for wr_object's hash code. As usual for hashes, this is -1 + * if the hash code isn't known yet. + */ + Py_hash_t hash; + + /* If wr_object is weakly referenced, wr_object has a doubly-linked NULL- + * terminated list of weak references to it. These are the list pointers. + * If wr_object goes away, wr_object is set to Py_None, and these pointers + * have no meaning then. + */ + PyWeakReference *wr_prev; + PyWeakReference *wr_next; +}; +#endif + +PyAPI_DATA(PyTypeObject) _PyWeakref_RefType; +PyAPI_DATA(PyTypeObject) _PyWeakref_ProxyType; +PyAPI_DATA(PyTypeObject) _PyWeakref_CallableProxyType; + +#define PyWeakref_CheckRef(op) PyObject_TypeCheck(op, &_PyWeakref_RefType) +#define PyWeakref_CheckRefExact(op) \ + (Py_TYPE(op) == &_PyWeakref_RefType) +#define PyWeakref_CheckProxy(op) \ + ((Py_TYPE(op) == &_PyWeakref_ProxyType) || \ + (Py_TYPE(op) == &_PyWeakref_CallableProxyType)) + +#define PyWeakref_Check(op) \ + (PyWeakref_CheckRef(op) || PyWeakref_CheckProxy(op)) + + +PyAPI_FUNC(PyObject *) PyWeakref_NewRef(PyObject *ob, + PyObject *callback); +PyAPI_FUNC(PyObject *) PyWeakref_NewProxy(PyObject *ob, + PyObject *callback); +PyAPI_FUNC(PyObject *) PyWeakref_GetObject(PyObject *ref); + +#ifndef Py_LIMITED_API +PyAPI_FUNC(Py_ssize_t) _PyWeakref_GetWeakrefCount(PyWeakReference *head); + +PyAPI_FUNC(void) _PyWeakref_ClearRef(PyWeakReference *self); +#endif + +/* Explanation for the Py_REFCNT() check: when a weakref's target is part + of a long chain of deallocations which triggers the trashcan mechanism, + clearing the weakrefs can be delayed long after the target's refcount + has dropped to zero. In the meantime, code accessing the weakref will + be able to "see" the target object even though it is supposed to be + unreachable. See issue #16602. */ + +#define PyWeakref_GET_OBJECT(ref) \ + (Py_REFCNT(((PyWeakReference *)(ref))->wr_object) > 0 \ + ? ((PyWeakReference *)(ref))->wr_object \ + : Py_None) + + +#ifdef __cplusplus +} +#endif +#endif /* !Py_WEAKREFOBJECT_H */ diff --git a/venv/Lib/__future__.py b/venv/Lib/__future__.py new file mode 100644 index 00000000..ce8bed7a --- /dev/null +++ b/venv/Lib/__future__.py @@ -0,0 +1,146 @@ +"""Record of phased-in incompatible language changes. + +Each line is of the form: + + FeatureName = "_Feature(" OptionalRelease "," MandatoryRelease "," + CompilerFlag ")" + +where, normally, OptionalRelease < MandatoryRelease, and both are 5-tuples +of the same form as sys.version_info: + + (PY_MAJOR_VERSION, # the 2 in 2.1.0a3; an int + PY_MINOR_VERSION, # the 1; an int + PY_MICRO_VERSION, # the 0; an int + PY_RELEASE_LEVEL, # "alpha", "beta", "candidate" or "final"; string + PY_RELEASE_SERIAL # the 3; an int + ) + +OptionalRelease records the first release in which + + from __future__ import FeatureName + +was accepted. + +In the case of MandatoryReleases that have not yet occurred, +MandatoryRelease predicts the release in which the feature will become part +of the language. + +Else MandatoryRelease records when the feature became part of the language; +in releases at or after that, modules no longer need + + from __future__ import FeatureName + +to use the feature in question, but may continue to use such imports. + +MandatoryRelease may also be None, meaning that a planned feature got +dropped. 
+ +Instances of class _Feature have two corresponding methods, +.getOptionalRelease() and .getMandatoryRelease(). + +CompilerFlag is the (bitfield) flag that should be passed in the fourth +argument to the builtin function compile() to enable the feature in +dynamically compiled code. This flag is stored in the .compiler_flag +attribute on _Future instances. These values must match the appropriate +#defines of CO_xxx flags in Include/compile.h. + +No feature line is ever to be deleted from this file. +""" + +all_feature_names = [ + "nested_scopes", + "generators", + "division", + "absolute_import", + "with_statement", + "print_function", + "unicode_literals", + "barry_as_FLUFL", + "generator_stop", + "annotations", +] + +__all__ = ["all_feature_names"] + all_feature_names + +# The CO_xxx symbols are defined here under the same names defined in +# code.h and used by compile.h, so that an editor search will find them here. +# However, they're not exported in __all__, because they don't really belong to +# this module. +CO_NESTED = 0x0010 # nested_scopes +CO_GENERATOR_ALLOWED = 0 # generators (obsolete, was 0x1000) +CO_FUTURE_DIVISION = 0x2000 # division +CO_FUTURE_ABSOLUTE_IMPORT = 0x4000 # perform absolute imports by default +CO_FUTURE_WITH_STATEMENT = 0x8000 # with statement +CO_FUTURE_PRINT_FUNCTION = 0x10000 # print function +CO_FUTURE_UNICODE_LITERALS = 0x20000 # unicode string literals +CO_FUTURE_BARRY_AS_BDFL = 0x40000 +CO_FUTURE_GENERATOR_STOP = 0x80000 # StopIteration becomes RuntimeError in generators +CO_FUTURE_ANNOTATIONS = 0x100000 # annotations become strings at runtime + +class _Feature: + def __init__(self, optionalRelease, mandatoryRelease, compiler_flag): + self.optional = optionalRelease + self.mandatory = mandatoryRelease + self.compiler_flag = compiler_flag + + def getOptionalRelease(self): + """Return first release in which this feature was recognized. + + This is a 5-tuple, of the same form as sys.version_info. + """ + + return self.optional + + def getMandatoryRelease(self): + """Return release in which this feature will become mandatory. + + This is a 5-tuple, of the same form as sys.version_info, or, if + the feature was dropped, is None. 
+ """ + + return self.mandatory + + def __repr__(self): + return "_Feature" + repr((self.optional, + self.mandatory, + self.compiler_flag)) + +nested_scopes = _Feature((2, 1, 0, "beta", 1), + (2, 2, 0, "alpha", 0), + CO_NESTED) + +generators = _Feature((2, 2, 0, "alpha", 1), + (2, 3, 0, "final", 0), + CO_GENERATOR_ALLOWED) + +division = _Feature((2, 2, 0, "alpha", 2), + (3, 0, 0, "alpha", 0), + CO_FUTURE_DIVISION) + +absolute_import = _Feature((2, 5, 0, "alpha", 1), + (3, 0, 0, "alpha", 0), + CO_FUTURE_ABSOLUTE_IMPORT) + +with_statement = _Feature((2, 5, 0, "alpha", 1), + (2, 6, 0, "alpha", 0), + CO_FUTURE_WITH_STATEMENT) + +print_function = _Feature((2, 6, 0, "alpha", 2), + (3, 0, 0, "alpha", 0), + CO_FUTURE_PRINT_FUNCTION) + +unicode_literals = _Feature((2, 6, 0, "alpha", 2), + (3, 0, 0, "alpha", 0), + CO_FUTURE_UNICODE_LITERALS) + +barry_as_FLUFL = _Feature((3, 1, 0, "alpha", 2), + (3, 9, 0, "alpha", 0), + CO_FUTURE_BARRY_AS_BDFL) + +generator_stop = _Feature((3, 5, 0, "beta", 1), + (3, 7, 0, "alpha", 0), + CO_FUTURE_GENERATOR_STOP) + +annotations = _Feature((3, 7, 0, "beta", 1), + (4, 0, 0, "alpha", 0), + CO_FUTURE_ANNOTATIONS) diff --git a/venv/Lib/__pycache__/__future__.cpython-37.pyc b/venv/Lib/__pycache__/__future__.cpython-37.pyc new file mode 100644 index 00000000..b8480889 Binary files /dev/null and b/venv/Lib/__pycache__/__future__.cpython-37.pyc differ diff --git a/venv/Lib/__pycache__/_bootlocale.cpython-37.pyc b/venv/Lib/__pycache__/_bootlocale.cpython-37.pyc new file mode 100644 index 00000000..e96e0759 Binary files /dev/null and b/venv/Lib/__pycache__/_bootlocale.cpython-37.pyc differ diff --git a/venv/Lib/__pycache__/_collections_abc.cpython-37.pyc b/venv/Lib/__pycache__/_collections_abc.cpython-37.pyc new file mode 100644 index 00000000..b90baa61 Binary files /dev/null and b/venv/Lib/__pycache__/_collections_abc.cpython-37.pyc differ diff --git a/venv/Lib/__pycache__/_weakrefset.cpython-37.pyc b/venv/Lib/__pycache__/_weakrefset.cpython-37.pyc new file mode 100644 index 00000000..7e08dfd6 Binary files /dev/null and b/venv/Lib/__pycache__/_weakrefset.cpython-37.pyc differ diff --git a/venv/Lib/__pycache__/abc.cpython-37.pyc b/venv/Lib/__pycache__/abc.cpython-37.pyc new file mode 100644 index 00000000..f0d7964c Binary files /dev/null and b/venv/Lib/__pycache__/abc.cpython-37.pyc differ diff --git a/venv/Lib/__pycache__/base64.cpython-37.pyc b/venv/Lib/__pycache__/base64.cpython-37.pyc new file mode 100644 index 00000000..8d0c9360 Binary files /dev/null and b/venv/Lib/__pycache__/base64.cpython-37.pyc differ diff --git a/venv/Lib/__pycache__/bisect.cpython-37.pyc b/venv/Lib/__pycache__/bisect.cpython-37.pyc new file mode 100644 index 00000000..b47c83f8 Binary files /dev/null and b/venv/Lib/__pycache__/bisect.cpython-37.pyc differ diff --git a/venv/Lib/__pycache__/codecs.cpython-37.pyc b/venv/Lib/__pycache__/codecs.cpython-37.pyc new file mode 100644 index 00000000..8d5a558a Binary files /dev/null and b/venv/Lib/__pycache__/codecs.cpython-37.pyc differ diff --git a/venv/Lib/__pycache__/copy.cpython-37.pyc b/venv/Lib/__pycache__/copy.cpython-37.pyc new file mode 100644 index 00000000..2814c4ce Binary files /dev/null and b/venv/Lib/__pycache__/copy.cpython-37.pyc differ diff --git a/venv/Lib/__pycache__/copyreg.cpython-37.pyc b/venv/Lib/__pycache__/copyreg.cpython-37.pyc new file mode 100644 index 00000000..0161bba5 Binary files /dev/null and b/venv/Lib/__pycache__/copyreg.cpython-37.pyc differ diff --git a/venv/Lib/__pycache__/enum.cpython-37.pyc 
b/venv/Lib/__pycache__/enum.cpython-37.pyc new file mode 100644 index 00000000..d23a30f9 Binary files /dev/null and b/venv/Lib/__pycache__/enum.cpython-37.pyc differ diff --git a/venv/Lib/__pycache__/fnmatch.cpython-37.pyc b/venv/Lib/__pycache__/fnmatch.cpython-37.pyc new file mode 100644 index 00000000..faa45233 Binary files /dev/null and b/venv/Lib/__pycache__/fnmatch.cpython-37.pyc differ diff --git a/venv/Lib/__pycache__/functools.cpython-37.pyc b/venv/Lib/__pycache__/functools.cpython-37.pyc new file mode 100644 index 00000000..ffc92585 Binary files /dev/null and b/venv/Lib/__pycache__/functools.cpython-37.pyc differ diff --git a/venv/Lib/__pycache__/genericpath.cpython-37.pyc b/venv/Lib/__pycache__/genericpath.cpython-37.pyc new file mode 100644 index 00000000..aefccafc Binary files /dev/null and b/venv/Lib/__pycache__/genericpath.cpython-37.pyc differ diff --git a/venv/Lib/__pycache__/hashlib.cpython-37.pyc b/venv/Lib/__pycache__/hashlib.cpython-37.pyc new file mode 100644 index 00000000..218e6503 Binary files /dev/null and b/venv/Lib/__pycache__/hashlib.cpython-37.pyc differ diff --git a/venv/Lib/__pycache__/heapq.cpython-37.pyc b/venv/Lib/__pycache__/heapq.cpython-37.pyc new file mode 100644 index 00000000..7e917c79 Binary files /dev/null and b/venv/Lib/__pycache__/heapq.cpython-37.pyc differ diff --git a/venv/Lib/__pycache__/hmac.cpython-37.pyc b/venv/Lib/__pycache__/hmac.cpython-37.pyc new file mode 100644 index 00000000..09c84bb0 Binary files /dev/null and b/venv/Lib/__pycache__/hmac.cpython-37.pyc differ diff --git a/venv/Lib/__pycache__/imp.cpython-37.pyc b/venv/Lib/__pycache__/imp.cpython-37.pyc new file mode 100644 index 00000000..52bd2d4c Binary files /dev/null and b/venv/Lib/__pycache__/imp.cpython-37.pyc differ diff --git a/venv/Lib/__pycache__/io.cpython-37.pyc b/venv/Lib/__pycache__/io.cpython-37.pyc new file mode 100644 index 00000000..705dc751 Binary files /dev/null and b/venv/Lib/__pycache__/io.cpython-37.pyc differ diff --git a/venv/Lib/__pycache__/keyword.cpython-37.pyc b/venv/Lib/__pycache__/keyword.cpython-37.pyc new file mode 100644 index 00000000..b728de06 Binary files /dev/null and b/venv/Lib/__pycache__/keyword.cpython-37.pyc differ diff --git a/venv/Lib/__pycache__/linecache.cpython-37.pyc b/venv/Lib/__pycache__/linecache.cpython-37.pyc new file mode 100644 index 00000000..49d912be Binary files /dev/null and b/venv/Lib/__pycache__/linecache.cpython-37.pyc differ diff --git a/venv/Lib/__pycache__/locale.cpython-37.pyc b/venv/Lib/__pycache__/locale.cpython-37.pyc new file mode 100644 index 00000000..bd9b115d Binary files /dev/null and b/venv/Lib/__pycache__/locale.cpython-37.pyc differ diff --git a/venv/Lib/__pycache__/ntpath.cpython-37.pyc b/venv/Lib/__pycache__/ntpath.cpython-37.pyc new file mode 100644 index 00000000..517523fe Binary files /dev/null and b/venv/Lib/__pycache__/ntpath.cpython-37.pyc differ diff --git a/venv/Lib/__pycache__/operator.cpython-37.pyc b/venv/Lib/__pycache__/operator.cpython-37.pyc new file mode 100644 index 00000000..3adb2a32 Binary files /dev/null and b/venv/Lib/__pycache__/operator.cpython-37.pyc differ diff --git a/venv/Lib/__pycache__/os.cpython-37.pyc b/venv/Lib/__pycache__/os.cpython-37.pyc new file mode 100644 index 00000000..71b51479 Binary files /dev/null and b/venv/Lib/__pycache__/os.cpython-37.pyc differ diff --git a/venv/Lib/__pycache__/posixpath.cpython-37.pyc b/venv/Lib/__pycache__/posixpath.cpython-37.pyc new file mode 100644 index 00000000..b58477be Binary files /dev/null and 
b/venv/Lib/__pycache__/posixpath.cpython-37.pyc differ diff --git a/venv/Lib/__pycache__/random.cpython-37.pyc b/venv/Lib/__pycache__/random.cpython-37.pyc new file mode 100644 index 00000000..95cea152 Binary files /dev/null and b/venv/Lib/__pycache__/random.cpython-37.pyc differ diff --git a/venv/Lib/__pycache__/re.cpython-37.pyc b/venv/Lib/__pycache__/re.cpython-37.pyc new file mode 100644 index 00000000..8102d372 Binary files /dev/null and b/venv/Lib/__pycache__/re.cpython-37.pyc differ diff --git a/venv/Lib/__pycache__/reprlib.cpython-37.pyc b/venv/Lib/__pycache__/reprlib.cpython-37.pyc new file mode 100644 index 00000000..19646996 Binary files /dev/null and b/venv/Lib/__pycache__/reprlib.cpython-37.pyc differ diff --git a/venv/Lib/__pycache__/shutil.cpython-37.pyc b/venv/Lib/__pycache__/shutil.cpython-37.pyc new file mode 100644 index 00000000..eb2ac749 Binary files /dev/null and b/venv/Lib/__pycache__/shutil.cpython-37.pyc differ diff --git a/venv/Lib/__pycache__/site.cpython-37.pyc b/venv/Lib/__pycache__/site.cpython-37.pyc new file mode 100644 index 00000000..dafa94f6 Binary files /dev/null and b/venv/Lib/__pycache__/site.cpython-37.pyc differ diff --git a/venv/Lib/__pycache__/sre_compile.cpython-37.pyc b/venv/Lib/__pycache__/sre_compile.cpython-37.pyc new file mode 100644 index 00000000..17ca4a0f Binary files /dev/null and b/venv/Lib/__pycache__/sre_compile.cpython-37.pyc differ diff --git a/venv/Lib/__pycache__/sre_constants.cpython-37.pyc b/venv/Lib/__pycache__/sre_constants.cpython-37.pyc new file mode 100644 index 00000000..f295d947 Binary files /dev/null and b/venv/Lib/__pycache__/sre_constants.cpython-37.pyc differ diff --git a/venv/Lib/__pycache__/sre_parse.cpython-37.pyc b/venv/Lib/__pycache__/sre_parse.cpython-37.pyc new file mode 100644 index 00000000..4ee8b490 Binary files /dev/null and b/venv/Lib/__pycache__/sre_parse.cpython-37.pyc differ diff --git a/venv/Lib/__pycache__/stat.cpython-37.pyc b/venv/Lib/__pycache__/stat.cpython-37.pyc new file mode 100644 index 00000000..d3b38e1f Binary files /dev/null and b/venv/Lib/__pycache__/stat.cpython-37.pyc differ diff --git a/venv/Lib/__pycache__/struct.cpython-37.pyc b/venv/Lib/__pycache__/struct.cpython-37.pyc new file mode 100644 index 00000000..1c37bf08 Binary files /dev/null and b/venv/Lib/__pycache__/struct.cpython-37.pyc differ diff --git a/venv/Lib/__pycache__/tarfile.cpython-37.pyc b/venv/Lib/__pycache__/tarfile.cpython-37.pyc new file mode 100644 index 00000000..ab855b15 Binary files /dev/null and b/venv/Lib/__pycache__/tarfile.cpython-37.pyc differ diff --git a/venv/Lib/__pycache__/tempfile.cpython-37.pyc b/venv/Lib/__pycache__/tempfile.cpython-37.pyc new file mode 100644 index 00000000..2ece8baa Binary files /dev/null and b/venv/Lib/__pycache__/tempfile.cpython-37.pyc differ diff --git a/venv/Lib/__pycache__/token.cpython-37.pyc b/venv/Lib/__pycache__/token.cpython-37.pyc new file mode 100644 index 00000000..1092d387 Binary files /dev/null and b/venv/Lib/__pycache__/token.cpython-37.pyc differ diff --git a/venv/Lib/__pycache__/tokenize.cpython-37.pyc b/venv/Lib/__pycache__/tokenize.cpython-37.pyc new file mode 100644 index 00000000..86f3618a Binary files /dev/null and b/venv/Lib/__pycache__/tokenize.cpython-37.pyc differ diff --git a/venv/Lib/__pycache__/types.cpython-37.pyc b/venv/Lib/__pycache__/types.cpython-37.pyc new file mode 100644 index 00000000..1bf96718 Binary files /dev/null and b/venv/Lib/__pycache__/types.cpython-37.pyc differ diff --git a/venv/Lib/__pycache__/warnings.cpython-37.pyc 
b/venv/Lib/__pycache__/warnings.cpython-37.pyc new file mode 100644 index 00000000..1ab0a1ce Binary files /dev/null and b/venv/Lib/__pycache__/warnings.cpython-37.pyc differ diff --git a/venv/Lib/__pycache__/weakref.cpython-37.pyc b/venv/Lib/__pycache__/weakref.cpython-37.pyc new file mode 100644 index 00000000..3760d881 Binary files /dev/null and b/venv/Lib/__pycache__/weakref.cpython-37.pyc differ diff --git a/venv/Lib/_bootlocale.py b/venv/Lib/_bootlocale.py new file mode 100644 index 00000000..3273a3b4 --- /dev/null +++ b/venv/Lib/_bootlocale.py @@ -0,0 +1,46 @@ +"""A minimal subset of the locale module used at interpreter startup +(imported by the _io module), in order to reduce startup time. + +Don't import directly from third-party code; use the `locale` module instead! +""" + +import sys +import _locale + +if sys.platform.startswith("win"): + def getpreferredencoding(do_setlocale=True): + if sys.flags.utf8_mode: + return 'UTF-8' + return _locale._getdefaultlocale()[1] +else: + try: + _locale.CODESET + except AttributeError: + if hasattr(sys, 'getandroidapilevel'): + # On Android langinfo.h and CODESET are missing, and UTF-8 is + # always used in mbstowcs() and wcstombs(). + def getpreferredencoding(do_setlocale=True): + return 'UTF-8' + else: + def getpreferredencoding(do_setlocale=True): + if sys.flags.utf8_mode: + return 'UTF-8' + # This path for legacy systems needs the more complex + # getdefaultlocale() function, import the full locale module. + import locale + return locale.getpreferredencoding(do_setlocale) + else: + def getpreferredencoding(do_setlocale=True): + assert not do_setlocale + if sys.flags.utf8_mode: + return 'UTF-8' + result = _locale.nl_langinfo(_locale.CODESET) + if not result and sys.platform == 'darwin': + # nl_langinfo can return an empty string + # when the setting has an invalid value. + # Default to UTF-8 in that case because + # UTF-8 is the default charset on OSX and + # returning nothing will crash the + # interpreter. + result = 'UTF-8' + return result diff --git a/venv/Lib/_collections_abc.py b/venv/Lib/_collections_abc.py new file mode 100644 index 00000000..dbe30dff --- /dev/null +++ b/venv/Lib/_collections_abc.py @@ -0,0 +1,1011 @@ +# Copyright 2007 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Abstract Base Classes (ABCs) for collections, according to PEP 3119. + +Unit tests are in test_collections. +""" + +from abc import ABCMeta, abstractmethod +import sys + +__all__ = ["Awaitable", "Coroutine", + "AsyncIterable", "AsyncIterator", "AsyncGenerator", + "Hashable", "Iterable", "Iterator", "Generator", "Reversible", + "Sized", "Container", "Callable", "Collection", + "Set", "MutableSet", + "Mapping", "MutableMapping", + "MappingView", "KeysView", "ItemsView", "ValuesView", + "Sequence", "MutableSequence", + "ByteString", + ] + +# This module has been renamed from collections.abc to _collections_abc to +# speed up interpreter startup. Some of the types such as MutableMapping are +# required early but collections module imports a lot of other modules. +# See issue #19218 +__name__ = "collections.abc" + +# Private list of types that we want to register with the various ABCs +# so that they will pass tests like: +# it = iter(somebytearray) +# assert isinstance(it, Iterable) +# Note: in other implementations, these types might not be distinct +# and they may have their own implementation specific types that +# are not included on this list. 
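# A minimal standalone sketch (not part of this module) of the checks the
# registrations below are meant to guarantee: built-in iterator objects such
# as bytearray and dict-view iterators satisfy the public ABCs.
from collections.abc import Iterable, Iterator

assert isinstance(iter(bytearray(b"abc")), Iterable)    # bytearray_iterator
assert isinstance(iter({}.keys()), Iterator)            # dict_keyiterator
assert isinstance(reversed([1, 2, 3]), Iterator)        # list_reverseiterator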
+bytes_iterator = type(iter(b'')) +bytearray_iterator = type(iter(bytearray())) +#callable_iterator = ??? +dict_keyiterator = type(iter({}.keys())) +dict_valueiterator = type(iter({}.values())) +dict_itemiterator = type(iter({}.items())) +list_iterator = type(iter([])) +list_reverseiterator = type(iter(reversed([]))) +range_iterator = type(iter(range(0))) +longrange_iterator = type(iter(range(1 << 1000))) +set_iterator = type(iter(set())) +str_iterator = type(iter("")) +tuple_iterator = type(iter(())) +zip_iterator = type(iter(zip())) +## views ## +dict_keys = type({}.keys()) +dict_values = type({}.values()) +dict_items = type({}.items()) +## misc ## +mappingproxy = type(type.__dict__) +generator = type((lambda: (yield))()) +## coroutine ## +async def _coro(): pass +_coro = _coro() +coroutine = type(_coro) +_coro.close() # Prevent ResourceWarning +del _coro +## asynchronous generator ## +async def _ag(): yield +_ag = _ag() +async_generator = type(_ag) +del _ag + + +### ONE-TRICK PONIES ### + +def _check_methods(C, *methods): + mro = C.__mro__ + for method in methods: + for B in mro: + if method in B.__dict__: + if B.__dict__[method] is None: + return NotImplemented + break + else: + return NotImplemented + return True + +class Hashable(metaclass=ABCMeta): + + __slots__ = () + + @abstractmethod + def __hash__(self): + return 0 + + @classmethod + def __subclasshook__(cls, C): + if cls is Hashable: + return _check_methods(C, "__hash__") + return NotImplemented + + +class Awaitable(metaclass=ABCMeta): + + __slots__ = () + + @abstractmethod + def __await__(self): + yield + + @classmethod + def __subclasshook__(cls, C): + if cls is Awaitable: + return _check_methods(C, "__await__") + return NotImplemented + + +class Coroutine(Awaitable): + + __slots__ = () + + @abstractmethod + def send(self, value): + """Send a value into the coroutine. + Return next yielded value or raise StopIteration. + """ + raise StopIteration + + @abstractmethod + def throw(self, typ, val=None, tb=None): + """Raise an exception in the coroutine. + Return next yielded value or raise StopIteration. + """ + if val is None: + if tb is None: + raise typ + val = typ() + if tb is not None: + val = val.with_traceback(tb) + raise val + + def close(self): + """Raise GeneratorExit inside coroutine. + """ + try: + self.throw(GeneratorExit) + except (GeneratorExit, StopIteration): + pass + else: + raise RuntimeError("coroutine ignored GeneratorExit") + + @classmethod + def __subclasshook__(cls, C): + if cls is Coroutine: + return _check_methods(C, '__await__', 'send', 'throw', 'close') + return NotImplemented + + +Coroutine.register(coroutine) + + +class AsyncIterable(metaclass=ABCMeta): + + __slots__ = () + + @abstractmethod + def __aiter__(self): + return AsyncIterator() + + @classmethod + def __subclasshook__(cls, C): + if cls is AsyncIterable: + return _check_methods(C, "__aiter__") + return NotImplemented + + +class AsyncIterator(AsyncIterable): + + __slots__ = () + + @abstractmethod + async def __anext__(self): + """Return the next item or raise StopAsyncIteration when exhausted.""" + raise StopAsyncIteration + + def __aiter__(self): + return self + + @classmethod + def __subclasshook__(cls, C): + if cls is AsyncIterator: + return _check_methods(C, "__anext__", "__aiter__") + return NotImplemented + + +class AsyncGenerator(AsyncIterator): + + __slots__ = () + + async def __anext__(self): + """Return the next item from the asynchronous generator. + When exhausted, raise StopAsyncIteration. 
+ """ + return await self.asend(None) + + @abstractmethod + async def asend(self, value): + """Send a value into the asynchronous generator. + Return next yielded value or raise StopAsyncIteration. + """ + raise StopAsyncIteration + + @abstractmethod + async def athrow(self, typ, val=None, tb=None): + """Raise an exception in the asynchronous generator. + Return next yielded value or raise StopAsyncIteration. + """ + if val is None: + if tb is None: + raise typ + val = typ() + if tb is not None: + val = val.with_traceback(tb) + raise val + + async def aclose(self): + """Raise GeneratorExit inside coroutine. + """ + try: + await self.athrow(GeneratorExit) + except (GeneratorExit, StopAsyncIteration): + pass + else: + raise RuntimeError("asynchronous generator ignored GeneratorExit") + + @classmethod + def __subclasshook__(cls, C): + if cls is AsyncGenerator: + return _check_methods(C, '__aiter__', '__anext__', + 'asend', 'athrow', 'aclose') + return NotImplemented + + +AsyncGenerator.register(async_generator) + + +class Iterable(metaclass=ABCMeta): + + __slots__ = () + + @abstractmethod + def __iter__(self): + while False: + yield None + + @classmethod + def __subclasshook__(cls, C): + if cls is Iterable: + return _check_methods(C, "__iter__") + return NotImplemented + + +class Iterator(Iterable): + + __slots__ = () + + @abstractmethod + def __next__(self): + 'Return the next item from the iterator. When exhausted, raise StopIteration' + raise StopIteration + + def __iter__(self): + return self + + @classmethod + def __subclasshook__(cls, C): + if cls is Iterator: + return _check_methods(C, '__iter__', '__next__') + return NotImplemented + +Iterator.register(bytes_iterator) +Iterator.register(bytearray_iterator) +#Iterator.register(callable_iterator) +Iterator.register(dict_keyiterator) +Iterator.register(dict_valueiterator) +Iterator.register(dict_itemiterator) +Iterator.register(list_iterator) +Iterator.register(list_reverseiterator) +Iterator.register(range_iterator) +Iterator.register(longrange_iterator) +Iterator.register(set_iterator) +Iterator.register(str_iterator) +Iterator.register(tuple_iterator) +Iterator.register(zip_iterator) + + +class Reversible(Iterable): + + __slots__ = () + + @abstractmethod + def __reversed__(self): + while False: + yield None + + @classmethod + def __subclasshook__(cls, C): + if cls is Reversible: + return _check_methods(C, "__reversed__", "__iter__") + return NotImplemented + + +class Generator(Iterator): + + __slots__ = () + + def __next__(self): + """Return the next item from the generator. + When exhausted, raise StopIteration. + """ + return self.send(None) + + @abstractmethod + def send(self, value): + """Send a value into the generator. + Return next yielded value or raise StopIteration. + """ + raise StopIteration + + @abstractmethod + def throw(self, typ, val=None, tb=None): + """Raise an exception in the generator. + Return next yielded value or raise StopIteration. + """ + if val is None: + if tb is None: + raise typ + val = typ() + if tb is not None: + val = val.with_traceback(tb) + raise val + + def close(self): + """Raise GeneratorExit inside generator. 
+ """ + try: + self.throw(GeneratorExit) + except (GeneratorExit, StopIteration): + pass + else: + raise RuntimeError("generator ignored GeneratorExit") + + @classmethod + def __subclasshook__(cls, C): + if cls is Generator: + return _check_methods(C, '__iter__', '__next__', + 'send', 'throw', 'close') + return NotImplemented + +Generator.register(generator) + + +class Sized(metaclass=ABCMeta): + + __slots__ = () + + @abstractmethod + def __len__(self): + return 0 + + @classmethod + def __subclasshook__(cls, C): + if cls is Sized: + return _check_methods(C, "__len__") + return NotImplemented + + +class Container(metaclass=ABCMeta): + + __slots__ = () + + @abstractmethod + def __contains__(self, x): + return False + + @classmethod + def __subclasshook__(cls, C): + if cls is Container: + return _check_methods(C, "__contains__") + return NotImplemented + +class Collection(Sized, Iterable, Container): + + __slots__ = () + + @classmethod + def __subclasshook__(cls, C): + if cls is Collection: + return _check_methods(C, "__len__", "__iter__", "__contains__") + return NotImplemented + +class Callable(metaclass=ABCMeta): + + __slots__ = () + + @abstractmethod + def __call__(self, *args, **kwds): + return False + + @classmethod + def __subclasshook__(cls, C): + if cls is Callable: + return _check_methods(C, "__call__") + return NotImplemented + + +### SETS ### + + +class Set(Collection): + + """A set is a finite, iterable container. + + This class provides concrete generic implementations of all + methods except for __contains__, __iter__ and __len__. + + To override the comparisons (presumably for speed, as the + semantics are fixed), redefine __le__ and __ge__, + then the other operations will automatically follow suit. + """ + + __slots__ = () + + def __le__(self, other): + if not isinstance(other, Set): + return NotImplemented + if len(self) > len(other): + return False + for elem in self: + if elem not in other: + return False + return True + + def __lt__(self, other): + if not isinstance(other, Set): + return NotImplemented + return len(self) < len(other) and self.__le__(other) + + def __gt__(self, other): + if not isinstance(other, Set): + return NotImplemented + return len(self) > len(other) and self.__ge__(other) + + def __ge__(self, other): + if not isinstance(other, Set): + return NotImplemented + if len(self) < len(other): + return False + for elem in other: + if elem not in self: + return False + return True + + def __eq__(self, other): + if not isinstance(other, Set): + return NotImplemented + return len(self) == len(other) and self.__le__(other) + + @classmethod + def _from_iterable(cls, it): + '''Construct an instance of the class from any iterable input. + + Must override this method if the class constructor signature + does not accept an iterable for an input. + ''' + return cls(it) + + def __and__(self, other): + if not isinstance(other, Iterable): + return NotImplemented + return self._from_iterable(value for value in other if value in self) + + __rand__ = __and__ + + def isdisjoint(self, other): + 'Return True if two sets have a null intersection.' 
+ for value in other: + if value in self: + return False + return True + + def __or__(self, other): + if not isinstance(other, Iterable): + return NotImplemented + chain = (e for s in (self, other) for e in s) + return self._from_iterable(chain) + + __ror__ = __or__ + + def __sub__(self, other): + if not isinstance(other, Set): + if not isinstance(other, Iterable): + return NotImplemented + other = self._from_iterable(other) + return self._from_iterable(value for value in self + if value not in other) + + def __rsub__(self, other): + if not isinstance(other, Set): + if not isinstance(other, Iterable): + return NotImplemented + other = self._from_iterable(other) + return self._from_iterable(value for value in other + if value not in self) + + def __xor__(self, other): + if not isinstance(other, Set): + if not isinstance(other, Iterable): + return NotImplemented + other = self._from_iterable(other) + return (self - other) | (other - self) + + __rxor__ = __xor__ + + def _hash(self): + """Compute the hash value of a set. + + Note that we don't define __hash__: not all sets are hashable. + But if you define a hashable set type, its __hash__ should + call this function. + + This must be compatible __eq__. + + All sets ought to compare equal if they contain the same + elements, regardless of how they are implemented, and + regardless of the order of the elements; so there's not much + freedom for __eq__ or __hash__. We match the algorithm used + by the built-in frozenset type. + """ + MAX = sys.maxsize + MASK = 2 * MAX + 1 + n = len(self) + h = 1927868237 * (n + 1) + h &= MASK + for x in self: + hx = hash(x) + h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167 + h &= MASK + h = h * 69069 + 907133923 + h &= MASK + if h > MAX: + h -= MASK + 1 + if h == -1: + h = 590923713 + return h + +Set.register(frozenset) + + +class MutableSet(Set): + """A mutable set is a finite, iterable container. + + This class provides concrete generic implementations of all + methods except for __contains__, __iter__, __len__, + add(), and discard(). + + To override the comparisons (presumably for speed, as the + semantics are fixed), all you have to do is redefine __le__ and + then the other operations will automatically follow suit. + """ + + __slots__ = () + + @abstractmethod + def add(self, value): + """Add an element.""" + raise NotImplementedError + + @abstractmethod + def discard(self, value): + """Remove an element. Do not raise an exception if absent.""" + raise NotImplementedError + + def remove(self, value): + """Remove an element. If not a member, raise a KeyError.""" + if value not in self: + raise KeyError(value) + self.discard(value) + + def pop(self): + """Return the popped value. Raise KeyError if empty.""" + it = iter(self) + try: + value = next(it) + except StopIteration: + raise KeyError from None + self.discard(value) + return value + + def clear(self): + """This is slow (creates N new iterators!) 
but effective.""" + try: + while True: + self.pop() + except KeyError: + pass + + def __ior__(self, it): + for value in it: + self.add(value) + return self + + def __iand__(self, it): + for value in (self - it): + self.discard(value) + return self + + def __ixor__(self, it): + if it is self: + self.clear() + else: + if not isinstance(it, Set): + it = self._from_iterable(it) + for value in it: + if value in self: + self.discard(value) + else: + self.add(value) + return self + + def __isub__(self, it): + if it is self: + self.clear() + else: + for value in it: + self.discard(value) + return self + +MutableSet.register(set) + + +### MAPPINGS ### + + +class Mapping(Collection): + + __slots__ = () + + """A Mapping is a generic container for associating key/value + pairs. + + This class provides concrete generic implementations of all + methods except for __getitem__, __iter__, and __len__. + + """ + + @abstractmethod + def __getitem__(self, key): + raise KeyError + + def get(self, key, default=None): + 'D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.' + try: + return self[key] + except KeyError: + return default + + def __contains__(self, key): + try: + self[key] + except KeyError: + return False + else: + return True + + def keys(self): + "D.keys() -> a set-like object providing a view on D's keys" + return KeysView(self) + + def items(self): + "D.items() -> a set-like object providing a view on D's items" + return ItemsView(self) + + def values(self): + "D.values() -> an object providing a view on D's values" + return ValuesView(self) + + def __eq__(self, other): + if not isinstance(other, Mapping): + return NotImplemented + return dict(self.items()) == dict(other.items()) + + __reversed__ = None + +Mapping.register(mappingproxy) + + +class MappingView(Sized): + + __slots__ = '_mapping', + + def __init__(self, mapping): + self._mapping = mapping + + def __len__(self): + return len(self._mapping) + + def __repr__(self): + return '{0.__class__.__name__}({0._mapping!r})'.format(self) + + +class KeysView(MappingView, Set): + + __slots__ = () + + @classmethod + def _from_iterable(self, it): + return set(it) + + def __contains__(self, key): + return key in self._mapping + + def __iter__(self): + yield from self._mapping + +KeysView.register(dict_keys) + + +class ItemsView(MappingView, Set): + + __slots__ = () + + @classmethod + def _from_iterable(self, it): + return set(it) + + def __contains__(self, item): + key, value = item + try: + v = self._mapping[key] + except KeyError: + return False + else: + return v is value or v == value + + def __iter__(self): + for key in self._mapping: + yield (key, self._mapping[key]) + +ItemsView.register(dict_items) + + +class ValuesView(MappingView, Collection): + + __slots__ = () + + def __contains__(self, value): + for key in self._mapping: + v = self._mapping[key] + if v is value or v == value: + return True + return False + + def __iter__(self): + for key in self._mapping: + yield self._mapping[key] + +ValuesView.register(dict_values) + + +class MutableMapping(Mapping): + + __slots__ = () + + """A MutableMapping is a generic container for associating + key/value pairs. + + This class provides concrete generic implementations of all + methods except for __getitem__, __setitem__, __delitem__, + __iter__, and __len__. 
+ + """ + + @abstractmethod + def __setitem__(self, key, value): + raise KeyError + + @abstractmethod + def __delitem__(self, key): + raise KeyError + + __marker = object() + + def pop(self, key, default=__marker): + '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value. + If key is not found, d is returned if given, otherwise KeyError is raised. + ''' + try: + value = self[key] + except KeyError: + if default is self.__marker: + raise + return default + else: + del self[key] + return value + + def popitem(self): + '''D.popitem() -> (k, v), remove and return some (key, value) pair + as a 2-tuple; but raise KeyError if D is empty. + ''' + try: + key = next(iter(self)) + except StopIteration: + raise KeyError from None + value = self[key] + del self[key] + return key, value + + def clear(self): + 'D.clear() -> None. Remove all items from D.' + try: + while True: + self.popitem() + except KeyError: + pass + + def update(*args, **kwds): + ''' D.update([E, ]**F) -> None. Update D from mapping/iterable E and F. + If E present and has a .keys() method, does: for k in E: D[k] = E[k] + If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v + In either case, this is followed by: for k, v in F.items(): D[k] = v + ''' + if not args: + raise TypeError("descriptor 'update' of 'MutableMapping' object " + "needs an argument") + self, *args = args + if len(args) > 1: + raise TypeError('update expected at most 1 arguments, got %d' % + len(args)) + if args: + other = args[0] + if isinstance(other, Mapping): + for key in other: + self[key] = other[key] + elif hasattr(other, "keys"): + for key in other.keys(): + self[key] = other[key] + else: + for key, value in other: + self[key] = value + for key, value in kwds.items(): + self[key] = value + + def setdefault(self, key, default=None): + 'D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D' + try: + return self[key] + except KeyError: + self[key] = default + return default + +MutableMapping.register(dict) + + +### SEQUENCES ### + + +class Sequence(Reversible, Collection): + + """All the operations on a read-only sequence. + + Concrete subclasses must override __new__ or __init__, + __getitem__, and __len__. + """ + + __slots__ = () + + @abstractmethod + def __getitem__(self, index): + raise IndexError + + def __iter__(self): + i = 0 + try: + while True: + v = self[i] + yield v + i += 1 + except IndexError: + return + + def __contains__(self, value): + for v in self: + if v is value or v == value: + return True + return False + + def __reversed__(self): + for i in reversed(range(len(self))): + yield self[i] + + def index(self, value, start=0, stop=None): + '''S.index(value, [start, [stop]]) -> integer -- return first index of value. + Raises ValueError if the value is not present. + + Supporting start and stop arguments is optional, but + recommended. + ''' + if start is not None and start < 0: + start = max(len(self) + start, 0) + if stop is not None and stop < 0: + stop += len(self) + + i = start + while stop is None or i < stop: + try: + v = self[i] + if v is value or v == value: + return i + except IndexError: + break + i += 1 + raise ValueError + + def count(self, value): + 'S.count(value) -> integer -- return number of occurrences of value' + return sum(1 for v in self if v is value or v == value) + +Sequence.register(tuple) +Sequence.register(str) +Sequence.register(range) +Sequence.register(memoryview) + + +class ByteString(Sequence): + + """This unifies bytes and bytearray. 
+ + XXX Should add all their methods. + """ + + __slots__ = () + +ByteString.register(bytes) +ByteString.register(bytearray) + + +class MutableSequence(Sequence): + + __slots__ = () + + """All the operations on a read-write sequence. + + Concrete subclasses must provide __new__ or __init__, + __getitem__, __setitem__, __delitem__, __len__, and insert(). + + """ + + @abstractmethod + def __setitem__(self, index, value): + raise IndexError + + @abstractmethod + def __delitem__(self, index): + raise IndexError + + @abstractmethod + def insert(self, index, value): + 'S.insert(index, value) -- insert value before index' + raise IndexError + + def append(self, value): + 'S.append(value) -- append value to the end of the sequence' + self.insert(len(self), value) + + def clear(self): + 'S.clear() -> None -- remove all items from S' + try: + while True: + self.pop() + except IndexError: + pass + + def reverse(self): + 'S.reverse() -- reverse *IN PLACE*' + n = len(self) + for i in range(n//2): + self[i], self[n-i-1] = self[n-i-1], self[i] + + def extend(self, values): + 'S.extend(iterable) -- extend sequence by appending elements from the iterable' + for v in values: + self.append(v) + + def pop(self, index=-1): + '''S.pop([index]) -> item -- remove and return item at index (default last). + Raise IndexError if list is empty or index is out of range. + ''' + v = self[index] + del self[index] + return v + + def remove(self, value): + '''S.remove(value) -- remove first occurrence of value. + Raise ValueError if the value is not present. + ''' + del self[self.index(value)] + + def __iadd__(self, values): + self.extend(values) + return self + +MutableSequence.register(list) +MutableSequence.register(bytearray) # Multiply inheriting, see ByteString diff --git a/venv/Lib/_dummy_thread.py b/venv/Lib/_dummy_thread.py new file mode 100644 index 00000000..a2cae54b --- /dev/null +++ b/venv/Lib/_dummy_thread.py @@ -0,0 +1,163 @@ +"""Drop-in replacement for the thread module. + +Meant to be used as a brain-dead substitute so that threaded code does +not need to be rewritten for when the thread module is not present. + +Suggested usage is:: + + try: + import _thread + except ImportError: + import _dummy_thread as _thread + +""" +# Exports only things specified by thread documentation; +# skipping obsolete synonyms allocate(), start_new(), exit_thread(). +__all__ = ['error', 'start_new_thread', 'exit', 'get_ident', 'allocate_lock', + 'interrupt_main', 'LockType'] + +# A dummy value +TIMEOUT_MAX = 2**31 + +# NOTE: this module can be imported early in the extension building process, +# and so top level imports of other modules should be avoided. Instead, all +# imports are done when needed on a function-by-function basis. Since threads +# are disabled, the import lock should not be an issue anyway (??). + +error = RuntimeError + +def start_new_thread(function, args, kwargs={}): + """Dummy implementation of _thread.start_new_thread(). + + Compatibility is maintained by making sure that ``args`` is a + tuple and ``kwargs`` is a dictionary. If an exception is raised + and it is SystemExit (which can be done by _thread.exit()) it is + caught and nothing is done; all other exceptions are printed out + by using traceback.print_exc(). + + If the executed function calls interrupt_main the KeyboardInterrupt will be + raised when the function returns. 
+ + """ + if type(args) != type(tuple()): + raise TypeError("2nd arg must be a tuple") + if type(kwargs) != type(dict()): + raise TypeError("3rd arg must be a dict") + global _main + _main = False + try: + function(*args, **kwargs) + except SystemExit: + pass + except: + import traceback + traceback.print_exc() + _main = True + global _interrupt + if _interrupt: + _interrupt = False + raise KeyboardInterrupt + +def exit(): + """Dummy implementation of _thread.exit().""" + raise SystemExit + +def get_ident(): + """Dummy implementation of _thread.get_ident(). + + Since this module should only be used when _threadmodule is not + available, it is safe to assume that the current process is the + only thread. Thus a constant can be safely returned. + """ + return 1 + +def allocate_lock(): + """Dummy implementation of _thread.allocate_lock().""" + return LockType() + +def stack_size(size=None): + """Dummy implementation of _thread.stack_size().""" + if size is not None: + raise error("setting thread stack size not supported") + return 0 + +def _set_sentinel(): + """Dummy implementation of _thread._set_sentinel().""" + return LockType() + +class LockType(object): + """Class implementing dummy implementation of _thread.LockType. + + Compatibility is maintained by maintaining self.locked_status + which is a boolean that stores the state of the lock. Pickling of + the lock, though, should not be done since if the _thread module is + then used with an unpickled ``lock()`` from here problems could + occur from this class not having atomic methods. + + """ + + def __init__(self): + self.locked_status = False + + def acquire(self, waitflag=None, timeout=-1): + """Dummy implementation of acquire(). + + For blocking calls, self.locked_status is automatically set to + True and returned appropriately based on value of + ``waitflag``. If it is non-blocking, then the value is + actually checked and not set if it is already acquired. This + is all done so that threading.Condition's assert statements + aren't triggered and throw a little fit. + + """ + if waitflag is None or waitflag: + self.locked_status = True + return True + else: + if not self.locked_status: + self.locked_status = True + return True + else: + if timeout > 0: + import time + time.sleep(timeout) + return False + + __enter__ = acquire + + def __exit__(self, typ, val, tb): + self.release() + + def release(self): + """Release the dummy lock.""" + # XXX Perhaps shouldn't actually bother to test? Could lead + # to problems for complex, threaded code. + if not self.locked_status: + raise error + self.locked_status = False + return True + + def locked(self): + return self.locked_status + + def __repr__(self): + return "<%s %s.%s object at %s>" % ( + "locked" if self.locked_status else "unlocked", + self.__class__.__module__, + self.__class__.__qualname__, + hex(id(self)) + ) + +# Used to signal that interrupt_main was called in a "thread" +_interrupt = False +# True when not executing in a "thread" +_main = True + +def interrupt_main(): + """Set _interrupt flag to True to have start_new_thread raise + KeyboardInterrupt upon exiting.""" + if _main: + raise KeyboardInterrupt + else: + global _interrupt + _interrupt = True diff --git a/venv/Lib/_weakrefset.py b/venv/Lib/_weakrefset.py new file mode 100644 index 00000000..304c66f5 --- /dev/null +++ b/venv/Lib/_weakrefset.py @@ -0,0 +1,196 @@ +# Access WeakSet through the weakref module. +# This code is separated-out because it is needed +# by abc.py to load everything else at startup. 
+ +from _weakref import ref + +__all__ = ['WeakSet'] + + +class _IterationGuard: + # This context manager registers itself in the current iterators of the + # weak container, such as to delay all removals until the context manager + # exits. + # This technique should be relatively thread-safe (since sets are). + + def __init__(self, weakcontainer): + # Don't create cycles + self.weakcontainer = ref(weakcontainer) + + def __enter__(self): + w = self.weakcontainer() + if w is not None: + w._iterating.add(self) + return self + + def __exit__(self, e, t, b): + w = self.weakcontainer() + if w is not None: + s = w._iterating + s.remove(self) + if not s: + w._commit_removals() + + +class WeakSet: + def __init__(self, data=None): + self.data = set() + def _remove(item, selfref=ref(self)): + self = selfref() + if self is not None: + if self._iterating: + self._pending_removals.append(item) + else: + self.data.discard(item) + self._remove = _remove + # A list of keys to be removed + self._pending_removals = [] + self._iterating = set() + if data is not None: + self.update(data) + + def _commit_removals(self): + l = self._pending_removals + discard = self.data.discard + while l: + discard(l.pop()) + + def __iter__(self): + with _IterationGuard(self): + for itemref in self.data: + item = itemref() + if item is not None: + # Caveat: the iterator will keep a strong reference to + # `item` until it is resumed or closed. + yield item + + def __len__(self): + return len(self.data) - len(self._pending_removals) + + def __contains__(self, item): + try: + wr = ref(item) + except TypeError: + return False + return wr in self.data + + def __reduce__(self): + return (self.__class__, (list(self),), + getattr(self, '__dict__', None)) + + def add(self, item): + if self._pending_removals: + self._commit_removals() + self.data.add(ref(item, self._remove)) + + def clear(self): + if self._pending_removals: + self._commit_removals() + self.data.clear() + + def copy(self): + return self.__class__(self) + + def pop(self): + if self._pending_removals: + self._commit_removals() + while True: + try: + itemref = self.data.pop() + except KeyError: + raise KeyError('pop from empty WeakSet') from None + item = itemref() + if item is not None: + return item + + def remove(self, item): + if self._pending_removals: + self._commit_removals() + self.data.remove(ref(item)) + + def discard(self, item): + if self._pending_removals: + self._commit_removals() + self.data.discard(ref(item)) + + def update(self, other): + if self._pending_removals: + self._commit_removals() + for element in other: + self.add(element) + + def __ior__(self, other): + self.update(other) + return self + + def difference(self, other): + newset = self.copy() + newset.difference_update(other) + return newset + __sub__ = difference + + def difference_update(self, other): + self.__isub__(other) + def __isub__(self, other): + if self._pending_removals: + self._commit_removals() + if self is other: + self.data.clear() + else: + self.data.difference_update(ref(item) for item in other) + return self + + def intersection(self, other): + return self.__class__(item for item in other if item in self) + __and__ = intersection + + def intersection_update(self, other): + self.__iand__(other) + def __iand__(self, other): + if self._pending_removals: + self._commit_removals() + self.data.intersection_update(ref(item) for item in other) + return self + + def issubset(self, other): + return self.data.issubset(ref(item) for item in other) + __le__ = issubset + + def __lt__(self, 
other): + return self.data < set(map(ref, other)) + + def issuperset(self, other): + return self.data.issuperset(ref(item) for item in other) + __ge__ = issuperset + + def __gt__(self, other): + return self.data > set(map(ref, other)) + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return self.data == set(map(ref, other)) + + def symmetric_difference(self, other): + newset = self.copy() + newset.symmetric_difference_update(other) + return newset + __xor__ = symmetric_difference + + def symmetric_difference_update(self, other): + self.__ixor__(other) + def __ixor__(self, other): + if self._pending_removals: + self._commit_removals() + if self is other: + self.data.clear() + else: + self.data.symmetric_difference_update(ref(item, self._remove) for item in other) + return self + + def union(self, other): + return self.__class__(e for s in (self, other) for e in s) + __or__ = union + + def isdisjoint(self, other): + return len(self.intersection(other)) == 0 diff --git a/venv/Lib/abc.py b/venv/Lib/abc.py new file mode 100644 index 00000000..70941412 --- /dev/null +++ b/venv/Lib/abc.py @@ -0,0 +1,170 @@ +# Copyright 2007 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Abstract Base Classes (ABCs) according to PEP 3119.""" + + +def abstractmethod(funcobj): + """A decorator indicating abstract methods. + + Requires that the metaclass is ABCMeta or derived from it. A + class that has a metaclass derived from ABCMeta cannot be + instantiated unless all of its abstract methods are overridden. + The abstract methods can be called using any of the normal + 'super' call mechanisms. + + Usage: + + class C(metaclass=ABCMeta): + @abstractmethod + def my_abstract_method(self, ...): + ... + """ + funcobj.__isabstractmethod__ = True + return funcobj + + +class abstractclassmethod(classmethod): + """A decorator indicating abstract classmethods. + + Similar to abstractmethod. + + Usage: + + class C(metaclass=ABCMeta): + @abstractclassmethod + def my_abstract_classmethod(cls, ...): + ... + + 'abstractclassmethod' is deprecated. Use 'classmethod' with + 'abstractmethod' instead. + """ + + __isabstractmethod__ = True + + def __init__(self, callable): + callable.__isabstractmethod__ = True + super().__init__(callable) + + +class abstractstaticmethod(staticmethod): + """A decorator indicating abstract staticmethods. + + Similar to abstractmethod. + + Usage: + + class C(metaclass=ABCMeta): + @abstractstaticmethod + def my_abstract_staticmethod(...): + ... + + 'abstractstaticmethod' is deprecated. Use 'staticmethod' with + 'abstractmethod' instead. + """ + + __isabstractmethod__ = True + + def __init__(self, callable): + callable.__isabstractmethod__ = True + super().__init__(callable) + + +class abstractproperty(property): + """A decorator indicating abstract properties. + + Requires that the metaclass is ABCMeta or derived from it. A + class that has a metaclass derived from ABCMeta cannot be + instantiated unless all of its abstract properties are overridden. + The abstract properties can be called using any of the normal + 'super' call mechanisms. + + Usage: + + class C(metaclass=ABCMeta): + @abstractproperty + def my_abstract_property(self): + ... + + This defines a read-only property; you can also define a read-write + abstract property using the 'long' form of property declaration: + + class C(metaclass=ABCMeta): + def getx(self): ... + def setx(self, value): ... 
+ x = abstractproperty(getx, setx) + + 'abstractproperty' is deprecated. Use 'property' with 'abstractmethod' + instead. + """ + + __isabstractmethod__ = True + + +try: + from _abc import (get_cache_token, _abc_init, _abc_register, + _abc_instancecheck, _abc_subclasscheck, _get_dump, + _reset_registry, _reset_caches) +except ImportError: + from _py_abc import ABCMeta, get_cache_token + ABCMeta.__module__ = 'abc' +else: + class ABCMeta(type): + """Metaclass for defining Abstract Base Classes (ABCs). + + Use this metaclass to create an ABC. An ABC can be subclassed + directly, and then acts as a mix-in class. You can also register + unrelated concrete classes (even built-in classes) and unrelated + ABCs as 'virtual subclasses' -- these and their descendants will + be considered subclasses of the registering ABC by the built-in + issubclass() function, but the registering ABC won't show up in + their MRO (Method Resolution Order) nor will method + implementations defined by the registering ABC be callable (not + even via super()). + """ + def __new__(mcls, name, bases, namespace, **kwargs): + cls = super().__new__(mcls, name, bases, namespace, **kwargs) + _abc_init(cls) + return cls + + def register(cls, subclass): + """Register a virtual subclass of an ABC. + + Returns the subclass, to allow usage as a class decorator. + """ + return _abc_register(cls, subclass) + + def __instancecheck__(cls, instance): + """Override for isinstance(instance, cls).""" + return _abc_instancecheck(cls, instance) + + def __subclasscheck__(cls, subclass): + """Override for issubclass(subclass, cls).""" + return _abc_subclasscheck(cls, subclass) + + def _dump_registry(cls, file=None): + """Debug helper to print the ABC registry.""" + print(f"Class: {cls.__module__}.{cls.__qualname__}", file=file) + print(f"Inv. counter: {get_cache_token()}", file=file) + (_abc_registry, _abc_cache, _abc_negative_cache, + _abc_negative_cache_version) = _get_dump(cls) + print(f"_abc_registry: {_abc_registry!r}", file=file) + print(f"_abc_cache: {_abc_cache!r}", file=file) + print(f"_abc_negative_cache: {_abc_negative_cache!r}", file=file) + print(f"_abc_negative_cache_version: {_abc_negative_cache_version!r}", + file=file) + + def _abc_registry_clear(cls): + """Clear the registry (for debugging or testing).""" + _reset_registry(cls) + + def _abc_caches_clear(cls): + """Clear the caches (for debugging or testing).""" + _reset_caches(cls) + + +class ABC(metaclass=ABCMeta): + """Helper class that provides a standard way to create an ABC using + inheritance. + """ + __slots__ = () diff --git a/venv/Lib/base64.py b/venv/Lib/base64.py new file mode 100644 index 00000000..eb8f258a --- /dev/null +++ b/venv/Lib/base64.py @@ -0,0 +1,602 @@ +#! /usr/bin/env python3 + +"""Base16, Base32, Base64 (RFC 3548), Base85 and Ascii85 data encodings""" + +# Modified 04-Oct-1995 by Jack Jansen to use binascii module +# Modified 30-Dec-2003 by Barry Warsaw to add full RFC 3548 support +# Modified 22-May-2007 by Guido van Rossum to use bytes everywhere + +import re +import struct +import binascii + + +__all__ = [ + # Legacy interface exports traditional RFC 2045 Base64 encodings + 'encode', 'decode', 'encodebytes', 'decodebytes', + # Generalized interface for other encodings + 'b64encode', 'b64decode', 'b32encode', 'b32decode', + 'b16encode', 'b16decode', + # Base85 and Ascii85 encodings + 'b85encode', 'b85decode', 'a85encode', 'a85decode', + # Standard Base64 encoding + 'standard_b64encode', 'standard_b64decode', + # Some common Base64 alternatives. 
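The abc machinery above is simplest to see end to end: abstract methods block instantiation until they are overridden, and register() adds virtual subclasses that issubclass() accepts. A small standalone sketch (Shape, Circle and Square are illustrative names, not part of this project):

from abc import ABC, abstractmethod

class Shape(ABC):
    @abstractmethod
    def area(self):
        ...

class Circle(Shape):
    def __init__(self, r):
        self.r = r
    def area(self):
        return 3.14159 * self.r ** 2

try:
    Shape()                       # abstract method not overridden
except TypeError as exc:
    print(exc)                    # "Can't instantiate abstract class Shape ..."

print(Circle(2).area())           # 12.56636

class Square:                     # unrelated class, no inheritance
    pass

Shape.register(Square)            # virtual subclass via ABCMeta.register
print(issubclass(Square, Shape))  # True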
As referenced by RFC 3458, see thread + # starting at: + # + # http://zgp.org/pipermail/p2p-hackers/2001-September/000316.html + 'urlsafe_b64encode', 'urlsafe_b64decode', + ] + + +bytes_types = (bytes, bytearray) # Types acceptable as binary data + +def _bytes_from_decode_data(s): + if isinstance(s, str): + try: + return s.encode('ascii') + except UnicodeEncodeError: + raise ValueError('string argument should contain only ASCII characters') + if isinstance(s, bytes_types): + return s + try: + return memoryview(s).tobytes() + except TypeError: + raise TypeError("argument should be a bytes-like object or ASCII " + "string, not %r" % s.__class__.__name__) from None + + +# Base64 encoding/decoding uses binascii + +def b64encode(s, altchars=None): + """Encode the bytes-like object s using Base64 and return a bytes object. + + Optional altchars should be a byte string of length 2 which specifies an + alternative alphabet for the '+' and '/' characters. This allows an + application to e.g. generate url or filesystem safe Base64 strings. + """ + encoded = binascii.b2a_base64(s, newline=False) + if altchars is not None: + assert len(altchars) == 2, repr(altchars) + return encoded.translate(bytes.maketrans(b'+/', altchars)) + return encoded + + +def b64decode(s, altchars=None, validate=False): + """Decode the Base64 encoded bytes-like object or ASCII string s. + + Optional altchars must be a bytes-like object or ASCII string of length 2 + which specifies the alternative alphabet used instead of the '+' and '/' + characters. + + The result is returned as a bytes object. A binascii.Error is raised if + s is incorrectly padded. + + If validate is False (the default), characters that are neither in the + normal base-64 alphabet nor the alternative alphabet are discarded prior + to the padding check. If validate is True, these non-alphabet characters + in the input result in a binascii.Error. + """ + s = _bytes_from_decode_data(s) + if altchars is not None: + altchars = _bytes_from_decode_data(altchars) + assert len(altchars) == 2, repr(altchars) + s = s.translate(bytes.maketrans(altchars, b'+/')) + if validate and not re.match(b'^[A-Za-z0-9+/]*={0,2}$', s): + raise binascii.Error('Non-base64 digit found') + return binascii.a2b_base64(s) + + +def standard_b64encode(s): + """Encode bytes-like object s using the standard Base64 alphabet. + + The result is returned as a bytes object. + """ + return b64encode(s) + +def standard_b64decode(s): + """Decode bytes encoded with the standard Base64 alphabet. + + Argument s is a bytes-like object or ASCII string to decode. The result + is returned as a bytes object. A binascii.Error is raised if the input + is incorrectly padded. Characters that are not in the standard alphabet + are discarded prior to the padding check. + """ + return b64decode(s) + + +_urlsafe_encode_translation = bytes.maketrans(b'+/', b'-_') +_urlsafe_decode_translation = bytes.maketrans(b'-_', b'+/') + +def urlsafe_b64encode(s): + """Encode bytes using the URL- and filesystem-safe Base64 alphabet. + + Argument s is a bytes-like object to encode. The result is returned as a + bytes object. The alphabet uses '-' instead of '+' and '_' instead of + '/'. + """ + return b64encode(s).translate(_urlsafe_encode_translation) + +def urlsafe_b64decode(s): + """Decode bytes using the URL- and filesystem-safe Base64 alphabet. + + Argument s is a bytes-like object or ASCII string to decode. The result + is returned as a bytes object. A binascii.Error is raised if the input + is incorrectly padded. 
Characters that are not in the URL-safe base-64 + alphabet, and are not a plus '+' or slash '/', are discarded prior to the + padding check. + + The alphabet uses '-' instead of '+' and '_' instead of '/'. + """ + s = _bytes_from_decode_data(s) + s = s.translate(_urlsafe_decode_translation) + return b64decode(s) + + + +# Base32 encoding/decoding must be done in Python +_b32alphabet = b'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567' +_b32tab2 = None +_b32rev = None + +def b32encode(s): + """Encode the bytes-like object s using Base32 and return a bytes object. + """ + global _b32tab2 + # Delay the initialization of the table to not waste memory + # if the function is never called + if _b32tab2 is None: + b32tab = [bytes((i,)) for i in _b32alphabet] + _b32tab2 = [a + b for a in b32tab for b in b32tab] + b32tab = None + + if not isinstance(s, bytes_types): + s = memoryview(s).tobytes() + leftover = len(s) % 5 + # Pad the last quantum with zero bits if necessary + if leftover: + s = s + b'\0' * (5 - leftover) # Don't use += ! + encoded = bytearray() + from_bytes = int.from_bytes + b32tab2 = _b32tab2 + for i in range(0, len(s), 5): + c = from_bytes(s[i: i + 5], 'big') + encoded += (b32tab2[c >> 30] + # bits 1 - 10 + b32tab2[(c >> 20) & 0x3ff] + # bits 11 - 20 + b32tab2[(c >> 10) & 0x3ff] + # bits 21 - 30 + b32tab2[c & 0x3ff] # bits 31 - 40 + ) + # Adjust for any leftover partial quanta + if leftover == 1: + encoded[-6:] = b'======' + elif leftover == 2: + encoded[-4:] = b'====' + elif leftover == 3: + encoded[-3:] = b'===' + elif leftover == 4: + encoded[-1:] = b'=' + return bytes(encoded) + +def b32decode(s, casefold=False, map01=None): + """Decode the Base32 encoded bytes-like object or ASCII string s. + + Optional casefold is a flag specifying whether a lowercase alphabet is + acceptable as input. For security purposes, the default is False. + + RFC 3548 allows for optional mapping of the digit 0 (zero) to the + letter O (oh), and for optional mapping of the digit 1 (one) to + either the letter I (eye) or letter L (el). The optional argument + map01 when not None, specifies which letter the digit 1 should be + mapped to (when map01 is not None, the digit 0 is always mapped to + the letter O). For security purposes the default is None, so that + 0 and 1 are not allowed in the input. + + The result is returned as a bytes object. A binascii.Error is raised if + the input is incorrectly padded or if there are non-alphabet + characters present in the input. + """ + global _b32rev + # Delay the initialization of the table to not waste memory + # if the function is never called + if _b32rev is None: + _b32rev = {v: k for k, v in enumerate(_b32alphabet)} + s = _bytes_from_decode_data(s) + if len(s) % 8: + raise binascii.Error('Incorrect padding') + # Handle section 2.4 zero and one mapping. The flag map01 will be either + # False, or the character to map the digit 1 (one) to. It should be + # either L (el) or I (eye). + if map01 is not None: + map01 = _bytes_from_decode_data(map01) + assert len(map01) == 1, repr(map01) + s = s.translate(bytes.maketrans(b'01', b'O' + map01)) + if casefold: + s = s.upper() + # Strip off pad characters from the right. We need to count the pad + # characters because this will tell us how many null bytes to remove from + # the end of the decoded string. 
+ l = len(s) + s = s.rstrip(b'=') + padchars = l - len(s) + # Now decode the full quanta + decoded = bytearray() + b32rev = _b32rev + for i in range(0, len(s), 8): + quanta = s[i: i + 8] + acc = 0 + try: + for c in quanta: + acc = (acc << 5) + b32rev[c] + except KeyError: + raise binascii.Error('Non-base32 digit found') from None + decoded += acc.to_bytes(5, 'big') + # Process the last, partial quanta + if padchars: + acc <<= 5 * padchars + last = acc.to_bytes(5, 'big') + if padchars == 1: + decoded[-5:] = last[:-1] + elif padchars == 3: + decoded[-5:] = last[:-2] + elif padchars == 4: + decoded[-5:] = last[:-3] + elif padchars == 6: + decoded[-5:] = last[:-4] + else: + raise binascii.Error('Incorrect padding') + return bytes(decoded) + + + +# RFC 3548, Base 16 Alphabet specifies uppercase, but hexlify() returns +# lowercase. The RFC also recommends against accepting input case +# insensitively. +def b16encode(s): + """Encode the bytes-like object s using Base16 and return a bytes object. + """ + return binascii.hexlify(s).upper() + + +def b16decode(s, casefold=False): + """Decode the Base16 encoded bytes-like object or ASCII string s. + + Optional casefold is a flag specifying whether a lowercase alphabet is + acceptable as input. For security purposes, the default is False. + + The result is returned as a bytes object. A binascii.Error is raised if + s is incorrectly padded or if there are non-alphabet characters present + in the input. + """ + s = _bytes_from_decode_data(s) + if casefold: + s = s.upper() + if re.search(b'[^0-9A-F]', s): + raise binascii.Error('Non-base16 digit found') + return binascii.unhexlify(s) + +# +# Ascii85 encoding/decoding +# + +_a85chars = None +_a85chars2 = None +_A85START = b"<~" +_A85END = b"~>" + +def _85encode(b, chars, chars2, pad=False, foldnuls=False, foldspaces=False): + # Helper function for a85encode and b85encode + if not isinstance(b, bytes_types): + b = memoryview(b).tobytes() + + padding = (-len(b)) % 4 + if padding: + b = b + b'\0' * padding + words = struct.Struct('!%dI' % (len(b) // 4)).unpack(b) + + chunks = [b'z' if foldnuls and not word else + b'y' if foldspaces and word == 0x20202020 else + (chars2[word // 614125] + + chars2[word // 85 % 7225] + + chars[word % 85]) + for word in words] + + if padding and not pad: + if chunks[-1] == b'z': + chunks[-1] = chars[0] * 5 + chunks[-1] = chunks[-1][:-padding] + + return b''.join(chunks) + +def a85encode(b, *, foldspaces=False, wrapcol=0, pad=False, adobe=False): + """Encode bytes-like object b using Ascii85 and return a bytes object. + + foldspaces is an optional flag that uses the special short sequence 'y' + instead of 4 consecutive spaces (ASCII 0x20) as supported by 'btoa'. This + feature is not supported by the "standard" Adobe encoding. + + wrapcol controls whether the output should have newline (b'\\n') characters + added to it. If this is non-zero, each output line will be at most this + many characters long. + + pad controls whether the input is padded to a multiple of 4 before + encoding. Note that the btoa implementation always pads. + + adobe controls whether the encoded byte sequence is framed with <~ and ~>, + which is used by the Adobe implementation. 
+ """ + global _a85chars, _a85chars2 + # Delay the initialization of tables to not waste memory + # if the function is never called + if _a85chars is None: + _a85chars = [bytes((i,)) for i in range(33, 118)] + _a85chars2 = [(a + b) for a in _a85chars for b in _a85chars] + + result = _85encode(b, _a85chars, _a85chars2, pad, True, foldspaces) + + if adobe: + result = _A85START + result + if wrapcol: + wrapcol = max(2 if adobe else 1, wrapcol) + chunks = [result[i: i + wrapcol] + for i in range(0, len(result), wrapcol)] + if adobe: + if len(chunks[-1]) + 2 > wrapcol: + chunks.append(b'') + result = b'\n'.join(chunks) + if adobe: + result += _A85END + + return result + +def a85decode(b, *, foldspaces=False, adobe=False, ignorechars=b' \t\n\r\v'): + """Decode the Ascii85 encoded bytes-like object or ASCII string b. + + foldspaces is a flag that specifies whether the 'y' short sequence should be + accepted as shorthand for 4 consecutive spaces (ASCII 0x20). This feature is + not supported by the "standard" Adobe encoding. + + adobe controls whether the input sequence is in Adobe Ascii85 format (i.e. + is framed with <~ and ~>). + + ignorechars should be a byte string containing characters to ignore from the + input. This should only contain whitespace characters, and by default + contains all whitespace characters in ASCII. + + The result is returned as a bytes object. + """ + b = _bytes_from_decode_data(b) + if adobe: + if not b.endswith(_A85END): + raise ValueError( + "Ascii85 encoded byte sequences must end " + "with {!r}".format(_A85END) + ) + if b.startswith(_A85START): + b = b[2:-2] # Strip off start/end markers + else: + b = b[:-2] + # + # We have to go through this stepwise, so as to ignore spaces and handle + # special short sequences + # + packI = struct.Struct('!I').pack + decoded = [] + decoded_append = decoded.append + curr = [] + curr_append = curr.append + curr_clear = curr.clear + for x in b + b'u' * 4: + if b'!'[0] <= x <= b'u'[0]: + curr_append(x) + if len(curr) == 5: + acc = 0 + for x in curr: + acc = 85 * acc + (x - 33) + try: + decoded_append(packI(acc)) + except struct.error: + raise ValueError('Ascii85 overflow') from None + curr_clear() + elif x == b'z'[0]: + if curr: + raise ValueError('z inside Ascii85 5-tuple') + decoded_append(b'\0\0\0\0') + elif foldspaces and x == b'y'[0]: + if curr: + raise ValueError('y inside Ascii85 5-tuple') + decoded_append(b'\x20\x20\x20\x20') + elif x in ignorechars: + # Skip whitespace + continue + else: + raise ValueError('Non-Ascii85 digit found: %c' % x) + + result = b''.join(decoded) + padding = 4 - len(curr) + if padding: + # Throw away the extra padding + result = result[:-padding] + return result + +# The following code is originally taken (with permission) from Mercurial + +_b85alphabet = (b"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" + b"abcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~") +_b85chars = None +_b85chars2 = None +_b85dec = None + +def b85encode(b, pad=False): + """Encode bytes-like object b in base85 format and return a bytes object. + + If pad is true, the input is padded with b'\\0' so its length is a multiple of + 4 bytes before encoding. 
+ """ + global _b85chars, _b85chars2 + # Delay the initialization of tables to not waste memory + # if the function is never called + if _b85chars is None: + _b85chars = [bytes((i,)) for i in _b85alphabet] + _b85chars2 = [(a + b) for a in _b85chars for b in _b85chars] + return _85encode(b, _b85chars, _b85chars2, pad) + +def b85decode(b): + """Decode the base85-encoded bytes-like object or ASCII string b + + The result is returned as a bytes object. + """ + global _b85dec + # Delay the initialization of tables to not waste memory + # if the function is never called + if _b85dec is None: + _b85dec = [None] * 256 + for i, c in enumerate(_b85alphabet): + _b85dec[c] = i + + b = _bytes_from_decode_data(b) + padding = (-len(b)) % 5 + b = b + b'~' * padding + out = [] + packI = struct.Struct('!I').pack + for i in range(0, len(b), 5): + chunk = b[i:i + 5] + acc = 0 + try: + for c in chunk: + acc = acc * 85 + _b85dec[c] + except TypeError: + for j, c in enumerate(chunk): + if _b85dec[c] is None: + raise ValueError('bad base85 character at position %d' + % (i + j)) from None + raise + try: + out.append(packI(acc)) + except struct.error: + raise ValueError('base85 overflow in hunk starting at byte %d' + % i) from None + + result = b''.join(out) + if padding: + result = result[:-padding] + return result + +# Legacy interface. This code could be cleaned up since I don't believe +# binascii has any line length limitations. It just doesn't seem worth it +# though. The files should be opened in binary mode. + +MAXLINESIZE = 76 # Excluding the CRLF +MAXBINSIZE = (MAXLINESIZE//4)*3 + +def encode(input, output): + """Encode a file; input and output are binary files.""" + while True: + s = input.read(MAXBINSIZE) + if not s: + break + while len(s) < MAXBINSIZE: + ns = input.read(MAXBINSIZE-len(s)) + if not ns: + break + s += ns + line = binascii.b2a_base64(s) + output.write(line) + + +def decode(input, output): + """Decode a file; input and output are binary files.""" + while True: + line = input.readline() + if not line: + break + s = binascii.a2b_base64(line) + output.write(s) + +def _input_type_check(s): + try: + m = memoryview(s) + except TypeError as err: + msg = "expected bytes-like object, not %s" % s.__class__.__name__ + raise TypeError(msg) from err + if m.format not in ('c', 'b', 'B'): + msg = ("expected single byte elements, not %r from %s" % + (m.format, s.__class__.__name__)) + raise TypeError(msg) + if m.ndim != 1: + msg = ("expected 1-D data, not %d-D data from %s" % + (m.ndim, s.__class__.__name__)) + raise TypeError(msg) + + +def encodebytes(s): + """Encode a bytestring into a bytes object containing multiple lines + of base-64 data.""" + _input_type_check(s) + pieces = [] + for i in range(0, len(s), MAXBINSIZE): + chunk = s[i : i + MAXBINSIZE] + pieces.append(binascii.b2a_base64(chunk)) + return b"".join(pieces) + +def encodestring(s): + """Legacy alias of encodebytes().""" + import warnings + warnings.warn("encodestring() is a deprecated alias since 3.1, " + "use encodebytes()", + DeprecationWarning, 2) + return encodebytes(s) + + +def decodebytes(s): + """Decode a bytestring of base-64 data into a bytes object.""" + _input_type_check(s) + return binascii.a2b_base64(s) + +def decodestring(s): + """Legacy alias of decodebytes().""" + import warnings + warnings.warn("decodestring() is a deprecated alias since Python 3.1, " + "use decodebytes()", + DeprecationWarning, 2) + return decodebytes(s) + + +# Usable as a script... 
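Everything above the legacy file interface is a family of reversible byte encodings; a short standalone sketch of the most common round trips (the raw bytes are arbitrary sample data):

import base64

raw = b"store-manager-api \xff\xfe"          # arbitrary bytes, illustrative only

# Standard and URL-safe Base64 round-trips.
std = base64.b64encode(raw)
assert base64.b64decode(std) == raw
url = base64.urlsafe_b64encode(raw)
assert b"+" not in url and b"/" not in url   # '-' and '_' are used instead
assert base64.urlsafe_b64decode(url) == raw

# Base32, Base16, Ascii85 and Base85 follow the same encode/decode pattern.
for enc, dec in [(base64.b32encode, base64.b32decode),
                 (base64.b16encode, base64.b16decode),
                 (base64.a85encode, base64.a85decode),
                 (base64.b85encode, base64.b85decode)]:
    assert dec(enc(raw)) == raw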
+def main(): + """Small main program""" + import sys, getopt + try: + opts, args = getopt.getopt(sys.argv[1:], 'deut') + except getopt.error as msg: + sys.stdout = sys.stderr + print(msg) + print("""usage: %s [-d|-e|-u|-t] [file|-] + -d, -u: decode + -e: encode (default) + -t: encode and decode string 'Aladdin:open sesame'"""%sys.argv[0]) + sys.exit(2) + func = encode + for o, a in opts: + if o == '-e': func = encode + if o == '-d': func = decode + if o == '-u': func = decode + if o == '-t': test(); return + if args and args[0] != '-': + with open(args[0], 'rb') as f: + func(f, sys.stdout.buffer) + else: + func(sys.stdin.buffer, sys.stdout.buffer) + + +def test(): + s0 = b"Aladdin:open sesame" + print(repr(s0)) + s1 = encodebytes(s0) + print(repr(s1)) + s2 = decodebytes(s1) + print(repr(s2)) + assert s0 == s2 + + +if __name__ == '__main__': + main() diff --git a/venv/Lib/bisect.py b/venv/Lib/bisect.py new file mode 100644 index 00000000..7732c639 --- /dev/null +++ b/venv/Lib/bisect.py @@ -0,0 +1,92 @@ +"""Bisection algorithms.""" + +def insort_right(a, x, lo=0, hi=None): + """Insert item x in list a, and keep it sorted assuming a is sorted. + + If x is already in a, insert it to the right of the rightmost x. + + Optional args lo (default 0) and hi (default len(a)) bound the + slice of a to be searched. + """ + + if lo < 0: + raise ValueError('lo must be non-negative') + if hi is None: + hi = len(a) + while lo < hi: + mid = (lo+hi)//2 + if x < a[mid]: hi = mid + else: lo = mid+1 + a.insert(lo, x) + +def bisect_right(a, x, lo=0, hi=None): + """Return the index where to insert item x in list a, assuming a is sorted. + + The return value i is such that all e in a[:i] have e <= x, and all e in + a[i:] have e > x. So if x already appears in the list, a.insert(x) will + insert just after the rightmost x already there. + + Optional args lo (default 0) and hi (default len(a)) bound the + slice of a to be searched. + """ + + if lo < 0: + raise ValueError('lo must be non-negative') + if hi is None: + hi = len(a) + while lo < hi: + mid = (lo+hi)//2 + if x < a[mid]: hi = mid + else: lo = mid+1 + return lo + +def insort_left(a, x, lo=0, hi=None): + """Insert item x in list a, and keep it sorted assuming a is sorted. + + If x is already in a, insert it to the left of the leftmost x. + + Optional args lo (default 0) and hi (default len(a)) bound the + slice of a to be searched. + """ + + if lo < 0: + raise ValueError('lo must be non-negative') + if hi is None: + hi = len(a) + while lo < hi: + mid = (lo+hi)//2 + if a[mid] < x: lo = mid+1 + else: hi = mid + a.insert(lo, x) + + +def bisect_left(a, x, lo=0, hi=None): + """Return the index where to insert item x in list a, assuming a is sorted. + + The return value i is such that all e in a[:i] have e < x, and all e in + a[i:] have e >= x. So if x already appears in the list, a.insert(x) will + insert just before the leftmost x already there. + + Optional args lo (default 0) and hi (default len(a)) bound the + slice of a to be searched. 
+ """ + + if lo < 0: + raise ValueError('lo must be non-negative') + if hi is None: + hi = len(a) + while lo < hi: + mid = (lo+hi)//2 + if a[mid] < x: lo = mid+1 + else: hi = mid + return lo + +# Overwrite above definitions with a fast C implementation +try: + from _bisect import * +except ImportError: + pass + +# Create aliases +bisect = bisect_right +insort = insort_right diff --git a/venv/Lib/codecs.py b/venv/Lib/codecs.py new file mode 100644 index 00000000..a70ed20f --- /dev/null +++ b/venv/Lib/codecs.py @@ -0,0 +1,1114 @@ +""" codecs -- Python Codec Registry, API and helpers. + + +Written by Marc-Andre Lemburg (mal@lemburg.com). + +(c) Copyright CNRI, All Rights Reserved. NO WARRANTY. + +""" + +import builtins +import sys + +### Registry and builtin stateless codec functions + +try: + from _codecs import * +except ImportError as why: + raise SystemError('Failed to load the builtin codecs: %s' % why) + +__all__ = ["register", "lookup", "open", "EncodedFile", "BOM", "BOM_BE", + "BOM_LE", "BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE", + "BOM_UTF8", "BOM_UTF16", "BOM_UTF16_LE", "BOM_UTF16_BE", + "BOM_UTF32", "BOM_UTF32_LE", "BOM_UTF32_BE", + "CodecInfo", "Codec", "IncrementalEncoder", "IncrementalDecoder", + "StreamReader", "StreamWriter", + "StreamReaderWriter", "StreamRecoder", + "getencoder", "getdecoder", "getincrementalencoder", + "getincrementaldecoder", "getreader", "getwriter", + "encode", "decode", "iterencode", "iterdecode", + "strict_errors", "ignore_errors", "replace_errors", + "xmlcharrefreplace_errors", + "backslashreplace_errors", "namereplace_errors", + "register_error", "lookup_error"] + +### Constants + +# +# Byte Order Mark (BOM = ZERO WIDTH NO-BREAK SPACE = U+FEFF) +# and its possible byte string values +# for UTF8/UTF16/UTF32 output and little/big endian machines +# + +# UTF-8 +BOM_UTF8 = b'\xef\xbb\xbf' + +# UTF-16, little endian +BOM_LE = BOM_UTF16_LE = b'\xff\xfe' + +# UTF-16, big endian +BOM_BE = BOM_UTF16_BE = b'\xfe\xff' + +# UTF-32, little endian +BOM_UTF32_LE = b'\xff\xfe\x00\x00' + +# UTF-32, big endian +BOM_UTF32_BE = b'\x00\x00\xfe\xff' + +if sys.byteorder == 'little': + + # UTF-16, native endianness + BOM = BOM_UTF16 = BOM_UTF16_LE + + # UTF-32, native endianness + BOM_UTF32 = BOM_UTF32_LE + +else: + + # UTF-16, native endianness + BOM = BOM_UTF16 = BOM_UTF16_BE + + # UTF-32, native endianness + BOM_UTF32 = BOM_UTF32_BE + +# Old broken names (don't use in new code) +BOM32_LE = BOM_UTF16_LE +BOM32_BE = BOM_UTF16_BE +BOM64_LE = BOM_UTF32_LE +BOM64_BE = BOM_UTF32_BE + + +### Codec base classes (defining the API) + +class CodecInfo(tuple): + """Codec details when looking up the codec registry""" + + # Private API to allow Python 3.4 to blacklist the known non-Unicode + # codecs in the standard library. 
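The bisect functions above assume an already-sorted list and locate or insert a value in O(log n) comparisons; a brief standalone sketch of insort and the left/right variants (the scores list is invented sample data):

import bisect

scores = [10, 20, 20, 30]           # must already be sorted

bisect.insort(scores, 25)            # alias for insort_right
assert scores == [10, 20, 20, 25, 30]

# bisect_left and bisect_right differ only for values already present.
assert bisect.bisect_left(scores, 20) == 1
assert bisect.bisect_right(scores, 20) == 3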
A more general mechanism to + # reliably distinguish test encodings from other codecs will hopefully + # be defined for Python 3.5 + # + # See http://bugs.python.org/issue19619 + _is_text_encoding = True # Assume codecs are text encodings by default + + def __new__(cls, encode, decode, streamreader=None, streamwriter=None, + incrementalencoder=None, incrementaldecoder=None, name=None, + *, _is_text_encoding=None): + self = tuple.__new__(cls, (encode, decode, streamreader, streamwriter)) + self.name = name + self.encode = encode + self.decode = decode + self.incrementalencoder = incrementalencoder + self.incrementaldecoder = incrementaldecoder + self.streamwriter = streamwriter + self.streamreader = streamreader + if _is_text_encoding is not None: + self._is_text_encoding = _is_text_encoding + return self + + def __repr__(self): + return "<%s.%s object for encoding %s at %#x>" % \ + (self.__class__.__module__, self.__class__.__qualname__, + self.name, id(self)) + +class Codec: + + """ Defines the interface for stateless encoders/decoders. + + The .encode()/.decode() methods may use different error + handling schemes by providing the errors argument. These + string values are predefined: + + 'strict' - raise a ValueError error (or a subclass) + 'ignore' - ignore the character and continue with the next + 'replace' - replace with a suitable replacement character; + Python will use the official U+FFFD REPLACEMENT + CHARACTER for the builtin Unicode codecs on + decoding and '?' on encoding. + 'surrogateescape' - replace with private code points U+DCnn. + 'xmlcharrefreplace' - Replace with the appropriate XML + character reference (only for encoding). + 'backslashreplace' - Replace with backslashed escape sequences. + 'namereplace' - Replace with \\N{...} escape sequences + (only for encoding). + + The set of allowed values can be extended via register_error. + + """ + def encode(self, input, errors='strict'): + + """ Encodes the object input and returns a tuple (output + object, length consumed). + + errors defines the error handling to apply. It defaults to + 'strict' handling. + + The method may not store state in the Codec instance. Use + StreamWriter for codecs which have to keep state in order to + make encoding efficient. + + The encoder must be able to handle zero length input and + return an empty object of the output object type in this + situation. + + """ + raise NotImplementedError + + def decode(self, input, errors='strict'): + + """ Decodes the object input and returns a tuple (output + object, length consumed). + + input must be an object which provides the bf_getreadbuf + buffer slot. Python strings, buffer objects and memory + mapped files are examples of objects providing this slot. + + errors defines the error handling to apply. It defaults to + 'strict' handling. + + The method may not store state in the Codec instance. Use + StreamReader for codecs which have to keep state in order to + make decoding efficient. + + The decoder must be able to handle zero length input and + return an empty object of the output object type in this + situation. + + """ + raise NotImplementedError + +class IncrementalEncoder(object): + """ + An IncrementalEncoder encodes an input in multiple steps. The input can + be passed piece by piece to the encode() method. The IncrementalEncoder + remembers the state of the encoding process between calls to encode(). + """ + def __init__(self, errors='strict'): + """ + Creates an IncrementalEncoder instance. 
+ + The IncrementalEncoder may use different error handling schemes by + providing the errors keyword argument. See the module docstring + for a list of possible values. + """ + self.errors = errors + self.buffer = "" + + def encode(self, input, final=False): + """ + Encodes input and returns the resulting object. + """ + raise NotImplementedError + + def reset(self): + """ + Resets the encoder to the initial state. + """ + + def getstate(self): + """ + Return the current state of the encoder. + """ + return 0 + + def setstate(self, state): + """ + Set the current state of the encoder. state must have been + returned by getstate(). + """ + +class BufferedIncrementalEncoder(IncrementalEncoder): + """ + This subclass of IncrementalEncoder can be used as the baseclass for an + incremental encoder if the encoder must keep some of the output in a + buffer between calls to encode(). + """ + def __init__(self, errors='strict'): + IncrementalEncoder.__init__(self, errors) + # unencoded input that is kept between calls to encode() + self.buffer = "" + + def _buffer_encode(self, input, errors, final): + # Overwrite this method in subclasses: It must encode input + # and return an (output, length consumed) tuple + raise NotImplementedError + + def encode(self, input, final=False): + # encode input (taking the buffer into account) + data = self.buffer + input + (result, consumed) = self._buffer_encode(data, self.errors, final) + # keep unencoded input until the next call + self.buffer = data[consumed:] + return result + + def reset(self): + IncrementalEncoder.reset(self) + self.buffer = "" + + def getstate(self): + return self.buffer or 0 + + def setstate(self, state): + self.buffer = state or "" + +class IncrementalDecoder(object): + """ + An IncrementalDecoder decodes an input in multiple steps. The input can + be passed piece by piece to the decode() method. The IncrementalDecoder + remembers the state of the decoding process between calls to decode(). + """ + def __init__(self, errors='strict'): + """ + Create an IncrementalDecoder instance. + + The IncrementalDecoder may use different error handling schemes by + providing the errors keyword argument. See the module docstring + for a list of possible values. + """ + self.errors = errors + + def decode(self, input, final=False): + """ + Decode input and returns the resulting object. + """ + raise NotImplementedError + + def reset(self): + """ + Reset the decoder to the initial state. + """ + + def getstate(self): + """ + Return the current state of the decoder. + + This must be a (buffered_input, additional_state_info) tuple. + buffered_input must be a bytes object containing bytes that + were passed to decode() that have not yet been converted. + additional_state_info must be a non-negative integer + representing the state of the decoder WITHOUT yet having + processed the contents of buffered_input. In the initial state + and after reset(), getstate() must return (b"", 0). + """ + return (b"", 0) + + def setstate(self, state): + """ + Set the current state of the decoder. + + state must have been returned by getstate(). The effect of + setstate((b"", 0)) must be equivalent to reset(). + """ + +class BufferedIncrementalDecoder(IncrementalDecoder): + """ + This subclass of IncrementalDecoder can be used as the baseclass for an + incremental decoder if the decoder must be able to handle incomplete + byte sequences. 
+ """ + def __init__(self, errors='strict'): + IncrementalDecoder.__init__(self, errors) + # undecoded input that is kept between calls to decode() + self.buffer = b"" + + def _buffer_decode(self, input, errors, final): + # Overwrite this method in subclasses: It must decode input + # and return an (output, length consumed) tuple + raise NotImplementedError + + def decode(self, input, final=False): + # decode input (taking the buffer into account) + data = self.buffer + input + (result, consumed) = self._buffer_decode(data, self.errors, final) + # keep undecoded input until the next call + self.buffer = data[consumed:] + return result + + def reset(self): + IncrementalDecoder.reset(self) + self.buffer = b"" + + def getstate(self): + # additional state info is always 0 + return (self.buffer, 0) + + def setstate(self, state): + # ignore additional state info + self.buffer = state[0] + +# +# The StreamWriter and StreamReader class provide generic working +# interfaces which can be used to implement new encoding submodules +# very easily. See encodings/utf_8.py for an example on how this is +# done. +# + +class StreamWriter(Codec): + + def __init__(self, stream, errors='strict'): + + """ Creates a StreamWriter instance. + + stream must be a file-like object open for writing. + + The StreamWriter may use different error handling + schemes by providing the errors keyword argument. These + parameters are predefined: + + 'strict' - raise a ValueError (or a subclass) + 'ignore' - ignore the character and continue with the next + 'replace'- replace with a suitable replacement character + 'xmlcharrefreplace' - Replace with the appropriate XML + character reference. + 'backslashreplace' - Replace with backslashed escape + sequences. + 'namereplace' - Replace with \\N{...} escape sequences. + + The set of allowed parameter values can be extended via + register_error. + """ + self.stream = stream + self.errors = errors + + def write(self, object): + + """ Writes the object's contents encoded to self.stream. + """ + data, consumed = self.encode(object, self.errors) + self.stream.write(data) + + def writelines(self, list): + + """ Writes the concatenated list of strings to the stream + using .write(). + """ + self.write(''.join(list)) + + def reset(self): + + """ Flushes and resets the codec buffers used for keeping state. + + Calling this method should ensure that the data on the + output is put into a clean state, that allows appending + of new fresh data without having to rescan the whole + stream to recover state. + + """ + pass + + def seek(self, offset, whence=0): + self.stream.seek(offset, whence) + if whence == 0 and offset == 0: + self.reset() + + def __getattr__(self, name, + getattr=getattr): + + """ Inherit all other methods from the underlying stream. + """ + return getattr(self.stream, name) + + def __enter__(self): + return self + + def __exit__(self, type, value, tb): + self.stream.close() + +### + +class StreamReader(Codec): + + charbuffertype = str + + def __init__(self, stream, errors='strict'): + + """ Creates a StreamReader instance. + + stream must be a file-like object open for reading. + + The StreamReader may use different error handling + schemes by providing the errors keyword argument. 
These + parameters are predefined: + + 'strict' - raise a ValueError (or a subclass) + 'ignore' - ignore the character and continue with the next + 'replace'- replace with a suitable replacement character + 'backslashreplace' - Replace with backslashed escape sequences; + + The set of allowed parameter values can be extended via + register_error. + """ + self.stream = stream + self.errors = errors + self.bytebuffer = b"" + self._empty_charbuffer = self.charbuffertype() + self.charbuffer = self._empty_charbuffer + self.linebuffer = None + + def decode(self, input, errors='strict'): + raise NotImplementedError + + def read(self, size=-1, chars=-1, firstline=False): + + """ Decodes data from the stream self.stream and returns the + resulting object. + + chars indicates the number of decoded code points or bytes to + return. read() will never return more data than requested, + but it might return less, if there is not enough available. + + size indicates the approximate maximum number of decoded + bytes or code points to read for decoding. The decoder + can modify this setting as appropriate. The default value + -1 indicates to read and decode as much as possible. size + is intended to prevent having to decode huge files in one + step. + + If firstline is true, and a UnicodeDecodeError happens + after the first line terminator in the input only the first line + will be returned, the rest of the input will be kept until the + next call to read(). + + The method should use a greedy read strategy, meaning that + it should read as much data as is allowed within the + definition of the encoding and the given size, e.g. if + optional encoding endings or state markers are available + on the stream, these should be read too. + """ + # If we have lines cached, first merge them back into characters + if self.linebuffer: + self.charbuffer = self._empty_charbuffer.join(self.linebuffer) + self.linebuffer = None + + if chars < 0: + # For compatibility with other read() methods that take a + # single argument + chars = size + + # read until we get the required number of characters (if available) + while True: + # can the request be satisfied from the character buffer? + if chars >= 0: + if len(self.charbuffer) >= chars: + break + # we need more data + if size < 0: + newdata = self.stream.read() + else: + newdata = self.stream.read(size) + # decode bytes (those remaining from the last call included) + data = self.bytebuffer + newdata + if not data: + break + try: + newchars, decodedbytes = self.decode(data, self.errors) + except UnicodeDecodeError as exc: + if firstline: + newchars, decodedbytes = \ + self.decode(data[:exc.start], self.errors) + lines = newchars.splitlines(keepends=True) + if len(lines)<=1: + raise + else: + raise + # keep undecoded bytes until the next call + self.bytebuffer = data[decodedbytes:] + # put new characters in the character buffer + self.charbuffer += newchars + # there was no data available + if not newdata: + break + if chars < 0: + # Return everything we've got + result = self.charbuffer + self.charbuffer = self._empty_charbuffer + else: + # Return the first chars characters + result = self.charbuffer[:chars] + self.charbuffer = self.charbuffer[chars:] + return result + + def readline(self, size=None, keepends=True): + + """ Read one line from the input stream and return the + decoded data. + + size, if given, is passed as size argument to the + read() method. 
+ + """ + # If we have lines cached from an earlier read, return + # them unconditionally + if self.linebuffer: + line = self.linebuffer[0] + del self.linebuffer[0] + if len(self.linebuffer) == 1: + # revert to charbuffer mode; we might need more data + # next time + self.charbuffer = self.linebuffer[0] + self.linebuffer = None + if not keepends: + line = line.splitlines(keepends=False)[0] + return line + + readsize = size or 72 + line = self._empty_charbuffer + # If size is given, we call read() only once + while True: + data = self.read(readsize, firstline=True) + if data: + # If we're at a "\r" read one extra character (which might + # be a "\n") to get a proper line ending. If the stream is + # temporarily exhausted we return the wrong line ending. + if (isinstance(data, str) and data.endswith("\r")) or \ + (isinstance(data, bytes) and data.endswith(b"\r")): + data += self.read(size=1, chars=1) + + line += data + lines = line.splitlines(keepends=True) + if lines: + if len(lines) > 1: + # More than one line result; the first line is a full line + # to return + line = lines[0] + del lines[0] + if len(lines) > 1: + # cache the remaining lines + lines[-1] += self.charbuffer + self.linebuffer = lines + self.charbuffer = None + else: + # only one remaining line, put it back into charbuffer + self.charbuffer = lines[0] + self.charbuffer + if not keepends: + line = line.splitlines(keepends=False)[0] + break + line0withend = lines[0] + line0withoutend = lines[0].splitlines(keepends=False)[0] + if line0withend != line0withoutend: # We really have a line end + # Put the rest back together and keep it until the next call + self.charbuffer = self._empty_charbuffer.join(lines[1:]) + \ + self.charbuffer + if keepends: + line = line0withend + else: + line = line0withoutend + break + # we didn't get anything or this was our only try + if not data or size is not None: + if line and not keepends: + line = line.splitlines(keepends=False)[0] + break + if readsize < 8000: + readsize *= 2 + return line + + def readlines(self, sizehint=None, keepends=True): + + """ Read all lines available on the input stream + and return them as a list. + + Line breaks are implemented using the codec's decoder + method and are included in the list entries. + + sizehint, if given, is ignored since there is no efficient + way to finding the true end-of-line. + + """ + data = self.read() + return data.splitlines(keepends) + + def reset(self): + + """ Resets the codec buffers used for keeping state. + + Note that no stream repositioning should take place. + This method is primarily intended to be able to recover + from decoding errors. + + """ + self.bytebuffer = b"" + self.charbuffer = self._empty_charbuffer + self.linebuffer = None + + def seek(self, offset, whence=0): + """ Set the input stream's current position. + + Resets the codec buffers used for keeping state. + """ + self.stream.seek(offset, whence) + self.reset() + + def __next__(self): + + """ Return the next decoded line from the input stream.""" + line = self.readline() + if line: + return line + raise StopIteration + + def __iter__(self): + return self + + def __getattr__(self, name, + getattr=getattr): + + """ Inherit all other methods from the underlying stream. + """ + return getattr(self.stream, name) + + def __enter__(self): + return self + + def __exit__(self, type, value, tb): + self.stream.close() + +### + +class StreamReaderWriter: + + """ StreamReaderWriter instances allow wrapping streams which + work in both read and write modes. 
+ + The design is such that one can use the factory functions + returned by the codec.lookup() function to construct the + instance. + + """ + # Optional attributes set by the file wrappers below + encoding = 'unknown' + + def __init__(self, stream, Reader, Writer, errors='strict'): + + """ Creates a StreamReaderWriter instance. + + stream must be a Stream-like object. + + Reader, Writer must be factory functions or classes + providing the StreamReader, StreamWriter interface resp. + + Error handling is done in the same way as defined for the + StreamWriter/Readers. + + """ + self.stream = stream + self.reader = Reader(stream, errors) + self.writer = Writer(stream, errors) + self.errors = errors + + def read(self, size=-1): + + return self.reader.read(size) + + def readline(self, size=None): + + return self.reader.readline(size) + + def readlines(self, sizehint=None): + + return self.reader.readlines(sizehint) + + def __next__(self): + + """ Return the next decoded line from the input stream.""" + return next(self.reader) + + def __iter__(self): + return self + + def write(self, data): + + return self.writer.write(data) + + def writelines(self, list): + + return self.writer.writelines(list) + + def reset(self): + + self.reader.reset() + self.writer.reset() + + def seek(self, offset, whence=0): + self.stream.seek(offset, whence) + self.reader.reset() + if whence == 0 and offset == 0: + self.writer.reset() + + def __getattr__(self, name, + getattr=getattr): + + """ Inherit all other methods from the underlying stream. + """ + return getattr(self.stream, name) + + # these are needed to make "with StreamReaderWriter(...)" work properly + + def __enter__(self): + return self + + def __exit__(self, type, value, tb): + self.stream.close() + +### + +class StreamRecoder: + + """ StreamRecoder instances translate data from one encoding to another. + + They use the complete set of APIs returned by the + codecs.lookup() function to implement their task. + + Data written to the StreamRecoder is first decoded into an + intermediate format (depending on the "decode" codec) and then + written to the underlying stream using an instance of the provided + Writer class. + + In the other direction, data is read from the underlying stream using + a Reader instance and then encoded and returned to the caller. + + """ + # Optional attributes set by the file wrappers below + data_encoding = 'unknown' + file_encoding = 'unknown' + + def __init__(self, stream, encode, decode, Reader, Writer, + errors='strict'): + + """ Creates a StreamRecoder instance which implements a two-way + conversion: encode and decode work on the frontend (the + data visible to .read() and .write()) while Reader and Writer + work on the backend (the data in stream). + + You can use these objects to do transparent + transcodings from e.g. latin-1 to utf-8 and back. + + stream must be a file-like object. + + encode and decode must adhere to the Codec interface; Reader and + Writer must be factory functions or classes providing the + StreamReader and StreamWriter interfaces resp. + + Error handling is done in the same way as defined for the + StreamWriter/Readers. 
+ + """ + self.stream = stream + self.encode = encode + self.decode = decode + self.reader = Reader(stream, errors) + self.writer = Writer(stream, errors) + self.errors = errors + + def read(self, size=-1): + + data = self.reader.read(size) + data, bytesencoded = self.encode(data, self.errors) + return data + + def readline(self, size=None): + + if size is None: + data = self.reader.readline() + else: + data = self.reader.readline(size) + data, bytesencoded = self.encode(data, self.errors) + return data + + def readlines(self, sizehint=None): + + data = self.reader.read() + data, bytesencoded = self.encode(data, self.errors) + return data.splitlines(keepends=True) + + def __next__(self): + + """ Return the next decoded line from the input stream.""" + data = next(self.reader) + data, bytesencoded = self.encode(data, self.errors) + return data + + def __iter__(self): + return self + + def write(self, data): + + data, bytesdecoded = self.decode(data, self.errors) + return self.writer.write(data) + + def writelines(self, list): + + data = ''.join(list) + data, bytesdecoded = self.decode(data, self.errors) + return self.writer.write(data) + + def reset(self): + + self.reader.reset() + self.writer.reset() + + def __getattr__(self, name, + getattr=getattr): + + """ Inherit all other methods from the underlying stream. + """ + return getattr(self.stream, name) + + def __enter__(self): + return self + + def __exit__(self, type, value, tb): + self.stream.close() + +### Shortcuts + +def open(filename, mode='r', encoding=None, errors='strict', buffering=1): + + """ Open an encoded file using the given mode and return + a wrapped version providing transparent encoding/decoding. + + Note: The wrapped version will only accept the object format + defined by the codecs, i.e. Unicode objects for most builtin + codecs. Output is also codec dependent and will usually be + Unicode as well. + + Underlying encoded files are always opened in binary mode. + The default file mode is 'r', meaning to open the file in read mode. + + encoding specifies the encoding which is to be used for the + file. + + errors may be given to define the error handling. It defaults + to 'strict' which causes ValueErrors to be raised in case an + encoding error occurs. + + buffering has the same meaning as for the builtin open() API. + It defaults to line buffered. + + The returned wrapped file object provides an extra attribute + .encoding which allows querying the used encoding. This + attribute is only available if an encoding was specified as + parameter. + + """ + if encoding is not None and \ + 'b' not in mode: + # Force opening of the file in binary mode + mode = mode + 'b' + file = builtins.open(filename, mode, buffering) + if encoding is None: + return file + info = lookup(encoding) + srw = StreamReaderWriter(file, info.streamreader, info.streamwriter, errors) + # Add attributes to simplify introspection + srw.encoding = encoding + return srw + +def EncodedFile(file, data_encoding, file_encoding=None, errors='strict'): + + """ Return a wrapped version of file which provides transparent + encoding translation. + + Data written to the wrapped file is decoded according + to the given data_encoding and then encoded to the underlying + file using file_encoding. The intermediate data type + will usually be Unicode but depends on the specified codecs. + + Bytes read from the file are decoded using file_encoding and then + passed back to the caller encoded using data_encoding. 
+ + If file_encoding is not given, it defaults to data_encoding. + + errors may be given to define the error handling. It defaults + to 'strict' which causes ValueErrors to be raised in case an + encoding error occurs. + + The returned wrapped file object provides two extra attributes + .data_encoding and .file_encoding which reflect the given + parameters of the same name. The attributes can be used for + introspection by Python programs. + + """ + if file_encoding is None: + file_encoding = data_encoding + data_info = lookup(data_encoding) + file_info = lookup(file_encoding) + sr = StreamRecoder(file, data_info.encode, data_info.decode, + file_info.streamreader, file_info.streamwriter, errors) + # Add attributes to simplify introspection + sr.data_encoding = data_encoding + sr.file_encoding = file_encoding + return sr + +### Helpers for codec lookup + +def getencoder(encoding): + + """ Lookup up the codec for the given encoding and return + its encoder function. + + Raises a LookupError in case the encoding cannot be found. + + """ + return lookup(encoding).encode + +def getdecoder(encoding): + + """ Lookup up the codec for the given encoding and return + its decoder function. + + Raises a LookupError in case the encoding cannot be found. + + """ + return lookup(encoding).decode + +def getincrementalencoder(encoding): + + """ Lookup up the codec for the given encoding and return + its IncrementalEncoder class or factory function. + + Raises a LookupError in case the encoding cannot be found + or the codecs doesn't provide an incremental encoder. + + """ + encoder = lookup(encoding).incrementalencoder + if encoder is None: + raise LookupError(encoding) + return encoder + +def getincrementaldecoder(encoding): + + """ Lookup up the codec for the given encoding and return + its IncrementalDecoder class or factory function. + + Raises a LookupError in case the encoding cannot be found + or the codecs doesn't provide an incremental decoder. + + """ + decoder = lookup(encoding).incrementaldecoder + if decoder is None: + raise LookupError(encoding) + return decoder + +def getreader(encoding): + + """ Lookup up the codec for the given encoding and return + its StreamReader class or factory function. + + Raises a LookupError in case the encoding cannot be found. + + """ + return lookup(encoding).streamreader + +def getwriter(encoding): + + """ Lookup up the codec for the given encoding and return + its StreamWriter class or factory function. + + Raises a LookupError in case the encoding cannot be found. + + """ + return lookup(encoding).streamwriter + +def iterencode(iterator, encoding, errors='strict', **kwargs): + """ + Encoding iterator. + + Encodes the input strings from the iterator using an IncrementalEncoder. + + errors and kwargs are passed through to the IncrementalEncoder + constructor. + """ + encoder = getincrementalencoder(encoding)(errors, **kwargs) + for input in iterator: + output = encoder.encode(input) + if output: + yield output + output = encoder.encode("", True) + if output: + yield output + +def iterdecode(iterator, encoding, errors='strict', **kwargs): + """ + Decoding iterator. + + Decodes the input strings from the iterator using an IncrementalDecoder. + + errors and kwargs are passed through to the IncrementalDecoder + constructor. 
+ """ + decoder = getincrementaldecoder(encoding)(errors, **kwargs) + for input in iterator: + output = decoder.decode(input) + if output: + yield output + output = decoder.decode(b"", True) + if output: + yield output + +### Helpers for charmap-based codecs + +def make_identity_dict(rng): + + """ make_identity_dict(rng) -> dict + + Return a dictionary where elements of the rng sequence are + mapped to themselves. + + """ + return {i:i for i in rng} + +def make_encoding_map(decoding_map): + + """ Creates an encoding map from a decoding map. + + If a target mapping in the decoding map occurs multiple + times, then that target is mapped to None (undefined mapping), + causing an exception when encountered by the charmap codec + during translation. + + One example where this happens is cp875.py which decodes + multiple character to \\u001a. + + """ + m = {} + for k,v in decoding_map.items(): + if not v in m: + m[v] = k + else: + m[v] = None + return m + +### error handlers + +try: + strict_errors = lookup_error("strict") + ignore_errors = lookup_error("ignore") + replace_errors = lookup_error("replace") + xmlcharrefreplace_errors = lookup_error("xmlcharrefreplace") + backslashreplace_errors = lookup_error("backslashreplace") + namereplace_errors = lookup_error("namereplace") +except LookupError: + # In --disable-unicode builds, these error handler are missing + strict_errors = None + ignore_errors = None + replace_errors = None + xmlcharrefreplace_errors = None + backslashreplace_errors = None + namereplace_errors = None + +# Tell modulefinder that using codecs probably needs the encodings +# package +_false = 0 +if _false: + import encodings + +### Tests + +if __name__ == '__main__': + + # Make stdout translate Latin-1 output into UTF-8 output + sys.stdout = EncodedFile(sys.stdout, 'latin-1', 'utf-8') + + # Have stdin translate Latin-1 input into UTF-8 input + sys.stdin = EncodedFile(sys.stdin, 'utf-8', 'latin-1') diff --git a/venv/Lib/collections/__init__.py b/venv/Lib/collections/__init__.py new file mode 100644 index 00000000..9a753db7 --- /dev/null +++ b/venv/Lib/collections/__init__.py @@ -0,0 +1,1279 @@ +'''This module implements specialized container datatypes providing +alternatives to Python's general purpose built-in containers, dict, +list, set, and tuple. 
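The incremental encoder/decoder classes defined above are what iterencode and iterdecode drive; a standalone sketch of decoding a byte stream whose chunks split a multi-byte character (the chunk values are made up for illustration):

import codecs

chunks = [b"caf", b"\xc3", b"\xa9 au lait"]   # UTF-8 split mid-character

# iterdecode feeds each chunk to an IncrementalDecoder, which buffers the
# incomplete byte until the rest of the sequence arrives.
text = "".join(codecs.iterdecode(chunks, "utf-8"))
assert text == "café au lait"

# The same machinery, driven by hand:
dec = codecs.getincrementaldecoder("utf-8")()
parts = [dec.decode(c) for c in chunks] + [dec.decode(b"", final=True)]
assert "".join(parts) == text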
+ +* namedtuple factory function for creating tuple subclasses with named fields +* deque list-like container with fast appends and pops on either end +* ChainMap dict-like class for creating a single view of multiple mappings +* Counter dict subclass for counting hashable objects +* OrderedDict dict subclass that remembers the order entries were added +* defaultdict dict subclass that calls a factory function to supply missing values +* UserDict wrapper around dictionary objects for easier dict subclassing +* UserList wrapper around list objects for easier list subclassing +* UserString wrapper around string objects for easier string subclassing + +''' + +__all__ = ['deque', 'defaultdict', 'namedtuple', 'UserDict', 'UserList', + 'UserString', 'Counter', 'OrderedDict', 'ChainMap'] + +import _collections_abc +from operator import itemgetter as _itemgetter, eq as _eq +from keyword import iskeyword as _iskeyword +import sys as _sys +import heapq as _heapq +from _weakref import proxy as _proxy +from itertools import repeat as _repeat, chain as _chain, starmap as _starmap +from reprlib import recursive_repr as _recursive_repr + +try: + from _collections import deque +except ImportError: + pass +else: + _collections_abc.MutableSequence.register(deque) + +try: + from _collections import defaultdict +except ImportError: + pass + + +def __getattr__(name): + # For backwards compatibility, continue to make the collections ABCs + # through Python 3.6 available through the collections module. + # Note, no new collections ABCs were added in Python 3.7 + if name in _collections_abc.__all__: + obj = getattr(_collections_abc, name) + import warnings + warnings.warn("Using or importing the ABCs from 'collections' instead " + "of from 'collections.abc' is deprecated, " + "and in 3.8 it will stop working", + DeprecationWarning, stacklevel=2) + globals()[name] = obj + return obj + raise AttributeError(f'module {__name__!r} has no attribute {name!r}') + +################################################################################ +### OrderedDict +################################################################################ + +class _OrderedDictKeysView(_collections_abc.KeysView): + + def __reversed__(self): + yield from reversed(self._mapping) + +class _OrderedDictItemsView(_collections_abc.ItemsView): + + def __reversed__(self): + for key in reversed(self._mapping): + yield (key, self._mapping[key]) + +class _OrderedDictValuesView(_collections_abc.ValuesView): + + def __reversed__(self): + for key in reversed(self._mapping): + yield self._mapping[key] + +class _Link(object): + __slots__ = 'prev', 'next', 'key', '__weakref__' + +class OrderedDict(dict): + 'Dictionary that remembers insertion order' + # An inherited dict maps keys to values. + # The inherited dict provides __getitem__, __len__, __contains__, and get. + # The remaining methods are order-aware. + # Big-O running times for all methods are the same as regular dictionaries. + + # The internal self.__map dict maps keys to links in a doubly linked list. + # The circular doubly linked list starts and ends with a sentinel element. + # The sentinel element never gets deleted (this simplifies the algorithm). + # The sentinel is in self.__hardroot with a weakref proxy in self.__root. + # The prev links are weakref proxies (to prevent circular references). + # Individual links are kept alive by the hard reference in self.__map. + # Those hard references disappear when a key is deleted from an OrderedDict. 
+ + def __init__(*args, **kwds): + '''Initialize an ordered dictionary. The signature is the same as + regular dictionaries. Keyword argument order is preserved. + ''' + if not args: + raise TypeError("descriptor '__init__' of 'OrderedDict' object " + "needs an argument") + self, *args = args + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + try: + self.__root + except AttributeError: + self.__hardroot = _Link() + self.__root = root = _proxy(self.__hardroot) + root.prev = root.next = root + self.__map = {} + self.__update(*args, **kwds) + + def __setitem__(self, key, value, + dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link): + 'od.__setitem__(i, y) <==> od[i]=y' + # Setting a new item creates a new link at the end of the linked list, + # and the inherited dictionary is updated with the new key/value pair. + if key not in self: + self.__map[key] = link = Link() + root = self.__root + last = root.prev + link.prev, link.next, link.key = last, root, key + last.next = link + root.prev = proxy(link) + dict_setitem(self, key, value) + + def __delitem__(self, key, dict_delitem=dict.__delitem__): + 'od.__delitem__(y) <==> del od[y]' + # Deleting an existing item uses self.__map to find the link which gets + # removed by updating the links in the predecessor and successor nodes. + dict_delitem(self, key) + link = self.__map.pop(key) + link_prev = link.prev + link_next = link.next + link_prev.next = link_next + link_next.prev = link_prev + link.prev = None + link.next = None + + def __iter__(self): + 'od.__iter__() <==> iter(od)' + # Traverse the linked list in order. + root = self.__root + curr = root.next + while curr is not root: + yield curr.key + curr = curr.next + + def __reversed__(self): + 'od.__reversed__() <==> reversed(od)' + # Traverse the linked list in reverse order. + root = self.__root + curr = root.prev + while curr is not root: + yield curr.key + curr = curr.prev + + def clear(self): + 'od.clear() -> None. Remove all items from od.' + root = self.__root + root.prev = root.next = root + self.__map.clear() + dict.clear(self) + + def popitem(self, last=True): + '''Remove and return a (key, value) pair from the dictionary. + + Pairs are returned in LIFO order if last is true or FIFO order if false. + ''' + if not self: + raise KeyError('dictionary is empty') + root = self.__root + if last: + link = root.prev + link_prev = link.prev + link_prev.next = root + root.prev = link_prev + else: + link = root.next + link_next = link.next + root.next = link_next + link_next.prev = root + key = link.key + del self.__map[key] + value = dict.pop(self, key) + return key, value + + def move_to_end(self, key, last=True): + '''Move an existing element to the end (or beginning if last is false). + + Raise KeyError if the element does not exist. 
+ ''' + link = self.__map[key] + link_prev = link.prev + link_next = link.next + soft_link = link_next.prev + link_prev.next = link_next + link_next.prev = link_prev + root = self.__root + if last: + last = root.prev + link.prev = last + link.next = root + root.prev = soft_link + last.next = link + else: + first = root.next + link.prev = root + link.next = first + first.prev = soft_link + root.next = link + + def __sizeof__(self): + sizeof = _sys.getsizeof + n = len(self) + 1 # number of links including root + size = sizeof(self.__dict__) # instance dictionary + size += sizeof(self.__map) * 2 # internal dict and inherited dict + size += sizeof(self.__hardroot) * n # link objects + size += sizeof(self.__root) * n # proxy objects + return size + + update = __update = _collections_abc.MutableMapping.update + + def keys(self): + "D.keys() -> a set-like object providing a view on D's keys" + return _OrderedDictKeysView(self) + + def items(self): + "D.items() -> a set-like object providing a view on D's items" + return _OrderedDictItemsView(self) + + def values(self): + "D.values() -> an object providing a view on D's values" + return _OrderedDictValuesView(self) + + __ne__ = _collections_abc.MutableMapping.__ne__ + + __marker = object() + + def pop(self, key, default=__marker): + '''od.pop(k[,d]) -> v, remove specified key and return the corresponding + value. If key is not found, d is returned if given, otherwise KeyError + is raised. + + ''' + if key in self: + result = self[key] + del self[key] + return result + if default is self.__marker: + raise KeyError(key) + return default + + def setdefault(self, key, default=None): + '''Insert key with a value of default if key is not in the dictionary. + + Return the value for key if key is in the dictionary, else default. + ''' + if key in self: + return self[key] + self[key] = default + return default + + @_recursive_repr() + def __repr__(self): + 'od.__repr__() <==> repr(od)' + if not self: + return '%s()' % (self.__class__.__name__,) + return '%s(%r)' % (self.__class__.__name__, list(self.items())) + + def __reduce__(self): + 'Return state information for pickling' + inst_dict = vars(self).copy() + for k in vars(OrderedDict()): + inst_dict.pop(k, None) + return self.__class__, (), inst_dict or None, None, iter(self.items()) + + def copy(self): + 'od.copy() -> a shallow copy of od' + return self.__class__(self) + + @classmethod + def fromkeys(cls, iterable, value=None): + '''Create a new ordered dictionary with keys from iterable and values set to value. + ''' + self = cls() + for key in iterable: + self[key] = value + return self + + def __eq__(self, other): + '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive + while comparison to a regular mapping is order-insensitive. + + ''' + if isinstance(other, OrderedDict): + return dict.__eq__(self, other) and all(map(_eq, self, other)) + return dict.__eq__(self, other) + + +try: + from _collections import OrderedDict +except ImportError: + # Leave the pure Python version in place. + pass + + +################################################################################ +### namedtuple +################################################################################ + +_nt_itemgetters = {} + +def namedtuple(typename, field_names, *, rename=False, defaults=None, module=None): + """Returns a new subclass of tuple with named fields. 
+ + >>> Point = namedtuple('Point', ['x', 'y']) + >>> Point.__doc__ # docstring for the new class + 'Point(x, y)' + >>> p = Point(11, y=22) # instantiate with positional args or keywords + >>> p[0] + p[1] # indexable like a plain tuple + 33 + >>> x, y = p # unpack like a regular tuple + >>> x, y + (11, 22) + >>> p.x + p.y # fields also accessible by name + 33 + >>> d = p._asdict() # convert to a dictionary + >>> d['x'] + 11 + >>> Point(**d) # convert from a dictionary + Point(x=11, y=22) + >>> p._replace(x=100) # _replace() is like str.replace() but targets named fields + Point(x=100, y=22) + + """ + + # Validate the field names. At the user's option, either generate an error + # message or automatically replace the field name with a valid name. + if isinstance(field_names, str): + field_names = field_names.replace(',', ' ').split() + field_names = list(map(str, field_names)) + typename = _sys.intern(str(typename)) + + if rename: + seen = set() + for index, name in enumerate(field_names): + if (not name.isidentifier() + or _iskeyword(name) + or name.startswith('_') + or name in seen): + field_names[index] = f'_{index}' + seen.add(name) + + for name in [typename] + field_names: + if type(name) is not str: + raise TypeError('Type names and field names must be strings') + if not name.isidentifier(): + raise ValueError('Type names and field names must be valid ' + f'identifiers: {name!r}') + if _iskeyword(name): + raise ValueError('Type names and field names cannot be a ' + f'keyword: {name!r}') + + seen = set() + for name in field_names: + if name.startswith('_') and not rename: + raise ValueError('Field names cannot start with an underscore: ' + f'{name!r}') + if name in seen: + raise ValueError(f'Encountered duplicate field name: {name!r}') + seen.add(name) + + field_defaults = {} + if defaults is not None: + defaults = tuple(defaults) + if len(defaults) > len(field_names): + raise TypeError('Got more default values than field names') + field_defaults = dict(reversed(list(zip(reversed(field_names), + reversed(defaults))))) + + # Variables used in the methods and docstrings + field_names = tuple(map(_sys.intern, field_names)) + num_fields = len(field_names) + arg_list = repr(field_names).replace("'", "")[1:-1] + repr_fmt = '(' + ', '.join(f'{name}=%r' for name in field_names) + ')' + tuple_new = tuple.__new__ + _len = len + + # Create all the named tuple methods to be added to the class namespace + + s = f'def __new__(_cls, {arg_list}): return _tuple_new(_cls, ({arg_list}))' + namespace = {'_tuple_new': tuple_new, '__name__': f'namedtuple_{typename}'} + # Note: exec() has the side-effect of interning the field names + exec(s, namespace) + __new__ = namespace['__new__'] + __new__.__doc__ = f'Create new instance of {typename}({arg_list})' + if defaults is not None: + __new__.__defaults__ = defaults + + @classmethod + def _make(cls, iterable): + result = tuple_new(cls, iterable) + if _len(result) != num_fields: + raise TypeError(f'Expected {num_fields} arguments, got {len(result)}') + return result + + _make.__func__.__doc__ = (f'Make a new {typename} object from a sequence ' + 'or iterable') + + def _replace(_self, **kwds): + result = _self._make(map(kwds.pop, field_names, _self)) + if kwds: + raise ValueError(f'Got unexpected field names: {list(kwds)!r}') + return result + + _replace.__doc__ = (f'Return a new {typename} object replacing specified ' + 'fields with new values') + + def __repr__(self): + 'Return a nicely formatted representation string' + return self.__class__.__name__ + 
repr_fmt % self + + def _asdict(self): + 'Return a new OrderedDict which maps field names to their values.' + return OrderedDict(zip(self._fields, self)) + + def __getnewargs__(self): + 'Return self as a plain tuple. Used by copy and pickle.' + return tuple(self) + + # Modify function metadata to help with introspection and debugging + + for method in (__new__, _make.__func__, _replace, + __repr__, _asdict, __getnewargs__): + method.__qualname__ = f'{typename}.{method.__name__}' + + # Build-up the class namespace dictionary + # and use type() to build the result class + class_namespace = { + '__doc__': f'{typename}({arg_list})', + '__slots__': (), + '_fields': field_names, + '_fields_defaults': field_defaults, + '__new__': __new__, + '_make': _make, + '_replace': _replace, + '__repr__': __repr__, + '_asdict': _asdict, + '__getnewargs__': __getnewargs__, + } + cache = _nt_itemgetters + for index, name in enumerate(field_names): + try: + itemgetter_object, doc = cache[index] + except KeyError: + itemgetter_object = _itemgetter(index) + doc = f'Alias for field number {index}' + cache[index] = itemgetter_object, doc + class_namespace[name] = property(itemgetter_object, doc=doc) + + result = type(typename, (tuple,), class_namespace) + + # For pickling to work, the __module__ variable needs to be set to the frame + # where the named tuple is created. Bypass this step in environments where + # sys._getframe is not defined (Jython for example) or sys._getframe is not + # defined for arguments greater than 0 (IronPython), or where the user has + # specified a particular module. + if module is None: + try: + module = _sys._getframe(1).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): + pass + if module is not None: + result.__module__ = module + + return result + + +######################################################################## +### Counter +######################################################################## + +def _count_elements(mapping, iterable): + 'Tally elements from the iterable.' + mapping_get = mapping.get + for elem in iterable: + mapping[elem] = mapping_get(elem, 0) + 1 + +try: # Load C helper function if available + from _collections import _count_elements +except ImportError: + pass + +class Counter(dict): + '''Dict subclass for counting hashable items. Sometimes called a bag + or multiset. Elements are stored as dictionary keys and their counts + are stored as dictionary values. + + >>> c = Counter('abcdeabcdabcaba') # count elements from a string + + >>> c.most_common(3) # three most common elements + [('a', 5), ('b', 4), ('c', 3)] + >>> sorted(c) # list all unique elements + ['a', 'b', 'c', 'd', 'e'] + >>> ''.join(sorted(c.elements())) # list elements with repetitions + 'aaaaabbbbcccdde' + >>> sum(c.values()) # total of all counts + 15 + + >>> c['a'] # count of letter 'a' + 5 + >>> for elem in 'shazam': # update counts from an iterable + ... 
c[elem] += 1 # by adding 1 to each element's count + >>> c['a'] # now there are seven 'a' + 7 + >>> del c['b'] # remove all 'b' + >>> c['b'] # now there are zero 'b' + 0 + + >>> d = Counter('simsalabim') # make another counter + >>> c.update(d) # add in the second counter + >>> c['a'] # now there are nine 'a' + 9 + + >>> c.clear() # empty the counter + >>> c + Counter() + + Note: If a count is set to zero or reduced to zero, it will remain + in the counter until the entry is deleted or the counter is cleared: + + >>> c = Counter('aaabbc') + >>> c['b'] -= 2 # reduce the count of 'b' by two + >>> c.most_common() # 'b' is still in, but its count is zero + [('a', 3), ('c', 1), ('b', 0)] + + ''' + # References: + # http://en.wikipedia.org/wiki/Multiset + # http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html + # http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm + # http://code.activestate.com/recipes/259174/ + # Knuth, TAOCP Vol. II section 4.6.3 + + def __init__(*args, **kwds): + '''Create a new, empty Counter object. And if given, count elements + from an input iterable. Or, initialize the count from another mapping + of elements to their counts. + + >>> c = Counter() # a new, empty counter + >>> c = Counter('gallahad') # a new counter from an iterable + >>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping + >>> c = Counter(a=4, b=2) # a new counter from keyword args + + ''' + if not args: + raise TypeError("descriptor '__init__' of 'Counter' object " + "needs an argument") + self, *args = args + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + super(Counter, self).__init__() + self.update(*args, **kwds) + + def __missing__(self, key): + 'The count of elements not in the Counter is zero.' + # Needed so that self[missing_item] does not raise KeyError + return 0 + + def most_common(self, n=None): + '''List the n most common elements and their counts from the most + common to the least. If n is None, then list all element counts. + + >>> Counter('abcdeabcdabcaba').most_common(3) + [('a', 5), ('b', 4), ('c', 3)] + + ''' + # Emulate Bag.sortedByCount from Smalltalk + if n is None: + return sorted(self.items(), key=_itemgetter(1), reverse=True) + return _heapq.nlargest(n, self.items(), key=_itemgetter(1)) + + def elements(self): + '''Iterator over elements repeating each as many times as its count. + + >>> c = Counter('ABCABC') + >>> sorted(c.elements()) + ['A', 'A', 'B', 'B', 'C', 'C'] + + # Knuth's example for prime factors of 1836: 2**2 * 3**3 * 17**1 + >>> prime_factors = Counter({2: 2, 3: 3, 17: 1}) + >>> product = 1 + >>> for factor in prime_factors.elements(): # loop over factors + ... product *= factor # and multiply them + >>> product + 1836 + + Note, if an element's count has been set to zero or is a negative + number, elements() will ignore it. + + ''' + # Emulate Bag.do from Smalltalk and Multiset.begin from C++. + return _chain.from_iterable(_starmap(_repeat, self.items())) + + # Override dict methods where necessary + + @classmethod + def fromkeys(cls, iterable, v=None): + # There is no equivalent method for counters because setting v=1 + # means that no element can have a count greater than one. + raise NotImplementedError( + 'Counter.fromkeys() is undefined. Use Counter(iterable) instead.') + + def update(*args, **kwds): + '''Like dict.update() but add counts instead of replacing them. + + Source can be an iterable, a dictionary, or another Counter instance. 
+ + >>> c = Counter('which') + >>> c.update('witch') # add elements from another iterable + >>> d = Counter('watch') + >>> c.update(d) # add elements from another counter + >>> c['h'] # four 'h' in which, witch, and watch + 4 + + ''' + # The regular dict.update() operation makes no sense here because the + # replace behavior results in the some of original untouched counts + # being mixed-in with all of the other counts for a mismash that + # doesn't have a straight-forward interpretation in most counting + # contexts. Instead, we implement straight-addition. Both the inputs + # and outputs are allowed to contain zero and negative counts. + + if not args: + raise TypeError("descriptor 'update' of 'Counter' object " + "needs an argument") + self, *args = args + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + iterable = args[0] if args else None + if iterable is not None: + if isinstance(iterable, _collections_abc.Mapping): + if self: + self_get = self.get + for elem, count in iterable.items(): + self[elem] = count + self_get(elem, 0) + else: + super(Counter, self).update(iterable) # fast path when counter is empty + else: + _count_elements(self, iterable) + if kwds: + self.update(kwds) + + def subtract(*args, **kwds): + '''Like dict.update() but subtracts counts instead of replacing them. + Counts can be reduced below zero. Both the inputs and outputs are + allowed to contain zero and negative counts. + + Source can be an iterable, a dictionary, or another Counter instance. + + >>> c = Counter('which') + >>> c.subtract('witch') # subtract elements from another iterable + >>> c.subtract(Counter('watch')) # subtract elements from another counter + >>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch + 0 + >>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch + -1 + + ''' + if not args: + raise TypeError("descriptor 'subtract' of 'Counter' object " + "needs an argument") + self, *args = args + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + iterable = args[0] if args else None + if iterable is not None: + self_get = self.get + if isinstance(iterable, _collections_abc.Mapping): + for elem, count in iterable.items(): + self[elem] = self_get(elem, 0) - count + else: + for elem in iterable: + self[elem] = self_get(elem, 0) - 1 + if kwds: + self.subtract(kwds) + + def copy(self): + 'Return a shallow copy.' + return self.__class__(self) + + def __reduce__(self): + return self.__class__, (dict(self),) + + def __delitem__(self, elem): + 'Like dict.__delitem__() but does not raise KeyError for missing values.' + if elem in self: + super().__delitem__(elem) + + def __repr__(self): + if not self: + return '%s()' % self.__class__.__name__ + try: + items = ', '.join(map('%r: %r'.__mod__, self.most_common())) + return '%s({%s})' % (self.__class__.__name__, items) + except TypeError: + # handle case where values are not orderable + return '{0}({1!r})'.format(self.__class__.__name__, dict(self)) + + # Multiset-style mathematical operations discussed in: + # Knuth TAOCP Volume II section 4.6.3 exercise 19 + # and at http://en.wikipedia.org/wiki/Multiset + # + # Outputs guaranteed to only include positive counts. + # + # To strip negative and zero counts, add-in an empty counter: + # c += Counter() + + def __add__(self, other): + '''Add counts from two counters. 
+ + >>> Counter('abbb') + Counter('bcc') + Counter({'b': 4, 'c': 2, 'a': 1}) + + ''' + if not isinstance(other, Counter): + return NotImplemented + result = Counter() + for elem, count in self.items(): + newcount = count + other[elem] + if newcount > 0: + result[elem] = newcount + for elem, count in other.items(): + if elem not in self and count > 0: + result[elem] = count + return result + + def __sub__(self, other): + ''' Subtract count, but keep only results with positive counts. + + >>> Counter('abbbc') - Counter('bccd') + Counter({'b': 2, 'a': 1}) + + ''' + if not isinstance(other, Counter): + return NotImplemented + result = Counter() + for elem, count in self.items(): + newcount = count - other[elem] + if newcount > 0: + result[elem] = newcount + for elem, count in other.items(): + if elem not in self and count < 0: + result[elem] = 0 - count + return result + + def __or__(self, other): + '''Union is the maximum of value in either of the input counters. + + >>> Counter('abbb') | Counter('bcc') + Counter({'b': 3, 'c': 2, 'a': 1}) + + ''' + if not isinstance(other, Counter): + return NotImplemented + result = Counter() + for elem, count in self.items(): + other_count = other[elem] + newcount = other_count if count < other_count else count + if newcount > 0: + result[elem] = newcount + for elem, count in other.items(): + if elem not in self and count > 0: + result[elem] = count + return result + + def __and__(self, other): + ''' Intersection is the minimum of corresponding counts. + + >>> Counter('abbb') & Counter('bcc') + Counter({'b': 1}) + + ''' + if not isinstance(other, Counter): + return NotImplemented + result = Counter() + for elem, count in self.items(): + other_count = other[elem] + newcount = count if count < other_count else other_count + if newcount > 0: + result[elem] = newcount + return result + + def __pos__(self): + 'Adds an empty counter, effectively stripping negative and zero counts' + result = Counter() + for elem, count in self.items(): + if count > 0: + result[elem] = count + return result + + def __neg__(self): + '''Subtracts from an empty counter. Strips positive and zero counts, + and flips the sign on negative counts. + + ''' + result = Counter() + for elem, count in self.items(): + if count < 0: + result[elem] = 0 - count + return result + + def _keep_positive(self): + '''Internal method to strip elements with a negative or zero count''' + nonpositive = [elem for elem, count in self.items() if not count > 0] + for elem in nonpositive: + del self[elem] + return self + + def __iadd__(self, other): + '''Inplace add from another counter, keeping only positive counts. + + >>> c = Counter('abbb') + >>> c += Counter('bcc') + >>> c + Counter({'b': 4, 'c': 2, 'a': 1}) + + ''' + for elem, count in other.items(): + self[elem] += count + return self._keep_positive() + + def __isub__(self, other): + '''Inplace subtract counter, but keep only results with positive counts. + + >>> c = Counter('abbbc') + >>> c -= Counter('bccd') + >>> c + Counter({'b': 2, 'a': 1}) + + ''' + for elem, count in other.items(): + self[elem] -= count + return self._keep_positive() + + def __ior__(self, other): + '''Inplace union is the maximum of value from either counter. 
+ + >>> c = Counter('abbb') + >>> c |= Counter('bcc') + >>> c + Counter({'b': 3, 'c': 2, 'a': 1}) + + ''' + for elem, other_count in other.items(): + count = self[elem] + if other_count > count: + self[elem] = other_count + return self._keep_positive() + + def __iand__(self, other): + '''Inplace intersection is the minimum of corresponding counts. + + >>> c = Counter('abbb') + >>> c &= Counter('bcc') + >>> c + Counter({'b': 1}) + + ''' + for elem, count in self.items(): + other_count = other[elem] + if other_count < count: + self[elem] = other_count + return self._keep_positive() + + +######################################################################## +### ChainMap +######################################################################## + +class ChainMap(_collections_abc.MutableMapping): + ''' A ChainMap groups multiple dicts (or other mappings) together + to create a single, updateable view. + + The underlying mappings are stored in a list. That list is public and can + be accessed or updated using the *maps* attribute. There is no other + state. + + Lookups search the underlying mappings successively until a key is found. + In contrast, writes, updates, and deletions only operate on the first + mapping. + + ''' + + def __init__(self, *maps): + '''Initialize a ChainMap by setting *maps* to the given mappings. + If no mappings are provided, a single empty dictionary is used. + + ''' + self.maps = list(maps) or [{}] # always at least one map + + def __missing__(self, key): + raise KeyError(key) + + def __getitem__(self, key): + for mapping in self.maps: + try: + return mapping[key] # can't use 'key in mapping' with defaultdict + except KeyError: + pass + return self.__missing__(key) # support subclasses that define __missing__ + + def get(self, key, default=None): + return self[key] if key in self else default + + def __len__(self): + return len(set().union(*self.maps)) # reuses stored hash values if possible + + def __iter__(self): + d = {} + for mapping in reversed(self.maps): + d.update(mapping) # reuses stored hash values if possible + return iter(d) + + def __contains__(self, key): + return any(key in m for m in self.maps) + + def __bool__(self): + return any(self.maps) + + @_recursive_repr() + def __repr__(self): + return '{0.__class__.__name__}({1})'.format( + self, ', '.join(map(repr, self.maps))) + + @classmethod + def fromkeys(cls, iterable, *args): + 'Create a ChainMap with a single dict created from the iterable.' + return cls(dict.fromkeys(iterable, *args)) + + def copy(self): + 'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]' + return self.__class__(self.maps[0].copy(), *self.maps[1:]) + + __copy__ = copy + + def new_child(self, m=None): # like Django's Context.push() + '''New ChainMap with a new map followed by all previous maps. + If no map is provided, an empty dict is used. + ''' + if m is None: + m = {} + return self.__class__(m, *self.maps) + + @property + def parents(self): # like Django's Context.pop() + 'New ChainMap from maps[1:].' + return self.__class__(*self.maps[1:]) + + def __setitem__(self, key, value): + self.maps[0][key] = value + + def __delitem__(self, key): + try: + del self.maps[0][key] + except KeyError: + raise KeyError('Key not found in the first mapping: {!r}'.format(key)) + + def popitem(self): + 'Remove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.' 
+ try: + return self.maps[0].popitem() + except KeyError: + raise KeyError('No keys found in the first mapping.') + + def pop(self, key, *args): + 'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].' + try: + return self.maps[0].pop(key, *args) + except KeyError: + raise KeyError('Key not found in the first mapping: {!r}'.format(key)) + + def clear(self): + 'Clear maps[0], leaving maps[1:] intact.' + self.maps[0].clear() + + +################################################################################ +### UserDict +################################################################################ + +class UserDict(_collections_abc.MutableMapping): + + # Start by filling-out the abstract methods + def __init__(*args, **kwargs): + if not args: + raise TypeError("descriptor '__init__' of 'UserDict' object " + "needs an argument") + self, *args = args + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + if args: + dict = args[0] + elif 'dict' in kwargs: + dict = kwargs.pop('dict') + import warnings + warnings.warn("Passing 'dict' as keyword argument is deprecated", + DeprecationWarning, stacklevel=2) + else: + dict = None + self.data = {} + if dict is not None: + self.update(dict) + if len(kwargs): + self.update(kwargs) + def __len__(self): return len(self.data) + def __getitem__(self, key): + if key in self.data: + return self.data[key] + if hasattr(self.__class__, "__missing__"): + return self.__class__.__missing__(self, key) + raise KeyError(key) + def __setitem__(self, key, item): self.data[key] = item + def __delitem__(self, key): del self.data[key] + def __iter__(self): + return iter(self.data) + + # Modify __contains__ to work correctly when __missing__ is present + def __contains__(self, key): + return key in self.data + + # Now, add the methods in dicts but not in MutableMapping + def __repr__(self): return repr(self.data) + def copy(self): + if self.__class__ is UserDict: + return UserDict(self.data.copy()) + import copy + data = self.data + try: + self.data = {} + c = copy.copy(self) + finally: + self.data = data + c.update(self) + return c + @classmethod + def fromkeys(cls, iterable, value=None): + d = cls() + for key in iterable: + d[key] = value + return d + + + +################################################################################ +### UserList +################################################################################ + +class UserList(_collections_abc.MutableSequence): + """A more or less complete user-defined wrapper around list objects.""" + def __init__(self, initlist=None): + self.data = [] + if initlist is not None: + # XXX should this accept an arbitrary sequence? 
+ if type(initlist) == type(self.data): + self.data[:] = initlist + elif isinstance(initlist, UserList): + self.data[:] = initlist.data[:] + else: + self.data = list(initlist) + def __repr__(self): return repr(self.data) + def __lt__(self, other): return self.data < self.__cast(other) + def __le__(self, other): return self.data <= self.__cast(other) + def __eq__(self, other): return self.data == self.__cast(other) + def __gt__(self, other): return self.data > self.__cast(other) + def __ge__(self, other): return self.data >= self.__cast(other) + def __cast(self, other): + return other.data if isinstance(other, UserList) else other + def __contains__(self, item): return item in self.data + def __len__(self): return len(self.data) + def __getitem__(self, i): return self.data[i] + def __setitem__(self, i, item): self.data[i] = item + def __delitem__(self, i): del self.data[i] + def __add__(self, other): + if isinstance(other, UserList): + return self.__class__(self.data + other.data) + elif isinstance(other, type(self.data)): + return self.__class__(self.data + other) + return self.__class__(self.data + list(other)) + def __radd__(self, other): + if isinstance(other, UserList): + return self.__class__(other.data + self.data) + elif isinstance(other, type(self.data)): + return self.__class__(other + self.data) + return self.__class__(list(other) + self.data) + def __iadd__(self, other): + if isinstance(other, UserList): + self.data += other.data + elif isinstance(other, type(self.data)): + self.data += other + else: + self.data += list(other) + return self + def __mul__(self, n): + return self.__class__(self.data*n) + __rmul__ = __mul__ + def __imul__(self, n): + self.data *= n + return self + def append(self, item): self.data.append(item) + def insert(self, i, item): self.data.insert(i, item) + def pop(self, i=-1): return self.data.pop(i) + def remove(self, item): self.data.remove(item) + def clear(self): self.data.clear() + def copy(self): return self.__class__(self) + def count(self, item): return self.data.count(item) + def index(self, item, *args): return self.data.index(item, *args) + def reverse(self): self.data.reverse() + def sort(self, *args, **kwds): self.data.sort(*args, **kwds) + def extend(self, other): + if isinstance(other, UserList): + self.data.extend(other.data) + else: + self.data.extend(other) + + + +################################################################################ +### UserString +################################################################################ + +class UserString(_collections_abc.Sequence): + def __init__(self, seq): + if isinstance(seq, str): + self.data = seq + elif isinstance(seq, UserString): + self.data = seq.data[:] + else: + self.data = str(seq) + def __str__(self): return str(self.data) + def __repr__(self): return repr(self.data) + def __int__(self): return int(self.data) + def __float__(self): return float(self.data) + def __complex__(self): return complex(self.data) + def __hash__(self): return hash(self.data) + def __getnewargs__(self): + return (self.data[:],) + + def __eq__(self, string): + if isinstance(string, UserString): + return self.data == string.data + return self.data == string + def __lt__(self, string): + if isinstance(string, UserString): + return self.data < string.data + return self.data < string + def __le__(self, string): + if isinstance(string, UserString): + return self.data <= string.data + return self.data <= string + def __gt__(self, string): + if isinstance(string, UserString): + return self.data > 
string.data + return self.data > string + def __ge__(self, string): + if isinstance(string, UserString): + return self.data >= string.data + return self.data >= string + + def __contains__(self, char): + if isinstance(char, UserString): + char = char.data + return char in self.data + + def __len__(self): return len(self.data) + def __getitem__(self, index): return self.__class__(self.data[index]) + def __add__(self, other): + if isinstance(other, UserString): + return self.__class__(self.data + other.data) + elif isinstance(other, str): + return self.__class__(self.data + other) + return self.__class__(self.data + str(other)) + def __radd__(self, other): + if isinstance(other, str): + return self.__class__(other + self.data) + return self.__class__(str(other) + self.data) + def __mul__(self, n): + return self.__class__(self.data*n) + __rmul__ = __mul__ + def __mod__(self, args): + return self.__class__(self.data % args) + def __rmod__(self, format): + return self.__class__(format % args) + + # the following methods are defined in alphabetical order: + def capitalize(self): return self.__class__(self.data.capitalize()) + def casefold(self): + return self.__class__(self.data.casefold()) + def center(self, width, *args): + return self.__class__(self.data.center(width, *args)) + def count(self, sub, start=0, end=_sys.maxsize): + if isinstance(sub, UserString): + sub = sub.data + return self.data.count(sub, start, end) + def encode(self, encoding=None, errors=None): # XXX improve this? + if encoding: + if errors: + return self.__class__(self.data.encode(encoding, errors)) + return self.__class__(self.data.encode(encoding)) + return self.__class__(self.data.encode()) + def endswith(self, suffix, start=0, end=_sys.maxsize): + return self.data.endswith(suffix, start, end) + def expandtabs(self, tabsize=8): + return self.__class__(self.data.expandtabs(tabsize)) + def find(self, sub, start=0, end=_sys.maxsize): + if isinstance(sub, UserString): + sub = sub.data + return self.data.find(sub, start, end) + def format(self, *args, **kwds): + return self.data.format(*args, **kwds) + def format_map(self, mapping): + return self.data.format_map(mapping) + def index(self, sub, start=0, end=_sys.maxsize): + return self.data.index(sub, start, end) + def isalpha(self): return self.data.isalpha() + def isalnum(self): return self.data.isalnum() + def isascii(self): return self.data.isascii() + def isdecimal(self): return self.data.isdecimal() + def isdigit(self): return self.data.isdigit() + def isidentifier(self): return self.data.isidentifier() + def islower(self): return self.data.islower() + def isnumeric(self): return self.data.isnumeric() + def isprintable(self): return self.data.isprintable() + def isspace(self): return self.data.isspace() + def istitle(self): return self.data.istitle() + def isupper(self): return self.data.isupper() + def join(self, seq): return self.data.join(seq) + def ljust(self, width, *args): + return self.__class__(self.data.ljust(width, *args)) + def lower(self): return self.__class__(self.data.lower()) + def lstrip(self, chars=None): return self.__class__(self.data.lstrip(chars)) + maketrans = str.maketrans + def partition(self, sep): + return self.data.partition(sep) + def replace(self, old, new, maxsplit=-1): + if isinstance(old, UserString): + old = old.data + if isinstance(new, UserString): + new = new.data + return self.__class__(self.data.replace(old, new, maxsplit)) + def rfind(self, sub, start=0, end=_sys.maxsize): + if isinstance(sub, UserString): + sub = sub.data + 
return self.data.rfind(sub, start, end) + def rindex(self, sub, start=0, end=_sys.maxsize): + return self.data.rindex(sub, start, end) + def rjust(self, width, *args): + return self.__class__(self.data.rjust(width, *args)) + def rpartition(self, sep): + return self.data.rpartition(sep) + def rstrip(self, chars=None): + return self.__class__(self.data.rstrip(chars)) + def split(self, sep=None, maxsplit=-1): + return self.data.split(sep, maxsplit) + def rsplit(self, sep=None, maxsplit=-1): + return self.data.rsplit(sep, maxsplit) + def splitlines(self, keepends=False): return self.data.splitlines(keepends) + def startswith(self, prefix, start=0, end=_sys.maxsize): + return self.data.startswith(prefix, start, end) + def strip(self, chars=None): return self.__class__(self.data.strip(chars)) + def swapcase(self): return self.__class__(self.data.swapcase()) + def title(self): return self.__class__(self.data.title()) + def translate(self, *args): + return self.__class__(self.data.translate(*args)) + def upper(self): return self.__class__(self.data.upper()) + def zfill(self, width): return self.__class__(self.data.zfill(width)) diff --git a/venv/Lib/collections/__pycache__/__init__.cpython-37.opt-1.pyc b/venv/Lib/collections/__pycache__/__init__.cpython-37.opt-1.pyc new file mode 100644 index 00000000..b8ff332a Binary files /dev/null and b/venv/Lib/collections/__pycache__/__init__.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/collections/__pycache__/__init__.cpython-37.opt-2.pyc b/venv/Lib/collections/__pycache__/__init__.cpython-37.opt-2.pyc new file mode 100644 index 00000000..6422064e Binary files /dev/null and b/venv/Lib/collections/__pycache__/__init__.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/collections/__pycache__/__init__.cpython-37.pyc b/venv/Lib/collections/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 00000000..b8ff332a Binary files /dev/null and b/venv/Lib/collections/__pycache__/__init__.cpython-37.pyc differ diff --git a/venv/Lib/collections/__pycache__/abc.cpython-37.opt-1.pyc b/venv/Lib/collections/__pycache__/abc.cpython-37.opt-1.pyc new file mode 100644 index 00000000..27c30ae7 Binary files /dev/null and b/venv/Lib/collections/__pycache__/abc.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/collections/__pycache__/abc.cpython-37.opt-2.pyc b/venv/Lib/collections/__pycache__/abc.cpython-37.opt-2.pyc new file mode 100644 index 00000000..27c30ae7 Binary files /dev/null and b/venv/Lib/collections/__pycache__/abc.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/collections/__pycache__/abc.cpython-37.pyc b/venv/Lib/collections/__pycache__/abc.cpython-37.pyc new file mode 100644 index 00000000..27c30ae7 Binary files /dev/null and b/venv/Lib/collections/__pycache__/abc.cpython-37.pyc differ diff --git a/venv/Lib/collections/abc.py b/venv/Lib/collections/abc.py new file mode 100644 index 00000000..891600d1 --- /dev/null +++ b/venv/Lib/collections/abc.py @@ -0,0 +1,2 @@ +from _collections_abc import * +from _collections_abc import __all__ diff --git a/venv/Lib/copy.py b/venv/Lib/copy.py new file mode 100644 index 00000000..f86040a3 --- /dev/null +++ b/venv/Lib/copy.py @@ -0,0 +1,313 @@ +"""Generic (shallow and deep) copying operations. + +Interface summary: + + import copy + + x = copy.copy(y) # make a shallow copy of y + x = copy.deepcopy(y) # make a deep copy of y + +For module specific errors, copy.Error is raised. 
+ +The difference between shallow and deep copying is only relevant for +compound objects (objects that contain other objects, like lists or +class instances). + +- A shallow copy constructs a new compound object and then (to the + extent possible) inserts *the same objects* into it that the + original contains. + +- A deep copy constructs a new compound object and then, recursively, + inserts *copies* into it of the objects found in the original. + +Two problems often exist with deep copy operations that don't exist +with shallow copy operations: + + a) recursive objects (compound objects that, directly or indirectly, + contain a reference to themselves) may cause a recursive loop + + b) because deep copy copies *everything* it may copy too much, e.g. + administrative data structures that should be shared even between + copies + +Python's deep copy operation avoids these problems by: + + a) keeping a table of objects already copied during the current + copying pass + + b) letting user-defined classes override the copying operation or the + set of components copied + +This version does not copy types like module, class, function, method, +nor stack trace, stack frame, nor file, socket, window, nor array, nor +any similar types. + +Classes can use the same interfaces to control copying that they use +to control pickling: they can define methods called __getinitargs__(), +__getstate__() and __setstate__(). See the documentation for module +"pickle" for information on these methods. +""" + +import types +import weakref +from copyreg import dispatch_table + +class Error(Exception): + pass +error = Error # backward compatibility + +try: + from org.python.core import PyStringMap +except ImportError: + PyStringMap = None + +__all__ = ["Error", "copy", "deepcopy"] + +def copy(x): + """Shallow copy operation on arbitrary Python objects. + + See the module's __doc__ string for more info. + """ + + cls = type(x) + + copier = _copy_dispatch.get(cls) + if copier: + return copier(x) + + try: + issc = issubclass(cls, type) + except TypeError: # cls is not a class + issc = False + if issc: + # treat it as a regular class: + return _copy_immutable(x) + + copier = getattr(cls, "__copy__", None) + if copier: + return copier(x) + + reductor = dispatch_table.get(cls) + if reductor: + rv = reductor(x) + else: + reductor = getattr(x, "__reduce_ex__", None) + if reductor: + rv = reductor(4) + else: + reductor = getattr(x, "__reduce__", None) + if reductor: + rv = reductor() + else: + raise Error("un(shallow)copyable object of type %s" % cls) + + if isinstance(rv, str): + return x + return _reconstruct(x, None, *rv) + + +_copy_dispatch = d = {} + +def _copy_immutable(x): + return x +for t in (type(None), int, float, bool, complex, str, tuple, + bytes, frozenset, type, range, slice, + types.BuiltinFunctionType, type(Ellipsis), type(NotImplemented), + types.FunctionType, weakref.ref): + d[t] = _copy_immutable +t = getattr(types, "CodeType", None) +if t is not None: + d[t] = _copy_immutable + +d[list] = list.copy +d[dict] = dict.copy +d[set] = set.copy +d[bytearray] = bytearray.copy + +if PyStringMap is not None: + d[PyStringMap] = PyStringMap.copy + +del d, t + +def deepcopy(x, memo=None, _nil=[]): + """Deep copy operation on arbitrary Python objects. + + See the module's __doc__ string for more info. 
+ """ + + if memo is None: + memo = {} + + d = id(x) + y = memo.get(d, _nil) + if y is not _nil: + return y + + cls = type(x) + + copier = _deepcopy_dispatch.get(cls) + if copier: + y = copier(x, memo) + else: + try: + issc = issubclass(cls, type) + except TypeError: # cls is not a class (old Boost; see SF #502085) + issc = 0 + if issc: + y = _deepcopy_atomic(x, memo) + else: + copier = getattr(x, "__deepcopy__", None) + if copier: + y = copier(memo) + else: + reductor = dispatch_table.get(cls) + if reductor: + rv = reductor(x) + else: + reductor = getattr(x, "__reduce_ex__", None) + if reductor: + rv = reductor(4) + else: + reductor = getattr(x, "__reduce__", None) + if reductor: + rv = reductor() + else: + raise Error( + "un(deep)copyable object of type %s" % cls) + if isinstance(rv, str): + y = x + else: + y = _reconstruct(x, memo, *rv) + + # If is its own copy, don't memoize. + if y is not x: + memo[d] = y + _keep_alive(x, memo) # Make sure x lives at least as long as d + return y + +_deepcopy_dispatch = d = {} + +def _deepcopy_atomic(x, memo): + return x +d[type(None)] = _deepcopy_atomic +d[type(Ellipsis)] = _deepcopy_atomic +d[type(NotImplemented)] = _deepcopy_atomic +d[int] = _deepcopy_atomic +d[float] = _deepcopy_atomic +d[bool] = _deepcopy_atomic +d[complex] = _deepcopy_atomic +d[bytes] = _deepcopy_atomic +d[str] = _deepcopy_atomic +try: + d[types.CodeType] = _deepcopy_atomic +except AttributeError: + pass +d[type] = _deepcopy_atomic +d[types.BuiltinFunctionType] = _deepcopy_atomic +d[types.FunctionType] = _deepcopy_atomic +d[weakref.ref] = _deepcopy_atomic + +def _deepcopy_list(x, memo, deepcopy=deepcopy): + y = [] + memo[id(x)] = y + append = y.append + for a in x: + append(deepcopy(a, memo)) + return y +d[list] = _deepcopy_list + +def _deepcopy_tuple(x, memo, deepcopy=deepcopy): + y = [deepcopy(a, memo) for a in x] + # We're not going to put the tuple in the memo, but it's still important we + # check for it, in case the tuple contains recursive mutable structures. + try: + return memo[id(x)] + except KeyError: + pass + for k, j in zip(x, y): + if k is not j: + y = tuple(y) + break + else: + y = x + return y +d[tuple] = _deepcopy_tuple + +def _deepcopy_dict(x, memo, deepcopy=deepcopy): + y = {} + memo[id(x)] = y + for key, value in x.items(): + y[deepcopy(key, memo)] = deepcopy(value, memo) + return y +d[dict] = _deepcopy_dict +if PyStringMap is not None: + d[PyStringMap] = _deepcopy_dict + +def _deepcopy_method(x, memo): # Copy instance methods + return type(x)(x.__func__, deepcopy(x.__self__, memo)) +d[types.MethodType] = _deepcopy_method + +del d + +def _keep_alive(x, memo): + """Keeps a reference to the object x in the memo. + + Because we remember objects by their id, we have + to assure that possibly temporary objects are kept + alive by referencing them. + We store a reference at the id of the memo, which should + normally not be used unless someone tries to deepcopy + the memo itself... 
+ """ + try: + memo[id(memo)].append(x) + except KeyError: + # aha, this is the first one :-) + memo[id(memo)]=[x] + +def _reconstruct(x, memo, func, args, + state=None, listiter=None, dictiter=None, + deepcopy=deepcopy): + deep = memo is not None + if deep and args: + args = (deepcopy(arg, memo) for arg in args) + y = func(*args) + if deep: + memo[id(x)] = y + + if state is not None: + if deep: + state = deepcopy(state, memo) + if hasattr(y, '__setstate__'): + y.__setstate__(state) + else: + if isinstance(state, tuple) and len(state) == 2: + state, slotstate = state + else: + slotstate = None + if state is not None: + y.__dict__.update(state) + if slotstate is not None: + for key, value in slotstate.items(): + setattr(y, key, value) + + if listiter is not None: + if deep: + for item in listiter: + item = deepcopy(item, memo) + y.append(item) + else: + for item in listiter: + y.append(item) + if dictiter is not None: + if deep: + for key, value in dictiter: + key = deepcopy(key, memo) + value = deepcopy(value, memo) + y[key] = value + else: + for key, value in dictiter: + y[key] = value + return y + +del types, weakref, PyStringMap diff --git a/venv/Lib/copyreg.py b/venv/Lib/copyreg.py new file mode 100644 index 00000000..bbe1af4e --- /dev/null +++ b/venv/Lib/copyreg.py @@ -0,0 +1,206 @@ +"""Helper to provide extensibility for pickle. + +This is only useful to add pickle support for extension types defined in +C, not for instances of user-defined classes. +""" + +__all__ = ["pickle", "constructor", + "add_extension", "remove_extension", "clear_extension_cache"] + +dispatch_table = {} + +def pickle(ob_type, pickle_function, constructor_ob=None): + if not callable(pickle_function): + raise TypeError("reduction functions must be callable") + dispatch_table[ob_type] = pickle_function + + # The constructor_ob function is a vestige of safe for unpickling. + # There is no reason for the caller to pass it anymore. + if constructor_ob is not None: + constructor(constructor_ob) + +def constructor(object): + if not callable(object): + raise TypeError("constructors must be callable") + +# Example: provide pickling support for complex numbers. 
+ +try: + complex +except NameError: + pass +else: + + def pickle_complex(c): + return complex, (c.real, c.imag) + + pickle(complex, pickle_complex, complex) + +# Support for pickling new-style objects + +def _reconstructor(cls, base, state): + if base is object: + obj = object.__new__(cls) + else: + obj = base.__new__(cls, state) + if base.__init__ != object.__init__: + base.__init__(obj, state) + return obj + +_HEAPTYPE = 1<<9 + +# Python code for object.__reduce_ex__ for protocols 0 and 1 + +def _reduce_ex(self, proto): + assert proto < 2 + for base in self.__class__.__mro__: + if hasattr(base, '__flags__') and not base.__flags__ & _HEAPTYPE: + break + else: + base = object # not really reachable + if base is object: + state = None + else: + if base is self.__class__: + raise TypeError("can't pickle %s objects" % base.__name__) + state = base(self) + args = (self.__class__, base, state) + try: + getstate = self.__getstate__ + except AttributeError: + if getattr(self, "__slots__", None): + raise TypeError("a class that defines __slots__ without " + "defining __getstate__ cannot be pickled") from None + try: + dict = self.__dict__ + except AttributeError: + dict = None + else: + dict = getstate() + if dict: + return _reconstructor, args, dict + else: + return _reconstructor, args + +# Helper for __reduce_ex__ protocol 2 + +def __newobj__(cls, *args): + return cls.__new__(cls, *args) + +def __newobj_ex__(cls, args, kwargs): + """Used by pickle protocol 4, instead of __newobj__ to allow classes with + keyword-only arguments to be pickled correctly. + """ + return cls.__new__(cls, *args, **kwargs) + +def _slotnames(cls): + """Return a list of slot names for a given class. + + This needs to find slots defined by the class and its bases, so we + can't simply return the __slots__ attribute. We must walk down + the Method Resolution Order and concatenate the __slots__ of each + class found there. (This assumes classes don't modify their + __slots__ attribute to misrepresent their slots after the class is + defined.) + """ + + # Get the value from a cache in the class if possible + names = cls.__dict__.get("__slotnames__") + if names is not None: + return names + + # Not cached -- calculate the value + names = [] + if not hasattr(cls, "__slots__"): + # This class has no slots + pass + else: + # Slots found -- gather slot names from all base classes + for c in cls.__mro__: + if "__slots__" in c.__dict__: + slots = c.__dict__['__slots__'] + # if class has a single slot, it can be given as a string + if isinstance(slots, str): + slots = (slots,) + for name in slots: + # special descriptors + if name in ("__dict__", "__weakref__"): + continue + # mangled names + elif name.startswith('__') and not name.endswith('__'): + stripped = c.__name__.lstrip('_') + if stripped: + names.append('_%s%s' % (stripped, name)) + else: + names.append(name) + else: + names.append(name) + + # Cache the outcome in the class if at all possible + try: + cls.__slotnames__ = names + except: + pass # But don't die if we can't + + return names + +# A registry of extension codes. This is an ad-hoc compression +# mechanism. Whenever a global reference to , is about +# to be pickled, the (, ) tuple is looked up here to see +# if it is a registered extension code for it. Extension codes are +# universal, so that the meaning of a pickle does not depend on +# context. (There are also some codes reserved for local use that +# don't have this restriction.) Codes are positive ints; 0 is +# reserved. 
+ +_extension_registry = {} # key -> code +_inverted_registry = {} # code -> key +_extension_cache = {} # code -> object +# Don't ever rebind those names: pickling grabs a reference to them when +# it's initialized, and won't see a rebinding. + +def add_extension(module, name, code): + """Register an extension code.""" + code = int(code) + if not 1 <= code <= 0x7fffffff: + raise ValueError("code out of range") + key = (module, name) + if (_extension_registry.get(key) == code and + _inverted_registry.get(code) == key): + return # Redundant registrations are benign + if key in _extension_registry: + raise ValueError("key %s is already registered with code %s" % + (key, _extension_registry[key])) + if code in _inverted_registry: + raise ValueError("code %s is already in use for key %s" % + (code, _inverted_registry[code])) + _extension_registry[key] = code + _inverted_registry[code] = key + +def remove_extension(module, name, code): + """Unregister an extension code. For testing only.""" + key = (module, name) + if (_extension_registry.get(key) != code or + _inverted_registry.get(code) != key): + raise ValueError("key %s is not registered with code %s" % + (key, code)) + del _extension_registry[key] + del _inverted_registry[code] + if code in _extension_cache: + del _extension_cache[code] + +def clear_extension_cache(): + _extension_cache.clear() + +# Standard extension code assignments + +# Reserved ranges + +# First Last Count Purpose +# 1 127 127 Reserved for Python standard library +# 128 191 64 Reserved for Zope +# 192 239 48 Reserved for 3rd parties +# 240 255 16 Reserved for private use (will never be assigned) +# 256 Inf Inf Reserved for future assignment + +# Extension codes are assigned by the Python Software Foundation. diff --git a/venv/Lib/distutils/__init__.py b/venv/Lib/distutils/__init__.py new file mode 100644 index 00000000..29fc1da4 --- /dev/null +++ b/venv/Lib/distutils/__init__.py @@ -0,0 +1,101 @@ +import os +import sys +import warnings +import imp +import opcode # opcode is not a virtualenv module, so we can use it to find the stdlib + # Important! 
To work on pypy, this must be a module that resides in the + # lib-python/modified-x.y.z directory + +dirname = os.path.dirname + +distutils_path = os.path.join(os.path.dirname(opcode.__file__), 'distutils') +if os.path.normpath(distutils_path) == os.path.dirname(os.path.normpath(__file__)): + warnings.warn( + "The virtualenv distutils package at %s appears to be in the same location as the system distutils?") +else: + __path__.insert(0, distutils_path) + real_distutils = imp.load_module("_virtualenv_distutils", None, distutils_path, ('', '', imp.PKG_DIRECTORY)) + # Copy the relevant attributes + try: + __revision__ = real_distutils.__revision__ + except AttributeError: + pass + __version__ = real_distutils.__version__ + +from distutils import dist, sysconfig + +try: + basestring +except NameError: + basestring = str + +## patch build_ext (distutils doesn't know how to get the libs directory +## path on windows - it hardcodes the paths around the patched sys.prefix) + +if sys.platform == 'win32': + from distutils.command.build_ext import build_ext as old_build_ext + class build_ext(old_build_ext): + def finalize_options (self): + if self.library_dirs is None: + self.library_dirs = [] + elif isinstance(self.library_dirs, basestring): + self.library_dirs = self.library_dirs.split(os.pathsep) + + self.library_dirs.insert(0, os.path.join(sys.real_prefix, "Libs")) + old_build_ext.finalize_options(self) + + from distutils.command import build_ext as build_ext_module + build_ext_module.build_ext = build_ext + +## distutils.dist patches: + +old_find_config_files = dist.Distribution.find_config_files +def find_config_files(self): + found = old_find_config_files(self) + system_distutils = os.path.join(distutils_path, 'distutils.cfg') + #if os.path.exists(system_distutils): + # found.insert(0, system_distutils) + # What to call the per-user config file + if os.name == 'posix': + user_filename = ".pydistutils.cfg" + else: + user_filename = "pydistutils.cfg" + user_filename = os.path.join(sys.prefix, user_filename) + if os.path.isfile(user_filename): + for item in list(found): + if item.endswith('pydistutils.cfg'): + found.remove(item) + found.append(user_filename) + return found +dist.Distribution.find_config_files = find_config_files + +## distutils.sysconfig patches: + +old_get_python_inc = sysconfig.get_python_inc +def sysconfig_get_python_inc(plat_specific=0, prefix=None): + if prefix is None: + prefix = sys.real_prefix + return old_get_python_inc(plat_specific, prefix) +sysconfig_get_python_inc.__doc__ = old_get_python_inc.__doc__ +sysconfig.get_python_inc = sysconfig_get_python_inc + +old_get_python_lib = sysconfig.get_python_lib +def sysconfig_get_python_lib(plat_specific=0, standard_lib=0, prefix=None): + if standard_lib and prefix is None: + prefix = sys.real_prefix + return old_get_python_lib(plat_specific, standard_lib, prefix) +sysconfig_get_python_lib.__doc__ = old_get_python_lib.__doc__ +sysconfig.get_python_lib = sysconfig_get_python_lib + +old_get_config_vars = sysconfig.get_config_vars +def sysconfig_get_config_vars(*args): + real_vars = old_get_config_vars(*args) + if sys.platform == 'win32': + lib_dir = os.path.join(sys.real_prefix, "libs") + if isinstance(real_vars, dict) and 'LIBDIR' not in real_vars: + real_vars['LIBDIR'] = lib_dir # asked for all + elif isinstance(real_vars, list) and 'LIBDIR' in args: + real_vars = real_vars + [lib_dir] # asked for list + return real_vars +sysconfig_get_config_vars.__doc__ = old_get_config_vars.__doc__ +sysconfig.get_config_vars = 
sysconfig_get_config_vars diff --git a/venv/Lib/distutils/__pycache__/__init__.cpython-37.pyc b/venv/Lib/distutils/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 00000000..6648f417 Binary files /dev/null and b/venv/Lib/distutils/__pycache__/__init__.cpython-37.pyc differ diff --git a/venv/Lib/distutils/distutils.cfg b/venv/Lib/distutils/distutils.cfg new file mode 100644 index 00000000..1af230ec --- /dev/null +++ b/venv/Lib/distutils/distutils.cfg @@ -0,0 +1,6 @@ +# This is a config file local to this virtualenv installation +# You may include options that will be used by all distutils commands, +# and by easy_install. For instance: +# +# [easy_install] +# find_links = http://mylocalsite diff --git a/venv/Lib/encodings/__init__.py b/venv/Lib/encodings/__init__.py new file mode 100644 index 00000000..025b7a8d --- /dev/null +++ b/venv/Lib/encodings/__init__.py @@ -0,0 +1,170 @@ +""" Standard "encodings" Package + + Standard Python encoding modules are stored in this package + directory. + + Codec modules must have names corresponding to normalized encoding + names as defined in the normalize_encoding() function below, e.g. + 'utf-8' must be implemented by the module 'utf_8.py'. + + Each codec module must export the following interface: + + * getregentry() -> codecs.CodecInfo object + The getregentry() API must return a CodecInfo object with encoder, decoder, + incrementalencoder, incrementaldecoder, streamwriter and streamreader + attributes which adhere to the Python Codec Interface Standard. + + In addition, a module may optionally also define the following + APIs which are then used by the package's codec search function: + + * getaliases() -> sequence of encoding name strings to use as aliases + + Alias names returned by getaliases() must be normalized encoding + names as defined by normalize_encoding(). + +Written by Marc-Andre Lemburg (mal@lemburg.com). + +(c) Copyright CNRI, All Rights Reserved. NO WARRANTY. + +"""#" + +import codecs +import sys +from . import aliases + +_cache = {} +_unknown = '--unknown--' +_import_tail = ['*'] +_aliases = aliases.aliases + +class CodecRegistryError(LookupError, SystemError): + pass + +def normalize_encoding(encoding): + + """ Normalize an encoding name. + + Normalization works as follows: all non-alphanumeric + characters except the dot used for Python package names are + collapsed and replaced with a single underscore, e.g. ' -;#' + becomes '_'. Leading and trailing underscores are removed. + + Note that encoding names should be ASCII only; if they do use + non-ASCII characters, these must be Latin-1 compatible. + + """ + if isinstance(encoding, bytes): + encoding = str(encoding, "ascii") + + chars = [] + punct = False + for c in encoding: + if c.isalnum() or c == '.': + if punct and chars: + chars.append('_') + chars.append(c) + punct = False + else: + punct = True + return ''.join(chars) + +def search_function(encoding): + + # Cache lookup + entry = _cache.get(encoding, _unknown) + if entry is not _unknown: + return entry + + # Import the module: + # + # First try to find an alias for the normalized encoding + # name and lookup the module using the aliased name, then try to + # lookup the module using the standard import scheme, i.e. first + # try in the encodings package, then at top-level.
+ # + norm_encoding = normalize_encoding(encoding) + aliased_encoding = _aliases.get(norm_encoding) or \ + _aliases.get(norm_encoding.replace('.', '_')) + if aliased_encoding is not None: + modnames = [aliased_encoding, + norm_encoding] + else: + modnames = [norm_encoding] + for modname in modnames: + if not modname or '.' in modname: + continue + try: + # Import is absolute to prevent the possibly malicious import of a + # module with side-effects that is not in the 'encodings' package. + mod = __import__('encodings.' + modname, fromlist=_import_tail, + level=0) + except ImportError: + # ImportError may occur because 'encodings.(modname)' does not exist, + # or because it imports a name that does not exist (see mbcs and oem) + pass + else: + break + else: + mod = None + + try: + getregentry = mod.getregentry + except AttributeError: + # Not a codec module + mod = None + + if mod is None: + # Cache misses + _cache[encoding] = None + return None + + # Now ask the module for the registry entry + entry = getregentry() + if not isinstance(entry, codecs.CodecInfo): + if not 4 <= len(entry) <= 7: + raise CodecRegistryError('module "%s" (%s) failed to register' + % (mod.__name__, mod.__file__)) + if not callable(entry[0]) or not callable(entry[1]) or \ + (entry[2] is not None and not callable(entry[2])) or \ + (entry[3] is not None and not callable(entry[3])) or \ + (len(entry) > 4 and entry[4] is not None and not callable(entry[4])) or \ + (len(entry) > 5 and entry[5] is not None and not callable(entry[5])): + raise CodecRegistryError('incompatible codecs in module "%s" (%s)' + % (mod.__name__, mod.__file__)) + if len(entry)<7 or entry[6] is None: + entry += (None,)*(6-len(entry)) + (mod.__name__.split(".", 1)[1],) + entry = codecs.CodecInfo(*entry) + + # Cache the codec registry entry + _cache[encoding] = entry + + # Register its aliases (without overwriting previously registered + # aliases) + try: + codecaliases = mod.getaliases() + except AttributeError: + pass + else: + for alias in codecaliases: + if alias not in _aliases: + _aliases[alias] = modname + + # Return the registry entry + return entry + +# Register the search_function in the Python codec registry +codecs.register(search_function) + +if sys.platform == 'win32': + def _alias_mbcs(encoding): + try: + import _winapi + ansi_code_page = "cp%s" % _winapi.GetACP() + if encoding == ansi_code_page: + import encodings.mbcs + return encodings.mbcs.getregentry() + except ImportError: + # Imports may fail while we are shutting down + pass + + codecs.register(_alias_mbcs) diff --git a/venv/Lib/encodings/__pycache__/__init__.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/__init__.cpython-37.opt-1.pyc new file mode 100644 index 00000000..4cb5ae60 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/__init__.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/__init__.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/__init__.cpython-37.opt-2.pyc new file mode 100644 index 00000000..8a87822f Binary files /dev/null and b/venv/Lib/encodings/__pycache__/__init__.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/__init__.cpython-37.pyc b/venv/Lib/encodings/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 00000000..4cb5ae60 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/__init__.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/aliases.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/aliases.cpython-37.opt-1.pyc new file mode 100644 index 
00000000..15038949 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/aliases.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/aliases.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/aliases.cpython-37.opt-2.pyc new file mode 100644 index 00000000..03cd0b57 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/aliases.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/aliases.cpython-37.pyc b/venv/Lib/encodings/__pycache__/aliases.cpython-37.pyc new file mode 100644 index 00000000..15038949 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/aliases.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/ascii.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/ascii.cpython-37.opt-1.pyc new file mode 100644 index 00000000..bfa98a26 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/ascii.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/ascii.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/ascii.cpython-37.opt-2.pyc new file mode 100644 index 00000000..3a8eb1c3 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/ascii.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/ascii.cpython-37.pyc b/venv/Lib/encodings/__pycache__/ascii.cpython-37.pyc new file mode 100644 index 00000000..bfa98a26 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/ascii.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/base64_codec.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/base64_codec.cpython-37.opt-1.pyc new file mode 100644 index 00000000..76bd033a Binary files /dev/null and b/venv/Lib/encodings/__pycache__/base64_codec.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/base64_codec.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/base64_codec.cpython-37.opt-2.pyc new file mode 100644 index 00000000..cabcf36a Binary files /dev/null and b/venv/Lib/encodings/__pycache__/base64_codec.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/base64_codec.cpython-37.pyc b/venv/Lib/encodings/__pycache__/base64_codec.cpython-37.pyc new file mode 100644 index 00000000..9a370871 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/base64_codec.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/big5.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/big5.cpython-37.opt-1.pyc new file mode 100644 index 00000000..27083345 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/big5.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/big5.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/big5.cpython-37.opt-2.pyc new file mode 100644 index 00000000..27083345 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/big5.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/big5.cpython-37.pyc b/venv/Lib/encodings/__pycache__/big5.cpython-37.pyc new file mode 100644 index 00000000..27083345 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/big5.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/big5hkscs.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/big5hkscs.cpython-37.opt-1.pyc new file mode 100644 index 00000000..829ae046 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/big5hkscs.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/big5hkscs.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/big5hkscs.cpython-37.opt-2.pyc new file mode 100644 index 
00000000..829ae046 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/big5hkscs.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/big5hkscs.cpython-37.pyc b/venv/Lib/encodings/__pycache__/big5hkscs.cpython-37.pyc new file mode 100644 index 00000000..829ae046 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/big5hkscs.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/bz2_codec.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/bz2_codec.cpython-37.opt-1.pyc new file mode 100644 index 00000000..34abee1c Binary files /dev/null and b/venv/Lib/encodings/__pycache__/bz2_codec.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/bz2_codec.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/bz2_codec.cpython-37.opt-2.pyc new file mode 100644 index 00000000..9db8e8ce Binary files /dev/null and b/venv/Lib/encodings/__pycache__/bz2_codec.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/bz2_codec.cpython-37.pyc b/venv/Lib/encodings/__pycache__/bz2_codec.cpython-37.pyc new file mode 100644 index 00000000..5184aae4 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/bz2_codec.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/charmap.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/charmap.cpython-37.opt-1.pyc new file mode 100644 index 00000000..0c2f699a Binary files /dev/null and b/venv/Lib/encodings/__pycache__/charmap.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/charmap.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/charmap.cpython-37.opt-2.pyc new file mode 100644 index 00000000..ea084a3f Binary files /dev/null and b/venv/Lib/encodings/__pycache__/charmap.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/charmap.cpython-37.pyc b/venv/Lib/encodings/__pycache__/charmap.cpython-37.pyc new file mode 100644 index 00000000..0c2f699a Binary files /dev/null and b/venv/Lib/encodings/__pycache__/charmap.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp037.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/cp037.cpython-37.opt-1.pyc new file mode 100644 index 00000000..7fbf5e83 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp037.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp037.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/cp037.cpython-37.opt-2.pyc new file mode 100644 index 00000000..16bfac5e Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp037.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp037.cpython-37.pyc b/venv/Lib/encodings/__pycache__/cp037.cpython-37.pyc new file mode 100644 index 00000000..7fbf5e83 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp037.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp1006.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/cp1006.cpython-37.opt-1.pyc new file mode 100644 index 00000000..e1857930 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp1006.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp1006.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/cp1006.cpython-37.opt-2.pyc new file mode 100644 index 00000000..2dbf57e8 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp1006.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp1006.cpython-37.pyc b/venv/Lib/encodings/__pycache__/cp1006.cpython-37.pyc new file mode 100644 index 00000000..e1857930 Binary files 
/dev/null and b/venv/Lib/encodings/__pycache__/cp1006.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp1026.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/cp1026.cpython-37.opt-1.pyc new file mode 100644 index 00000000..3d719597 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp1026.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp1026.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/cp1026.cpython-37.opt-2.pyc new file mode 100644 index 00000000..a26fad86 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp1026.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp1026.cpython-37.pyc b/venv/Lib/encodings/__pycache__/cp1026.cpython-37.pyc new file mode 100644 index 00000000..3d719597 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp1026.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp1125.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/cp1125.cpython-37.opt-1.pyc new file mode 100644 index 00000000..79bcd09b Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp1125.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp1125.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/cp1125.cpython-37.opt-2.pyc new file mode 100644 index 00000000..1ef7bf2c Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp1125.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp1125.cpython-37.pyc b/venv/Lib/encodings/__pycache__/cp1125.cpython-37.pyc new file mode 100644 index 00000000..79bcd09b Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp1125.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp1140.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/cp1140.cpython-37.opt-1.pyc new file mode 100644 index 00000000..79ef88b4 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp1140.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp1140.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/cp1140.cpython-37.opt-2.pyc new file mode 100644 index 00000000..2facaab4 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp1140.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp1140.cpython-37.pyc b/venv/Lib/encodings/__pycache__/cp1140.cpython-37.pyc new file mode 100644 index 00000000..79ef88b4 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp1140.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp1250.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/cp1250.cpython-37.opt-1.pyc new file mode 100644 index 00000000..54bd5982 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp1250.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp1250.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/cp1250.cpython-37.opt-2.pyc new file mode 100644 index 00000000..4b12c49d Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp1250.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp1250.cpython-37.pyc b/venv/Lib/encodings/__pycache__/cp1250.cpython-37.pyc new file mode 100644 index 00000000..54bd5982 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp1250.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp1251.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/cp1251.cpython-37.opt-1.pyc new file mode 100644 index 00000000..acf0bf07 Binary files /dev/null and 
b/venv/Lib/encodings/__pycache__/cp1251.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp1251.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/cp1251.cpython-37.opt-2.pyc new file mode 100644 index 00000000..ded2c3ad Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp1251.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp1251.cpython-37.pyc b/venv/Lib/encodings/__pycache__/cp1251.cpython-37.pyc new file mode 100644 index 00000000..acf0bf07 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp1251.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp1252.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/cp1252.cpython-37.opt-1.pyc new file mode 100644 index 00000000..cf0c121a Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp1252.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp1252.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/cp1252.cpython-37.opt-2.pyc new file mode 100644 index 00000000..f2cff737 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp1252.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp1252.cpython-37.pyc b/venv/Lib/encodings/__pycache__/cp1252.cpython-37.pyc new file mode 100644 index 00000000..cf0c121a Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp1252.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp1253.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/cp1253.cpython-37.opt-1.pyc new file mode 100644 index 00000000..3bcf094c Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp1253.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp1253.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/cp1253.cpython-37.opt-2.pyc new file mode 100644 index 00000000..1ba309b8 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp1253.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp1253.cpython-37.pyc b/venv/Lib/encodings/__pycache__/cp1253.cpython-37.pyc new file mode 100644 index 00000000..3bcf094c Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp1253.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp1254.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/cp1254.cpython-37.opt-1.pyc new file mode 100644 index 00000000..de98fd85 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp1254.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp1254.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/cp1254.cpython-37.opt-2.pyc new file mode 100644 index 00000000..150b8be2 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp1254.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp1254.cpython-37.pyc b/venv/Lib/encodings/__pycache__/cp1254.cpython-37.pyc new file mode 100644 index 00000000..de98fd85 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp1254.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp1255.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/cp1255.cpython-37.opt-1.pyc new file mode 100644 index 00000000..1c5c076f Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp1255.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp1255.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/cp1255.cpython-37.opt-2.pyc new file mode 100644 index 00000000..d7818b8c Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp1255.cpython-37.opt-2.pyc 
differ diff --git a/venv/Lib/encodings/__pycache__/cp1255.cpython-37.pyc b/venv/Lib/encodings/__pycache__/cp1255.cpython-37.pyc new file mode 100644 index 00000000..1c5c076f Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp1255.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp1256.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/cp1256.cpython-37.opt-1.pyc new file mode 100644 index 00000000..863be433 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp1256.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp1256.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/cp1256.cpython-37.opt-2.pyc new file mode 100644 index 00000000..0fb63a89 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp1256.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp1256.cpython-37.pyc b/venv/Lib/encodings/__pycache__/cp1256.cpython-37.pyc new file mode 100644 index 00000000..863be433 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp1256.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp1257.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/cp1257.cpython-37.opt-1.pyc new file mode 100644 index 00000000..2ff013e3 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp1257.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp1257.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/cp1257.cpython-37.opt-2.pyc new file mode 100644 index 00000000..0139d068 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp1257.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp1257.cpython-37.pyc b/venv/Lib/encodings/__pycache__/cp1257.cpython-37.pyc new file mode 100644 index 00000000..2ff013e3 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp1257.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp1258.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/cp1258.cpython-37.opt-1.pyc new file mode 100644 index 00000000..72152659 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp1258.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp1258.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/cp1258.cpython-37.opt-2.pyc new file mode 100644 index 00000000..7500868a Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp1258.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp1258.cpython-37.pyc b/venv/Lib/encodings/__pycache__/cp1258.cpython-37.pyc new file mode 100644 index 00000000..72152659 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp1258.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp273.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/cp273.cpython-37.opt-1.pyc new file mode 100644 index 00000000..ede19929 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp273.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp273.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/cp273.cpython-37.opt-2.pyc new file mode 100644 index 00000000..1c0c36f3 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp273.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp273.cpython-37.pyc b/venv/Lib/encodings/__pycache__/cp273.cpython-37.pyc new file mode 100644 index 00000000..ede19929 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp273.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp424.cpython-37.opt-1.pyc 
b/venv/Lib/encodings/__pycache__/cp424.cpython-37.opt-1.pyc new file mode 100644 index 00000000..17275f03 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp424.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp424.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/cp424.cpython-37.opt-2.pyc new file mode 100644 index 00000000..c205c0ae Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp424.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp424.cpython-37.pyc b/venv/Lib/encodings/__pycache__/cp424.cpython-37.pyc new file mode 100644 index 00000000..17275f03 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp424.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp437.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/cp437.cpython-37.opt-1.pyc new file mode 100644 index 00000000..4e838658 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp437.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp437.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/cp437.cpython-37.opt-2.pyc new file mode 100644 index 00000000..40f49340 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp437.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp437.cpython-37.pyc b/venv/Lib/encodings/__pycache__/cp437.cpython-37.pyc new file mode 100644 index 00000000..4e838658 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp437.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp500.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/cp500.cpython-37.opt-1.pyc new file mode 100644 index 00000000..5738a7cc Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp500.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp500.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/cp500.cpython-37.opt-2.pyc new file mode 100644 index 00000000..26c4c996 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp500.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp500.cpython-37.pyc b/venv/Lib/encodings/__pycache__/cp500.cpython-37.pyc new file mode 100644 index 00000000..5738a7cc Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp500.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp65001.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/cp65001.cpython-37.opt-1.pyc new file mode 100644 index 00000000..8bcfa03f Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp65001.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp65001.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/cp65001.cpython-37.opt-2.pyc new file mode 100644 index 00000000..af2fa906 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp65001.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp65001.cpython-37.pyc b/venv/Lib/encodings/__pycache__/cp65001.cpython-37.pyc new file mode 100644 index 00000000..8bcfa03f Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp65001.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp720.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/cp720.cpython-37.opt-1.pyc new file mode 100644 index 00000000..d5722d80 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp720.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp720.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/cp720.cpython-37.opt-2.pyc new file mode 100644 
index 00000000..5c1b99f1 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp720.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp720.cpython-37.pyc b/venv/Lib/encodings/__pycache__/cp720.cpython-37.pyc new file mode 100644 index 00000000..d5722d80 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp720.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp737.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/cp737.cpython-37.opt-1.pyc new file mode 100644 index 00000000..4a39b595 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp737.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp737.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/cp737.cpython-37.opt-2.pyc new file mode 100644 index 00000000..71e7d8c6 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp737.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp737.cpython-37.pyc b/venv/Lib/encodings/__pycache__/cp737.cpython-37.pyc new file mode 100644 index 00000000..4a39b595 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp737.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp775.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/cp775.cpython-37.opt-1.pyc new file mode 100644 index 00000000..f669e5dc Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp775.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp775.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/cp775.cpython-37.opt-2.pyc new file mode 100644 index 00000000..965326b9 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp775.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp775.cpython-37.pyc b/venv/Lib/encodings/__pycache__/cp775.cpython-37.pyc new file mode 100644 index 00000000..f669e5dc Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp775.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp850.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/cp850.cpython-37.opt-1.pyc new file mode 100644 index 00000000..559f5c85 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp850.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp850.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/cp850.cpython-37.opt-2.pyc new file mode 100644 index 00000000..9ce95234 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp850.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp850.cpython-37.pyc b/venv/Lib/encodings/__pycache__/cp850.cpython-37.pyc new file mode 100644 index 00000000..559f5c85 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp850.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp852.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/cp852.cpython-37.opt-1.pyc new file mode 100644 index 00000000..59d621a8 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp852.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp852.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/cp852.cpython-37.opt-2.pyc new file mode 100644 index 00000000..1ac15100 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp852.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp852.cpython-37.pyc b/venv/Lib/encodings/__pycache__/cp852.cpython-37.pyc new file mode 100644 index 00000000..59d621a8 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp852.cpython-37.pyc differ 
diff --git a/venv/Lib/encodings/__pycache__/cp855.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/cp855.cpython-37.opt-1.pyc new file mode 100644 index 00000000..2a360be2 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp855.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp855.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/cp855.cpython-37.opt-2.pyc new file mode 100644 index 00000000..651af8fb Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp855.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp855.cpython-37.pyc b/venv/Lib/encodings/__pycache__/cp855.cpython-37.pyc new file mode 100644 index 00000000..2a360be2 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp855.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp856.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/cp856.cpython-37.opt-1.pyc new file mode 100644 index 00000000..db08270e Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp856.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp856.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/cp856.cpython-37.opt-2.pyc new file mode 100644 index 00000000..3812ce34 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp856.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp856.cpython-37.pyc b/venv/Lib/encodings/__pycache__/cp856.cpython-37.pyc new file mode 100644 index 00000000..db08270e Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp856.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp857.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/cp857.cpython-37.opt-1.pyc new file mode 100644 index 00000000..d5451110 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp857.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp857.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/cp857.cpython-37.opt-2.pyc new file mode 100644 index 00000000..2aa2c956 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp857.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp857.cpython-37.pyc b/venv/Lib/encodings/__pycache__/cp857.cpython-37.pyc new file mode 100644 index 00000000..d5451110 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp857.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp858.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/cp858.cpython-37.opt-1.pyc new file mode 100644 index 00000000..3eaac783 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp858.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp858.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/cp858.cpython-37.opt-2.pyc new file mode 100644 index 00000000..45f352fc Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp858.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp858.cpython-37.pyc b/venv/Lib/encodings/__pycache__/cp858.cpython-37.pyc new file mode 100644 index 00000000..3eaac783 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp858.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp860.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/cp860.cpython-37.opt-1.pyc new file mode 100644 index 00000000..395097dc Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp860.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp860.cpython-37.opt-2.pyc 
b/venv/Lib/encodings/__pycache__/cp860.cpython-37.opt-2.pyc new file mode 100644 index 00000000..6137115e Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp860.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp860.cpython-37.pyc b/venv/Lib/encodings/__pycache__/cp860.cpython-37.pyc new file mode 100644 index 00000000..395097dc Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp860.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp861.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/cp861.cpython-37.opt-1.pyc new file mode 100644 index 00000000..556c1a4b Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp861.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp861.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/cp861.cpython-37.opt-2.pyc new file mode 100644 index 00000000..a632512b Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp861.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp861.cpython-37.pyc b/venv/Lib/encodings/__pycache__/cp861.cpython-37.pyc new file mode 100644 index 00000000..556c1a4b Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp861.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp862.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/cp862.cpython-37.opt-1.pyc new file mode 100644 index 00000000..50871044 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp862.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp862.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/cp862.cpython-37.opt-2.pyc new file mode 100644 index 00000000..0bbc24c8 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp862.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp862.cpython-37.pyc b/venv/Lib/encodings/__pycache__/cp862.cpython-37.pyc new file mode 100644 index 00000000..50871044 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp862.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp863.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/cp863.cpython-37.opt-1.pyc new file mode 100644 index 00000000..c75dd7f7 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp863.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp863.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/cp863.cpython-37.opt-2.pyc new file mode 100644 index 00000000..472a45bb Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp863.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp863.cpython-37.pyc b/venv/Lib/encodings/__pycache__/cp863.cpython-37.pyc new file mode 100644 index 00000000..c75dd7f7 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp863.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp864.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/cp864.cpython-37.opt-1.pyc new file mode 100644 index 00000000..3f207a55 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp864.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp864.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/cp864.cpython-37.opt-2.pyc new file mode 100644 index 00000000..f23824ca Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp864.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp864.cpython-37.pyc b/venv/Lib/encodings/__pycache__/cp864.cpython-37.pyc new file mode 100644 index 00000000..3f207a55 Binary 
files /dev/null and b/venv/Lib/encodings/__pycache__/cp864.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp865.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/cp865.cpython-37.opt-1.pyc new file mode 100644 index 00000000..02da3d87 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp865.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp865.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/cp865.cpython-37.opt-2.pyc new file mode 100644 index 00000000..2a9e2ab8 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp865.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp865.cpython-37.pyc b/venv/Lib/encodings/__pycache__/cp865.cpython-37.pyc new file mode 100644 index 00000000..02da3d87 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp865.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp866.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/cp866.cpython-37.opt-1.pyc new file mode 100644 index 00000000..8ba31c5f Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp866.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp866.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/cp866.cpython-37.opt-2.pyc new file mode 100644 index 00000000..33f6c9a8 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp866.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp866.cpython-37.pyc b/venv/Lib/encodings/__pycache__/cp866.cpython-37.pyc new file mode 100644 index 00000000..8ba31c5f Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp866.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp869.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/cp869.cpython-37.opt-1.pyc new file mode 100644 index 00000000..efcf8670 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp869.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp869.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/cp869.cpython-37.opt-2.pyc new file mode 100644 index 00000000..060d61ed Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp869.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp869.cpython-37.pyc b/venv/Lib/encodings/__pycache__/cp869.cpython-37.pyc new file mode 100644 index 00000000..efcf8670 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp869.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp874.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/cp874.cpython-37.opt-1.pyc new file mode 100644 index 00000000..b3b02df2 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp874.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp874.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/cp874.cpython-37.opt-2.pyc new file mode 100644 index 00000000..7dbe7ef5 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp874.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp874.cpython-37.pyc b/venv/Lib/encodings/__pycache__/cp874.cpython-37.pyc new file mode 100644 index 00000000..b3b02df2 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp874.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp875.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/cp875.cpython-37.opt-1.pyc new file mode 100644 index 00000000..93ad84d7 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp875.cpython-37.opt-1.pyc differ diff --git 
a/venv/Lib/encodings/__pycache__/cp875.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/cp875.cpython-37.opt-2.pyc new file mode 100644 index 00000000..823f86fa Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp875.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp875.cpython-37.pyc b/venv/Lib/encodings/__pycache__/cp875.cpython-37.pyc new file mode 100644 index 00000000..93ad84d7 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp875.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp932.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/cp932.cpython-37.opt-1.pyc new file mode 100644 index 00000000..ceff3162 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp932.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp932.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/cp932.cpython-37.opt-2.pyc new file mode 100644 index 00000000..ceff3162 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp932.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp932.cpython-37.pyc b/venv/Lib/encodings/__pycache__/cp932.cpython-37.pyc new file mode 100644 index 00000000..ceff3162 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp932.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp949.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/cp949.cpython-37.opt-1.pyc new file mode 100644 index 00000000..48a6d3e0 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp949.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp949.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/cp949.cpython-37.opt-2.pyc new file mode 100644 index 00000000..48a6d3e0 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp949.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp949.cpython-37.pyc b/venv/Lib/encodings/__pycache__/cp949.cpython-37.pyc new file mode 100644 index 00000000..48a6d3e0 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp949.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp950.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/cp950.cpython-37.opt-1.pyc new file mode 100644 index 00000000..7859fe34 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp950.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp950.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/cp950.cpython-37.opt-2.pyc new file mode 100644 index 00000000..7859fe34 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp950.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/cp950.cpython-37.pyc b/venv/Lib/encodings/__pycache__/cp950.cpython-37.pyc new file mode 100644 index 00000000..7859fe34 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/cp950.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/euc_jis_2004.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/euc_jis_2004.cpython-37.opt-1.pyc new file mode 100644 index 00000000..4b9ab679 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/euc_jis_2004.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/euc_jis_2004.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/euc_jis_2004.cpython-37.opt-2.pyc new file mode 100644 index 00000000..4b9ab679 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/euc_jis_2004.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/euc_jis_2004.cpython-37.pyc 
b/venv/Lib/encodings/__pycache__/euc_jis_2004.cpython-37.pyc new file mode 100644 index 00000000..4b9ab679 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/euc_jis_2004.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/euc_jisx0213.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/euc_jisx0213.cpython-37.opt-1.pyc new file mode 100644 index 00000000..701d6a29 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/euc_jisx0213.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/euc_jisx0213.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/euc_jisx0213.cpython-37.opt-2.pyc new file mode 100644 index 00000000..701d6a29 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/euc_jisx0213.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/euc_jisx0213.cpython-37.pyc b/venv/Lib/encodings/__pycache__/euc_jisx0213.cpython-37.pyc new file mode 100644 index 00000000..701d6a29 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/euc_jisx0213.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/euc_jp.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/euc_jp.cpython-37.opt-1.pyc new file mode 100644 index 00000000..7d04cfcd Binary files /dev/null and b/venv/Lib/encodings/__pycache__/euc_jp.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/euc_jp.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/euc_jp.cpython-37.opt-2.pyc new file mode 100644 index 00000000..7d04cfcd Binary files /dev/null and b/venv/Lib/encodings/__pycache__/euc_jp.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/euc_jp.cpython-37.pyc b/venv/Lib/encodings/__pycache__/euc_jp.cpython-37.pyc new file mode 100644 index 00000000..7d04cfcd Binary files /dev/null and b/venv/Lib/encodings/__pycache__/euc_jp.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/euc_kr.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/euc_kr.cpython-37.opt-1.pyc new file mode 100644 index 00000000..44538356 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/euc_kr.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/euc_kr.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/euc_kr.cpython-37.opt-2.pyc new file mode 100644 index 00000000..44538356 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/euc_kr.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/euc_kr.cpython-37.pyc b/venv/Lib/encodings/__pycache__/euc_kr.cpython-37.pyc new file mode 100644 index 00000000..44538356 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/euc_kr.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/gb18030.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/gb18030.cpython-37.opt-1.pyc new file mode 100644 index 00000000..a493f165 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/gb18030.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/gb18030.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/gb18030.cpython-37.opt-2.pyc new file mode 100644 index 00000000..a493f165 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/gb18030.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/gb18030.cpython-37.pyc b/venv/Lib/encodings/__pycache__/gb18030.cpython-37.pyc new file mode 100644 index 00000000..a493f165 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/gb18030.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/gb2312.cpython-37.opt-1.pyc 
b/venv/Lib/encodings/__pycache__/gb2312.cpython-37.opt-1.pyc new file mode 100644 index 00000000..7030f62c Binary files /dev/null and b/venv/Lib/encodings/__pycache__/gb2312.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/gb2312.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/gb2312.cpython-37.opt-2.pyc new file mode 100644 index 00000000..7030f62c Binary files /dev/null and b/venv/Lib/encodings/__pycache__/gb2312.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/gb2312.cpython-37.pyc b/venv/Lib/encodings/__pycache__/gb2312.cpython-37.pyc new file mode 100644 index 00000000..7030f62c Binary files /dev/null and b/venv/Lib/encodings/__pycache__/gb2312.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/gbk.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/gbk.cpython-37.opt-1.pyc new file mode 100644 index 00000000..c258580c Binary files /dev/null and b/venv/Lib/encodings/__pycache__/gbk.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/gbk.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/gbk.cpython-37.opt-2.pyc new file mode 100644 index 00000000..c258580c Binary files /dev/null and b/venv/Lib/encodings/__pycache__/gbk.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/gbk.cpython-37.pyc b/venv/Lib/encodings/__pycache__/gbk.cpython-37.pyc new file mode 100644 index 00000000..c258580c Binary files /dev/null and b/venv/Lib/encodings/__pycache__/gbk.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/hex_codec.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/hex_codec.cpython-37.opt-1.pyc new file mode 100644 index 00000000..b8462510 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/hex_codec.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/hex_codec.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/hex_codec.cpython-37.opt-2.pyc new file mode 100644 index 00000000..5bd775f3 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/hex_codec.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/hex_codec.cpython-37.pyc b/venv/Lib/encodings/__pycache__/hex_codec.cpython-37.pyc new file mode 100644 index 00000000..14b40d73 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/hex_codec.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/hp_roman8.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/hp_roman8.cpython-37.opt-1.pyc new file mode 100644 index 00000000..47657463 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/hp_roman8.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/hp_roman8.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/hp_roman8.cpython-37.opt-2.pyc new file mode 100644 index 00000000..52e7100d Binary files /dev/null and b/venv/Lib/encodings/__pycache__/hp_roman8.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/hp_roman8.cpython-37.pyc b/venv/Lib/encodings/__pycache__/hp_roman8.cpython-37.pyc new file mode 100644 index 00000000..47657463 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/hp_roman8.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/hz.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/hz.cpython-37.opt-1.pyc new file mode 100644 index 00000000..5a229151 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/hz.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/hz.cpython-37.opt-2.pyc 
b/venv/Lib/encodings/__pycache__/hz.cpython-37.opt-2.pyc new file mode 100644 index 00000000..5a229151 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/hz.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/hz.cpython-37.pyc b/venv/Lib/encodings/__pycache__/hz.cpython-37.pyc new file mode 100644 index 00000000..5a229151 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/hz.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/idna.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/idna.cpython-37.opt-1.pyc new file mode 100644 index 00000000..8c524c4c Binary files /dev/null and b/venv/Lib/encodings/__pycache__/idna.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/idna.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/idna.cpython-37.opt-2.pyc new file mode 100644 index 00000000..8c524c4c Binary files /dev/null and b/venv/Lib/encodings/__pycache__/idna.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/idna.cpython-37.pyc b/venv/Lib/encodings/__pycache__/idna.cpython-37.pyc new file mode 100644 index 00000000..8c524c4c Binary files /dev/null and b/venv/Lib/encodings/__pycache__/idna.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso2022_jp.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/iso2022_jp.cpython-37.opt-1.pyc new file mode 100644 index 00000000..dc683524 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso2022_jp.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso2022_jp.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/iso2022_jp.cpython-37.opt-2.pyc new file mode 100644 index 00000000..dc683524 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso2022_jp.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso2022_jp.cpython-37.pyc b/venv/Lib/encodings/__pycache__/iso2022_jp.cpython-37.pyc new file mode 100644 index 00000000..dc683524 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso2022_jp.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso2022_jp_1.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/iso2022_jp_1.cpython-37.opt-1.pyc new file mode 100644 index 00000000..50ce1789 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso2022_jp_1.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso2022_jp_1.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/iso2022_jp_1.cpython-37.opt-2.pyc new file mode 100644 index 00000000..50ce1789 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso2022_jp_1.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso2022_jp_1.cpython-37.pyc b/venv/Lib/encodings/__pycache__/iso2022_jp_1.cpython-37.pyc new file mode 100644 index 00000000..50ce1789 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso2022_jp_1.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso2022_jp_2.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/iso2022_jp_2.cpython-37.opt-1.pyc new file mode 100644 index 00000000..da11e14e Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso2022_jp_2.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso2022_jp_2.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/iso2022_jp_2.cpython-37.opt-2.pyc new file mode 100644 index 00000000..da11e14e Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso2022_jp_2.cpython-37.opt-2.pyc differ diff --git 
a/venv/Lib/encodings/__pycache__/iso2022_jp_2.cpython-37.pyc b/venv/Lib/encodings/__pycache__/iso2022_jp_2.cpython-37.pyc new file mode 100644 index 00000000..da11e14e Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso2022_jp_2.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso2022_jp_2004.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/iso2022_jp_2004.cpython-37.opt-1.pyc new file mode 100644 index 00000000..ec71e552 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso2022_jp_2004.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso2022_jp_2004.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/iso2022_jp_2004.cpython-37.opt-2.pyc new file mode 100644 index 00000000..ec71e552 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso2022_jp_2004.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso2022_jp_2004.cpython-37.pyc b/venv/Lib/encodings/__pycache__/iso2022_jp_2004.cpython-37.pyc new file mode 100644 index 00000000..ec71e552 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso2022_jp_2004.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso2022_jp_3.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/iso2022_jp_3.cpython-37.opt-1.pyc new file mode 100644 index 00000000..c5c9125d Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso2022_jp_3.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso2022_jp_3.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/iso2022_jp_3.cpython-37.opt-2.pyc new file mode 100644 index 00000000..c5c9125d Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso2022_jp_3.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso2022_jp_3.cpython-37.pyc b/venv/Lib/encodings/__pycache__/iso2022_jp_3.cpython-37.pyc new file mode 100644 index 00000000..c5c9125d Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso2022_jp_3.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso2022_jp_ext.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/iso2022_jp_ext.cpython-37.opt-1.pyc new file mode 100644 index 00000000..9fc1d096 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso2022_jp_ext.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso2022_jp_ext.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/iso2022_jp_ext.cpython-37.opt-2.pyc new file mode 100644 index 00000000..9fc1d096 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso2022_jp_ext.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso2022_jp_ext.cpython-37.pyc b/venv/Lib/encodings/__pycache__/iso2022_jp_ext.cpython-37.pyc new file mode 100644 index 00000000..9fc1d096 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso2022_jp_ext.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso2022_kr.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/iso2022_kr.cpython-37.opt-1.pyc new file mode 100644 index 00000000..2727d113 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso2022_kr.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso2022_kr.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/iso2022_kr.cpython-37.opt-2.pyc new file mode 100644 index 00000000..2727d113 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso2022_kr.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso2022_kr.cpython-37.pyc 
b/venv/Lib/encodings/__pycache__/iso2022_kr.cpython-37.pyc new file mode 100644 index 00000000..2727d113 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso2022_kr.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_1.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/iso8859_1.cpython-37.opt-1.pyc new file mode 100644 index 00000000..2922e076 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_1.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_1.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/iso8859_1.cpython-37.opt-2.pyc new file mode 100644 index 00000000..3230f7c6 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_1.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_1.cpython-37.pyc b/venv/Lib/encodings/__pycache__/iso8859_1.cpython-37.pyc new file mode 100644 index 00000000..2922e076 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_1.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_10.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/iso8859_10.cpython-37.opt-1.pyc new file mode 100644 index 00000000..bca1e9f0 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_10.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_10.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/iso8859_10.cpython-37.opt-2.pyc new file mode 100644 index 00000000..8111b262 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_10.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_10.cpython-37.pyc b/venv/Lib/encodings/__pycache__/iso8859_10.cpython-37.pyc new file mode 100644 index 00000000..bca1e9f0 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_10.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_11.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/iso8859_11.cpython-37.opt-1.pyc new file mode 100644 index 00000000..e0997f0e Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_11.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_11.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/iso8859_11.cpython-37.opt-2.pyc new file mode 100644 index 00000000..185696e7 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_11.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_11.cpython-37.pyc b/venv/Lib/encodings/__pycache__/iso8859_11.cpython-37.pyc new file mode 100644 index 00000000..e0997f0e Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_11.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_13.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/iso8859_13.cpython-37.opt-1.pyc new file mode 100644 index 00000000..447be0aa Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_13.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_13.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/iso8859_13.cpython-37.opt-2.pyc new file mode 100644 index 00000000..1a741860 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_13.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_13.cpython-37.pyc b/venv/Lib/encodings/__pycache__/iso8859_13.cpython-37.pyc new file mode 100644 index 00000000..447be0aa Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_13.cpython-37.pyc differ diff 
--git a/venv/Lib/encodings/__pycache__/iso8859_14.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/iso8859_14.cpython-37.opt-1.pyc new file mode 100644 index 00000000..207e58e6 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_14.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_14.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/iso8859_14.cpython-37.opt-2.pyc new file mode 100644 index 00000000..91d19471 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_14.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_14.cpython-37.pyc b/venv/Lib/encodings/__pycache__/iso8859_14.cpython-37.pyc new file mode 100644 index 00000000..207e58e6 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_14.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_15.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/iso8859_15.cpython-37.opt-1.pyc new file mode 100644 index 00000000..682c1945 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_15.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_15.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/iso8859_15.cpython-37.opt-2.pyc new file mode 100644 index 00000000..8b3ca2d3 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_15.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_15.cpython-37.pyc b/venv/Lib/encodings/__pycache__/iso8859_15.cpython-37.pyc new file mode 100644 index 00000000..682c1945 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_15.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_16.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/iso8859_16.cpython-37.opt-1.pyc new file mode 100644 index 00000000..b39d7352 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_16.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_16.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/iso8859_16.cpython-37.opt-2.pyc new file mode 100644 index 00000000..befc43c1 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_16.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_16.cpython-37.pyc b/venv/Lib/encodings/__pycache__/iso8859_16.cpython-37.pyc new file mode 100644 index 00000000..b39d7352 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_16.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_2.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/iso8859_2.cpython-37.opt-1.pyc new file mode 100644 index 00000000..7b5e72a6 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_2.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_2.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/iso8859_2.cpython-37.opt-2.pyc new file mode 100644 index 00000000..c3089b81 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_2.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_2.cpython-37.pyc b/venv/Lib/encodings/__pycache__/iso8859_2.cpython-37.pyc new file mode 100644 index 00000000..7b5e72a6 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_2.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_3.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/iso8859_3.cpython-37.opt-1.pyc new file mode 100644 index 00000000..7bb3f9ae Binary files /dev/null 
and b/venv/Lib/encodings/__pycache__/iso8859_3.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_3.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/iso8859_3.cpython-37.opt-2.pyc new file mode 100644 index 00000000..281a2e5f Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_3.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_3.cpython-37.pyc b/venv/Lib/encodings/__pycache__/iso8859_3.cpython-37.pyc new file mode 100644 index 00000000..7bb3f9ae Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_3.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_4.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/iso8859_4.cpython-37.opt-1.pyc new file mode 100644 index 00000000..1cbe6cb6 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_4.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_4.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/iso8859_4.cpython-37.opt-2.pyc new file mode 100644 index 00000000..5a2d1aea Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_4.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_4.cpython-37.pyc b/venv/Lib/encodings/__pycache__/iso8859_4.cpython-37.pyc new file mode 100644 index 00000000..1cbe6cb6 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_4.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_5.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/iso8859_5.cpython-37.opt-1.pyc new file mode 100644 index 00000000..9f01a866 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_5.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_5.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/iso8859_5.cpython-37.opt-2.pyc new file mode 100644 index 00000000..5a656a37 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_5.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_5.cpython-37.pyc b/venv/Lib/encodings/__pycache__/iso8859_5.cpython-37.pyc new file mode 100644 index 00000000..9f01a866 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_5.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_6.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/iso8859_6.cpython-37.opt-1.pyc new file mode 100644 index 00000000..b140f338 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_6.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_6.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/iso8859_6.cpython-37.opt-2.pyc new file mode 100644 index 00000000..0443a787 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_6.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_6.cpython-37.pyc b/venv/Lib/encodings/__pycache__/iso8859_6.cpython-37.pyc new file mode 100644 index 00000000..b140f338 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_6.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_7.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/iso8859_7.cpython-37.opt-1.pyc new file mode 100644 index 00000000..0b92c7ce Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_7.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_7.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/iso8859_7.cpython-37.opt-2.pyc new file mode 
100644 index 00000000..7424b801 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_7.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_7.cpython-37.pyc b/venv/Lib/encodings/__pycache__/iso8859_7.cpython-37.pyc new file mode 100644 index 00000000..0b92c7ce Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_7.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_8.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/iso8859_8.cpython-37.opt-1.pyc new file mode 100644 index 00000000..ce451ac2 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_8.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_8.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/iso8859_8.cpython-37.opt-2.pyc new file mode 100644 index 00000000..4bda656d Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_8.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_8.cpython-37.pyc b/venv/Lib/encodings/__pycache__/iso8859_8.cpython-37.pyc new file mode 100644 index 00000000..ce451ac2 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_8.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_9.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/iso8859_9.cpython-37.opt-1.pyc new file mode 100644 index 00000000..7e5d1b81 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_9.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_9.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/iso8859_9.cpython-37.opt-2.pyc new file mode 100644 index 00000000..a20fbfe6 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_9.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/iso8859_9.cpython-37.pyc b/venv/Lib/encodings/__pycache__/iso8859_9.cpython-37.pyc new file mode 100644 index 00000000..7e5d1b81 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/iso8859_9.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/johab.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/johab.cpython-37.opt-1.pyc new file mode 100644 index 00000000..ce422987 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/johab.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/johab.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/johab.cpython-37.opt-2.pyc new file mode 100644 index 00000000..ce422987 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/johab.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/johab.cpython-37.pyc b/venv/Lib/encodings/__pycache__/johab.cpython-37.pyc new file mode 100644 index 00000000..ce422987 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/johab.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/koi8_r.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/koi8_r.cpython-37.opt-1.pyc new file mode 100644 index 00000000..7caea62d Binary files /dev/null and b/venv/Lib/encodings/__pycache__/koi8_r.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/koi8_r.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/koi8_r.cpython-37.opt-2.pyc new file mode 100644 index 00000000..cfd2b7bd Binary files /dev/null and b/venv/Lib/encodings/__pycache__/koi8_r.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/koi8_r.cpython-37.pyc b/venv/Lib/encodings/__pycache__/koi8_r.cpython-37.pyc new file mode 100644 index 
00000000..7caea62d Binary files /dev/null and b/venv/Lib/encodings/__pycache__/koi8_r.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/koi8_t.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/koi8_t.cpython-37.opt-1.pyc new file mode 100644 index 00000000..b5e2d938 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/koi8_t.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/koi8_t.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/koi8_t.cpython-37.opt-2.pyc new file mode 100644 index 00000000..9d325d08 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/koi8_t.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/koi8_t.cpython-37.pyc b/venv/Lib/encodings/__pycache__/koi8_t.cpython-37.pyc new file mode 100644 index 00000000..b5e2d938 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/koi8_t.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/koi8_u.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/koi8_u.cpython-37.opt-1.pyc new file mode 100644 index 00000000..50c8e557 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/koi8_u.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/koi8_u.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/koi8_u.cpython-37.opt-2.pyc new file mode 100644 index 00000000..0844f665 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/koi8_u.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/koi8_u.cpython-37.pyc b/venv/Lib/encodings/__pycache__/koi8_u.cpython-37.pyc new file mode 100644 index 00000000..50c8e557 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/koi8_u.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/kz1048.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/kz1048.cpython-37.opt-1.pyc new file mode 100644 index 00000000..213fc221 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/kz1048.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/kz1048.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/kz1048.cpython-37.opt-2.pyc new file mode 100644 index 00000000..760d2d5e Binary files /dev/null and b/venv/Lib/encodings/__pycache__/kz1048.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/kz1048.cpython-37.pyc b/venv/Lib/encodings/__pycache__/kz1048.cpython-37.pyc new file mode 100644 index 00000000..213fc221 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/kz1048.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/latin_1.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/latin_1.cpython-37.opt-1.pyc new file mode 100644 index 00000000..6fba900e Binary files /dev/null and b/venv/Lib/encodings/__pycache__/latin_1.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/latin_1.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/latin_1.cpython-37.opt-2.pyc new file mode 100644 index 00000000..a7c33584 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/latin_1.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/latin_1.cpython-37.pyc b/venv/Lib/encodings/__pycache__/latin_1.cpython-37.pyc new file mode 100644 index 00000000..6fba900e Binary files /dev/null and b/venv/Lib/encodings/__pycache__/latin_1.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/mac_arabic.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/mac_arabic.cpython-37.opt-1.pyc new file mode 100644 index 00000000..975de66c Binary files /dev/null and 
b/venv/Lib/encodings/__pycache__/mac_arabic.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/mac_arabic.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/mac_arabic.cpython-37.opt-2.pyc new file mode 100644 index 00000000..ae152bf3 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/mac_arabic.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/mac_arabic.cpython-37.pyc b/venv/Lib/encodings/__pycache__/mac_arabic.cpython-37.pyc new file mode 100644 index 00000000..975de66c Binary files /dev/null and b/venv/Lib/encodings/__pycache__/mac_arabic.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/mac_centeuro.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/mac_centeuro.cpython-37.opt-1.pyc new file mode 100644 index 00000000..a7b5b7dd Binary files /dev/null and b/venv/Lib/encodings/__pycache__/mac_centeuro.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/mac_centeuro.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/mac_centeuro.cpython-37.opt-2.pyc new file mode 100644 index 00000000..6f81dd4a Binary files /dev/null and b/venv/Lib/encodings/__pycache__/mac_centeuro.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/mac_centeuro.cpython-37.pyc b/venv/Lib/encodings/__pycache__/mac_centeuro.cpython-37.pyc new file mode 100644 index 00000000..a7b5b7dd Binary files /dev/null and b/venv/Lib/encodings/__pycache__/mac_centeuro.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/mac_croatian.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/mac_croatian.cpython-37.opt-1.pyc new file mode 100644 index 00000000..36b0126d Binary files /dev/null and b/venv/Lib/encodings/__pycache__/mac_croatian.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/mac_croatian.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/mac_croatian.cpython-37.opt-2.pyc new file mode 100644 index 00000000..7ae0a6d5 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/mac_croatian.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/mac_croatian.cpython-37.pyc b/venv/Lib/encodings/__pycache__/mac_croatian.cpython-37.pyc new file mode 100644 index 00000000..36b0126d Binary files /dev/null and b/venv/Lib/encodings/__pycache__/mac_croatian.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/mac_cyrillic.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/mac_cyrillic.cpython-37.opt-1.pyc new file mode 100644 index 00000000..2424bcb8 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/mac_cyrillic.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/mac_cyrillic.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/mac_cyrillic.cpython-37.opt-2.pyc new file mode 100644 index 00000000..ee93ee53 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/mac_cyrillic.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/mac_cyrillic.cpython-37.pyc b/venv/Lib/encodings/__pycache__/mac_cyrillic.cpython-37.pyc new file mode 100644 index 00000000..2424bcb8 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/mac_cyrillic.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/mac_farsi.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/mac_farsi.cpython-37.opt-1.pyc new file mode 100644 index 00000000..09a1c9ac Binary files /dev/null and b/venv/Lib/encodings/__pycache__/mac_farsi.cpython-37.opt-1.pyc differ diff --git 
a/venv/Lib/encodings/__pycache__/mac_farsi.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/mac_farsi.cpython-37.opt-2.pyc new file mode 100644 index 00000000..3af39aee Binary files /dev/null and b/venv/Lib/encodings/__pycache__/mac_farsi.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/mac_farsi.cpython-37.pyc b/venv/Lib/encodings/__pycache__/mac_farsi.cpython-37.pyc new file mode 100644 index 00000000..09a1c9ac Binary files /dev/null and b/venv/Lib/encodings/__pycache__/mac_farsi.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/mac_greek.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/mac_greek.cpython-37.opt-1.pyc new file mode 100644 index 00000000..21fec53e Binary files /dev/null and b/venv/Lib/encodings/__pycache__/mac_greek.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/mac_greek.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/mac_greek.cpython-37.opt-2.pyc new file mode 100644 index 00000000..cd77bad9 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/mac_greek.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/mac_greek.cpython-37.pyc b/venv/Lib/encodings/__pycache__/mac_greek.cpython-37.pyc new file mode 100644 index 00000000..21fec53e Binary files /dev/null and b/venv/Lib/encodings/__pycache__/mac_greek.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/mac_iceland.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/mac_iceland.cpython-37.opt-1.pyc new file mode 100644 index 00000000..059af241 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/mac_iceland.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/mac_iceland.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/mac_iceland.cpython-37.opt-2.pyc new file mode 100644 index 00000000..a9c66e95 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/mac_iceland.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/mac_iceland.cpython-37.pyc b/venv/Lib/encodings/__pycache__/mac_iceland.cpython-37.pyc new file mode 100644 index 00000000..059af241 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/mac_iceland.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/mac_latin2.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/mac_latin2.cpython-37.opt-1.pyc new file mode 100644 index 00000000..981e13d7 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/mac_latin2.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/mac_latin2.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/mac_latin2.cpython-37.opt-2.pyc new file mode 100644 index 00000000..06e517a5 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/mac_latin2.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/mac_latin2.cpython-37.pyc b/venv/Lib/encodings/__pycache__/mac_latin2.cpython-37.pyc new file mode 100644 index 00000000..981e13d7 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/mac_latin2.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/mac_roman.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/mac_roman.cpython-37.opt-1.pyc new file mode 100644 index 00000000..146aaa8b Binary files /dev/null and b/venv/Lib/encodings/__pycache__/mac_roman.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/mac_roman.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/mac_roman.cpython-37.opt-2.pyc new file mode 100644 index 00000000..cb4b4ec0 Binary files /dev/null and 
b/venv/Lib/encodings/__pycache__/mac_roman.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/mac_roman.cpython-37.pyc b/venv/Lib/encodings/__pycache__/mac_roman.cpython-37.pyc new file mode 100644 index 00000000..146aaa8b Binary files /dev/null and b/venv/Lib/encodings/__pycache__/mac_roman.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/mac_romanian.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/mac_romanian.cpython-37.opt-1.pyc new file mode 100644 index 00000000..5f70f116 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/mac_romanian.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/mac_romanian.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/mac_romanian.cpython-37.opt-2.pyc new file mode 100644 index 00000000..0afe449b Binary files /dev/null and b/venv/Lib/encodings/__pycache__/mac_romanian.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/mac_romanian.cpython-37.pyc b/venv/Lib/encodings/__pycache__/mac_romanian.cpython-37.pyc new file mode 100644 index 00000000..5f70f116 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/mac_romanian.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/mac_turkish.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/mac_turkish.cpython-37.opt-1.pyc new file mode 100644 index 00000000..0288356e Binary files /dev/null and b/venv/Lib/encodings/__pycache__/mac_turkish.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/mac_turkish.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/mac_turkish.cpython-37.opt-2.pyc new file mode 100644 index 00000000..d3f6150b Binary files /dev/null and b/venv/Lib/encodings/__pycache__/mac_turkish.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/mac_turkish.cpython-37.pyc b/venv/Lib/encodings/__pycache__/mac_turkish.cpython-37.pyc new file mode 100644 index 00000000..0288356e Binary files /dev/null and b/venv/Lib/encodings/__pycache__/mac_turkish.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/mbcs.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/mbcs.cpython-37.opt-1.pyc new file mode 100644 index 00000000..85c70856 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/mbcs.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/mbcs.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/mbcs.cpython-37.opt-2.pyc new file mode 100644 index 00000000..a69f06a3 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/mbcs.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/mbcs.cpython-37.pyc b/venv/Lib/encodings/__pycache__/mbcs.cpython-37.pyc new file mode 100644 index 00000000..85c70856 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/mbcs.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/oem.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/oem.cpython-37.opt-1.pyc new file mode 100644 index 00000000..b592adbf Binary files /dev/null and b/venv/Lib/encodings/__pycache__/oem.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/oem.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/oem.cpython-37.opt-2.pyc new file mode 100644 index 00000000..0223464b Binary files /dev/null and b/venv/Lib/encodings/__pycache__/oem.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/oem.cpython-37.pyc b/venv/Lib/encodings/__pycache__/oem.cpython-37.pyc new file mode 100644 index 00000000..b592adbf Binary files /dev/null and 
b/venv/Lib/encodings/__pycache__/oem.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/palmos.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/palmos.cpython-37.opt-1.pyc new file mode 100644 index 00000000..738b0bf8 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/palmos.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/palmos.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/palmos.cpython-37.opt-2.pyc new file mode 100644 index 00000000..65174f41 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/palmos.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/palmos.cpython-37.pyc b/venv/Lib/encodings/__pycache__/palmos.cpython-37.pyc new file mode 100644 index 00000000..738b0bf8 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/palmos.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/ptcp154.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/ptcp154.cpython-37.opt-1.pyc new file mode 100644 index 00000000..158d9020 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/ptcp154.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/ptcp154.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/ptcp154.cpython-37.opt-2.pyc new file mode 100644 index 00000000..b35f4e1a Binary files /dev/null and b/venv/Lib/encodings/__pycache__/ptcp154.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/ptcp154.cpython-37.pyc b/venv/Lib/encodings/__pycache__/ptcp154.cpython-37.pyc new file mode 100644 index 00000000..158d9020 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/ptcp154.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/punycode.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/punycode.cpython-37.opt-1.pyc new file mode 100644 index 00000000..dbe4f975 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/punycode.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/punycode.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/punycode.cpython-37.opt-2.pyc new file mode 100644 index 00000000..ac3ac28c Binary files /dev/null and b/venv/Lib/encodings/__pycache__/punycode.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/punycode.cpython-37.pyc b/venv/Lib/encodings/__pycache__/punycode.cpython-37.pyc new file mode 100644 index 00000000..dbe4f975 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/punycode.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/quopri_codec.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/quopri_codec.cpython-37.opt-1.pyc new file mode 100644 index 00000000..d6a941b2 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/quopri_codec.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/quopri_codec.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/quopri_codec.cpython-37.opt-2.pyc new file mode 100644 index 00000000..51536f9d Binary files /dev/null and b/venv/Lib/encodings/__pycache__/quopri_codec.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/quopri_codec.cpython-37.pyc b/venv/Lib/encodings/__pycache__/quopri_codec.cpython-37.pyc new file mode 100644 index 00000000..05fcc4e9 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/quopri_codec.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/raw_unicode_escape.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/raw_unicode_escape.cpython-37.opt-1.pyc new file mode 100644 index 
00000000..5ad7bcad Binary files /dev/null and b/venv/Lib/encodings/__pycache__/raw_unicode_escape.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/raw_unicode_escape.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/raw_unicode_escape.cpython-37.opt-2.pyc new file mode 100644 index 00000000..15fe9660 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/raw_unicode_escape.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/raw_unicode_escape.cpython-37.pyc b/venv/Lib/encodings/__pycache__/raw_unicode_escape.cpython-37.pyc new file mode 100644 index 00000000..5ad7bcad Binary files /dev/null and b/venv/Lib/encodings/__pycache__/raw_unicode_escape.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/rot_13.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/rot_13.cpython-37.opt-1.pyc new file mode 100644 index 00000000..5266d983 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/rot_13.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/rot_13.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/rot_13.cpython-37.opt-2.pyc new file mode 100644 index 00000000..dd8199f1 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/rot_13.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/rot_13.cpython-37.pyc b/venv/Lib/encodings/__pycache__/rot_13.cpython-37.pyc new file mode 100644 index 00000000..5266d983 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/rot_13.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/shift_jis.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/shift_jis.cpython-37.opt-1.pyc new file mode 100644 index 00000000..bfbb1fcf Binary files /dev/null and b/venv/Lib/encodings/__pycache__/shift_jis.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/shift_jis.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/shift_jis.cpython-37.opt-2.pyc new file mode 100644 index 00000000..bfbb1fcf Binary files /dev/null and b/venv/Lib/encodings/__pycache__/shift_jis.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/shift_jis.cpython-37.pyc b/venv/Lib/encodings/__pycache__/shift_jis.cpython-37.pyc new file mode 100644 index 00000000..bfbb1fcf Binary files /dev/null and b/venv/Lib/encodings/__pycache__/shift_jis.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/shift_jis_2004.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/shift_jis_2004.cpython-37.opt-1.pyc new file mode 100644 index 00000000..ff725ff5 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/shift_jis_2004.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/shift_jis_2004.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/shift_jis_2004.cpython-37.opt-2.pyc new file mode 100644 index 00000000..ff725ff5 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/shift_jis_2004.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/shift_jis_2004.cpython-37.pyc b/venv/Lib/encodings/__pycache__/shift_jis_2004.cpython-37.pyc new file mode 100644 index 00000000..ff725ff5 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/shift_jis_2004.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/shift_jisx0213.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/shift_jisx0213.cpython-37.opt-1.pyc new file mode 100644 index 00000000..7874d648 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/shift_jisx0213.cpython-37.opt-1.pyc differ diff --git 
a/venv/Lib/encodings/__pycache__/shift_jisx0213.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/shift_jisx0213.cpython-37.opt-2.pyc new file mode 100644 index 00000000..7874d648 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/shift_jisx0213.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/shift_jisx0213.cpython-37.pyc b/venv/Lib/encodings/__pycache__/shift_jisx0213.cpython-37.pyc new file mode 100644 index 00000000..7874d648 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/shift_jisx0213.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/tis_620.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/tis_620.cpython-37.opt-1.pyc new file mode 100644 index 00000000..e895f155 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/tis_620.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/tis_620.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/tis_620.cpython-37.opt-2.pyc new file mode 100644 index 00000000..dd878805 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/tis_620.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/tis_620.cpython-37.pyc b/venv/Lib/encodings/__pycache__/tis_620.cpython-37.pyc new file mode 100644 index 00000000..e895f155 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/tis_620.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/undefined.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/undefined.cpython-37.opt-1.pyc new file mode 100644 index 00000000..f8925476 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/undefined.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/undefined.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/undefined.cpython-37.opt-2.pyc new file mode 100644 index 00000000..058691e4 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/undefined.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/undefined.cpython-37.pyc b/venv/Lib/encodings/__pycache__/undefined.cpython-37.pyc new file mode 100644 index 00000000..f8925476 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/undefined.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/unicode_escape.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/unicode_escape.cpython-37.opt-1.pyc new file mode 100644 index 00000000..6b6f6e6a Binary files /dev/null and b/venv/Lib/encodings/__pycache__/unicode_escape.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/unicode_escape.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/unicode_escape.cpython-37.opt-2.pyc new file mode 100644 index 00000000..4d785eb3 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/unicode_escape.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/unicode_escape.cpython-37.pyc b/venv/Lib/encodings/__pycache__/unicode_escape.cpython-37.pyc new file mode 100644 index 00000000..6b6f6e6a Binary files /dev/null and b/venv/Lib/encodings/__pycache__/unicode_escape.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/unicode_internal.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/unicode_internal.cpython-37.opt-1.pyc new file mode 100644 index 00000000..4b9464a9 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/unicode_internal.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/unicode_internal.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/unicode_internal.cpython-37.opt-2.pyc new file 
mode 100644 index 00000000..c5dc4b21 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/unicode_internal.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/unicode_internal.cpython-37.pyc b/venv/Lib/encodings/__pycache__/unicode_internal.cpython-37.pyc new file mode 100644 index 00000000..4b9464a9 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/unicode_internal.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/utf_16.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/utf_16.cpython-37.opt-1.pyc new file mode 100644 index 00000000..4ab5e1b6 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/utf_16.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/utf_16.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/utf_16.cpython-37.opt-2.pyc new file mode 100644 index 00000000..65fd528d Binary files /dev/null and b/venv/Lib/encodings/__pycache__/utf_16.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/utf_16.cpython-37.pyc b/venv/Lib/encodings/__pycache__/utf_16.cpython-37.pyc new file mode 100644 index 00000000..4ab5e1b6 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/utf_16.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/utf_16_be.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/utf_16_be.cpython-37.opt-1.pyc new file mode 100644 index 00000000..cd1b13b2 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/utf_16_be.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/utf_16_be.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/utf_16_be.cpython-37.opt-2.pyc new file mode 100644 index 00000000..2dc22412 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/utf_16_be.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/utf_16_be.cpython-37.pyc b/venv/Lib/encodings/__pycache__/utf_16_be.cpython-37.pyc new file mode 100644 index 00000000..cd1b13b2 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/utf_16_be.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/utf_16_le.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/utf_16_le.cpython-37.opt-1.pyc new file mode 100644 index 00000000..62f11c31 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/utf_16_le.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/utf_16_le.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/utf_16_le.cpython-37.opt-2.pyc new file mode 100644 index 00000000..c3c0ad17 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/utf_16_le.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/utf_16_le.cpython-37.pyc b/venv/Lib/encodings/__pycache__/utf_16_le.cpython-37.pyc new file mode 100644 index 00000000..62f11c31 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/utf_16_le.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/utf_32.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/utf_32.cpython-37.opt-1.pyc new file mode 100644 index 00000000..6bf2bd46 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/utf_32.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/utf_32.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/utf_32.cpython-37.opt-2.pyc new file mode 100644 index 00000000..5d1e5a49 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/utf_32.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/utf_32.cpython-37.pyc 
b/venv/Lib/encodings/__pycache__/utf_32.cpython-37.pyc new file mode 100644 index 00000000..6bf2bd46 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/utf_32.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/utf_32_be.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/utf_32_be.cpython-37.opt-1.pyc new file mode 100644 index 00000000..503f21d9 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/utf_32_be.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/utf_32_be.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/utf_32_be.cpython-37.opt-2.pyc new file mode 100644 index 00000000..96318e6e Binary files /dev/null and b/venv/Lib/encodings/__pycache__/utf_32_be.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/utf_32_be.cpython-37.pyc b/venv/Lib/encodings/__pycache__/utf_32_be.cpython-37.pyc new file mode 100644 index 00000000..503f21d9 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/utf_32_be.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/utf_32_le.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/utf_32_le.cpython-37.opt-1.pyc new file mode 100644 index 00000000..2d93f42c Binary files /dev/null and b/venv/Lib/encodings/__pycache__/utf_32_le.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/utf_32_le.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/utf_32_le.cpython-37.opt-2.pyc new file mode 100644 index 00000000..f9d36f18 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/utf_32_le.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/utf_32_le.cpython-37.pyc b/venv/Lib/encodings/__pycache__/utf_32_le.cpython-37.pyc new file mode 100644 index 00000000..2d93f42c Binary files /dev/null and b/venv/Lib/encodings/__pycache__/utf_32_le.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/utf_7.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/utf_7.cpython-37.opt-1.pyc new file mode 100644 index 00000000..8102442f Binary files /dev/null and b/venv/Lib/encodings/__pycache__/utf_7.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/utf_7.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/utf_7.cpython-37.opt-2.pyc new file mode 100644 index 00000000..95414853 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/utf_7.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/utf_7.cpython-37.pyc b/venv/Lib/encodings/__pycache__/utf_7.cpython-37.pyc new file mode 100644 index 00000000..8102442f Binary files /dev/null and b/venv/Lib/encodings/__pycache__/utf_7.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/utf_8.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/utf_8.cpython-37.opt-1.pyc new file mode 100644 index 00000000..77803705 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/utf_8.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/utf_8.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/utf_8.cpython-37.opt-2.pyc new file mode 100644 index 00000000..14785785 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/utf_8.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/utf_8.cpython-37.pyc b/venv/Lib/encodings/__pycache__/utf_8.cpython-37.pyc new file mode 100644 index 00000000..77803705 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/utf_8.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/utf_8_sig.cpython-37.opt-1.pyc 
b/venv/Lib/encodings/__pycache__/utf_8_sig.cpython-37.opt-1.pyc new file mode 100644 index 00000000..8450f860 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/utf_8_sig.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/utf_8_sig.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/utf_8_sig.cpython-37.opt-2.pyc new file mode 100644 index 00000000..5e18f60c Binary files /dev/null and b/venv/Lib/encodings/__pycache__/utf_8_sig.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/utf_8_sig.cpython-37.pyc b/venv/Lib/encodings/__pycache__/utf_8_sig.cpython-37.pyc new file mode 100644 index 00000000..8450f860 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/utf_8_sig.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/uu_codec.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/uu_codec.cpython-37.opt-1.pyc new file mode 100644 index 00000000..067f17e4 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/uu_codec.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/uu_codec.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/uu_codec.cpython-37.opt-2.pyc new file mode 100644 index 00000000..3ce6dd69 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/uu_codec.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/uu_codec.cpython-37.pyc b/venv/Lib/encodings/__pycache__/uu_codec.cpython-37.pyc new file mode 100644 index 00000000..93cd7f60 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/uu_codec.cpython-37.pyc differ diff --git a/venv/Lib/encodings/__pycache__/zlib_codec.cpython-37.opt-1.pyc b/venv/Lib/encodings/__pycache__/zlib_codec.cpython-37.opt-1.pyc new file mode 100644 index 00000000..7b1aa3c9 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/zlib_codec.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/encodings/__pycache__/zlib_codec.cpython-37.opt-2.pyc b/venv/Lib/encodings/__pycache__/zlib_codec.cpython-37.opt-2.pyc new file mode 100644 index 00000000..9f1ab1c0 Binary files /dev/null and b/venv/Lib/encodings/__pycache__/zlib_codec.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/encodings/__pycache__/zlib_codec.cpython-37.pyc b/venv/Lib/encodings/__pycache__/zlib_codec.cpython-37.pyc new file mode 100644 index 00000000..d77d8e8d Binary files /dev/null and b/venv/Lib/encodings/__pycache__/zlib_codec.cpython-37.pyc differ diff --git a/venv/Lib/encodings/aliases.py b/venv/Lib/encodings/aliases.py new file mode 100644 index 00000000..2e63c2f9 --- /dev/null +++ b/venv/Lib/encodings/aliases.py @@ -0,0 +1,550 @@ +""" Encoding Aliases Support + + This module is used by the encodings package search function to + map encodings names to module names. + + Note that the search function normalizes the encoding names before + doing the lookup, so the mapping will have to map normalized + encoding names to module names. + + Contents: + + The following aliases dictionary contains mappings of all IANA + character set names for which the Python core library provides + codecs. In addition to these, a few Python specific codec + aliases have also been added. + +""" +aliases = { + + # Please keep this list sorted alphabetically by value ! 
+ + # ascii codec + '646' : 'ascii', + 'ansi_x3.4_1968' : 'ascii', + 'ansi_x3_4_1968' : 'ascii', # some email headers use this non-standard name + 'ansi_x3.4_1986' : 'ascii', + 'cp367' : 'ascii', + 'csascii' : 'ascii', + 'ibm367' : 'ascii', + 'iso646_us' : 'ascii', + 'iso_646.irv_1991' : 'ascii', + 'iso_ir_6' : 'ascii', + 'us' : 'ascii', + 'us_ascii' : 'ascii', + + # base64_codec codec + 'base64' : 'base64_codec', + 'base_64' : 'base64_codec', + + # big5 codec + 'big5_tw' : 'big5', + 'csbig5' : 'big5', + + # big5hkscs codec + 'big5_hkscs' : 'big5hkscs', + 'hkscs' : 'big5hkscs', + + # bz2_codec codec + 'bz2' : 'bz2_codec', + + # cp037 codec + '037' : 'cp037', + 'csibm037' : 'cp037', + 'ebcdic_cp_ca' : 'cp037', + 'ebcdic_cp_nl' : 'cp037', + 'ebcdic_cp_us' : 'cp037', + 'ebcdic_cp_wt' : 'cp037', + 'ibm037' : 'cp037', + 'ibm039' : 'cp037', + + # cp1026 codec + '1026' : 'cp1026', + 'csibm1026' : 'cp1026', + 'ibm1026' : 'cp1026', + + # cp1125 codec + '1125' : 'cp1125', + 'ibm1125' : 'cp1125', + 'cp866u' : 'cp1125', + 'ruscii' : 'cp1125', + + # cp1140 codec + '1140' : 'cp1140', + 'ibm1140' : 'cp1140', + + # cp1250 codec + '1250' : 'cp1250', + 'windows_1250' : 'cp1250', + + # cp1251 codec + '1251' : 'cp1251', + 'windows_1251' : 'cp1251', + + # cp1252 codec + '1252' : 'cp1252', + 'windows_1252' : 'cp1252', + + # cp1253 codec + '1253' : 'cp1253', + 'windows_1253' : 'cp1253', + + # cp1254 codec + '1254' : 'cp1254', + 'windows_1254' : 'cp1254', + + # cp1255 codec + '1255' : 'cp1255', + 'windows_1255' : 'cp1255', + + # cp1256 codec + '1256' : 'cp1256', + 'windows_1256' : 'cp1256', + + # cp1257 codec + '1257' : 'cp1257', + 'windows_1257' : 'cp1257', + + # cp1258 codec + '1258' : 'cp1258', + 'windows_1258' : 'cp1258', + + # cp273 codec + '273' : 'cp273', + 'ibm273' : 'cp273', + 'csibm273' : 'cp273', + + # cp424 codec + '424' : 'cp424', + 'csibm424' : 'cp424', + 'ebcdic_cp_he' : 'cp424', + 'ibm424' : 'cp424', + + # cp437 codec + '437' : 'cp437', + 'cspc8codepage437' : 'cp437', + 'ibm437' : 'cp437', + + # cp500 codec + '500' : 'cp500', + 'csibm500' : 'cp500', + 'ebcdic_cp_be' : 'cp500', + 'ebcdic_cp_ch' : 'cp500', + 'ibm500' : 'cp500', + + # cp775 codec + '775' : 'cp775', + 'cspc775baltic' : 'cp775', + 'ibm775' : 'cp775', + + # cp850 codec + '850' : 'cp850', + 'cspc850multilingual' : 'cp850', + 'ibm850' : 'cp850', + + # cp852 codec + '852' : 'cp852', + 'cspcp852' : 'cp852', + 'ibm852' : 'cp852', + + # cp855 codec + '855' : 'cp855', + 'csibm855' : 'cp855', + 'ibm855' : 'cp855', + + # cp857 codec + '857' : 'cp857', + 'csibm857' : 'cp857', + 'ibm857' : 'cp857', + + # cp858 codec + '858' : 'cp858', + 'csibm858' : 'cp858', + 'ibm858' : 'cp858', + + # cp860 codec + '860' : 'cp860', + 'csibm860' : 'cp860', + 'ibm860' : 'cp860', + + # cp861 codec + '861' : 'cp861', + 'cp_is' : 'cp861', + 'csibm861' : 'cp861', + 'ibm861' : 'cp861', + + # cp862 codec + '862' : 'cp862', + 'cspc862latinhebrew' : 'cp862', + 'ibm862' : 'cp862', + + # cp863 codec + '863' : 'cp863', + 'csibm863' : 'cp863', + 'ibm863' : 'cp863', + + # cp864 codec + '864' : 'cp864', + 'csibm864' : 'cp864', + 'ibm864' : 'cp864', + + # cp865 codec + '865' : 'cp865', + 'csibm865' : 'cp865', + 'ibm865' : 'cp865', + + # cp866 codec + '866' : 'cp866', + 'csibm866' : 'cp866', + 'ibm866' : 'cp866', + + # cp869 codec + '869' : 'cp869', + 'cp_gr' : 'cp869', + 'csibm869' : 'cp869', + 'ibm869' : 'cp869', + + # cp932 codec + '932' : 'cp932', + 'ms932' : 'cp932', + 'mskanji' : 'cp932', + 'ms_kanji' : 'cp932', + + # cp949 codec + '949' : 'cp949', + 'ms949' : 'cp949', + 
'uhc' : 'cp949', + + # cp950 codec + '950' : 'cp950', + 'ms950' : 'cp950', + + # euc_jis_2004 codec + 'jisx0213' : 'euc_jis_2004', + 'eucjis2004' : 'euc_jis_2004', + 'euc_jis2004' : 'euc_jis_2004', + + # euc_jisx0213 codec + 'eucjisx0213' : 'euc_jisx0213', + + # euc_jp codec + 'eucjp' : 'euc_jp', + 'ujis' : 'euc_jp', + 'u_jis' : 'euc_jp', + + # euc_kr codec + 'euckr' : 'euc_kr', + 'korean' : 'euc_kr', + 'ksc5601' : 'euc_kr', + 'ks_c_5601' : 'euc_kr', + 'ks_c_5601_1987' : 'euc_kr', + 'ksx1001' : 'euc_kr', + 'ks_x_1001' : 'euc_kr', + + # gb18030 codec + 'gb18030_2000' : 'gb18030', + + # gb2312 codec + 'chinese' : 'gb2312', + 'csiso58gb231280' : 'gb2312', + 'euc_cn' : 'gb2312', + 'euccn' : 'gb2312', + 'eucgb2312_cn' : 'gb2312', + 'gb2312_1980' : 'gb2312', + 'gb2312_80' : 'gb2312', + 'iso_ir_58' : 'gb2312', + + # gbk codec + '936' : 'gbk', + 'cp936' : 'gbk', + 'ms936' : 'gbk', + + # hex_codec codec + 'hex' : 'hex_codec', + + # hp_roman8 codec + 'roman8' : 'hp_roman8', + 'r8' : 'hp_roman8', + 'csHPRoman8' : 'hp_roman8', + + # hz codec + 'hzgb' : 'hz', + 'hz_gb' : 'hz', + 'hz_gb_2312' : 'hz', + + # iso2022_jp codec + 'csiso2022jp' : 'iso2022_jp', + 'iso2022jp' : 'iso2022_jp', + 'iso_2022_jp' : 'iso2022_jp', + + # iso2022_jp_1 codec + 'iso2022jp_1' : 'iso2022_jp_1', + 'iso_2022_jp_1' : 'iso2022_jp_1', + + # iso2022_jp_2 codec + 'iso2022jp_2' : 'iso2022_jp_2', + 'iso_2022_jp_2' : 'iso2022_jp_2', + + # iso2022_jp_2004 codec + 'iso_2022_jp_2004' : 'iso2022_jp_2004', + 'iso2022jp_2004' : 'iso2022_jp_2004', + + # iso2022_jp_3 codec + 'iso2022jp_3' : 'iso2022_jp_3', + 'iso_2022_jp_3' : 'iso2022_jp_3', + + # iso2022_jp_ext codec + 'iso2022jp_ext' : 'iso2022_jp_ext', + 'iso_2022_jp_ext' : 'iso2022_jp_ext', + + # iso2022_kr codec + 'csiso2022kr' : 'iso2022_kr', + 'iso2022kr' : 'iso2022_kr', + 'iso_2022_kr' : 'iso2022_kr', + + # iso8859_10 codec + 'csisolatin6' : 'iso8859_10', + 'iso_8859_10' : 'iso8859_10', + 'iso_8859_10_1992' : 'iso8859_10', + 'iso_ir_157' : 'iso8859_10', + 'l6' : 'iso8859_10', + 'latin6' : 'iso8859_10', + + # iso8859_11 codec + 'thai' : 'iso8859_11', + 'iso_8859_11' : 'iso8859_11', + 'iso_8859_11_2001' : 'iso8859_11', + + # iso8859_13 codec + 'iso_8859_13' : 'iso8859_13', + 'l7' : 'iso8859_13', + 'latin7' : 'iso8859_13', + + # iso8859_14 codec + 'iso_8859_14' : 'iso8859_14', + 'iso_8859_14_1998' : 'iso8859_14', + 'iso_celtic' : 'iso8859_14', + 'iso_ir_199' : 'iso8859_14', + 'l8' : 'iso8859_14', + 'latin8' : 'iso8859_14', + + # iso8859_15 codec + 'iso_8859_15' : 'iso8859_15', + 'l9' : 'iso8859_15', + 'latin9' : 'iso8859_15', + + # iso8859_16 codec + 'iso_8859_16' : 'iso8859_16', + 'iso_8859_16_2001' : 'iso8859_16', + 'iso_ir_226' : 'iso8859_16', + 'l10' : 'iso8859_16', + 'latin10' : 'iso8859_16', + + # iso8859_2 codec + 'csisolatin2' : 'iso8859_2', + 'iso_8859_2' : 'iso8859_2', + 'iso_8859_2_1987' : 'iso8859_2', + 'iso_ir_101' : 'iso8859_2', + 'l2' : 'iso8859_2', + 'latin2' : 'iso8859_2', + + # iso8859_3 codec + 'csisolatin3' : 'iso8859_3', + 'iso_8859_3' : 'iso8859_3', + 'iso_8859_3_1988' : 'iso8859_3', + 'iso_ir_109' : 'iso8859_3', + 'l3' : 'iso8859_3', + 'latin3' : 'iso8859_3', + + # iso8859_4 codec + 'csisolatin4' : 'iso8859_4', + 'iso_8859_4' : 'iso8859_4', + 'iso_8859_4_1988' : 'iso8859_4', + 'iso_ir_110' : 'iso8859_4', + 'l4' : 'iso8859_4', + 'latin4' : 'iso8859_4', + + # iso8859_5 codec + 'csisolatincyrillic' : 'iso8859_5', + 'cyrillic' : 'iso8859_5', + 'iso_8859_5' : 'iso8859_5', + 'iso_8859_5_1988' : 'iso8859_5', + 'iso_ir_144' : 'iso8859_5', + + # iso8859_6 codec + 'arabic' : 
'iso8859_6', + 'asmo_708' : 'iso8859_6', + 'csisolatinarabic' : 'iso8859_6', + 'ecma_114' : 'iso8859_6', + 'iso_8859_6' : 'iso8859_6', + 'iso_8859_6_1987' : 'iso8859_6', + 'iso_ir_127' : 'iso8859_6', + + # iso8859_7 codec + 'csisolatingreek' : 'iso8859_7', + 'ecma_118' : 'iso8859_7', + 'elot_928' : 'iso8859_7', + 'greek' : 'iso8859_7', + 'greek8' : 'iso8859_7', + 'iso_8859_7' : 'iso8859_7', + 'iso_8859_7_1987' : 'iso8859_7', + 'iso_ir_126' : 'iso8859_7', + + # iso8859_8 codec + 'csisolatinhebrew' : 'iso8859_8', + 'hebrew' : 'iso8859_8', + 'iso_8859_8' : 'iso8859_8', + 'iso_8859_8_1988' : 'iso8859_8', + 'iso_ir_138' : 'iso8859_8', + + # iso8859_9 codec + 'csisolatin5' : 'iso8859_9', + 'iso_8859_9' : 'iso8859_9', + 'iso_8859_9_1989' : 'iso8859_9', + 'iso_ir_148' : 'iso8859_9', + 'l5' : 'iso8859_9', + 'latin5' : 'iso8859_9', + + # johab codec + 'cp1361' : 'johab', + 'ms1361' : 'johab', + + # koi8_r codec + 'cskoi8r' : 'koi8_r', + + # kz1048 codec + 'kz_1048' : 'kz1048', + 'rk1048' : 'kz1048', + 'strk1048_2002' : 'kz1048', + + # latin_1 codec + # + # Note that the latin_1 codec is implemented internally in C and a + # lot faster than the charmap codec iso8859_1 which uses the same + # encoding. This is why we discourage the use of the iso8859_1 + # codec and alias it to latin_1 instead. + # + '8859' : 'latin_1', + 'cp819' : 'latin_1', + 'csisolatin1' : 'latin_1', + 'ibm819' : 'latin_1', + 'iso8859' : 'latin_1', + 'iso8859_1' : 'latin_1', + 'iso_8859_1' : 'latin_1', + 'iso_8859_1_1987' : 'latin_1', + 'iso_ir_100' : 'latin_1', + 'l1' : 'latin_1', + 'latin' : 'latin_1', + 'latin1' : 'latin_1', + + # mac_cyrillic codec + 'maccyrillic' : 'mac_cyrillic', + + # mac_greek codec + 'macgreek' : 'mac_greek', + + # mac_iceland codec + 'maciceland' : 'mac_iceland', + + # mac_latin2 codec + 'maccentraleurope' : 'mac_latin2', + 'maclatin2' : 'mac_latin2', + + # mac_roman codec + 'macintosh' : 'mac_roman', + 'macroman' : 'mac_roman', + + # mac_turkish codec + 'macturkish' : 'mac_turkish', + + # mbcs codec + 'ansi' : 'mbcs', + 'dbcs' : 'mbcs', + + # ptcp154 codec + 'csptcp154' : 'ptcp154', + 'pt154' : 'ptcp154', + 'cp154' : 'ptcp154', + 'cyrillic_asian' : 'ptcp154', + + # quopri_codec codec + 'quopri' : 'quopri_codec', + 'quoted_printable' : 'quopri_codec', + 'quotedprintable' : 'quopri_codec', + + # rot_13 codec + 'rot13' : 'rot_13', + + # shift_jis codec + 'csshiftjis' : 'shift_jis', + 'shiftjis' : 'shift_jis', + 'sjis' : 'shift_jis', + 's_jis' : 'shift_jis', + + # shift_jis_2004 codec + 'shiftjis2004' : 'shift_jis_2004', + 'sjis_2004' : 'shift_jis_2004', + 's_jis_2004' : 'shift_jis_2004', + + # shift_jisx0213 codec + 'shiftjisx0213' : 'shift_jisx0213', + 'sjisx0213' : 'shift_jisx0213', + 's_jisx0213' : 'shift_jisx0213', + + # tactis codec + 'tis260' : 'tactis', + + # tis_620 codec + 'tis620' : 'tis_620', + 'tis_620_0' : 'tis_620', + 'tis_620_2529_0' : 'tis_620', + 'tis_620_2529_1' : 'tis_620', + 'iso_ir_166' : 'tis_620', + + # utf_16 codec + 'u16' : 'utf_16', + 'utf16' : 'utf_16', + + # utf_16_be codec + 'unicodebigunmarked' : 'utf_16_be', + 'utf_16be' : 'utf_16_be', + + # utf_16_le codec + 'unicodelittleunmarked' : 'utf_16_le', + 'utf_16le' : 'utf_16_le', + + # utf_32 codec + 'u32' : 'utf_32', + 'utf32' : 'utf_32', + + # utf_32_be codec + 'utf_32be' : 'utf_32_be', + + # utf_32_le codec + 'utf_32le' : 'utf_32_le', + + # utf_7 codec + 'u7' : 'utf_7', + 'utf7' : 'utf_7', + 'unicode_1_1_utf_7' : 'utf_7', + + # utf_8 codec + 'u8' : 'utf_8', + 'utf' : 'utf_8', + 'utf8' : 'utf_8', + 'utf8_ucs2' : 'utf_8', + 
'utf8_ucs4' : 'utf_8', + + # uu_codec codec + 'uu' : 'uu_codec', + + # zlib_codec codec + 'zip' : 'zlib_codec', + 'zlib' : 'zlib_codec', + + # temporary mac CJK aliases, will be replaced by proper codecs in 3.1 + 'x_mac_japanese' : 'shift_jis', + 'x_mac_korean' : 'euc_kr', + 'x_mac_simp_chinese' : 'gb2312', + 'x_mac_trad_chinese' : 'big5', +} diff --git a/venv/Lib/encodings/ascii.py b/venv/Lib/encodings/ascii.py new file mode 100644 index 00000000..2033cde9 --- /dev/null +++ b/venv/Lib/encodings/ascii.py @@ -0,0 +1,50 @@ +""" Python 'ascii' Codec + + +Written by Marc-Andre Lemburg (mal@lemburg.com). + +(c) Copyright CNRI, All Rights Reserved. NO WARRANTY. + +""" +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + # Note: Binding these as C functions will result in the class not + # converting them to methods. This is intended. + encode = codecs.ascii_encode + decode = codecs.ascii_decode + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.ascii_encode(input, self.errors)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.ascii_decode(input, self.errors)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +class StreamConverter(StreamWriter,StreamReader): + + encode = codecs.ascii_decode + decode = codecs.ascii_encode + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='ascii', + encode=Codec.encode, + decode=Codec.decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamwriter=StreamWriter, + streamreader=StreamReader, + ) diff --git a/venv/Lib/encodings/base64_codec.py b/venv/Lib/encodings/base64_codec.py new file mode 100644 index 00000000..8e7703b3 --- /dev/null +++ b/venv/Lib/encodings/base64_codec.py @@ -0,0 +1,55 @@ +"""Python 'base64_codec' Codec - base64 content transfer encoding. + +This codec de/encodes from bytes to bytes. + +Written by Marc-Andre Lemburg (mal@lemburg.com). 
+""" + +import codecs +import base64 + +### Codec APIs + +def base64_encode(input, errors='strict'): + assert errors == 'strict' + return (base64.encodebytes(input), len(input)) + +def base64_decode(input, errors='strict'): + assert errors == 'strict' + return (base64.decodebytes(input), len(input)) + +class Codec(codecs.Codec): + def encode(self, input, errors='strict'): + return base64_encode(input, errors) + def decode(self, input, errors='strict'): + return base64_decode(input, errors) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + assert self.errors == 'strict' + return base64.encodebytes(input) + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + assert self.errors == 'strict' + return base64.decodebytes(input) + +class StreamWriter(Codec, codecs.StreamWriter): + charbuffertype = bytes + +class StreamReader(Codec, codecs.StreamReader): + charbuffertype = bytes + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='base64', + encode=base64_encode, + decode=base64_decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamwriter=StreamWriter, + streamreader=StreamReader, + _is_text_encoding=False, + ) diff --git a/venv/Lib/encodings/big5.py b/venv/Lib/encodings/big5.py new file mode 100644 index 00000000..7adeb0e1 --- /dev/null +++ b/venv/Lib/encodings/big5.py @@ -0,0 +1,39 @@ +# +# big5.py: Python Unicode Codec for BIG5 +# +# Written by Hye-Shik Chang +# + +import _codecs_tw, codecs +import _multibytecodec as mbc + +codec = _codecs_tw.getcodec('big5') + +class Codec(codecs.Codec): + encode = codec.encode + decode = codec.decode + +class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, + codecs.IncrementalEncoder): + codec = codec + +class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, + codecs.IncrementalDecoder): + codec = codec + +class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader): + codec = codec + +class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter): + codec = codec + +def getregentry(): + return codecs.CodecInfo( + name='big5', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) diff --git a/venv/Lib/encodings/big5hkscs.py b/venv/Lib/encodings/big5hkscs.py new file mode 100644 index 00000000..350df37b --- /dev/null +++ b/venv/Lib/encodings/big5hkscs.py @@ -0,0 +1,39 @@ +# +# big5hkscs.py: Python Unicode Codec for BIG5HKSCS +# +# Written by Hye-Shik Chang +# + +import _codecs_hk, codecs +import _multibytecodec as mbc + +codec = _codecs_hk.getcodec('big5hkscs') + +class Codec(codecs.Codec): + encode = codec.encode + decode = codec.decode + +class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, + codecs.IncrementalEncoder): + codec = codec + +class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, + codecs.IncrementalDecoder): + codec = codec + +class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader): + codec = codec + +class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter): + codec = codec + +def getregentry(): + return codecs.CodecInfo( + name='big5hkscs', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) diff --git 
a/venv/Lib/encodings/bz2_codec.py b/venv/Lib/encodings/bz2_codec.py new file mode 100644 index 00000000..fd9495e3 --- /dev/null +++ b/venv/Lib/encodings/bz2_codec.py @@ -0,0 +1,78 @@ +"""Python 'bz2_codec' Codec - bz2 compression encoding. + +This codec de/encodes from bytes to bytes and is therefore usable with +bytes.transform() and bytes.untransform(). + +Adapted by Raymond Hettinger from zlib_codec.py which was written +by Marc-Andre Lemburg (mal@lemburg.com). +""" + +import codecs +import bz2 # this codec needs the optional bz2 module ! + +### Codec APIs + +def bz2_encode(input, errors='strict'): + assert errors == 'strict' + return (bz2.compress(input), len(input)) + +def bz2_decode(input, errors='strict'): + assert errors == 'strict' + return (bz2.decompress(input), len(input)) + +class Codec(codecs.Codec): + def encode(self, input, errors='strict'): + return bz2_encode(input, errors) + def decode(self, input, errors='strict'): + return bz2_decode(input, errors) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def __init__(self, errors='strict'): + assert errors == 'strict' + self.errors = errors + self.compressobj = bz2.BZ2Compressor() + + def encode(self, input, final=False): + if final: + c = self.compressobj.compress(input) + return c + self.compressobj.flush() + else: + return self.compressobj.compress(input) + + def reset(self): + self.compressobj = bz2.BZ2Compressor() + +class IncrementalDecoder(codecs.IncrementalDecoder): + def __init__(self, errors='strict'): + assert errors == 'strict' + self.errors = errors + self.decompressobj = bz2.BZ2Decompressor() + + def decode(self, input, final=False): + try: + return self.decompressobj.decompress(input) + except EOFError: + return '' + + def reset(self): + self.decompressobj = bz2.BZ2Decompressor() + +class StreamWriter(Codec, codecs.StreamWriter): + charbuffertype = bytes + +class StreamReader(Codec, codecs.StreamReader): + charbuffertype = bytes + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name="bz2", + encode=bz2_encode, + decode=bz2_decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamwriter=StreamWriter, + streamreader=StreamReader, + _is_text_encoding=False, + ) diff --git a/venv/Lib/encodings/charmap.py b/venv/Lib/encodings/charmap.py new file mode 100644 index 00000000..81189b16 --- /dev/null +++ b/venv/Lib/encodings/charmap.py @@ -0,0 +1,69 @@ +""" Generic Python Character Mapping Codec. + + Use this codec directly rather than through the automatic + conversion mechanisms supplied by unicode() and .encode(). + + +Written by Marc-Andre Lemburg (mal@lemburg.com). + +(c) Copyright CNRI, All Rights Reserved. NO WARRANTY. + +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + # Note: Binding these as C functions will result in the class not + # converting them to methods. This is intended. 
+ encode = codecs.charmap_encode + decode = codecs.charmap_decode + +class IncrementalEncoder(codecs.IncrementalEncoder): + def __init__(self, errors='strict', mapping=None): + codecs.IncrementalEncoder.__init__(self, errors) + self.mapping = mapping + + def encode(self, input, final=False): + return codecs.charmap_encode(input, self.errors, self.mapping)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def __init__(self, errors='strict', mapping=None): + codecs.IncrementalDecoder.__init__(self, errors) + self.mapping = mapping + + def decode(self, input, final=False): + return codecs.charmap_decode(input, self.errors, self.mapping)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + + def __init__(self,stream,errors='strict',mapping=None): + codecs.StreamWriter.__init__(self,stream,errors) + self.mapping = mapping + + def encode(self,input,errors='strict'): + return Codec.encode(input,errors,self.mapping) + +class StreamReader(Codec,codecs.StreamReader): + + def __init__(self,stream,errors='strict',mapping=None): + codecs.StreamReader.__init__(self,stream,errors) + self.mapping = mapping + + def decode(self,input,errors='strict'): + return Codec.decode(input,errors,self.mapping) + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='charmap', + encode=Codec.encode, + decode=Codec.decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamwriter=StreamWriter, + streamreader=StreamReader, + ) diff --git a/venv/Lib/encodings/cp037.py b/venv/Lib/encodings/cp037.py new file mode 100644 index 00000000..4edd708f --- /dev/null +++ b/venv/Lib/encodings/cp037.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec cp037 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP037.TXT' with gencodec.py. 
+ +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='cp037', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x9c' # 0x04 -> CONTROL + '\t' # 0x05 -> HORIZONTAL TABULATION + '\x86' # 0x06 -> CONTROL + '\x7f' # 0x07 -> DELETE + '\x97' # 0x08 -> CONTROL + '\x8d' # 0x09 -> CONTROL + '\x8e' # 0x0A -> CONTROL + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x9d' # 0x14 -> CONTROL + '\x85' # 0x15 -> CONTROL + '\x08' # 0x16 -> BACKSPACE + '\x87' # 0x17 -> CONTROL + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x92' # 0x1A -> CONTROL + '\x8f' # 0x1B -> CONTROL + '\x1c' # 0x1C -> FILE SEPARATOR + '\x1d' # 0x1D -> GROUP SEPARATOR + '\x1e' # 0x1E -> RECORD SEPARATOR + '\x1f' # 0x1F -> UNIT SEPARATOR + '\x80' # 0x20 -> CONTROL + '\x81' # 0x21 -> CONTROL + '\x82' # 0x22 -> CONTROL + '\x83' # 0x23 -> CONTROL + '\x84' # 0x24 -> CONTROL + '\n' # 0x25 -> LINE FEED + '\x17' # 0x26 -> END OF TRANSMISSION BLOCK + '\x1b' # 0x27 -> ESCAPE + '\x88' # 0x28 -> CONTROL + '\x89' # 0x29 -> CONTROL + '\x8a' # 0x2A -> CONTROL + '\x8b' # 0x2B -> CONTROL + '\x8c' # 0x2C -> CONTROL + '\x05' # 0x2D -> ENQUIRY + '\x06' # 0x2E -> ACKNOWLEDGE + '\x07' # 0x2F -> BELL + '\x90' # 0x30 -> CONTROL + '\x91' # 0x31 -> CONTROL + '\x16' # 0x32 -> SYNCHRONOUS IDLE + '\x93' # 0x33 -> CONTROL + '\x94' # 0x34 -> CONTROL + '\x95' # 0x35 -> CONTROL + '\x96' # 0x36 -> CONTROL + '\x04' # 0x37 -> END OF TRANSMISSION + '\x98' # 0x38 -> CONTROL + '\x99' # 0x39 -> CONTROL + '\x9a' # 0x3A -> CONTROL + '\x9b' # 0x3B -> CONTROL + '\x14' # 0x3C -> DEVICE CONTROL FOUR + '\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE + '\x9e' # 0x3E -> CONTROL + '\x1a' # 0x3F -> SUBSTITUTE + ' ' # 0x40 -> SPACE + '\xa0' # 0x41 -> NO-BREAK SPACE + '\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX + '\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS + '\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE + '\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE + '\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE + '\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE + '\xe7' # 0x48 -> LATIN SMALL LETTER C WITH CEDILLA + '\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE + '\xa2' # 0x4A -> CENT SIGN + '.' 
# 0x4B -> FULL STOP + '<' # 0x4C -> LESS-THAN SIGN + '(' # 0x4D -> LEFT PARENTHESIS + '+' # 0x4E -> PLUS SIGN + '|' # 0x4F -> VERTICAL LINE + '&' # 0x50 -> AMPERSAND + '\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE + '\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX + '\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS + '\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE + '\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE + '\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX + '\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS + '\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE + '\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN) + '!' # 0x5A -> EXCLAMATION MARK + '$' # 0x5B -> DOLLAR SIGN + '*' # 0x5C -> ASTERISK + ')' # 0x5D -> RIGHT PARENTHESIS + ';' # 0x5E -> SEMICOLON + '\xac' # 0x5F -> NOT SIGN + '-' # 0x60 -> HYPHEN-MINUS + '/' # 0x61 -> SOLIDUS + '\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX + '\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS + '\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE + '\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE + '\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE + '\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE + '\xc7' # 0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA + '\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE + '\xa6' # 0x6A -> BROKEN BAR + ',' # 0x6B -> COMMA + '%' # 0x6C -> PERCENT SIGN + '_' # 0x6D -> LOW LINE + '>' # 0x6E -> GREATER-THAN SIGN + '?' # 0x6F -> QUESTION MARK + '\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE + '\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE + '\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX + '\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS + '\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE + '\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE + '\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX + '\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS + '\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE + '`' # 0x79 -> GRAVE ACCENT + ':' # 0x7A -> COLON + '#' # 0x7B -> NUMBER SIGN + '@' # 0x7C -> COMMERCIAL AT + "'" # 0x7D -> APOSTROPHE + '=' # 0x7E -> EQUALS SIGN + '"' # 0x7F -> QUOTATION MARK + '\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE + 'a' # 0x81 -> LATIN SMALL LETTER A + 'b' # 0x82 -> LATIN SMALL LETTER B + 'c' # 0x83 -> LATIN SMALL LETTER C + 'd' # 0x84 -> LATIN SMALL LETTER D + 'e' # 0x85 -> LATIN SMALL LETTER E + 'f' # 0x86 -> LATIN SMALL LETTER F + 'g' # 0x87 -> LATIN SMALL LETTER G + 'h' # 0x88 -> LATIN SMALL LETTER H + 'i' # 0x89 -> LATIN SMALL LETTER I + '\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xf0' # 0x8C -> LATIN SMALL LETTER ETH (ICELANDIC) + '\xfd' # 0x8D -> LATIN SMALL LETTER Y WITH ACUTE + '\xfe' # 0x8E -> LATIN SMALL LETTER THORN (ICELANDIC) + '\xb1' # 0x8F -> PLUS-MINUS SIGN + '\xb0' # 0x90 -> DEGREE SIGN + 'j' # 0x91 -> LATIN SMALL LETTER J + 'k' # 0x92 -> LATIN SMALL LETTER K + 'l' # 0x93 -> LATIN SMALL LETTER L + 'm' # 0x94 -> LATIN SMALL LETTER M + 'n' # 0x95 -> LATIN SMALL LETTER N + 'o' # 0x96 -> LATIN SMALL LETTER O + 'p' # 0x97 -> LATIN SMALL LETTER P + 'q' # 0x98 -> LATIN SMALL LETTER Q + 'r' # 0x99 -> LATIN SMALL LETTER R + '\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR + '\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR + '\xe6' # 0x9C -> LATIN SMALL LIGATURE AE + '\xb8' # 0x9D -> CEDILLA + '\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE + '\xa4' # 0x9F -> CURRENCY SIGN + '\xb5' # 0xA0 -> MICRO SIGN + '~' # 0xA1 -> TILDE + 's' # 0xA2 -> 
LATIN SMALL LETTER S + 't' # 0xA3 -> LATIN SMALL LETTER T + 'u' # 0xA4 -> LATIN SMALL LETTER U + 'v' # 0xA5 -> LATIN SMALL LETTER V + 'w' # 0xA6 -> LATIN SMALL LETTER W + 'x' # 0xA7 -> LATIN SMALL LETTER X + 'y' # 0xA8 -> LATIN SMALL LETTER Y + 'z' # 0xA9 -> LATIN SMALL LETTER Z + '\xa1' # 0xAA -> INVERTED EXCLAMATION MARK + '\xbf' # 0xAB -> INVERTED QUESTION MARK + '\xd0' # 0xAC -> LATIN CAPITAL LETTER ETH (ICELANDIC) + '\xdd' # 0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE + '\xde' # 0xAE -> LATIN CAPITAL LETTER THORN (ICELANDIC) + '\xae' # 0xAF -> REGISTERED SIGN + '^' # 0xB0 -> CIRCUMFLEX ACCENT + '\xa3' # 0xB1 -> POUND SIGN + '\xa5' # 0xB2 -> YEN SIGN + '\xb7' # 0xB3 -> MIDDLE DOT + '\xa9' # 0xB4 -> COPYRIGHT SIGN + '\xa7' # 0xB5 -> SECTION SIGN + '\xb6' # 0xB6 -> PILCROW SIGN + '\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER + '\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF + '\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS + '[' # 0xBA -> LEFT SQUARE BRACKET + ']' # 0xBB -> RIGHT SQUARE BRACKET + '\xaf' # 0xBC -> MACRON + '\xa8' # 0xBD -> DIAERESIS + '\xb4' # 0xBE -> ACUTE ACCENT + '\xd7' # 0xBF -> MULTIPLICATION SIGN + '{' # 0xC0 -> LEFT CURLY BRACKET + 'A' # 0xC1 -> LATIN CAPITAL LETTER A + 'B' # 0xC2 -> LATIN CAPITAL LETTER B + 'C' # 0xC3 -> LATIN CAPITAL LETTER C + 'D' # 0xC4 -> LATIN CAPITAL LETTER D + 'E' # 0xC5 -> LATIN CAPITAL LETTER E + 'F' # 0xC6 -> LATIN CAPITAL LETTER F + 'G' # 0xC7 -> LATIN CAPITAL LETTER G + 'H' # 0xC8 -> LATIN CAPITAL LETTER H + 'I' # 0xC9 -> LATIN CAPITAL LETTER I + '\xad' # 0xCA -> SOFT HYPHEN + '\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX + '\xf6' # 0xCC -> LATIN SMALL LETTER O WITH DIAERESIS + '\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE + '\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE + '\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE + '}' # 0xD0 -> RIGHT CURLY BRACKET + 'J' # 0xD1 -> LATIN CAPITAL LETTER J + 'K' # 0xD2 -> LATIN CAPITAL LETTER K + 'L' # 0xD3 -> LATIN CAPITAL LETTER L + 'M' # 0xD4 -> LATIN CAPITAL LETTER M + 'N' # 0xD5 -> LATIN CAPITAL LETTER N + 'O' # 0xD6 -> LATIN CAPITAL LETTER O + 'P' # 0xD7 -> LATIN CAPITAL LETTER P + 'Q' # 0xD8 -> LATIN CAPITAL LETTER Q + 'R' # 0xD9 -> LATIN CAPITAL LETTER R + '\xb9' # 0xDA -> SUPERSCRIPT ONE + '\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX + '\xfc' # 0xDC -> LATIN SMALL LETTER U WITH DIAERESIS + '\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE + '\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE + '\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS + '\\' # 0xE0 -> REVERSE SOLIDUS + '\xf7' # 0xE1 -> DIVISION SIGN + 'S' # 0xE2 -> LATIN CAPITAL LETTER S + 'T' # 0xE3 -> LATIN CAPITAL LETTER T + 'U' # 0xE4 -> LATIN CAPITAL LETTER U + 'V' # 0xE5 -> LATIN CAPITAL LETTER V + 'W' # 0xE6 -> LATIN CAPITAL LETTER W + 'X' # 0xE7 -> LATIN CAPITAL LETTER X + 'Y' # 0xE8 -> LATIN CAPITAL LETTER Y + 'Z' # 0xE9 -> LATIN CAPITAL LETTER Z + '\xb2' # 0xEA -> SUPERSCRIPT TWO + '\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX + '\xd6' # 0xEC -> LATIN CAPITAL LETTER O WITH DIAERESIS + '\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE + '\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE + '\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE + '0' # 0xF0 -> DIGIT ZERO + '1' # 0xF1 -> DIGIT ONE + '2' # 0xF2 -> DIGIT TWO + '3' # 0xF3 -> DIGIT THREE + '4' # 0xF4 -> DIGIT FOUR + '5' # 0xF5 -> DIGIT FIVE + '6' # 0xF6 -> DIGIT SIX + '7' # 0xF7 -> DIGIT SEVEN + '8' # 0xF8 -> DIGIT EIGHT + '9' # 0xF9 -> DIGIT NINE + '\xb3' # 0xFA -> SUPERSCRIPT THREE + '\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX + 
'\xdc' # 0xFC -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE + '\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE + '\x9f' # 0xFF -> CONTROL +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/cp1006.py b/venv/Lib/encodings/cp1006.py new file mode 100644 index 00000000..a1221c3e --- /dev/null +++ b/venv/Lib/encodings/cp1006.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec cp1006 generated from 'MAPPINGS/VENDORS/MISC/CP1006.TXT' with gencodec.py. + +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='cp1006', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x04' # 0x04 -> END OF TRANSMISSION + '\x05' # 0x05 -> ENQUIRY + '\x06' # 0x06 -> ACKNOWLEDGE + '\x07' # 0x07 -> BELL + '\x08' # 0x08 -> BACKSPACE + '\t' # 0x09 -> HORIZONTAL TABULATION + '\n' # 0x0A -> LINE FEED + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x14' # 0x14 -> DEVICE CONTROL FOUR + '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x16 -> SYNCHRONOUS IDLE + '\x17' # 0x17 -> END OF TRANSMISSION BLOCK + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x1a' # 0x1A -> SUBSTITUTE + '\x1b' # 0x1B -> ESCAPE + '\x1c' # 0x1C -> FILE SEPARATOR + '\x1d' # 0x1D -> GROUP SEPARATOR + '\x1e' # 0x1E -> RECORD SEPARATOR + '\x1f' # 0x1F -> UNIT SEPARATOR + ' ' # 0x20 -> SPACE + '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' # 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' 
# 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> DELETE + '\x80' # 0x80 -> + '\x81' # 0x81 -> + '\x82' # 0x82 -> + '\x83' # 0x83 -> + '\x84' # 0x84 -> + '\x85' # 0x85 -> + '\x86' # 0x86 -> + '\x87' # 0x87 -> + '\x88' # 0x88 -> + '\x89' # 0x89 -> + '\x8a' # 0x8A -> + '\x8b' # 0x8B -> + '\x8c' # 0x8C -> + '\x8d' # 0x8D -> + '\x8e' # 0x8E -> + '\x8f' # 0x8F -> + '\x90' # 0x90 -> + '\x91' # 0x91 -> + '\x92' # 0x92 -> + '\x93' # 0x93 -> + '\x94' # 0x94 -> + '\x95' # 0x95 -> + '\x96' # 0x96 -> + '\x97' # 0x97 -> + '\x98' # 0x98 -> + '\x99' # 0x99 -> + '\x9a' # 0x9A -> + '\x9b' # 0x9B -> + '\x9c' # 0x9C -> + '\x9d' # 0x9D -> + '\x9e' # 0x9E -> + '\x9f' # 0x9F -> + '\xa0' # 0xA0 -> NO-BREAK SPACE + '\u06f0' # 0xA1 -> EXTENDED ARABIC-INDIC DIGIT ZERO + '\u06f1' # 0xA2 -> EXTENDED ARABIC-INDIC DIGIT ONE + '\u06f2' # 0xA3 -> EXTENDED ARABIC-INDIC DIGIT TWO + '\u06f3' # 0xA4 -> EXTENDED ARABIC-INDIC DIGIT THREE + '\u06f4' # 0xA5 -> EXTENDED ARABIC-INDIC DIGIT FOUR + '\u06f5' # 0xA6 -> EXTENDED ARABIC-INDIC DIGIT FIVE + '\u06f6' # 0xA7 -> EXTENDED ARABIC-INDIC DIGIT SIX + '\u06f7' # 0xA8 -> EXTENDED ARABIC-INDIC DIGIT SEVEN + '\u06f8' # 0xA9 -> EXTENDED ARABIC-INDIC DIGIT EIGHT + '\u06f9' # 0xAA -> EXTENDED ARABIC-INDIC DIGIT NINE 
+ '\u060c' # 0xAB -> ARABIC COMMA + '\u061b' # 0xAC -> ARABIC SEMICOLON + '\xad' # 0xAD -> SOFT HYPHEN + '\u061f' # 0xAE -> ARABIC QUESTION MARK + '\ufe81' # 0xAF -> ARABIC LETTER ALEF WITH MADDA ABOVE ISOLATED FORM + '\ufe8d' # 0xB0 -> ARABIC LETTER ALEF ISOLATED FORM + '\ufe8e' # 0xB1 -> ARABIC LETTER ALEF FINAL FORM + '\ufe8e' # 0xB2 -> ARABIC LETTER ALEF FINAL FORM + '\ufe8f' # 0xB3 -> ARABIC LETTER BEH ISOLATED FORM + '\ufe91' # 0xB4 -> ARABIC LETTER BEH INITIAL FORM + '\ufb56' # 0xB5 -> ARABIC LETTER PEH ISOLATED FORM + '\ufb58' # 0xB6 -> ARABIC LETTER PEH INITIAL FORM + '\ufe93' # 0xB7 -> ARABIC LETTER TEH MARBUTA ISOLATED FORM + '\ufe95' # 0xB8 -> ARABIC LETTER TEH ISOLATED FORM + '\ufe97' # 0xB9 -> ARABIC LETTER TEH INITIAL FORM + '\ufb66' # 0xBA -> ARABIC LETTER TTEH ISOLATED FORM + '\ufb68' # 0xBB -> ARABIC LETTER TTEH INITIAL FORM + '\ufe99' # 0xBC -> ARABIC LETTER THEH ISOLATED FORM + '\ufe9b' # 0xBD -> ARABIC LETTER THEH INITIAL FORM + '\ufe9d' # 0xBE -> ARABIC LETTER JEEM ISOLATED FORM + '\ufe9f' # 0xBF -> ARABIC LETTER JEEM INITIAL FORM + '\ufb7a' # 0xC0 -> ARABIC LETTER TCHEH ISOLATED FORM + '\ufb7c' # 0xC1 -> ARABIC LETTER TCHEH INITIAL FORM + '\ufea1' # 0xC2 -> ARABIC LETTER HAH ISOLATED FORM + '\ufea3' # 0xC3 -> ARABIC LETTER HAH INITIAL FORM + '\ufea5' # 0xC4 -> ARABIC LETTER KHAH ISOLATED FORM + '\ufea7' # 0xC5 -> ARABIC LETTER KHAH INITIAL FORM + '\ufea9' # 0xC6 -> ARABIC LETTER DAL ISOLATED FORM + '\ufb84' # 0xC7 -> ARABIC LETTER DAHAL ISOLATED FORMN + '\ufeab' # 0xC8 -> ARABIC LETTER THAL ISOLATED FORM + '\ufead' # 0xC9 -> ARABIC LETTER REH ISOLATED FORM + '\ufb8c' # 0xCA -> ARABIC LETTER RREH ISOLATED FORM + '\ufeaf' # 0xCB -> ARABIC LETTER ZAIN ISOLATED FORM + '\ufb8a' # 0xCC -> ARABIC LETTER JEH ISOLATED FORM + '\ufeb1' # 0xCD -> ARABIC LETTER SEEN ISOLATED FORM + '\ufeb3' # 0xCE -> ARABIC LETTER SEEN INITIAL FORM + '\ufeb5' # 0xCF -> ARABIC LETTER SHEEN ISOLATED FORM + '\ufeb7' # 0xD0 -> ARABIC LETTER SHEEN INITIAL FORM + '\ufeb9' # 0xD1 -> ARABIC LETTER SAD ISOLATED FORM + '\ufebb' # 0xD2 -> ARABIC LETTER SAD INITIAL FORM + '\ufebd' # 0xD3 -> ARABIC LETTER DAD ISOLATED FORM + '\ufebf' # 0xD4 -> ARABIC LETTER DAD INITIAL FORM + '\ufec1' # 0xD5 -> ARABIC LETTER TAH ISOLATED FORM + '\ufec5' # 0xD6 -> ARABIC LETTER ZAH ISOLATED FORM + '\ufec9' # 0xD7 -> ARABIC LETTER AIN ISOLATED FORM + '\ufeca' # 0xD8 -> ARABIC LETTER AIN FINAL FORM + '\ufecb' # 0xD9 -> ARABIC LETTER AIN INITIAL FORM + '\ufecc' # 0xDA -> ARABIC LETTER AIN MEDIAL FORM + '\ufecd' # 0xDB -> ARABIC LETTER GHAIN ISOLATED FORM + '\ufece' # 0xDC -> ARABIC LETTER GHAIN FINAL FORM + '\ufecf' # 0xDD -> ARABIC LETTER GHAIN INITIAL FORM + '\ufed0' # 0xDE -> ARABIC LETTER GHAIN MEDIAL FORM + '\ufed1' # 0xDF -> ARABIC LETTER FEH ISOLATED FORM + '\ufed3' # 0xE0 -> ARABIC LETTER FEH INITIAL FORM + '\ufed5' # 0xE1 -> ARABIC LETTER QAF ISOLATED FORM + '\ufed7' # 0xE2 -> ARABIC LETTER QAF INITIAL FORM + '\ufed9' # 0xE3 -> ARABIC LETTER KAF ISOLATED FORM + '\ufedb' # 0xE4 -> ARABIC LETTER KAF INITIAL FORM + '\ufb92' # 0xE5 -> ARABIC LETTER GAF ISOLATED FORM + '\ufb94' # 0xE6 -> ARABIC LETTER GAF INITIAL FORM + '\ufedd' # 0xE7 -> ARABIC LETTER LAM ISOLATED FORM + '\ufedf' # 0xE8 -> ARABIC LETTER LAM INITIAL FORM + '\ufee0' # 0xE9 -> ARABIC LETTER LAM MEDIAL FORM + '\ufee1' # 0xEA -> ARABIC LETTER MEEM ISOLATED FORM + '\ufee3' # 0xEB -> ARABIC LETTER MEEM INITIAL FORM + '\ufb9e' # 0xEC -> ARABIC LETTER NOON GHUNNA ISOLATED FORM + '\ufee5' # 0xED -> ARABIC LETTER NOON ISOLATED FORM + '\ufee7' # 0xEE -> ARABIC LETTER 
NOON INITIAL FORM + '\ufe85' # 0xEF -> ARABIC LETTER WAW WITH HAMZA ABOVE ISOLATED FORM + '\ufeed' # 0xF0 -> ARABIC LETTER WAW ISOLATED FORM + '\ufba6' # 0xF1 -> ARABIC LETTER HEH GOAL ISOLATED FORM + '\ufba8' # 0xF2 -> ARABIC LETTER HEH GOAL INITIAL FORM + '\ufba9' # 0xF3 -> ARABIC LETTER HEH GOAL MEDIAL FORM + '\ufbaa' # 0xF4 -> ARABIC LETTER HEH DOACHASHMEE ISOLATED FORM + '\ufe80' # 0xF5 -> ARABIC LETTER HAMZA ISOLATED FORM + '\ufe89' # 0xF6 -> ARABIC LETTER YEH WITH HAMZA ABOVE ISOLATED FORM + '\ufe8a' # 0xF7 -> ARABIC LETTER YEH WITH HAMZA ABOVE FINAL FORM + '\ufe8b' # 0xF8 -> ARABIC LETTER YEH WITH HAMZA ABOVE INITIAL FORM + '\ufef1' # 0xF9 -> ARABIC LETTER YEH ISOLATED FORM + '\ufef2' # 0xFA -> ARABIC LETTER YEH FINAL FORM + '\ufef3' # 0xFB -> ARABIC LETTER YEH INITIAL FORM + '\ufbb0' # 0xFC -> ARABIC LETTER YEH BARREE WITH HAMZA ABOVE ISOLATED FORM + '\ufbae' # 0xFD -> ARABIC LETTER YEH BARREE ISOLATED FORM + '\ufe7c' # 0xFE -> ARABIC SHADDA ISOLATED FORM + '\ufe7d' # 0xFF -> ARABIC SHADDA MEDIAL FORM +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/cp1026.py b/venv/Lib/encodings/cp1026.py new file mode 100644 index 00000000..46f71f74 --- /dev/null +++ b/venv/Lib/encodings/cp1026.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec cp1026 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP1026.TXT' with gencodec.py. + +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='cp1026', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x9c' # 0x04 -> CONTROL + '\t' # 0x05 -> HORIZONTAL TABULATION + '\x86' # 0x06 -> CONTROL + '\x7f' # 0x07 -> DELETE + '\x97' # 0x08 -> CONTROL + '\x8d' # 0x09 -> CONTROL + '\x8e' # 0x0A -> CONTROL + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x9d' # 0x14 -> CONTROL + '\x85' # 0x15 -> CONTROL + '\x08' # 0x16 -> BACKSPACE + '\x87' # 0x17 -> CONTROL + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x92' # 0x1A -> CONTROL + '\x8f' # 0x1B -> CONTROL + '\x1c' # 0x1C -> FILE SEPARATOR + '\x1d' # 0x1D -> GROUP SEPARATOR + '\x1e' # 0x1E -> RECORD SEPARATOR + '\x1f' # 0x1F -> UNIT SEPARATOR + '\x80' # 0x20 -> CONTROL + '\x81' # 0x21 -> CONTROL + '\x82' # 0x22 -> CONTROL + '\x83' 
# 0x23 -> CONTROL + '\x84' # 0x24 -> CONTROL + '\n' # 0x25 -> LINE FEED + '\x17' # 0x26 -> END OF TRANSMISSION BLOCK + '\x1b' # 0x27 -> ESCAPE + '\x88' # 0x28 -> CONTROL + '\x89' # 0x29 -> CONTROL + '\x8a' # 0x2A -> CONTROL + '\x8b' # 0x2B -> CONTROL + '\x8c' # 0x2C -> CONTROL + '\x05' # 0x2D -> ENQUIRY + '\x06' # 0x2E -> ACKNOWLEDGE + '\x07' # 0x2F -> BELL + '\x90' # 0x30 -> CONTROL + '\x91' # 0x31 -> CONTROL + '\x16' # 0x32 -> SYNCHRONOUS IDLE + '\x93' # 0x33 -> CONTROL + '\x94' # 0x34 -> CONTROL + '\x95' # 0x35 -> CONTROL + '\x96' # 0x36 -> CONTROL + '\x04' # 0x37 -> END OF TRANSMISSION + '\x98' # 0x38 -> CONTROL + '\x99' # 0x39 -> CONTROL + '\x9a' # 0x3A -> CONTROL + '\x9b' # 0x3B -> CONTROL + '\x14' # 0x3C -> DEVICE CONTROL FOUR + '\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE + '\x9e' # 0x3E -> CONTROL + '\x1a' # 0x3F -> SUBSTITUTE + ' ' # 0x40 -> SPACE + '\xa0' # 0x41 -> NO-BREAK SPACE + '\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX + '\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS + '\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE + '\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE + '\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE + '\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE + '{' # 0x48 -> LEFT CURLY BRACKET + '\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE + '\xc7' # 0x4A -> LATIN CAPITAL LETTER C WITH CEDILLA + '.' # 0x4B -> FULL STOP + '<' # 0x4C -> LESS-THAN SIGN + '(' # 0x4D -> LEFT PARENTHESIS + '+' # 0x4E -> PLUS SIGN + '!' # 0x4F -> EXCLAMATION MARK + '&' # 0x50 -> AMPERSAND + '\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE + '\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX + '\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS + '\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE + '\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE + '\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX + '\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS + '\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE + '\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN) + '\u011e' # 0x5A -> LATIN CAPITAL LETTER G WITH BREVE + '\u0130' # 0x5B -> LATIN CAPITAL LETTER I WITH DOT ABOVE + '*' # 0x5C -> ASTERISK + ')' # 0x5D -> RIGHT PARENTHESIS + ';' # 0x5E -> SEMICOLON + '^' # 0x5F -> CIRCUMFLEX ACCENT + '-' # 0x60 -> HYPHEN-MINUS + '/' # 0x61 -> SOLIDUS + '\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX + '\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS + '\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE + '\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE + '\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE + '\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE + '[' # 0x68 -> LEFT SQUARE BRACKET + '\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE + '\u015f' # 0x6A -> LATIN SMALL LETTER S WITH CEDILLA + ',' # 0x6B -> COMMA + '%' # 0x6C -> PERCENT SIGN + '_' # 0x6D -> LOW LINE + '>' # 0x6E -> GREATER-THAN SIGN + '?' 
# 0x6F -> QUESTION MARK + '\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE + '\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE + '\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX + '\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS + '\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE + '\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE + '\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX + '\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS + '\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE + '\u0131' # 0x79 -> LATIN SMALL LETTER DOTLESS I + ':' # 0x7A -> COLON + '\xd6' # 0x7B -> LATIN CAPITAL LETTER O WITH DIAERESIS + '\u015e' # 0x7C -> LATIN CAPITAL LETTER S WITH CEDILLA + "'" # 0x7D -> APOSTROPHE + '=' # 0x7E -> EQUALS SIGN + '\xdc' # 0x7F -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE + 'a' # 0x81 -> LATIN SMALL LETTER A + 'b' # 0x82 -> LATIN SMALL LETTER B + 'c' # 0x83 -> LATIN SMALL LETTER C + 'd' # 0x84 -> LATIN SMALL LETTER D + 'e' # 0x85 -> LATIN SMALL LETTER E + 'f' # 0x86 -> LATIN SMALL LETTER F + 'g' # 0x87 -> LATIN SMALL LETTER G + 'h' # 0x88 -> LATIN SMALL LETTER H + 'i' # 0x89 -> LATIN SMALL LETTER I + '\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '}' # 0x8C -> RIGHT CURLY BRACKET + '`' # 0x8D -> GRAVE ACCENT + '\xa6' # 0x8E -> BROKEN BAR + '\xb1' # 0x8F -> PLUS-MINUS SIGN + '\xb0' # 0x90 -> DEGREE SIGN + 'j' # 0x91 -> LATIN SMALL LETTER J + 'k' # 0x92 -> LATIN SMALL LETTER K + 'l' # 0x93 -> LATIN SMALL LETTER L + 'm' # 0x94 -> LATIN SMALL LETTER M + 'n' # 0x95 -> LATIN SMALL LETTER N + 'o' # 0x96 -> LATIN SMALL LETTER O + 'p' # 0x97 -> LATIN SMALL LETTER P + 'q' # 0x98 -> LATIN SMALL LETTER Q + 'r' # 0x99 -> LATIN SMALL LETTER R + '\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR + '\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR + '\xe6' # 0x9C -> LATIN SMALL LIGATURE AE + '\xb8' # 0x9D -> CEDILLA + '\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE + '\xa4' # 0x9F -> CURRENCY SIGN + '\xb5' # 0xA0 -> MICRO SIGN + '\xf6' # 0xA1 -> LATIN SMALL LETTER O WITH DIAERESIS + 's' # 0xA2 -> LATIN SMALL LETTER S + 't' # 0xA3 -> LATIN SMALL LETTER T + 'u' # 0xA4 -> LATIN SMALL LETTER U + 'v' # 0xA5 -> LATIN SMALL LETTER V + 'w' # 0xA6 -> LATIN SMALL LETTER W + 'x' # 0xA7 -> LATIN SMALL LETTER X + 'y' # 0xA8 -> LATIN SMALL LETTER Y + 'z' # 0xA9 -> LATIN SMALL LETTER Z + '\xa1' # 0xAA -> INVERTED EXCLAMATION MARK + '\xbf' # 0xAB -> INVERTED QUESTION MARK + ']' # 0xAC -> RIGHT SQUARE BRACKET + '$' # 0xAD -> DOLLAR SIGN + '@' # 0xAE -> COMMERCIAL AT + '\xae' # 0xAF -> REGISTERED SIGN + '\xa2' # 0xB0 -> CENT SIGN + '\xa3' # 0xB1 -> POUND SIGN + '\xa5' # 0xB2 -> YEN SIGN + '\xb7' # 0xB3 -> MIDDLE DOT + '\xa9' # 0xB4 -> COPYRIGHT SIGN + '\xa7' # 0xB5 -> SECTION SIGN + '\xb6' # 0xB6 -> PILCROW SIGN + '\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER + '\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF + '\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS + '\xac' # 0xBA -> NOT SIGN + '|' # 0xBB -> VERTICAL LINE + '\xaf' # 0xBC -> MACRON + '\xa8' # 0xBD -> DIAERESIS + '\xb4' # 0xBE -> ACUTE ACCENT + '\xd7' # 0xBF -> MULTIPLICATION SIGN + '\xe7' # 0xC0 -> LATIN SMALL LETTER C WITH CEDILLA + 'A' # 0xC1 -> LATIN CAPITAL LETTER A + 'B' # 0xC2 -> LATIN CAPITAL LETTER B + 'C' # 0xC3 -> LATIN CAPITAL LETTER C + 'D' # 0xC4 -> LATIN CAPITAL LETTER D + 'E' # 0xC5 -> LATIN CAPITAL LETTER E + 'F' # 0xC6 -> LATIN CAPITAL LETTER F + 'G' # 0xC7 -> LATIN CAPITAL LETTER G + 'H' # 0xC8 -> LATIN CAPITAL 
LETTER H + 'I' # 0xC9 -> LATIN CAPITAL LETTER I + '\xad' # 0xCA -> SOFT HYPHEN + '\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX + '~' # 0xCC -> TILDE + '\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE + '\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE + '\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE + '\u011f' # 0xD0 -> LATIN SMALL LETTER G WITH BREVE + 'J' # 0xD1 -> LATIN CAPITAL LETTER J + 'K' # 0xD2 -> LATIN CAPITAL LETTER K + 'L' # 0xD3 -> LATIN CAPITAL LETTER L + 'M' # 0xD4 -> LATIN CAPITAL LETTER M + 'N' # 0xD5 -> LATIN CAPITAL LETTER N + 'O' # 0xD6 -> LATIN CAPITAL LETTER O + 'P' # 0xD7 -> LATIN CAPITAL LETTER P + 'Q' # 0xD8 -> LATIN CAPITAL LETTER Q + 'R' # 0xD9 -> LATIN CAPITAL LETTER R + '\xb9' # 0xDA -> SUPERSCRIPT ONE + '\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX + '\\' # 0xDC -> REVERSE SOLIDUS + '\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE + '\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE + '\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS + '\xfc' # 0xE0 -> LATIN SMALL LETTER U WITH DIAERESIS + '\xf7' # 0xE1 -> DIVISION SIGN + 'S' # 0xE2 -> LATIN CAPITAL LETTER S + 'T' # 0xE3 -> LATIN CAPITAL LETTER T + 'U' # 0xE4 -> LATIN CAPITAL LETTER U + 'V' # 0xE5 -> LATIN CAPITAL LETTER V + 'W' # 0xE6 -> LATIN CAPITAL LETTER W + 'X' # 0xE7 -> LATIN CAPITAL LETTER X + 'Y' # 0xE8 -> LATIN CAPITAL LETTER Y + 'Z' # 0xE9 -> LATIN CAPITAL LETTER Z + '\xb2' # 0xEA -> SUPERSCRIPT TWO + '\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX + '#' # 0xEC -> NUMBER SIGN + '\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE + '\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE + '\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE + '0' # 0xF0 -> DIGIT ZERO + '1' # 0xF1 -> DIGIT ONE + '2' # 0xF2 -> DIGIT TWO + '3' # 0xF3 -> DIGIT THREE + '4' # 0xF4 -> DIGIT FOUR + '5' # 0xF5 -> DIGIT FIVE + '6' # 0xF6 -> DIGIT SIX + '7' # 0xF7 -> DIGIT SEVEN + '8' # 0xF8 -> DIGIT EIGHT + '9' # 0xF9 -> DIGIT NINE + '\xb3' # 0xFA -> SUPERSCRIPT THREE + '\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX + '"' # 0xFC -> QUOTATION MARK + '\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE + '\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE + '\x9f' # 0xFF -> CONTROL +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/cp1125.py b/venv/Lib/encodings/cp1125.py new file mode 100644 index 00000000..b1fd69de --- /dev/null +++ b/venv/Lib/encodings/cp1125.py @@ -0,0 +1,698 @@ +""" Python Character Mapping Codec for CP1125 + +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_map) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_map)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='cp1125', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + +### Decoding Map + 
+decoding_map = codecs.make_identity_dict(range(256)) +decoding_map.update({ + 0x0080: 0x0410, # CYRILLIC CAPITAL LETTER A + 0x0081: 0x0411, # CYRILLIC CAPITAL LETTER BE + 0x0082: 0x0412, # CYRILLIC CAPITAL LETTER VE + 0x0083: 0x0413, # CYRILLIC CAPITAL LETTER GHE + 0x0084: 0x0414, # CYRILLIC CAPITAL LETTER DE + 0x0085: 0x0415, # CYRILLIC CAPITAL LETTER IE + 0x0086: 0x0416, # CYRILLIC CAPITAL LETTER ZHE + 0x0087: 0x0417, # CYRILLIC CAPITAL LETTER ZE + 0x0088: 0x0418, # CYRILLIC CAPITAL LETTER I + 0x0089: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I + 0x008a: 0x041a, # CYRILLIC CAPITAL LETTER KA + 0x008b: 0x041b, # CYRILLIC CAPITAL LETTER EL + 0x008c: 0x041c, # CYRILLIC CAPITAL LETTER EM + 0x008d: 0x041d, # CYRILLIC CAPITAL LETTER EN + 0x008e: 0x041e, # CYRILLIC CAPITAL LETTER O + 0x008f: 0x041f, # CYRILLIC CAPITAL LETTER PE + 0x0090: 0x0420, # CYRILLIC CAPITAL LETTER ER + 0x0091: 0x0421, # CYRILLIC CAPITAL LETTER ES + 0x0092: 0x0422, # CYRILLIC CAPITAL LETTER TE + 0x0093: 0x0423, # CYRILLIC CAPITAL LETTER U + 0x0094: 0x0424, # CYRILLIC CAPITAL LETTER EF + 0x0095: 0x0425, # CYRILLIC CAPITAL LETTER HA + 0x0096: 0x0426, # CYRILLIC CAPITAL LETTER TSE + 0x0097: 0x0427, # CYRILLIC CAPITAL LETTER CHE + 0x0098: 0x0428, # CYRILLIC CAPITAL LETTER SHA + 0x0099: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA + 0x009a: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN + 0x009b: 0x042b, # CYRILLIC CAPITAL LETTER YERU + 0x009c: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN + 0x009d: 0x042d, # CYRILLIC CAPITAL LETTER E + 0x009e: 0x042e, # CYRILLIC CAPITAL LETTER YU + 0x009f: 0x042f, # CYRILLIC CAPITAL LETTER YA + 0x00a0: 0x0430, # CYRILLIC SMALL LETTER A + 0x00a1: 0x0431, # CYRILLIC SMALL LETTER BE + 0x00a2: 0x0432, # CYRILLIC SMALL LETTER VE + 0x00a3: 0x0433, # CYRILLIC SMALL LETTER GHE + 0x00a4: 0x0434, # CYRILLIC SMALL LETTER DE + 0x00a5: 0x0435, # CYRILLIC SMALL LETTER IE + 0x00a6: 0x0436, # CYRILLIC SMALL LETTER ZHE + 0x00a7: 0x0437, # CYRILLIC SMALL LETTER ZE + 0x00a8: 0x0438, # CYRILLIC SMALL LETTER I + 0x00a9: 0x0439, # CYRILLIC SMALL LETTER SHORT I + 0x00aa: 0x043a, # CYRILLIC SMALL LETTER KA + 0x00ab: 0x043b, # CYRILLIC SMALL LETTER EL + 0x00ac: 0x043c, # CYRILLIC SMALL LETTER EM + 0x00ad: 0x043d, # CYRILLIC SMALL LETTER EN + 0x00ae: 0x043e, # CYRILLIC SMALL LETTER O + 0x00af: 0x043f, # CYRILLIC SMALL LETTER PE + 0x00b0: 0x2591, # LIGHT SHADE + 0x00b1: 0x2592, # MEDIUM SHADE + 0x00b2: 0x2593, # DARK SHADE + 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL + 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT + 0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE + 0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE + 0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE + 0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE + 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT + 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL + 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT + 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT + 0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE + 0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE + 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT + 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT + 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL + 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT + 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL + 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + 0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE 
AND RIGHT DOUBLE + 0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE + 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT + 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT + 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL + 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL + 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + 0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE + 0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE + 0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE + 0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE + 0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE + 0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE + 0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE + 0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE + 0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE + 0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE + 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT + 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT + 0x00db: 0x2588, # FULL BLOCK + 0x00dc: 0x2584, # LOWER HALF BLOCK + 0x00dd: 0x258c, # LEFT HALF BLOCK + 0x00de: 0x2590, # RIGHT HALF BLOCK + 0x00df: 0x2580, # UPPER HALF BLOCK + 0x00e0: 0x0440, # CYRILLIC SMALL LETTER ER + 0x00e1: 0x0441, # CYRILLIC SMALL LETTER ES + 0x00e2: 0x0442, # CYRILLIC SMALL LETTER TE + 0x00e3: 0x0443, # CYRILLIC SMALL LETTER U + 0x00e4: 0x0444, # CYRILLIC SMALL LETTER EF + 0x00e5: 0x0445, # CYRILLIC SMALL LETTER HA + 0x00e6: 0x0446, # CYRILLIC SMALL LETTER TSE + 0x00e7: 0x0447, # CYRILLIC SMALL LETTER CHE + 0x00e8: 0x0448, # CYRILLIC SMALL LETTER SHA + 0x00e9: 0x0449, # CYRILLIC SMALL LETTER SHCHA + 0x00ea: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN + 0x00eb: 0x044b, # CYRILLIC SMALL LETTER YERU + 0x00ec: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN + 0x00ed: 0x044d, # CYRILLIC SMALL LETTER E + 0x00ee: 0x044e, # CYRILLIC SMALL LETTER YU + 0x00ef: 0x044f, # CYRILLIC SMALL LETTER YA + 0x00f0: 0x0401, # CYRILLIC CAPITAL LETTER IO + 0x00f1: 0x0451, # CYRILLIC SMALL LETTER IO + 0x00f2: 0x0490, # CYRILLIC CAPITAL LETTER GHE WITH UPTURN + 0x00f3: 0x0491, # CYRILLIC SMALL LETTER GHE WITH UPTURN + 0x00f4: 0x0404, # CYRILLIC CAPITAL LETTER UKRAINIAN IE + 0x00f5: 0x0454, # CYRILLIC SMALL LETTER UKRAINIAN IE + 0x00f6: 0x0406, # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I + 0x00f7: 0x0456, # CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I + 0x00f8: 0x0407, # CYRILLIC CAPITAL LETTER YI + 0x00f9: 0x0457, # CYRILLIC SMALL LETTER YI + 0x00fa: 0x00b7, # MIDDLE DOT + 0x00fb: 0x221a, # SQUARE ROOT + 0x00fc: 0x2116, # NUMERO SIGN + 0x00fd: 0x00a4, # CURRENCY SIGN + 0x00fe: 0x25a0, # BLACK SQUARE + 0x00ff: 0x00a0, # NO-BREAK SPACE +}) + +### Decoding Table + +decoding_table = ( + '\x00' # 0x0000 -> NULL + '\x01' # 0x0001 -> START OF HEADING + '\x02' # 0x0002 -> START OF TEXT + '\x03' # 0x0003 -> END OF TEXT + '\x04' # 0x0004 -> END OF TRANSMISSION + '\x05' # 0x0005 -> ENQUIRY + '\x06' # 0x0006 -> ACKNOWLEDGE + '\x07' # 0x0007 -> BELL + '\x08' # 0x0008 -> BACKSPACE + '\t' # 0x0009 -> HORIZONTAL TABULATION + '\n' # 0x000a -> LINE FEED + '\x0b' # 0x000b -> VERTICAL TABULATION + '\x0c' # 0x000c -> FORM FEED + '\r' # 0x000d -> CARRIAGE RETURN + '\x0e' # 0x000e -> SHIFT OUT + '\x0f' # 0x000f -> SHIFT IN + '\x10' # 0x0010 -> DATA LINK ESCAPE + '\x11' # 0x0011 -> DEVICE CONTROL 
+ +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='cp1250', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x04' # 0x04 -> END OF TRANSMISSION + '\x05' # 0x05 -> ENQUIRY + '\x06' # 0x06 -> ACKNOWLEDGE + '\x07' # 0x07 -> BELL + '\x08' # 0x08 -> BACKSPACE + '\t' # 0x09 -> HORIZONTAL TABULATION + '\n' # 0x0A -> LINE FEED + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x14' # 0x14 -> DEVICE CONTROL FOUR + '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x16 -> SYNCHRONOUS IDLE + '\x17' # 0x17 -> END OF TRANSMISSION BLOCK + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x1a' # 0x1A -> SUBSTITUTE + '\x1b' # 0x1B -> ESCAPE + '\x1c' # 0x1C -> FILE SEPARATOR + '\x1d' # 0x1D -> GROUP SEPARATOR + '\x1e' # 0x1E -> RECORD SEPARATOR + '\x1f' # 0x1F -> UNIT SEPARATOR + ' ' # 0x20 -> SPACE + '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' # 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' 
# 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> DELETE + '\u20ac' # 0x80 -> EURO SIGN + '\ufffe' # 0x81 -> UNDEFINED + '\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK + '\ufffe' # 0x83 -> UNDEFINED + '\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK + '\u2026' # 0x85 -> HORIZONTAL ELLIPSIS + '\u2020' # 0x86 -> DAGGER + '\u2021' # 0x87 -> DOUBLE DAGGER + '\ufffe' # 0x88 -> UNDEFINED + '\u2030' # 0x89 -> PER MILLE SIGN + '\u0160' # 0x8A -> LATIN CAPITAL LETTER S WITH CARON + '\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK + '\u015a' # 0x8C -> LATIN CAPITAL LETTER S WITH ACUTE + '\u0164' # 0x8D -> LATIN CAPITAL LETTER T WITH CARON + '\u017d' # 0x8E -> LATIN CAPITAL LETTER Z WITH CARON + '\u0179' # 0x8F -> LATIN CAPITAL LETTER Z WITH ACUTE + '\ufffe' # 0x90 -> UNDEFINED + '\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK + '\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK + '\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK + '\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK + '\u2022' # 0x95 -> BULLET + '\u2013' # 0x96 -> EN DASH + '\u2014' # 0x97 -> EM DASH + '\ufffe' # 0x98 -> UNDEFINED + '\u2122' # 0x99 -> TRADE MARK SIGN + '\u0161' # 0x9A -> LATIN SMALL LETTER S WITH CARON + '\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE 
QUOTATION MARK + '\u015b' # 0x9C -> LATIN SMALL LETTER S WITH ACUTE + '\u0165' # 0x9D -> LATIN SMALL LETTER T WITH CARON + '\u017e' # 0x9E -> LATIN SMALL LETTER Z WITH CARON + '\u017a' # 0x9F -> LATIN SMALL LETTER Z WITH ACUTE + '\xa0' # 0xA0 -> NO-BREAK SPACE + '\u02c7' # 0xA1 -> CARON + '\u02d8' # 0xA2 -> BREVE + '\u0141' # 0xA3 -> LATIN CAPITAL LETTER L WITH STROKE + '\xa4' # 0xA4 -> CURRENCY SIGN + '\u0104' # 0xA5 -> LATIN CAPITAL LETTER A WITH OGONEK + '\xa6' # 0xA6 -> BROKEN BAR + '\xa7' # 0xA7 -> SECTION SIGN + '\xa8' # 0xA8 -> DIAERESIS + '\xa9' # 0xA9 -> COPYRIGHT SIGN + '\u015e' # 0xAA -> LATIN CAPITAL LETTER S WITH CEDILLA + '\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xac' # 0xAC -> NOT SIGN + '\xad' # 0xAD -> SOFT HYPHEN + '\xae' # 0xAE -> REGISTERED SIGN + '\u017b' # 0xAF -> LATIN CAPITAL LETTER Z WITH DOT ABOVE + '\xb0' # 0xB0 -> DEGREE SIGN + '\xb1' # 0xB1 -> PLUS-MINUS SIGN + '\u02db' # 0xB2 -> OGONEK + '\u0142' # 0xB3 -> LATIN SMALL LETTER L WITH STROKE + '\xb4' # 0xB4 -> ACUTE ACCENT + '\xb5' # 0xB5 -> MICRO SIGN + '\xb6' # 0xB6 -> PILCROW SIGN + '\xb7' # 0xB7 -> MIDDLE DOT + '\xb8' # 0xB8 -> CEDILLA + '\u0105' # 0xB9 -> LATIN SMALL LETTER A WITH OGONEK + '\u015f' # 0xBA -> LATIN SMALL LETTER S WITH CEDILLA + '\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\u013d' # 0xBC -> LATIN CAPITAL LETTER L WITH CARON + '\u02dd' # 0xBD -> DOUBLE ACUTE ACCENT + '\u013e' # 0xBE -> LATIN SMALL LETTER L WITH CARON + '\u017c' # 0xBF -> LATIN SMALL LETTER Z WITH DOT ABOVE + '\u0154' # 0xC0 -> LATIN CAPITAL LETTER R WITH ACUTE + '\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE + '\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX + '\u0102' # 0xC3 -> LATIN CAPITAL LETTER A WITH BREVE + '\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS + '\u0139' # 0xC5 -> LATIN CAPITAL LETTER L WITH ACUTE + '\u0106' # 0xC6 -> LATIN CAPITAL LETTER C WITH ACUTE + '\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA + '\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON + '\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE + '\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK + '\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS + '\u011a' # 0xCC -> LATIN CAPITAL LETTER E WITH CARON + '\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE + '\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX + '\u010e' # 0xCF -> LATIN CAPITAL LETTER D WITH CARON + '\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE + '\u0143' # 0xD1 -> LATIN CAPITAL LETTER N WITH ACUTE + '\u0147' # 0xD2 -> LATIN CAPITAL LETTER N WITH CARON + '\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE + '\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX + '\u0150' # 0xD5 -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE + '\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS + '\xd7' # 0xD7 -> MULTIPLICATION SIGN + '\u0158' # 0xD8 -> LATIN CAPITAL LETTER R WITH CARON + '\u016e' # 0xD9 -> LATIN CAPITAL LETTER U WITH RING ABOVE + '\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE + '\u0170' # 0xDB -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE + '\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE + '\u0162' # 0xDE -> LATIN CAPITAL LETTER T WITH CEDILLA + '\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S + '\u0155' # 0xE0 -> LATIN SMALL LETTER R WITH ACUTE + '\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE + '\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX + '\u0103' # 0xE3 -> LATIN SMALL LETTER A WITH BREVE + '\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH 
DIAERESIS + '\u013a' # 0xE5 -> LATIN SMALL LETTER L WITH ACUTE + '\u0107' # 0xE6 -> LATIN SMALL LETTER C WITH ACUTE + '\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA + '\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON + '\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE + '\u0119' # 0xEA -> LATIN SMALL LETTER E WITH OGONEK + '\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS + '\u011b' # 0xEC -> LATIN SMALL LETTER E WITH CARON + '\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE + '\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX + '\u010f' # 0xEF -> LATIN SMALL LETTER D WITH CARON + '\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE + '\u0144' # 0xF1 -> LATIN SMALL LETTER N WITH ACUTE + '\u0148' # 0xF2 -> LATIN SMALL LETTER N WITH CARON + '\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE + '\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX + '\u0151' # 0xF5 -> LATIN SMALL LETTER O WITH DOUBLE ACUTE + '\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS + '\xf7' # 0xF7 -> DIVISION SIGN + '\u0159' # 0xF8 -> LATIN SMALL LETTER R WITH CARON + '\u016f' # 0xF9 -> LATIN SMALL LETTER U WITH RING ABOVE + '\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE + '\u0171' # 0xFB -> LATIN SMALL LETTER U WITH DOUBLE ACUTE + '\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS + '\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE + '\u0163' # 0xFE -> LATIN SMALL LETTER T WITH CEDILLA + '\u02d9' # 0xFF -> DOT ABOVE +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/cp1251.py b/venv/Lib/encodings/cp1251.py new file mode 100644 index 00000000..22bc6600 --- /dev/null +++ b/venv/Lib/encodings/cp1251.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec cp1251 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1251.TXT' with gencodec.py. 
+ +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='cp1251', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x04' # 0x04 -> END OF TRANSMISSION + '\x05' # 0x05 -> ENQUIRY + '\x06' # 0x06 -> ACKNOWLEDGE + '\x07' # 0x07 -> BELL + '\x08' # 0x08 -> BACKSPACE + '\t' # 0x09 -> HORIZONTAL TABULATION + '\n' # 0x0A -> LINE FEED + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x14' # 0x14 -> DEVICE CONTROL FOUR + '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x16 -> SYNCHRONOUS IDLE + '\x17' # 0x17 -> END OF TRANSMISSION BLOCK + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x1a' # 0x1A -> SUBSTITUTE + '\x1b' # 0x1B -> ESCAPE + '\x1c' # 0x1C -> FILE SEPARATOR + '\x1d' # 0x1D -> GROUP SEPARATOR + '\x1e' # 0x1E -> RECORD SEPARATOR + '\x1f' # 0x1F -> UNIT SEPARATOR + ' ' # 0x20 -> SPACE + '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' # 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' 
# 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> DELETE + '\u0402' # 0x80 -> CYRILLIC CAPITAL LETTER DJE + '\u0403' # 0x81 -> CYRILLIC CAPITAL LETTER GJE + '\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK + '\u0453' # 0x83 -> CYRILLIC SMALL LETTER GJE + '\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK + '\u2026' # 0x85 -> HORIZONTAL ELLIPSIS + '\u2020' # 0x86 -> DAGGER + '\u2021' # 0x87 -> DOUBLE DAGGER + '\u20ac' # 0x88 -> EURO SIGN + '\u2030' # 0x89 -> PER MILLE SIGN + '\u0409' # 0x8A -> CYRILLIC CAPITAL LETTER LJE + '\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK + '\u040a' # 0x8C -> CYRILLIC CAPITAL LETTER NJE + '\u040c' # 0x8D -> CYRILLIC CAPITAL LETTER KJE + '\u040b' # 0x8E -> CYRILLIC CAPITAL LETTER TSHE + '\u040f' # 0x8F -> CYRILLIC CAPITAL LETTER DZHE + '\u0452' # 0x90 -> CYRILLIC SMALL LETTER DJE + '\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK + '\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK + '\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK + '\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK + '\u2022' # 0x95 -> BULLET + '\u2013' # 0x96 -> EN DASH + '\u2014' # 0x97 -> EM DASH + '\ufffe' # 0x98 -> UNDEFINED + '\u2122' # 0x99 -> TRADE MARK SIGN + '\u0459' # 0x9A -> CYRILLIC SMALL LETTER LJE + '\u203a' # 
0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK + '\u045a' # 0x9C -> CYRILLIC SMALL LETTER NJE + '\u045c' # 0x9D -> CYRILLIC SMALL LETTER KJE + '\u045b' # 0x9E -> CYRILLIC SMALL LETTER TSHE + '\u045f' # 0x9F -> CYRILLIC SMALL LETTER DZHE + '\xa0' # 0xA0 -> NO-BREAK SPACE + '\u040e' # 0xA1 -> CYRILLIC CAPITAL LETTER SHORT U + '\u045e' # 0xA2 -> CYRILLIC SMALL LETTER SHORT U + '\u0408' # 0xA3 -> CYRILLIC CAPITAL LETTER JE + '\xa4' # 0xA4 -> CURRENCY SIGN + '\u0490' # 0xA5 -> CYRILLIC CAPITAL LETTER GHE WITH UPTURN + '\xa6' # 0xA6 -> BROKEN BAR + '\xa7' # 0xA7 -> SECTION SIGN + '\u0401' # 0xA8 -> CYRILLIC CAPITAL LETTER IO + '\xa9' # 0xA9 -> COPYRIGHT SIGN + '\u0404' # 0xAA -> CYRILLIC CAPITAL LETTER UKRAINIAN IE + '\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xac' # 0xAC -> NOT SIGN + '\xad' # 0xAD -> SOFT HYPHEN + '\xae' # 0xAE -> REGISTERED SIGN + '\u0407' # 0xAF -> CYRILLIC CAPITAL LETTER YI + '\xb0' # 0xB0 -> DEGREE SIGN + '\xb1' # 0xB1 -> PLUS-MINUS SIGN + '\u0406' # 0xB2 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I + '\u0456' # 0xB3 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I + '\u0491' # 0xB4 -> CYRILLIC SMALL LETTER GHE WITH UPTURN + '\xb5' # 0xB5 -> MICRO SIGN + '\xb6' # 0xB6 -> PILCROW SIGN + '\xb7' # 0xB7 -> MIDDLE DOT + '\u0451' # 0xB8 -> CYRILLIC SMALL LETTER IO + '\u2116' # 0xB9 -> NUMERO SIGN + '\u0454' # 0xBA -> CYRILLIC SMALL LETTER UKRAINIAN IE + '\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\u0458' # 0xBC -> CYRILLIC SMALL LETTER JE + '\u0405' # 0xBD -> CYRILLIC CAPITAL LETTER DZE + '\u0455' # 0xBE -> CYRILLIC SMALL LETTER DZE + '\u0457' # 0xBF -> CYRILLIC SMALL LETTER YI + '\u0410' # 0xC0 -> CYRILLIC CAPITAL LETTER A + '\u0411' # 0xC1 -> CYRILLIC CAPITAL LETTER BE + '\u0412' # 0xC2 -> CYRILLIC CAPITAL LETTER VE + '\u0413' # 0xC3 -> CYRILLIC CAPITAL LETTER GHE + '\u0414' # 0xC4 -> CYRILLIC CAPITAL LETTER DE + '\u0415' # 0xC5 -> CYRILLIC CAPITAL LETTER IE + '\u0416' # 0xC6 -> CYRILLIC CAPITAL LETTER ZHE + '\u0417' # 0xC7 -> CYRILLIC CAPITAL LETTER ZE + '\u0418' # 0xC8 -> CYRILLIC CAPITAL LETTER I + '\u0419' # 0xC9 -> CYRILLIC CAPITAL LETTER SHORT I + '\u041a' # 0xCA -> CYRILLIC CAPITAL LETTER KA + '\u041b' # 0xCB -> CYRILLIC CAPITAL LETTER EL + '\u041c' # 0xCC -> CYRILLIC CAPITAL LETTER EM + '\u041d' # 0xCD -> CYRILLIC CAPITAL LETTER EN + '\u041e' # 0xCE -> CYRILLIC CAPITAL LETTER O + '\u041f' # 0xCF -> CYRILLIC CAPITAL LETTER PE + '\u0420' # 0xD0 -> CYRILLIC CAPITAL LETTER ER + '\u0421' # 0xD1 -> CYRILLIC CAPITAL LETTER ES + '\u0422' # 0xD2 -> CYRILLIC CAPITAL LETTER TE + '\u0423' # 0xD3 -> CYRILLIC CAPITAL LETTER U + '\u0424' # 0xD4 -> CYRILLIC CAPITAL LETTER EF + '\u0425' # 0xD5 -> CYRILLIC CAPITAL LETTER HA + '\u0426' # 0xD6 -> CYRILLIC CAPITAL LETTER TSE + '\u0427' # 0xD7 -> CYRILLIC CAPITAL LETTER CHE + '\u0428' # 0xD8 -> CYRILLIC CAPITAL LETTER SHA + '\u0429' # 0xD9 -> CYRILLIC CAPITAL LETTER SHCHA + '\u042a' # 0xDA -> CYRILLIC CAPITAL LETTER HARD SIGN + '\u042b' # 0xDB -> CYRILLIC CAPITAL LETTER YERU + '\u042c' # 0xDC -> CYRILLIC CAPITAL LETTER SOFT SIGN + '\u042d' # 0xDD -> CYRILLIC CAPITAL LETTER E + '\u042e' # 0xDE -> CYRILLIC CAPITAL LETTER YU + '\u042f' # 0xDF -> CYRILLIC CAPITAL LETTER YA + '\u0430' # 0xE0 -> CYRILLIC SMALL LETTER A + '\u0431' # 0xE1 -> CYRILLIC SMALL LETTER BE + '\u0432' # 0xE2 -> CYRILLIC SMALL LETTER VE + '\u0433' # 0xE3 -> CYRILLIC SMALL LETTER GHE + '\u0434' # 0xE4 -> CYRILLIC SMALL LETTER DE + '\u0435' # 0xE5 -> CYRILLIC SMALL LETTER IE + '\u0436' # 0xE6 -> CYRILLIC SMALL LETTER ZHE + 
'\u0437' # 0xE7 -> CYRILLIC SMALL LETTER ZE + '\u0438' # 0xE8 -> CYRILLIC SMALL LETTER I + '\u0439' # 0xE9 -> CYRILLIC SMALL LETTER SHORT I + '\u043a' # 0xEA -> CYRILLIC SMALL LETTER KA + '\u043b' # 0xEB -> CYRILLIC SMALL LETTER EL + '\u043c' # 0xEC -> CYRILLIC SMALL LETTER EM + '\u043d' # 0xED -> CYRILLIC SMALL LETTER EN + '\u043e' # 0xEE -> CYRILLIC SMALL LETTER O + '\u043f' # 0xEF -> CYRILLIC SMALL LETTER PE + '\u0440' # 0xF0 -> CYRILLIC SMALL LETTER ER + '\u0441' # 0xF1 -> CYRILLIC SMALL LETTER ES + '\u0442' # 0xF2 -> CYRILLIC SMALL LETTER TE + '\u0443' # 0xF3 -> CYRILLIC SMALL LETTER U + '\u0444' # 0xF4 -> CYRILLIC SMALL LETTER EF + '\u0445' # 0xF5 -> CYRILLIC SMALL LETTER HA + '\u0446' # 0xF6 -> CYRILLIC SMALL LETTER TSE + '\u0447' # 0xF7 -> CYRILLIC SMALL LETTER CHE + '\u0448' # 0xF8 -> CYRILLIC SMALL LETTER SHA + '\u0449' # 0xF9 -> CYRILLIC SMALL LETTER SHCHA + '\u044a' # 0xFA -> CYRILLIC SMALL LETTER HARD SIGN + '\u044b' # 0xFB -> CYRILLIC SMALL LETTER YERU + '\u044c' # 0xFC -> CYRILLIC SMALL LETTER SOFT SIGN + '\u044d' # 0xFD -> CYRILLIC SMALL LETTER E + '\u044e' # 0xFE -> CYRILLIC SMALL LETTER YU + '\u044f' # 0xFF -> CYRILLIC SMALL LETTER YA +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/cp1252.py b/venv/Lib/encodings/cp1252.py new file mode 100644 index 00000000..c0e8088e --- /dev/null +++ b/venv/Lib/encodings/cp1252.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec cp1252 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1252.TXT' with gencodec.py. + +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='cp1252', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x04' # 0x04 -> END OF TRANSMISSION + '\x05' # 0x05 -> ENQUIRY + '\x06' # 0x06 -> ACKNOWLEDGE + '\x07' # 0x07 -> BELL + '\x08' # 0x08 -> BACKSPACE + '\t' # 0x09 -> HORIZONTAL TABULATION + '\n' # 0x0A -> LINE FEED + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x14' # 0x14 -> DEVICE CONTROL FOUR + '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x16 -> SYNCHRONOUS IDLE + '\x17' # 0x17 -> END OF TRANSMISSION BLOCK + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x1a' # 0x1A -> SUBSTITUTE + '\x1b' # 0x1B -> ESCAPE + '\x1c' # 0x1C 
LATIN CAPITAL LETTER O WITH DIAERESIS + '\xd7' # 0xD7 -> MULTIPLICATION SIGN + '\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE + '\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE + '\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE + '\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX + '\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE + '\xde' # 0xDE -> LATIN CAPITAL LETTER THORN + '\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S + '\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE + '\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE + '\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX + '\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE + '\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS + '\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE + '\xe6' # 0xE6 -> LATIN SMALL LETTER AE + '\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA + '\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE + '\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE + '\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX + '\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS + '\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE + '\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE + '\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX + '\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS + '\xf0' # 0xF0 -> LATIN SMALL LETTER ETH + '\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE + '\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE + '\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE + '\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX + '\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE + '\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS + '\xf7' # 0xF7 -> DIVISION SIGN + '\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE + '\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE + '\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE + '\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX + '\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS + '\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE + '\xfe' # 0xFE -> LATIN SMALL LETTER THORN + '\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/cp1253.py b/venv/Lib/encodings/cp1253.py new file mode 100644 index 00000000..ec9c0972 --- /dev/null +++ b/venv/Lib/encodings/cp1253.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec cp1253 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1253.TXT' with gencodec.py. 
+ +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='cp1253', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x04' # 0x04 -> END OF TRANSMISSION + '\x05' # 0x05 -> ENQUIRY + '\x06' # 0x06 -> ACKNOWLEDGE + '\x07' # 0x07 -> BELL + '\x08' # 0x08 -> BACKSPACE + '\t' # 0x09 -> HORIZONTAL TABULATION + '\n' # 0x0A -> LINE FEED + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x14' # 0x14 -> DEVICE CONTROL FOUR + '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x16 -> SYNCHRONOUS IDLE + '\x17' # 0x17 -> END OF TRANSMISSION BLOCK + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x1a' # 0x1A -> SUBSTITUTE + '\x1b' # 0x1B -> ESCAPE + '\x1c' # 0x1C -> FILE SEPARATOR + '\x1d' # 0x1D -> GROUP SEPARATOR + '\x1e' # 0x1E -> RECORD SEPARATOR + '\x1f' # 0x1F -> UNIT SEPARATOR + ' ' # 0x20 -> SPACE + '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' # 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' 
# 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> DELETE + '\u20ac' # 0x80 -> EURO SIGN + '\ufffe' # 0x81 -> UNDEFINED + '\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK + '\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK + '\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK + '\u2026' # 0x85 -> HORIZONTAL ELLIPSIS + '\u2020' # 0x86 -> DAGGER + '\u2021' # 0x87 -> DOUBLE DAGGER + '\ufffe' # 0x88 -> UNDEFINED + '\u2030' # 0x89 -> PER MILLE SIGN + '\ufffe' # 0x8A -> UNDEFINED + '\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK + '\ufffe' # 0x8C -> UNDEFINED + '\ufffe' # 0x8D -> UNDEFINED + '\ufffe' # 0x8E -> UNDEFINED + '\ufffe' # 0x8F -> UNDEFINED + '\ufffe' # 0x90 -> UNDEFINED + '\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK + '\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK + '\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK + '\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK + '\u2022' # 0x95 -> BULLET + '\u2013' # 0x96 -> EN DASH + '\u2014' # 0x97 -> EM DASH + '\ufffe' # 0x98 -> UNDEFINED + '\u2122' # 0x99 -> TRADE MARK SIGN + '\ufffe' # 0x9A -> UNDEFINED + '\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK + '\ufffe' # 0x9C -> UNDEFINED + '\ufffe' # 0x9D -> UNDEFINED + '\ufffe' # 0x9E -> UNDEFINED + '\ufffe' # 
0x9F -> UNDEFINED + '\xa0' # 0xA0 -> NO-BREAK SPACE + '\u0385' # 0xA1 -> GREEK DIALYTIKA TONOS + '\u0386' # 0xA2 -> GREEK CAPITAL LETTER ALPHA WITH TONOS + '\xa3' # 0xA3 -> POUND SIGN + '\xa4' # 0xA4 -> CURRENCY SIGN + '\xa5' # 0xA5 -> YEN SIGN + '\xa6' # 0xA6 -> BROKEN BAR + '\xa7' # 0xA7 -> SECTION SIGN + '\xa8' # 0xA8 -> DIAERESIS + '\xa9' # 0xA9 -> COPYRIGHT SIGN + '\ufffe' # 0xAA -> UNDEFINED + '\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xac' # 0xAC -> NOT SIGN + '\xad' # 0xAD -> SOFT HYPHEN + '\xae' # 0xAE -> REGISTERED SIGN + '\u2015' # 0xAF -> HORIZONTAL BAR + '\xb0' # 0xB0 -> DEGREE SIGN + '\xb1' # 0xB1 -> PLUS-MINUS SIGN + '\xb2' # 0xB2 -> SUPERSCRIPT TWO + '\xb3' # 0xB3 -> SUPERSCRIPT THREE + '\u0384' # 0xB4 -> GREEK TONOS + '\xb5' # 0xB5 -> MICRO SIGN + '\xb6' # 0xB6 -> PILCROW SIGN + '\xb7' # 0xB7 -> MIDDLE DOT + '\u0388' # 0xB8 -> GREEK CAPITAL LETTER EPSILON WITH TONOS + '\u0389' # 0xB9 -> GREEK CAPITAL LETTER ETA WITH TONOS + '\u038a' # 0xBA -> GREEK CAPITAL LETTER IOTA WITH TONOS + '\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\u038c' # 0xBC -> GREEK CAPITAL LETTER OMICRON WITH TONOS + '\xbd' # 0xBD -> VULGAR FRACTION ONE HALF + '\u038e' # 0xBE -> GREEK CAPITAL LETTER UPSILON WITH TONOS + '\u038f' # 0xBF -> GREEK CAPITAL LETTER OMEGA WITH TONOS + '\u0390' # 0xC0 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS + '\u0391' # 0xC1 -> GREEK CAPITAL LETTER ALPHA + '\u0392' # 0xC2 -> GREEK CAPITAL LETTER BETA + '\u0393' # 0xC3 -> GREEK CAPITAL LETTER GAMMA + '\u0394' # 0xC4 -> GREEK CAPITAL LETTER DELTA + '\u0395' # 0xC5 -> GREEK CAPITAL LETTER EPSILON + '\u0396' # 0xC6 -> GREEK CAPITAL LETTER ZETA + '\u0397' # 0xC7 -> GREEK CAPITAL LETTER ETA + '\u0398' # 0xC8 -> GREEK CAPITAL LETTER THETA + '\u0399' # 0xC9 -> GREEK CAPITAL LETTER IOTA + '\u039a' # 0xCA -> GREEK CAPITAL LETTER KAPPA + '\u039b' # 0xCB -> GREEK CAPITAL LETTER LAMDA + '\u039c' # 0xCC -> GREEK CAPITAL LETTER MU + '\u039d' # 0xCD -> GREEK CAPITAL LETTER NU + '\u039e' # 0xCE -> GREEK CAPITAL LETTER XI + '\u039f' # 0xCF -> GREEK CAPITAL LETTER OMICRON + '\u03a0' # 0xD0 -> GREEK CAPITAL LETTER PI + '\u03a1' # 0xD1 -> GREEK CAPITAL LETTER RHO + '\ufffe' # 0xD2 -> UNDEFINED + '\u03a3' # 0xD3 -> GREEK CAPITAL LETTER SIGMA + '\u03a4' # 0xD4 -> GREEK CAPITAL LETTER TAU + '\u03a5' # 0xD5 -> GREEK CAPITAL LETTER UPSILON + '\u03a6' # 0xD6 -> GREEK CAPITAL LETTER PHI + '\u03a7' # 0xD7 -> GREEK CAPITAL LETTER CHI + '\u03a8' # 0xD8 -> GREEK CAPITAL LETTER PSI + '\u03a9' # 0xD9 -> GREEK CAPITAL LETTER OMEGA + '\u03aa' # 0xDA -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA + '\u03ab' # 0xDB -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA + '\u03ac' # 0xDC -> GREEK SMALL LETTER ALPHA WITH TONOS + '\u03ad' # 0xDD -> GREEK SMALL LETTER EPSILON WITH TONOS + '\u03ae' # 0xDE -> GREEK SMALL LETTER ETA WITH TONOS + '\u03af' # 0xDF -> GREEK SMALL LETTER IOTA WITH TONOS + '\u03b0' # 0xE0 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS + '\u03b1' # 0xE1 -> GREEK SMALL LETTER ALPHA + '\u03b2' # 0xE2 -> GREEK SMALL LETTER BETA + '\u03b3' # 0xE3 -> GREEK SMALL LETTER GAMMA + '\u03b4' # 0xE4 -> GREEK SMALL LETTER DELTA + '\u03b5' # 0xE5 -> GREEK SMALL LETTER EPSILON + '\u03b6' # 0xE6 -> GREEK SMALL LETTER ZETA + '\u03b7' # 0xE7 -> GREEK SMALL LETTER ETA + '\u03b8' # 0xE8 -> GREEK SMALL LETTER THETA + '\u03b9' # 0xE9 -> GREEK SMALL LETTER IOTA + '\u03ba' # 0xEA -> GREEK SMALL LETTER KAPPA + '\u03bb' # 0xEB -> GREEK SMALL LETTER LAMDA + '\u03bc' # 0xEC -> GREEK SMALL LETTER MU + '\u03bd' # 0xED -> GREEK 
SMALL LETTER NU + '\u03be' # 0xEE -> GREEK SMALL LETTER XI + '\u03bf' # 0xEF -> GREEK SMALL LETTER OMICRON + '\u03c0' # 0xF0 -> GREEK SMALL LETTER PI + '\u03c1' # 0xF1 -> GREEK SMALL LETTER RHO + '\u03c2' # 0xF2 -> GREEK SMALL LETTER FINAL SIGMA + '\u03c3' # 0xF3 -> GREEK SMALL LETTER SIGMA + '\u03c4' # 0xF4 -> GREEK SMALL LETTER TAU + '\u03c5' # 0xF5 -> GREEK SMALL LETTER UPSILON + '\u03c6' # 0xF6 -> GREEK SMALL LETTER PHI + '\u03c7' # 0xF7 -> GREEK SMALL LETTER CHI + '\u03c8' # 0xF8 -> GREEK SMALL LETTER PSI + '\u03c9' # 0xF9 -> GREEK SMALL LETTER OMEGA + '\u03ca' # 0xFA -> GREEK SMALL LETTER IOTA WITH DIALYTIKA + '\u03cb' # 0xFB -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA + '\u03cc' # 0xFC -> GREEK SMALL LETTER OMICRON WITH TONOS + '\u03cd' # 0xFD -> GREEK SMALL LETTER UPSILON WITH TONOS + '\u03ce' # 0xFE -> GREEK SMALL LETTER OMEGA WITH TONOS + '\ufffe' # 0xFF -> UNDEFINED +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/cp1254.py b/venv/Lib/encodings/cp1254.py new file mode 100644 index 00000000..4912327a --- /dev/null +++ b/venv/Lib/encodings/cp1254.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec cp1254 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1254.TXT' with gencodec.py. + +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='cp1254', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x04' # 0x04 -> END OF TRANSMISSION + '\x05' # 0x05 -> ENQUIRY + '\x06' # 0x06 -> ACKNOWLEDGE + '\x07' # 0x07 -> BELL + '\x08' # 0x08 -> BACKSPACE + '\t' # 0x09 -> HORIZONTAL TABULATION + '\n' # 0x0A -> LINE FEED + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x14' # 0x14 -> DEVICE CONTROL FOUR + '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x16 -> SYNCHRONOUS IDLE + '\x17' # 0x17 -> END OF TRANSMISSION BLOCK + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x1a' # 0x1A -> SUBSTITUTE + '\x1b' # 0x1B -> ESCAPE + '\x1c' # 0x1C -> FILE SEPARATOR + '\x1d' # 0x1D -> GROUP SEPARATOR + '\x1e' # 0x1E -> RECORD SEPARATOR + '\x1f' # 0x1F -> UNIT SEPARATOR + ' ' # 0x20 -> SPACE + '!' 
# 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' # 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' # 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> DELETE + '\u20ac' # 0x80 -> EURO SIGN + '\ufffe' # 0x81 -> UNDEFINED + '\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK + '\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK + '\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK + '\u2026' # 0x85 -> HORIZONTAL ELLIPSIS + '\u2020' # 0x86 -> DAGGER + '\u2021' # 0x87 -> DOUBLE DAGGER + '\u02c6' # 0x88 -> MODIFIER 
LETTER CIRCUMFLEX ACCENT + '\u2030' # 0x89 -> PER MILLE SIGN + '\u0160' # 0x8A -> LATIN CAPITAL LETTER S WITH CARON + '\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK + '\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE + '\ufffe' # 0x8D -> UNDEFINED + '\ufffe' # 0x8E -> UNDEFINED + '\ufffe' # 0x8F -> UNDEFINED + '\ufffe' # 0x90 -> UNDEFINED + '\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK + '\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK + '\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK + '\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK + '\u2022' # 0x95 -> BULLET + '\u2013' # 0x96 -> EN DASH + '\u2014' # 0x97 -> EM DASH + '\u02dc' # 0x98 -> SMALL TILDE + '\u2122' # 0x99 -> TRADE MARK SIGN + '\u0161' # 0x9A -> LATIN SMALL LETTER S WITH CARON + '\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK + '\u0153' # 0x9C -> LATIN SMALL LIGATURE OE + '\ufffe' # 0x9D -> UNDEFINED + '\ufffe' # 0x9E -> UNDEFINED + '\u0178' # 0x9F -> LATIN CAPITAL LETTER Y WITH DIAERESIS + '\xa0' # 0xA0 -> NO-BREAK SPACE + '\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK + '\xa2' # 0xA2 -> CENT SIGN + '\xa3' # 0xA3 -> POUND SIGN + '\xa4' # 0xA4 -> CURRENCY SIGN + '\xa5' # 0xA5 -> YEN SIGN + '\xa6' # 0xA6 -> BROKEN BAR + '\xa7' # 0xA7 -> SECTION SIGN + '\xa8' # 0xA8 -> DIAERESIS + '\xa9' # 0xA9 -> COPYRIGHT SIGN + '\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR + '\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xac' # 0xAC -> NOT SIGN + '\xad' # 0xAD -> SOFT HYPHEN + '\xae' # 0xAE -> REGISTERED SIGN + '\xaf' # 0xAF -> MACRON + '\xb0' # 0xB0 -> DEGREE SIGN + '\xb1' # 0xB1 -> PLUS-MINUS SIGN + '\xb2' # 0xB2 -> SUPERSCRIPT TWO + '\xb3' # 0xB3 -> SUPERSCRIPT THREE + '\xb4' # 0xB4 -> ACUTE ACCENT + '\xb5' # 0xB5 -> MICRO SIGN + '\xb6' # 0xB6 -> PILCROW SIGN + '\xb7' # 0xB7 -> MIDDLE DOT + '\xb8' # 0xB8 -> CEDILLA + '\xb9' # 0xB9 -> SUPERSCRIPT ONE + '\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR + '\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER + '\xbd' # 0xBD -> VULGAR FRACTION ONE HALF + '\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS + '\xbf' # 0xBF -> INVERTED QUESTION MARK + '\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE + '\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE + '\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX + '\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE + '\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS + '\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE + '\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE + '\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA + '\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE + '\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE + '\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX + '\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS + '\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE + '\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE + '\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX + '\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS + '\u011e' # 0xD0 -> LATIN CAPITAL LETTER G WITH BREVE + '\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE + '\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE + '\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE + '\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX + '\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE + '\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS + '\xd7' # 0xD7 -> MULTIPLICATION SIGN + '\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE + '\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE + 
'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE + '\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX + '\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\u0130' # 0xDD -> LATIN CAPITAL LETTER I WITH DOT ABOVE + '\u015e' # 0xDE -> LATIN CAPITAL LETTER S WITH CEDILLA + '\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S + '\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE + '\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE + '\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX + '\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE + '\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS + '\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE + '\xe6' # 0xE6 -> LATIN SMALL LETTER AE + '\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA + '\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE + '\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE + '\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX + '\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS + '\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE + '\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE + '\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX + '\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS + '\u011f' # 0xF0 -> LATIN SMALL LETTER G WITH BREVE + '\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE + '\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE + '\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE + '\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX + '\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE + '\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS + '\xf7' # 0xF7 -> DIVISION SIGN + '\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE + '\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE + '\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE + '\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX + '\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS + '\u0131' # 0xFD -> LATIN SMALL LETTER DOTLESS I + '\u015f' # 0xFE -> LATIN SMALL LETTER S WITH CEDILLA + '\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/cp1255.py b/venv/Lib/encodings/cp1255.py new file mode 100644 index 00000000..91ce26b9 --- /dev/null +++ b/venv/Lib/encodings/cp1255.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec cp1255 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1255.TXT' with gencodec.py. 
+ +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='cp1255', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x04' # 0x04 -> END OF TRANSMISSION + '\x05' # 0x05 -> ENQUIRY + '\x06' # 0x06 -> ACKNOWLEDGE + '\x07' # 0x07 -> BELL + '\x08' # 0x08 -> BACKSPACE + '\t' # 0x09 -> HORIZONTAL TABULATION + '\n' # 0x0A -> LINE FEED + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x14' # 0x14 -> DEVICE CONTROL FOUR + '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x16 -> SYNCHRONOUS IDLE + '\x17' # 0x17 -> END OF TRANSMISSION BLOCK + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x1a' # 0x1A -> SUBSTITUTE + '\x1b' # 0x1B -> ESCAPE + '\x1c' # 0x1C -> FILE SEPARATOR + '\x1d' # 0x1D -> GROUP SEPARATOR + '\x1e' # 0x1E -> RECORD SEPARATOR + '\x1f' # 0x1F -> UNIT SEPARATOR + ' ' # 0x20 -> SPACE + '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' # 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' 
# 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> DELETE + '\u20ac' # 0x80 -> EURO SIGN + '\ufffe' # 0x81 -> UNDEFINED + '\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK + '\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK + '\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK + '\u2026' # 0x85 -> HORIZONTAL ELLIPSIS + '\u2020' # 0x86 -> DAGGER + '\u2021' # 0x87 -> DOUBLE DAGGER + '\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT + '\u2030' # 0x89 -> PER MILLE SIGN + '\ufffe' # 0x8A -> UNDEFINED + '\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK + '\ufffe' # 0x8C -> UNDEFINED + '\ufffe' # 0x8D -> UNDEFINED + '\ufffe' # 0x8E -> UNDEFINED + '\ufffe' # 0x8F -> UNDEFINED + '\ufffe' # 0x90 -> UNDEFINED + '\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK + '\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK + '\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK + '\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK + '\u2022' # 0x95 -> BULLET + '\u2013' # 0x96 -> EN DASH + '\u2014' # 0x97 -> EM DASH + '\u02dc' # 0x98 -> SMALL TILDE + '\u2122' # 0x99 -> TRADE MARK SIGN + '\ufffe' # 0x9A -> UNDEFINED + '\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK + '\ufffe' # 0x9C -> UNDEFINED + '\ufffe' # 0x9D -> UNDEFINED + '\ufffe' # 0x9E 
-> UNDEFINED + '\ufffe' # 0x9F -> UNDEFINED + '\xa0' # 0xA0 -> NO-BREAK SPACE + '\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK + '\xa2' # 0xA2 -> CENT SIGN + '\xa3' # 0xA3 -> POUND SIGN + '\u20aa' # 0xA4 -> NEW SHEQEL SIGN + '\xa5' # 0xA5 -> YEN SIGN + '\xa6' # 0xA6 -> BROKEN BAR + '\xa7' # 0xA7 -> SECTION SIGN + '\xa8' # 0xA8 -> DIAERESIS + '\xa9' # 0xA9 -> COPYRIGHT SIGN + '\xd7' # 0xAA -> MULTIPLICATION SIGN + '\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xac' # 0xAC -> NOT SIGN + '\xad' # 0xAD -> SOFT HYPHEN + '\xae' # 0xAE -> REGISTERED SIGN + '\xaf' # 0xAF -> MACRON + '\xb0' # 0xB0 -> DEGREE SIGN + '\xb1' # 0xB1 -> PLUS-MINUS SIGN + '\xb2' # 0xB2 -> SUPERSCRIPT TWO + '\xb3' # 0xB3 -> SUPERSCRIPT THREE + '\xb4' # 0xB4 -> ACUTE ACCENT + '\xb5' # 0xB5 -> MICRO SIGN + '\xb6' # 0xB6 -> PILCROW SIGN + '\xb7' # 0xB7 -> MIDDLE DOT + '\xb8' # 0xB8 -> CEDILLA + '\xb9' # 0xB9 -> SUPERSCRIPT ONE + '\xf7' # 0xBA -> DIVISION SIGN + '\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER + '\xbd' # 0xBD -> VULGAR FRACTION ONE HALF + '\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS + '\xbf' # 0xBF -> INVERTED QUESTION MARK + '\u05b0' # 0xC0 -> HEBREW POINT SHEVA + '\u05b1' # 0xC1 -> HEBREW POINT HATAF SEGOL + '\u05b2' # 0xC2 -> HEBREW POINT HATAF PATAH + '\u05b3' # 0xC3 -> HEBREW POINT HATAF QAMATS + '\u05b4' # 0xC4 -> HEBREW POINT HIRIQ + '\u05b5' # 0xC5 -> HEBREW POINT TSERE + '\u05b6' # 0xC6 -> HEBREW POINT SEGOL + '\u05b7' # 0xC7 -> HEBREW POINT PATAH + '\u05b8' # 0xC8 -> HEBREW POINT QAMATS + '\u05b9' # 0xC9 -> HEBREW POINT HOLAM + '\ufffe' # 0xCA -> UNDEFINED + '\u05bb' # 0xCB -> HEBREW POINT QUBUTS + '\u05bc' # 0xCC -> HEBREW POINT DAGESH OR MAPIQ + '\u05bd' # 0xCD -> HEBREW POINT METEG + '\u05be' # 0xCE -> HEBREW PUNCTUATION MAQAF + '\u05bf' # 0xCF -> HEBREW POINT RAFE + '\u05c0' # 0xD0 -> HEBREW PUNCTUATION PASEQ + '\u05c1' # 0xD1 -> HEBREW POINT SHIN DOT + '\u05c2' # 0xD2 -> HEBREW POINT SIN DOT + '\u05c3' # 0xD3 -> HEBREW PUNCTUATION SOF PASUQ + '\u05f0' # 0xD4 -> HEBREW LIGATURE YIDDISH DOUBLE VAV + '\u05f1' # 0xD5 -> HEBREW LIGATURE YIDDISH VAV YOD + '\u05f2' # 0xD6 -> HEBREW LIGATURE YIDDISH DOUBLE YOD + '\u05f3' # 0xD7 -> HEBREW PUNCTUATION GERESH + '\u05f4' # 0xD8 -> HEBREW PUNCTUATION GERSHAYIM + '\ufffe' # 0xD9 -> UNDEFINED + '\ufffe' # 0xDA -> UNDEFINED + '\ufffe' # 0xDB -> UNDEFINED + '\ufffe' # 0xDC -> UNDEFINED + '\ufffe' # 0xDD -> UNDEFINED + '\ufffe' # 0xDE -> UNDEFINED + '\ufffe' # 0xDF -> UNDEFINED + '\u05d0' # 0xE0 -> HEBREW LETTER ALEF + '\u05d1' # 0xE1 -> HEBREW LETTER BET + '\u05d2' # 0xE2 -> HEBREW LETTER GIMEL + '\u05d3' # 0xE3 -> HEBREW LETTER DALET + '\u05d4' # 0xE4 -> HEBREW LETTER HE + '\u05d5' # 0xE5 -> HEBREW LETTER VAV + '\u05d6' # 0xE6 -> HEBREW LETTER ZAYIN + '\u05d7' # 0xE7 -> HEBREW LETTER HET + '\u05d8' # 0xE8 -> HEBREW LETTER TET + '\u05d9' # 0xE9 -> HEBREW LETTER YOD + '\u05da' # 0xEA -> HEBREW LETTER FINAL KAF + '\u05db' # 0xEB -> HEBREW LETTER KAF + '\u05dc' # 0xEC -> HEBREW LETTER LAMED + '\u05dd' # 0xED -> HEBREW LETTER FINAL MEM + '\u05de' # 0xEE -> HEBREW LETTER MEM + '\u05df' # 0xEF -> HEBREW LETTER FINAL NUN + '\u05e0' # 0xF0 -> HEBREW LETTER NUN + '\u05e1' # 0xF1 -> HEBREW LETTER SAMEKH + '\u05e2' # 0xF2 -> HEBREW LETTER AYIN + '\u05e3' # 0xF3 -> HEBREW LETTER FINAL PE + '\u05e4' # 0xF4 -> HEBREW LETTER PE + '\u05e5' # 0xF5 -> HEBREW LETTER FINAL TSADI + '\u05e6' # 0xF6 -> HEBREW LETTER TSADI + '\u05e7' # 0xF7 -> HEBREW LETTER QOF + '\u05e8' # 0xF8 -> HEBREW LETTER RESH + '\u05e9' # 
0xF9 -> HEBREW LETTER SHIN + '\u05ea' # 0xFA -> HEBREW LETTER TAV + '\ufffe' # 0xFB -> UNDEFINED + '\ufffe' # 0xFC -> UNDEFINED + '\u200e' # 0xFD -> LEFT-TO-RIGHT MARK + '\u200f' # 0xFE -> RIGHT-TO-LEFT MARK + '\ufffe' # 0xFF -> UNDEFINED +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/cp1256.py b/venv/Lib/encodings/cp1256.py new file mode 100644 index 00000000..fd6afab5 --- /dev/null +++ b/venv/Lib/encodings/cp1256.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec cp1256 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1256.TXT' with gencodec.py. + +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='cp1256', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x04' # 0x04 -> END OF TRANSMISSION + '\x05' # 0x05 -> ENQUIRY + '\x06' # 0x06 -> ACKNOWLEDGE + '\x07' # 0x07 -> BELL + '\x08' # 0x08 -> BACKSPACE + '\t' # 0x09 -> HORIZONTAL TABULATION + '\n' # 0x0A -> LINE FEED + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x14' # 0x14 -> DEVICE CONTROL FOUR + '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x16 -> SYNCHRONOUS IDLE + '\x17' # 0x17 -> END OF TRANSMISSION BLOCK + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x1a' # 0x1A -> SUBSTITUTE + '\x1b' # 0x1B -> ESCAPE + '\x1c' # 0x1C -> FILE SEPARATOR + '\x1d' # 0x1D -> GROUP SEPARATOR + '\x1e' # 0x1E -> RECORD SEPARATOR + '\x1f' # 0x1F -> UNIT SEPARATOR + ' ' # 0x20 -> SPACE + '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' 
# 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' # 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> DELETE + '\u20ac' # 0x80 -> EURO SIGN + '\u067e' # 0x81 -> ARABIC LETTER PEH + '\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK + '\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK + '\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK + '\u2026' # 0x85 -> HORIZONTAL ELLIPSIS + '\u2020' # 0x86 -> DAGGER + '\u2021' # 0x87 -> DOUBLE DAGGER + '\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT + '\u2030' # 0x89 -> PER MILLE SIGN + '\u0679' # 0x8A -> ARABIC LETTER TTEH + '\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK + '\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE + '\u0686' # 0x8D -> ARABIC LETTER TCHEH + '\u0698' # 0x8E -> ARABIC LETTER JEH + '\u0688' # 0x8F -> ARABIC LETTER DDAL + '\u06af' # 0x90 -> ARABIC LETTER 
GAF + '\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK + '\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK + '\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK + '\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK + '\u2022' # 0x95 -> BULLET + '\u2013' # 0x96 -> EN DASH + '\u2014' # 0x97 -> EM DASH + '\u06a9' # 0x98 -> ARABIC LETTER KEHEH + '\u2122' # 0x99 -> TRADE MARK SIGN + '\u0691' # 0x9A -> ARABIC LETTER RREH + '\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK + '\u0153' # 0x9C -> LATIN SMALL LIGATURE OE + '\u200c' # 0x9D -> ZERO WIDTH NON-JOINER + '\u200d' # 0x9E -> ZERO WIDTH JOINER + '\u06ba' # 0x9F -> ARABIC LETTER NOON GHUNNA + '\xa0' # 0xA0 -> NO-BREAK SPACE + '\u060c' # 0xA1 -> ARABIC COMMA + '\xa2' # 0xA2 -> CENT SIGN + '\xa3' # 0xA3 -> POUND SIGN + '\xa4' # 0xA4 -> CURRENCY SIGN + '\xa5' # 0xA5 -> YEN SIGN + '\xa6' # 0xA6 -> BROKEN BAR + '\xa7' # 0xA7 -> SECTION SIGN + '\xa8' # 0xA8 -> DIAERESIS + '\xa9' # 0xA9 -> COPYRIGHT SIGN + '\u06be' # 0xAA -> ARABIC LETTER HEH DOACHASHMEE + '\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xac' # 0xAC -> NOT SIGN + '\xad' # 0xAD -> SOFT HYPHEN + '\xae' # 0xAE -> REGISTERED SIGN + '\xaf' # 0xAF -> MACRON + '\xb0' # 0xB0 -> DEGREE SIGN + '\xb1' # 0xB1 -> PLUS-MINUS SIGN + '\xb2' # 0xB2 -> SUPERSCRIPT TWO + '\xb3' # 0xB3 -> SUPERSCRIPT THREE + '\xb4' # 0xB4 -> ACUTE ACCENT + '\xb5' # 0xB5 -> MICRO SIGN + '\xb6' # 0xB6 -> PILCROW SIGN + '\xb7' # 0xB7 -> MIDDLE DOT + '\xb8' # 0xB8 -> CEDILLA + '\xb9' # 0xB9 -> SUPERSCRIPT ONE + '\u061b' # 0xBA -> ARABIC SEMICOLON + '\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER + '\xbd' # 0xBD -> VULGAR FRACTION ONE HALF + '\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS + '\u061f' # 0xBF -> ARABIC QUESTION MARK + '\u06c1' # 0xC0 -> ARABIC LETTER HEH GOAL + '\u0621' # 0xC1 -> ARABIC LETTER HAMZA + '\u0622' # 0xC2 -> ARABIC LETTER ALEF WITH MADDA ABOVE + '\u0623' # 0xC3 -> ARABIC LETTER ALEF WITH HAMZA ABOVE + '\u0624' # 0xC4 -> ARABIC LETTER WAW WITH HAMZA ABOVE + '\u0625' # 0xC5 -> ARABIC LETTER ALEF WITH HAMZA BELOW + '\u0626' # 0xC6 -> ARABIC LETTER YEH WITH HAMZA ABOVE + '\u0627' # 0xC7 -> ARABIC LETTER ALEF + '\u0628' # 0xC8 -> ARABIC LETTER BEH + '\u0629' # 0xC9 -> ARABIC LETTER TEH MARBUTA + '\u062a' # 0xCA -> ARABIC LETTER TEH + '\u062b' # 0xCB -> ARABIC LETTER THEH + '\u062c' # 0xCC -> ARABIC LETTER JEEM + '\u062d' # 0xCD -> ARABIC LETTER HAH + '\u062e' # 0xCE -> ARABIC LETTER KHAH + '\u062f' # 0xCF -> ARABIC LETTER DAL + '\u0630' # 0xD0 -> ARABIC LETTER THAL + '\u0631' # 0xD1 -> ARABIC LETTER REH + '\u0632' # 0xD2 -> ARABIC LETTER ZAIN + '\u0633' # 0xD3 -> ARABIC LETTER SEEN + '\u0634' # 0xD4 -> ARABIC LETTER SHEEN + '\u0635' # 0xD5 -> ARABIC LETTER SAD + '\u0636' # 0xD6 -> ARABIC LETTER DAD + '\xd7' # 0xD7 -> MULTIPLICATION SIGN + '\u0637' # 0xD8 -> ARABIC LETTER TAH + '\u0638' # 0xD9 -> ARABIC LETTER ZAH + '\u0639' # 0xDA -> ARABIC LETTER AIN + '\u063a' # 0xDB -> ARABIC LETTER GHAIN + '\u0640' # 0xDC -> ARABIC TATWEEL + '\u0641' # 0xDD -> ARABIC LETTER FEH + '\u0642' # 0xDE -> ARABIC LETTER QAF + '\u0643' # 0xDF -> ARABIC LETTER KAF + '\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE + '\u0644' # 0xE1 -> ARABIC LETTER LAM + '\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX + '\u0645' # 0xE3 -> ARABIC LETTER MEEM + '\u0646' # 0xE4 -> ARABIC LETTER NOON + '\u0647' # 0xE5 -> ARABIC LETTER HEH + '\u0648' # 0xE6 -> ARABIC LETTER WAW + '\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA + '\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH 
GRAVE + '\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE + '\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX + '\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS + '\u0649' # 0xEC -> ARABIC LETTER ALEF MAKSURA + '\u064a' # 0xED -> ARABIC LETTER YEH + '\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX + '\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS + '\u064b' # 0xF0 -> ARABIC FATHATAN + '\u064c' # 0xF1 -> ARABIC DAMMATAN + '\u064d' # 0xF2 -> ARABIC KASRATAN + '\u064e' # 0xF3 -> ARABIC FATHA + '\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX + '\u064f' # 0xF5 -> ARABIC DAMMA + '\u0650' # 0xF6 -> ARABIC KASRA + '\xf7' # 0xF7 -> DIVISION SIGN + '\u0651' # 0xF8 -> ARABIC SHADDA + '\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE + '\u0652' # 0xFA -> ARABIC SUKUN + '\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX + '\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS + '\u200e' # 0xFD -> LEFT-TO-RIGHT MARK + '\u200f' # 0xFE -> RIGHT-TO-LEFT MARK + '\u06d2' # 0xFF -> ARABIC LETTER YEH BARREE +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/cp1257.py b/venv/Lib/encodings/cp1257.py new file mode 100644 index 00000000..9ebc90d5 --- /dev/null +++ b/venv/Lib/encodings/cp1257.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec cp1257 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1257.TXT' with gencodec.py. + +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='cp1257', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x04' # 0x04 -> END OF TRANSMISSION + '\x05' # 0x05 -> ENQUIRY + '\x06' # 0x06 -> ACKNOWLEDGE + '\x07' # 0x07 -> BELL + '\x08' # 0x08 -> BACKSPACE + '\t' # 0x09 -> HORIZONTAL TABULATION + '\n' # 0x0A -> LINE FEED + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x14' # 0x14 -> DEVICE CONTROL FOUR + '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x16 -> SYNCHRONOUS IDLE + '\x17' # 0x17 -> END OF TRANSMISSION BLOCK + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x1a' # 0x1A -> SUBSTITUTE + '\x1b' # 0x1B -> ESCAPE + '\x1c' # 0x1C -> FILE SEPARATOR + '\x1d' # 0x1D -> GROUP SEPARATOR + '\x1e' # 0x1E -> RECORD SEPARATOR + '\x1f' # 0x1F -> UNIT SEPARATOR + ' ' # 0x20 -> SPACE 
+ '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' # 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' # 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> DELETE + '\u20ac' # 0x80 -> EURO SIGN + '\ufffe' # 0x81 -> UNDEFINED + '\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK + '\ufffe' # 0x83 -> UNDEFINED + '\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK + '\u2026' # 0x85 -> HORIZONTAL ELLIPSIS + '\u2020' # 0x86 -> DAGGER + '\u2021' # 0x87 -> DOUBLE DAGGER + '\ufffe' # 0x88 -> UNDEFINED + '\u2030' # 0x89 
-> PER MILLE SIGN + '\ufffe' # 0x8A -> UNDEFINED + '\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK + '\ufffe' # 0x8C -> UNDEFINED + '\xa8' # 0x8D -> DIAERESIS + '\u02c7' # 0x8E -> CARON + '\xb8' # 0x8F -> CEDILLA + '\ufffe' # 0x90 -> UNDEFINED + '\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK + '\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK + '\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK + '\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK + '\u2022' # 0x95 -> BULLET + '\u2013' # 0x96 -> EN DASH + '\u2014' # 0x97 -> EM DASH + '\ufffe' # 0x98 -> UNDEFINED + '\u2122' # 0x99 -> TRADE MARK SIGN + '\ufffe' # 0x9A -> UNDEFINED + '\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK + '\ufffe' # 0x9C -> UNDEFINED + '\xaf' # 0x9D -> MACRON + '\u02db' # 0x9E -> OGONEK + '\ufffe' # 0x9F -> UNDEFINED + '\xa0' # 0xA0 -> NO-BREAK SPACE + '\ufffe' # 0xA1 -> UNDEFINED + '\xa2' # 0xA2 -> CENT SIGN + '\xa3' # 0xA3 -> POUND SIGN + '\xa4' # 0xA4 -> CURRENCY SIGN + '\ufffe' # 0xA5 -> UNDEFINED + '\xa6' # 0xA6 -> BROKEN BAR + '\xa7' # 0xA7 -> SECTION SIGN + '\xd8' # 0xA8 -> LATIN CAPITAL LETTER O WITH STROKE + '\xa9' # 0xA9 -> COPYRIGHT SIGN + '\u0156' # 0xAA -> LATIN CAPITAL LETTER R WITH CEDILLA + '\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xac' # 0xAC -> NOT SIGN + '\xad' # 0xAD -> SOFT HYPHEN + '\xae' # 0xAE -> REGISTERED SIGN + '\xc6' # 0xAF -> LATIN CAPITAL LETTER AE + '\xb0' # 0xB0 -> DEGREE SIGN + '\xb1' # 0xB1 -> PLUS-MINUS SIGN + '\xb2' # 0xB2 -> SUPERSCRIPT TWO + '\xb3' # 0xB3 -> SUPERSCRIPT THREE + '\xb4' # 0xB4 -> ACUTE ACCENT + '\xb5' # 0xB5 -> MICRO SIGN + '\xb6' # 0xB6 -> PILCROW SIGN + '\xb7' # 0xB7 -> MIDDLE DOT + '\xf8' # 0xB8 -> LATIN SMALL LETTER O WITH STROKE + '\xb9' # 0xB9 -> SUPERSCRIPT ONE + '\u0157' # 0xBA -> LATIN SMALL LETTER R WITH CEDILLA + '\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER + '\xbd' # 0xBD -> VULGAR FRACTION ONE HALF + '\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS + '\xe6' # 0xBF -> LATIN SMALL LETTER AE + '\u0104' # 0xC0 -> LATIN CAPITAL LETTER A WITH OGONEK + '\u012e' # 0xC1 -> LATIN CAPITAL LETTER I WITH OGONEK + '\u0100' # 0xC2 -> LATIN CAPITAL LETTER A WITH MACRON + '\u0106' # 0xC3 -> LATIN CAPITAL LETTER C WITH ACUTE + '\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS + '\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE + '\u0118' # 0xC6 -> LATIN CAPITAL LETTER E WITH OGONEK + '\u0112' # 0xC7 -> LATIN CAPITAL LETTER E WITH MACRON + '\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON + '\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE + '\u0179' # 0xCA -> LATIN CAPITAL LETTER Z WITH ACUTE + '\u0116' # 0xCB -> LATIN CAPITAL LETTER E WITH DOT ABOVE + '\u0122' # 0xCC -> LATIN CAPITAL LETTER G WITH CEDILLA + '\u0136' # 0xCD -> LATIN CAPITAL LETTER K WITH CEDILLA + '\u012a' # 0xCE -> LATIN CAPITAL LETTER I WITH MACRON + '\u013b' # 0xCF -> LATIN CAPITAL LETTER L WITH CEDILLA + '\u0160' # 0xD0 -> LATIN CAPITAL LETTER S WITH CARON + '\u0143' # 0xD1 -> LATIN CAPITAL LETTER N WITH ACUTE + '\u0145' # 0xD2 -> LATIN CAPITAL LETTER N WITH CEDILLA + '\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE + '\u014c' # 0xD4 -> LATIN CAPITAL LETTER O WITH MACRON + '\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE + '\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS + '\xd7' # 0xD7 -> MULTIPLICATION SIGN + '\u0172' # 0xD8 -> LATIN CAPITAL LETTER U WITH OGONEK + '\u0141' # 0xD9 -> LATIN CAPITAL LETTER L WITH STROKE + '\u015a' # 0xDA -> LATIN CAPITAL LETTER S WITH ACUTE + 
'\u016a' # 0xDB -> LATIN CAPITAL LETTER U WITH MACRON + '\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\u017b' # 0xDD -> LATIN CAPITAL LETTER Z WITH DOT ABOVE + '\u017d' # 0xDE -> LATIN CAPITAL LETTER Z WITH CARON + '\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S + '\u0105' # 0xE0 -> LATIN SMALL LETTER A WITH OGONEK + '\u012f' # 0xE1 -> LATIN SMALL LETTER I WITH OGONEK + '\u0101' # 0xE2 -> LATIN SMALL LETTER A WITH MACRON + '\u0107' # 0xE3 -> LATIN SMALL LETTER C WITH ACUTE + '\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS + '\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE + '\u0119' # 0xE6 -> LATIN SMALL LETTER E WITH OGONEK + '\u0113' # 0xE7 -> LATIN SMALL LETTER E WITH MACRON + '\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON + '\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE + '\u017a' # 0xEA -> LATIN SMALL LETTER Z WITH ACUTE + '\u0117' # 0xEB -> LATIN SMALL LETTER E WITH DOT ABOVE + '\u0123' # 0xEC -> LATIN SMALL LETTER G WITH CEDILLA + '\u0137' # 0xED -> LATIN SMALL LETTER K WITH CEDILLA + '\u012b' # 0xEE -> LATIN SMALL LETTER I WITH MACRON + '\u013c' # 0xEF -> LATIN SMALL LETTER L WITH CEDILLA + '\u0161' # 0xF0 -> LATIN SMALL LETTER S WITH CARON + '\u0144' # 0xF1 -> LATIN SMALL LETTER N WITH ACUTE + '\u0146' # 0xF2 -> LATIN SMALL LETTER N WITH CEDILLA + '\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE + '\u014d' # 0xF4 -> LATIN SMALL LETTER O WITH MACRON + '\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE + '\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS + '\xf7' # 0xF7 -> DIVISION SIGN + '\u0173' # 0xF8 -> LATIN SMALL LETTER U WITH OGONEK + '\u0142' # 0xF9 -> LATIN SMALL LETTER L WITH STROKE + '\u015b' # 0xFA -> LATIN SMALL LETTER S WITH ACUTE + '\u016b' # 0xFB -> LATIN SMALL LETTER U WITH MACRON + '\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS + '\u017c' # 0xFD -> LATIN SMALL LETTER Z WITH DOT ABOVE + '\u017e' # 0xFE -> LATIN SMALL LETTER Z WITH CARON + '\u02d9' # 0xFF -> DOT ABOVE +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/cp1258.py b/venv/Lib/encodings/cp1258.py new file mode 100644 index 00000000..784378a8 --- /dev/null +++ b/venv/Lib/encodings/cp1258.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec cp1258 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1258.TXT' with gencodec.py. 
+ +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='cp1258', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x04' # 0x04 -> END OF TRANSMISSION + '\x05' # 0x05 -> ENQUIRY + '\x06' # 0x06 -> ACKNOWLEDGE + '\x07' # 0x07 -> BELL + '\x08' # 0x08 -> BACKSPACE + '\t' # 0x09 -> HORIZONTAL TABULATION + '\n' # 0x0A -> LINE FEED + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x14' # 0x14 -> DEVICE CONTROL FOUR + '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x16 -> SYNCHRONOUS IDLE + '\x17' # 0x17 -> END OF TRANSMISSION BLOCK + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x1a' # 0x1A -> SUBSTITUTE + '\x1b' # 0x1B -> ESCAPE + '\x1c' # 0x1C -> FILE SEPARATOR + '\x1d' # 0x1D -> GROUP SEPARATOR + '\x1e' # 0x1E -> RECORD SEPARATOR + '\x1f' # 0x1F -> UNIT SEPARATOR + ' ' # 0x20 -> SPACE + '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' # 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' 
# 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> DELETE + '\u20ac' # 0x80 -> EURO SIGN + '\ufffe' # 0x81 -> UNDEFINED + '\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK + '\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK + '\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK + '\u2026' # 0x85 -> HORIZONTAL ELLIPSIS + '\u2020' # 0x86 -> DAGGER + '\u2021' # 0x87 -> DOUBLE DAGGER + '\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT + '\u2030' # 0x89 -> PER MILLE SIGN + '\ufffe' # 0x8A -> UNDEFINED + '\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK + '\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE + '\ufffe' # 0x8D -> UNDEFINED + '\ufffe' # 0x8E -> UNDEFINED + '\ufffe' # 0x8F -> UNDEFINED + '\ufffe' # 0x90 -> UNDEFINED + '\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK + '\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK + '\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK + '\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK + '\u2022' # 0x95 -> BULLET + '\u2013' # 0x96 -> EN DASH + '\u2014' # 0x97 -> EM DASH + '\u02dc' # 0x98 -> SMALL TILDE + '\u2122' # 0x99 -> TRADE MARK SIGN + '\ufffe' # 0x9A -> UNDEFINED + '\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK + '\u0153' # 0x9C -> LATIN SMALL LIGATURE OE + '\ufffe' # 0x9D 
-> UNDEFINED + '\ufffe' # 0x9E -> UNDEFINED + '\u0178' # 0x9F -> LATIN CAPITAL LETTER Y WITH DIAERESIS + '\xa0' # 0xA0 -> NO-BREAK SPACE + '\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK + '\xa2' # 0xA2 -> CENT SIGN + '\xa3' # 0xA3 -> POUND SIGN + '\xa4' # 0xA4 -> CURRENCY SIGN + '\xa5' # 0xA5 -> YEN SIGN + '\xa6' # 0xA6 -> BROKEN BAR + '\xa7' # 0xA7 -> SECTION SIGN + '\xa8' # 0xA8 -> DIAERESIS + '\xa9' # 0xA9 -> COPYRIGHT SIGN + '\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR + '\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xac' # 0xAC -> NOT SIGN + '\xad' # 0xAD -> SOFT HYPHEN + '\xae' # 0xAE -> REGISTERED SIGN + '\xaf' # 0xAF -> MACRON + '\xb0' # 0xB0 -> DEGREE SIGN + '\xb1' # 0xB1 -> PLUS-MINUS SIGN + '\xb2' # 0xB2 -> SUPERSCRIPT TWO + '\xb3' # 0xB3 -> SUPERSCRIPT THREE + '\xb4' # 0xB4 -> ACUTE ACCENT + '\xb5' # 0xB5 -> MICRO SIGN + '\xb6' # 0xB6 -> PILCROW SIGN + '\xb7' # 0xB7 -> MIDDLE DOT + '\xb8' # 0xB8 -> CEDILLA + '\xb9' # 0xB9 -> SUPERSCRIPT ONE + '\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR + '\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER + '\xbd' # 0xBD -> VULGAR FRACTION ONE HALF + '\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS + '\xbf' # 0xBF -> INVERTED QUESTION MARK + '\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE + '\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE + '\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX + '\u0102' # 0xC3 -> LATIN CAPITAL LETTER A WITH BREVE + '\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS + '\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE + '\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE + '\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA + '\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE + '\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE + '\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX + '\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS + '\u0300' # 0xCC -> COMBINING GRAVE ACCENT + '\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE + '\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX + '\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS + '\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE + '\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE + '\u0309' # 0xD2 -> COMBINING HOOK ABOVE + '\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE + '\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX + '\u01a0' # 0xD5 -> LATIN CAPITAL LETTER O WITH HORN + '\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS + '\xd7' # 0xD7 -> MULTIPLICATION SIGN + '\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE + '\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE + '\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE + '\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX + '\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\u01af' # 0xDD -> LATIN CAPITAL LETTER U WITH HORN + '\u0303' # 0xDE -> COMBINING TILDE + '\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S + '\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE + '\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE + '\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX + '\u0103' # 0xE3 -> LATIN SMALL LETTER A WITH BREVE + '\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS + '\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE + '\xe6' # 0xE6 -> LATIN SMALL LETTER AE + '\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA + '\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE + '\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE + '\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX + '\xeb' # 0xEB -> 
LATIN SMALL LETTER E WITH DIAERESIS + '\u0301' # 0xEC -> COMBINING ACUTE ACCENT + '\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE + '\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX + '\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS + '\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE + '\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE + '\u0323' # 0xF2 -> COMBINING DOT BELOW + '\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE + '\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX + '\u01a1' # 0xF5 -> LATIN SMALL LETTER O WITH HORN + '\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS + '\xf7' # 0xF7 -> DIVISION SIGN + '\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE + '\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE + '\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE + '\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX + '\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS + '\u01b0' # 0xFD -> LATIN SMALL LETTER U WITH HORN + '\u20ab' # 0xFE -> DONG SIGN + '\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/cp273.py b/venv/Lib/encodings/cp273.py new file mode 100644 index 00000000..69c6d778 --- /dev/null +++ b/venv/Lib/encodings/cp273.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec cp273 generated from 'python-mappings/CP273.TXT' with gencodec.py. + +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='cp273', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL (NUL) + '\x01' # 0x01 -> START OF HEADING (SOH) + '\x02' # 0x02 -> START OF TEXT (STX) + '\x03' # 0x03 -> END OF TEXT (ETX) + '\x9c' # 0x04 -> STRING TERMINATOR (ST) + '\t' # 0x05 -> CHARACTER TABULATION (HT) + '\x86' # 0x06 -> START OF SELECTED AREA (SSA) + '\x7f' # 0x07 -> DELETE (DEL) + '\x97' # 0x08 -> END OF GUARDED AREA (EPA) + '\x8d' # 0x09 -> REVERSE LINE FEED (RI) + '\x8e' # 0x0A -> SINGLE-SHIFT TWO (SS2) + '\x0b' # 0x0B -> LINE TABULATION (VT) + '\x0c' # 0x0C -> FORM FEED (FF) + '\r' # 0x0D -> CARRIAGE RETURN (CR) + '\x0e' # 0x0E -> SHIFT OUT (SO) + '\x0f' # 0x0F -> SHIFT IN (SI) + '\x10' # 0x10 -> DATALINK ESCAPE (DLE) + '\x11' # 0x11 -> DEVICE CONTROL ONE (DC1) + '\x12' # 0x12 -> DEVICE CONTROL TWO (DC2) + '\x13' # 0x13 -> DEVICE CONTROL THREE (DC3) + '\x9d' # 0x14 -> OPERATING SYSTEM COMMAND (OSC) + '\x85' # 0x15 -> NEXT LINE (NEL) + '\x08' # 0x16 -> BACKSPACE (BS) + '\x87' # 0x17 -> END OF SELECTED AREA (ESA) + '\x18' # 0x18 -> CANCEL (CAN) + '\x19' # 0x19 -> END OF MEDIUM (EM) + '\x92' # 0x1A -> PRIVATE USE TWO (PU2) + '\x8f' # 0x1B -> 
SINGLE-SHIFT THREE (SS3) + '\x1c' # 0x1C -> FILE SEPARATOR (IS4) + '\x1d' # 0x1D -> GROUP SEPARATOR (IS3) + '\x1e' # 0x1E -> RECORD SEPARATOR (IS2) + '\x1f' # 0x1F -> UNIT SEPARATOR (IS1) + '\x80' # 0x20 -> PADDING CHARACTER (PAD) + '\x81' # 0x21 -> HIGH OCTET PRESET (HOP) + '\x82' # 0x22 -> BREAK PERMITTED HERE (BPH) + '\x83' # 0x23 -> NO BREAK HERE (NBH) + '\x84' # 0x24 -> INDEX (IND) + '\n' # 0x25 -> LINE FEED (LF) + '\x17' # 0x26 -> END OF TRANSMISSION BLOCK (ETB) + '\x1b' # 0x27 -> ESCAPE (ESC) + '\x88' # 0x28 -> CHARACTER TABULATION SET (HTS) + '\x89' # 0x29 -> CHARACTER TABULATION WITH JUSTIFICATION (HTJ) + '\x8a' # 0x2A -> LINE TABULATION SET (VTS) + '\x8b' # 0x2B -> PARTIAL LINE FORWARD (PLD) + '\x8c' # 0x2C -> PARTIAL LINE BACKWARD (PLU) + '\x05' # 0x2D -> ENQUIRY (ENQ) + '\x06' # 0x2E -> ACKNOWLEDGE (ACK) + '\x07' # 0x2F -> BELL (BEL) + '\x90' # 0x30 -> DEVICE CONTROL STRING (DCS) + '\x91' # 0x31 -> PRIVATE USE ONE (PU1) + '\x16' # 0x32 -> SYNCHRONOUS IDLE (SYN) + '\x93' # 0x33 -> SET TRANSMIT STATE (STS) + '\x94' # 0x34 -> CANCEL CHARACTER (CCH) + '\x95' # 0x35 -> MESSAGE WAITING (MW) + '\x96' # 0x36 -> START OF GUARDED AREA (SPA) + '\x04' # 0x37 -> END OF TRANSMISSION (EOT) + '\x98' # 0x38 -> START OF STRING (SOS) + '\x99' # 0x39 -> SINGLE GRAPHIC CHARACTER INTRODUCER (SGCI) + '\x9a' # 0x3A -> SINGLE CHARACTER INTRODUCER (SCI) + '\x9b' # 0x3B -> CONTROL SEQUENCE INTRODUCER (CSI) + '\x14' # 0x3C -> DEVICE CONTROL FOUR (DC4) + '\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE (NAK) + '\x9e' # 0x3E -> PRIVACY MESSAGE (PM) + '\x1a' # 0x3F -> SUBSTITUTE (SUB) + ' ' # 0x40 -> SPACE + '\xa0' # 0x41 -> NO-BREAK SPACE + '\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX + '{' # 0x43 -> LEFT CURLY BRACKET + '\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE + '\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE + '\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE + '\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE + '\xe7' # 0x48 -> LATIN SMALL LETTER C WITH CEDILLA + '\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE + '\xc4' # 0x4A -> LATIN CAPITAL LETTER A WITH DIAERESIS + '.' # 0x4B -> FULL STOP + '<' # 0x4C -> LESS-THAN SIGN + '(' # 0x4D -> LEFT PARENTHESIS + '+' # 0x4E -> PLUS SIGN + '!' 
# 0x4F -> EXCLAMATION MARK + '&' # 0x50 -> AMPERSAND + '\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE + '\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX + '\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS + '\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE + '\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE + '\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX + '\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS + '\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE + '~' # 0x59 -> TILDE + '\xdc' # 0x5A -> LATIN CAPITAL LETTER U WITH DIAERESIS + '$' # 0x5B -> DOLLAR SIGN + '*' # 0x5C -> ASTERISK + ')' # 0x5D -> RIGHT PARENTHESIS + ';' # 0x5E -> SEMICOLON + '^' # 0x5F -> CIRCUMFLEX ACCENT + '-' # 0x60 -> HYPHEN-MINUS + '/' # 0x61 -> SOLIDUS + '\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX + '[' # 0x63 -> LEFT SQUARE BRACKET + '\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE + '\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE + '\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE + '\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE + '\xc7' # 0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA + '\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE + '\xf6' # 0x6A -> LATIN SMALL LETTER O WITH DIAERESIS + ',' # 0x6B -> COMMA + '%' # 0x6C -> PERCENT SIGN + '_' # 0x6D -> LOW LINE + '>' # 0x6E -> GREATER-THAN SIGN + '?' # 0x6F -> QUESTION MARK + '\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE + '\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE + '\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX + '\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS + '\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE + '\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE + '\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX + '\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS + '\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE + '`' # 0x79 -> GRAVE ACCENT + ':' # 0x7A -> COLON + '#' # 0x7B -> NUMBER SIGN + '\xa7' # 0x7C -> SECTION SIGN + "'" # 0x7D -> APOSTROPHE + '=' # 0x7E -> EQUALS SIGN + '"' # 0x7F -> QUOTATION MARK + '\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE + 'a' # 0x81 -> LATIN SMALL LETTER A + 'b' # 0x82 -> LATIN SMALL LETTER B + 'c' # 0x83 -> LATIN SMALL LETTER C + 'd' # 0x84 -> LATIN SMALL LETTER D + 'e' # 0x85 -> LATIN SMALL LETTER E + 'f' # 0x86 -> LATIN SMALL LETTER F + 'g' # 0x87 -> LATIN SMALL LETTER G + 'h' # 0x88 -> LATIN SMALL LETTER H + 'i' # 0x89 -> LATIN SMALL LETTER I + '\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xf0' # 0x8C -> LATIN SMALL LETTER ETH (Icelandic) + '\xfd' # 0x8D -> LATIN SMALL LETTER Y WITH ACUTE + '\xfe' # 0x8E -> LATIN SMALL LETTER THORN (Icelandic) + '\xb1' # 0x8F -> PLUS-MINUS SIGN + '\xb0' # 0x90 -> DEGREE SIGN + 'j' # 0x91 -> LATIN SMALL LETTER J + 'k' # 0x92 -> LATIN SMALL LETTER K + 'l' # 0x93 -> LATIN SMALL LETTER L + 'm' # 0x94 -> LATIN SMALL LETTER M + 'n' # 0x95 -> LATIN SMALL LETTER N + 'o' # 0x96 -> LATIN SMALL LETTER O + 'p' # 0x97 -> LATIN SMALL LETTER P + 'q' # 0x98 -> LATIN SMALL LETTER Q + 'r' # 0x99 -> LATIN SMALL LETTER R + '\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR + '\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR + '\xe6' # 0x9C -> LATIN SMALL LETTER AE + '\xb8' # 0x9D -> CEDILLA + '\xc6' # 0x9E -> LATIN CAPITAL LETTER AE + '\xa4' # 0x9F -> CURRENCY SIGN + '\xb5' # 0xA0 -> MICRO SIGN + '\xdf' # 0xA1 -> LATIN SMALL LETTER SHARP S (German) + 's' # 0xA2 -> LATIN SMALL LETTER S + 't' # 0xA3 -> LATIN SMALL LETTER T + 'u' # 0xA4 -> LATIN 
SMALL LETTER U + 'v' # 0xA5 -> LATIN SMALL LETTER V + 'w' # 0xA6 -> LATIN SMALL LETTER W + 'x' # 0xA7 -> LATIN SMALL LETTER X + 'y' # 0xA8 -> LATIN SMALL LETTER Y + 'z' # 0xA9 -> LATIN SMALL LETTER Z + '\xa1' # 0xAA -> INVERTED EXCLAMATION MARK + '\xbf' # 0xAB -> INVERTED QUESTION MARK + '\xd0' # 0xAC -> LATIN CAPITAL LETTER ETH (Icelandic) + '\xdd' # 0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE + '\xde' # 0xAE -> LATIN CAPITAL LETTER THORN (Icelandic) + '\xae' # 0xAF -> REGISTERED SIGN + '\xa2' # 0xB0 -> CENT SIGN + '\xa3' # 0xB1 -> POUND SIGN + '\xa5' # 0xB2 -> YEN SIGN + '\xb7' # 0xB3 -> MIDDLE DOT + '\xa9' # 0xB4 -> COPYRIGHT SIGN + '@' # 0xB5 -> COMMERCIAL AT + '\xb6' # 0xB6 -> PILCROW SIGN + '\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER + '\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF + '\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS + '\xac' # 0xBA -> NOT SIGN + '|' # 0xBB -> VERTICAL LINE + '\u203e' # 0xBC -> OVERLINE + '\xa8' # 0xBD -> DIAERESIS + '\xb4' # 0xBE -> ACUTE ACCENT + '\xd7' # 0xBF -> MULTIPLICATION SIGN + '\xe4' # 0xC0 -> LATIN SMALL LETTER A WITH DIAERESIS + 'A' # 0xC1 -> LATIN CAPITAL LETTER A + 'B' # 0xC2 -> LATIN CAPITAL LETTER B + 'C' # 0xC3 -> LATIN CAPITAL LETTER C + 'D' # 0xC4 -> LATIN CAPITAL LETTER D + 'E' # 0xC5 -> LATIN CAPITAL LETTER E + 'F' # 0xC6 -> LATIN CAPITAL LETTER F + 'G' # 0xC7 -> LATIN CAPITAL LETTER G + 'H' # 0xC8 -> LATIN CAPITAL LETTER H + 'I' # 0xC9 -> LATIN CAPITAL LETTER I + '\xad' # 0xCA -> SOFT HYPHEN + '\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX + '\xa6' # 0xCC -> BROKEN BAR + '\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE + '\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE + '\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE + '\xfc' # 0xD0 -> LATIN SMALL LETTER U WITH DIAERESIS + 'J' # 0xD1 -> LATIN CAPITAL LETTER J + 'K' # 0xD2 -> LATIN CAPITAL LETTER K + 'L' # 0xD3 -> LATIN CAPITAL LETTER L + 'M' # 0xD4 -> LATIN CAPITAL LETTER M + 'N' # 0xD5 -> LATIN CAPITAL LETTER N + 'O' # 0xD6 -> LATIN CAPITAL LETTER O + 'P' # 0xD7 -> LATIN CAPITAL LETTER P + 'Q' # 0xD8 -> LATIN CAPITAL LETTER Q + 'R' # 0xD9 -> LATIN CAPITAL LETTER R + '\xb9' # 0xDA -> SUPERSCRIPT ONE + '\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX + '}' # 0xDC -> RIGHT CURLY BRACKET + '\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE + '\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE + '\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS + '\xd6' # 0xE0 -> LATIN CAPITAL LETTER O WITH DIAERESIS + '\xf7' # 0xE1 -> DIVISION SIGN + 'S' # 0xE2 -> LATIN CAPITAL LETTER S + 'T' # 0xE3 -> LATIN CAPITAL LETTER T + 'U' # 0xE4 -> LATIN CAPITAL LETTER U + 'V' # 0xE5 -> LATIN CAPITAL LETTER V + 'W' # 0xE6 -> LATIN CAPITAL LETTER W + 'X' # 0xE7 -> LATIN CAPITAL LETTER X + 'Y' # 0xE8 -> LATIN CAPITAL LETTER Y + 'Z' # 0xE9 -> LATIN CAPITAL LETTER Z + '\xb2' # 0xEA -> SUPERSCRIPT TWO + '\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX + '\\' # 0xEC -> REVERSE SOLIDUS + '\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE + '\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE + '\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE + '0' # 0xF0 -> DIGIT ZERO + '1' # 0xF1 -> DIGIT ONE + '2' # 0xF2 -> DIGIT TWO + '3' # 0xF3 -> DIGIT THREE + '4' # 0xF4 -> DIGIT FOUR + '5' # 0xF5 -> DIGIT FIVE + '6' # 0xF6 -> DIGIT SIX + '7' # 0xF7 -> DIGIT SEVEN + '8' # 0xF8 -> DIGIT EIGHT + '9' # 0xF9 -> DIGIT NINE + '\xb3' # 0xFA -> SUPERSCRIPT THREE + '\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX + ']' # 0xFC -> RIGHT SQUARE BRACKET + '\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE + '\xda' # 0xFE 
-> LATIN CAPITAL LETTER U WITH ACUTE + '\x9f' # 0xFF -> APPLICATION PROGRAM COMMAND (APC) +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/cp424.py b/venv/Lib/encodings/cp424.py new file mode 100644 index 00000000..6753daf1 --- /dev/null +++ b/venv/Lib/encodings/cp424.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec cp424 generated from 'MAPPINGS/VENDORS/MISC/CP424.TXT' with gencodec.py. + +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='cp424', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x9c' # 0x04 -> SELECT + '\t' # 0x05 -> HORIZONTAL TABULATION + '\x86' # 0x06 -> REQUIRED NEW LINE + '\x7f' # 0x07 -> DELETE + '\x97' # 0x08 -> GRAPHIC ESCAPE + '\x8d' # 0x09 -> SUPERSCRIPT + '\x8e' # 0x0A -> REPEAT + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x9d' # 0x14 -> RESTORE/ENABLE PRESENTATION + '\x85' # 0x15 -> NEW LINE + '\x08' # 0x16 -> BACKSPACE + '\x87' # 0x17 -> PROGRAM OPERATOR COMMUNICATION + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x92' # 0x1A -> UNIT BACK SPACE + '\x8f' # 0x1B -> CUSTOMER USE ONE + '\x1c' # 0x1C -> FILE SEPARATOR + '\x1d' # 0x1D -> GROUP SEPARATOR + '\x1e' # 0x1E -> RECORD SEPARATOR + '\x1f' # 0x1F -> UNIT SEPARATOR + '\x80' # 0x20 -> DIGIT SELECT + '\x81' # 0x21 -> START OF SIGNIFICANCE + '\x82' # 0x22 -> FIELD SEPARATOR + '\x83' # 0x23 -> WORD UNDERSCORE + '\x84' # 0x24 -> BYPASS OR INHIBIT PRESENTATION + '\n' # 0x25 -> LINE FEED + '\x17' # 0x26 -> END OF TRANSMISSION BLOCK + '\x1b' # 0x27 -> ESCAPE + '\x88' # 0x28 -> SET ATTRIBUTE + '\x89' # 0x29 -> START FIELD EXTENDED + '\x8a' # 0x2A -> SET MODE OR SWITCH + '\x8b' # 0x2B -> CONTROL SEQUENCE PREFIX + '\x8c' # 0x2C -> MODIFY FIELD ATTRIBUTE + '\x05' # 0x2D -> ENQUIRY + '\x06' # 0x2E -> ACKNOWLEDGE + '\x07' # 0x2F -> BELL + '\x90' # 0x30 -> + '\x91' # 0x31 -> + '\x16' # 0x32 -> SYNCHRONOUS IDLE + '\x93' # 0x33 -> INDEX RETURN + '\x94' # 0x34 -> PRESENTATION POSITION + '\x95' # 0x35 -> TRANSPARENT + '\x96' # 0x36 -> NUMERIC BACKSPACE + '\x04' # 0x37 -> END OF TRANSMISSION + '\x98' # 0x38 -> SUBSCRIPT + '\x99' # 0x39 -> INDENT TABULATION + '\x9a' # 0x3A -> REVERSE FORM FEED + '\x9b' # 0x3B -> CUSTOMER USE 
THREE + '\x14' # 0x3C -> DEVICE CONTROL FOUR + '\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE + '\x9e' # 0x3E -> + '\x1a' # 0x3F -> SUBSTITUTE + ' ' # 0x40 -> SPACE + '\u05d0' # 0x41 -> HEBREW LETTER ALEF + '\u05d1' # 0x42 -> HEBREW LETTER BET + '\u05d2' # 0x43 -> HEBREW LETTER GIMEL + '\u05d3' # 0x44 -> HEBREW LETTER DALET + '\u05d4' # 0x45 -> HEBREW LETTER HE + '\u05d5' # 0x46 -> HEBREW LETTER VAV + '\u05d6' # 0x47 -> HEBREW LETTER ZAYIN + '\u05d7' # 0x48 -> HEBREW LETTER HET + '\u05d8' # 0x49 -> HEBREW LETTER TET + '\xa2' # 0x4A -> CENT SIGN + '.' # 0x4B -> FULL STOP + '<' # 0x4C -> LESS-THAN SIGN + '(' # 0x4D -> LEFT PARENTHESIS + '+' # 0x4E -> PLUS SIGN + '|' # 0x4F -> VERTICAL LINE + '&' # 0x50 -> AMPERSAND + '\u05d9' # 0x51 -> HEBREW LETTER YOD + '\u05da' # 0x52 -> HEBREW LETTER FINAL KAF + '\u05db' # 0x53 -> HEBREW LETTER KAF + '\u05dc' # 0x54 -> HEBREW LETTER LAMED + '\u05dd' # 0x55 -> HEBREW LETTER FINAL MEM + '\u05de' # 0x56 -> HEBREW LETTER MEM + '\u05df' # 0x57 -> HEBREW LETTER FINAL NUN + '\u05e0' # 0x58 -> HEBREW LETTER NUN + '\u05e1' # 0x59 -> HEBREW LETTER SAMEKH + '!' # 0x5A -> EXCLAMATION MARK + '$' # 0x5B -> DOLLAR SIGN + '*' # 0x5C -> ASTERISK + ')' # 0x5D -> RIGHT PARENTHESIS + ';' # 0x5E -> SEMICOLON + '\xac' # 0x5F -> NOT SIGN + '-' # 0x60 -> HYPHEN-MINUS + '/' # 0x61 -> SOLIDUS + '\u05e2' # 0x62 -> HEBREW LETTER AYIN + '\u05e3' # 0x63 -> HEBREW LETTER FINAL PE + '\u05e4' # 0x64 -> HEBREW LETTER PE + '\u05e5' # 0x65 -> HEBREW LETTER FINAL TSADI + '\u05e6' # 0x66 -> HEBREW LETTER TSADI + '\u05e7' # 0x67 -> HEBREW LETTER QOF + '\u05e8' # 0x68 -> HEBREW LETTER RESH + '\u05e9' # 0x69 -> HEBREW LETTER SHIN + '\xa6' # 0x6A -> BROKEN BAR + ',' # 0x6B -> COMMA + '%' # 0x6C -> PERCENT SIGN + '_' # 0x6D -> LOW LINE + '>' # 0x6E -> GREATER-THAN SIGN + '?' 
# 0x6F -> QUESTION MARK + '\ufffe' # 0x70 -> UNDEFINED + '\u05ea' # 0x71 -> HEBREW LETTER TAV + '\ufffe' # 0x72 -> UNDEFINED + '\ufffe' # 0x73 -> UNDEFINED + '\xa0' # 0x74 -> NO-BREAK SPACE + '\ufffe' # 0x75 -> UNDEFINED + '\ufffe' # 0x76 -> UNDEFINED + '\ufffe' # 0x77 -> UNDEFINED + '\u2017' # 0x78 -> DOUBLE LOW LINE + '`' # 0x79 -> GRAVE ACCENT + ':' # 0x7A -> COLON + '#' # 0x7B -> NUMBER SIGN + '@' # 0x7C -> COMMERCIAL AT + "'" # 0x7D -> APOSTROPHE + '=' # 0x7E -> EQUALS SIGN + '"' # 0x7F -> QUOTATION MARK + '\ufffe' # 0x80 -> UNDEFINED + 'a' # 0x81 -> LATIN SMALL LETTER A + 'b' # 0x82 -> LATIN SMALL LETTER B + 'c' # 0x83 -> LATIN SMALL LETTER C + 'd' # 0x84 -> LATIN SMALL LETTER D + 'e' # 0x85 -> LATIN SMALL LETTER E + 'f' # 0x86 -> LATIN SMALL LETTER F + 'g' # 0x87 -> LATIN SMALL LETTER G + 'h' # 0x88 -> LATIN SMALL LETTER H + 'i' # 0x89 -> LATIN SMALL LETTER I + '\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\ufffe' # 0x8C -> UNDEFINED + '\ufffe' # 0x8D -> UNDEFINED + '\ufffe' # 0x8E -> UNDEFINED + '\xb1' # 0x8F -> PLUS-MINUS SIGN + '\xb0' # 0x90 -> DEGREE SIGN + 'j' # 0x91 -> LATIN SMALL LETTER J + 'k' # 0x92 -> LATIN SMALL LETTER K + 'l' # 0x93 -> LATIN SMALL LETTER L + 'm' # 0x94 -> LATIN SMALL LETTER M + 'n' # 0x95 -> LATIN SMALL LETTER N + 'o' # 0x96 -> LATIN SMALL LETTER O + 'p' # 0x97 -> LATIN SMALL LETTER P + 'q' # 0x98 -> LATIN SMALL LETTER Q + 'r' # 0x99 -> LATIN SMALL LETTER R + '\ufffe' # 0x9A -> UNDEFINED + '\ufffe' # 0x9B -> UNDEFINED + '\ufffe' # 0x9C -> UNDEFINED + '\xb8' # 0x9D -> CEDILLA + '\ufffe' # 0x9E -> UNDEFINED + '\xa4' # 0x9F -> CURRENCY SIGN + '\xb5' # 0xA0 -> MICRO SIGN + '~' # 0xA1 -> TILDE + 's' # 0xA2 -> LATIN SMALL LETTER S + 't' # 0xA3 -> LATIN SMALL LETTER T + 'u' # 0xA4 -> LATIN SMALL LETTER U + 'v' # 0xA5 -> LATIN SMALL LETTER V + 'w' # 0xA6 -> LATIN SMALL LETTER W + 'x' # 0xA7 -> LATIN SMALL LETTER X + 'y' # 0xA8 -> LATIN SMALL LETTER Y + 'z' # 0xA9 -> LATIN SMALL LETTER Z + '\ufffe' # 0xAA -> UNDEFINED + '\ufffe' # 0xAB -> UNDEFINED + '\ufffe' # 0xAC -> UNDEFINED + '\ufffe' # 0xAD -> UNDEFINED + '\ufffe' # 0xAE -> UNDEFINED + '\xae' # 0xAF -> REGISTERED SIGN + '^' # 0xB0 -> CIRCUMFLEX ACCENT + '\xa3' # 0xB1 -> POUND SIGN + '\xa5' # 0xB2 -> YEN SIGN + '\xb7' # 0xB3 -> MIDDLE DOT + '\xa9' # 0xB4 -> COPYRIGHT SIGN + '\xa7' # 0xB5 -> SECTION SIGN + '\xb6' # 0xB6 -> PILCROW SIGN + '\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER + '\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF + '\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS + '[' # 0xBA -> LEFT SQUARE BRACKET + ']' # 0xBB -> RIGHT SQUARE BRACKET + '\xaf' # 0xBC -> MACRON + '\xa8' # 0xBD -> DIAERESIS + '\xb4' # 0xBE -> ACUTE ACCENT + '\xd7' # 0xBF -> MULTIPLICATION SIGN + '{' # 0xC0 -> LEFT CURLY BRACKET + 'A' # 0xC1 -> LATIN CAPITAL LETTER A + 'B' # 0xC2 -> LATIN CAPITAL LETTER B + 'C' # 0xC3 -> LATIN CAPITAL LETTER C + 'D' # 0xC4 -> LATIN CAPITAL LETTER D + 'E' # 0xC5 -> LATIN CAPITAL LETTER E + 'F' # 0xC6 -> LATIN CAPITAL LETTER F + 'G' # 0xC7 -> LATIN CAPITAL LETTER G + 'H' # 0xC8 -> LATIN CAPITAL LETTER H + 'I' # 0xC9 -> LATIN CAPITAL LETTER I + '\xad' # 0xCA -> SOFT HYPHEN + '\ufffe' # 0xCB -> UNDEFINED + '\ufffe' # 0xCC -> UNDEFINED + '\ufffe' # 0xCD -> UNDEFINED + '\ufffe' # 0xCE -> UNDEFINED + '\ufffe' # 0xCF -> UNDEFINED + '}' # 0xD0 -> RIGHT CURLY BRACKET + 'J' # 0xD1 -> LATIN CAPITAL LETTER J + 'K' # 0xD2 -> LATIN CAPITAL LETTER K + 'L' # 0xD3 -> LATIN CAPITAL LETTER L + 'M' # 0xD4 -> LATIN CAPITAL LETTER M + 'N' # 0xD5 
-> LATIN CAPITAL LETTER N + 'O' # 0xD6 -> LATIN CAPITAL LETTER O + 'P' # 0xD7 -> LATIN CAPITAL LETTER P + 'Q' # 0xD8 -> LATIN CAPITAL LETTER Q + 'R' # 0xD9 -> LATIN CAPITAL LETTER R + '\xb9' # 0xDA -> SUPERSCRIPT ONE + '\ufffe' # 0xDB -> UNDEFINED + '\ufffe' # 0xDC -> UNDEFINED + '\ufffe' # 0xDD -> UNDEFINED + '\ufffe' # 0xDE -> UNDEFINED + '\ufffe' # 0xDF -> UNDEFINED + '\\' # 0xE0 -> REVERSE SOLIDUS + '\xf7' # 0xE1 -> DIVISION SIGN + 'S' # 0xE2 -> LATIN CAPITAL LETTER S + 'T' # 0xE3 -> LATIN CAPITAL LETTER T + 'U' # 0xE4 -> LATIN CAPITAL LETTER U + 'V' # 0xE5 -> LATIN CAPITAL LETTER V + 'W' # 0xE6 -> LATIN CAPITAL LETTER W + 'X' # 0xE7 -> LATIN CAPITAL LETTER X + 'Y' # 0xE8 -> LATIN CAPITAL LETTER Y + 'Z' # 0xE9 -> LATIN CAPITAL LETTER Z + '\xb2' # 0xEA -> SUPERSCRIPT TWO + '\ufffe' # 0xEB -> UNDEFINED + '\ufffe' # 0xEC -> UNDEFINED + '\ufffe' # 0xED -> UNDEFINED + '\ufffe' # 0xEE -> UNDEFINED + '\ufffe' # 0xEF -> UNDEFINED + '0' # 0xF0 -> DIGIT ZERO + '1' # 0xF1 -> DIGIT ONE + '2' # 0xF2 -> DIGIT TWO + '3' # 0xF3 -> DIGIT THREE + '4' # 0xF4 -> DIGIT FOUR + '5' # 0xF5 -> DIGIT FIVE + '6' # 0xF6 -> DIGIT SIX + '7' # 0xF7 -> DIGIT SEVEN + '8' # 0xF8 -> DIGIT EIGHT + '9' # 0xF9 -> DIGIT NINE + '\xb3' # 0xFA -> SUPERSCRIPT THREE + '\ufffe' # 0xFB -> UNDEFINED + '\ufffe' # 0xFC -> UNDEFINED + '\ufffe' # 0xFD -> UNDEFINED + '\ufffe' # 0xFE -> UNDEFINED + '\x9f' # 0xFF -> EIGHT ONES +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/cp437.py b/venv/Lib/encodings/cp437.py new file mode 100644 index 00000000..b6c75e2c --- /dev/null +++ b/venv/Lib/encodings/cp437.py @@ -0,0 +1,698 @@ +""" Python Character Mapping Codec cp437 generated from 'VENDORS/MICSFT/PC/CP437.TXT' with gencodec.py. + +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_map) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_map)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='cp437', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + +### Decoding Map + +decoding_map = codecs.make_identity_dict(range(256)) +decoding_map.update({ + 0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA + 0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS + 0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE + 0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX + 0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS + 0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE + 0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE + 0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA + 0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX + 0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS + 0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE + 0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS + 0x008c: 0x00ee, # 
LATIN SMALL LETTER I WITH CIRCUMFLEX + 0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE + 0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS + 0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE + 0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE + 0x0091: 0x00e6, # LATIN SMALL LIGATURE AE + 0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE + 0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX + 0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS + 0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE + 0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX + 0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE + 0x0098: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS + 0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS + 0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS + 0x009b: 0x00a2, # CENT SIGN + 0x009c: 0x00a3, # POUND SIGN + 0x009d: 0x00a5, # YEN SIGN + 0x009e: 0x20a7, # PESETA SIGN + 0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK + 0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE + 0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE + 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE + 0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE + 0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE + 0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE + 0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR + 0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR + 0x00a8: 0x00bf, # INVERTED QUESTION MARK + 0x00a9: 0x2310, # REVERSED NOT SIGN + 0x00aa: 0x00ac, # NOT SIGN + 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF + 0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER + 0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK + 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00b0: 0x2591, # LIGHT SHADE + 0x00b1: 0x2592, # MEDIUM SHADE + 0x00b2: 0x2593, # DARK SHADE + 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL + 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT + 0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE + 0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE + 0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE + 0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE + 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT + 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL + 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT + 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT + 0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE + 0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE + 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT + 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT + 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL + 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT + 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL + 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + 0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE + 0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE + 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT + 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT + 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL + 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL + 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + 0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE + 0x00d0: 0x2568, # BOX 
DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE + 0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE + 0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE + 0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE + 0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE + 0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE + 0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE + 0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE + 0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE + 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT + 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT + 0x00db: 0x2588, # FULL BLOCK + 0x00dc: 0x2584, # LOWER HALF BLOCK + 0x00dd: 0x258c, # LEFT HALF BLOCK + 0x00de: 0x2590, # RIGHT HALF BLOCK + 0x00df: 0x2580, # UPPER HALF BLOCK + 0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA + 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S + 0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA + 0x00e3: 0x03c0, # GREEK SMALL LETTER PI + 0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA + 0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA + 0x00e6: 0x00b5, # MICRO SIGN + 0x00e7: 0x03c4, # GREEK SMALL LETTER TAU + 0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI + 0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA + 0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA + 0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA + 0x00ec: 0x221e, # INFINITY + 0x00ed: 0x03c6, # GREEK SMALL LETTER PHI + 0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON + 0x00ef: 0x2229, # INTERSECTION + 0x00f0: 0x2261, # IDENTICAL TO + 0x00f1: 0x00b1, # PLUS-MINUS SIGN + 0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO + 0x00f3: 0x2264, # LESS-THAN OR EQUAL TO + 0x00f4: 0x2320, # TOP HALF INTEGRAL + 0x00f5: 0x2321, # BOTTOM HALF INTEGRAL + 0x00f6: 0x00f7, # DIVISION SIGN + 0x00f7: 0x2248, # ALMOST EQUAL TO + 0x00f8: 0x00b0, # DEGREE SIGN + 0x00f9: 0x2219, # BULLET OPERATOR + 0x00fa: 0x00b7, # MIDDLE DOT + 0x00fb: 0x221a, # SQUARE ROOT + 0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N + 0x00fd: 0x00b2, # SUPERSCRIPT TWO + 0x00fe: 0x25a0, # BLACK SQUARE + 0x00ff: 0x00a0, # NO-BREAK SPACE +}) + +### Decoding Table + +decoding_table = ( + '\x00' # 0x0000 -> NULL + '\x01' # 0x0001 -> START OF HEADING + '\x02' # 0x0002 -> START OF TEXT + '\x03' # 0x0003 -> END OF TEXT + '\x04' # 0x0004 -> END OF TRANSMISSION + '\x05' # 0x0005 -> ENQUIRY + '\x06' # 0x0006 -> ACKNOWLEDGE + '\x07' # 0x0007 -> BELL + '\x08' # 0x0008 -> BACKSPACE + '\t' # 0x0009 -> HORIZONTAL TABULATION + '\n' # 0x000a -> LINE FEED + '\x0b' # 0x000b -> VERTICAL TABULATION + '\x0c' # 0x000c -> FORM FEED + '\r' # 0x000d -> CARRIAGE RETURN + '\x0e' # 0x000e -> SHIFT OUT + '\x0f' # 0x000f -> SHIFT IN + '\x10' # 0x0010 -> DATA LINK ESCAPE + '\x11' # 0x0011 -> DEVICE CONTROL ONE + '\x12' # 0x0012 -> DEVICE CONTROL TWO + '\x13' # 0x0013 -> DEVICE CONTROL THREE + '\x14' # 0x0014 -> DEVICE CONTROL FOUR + '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x0016 -> SYNCHRONOUS IDLE + '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK + '\x18' # 0x0018 -> CANCEL + '\x19' # 0x0019 -> END OF MEDIUM + '\x1a' # 0x001a -> SUBSTITUTE + '\x1b' # 0x001b -> ESCAPE + '\x1c' # 0x001c -> FILE SEPARATOR + '\x1d' # 0x001d -> GROUP SEPARATOR + '\x1e' # 0x001e -> RECORD SEPARATOR + '\x1f' # 0x001f -> UNIT SEPARATOR + ' ' # 0x0020 -> SPACE + '!' 
# 0x0021 -> EXCLAMATION MARK + '"' # 0x0022 -> QUOTATION MARK + '#' # 0x0023 -> NUMBER SIGN + '$' # 0x0024 -> DOLLAR SIGN + '%' # 0x0025 -> PERCENT SIGN + '&' # 0x0026 -> AMPERSAND + "'" # 0x0027 -> APOSTROPHE + '(' # 0x0028 -> LEFT PARENTHESIS + ')' # 0x0029 -> RIGHT PARENTHESIS + '*' # 0x002a -> ASTERISK + '+' # 0x002b -> PLUS SIGN + ',' # 0x002c -> COMMA + '-' # 0x002d -> HYPHEN-MINUS + '.' # 0x002e -> FULL STOP + '/' # 0x002f -> SOLIDUS + '0' # 0x0030 -> DIGIT ZERO + '1' # 0x0031 -> DIGIT ONE + '2' # 0x0032 -> DIGIT TWO + '3' # 0x0033 -> DIGIT THREE + '4' # 0x0034 -> DIGIT FOUR + '5' # 0x0035 -> DIGIT FIVE + '6' # 0x0036 -> DIGIT SIX + '7' # 0x0037 -> DIGIT SEVEN + '8' # 0x0038 -> DIGIT EIGHT + '9' # 0x0039 -> DIGIT NINE + ':' # 0x003a -> COLON + ';' # 0x003b -> SEMICOLON + '<' # 0x003c -> LESS-THAN SIGN + '=' # 0x003d -> EQUALS SIGN + '>' # 0x003e -> GREATER-THAN SIGN + '?' # 0x003f -> QUESTION MARK + '@' # 0x0040 -> COMMERCIAL AT + 'A' # 0x0041 -> LATIN CAPITAL LETTER A + 'B' # 0x0042 -> LATIN CAPITAL LETTER B + 'C' # 0x0043 -> LATIN CAPITAL LETTER C + 'D' # 0x0044 -> LATIN CAPITAL LETTER D + 'E' # 0x0045 -> LATIN CAPITAL LETTER E + 'F' # 0x0046 -> LATIN CAPITAL LETTER F + 'G' # 0x0047 -> LATIN CAPITAL LETTER G + 'H' # 0x0048 -> LATIN CAPITAL LETTER H + 'I' # 0x0049 -> LATIN CAPITAL LETTER I + 'J' # 0x004a -> LATIN CAPITAL LETTER J + 'K' # 0x004b -> LATIN CAPITAL LETTER K + 'L' # 0x004c -> LATIN CAPITAL LETTER L + 'M' # 0x004d -> LATIN CAPITAL LETTER M + 'N' # 0x004e -> LATIN CAPITAL LETTER N + 'O' # 0x004f -> LATIN CAPITAL LETTER O + 'P' # 0x0050 -> LATIN CAPITAL LETTER P + 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q + 'R' # 0x0052 -> LATIN CAPITAL LETTER R + 'S' # 0x0053 -> LATIN CAPITAL LETTER S + 'T' # 0x0054 -> LATIN CAPITAL LETTER T + 'U' # 0x0055 -> LATIN CAPITAL LETTER U + 'V' # 0x0056 -> LATIN CAPITAL LETTER V + 'W' # 0x0057 -> LATIN CAPITAL LETTER W + 'X' # 0x0058 -> LATIN CAPITAL LETTER X + 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y + 'Z' # 0x005a -> LATIN CAPITAL LETTER Z + '[' # 0x005b -> LEFT SQUARE BRACKET + '\\' # 0x005c -> REVERSE SOLIDUS + ']' # 0x005d -> RIGHT SQUARE BRACKET + '^' # 0x005e -> CIRCUMFLEX ACCENT + '_' # 0x005f -> LOW LINE + '`' # 0x0060 -> GRAVE ACCENT + 'a' # 0x0061 -> LATIN SMALL LETTER A + 'b' # 0x0062 -> LATIN SMALL LETTER B + 'c' # 0x0063 -> LATIN SMALL LETTER C + 'd' # 0x0064 -> LATIN SMALL LETTER D + 'e' # 0x0065 -> LATIN SMALL LETTER E + 'f' # 0x0066 -> LATIN SMALL LETTER F + 'g' # 0x0067 -> LATIN SMALL LETTER G + 'h' # 0x0068 -> LATIN SMALL LETTER H + 'i' # 0x0069 -> LATIN SMALL LETTER I + 'j' # 0x006a -> LATIN SMALL LETTER J + 'k' # 0x006b -> LATIN SMALL LETTER K + 'l' # 0x006c -> LATIN SMALL LETTER L + 'm' # 0x006d -> LATIN SMALL LETTER M + 'n' # 0x006e -> LATIN SMALL LETTER N + 'o' # 0x006f -> LATIN SMALL LETTER O + 'p' # 0x0070 -> LATIN SMALL LETTER P + 'q' # 0x0071 -> LATIN SMALL LETTER Q + 'r' # 0x0072 -> LATIN SMALL LETTER R + 's' # 0x0073 -> LATIN SMALL LETTER S + 't' # 0x0074 -> LATIN SMALL LETTER T + 'u' # 0x0075 -> LATIN SMALL LETTER U + 'v' # 0x0076 -> LATIN SMALL LETTER V + 'w' # 0x0077 -> LATIN SMALL LETTER W + 'x' # 0x0078 -> LATIN SMALL LETTER X + 'y' # 0x0079 -> LATIN SMALL LETTER Y + 'z' # 0x007a -> LATIN SMALL LETTER Z + '{' # 0x007b -> LEFT CURLY BRACKET + '|' # 0x007c -> VERTICAL LINE + '}' # 0x007d -> RIGHT CURLY BRACKET + '~' # 0x007e -> TILDE + '\x7f' # 0x007f -> DELETE + '\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA + '\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS + '\xe9' # 0x0082 -> LATIN SMALL LETTER E 
WITH ACUTE + '\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX + '\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS + '\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE + '\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE + '\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA + '\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX + '\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS + '\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE + '\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS + '\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX + '\xec' # 0x008d -> LATIN SMALL LETTER I WITH GRAVE + '\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS + '\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE + '\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE + '\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE + '\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE + '\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX + '\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS + '\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE + '\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX + '\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE + '\xff' # 0x0098 -> LATIN SMALL LETTER Y WITH DIAERESIS + '\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS + '\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\xa2' # 0x009b -> CENT SIGN + '\xa3' # 0x009c -> POUND SIGN + '\xa5' # 0x009d -> YEN SIGN + '\u20a7' # 0x009e -> PESETA SIGN + '\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK + '\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE + '\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE + '\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE + '\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE + '\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE + '\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE + '\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR + '\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR + '\xbf' # 0x00a8 -> INVERTED QUESTION MARK + '\u2310' # 0x00a9 -> REVERSED NOT SIGN + '\xac' # 0x00aa -> NOT SIGN + '\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF + '\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER + '\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK + '\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\u2591' # 0x00b0 -> LIGHT SHADE + '\u2592' # 0x00b1 -> MEDIUM SHADE + '\u2593' # 0x00b2 -> DARK SHADE + '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL + '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT + '\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE + '\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE + '\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE + '\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE + '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT + '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL + '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT + '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT + '\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE + '\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE + '\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT + '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT + '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL + '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + '\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT + '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL + '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT 
VERTICAL AND HORIZONTAL + '\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE + '\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE + '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT + '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT + '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL + '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL + '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + '\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE + '\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE + '\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE + '\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE + '\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE + '\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE + '\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE + '\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE + '\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE + '\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE + '\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT + '\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT + '\u2588' # 0x00db -> FULL BLOCK + '\u2584' # 0x00dc -> LOWER HALF BLOCK + '\u258c' # 0x00dd -> LEFT HALF BLOCK + '\u2590' # 0x00de -> RIGHT HALF BLOCK + '\u2580' # 0x00df -> UPPER HALF BLOCK + '\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA + '\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S + '\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA + '\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI + '\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA + '\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA + '\xb5' # 0x00e6 -> MICRO SIGN + '\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU + '\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI + '\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA + '\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA + '\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA + '\u221e' # 0x00ec -> INFINITY + '\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI + '\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON + '\u2229' # 0x00ef -> INTERSECTION + '\u2261' # 0x00f0 -> IDENTICAL TO + '\xb1' # 0x00f1 -> PLUS-MINUS SIGN + '\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO + '\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO + '\u2320' # 0x00f4 -> TOP HALF INTEGRAL + '\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL + '\xf7' # 0x00f6 -> DIVISION SIGN + '\u2248' # 0x00f7 -> ALMOST EQUAL TO + '\xb0' # 0x00f8 -> DEGREE SIGN + '\u2219' # 0x00f9 -> BULLET OPERATOR + '\xb7' # 0x00fa -> MIDDLE DOT + '\u221a' # 0x00fb -> SQUARE ROOT + '\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N + '\xb2' # 0x00fd -> SUPERSCRIPT TWO + '\u25a0' # 0x00fe -> BLACK SQUARE + '\xa0' # 0x00ff -> NO-BREAK SPACE +) + +### Encoding Map + +encoding_map = { + 0x0000: 0x0000, # NULL + 0x0001: 0x0001, # START OF HEADING + 0x0002: 0x0002, # START OF TEXT + 0x0003: 0x0003, # END OF TEXT + 0x0004: 0x0004, # END OF TRANSMISSION + 0x0005: 0x0005, # ENQUIRY + 0x0006: 0x0006, # ACKNOWLEDGE + 0x0007: 0x0007, # BELL + 0x0008: 0x0008, # BACKSPACE + 0x0009: 0x0009, # HORIZONTAL TABULATION + 0x000a: 0x000a, # LINE FEED + 0x000b: 0x000b, # VERTICAL TABULATION + 0x000c: 0x000c, # FORM FEED + 0x000d: 0x000d, # CARRIAGE RETURN + 0x000e: 0x000e, # SHIFT OUT + 0x000f: 0x000f, # SHIFT IN + 0x0010: 0x0010, # DATA LINK ESCAPE + 0x0011: 0x0011, # DEVICE 
CONTROL ONE + 0x0012: 0x0012, # DEVICE CONTROL TWO + 0x0013: 0x0013, # DEVICE CONTROL THREE + 0x0014: 0x0014, # DEVICE CONTROL FOUR + 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE + 0x0016: 0x0016, # SYNCHRONOUS IDLE + 0x0017: 0x0017, # END OF TRANSMISSION BLOCK + 0x0018: 0x0018, # CANCEL + 0x0019: 0x0019, # END OF MEDIUM + 0x001a: 0x001a, # SUBSTITUTE + 0x001b: 0x001b, # ESCAPE + 0x001c: 0x001c, # FILE SEPARATOR + 0x001d: 0x001d, # GROUP SEPARATOR + 0x001e: 0x001e, # RECORD SEPARATOR + 0x001f: 0x001f, # UNIT SEPARATOR + 0x0020: 0x0020, # SPACE + 0x0021: 0x0021, # EXCLAMATION MARK + 0x0022: 0x0022, # QUOTATION MARK + 0x0023: 0x0023, # NUMBER SIGN + 0x0024: 0x0024, # DOLLAR SIGN + 0x0025: 0x0025, # PERCENT SIGN + 0x0026: 0x0026, # AMPERSAND + 0x0027: 0x0027, # APOSTROPHE + 0x0028: 0x0028, # LEFT PARENTHESIS + 0x0029: 0x0029, # RIGHT PARENTHESIS + 0x002a: 0x002a, # ASTERISK + 0x002b: 0x002b, # PLUS SIGN + 0x002c: 0x002c, # COMMA + 0x002d: 0x002d, # HYPHEN-MINUS + 0x002e: 0x002e, # FULL STOP + 0x002f: 0x002f, # SOLIDUS + 0x0030: 0x0030, # DIGIT ZERO + 0x0031: 0x0031, # DIGIT ONE + 0x0032: 0x0032, # DIGIT TWO + 0x0033: 0x0033, # DIGIT THREE + 0x0034: 0x0034, # DIGIT FOUR + 0x0035: 0x0035, # DIGIT FIVE + 0x0036: 0x0036, # DIGIT SIX + 0x0037: 0x0037, # DIGIT SEVEN + 0x0038: 0x0038, # DIGIT EIGHT + 0x0039: 0x0039, # DIGIT NINE + 0x003a: 0x003a, # COLON + 0x003b: 0x003b, # SEMICOLON + 0x003c: 0x003c, # LESS-THAN SIGN + 0x003d: 0x003d, # EQUALS SIGN + 0x003e: 0x003e, # GREATER-THAN SIGN + 0x003f: 0x003f, # QUESTION MARK + 0x0040: 0x0040, # COMMERCIAL AT + 0x0041: 0x0041, # LATIN CAPITAL LETTER A + 0x0042: 0x0042, # LATIN CAPITAL LETTER B + 0x0043: 0x0043, # LATIN CAPITAL LETTER C + 0x0044: 0x0044, # LATIN CAPITAL LETTER D + 0x0045: 0x0045, # LATIN CAPITAL LETTER E + 0x0046: 0x0046, # LATIN CAPITAL LETTER F + 0x0047: 0x0047, # LATIN CAPITAL LETTER G + 0x0048: 0x0048, # LATIN CAPITAL LETTER H + 0x0049: 0x0049, # LATIN CAPITAL LETTER I + 0x004a: 0x004a, # LATIN CAPITAL LETTER J + 0x004b: 0x004b, # LATIN CAPITAL LETTER K + 0x004c: 0x004c, # LATIN CAPITAL LETTER L + 0x004d: 0x004d, # LATIN CAPITAL LETTER M + 0x004e: 0x004e, # LATIN CAPITAL LETTER N + 0x004f: 0x004f, # LATIN CAPITAL LETTER O + 0x0050: 0x0050, # LATIN CAPITAL LETTER P + 0x0051: 0x0051, # LATIN CAPITAL LETTER Q + 0x0052: 0x0052, # LATIN CAPITAL LETTER R + 0x0053: 0x0053, # LATIN CAPITAL LETTER S + 0x0054: 0x0054, # LATIN CAPITAL LETTER T + 0x0055: 0x0055, # LATIN CAPITAL LETTER U + 0x0056: 0x0056, # LATIN CAPITAL LETTER V + 0x0057: 0x0057, # LATIN CAPITAL LETTER W + 0x0058: 0x0058, # LATIN CAPITAL LETTER X + 0x0059: 0x0059, # LATIN CAPITAL LETTER Y + 0x005a: 0x005a, # LATIN CAPITAL LETTER Z + 0x005b: 0x005b, # LEFT SQUARE BRACKET + 0x005c: 0x005c, # REVERSE SOLIDUS + 0x005d: 0x005d, # RIGHT SQUARE BRACKET + 0x005e: 0x005e, # CIRCUMFLEX ACCENT + 0x005f: 0x005f, # LOW LINE + 0x0060: 0x0060, # GRAVE ACCENT + 0x0061: 0x0061, # LATIN SMALL LETTER A + 0x0062: 0x0062, # LATIN SMALL LETTER B + 0x0063: 0x0063, # LATIN SMALL LETTER C + 0x0064: 0x0064, # LATIN SMALL LETTER D + 0x0065: 0x0065, # LATIN SMALL LETTER E + 0x0066: 0x0066, # LATIN SMALL LETTER F + 0x0067: 0x0067, # LATIN SMALL LETTER G + 0x0068: 0x0068, # LATIN SMALL LETTER H + 0x0069: 0x0069, # LATIN SMALL LETTER I + 0x006a: 0x006a, # LATIN SMALL LETTER J + 0x006b: 0x006b, # LATIN SMALL LETTER K + 0x006c: 0x006c, # LATIN SMALL LETTER L + 0x006d: 0x006d, # LATIN SMALL LETTER M + 0x006e: 0x006e, # LATIN SMALL LETTER N + 0x006f: 0x006f, # LATIN SMALL LETTER O + 0x0070: 0x0070, # LATIN SMALL LETTER P 
+ 0x0071: 0x0071, # LATIN SMALL LETTER Q + 0x0072: 0x0072, # LATIN SMALL LETTER R + 0x0073: 0x0073, # LATIN SMALL LETTER S + 0x0074: 0x0074, # LATIN SMALL LETTER T + 0x0075: 0x0075, # LATIN SMALL LETTER U + 0x0076: 0x0076, # LATIN SMALL LETTER V + 0x0077: 0x0077, # LATIN SMALL LETTER W + 0x0078: 0x0078, # LATIN SMALL LETTER X + 0x0079: 0x0079, # LATIN SMALL LETTER Y + 0x007a: 0x007a, # LATIN SMALL LETTER Z + 0x007b: 0x007b, # LEFT CURLY BRACKET + 0x007c: 0x007c, # VERTICAL LINE + 0x007d: 0x007d, # RIGHT CURLY BRACKET + 0x007e: 0x007e, # TILDE + 0x007f: 0x007f, # DELETE + 0x00a0: 0x00ff, # NO-BREAK SPACE + 0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK + 0x00a2: 0x009b, # CENT SIGN + 0x00a3: 0x009c, # POUND SIGN + 0x00a5: 0x009d, # YEN SIGN + 0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR + 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00ac: 0x00aa, # NOT SIGN + 0x00b0: 0x00f8, # DEGREE SIGN + 0x00b1: 0x00f1, # PLUS-MINUS SIGN + 0x00b2: 0x00fd, # SUPERSCRIPT TWO + 0x00b5: 0x00e6, # MICRO SIGN + 0x00b7: 0x00fa, # MIDDLE DOT + 0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR + 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER + 0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF + 0x00bf: 0x00a8, # INVERTED QUESTION MARK + 0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS + 0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE + 0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE + 0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA + 0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE + 0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE + 0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS + 0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS + 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S + 0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE + 0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE + 0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX + 0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS + 0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE + 0x00e6: 0x0091, # LATIN SMALL LIGATURE AE + 0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA + 0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE + 0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE + 0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX + 0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS + 0x00ec: 0x008d, # LATIN SMALL LETTER I WITH GRAVE + 0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE + 0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX + 0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS + 0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE + 0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE + 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE + 0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX + 0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS + 0x00f7: 0x00f6, # DIVISION SIGN + 0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE + 0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE + 0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX + 0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS + 0x00ff: 0x0098, # LATIN SMALL LETTER Y WITH DIAERESIS + 0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK + 0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA + 0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA + 0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA + 0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI + 0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA + 0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA + 0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA + 0x03b5: 
0x00ee, # GREEK SMALL LETTER EPSILON + 0x03c0: 0x00e3, # GREEK SMALL LETTER PI + 0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA + 0x03c4: 0x00e7, # GREEK SMALL LETTER TAU + 0x03c6: 0x00ed, # GREEK SMALL LETTER PHI + 0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N + 0x20a7: 0x009e, # PESETA SIGN + 0x2219: 0x00f9, # BULLET OPERATOR + 0x221a: 0x00fb, # SQUARE ROOT + 0x221e: 0x00ec, # INFINITY + 0x2229: 0x00ef, # INTERSECTION + 0x2248: 0x00f7, # ALMOST EQUAL TO + 0x2261: 0x00f0, # IDENTICAL TO + 0x2264: 0x00f3, # LESS-THAN OR EQUAL TO + 0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO + 0x2310: 0x00a9, # REVERSED NOT SIGN + 0x2320: 0x00f4, # TOP HALF INTEGRAL + 0x2321: 0x00f5, # BOTTOM HALF INTEGRAL + 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL + 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL + 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT + 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT + 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT + 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT + 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT + 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT + 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL + 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL + 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL + 0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE + 0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE + 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT + 0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE + 0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE + 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT + 0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE + 0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE + 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT + 0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE + 0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE + 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT + 0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE + 0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE + 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + 0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE + 0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE + 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT + 0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE + 0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE + 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + 0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE + 0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE + 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL + 0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE + 0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE + 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + 0x2580: 0x00df, # UPPER HALF BLOCK + 0x2584: 0x00dc, # LOWER HALF BLOCK + 0x2588: 0x00db, # FULL BLOCK + 0x258c: 0x00dd, # LEFT HALF BLOCK + 0x2590: 0x00de, # RIGHT HALF BLOCK + 0x2591: 0x00b0, # LIGHT SHADE + 0x2592: 0x00b1, # MEDIUM SHADE + 0x2593: 0x00b2, # DARK SHADE + 0x25a0: 0x00fe, # BLACK SQUARE +} diff --git a/venv/Lib/encodings/cp500.py b/venv/Lib/encodings/cp500.py new file mode 100644 index 00000000..5f61535f --- /dev/null +++ b/venv/Lib/encodings/cp500.py @@ -0,0 +1,307 
@@ +""" Python Character Mapping Codec cp500 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP500.TXT' with gencodec.py. + +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='cp500', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x9c' # 0x04 -> CONTROL + '\t' # 0x05 -> HORIZONTAL TABULATION + '\x86' # 0x06 -> CONTROL + '\x7f' # 0x07 -> DELETE + '\x97' # 0x08 -> CONTROL + '\x8d' # 0x09 -> CONTROL + '\x8e' # 0x0A -> CONTROL + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x9d' # 0x14 -> CONTROL + '\x85' # 0x15 -> CONTROL + '\x08' # 0x16 -> BACKSPACE + '\x87' # 0x17 -> CONTROL + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x92' # 0x1A -> CONTROL + '\x8f' # 0x1B -> CONTROL + '\x1c' # 0x1C -> FILE SEPARATOR + '\x1d' # 0x1D -> GROUP SEPARATOR + '\x1e' # 0x1E -> RECORD SEPARATOR + '\x1f' # 0x1F -> UNIT SEPARATOR + '\x80' # 0x20 -> CONTROL + '\x81' # 0x21 -> CONTROL + '\x82' # 0x22 -> CONTROL + '\x83' # 0x23 -> CONTROL + '\x84' # 0x24 -> CONTROL + '\n' # 0x25 -> LINE FEED + '\x17' # 0x26 -> END OF TRANSMISSION BLOCK + '\x1b' # 0x27 -> ESCAPE + '\x88' # 0x28 -> CONTROL + '\x89' # 0x29 -> CONTROL + '\x8a' # 0x2A -> CONTROL + '\x8b' # 0x2B -> CONTROL + '\x8c' # 0x2C -> CONTROL + '\x05' # 0x2D -> ENQUIRY + '\x06' # 0x2E -> ACKNOWLEDGE + '\x07' # 0x2F -> BELL + '\x90' # 0x30 -> CONTROL + '\x91' # 0x31 -> CONTROL + '\x16' # 0x32 -> SYNCHRONOUS IDLE + '\x93' # 0x33 -> CONTROL + '\x94' # 0x34 -> CONTROL + '\x95' # 0x35 -> CONTROL + '\x96' # 0x36 -> CONTROL + '\x04' # 0x37 -> END OF TRANSMISSION + '\x98' # 0x38 -> CONTROL + '\x99' # 0x39 -> CONTROL + '\x9a' # 0x3A -> CONTROL + '\x9b' # 0x3B -> CONTROL + '\x14' # 0x3C -> DEVICE CONTROL FOUR + '\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE + '\x9e' # 0x3E -> CONTROL + '\x1a' # 0x3F -> SUBSTITUTE + ' ' # 0x40 -> SPACE + '\xa0' # 0x41 -> NO-BREAK SPACE + '\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX + '\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS + '\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE + '\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE + '\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE + '\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE + '\xe7' # 0x48 -> LATIN SMALL LETTER C WITH CEDILLA + '\xf1' # 0x49 -> 
LATIN SMALL LETTER N WITH TILDE + '[' # 0x4A -> LEFT SQUARE BRACKET + '.' # 0x4B -> FULL STOP + '<' # 0x4C -> LESS-THAN SIGN + '(' # 0x4D -> LEFT PARENTHESIS + '+' # 0x4E -> PLUS SIGN + '!' # 0x4F -> EXCLAMATION MARK + '&' # 0x50 -> AMPERSAND + '\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE + '\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX + '\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS + '\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE + '\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE + '\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX + '\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS + '\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE + '\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN) + ']' # 0x5A -> RIGHT SQUARE BRACKET + '$' # 0x5B -> DOLLAR SIGN + '*' # 0x5C -> ASTERISK + ')' # 0x5D -> RIGHT PARENTHESIS + ';' # 0x5E -> SEMICOLON + '^' # 0x5F -> CIRCUMFLEX ACCENT + '-' # 0x60 -> HYPHEN-MINUS + '/' # 0x61 -> SOLIDUS + '\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX + '\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS + '\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE + '\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE + '\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE + '\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE + '\xc7' # 0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA + '\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE + '\xa6' # 0x6A -> BROKEN BAR + ',' # 0x6B -> COMMA + '%' # 0x6C -> PERCENT SIGN + '_' # 0x6D -> LOW LINE + '>' # 0x6E -> GREATER-THAN SIGN + '?' # 0x6F -> QUESTION MARK + '\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE + '\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE + '\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX + '\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS + '\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE + '\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE + '\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX + '\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS + '\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE + '`' # 0x79 -> GRAVE ACCENT + ':' # 0x7A -> COLON + '#' # 0x7B -> NUMBER SIGN + '@' # 0x7C -> COMMERCIAL AT + "'" # 0x7D -> APOSTROPHE + '=' # 0x7E -> EQUALS SIGN + '"' # 0x7F -> QUOTATION MARK + '\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE + 'a' # 0x81 -> LATIN SMALL LETTER A + 'b' # 0x82 -> LATIN SMALL LETTER B + 'c' # 0x83 -> LATIN SMALL LETTER C + 'd' # 0x84 -> LATIN SMALL LETTER D + 'e' # 0x85 -> LATIN SMALL LETTER E + 'f' # 0x86 -> LATIN SMALL LETTER F + 'g' # 0x87 -> LATIN SMALL LETTER G + 'h' # 0x88 -> LATIN SMALL LETTER H + 'i' # 0x89 -> LATIN SMALL LETTER I + '\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xf0' # 0x8C -> LATIN SMALL LETTER ETH (ICELANDIC) + '\xfd' # 0x8D -> LATIN SMALL LETTER Y WITH ACUTE + '\xfe' # 0x8E -> LATIN SMALL LETTER THORN (ICELANDIC) + '\xb1' # 0x8F -> PLUS-MINUS SIGN + '\xb0' # 0x90 -> DEGREE SIGN + 'j' # 0x91 -> LATIN SMALL LETTER J + 'k' # 0x92 -> LATIN SMALL LETTER K + 'l' # 0x93 -> LATIN SMALL LETTER L + 'm' # 0x94 -> LATIN SMALL LETTER M + 'n' # 0x95 -> LATIN SMALL LETTER N + 'o' # 0x96 -> LATIN SMALL LETTER O + 'p' # 0x97 -> LATIN SMALL LETTER P + 'q' # 0x98 -> LATIN SMALL LETTER Q + 'r' # 0x99 -> LATIN SMALL LETTER R + '\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR + '\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR + '\xe6' # 0x9C -> LATIN SMALL LIGATURE AE + '\xb8' # 0x9D -> CEDILLA + '\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE + '\xa4' # 0x9F 
-> CURRENCY SIGN + '\xb5' # 0xA0 -> MICRO SIGN + '~' # 0xA1 -> TILDE + 's' # 0xA2 -> LATIN SMALL LETTER S + 't' # 0xA3 -> LATIN SMALL LETTER T + 'u' # 0xA4 -> LATIN SMALL LETTER U + 'v' # 0xA5 -> LATIN SMALL LETTER V + 'w' # 0xA6 -> LATIN SMALL LETTER W + 'x' # 0xA7 -> LATIN SMALL LETTER X + 'y' # 0xA8 -> LATIN SMALL LETTER Y + 'z' # 0xA9 -> LATIN SMALL LETTER Z + '\xa1' # 0xAA -> INVERTED EXCLAMATION MARK + '\xbf' # 0xAB -> INVERTED QUESTION MARK + '\xd0' # 0xAC -> LATIN CAPITAL LETTER ETH (ICELANDIC) + '\xdd' # 0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE + '\xde' # 0xAE -> LATIN CAPITAL LETTER THORN (ICELANDIC) + '\xae' # 0xAF -> REGISTERED SIGN + '\xa2' # 0xB0 -> CENT SIGN + '\xa3' # 0xB1 -> POUND SIGN + '\xa5' # 0xB2 -> YEN SIGN + '\xb7' # 0xB3 -> MIDDLE DOT + '\xa9' # 0xB4 -> COPYRIGHT SIGN + '\xa7' # 0xB5 -> SECTION SIGN + '\xb6' # 0xB6 -> PILCROW SIGN + '\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER + '\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF + '\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS + '\xac' # 0xBA -> NOT SIGN + '|' # 0xBB -> VERTICAL LINE + '\xaf' # 0xBC -> MACRON + '\xa8' # 0xBD -> DIAERESIS + '\xb4' # 0xBE -> ACUTE ACCENT + '\xd7' # 0xBF -> MULTIPLICATION SIGN + '{' # 0xC0 -> LEFT CURLY BRACKET + 'A' # 0xC1 -> LATIN CAPITAL LETTER A + 'B' # 0xC2 -> LATIN CAPITAL LETTER B + 'C' # 0xC3 -> LATIN CAPITAL LETTER C + 'D' # 0xC4 -> LATIN CAPITAL LETTER D + 'E' # 0xC5 -> LATIN CAPITAL LETTER E + 'F' # 0xC6 -> LATIN CAPITAL LETTER F + 'G' # 0xC7 -> LATIN CAPITAL LETTER G + 'H' # 0xC8 -> LATIN CAPITAL LETTER H + 'I' # 0xC9 -> LATIN CAPITAL LETTER I + '\xad' # 0xCA -> SOFT HYPHEN + '\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX + '\xf6' # 0xCC -> LATIN SMALL LETTER O WITH DIAERESIS + '\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE + '\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE + '\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE + '}' # 0xD0 -> RIGHT CURLY BRACKET + 'J' # 0xD1 -> LATIN CAPITAL LETTER J + 'K' # 0xD2 -> LATIN CAPITAL LETTER K + 'L' # 0xD3 -> LATIN CAPITAL LETTER L + 'M' # 0xD4 -> LATIN CAPITAL LETTER M + 'N' # 0xD5 -> LATIN CAPITAL LETTER N + 'O' # 0xD6 -> LATIN CAPITAL LETTER O + 'P' # 0xD7 -> LATIN CAPITAL LETTER P + 'Q' # 0xD8 -> LATIN CAPITAL LETTER Q + 'R' # 0xD9 -> LATIN CAPITAL LETTER R + '\xb9' # 0xDA -> SUPERSCRIPT ONE + '\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX + '\xfc' # 0xDC -> LATIN SMALL LETTER U WITH DIAERESIS + '\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE + '\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE + '\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS + '\\' # 0xE0 -> REVERSE SOLIDUS + '\xf7' # 0xE1 -> DIVISION SIGN + 'S' # 0xE2 -> LATIN CAPITAL LETTER S + 'T' # 0xE3 -> LATIN CAPITAL LETTER T + 'U' # 0xE4 -> LATIN CAPITAL LETTER U + 'V' # 0xE5 -> LATIN CAPITAL LETTER V + 'W' # 0xE6 -> LATIN CAPITAL LETTER W + 'X' # 0xE7 -> LATIN CAPITAL LETTER X + 'Y' # 0xE8 -> LATIN CAPITAL LETTER Y + 'Z' # 0xE9 -> LATIN CAPITAL LETTER Z + '\xb2' # 0xEA -> SUPERSCRIPT TWO + '\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX + '\xd6' # 0xEC -> LATIN CAPITAL LETTER O WITH DIAERESIS + '\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE + '\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE + '\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE + '0' # 0xF0 -> DIGIT ZERO + '1' # 0xF1 -> DIGIT ONE + '2' # 0xF2 -> DIGIT TWO + '3' # 0xF3 -> DIGIT THREE + '4' # 0xF4 -> DIGIT FOUR + '5' # 0xF5 -> DIGIT FIVE + '6' # 0xF6 -> DIGIT SIX + '7' # 0xF7 -> DIGIT SEVEN + '8' # 0xF8 -> DIGIT EIGHT + '9' # 0xF9 -> DIGIT NINE + '\xb3' # 0xFA -> SUPERSCRIPT 
THREE + '\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX + '\xdc' # 0xFC -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE + '\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE + '\x9f' # 0xFF -> CONTROL +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/cp65001.py b/venv/Lib/encodings/cp65001.py new file mode 100644 index 00000000..95cb2aec --- /dev/null +++ b/venv/Lib/encodings/cp65001.py @@ -0,0 +1,43 @@ +""" +Code page 65001: Windows UTF-8 (CP_UTF8). +""" + +import codecs +import functools + +if not hasattr(codecs, 'code_page_encode'): + raise LookupError("cp65001 encoding is only available on Windows") + +### Codec APIs + +encode = functools.partial(codecs.code_page_encode, 65001) +_decode = functools.partial(codecs.code_page_decode, 65001) + +def decode(input, errors='strict'): + return codecs.code_page_decode(65001, input, errors, True) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return encode(input, self.errors)[0] + +class IncrementalDecoder(codecs.BufferedIncrementalDecoder): + _buffer_decode = _decode + +class StreamWriter(codecs.StreamWriter): + encode = encode + +class StreamReader(codecs.StreamReader): + decode = _decode + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='cp65001', + encode=encode, + decode=decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) diff --git a/venv/Lib/encodings/cp720.py b/venv/Lib/encodings/cp720.py new file mode 100644 index 00000000..96d60961 --- /dev/null +++ b/venv/Lib/encodings/cp720.py @@ -0,0 +1,309 @@ +"""Python Character Mapping Codec cp720 generated on Windows: +Vista 6.0.6002 SP2 Multiprocessor Free with the command: + python Tools/unicode/genwincodec.py 720 +"""#" + + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='cp720', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> CONTROL CHARACTER + '\x01' # 0x01 -> CONTROL CHARACTER + '\x02' # 0x02 -> CONTROL CHARACTER + '\x03' # 0x03 -> CONTROL CHARACTER + '\x04' # 0x04 -> CONTROL CHARACTER + '\x05' # 0x05 -> CONTROL CHARACTER + '\x06' # 0x06 -> CONTROL CHARACTER + '\x07' # 0x07 -> CONTROL CHARACTER + '\x08' # 0x08 -> CONTROL CHARACTER + '\t' # 0x09 -> CONTROL CHARACTER + '\n' # 0x0A -> CONTROL CHARACTER + '\x0b' # 0x0B -> CONTROL CHARACTER + '\x0c' # 0x0C -> CONTROL CHARACTER + '\r' # 0x0D -> CONTROL CHARACTER + '\x0e' # 0x0E -> CONTROL CHARACTER + '\x0f' # 
0x0F -> CONTROL CHARACTER + '\x10' # 0x10 -> CONTROL CHARACTER + '\x11' # 0x11 -> CONTROL CHARACTER + '\x12' # 0x12 -> CONTROL CHARACTER + '\x13' # 0x13 -> CONTROL CHARACTER + '\x14' # 0x14 -> CONTROL CHARACTER + '\x15' # 0x15 -> CONTROL CHARACTER + '\x16' # 0x16 -> CONTROL CHARACTER + '\x17' # 0x17 -> CONTROL CHARACTER + '\x18' # 0x18 -> CONTROL CHARACTER + '\x19' # 0x19 -> CONTROL CHARACTER + '\x1a' # 0x1A -> CONTROL CHARACTER + '\x1b' # 0x1B -> CONTROL CHARACTER + '\x1c' # 0x1C -> CONTROL CHARACTER + '\x1d' # 0x1D -> CONTROL CHARACTER + '\x1e' # 0x1E -> CONTROL CHARACTER + '\x1f' # 0x1F -> CONTROL CHARACTER + ' ' # 0x20 -> SPACE + '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' # 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' # 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V 
+ 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> CONTROL CHARACTER + '\x80' + '\x81' + '\xe9' # 0x82 -> LATIN SMALL LETTER E WITH ACUTE + '\xe2' # 0x83 -> LATIN SMALL LETTER A WITH CIRCUMFLEX + '\x84' + '\xe0' # 0x85 -> LATIN SMALL LETTER A WITH GRAVE + '\x86' + '\xe7' # 0x87 -> LATIN SMALL LETTER C WITH CEDILLA + '\xea' # 0x88 -> LATIN SMALL LETTER E WITH CIRCUMFLEX + '\xeb' # 0x89 -> LATIN SMALL LETTER E WITH DIAERESIS + '\xe8' # 0x8A -> LATIN SMALL LETTER E WITH GRAVE + '\xef' # 0x8B -> LATIN SMALL LETTER I WITH DIAERESIS + '\xee' # 0x8C -> LATIN SMALL LETTER I WITH CIRCUMFLEX + '\x8d' + '\x8e' + '\x8f' + '\x90' + '\u0651' # 0x91 -> ARABIC SHADDA + '\u0652' # 0x92 -> ARABIC SUKUN + '\xf4' # 0x93 -> LATIN SMALL LETTER O WITH CIRCUMFLEX + '\xa4' # 0x94 -> CURRENCY SIGN + '\u0640' # 0x95 -> ARABIC TATWEEL + '\xfb' # 0x96 -> LATIN SMALL LETTER U WITH CIRCUMFLEX + '\xf9' # 0x97 -> LATIN SMALL LETTER U WITH GRAVE + '\u0621' # 0x98 -> ARABIC LETTER HAMZA + '\u0622' # 0x99 -> ARABIC LETTER ALEF WITH MADDA ABOVE + '\u0623' # 0x9A -> ARABIC LETTER ALEF WITH HAMZA ABOVE + '\u0624' # 0x9B -> ARABIC LETTER WAW WITH HAMZA ABOVE + '\xa3' # 0x9C -> POUND SIGN + '\u0625' # 0x9D -> ARABIC LETTER ALEF WITH HAMZA BELOW + '\u0626' # 0x9E -> ARABIC LETTER YEH WITH HAMZA ABOVE + '\u0627' # 0x9F -> ARABIC LETTER ALEF + '\u0628' # 0xA0 -> ARABIC LETTER BEH + '\u0629' # 0xA1 -> ARABIC LETTER TEH MARBUTA + '\u062a' # 0xA2 -> ARABIC LETTER TEH + '\u062b' # 0xA3 -> ARABIC LETTER THEH + '\u062c' # 0xA4 -> ARABIC LETTER JEEM + '\u062d' # 0xA5 -> ARABIC LETTER HAH + '\u062e' # 0xA6 -> ARABIC LETTER KHAH + '\u062f' # 0xA7 -> ARABIC LETTER DAL + '\u0630' # 0xA8 -> ARABIC LETTER THAL + '\u0631' # 0xA9 -> ARABIC LETTER REH + '\u0632' # 0xAA -> ARABIC LETTER ZAIN + '\u0633' # 0xAB -> ARABIC LETTER SEEN + '\u0634' # 0xAC -> ARABIC LETTER SHEEN + '\u0635' # 0xAD -> ARABIC LETTER SAD + '\xab' # 0xAE -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xbb' # 0xAF -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\u2591' # 0xB0 -> LIGHT SHADE + '\u2592' # 0xB1 -> MEDIUM SHADE + '\u2593' # 0xB2 -> DARK SHADE + '\u2502' # 0xB3 -> BOX DRAWINGS LIGHT VERTICAL + '\u2524' # 0xB4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT + '\u2561' # 0xB5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE + '\u2562' # 0xB6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE + '\u2556' # 0xB7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE + '\u2555' # 0xB8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE + '\u2563' # 0xB9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT + '\u2551' # 0xBA -> BOX DRAWINGS DOUBLE VERTICAL + '\u2557' # 0xBB -> BOX DRAWINGS DOUBLE DOWN AND LEFT + '\u255d' # 0xBC -> BOX DRAWINGS DOUBLE UP AND LEFT + '\u255c' # 0xBD -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE + '\u255b' # 0xBE -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE + '\u2510' # 0xBF -> BOX DRAWINGS LIGHT DOWN AND LEFT + '\u2514' # 0xC0 -> BOX DRAWINGS LIGHT UP AND RIGHT + '\u2534' # 0xC1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL + '\u252c' # 0xC2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + '\u251c' # 0xC3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT + '\u2500' # 0xC4 -> BOX DRAWINGS LIGHT HORIZONTAL + '\u253c' # 0xC5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + '\u255e' # 0xC6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE + '\u255f' # 0xC7 -> BOX DRAWINGS VERTICAL 
DOUBLE AND RIGHT SINGLE + '\u255a' # 0xC8 -> BOX DRAWINGS DOUBLE UP AND RIGHT + '\u2554' # 0xC9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT + '\u2569' # 0xCA -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL + '\u2566' # 0xCB -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + '\u2560' # 0xCC -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + '\u2550' # 0xCD -> BOX DRAWINGS DOUBLE HORIZONTAL + '\u256c' # 0xCE -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + '\u2567' # 0xCF -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE + '\u2568' # 0xD0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE + '\u2564' # 0xD1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE + '\u2565' # 0xD2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE + '\u2559' # 0xD3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE + '\u2558' # 0xD4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE + '\u2552' # 0xD5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE + '\u2553' # 0xD6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE + '\u256b' # 0xD7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE + '\u256a' # 0xD8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE + '\u2518' # 0xD9 -> BOX DRAWINGS LIGHT UP AND LEFT + '\u250c' # 0xDA -> BOX DRAWINGS LIGHT DOWN AND RIGHT + '\u2588' # 0xDB -> FULL BLOCK + '\u2584' # 0xDC -> LOWER HALF BLOCK + '\u258c' # 0xDD -> LEFT HALF BLOCK + '\u2590' # 0xDE -> RIGHT HALF BLOCK + '\u2580' # 0xDF -> UPPER HALF BLOCK + '\u0636' # 0xE0 -> ARABIC LETTER DAD + '\u0637' # 0xE1 -> ARABIC LETTER TAH + '\u0638' # 0xE2 -> ARABIC LETTER ZAH + '\u0639' # 0xE3 -> ARABIC LETTER AIN + '\u063a' # 0xE4 -> ARABIC LETTER GHAIN + '\u0641' # 0xE5 -> ARABIC LETTER FEH + '\xb5' # 0xE6 -> MICRO SIGN + '\u0642' # 0xE7 -> ARABIC LETTER QAF + '\u0643' # 0xE8 -> ARABIC LETTER KAF + '\u0644' # 0xE9 -> ARABIC LETTER LAM + '\u0645' # 0xEA -> ARABIC LETTER MEEM + '\u0646' # 0xEB -> ARABIC LETTER NOON + '\u0647' # 0xEC -> ARABIC LETTER HEH + '\u0648' # 0xED -> ARABIC LETTER WAW + '\u0649' # 0xEE -> ARABIC LETTER ALEF MAKSURA + '\u064a' # 0xEF -> ARABIC LETTER YEH + '\u2261' # 0xF0 -> IDENTICAL TO + '\u064b' # 0xF1 -> ARABIC FATHATAN + '\u064c' # 0xF2 -> ARABIC DAMMATAN + '\u064d' # 0xF3 -> ARABIC KASRATAN + '\u064e' # 0xF4 -> ARABIC FATHA + '\u064f' # 0xF5 -> ARABIC DAMMA + '\u0650' # 0xF6 -> ARABIC KASRA + '\u2248' # 0xF7 -> ALMOST EQUAL TO + '\xb0' # 0xF8 -> DEGREE SIGN + '\u2219' # 0xF9 -> BULLET OPERATOR + '\xb7' # 0xFA -> MIDDLE DOT + '\u221a' # 0xFB -> SQUARE ROOT + '\u207f' # 0xFC -> SUPERSCRIPT LATIN SMALL LETTER N + '\xb2' # 0xFD -> SUPERSCRIPT TWO + '\u25a0' # 0xFE -> BLACK SQUARE + '\xa0' # 0xFF -> NO-BREAK SPACE +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/cp737.py b/venv/Lib/encodings/cp737.py new file mode 100644 index 00000000..9685bae7 --- /dev/null +++ b/venv/Lib/encodings/cp737.py @@ -0,0 +1,698 @@ +""" Python Character Mapping Codec cp737 generated from 'VENDORS/MICSFT/PC/CP737.TXT' with gencodec.py. 
+ +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_map) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_map)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='cp737', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + +### Decoding Map + +decoding_map = codecs.make_identity_dict(range(256)) +decoding_map.update({ + 0x0080: 0x0391, # GREEK CAPITAL LETTER ALPHA + 0x0081: 0x0392, # GREEK CAPITAL LETTER BETA + 0x0082: 0x0393, # GREEK CAPITAL LETTER GAMMA + 0x0083: 0x0394, # GREEK CAPITAL LETTER DELTA + 0x0084: 0x0395, # GREEK CAPITAL LETTER EPSILON + 0x0085: 0x0396, # GREEK CAPITAL LETTER ZETA + 0x0086: 0x0397, # GREEK CAPITAL LETTER ETA + 0x0087: 0x0398, # GREEK CAPITAL LETTER THETA + 0x0088: 0x0399, # GREEK CAPITAL LETTER IOTA + 0x0089: 0x039a, # GREEK CAPITAL LETTER KAPPA + 0x008a: 0x039b, # GREEK CAPITAL LETTER LAMDA + 0x008b: 0x039c, # GREEK CAPITAL LETTER MU + 0x008c: 0x039d, # GREEK CAPITAL LETTER NU + 0x008d: 0x039e, # GREEK CAPITAL LETTER XI + 0x008e: 0x039f, # GREEK CAPITAL LETTER OMICRON + 0x008f: 0x03a0, # GREEK CAPITAL LETTER PI + 0x0090: 0x03a1, # GREEK CAPITAL LETTER RHO + 0x0091: 0x03a3, # GREEK CAPITAL LETTER SIGMA + 0x0092: 0x03a4, # GREEK CAPITAL LETTER TAU + 0x0093: 0x03a5, # GREEK CAPITAL LETTER UPSILON + 0x0094: 0x03a6, # GREEK CAPITAL LETTER PHI + 0x0095: 0x03a7, # GREEK CAPITAL LETTER CHI + 0x0096: 0x03a8, # GREEK CAPITAL LETTER PSI + 0x0097: 0x03a9, # GREEK CAPITAL LETTER OMEGA + 0x0098: 0x03b1, # GREEK SMALL LETTER ALPHA + 0x0099: 0x03b2, # GREEK SMALL LETTER BETA + 0x009a: 0x03b3, # GREEK SMALL LETTER GAMMA + 0x009b: 0x03b4, # GREEK SMALL LETTER DELTA + 0x009c: 0x03b5, # GREEK SMALL LETTER EPSILON + 0x009d: 0x03b6, # GREEK SMALL LETTER ZETA + 0x009e: 0x03b7, # GREEK SMALL LETTER ETA + 0x009f: 0x03b8, # GREEK SMALL LETTER THETA + 0x00a0: 0x03b9, # GREEK SMALL LETTER IOTA + 0x00a1: 0x03ba, # GREEK SMALL LETTER KAPPA + 0x00a2: 0x03bb, # GREEK SMALL LETTER LAMDA + 0x00a3: 0x03bc, # GREEK SMALL LETTER MU + 0x00a4: 0x03bd, # GREEK SMALL LETTER NU + 0x00a5: 0x03be, # GREEK SMALL LETTER XI + 0x00a6: 0x03bf, # GREEK SMALL LETTER OMICRON + 0x00a7: 0x03c0, # GREEK SMALL LETTER PI + 0x00a8: 0x03c1, # GREEK SMALL LETTER RHO + 0x00a9: 0x03c3, # GREEK SMALL LETTER SIGMA + 0x00aa: 0x03c2, # GREEK SMALL LETTER FINAL SIGMA + 0x00ab: 0x03c4, # GREEK SMALL LETTER TAU + 0x00ac: 0x03c5, # GREEK SMALL LETTER UPSILON + 0x00ad: 0x03c6, # GREEK SMALL LETTER PHI + 0x00ae: 0x03c7, # GREEK SMALL LETTER CHI + 0x00af: 0x03c8, # GREEK SMALL LETTER PSI + 0x00b0: 0x2591, # LIGHT SHADE + 0x00b1: 0x2592, # MEDIUM SHADE + 0x00b2: 0x2593, # DARK SHADE + 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL + 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT + 0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE + 0x00b6: 
0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE + 0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE + 0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE + 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT + 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL + 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT + 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT + 0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE + 0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE + 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT + 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT + 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL + 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT + 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL + 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + 0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE + 0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE + 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT + 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT + 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL + 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL + 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + 0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE + 0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE + 0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE + 0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE + 0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE + 0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE + 0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE + 0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE + 0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE + 0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE + 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT + 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT + 0x00db: 0x2588, # FULL BLOCK + 0x00dc: 0x2584, # LOWER HALF BLOCK + 0x00dd: 0x258c, # LEFT HALF BLOCK + 0x00de: 0x2590, # RIGHT HALF BLOCK + 0x00df: 0x2580, # UPPER HALF BLOCK + 0x00e0: 0x03c9, # GREEK SMALL LETTER OMEGA + 0x00e1: 0x03ac, # GREEK SMALL LETTER ALPHA WITH TONOS + 0x00e2: 0x03ad, # GREEK SMALL LETTER EPSILON WITH TONOS + 0x00e3: 0x03ae, # GREEK SMALL LETTER ETA WITH TONOS + 0x00e4: 0x03ca, # GREEK SMALL LETTER IOTA WITH DIALYTIKA + 0x00e5: 0x03af, # GREEK SMALL LETTER IOTA WITH TONOS + 0x00e6: 0x03cc, # GREEK SMALL LETTER OMICRON WITH TONOS + 0x00e7: 0x03cd, # GREEK SMALL LETTER UPSILON WITH TONOS + 0x00e8: 0x03cb, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA + 0x00e9: 0x03ce, # GREEK SMALL LETTER OMEGA WITH TONOS + 0x00ea: 0x0386, # GREEK CAPITAL LETTER ALPHA WITH TONOS + 0x00eb: 0x0388, # GREEK CAPITAL LETTER EPSILON WITH TONOS + 0x00ec: 0x0389, # GREEK CAPITAL LETTER ETA WITH TONOS + 0x00ed: 0x038a, # GREEK CAPITAL LETTER IOTA WITH TONOS + 0x00ee: 0x038c, # GREEK CAPITAL LETTER OMICRON WITH TONOS + 0x00ef: 0x038e, # GREEK CAPITAL LETTER UPSILON WITH TONOS + 0x00f0: 0x038f, # GREEK CAPITAL LETTER OMEGA WITH TONOS + 0x00f1: 0x00b1, # PLUS-MINUS SIGN + 0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO + 0x00f3: 0x2264, # LESS-THAN OR EQUAL TO + 0x00f4: 0x03aa, # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA + 0x00f5: 0x03ab, # GREEK CAPITAL 
LETTER UPSILON WITH DIALYTIKA + 0x00f6: 0x00f7, # DIVISION SIGN + 0x00f7: 0x2248, # ALMOST EQUAL TO + 0x00f8: 0x00b0, # DEGREE SIGN + 0x00f9: 0x2219, # BULLET OPERATOR + 0x00fa: 0x00b7, # MIDDLE DOT + 0x00fb: 0x221a, # SQUARE ROOT + 0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N + 0x00fd: 0x00b2, # SUPERSCRIPT TWO + 0x00fe: 0x25a0, # BLACK SQUARE + 0x00ff: 0x00a0, # NO-BREAK SPACE +}) + +### Decoding Table + +decoding_table = ( + '\x00' # 0x0000 -> NULL + '\x01' # 0x0001 -> START OF HEADING + '\x02' # 0x0002 -> START OF TEXT + '\x03' # 0x0003 -> END OF TEXT + '\x04' # 0x0004 -> END OF TRANSMISSION + '\x05' # 0x0005 -> ENQUIRY + '\x06' # 0x0006 -> ACKNOWLEDGE + '\x07' # 0x0007 -> BELL + '\x08' # 0x0008 -> BACKSPACE + '\t' # 0x0009 -> HORIZONTAL TABULATION + '\n' # 0x000a -> LINE FEED + '\x0b' # 0x000b -> VERTICAL TABULATION + '\x0c' # 0x000c -> FORM FEED + '\r' # 0x000d -> CARRIAGE RETURN + '\x0e' # 0x000e -> SHIFT OUT + '\x0f' # 0x000f -> SHIFT IN + '\x10' # 0x0010 -> DATA LINK ESCAPE + '\x11' # 0x0011 -> DEVICE CONTROL ONE + '\x12' # 0x0012 -> DEVICE CONTROL TWO + '\x13' # 0x0013 -> DEVICE CONTROL THREE + '\x14' # 0x0014 -> DEVICE CONTROL FOUR + '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x0016 -> SYNCHRONOUS IDLE + '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK + '\x18' # 0x0018 -> CANCEL + '\x19' # 0x0019 -> END OF MEDIUM + '\x1a' # 0x001a -> SUBSTITUTE + '\x1b' # 0x001b -> ESCAPE + '\x1c' # 0x001c -> FILE SEPARATOR + '\x1d' # 0x001d -> GROUP SEPARATOR + '\x1e' # 0x001e -> RECORD SEPARATOR + '\x1f' # 0x001f -> UNIT SEPARATOR + ' ' # 0x0020 -> SPACE + '!' # 0x0021 -> EXCLAMATION MARK + '"' # 0x0022 -> QUOTATION MARK + '#' # 0x0023 -> NUMBER SIGN + '$' # 0x0024 -> DOLLAR SIGN + '%' # 0x0025 -> PERCENT SIGN + '&' # 0x0026 -> AMPERSAND + "'" # 0x0027 -> APOSTROPHE + '(' # 0x0028 -> LEFT PARENTHESIS + ')' # 0x0029 -> RIGHT PARENTHESIS + '*' # 0x002a -> ASTERISK + '+' # 0x002b -> PLUS SIGN + ',' # 0x002c -> COMMA + '-' # 0x002d -> HYPHEN-MINUS + '.' # 0x002e -> FULL STOP + '/' # 0x002f -> SOLIDUS + '0' # 0x0030 -> DIGIT ZERO + '1' # 0x0031 -> DIGIT ONE + '2' # 0x0032 -> DIGIT TWO + '3' # 0x0033 -> DIGIT THREE + '4' # 0x0034 -> DIGIT FOUR + '5' # 0x0035 -> DIGIT FIVE + '6' # 0x0036 -> DIGIT SIX + '7' # 0x0037 -> DIGIT SEVEN + '8' # 0x0038 -> DIGIT EIGHT + '9' # 0x0039 -> DIGIT NINE + ':' # 0x003a -> COLON + ';' # 0x003b -> SEMICOLON + '<' # 0x003c -> LESS-THAN SIGN + '=' # 0x003d -> EQUALS SIGN + '>' # 0x003e -> GREATER-THAN SIGN + '?' 
# 0x003f -> QUESTION MARK + '@' # 0x0040 -> COMMERCIAL AT + 'A' # 0x0041 -> LATIN CAPITAL LETTER A + 'B' # 0x0042 -> LATIN CAPITAL LETTER B + 'C' # 0x0043 -> LATIN CAPITAL LETTER C + 'D' # 0x0044 -> LATIN CAPITAL LETTER D + 'E' # 0x0045 -> LATIN CAPITAL LETTER E + 'F' # 0x0046 -> LATIN CAPITAL LETTER F + 'G' # 0x0047 -> LATIN CAPITAL LETTER G + 'H' # 0x0048 -> LATIN CAPITAL LETTER H + 'I' # 0x0049 -> LATIN CAPITAL LETTER I + 'J' # 0x004a -> LATIN CAPITAL LETTER J + 'K' # 0x004b -> LATIN CAPITAL LETTER K + 'L' # 0x004c -> LATIN CAPITAL LETTER L + 'M' # 0x004d -> LATIN CAPITAL LETTER M + 'N' # 0x004e -> LATIN CAPITAL LETTER N + 'O' # 0x004f -> LATIN CAPITAL LETTER O + 'P' # 0x0050 -> LATIN CAPITAL LETTER P + 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q + 'R' # 0x0052 -> LATIN CAPITAL LETTER R + 'S' # 0x0053 -> LATIN CAPITAL LETTER S + 'T' # 0x0054 -> LATIN CAPITAL LETTER T + 'U' # 0x0055 -> LATIN CAPITAL LETTER U + 'V' # 0x0056 -> LATIN CAPITAL LETTER V + 'W' # 0x0057 -> LATIN CAPITAL LETTER W + 'X' # 0x0058 -> LATIN CAPITAL LETTER X + 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y + 'Z' # 0x005a -> LATIN CAPITAL LETTER Z + '[' # 0x005b -> LEFT SQUARE BRACKET + '\\' # 0x005c -> REVERSE SOLIDUS + ']' # 0x005d -> RIGHT SQUARE BRACKET + '^' # 0x005e -> CIRCUMFLEX ACCENT + '_' # 0x005f -> LOW LINE + '`' # 0x0060 -> GRAVE ACCENT + 'a' # 0x0061 -> LATIN SMALL LETTER A + 'b' # 0x0062 -> LATIN SMALL LETTER B + 'c' # 0x0063 -> LATIN SMALL LETTER C + 'd' # 0x0064 -> LATIN SMALL LETTER D + 'e' # 0x0065 -> LATIN SMALL LETTER E + 'f' # 0x0066 -> LATIN SMALL LETTER F + 'g' # 0x0067 -> LATIN SMALL LETTER G + 'h' # 0x0068 -> LATIN SMALL LETTER H + 'i' # 0x0069 -> LATIN SMALL LETTER I + 'j' # 0x006a -> LATIN SMALL LETTER J + 'k' # 0x006b -> LATIN SMALL LETTER K + 'l' # 0x006c -> LATIN SMALL LETTER L + 'm' # 0x006d -> LATIN SMALL LETTER M + 'n' # 0x006e -> LATIN SMALL LETTER N + 'o' # 0x006f -> LATIN SMALL LETTER O + 'p' # 0x0070 -> LATIN SMALL LETTER P + 'q' # 0x0071 -> LATIN SMALL LETTER Q + 'r' # 0x0072 -> LATIN SMALL LETTER R + 's' # 0x0073 -> LATIN SMALL LETTER S + 't' # 0x0074 -> LATIN SMALL LETTER T + 'u' # 0x0075 -> LATIN SMALL LETTER U + 'v' # 0x0076 -> LATIN SMALL LETTER V + 'w' # 0x0077 -> LATIN SMALL LETTER W + 'x' # 0x0078 -> LATIN SMALL LETTER X + 'y' # 0x0079 -> LATIN SMALL LETTER Y + 'z' # 0x007a -> LATIN SMALL LETTER Z + '{' # 0x007b -> LEFT CURLY BRACKET + '|' # 0x007c -> VERTICAL LINE + '}' # 0x007d -> RIGHT CURLY BRACKET + '~' # 0x007e -> TILDE + '\x7f' # 0x007f -> DELETE + '\u0391' # 0x0080 -> GREEK CAPITAL LETTER ALPHA + '\u0392' # 0x0081 -> GREEK CAPITAL LETTER BETA + '\u0393' # 0x0082 -> GREEK CAPITAL LETTER GAMMA + '\u0394' # 0x0083 -> GREEK CAPITAL LETTER DELTA + '\u0395' # 0x0084 -> GREEK CAPITAL LETTER EPSILON + '\u0396' # 0x0085 -> GREEK CAPITAL LETTER ZETA + '\u0397' # 0x0086 -> GREEK CAPITAL LETTER ETA + '\u0398' # 0x0087 -> GREEK CAPITAL LETTER THETA + '\u0399' # 0x0088 -> GREEK CAPITAL LETTER IOTA + '\u039a' # 0x0089 -> GREEK CAPITAL LETTER KAPPA + '\u039b' # 0x008a -> GREEK CAPITAL LETTER LAMDA + '\u039c' # 0x008b -> GREEK CAPITAL LETTER MU + '\u039d' # 0x008c -> GREEK CAPITAL LETTER NU + '\u039e' # 0x008d -> GREEK CAPITAL LETTER XI + '\u039f' # 0x008e -> GREEK CAPITAL LETTER OMICRON + '\u03a0' # 0x008f -> GREEK CAPITAL LETTER PI + '\u03a1' # 0x0090 -> GREEK CAPITAL LETTER RHO + '\u03a3' # 0x0091 -> GREEK CAPITAL LETTER SIGMA + '\u03a4' # 0x0092 -> GREEK CAPITAL LETTER TAU + '\u03a5' # 0x0093 -> GREEK CAPITAL LETTER UPSILON + '\u03a6' # 0x0094 -> GREEK CAPITAL LETTER PHI + '\u03a7' # 
0x0095 -> GREEK CAPITAL LETTER CHI + '\u03a8' # 0x0096 -> GREEK CAPITAL LETTER PSI + '\u03a9' # 0x0097 -> GREEK CAPITAL LETTER OMEGA + '\u03b1' # 0x0098 -> GREEK SMALL LETTER ALPHA + '\u03b2' # 0x0099 -> GREEK SMALL LETTER BETA + '\u03b3' # 0x009a -> GREEK SMALL LETTER GAMMA + '\u03b4' # 0x009b -> GREEK SMALL LETTER DELTA + '\u03b5' # 0x009c -> GREEK SMALL LETTER EPSILON + '\u03b6' # 0x009d -> GREEK SMALL LETTER ZETA + '\u03b7' # 0x009e -> GREEK SMALL LETTER ETA + '\u03b8' # 0x009f -> GREEK SMALL LETTER THETA + '\u03b9' # 0x00a0 -> GREEK SMALL LETTER IOTA + '\u03ba' # 0x00a1 -> GREEK SMALL LETTER KAPPA + '\u03bb' # 0x00a2 -> GREEK SMALL LETTER LAMDA + '\u03bc' # 0x00a3 -> GREEK SMALL LETTER MU + '\u03bd' # 0x00a4 -> GREEK SMALL LETTER NU + '\u03be' # 0x00a5 -> GREEK SMALL LETTER XI + '\u03bf' # 0x00a6 -> GREEK SMALL LETTER OMICRON + '\u03c0' # 0x00a7 -> GREEK SMALL LETTER PI + '\u03c1' # 0x00a8 -> GREEK SMALL LETTER RHO + '\u03c3' # 0x00a9 -> GREEK SMALL LETTER SIGMA + '\u03c2' # 0x00aa -> GREEK SMALL LETTER FINAL SIGMA + '\u03c4' # 0x00ab -> GREEK SMALL LETTER TAU + '\u03c5' # 0x00ac -> GREEK SMALL LETTER UPSILON + '\u03c6' # 0x00ad -> GREEK SMALL LETTER PHI + '\u03c7' # 0x00ae -> GREEK SMALL LETTER CHI + '\u03c8' # 0x00af -> GREEK SMALL LETTER PSI + '\u2591' # 0x00b0 -> LIGHT SHADE + '\u2592' # 0x00b1 -> MEDIUM SHADE + '\u2593' # 0x00b2 -> DARK SHADE + '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL + '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT + '\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE + '\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE + '\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE + '\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE + '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT + '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL + '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT + '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT + '\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE + '\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE + '\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT + '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT + '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL + '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + '\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT + '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL + '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + '\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE + '\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE + '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT + '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT + '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL + '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL + '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + '\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE + '\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE + '\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE + '\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE + '\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE + '\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE + '\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE + '\u2553' # 
0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE + '\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE + '\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE + '\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT + '\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT + '\u2588' # 0x00db -> FULL BLOCK + '\u2584' # 0x00dc -> LOWER HALF BLOCK + '\u258c' # 0x00dd -> LEFT HALF BLOCK + '\u2590' # 0x00de -> RIGHT HALF BLOCK + '\u2580' # 0x00df -> UPPER HALF BLOCK + '\u03c9' # 0x00e0 -> GREEK SMALL LETTER OMEGA + '\u03ac' # 0x00e1 -> GREEK SMALL LETTER ALPHA WITH TONOS + '\u03ad' # 0x00e2 -> GREEK SMALL LETTER EPSILON WITH TONOS + '\u03ae' # 0x00e3 -> GREEK SMALL LETTER ETA WITH TONOS + '\u03ca' # 0x00e4 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA + '\u03af' # 0x00e5 -> GREEK SMALL LETTER IOTA WITH TONOS + '\u03cc' # 0x00e6 -> GREEK SMALL LETTER OMICRON WITH TONOS + '\u03cd' # 0x00e7 -> GREEK SMALL LETTER UPSILON WITH TONOS + '\u03cb' # 0x00e8 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA + '\u03ce' # 0x00e9 -> GREEK SMALL LETTER OMEGA WITH TONOS + '\u0386' # 0x00ea -> GREEK CAPITAL LETTER ALPHA WITH TONOS + '\u0388' # 0x00eb -> GREEK CAPITAL LETTER EPSILON WITH TONOS + '\u0389' # 0x00ec -> GREEK CAPITAL LETTER ETA WITH TONOS + '\u038a' # 0x00ed -> GREEK CAPITAL LETTER IOTA WITH TONOS + '\u038c' # 0x00ee -> GREEK CAPITAL LETTER OMICRON WITH TONOS + '\u038e' # 0x00ef -> GREEK CAPITAL LETTER UPSILON WITH TONOS + '\u038f' # 0x00f0 -> GREEK CAPITAL LETTER OMEGA WITH TONOS + '\xb1' # 0x00f1 -> PLUS-MINUS SIGN + '\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO + '\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO + '\u03aa' # 0x00f4 -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA + '\u03ab' # 0x00f5 -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA + '\xf7' # 0x00f6 -> DIVISION SIGN + '\u2248' # 0x00f7 -> ALMOST EQUAL TO + '\xb0' # 0x00f8 -> DEGREE SIGN + '\u2219' # 0x00f9 -> BULLET OPERATOR + '\xb7' # 0x00fa -> MIDDLE DOT + '\u221a' # 0x00fb -> SQUARE ROOT + '\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N + '\xb2' # 0x00fd -> SUPERSCRIPT TWO + '\u25a0' # 0x00fe -> BLACK SQUARE + '\xa0' # 0x00ff -> NO-BREAK SPACE +) + +### Encoding Map + +encoding_map = { + 0x0000: 0x0000, # NULL + 0x0001: 0x0001, # START OF HEADING + 0x0002: 0x0002, # START OF TEXT + 0x0003: 0x0003, # END OF TEXT + 0x0004: 0x0004, # END OF TRANSMISSION + 0x0005: 0x0005, # ENQUIRY + 0x0006: 0x0006, # ACKNOWLEDGE + 0x0007: 0x0007, # BELL + 0x0008: 0x0008, # BACKSPACE + 0x0009: 0x0009, # HORIZONTAL TABULATION + 0x000a: 0x000a, # LINE FEED + 0x000b: 0x000b, # VERTICAL TABULATION + 0x000c: 0x000c, # FORM FEED + 0x000d: 0x000d, # CARRIAGE RETURN + 0x000e: 0x000e, # SHIFT OUT + 0x000f: 0x000f, # SHIFT IN + 0x0010: 0x0010, # DATA LINK ESCAPE + 0x0011: 0x0011, # DEVICE CONTROL ONE + 0x0012: 0x0012, # DEVICE CONTROL TWO + 0x0013: 0x0013, # DEVICE CONTROL THREE + 0x0014: 0x0014, # DEVICE CONTROL FOUR + 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE + 0x0016: 0x0016, # SYNCHRONOUS IDLE + 0x0017: 0x0017, # END OF TRANSMISSION BLOCK + 0x0018: 0x0018, # CANCEL + 0x0019: 0x0019, # END OF MEDIUM + 0x001a: 0x001a, # SUBSTITUTE + 0x001b: 0x001b, # ESCAPE + 0x001c: 0x001c, # FILE SEPARATOR + 0x001d: 0x001d, # GROUP SEPARATOR + 0x001e: 0x001e, # RECORD SEPARATOR + 0x001f: 0x001f, # UNIT SEPARATOR + 0x0020: 0x0020, # SPACE + 0x0021: 0x0021, # EXCLAMATION MARK + 0x0022: 0x0022, # QUOTATION MARK + 0x0023: 0x0023, # NUMBER SIGN + 0x0024: 0x0024, # DOLLAR SIGN + 0x0025: 0x0025, # PERCENT SIGN + 0x0026: 0x0026, # AMPERSAND + 0x0027: 0x0027, # 
APOSTROPHE + 0x0028: 0x0028, # LEFT PARENTHESIS + 0x0029: 0x0029, # RIGHT PARENTHESIS + 0x002a: 0x002a, # ASTERISK + 0x002b: 0x002b, # PLUS SIGN + 0x002c: 0x002c, # COMMA + 0x002d: 0x002d, # HYPHEN-MINUS + 0x002e: 0x002e, # FULL STOP + 0x002f: 0x002f, # SOLIDUS + 0x0030: 0x0030, # DIGIT ZERO + 0x0031: 0x0031, # DIGIT ONE + 0x0032: 0x0032, # DIGIT TWO + 0x0033: 0x0033, # DIGIT THREE + 0x0034: 0x0034, # DIGIT FOUR + 0x0035: 0x0035, # DIGIT FIVE + 0x0036: 0x0036, # DIGIT SIX + 0x0037: 0x0037, # DIGIT SEVEN + 0x0038: 0x0038, # DIGIT EIGHT + 0x0039: 0x0039, # DIGIT NINE + 0x003a: 0x003a, # COLON + 0x003b: 0x003b, # SEMICOLON + 0x003c: 0x003c, # LESS-THAN SIGN + 0x003d: 0x003d, # EQUALS SIGN + 0x003e: 0x003e, # GREATER-THAN SIGN + 0x003f: 0x003f, # QUESTION MARK + 0x0040: 0x0040, # COMMERCIAL AT + 0x0041: 0x0041, # LATIN CAPITAL LETTER A + 0x0042: 0x0042, # LATIN CAPITAL LETTER B + 0x0043: 0x0043, # LATIN CAPITAL LETTER C + 0x0044: 0x0044, # LATIN CAPITAL LETTER D + 0x0045: 0x0045, # LATIN CAPITAL LETTER E + 0x0046: 0x0046, # LATIN CAPITAL LETTER F + 0x0047: 0x0047, # LATIN CAPITAL LETTER G + 0x0048: 0x0048, # LATIN CAPITAL LETTER H + 0x0049: 0x0049, # LATIN CAPITAL LETTER I + 0x004a: 0x004a, # LATIN CAPITAL LETTER J + 0x004b: 0x004b, # LATIN CAPITAL LETTER K + 0x004c: 0x004c, # LATIN CAPITAL LETTER L + 0x004d: 0x004d, # LATIN CAPITAL LETTER M + 0x004e: 0x004e, # LATIN CAPITAL LETTER N + 0x004f: 0x004f, # LATIN CAPITAL LETTER O + 0x0050: 0x0050, # LATIN CAPITAL LETTER P + 0x0051: 0x0051, # LATIN CAPITAL LETTER Q + 0x0052: 0x0052, # LATIN CAPITAL LETTER R + 0x0053: 0x0053, # LATIN CAPITAL LETTER S + 0x0054: 0x0054, # LATIN CAPITAL LETTER T + 0x0055: 0x0055, # LATIN CAPITAL LETTER U + 0x0056: 0x0056, # LATIN CAPITAL LETTER V + 0x0057: 0x0057, # LATIN CAPITAL LETTER W + 0x0058: 0x0058, # LATIN CAPITAL LETTER X + 0x0059: 0x0059, # LATIN CAPITAL LETTER Y + 0x005a: 0x005a, # LATIN CAPITAL LETTER Z + 0x005b: 0x005b, # LEFT SQUARE BRACKET + 0x005c: 0x005c, # REVERSE SOLIDUS + 0x005d: 0x005d, # RIGHT SQUARE BRACKET + 0x005e: 0x005e, # CIRCUMFLEX ACCENT + 0x005f: 0x005f, # LOW LINE + 0x0060: 0x0060, # GRAVE ACCENT + 0x0061: 0x0061, # LATIN SMALL LETTER A + 0x0062: 0x0062, # LATIN SMALL LETTER B + 0x0063: 0x0063, # LATIN SMALL LETTER C + 0x0064: 0x0064, # LATIN SMALL LETTER D + 0x0065: 0x0065, # LATIN SMALL LETTER E + 0x0066: 0x0066, # LATIN SMALL LETTER F + 0x0067: 0x0067, # LATIN SMALL LETTER G + 0x0068: 0x0068, # LATIN SMALL LETTER H + 0x0069: 0x0069, # LATIN SMALL LETTER I + 0x006a: 0x006a, # LATIN SMALL LETTER J + 0x006b: 0x006b, # LATIN SMALL LETTER K + 0x006c: 0x006c, # LATIN SMALL LETTER L + 0x006d: 0x006d, # LATIN SMALL LETTER M + 0x006e: 0x006e, # LATIN SMALL LETTER N + 0x006f: 0x006f, # LATIN SMALL LETTER O + 0x0070: 0x0070, # LATIN SMALL LETTER P + 0x0071: 0x0071, # LATIN SMALL LETTER Q + 0x0072: 0x0072, # LATIN SMALL LETTER R + 0x0073: 0x0073, # LATIN SMALL LETTER S + 0x0074: 0x0074, # LATIN SMALL LETTER T + 0x0075: 0x0075, # LATIN SMALL LETTER U + 0x0076: 0x0076, # LATIN SMALL LETTER V + 0x0077: 0x0077, # LATIN SMALL LETTER W + 0x0078: 0x0078, # LATIN SMALL LETTER X + 0x0079: 0x0079, # LATIN SMALL LETTER Y + 0x007a: 0x007a, # LATIN SMALL LETTER Z + 0x007b: 0x007b, # LEFT CURLY BRACKET + 0x007c: 0x007c, # VERTICAL LINE + 0x007d: 0x007d, # RIGHT CURLY BRACKET + 0x007e: 0x007e, # TILDE + 0x007f: 0x007f, # DELETE + 0x00a0: 0x00ff, # NO-BREAK SPACE + 0x00b0: 0x00f8, # DEGREE SIGN + 0x00b1: 0x00f1, # PLUS-MINUS SIGN + 0x00b2: 0x00fd, # SUPERSCRIPT TWO + 0x00b7: 0x00fa, # MIDDLE DOT + 0x00f7: 
0x00f6, # DIVISION SIGN + 0x0386: 0x00ea, # GREEK CAPITAL LETTER ALPHA WITH TONOS + 0x0388: 0x00eb, # GREEK CAPITAL LETTER EPSILON WITH TONOS + 0x0389: 0x00ec, # GREEK CAPITAL LETTER ETA WITH TONOS + 0x038a: 0x00ed, # GREEK CAPITAL LETTER IOTA WITH TONOS + 0x038c: 0x00ee, # GREEK CAPITAL LETTER OMICRON WITH TONOS + 0x038e: 0x00ef, # GREEK CAPITAL LETTER UPSILON WITH TONOS + 0x038f: 0x00f0, # GREEK CAPITAL LETTER OMEGA WITH TONOS + 0x0391: 0x0080, # GREEK CAPITAL LETTER ALPHA + 0x0392: 0x0081, # GREEK CAPITAL LETTER BETA + 0x0393: 0x0082, # GREEK CAPITAL LETTER GAMMA + 0x0394: 0x0083, # GREEK CAPITAL LETTER DELTA + 0x0395: 0x0084, # GREEK CAPITAL LETTER EPSILON + 0x0396: 0x0085, # GREEK CAPITAL LETTER ZETA + 0x0397: 0x0086, # GREEK CAPITAL LETTER ETA + 0x0398: 0x0087, # GREEK CAPITAL LETTER THETA + 0x0399: 0x0088, # GREEK CAPITAL LETTER IOTA + 0x039a: 0x0089, # GREEK CAPITAL LETTER KAPPA + 0x039b: 0x008a, # GREEK CAPITAL LETTER LAMDA + 0x039c: 0x008b, # GREEK CAPITAL LETTER MU + 0x039d: 0x008c, # GREEK CAPITAL LETTER NU + 0x039e: 0x008d, # GREEK CAPITAL LETTER XI + 0x039f: 0x008e, # GREEK CAPITAL LETTER OMICRON + 0x03a0: 0x008f, # GREEK CAPITAL LETTER PI + 0x03a1: 0x0090, # GREEK CAPITAL LETTER RHO + 0x03a3: 0x0091, # GREEK CAPITAL LETTER SIGMA + 0x03a4: 0x0092, # GREEK CAPITAL LETTER TAU + 0x03a5: 0x0093, # GREEK CAPITAL LETTER UPSILON + 0x03a6: 0x0094, # GREEK CAPITAL LETTER PHI + 0x03a7: 0x0095, # GREEK CAPITAL LETTER CHI + 0x03a8: 0x0096, # GREEK CAPITAL LETTER PSI + 0x03a9: 0x0097, # GREEK CAPITAL LETTER OMEGA + 0x03aa: 0x00f4, # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA + 0x03ab: 0x00f5, # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA + 0x03ac: 0x00e1, # GREEK SMALL LETTER ALPHA WITH TONOS + 0x03ad: 0x00e2, # GREEK SMALL LETTER EPSILON WITH TONOS + 0x03ae: 0x00e3, # GREEK SMALL LETTER ETA WITH TONOS + 0x03af: 0x00e5, # GREEK SMALL LETTER IOTA WITH TONOS + 0x03b1: 0x0098, # GREEK SMALL LETTER ALPHA + 0x03b2: 0x0099, # GREEK SMALL LETTER BETA + 0x03b3: 0x009a, # GREEK SMALL LETTER GAMMA + 0x03b4: 0x009b, # GREEK SMALL LETTER DELTA + 0x03b5: 0x009c, # GREEK SMALL LETTER EPSILON + 0x03b6: 0x009d, # GREEK SMALL LETTER ZETA + 0x03b7: 0x009e, # GREEK SMALL LETTER ETA + 0x03b8: 0x009f, # GREEK SMALL LETTER THETA + 0x03b9: 0x00a0, # GREEK SMALL LETTER IOTA + 0x03ba: 0x00a1, # GREEK SMALL LETTER KAPPA + 0x03bb: 0x00a2, # GREEK SMALL LETTER LAMDA + 0x03bc: 0x00a3, # GREEK SMALL LETTER MU + 0x03bd: 0x00a4, # GREEK SMALL LETTER NU + 0x03be: 0x00a5, # GREEK SMALL LETTER XI + 0x03bf: 0x00a6, # GREEK SMALL LETTER OMICRON + 0x03c0: 0x00a7, # GREEK SMALL LETTER PI + 0x03c1: 0x00a8, # GREEK SMALL LETTER RHO + 0x03c2: 0x00aa, # GREEK SMALL LETTER FINAL SIGMA + 0x03c3: 0x00a9, # GREEK SMALL LETTER SIGMA + 0x03c4: 0x00ab, # GREEK SMALL LETTER TAU + 0x03c5: 0x00ac, # GREEK SMALL LETTER UPSILON + 0x03c6: 0x00ad, # GREEK SMALL LETTER PHI + 0x03c7: 0x00ae, # GREEK SMALL LETTER CHI + 0x03c8: 0x00af, # GREEK SMALL LETTER PSI + 0x03c9: 0x00e0, # GREEK SMALL LETTER OMEGA + 0x03ca: 0x00e4, # GREEK SMALL LETTER IOTA WITH DIALYTIKA + 0x03cb: 0x00e8, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA + 0x03cc: 0x00e6, # GREEK SMALL LETTER OMICRON WITH TONOS + 0x03cd: 0x00e7, # GREEK SMALL LETTER UPSILON WITH TONOS + 0x03ce: 0x00e9, # GREEK SMALL LETTER OMEGA WITH TONOS + 0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N + 0x2219: 0x00f9, # BULLET OPERATOR + 0x221a: 0x00fb, # SQUARE ROOT + 0x2248: 0x00f7, # ALMOST EQUAL TO + 0x2264: 0x00f3, # LESS-THAN OR EQUAL TO + 0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO + 0x2500: 0x00c4, # 
BOX DRAWINGS LIGHT HORIZONTAL + 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL + 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT + 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT + 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT + 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT + 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT + 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT + 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL + 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL + 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL + 0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE + 0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE + 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT + 0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE + 0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE + 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT + 0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE + 0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE + 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT + 0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE + 0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE + 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT + 0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE + 0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE + 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + 0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE + 0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE + 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT + 0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE + 0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE + 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + 0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE + 0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE + 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL + 0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE + 0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE + 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + 0x2580: 0x00df, # UPPER HALF BLOCK + 0x2584: 0x00dc, # LOWER HALF BLOCK + 0x2588: 0x00db, # FULL BLOCK + 0x258c: 0x00dd, # LEFT HALF BLOCK + 0x2590: 0x00de, # RIGHT HALF BLOCK + 0x2591: 0x00b0, # LIGHT SHADE + 0x2592: 0x00b1, # MEDIUM SHADE + 0x2593: 0x00b2, # DARK SHADE + 0x25a0: 0x00fe, # BLACK SQUARE +} diff --git a/venv/Lib/encodings/cp775.py b/venv/Lib/encodings/cp775.py new file mode 100644 index 00000000..fe06e7bc --- /dev/null +++ b/venv/Lib/encodings/cp775.py @@ -0,0 +1,697 @@ +""" Python Character Mapping Codec cp775 generated from 'VENDORS/MICSFT/PC/CP775.TXT' with gencodec.py. 
+ +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_map) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_map)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='cp775', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) +### Decoding Map + +decoding_map = codecs.make_identity_dict(range(256)) +decoding_map.update({ + 0x0080: 0x0106, # LATIN CAPITAL LETTER C WITH ACUTE + 0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS + 0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE + 0x0083: 0x0101, # LATIN SMALL LETTER A WITH MACRON + 0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS + 0x0085: 0x0123, # LATIN SMALL LETTER G WITH CEDILLA + 0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE + 0x0087: 0x0107, # LATIN SMALL LETTER C WITH ACUTE + 0x0088: 0x0142, # LATIN SMALL LETTER L WITH STROKE + 0x0089: 0x0113, # LATIN SMALL LETTER E WITH MACRON + 0x008a: 0x0156, # LATIN CAPITAL LETTER R WITH CEDILLA + 0x008b: 0x0157, # LATIN SMALL LETTER R WITH CEDILLA + 0x008c: 0x012b, # LATIN SMALL LETTER I WITH MACRON + 0x008d: 0x0179, # LATIN CAPITAL LETTER Z WITH ACUTE + 0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS + 0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE + 0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE + 0x0091: 0x00e6, # LATIN SMALL LIGATURE AE + 0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE + 0x0093: 0x014d, # LATIN SMALL LETTER O WITH MACRON + 0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS + 0x0095: 0x0122, # LATIN CAPITAL LETTER G WITH CEDILLA + 0x0096: 0x00a2, # CENT SIGN + 0x0097: 0x015a, # LATIN CAPITAL LETTER S WITH ACUTE + 0x0098: 0x015b, # LATIN SMALL LETTER S WITH ACUTE + 0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS + 0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS + 0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE + 0x009c: 0x00a3, # POUND SIGN + 0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE + 0x009e: 0x00d7, # MULTIPLICATION SIGN + 0x009f: 0x00a4, # CURRENCY SIGN + 0x00a0: 0x0100, # LATIN CAPITAL LETTER A WITH MACRON + 0x00a1: 0x012a, # LATIN CAPITAL LETTER I WITH MACRON + 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE + 0x00a3: 0x017b, # LATIN CAPITAL LETTER Z WITH DOT ABOVE + 0x00a4: 0x017c, # LATIN SMALL LETTER Z WITH DOT ABOVE + 0x00a5: 0x017a, # LATIN SMALL LETTER Z WITH ACUTE + 0x00a6: 0x201d, # RIGHT DOUBLE QUOTATION MARK + 0x00a7: 0x00a6, # BROKEN BAR + 0x00a8: 0x00a9, # COPYRIGHT SIGN + 0x00a9: 0x00ae, # REGISTERED SIGN + 0x00aa: 0x00ac, # NOT SIGN + 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF + 0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER + 0x00ad: 0x0141, # LATIN CAPITAL LETTER L WITH STROKE + 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00b0: 0x2591, # 
LIGHT SHADE + 0x00b1: 0x2592, # MEDIUM SHADE + 0x00b2: 0x2593, # DARK SHADE + 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL + 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT + 0x00b5: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK + 0x00b6: 0x010c, # LATIN CAPITAL LETTER C WITH CARON + 0x00b7: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK + 0x00b8: 0x0116, # LATIN CAPITAL LETTER E WITH DOT ABOVE + 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT + 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL + 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT + 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT + 0x00bd: 0x012e, # LATIN CAPITAL LETTER I WITH OGONEK + 0x00be: 0x0160, # LATIN CAPITAL LETTER S WITH CARON + 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT + 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT + 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL + 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT + 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL + 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + 0x00c6: 0x0172, # LATIN CAPITAL LETTER U WITH OGONEK + 0x00c7: 0x016a, # LATIN CAPITAL LETTER U WITH MACRON + 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT + 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT + 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL + 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL + 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + 0x00cf: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON + 0x00d0: 0x0105, # LATIN SMALL LETTER A WITH OGONEK + 0x00d1: 0x010d, # LATIN SMALL LETTER C WITH CARON + 0x00d2: 0x0119, # LATIN SMALL LETTER E WITH OGONEK + 0x00d3: 0x0117, # LATIN SMALL LETTER E WITH DOT ABOVE + 0x00d4: 0x012f, # LATIN SMALL LETTER I WITH OGONEK + 0x00d5: 0x0161, # LATIN SMALL LETTER S WITH CARON + 0x00d6: 0x0173, # LATIN SMALL LETTER U WITH OGONEK + 0x00d7: 0x016b, # LATIN SMALL LETTER U WITH MACRON + 0x00d8: 0x017e, # LATIN SMALL LETTER Z WITH CARON + 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT + 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT + 0x00db: 0x2588, # FULL BLOCK + 0x00dc: 0x2584, # LOWER HALF BLOCK + 0x00dd: 0x258c, # LEFT HALF BLOCK + 0x00de: 0x2590, # RIGHT HALF BLOCK + 0x00df: 0x2580, # UPPER HALF BLOCK + 0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE + 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S (GERMAN) + 0x00e2: 0x014c, # LATIN CAPITAL LETTER O WITH MACRON + 0x00e3: 0x0143, # LATIN CAPITAL LETTER N WITH ACUTE + 0x00e4: 0x00f5, # LATIN SMALL LETTER O WITH TILDE + 0x00e5: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE + 0x00e6: 0x00b5, # MICRO SIGN + 0x00e7: 0x0144, # LATIN SMALL LETTER N WITH ACUTE + 0x00e8: 0x0136, # LATIN CAPITAL LETTER K WITH CEDILLA + 0x00e9: 0x0137, # LATIN SMALL LETTER K WITH CEDILLA + 0x00ea: 0x013b, # LATIN CAPITAL LETTER L WITH CEDILLA + 0x00eb: 0x013c, # LATIN SMALL LETTER L WITH CEDILLA + 0x00ec: 0x0146, # LATIN SMALL LETTER N WITH CEDILLA + 0x00ed: 0x0112, # LATIN CAPITAL LETTER E WITH MACRON + 0x00ee: 0x0145, # LATIN CAPITAL LETTER N WITH CEDILLA + 0x00ef: 0x2019, # RIGHT SINGLE QUOTATION MARK + 0x00f0: 0x00ad, # SOFT HYPHEN + 0x00f1: 0x00b1, # PLUS-MINUS SIGN + 0x00f2: 0x201c, # LEFT DOUBLE QUOTATION MARK + 0x00f3: 0x00be, # VULGAR FRACTION THREE QUARTERS + 0x00f4: 0x00b6, # PILCROW SIGN + 0x00f5: 0x00a7, # SECTION SIGN + 0x00f6: 0x00f7, # DIVISION SIGN + 
0x00f7: 0x201e, # DOUBLE LOW-9 QUOTATION MARK + 0x00f8: 0x00b0, # DEGREE SIGN + 0x00f9: 0x2219, # BULLET OPERATOR + 0x00fa: 0x00b7, # MIDDLE DOT + 0x00fb: 0x00b9, # SUPERSCRIPT ONE + 0x00fc: 0x00b3, # SUPERSCRIPT THREE + 0x00fd: 0x00b2, # SUPERSCRIPT TWO + 0x00fe: 0x25a0, # BLACK SQUARE + 0x00ff: 0x00a0, # NO-BREAK SPACE +}) + +### Decoding Table + +decoding_table = ( + '\x00' # 0x0000 -> NULL + '\x01' # 0x0001 -> START OF HEADING + '\x02' # 0x0002 -> START OF TEXT + '\x03' # 0x0003 -> END OF TEXT + '\x04' # 0x0004 -> END OF TRANSMISSION + '\x05' # 0x0005 -> ENQUIRY + '\x06' # 0x0006 -> ACKNOWLEDGE + '\x07' # 0x0007 -> BELL + '\x08' # 0x0008 -> BACKSPACE + '\t' # 0x0009 -> HORIZONTAL TABULATION + '\n' # 0x000a -> LINE FEED + '\x0b' # 0x000b -> VERTICAL TABULATION + '\x0c' # 0x000c -> FORM FEED + '\r' # 0x000d -> CARRIAGE RETURN + '\x0e' # 0x000e -> SHIFT OUT + '\x0f' # 0x000f -> SHIFT IN + '\x10' # 0x0010 -> DATA LINK ESCAPE + '\x11' # 0x0011 -> DEVICE CONTROL ONE + '\x12' # 0x0012 -> DEVICE CONTROL TWO + '\x13' # 0x0013 -> DEVICE CONTROL THREE + '\x14' # 0x0014 -> DEVICE CONTROL FOUR + '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x0016 -> SYNCHRONOUS IDLE + '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK + '\x18' # 0x0018 -> CANCEL + '\x19' # 0x0019 -> END OF MEDIUM + '\x1a' # 0x001a -> SUBSTITUTE + '\x1b' # 0x001b -> ESCAPE + '\x1c' # 0x001c -> FILE SEPARATOR + '\x1d' # 0x001d -> GROUP SEPARATOR + '\x1e' # 0x001e -> RECORD SEPARATOR + '\x1f' # 0x001f -> UNIT SEPARATOR + ' ' # 0x0020 -> SPACE + '!' # 0x0021 -> EXCLAMATION MARK + '"' # 0x0022 -> QUOTATION MARK + '#' # 0x0023 -> NUMBER SIGN + '$' # 0x0024 -> DOLLAR SIGN + '%' # 0x0025 -> PERCENT SIGN + '&' # 0x0026 -> AMPERSAND + "'" # 0x0027 -> APOSTROPHE + '(' # 0x0028 -> LEFT PARENTHESIS + ')' # 0x0029 -> RIGHT PARENTHESIS + '*' # 0x002a -> ASTERISK + '+' # 0x002b -> PLUS SIGN + ',' # 0x002c -> COMMA + '-' # 0x002d -> HYPHEN-MINUS + '.' # 0x002e -> FULL STOP + '/' # 0x002f -> SOLIDUS + '0' # 0x0030 -> DIGIT ZERO + '1' # 0x0031 -> DIGIT ONE + '2' # 0x0032 -> DIGIT TWO + '3' # 0x0033 -> DIGIT THREE + '4' # 0x0034 -> DIGIT FOUR + '5' # 0x0035 -> DIGIT FIVE + '6' # 0x0036 -> DIGIT SIX + '7' # 0x0037 -> DIGIT SEVEN + '8' # 0x0038 -> DIGIT EIGHT + '9' # 0x0039 -> DIGIT NINE + ':' # 0x003a -> COLON + ';' # 0x003b -> SEMICOLON + '<' # 0x003c -> LESS-THAN SIGN + '=' # 0x003d -> EQUALS SIGN + '>' # 0x003e -> GREATER-THAN SIGN + '?' 
# 0x003f -> QUESTION MARK + '@' # 0x0040 -> COMMERCIAL AT + 'A' # 0x0041 -> LATIN CAPITAL LETTER A + 'B' # 0x0042 -> LATIN CAPITAL LETTER B + 'C' # 0x0043 -> LATIN CAPITAL LETTER C + 'D' # 0x0044 -> LATIN CAPITAL LETTER D + 'E' # 0x0045 -> LATIN CAPITAL LETTER E + 'F' # 0x0046 -> LATIN CAPITAL LETTER F + 'G' # 0x0047 -> LATIN CAPITAL LETTER G + 'H' # 0x0048 -> LATIN CAPITAL LETTER H + 'I' # 0x0049 -> LATIN CAPITAL LETTER I + 'J' # 0x004a -> LATIN CAPITAL LETTER J + 'K' # 0x004b -> LATIN CAPITAL LETTER K + 'L' # 0x004c -> LATIN CAPITAL LETTER L + 'M' # 0x004d -> LATIN CAPITAL LETTER M + 'N' # 0x004e -> LATIN CAPITAL LETTER N + 'O' # 0x004f -> LATIN CAPITAL LETTER O + 'P' # 0x0050 -> LATIN CAPITAL LETTER P + 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q + 'R' # 0x0052 -> LATIN CAPITAL LETTER R + 'S' # 0x0053 -> LATIN CAPITAL LETTER S + 'T' # 0x0054 -> LATIN CAPITAL LETTER T + 'U' # 0x0055 -> LATIN CAPITAL LETTER U + 'V' # 0x0056 -> LATIN CAPITAL LETTER V + 'W' # 0x0057 -> LATIN CAPITAL LETTER W + 'X' # 0x0058 -> LATIN CAPITAL LETTER X + 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y + 'Z' # 0x005a -> LATIN CAPITAL LETTER Z + '[' # 0x005b -> LEFT SQUARE BRACKET + '\\' # 0x005c -> REVERSE SOLIDUS + ']' # 0x005d -> RIGHT SQUARE BRACKET + '^' # 0x005e -> CIRCUMFLEX ACCENT + '_' # 0x005f -> LOW LINE + '`' # 0x0060 -> GRAVE ACCENT + 'a' # 0x0061 -> LATIN SMALL LETTER A + 'b' # 0x0062 -> LATIN SMALL LETTER B + 'c' # 0x0063 -> LATIN SMALL LETTER C + 'd' # 0x0064 -> LATIN SMALL LETTER D + 'e' # 0x0065 -> LATIN SMALL LETTER E + 'f' # 0x0066 -> LATIN SMALL LETTER F + 'g' # 0x0067 -> LATIN SMALL LETTER G + 'h' # 0x0068 -> LATIN SMALL LETTER H + 'i' # 0x0069 -> LATIN SMALL LETTER I + 'j' # 0x006a -> LATIN SMALL LETTER J + 'k' # 0x006b -> LATIN SMALL LETTER K + 'l' # 0x006c -> LATIN SMALL LETTER L + 'm' # 0x006d -> LATIN SMALL LETTER M + 'n' # 0x006e -> LATIN SMALL LETTER N + 'o' # 0x006f -> LATIN SMALL LETTER O + 'p' # 0x0070 -> LATIN SMALL LETTER P + 'q' # 0x0071 -> LATIN SMALL LETTER Q + 'r' # 0x0072 -> LATIN SMALL LETTER R + 's' # 0x0073 -> LATIN SMALL LETTER S + 't' # 0x0074 -> LATIN SMALL LETTER T + 'u' # 0x0075 -> LATIN SMALL LETTER U + 'v' # 0x0076 -> LATIN SMALL LETTER V + 'w' # 0x0077 -> LATIN SMALL LETTER W + 'x' # 0x0078 -> LATIN SMALL LETTER X + 'y' # 0x0079 -> LATIN SMALL LETTER Y + 'z' # 0x007a -> LATIN SMALL LETTER Z + '{' # 0x007b -> LEFT CURLY BRACKET + '|' # 0x007c -> VERTICAL LINE + '}' # 0x007d -> RIGHT CURLY BRACKET + '~' # 0x007e -> TILDE + '\x7f' # 0x007f -> DELETE + '\u0106' # 0x0080 -> LATIN CAPITAL LETTER C WITH ACUTE + '\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS + '\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE + '\u0101' # 0x0083 -> LATIN SMALL LETTER A WITH MACRON + '\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS + '\u0123' # 0x0085 -> LATIN SMALL LETTER G WITH CEDILLA + '\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE + '\u0107' # 0x0087 -> LATIN SMALL LETTER C WITH ACUTE + '\u0142' # 0x0088 -> LATIN SMALL LETTER L WITH STROKE + '\u0113' # 0x0089 -> LATIN SMALL LETTER E WITH MACRON + '\u0156' # 0x008a -> LATIN CAPITAL LETTER R WITH CEDILLA + '\u0157' # 0x008b -> LATIN SMALL LETTER R WITH CEDILLA + '\u012b' # 0x008c -> LATIN SMALL LETTER I WITH MACRON + '\u0179' # 0x008d -> LATIN CAPITAL LETTER Z WITH ACUTE + '\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS + '\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE + '\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE + '\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE + '\xc6' # 0x0092 -> LATIN CAPITAL 
LIGATURE AE + '\u014d' # 0x0093 -> LATIN SMALL LETTER O WITH MACRON + '\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS + '\u0122' # 0x0095 -> LATIN CAPITAL LETTER G WITH CEDILLA + '\xa2' # 0x0096 -> CENT SIGN + '\u015a' # 0x0097 -> LATIN CAPITAL LETTER S WITH ACUTE + '\u015b' # 0x0098 -> LATIN SMALL LETTER S WITH ACUTE + '\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS + '\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE + '\xa3' # 0x009c -> POUND SIGN + '\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE + '\xd7' # 0x009e -> MULTIPLICATION SIGN + '\xa4' # 0x009f -> CURRENCY SIGN + '\u0100' # 0x00a0 -> LATIN CAPITAL LETTER A WITH MACRON + '\u012a' # 0x00a1 -> LATIN CAPITAL LETTER I WITH MACRON + '\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE + '\u017b' # 0x00a3 -> LATIN CAPITAL LETTER Z WITH DOT ABOVE + '\u017c' # 0x00a4 -> LATIN SMALL LETTER Z WITH DOT ABOVE + '\u017a' # 0x00a5 -> LATIN SMALL LETTER Z WITH ACUTE + '\u201d' # 0x00a6 -> RIGHT DOUBLE QUOTATION MARK + '\xa6' # 0x00a7 -> BROKEN BAR + '\xa9' # 0x00a8 -> COPYRIGHT SIGN + '\xae' # 0x00a9 -> REGISTERED SIGN + '\xac' # 0x00aa -> NOT SIGN + '\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF + '\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER + '\u0141' # 0x00ad -> LATIN CAPITAL LETTER L WITH STROKE + '\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\u2591' # 0x00b0 -> LIGHT SHADE + '\u2592' # 0x00b1 -> MEDIUM SHADE + '\u2593' # 0x00b2 -> DARK SHADE + '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL + '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT + '\u0104' # 0x00b5 -> LATIN CAPITAL LETTER A WITH OGONEK + '\u010c' # 0x00b6 -> LATIN CAPITAL LETTER C WITH CARON + '\u0118' # 0x00b7 -> LATIN CAPITAL LETTER E WITH OGONEK + '\u0116' # 0x00b8 -> LATIN CAPITAL LETTER E WITH DOT ABOVE + '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT + '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL + '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT + '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT + '\u012e' # 0x00bd -> LATIN CAPITAL LETTER I WITH OGONEK + '\u0160' # 0x00be -> LATIN CAPITAL LETTER S WITH CARON + '\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT + '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT + '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL + '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + '\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT + '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL + '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + '\u0172' # 0x00c6 -> LATIN CAPITAL LETTER U WITH OGONEK + '\u016a' # 0x00c7 -> LATIN CAPITAL LETTER U WITH MACRON + '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT + '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT + '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL + '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL + '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + '\u017d' # 0x00cf -> LATIN CAPITAL LETTER Z WITH CARON + '\u0105' # 0x00d0 -> LATIN SMALL LETTER A WITH OGONEK + '\u010d' # 0x00d1 -> LATIN SMALL LETTER C WITH CARON + '\u0119' # 0x00d2 -> LATIN SMALL LETTER E WITH OGONEK + '\u0117' # 0x00d3 -> LATIN SMALL LETTER E WITH DOT ABOVE + '\u012f' # 0x00d4 -> LATIN SMALL LETTER I WITH OGONEK + 
'\u0161' # 0x00d5 -> LATIN SMALL LETTER S WITH CARON + '\u0173' # 0x00d6 -> LATIN SMALL LETTER U WITH OGONEK + '\u016b' # 0x00d7 -> LATIN SMALL LETTER U WITH MACRON + '\u017e' # 0x00d8 -> LATIN SMALL LETTER Z WITH CARON + '\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT + '\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT + '\u2588' # 0x00db -> FULL BLOCK + '\u2584' # 0x00dc -> LOWER HALF BLOCK + '\u258c' # 0x00dd -> LEFT HALF BLOCK + '\u2590' # 0x00de -> RIGHT HALF BLOCK + '\u2580' # 0x00df -> UPPER HALF BLOCK + '\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE + '\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S (GERMAN) + '\u014c' # 0x00e2 -> LATIN CAPITAL LETTER O WITH MACRON + '\u0143' # 0x00e3 -> LATIN CAPITAL LETTER N WITH ACUTE + '\xf5' # 0x00e4 -> LATIN SMALL LETTER O WITH TILDE + '\xd5' # 0x00e5 -> LATIN CAPITAL LETTER O WITH TILDE + '\xb5' # 0x00e6 -> MICRO SIGN + '\u0144' # 0x00e7 -> LATIN SMALL LETTER N WITH ACUTE + '\u0136' # 0x00e8 -> LATIN CAPITAL LETTER K WITH CEDILLA + '\u0137' # 0x00e9 -> LATIN SMALL LETTER K WITH CEDILLA + '\u013b' # 0x00ea -> LATIN CAPITAL LETTER L WITH CEDILLA + '\u013c' # 0x00eb -> LATIN SMALL LETTER L WITH CEDILLA + '\u0146' # 0x00ec -> LATIN SMALL LETTER N WITH CEDILLA + '\u0112' # 0x00ed -> LATIN CAPITAL LETTER E WITH MACRON + '\u0145' # 0x00ee -> LATIN CAPITAL LETTER N WITH CEDILLA + '\u2019' # 0x00ef -> RIGHT SINGLE QUOTATION MARK + '\xad' # 0x00f0 -> SOFT HYPHEN + '\xb1' # 0x00f1 -> PLUS-MINUS SIGN + '\u201c' # 0x00f2 -> LEFT DOUBLE QUOTATION MARK + '\xbe' # 0x00f3 -> VULGAR FRACTION THREE QUARTERS + '\xb6' # 0x00f4 -> PILCROW SIGN + '\xa7' # 0x00f5 -> SECTION SIGN + '\xf7' # 0x00f6 -> DIVISION SIGN + '\u201e' # 0x00f7 -> DOUBLE LOW-9 QUOTATION MARK + '\xb0' # 0x00f8 -> DEGREE SIGN + '\u2219' # 0x00f9 -> BULLET OPERATOR + '\xb7' # 0x00fa -> MIDDLE DOT + '\xb9' # 0x00fb -> SUPERSCRIPT ONE + '\xb3' # 0x00fc -> SUPERSCRIPT THREE + '\xb2' # 0x00fd -> SUPERSCRIPT TWO + '\u25a0' # 0x00fe -> BLACK SQUARE + '\xa0' # 0x00ff -> NO-BREAK SPACE +) + +### Encoding Map + +encoding_map = { + 0x0000: 0x0000, # NULL + 0x0001: 0x0001, # START OF HEADING + 0x0002: 0x0002, # START OF TEXT + 0x0003: 0x0003, # END OF TEXT + 0x0004: 0x0004, # END OF TRANSMISSION + 0x0005: 0x0005, # ENQUIRY + 0x0006: 0x0006, # ACKNOWLEDGE + 0x0007: 0x0007, # BELL + 0x0008: 0x0008, # BACKSPACE + 0x0009: 0x0009, # HORIZONTAL TABULATION + 0x000a: 0x000a, # LINE FEED + 0x000b: 0x000b, # VERTICAL TABULATION + 0x000c: 0x000c, # FORM FEED + 0x000d: 0x000d, # CARRIAGE RETURN + 0x000e: 0x000e, # SHIFT OUT + 0x000f: 0x000f, # SHIFT IN + 0x0010: 0x0010, # DATA LINK ESCAPE + 0x0011: 0x0011, # DEVICE CONTROL ONE + 0x0012: 0x0012, # DEVICE CONTROL TWO + 0x0013: 0x0013, # DEVICE CONTROL THREE + 0x0014: 0x0014, # DEVICE CONTROL FOUR + 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE + 0x0016: 0x0016, # SYNCHRONOUS IDLE + 0x0017: 0x0017, # END OF TRANSMISSION BLOCK + 0x0018: 0x0018, # CANCEL + 0x0019: 0x0019, # END OF MEDIUM + 0x001a: 0x001a, # SUBSTITUTE + 0x001b: 0x001b, # ESCAPE + 0x001c: 0x001c, # FILE SEPARATOR + 0x001d: 0x001d, # GROUP SEPARATOR + 0x001e: 0x001e, # RECORD SEPARATOR + 0x001f: 0x001f, # UNIT SEPARATOR + 0x0020: 0x0020, # SPACE + 0x0021: 0x0021, # EXCLAMATION MARK + 0x0022: 0x0022, # QUOTATION MARK + 0x0023: 0x0023, # NUMBER SIGN + 0x0024: 0x0024, # DOLLAR SIGN + 0x0025: 0x0025, # PERCENT SIGN + 0x0026: 0x0026, # AMPERSAND + 0x0027: 0x0027, # APOSTROPHE + 0x0028: 0x0028, # LEFT PARENTHESIS + 0x0029: 0x0029, # RIGHT PARENTHESIS + 0x002a: 0x002a, # ASTERISK + 0x002b: 0x002b, # PLUS SIGN + 
0x002c: 0x002c, # COMMA + 0x002d: 0x002d, # HYPHEN-MINUS + 0x002e: 0x002e, # FULL STOP + 0x002f: 0x002f, # SOLIDUS + 0x0030: 0x0030, # DIGIT ZERO + 0x0031: 0x0031, # DIGIT ONE + 0x0032: 0x0032, # DIGIT TWO + 0x0033: 0x0033, # DIGIT THREE + 0x0034: 0x0034, # DIGIT FOUR + 0x0035: 0x0035, # DIGIT FIVE + 0x0036: 0x0036, # DIGIT SIX + 0x0037: 0x0037, # DIGIT SEVEN + 0x0038: 0x0038, # DIGIT EIGHT + 0x0039: 0x0039, # DIGIT NINE + 0x003a: 0x003a, # COLON + 0x003b: 0x003b, # SEMICOLON + 0x003c: 0x003c, # LESS-THAN SIGN + 0x003d: 0x003d, # EQUALS SIGN + 0x003e: 0x003e, # GREATER-THAN SIGN + 0x003f: 0x003f, # QUESTION MARK + 0x0040: 0x0040, # COMMERCIAL AT + 0x0041: 0x0041, # LATIN CAPITAL LETTER A + 0x0042: 0x0042, # LATIN CAPITAL LETTER B + 0x0043: 0x0043, # LATIN CAPITAL LETTER C + 0x0044: 0x0044, # LATIN CAPITAL LETTER D + 0x0045: 0x0045, # LATIN CAPITAL LETTER E + 0x0046: 0x0046, # LATIN CAPITAL LETTER F + 0x0047: 0x0047, # LATIN CAPITAL LETTER G + 0x0048: 0x0048, # LATIN CAPITAL LETTER H + 0x0049: 0x0049, # LATIN CAPITAL LETTER I + 0x004a: 0x004a, # LATIN CAPITAL LETTER J + 0x004b: 0x004b, # LATIN CAPITAL LETTER K + 0x004c: 0x004c, # LATIN CAPITAL LETTER L + 0x004d: 0x004d, # LATIN CAPITAL LETTER M + 0x004e: 0x004e, # LATIN CAPITAL LETTER N + 0x004f: 0x004f, # LATIN CAPITAL LETTER O + 0x0050: 0x0050, # LATIN CAPITAL LETTER P + 0x0051: 0x0051, # LATIN CAPITAL LETTER Q + 0x0052: 0x0052, # LATIN CAPITAL LETTER R + 0x0053: 0x0053, # LATIN CAPITAL LETTER S + 0x0054: 0x0054, # LATIN CAPITAL LETTER T + 0x0055: 0x0055, # LATIN CAPITAL LETTER U + 0x0056: 0x0056, # LATIN CAPITAL LETTER V + 0x0057: 0x0057, # LATIN CAPITAL LETTER W + 0x0058: 0x0058, # LATIN CAPITAL LETTER X + 0x0059: 0x0059, # LATIN CAPITAL LETTER Y + 0x005a: 0x005a, # LATIN CAPITAL LETTER Z + 0x005b: 0x005b, # LEFT SQUARE BRACKET + 0x005c: 0x005c, # REVERSE SOLIDUS + 0x005d: 0x005d, # RIGHT SQUARE BRACKET + 0x005e: 0x005e, # CIRCUMFLEX ACCENT + 0x005f: 0x005f, # LOW LINE + 0x0060: 0x0060, # GRAVE ACCENT + 0x0061: 0x0061, # LATIN SMALL LETTER A + 0x0062: 0x0062, # LATIN SMALL LETTER B + 0x0063: 0x0063, # LATIN SMALL LETTER C + 0x0064: 0x0064, # LATIN SMALL LETTER D + 0x0065: 0x0065, # LATIN SMALL LETTER E + 0x0066: 0x0066, # LATIN SMALL LETTER F + 0x0067: 0x0067, # LATIN SMALL LETTER G + 0x0068: 0x0068, # LATIN SMALL LETTER H + 0x0069: 0x0069, # LATIN SMALL LETTER I + 0x006a: 0x006a, # LATIN SMALL LETTER J + 0x006b: 0x006b, # LATIN SMALL LETTER K + 0x006c: 0x006c, # LATIN SMALL LETTER L + 0x006d: 0x006d, # LATIN SMALL LETTER M + 0x006e: 0x006e, # LATIN SMALL LETTER N + 0x006f: 0x006f, # LATIN SMALL LETTER O + 0x0070: 0x0070, # LATIN SMALL LETTER P + 0x0071: 0x0071, # LATIN SMALL LETTER Q + 0x0072: 0x0072, # LATIN SMALL LETTER R + 0x0073: 0x0073, # LATIN SMALL LETTER S + 0x0074: 0x0074, # LATIN SMALL LETTER T + 0x0075: 0x0075, # LATIN SMALL LETTER U + 0x0076: 0x0076, # LATIN SMALL LETTER V + 0x0077: 0x0077, # LATIN SMALL LETTER W + 0x0078: 0x0078, # LATIN SMALL LETTER X + 0x0079: 0x0079, # LATIN SMALL LETTER Y + 0x007a: 0x007a, # LATIN SMALL LETTER Z + 0x007b: 0x007b, # LEFT CURLY BRACKET + 0x007c: 0x007c, # VERTICAL LINE + 0x007d: 0x007d, # RIGHT CURLY BRACKET + 0x007e: 0x007e, # TILDE + 0x007f: 0x007f, # DELETE + 0x00a0: 0x00ff, # NO-BREAK SPACE + 0x00a2: 0x0096, # CENT SIGN + 0x00a3: 0x009c, # POUND SIGN + 0x00a4: 0x009f, # CURRENCY SIGN + 0x00a6: 0x00a7, # BROKEN BAR + 0x00a7: 0x00f5, # SECTION SIGN + 0x00a9: 0x00a8, # COPYRIGHT SIGN + 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00ac: 0x00aa, # NOT SIGN + 0x00ad: 
0x00f0, # SOFT HYPHEN + 0x00ae: 0x00a9, # REGISTERED SIGN + 0x00b0: 0x00f8, # DEGREE SIGN + 0x00b1: 0x00f1, # PLUS-MINUS SIGN + 0x00b2: 0x00fd, # SUPERSCRIPT TWO + 0x00b3: 0x00fc, # SUPERSCRIPT THREE + 0x00b5: 0x00e6, # MICRO SIGN + 0x00b6: 0x00f4, # PILCROW SIGN + 0x00b7: 0x00fa, # MIDDLE DOT + 0x00b9: 0x00fb, # SUPERSCRIPT ONE + 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER + 0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF + 0x00be: 0x00f3, # VULGAR FRACTION THREE QUARTERS + 0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS + 0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE + 0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE + 0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE + 0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE + 0x00d5: 0x00e5, # LATIN CAPITAL LETTER O WITH TILDE + 0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS + 0x00d7: 0x009e, # MULTIPLICATION SIGN + 0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE + 0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS + 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S (GERMAN) + 0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS + 0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE + 0x00e6: 0x0091, # LATIN SMALL LIGATURE AE + 0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE + 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE + 0x00f5: 0x00e4, # LATIN SMALL LETTER O WITH TILDE + 0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS + 0x00f7: 0x00f6, # DIVISION SIGN + 0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE + 0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS + 0x0100: 0x00a0, # LATIN CAPITAL LETTER A WITH MACRON + 0x0101: 0x0083, # LATIN SMALL LETTER A WITH MACRON + 0x0104: 0x00b5, # LATIN CAPITAL LETTER A WITH OGONEK + 0x0105: 0x00d0, # LATIN SMALL LETTER A WITH OGONEK + 0x0106: 0x0080, # LATIN CAPITAL LETTER C WITH ACUTE + 0x0107: 0x0087, # LATIN SMALL LETTER C WITH ACUTE + 0x010c: 0x00b6, # LATIN CAPITAL LETTER C WITH CARON + 0x010d: 0x00d1, # LATIN SMALL LETTER C WITH CARON + 0x0112: 0x00ed, # LATIN CAPITAL LETTER E WITH MACRON + 0x0113: 0x0089, # LATIN SMALL LETTER E WITH MACRON + 0x0116: 0x00b8, # LATIN CAPITAL LETTER E WITH DOT ABOVE + 0x0117: 0x00d3, # LATIN SMALL LETTER E WITH DOT ABOVE + 0x0118: 0x00b7, # LATIN CAPITAL LETTER E WITH OGONEK + 0x0119: 0x00d2, # LATIN SMALL LETTER E WITH OGONEK + 0x0122: 0x0095, # LATIN CAPITAL LETTER G WITH CEDILLA + 0x0123: 0x0085, # LATIN SMALL LETTER G WITH CEDILLA + 0x012a: 0x00a1, # LATIN CAPITAL LETTER I WITH MACRON + 0x012b: 0x008c, # LATIN SMALL LETTER I WITH MACRON + 0x012e: 0x00bd, # LATIN CAPITAL LETTER I WITH OGONEK + 0x012f: 0x00d4, # LATIN SMALL LETTER I WITH OGONEK + 0x0136: 0x00e8, # LATIN CAPITAL LETTER K WITH CEDILLA + 0x0137: 0x00e9, # LATIN SMALL LETTER K WITH CEDILLA + 0x013b: 0x00ea, # LATIN CAPITAL LETTER L WITH CEDILLA + 0x013c: 0x00eb, # LATIN SMALL LETTER L WITH CEDILLA + 0x0141: 0x00ad, # LATIN CAPITAL LETTER L WITH STROKE + 0x0142: 0x0088, # LATIN SMALL LETTER L WITH STROKE + 0x0143: 0x00e3, # LATIN CAPITAL LETTER N WITH ACUTE + 0x0144: 0x00e7, # LATIN SMALL LETTER N WITH ACUTE + 0x0145: 0x00ee, # LATIN CAPITAL LETTER N WITH CEDILLA + 0x0146: 0x00ec, # LATIN SMALL LETTER N WITH CEDILLA + 0x014c: 0x00e2, # LATIN CAPITAL LETTER O WITH MACRON + 0x014d: 0x0093, # LATIN SMALL LETTER O WITH MACRON + 0x0156: 0x008a, # LATIN CAPITAL LETTER R WITH CEDILLA + 0x0157: 0x008b, # LATIN SMALL LETTER R WITH CEDILLA + 0x015a: 0x0097, # LATIN CAPITAL LETTER S WITH ACUTE + 0x015b: 
0x0098, # LATIN SMALL LETTER S WITH ACUTE + 0x0160: 0x00be, # LATIN CAPITAL LETTER S WITH CARON + 0x0161: 0x00d5, # LATIN SMALL LETTER S WITH CARON + 0x016a: 0x00c7, # LATIN CAPITAL LETTER U WITH MACRON + 0x016b: 0x00d7, # LATIN SMALL LETTER U WITH MACRON + 0x0172: 0x00c6, # LATIN CAPITAL LETTER U WITH OGONEK + 0x0173: 0x00d6, # LATIN SMALL LETTER U WITH OGONEK + 0x0179: 0x008d, # LATIN CAPITAL LETTER Z WITH ACUTE + 0x017a: 0x00a5, # LATIN SMALL LETTER Z WITH ACUTE + 0x017b: 0x00a3, # LATIN CAPITAL LETTER Z WITH DOT ABOVE + 0x017c: 0x00a4, # LATIN SMALL LETTER Z WITH DOT ABOVE + 0x017d: 0x00cf, # LATIN CAPITAL LETTER Z WITH CARON + 0x017e: 0x00d8, # LATIN SMALL LETTER Z WITH CARON + 0x2019: 0x00ef, # RIGHT SINGLE QUOTATION MARK + 0x201c: 0x00f2, # LEFT DOUBLE QUOTATION MARK + 0x201d: 0x00a6, # RIGHT DOUBLE QUOTATION MARK + 0x201e: 0x00f7, # DOUBLE LOW-9 QUOTATION MARK + 0x2219: 0x00f9, # BULLET OPERATOR + 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL + 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL + 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT + 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT + 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT + 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT + 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT + 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT + 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL + 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL + 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL + 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT + 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT + 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT + 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT + 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT + 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL + 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + 0x2580: 0x00df, # UPPER HALF BLOCK + 0x2584: 0x00dc, # LOWER HALF BLOCK + 0x2588: 0x00db, # FULL BLOCK + 0x258c: 0x00dd, # LEFT HALF BLOCK + 0x2590: 0x00de, # RIGHT HALF BLOCK + 0x2591: 0x00b0, # LIGHT SHADE + 0x2592: 0x00b1, # MEDIUM SHADE + 0x2593: 0x00b2, # DARK SHADE + 0x25a0: 0x00fe, # BLACK SQUARE +} diff --git a/venv/Lib/encodings/cp850.py b/venv/Lib/encodings/cp850.py new file mode 100644 index 00000000..f98aef99 --- /dev/null +++ b/venv/Lib/encodings/cp850.py @@ -0,0 +1,698 @@ +""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP850.TXT' with gencodec.py. 
+ +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_map) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_map)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='cp850', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + +### Decoding Map + +decoding_map = codecs.make_identity_dict(range(256)) +decoding_map.update({ + 0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA + 0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS + 0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE + 0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX + 0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS + 0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE + 0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE + 0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA + 0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX + 0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS + 0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE + 0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS + 0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX + 0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE + 0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS + 0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE + 0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE + 0x0091: 0x00e6, # LATIN SMALL LIGATURE AE + 0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE + 0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX + 0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS + 0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE + 0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX + 0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE + 0x0098: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS + 0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS + 0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS + 0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE + 0x009c: 0x00a3, # POUND SIGN + 0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE + 0x009e: 0x00d7, # MULTIPLICATION SIGN + 0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK + 0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE + 0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE + 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE + 0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE + 0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE + 0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE + 0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR + 0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR + 0x00a8: 0x00bf, # INVERTED QUESTION MARK + 0x00a9: 0x00ae, # REGISTERED SIGN + 0x00aa: 0x00ac, # NOT SIGN + 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF + 0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER + 0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK + 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00af: 0x00bb, # 
RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00b0: 0x2591, # LIGHT SHADE + 0x00b1: 0x2592, # MEDIUM SHADE + 0x00b2: 0x2593, # DARK SHADE + 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL + 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT + 0x00b5: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE + 0x00b6: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX + 0x00b7: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE + 0x00b8: 0x00a9, # COPYRIGHT SIGN + 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT + 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL + 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT + 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT + 0x00bd: 0x00a2, # CENT SIGN + 0x00be: 0x00a5, # YEN SIGN + 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT + 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT + 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL + 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT + 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL + 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + 0x00c6: 0x00e3, # LATIN SMALL LETTER A WITH TILDE + 0x00c7: 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE + 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT + 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT + 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL + 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL + 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + 0x00cf: 0x00a4, # CURRENCY SIGN + 0x00d0: 0x00f0, # LATIN SMALL LETTER ETH + 0x00d1: 0x00d0, # LATIN CAPITAL LETTER ETH + 0x00d2: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX + 0x00d3: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS + 0x00d4: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE + 0x00d5: 0x0131, # LATIN SMALL LETTER DOTLESS I + 0x00d6: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE + 0x00d7: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX + 0x00d8: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS + 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT + 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT + 0x00db: 0x2588, # FULL BLOCK + 0x00dc: 0x2584, # LOWER HALF BLOCK + 0x00dd: 0x00a6, # BROKEN BAR + 0x00de: 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE + 0x00df: 0x2580, # UPPER HALF BLOCK + 0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE + 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S + 0x00e2: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX + 0x00e3: 0x00d2, # LATIN CAPITAL LETTER O WITH GRAVE + 0x00e4: 0x00f5, # LATIN SMALL LETTER O WITH TILDE + 0x00e5: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE + 0x00e6: 0x00b5, # MICRO SIGN + 0x00e7: 0x00fe, # LATIN SMALL LETTER THORN + 0x00e8: 0x00de, # LATIN CAPITAL LETTER THORN + 0x00e9: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE + 0x00ea: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX + 0x00eb: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE + 0x00ec: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE + 0x00ed: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE + 0x00ee: 0x00af, # MACRON + 0x00ef: 0x00b4, # ACUTE ACCENT + 0x00f0: 0x00ad, # SOFT HYPHEN + 0x00f1: 0x00b1, # PLUS-MINUS SIGN + 0x00f2: 0x2017, # DOUBLE LOW LINE + 0x00f3: 0x00be, # VULGAR FRACTION THREE QUARTERS + 0x00f4: 0x00b6, # PILCROW SIGN + 0x00f5: 0x00a7, # SECTION SIGN + 0x00f6: 0x00f7, # DIVISION SIGN + 0x00f7: 0x00b8, # CEDILLA + 0x00f8: 0x00b0, # DEGREE SIGN + 0x00f9: 0x00a8, # DIAERESIS + 0x00fa: 
0x00b7, # MIDDLE DOT + 0x00fb: 0x00b9, # SUPERSCRIPT ONE + 0x00fc: 0x00b3, # SUPERSCRIPT THREE + 0x00fd: 0x00b2, # SUPERSCRIPT TWO + 0x00fe: 0x25a0, # BLACK SQUARE + 0x00ff: 0x00a0, # NO-BREAK SPACE +}) + +### Decoding Table + +decoding_table = ( + '\x00' # 0x0000 -> NULL + '\x01' # 0x0001 -> START OF HEADING + '\x02' # 0x0002 -> START OF TEXT + '\x03' # 0x0003 -> END OF TEXT + '\x04' # 0x0004 -> END OF TRANSMISSION + '\x05' # 0x0005 -> ENQUIRY + '\x06' # 0x0006 -> ACKNOWLEDGE + '\x07' # 0x0007 -> BELL + '\x08' # 0x0008 -> BACKSPACE + '\t' # 0x0009 -> HORIZONTAL TABULATION + '\n' # 0x000a -> LINE FEED + '\x0b' # 0x000b -> VERTICAL TABULATION + '\x0c' # 0x000c -> FORM FEED + '\r' # 0x000d -> CARRIAGE RETURN + '\x0e' # 0x000e -> SHIFT OUT + '\x0f' # 0x000f -> SHIFT IN + '\x10' # 0x0010 -> DATA LINK ESCAPE + '\x11' # 0x0011 -> DEVICE CONTROL ONE + '\x12' # 0x0012 -> DEVICE CONTROL TWO + '\x13' # 0x0013 -> DEVICE CONTROL THREE + '\x14' # 0x0014 -> DEVICE CONTROL FOUR + '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x0016 -> SYNCHRONOUS IDLE + '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK + '\x18' # 0x0018 -> CANCEL + '\x19' # 0x0019 -> END OF MEDIUM + '\x1a' # 0x001a -> SUBSTITUTE + '\x1b' # 0x001b -> ESCAPE + '\x1c' # 0x001c -> FILE SEPARATOR + '\x1d' # 0x001d -> GROUP SEPARATOR + '\x1e' # 0x001e -> RECORD SEPARATOR + '\x1f' # 0x001f -> UNIT SEPARATOR + ' ' # 0x0020 -> SPACE + '!' # 0x0021 -> EXCLAMATION MARK + '"' # 0x0022 -> QUOTATION MARK + '#' # 0x0023 -> NUMBER SIGN + '$' # 0x0024 -> DOLLAR SIGN + '%' # 0x0025 -> PERCENT SIGN + '&' # 0x0026 -> AMPERSAND + "'" # 0x0027 -> APOSTROPHE + '(' # 0x0028 -> LEFT PARENTHESIS + ')' # 0x0029 -> RIGHT PARENTHESIS + '*' # 0x002a -> ASTERISK + '+' # 0x002b -> PLUS SIGN + ',' # 0x002c -> COMMA + '-' # 0x002d -> HYPHEN-MINUS + '.' # 0x002e -> FULL STOP + '/' # 0x002f -> SOLIDUS + '0' # 0x0030 -> DIGIT ZERO + '1' # 0x0031 -> DIGIT ONE + '2' # 0x0032 -> DIGIT TWO + '3' # 0x0033 -> DIGIT THREE + '4' # 0x0034 -> DIGIT FOUR + '5' # 0x0035 -> DIGIT FIVE + '6' # 0x0036 -> DIGIT SIX + '7' # 0x0037 -> DIGIT SEVEN + '8' # 0x0038 -> DIGIT EIGHT + '9' # 0x0039 -> DIGIT NINE + ':' # 0x003a -> COLON + ';' # 0x003b -> SEMICOLON + '<' # 0x003c -> LESS-THAN SIGN + '=' # 0x003d -> EQUALS SIGN + '>' # 0x003e -> GREATER-THAN SIGN + '?' 
# 0x003f -> QUESTION MARK + '@' # 0x0040 -> COMMERCIAL AT + 'A' # 0x0041 -> LATIN CAPITAL LETTER A + 'B' # 0x0042 -> LATIN CAPITAL LETTER B + 'C' # 0x0043 -> LATIN CAPITAL LETTER C + 'D' # 0x0044 -> LATIN CAPITAL LETTER D + 'E' # 0x0045 -> LATIN CAPITAL LETTER E + 'F' # 0x0046 -> LATIN CAPITAL LETTER F + 'G' # 0x0047 -> LATIN CAPITAL LETTER G + 'H' # 0x0048 -> LATIN CAPITAL LETTER H + 'I' # 0x0049 -> LATIN CAPITAL LETTER I + 'J' # 0x004a -> LATIN CAPITAL LETTER J + 'K' # 0x004b -> LATIN CAPITAL LETTER K + 'L' # 0x004c -> LATIN CAPITAL LETTER L + 'M' # 0x004d -> LATIN CAPITAL LETTER M + 'N' # 0x004e -> LATIN CAPITAL LETTER N + 'O' # 0x004f -> LATIN CAPITAL LETTER O + 'P' # 0x0050 -> LATIN CAPITAL LETTER P + 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q + 'R' # 0x0052 -> LATIN CAPITAL LETTER R + 'S' # 0x0053 -> LATIN CAPITAL LETTER S + 'T' # 0x0054 -> LATIN CAPITAL LETTER T + 'U' # 0x0055 -> LATIN CAPITAL LETTER U + 'V' # 0x0056 -> LATIN CAPITAL LETTER V + 'W' # 0x0057 -> LATIN CAPITAL LETTER W + 'X' # 0x0058 -> LATIN CAPITAL LETTER X + 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y + 'Z' # 0x005a -> LATIN CAPITAL LETTER Z + '[' # 0x005b -> LEFT SQUARE BRACKET + '\\' # 0x005c -> REVERSE SOLIDUS + ']' # 0x005d -> RIGHT SQUARE BRACKET + '^' # 0x005e -> CIRCUMFLEX ACCENT + '_' # 0x005f -> LOW LINE + '`' # 0x0060 -> GRAVE ACCENT + 'a' # 0x0061 -> LATIN SMALL LETTER A + 'b' # 0x0062 -> LATIN SMALL LETTER B + 'c' # 0x0063 -> LATIN SMALL LETTER C + 'd' # 0x0064 -> LATIN SMALL LETTER D + 'e' # 0x0065 -> LATIN SMALL LETTER E + 'f' # 0x0066 -> LATIN SMALL LETTER F + 'g' # 0x0067 -> LATIN SMALL LETTER G + 'h' # 0x0068 -> LATIN SMALL LETTER H + 'i' # 0x0069 -> LATIN SMALL LETTER I + 'j' # 0x006a -> LATIN SMALL LETTER J + 'k' # 0x006b -> LATIN SMALL LETTER K + 'l' # 0x006c -> LATIN SMALL LETTER L + 'm' # 0x006d -> LATIN SMALL LETTER M + 'n' # 0x006e -> LATIN SMALL LETTER N + 'o' # 0x006f -> LATIN SMALL LETTER O + 'p' # 0x0070 -> LATIN SMALL LETTER P + 'q' # 0x0071 -> LATIN SMALL LETTER Q + 'r' # 0x0072 -> LATIN SMALL LETTER R + 's' # 0x0073 -> LATIN SMALL LETTER S + 't' # 0x0074 -> LATIN SMALL LETTER T + 'u' # 0x0075 -> LATIN SMALL LETTER U + 'v' # 0x0076 -> LATIN SMALL LETTER V + 'w' # 0x0077 -> LATIN SMALL LETTER W + 'x' # 0x0078 -> LATIN SMALL LETTER X + 'y' # 0x0079 -> LATIN SMALL LETTER Y + 'z' # 0x007a -> LATIN SMALL LETTER Z + '{' # 0x007b -> LEFT CURLY BRACKET + '|' # 0x007c -> VERTICAL LINE + '}' # 0x007d -> RIGHT CURLY BRACKET + '~' # 0x007e -> TILDE + '\x7f' # 0x007f -> DELETE + '\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA + '\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS + '\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE + '\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX + '\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS + '\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE + '\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE + '\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA + '\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX + '\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS + '\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE + '\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS + '\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX + '\xec' # 0x008d -> LATIN SMALL LETTER I WITH GRAVE + '\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS + '\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE + '\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE + '\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE + '\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE + 
'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX + '\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS + '\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE + '\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX + '\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE + '\xff' # 0x0098 -> LATIN SMALL LETTER Y WITH DIAERESIS + '\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS + '\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE + '\xa3' # 0x009c -> POUND SIGN + '\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE + '\xd7' # 0x009e -> MULTIPLICATION SIGN + '\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK + '\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE + '\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE + '\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE + '\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE + '\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE + '\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE + '\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR + '\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR + '\xbf' # 0x00a8 -> INVERTED QUESTION MARK + '\xae' # 0x00a9 -> REGISTERED SIGN + '\xac' # 0x00aa -> NOT SIGN + '\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF + '\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER + '\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK + '\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\u2591' # 0x00b0 -> LIGHT SHADE + '\u2592' # 0x00b1 -> MEDIUM SHADE + '\u2593' # 0x00b2 -> DARK SHADE + '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL + '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT + '\xc1' # 0x00b5 -> LATIN CAPITAL LETTER A WITH ACUTE + '\xc2' # 0x00b6 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX + '\xc0' # 0x00b7 -> LATIN CAPITAL LETTER A WITH GRAVE + '\xa9' # 0x00b8 -> COPYRIGHT SIGN + '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT + '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL + '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT + '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT + '\xa2' # 0x00bd -> CENT SIGN + '\xa5' # 0x00be -> YEN SIGN + '\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT + '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT + '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL + '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + '\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT + '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL + '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + '\xe3' # 0x00c6 -> LATIN SMALL LETTER A WITH TILDE + '\xc3' # 0x00c7 -> LATIN CAPITAL LETTER A WITH TILDE + '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT + '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT + '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL + '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL + '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + '\xa4' # 0x00cf -> CURRENCY SIGN + '\xf0' # 0x00d0 -> LATIN SMALL LETTER ETH + '\xd0' # 0x00d1 -> LATIN CAPITAL LETTER ETH + '\xca' # 0x00d2 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX + '\xcb' # 0x00d3 -> LATIN CAPITAL LETTER E WITH DIAERESIS + '\xc8' # 0x00d4 -> LATIN CAPITAL LETTER E WITH GRAVE + '\u0131' # 0x00d5 -> LATIN SMALL LETTER DOTLESS I + '\xcd' # 0x00d6 -> LATIN CAPITAL LETTER I WITH ACUTE + '\xce' # 
0x00d7 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX + '\xcf' # 0x00d8 -> LATIN CAPITAL LETTER I WITH DIAERESIS + '\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT + '\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT + '\u2588' # 0x00db -> FULL BLOCK + '\u2584' # 0x00dc -> LOWER HALF BLOCK + '\xa6' # 0x00dd -> BROKEN BAR + '\xcc' # 0x00de -> LATIN CAPITAL LETTER I WITH GRAVE + '\u2580' # 0x00df -> UPPER HALF BLOCK + '\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE + '\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S + '\xd4' # 0x00e2 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX + '\xd2' # 0x00e3 -> LATIN CAPITAL LETTER O WITH GRAVE + '\xf5' # 0x00e4 -> LATIN SMALL LETTER O WITH TILDE + '\xd5' # 0x00e5 -> LATIN CAPITAL LETTER O WITH TILDE + '\xb5' # 0x00e6 -> MICRO SIGN + '\xfe' # 0x00e7 -> LATIN SMALL LETTER THORN + '\xde' # 0x00e8 -> LATIN CAPITAL LETTER THORN + '\xda' # 0x00e9 -> LATIN CAPITAL LETTER U WITH ACUTE + '\xdb' # 0x00ea -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX + '\xd9' # 0x00eb -> LATIN CAPITAL LETTER U WITH GRAVE + '\xfd' # 0x00ec -> LATIN SMALL LETTER Y WITH ACUTE + '\xdd' # 0x00ed -> LATIN CAPITAL LETTER Y WITH ACUTE + '\xaf' # 0x00ee -> MACRON + '\xb4' # 0x00ef -> ACUTE ACCENT + '\xad' # 0x00f0 -> SOFT HYPHEN + '\xb1' # 0x00f1 -> PLUS-MINUS SIGN + '\u2017' # 0x00f2 -> DOUBLE LOW LINE + '\xbe' # 0x00f3 -> VULGAR FRACTION THREE QUARTERS + '\xb6' # 0x00f4 -> PILCROW SIGN + '\xa7' # 0x00f5 -> SECTION SIGN + '\xf7' # 0x00f6 -> DIVISION SIGN + '\xb8' # 0x00f7 -> CEDILLA + '\xb0' # 0x00f8 -> DEGREE SIGN + '\xa8' # 0x00f9 -> DIAERESIS + '\xb7' # 0x00fa -> MIDDLE DOT + '\xb9' # 0x00fb -> SUPERSCRIPT ONE + '\xb3' # 0x00fc -> SUPERSCRIPT THREE + '\xb2' # 0x00fd -> SUPERSCRIPT TWO + '\u25a0' # 0x00fe -> BLACK SQUARE + '\xa0' # 0x00ff -> NO-BREAK SPACE +) + +### Encoding Map + +encoding_map = { + 0x0000: 0x0000, # NULL + 0x0001: 0x0001, # START OF HEADING + 0x0002: 0x0002, # START OF TEXT + 0x0003: 0x0003, # END OF TEXT + 0x0004: 0x0004, # END OF TRANSMISSION + 0x0005: 0x0005, # ENQUIRY + 0x0006: 0x0006, # ACKNOWLEDGE + 0x0007: 0x0007, # BELL + 0x0008: 0x0008, # BACKSPACE + 0x0009: 0x0009, # HORIZONTAL TABULATION + 0x000a: 0x000a, # LINE FEED + 0x000b: 0x000b, # VERTICAL TABULATION + 0x000c: 0x000c, # FORM FEED + 0x000d: 0x000d, # CARRIAGE RETURN + 0x000e: 0x000e, # SHIFT OUT + 0x000f: 0x000f, # SHIFT IN + 0x0010: 0x0010, # DATA LINK ESCAPE + 0x0011: 0x0011, # DEVICE CONTROL ONE + 0x0012: 0x0012, # DEVICE CONTROL TWO + 0x0013: 0x0013, # DEVICE CONTROL THREE + 0x0014: 0x0014, # DEVICE CONTROL FOUR + 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE + 0x0016: 0x0016, # SYNCHRONOUS IDLE + 0x0017: 0x0017, # END OF TRANSMISSION BLOCK + 0x0018: 0x0018, # CANCEL + 0x0019: 0x0019, # END OF MEDIUM + 0x001a: 0x001a, # SUBSTITUTE + 0x001b: 0x001b, # ESCAPE + 0x001c: 0x001c, # FILE SEPARATOR + 0x001d: 0x001d, # GROUP SEPARATOR + 0x001e: 0x001e, # RECORD SEPARATOR + 0x001f: 0x001f, # UNIT SEPARATOR + 0x0020: 0x0020, # SPACE + 0x0021: 0x0021, # EXCLAMATION MARK + 0x0022: 0x0022, # QUOTATION MARK + 0x0023: 0x0023, # NUMBER SIGN + 0x0024: 0x0024, # DOLLAR SIGN + 0x0025: 0x0025, # PERCENT SIGN + 0x0026: 0x0026, # AMPERSAND + 0x0027: 0x0027, # APOSTROPHE + 0x0028: 0x0028, # LEFT PARENTHESIS + 0x0029: 0x0029, # RIGHT PARENTHESIS + 0x002a: 0x002a, # ASTERISK + 0x002b: 0x002b, # PLUS SIGN + 0x002c: 0x002c, # COMMA + 0x002d: 0x002d, # HYPHEN-MINUS + 0x002e: 0x002e, # FULL STOP + 0x002f: 0x002f, # SOLIDUS + 0x0030: 0x0030, # DIGIT ZERO + 0x0031: 0x0031, # DIGIT ONE + 0x0032: 0x0032, # DIGIT TWO + 0x0033: 0x0033, # DIGIT 
THREE + 0x0034: 0x0034, # DIGIT FOUR + 0x0035: 0x0035, # DIGIT FIVE + 0x0036: 0x0036, # DIGIT SIX + 0x0037: 0x0037, # DIGIT SEVEN + 0x0038: 0x0038, # DIGIT EIGHT + 0x0039: 0x0039, # DIGIT NINE + 0x003a: 0x003a, # COLON + 0x003b: 0x003b, # SEMICOLON + 0x003c: 0x003c, # LESS-THAN SIGN + 0x003d: 0x003d, # EQUALS SIGN + 0x003e: 0x003e, # GREATER-THAN SIGN + 0x003f: 0x003f, # QUESTION MARK + 0x0040: 0x0040, # COMMERCIAL AT + 0x0041: 0x0041, # LATIN CAPITAL LETTER A + 0x0042: 0x0042, # LATIN CAPITAL LETTER B + 0x0043: 0x0043, # LATIN CAPITAL LETTER C + 0x0044: 0x0044, # LATIN CAPITAL LETTER D + 0x0045: 0x0045, # LATIN CAPITAL LETTER E + 0x0046: 0x0046, # LATIN CAPITAL LETTER F + 0x0047: 0x0047, # LATIN CAPITAL LETTER G + 0x0048: 0x0048, # LATIN CAPITAL LETTER H + 0x0049: 0x0049, # LATIN CAPITAL LETTER I + 0x004a: 0x004a, # LATIN CAPITAL LETTER J + 0x004b: 0x004b, # LATIN CAPITAL LETTER K + 0x004c: 0x004c, # LATIN CAPITAL LETTER L + 0x004d: 0x004d, # LATIN CAPITAL LETTER M + 0x004e: 0x004e, # LATIN CAPITAL LETTER N + 0x004f: 0x004f, # LATIN CAPITAL LETTER O + 0x0050: 0x0050, # LATIN CAPITAL LETTER P + 0x0051: 0x0051, # LATIN CAPITAL LETTER Q + 0x0052: 0x0052, # LATIN CAPITAL LETTER R + 0x0053: 0x0053, # LATIN CAPITAL LETTER S + 0x0054: 0x0054, # LATIN CAPITAL LETTER T + 0x0055: 0x0055, # LATIN CAPITAL LETTER U + 0x0056: 0x0056, # LATIN CAPITAL LETTER V + 0x0057: 0x0057, # LATIN CAPITAL LETTER W + 0x0058: 0x0058, # LATIN CAPITAL LETTER X + 0x0059: 0x0059, # LATIN CAPITAL LETTER Y + 0x005a: 0x005a, # LATIN CAPITAL LETTER Z + 0x005b: 0x005b, # LEFT SQUARE BRACKET + 0x005c: 0x005c, # REVERSE SOLIDUS + 0x005d: 0x005d, # RIGHT SQUARE BRACKET + 0x005e: 0x005e, # CIRCUMFLEX ACCENT + 0x005f: 0x005f, # LOW LINE + 0x0060: 0x0060, # GRAVE ACCENT + 0x0061: 0x0061, # LATIN SMALL LETTER A + 0x0062: 0x0062, # LATIN SMALL LETTER B + 0x0063: 0x0063, # LATIN SMALL LETTER C + 0x0064: 0x0064, # LATIN SMALL LETTER D + 0x0065: 0x0065, # LATIN SMALL LETTER E + 0x0066: 0x0066, # LATIN SMALL LETTER F + 0x0067: 0x0067, # LATIN SMALL LETTER G + 0x0068: 0x0068, # LATIN SMALL LETTER H + 0x0069: 0x0069, # LATIN SMALL LETTER I + 0x006a: 0x006a, # LATIN SMALL LETTER J + 0x006b: 0x006b, # LATIN SMALL LETTER K + 0x006c: 0x006c, # LATIN SMALL LETTER L + 0x006d: 0x006d, # LATIN SMALL LETTER M + 0x006e: 0x006e, # LATIN SMALL LETTER N + 0x006f: 0x006f, # LATIN SMALL LETTER O + 0x0070: 0x0070, # LATIN SMALL LETTER P + 0x0071: 0x0071, # LATIN SMALL LETTER Q + 0x0072: 0x0072, # LATIN SMALL LETTER R + 0x0073: 0x0073, # LATIN SMALL LETTER S + 0x0074: 0x0074, # LATIN SMALL LETTER T + 0x0075: 0x0075, # LATIN SMALL LETTER U + 0x0076: 0x0076, # LATIN SMALL LETTER V + 0x0077: 0x0077, # LATIN SMALL LETTER W + 0x0078: 0x0078, # LATIN SMALL LETTER X + 0x0079: 0x0079, # LATIN SMALL LETTER Y + 0x007a: 0x007a, # LATIN SMALL LETTER Z + 0x007b: 0x007b, # LEFT CURLY BRACKET + 0x007c: 0x007c, # VERTICAL LINE + 0x007d: 0x007d, # RIGHT CURLY BRACKET + 0x007e: 0x007e, # TILDE + 0x007f: 0x007f, # DELETE + 0x00a0: 0x00ff, # NO-BREAK SPACE + 0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK + 0x00a2: 0x00bd, # CENT SIGN + 0x00a3: 0x009c, # POUND SIGN + 0x00a4: 0x00cf, # CURRENCY SIGN + 0x00a5: 0x00be, # YEN SIGN + 0x00a6: 0x00dd, # BROKEN BAR + 0x00a7: 0x00f5, # SECTION SIGN + 0x00a8: 0x00f9, # DIAERESIS + 0x00a9: 0x00b8, # COPYRIGHT SIGN + 0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR + 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00ac: 0x00aa, # NOT SIGN + 0x00ad: 0x00f0, # SOFT HYPHEN + 0x00ae: 0x00a9, # REGISTERED SIGN + 0x00af: 0x00ee, # 
MACRON + 0x00b0: 0x00f8, # DEGREE SIGN + 0x00b1: 0x00f1, # PLUS-MINUS SIGN + 0x00b2: 0x00fd, # SUPERSCRIPT TWO + 0x00b3: 0x00fc, # SUPERSCRIPT THREE + 0x00b4: 0x00ef, # ACUTE ACCENT + 0x00b5: 0x00e6, # MICRO SIGN + 0x00b6: 0x00f4, # PILCROW SIGN + 0x00b7: 0x00fa, # MIDDLE DOT + 0x00b8: 0x00f7, # CEDILLA + 0x00b9: 0x00fb, # SUPERSCRIPT ONE + 0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR + 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER + 0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF + 0x00be: 0x00f3, # VULGAR FRACTION THREE QUARTERS + 0x00bf: 0x00a8, # INVERTED QUESTION MARK + 0x00c0: 0x00b7, # LATIN CAPITAL LETTER A WITH GRAVE + 0x00c1: 0x00b5, # LATIN CAPITAL LETTER A WITH ACUTE + 0x00c2: 0x00b6, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX + 0x00c3: 0x00c7, # LATIN CAPITAL LETTER A WITH TILDE + 0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS + 0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE + 0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE + 0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA + 0x00c8: 0x00d4, # LATIN CAPITAL LETTER E WITH GRAVE + 0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE + 0x00ca: 0x00d2, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX + 0x00cb: 0x00d3, # LATIN CAPITAL LETTER E WITH DIAERESIS + 0x00cc: 0x00de, # LATIN CAPITAL LETTER I WITH GRAVE + 0x00cd: 0x00d6, # LATIN CAPITAL LETTER I WITH ACUTE + 0x00ce: 0x00d7, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX + 0x00cf: 0x00d8, # LATIN CAPITAL LETTER I WITH DIAERESIS + 0x00d0: 0x00d1, # LATIN CAPITAL LETTER ETH + 0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE + 0x00d2: 0x00e3, # LATIN CAPITAL LETTER O WITH GRAVE + 0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE + 0x00d4: 0x00e2, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX + 0x00d5: 0x00e5, # LATIN CAPITAL LETTER O WITH TILDE + 0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS + 0x00d7: 0x009e, # MULTIPLICATION SIGN + 0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE + 0x00d9: 0x00eb, # LATIN CAPITAL LETTER U WITH GRAVE + 0x00da: 0x00e9, # LATIN CAPITAL LETTER U WITH ACUTE + 0x00db: 0x00ea, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX + 0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS + 0x00dd: 0x00ed, # LATIN CAPITAL LETTER Y WITH ACUTE + 0x00de: 0x00e8, # LATIN CAPITAL LETTER THORN + 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S + 0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE + 0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE + 0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX + 0x00e3: 0x00c6, # LATIN SMALL LETTER A WITH TILDE + 0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS + 0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE + 0x00e6: 0x0091, # LATIN SMALL LIGATURE AE + 0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA + 0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE + 0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE + 0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX + 0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS + 0x00ec: 0x008d, # LATIN SMALL LETTER I WITH GRAVE + 0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE + 0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX + 0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS + 0x00f0: 0x00d0, # LATIN SMALL LETTER ETH + 0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE + 0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE + 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE + 0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX + 0x00f5: 0x00e4, # LATIN SMALL LETTER O WITH TILDE + 0x00f6: 
0x0094, # LATIN SMALL LETTER O WITH DIAERESIS + 0x00f7: 0x00f6, # DIVISION SIGN + 0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE + 0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE + 0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE + 0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX + 0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS + 0x00fd: 0x00ec, # LATIN SMALL LETTER Y WITH ACUTE + 0x00fe: 0x00e7, # LATIN SMALL LETTER THORN + 0x00ff: 0x0098, # LATIN SMALL LETTER Y WITH DIAERESIS + 0x0131: 0x00d5, # LATIN SMALL LETTER DOTLESS I + 0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK + 0x2017: 0x00f2, # DOUBLE LOW LINE + 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL + 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL + 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT + 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT + 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT + 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT + 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT + 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT + 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL + 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL + 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL + 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT + 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT + 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT + 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT + 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT + 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL + 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + 0x2580: 0x00df, # UPPER HALF BLOCK + 0x2584: 0x00dc, # LOWER HALF BLOCK + 0x2588: 0x00db, # FULL BLOCK + 0x2591: 0x00b0, # LIGHT SHADE + 0x2592: 0x00b1, # MEDIUM SHADE + 0x2593: 0x00b2, # DARK SHADE + 0x25a0: 0x00fe, # BLACK SQUARE +} diff --git a/venv/Lib/encodings/cp852.py b/venv/Lib/encodings/cp852.py new file mode 100644 index 00000000..34d8a0ea --- /dev/null +++ b/venv/Lib/encodings/cp852.py @@ -0,0 +1,698 @@ +""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP852.TXT' with gencodec.py. 
+ +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_map) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_map)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='cp852', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + +### Decoding Map + +decoding_map = codecs.make_identity_dict(range(256)) +decoding_map.update({ + 0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA + 0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS + 0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE + 0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX + 0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS + 0x0085: 0x016f, # LATIN SMALL LETTER U WITH RING ABOVE + 0x0086: 0x0107, # LATIN SMALL LETTER C WITH ACUTE + 0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA + 0x0088: 0x0142, # LATIN SMALL LETTER L WITH STROKE + 0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS + 0x008a: 0x0150, # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE + 0x008b: 0x0151, # LATIN SMALL LETTER O WITH DOUBLE ACUTE + 0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX + 0x008d: 0x0179, # LATIN CAPITAL LETTER Z WITH ACUTE + 0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS + 0x008f: 0x0106, # LATIN CAPITAL LETTER C WITH ACUTE + 0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE + 0x0091: 0x0139, # LATIN CAPITAL LETTER L WITH ACUTE + 0x0092: 0x013a, # LATIN SMALL LETTER L WITH ACUTE + 0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX + 0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS + 0x0095: 0x013d, # LATIN CAPITAL LETTER L WITH CARON + 0x0096: 0x013e, # LATIN SMALL LETTER L WITH CARON + 0x0097: 0x015a, # LATIN CAPITAL LETTER S WITH ACUTE + 0x0098: 0x015b, # LATIN SMALL LETTER S WITH ACUTE + 0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS + 0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS + 0x009b: 0x0164, # LATIN CAPITAL LETTER T WITH CARON + 0x009c: 0x0165, # LATIN SMALL LETTER T WITH CARON + 0x009d: 0x0141, # LATIN CAPITAL LETTER L WITH STROKE + 0x009e: 0x00d7, # MULTIPLICATION SIGN + 0x009f: 0x010d, # LATIN SMALL LETTER C WITH CARON + 0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE + 0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE + 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE + 0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE + 0x00a4: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK + 0x00a5: 0x0105, # LATIN SMALL LETTER A WITH OGONEK + 0x00a6: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON + 0x00a7: 0x017e, # LATIN SMALL LETTER Z WITH CARON + 0x00a8: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK + 0x00a9: 0x0119, # LATIN SMALL LETTER E WITH OGONEK + 0x00aa: 0x00ac, # NOT SIGN + 0x00ab: 0x017a, # LATIN SMALL LETTER Z WITH ACUTE + 0x00ac: 0x010c, # LATIN CAPITAL LETTER C WITH CARON + 0x00ad: 0x015f, # LATIN SMALL 
LETTER S WITH CEDILLA + 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00b0: 0x2591, # LIGHT SHADE + 0x00b1: 0x2592, # MEDIUM SHADE + 0x00b2: 0x2593, # DARK SHADE + 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL + 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT + 0x00b5: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE + 0x00b6: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX + 0x00b7: 0x011a, # LATIN CAPITAL LETTER E WITH CARON + 0x00b8: 0x015e, # LATIN CAPITAL LETTER S WITH CEDILLA + 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT + 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL + 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT + 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT + 0x00bd: 0x017b, # LATIN CAPITAL LETTER Z WITH DOT ABOVE + 0x00be: 0x017c, # LATIN SMALL LETTER Z WITH DOT ABOVE + 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT + 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT + 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL + 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT + 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL + 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + 0x00c6: 0x0102, # LATIN CAPITAL LETTER A WITH BREVE + 0x00c7: 0x0103, # LATIN SMALL LETTER A WITH BREVE + 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT + 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT + 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL + 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL + 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + 0x00cf: 0x00a4, # CURRENCY SIGN + 0x00d0: 0x0111, # LATIN SMALL LETTER D WITH STROKE + 0x00d1: 0x0110, # LATIN CAPITAL LETTER D WITH STROKE + 0x00d2: 0x010e, # LATIN CAPITAL LETTER D WITH CARON + 0x00d3: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS + 0x00d4: 0x010f, # LATIN SMALL LETTER D WITH CARON + 0x00d5: 0x0147, # LATIN CAPITAL LETTER N WITH CARON + 0x00d6: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE + 0x00d7: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX + 0x00d8: 0x011b, # LATIN SMALL LETTER E WITH CARON + 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT + 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT + 0x00db: 0x2588, # FULL BLOCK + 0x00dc: 0x2584, # LOWER HALF BLOCK + 0x00dd: 0x0162, # LATIN CAPITAL LETTER T WITH CEDILLA + 0x00de: 0x016e, # LATIN CAPITAL LETTER U WITH RING ABOVE + 0x00df: 0x2580, # UPPER HALF BLOCK + 0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE + 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S + 0x00e2: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX + 0x00e3: 0x0143, # LATIN CAPITAL LETTER N WITH ACUTE + 0x00e4: 0x0144, # LATIN SMALL LETTER N WITH ACUTE + 0x00e5: 0x0148, # LATIN SMALL LETTER N WITH CARON + 0x00e6: 0x0160, # LATIN CAPITAL LETTER S WITH CARON + 0x00e7: 0x0161, # LATIN SMALL LETTER S WITH CARON + 0x00e8: 0x0154, # LATIN CAPITAL LETTER R WITH ACUTE + 0x00e9: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE + 0x00ea: 0x0155, # LATIN SMALL LETTER R WITH ACUTE + 0x00eb: 0x0170, # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE + 0x00ec: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE + 0x00ed: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE + 0x00ee: 0x0163, # LATIN SMALL LETTER T WITH CEDILLA + 0x00ef: 0x00b4, # ACUTE ACCENT + 0x00f0: 0x00ad, # SOFT HYPHEN + 0x00f1: 0x02dd, # DOUBLE ACUTE 
ACCENT + 0x00f2: 0x02db, # OGONEK + 0x00f3: 0x02c7, # CARON + 0x00f4: 0x02d8, # BREVE + 0x00f5: 0x00a7, # SECTION SIGN + 0x00f6: 0x00f7, # DIVISION SIGN + 0x00f7: 0x00b8, # CEDILLA + 0x00f8: 0x00b0, # DEGREE SIGN + 0x00f9: 0x00a8, # DIAERESIS + 0x00fa: 0x02d9, # DOT ABOVE + 0x00fb: 0x0171, # LATIN SMALL LETTER U WITH DOUBLE ACUTE + 0x00fc: 0x0158, # LATIN CAPITAL LETTER R WITH CARON + 0x00fd: 0x0159, # LATIN SMALL LETTER R WITH CARON + 0x00fe: 0x25a0, # BLACK SQUARE + 0x00ff: 0x00a0, # NO-BREAK SPACE +}) + +### Decoding Table + +decoding_table = ( + '\x00' # 0x0000 -> NULL + '\x01' # 0x0001 -> START OF HEADING + '\x02' # 0x0002 -> START OF TEXT + '\x03' # 0x0003 -> END OF TEXT + '\x04' # 0x0004 -> END OF TRANSMISSION + '\x05' # 0x0005 -> ENQUIRY + '\x06' # 0x0006 -> ACKNOWLEDGE + '\x07' # 0x0007 -> BELL + '\x08' # 0x0008 -> BACKSPACE + '\t' # 0x0009 -> HORIZONTAL TABULATION + '\n' # 0x000a -> LINE FEED + '\x0b' # 0x000b -> VERTICAL TABULATION + '\x0c' # 0x000c -> FORM FEED + '\r' # 0x000d -> CARRIAGE RETURN + '\x0e' # 0x000e -> SHIFT OUT + '\x0f' # 0x000f -> SHIFT IN + '\x10' # 0x0010 -> DATA LINK ESCAPE + '\x11' # 0x0011 -> DEVICE CONTROL ONE + '\x12' # 0x0012 -> DEVICE CONTROL TWO + '\x13' # 0x0013 -> DEVICE CONTROL THREE + '\x14' # 0x0014 -> DEVICE CONTROL FOUR + '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x0016 -> SYNCHRONOUS IDLE + '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK + '\x18' # 0x0018 -> CANCEL + '\x19' # 0x0019 -> END OF MEDIUM + '\x1a' # 0x001a -> SUBSTITUTE + '\x1b' # 0x001b -> ESCAPE + '\x1c' # 0x001c -> FILE SEPARATOR + '\x1d' # 0x001d -> GROUP SEPARATOR + '\x1e' # 0x001e -> RECORD SEPARATOR + '\x1f' # 0x001f -> UNIT SEPARATOR + ' ' # 0x0020 -> SPACE + '!' # 0x0021 -> EXCLAMATION MARK + '"' # 0x0022 -> QUOTATION MARK + '#' # 0x0023 -> NUMBER SIGN + '$' # 0x0024 -> DOLLAR SIGN + '%' # 0x0025 -> PERCENT SIGN + '&' # 0x0026 -> AMPERSAND + "'" # 0x0027 -> APOSTROPHE + '(' # 0x0028 -> LEFT PARENTHESIS + ')' # 0x0029 -> RIGHT PARENTHESIS + '*' # 0x002a -> ASTERISK + '+' # 0x002b -> PLUS SIGN + ',' # 0x002c -> COMMA + '-' # 0x002d -> HYPHEN-MINUS + '.' # 0x002e -> FULL STOP + '/' # 0x002f -> SOLIDUS + '0' # 0x0030 -> DIGIT ZERO + '1' # 0x0031 -> DIGIT ONE + '2' # 0x0032 -> DIGIT TWO + '3' # 0x0033 -> DIGIT THREE + '4' # 0x0034 -> DIGIT FOUR + '5' # 0x0035 -> DIGIT FIVE + '6' # 0x0036 -> DIGIT SIX + '7' # 0x0037 -> DIGIT SEVEN + '8' # 0x0038 -> DIGIT EIGHT + '9' # 0x0039 -> DIGIT NINE + ':' # 0x003a -> COLON + ';' # 0x003b -> SEMICOLON + '<' # 0x003c -> LESS-THAN SIGN + '=' # 0x003d -> EQUALS SIGN + '>' # 0x003e -> GREATER-THAN SIGN + '?' 
# 0x003f -> QUESTION MARK + '@' # 0x0040 -> COMMERCIAL AT + 'A' # 0x0041 -> LATIN CAPITAL LETTER A + 'B' # 0x0042 -> LATIN CAPITAL LETTER B + 'C' # 0x0043 -> LATIN CAPITAL LETTER C + 'D' # 0x0044 -> LATIN CAPITAL LETTER D + 'E' # 0x0045 -> LATIN CAPITAL LETTER E + 'F' # 0x0046 -> LATIN CAPITAL LETTER F + 'G' # 0x0047 -> LATIN CAPITAL LETTER G + 'H' # 0x0048 -> LATIN CAPITAL LETTER H + 'I' # 0x0049 -> LATIN CAPITAL LETTER I + 'J' # 0x004a -> LATIN CAPITAL LETTER J + 'K' # 0x004b -> LATIN CAPITAL LETTER K + 'L' # 0x004c -> LATIN CAPITAL LETTER L + 'M' # 0x004d -> LATIN CAPITAL LETTER M + 'N' # 0x004e -> LATIN CAPITAL LETTER N + 'O' # 0x004f -> LATIN CAPITAL LETTER O + 'P' # 0x0050 -> LATIN CAPITAL LETTER P + 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q + 'R' # 0x0052 -> LATIN CAPITAL LETTER R + 'S' # 0x0053 -> LATIN CAPITAL LETTER S + 'T' # 0x0054 -> LATIN CAPITAL LETTER T + 'U' # 0x0055 -> LATIN CAPITAL LETTER U + 'V' # 0x0056 -> LATIN CAPITAL LETTER V + 'W' # 0x0057 -> LATIN CAPITAL LETTER W + 'X' # 0x0058 -> LATIN CAPITAL LETTER X + 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y + 'Z' # 0x005a -> LATIN CAPITAL LETTER Z + '[' # 0x005b -> LEFT SQUARE BRACKET + '\\' # 0x005c -> REVERSE SOLIDUS + ']' # 0x005d -> RIGHT SQUARE BRACKET + '^' # 0x005e -> CIRCUMFLEX ACCENT + '_' # 0x005f -> LOW LINE + '`' # 0x0060 -> GRAVE ACCENT + 'a' # 0x0061 -> LATIN SMALL LETTER A + 'b' # 0x0062 -> LATIN SMALL LETTER B + 'c' # 0x0063 -> LATIN SMALL LETTER C + 'd' # 0x0064 -> LATIN SMALL LETTER D + 'e' # 0x0065 -> LATIN SMALL LETTER E + 'f' # 0x0066 -> LATIN SMALL LETTER F + 'g' # 0x0067 -> LATIN SMALL LETTER G + 'h' # 0x0068 -> LATIN SMALL LETTER H + 'i' # 0x0069 -> LATIN SMALL LETTER I + 'j' # 0x006a -> LATIN SMALL LETTER J + 'k' # 0x006b -> LATIN SMALL LETTER K + 'l' # 0x006c -> LATIN SMALL LETTER L + 'm' # 0x006d -> LATIN SMALL LETTER M + 'n' # 0x006e -> LATIN SMALL LETTER N + 'o' # 0x006f -> LATIN SMALL LETTER O + 'p' # 0x0070 -> LATIN SMALL LETTER P + 'q' # 0x0071 -> LATIN SMALL LETTER Q + 'r' # 0x0072 -> LATIN SMALL LETTER R + 's' # 0x0073 -> LATIN SMALL LETTER S + 't' # 0x0074 -> LATIN SMALL LETTER T + 'u' # 0x0075 -> LATIN SMALL LETTER U + 'v' # 0x0076 -> LATIN SMALL LETTER V + 'w' # 0x0077 -> LATIN SMALL LETTER W + 'x' # 0x0078 -> LATIN SMALL LETTER X + 'y' # 0x0079 -> LATIN SMALL LETTER Y + 'z' # 0x007a -> LATIN SMALL LETTER Z + '{' # 0x007b -> LEFT CURLY BRACKET + '|' # 0x007c -> VERTICAL LINE + '}' # 0x007d -> RIGHT CURLY BRACKET + '~' # 0x007e -> TILDE + '\x7f' # 0x007f -> DELETE + '\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA + '\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS + '\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE + '\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX + '\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS + '\u016f' # 0x0085 -> LATIN SMALL LETTER U WITH RING ABOVE + '\u0107' # 0x0086 -> LATIN SMALL LETTER C WITH ACUTE + '\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA + '\u0142' # 0x0088 -> LATIN SMALL LETTER L WITH STROKE + '\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS + '\u0150' # 0x008a -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE + '\u0151' # 0x008b -> LATIN SMALL LETTER O WITH DOUBLE ACUTE + '\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX + '\u0179' # 0x008d -> LATIN CAPITAL LETTER Z WITH ACUTE + '\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS + '\u0106' # 0x008f -> LATIN CAPITAL LETTER C WITH ACUTE + '\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE + '\u0139' # 0x0091 -> LATIN CAPITAL LETTER L WITH ACUTE + '\u013a' # 
0x0092 -> LATIN SMALL LETTER L WITH ACUTE + '\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX + '\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS + '\u013d' # 0x0095 -> LATIN CAPITAL LETTER L WITH CARON + '\u013e' # 0x0096 -> LATIN SMALL LETTER L WITH CARON + '\u015a' # 0x0097 -> LATIN CAPITAL LETTER S WITH ACUTE + '\u015b' # 0x0098 -> LATIN SMALL LETTER S WITH ACUTE + '\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS + '\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\u0164' # 0x009b -> LATIN CAPITAL LETTER T WITH CARON + '\u0165' # 0x009c -> LATIN SMALL LETTER T WITH CARON + '\u0141' # 0x009d -> LATIN CAPITAL LETTER L WITH STROKE + '\xd7' # 0x009e -> MULTIPLICATION SIGN + '\u010d' # 0x009f -> LATIN SMALL LETTER C WITH CARON + '\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE + '\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE + '\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE + '\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE + '\u0104' # 0x00a4 -> LATIN CAPITAL LETTER A WITH OGONEK + '\u0105' # 0x00a5 -> LATIN SMALL LETTER A WITH OGONEK + '\u017d' # 0x00a6 -> LATIN CAPITAL LETTER Z WITH CARON + '\u017e' # 0x00a7 -> LATIN SMALL LETTER Z WITH CARON + '\u0118' # 0x00a8 -> LATIN CAPITAL LETTER E WITH OGONEK + '\u0119' # 0x00a9 -> LATIN SMALL LETTER E WITH OGONEK + '\xac' # 0x00aa -> NOT SIGN + '\u017a' # 0x00ab -> LATIN SMALL LETTER Z WITH ACUTE + '\u010c' # 0x00ac -> LATIN CAPITAL LETTER C WITH CARON + '\u015f' # 0x00ad -> LATIN SMALL LETTER S WITH CEDILLA + '\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\u2591' # 0x00b0 -> LIGHT SHADE + '\u2592' # 0x00b1 -> MEDIUM SHADE + '\u2593' # 0x00b2 -> DARK SHADE + '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL + '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT + '\xc1' # 0x00b5 -> LATIN CAPITAL LETTER A WITH ACUTE + '\xc2' # 0x00b6 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX + '\u011a' # 0x00b7 -> LATIN CAPITAL LETTER E WITH CARON + '\u015e' # 0x00b8 -> LATIN CAPITAL LETTER S WITH CEDILLA + '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT + '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL + '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT + '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT + '\u017b' # 0x00bd -> LATIN CAPITAL LETTER Z WITH DOT ABOVE + '\u017c' # 0x00be -> LATIN SMALL LETTER Z WITH DOT ABOVE + '\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT + '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT + '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL + '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + '\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT + '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL + '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + '\u0102' # 0x00c6 -> LATIN CAPITAL LETTER A WITH BREVE + '\u0103' # 0x00c7 -> LATIN SMALL LETTER A WITH BREVE + '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT + '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT + '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL + '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL + '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + '\xa4' # 0x00cf -> CURRENCY SIGN + '\u0111' # 0x00d0 -> LATIN SMALL LETTER D WITH STROKE + '\u0110' # 0x00d1 -> LATIN CAPITAL LETTER D WITH STROKE + '\u010e' # 0x00d2 -> 
LATIN CAPITAL LETTER D WITH CARON + '\xcb' # 0x00d3 -> LATIN CAPITAL LETTER E WITH DIAERESIS + '\u010f' # 0x00d4 -> LATIN SMALL LETTER D WITH CARON + '\u0147' # 0x00d5 -> LATIN CAPITAL LETTER N WITH CARON + '\xcd' # 0x00d6 -> LATIN CAPITAL LETTER I WITH ACUTE + '\xce' # 0x00d7 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX + '\u011b' # 0x00d8 -> LATIN SMALL LETTER E WITH CARON + '\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT + '\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT + '\u2588' # 0x00db -> FULL BLOCK + '\u2584' # 0x00dc -> LOWER HALF BLOCK + '\u0162' # 0x00dd -> LATIN CAPITAL LETTER T WITH CEDILLA + '\u016e' # 0x00de -> LATIN CAPITAL LETTER U WITH RING ABOVE + '\u2580' # 0x00df -> UPPER HALF BLOCK + '\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE + '\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S + '\xd4' # 0x00e2 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX + '\u0143' # 0x00e3 -> LATIN CAPITAL LETTER N WITH ACUTE + '\u0144' # 0x00e4 -> LATIN SMALL LETTER N WITH ACUTE + '\u0148' # 0x00e5 -> LATIN SMALL LETTER N WITH CARON + '\u0160' # 0x00e6 -> LATIN CAPITAL LETTER S WITH CARON + '\u0161' # 0x00e7 -> LATIN SMALL LETTER S WITH CARON + '\u0154' # 0x00e8 -> LATIN CAPITAL LETTER R WITH ACUTE + '\xda' # 0x00e9 -> LATIN CAPITAL LETTER U WITH ACUTE + '\u0155' # 0x00ea -> LATIN SMALL LETTER R WITH ACUTE + '\u0170' # 0x00eb -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE + '\xfd' # 0x00ec -> LATIN SMALL LETTER Y WITH ACUTE + '\xdd' # 0x00ed -> LATIN CAPITAL LETTER Y WITH ACUTE + '\u0163' # 0x00ee -> LATIN SMALL LETTER T WITH CEDILLA + '\xb4' # 0x00ef -> ACUTE ACCENT + '\xad' # 0x00f0 -> SOFT HYPHEN + '\u02dd' # 0x00f1 -> DOUBLE ACUTE ACCENT + '\u02db' # 0x00f2 -> OGONEK + '\u02c7' # 0x00f3 -> CARON + '\u02d8' # 0x00f4 -> BREVE + '\xa7' # 0x00f5 -> SECTION SIGN + '\xf7' # 0x00f6 -> DIVISION SIGN + '\xb8' # 0x00f7 -> CEDILLA + '\xb0' # 0x00f8 -> DEGREE SIGN + '\xa8' # 0x00f9 -> DIAERESIS + '\u02d9' # 0x00fa -> DOT ABOVE + '\u0171' # 0x00fb -> LATIN SMALL LETTER U WITH DOUBLE ACUTE + '\u0158' # 0x00fc -> LATIN CAPITAL LETTER R WITH CARON + '\u0159' # 0x00fd -> LATIN SMALL LETTER R WITH CARON + '\u25a0' # 0x00fe -> BLACK SQUARE + '\xa0' # 0x00ff -> NO-BREAK SPACE +) + +### Encoding Map + +encoding_map = { + 0x0000: 0x0000, # NULL + 0x0001: 0x0001, # START OF HEADING + 0x0002: 0x0002, # START OF TEXT + 0x0003: 0x0003, # END OF TEXT + 0x0004: 0x0004, # END OF TRANSMISSION + 0x0005: 0x0005, # ENQUIRY + 0x0006: 0x0006, # ACKNOWLEDGE + 0x0007: 0x0007, # BELL + 0x0008: 0x0008, # BACKSPACE + 0x0009: 0x0009, # HORIZONTAL TABULATION + 0x000a: 0x000a, # LINE FEED + 0x000b: 0x000b, # VERTICAL TABULATION + 0x000c: 0x000c, # FORM FEED + 0x000d: 0x000d, # CARRIAGE RETURN + 0x000e: 0x000e, # SHIFT OUT + 0x000f: 0x000f, # SHIFT IN + 0x0010: 0x0010, # DATA LINK ESCAPE + 0x0011: 0x0011, # DEVICE CONTROL ONE + 0x0012: 0x0012, # DEVICE CONTROL TWO + 0x0013: 0x0013, # DEVICE CONTROL THREE + 0x0014: 0x0014, # DEVICE CONTROL FOUR + 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE + 0x0016: 0x0016, # SYNCHRONOUS IDLE + 0x0017: 0x0017, # END OF TRANSMISSION BLOCK + 0x0018: 0x0018, # CANCEL + 0x0019: 0x0019, # END OF MEDIUM + 0x001a: 0x001a, # SUBSTITUTE + 0x001b: 0x001b, # ESCAPE + 0x001c: 0x001c, # FILE SEPARATOR + 0x001d: 0x001d, # GROUP SEPARATOR + 0x001e: 0x001e, # RECORD SEPARATOR + 0x001f: 0x001f, # UNIT SEPARATOR + 0x0020: 0x0020, # SPACE + 0x0021: 0x0021, # EXCLAMATION MARK + 0x0022: 0x0022, # QUOTATION MARK + 0x0023: 0x0023, # NUMBER SIGN + 0x0024: 0x0024, # DOLLAR SIGN + 0x0025: 0x0025, # PERCENT SIGN + 0x0026: 
0x0026, # AMPERSAND + 0x0027: 0x0027, # APOSTROPHE + 0x0028: 0x0028, # LEFT PARENTHESIS + 0x0029: 0x0029, # RIGHT PARENTHESIS + 0x002a: 0x002a, # ASTERISK + 0x002b: 0x002b, # PLUS SIGN + 0x002c: 0x002c, # COMMA + 0x002d: 0x002d, # HYPHEN-MINUS + 0x002e: 0x002e, # FULL STOP + 0x002f: 0x002f, # SOLIDUS + 0x0030: 0x0030, # DIGIT ZERO + 0x0031: 0x0031, # DIGIT ONE + 0x0032: 0x0032, # DIGIT TWO + 0x0033: 0x0033, # DIGIT THREE + 0x0034: 0x0034, # DIGIT FOUR + 0x0035: 0x0035, # DIGIT FIVE + 0x0036: 0x0036, # DIGIT SIX + 0x0037: 0x0037, # DIGIT SEVEN + 0x0038: 0x0038, # DIGIT EIGHT + 0x0039: 0x0039, # DIGIT NINE + 0x003a: 0x003a, # COLON + 0x003b: 0x003b, # SEMICOLON + 0x003c: 0x003c, # LESS-THAN SIGN + 0x003d: 0x003d, # EQUALS SIGN + 0x003e: 0x003e, # GREATER-THAN SIGN + 0x003f: 0x003f, # QUESTION MARK + 0x0040: 0x0040, # COMMERCIAL AT + 0x0041: 0x0041, # LATIN CAPITAL LETTER A + 0x0042: 0x0042, # LATIN CAPITAL LETTER B + 0x0043: 0x0043, # LATIN CAPITAL LETTER C + 0x0044: 0x0044, # LATIN CAPITAL LETTER D + 0x0045: 0x0045, # LATIN CAPITAL LETTER E + 0x0046: 0x0046, # LATIN CAPITAL LETTER F + 0x0047: 0x0047, # LATIN CAPITAL LETTER G + 0x0048: 0x0048, # LATIN CAPITAL LETTER H + 0x0049: 0x0049, # LATIN CAPITAL LETTER I + 0x004a: 0x004a, # LATIN CAPITAL LETTER J + 0x004b: 0x004b, # LATIN CAPITAL LETTER K + 0x004c: 0x004c, # LATIN CAPITAL LETTER L + 0x004d: 0x004d, # LATIN CAPITAL LETTER M + 0x004e: 0x004e, # LATIN CAPITAL LETTER N + 0x004f: 0x004f, # LATIN CAPITAL LETTER O + 0x0050: 0x0050, # LATIN CAPITAL LETTER P + 0x0051: 0x0051, # LATIN CAPITAL LETTER Q + 0x0052: 0x0052, # LATIN CAPITAL LETTER R + 0x0053: 0x0053, # LATIN CAPITAL LETTER S + 0x0054: 0x0054, # LATIN CAPITAL LETTER T + 0x0055: 0x0055, # LATIN CAPITAL LETTER U + 0x0056: 0x0056, # LATIN CAPITAL LETTER V + 0x0057: 0x0057, # LATIN CAPITAL LETTER W + 0x0058: 0x0058, # LATIN CAPITAL LETTER X + 0x0059: 0x0059, # LATIN CAPITAL LETTER Y + 0x005a: 0x005a, # LATIN CAPITAL LETTER Z + 0x005b: 0x005b, # LEFT SQUARE BRACKET + 0x005c: 0x005c, # REVERSE SOLIDUS + 0x005d: 0x005d, # RIGHT SQUARE BRACKET + 0x005e: 0x005e, # CIRCUMFLEX ACCENT + 0x005f: 0x005f, # LOW LINE + 0x0060: 0x0060, # GRAVE ACCENT + 0x0061: 0x0061, # LATIN SMALL LETTER A + 0x0062: 0x0062, # LATIN SMALL LETTER B + 0x0063: 0x0063, # LATIN SMALL LETTER C + 0x0064: 0x0064, # LATIN SMALL LETTER D + 0x0065: 0x0065, # LATIN SMALL LETTER E + 0x0066: 0x0066, # LATIN SMALL LETTER F + 0x0067: 0x0067, # LATIN SMALL LETTER G + 0x0068: 0x0068, # LATIN SMALL LETTER H + 0x0069: 0x0069, # LATIN SMALL LETTER I + 0x006a: 0x006a, # LATIN SMALL LETTER J + 0x006b: 0x006b, # LATIN SMALL LETTER K + 0x006c: 0x006c, # LATIN SMALL LETTER L + 0x006d: 0x006d, # LATIN SMALL LETTER M + 0x006e: 0x006e, # LATIN SMALL LETTER N + 0x006f: 0x006f, # LATIN SMALL LETTER O + 0x0070: 0x0070, # LATIN SMALL LETTER P + 0x0071: 0x0071, # LATIN SMALL LETTER Q + 0x0072: 0x0072, # LATIN SMALL LETTER R + 0x0073: 0x0073, # LATIN SMALL LETTER S + 0x0074: 0x0074, # LATIN SMALL LETTER T + 0x0075: 0x0075, # LATIN SMALL LETTER U + 0x0076: 0x0076, # LATIN SMALL LETTER V + 0x0077: 0x0077, # LATIN SMALL LETTER W + 0x0078: 0x0078, # LATIN SMALL LETTER X + 0x0079: 0x0079, # LATIN SMALL LETTER Y + 0x007a: 0x007a, # LATIN SMALL LETTER Z + 0x007b: 0x007b, # LEFT CURLY BRACKET + 0x007c: 0x007c, # VERTICAL LINE + 0x007d: 0x007d, # RIGHT CURLY BRACKET + 0x007e: 0x007e, # TILDE + 0x007f: 0x007f, # DELETE + 0x00a0: 0x00ff, # NO-BREAK SPACE + 0x00a4: 0x00cf, # CURRENCY SIGN + 0x00a7: 0x00f5, # SECTION SIGN + 0x00a8: 0x00f9, # DIAERESIS + 0x00ab: 
0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00ac: 0x00aa, # NOT SIGN + 0x00ad: 0x00f0, # SOFT HYPHEN + 0x00b0: 0x00f8, # DEGREE SIGN + 0x00b4: 0x00ef, # ACUTE ACCENT + 0x00b8: 0x00f7, # CEDILLA + 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00c1: 0x00b5, # LATIN CAPITAL LETTER A WITH ACUTE + 0x00c2: 0x00b6, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX + 0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS + 0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA + 0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE + 0x00cb: 0x00d3, # LATIN CAPITAL LETTER E WITH DIAERESIS + 0x00cd: 0x00d6, # LATIN CAPITAL LETTER I WITH ACUTE + 0x00ce: 0x00d7, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX + 0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE + 0x00d4: 0x00e2, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX + 0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS + 0x00d7: 0x009e, # MULTIPLICATION SIGN + 0x00da: 0x00e9, # LATIN CAPITAL LETTER U WITH ACUTE + 0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS + 0x00dd: 0x00ed, # LATIN CAPITAL LETTER Y WITH ACUTE + 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S + 0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE + 0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX + 0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS + 0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA + 0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE + 0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS + 0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE + 0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX + 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE + 0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX + 0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS + 0x00f7: 0x00f6, # DIVISION SIGN + 0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE + 0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS + 0x00fd: 0x00ec, # LATIN SMALL LETTER Y WITH ACUTE + 0x0102: 0x00c6, # LATIN CAPITAL LETTER A WITH BREVE + 0x0103: 0x00c7, # LATIN SMALL LETTER A WITH BREVE + 0x0104: 0x00a4, # LATIN CAPITAL LETTER A WITH OGONEK + 0x0105: 0x00a5, # LATIN SMALL LETTER A WITH OGONEK + 0x0106: 0x008f, # LATIN CAPITAL LETTER C WITH ACUTE + 0x0107: 0x0086, # LATIN SMALL LETTER C WITH ACUTE + 0x010c: 0x00ac, # LATIN CAPITAL LETTER C WITH CARON + 0x010d: 0x009f, # LATIN SMALL LETTER C WITH CARON + 0x010e: 0x00d2, # LATIN CAPITAL LETTER D WITH CARON + 0x010f: 0x00d4, # LATIN SMALL LETTER D WITH CARON + 0x0110: 0x00d1, # LATIN CAPITAL LETTER D WITH STROKE + 0x0111: 0x00d0, # LATIN SMALL LETTER D WITH STROKE + 0x0118: 0x00a8, # LATIN CAPITAL LETTER E WITH OGONEK + 0x0119: 0x00a9, # LATIN SMALL LETTER E WITH OGONEK + 0x011a: 0x00b7, # LATIN CAPITAL LETTER E WITH CARON + 0x011b: 0x00d8, # LATIN SMALL LETTER E WITH CARON + 0x0139: 0x0091, # LATIN CAPITAL LETTER L WITH ACUTE + 0x013a: 0x0092, # LATIN SMALL LETTER L WITH ACUTE + 0x013d: 0x0095, # LATIN CAPITAL LETTER L WITH CARON + 0x013e: 0x0096, # LATIN SMALL LETTER L WITH CARON + 0x0141: 0x009d, # LATIN CAPITAL LETTER L WITH STROKE + 0x0142: 0x0088, # LATIN SMALL LETTER L WITH STROKE + 0x0143: 0x00e3, # LATIN CAPITAL LETTER N WITH ACUTE + 0x0144: 0x00e4, # LATIN SMALL LETTER N WITH ACUTE + 0x0147: 0x00d5, # LATIN CAPITAL LETTER N WITH CARON + 0x0148: 0x00e5, # LATIN SMALL LETTER N WITH CARON + 0x0150: 0x008a, # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE + 0x0151: 0x008b, # LATIN SMALL LETTER O WITH DOUBLE ACUTE + 0x0154: 0x00e8, # LATIN CAPITAL LETTER R WITH ACUTE + 0x0155: 0x00ea, # LATIN SMALL LETTER R WITH ACUTE + 
0x0158: 0x00fc, # LATIN CAPITAL LETTER R WITH CARON + 0x0159: 0x00fd, # LATIN SMALL LETTER R WITH CARON + 0x015a: 0x0097, # LATIN CAPITAL LETTER S WITH ACUTE + 0x015b: 0x0098, # LATIN SMALL LETTER S WITH ACUTE + 0x015e: 0x00b8, # LATIN CAPITAL LETTER S WITH CEDILLA + 0x015f: 0x00ad, # LATIN SMALL LETTER S WITH CEDILLA + 0x0160: 0x00e6, # LATIN CAPITAL LETTER S WITH CARON + 0x0161: 0x00e7, # LATIN SMALL LETTER S WITH CARON + 0x0162: 0x00dd, # LATIN CAPITAL LETTER T WITH CEDILLA + 0x0163: 0x00ee, # LATIN SMALL LETTER T WITH CEDILLA + 0x0164: 0x009b, # LATIN CAPITAL LETTER T WITH CARON + 0x0165: 0x009c, # LATIN SMALL LETTER T WITH CARON + 0x016e: 0x00de, # LATIN CAPITAL LETTER U WITH RING ABOVE + 0x016f: 0x0085, # LATIN SMALL LETTER U WITH RING ABOVE + 0x0170: 0x00eb, # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE + 0x0171: 0x00fb, # LATIN SMALL LETTER U WITH DOUBLE ACUTE + 0x0179: 0x008d, # LATIN CAPITAL LETTER Z WITH ACUTE + 0x017a: 0x00ab, # LATIN SMALL LETTER Z WITH ACUTE + 0x017b: 0x00bd, # LATIN CAPITAL LETTER Z WITH DOT ABOVE + 0x017c: 0x00be, # LATIN SMALL LETTER Z WITH DOT ABOVE + 0x017d: 0x00a6, # LATIN CAPITAL LETTER Z WITH CARON + 0x017e: 0x00a7, # LATIN SMALL LETTER Z WITH CARON + 0x02c7: 0x00f3, # CARON + 0x02d8: 0x00f4, # BREVE + 0x02d9: 0x00fa, # DOT ABOVE + 0x02db: 0x00f2, # OGONEK + 0x02dd: 0x00f1, # DOUBLE ACUTE ACCENT + 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL + 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL + 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT + 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT + 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT + 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT + 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT + 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT + 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL + 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL + 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL + 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT + 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT + 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT + 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT + 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT + 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL + 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + 0x2580: 0x00df, # UPPER HALF BLOCK + 0x2584: 0x00dc, # LOWER HALF BLOCK + 0x2588: 0x00db, # FULL BLOCK + 0x2591: 0x00b0, # LIGHT SHADE + 0x2592: 0x00b1, # MEDIUM SHADE + 0x2593: 0x00b2, # DARK SHADE + 0x25a0: 0x00fe, # BLACK SQUARE +} diff --git a/venv/Lib/encodings/cp855.py b/venv/Lib/encodings/cp855.py new file mode 100644 index 00000000..4fe92106 --- /dev/null +++ b/venv/Lib/encodings/cp855.py @@ -0,0 +1,698 @@ +""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP855.TXT' with gencodec.py. 
+ +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_map) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_map)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='cp855', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + +### Decoding Map + +decoding_map = codecs.make_identity_dict(range(256)) +decoding_map.update({ + 0x0080: 0x0452, # CYRILLIC SMALL LETTER DJE + 0x0081: 0x0402, # CYRILLIC CAPITAL LETTER DJE + 0x0082: 0x0453, # CYRILLIC SMALL LETTER GJE + 0x0083: 0x0403, # CYRILLIC CAPITAL LETTER GJE + 0x0084: 0x0451, # CYRILLIC SMALL LETTER IO + 0x0085: 0x0401, # CYRILLIC CAPITAL LETTER IO + 0x0086: 0x0454, # CYRILLIC SMALL LETTER UKRAINIAN IE + 0x0087: 0x0404, # CYRILLIC CAPITAL LETTER UKRAINIAN IE + 0x0088: 0x0455, # CYRILLIC SMALL LETTER DZE + 0x0089: 0x0405, # CYRILLIC CAPITAL LETTER DZE + 0x008a: 0x0456, # CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I + 0x008b: 0x0406, # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I + 0x008c: 0x0457, # CYRILLIC SMALL LETTER YI + 0x008d: 0x0407, # CYRILLIC CAPITAL LETTER YI + 0x008e: 0x0458, # CYRILLIC SMALL LETTER JE + 0x008f: 0x0408, # CYRILLIC CAPITAL LETTER JE + 0x0090: 0x0459, # CYRILLIC SMALL LETTER LJE + 0x0091: 0x0409, # CYRILLIC CAPITAL LETTER LJE + 0x0092: 0x045a, # CYRILLIC SMALL LETTER NJE + 0x0093: 0x040a, # CYRILLIC CAPITAL LETTER NJE + 0x0094: 0x045b, # CYRILLIC SMALL LETTER TSHE + 0x0095: 0x040b, # CYRILLIC CAPITAL LETTER TSHE + 0x0096: 0x045c, # CYRILLIC SMALL LETTER KJE + 0x0097: 0x040c, # CYRILLIC CAPITAL LETTER KJE + 0x0098: 0x045e, # CYRILLIC SMALL LETTER SHORT U + 0x0099: 0x040e, # CYRILLIC CAPITAL LETTER SHORT U + 0x009a: 0x045f, # CYRILLIC SMALL LETTER DZHE + 0x009b: 0x040f, # CYRILLIC CAPITAL LETTER DZHE + 0x009c: 0x044e, # CYRILLIC SMALL LETTER YU + 0x009d: 0x042e, # CYRILLIC CAPITAL LETTER YU + 0x009e: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN + 0x009f: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN + 0x00a0: 0x0430, # CYRILLIC SMALL LETTER A + 0x00a1: 0x0410, # CYRILLIC CAPITAL LETTER A + 0x00a2: 0x0431, # CYRILLIC SMALL LETTER BE + 0x00a3: 0x0411, # CYRILLIC CAPITAL LETTER BE + 0x00a4: 0x0446, # CYRILLIC SMALL LETTER TSE + 0x00a5: 0x0426, # CYRILLIC CAPITAL LETTER TSE + 0x00a6: 0x0434, # CYRILLIC SMALL LETTER DE + 0x00a7: 0x0414, # CYRILLIC CAPITAL LETTER DE + 0x00a8: 0x0435, # CYRILLIC SMALL LETTER IE + 0x00a9: 0x0415, # CYRILLIC CAPITAL LETTER IE + 0x00aa: 0x0444, # CYRILLIC SMALL LETTER EF + 0x00ab: 0x0424, # CYRILLIC CAPITAL LETTER EF + 0x00ac: 0x0433, # CYRILLIC SMALL LETTER GHE + 0x00ad: 0x0413, # CYRILLIC CAPITAL LETTER GHE + 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00b0: 0x2591, # LIGHT SHADE + 0x00b1: 0x2592, # MEDIUM SHADE + 0x00b2: 0x2593, # DARK SHADE + 
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL + 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT + 0x00b5: 0x0445, # CYRILLIC SMALL LETTER HA + 0x00b6: 0x0425, # CYRILLIC CAPITAL LETTER HA + 0x00b7: 0x0438, # CYRILLIC SMALL LETTER I + 0x00b8: 0x0418, # CYRILLIC CAPITAL LETTER I + 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT + 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL + 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT + 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT + 0x00bd: 0x0439, # CYRILLIC SMALL LETTER SHORT I + 0x00be: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I + 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT + 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT + 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL + 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT + 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL + 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + 0x00c6: 0x043a, # CYRILLIC SMALL LETTER KA + 0x00c7: 0x041a, # CYRILLIC CAPITAL LETTER KA + 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT + 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT + 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL + 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL + 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + 0x00cf: 0x00a4, # CURRENCY SIGN + 0x00d0: 0x043b, # CYRILLIC SMALL LETTER EL + 0x00d1: 0x041b, # CYRILLIC CAPITAL LETTER EL + 0x00d2: 0x043c, # CYRILLIC SMALL LETTER EM + 0x00d3: 0x041c, # CYRILLIC CAPITAL LETTER EM + 0x00d4: 0x043d, # CYRILLIC SMALL LETTER EN + 0x00d5: 0x041d, # CYRILLIC CAPITAL LETTER EN + 0x00d6: 0x043e, # CYRILLIC SMALL LETTER O + 0x00d7: 0x041e, # CYRILLIC CAPITAL LETTER O + 0x00d8: 0x043f, # CYRILLIC SMALL LETTER PE + 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT + 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT + 0x00db: 0x2588, # FULL BLOCK + 0x00dc: 0x2584, # LOWER HALF BLOCK + 0x00dd: 0x041f, # CYRILLIC CAPITAL LETTER PE + 0x00de: 0x044f, # CYRILLIC SMALL LETTER YA + 0x00df: 0x2580, # UPPER HALF BLOCK + 0x00e0: 0x042f, # CYRILLIC CAPITAL LETTER YA + 0x00e1: 0x0440, # CYRILLIC SMALL LETTER ER + 0x00e2: 0x0420, # CYRILLIC CAPITAL LETTER ER + 0x00e3: 0x0441, # CYRILLIC SMALL LETTER ES + 0x00e4: 0x0421, # CYRILLIC CAPITAL LETTER ES + 0x00e5: 0x0442, # CYRILLIC SMALL LETTER TE + 0x00e6: 0x0422, # CYRILLIC CAPITAL LETTER TE + 0x00e7: 0x0443, # CYRILLIC SMALL LETTER U + 0x00e8: 0x0423, # CYRILLIC CAPITAL LETTER U + 0x00e9: 0x0436, # CYRILLIC SMALL LETTER ZHE + 0x00ea: 0x0416, # CYRILLIC CAPITAL LETTER ZHE + 0x00eb: 0x0432, # CYRILLIC SMALL LETTER VE + 0x00ec: 0x0412, # CYRILLIC CAPITAL LETTER VE + 0x00ed: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN + 0x00ee: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN + 0x00ef: 0x2116, # NUMERO SIGN + 0x00f0: 0x00ad, # SOFT HYPHEN + 0x00f1: 0x044b, # CYRILLIC SMALL LETTER YERU + 0x00f2: 0x042b, # CYRILLIC CAPITAL LETTER YERU + 0x00f3: 0x0437, # CYRILLIC SMALL LETTER ZE + 0x00f4: 0x0417, # CYRILLIC CAPITAL LETTER ZE + 0x00f5: 0x0448, # CYRILLIC SMALL LETTER SHA + 0x00f6: 0x0428, # CYRILLIC CAPITAL LETTER SHA + 0x00f7: 0x044d, # CYRILLIC SMALL LETTER E + 0x00f8: 0x042d, # CYRILLIC CAPITAL LETTER E + 0x00f9: 0x0449, # CYRILLIC SMALL LETTER SHCHA + 0x00fa: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA + 0x00fb: 0x0447, # CYRILLIC SMALL LETTER CHE + 0x00fc: 0x0427, # CYRILLIC CAPITAL 
LETTER CHE + 0x00fd: 0x00a7, # SECTION SIGN + 0x00fe: 0x25a0, # BLACK SQUARE + 0x00ff: 0x00a0, # NO-BREAK SPACE +}) + +### Decoding Table + +decoding_table = ( + '\x00' # 0x0000 -> NULL + '\x01' # 0x0001 -> START OF HEADING + '\x02' # 0x0002 -> START OF TEXT + '\x03' # 0x0003 -> END OF TEXT + '\x04' # 0x0004 -> END OF TRANSMISSION + '\x05' # 0x0005 -> ENQUIRY + '\x06' # 0x0006 -> ACKNOWLEDGE + '\x07' # 0x0007 -> BELL + '\x08' # 0x0008 -> BACKSPACE + '\t' # 0x0009 -> HORIZONTAL TABULATION + '\n' # 0x000a -> LINE FEED + '\x0b' # 0x000b -> VERTICAL TABULATION + '\x0c' # 0x000c -> FORM FEED + '\r' # 0x000d -> CARRIAGE RETURN + '\x0e' # 0x000e -> SHIFT OUT + '\x0f' # 0x000f -> SHIFT IN + '\x10' # 0x0010 -> DATA LINK ESCAPE + '\x11' # 0x0011 -> DEVICE CONTROL ONE + '\x12' # 0x0012 -> DEVICE CONTROL TWO + '\x13' # 0x0013 -> DEVICE CONTROL THREE + '\x14' # 0x0014 -> DEVICE CONTROL FOUR + '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x0016 -> SYNCHRONOUS IDLE + '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK + '\x18' # 0x0018 -> CANCEL + '\x19' # 0x0019 -> END OF MEDIUM + '\x1a' # 0x001a -> SUBSTITUTE + '\x1b' # 0x001b -> ESCAPE + '\x1c' # 0x001c -> FILE SEPARATOR + '\x1d' # 0x001d -> GROUP SEPARATOR + '\x1e' # 0x001e -> RECORD SEPARATOR + '\x1f' # 0x001f -> UNIT SEPARATOR + ' ' # 0x0020 -> SPACE + '!' # 0x0021 -> EXCLAMATION MARK + '"' # 0x0022 -> QUOTATION MARK + '#' # 0x0023 -> NUMBER SIGN + '$' # 0x0024 -> DOLLAR SIGN + '%' # 0x0025 -> PERCENT SIGN + '&' # 0x0026 -> AMPERSAND + "'" # 0x0027 -> APOSTROPHE + '(' # 0x0028 -> LEFT PARENTHESIS + ')' # 0x0029 -> RIGHT PARENTHESIS + '*' # 0x002a -> ASTERISK + '+' # 0x002b -> PLUS SIGN + ',' # 0x002c -> COMMA + '-' # 0x002d -> HYPHEN-MINUS + '.' # 0x002e -> FULL STOP + '/' # 0x002f -> SOLIDUS + '0' # 0x0030 -> DIGIT ZERO + '1' # 0x0031 -> DIGIT ONE + '2' # 0x0032 -> DIGIT TWO + '3' # 0x0033 -> DIGIT THREE + '4' # 0x0034 -> DIGIT FOUR + '5' # 0x0035 -> DIGIT FIVE + '6' # 0x0036 -> DIGIT SIX + '7' # 0x0037 -> DIGIT SEVEN + '8' # 0x0038 -> DIGIT EIGHT + '9' # 0x0039 -> DIGIT NINE + ':' # 0x003a -> COLON + ';' # 0x003b -> SEMICOLON + '<' # 0x003c -> LESS-THAN SIGN + '=' # 0x003d -> EQUALS SIGN + '>' # 0x003e -> GREATER-THAN SIGN + '?' 
# 0x003f -> QUESTION MARK + '@' # 0x0040 -> COMMERCIAL AT + 'A' # 0x0041 -> LATIN CAPITAL LETTER A + 'B' # 0x0042 -> LATIN CAPITAL LETTER B + 'C' # 0x0043 -> LATIN CAPITAL LETTER C + 'D' # 0x0044 -> LATIN CAPITAL LETTER D + 'E' # 0x0045 -> LATIN CAPITAL LETTER E + 'F' # 0x0046 -> LATIN CAPITAL LETTER F + 'G' # 0x0047 -> LATIN CAPITAL LETTER G + 'H' # 0x0048 -> LATIN CAPITAL LETTER H + 'I' # 0x0049 -> LATIN CAPITAL LETTER I + 'J' # 0x004a -> LATIN CAPITAL LETTER J + 'K' # 0x004b -> LATIN CAPITAL LETTER K + 'L' # 0x004c -> LATIN CAPITAL LETTER L + 'M' # 0x004d -> LATIN CAPITAL LETTER M + 'N' # 0x004e -> LATIN CAPITAL LETTER N + 'O' # 0x004f -> LATIN CAPITAL LETTER O + 'P' # 0x0050 -> LATIN CAPITAL LETTER P + 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q + 'R' # 0x0052 -> LATIN CAPITAL LETTER R + 'S' # 0x0053 -> LATIN CAPITAL LETTER S + 'T' # 0x0054 -> LATIN CAPITAL LETTER T + 'U' # 0x0055 -> LATIN CAPITAL LETTER U + 'V' # 0x0056 -> LATIN CAPITAL LETTER V + 'W' # 0x0057 -> LATIN CAPITAL LETTER W + 'X' # 0x0058 -> LATIN CAPITAL LETTER X + 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y + 'Z' # 0x005a -> LATIN CAPITAL LETTER Z + '[' # 0x005b -> LEFT SQUARE BRACKET + '\\' # 0x005c -> REVERSE SOLIDUS + ']' # 0x005d -> RIGHT SQUARE BRACKET + '^' # 0x005e -> CIRCUMFLEX ACCENT + '_' # 0x005f -> LOW LINE + '`' # 0x0060 -> GRAVE ACCENT + 'a' # 0x0061 -> LATIN SMALL LETTER A + 'b' # 0x0062 -> LATIN SMALL LETTER B + 'c' # 0x0063 -> LATIN SMALL LETTER C + 'd' # 0x0064 -> LATIN SMALL LETTER D + 'e' # 0x0065 -> LATIN SMALL LETTER E + 'f' # 0x0066 -> LATIN SMALL LETTER F + 'g' # 0x0067 -> LATIN SMALL LETTER G + 'h' # 0x0068 -> LATIN SMALL LETTER H + 'i' # 0x0069 -> LATIN SMALL LETTER I + 'j' # 0x006a -> LATIN SMALL LETTER J + 'k' # 0x006b -> LATIN SMALL LETTER K + 'l' # 0x006c -> LATIN SMALL LETTER L + 'm' # 0x006d -> LATIN SMALL LETTER M + 'n' # 0x006e -> LATIN SMALL LETTER N + 'o' # 0x006f -> LATIN SMALL LETTER O + 'p' # 0x0070 -> LATIN SMALL LETTER P + 'q' # 0x0071 -> LATIN SMALL LETTER Q + 'r' # 0x0072 -> LATIN SMALL LETTER R + 's' # 0x0073 -> LATIN SMALL LETTER S + 't' # 0x0074 -> LATIN SMALL LETTER T + 'u' # 0x0075 -> LATIN SMALL LETTER U + 'v' # 0x0076 -> LATIN SMALL LETTER V + 'w' # 0x0077 -> LATIN SMALL LETTER W + 'x' # 0x0078 -> LATIN SMALL LETTER X + 'y' # 0x0079 -> LATIN SMALL LETTER Y + 'z' # 0x007a -> LATIN SMALL LETTER Z + '{' # 0x007b -> LEFT CURLY BRACKET + '|' # 0x007c -> VERTICAL LINE + '}' # 0x007d -> RIGHT CURLY BRACKET + '~' # 0x007e -> TILDE + '\x7f' # 0x007f -> DELETE + '\u0452' # 0x0080 -> CYRILLIC SMALL LETTER DJE + '\u0402' # 0x0081 -> CYRILLIC CAPITAL LETTER DJE + '\u0453' # 0x0082 -> CYRILLIC SMALL LETTER GJE + '\u0403' # 0x0083 -> CYRILLIC CAPITAL LETTER GJE + '\u0451' # 0x0084 -> CYRILLIC SMALL LETTER IO + '\u0401' # 0x0085 -> CYRILLIC CAPITAL LETTER IO + '\u0454' # 0x0086 -> CYRILLIC SMALL LETTER UKRAINIAN IE + '\u0404' # 0x0087 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE + '\u0455' # 0x0088 -> CYRILLIC SMALL LETTER DZE + '\u0405' # 0x0089 -> CYRILLIC CAPITAL LETTER DZE + '\u0456' # 0x008a -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I + '\u0406' # 0x008b -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I + '\u0457' # 0x008c -> CYRILLIC SMALL LETTER YI + '\u0407' # 0x008d -> CYRILLIC CAPITAL LETTER YI + '\u0458' # 0x008e -> CYRILLIC SMALL LETTER JE + '\u0408' # 0x008f -> CYRILLIC CAPITAL LETTER JE + '\u0459' # 0x0090 -> CYRILLIC SMALL LETTER LJE + '\u0409' # 0x0091 -> CYRILLIC CAPITAL LETTER LJE + '\u045a' # 0x0092 -> CYRILLIC SMALL LETTER NJE + '\u040a' # 0x0093 -> CYRILLIC CAPITAL LETTER 
NJE + '\u045b' # 0x0094 -> CYRILLIC SMALL LETTER TSHE + '\u040b' # 0x0095 -> CYRILLIC CAPITAL LETTER TSHE + '\u045c' # 0x0096 -> CYRILLIC SMALL LETTER KJE + '\u040c' # 0x0097 -> CYRILLIC CAPITAL LETTER KJE + '\u045e' # 0x0098 -> CYRILLIC SMALL LETTER SHORT U + '\u040e' # 0x0099 -> CYRILLIC CAPITAL LETTER SHORT U + '\u045f' # 0x009a -> CYRILLIC SMALL LETTER DZHE + '\u040f' # 0x009b -> CYRILLIC CAPITAL LETTER DZHE + '\u044e' # 0x009c -> CYRILLIC SMALL LETTER YU + '\u042e' # 0x009d -> CYRILLIC CAPITAL LETTER YU + '\u044a' # 0x009e -> CYRILLIC SMALL LETTER HARD SIGN + '\u042a' # 0x009f -> CYRILLIC CAPITAL LETTER HARD SIGN + '\u0430' # 0x00a0 -> CYRILLIC SMALL LETTER A + '\u0410' # 0x00a1 -> CYRILLIC CAPITAL LETTER A + '\u0431' # 0x00a2 -> CYRILLIC SMALL LETTER BE + '\u0411' # 0x00a3 -> CYRILLIC CAPITAL LETTER BE + '\u0446' # 0x00a4 -> CYRILLIC SMALL LETTER TSE + '\u0426' # 0x00a5 -> CYRILLIC CAPITAL LETTER TSE + '\u0434' # 0x00a6 -> CYRILLIC SMALL LETTER DE + '\u0414' # 0x00a7 -> CYRILLIC CAPITAL LETTER DE + '\u0435' # 0x00a8 -> CYRILLIC SMALL LETTER IE + '\u0415' # 0x00a9 -> CYRILLIC CAPITAL LETTER IE + '\u0444' # 0x00aa -> CYRILLIC SMALL LETTER EF + '\u0424' # 0x00ab -> CYRILLIC CAPITAL LETTER EF + '\u0433' # 0x00ac -> CYRILLIC SMALL LETTER GHE + '\u0413' # 0x00ad -> CYRILLIC CAPITAL LETTER GHE + '\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\u2591' # 0x00b0 -> LIGHT SHADE + '\u2592' # 0x00b1 -> MEDIUM SHADE + '\u2593' # 0x00b2 -> DARK SHADE + '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL + '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT + '\u0445' # 0x00b5 -> CYRILLIC SMALL LETTER HA + '\u0425' # 0x00b6 -> CYRILLIC CAPITAL LETTER HA + '\u0438' # 0x00b7 -> CYRILLIC SMALL LETTER I + '\u0418' # 0x00b8 -> CYRILLIC CAPITAL LETTER I + '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT + '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL + '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT + '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT + '\u0439' # 0x00bd -> CYRILLIC SMALL LETTER SHORT I + '\u0419' # 0x00be -> CYRILLIC CAPITAL LETTER SHORT I + '\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT + '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT + '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL + '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + '\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT + '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL + '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + '\u043a' # 0x00c6 -> CYRILLIC SMALL LETTER KA + '\u041a' # 0x00c7 -> CYRILLIC CAPITAL LETTER KA + '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT + '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT + '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL + '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL + '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + '\xa4' # 0x00cf -> CURRENCY SIGN + '\u043b' # 0x00d0 -> CYRILLIC SMALL LETTER EL + '\u041b' # 0x00d1 -> CYRILLIC CAPITAL LETTER EL + '\u043c' # 0x00d2 -> CYRILLIC SMALL LETTER EM + '\u041c' # 0x00d3 -> CYRILLIC CAPITAL LETTER EM + '\u043d' # 0x00d4 -> CYRILLIC SMALL LETTER EN + '\u041d' # 0x00d5 -> CYRILLIC CAPITAL LETTER EN + '\u043e' # 0x00d6 -> CYRILLIC SMALL LETTER O + '\u041e' # 0x00d7 -> CYRILLIC CAPITAL LETTER O + '\u043f' # 
0x00d8 -> CYRILLIC SMALL LETTER PE + '\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT + '\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT + '\u2588' # 0x00db -> FULL BLOCK + '\u2584' # 0x00dc -> LOWER HALF BLOCK + '\u041f' # 0x00dd -> CYRILLIC CAPITAL LETTER PE + '\u044f' # 0x00de -> CYRILLIC SMALL LETTER YA + '\u2580' # 0x00df -> UPPER HALF BLOCK + '\u042f' # 0x00e0 -> CYRILLIC CAPITAL LETTER YA + '\u0440' # 0x00e1 -> CYRILLIC SMALL LETTER ER + '\u0420' # 0x00e2 -> CYRILLIC CAPITAL LETTER ER + '\u0441' # 0x00e3 -> CYRILLIC SMALL LETTER ES + '\u0421' # 0x00e4 -> CYRILLIC CAPITAL LETTER ES + '\u0442' # 0x00e5 -> CYRILLIC SMALL LETTER TE + '\u0422' # 0x00e6 -> CYRILLIC CAPITAL LETTER TE + '\u0443' # 0x00e7 -> CYRILLIC SMALL LETTER U + '\u0423' # 0x00e8 -> CYRILLIC CAPITAL LETTER U + '\u0436' # 0x00e9 -> CYRILLIC SMALL LETTER ZHE + '\u0416' # 0x00ea -> CYRILLIC CAPITAL LETTER ZHE + '\u0432' # 0x00eb -> CYRILLIC SMALL LETTER VE + '\u0412' # 0x00ec -> CYRILLIC CAPITAL LETTER VE + '\u044c' # 0x00ed -> CYRILLIC SMALL LETTER SOFT SIGN + '\u042c' # 0x00ee -> CYRILLIC CAPITAL LETTER SOFT SIGN + '\u2116' # 0x00ef -> NUMERO SIGN + '\xad' # 0x00f0 -> SOFT HYPHEN + '\u044b' # 0x00f1 -> CYRILLIC SMALL LETTER YERU + '\u042b' # 0x00f2 -> CYRILLIC CAPITAL LETTER YERU + '\u0437' # 0x00f3 -> CYRILLIC SMALL LETTER ZE + '\u0417' # 0x00f4 -> CYRILLIC CAPITAL LETTER ZE + '\u0448' # 0x00f5 -> CYRILLIC SMALL LETTER SHA + '\u0428' # 0x00f6 -> CYRILLIC CAPITAL LETTER SHA + '\u044d' # 0x00f7 -> CYRILLIC SMALL LETTER E + '\u042d' # 0x00f8 -> CYRILLIC CAPITAL LETTER E + '\u0449' # 0x00f9 -> CYRILLIC SMALL LETTER SHCHA + '\u0429' # 0x00fa -> CYRILLIC CAPITAL LETTER SHCHA + '\u0447' # 0x00fb -> CYRILLIC SMALL LETTER CHE + '\u0427' # 0x00fc -> CYRILLIC CAPITAL LETTER CHE + '\xa7' # 0x00fd -> SECTION SIGN + '\u25a0' # 0x00fe -> BLACK SQUARE + '\xa0' # 0x00ff -> NO-BREAK SPACE +) + +### Encoding Map + +encoding_map = { + 0x0000: 0x0000, # NULL + 0x0001: 0x0001, # START OF HEADING + 0x0002: 0x0002, # START OF TEXT + 0x0003: 0x0003, # END OF TEXT + 0x0004: 0x0004, # END OF TRANSMISSION + 0x0005: 0x0005, # ENQUIRY + 0x0006: 0x0006, # ACKNOWLEDGE + 0x0007: 0x0007, # BELL + 0x0008: 0x0008, # BACKSPACE + 0x0009: 0x0009, # HORIZONTAL TABULATION + 0x000a: 0x000a, # LINE FEED + 0x000b: 0x000b, # VERTICAL TABULATION + 0x000c: 0x000c, # FORM FEED + 0x000d: 0x000d, # CARRIAGE RETURN + 0x000e: 0x000e, # SHIFT OUT + 0x000f: 0x000f, # SHIFT IN + 0x0010: 0x0010, # DATA LINK ESCAPE + 0x0011: 0x0011, # DEVICE CONTROL ONE + 0x0012: 0x0012, # DEVICE CONTROL TWO + 0x0013: 0x0013, # DEVICE CONTROL THREE + 0x0014: 0x0014, # DEVICE CONTROL FOUR + 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE + 0x0016: 0x0016, # SYNCHRONOUS IDLE + 0x0017: 0x0017, # END OF TRANSMISSION BLOCK + 0x0018: 0x0018, # CANCEL + 0x0019: 0x0019, # END OF MEDIUM + 0x001a: 0x001a, # SUBSTITUTE + 0x001b: 0x001b, # ESCAPE + 0x001c: 0x001c, # FILE SEPARATOR + 0x001d: 0x001d, # GROUP SEPARATOR + 0x001e: 0x001e, # RECORD SEPARATOR + 0x001f: 0x001f, # UNIT SEPARATOR + 0x0020: 0x0020, # SPACE + 0x0021: 0x0021, # EXCLAMATION MARK + 0x0022: 0x0022, # QUOTATION MARK + 0x0023: 0x0023, # NUMBER SIGN + 0x0024: 0x0024, # DOLLAR SIGN + 0x0025: 0x0025, # PERCENT SIGN + 0x0026: 0x0026, # AMPERSAND + 0x0027: 0x0027, # APOSTROPHE + 0x0028: 0x0028, # LEFT PARENTHESIS + 0x0029: 0x0029, # RIGHT PARENTHESIS + 0x002a: 0x002a, # ASTERISK + 0x002b: 0x002b, # PLUS SIGN + 0x002c: 0x002c, # COMMA + 0x002d: 0x002d, # HYPHEN-MINUS + 0x002e: 0x002e, # FULL STOP + 0x002f: 0x002f, # SOLIDUS + 0x0030: 0x0030, # 
DIGIT ZERO + 0x0031: 0x0031, # DIGIT ONE + 0x0032: 0x0032, # DIGIT TWO + 0x0033: 0x0033, # DIGIT THREE + 0x0034: 0x0034, # DIGIT FOUR + 0x0035: 0x0035, # DIGIT FIVE + 0x0036: 0x0036, # DIGIT SIX + 0x0037: 0x0037, # DIGIT SEVEN + 0x0038: 0x0038, # DIGIT EIGHT + 0x0039: 0x0039, # DIGIT NINE + 0x003a: 0x003a, # COLON + 0x003b: 0x003b, # SEMICOLON + 0x003c: 0x003c, # LESS-THAN SIGN + 0x003d: 0x003d, # EQUALS SIGN + 0x003e: 0x003e, # GREATER-THAN SIGN + 0x003f: 0x003f, # QUESTION MARK + 0x0040: 0x0040, # COMMERCIAL AT + 0x0041: 0x0041, # LATIN CAPITAL LETTER A + 0x0042: 0x0042, # LATIN CAPITAL LETTER B + 0x0043: 0x0043, # LATIN CAPITAL LETTER C + 0x0044: 0x0044, # LATIN CAPITAL LETTER D + 0x0045: 0x0045, # LATIN CAPITAL LETTER E + 0x0046: 0x0046, # LATIN CAPITAL LETTER F + 0x0047: 0x0047, # LATIN CAPITAL LETTER G + 0x0048: 0x0048, # LATIN CAPITAL LETTER H + 0x0049: 0x0049, # LATIN CAPITAL LETTER I + 0x004a: 0x004a, # LATIN CAPITAL LETTER J + 0x004b: 0x004b, # LATIN CAPITAL LETTER K + 0x004c: 0x004c, # LATIN CAPITAL LETTER L + 0x004d: 0x004d, # LATIN CAPITAL LETTER M + 0x004e: 0x004e, # LATIN CAPITAL LETTER N + 0x004f: 0x004f, # LATIN CAPITAL LETTER O + 0x0050: 0x0050, # LATIN CAPITAL LETTER P + 0x0051: 0x0051, # LATIN CAPITAL LETTER Q + 0x0052: 0x0052, # LATIN CAPITAL LETTER R + 0x0053: 0x0053, # LATIN CAPITAL LETTER S + 0x0054: 0x0054, # LATIN CAPITAL LETTER T + 0x0055: 0x0055, # LATIN CAPITAL LETTER U + 0x0056: 0x0056, # LATIN CAPITAL LETTER V + 0x0057: 0x0057, # LATIN CAPITAL LETTER W + 0x0058: 0x0058, # LATIN CAPITAL LETTER X + 0x0059: 0x0059, # LATIN CAPITAL LETTER Y + 0x005a: 0x005a, # LATIN CAPITAL LETTER Z + 0x005b: 0x005b, # LEFT SQUARE BRACKET + 0x005c: 0x005c, # REVERSE SOLIDUS + 0x005d: 0x005d, # RIGHT SQUARE BRACKET + 0x005e: 0x005e, # CIRCUMFLEX ACCENT + 0x005f: 0x005f, # LOW LINE + 0x0060: 0x0060, # GRAVE ACCENT + 0x0061: 0x0061, # LATIN SMALL LETTER A + 0x0062: 0x0062, # LATIN SMALL LETTER B + 0x0063: 0x0063, # LATIN SMALL LETTER C + 0x0064: 0x0064, # LATIN SMALL LETTER D + 0x0065: 0x0065, # LATIN SMALL LETTER E + 0x0066: 0x0066, # LATIN SMALL LETTER F + 0x0067: 0x0067, # LATIN SMALL LETTER G + 0x0068: 0x0068, # LATIN SMALL LETTER H + 0x0069: 0x0069, # LATIN SMALL LETTER I + 0x006a: 0x006a, # LATIN SMALL LETTER J + 0x006b: 0x006b, # LATIN SMALL LETTER K + 0x006c: 0x006c, # LATIN SMALL LETTER L + 0x006d: 0x006d, # LATIN SMALL LETTER M + 0x006e: 0x006e, # LATIN SMALL LETTER N + 0x006f: 0x006f, # LATIN SMALL LETTER O + 0x0070: 0x0070, # LATIN SMALL LETTER P + 0x0071: 0x0071, # LATIN SMALL LETTER Q + 0x0072: 0x0072, # LATIN SMALL LETTER R + 0x0073: 0x0073, # LATIN SMALL LETTER S + 0x0074: 0x0074, # LATIN SMALL LETTER T + 0x0075: 0x0075, # LATIN SMALL LETTER U + 0x0076: 0x0076, # LATIN SMALL LETTER V + 0x0077: 0x0077, # LATIN SMALL LETTER W + 0x0078: 0x0078, # LATIN SMALL LETTER X + 0x0079: 0x0079, # LATIN SMALL LETTER Y + 0x007a: 0x007a, # LATIN SMALL LETTER Z + 0x007b: 0x007b, # LEFT CURLY BRACKET + 0x007c: 0x007c, # VERTICAL LINE + 0x007d: 0x007d, # RIGHT CURLY BRACKET + 0x007e: 0x007e, # TILDE + 0x007f: 0x007f, # DELETE + 0x00a0: 0x00ff, # NO-BREAK SPACE + 0x00a4: 0x00cf, # CURRENCY SIGN + 0x00a7: 0x00fd, # SECTION SIGN + 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00ad: 0x00f0, # SOFT HYPHEN + 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x0401: 0x0085, # CYRILLIC CAPITAL LETTER IO + 0x0402: 0x0081, # CYRILLIC CAPITAL LETTER DJE + 0x0403: 0x0083, # CYRILLIC CAPITAL LETTER GJE + 0x0404: 0x0087, # CYRILLIC CAPITAL LETTER UKRAINIAN IE + 0x0405: 
0x0089, # CYRILLIC CAPITAL LETTER DZE + 0x0406: 0x008b, # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I + 0x0407: 0x008d, # CYRILLIC CAPITAL LETTER YI + 0x0408: 0x008f, # CYRILLIC CAPITAL LETTER JE + 0x0409: 0x0091, # CYRILLIC CAPITAL LETTER LJE + 0x040a: 0x0093, # CYRILLIC CAPITAL LETTER NJE + 0x040b: 0x0095, # CYRILLIC CAPITAL LETTER TSHE + 0x040c: 0x0097, # CYRILLIC CAPITAL LETTER KJE + 0x040e: 0x0099, # CYRILLIC CAPITAL LETTER SHORT U + 0x040f: 0x009b, # CYRILLIC CAPITAL LETTER DZHE + 0x0410: 0x00a1, # CYRILLIC CAPITAL LETTER A + 0x0411: 0x00a3, # CYRILLIC CAPITAL LETTER BE + 0x0412: 0x00ec, # CYRILLIC CAPITAL LETTER VE + 0x0413: 0x00ad, # CYRILLIC CAPITAL LETTER GHE + 0x0414: 0x00a7, # CYRILLIC CAPITAL LETTER DE + 0x0415: 0x00a9, # CYRILLIC CAPITAL LETTER IE + 0x0416: 0x00ea, # CYRILLIC CAPITAL LETTER ZHE + 0x0417: 0x00f4, # CYRILLIC CAPITAL LETTER ZE + 0x0418: 0x00b8, # CYRILLIC CAPITAL LETTER I + 0x0419: 0x00be, # CYRILLIC CAPITAL LETTER SHORT I + 0x041a: 0x00c7, # CYRILLIC CAPITAL LETTER KA + 0x041b: 0x00d1, # CYRILLIC CAPITAL LETTER EL + 0x041c: 0x00d3, # CYRILLIC CAPITAL LETTER EM + 0x041d: 0x00d5, # CYRILLIC CAPITAL LETTER EN + 0x041e: 0x00d7, # CYRILLIC CAPITAL LETTER O + 0x041f: 0x00dd, # CYRILLIC CAPITAL LETTER PE + 0x0420: 0x00e2, # CYRILLIC CAPITAL LETTER ER + 0x0421: 0x00e4, # CYRILLIC CAPITAL LETTER ES + 0x0422: 0x00e6, # CYRILLIC CAPITAL LETTER TE + 0x0423: 0x00e8, # CYRILLIC CAPITAL LETTER U + 0x0424: 0x00ab, # CYRILLIC CAPITAL LETTER EF + 0x0425: 0x00b6, # CYRILLIC CAPITAL LETTER HA + 0x0426: 0x00a5, # CYRILLIC CAPITAL LETTER TSE + 0x0427: 0x00fc, # CYRILLIC CAPITAL LETTER CHE + 0x0428: 0x00f6, # CYRILLIC CAPITAL LETTER SHA + 0x0429: 0x00fa, # CYRILLIC CAPITAL LETTER SHCHA + 0x042a: 0x009f, # CYRILLIC CAPITAL LETTER HARD SIGN + 0x042b: 0x00f2, # CYRILLIC CAPITAL LETTER YERU + 0x042c: 0x00ee, # CYRILLIC CAPITAL LETTER SOFT SIGN + 0x042d: 0x00f8, # CYRILLIC CAPITAL LETTER E + 0x042e: 0x009d, # CYRILLIC CAPITAL LETTER YU + 0x042f: 0x00e0, # CYRILLIC CAPITAL LETTER YA + 0x0430: 0x00a0, # CYRILLIC SMALL LETTER A + 0x0431: 0x00a2, # CYRILLIC SMALL LETTER BE + 0x0432: 0x00eb, # CYRILLIC SMALL LETTER VE + 0x0433: 0x00ac, # CYRILLIC SMALL LETTER GHE + 0x0434: 0x00a6, # CYRILLIC SMALL LETTER DE + 0x0435: 0x00a8, # CYRILLIC SMALL LETTER IE + 0x0436: 0x00e9, # CYRILLIC SMALL LETTER ZHE + 0x0437: 0x00f3, # CYRILLIC SMALL LETTER ZE + 0x0438: 0x00b7, # CYRILLIC SMALL LETTER I + 0x0439: 0x00bd, # CYRILLIC SMALL LETTER SHORT I + 0x043a: 0x00c6, # CYRILLIC SMALL LETTER KA + 0x043b: 0x00d0, # CYRILLIC SMALL LETTER EL + 0x043c: 0x00d2, # CYRILLIC SMALL LETTER EM + 0x043d: 0x00d4, # CYRILLIC SMALL LETTER EN + 0x043e: 0x00d6, # CYRILLIC SMALL LETTER O + 0x043f: 0x00d8, # CYRILLIC SMALL LETTER PE + 0x0440: 0x00e1, # CYRILLIC SMALL LETTER ER + 0x0441: 0x00e3, # CYRILLIC SMALL LETTER ES + 0x0442: 0x00e5, # CYRILLIC SMALL LETTER TE + 0x0443: 0x00e7, # CYRILLIC SMALL LETTER U + 0x0444: 0x00aa, # CYRILLIC SMALL LETTER EF + 0x0445: 0x00b5, # CYRILLIC SMALL LETTER HA + 0x0446: 0x00a4, # CYRILLIC SMALL LETTER TSE + 0x0447: 0x00fb, # CYRILLIC SMALL LETTER CHE + 0x0448: 0x00f5, # CYRILLIC SMALL LETTER SHA + 0x0449: 0x00f9, # CYRILLIC SMALL LETTER SHCHA + 0x044a: 0x009e, # CYRILLIC SMALL LETTER HARD SIGN + 0x044b: 0x00f1, # CYRILLIC SMALL LETTER YERU + 0x044c: 0x00ed, # CYRILLIC SMALL LETTER SOFT SIGN + 0x044d: 0x00f7, # CYRILLIC SMALL LETTER E + 0x044e: 0x009c, # CYRILLIC SMALL LETTER YU + 0x044f: 0x00de, # CYRILLIC SMALL LETTER YA + 0x0451: 0x0084, # CYRILLIC SMALL LETTER IO + 0x0452: 0x0080, # 
CYRILLIC SMALL LETTER DJE + 0x0453: 0x0082, # CYRILLIC SMALL LETTER GJE + 0x0454: 0x0086, # CYRILLIC SMALL LETTER UKRAINIAN IE + 0x0455: 0x0088, # CYRILLIC SMALL LETTER DZE + 0x0456: 0x008a, # CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I + 0x0457: 0x008c, # CYRILLIC SMALL LETTER YI + 0x0458: 0x008e, # CYRILLIC SMALL LETTER JE + 0x0459: 0x0090, # CYRILLIC SMALL LETTER LJE + 0x045a: 0x0092, # CYRILLIC SMALL LETTER NJE + 0x045b: 0x0094, # CYRILLIC SMALL LETTER TSHE + 0x045c: 0x0096, # CYRILLIC SMALL LETTER KJE + 0x045e: 0x0098, # CYRILLIC SMALL LETTER SHORT U + 0x045f: 0x009a, # CYRILLIC SMALL LETTER DZHE + 0x2116: 0x00ef, # NUMERO SIGN + 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL + 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL + 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT + 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT + 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT + 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT + 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT + 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT + 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL + 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL + 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL + 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT + 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT + 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT + 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT + 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT + 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL + 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + 0x2580: 0x00df, # UPPER HALF BLOCK + 0x2584: 0x00dc, # LOWER HALF BLOCK + 0x2588: 0x00db, # FULL BLOCK + 0x2591: 0x00b0, # LIGHT SHADE + 0x2592: 0x00b1, # MEDIUM SHADE + 0x2593: 0x00b2, # DARK SHADE + 0x25a0: 0x00fe, # BLACK SQUARE +} diff --git a/venv/Lib/encodings/cp856.py b/venv/Lib/encodings/cp856.py new file mode 100644 index 00000000..cacbfb2f --- /dev/null +++ b/venv/Lib/encodings/cp856.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec cp856 generated from 'MAPPINGS/VENDORS/MISC/CP856.TXT' with gencodec.py. 
+ +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='cp856', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x04' # 0x04 -> END OF TRANSMISSION + '\x05' # 0x05 -> ENQUIRY + '\x06' # 0x06 -> ACKNOWLEDGE + '\x07' # 0x07 -> BELL + '\x08' # 0x08 -> BACKSPACE + '\t' # 0x09 -> HORIZONTAL TABULATION + '\n' # 0x0A -> LINE FEED + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x14' # 0x14 -> DEVICE CONTROL FOUR + '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x16 -> SYNCHRONOUS IDLE + '\x17' # 0x17 -> END OF TRANSMISSION BLOCK + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x1a' # 0x1A -> SUBSTITUTE + '\x1b' # 0x1B -> ESCAPE + '\x1c' # 0x1C -> FILE SEPARATOR + '\x1d' # 0x1D -> GROUP SEPARATOR + '\x1e' # 0x1E -> RECORD SEPARATOR + '\x1f' # 0x1F -> UNIT SEPARATOR + ' ' # 0x20 -> SPACE + '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' # 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' 
# 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> DELETE + '\u05d0' # 0x80 -> HEBREW LETTER ALEF + '\u05d1' # 0x81 -> HEBREW LETTER BET + '\u05d2' # 0x82 -> HEBREW LETTER GIMEL + '\u05d3' # 0x83 -> HEBREW LETTER DALET + '\u05d4' # 0x84 -> HEBREW LETTER HE + '\u05d5' # 0x85 -> HEBREW LETTER VAV + '\u05d6' # 0x86 -> HEBREW LETTER ZAYIN + '\u05d7' # 0x87 -> HEBREW LETTER HET + '\u05d8' # 0x88 -> HEBREW LETTER TET + '\u05d9' # 0x89 -> HEBREW LETTER YOD + '\u05da' # 0x8A -> HEBREW LETTER FINAL KAF + '\u05db' # 0x8B -> HEBREW LETTER KAF + '\u05dc' # 0x8C -> HEBREW LETTER LAMED + '\u05dd' # 0x8D -> HEBREW LETTER FINAL MEM + '\u05de' # 0x8E -> HEBREW LETTER MEM + '\u05df' # 0x8F -> HEBREW LETTER FINAL NUN + '\u05e0' # 0x90 -> HEBREW LETTER NUN + '\u05e1' # 0x91 -> HEBREW LETTER SAMEKH + '\u05e2' # 0x92 -> HEBREW LETTER AYIN + '\u05e3' # 0x93 -> HEBREW LETTER FINAL PE + '\u05e4' # 0x94 -> HEBREW LETTER PE + '\u05e5' # 0x95 -> HEBREW LETTER FINAL TSADI + '\u05e6' # 0x96 -> HEBREW LETTER TSADI + '\u05e7' # 0x97 -> HEBREW LETTER QOF + '\u05e8' # 0x98 -> HEBREW LETTER RESH + '\u05e9' # 0x99 -> HEBREW LETTER SHIN + '\u05ea' # 0x9A -> HEBREW LETTER TAV + '\ufffe' # 0x9B -> UNDEFINED + '\xa3' # 0x9C -> POUND SIGN + '\ufffe' # 0x9D -> 
UNDEFINED + '\xd7' # 0x9E -> MULTIPLICATION SIGN + '\ufffe' # 0x9F -> UNDEFINED + '\ufffe' # 0xA0 -> UNDEFINED + '\ufffe' # 0xA1 -> UNDEFINED + '\ufffe' # 0xA2 -> UNDEFINED + '\ufffe' # 0xA3 -> UNDEFINED + '\ufffe' # 0xA4 -> UNDEFINED + '\ufffe' # 0xA5 -> UNDEFINED + '\ufffe' # 0xA6 -> UNDEFINED + '\ufffe' # 0xA7 -> UNDEFINED + '\ufffe' # 0xA8 -> UNDEFINED + '\xae' # 0xA9 -> REGISTERED SIGN + '\xac' # 0xAA -> NOT SIGN + '\xbd' # 0xAB -> VULGAR FRACTION ONE HALF + '\xbc' # 0xAC -> VULGAR FRACTION ONE QUARTER + '\ufffe' # 0xAD -> UNDEFINED + '\xab' # 0xAE -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xbb' # 0xAF -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\u2591' # 0xB0 -> LIGHT SHADE + '\u2592' # 0xB1 -> MEDIUM SHADE + '\u2593' # 0xB2 -> DARK SHADE + '\u2502' # 0xB3 -> BOX DRAWINGS LIGHT VERTICAL + '\u2524' # 0xB4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT + '\ufffe' # 0xB5 -> UNDEFINED + '\ufffe' # 0xB6 -> UNDEFINED + '\ufffe' # 0xB7 -> UNDEFINED + '\xa9' # 0xB8 -> COPYRIGHT SIGN + '\u2563' # 0xB9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT + '\u2551' # 0xBA -> BOX DRAWINGS DOUBLE VERTICAL + '\u2557' # 0xBB -> BOX DRAWINGS DOUBLE DOWN AND LEFT + '\u255d' # 0xBC -> BOX DRAWINGS DOUBLE UP AND LEFT + '\xa2' # 0xBD -> CENT SIGN + '\xa5' # 0xBE -> YEN SIGN + '\u2510' # 0xBF -> BOX DRAWINGS LIGHT DOWN AND LEFT + '\u2514' # 0xC0 -> BOX DRAWINGS LIGHT UP AND RIGHT + '\u2534' # 0xC1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL + '\u252c' # 0xC2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + '\u251c' # 0xC3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT + '\u2500' # 0xC4 -> BOX DRAWINGS LIGHT HORIZONTAL + '\u253c' # 0xC5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + '\ufffe' # 0xC6 -> UNDEFINED + '\ufffe' # 0xC7 -> UNDEFINED + '\u255a' # 0xC8 -> BOX DRAWINGS DOUBLE UP AND RIGHT + '\u2554' # 0xC9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT + '\u2569' # 0xCA -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL + '\u2566' # 0xCB -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + '\u2560' # 0xCC -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + '\u2550' # 0xCD -> BOX DRAWINGS DOUBLE HORIZONTAL + '\u256c' # 0xCE -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + '\xa4' # 0xCF -> CURRENCY SIGN + '\ufffe' # 0xD0 -> UNDEFINED + '\ufffe' # 0xD1 -> UNDEFINED + '\ufffe' # 0xD2 -> UNDEFINED + '\ufffe' # 0xD3 -> UNDEFINED + '\ufffe' # 0xD4 -> UNDEFINED + '\ufffe' # 0xD5 -> UNDEFINED + '\ufffe' # 0xD6 -> UNDEFINED + '\ufffe' # 0xD7 -> UNDEFINED + '\ufffe' # 0xD8 -> UNDEFINED + '\u2518' # 0xD9 -> BOX DRAWINGS LIGHT UP AND LEFT + '\u250c' # 0xDA -> BOX DRAWINGS LIGHT DOWN AND RIGHT + '\u2588' # 0xDB -> FULL BLOCK + '\u2584' # 0xDC -> LOWER HALF BLOCK + '\xa6' # 0xDD -> BROKEN BAR + '\ufffe' # 0xDE -> UNDEFINED + '\u2580' # 0xDF -> UPPER HALF BLOCK + '\ufffe' # 0xE0 -> UNDEFINED + '\ufffe' # 0xE1 -> UNDEFINED + '\ufffe' # 0xE2 -> UNDEFINED + '\ufffe' # 0xE3 -> UNDEFINED + '\ufffe' # 0xE4 -> UNDEFINED + '\ufffe' # 0xE5 -> UNDEFINED + '\xb5' # 0xE6 -> MICRO SIGN + '\ufffe' # 0xE7 -> UNDEFINED + '\ufffe' # 0xE8 -> UNDEFINED + '\ufffe' # 0xE9 -> UNDEFINED + '\ufffe' # 0xEA -> UNDEFINED + '\ufffe' # 0xEB -> UNDEFINED + '\ufffe' # 0xEC -> UNDEFINED + '\ufffe' # 0xED -> UNDEFINED + '\xaf' # 0xEE -> MACRON + '\xb4' # 0xEF -> ACUTE ACCENT + '\xad' # 0xF0 -> SOFT HYPHEN + '\xb1' # 0xF1 -> PLUS-MINUS SIGN + '\u2017' # 0xF2 -> DOUBLE LOW LINE + '\xbe' # 0xF3 -> VULGAR FRACTION THREE QUARTERS + '\xb6' # 0xF4 -> PILCROW SIGN + '\xa7' # 0xF5 -> SECTION SIGN + '\xf7' # 0xF6 -> DIVISION SIGN + '\xb8' # 0xF7 -> CEDILLA + '\xb0' # 0xF8 -> DEGREE SIGN + '\xa8' # 0xF9 ->
DIAERESIS + '\xb7' # 0xFA -> MIDDLE DOT + '\xb9' # 0xFB -> SUPERSCRIPT ONE + '\xb3' # 0xFC -> SUPERSCRIPT THREE + '\xb2' # 0xFD -> SUPERSCRIPT TWO + '\u25a0' # 0xFE -> BLACK SQUARE + '\xa0' # 0xFF -> NO-BREAK SPACE +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/cp857.py b/venv/Lib/encodings/cp857.py new file mode 100644 index 00000000..741b059b --- /dev/null +++ b/venv/Lib/encodings/cp857.py @@ -0,0 +1,694 @@ +""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP857.TXT' with gencodec.py. + +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_map) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_map)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='cp857', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + +### Decoding Map + +decoding_map = codecs.make_identity_dict(range(256)) +decoding_map.update({ + 0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA + 0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS + 0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE + 0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX + 0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS + 0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE + 0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE + 0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA + 0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX + 0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS + 0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE + 0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS + 0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX + 0x008d: 0x0131, # LATIN SMALL LETTER DOTLESS I + 0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS + 0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE + 0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE + 0x0091: 0x00e6, # LATIN SMALL LIGATURE AE + 0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE + 0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX + 0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS + 0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE + 0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX + 0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE + 0x0098: 0x0130, # LATIN CAPITAL LETTER I WITH DOT ABOVE + 0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS + 0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS + 0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE + 0x009c: 0x00a3, # POUND SIGN + 0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE + 0x009e: 0x015e, # LATIN CAPITAL LETTER S WITH CEDILLA + 0x009f: 0x015f, # LATIN SMALL LETTER S WITH CEDILLA + 0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE + 0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE + 0x00a2: 0x00f3, # LATIN SMALL LETTER O 
WITH ACUTE + 0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE + 0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE + 0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE + 0x00a6: 0x011e, # LATIN CAPITAL LETTER G WITH BREVE + 0x00a7: 0x011f, # LATIN SMALL LETTER G WITH BREVE + 0x00a8: 0x00bf, # INVERTED QUESTION MARK + 0x00a9: 0x00ae, # REGISTERED SIGN + 0x00aa: 0x00ac, # NOT SIGN + 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF + 0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER + 0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK + 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00b0: 0x2591, # LIGHT SHADE + 0x00b1: 0x2592, # MEDIUM SHADE + 0x00b2: 0x2593, # DARK SHADE + 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL + 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT + 0x00b5: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE + 0x00b6: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX + 0x00b7: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE + 0x00b8: 0x00a9, # COPYRIGHT SIGN + 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT + 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL + 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT + 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT + 0x00bd: 0x00a2, # CENT SIGN + 0x00be: 0x00a5, # YEN SIGN + 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT + 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT + 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL + 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT + 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL + 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + 0x00c6: 0x00e3, # LATIN SMALL LETTER A WITH TILDE + 0x00c7: 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE + 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT + 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT + 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL + 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL + 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + 0x00cf: 0x00a4, # CURRENCY SIGN + 0x00d0: 0x00ba, # MASCULINE ORDINAL INDICATOR + 0x00d1: 0x00aa, # FEMININE ORDINAL INDICATOR + 0x00d2: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX + 0x00d3: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS + 0x00d4: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE + 0x00d5: None, # UNDEFINED + 0x00d6: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE + 0x00d7: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX + 0x00d8: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS + 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT + 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT + 0x00db: 0x2588, # FULL BLOCK + 0x00dc: 0x2584, # LOWER HALF BLOCK + 0x00dd: 0x00a6, # BROKEN BAR + 0x00de: 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE + 0x00df: 0x2580, # UPPER HALF BLOCK + 0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE + 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S + 0x00e2: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX + 0x00e3: 0x00d2, # LATIN CAPITAL LETTER O WITH GRAVE + 0x00e4: 0x00f5, # LATIN SMALL LETTER O WITH TILDE + 0x00e5: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE + 0x00e6: 0x00b5, # MICRO SIGN + 0x00e7: None, # UNDEFINED + 0x00e8: 0x00d7, # MULTIPLICATION SIGN + 0x00e9: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE + 0x00ea: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX + 
0x00eb: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE + 0x00ed: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS + 0x00ee: 0x00af, # MACRON + 0x00ef: 0x00b4, # ACUTE ACCENT + 0x00f0: 0x00ad, # SOFT HYPHEN + 0x00f1: 0x00b1, # PLUS-MINUS SIGN + 0x00f2: None, # UNDEFINED + 0x00f3: 0x00be, # VULGAR FRACTION THREE QUARTERS + 0x00f4: 0x00b6, # PILCROW SIGN + 0x00f5: 0x00a7, # SECTION SIGN + 0x00f6: 0x00f7, # DIVISION SIGN + 0x00f7: 0x00b8, # CEDILLA + 0x00f8: 0x00b0, # DEGREE SIGN + 0x00f9: 0x00a8, # DIAERESIS + 0x00fa: 0x00b7, # MIDDLE DOT + 0x00fb: 0x00b9, # SUPERSCRIPT ONE + 0x00fc: 0x00b3, # SUPERSCRIPT THREE + 0x00fd: 0x00b2, # SUPERSCRIPT TWO + 0x00fe: 0x25a0, # BLACK SQUARE + 0x00ff: 0x00a0, # NO-BREAK SPACE +}) + +### Decoding Table + +decoding_table = ( + '\x00' # 0x0000 -> NULL + '\x01' # 0x0001 -> START OF HEADING + '\x02' # 0x0002 -> START OF TEXT + '\x03' # 0x0003 -> END OF TEXT + '\x04' # 0x0004 -> END OF TRANSMISSION + '\x05' # 0x0005 -> ENQUIRY + '\x06' # 0x0006 -> ACKNOWLEDGE + '\x07' # 0x0007 -> BELL + '\x08' # 0x0008 -> BACKSPACE + '\t' # 0x0009 -> HORIZONTAL TABULATION + '\n' # 0x000a -> LINE FEED + '\x0b' # 0x000b -> VERTICAL TABULATION + '\x0c' # 0x000c -> FORM FEED + '\r' # 0x000d -> CARRIAGE RETURN + '\x0e' # 0x000e -> SHIFT OUT + '\x0f' # 0x000f -> SHIFT IN + '\x10' # 0x0010 -> DATA LINK ESCAPE + '\x11' # 0x0011 -> DEVICE CONTROL ONE + '\x12' # 0x0012 -> DEVICE CONTROL TWO + '\x13' # 0x0013 -> DEVICE CONTROL THREE + '\x14' # 0x0014 -> DEVICE CONTROL FOUR + '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x0016 -> SYNCHRONOUS IDLE + '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK + '\x18' # 0x0018 -> CANCEL + '\x19' # 0x0019 -> END OF MEDIUM + '\x1a' # 0x001a -> SUBSTITUTE + '\x1b' # 0x001b -> ESCAPE + '\x1c' # 0x001c -> FILE SEPARATOR + '\x1d' # 0x001d -> GROUP SEPARATOR + '\x1e' # 0x001e -> RECORD SEPARATOR + '\x1f' # 0x001f -> UNIT SEPARATOR + ' ' # 0x0020 -> SPACE + '!' # 0x0021 -> EXCLAMATION MARK + '"' # 0x0022 -> QUOTATION MARK + '#' # 0x0023 -> NUMBER SIGN + '$' # 0x0024 -> DOLLAR SIGN + '%' # 0x0025 -> PERCENT SIGN + '&' # 0x0026 -> AMPERSAND + "'" # 0x0027 -> APOSTROPHE + '(' # 0x0028 -> LEFT PARENTHESIS + ')' # 0x0029 -> RIGHT PARENTHESIS + '*' # 0x002a -> ASTERISK + '+' # 0x002b -> PLUS SIGN + ',' # 0x002c -> COMMA + '-' # 0x002d -> HYPHEN-MINUS + '.' # 0x002e -> FULL STOP + '/' # 0x002f -> SOLIDUS + '0' # 0x0030 -> DIGIT ZERO + '1' # 0x0031 -> DIGIT ONE + '2' # 0x0032 -> DIGIT TWO + '3' # 0x0033 -> DIGIT THREE + '4' # 0x0034 -> DIGIT FOUR + '5' # 0x0035 -> DIGIT FIVE + '6' # 0x0036 -> DIGIT SIX + '7' # 0x0037 -> DIGIT SEVEN + '8' # 0x0038 -> DIGIT EIGHT + '9' # 0x0039 -> DIGIT NINE + ':' # 0x003a -> COLON + ';' # 0x003b -> SEMICOLON + '<' # 0x003c -> LESS-THAN SIGN + '=' # 0x003d -> EQUALS SIGN + '>' # 0x003e -> GREATER-THAN SIGN + '?' 
# 0x003f -> QUESTION MARK + '@' # 0x0040 -> COMMERCIAL AT + 'A' # 0x0041 -> LATIN CAPITAL LETTER A + 'B' # 0x0042 -> LATIN CAPITAL LETTER B + 'C' # 0x0043 -> LATIN CAPITAL LETTER C + 'D' # 0x0044 -> LATIN CAPITAL LETTER D + 'E' # 0x0045 -> LATIN CAPITAL LETTER E + 'F' # 0x0046 -> LATIN CAPITAL LETTER F + 'G' # 0x0047 -> LATIN CAPITAL LETTER G + 'H' # 0x0048 -> LATIN CAPITAL LETTER H + 'I' # 0x0049 -> LATIN CAPITAL LETTER I + 'J' # 0x004a -> LATIN CAPITAL LETTER J + 'K' # 0x004b -> LATIN CAPITAL LETTER K + 'L' # 0x004c -> LATIN CAPITAL LETTER L + 'M' # 0x004d -> LATIN CAPITAL LETTER M + 'N' # 0x004e -> LATIN CAPITAL LETTER N + 'O' # 0x004f -> LATIN CAPITAL LETTER O + 'P' # 0x0050 -> LATIN CAPITAL LETTER P + 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q + 'R' # 0x0052 -> LATIN CAPITAL LETTER R + 'S' # 0x0053 -> LATIN CAPITAL LETTER S + 'T' # 0x0054 -> LATIN CAPITAL LETTER T + 'U' # 0x0055 -> LATIN CAPITAL LETTER U + 'V' # 0x0056 -> LATIN CAPITAL LETTER V + 'W' # 0x0057 -> LATIN CAPITAL LETTER W + 'X' # 0x0058 -> LATIN CAPITAL LETTER X + 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y + 'Z' # 0x005a -> LATIN CAPITAL LETTER Z + '[' # 0x005b -> LEFT SQUARE BRACKET + '\\' # 0x005c -> REVERSE SOLIDUS + ']' # 0x005d -> RIGHT SQUARE BRACKET + '^' # 0x005e -> CIRCUMFLEX ACCENT + '_' # 0x005f -> LOW LINE + '`' # 0x0060 -> GRAVE ACCENT + 'a' # 0x0061 -> LATIN SMALL LETTER A + 'b' # 0x0062 -> LATIN SMALL LETTER B + 'c' # 0x0063 -> LATIN SMALL LETTER C + 'd' # 0x0064 -> LATIN SMALL LETTER D + 'e' # 0x0065 -> LATIN SMALL LETTER E + 'f' # 0x0066 -> LATIN SMALL LETTER F + 'g' # 0x0067 -> LATIN SMALL LETTER G + 'h' # 0x0068 -> LATIN SMALL LETTER H + 'i' # 0x0069 -> LATIN SMALL LETTER I + 'j' # 0x006a -> LATIN SMALL LETTER J + 'k' # 0x006b -> LATIN SMALL LETTER K + 'l' # 0x006c -> LATIN SMALL LETTER L + 'm' # 0x006d -> LATIN SMALL LETTER M + 'n' # 0x006e -> LATIN SMALL LETTER N + 'o' # 0x006f -> LATIN SMALL LETTER O + 'p' # 0x0070 -> LATIN SMALL LETTER P + 'q' # 0x0071 -> LATIN SMALL LETTER Q + 'r' # 0x0072 -> LATIN SMALL LETTER R + 's' # 0x0073 -> LATIN SMALL LETTER S + 't' # 0x0074 -> LATIN SMALL LETTER T + 'u' # 0x0075 -> LATIN SMALL LETTER U + 'v' # 0x0076 -> LATIN SMALL LETTER V + 'w' # 0x0077 -> LATIN SMALL LETTER W + 'x' # 0x0078 -> LATIN SMALL LETTER X + 'y' # 0x0079 -> LATIN SMALL LETTER Y + 'z' # 0x007a -> LATIN SMALL LETTER Z + '{' # 0x007b -> LEFT CURLY BRACKET + '|' # 0x007c -> VERTICAL LINE + '}' # 0x007d -> RIGHT CURLY BRACKET + '~' # 0x007e -> TILDE + '\x7f' # 0x007f -> DELETE + '\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA + '\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS + '\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE + '\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX + '\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS + '\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE + '\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE + '\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA + '\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX + '\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS + '\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE + '\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS + '\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX + '\u0131' # 0x008d -> LATIN SMALL LETTER DOTLESS I + '\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS + '\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE + '\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE + '\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE + '\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE + 
'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX + '\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS + '\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE + '\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX + '\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE + '\u0130' # 0x0098 -> LATIN CAPITAL LETTER I WITH DOT ABOVE + '\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS + '\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE + '\xa3' # 0x009c -> POUND SIGN + '\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE + '\u015e' # 0x009e -> LATIN CAPITAL LETTER S WITH CEDILLA + '\u015f' # 0x009f -> LATIN SMALL LETTER S WITH CEDILLA + '\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE + '\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE + '\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE + '\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE + '\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE + '\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE + '\u011e' # 0x00a6 -> LATIN CAPITAL LETTER G WITH BREVE + '\u011f' # 0x00a7 -> LATIN SMALL LETTER G WITH BREVE + '\xbf' # 0x00a8 -> INVERTED QUESTION MARK + '\xae' # 0x00a9 -> REGISTERED SIGN + '\xac' # 0x00aa -> NOT SIGN + '\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF + '\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER + '\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK + '\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\u2591' # 0x00b0 -> LIGHT SHADE + '\u2592' # 0x00b1 -> MEDIUM SHADE + '\u2593' # 0x00b2 -> DARK SHADE + '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL + '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT + '\xc1' # 0x00b5 -> LATIN CAPITAL LETTER A WITH ACUTE + '\xc2' # 0x00b6 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX + '\xc0' # 0x00b7 -> LATIN CAPITAL LETTER A WITH GRAVE + '\xa9' # 0x00b8 -> COPYRIGHT SIGN + '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT + '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL + '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT + '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT + '\xa2' # 0x00bd -> CENT SIGN + '\xa5' # 0x00be -> YEN SIGN + '\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT + '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT + '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL + '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + '\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT + '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL + '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + '\xe3' # 0x00c6 -> LATIN SMALL LETTER A WITH TILDE + '\xc3' # 0x00c7 -> LATIN CAPITAL LETTER A WITH TILDE + '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT + '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT + '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL + '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL + '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + '\xa4' # 0x00cf -> CURRENCY SIGN + '\xba' # 0x00d0 -> MASCULINE ORDINAL INDICATOR + '\xaa' # 0x00d1 -> FEMININE ORDINAL INDICATOR + '\xca' # 0x00d2 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX + '\xcb' # 0x00d3 -> LATIN CAPITAL LETTER E WITH DIAERESIS + '\xc8' # 0x00d4 -> LATIN CAPITAL LETTER E WITH GRAVE + '\ufffe' # 0x00d5 -> UNDEFINED + '\xcd' # 0x00d6 -> LATIN CAPITAL 
LETTER I WITH ACUTE + '\xce' # 0x00d7 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX + '\xcf' # 0x00d8 -> LATIN CAPITAL LETTER I WITH DIAERESIS + '\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT + '\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT + '\u2588' # 0x00db -> FULL BLOCK + '\u2584' # 0x00dc -> LOWER HALF BLOCK + '\xa6' # 0x00dd -> BROKEN BAR + '\xcc' # 0x00de -> LATIN CAPITAL LETTER I WITH GRAVE + '\u2580' # 0x00df -> UPPER HALF BLOCK + '\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE + '\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S + '\xd4' # 0x00e2 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX + '\xd2' # 0x00e3 -> LATIN CAPITAL LETTER O WITH GRAVE + '\xf5' # 0x00e4 -> LATIN SMALL LETTER O WITH TILDE + '\xd5' # 0x00e5 -> LATIN CAPITAL LETTER O WITH TILDE + '\xb5' # 0x00e6 -> MICRO SIGN + '\ufffe' # 0x00e7 -> UNDEFINED + '\xd7' # 0x00e8 -> MULTIPLICATION SIGN + '\xda' # 0x00e9 -> LATIN CAPITAL LETTER U WITH ACUTE + '\xdb' # 0x00ea -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX + '\xd9' # 0x00eb -> LATIN CAPITAL LETTER U WITH GRAVE + '\xec' # 0x00ec -> LATIN SMALL LETTER I WITH GRAVE + '\xff' # 0x00ed -> LATIN SMALL LETTER Y WITH DIAERESIS + '\xaf' # 0x00ee -> MACRON + '\xb4' # 0x00ef -> ACUTE ACCENT + '\xad' # 0x00f0 -> SOFT HYPHEN + '\xb1' # 0x00f1 -> PLUS-MINUS SIGN + '\ufffe' # 0x00f2 -> UNDEFINED + '\xbe' # 0x00f3 -> VULGAR FRACTION THREE QUARTERS + '\xb6' # 0x00f4 -> PILCROW SIGN + '\xa7' # 0x00f5 -> SECTION SIGN + '\xf7' # 0x00f6 -> DIVISION SIGN + '\xb8' # 0x00f7 -> CEDILLA + '\xb0' # 0x00f8 -> DEGREE SIGN + '\xa8' # 0x00f9 -> DIAERESIS + '\xb7' # 0x00fa -> MIDDLE DOT + '\xb9' # 0x00fb -> SUPERSCRIPT ONE + '\xb3' # 0x00fc -> SUPERSCRIPT THREE + '\xb2' # 0x00fd -> SUPERSCRIPT TWO + '\u25a0' # 0x00fe -> BLACK SQUARE + '\xa0' # 0x00ff -> NO-BREAK SPACE +) + +### Encoding Map + +encoding_map = { + 0x0000: 0x0000, # NULL + 0x0001: 0x0001, # START OF HEADING + 0x0002: 0x0002, # START OF TEXT + 0x0003: 0x0003, # END OF TEXT + 0x0004: 0x0004, # END OF TRANSMISSION + 0x0005: 0x0005, # ENQUIRY + 0x0006: 0x0006, # ACKNOWLEDGE + 0x0007: 0x0007, # BELL + 0x0008: 0x0008, # BACKSPACE + 0x0009: 0x0009, # HORIZONTAL TABULATION + 0x000a: 0x000a, # LINE FEED + 0x000b: 0x000b, # VERTICAL TABULATION + 0x000c: 0x000c, # FORM FEED + 0x000d: 0x000d, # CARRIAGE RETURN + 0x000e: 0x000e, # SHIFT OUT + 0x000f: 0x000f, # SHIFT IN + 0x0010: 0x0010, # DATA LINK ESCAPE + 0x0011: 0x0011, # DEVICE CONTROL ONE + 0x0012: 0x0012, # DEVICE CONTROL TWO + 0x0013: 0x0013, # DEVICE CONTROL THREE + 0x0014: 0x0014, # DEVICE CONTROL FOUR + 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE + 0x0016: 0x0016, # SYNCHRONOUS IDLE + 0x0017: 0x0017, # END OF TRANSMISSION BLOCK + 0x0018: 0x0018, # CANCEL + 0x0019: 0x0019, # END OF MEDIUM + 0x001a: 0x001a, # SUBSTITUTE + 0x001b: 0x001b, # ESCAPE + 0x001c: 0x001c, # FILE SEPARATOR + 0x001d: 0x001d, # GROUP SEPARATOR + 0x001e: 0x001e, # RECORD SEPARATOR + 0x001f: 0x001f, # UNIT SEPARATOR + 0x0020: 0x0020, # SPACE + 0x0021: 0x0021, # EXCLAMATION MARK + 0x0022: 0x0022, # QUOTATION MARK + 0x0023: 0x0023, # NUMBER SIGN + 0x0024: 0x0024, # DOLLAR SIGN + 0x0025: 0x0025, # PERCENT SIGN + 0x0026: 0x0026, # AMPERSAND + 0x0027: 0x0027, # APOSTROPHE + 0x0028: 0x0028, # LEFT PARENTHESIS + 0x0029: 0x0029, # RIGHT PARENTHESIS + 0x002a: 0x002a, # ASTERISK + 0x002b: 0x002b, # PLUS SIGN + 0x002c: 0x002c, # COMMA + 0x002d: 0x002d, # HYPHEN-MINUS + 0x002e: 0x002e, # FULL STOP + 0x002f: 0x002f, # SOLIDUS + 0x0030: 0x0030, # DIGIT ZERO + 0x0031: 0x0031, # DIGIT ONE + 0x0032: 0x0032, # DIGIT TWO + 0x0033: 0x0033, 
# DIGIT THREE + 0x0034: 0x0034, # DIGIT FOUR + 0x0035: 0x0035, # DIGIT FIVE + 0x0036: 0x0036, # DIGIT SIX + 0x0037: 0x0037, # DIGIT SEVEN + 0x0038: 0x0038, # DIGIT EIGHT + 0x0039: 0x0039, # DIGIT NINE + 0x003a: 0x003a, # COLON + 0x003b: 0x003b, # SEMICOLON + 0x003c: 0x003c, # LESS-THAN SIGN + 0x003d: 0x003d, # EQUALS SIGN + 0x003e: 0x003e, # GREATER-THAN SIGN + 0x003f: 0x003f, # QUESTION MARK + 0x0040: 0x0040, # COMMERCIAL AT + 0x0041: 0x0041, # LATIN CAPITAL LETTER A + 0x0042: 0x0042, # LATIN CAPITAL LETTER B + 0x0043: 0x0043, # LATIN CAPITAL LETTER C + 0x0044: 0x0044, # LATIN CAPITAL LETTER D + 0x0045: 0x0045, # LATIN CAPITAL LETTER E + 0x0046: 0x0046, # LATIN CAPITAL LETTER F + 0x0047: 0x0047, # LATIN CAPITAL LETTER G + 0x0048: 0x0048, # LATIN CAPITAL LETTER H + 0x0049: 0x0049, # LATIN CAPITAL LETTER I + 0x004a: 0x004a, # LATIN CAPITAL LETTER J + 0x004b: 0x004b, # LATIN CAPITAL LETTER K + 0x004c: 0x004c, # LATIN CAPITAL LETTER L + 0x004d: 0x004d, # LATIN CAPITAL LETTER M + 0x004e: 0x004e, # LATIN CAPITAL LETTER N + 0x004f: 0x004f, # LATIN CAPITAL LETTER O + 0x0050: 0x0050, # LATIN CAPITAL LETTER P + 0x0051: 0x0051, # LATIN CAPITAL LETTER Q + 0x0052: 0x0052, # LATIN CAPITAL LETTER R + 0x0053: 0x0053, # LATIN CAPITAL LETTER S + 0x0054: 0x0054, # LATIN CAPITAL LETTER T + 0x0055: 0x0055, # LATIN CAPITAL LETTER U + 0x0056: 0x0056, # LATIN CAPITAL LETTER V + 0x0057: 0x0057, # LATIN CAPITAL LETTER W + 0x0058: 0x0058, # LATIN CAPITAL LETTER X + 0x0059: 0x0059, # LATIN CAPITAL LETTER Y + 0x005a: 0x005a, # LATIN CAPITAL LETTER Z + 0x005b: 0x005b, # LEFT SQUARE BRACKET + 0x005c: 0x005c, # REVERSE SOLIDUS + 0x005d: 0x005d, # RIGHT SQUARE BRACKET + 0x005e: 0x005e, # CIRCUMFLEX ACCENT + 0x005f: 0x005f, # LOW LINE + 0x0060: 0x0060, # GRAVE ACCENT + 0x0061: 0x0061, # LATIN SMALL LETTER A + 0x0062: 0x0062, # LATIN SMALL LETTER B + 0x0063: 0x0063, # LATIN SMALL LETTER C + 0x0064: 0x0064, # LATIN SMALL LETTER D + 0x0065: 0x0065, # LATIN SMALL LETTER E + 0x0066: 0x0066, # LATIN SMALL LETTER F + 0x0067: 0x0067, # LATIN SMALL LETTER G + 0x0068: 0x0068, # LATIN SMALL LETTER H + 0x0069: 0x0069, # LATIN SMALL LETTER I + 0x006a: 0x006a, # LATIN SMALL LETTER J + 0x006b: 0x006b, # LATIN SMALL LETTER K + 0x006c: 0x006c, # LATIN SMALL LETTER L + 0x006d: 0x006d, # LATIN SMALL LETTER M + 0x006e: 0x006e, # LATIN SMALL LETTER N + 0x006f: 0x006f, # LATIN SMALL LETTER O + 0x0070: 0x0070, # LATIN SMALL LETTER P + 0x0071: 0x0071, # LATIN SMALL LETTER Q + 0x0072: 0x0072, # LATIN SMALL LETTER R + 0x0073: 0x0073, # LATIN SMALL LETTER S + 0x0074: 0x0074, # LATIN SMALL LETTER T + 0x0075: 0x0075, # LATIN SMALL LETTER U + 0x0076: 0x0076, # LATIN SMALL LETTER V + 0x0077: 0x0077, # LATIN SMALL LETTER W + 0x0078: 0x0078, # LATIN SMALL LETTER X + 0x0079: 0x0079, # LATIN SMALL LETTER Y + 0x007a: 0x007a, # LATIN SMALL LETTER Z + 0x007b: 0x007b, # LEFT CURLY BRACKET + 0x007c: 0x007c, # VERTICAL LINE + 0x007d: 0x007d, # RIGHT CURLY BRACKET + 0x007e: 0x007e, # TILDE + 0x007f: 0x007f, # DELETE + 0x00a0: 0x00ff, # NO-BREAK SPACE + 0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK + 0x00a2: 0x00bd, # CENT SIGN + 0x00a3: 0x009c, # POUND SIGN + 0x00a4: 0x00cf, # CURRENCY SIGN + 0x00a5: 0x00be, # YEN SIGN + 0x00a6: 0x00dd, # BROKEN BAR + 0x00a7: 0x00f5, # SECTION SIGN + 0x00a8: 0x00f9, # DIAERESIS + 0x00a9: 0x00b8, # COPYRIGHT SIGN + 0x00aa: 0x00d1, # FEMININE ORDINAL INDICATOR + 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00ac: 0x00aa, # NOT SIGN + 0x00ad: 0x00f0, # SOFT HYPHEN + 0x00ae: 0x00a9, # REGISTERED SIGN + 0x00af: 0x00ee, 
# MACRON + 0x00b0: 0x00f8, # DEGREE SIGN + 0x00b1: 0x00f1, # PLUS-MINUS SIGN + 0x00b2: 0x00fd, # SUPERSCRIPT TWO + 0x00b3: 0x00fc, # SUPERSCRIPT THREE + 0x00b4: 0x00ef, # ACUTE ACCENT + 0x00b5: 0x00e6, # MICRO SIGN + 0x00b6: 0x00f4, # PILCROW SIGN + 0x00b7: 0x00fa, # MIDDLE DOT + 0x00b8: 0x00f7, # CEDILLA + 0x00b9: 0x00fb, # SUPERSCRIPT ONE + 0x00ba: 0x00d0, # MASCULINE ORDINAL INDICATOR + 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER + 0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF + 0x00be: 0x00f3, # VULGAR FRACTION THREE QUARTERS + 0x00bf: 0x00a8, # INVERTED QUESTION MARK + 0x00c0: 0x00b7, # LATIN CAPITAL LETTER A WITH GRAVE + 0x00c1: 0x00b5, # LATIN CAPITAL LETTER A WITH ACUTE + 0x00c2: 0x00b6, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX + 0x00c3: 0x00c7, # LATIN CAPITAL LETTER A WITH TILDE + 0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS + 0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE + 0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE + 0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA + 0x00c8: 0x00d4, # LATIN CAPITAL LETTER E WITH GRAVE + 0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE + 0x00ca: 0x00d2, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX + 0x00cb: 0x00d3, # LATIN CAPITAL LETTER E WITH DIAERESIS + 0x00cc: 0x00de, # LATIN CAPITAL LETTER I WITH GRAVE + 0x00cd: 0x00d6, # LATIN CAPITAL LETTER I WITH ACUTE + 0x00ce: 0x00d7, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX + 0x00cf: 0x00d8, # LATIN CAPITAL LETTER I WITH DIAERESIS + 0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE + 0x00d2: 0x00e3, # LATIN CAPITAL LETTER O WITH GRAVE + 0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE + 0x00d4: 0x00e2, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX + 0x00d5: 0x00e5, # LATIN CAPITAL LETTER O WITH TILDE + 0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS + 0x00d7: 0x00e8, # MULTIPLICATION SIGN + 0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE + 0x00d9: 0x00eb, # LATIN CAPITAL LETTER U WITH GRAVE + 0x00da: 0x00e9, # LATIN CAPITAL LETTER U WITH ACUTE + 0x00db: 0x00ea, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX + 0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS + 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S + 0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE + 0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE + 0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX + 0x00e3: 0x00c6, # LATIN SMALL LETTER A WITH TILDE + 0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS + 0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE + 0x00e6: 0x0091, # LATIN SMALL LIGATURE AE + 0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA + 0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE + 0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE + 0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX + 0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS + 0x00ec: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE + 0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE + 0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX + 0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS + 0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE + 0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE + 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE + 0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX + 0x00f5: 0x00e4, # LATIN SMALL LETTER O WITH TILDE + 0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS + 0x00f7: 0x00f6, # DIVISION SIGN + 0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE + 0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE + 
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE + 0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX + 0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS + 0x00ff: 0x00ed, # LATIN SMALL LETTER Y WITH DIAERESIS + 0x011e: 0x00a6, # LATIN CAPITAL LETTER G WITH BREVE + 0x011f: 0x00a7, # LATIN SMALL LETTER G WITH BREVE + 0x0130: 0x0098, # LATIN CAPITAL LETTER I WITH DOT ABOVE + 0x0131: 0x008d, # LATIN SMALL LETTER DOTLESS I + 0x015e: 0x009e, # LATIN CAPITAL LETTER S WITH CEDILLA + 0x015f: 0x009f, # LATIN SMALL LETTER S WITH CEDILLA + 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL + 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL + 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT + 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT + 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT + 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT + 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT + 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT + 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL + 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL + 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL + 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT + 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT + 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT + 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT + 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT + 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL + 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + 0x2580: 0x00df, # UPPER HALF BLOCK + 0x2584: 0x00dc, # LOWER HALF BLOCK + 0x2588: 0x00db, # FULL BLOCK + 0x2591: 0x00b0, # LIGHT SHADE + 0x2592: 0x00b1, # MEDIUM SHADE + 0x2593: 0x00b2, # DARK SHADE + 0x25a0: 0x00fe, # BLACK SQUARE +} diff --git a/venv/Lib/encodings/cp858.py b/venv/Lib/encodings/cp858.py new file mode 100644 index 00000000..7579f525 --- /dev/null +++ b/venv/Lib/encodings/cp858.py @@ -0,0 +1,698 @@ +""" Python Character Mapping Codec for CP858, modified from cp850. 
+ +""" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_map) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_map)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='cp858', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + +### Decoding Map + +decoding_map = codecs.make_identity_dict(range(256)) +decoding_map.update({ + 0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA + 0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS + 0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE + 0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX + 0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS + 0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE + 0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE + 0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA + 0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX + 0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS + 0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE + 0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS + 0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX + 0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE + 0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS + 0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE + 0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE + 0x0091: 0x00e6, # LATIN SMALL LIGATURE AE + 0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE + 0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX + 0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS + 0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE + 0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX + 0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE + 0x0098: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS + 0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS + 0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS + 0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE + 0x009c: 0x00a3, # POUND SIGN + 0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE + 0x009e: 0x00d7, # MULTIPLICATION SIGN + 0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK + 0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE + 0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE + 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE + 0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE + 0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE + 0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE + 0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR + 0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR + 0x00a8: 0x00bf, # INVERTED QUESTION MARK + 0x00a9: 0x00ae, # REGISTERED SIGN + 0x00aa: 0x00ac, # NOT SIGN + 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF + 0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER + 0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK + 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00af: 0x00bb, # 
RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00b0: 0x2591, # LIGHT SHADE + 0x00b1: 0x2592, # MEDIUM SHADE + 0x00b2: 0x2593, # DARK SHADE + 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL + 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT + 0x00b5: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE + 0x00b6: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX + 0x00b7: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE + 0x00b8: 0x00a9, # COPYRIGHT SIGN + 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT + 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL + 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT + 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT + 0x00bd: 0x00a2, # CENT SIGN + 0x00be: 0x00a5, # YEN SIGN + 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT + 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT + 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL + 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT + 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL + 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + 0x00c6: 0x00e3, # LATIN SMALL LETTER A WITH TILDE + 0x00c7: 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE + 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT + 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT + 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL + 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL + 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + 0x00cf: 0x00a4, # CURRENCY SIGN + 0x00d0: 0x00f0, # LATIN SMALL LETTER ETH + 0x00d1: 0x00d0, # LATIN CAPITAL LETTER ETH + 0x00d2: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX + 0x00d3: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS + 0x00d4: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE + 0x00d5: 0x20ac, # EURO SIGN + 0x00d6: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE + 0x00d7: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX + 0x00d8: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS + 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT + 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT + 0x00db: 0x2588, # FULL BLOCK + 0x00dc: 0x2584, # LOWER HALF BLOCK + 0x00dd: 0x00a6, # BROKEN BAR + 0x00de: 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE + 0x00df: 0x2580, # UPPER HALF BLOCK + 0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE + 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S + 0x00e2: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX + 0x00e3: 0x00d2, # LATIN CAPITAL LETTER O WITH GRAVE + 0x00e4: 0x00f5, # LATIN SMALL LETTER O WITH TILDE + 0x00e5: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE + 0x00e6: 0x00b5, # MICRO SIGN + 0x00e7: 0x00fe, # LATIN SMALL LETTER THORN + 0x00e8: 0x00de, # LATIN CAPITAL LETTER THORN + 0x00e9: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE + 0x00ea: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX + 0x00eb: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE + 0x00ec: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE + 0x00ed: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE + 0x00ee: 0x00af, # MACRON + 0x00ef: 0x00b4, # ACUTE ACCENT + 0x00f0: 0x00ad, # SOFT HYPHEN + 0x00f1: 0x00b1, # PLUS-MINUS SIGN + 0x00f2: 0x2017, # DOUBLE LOW LINE + 0x00f3: 0x00be, # VULGAR FRACTION THREE QUARTERS + 0x00f4: 0x00b6, # PILCROW SIGN + 0x00f5: 0x00a7, # SECTION SIGN + 0x00f6: 0x00f7, # DIVISION SIGN + 0x00f7: 0x00b8, # CEDILLA + 0x00f8: 0x00b0, # DEGREE SIGN + 0x00f9: 0x00a8, # DIAERESIS + 0x00fa: 0x00b7, # MIDDLE 
DOT + 0x00fb: 0x00b9, # SUPERSCRIPT ONE + 0x00fc: 0x00b3, # SUPERSCRIPT THREE + 0x00fd: 0x00b2, # SUPERSCRIPT TWO + 0x00fe: 0x25a0, # BLACK SQUARE + 0x00ff: 0x00a0, # NO-BREAK SPACE +}) + +### Decoding Table + +decoding_table = ( + '\x00' # 0x0000 -> NULL + '\x01' # 0x0001 -> START OF HEADING + '\x02' # 0x0002 -> START OF TEXT + '\x03' # 0x0003 -> END OF TEXT + '\x04' # 0x0004 -> END OF TRANSMISSION + '\x05' # 0x0005 -> ENQUIRY + '\x06' # 0x0006 -> ACKNOWLEDGE + '\x07' # 0x0007 -> BELL + '\x08' # 0x0008 -> BACKSPACE + '\t' # 0x0009 -> HORIZONTAL TABULATION + '\n' # 0x000a -> LINE FEED + '\x0b' # 0x000b -> VERTICAL TABULATION + '\x0c' # 0x000c -> FORM FEED + '\r' # 0x000d -> CARRIAGE RETURN + '\x0e' # 0x000e -> SHIFT OUT + '\x0f' # 0x000f -> SHIFT IN + '\x10' # 0x0010 -> DATA LINK ESCAPE + '\x11' # 0x0011 -> DEVICE CONTROL ONE + '\x12' # 0x0012 -> DEVICE CONTROL TWO + '\x13' # 0x0013 -> DEVICE CONTROL THREE + '\x14' # 0x0014 -> DEVICE CONTROL FOUR + '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x0016 -> SYNCHRONOUS IDLE + '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK + '\x18' # 0x0018 -> CANCEL + '\x19' # 0x0019 -> END OF MEDIUM + '\x1a' # 0x001a -> SUBSTITUTE + '\x1b' # 0x001b -> ESCAPE + '\x1c' # 0x001c -> FILE SEPARATOR + '\x1d' # 0x001d -> GROUP SEPARATOR + '\x1e' # 0x001e -> RECORD SEPARATOR + '\x1f' # 0x001f -> UNIT SEPARATOR + ' ' # 0x0020 -> SPACE + '!' # 0x0021 -> EXCLAMATION MARK + '"' # 0x0022 -> QUOTATION MARK + '#' # 0x0023 -> NUMBER SIGN + '$' # 0x0024 -> DOLLAR SIGN + '%' # 0x0025 -> PERCENT SIGN + '&' # 0x0026 -> AMPERSAND + "'" # 0x0027 -> APOSTROPHE + '(' # 0x0028 -> LEFT PARENTHESIS + ')' # 0x0029 -> RIGHT PARENTHESIS + '*' # 0x002a -> ASTERISK + '+' # 0x002b -> PLUS SIGN + ',' # 0x002c -> COMMA + '-' # 0x002d -> HYPHEN-MINUS + '.' # 0x002e -> FULL STOP + '/' # 0x002f -> SOLIDUS + '0' # 0x0030 -> DIGIT ZERO + '1' # 0x0031 -> DIGIT ONE + '2' # 0x0032 -> DIGIT TWO + '3' # 0x0033 -> DIGIT THREE + '4' # 0x0034 -> DIGIT FOUR + '5' # 0x0035 -> DIGIT FIVE + '6' # 0x0036 -> DIGIT SIX + '7' # 0x0037 -> DIGIT SEVEN + '8' # 0x0038 -> DIGIT EIGHT + '9' # 0x0039 -> DIGIT NINE + ':' # 0x003a -> COLON + ';' # 0x003b -> SEMICOLON + '<' # 0x003c -> LESS-THAN SIGN + '=' # 0x003d -> EQUALS SIGN + '>' # 0x003e -> GREATER-THAN SIGN + '?' 
# 0x003f -> QUESTION MARK + '@' # 0x0040 -> COMMERCIAL AT + 'A' # 0x0041 -> LATIN CAPITAL LETTER A + 'B' # 0x0042 -> LATIN CAPITAL LETTER B + 'C' # 0x0043 -> LATIN CAPITAL LETTER C + 'D' # 0x0044 -> LATIN CAPITAL LETTER D + 'E' # 0x0045 -> LATIN CAPITAL LETTER E + 'F' # 0x0046 -> LATIN CAPITAL LETTER F + 'G' # 0x0047 -> LATIN CAPITAL LETTER G + 'H' # 0x0048 -> LATIN CAPITAL LETTER H + 'I' # 0x0049 -> LATIN CAPITAL LETTER I + 'J' # 0x004a -> LATIN CAPITAL LETTER J + 'K' # 0x004b -> LATIN CAPITAL LETTER K + 'L' # 0x004c -> LATIN CAPITAL LETTER L + 'M' # 0x004d -> LATIN CAPITAL LETTER M + 'N' # 0x004e -> LATIN CAPITAL LETTER N + 'O' # 0x004f -> LATIN CAPITAL LETTER O + 'P' # 0x0050 -> LATIN CAPITAL LETTER P + 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q + 'R' # 0x0052 -> LATIN CAPITAL LETTER R + 'S' # 0x0053 -> LATIN CAPITAL LETTER S + 'T' # 0x0054 -> LATIN CAPITAL LETTER T + 'U' # 0x0055 -> LATIN CAPITAL LETTER U + 'V' # 0x0056 -> LATIN CAPITAL LETTER V + 'W' # 0x0057 -> LATIN CAPITAL LETTER W + 'X' # 0x0058 -> LATIN CAPITAL LETTER X + 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y + 'Z' # 0x005a -> LATIN CAPITAL LETTER Z + '[' # 0x005b -> LEFT SQUARE BRACKET + '\\' # 0x005c -> REVERSE SOLIDUS + ']' # 0x005d -> RIGHT SQUARE BRACKET + '^' # 0x005e -> CIRCUMFLEX ACCENT + '_' # 0x005f -> LOW LINE + '`' # 0x0060 -> GRAVE ACCENT + 'a' # 0x0061 -> LATIN SMALL LETTER A + 'b' # 0x0062 -> LATIN SMALL LETTER B + 'c' # 0x0063 -> LATIN SMALL LETTER C + 'd' # 0x0064 -> LATIN SMALL LETTER D + 'e' # 0x0065 -> LATIN SMALL LETTER E + 'f' # 0x0066 -> LATIN SMALL LETTER F + 'g' # 0x0067 -> LATIN SMALL LETTER G + 'h' # 0x0068 -> LATIN SMALL LETTER H + 'i' # 0x0069 -> LATIN SMALL LETTER I + 'j' # 0x006a -> LATIN SMALL LETTER J + 'k' # 0x006b -> LATIN SMALL LETTER K + 'l' # 0x006c -> LATIN SMALL LETTER L + 'm' # 0x006d -> LATIN SMALL LETTER M + 'n' # 0x006e -> LATIN SMALL LETTER N + 'o' # 0x006f -> LATIN SMALL LETTER O + 'p' # 0x0070 -> LATIN SMALL LETTER P + 'q' # 0x0071 -> LATIN SMALL LETTER Q + 'r' # 0x0072 -> LATIN SMALL LETTER R + 's' # 0x0073 -> LATIN SMALL LETTER S + 't' # 0x0074 -> LATIN SMALL LETTER T + 'u' # 0x0075 -> LATIN SMALL LETTER U + 'v' # 0x0076 -> LATIN SMALL LETTER V + 'w' # 0x0077 -> LATIN SMALL LETTER W + 'x' # 0x0078 -> LATIN SMALL LETTER X + 'y' # 0x0079 -> LATIN SMALL LETTER Y + 'z' # 0x007a -> LATIN SMALL LETTER Z + '{' # 0x007b -> LEFT CURLY BRACKET + '|' # 0x007c -> VERTICAL LINE + '}' # 0x007d -> RIGHT CURLY BRACKET + '~' # 0x007e -> TILDE + '\x7f' # 0x007f -> DELETE + '\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA + '\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS + '\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE + '\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX + '\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS + '\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE + '\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE + '\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA + '\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX + '\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS + '\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE + '\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS + '\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX + '\xec' # 0x008d -> LATIN SMALL LETTER I WITH GRAVE + '\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS + '\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE + '\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE + '\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE + '\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE + 
'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX + '\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS + '\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE + '\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX + '\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE + '\xff' # 0x0098 -> LATIN SMALL LETTER Y WITH DIAERESIS + '\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS + '\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE + '\xa3' # 0x009c -> POUND SIGN + '\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE + '\xd7' # 0x009e -> MULTIPLICATION SIGN + '\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK + '\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE + '\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE + '\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE + '\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE + '\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE + '\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE + '\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR + '\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR + '\xbf' # 0x00a8 -> INVERTED QUESTION MARK + '\xae' # 0x00a9 -> REGISTERED SIGN + '\xac' # 0x00aa -> NOT SIGN + '\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF + '\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER + '\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK + '\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\u2591' # 0x00b0 -> LIGHT SHADE + '\u2592' # 0x00b1 -> MEDIUM SHADE + '\u2593' # 0x00b2 -> DARK SHADE + '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL + '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT + '\xc1' # 0x00b5 -> LATIN CAPITAL LETTER A WITH ACUTE + '\xc2' # 0x00b6 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX + '\xc0' # 0x00b7 -> LATIN CAPITAL LETTER A WITH GRAVE + '\xa9' # 0x00b8 -> COPYRIGHT SIGN + '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT + '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL + '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT + '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT + '\xa2' # 0x00bd -> CENT SIGN + '\xa5' # 0x00be -> YEN SIGN + '\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT + '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT + '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL + '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + '\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT + '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL + '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + '\xe3' # 0x00c6 -> LATIN SMALL LETTER A WITH TILDE + '\xc3' # 0x00c7 -> LATIN CAPITAL LETTER A WITH TILDE + '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT + '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT + '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL + '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL + '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + '\xa4' # 0x00cf -> CURRENCY SIGN + '\xf0' # 0x00d0 -> LATIN SMALL LETTER ETH + '\xd0' # 0x00d1 -> LATIN CAPITAL LETTER ETH + '\xca' # 0x00d2 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX + '\xcb' # 0x00d3 -> LATIN CAPITAL LETTER E WITH DIAERESIS + '\xc8' # 0x00d4 -> LATIN CAPITAL LETTER E WITH GRAVE + '\u20ac' # 0x00d5 -> EURO SIGN + '\xcd' # 0x00d6 -> LATIN CAPITAL LETTER I WITH ACUTE + '\xce' # 0x00d7 -> LATIN 
CAPITAL LETTER I WITH CIRCUMFLEX + '\xcf' # 0x00d8 -> LATIN CAPITAL LETTER I WITH DIAERESIS + '\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT + '\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT + '\u2588' # 0x00db -> FULL BLOCK + '\u2584' # 0x00dc -> LOWER HALF BLOCK + '\xa6' # 0x00dd -> BROKEN BAR + '\xcc' # 0x00de -> LATIN CAPITAL LETTER I WITH GRAVE + '\u2580' # 0x00df -> UPPER HALF BLOCK + '\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE + '\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S + '\xd4' # 0x00e2 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX + '\xd2' # 0x00e3 -> LATIN CAPITAL LETTER O WITH GRAVE + '\xf5' # 0x00e4 -> LATIN SMALL LETTER O WITH TILDE + '\xd5' # 0x00e5 -> LATIN CAPITAL LETTER O WITH TILDE + '\xb5' # 0x00e6 -> MICRO SIGN + '\xfe' # 0x00e7 -> LATIN SMALL LETTER THORN + '\xde' # 0x00e8 -> LATIN CAPITAL LETTER THORN + '\xda' # 0x00e9 -> LATIN CAPITAL LETTER U WITH ACUTE + '\xdb' # 0x00ea -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX + '\xd9' # 0x00eb -> LATIN CAPITAL LETTER U WITH GRAVE + '\xfd' # 0x00ec -> LATIN SMALL LETTER Y WITH ACUTE + '\xdd' # 0x00ed -> LATIN CAPITAL LETTER Y WITH ACUTE + '\xaf' # 0x00ee -> MACRON + '\xb4' # 0x00ef -> ACUTE ACCENT + '\xad' # 0x00f0 -> SOFT HYPHEN + '\xb1' # 0x00f1 -> PLUS-MINUS SIGN + '\u2017' # 0x00f2 -> DOUBLE LOW LINE + '\xbe' # 0x00f3 -> VULGAR FRACTION THREE QUARTERS + '\xb6' # 0x00f4 -> PILCROW SIGN + '\xa7' # 0x00f5 -> SECTION SIGN + '\xf7' # 0x00f6 -> DIVISION SIGN + '\xb8' # 0x00f7 -> CEDILLA + '\xb0' # 0x00f8 -> DEGREE SIGN + '\xa8' # 0x00f9 -> DIAERESIS + '\xb7' # 0x00fa -> MIDDLE DOT + '\xb9' # 0x00fb -> SUPERSCRIPT ONE + '\xb3' # 0x00fc -> SUPERSCRIPT THREE + '\xb2' # 0x00fd -> SUPERSCRIPT TWO + '\u25a0' # 0x00fe -> BLACK SQUARE + '\xa0' # 0x00ff -> NO-BREAK SPACE +) + +### Encoding Map + +encoding_map = { + 0x0000: 0x0000, # NULL + 0x0001: 0x0001, # START OF HEADING + 0x0002: 0x0002, # START OF TEXT + 0x0003: 0x0003, # END OF TEXT + 0x0004: 0x0004, # END OF TRANSMISSION + 0x0005: 0x0005, # ENQUIRY + 0x0006: 0x0006, # ACKNOWLEDGE + 0x0007: 0x0007, # BELL + 0x0008: 0x0008, # BACKSPACE + 0x0009: 0x0009, # HORIZONTAL TABULATION + 0x000a: 0x000a, # LINE FEED + 0x000b: 0x000b, # VERTICAL TABULATION + 0x000c: 0x000c, # FORM FEED + 0x000d: 0x000d, # CARRIAGE RETURN + 0x000e: 0x000e, # SHIFT OUT + 0x000f: 0x000f, # SHIFT IN + 0x0010: 0x0010, # DATA LINK ESCAPE + 0x0011: 0x0011, # DEVICE CONTROL ONE + 0x0012: 0x0012, # DEVICE CONTROL TWO + 0x0013: 0x0013, # DEVICE CONTROL THREE + 0x0014: 0x0014, # DEVICE CONTROL FOUR + 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE + 0x0016: 0x0016, # SYNCHRONOUS IDLE + 0x0017: 0x0017, # END OF TRANSMISSION BLOCK + 0x0018: 0x0018, # CANCEL + 0x0019: 0x0019, # END OF MEDIUM + 0x001a: 0x001a, # SUBSTITUTE + 0x001b: 0x001b, # ESCAPE + 0x001c: 0x001c, # FILE SEPARATOR + 0x001d: 0x001d, # GROUP SEPARATOR + 0x001e: 0x001e, # RECORD SEPARATOR + 0x001f: 0x001f, # UNIT SEPARATOR + 0x0020: 0x0020, # SPACE + 0x0021: 0x0021, # EXCLAMATION MARK + 0x0022: 0x0022, # QUOTATION MARK + 0x0023: 0x0023, # NUMBER SIGN + 0x0024: 0x0024, # DOLLAR SIGN + 0x0025: 0x0025, # PERCENT SIGN + 0x0026: 0x0026, # AMPERSAND + 0x0027: 0x0027, # APOSTROPHE + 0x0028: 0x0028, # LEFT PARENTHESIS + 0x0029: 0x0029, # RIGHT PARENTHESIS + 0x002a: 0x002a, # ASTERISK + 0x002b: 0x002b, # PLUS SIGN + 0x002c: 0x002c, # COMMA + 0x002d: 0x002d, # HYPHEN-MINUS + 0x002e: 0x002e, # FULL STOP + 0x002f: 0x002f, # SOLIDUS + 0x0030: 0x0030, # DIGIT ZERO + 0x0031: 0x0031, # DIGIT ONE + 0x0032: 0x0032, # DIGIT TWO + 0x0033: 0x0033, # DIGIT THREE + 0x0034: 
0x0034, # DIGIT FOUR + 0x0035: 0x0035, # DIGIT FIVE + 0x0036: 0x0036, # DIGIT SIX + 0x0037: 0x0037, # DIGIT SEVEN + 0x0038: 0x0038, # DIGIT EIGHT + 0x0039: 0x0039, # DIGIT NINE + 0x003a: 0x003a, # COLON + 0x003b: 0x003b, # SEMICOLON + 0x003c: 0x003c, # LESS-THAN SIGN + 0x003d: 0x003d, # EQUALS SIGN + 0x003e: 0x003e, # GREATER-THAN SIGN + 0x003f: 0x003f, # QUESTION MARK + 0x0040: 0x0040, # COMMERCIAL AT + 0x0041: 0x0041, # LATIN CAPITAL LETTER A + 0x0042: 0x0042, # LATIN CAPITAL LETTER B + 0x0043: 0x0043, # LATIN CAPITAL LETTER C + 0x0044: 0x0044, # LATIN CAPITAL LETTER D + 0x0045: 0x0045, # LATIN CAPITAL LETTER E + 0x0046: 0x0046, # LATIN CAPITAL LETTER F + 0x0047: 0x0047, # LATIN CAPITAL LETTER G + 0x0048: 0x0048, # LATIN CAPITAL LETTER H + 0x0049: 0x0049, # LATIN CAPITAL LETTER I + 0x004a: 0x004a, # LATIN CAPITAL LETTER J + 0x004b: 0x004b, # LATIN CAPITAL LETTER K + 0x004c: 0x004c, # LATIN CAPITAL LETTER L + 0x004d: 0x004d, # LATIN CAPITAL LETTER M + 0x004e: 0x004e, # LATIN CAPITAL LETTER N + 0x004f: 0x004f, # LATIN CAPITAL LETTER O + 0x0050: 0x0050, # LATIN CAPITAL LETTER P + 0x0051: 0x0051, # LATIN CAPITAL LETTER Q + 0x0052: 0x0052, # LATIN CAPITAL LETTER R + 0x0053: 0x0053, # LATIN CAPITAL LETTER S + 0x0054: 0x0054, # LATIN CAPITAL LETTER T + 0x0055: 0x0055, # LATIN CAPITAL LETTER U + 0x0056: 0x0056, # LATIN CAPITAL LETTER V + 0x0057: 0x0057, # LATIN CAPITAL LETTER W + 0x0058: 0x0058, # LATIN CAPITAL LETTER X + 0x0059: 0x0059, # LATIN CAPITAL LETTER Y + 0x005a: 0x005a, # LATIN CAPITAL LETTER Z + 0x005b: 0x005b, # LEFT SQUARE BRACKET + 0x005c: 0x005c, # REVERSE SOLIDUS + 0x005d: 0x005d, # RIGHT SQUARE BRACKET + 0x005e: 0x005e, # CIRCUMFLEX ACCENT + 0x005f: 0x005f, # LOW LINE + 0x0060: 0x0060, # GRAVE ACCENT + 0x0061: 0x0061, # LATIN SMALL LETTER A + 0x0062: 0x0062, # LATIN SMALL LETTER B + 0x0063: 0x0063, # LATIN SMALL LETTER C + 0x0064: 0x0064, # LATIN SMALL LETTER D + 0x0065: 0x0065, # LATIN SMALL LETTER E + 0x0066: 0x0066, # LATIN SMALL LETTER F + 0x0067: 0x0067, # LATIN SMALL LETTER G + 0x0068: 0x0068, # LATIN SMALL LETTER H + 0x0069: 0x0069, # LATIN SMALL LETTER I + 0x006a: 0x006a, # LATIN SMALL LETTER J + 0x006b: 0x006b, # LATIN SMALL LETTER K + 0x006c: 0x006c, # LATIN SMALL LETTER L + 0x006d: 0x006d, # LATIN SMALL LETTER M + 0x006e: 0x006e, # LATIN SMALL LETTER N + 0x006f: 0x006f, # LATIN SMALL LETTER O + 0x0070: 0x0070, # LATIN SMALL LETTER P + 0x0071: 0x0071, # LATIN SMALL LETTER Q + 0x0072: 0x0072, # LATIN SMALL LETTER R + 0x0073: 0x0073, # LATIN SMALL LETTER S + 0x0074: 0x0074, # LATIN SMALL LETTER T + 0x0075: 0x0075, # LATIN SMALL LETTER U + 0x0076: 0x0076, # LATIN SMALL LETTER V + 0x0077: 0x0077, # LATIN SMALL LETTER W + 0x0078: 0x0078, # LATIN SMALL LETTER X + 0x0079: 0x0079, # LATIN SMALL LETTER Y + 0x007a: 0x007a, # LATIN SMALL LETTER Z + 0x007b: 0x007b, # LEFT CURLY BRACKET + 0x007c: 0x007c, # VERTICAL LINE + 0x007d: 0x007d, # RIGHT CURLY BRACKET + 0x007e: 0x007e, # TILDE + 0x007f: 0x007f, # DELETE + 0x00a0: 0x00ff, # NO-BREAK SPACE + 0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK + 0x00a2: 0x00bd, # CENT SIGN + 0x00a3: 0x009c, # POUND SIGN + 0x00a4: 0x00cf, # CURRENCY SIGN + 0x00a5: 0x00be, # YEN SIGN + 0x00a6: 0x00dd, # BROKEN BAR + 0x00a7: 0x00f5, # SECTION SIGN + 0x00a8: 0x00f9, # DIAERESIS + 0x00a9: 0x00b8, # COPYRIGHT SIGN + 0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR + 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00ac: 0x00aa, # NOT SIGN + 0x00ad: 0x00f0, # SOFT HYPHEN + 0x00ae: 0x00a9, # REGISTERED SIGN + 0x00af: 0x00ee, # MACRON + 0x00b0: 
0x00f8, # DEGREE SIGN + 0x00b1: 0x00f1, # PLUS-MINUS SIGN + 0x00b2: 0x00fd, # SUPERSCRIPT TWO + 0x00b3: 0x00fc, # SUPERSCRIPT THREE + 0x00b4: 0x00ef, # ACUTE ACCENT + 0x00b5: 0x00e6, # MICRO SIGN + 0x00b6: 0x00f4, # PILCROW SIGN + 0x00b7: 0x00fa, # MIDDLE DOT + 0x00b8: 0x00f7, # CEDILLA + 0x00b9: 0x00fb, # SUPERSCRIPT ONE + 0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR + 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER + 0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF + 0x00be: 0x00f3, # VULGAR FRACTION THREE QUARTERS + 0x00bf: 0x00a8, # INVERTED QUESTION MARK + 0x00c0: 0x00b7, # LATIN CAPITAL LETTER A WITH GRAVE + 0x00c1: 0x00b5, # LATIN CAPITAL LETTER A WITH ACUTE + 0x00c2: 0x00b6, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX + 0x00c3: 0x00c7, # LATIN CAPITAL LETTER A WITH TILDE + 0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS + 0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE + 0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE + 0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA + 0x00c8: 0x00d4, # LATIN CAPITAL LETTER E WITH GRAVE + 0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE + 0x00ca: 0x00d2, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX + 0x00cb: 0x00d3, # LATIN CAPITAL LETTER E WITH DIAERESIS + 0x00cc: 0x00de, # LATIN CAPITAL LETTER I WITH GRAVE + 0x00cd: 0x00d6, # LATIN CAPITAL LETTER I WITH ACUTE + 0x00ce: 0x00d7, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX + 0x00cf: 0x00d8, # LATIN CAPITAL LETTER I WITH DIAERESIS + 0x00d0: 0x00d1, # LATIN CAPITAL LETTER ETH + 0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE + 0x00d2: 0x00e3, # LATIN CAPITAL LETTER O WITH GRAVE + 0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE + 0x00d4: 0x00e2, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX + 0x00d5: 0x00e5, # LATIN CAPITAL LETTER O WITH TILDE + 0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS + 0x00d7: 0x009e, # MULTIPLICATION SIGN + 0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE + 0x00d9: 0x00eb, # LATIN CAPITAL LETTER U WITH GRAVE + 0x00da: 0x00e9, # LATIN CAPITAL LETTER U WITH ACUTE + 0x00db: 0x00ea, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX + 0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS + 0x00dd: 0x00ed, # LATIN CAPITAL LETTER Y WITH ACUTE + 0x00de: 0x00e8, # LATIN CAPITAL LETTER THORN + 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S + 0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE + 0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE + 0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX + 0x00e3: 0x00c6, # LATIN SMALL LETTER A WITH TILDE + 0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS + 0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE + 0x00e6: 0x0091, # LATIN SMALL LIGATURE AE + 0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA + 0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE + 0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE + 0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX + 0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS + 0x00ec: 0x008d, # LATIN SMALL LETTER I WITH GRAVE + 0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE + 0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX + 0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS + 0x00f0: 0x00d0, # LATIN SMALL LETTER ETH + 0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE + 0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE + 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE + 0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX + 0x00f5: 0x00e4, # LATIN SMALL LETTER O WITH TILDE + 0x00f6: 0x0094, # LATIN 
SMALL LETTER O WITH DIAERESIS + 0x00f7: 0x00f6, # DIVISION SIGN + 0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE + 0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE + 0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE + 0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX + 0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS + 0x00fd: 0x00ec, # LATIN SMALL LETTER Y WITH ACUTE + 0x00fe: 0x00e7, # LATIN SMALL LETTER THORN + 0x00ff: 0x0098, # LATIN SMALL LETTER Y WITH DIAERESIS + 0x20ac: 0x00d5, # EURO SIGN + 0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK + 0x2017: 0x00f2, # DOUBLE LOW LINE + 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL + 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL + 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT + 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT + 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT + 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT + 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT + 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT + 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL + 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL + 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL + 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT + 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT + 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT + 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT + 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT + 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL + 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + 0x2580: 0x00df, # UPPER HALF BLOCK + 0x2584: 0x00dc, # LOWER HALF BLOCK + 0x2588: 0x00db, # FULL BLOCK + 0x2591: 0x00b0, # LIGHT SHADE + 0x2592: 0x00b1, # MEDIUM SHADE + 0x2593: 0x00b2, # DARK SHADE + 0x25a0: 0x00fe, # BLACK SQUARE +} diff --git a/venv/Lib/encodings/cp860.py b/venv/Lib/encodings/cp860.py new file mode 100644 index 00000000..65903e74 --- /dev/null +++ b/venv/Lib/encodings/cp860.py @@ -0,0 +1,698 @@ +""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP860.TXT' with gencodec.py. 
+ +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_map) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_map)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='cp860', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + +### Decoding Map + +decoding_map = codecs.make_identity_dict(range(256)) +decoding_map.update({ + 0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA + 0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS + 0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE + 0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX + 0x0084: 0x00e3, # LATIN SMALL LETTER A WITH TILDE + 0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE + 0x0086: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE + 0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA + 0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX + 0x0089: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX + 0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE + 0x008b: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE + 0x008c: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX + 0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE + 0x008e: 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE + 0x008f: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX + 0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE + 0x0091: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE + 0x0092: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE + 0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX + 0x0094: 0x00f5, # LATIN SMALL LETTER O WITH TILDE + 0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE + 0x0096: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE + 0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE + 0x0098: 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE + 0x0099: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE + 0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS + 0x009b: 0x00a2, # CENT SIGN + 0x009c: 0x00a3, # POUND SIGN + 0x009d: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE + 0x009e: 0x20a7, # PESETA SIGN + 0x009f: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE + 0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE + 0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE + 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE + 0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE + 0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE + 0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE + 0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR + 0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR + 0x00a8: 0x00bf, # INVERTED QUESTION MARK + 0x00a9: 0x00d2, # LATIN CAPITAL LETTER O WITH GRAVE + 0x00aa: 0x00ac, # NOT SIGN + 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF + 0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER + 0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK + 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00af: 0x00bb, # RIGHT-POINTING 
DOUBLE ANGLE QUOTATION MARK + 0x00b0: 0x2591, # LIGHT SHADE + 0x00b1: 0x2592, # MEDIUM SHADE + 0x00b2: 0x2593, # DARK SHADE + 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL + 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT + 0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE + 0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE + 0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE + 0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE + 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT + 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL + 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT + 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT + 0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE + 0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE + 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT + 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT + 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL + 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT + 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL + 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + 0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE + 0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE + 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT + 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT + 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL + 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL + 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + 0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE + 0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE + 0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE + 0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE + 0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE + 0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE + 0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE + 0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE + 0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE + 0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE + 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT + 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT + 0x00db: 0x2588, # FULL BLOCK + 0x00dc: 0x2584, # LOWER HALF BLOCK + 0x00dd: 0x258c, # LEFT HALF BLOCK + 0x00de: 0x2590, # RIGHT HALF BLOCK + 0x00df: 0x2580, # UPPER HALF BLOCK + 0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA + 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S + 0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA + 0x00e3: 0x03c0, # GREEK SMALL LETTER PI + 0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA + 0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA + 0x00e6: 0x00b5, # MICRO SIGN + 0x00e7: 0x03c4, # GREEK SMALL LETTER TAU + 0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI + 0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA + 0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA + 0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA + 0x00ec: 0x221e, # INFINITY + 0x00ed: 0x03c6, # GREEK SMALL LETTER PHI + 0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON + 0x00ef: 0x2229, # INTERSECTION + 0x00f0: 0x2261, # IDENTICAL TO + 0x00f1: 0x00b1, # PLUS-MINUS SIGN + 0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO + 0x00f3: 0x2264, # LESS-THAN OR EQUAL TO + 0x00f4: 0x2320, # TOP HALF INTEGRAL + 
0x00f5: 0x2321, # BOTTOM HALF INTEGRAL + 0x00f6: 0x00f7, # DIVISION SIGN + 0x00f7: 0x2248, # ALMOST EQUAL TO + 0x00f8: 0x00b0, # DEGREE SIGN + 0x00f9: 0x2219, # BULLET OPERATOR + 0x00fa: 0x00b7, # MIDDLE DOT + 0x00fb: 0x221a, # SQUARE ROOT + 0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N + 0x00fd: 0x00b2, # SUPERSCRIPT TWO + 0x00fe: 0x25a0, # BLACK SQUARE + 0x00ff: 0x00a0, # NO-BREAK SPACE +}) + +### Decoding Table + +decoding_table = ( + '\x00' # 0x0000 -> NULL + '\x01' # 0x0001 -> START OF HEADING + '\x02' # 0x0002 -> START OF TEXT + '\x03' # 0x0003 -> END OF TEXT + '\x04' # 0x0004 -> END OF TRANSMISSION + '\x05' # 0x0005 -> ENQUIRY + '\x06' # 0x0006 -> ACKNOWLEDGE + '\x07' # 0x0007 -> BELL + '\x08' # 0x0008 -> BACKSPACE + '\t' # 0x0009 -> HORIZONTAL TABULATION + '\n' # 0x000a -> LINE FEED + '\x0b' # 0x000b -> VERTICAL TABULATION + '\x0c' # 0x000c -> FORM FEED + '\r' # 0x000d -> CARRIAGE RETURN + '\x0e' # 0x000e -> SHIFT OUT + '\x0f' # 0x000f -> SHIFT IN + '\x10' # 0x0010 -> DATA LINK ESCAPE + '\x11' # 0x0011 -> DEVICE CONTROL ONE + '\x12' # 0x0012 -> DEVICE CONTROL TWO + '\x13' # 0x0013 -> DEVICE CONTROL THREE + '\x14' # 0x0014 -> DEVICE CONTROL FOUR + '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x0016 -> SYNCHRONOUS IDLE + '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK + '\x18' # 0x0018 -> CANCEL + '\x19' # 0x0019 -> END OF MEDIUM + '\x1a' # 0x001a -> SUBSTITUTE + '\x1b' # 0x001b -> ESCAPE + '\x1c' # 0x001c -> FILE SEPARATOR + '\x1d' # 0x001d -> GROUP SEPARATOR + '\x1e' # 0x001e -> RECORD SEPARATOR + '\x1f' # 0x001f -> UNIT SEPARATOR + ' ' # 0x0020 -> SPACE + '!' # 0x0021 -> EXCLAMATION MARK + '"' # 0x0022 -> QUOTATION MARK + '#' # 0x0023 -> NUMBER SIGN + '$' # 0x0024 -> DOLLAR SIGN + '%' # 0x0025 -> PERCENT SIGN + '&' # 0x0026 -> AMPERSAND + "'" # 0x0027 -> APOSTROPHE + '(' # 0x0028 -> LEFT PARENTHESIS + ')' # 0x0029 -> RIGHT PARENTHESIS + '*' # 0x002a -> ASTERISK + '+' # 0x002b -> PLUS SIGN + ',' # 0x002c -> COMMA + '-' # 0x002d -> HYPHEN-MINUS + '.' # 0x002e -> FULL STOP + '/' # 0x002f -> SOLIDUS + '0' # 0x0030 -> DIGIT ZERO + '1' # 0x0031 -> DIGIT ONE + '2' # 0x0032 -> DIGIT TWO + '3' # 0x0033 -> DIGIT THREE + '4' # 0x0034 -> DIGIT FOUR + '5' # 0x0035 -> DIGIT FIVE + '6' # 0x0036 -> DIGIT SIX + '7' # 0x0037 -> DIGIT SEVEN + '8' # 0x0038 -> DIGIT EIGHT + '9' # 0x0039 -> DIGIT NINE + ':' # 0x003a -> COLON + ';' # 0x003b -> SEMICOLON + '<' # 0x003c -> LESS-THAN SIGN + '=' # 0x003d -> EQUALS SIGN + '>' # 0x003e -> GREATER-THAN SIGN + '?' 
# 0x003f -> QUESTION MARK + '@' # 0x0040 -> COMMERCIAL AT + 'A' # 0x0041 -> LATIN CAPITAL LETTER A + 'B' # 0x0042 -> LATIN CAPITAL LETTER B + 'C' # 0x0043 -> LATIN CAPITAL LETTER C + 'D' # 0x0044 -> LATIN CAPITAL LETTER D + 'E' # 0x0045 -> LATIN CAPITAL LETTER E + 'F' # 0x0046 -> LATIN CAPITAL LETTER F + 'G' # 0x0047 -> LATIN CAPITAL LETTER G + 'H' # 0x0048 -> LATIN CAPITAL LETTER H + 'I' # 0x0049 -> LATIN CAPITAL LETTER I + 'J' # 0x004a -> LATIN CAPITAL LETTER J + 'K' # 0x004b -> LATIN CAPITAL LETTER K + 'L' # 0x004c -> LATIN CAPITAL LETTER L + 'M' # 0x004d -> LATIN CAPITAL LETTER M + 'N' # 0x004e -> LATIN CAPITAL LETTER N + 'O' # 0x004f -> LATIN CAPITAL LETTER O + 'P' # 0x0050 -> LATIN CAPITAL LETTER P + 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q + 'R' # 0x0052 -> LATIN CAPITAL LETTER R + 'S' # 0x0053 -> LATIN CAPITAL LETTER S + 'T' # 0x0054 -> LATIN CAPITAL LETTER T + 'U' # 0x0055 -> LATIN CAPITAL LETTER U + 'V' # 0x0056 -> LATIN CAPITAL LETTER V + 'W' # 0x0057 -> LATIN CAPITAL LETTER W + 'X' # 0x0058 -> LATIN CAPITAL LETTER X + 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y + 'Z' # 0x005a -> LATIN CAPITAL LETTER Z + '[' # 0x005b -> LEFT SQUARE BRACKET + '\\' # 0x005c -> REVERSE SOLIDUS + ']' # 0x005d -> RIGHT SQUARE BRACKET + '^' # 0x005e -> CIRCUMFLEX ACCENT + '_' # 0x005f -> LOW LINE + '`' # 0x0060 -> GRAVE ACCENT + 'a' # 0x0061 -> LATIN SMALL LETTER A + 'b' # 0x0062 -> LATIN SMALL LETTER B + 'c' # 0x0063 -> LATIN SMALL LETTER C + 'd' # 0x0064 -> LATIN SMALL LETTER D + 'e' # 0x0065 -> LATIN SMALL LETTER E + 'f' # 0x0066 -> LATIN SMALL LETTER F + 'g' # 0x0067 -> LATIN SMALL LETTER G + 'h' # 0x0068 -> LATIN SMALL LETTER H + 'i' # 0x0069 -> LATIN SMALL LETTER I + 'j' # 0x006a -> LATIN SMALL LETTER J + 'k' # 0x006b -> LATIN SMALL LETTER K + 'l' # 0x006c -> LATIN SMALL LETTER L + 'm' # 0x006d -> LATIN SMALL LETTER M + 'n' # 0x006e -> LATIN SMALL LETTER N + 'o' # 0x006f -> LATIN SMALL LETTER O + 'p' # 0x0070 -> LATIN SMALL LETTER P + 'q' # 0x0071 -> LATIN SMALL LETTER Q + 'r' # 0x0072 -> LATIN SMALL LETTER R + 's' # 0x0073 -> LATIN SMALL LETTER S + 't' # 0x0074 -> LATIN SMALL LETTER T + 'u' # 0x0075 -> LATIN SMALL LETTER U + 'v' # 0x0076 -> LATIN SMALL LETTER V + 'w' # 0x0077 -> LATIN SMALL LETTER W + 'x' # 0x0078 -> LATIN SMALL LETTER X + 'y' # 0x0079 -> LATIN SMALL LETTER Y + 'z' # 0x007a -> LATIN SMALL LETTER Z + '{' # 0x007b -> LEFT CURLY BRACKET + '|' # 0x007c -> VERTICAL LINE + '}' # 0x007d -> RIGHT CURLY BRACKET + '~' # 0x007e -> TILDE + '\x7f' # 0x007f -> DELETE + '\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA + '\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS + '\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE + '\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX + '\xe3' # 0x0084 -> LATIN SMALL LETTER A WITH TILDE + '\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE + '\xc1' # 0x0086 -> LATIN CAPITAL LETTER A WITH ACUTE + '\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA + '\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX + '\xca' # 0x0089 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX + '\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE + '\xcd' # 0x008b -> LATIN CAPITAL LETTER I WITH ACUTE + '\xd4' # 0x008c -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX + '\xec' # 0x008d -> LATIN SMALL LETTER I WITH GRAVE + '\xc3' # 0x008e -> LATIN CAPITAL LETTER A WITH TILDE + '\xc2' # 0x008f -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX + '\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE + '\xc0' # 0x0091 -> LATIN CAPITAL LETTER A WITH GRAVE + '\xc8' # 0x0092 -> LATIN CAPITAL LETTER E 
WITH GRAVE + '\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX + '\xf5' # 0x0094 -> LATIN SMALL LETTER O WITH TILDE + '\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE + '\xda' # 0x0096 -> LATIN CAPITAL LETTER U WITH ACUTE + '\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE + '\xcc' # 0x0098 -> LATIN CAPITAL LETTER I WITH GRAVE + '\xd5' # 0x0099 -> LATIN CAPITAL LETTER O WITH TILDE + '\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\xa2' # 0x009b -> CENT SIGN + '\xa3' # 0x009c -> POUND SIGN + '\xd9' # 0x009d -> LATIN CAPITAL LETTER U WITH GRAVE + '\u20a7' # 0x009e -> PESETA SIGN + '\xd3' # 0x009f -> LATIN CAPITAL LETTER O WITH ACUTE + '\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE + '\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE + '\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE + '\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE + '\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE + '\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE + '\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR + '\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR + '\xbf' # 0x00a8 -> INVERTED QUESTION MARK + '\xd2' # 0x00a9 -> LATIN CAPITAL LETTER O WITH GRAVE + '\xac' # 0x00aa -> NOT SIGN + '\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF + '\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER + '\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK + '\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\u2591' # 0x00b0 -> LIGHT SHADE + '\u2592' # 0x00b1 -> MEDIUM SHADE + '\u2593' # 0x00b2 -> DARK SHADE + '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL + '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT + '\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE + '\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE + '\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE + '\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE + '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT + '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL + '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT + '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT + '\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE + '\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE + '\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT + '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT + '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL + '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + '\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT + '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL + '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + '\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE + '\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE + '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT + '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT + '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL + '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL + '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + '\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE + '\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE + '\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE + '\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE + 
'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE + '\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE + '\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE + '\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE + '\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE + '\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE + '\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT + '\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT + '\u2588' # 0x00db -> FULL BLOCK + '\u2584' # 0x00dc -> LOWER HALF BLOCK + '\u258c' # 0x00dd -> LEFT HALF BLOCK + '\u2590' # 0x00de -> RIGHT HALF BLOCK + '\u2580' # 0x00df -> UPPER HALF BLOCK + '\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA + '\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S + '\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA + '\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI + '\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA + '\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA + '\xb5' # 0x00e6 -> MICRO SIGN + '\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU + '\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI + '\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA + '\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA + '\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA + '\u221e' # 0x00ec -> INFINITY + '\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI + '\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON + '\u2229' # 0x00ef -> INTERSECTION + '\u2261' # 0x00f0 -> IDENTICAL TO + '\xb1' # 0x00f1 -> PLUS-MINUS SIGN + '\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO + '\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO + '\u2320' # 0x00f4 -> TOP HALF INTEGRAL + '\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL + '\xf7' # 0x00f6 -> DIVISION SIGN + '\u2248' # 0x00f7 -> ALMOST EQUAL TO + '\xb0' # 0x00f8 -> DEGREE SIGN + '\u2219' # 0x00f9 -> BULLET OPERATOR + '\xb7' # 0x00fa -> MIDDLE DOT + '\u221a' # 0x00fb -> SQUARE ROOT + '\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N + '\xb2' # 0x00fd -> SUPERSCRIPT TWO + '\u25a0' # 0x00fe -> BLACK SQUARE + '\xa0' # 0x00ff -> NO-BREAK SPACE +) + +### Encoding Map + +encoding_map = { + 0x0000: 0x0000, # NULL + 0x0001: 0x0001, # START OF HEADING + 0x0002: 0x0002, # START OF TEXT + 0x0003: 0x0003, # END OF TEXT + 0x0004: 0x0004, # END OF TRANSMISSION + 0x0005: 0x0005, # ENQUIRY + 0x0006: 0x0006, # ACKNOWLEDGE + 0x0007: 0x0007, # BELL + 0x0008: 0x0008, # BACKSPACE + 0x0009: 0x0009, # HORIZONTAL TABULATION + 0x000a: 0x000a, # LINE FEED + 0x000b: 0x000b, # VERTICAL TABULATION + 0x000c: 0x000c, # FORM FEED + 0x000d: 0x000d, # CARRIAGE RETURN + 0x000e: 0x000e, # SHIFT OUT + 0x000f: 0x000f, # SHIFT IN + 0x0010: 0x0010, # DATA LINK ESCAPE + 0x0011: 0x0011, # DEVICE CONTROL ONE + 0x0012: 0x0012, # DEVICE CONTROL TWO + 0x0013: 0x0013, # DEVICE CONTROL THREE + 0x0014: 0x0014, # DEVICE CONTROL FOUR + 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE + 0x0016: 0x0016, # SYNCHRONOUS IDLE + 0x0017: 0x0017, # END OF TRANSMISSION BLOCK + 0x0018: 0x0018, # CANCEL + 0x0019: 0x0019, # END OF MEDIUM + 0x001a: 0x001a, # SUBSTITUTE + 0x001b: 0x001b, # ESCAPE + 0x001c: 0x001c, # FILE SEPARATOR + 0x001d: 0x001d, # GROUP SEPARATOR + 0x001e: 0x001e, # RECORD SEPARATOR + 0x001f: 0x001f, # UNIT SEPARATOR + 0x0020: 0x0020, # SPACE + 0x0021: 0x0021, # EXCLAMATION MARK + 0x0022: 0x0022, # QUOTATION MARK + 0x0023: 0x0023, # NUMBER SIGN + 0x0024: 0x0024, # DOLLAR SIGN + 0x0025: 0x0025, # PERCENT SIGN + 0x0026: 0x0026, # AMPERSAND + 0x0027: 0x0027, # APOSTROPHE + 0x0028: 0x0028, # LEFT PARENTHESIS + 0x0029: 0x0029, # RIGHT PARENTHESIS + 0x002a: 
0x002a, # ASTERISK + 0x002b: 0x002b, # PLUS SIGN + 0x002c: 0x002c, # COMMA + 0x002d: 0x002d, # HYPHEN-MINUS + 0x002e: 0x002e, # FULL STOP + 0x002f: 0x002f, # SOLIDUS + 0x0030: 0x0030, # DIGIT ZERO + 0x0031: 0x0031, # DIGIT ONE + 0x0032: 0x0032, # DIGIT TWO + 0x0033: 0x0033, # DIGIT THREE + 0x0034: 0x0034, # DIGIT FOUR + 0x0035: 0x0035, # DIGIT FIVE + 0x0036: 0x0036, # DIGIT SIX + 0x0037: 0x0037, # DIGIT SEVEN + 0x0038: 0x0038, # DIGIT EIGHT + 0x0039: 0x0039, # DIGIT NINE + 0x003a: 0x003a, # COLON + 0x003b: 0x003b, # SEMICOLON + 0x003c: 0x003c, # LESS-THAN SIGN + 0x003d: 0x003d, # EQUALS SIGN + 0x003e: 0x003e, # GREATER-THAN SIGN + 0x003f: 0x003f, # QUESTION MARK + 0x0040: 0x0040, # COMMERCIAL AT + 0x0041: 0x0041, # LATIN CAPITAL LETTER A + 0x0042: 0x0042, # LATIN CAPITAL LETTER B + 0x0043: 0x0043, # LATIN CAPITAL LETTER C + 0x0044: 0x0044, # LATIN CAPITAL LETTER D + 0x0045: 0x0045, # LATIN CAPITAL LETTER E + 0x0046: 0x0046, # LATIN CAPITAL LETTER F + 0x0047: 0x0047, # LATIN CAPITAL LETTER G + 0x0048: 0x0048, # LATIN CAPITAL LETTER H + 0x0049: 0x0049, # LATIN CAPITAL LETTER I + 0x004a: 0x004a, # LATIN CAPITAL LETTER J + 0x004b: 0x004b, # LATIN CAPITAL LETTER K + 0x004c: 0x004c, # LATIN CAPITAL LETTER L + 0x004d: 0x004d, # LATIN CAPITAL LETTER M + 0x004e: 0x004e, # LATIN CAPITAL LETTER N + 0x004f: 0x004f, # LATIN CAPITAL LETTER O + 0x0050: 0x0050, # LATIN CAPITAL LETTER P + 0x0051: 0x0051, # LATIN CAPITAL LETTER Q + 0x0052: 0x0052, # LATIN CAPITAL LETTER R + 0x0053: 0x0053, # LATIN CAPITAL LETTER S + 0x0054: 0x0054, # LATIN CAPITAL LETTER T + 0x0055: 0x0055, # LATIN CAPITAL LETTER U + 0x0056: 0x0056, # LATIN CAPITAL LETTER V + 0x0057: 0x0057, # LATIN CAPITAL LETTER W + 0x0058: 0x0058, # LATIN CAPITAL LETTER X + 0x0059: 0x0059, # LATIN CAPITAL LETTER Y + 0x005a: 0x005a, # LATIN CAPITAL LETTER Z + 0x005b: 0x005b, # LEFT SQUARE BRACKET + 0x005c: 0x005c, # REVERSE SOLIDUS + 0x005d: 0x005d, # RIGHT SQUARE BRACKET + 0x005e: 0x005e, # CIRCUMFLEX ACCENT + 0x005f: 0x005f, # LOW LINE + 0x0060: 0x0060, # GRAVE ACCENT + 0x0061: 0x0061, # LATIN SMALL LETTER A + 0x0062: 0x0062, # LATIN SMALL LETTER B + 0x0063: 0x0063, # LATIN SMALL LETTER C + 0x0064: 0x0064, # LATIN SMALL LETTER D + 0x0065: 0x0065, # LATIN SMALL LETTER E + 0x0066: 0x0066, # LATIN SMALL LETTER F + 0x0067: 0x0067, # LATIN SMALL LETTER G + 0x0068: 0x0068, # LATIN SMALL LETTER H + 0x0069: 0x0069, # LATIN SMALL LETTER I + 0x006a: 0x006a, # LATIN SMALL LETTER J + 0x006b: 0x006b, # LATIN SMALL LETTER K + 0x006c: 0x006c, # LATIN SMALL LETTER L + 0x006d: 0x006d, # LATIN SMALL LETTER M + 0x006e: 0x006e, # LATIN SMALL LETTER N + 0x006f: 0x006f, # LATIN SMALL LETTER O + 0x0070: 0x0070, # LATIN SMALL LETTER P + 0x0071: 0x0071, # LATIN SMALL LETTER Q + 0x0072: 0x0072, # LATIN SMALL LETTER R + 0x0073: 0x0073, # LATIN SMALL LETTER S + 0x0074: 0x0074, # LATIN SMALL LETTER T + 0x0075: 0x0075, # LATIN SMALL LETTER U + 0x0076: 0x0076, # LATIN SMALL LETTER V + 0x0077: 0x0077, # LATIN SMALL LETTER W + 0x0078: 0x0078, # LATIN SMALL LETTER X + 0x0079: 0x0079, # LATIN SMALL LETTER Y + 0x007a: 0x007a, # LATIN SMALL LETTER Z + 0x007b: 0x007b, # LEFT CURLY BRACKET + 0x007c: 0x007c, # VERTICAL LINE + 0x007d: 0x007d, # RIGHT CURLY BRACKET + 0x007e: 0x007e, # TILDE + 0x007f: 0x007f, # DELETE + 0x00a0: 0x00ff, # NO-BREAK SPACE + 0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK + 0x00a2: 0x009b, # CENT SIGN + 0x00a3: 0x009c, # POUND SIGN + 0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR + 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00ac: 0x00aa, # NOT SIGN + 
0x00b0: 0x00f8, # DEGREE SIGN + 0x00b1: 0x00f1, # PLUS-MINUS SIGN + 0x00b2: 0x00fd, # SUPERSCRIPT TWO + 0x00b5: 0x00e6, # MICRO SIGN + 0x00b7: 0x00fa, # MIDDLE DOT + 0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR + 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER + 0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF + 0x00bf: 0x00a8, # INVERTED QUESTION MARK + 0x00c0: 0x0091, # LATIN CAPITAL LETTER A WITH GRAVE + 0x00c1: 0x0086, # LATIN CAPITAL LETTER A WITH ACUTE + 0x00c2: 0x008f, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX + 0x00c3: 0x008e, # LATIN CAPITAL LETTER A WITH TILDE + 0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA + 0x00c8: 0x0092, # LATIN CAPITAL LETTER E WITH GRAVE + 0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE + 0x00ca: 0x0089, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX + 0x00cc: 0x0098, # LATIN CAPITAL LETTER I WITH GRAVE + 0x00cd: 0x008b, # LATIN CAPITAL LETTER I WITH ACUTE + 0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE + 0x00d2: 0x00a9, # LATIN CAPITAL LETTER O WITH GRAVE + 0x00d3: 0x009f, # LATIN CAPITAL LETTER O WITH ACUTE + 0x00d4: 0x008c, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX + 0x00d5: 0x0099, # LATIN CAPITAL LETTER O WITH TILDE + 0x00d9: 0x009d, # LATIN CAPITAL LETTER U WITH GRAVE + 0x00da: 0x0096, # LATIN CAPITAL LETTER U WITH ACUTE + 0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS + 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S + 0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE + 0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE + 0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX + 0x00e3: 0x0084, # LATIN SMALL LETTER A WITH TILDE + 0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA + 0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE + 0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE + 0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX + 0x00ec: 0x008d, # LATIN SMALL LETTER I WITH GRAVE + 0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE + 0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE + 0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE + 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE + 0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX + 0x00f5: 0x0094, # LATIN SMALL LETTER O WITH TILDE + 0x00f7: 0x00f6, # DIVISION SIGN + 0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE + 0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE + 0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS + 0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA + 0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA + 0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA + 0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI + 0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA + 0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA + 0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA + 0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON + 0x03c0: 0x00e3, # GREEK SMALL LETTER PI + 0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA + 0x03c4: 0x00e7, # GREEK SMALL LETTER TAU + 0x03c6: 0x00ed, # GREEK SMALL LETTER PHI + 0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N + 0x20a7: 0x009e, # PESETA SIGN + 0x2219: 0x00f9, # BULLET OPERATOR + 0x221a: 0x00fb, # SQUARE ROOT + 0x221e: 0x00ec, # INFINITY + 0x2229: 0x00ef, # INTERSECTION + 0x2248: 0x00f7, # ALMOST EQUAL TO + 0x2261: 0x00f0, # IDENTICAL TO + 0x2264: 0x00f3, # LESS-THAN OR EQUAL TO + 0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO + 0x2320: 0x00f4, # TOP HALF INTEGRAL + 0x2321: 0x00f5, # BOTTOM HALF INTEGRAL + 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL + 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL + 0x250c: 0x00da, # 
BOX DRAWINGS LIGHT DOWN AND RIGHT + 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT + 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT + 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT + 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT + 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT + 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL + 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL + 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL + 0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE + 0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE + 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT + 0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE + 0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE + 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT + 0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE + 0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE + 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT + 0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE + 0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE + 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT + 0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE + 0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE + 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + 0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE + 0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE + 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT + 0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE + 0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE + 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + 0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE + 0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE + 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL + 0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE + 0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE + 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + 0x2580: 0x00df, # UPPER HALF BLOCK + 0x2584: 0x00dc, # LOWER HALF BLOCK + 0x2588: 0x00db, # FULL BLOCK + 0x258c: 0x00dd, # LEFT HALF BLOCK + 0x2590: 0x00de, # RIGHT HALF BLOCK + 0x2591: 0x00b0, # LIGHT SHADE + 0x2592: 0x00b1, # MEDIUM SHADE + 0x2593: 0x00b2, # DARK SHADE + 0x25a0: 0x00fe, # BLACK SQUARE +} diff --git a/venv/Lib/encodings/cp861.py b/venv/Lib/encodings/cp861.py new file mode 100644 index 00000000..860a05fa --- /dev/null +++ b/venv/Lib/encodings/cp861.py @@ -0,0 +1,698 @@ +""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP861.TXT' with gencodec.py. 
+ +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_map) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_map)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='cp861', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + +### Decoding Map + +decoding_map = codecs.make_identity_dict(range(256)) +decoding_map.update({ + 0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA + 0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS + 0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE + 0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX + 0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS + 0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE + 0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE + 0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA + 0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX + 0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS + 0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE + 0x008b: 0x00d0, # LATIN CAPITAL LETTER ETH + 0x008c: 0x00f0, # LATIN SMALL LETTER ETH + 0x008d: 0x00de, # LATIN CAPITAL LETTER THORN + 0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS + 0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE + 0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE + 0x0091: 0x00e6, # LATIN SMALL LIGATURE AE + 0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE + 0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX + 0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS + 0x0095: 0x00fe, # LATIN SMALL LETTER THORN + 0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX + 0x0097: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE + 0x0098: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE + 0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS + 0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS + 0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE + 0x009c: 0x00a3, # POUND SIGN + 0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE + 0x009e: 0x20a7, # PESETA SIGN + 0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK + 0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE + 0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE + 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE + 0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE + 0x00a4: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE + 0x00a5: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE + 0x00a6: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE + 0x00a7: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE + 0x00a8: 0x00bf, # INVERTED QUESTION MARK + 0x00a9: 0x2310, # REVERSED NOT SIGN + 0x00aa: 0x00ac, # NOT SIGN + 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF + 0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER + 0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK + 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE 
QUOTATION MARK + 0x00b0: 0x2591, # LIGHT SHADE + 0x00b1: 0x2592, # MEDIUM SHADE + 0x00b2: 0x2593, # DARK SHADE + 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL + 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT + 0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE + 0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE + 0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE + 0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE + 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT + 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL + 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT + 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT + 0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE + 0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE + 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT + 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT + 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL + 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT + 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL + 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + 0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE + 0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE + 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT + 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT + 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL + 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL + 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + 0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE + 0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE + 0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE + 0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE + 0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE + 0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE + 0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE + 0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE + 0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE + 0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE + 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT + 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT + 0x00db: 0x2588, # FULL BLOCK + 0x00dc: 0x2584, # LOWER HALF BLOCK + 0x00dd: 0x258c, # LEFT HALF BLOCK + 0x00de: 0x2590, # RIGHT HALF BLOCK + 0x00df: 0x2580, # UPPER HALF BLOCK + 0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA + 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S + 0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA + 0x00e3: 0x03c0, # GREEK SMALL LETTER PI + 0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA + 0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA + 0x00e6: 0x00b5, # MICRO SIGN + 0x00e7: 0x03c4, # GREEK SMALL LETTER TAU + 0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI + 0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA + 0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA + 0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA + 0x00ec: 0x221e, # INFINITY + 0x00ed: 0x03c6, # GREEK SMALL LETTER PHI + 0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON + 0x00ef: 0x2229, # INTERSECTION + 0x00f0: 0x2261, # IDENTICAL TO + 0x00f1: 0x00b1, # PLUS-MINUS SIGN + 0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO + 0x00f3: 0x2264, # LESS-THAN OR EQUAL TO + 0x00f4: 0x2320, # TOP HALF INTEGRAL + 0x00f5: 0x2321, # 
BOTTOM HALF INTEGRAL + 0x00f6: 0x00f7, # DIVISION SIGN + 0x00f7: 0x2248, # ALMOST EQUAL TO + 0x00f8: 0x00b0, # DEGREE SIGN + 0x00f9: 0x2219, # BULLET OPERATOR + 0x00fa: 0x00b7, # MIDDLE DOT + 0x00fb: 0x221a, # SQUARE ROOT + 0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N + 0x00fd: 0x00b2, # SUPERSCRIPT TWO + 0x00fe: 0x25a0, # BLACK SQUARE + 0x00ff: 0x00a0, # NO-BREAK SPACE +}) + +### Decoding Table + +decoding_table = ( + '\x00' # 0x0000 -> NULL + '\x01' # 0x0001 -> START OF HEADING + '\x02' # 0x0002 -> START OF TEXT + '\x03' # 0x0003 -> END OF TEXT + '\x04' # 0x0004 -> END OF TRANSMISSION + '\x05' # 0x0005 -> ENQUIRY + '\x06' # 0x0006 -> ACKNOWLEDGE + '\x07' # 0x0007 -> BELL + '\x08' # 0x0008 -> BACKSPACE + '\t' # 0x0009 -> HORIZONTAL TABULATION + '\n' # 0x000a -> LINE FEED + '\x0b' # 0x000b -> VERTICAL TABULATION + '\x0c' # 0x000c -> FORM FEED + '\r' # 0x000d -> CARRIAGE RETURN + '\x0e' # 0x000e -> SHIFT OUT + '\x0f' # 0x000f -> SHIFT IN + '\x10' # 0x0010 -> DATA LINK ESCAPE + '\x11' # 0x0011 -> DEVICE CONTROL ONE + '\x12' # 0x0012 -> DEVICE CONTROL TWO + '\x13' # 0x0013 -> DEVICE CONTROL THREE + '\x14' # 0x0014 -> DEVICE CONTROL FOUR + '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x0016 -> SYNCHRONOUS IDLE + '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK + '\x18' # 0x0018 -> CANCEL + '\x19' # 0x0019 -> END OF MEDIUM + '\x1a' # 0x001a -> SUBSTITUTE + '\x1b' # 0x001b -> ESCAPE + '\x1c' # 0x001c -> FILE SEPARATOR + '\x1d' # 0x001d -> GROUP SEPARATOR + '\x1e' # 0x001e -> RECORD SEPARATOR + '\x1f' # 0x001f -> UNIT SEPARATOR + ' ' # 0x0020 -> SPACE + '!' # 0x0021 -> EXCLAMATION MARK + '"' # 0x0022 -> QUOTATION MARK + '#' # 0x0023 -> NUMBER SIGN + '$' # 0x0024 -> DOLLAR SIGN + '%' # 0x0025 -> PERCENT SIGN + '&' # 0x0026 -> AMPERSAND + "'" # 0x0027 -> APOSTROPHE + '(' # 0x0028 -> LEFT PARENTHESIS + ')' # 0x0029 -> RIGHT PARENTHESIS + '*' # 0x002a -> ASTERISK + '+' # 0x002b -> PLUS SIGN + ',' # 0x002c -> COMMA + '-' # 0x002d -> HYPHEN-MINUS + '.' # 0x002e -> FULL STOP + '/' # 0x002f -> SOLIDUS + '0' # 0x0030 -> DIGIT ZERO + '1' # 0x0031 -> DIGIT ONE + '2' # 0x0032 -> DIGIT TWO + '3' # 0x0033 -> DIGIT THREE + '4' # 0x0034 -> DIGIT FOUR + '5' # 0x0035 -> DIGIT FIVE + '6' # 0x0036 -> DIGIT SIX + '7' # 0x0037 -> DIGIT SEVEN + '8' # 0x0038 -> DIGIT EIGHT + '9' # 0x0039 -> DIGIT NINE + ':' # 0x003a -> COLON + ';' # 0x003b -> SEMICOLON + '<' # 0x003c -> LESS-THAN SIGN + '=' # 0x003d -> EQUALS SIGN + '>' # 0x003e -> GREATER-THAN SIGN + '?' 
# 0x003f -> QUESTION MARK + '@' # 0x0040 -> COMMERCIAL AT + 'A' # 0x0041 -> LATIN CAPITAL LETTER A + 'B' # 0x0042 -> LATIN CAPITAL LETTER B + 'C' # 0x0043 -> LATIN CAPITAL LETTER C + 'D' # 0x0044 -> LATIN CAPITAL LETTER D + 'E' # 0x0045 -> LATIN CAPITAL LETTER E + 'F' # 0x0046 -> LATIN CAPITAL LETTER F + 'G' # 0x0047 -> LATIN CAPITAL LETTER G + 'H' # 0x0048 -> LATIN CAPITAL LETTER H + 'I' # 0x0049 -> LATIN CAPITAL LETTER I + 'J' # 0x004a -> LATIN CAPITAL LETTER J + 'K' # 0x004b -> LATIN CAPITAL LETTER K + 'L' # 0x004c -> LATIN CAPITAL LETTER L + 'M' # 0x004d -> LATIN CAPITAL LETTER M + 'N' # 0x004e -> LATIN CAPITAL LETTER N + 'O' # 0x004f -> LATIN CAPITAL LETTER O + 'P' # 0x0050 -> LATIN CAPITAL LETTER P + 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q + 'R' # 0x0052 -> LATIN CAPITAL LETTER R + 'S' # 0x0053 -> LATIN CAPITAL LETTER S + 'T' # 0x0054 -> LATIN CAPITAL LETTER T + 'U' # 0x0055 -> LATIN CAPITAL LETTER U + 'V' # 0x0056 -> LATIN CAPITAL LETTER V + 'W' # 0x0057 -> LATIN CAPITAL LETTER W + 'X' # 0x0058 -> LATIN CAPITAL LETTER X + 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y + 'Z' # 0x005a -> LATIN CAPITAL LETTER Z + '[' # 0x005b -> LEFT SQUARE BRACKET + '\\' # 0x005c -> REVERSE SOLIDUS + ']' # 0x005d -> RIGHT SQUARE BRACKET + '^' # 0x005e -> CIRCUMFLEX ACCENT + '_' # 0x005f -> LOW LINE + '`' # 0x0060 -> GRAVE ACCENT + 'a' # 0x0061 -> LATIN SMALL LETTER A + 'b' # 0x0062 -> LATIN SMALL LETTER B + 'c' # 0x0063 -> LATIN SMALL LETTER C + 'd' # 0x0064 -> LATIN SMALL LETTER D + 'e' # 0x0065 -> LATIN SMALL LETTER E + 'f' # 0x0066 -> LATIN SMALL LETTER F + 'g' # 0x0067 -> LATIN SMALL LETTER G + 'h' # 0x0068 -> LATIN SMALL LETTER H + 'i' # 0x0069 -> LATIN SMALL LETTER I + 'j' # 0x006a -> LATIN SMALL LETTER J + 'k' # 0x006b -> LATIN SMALL LETTER K + 'l' # 0x006c -> LATIN SMALL LETTER L + 'm' # 0x006d -> LATIN SMALL LETTER M + 'n' # 0x006e -> LATIN SMALL LETTER N + 'o' # 0x006f -> LATIN SMALL LETTER O + 'p' # 0x0070 -> LATIN SMALL LETTER P + 'q' # 0x0071 -> LATIN SMALL LETTER Q + 'r' # 0x0072 -> LATIN SMALL LETTER R + 's' # 0x0073 -> LATIN SMALL LETTER S + 't' # 0x0074 -> LATIN SMALL LETTER T + 'u' # 0x0075 -> LATIN SMALL LETTER U + 'v' # 0x0076 -> LATIN SMALL LETTER V + 'w' # 0x0077 -> LATIN SMALL LETTER W + 'x' # 0x0078 -> LATIN SMALL LETTER X + 'y' # 0x0079 -> LATIN SMALL LETTER Y + 'z' # 0x007a -> LATIN SMALL LETTER Z + '{' # 0x007b -> LEFT CURLY BRACKET + '|' # 0x007c -> VERTICAL LINE + '}' # 0x007d -> RIGHT CURLY BRACKET + '~' # 0x007e -> TILDE + '\x7f' # 0x007f -> DELETE + '\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA + '\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS + '\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE + '\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX + '\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS + '\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE + '\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE + '\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA + '\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX + '\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS + '\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE + '\xd0' # 0x008b -> LATIN CAPITAL LETTER ETH + '\xf0' # 0x008c -> LATIN SMALL LETTER ETH + '\xde' # 0x008d -> LATIN CAPITAL LETTER THORN + '\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS + '\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE + '\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE + '\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE + '\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE + '\xf4' # 0x0093 -> LATIN 
SMALL LETTER O WITH CIRCUMFLEX + '\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS + '\xfe' # 0x0095 -> LATIN SMALL LETTER THORN + '\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX + '\xdd' # 0x0097 -> LATIN CAPITAL LETTER Y WITH ACUTE + '\xfd' # 0x0098 -> LATIN SMALL LETTER Y WITH ACUTE + '\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS + '\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE + '\xa3' # 0x009c -> POUND SIGN + '\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE + '\u20a7' # 0x009e -> PESETA SIGN + '\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK + '\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE + '\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE + '\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE + '\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE + '\xc1' # 0x00a4 -> LATIN CAPITAL LETTER A WITH ACUTE + '\xcd' # 0x00a5 -> LATIN CAPITAL LETTER I WITH ACUTE + '\xd3' # 0x00a6 -> LATIN CAPITAL LETTER O WITH ACUTE + '\xda' # 0x00a7 -> LATIN CAPITAL LETTER U WITH ACUTE + '\xbf' # 0x00a8 -> INVERTED QUESTION MARK + '\u2310' # 0x00a9 -> REVERSED NOT SIGN + '\xac' # 0x00aa -> NOT SIGN + '\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF + '\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER + '\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK + '\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\u2591' # 0x00b0 -> LIGHT SHADE + '\u2592' # 0x00b1 -> MEDIUM SHADE + '\u2593' # 0x00b2 -> DARK SHADE + '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL + '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT + '\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE + '\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE + '\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE + '\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE + '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT + '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL + '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT + '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT + '\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE + '\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE + '\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT + '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT + '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL + '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + '\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT + '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL + '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + '\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE + '\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE + '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT + '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT + '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL + '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL + '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + '\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE + '\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE + '\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE + '\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE + '\u2559' # 
0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE + '\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE + '\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE + '\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE + '\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE + '\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE + '\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT + '\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT + '\u2588' # 0x00db -> FULL BLOCK + '\u2584' # 0x00dc -> LOWER HALF BLOCK + '\u258c' # 0x00dd -> LEFT HALF BLOCK + '\u2590' # 0x00de -> RIGHT HALF BLOCK + '\u2580' # 0x00df -> UPPER HALF BLOCK + '\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA + '\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S + '\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA + '\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI + '\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA + '\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA + '\xb5' # 0x00e6 -> MICRO SIGN + '\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU + '\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI + '\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA + '\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA + '\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA + '\u221e' # 0x00ec -> INFINITY + '\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI + '\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON + '\u2229' # 0x00ef -> INTERSECTION + '\u2261' # 0x00f0 -> IDENTICAL TO + '\xb1' # 0x00f1 -> PLUS-MINUS SIGN + '\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO + '\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO + '\u2320' # 0x00f4 -> TOP HALF INTEGRAL + '\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL + '\xf7' # 0x00f6 -> DIVISION SIGN + '\u2248' # 0x00f7 -> ALMOST EQUAL TO + '\xb0' # 0x00f8 -> DEGREE SIGN + '\u2219' # 0x00f9 -> BULLET OPERATOR + '\xb7' # 0x00fa -> MIDDLE DOT + '\u221a' # 0x00fb -> SQUARE ROOT + '\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N + '\xb2' # 0x00fd -> SUPERSCRIPT TWO + '\u25a0' # 0x00fe -> BLACK SQUARE + '\xa0' # 0x00ff -> NO-BREAK SPACE +) + +### Encoding Map + +encoding_map = { + 0x0000: 0x0000, # NULL + 0x0001: 0x0001, # START OF HEADING + 0x0002: 0x0002, # START OF TEXT + 0x0003: 0x0003, # END OF TEXT + 0x0004: 0x0004, # END OF TRANSMISSION + 0x0005: 0x0005, # ENQUIRY + 0x0006: 0x0006, # ACKNOWLEDGE + 0x0007: 0x0007, # BELL + 0x0008: 0x0008, # BACKSPACE + 0x0009: 0x0009, # HORIZONTAL TABULATION + 0x000a: 0x000a, # LINE FEED + 0x000b: 0x000b, # VERTICAL TABULATION + 0x000c: 0x000c, # FORM FEED + 0x000d: 0x000d, # CARRIAGE RETURN + 0x000e: 0x000e, # SHIFT OUT + 0x000f: 0x000f, # SHIFT IN + 0x0010: 0x0010, # DATA LINK ESCAPE + 0x0011: 0x0011, # DEVICE CONTROL ONE + 0x0012: 0x0012, # DEVICE CONTROL TWO + 0x0013: 0x0013, # DEVICE CONTROL THREE + 0x0014: 0x0014, # DEVICE CONTROL FOUR + 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE + 0x0016: 0x0016, # SYNCHRONOUS IDLE + 0x0017: 0x0017, # END OF TRANSMISSION BLOCK + 0x0018: 0x0018, # CANCEL + 0x0019: 0x0019, # END OF MEDIUM + 0x001a: 0x001a, # SUBSTITUTE + 0x001b: 0x001b, # ESCAPE + 0x001c: 0x001c, # FILE SEPARATOR + 0x001d: 0x001d, # GROUP SEPARATOR + 0x001e: 0x001e, # RECORD SEPARATOR + 0x001f: 0x001f, # UNIT SEPARATOR + 0x0020: 0x0020, # SPACE + 0x0021: 0x0021, # EXCLAMATION MARK + 0x0022: 0x0022, # QUOTATION MARK + 0x0023: 0x0023, # NUMBER SIGN + 0x0024: 0x0024, # DOLLAR SIGN + 0x0025: 0x0025, # PERCENT SIGN + 0x0026: 0x0026, # AMPERSAND + 0x0027: 0x0027, # APOSTROPHE + 0x0028: 0x0028, # LEFT PARENTHESIS + 0x0029: 0x0029, # RIGHT PARENTHESIS + 0x002a: 0x002a, # 
ASTERISK + 0x002b: 0x002b, # PLUS SIGN + 0x002c: 0x002c, # COMMA + 0x002d: 0x002d, # HYPHEN-MINUS + 0x002e: 0x002e, # FULL STOP + 0x002f: 0x002f, # SOLIDUS + 0x0030: 0x0030, # DIGIT ZERO + 0x0031: 0x0031, # DIGIT ONE + 0x0032: 0x0032, # DIGIT TWO + 0x0033: 0x0033, # DIGIT THREE + 0x0034: 0x0034, # DIGIT FOUR + 0x0035: 0x0035, # DIGIT FIVE + 0x0036: 0x0036, # DIGIT SIX + 0x0037: 0x0037, # DIGIT SEVEN + 0x0038: 0x0038, # DIGIT EIGHT + 0x0039: 0x0039, # DIGIT NINE + 0x003a: 0x003a, # COLON + 0x003b: 0x003b, # SEMICOLON + 0x003c: 0x003c, # LESS-THAN SIGN + 0x003d: 0x003d, # EQUALS SIGN + 0x003e: 0x003e, # GREATER-THAN SIGN + 0x003f: 0x003f, # QUESTION MARK + 0x0040: 0x0040, # COMMERCIAL AT + 0x0041: 0x0041, # LATIN CAPITAL LETTER A + 0x0042: 0x0042, # LATIN CAPITAL LETTER B + 0x0043: 0x0043, # LATIN CAPITAL LETTER C + 0x0044: 0x0044, # LATIN CAPITAL LETTER D + 0x0045: 0x0045, # LATIN CAPITAL LETTER E + 0x0046: 0x0046, # LATIN CAPITAL LETTER F + 0x0047: 0x0047, # LATIN CAPITAL LETTER G + 0x0048: 0x0048, # LATIN CAPITAL LETTER H + 0x0049: 0x0049, # LATIN CAPITAL LETTER I + 0x004a: 0x004a, # LATIN CAPITAL LETTER J + 0x004b: 0x004b, # LATIN CAPITAL LETTER K + 0x004c: 0x004c, # LATIN CAPITAL LETTER L + 0x004d: 0x004d, # LATIN CAPITAL LETTER M + 0x004e: 0x004e, # LATIN CAPITAL LETTER N + 0x004f: 0x004f, # LATIN CAPITAL LETTER O + 0x0050: 0x0050, # LATIN CAPITAL LETTER P + 0x0051: 0x0051, # LATIN CAPITAL LETTER Q + 0x0052: 0x0052, # LATIN CAPITAL LETTER R + 0x0053: 0x0053, # LATIN CAPITAL LETTER S + 0x0054: 0x0054, # LATIN CAPITAL LETTER T + 0x0055: 0x0055, # LATIN CAPITAL LETTER U + 0x0056: 0x0056, # LATIN CAPITAL LETTER V + 0x0057: 0x0057, # LATIN CAPITAL LETTER W + 0x0058: 0x0058, # LATIN CAPITAL LETTER X + 0x0059: 0x0059, # LATIN CAPITAL LETTER Y + 0x005a: 0x005a, # LATIN CAPITAL LETTER Z + 0x005b: 0x005b, # LEFT SQUARE BRACKET + 0x005c: 0x005c, # REVERSE SOLIDUS + 0x005d: 0x005d, # RIGHT SQUARE BRACKET + 0x005e: 0x005e, # CIRCUMFLEX ACCENT + 0x005f: 0x005f, # LOW LINE + 0x0060: 0x0060, # GRAVE ACCENT + 0x0061: 0x0061, # LATIN SMALL LETTER A + 0x0062: 0x0062, # LATIN SMALL LETTER B + 0x0063: 0x0063, # LATIN SMALL LETTER C + 0x0064: 0x0064, # LATIN SMALL LETTER D + 0x0065: 0x0065, # LATIN SMALL LETTER E + 0x0066: 0x0066, # LATIN SMALL LETTER F + 0x0067: 0x0067, # LATIN SMALL LETTER G + 0x0068: 0x0068, # LATIN SMALL LETTER H + 0x0069: 0x0069, # LATIN SMALL LETTER I + 0x006a: 0x006a, # LATIN SMALL LETTER J + 0x006b: 0x006b, # LATIN SMALL LETTER K + 0x006c: 0x006c, # LATIN SMALL LETTER L + 0x006d: 0x006d, # LATIN SMALL LETTER M + 0x006e: 0x006e, # LATIN SMALL LETTER N + 0x006f: 0x006f, # LATIN SMALL LETTER O + 0x0070: 0x0070, # LATIN SMALL LETTER P + 0x0071: 0x0071, # LATIN SMALL LETTER Q + 0x0072: 0x0072, # LATIN SMALL LETTER R + 0x0073: 0x0073, # LATIN SMALL LETTER S + 0x0074: 0x0074, # LATIN SMALL LETTER T + 0x0075: 0x0075, # LATIN SMALL LETTER U + 0x0076: 0x0076, # LATIN SMALL LETTER V + 0x0077: 0x0077, # LATIN SMALL LETTER W + 0x0078: 0x0078, # LATIN SMALL LETTER X + 0x0079: 0x0079, # LATIN SMALL LETTER Y + 0x007a: 0x007a, # LATIN SMALL LETTER Z + 0x007b: 0x007b, # LEFT CURLY BRACKET + 0x007c: 0x007c, # VERTICAL LINE + 0x007d: 0x007d, # RIGHT CURLY BRACKET + 0x007e: 0x007e, # TILDE + 0x007f: 0x007f, # DELETE + 0x00a0: 0x00ff, # NO-BREAK SPACE + 0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK + 0x00a3: 0x009c, # POUND SIGN + 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00ac: 0x00aa, # NOT SIGN + 0x00b0: 0x00f8, # DEGREE SIGN + 0x00b1: 0x00f1, # PLUS-MINUS SIGN + 0x00b2: 0x00fd, # 
SUPERSCRIPT TWO + 0x00b5: 0x00e6, # MICRO SIGN + 0x00b7: 0x00fa, # MIDDLE DOT + 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER + 0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF + 0x00bf: 0x00a8, # INVERTED QUESTION MARK + 0x00c1: 0x00a4, # LATIN CAPITAL LETTER A WITH ACUTE + 0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS + 0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE + 0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE + 0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA + 0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE + 0x00cd: 0x00a5, # LATIN CAPITAL LETTER I WITH ACUTE + 0x00d0: 0x008b, # LATIN CAPITAL LETTER ETH + 0x00d3: 0x00a6, # LATIN CAPITAL LETTER O WITH ACUTE + 0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS + 0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE + 0x00da: 0x00a7, # LATIN CAPITAL LETTER U WITH ACUTE + 0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS + 0x00dd: 0x0097, # LATIN CAPITAL LETTER Y WITH ACUTE + 0x00de: 0x008d, # LATIN CAPITAL LETTER THORN + 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S + 0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE + 0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE + 0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX + 0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS + 0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE + 0x00e6: 0x0091, # LATIN SMALL LIGATURE AE + 0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA + 0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE + 0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE + 0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX + 0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS + 0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE + 0x00f0: 0x008c, # LATIN SMALL LETTER ETH + 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE + 0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX + 0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS + 0x00f7: 0x00f6, # DIVISION SIGN + 0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE + 0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE + 0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX + 0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS + 0x00fd: 0x0098, # LATIN SMALL LETTER Y WITH ACUTE + 0x00fe: 0x0095, # LATIN SMALL LETTER THORN + 0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK + 0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA + 0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA + 0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA + 0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI + 0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA + 0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA + 0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA + 0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON + 0x03c0: 0x00e3, # GREEK SMALL LETTER PI + 0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA + 0x03c4: 0x00e7, # GREEK SMALL LETTER TAU + 0x03c6: 0x00ed, # GREEK SMALL LETTER PHI + 0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N + 0x20a7: 0x009e, # PESETA SIGN + 0x2219: 0x00f9, # BULLET OPERATOR + 0x221a: 0x00fb, # SQUARE ROOT + 0x221e: 0x00ec, # INFINITY + 0x2229: 0x00ef, # INTERSECTION + 0x2248: 0x00f7, # ALMOST EQUAL TO + 0x2261: 0x00f0, # IDENTICAL TO + 0x2264: 0x00f3, # LESS-THAN OR EQUAL TO + 0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO + 0x2310: 0x00a9, # REVERSED NOT SIGN + 0x2320: 0x00f4, # TOP HALF INTEGRAL + 0x2321: 0x00f5, # BOTTOM HALF INTEGRAL + 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL + 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL + 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN 
AND RIGHT + 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT + 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT + 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT + 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT + 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT + 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL + 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL + 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL + 0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE + 0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE + 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT + 0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE + 0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE + 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT + 0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE + 0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE + 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT + 0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE + 0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE + 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT + 0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE + 0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE + 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + 0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE + 0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE + 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT + 0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE + 0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE + 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + 0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE + 0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE + 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL + 0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE + 0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE + 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + 0x2580: 0x00df, # UPPER HALF BLOCK + 0x2584: 0x00dc, # LOWER HALF BLOCK + 0x2588: 0x00db, # FULL BLOCK + 0x258c: 0x00dd, # LEFT HALF BLOCK + 0x2590: 0x00de, # RIGHT HALF BLOCK + 0x2591: 0x00b0, # LIGHT SHADE + 0x2592: 0x00b1, # MEDIUM SHADE + 0x2593: 0x00b2, # DARK SHADE + 0x25a0: 0x00fe, # BLACK SQUARE +} diff --git a/venv/Lib/encodings/cp862.py b/venv/Lib/encodings/cp862.py new file mode 100644 index 00000000..3df22f99 --- /dev/null +++ b/venv/Lib/encodings/cp862.py @@ -0,0 +1,698 @@ +""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP862.TXT' with gencodec.py. 
+ +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_map) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_map)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='cp862', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + +### Decoding Map + +decoding_map = codecs.make_identity_dict(range(256)) +decoding_map.update({ + 0x0080: 0x05d0, # HEBREW LETTER ALEF + 0x0081: 0x05d1, # HEBREW LETTER BET + 0x0082: 0x05d2, # HEBREW LETTER GIMEL + 0x0083: 0x05d3, # HEBREW LETTER DALET + 0x0084: 0x05d4, # HEBREW LETTER HE + 0x0085: 0x05d5, # HEBREW LETTER VAV + 0x0086: 0x05d6, # HEBREW LETTER ZAYIN + 0x0087: 0x05d7, # HEBREW LETTER HET + 0x0088: 0x05d8, # HEBREW LETTER TET + 0x0089: 0x05d9, # HEBREW LETTER YOD + 0x008a: 0x05da, # HEBREW LETTER FINAL KAF + 0x008b: 0x05db, # HEBREW LETTER KAF + 0x008c: 0x05dc, # HEBREW LETTER LAMED + 0x008d: 0x05dd, # HEBREW LETTER FINAL MEM + 0x008e: 0x05de, # HEBREW LETTER MEM + 0x008f: 0x05df, # HEBREW LETTER FINAL NUN + 0x0090: 0x05e0, # HEBREW LETTER NUN + 0x0091: 0x05e1, # HEBREW LETTER SAMEKH + 0x0092: 0x05e2, # HEBREW LETTER AYIN + 0x0093: 0x05e3, # HEBREW LETTER FINAL PE + 0x0094: 0x05e4, # HEBREW LETTER PE + 0x0095: 0x05e5, # HEBREW LETTER FINAL TSADI + 0x0096: 0x05e6, # HEBREW LETTER TSADI + 0x0097: 0x05e7, # HEBREW LETTER QOF + 0x0098: 0x05e8, # HEBREW LETTER RESH + 0x0099: 0x05e9, # HEBREW LETTER SHIN + 0x009a: 0x05ea, # HEBREW LETTER TAV + 0x009b: 0x00a2, # CENT SIGN + 0x009c: 0x00a3, # POUND SIGN + 0x009d: 0x00a5, # YEN SIGN + 0x009e: 0x20a7, # PESETA SIGN + 0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK + 0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE + 0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE + 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE + 0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE + 0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE + 0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE + 0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR + 0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR + 0x00a8: 0x00bf, # INVERTED QUESTION MARK + 0x00a9: 0x2310, # REVERSED NOT SIGN + 0x00aa: 0x00ac, # NOT SIGN + 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF + 0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER + 0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK + 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00b0: 0x2591, # LIGHT SHADE + 0x00b1: 0x2592, # MEDIUM SHADE + 0x00b2: 0x2593, # DARK SHADE + 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL + 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT + 0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE + 0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE + 0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE + 0x00b8: 0x2555, # BOX 
DRAWINGS DOWN SINGLE AND LEFT DOUBLE + 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT + 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL + 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT + 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT + 0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE + 0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE + 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT + 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT + 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL + 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT + 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL + 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + 0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE + 0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE + 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT + 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT + 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL + 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL + 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + 0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE + 0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE + 0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE + 0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE + 0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE + 0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE + 0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE + 0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE + 0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE + 0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE + 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT + 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT + 0x00db: 0x2588, # FULL BLOCK + 0x00dc: 0x2584, # LOWER HALF BLOCK + 0x00dd: 0x258c, # LEFT HALF BLOCK + 0x00de: 0x2590, # RIGHT HALF BLOCK + 0x00df: 0x2580, # UPPER HALF BLOCK + 0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA + 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S (GERMAN) + 0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA + 0x00e3: 0x03c0, # GREEK SMALL LETTER PI + 0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA + 0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA + 0x00e6: 0x00b5, # MICRO SIGN + 0x00e7: 0x03c4, # GREEK SMALL LETTER TAU + 0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI + 0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA + 0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA + 0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA + 0x00ec: 0x221e, # INFINITY + 0x00ed: 0x03c6, # GREEK SMALL LETTER PHI + 0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON + 0x00ef: 0x2229, # INTERSECTION + 0x00f0: 0x2261, # IDENTICAL TO + 0x00f1: 0x00b1, # PLUS-MINUS SIGN + 0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO + 0x00f3: 0x2264, # LESS-THAN OR EQUAL TO + 0x00f4: 0x2320, # TOP HALF INTEGRAL + 0x00f5: 0x2321, # BOTTOM HALF INTEGRAL + 0x00f6: 0x00f7, # DIVISION SIGN + 0x00f7: 0x2248, # ALMOST EQUAL TO + 0x00f8: 0x00b0, # DEGREE SIGN + 0x00f9: 0x2219, # BULLET OPERATOR + 0x00fa: 0x00b7, # MIDDLE DOT + 0x00fb: 0x221a, # SQUARE ROOT + 0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N + 0x00fd: 0x00b2, # SUPERSCRIPT TWO + 0x00fe: 0x25a0, # BLACK SQUARE + 0x00ff: 0x00a0, # NO-BREAK SPACE +}) + +### Decoding Table + +decoding_table 
= ( + '\x00' # 0x0000 -> NULL + '\x01' # 0x0001 -> START OF HEADING + '\x02' # 0x0002 -> START OF TEXT + '\x03' # 0x0003 -> END OF TEXT + '\x04' # 0x0004 -> END OF TRANSMISSION + '\x05' # 0x0005 -> ENQUIRY + '\x06' # 0x0006 -> ACKNOWLEDGE + '\x07' # 0x0007 -> BELL + '\x08' # 0x0008 -> BACKSPACE + '\t' # 0x0009 -> HORIZONTAL TABULATION + '\n' # 0x000a -> LINE FEED + '\x0b' # 0x000b -> VERTICAL TABULATION + '\x0c' # 0x000c -> FORM FEED + '\r' # 0x000d -> CARRIAGE RETURN + '\x0e' # 0x000e -> SHIFT OUT + '\x0f' # 0x000f -> SHIFT IN + '\x10' # 0x0010 -> DATA LINK ESCAPE + '\x11' # 0x0011 -> DEVICE CONTROL ONE + '\x12' # 0x0012 -> DEVICE CONTROL TWO + '\x13' # 0x0013 -> DEVICE CONTROL THREE + '\x14' # 0x0014 -> DEVICE CONTROL FOUR + '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x0016 -> SYNCHRONOUS IDLE + '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK + '\x18' # 0x0018 -> CANCEL + '\x19' # 0x0019 -> END OF MEDIUM + '\x1a' # 0x001a -> SUBSTITUTE + '\x1b' # 0x001b -> ESCAPE + '\x1c' # 0x001c -> FILE SEPARATOR + '\x1d' # 0x001d -> GROUP SEPARATOR + '\x1e' # 0x001e -> RECORD SEPARATOR + '\x1f' # 0x001f -> UNIT SEPARATOR + ' ' # 0x0020 -> SPACE + '!' # 0x0021 -> EXCLAMATION MARK + '"' # 0x0022 -> QUOTATION MARK + '#' # 0x0023 -> NUMBER SIGN + '$' # 0x0024 -> DOLLAR SIGN + '%' # 0x0025 -> PERCENT SIGN + '&' # 0x0026 -> AMPERSAND + "'" # 0x0027 -> APOSTROPHE + '(' # 0x0028 -> LEFT PARENTHESIS + ')' # 0x0029 -> RIGHT PARENTHESIS + '*' # 0x002a -> ASTERISK + '+' # 0x002b -> PLUS SIGN + ',' # 0x002c -> COMMA + '-' # 0x002d -> HYPHEN-MINUS + '.' # 0x002e -> FULL STOP + '/' # 0x002f -> SOLIDUS + '0' # 0x0030 -> DIGIT ZERO + '1' # 0x0031 -> DIGIT ONE + '2' # 0x0032 -> DIGIT TWO + '3' # 0x0033 -> DIGIT THREE + '4' # 0x0034 -> DIGIT FOUR + '5' # 0x0035 -> DIGIT FIVE + '6' # 0x0036 -> DIGIT SIX + '7' # 0x0037 -> DIGIT SEVEN + '8' # 0x0038 -> DIGIT EIGHT + '9' # 0x0039 -> DIGIT NINE + ':' # 0x003a -> COLON + ';' # 0x003b -> SEMICOLON + '<' # 0x003c -> LESS-THAN SIGN + '=' # 0x003d -> EQUALS SIGN + '>' # 0x003e -> GREATER-THAN SIGN + '?' 
# 0x003f -> QUESTION MARK + '@' # 0x0040 -> COMMERCIAL AT + 'A' # 0x0041 -> LATIN CAPITAL LETTER A + 'B' # 0x0042 -> LATIN CAPITAL LETTER B + 'C' # 0x0043 -> LATIN CAPITAL LETTER C + 'D' # 0x0044 -> LATIN CAPITAL LETTER D + 'E' # 0x0045 -> LATIN CAPITAL LETTER E + 'F' # 0x0046 -> LATIN CAPITAL LETTER F + 'G' # 0x0047 -> LATIN CAPITAL LETTER G + 'H' # 0x0048 -> LATIN CAPITAL LETTER H + 'I' # 0x0049 -> LATIN CAPITAL LETTER I + 'J' # 0x004a -> LATIN CAPITAL LETTER J + 'K' # 0x004b -> LATIN CAPITAL LETTER K + 'L' # 0x004c -> LATIN CAPITAL LETTER L + 'M' # 0x004d -> LATIN CAPITAL LETTER M + 'N' # 0x004e -> LATIN CAPITAL LETTER N + 'O' # 0x004f -> LATIN CAPITAL LETTER O + 'P' # 0x0050 -> LATIN CAPITAL LETTER P + 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q + 'R' # 0x0052 -> LATIN CAPITAL LETTER R + 'S' # 0x0053 -> LATIN CAPITAL LETTER S + 'T' # 0x0054 -> LATIN CAPITAL LETTER T + 'U' # 0x0055 -> LATIN CAPITAL LETTER U + 'V' # 0x0056 -> LATIN CAPITAL LETTER V + 'W' # 0x0057 -> LATIN CAPITAL LETTER W + 'X' # 0x0058 -> LATIN CAPITAL LETTER X + 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y + 'Z' # 0x005a -> LATIN CAPITAL LETTER Z + '[' # 0x005b -> LEFT SQUARE BRACKET + '\\' # 0x005c -> REVERSE SOLIDUS + ']' # 0x005d -> RIGHT SQUARE BRACKET + '^' # 0x005e -> CIRCUMFLEX ACCENT + '_' # 0x005f -> LOW LINE + '`' # 0x0060 -> GRAVE ACCENT + 'a' # 0x0061 -> LATIN SMALL LETTER A + 'b' # 0x0062 -> LATIN SMALL LETTER B + 'c' # 0x0063 -> LATIN SMALL LETTER C + 'd' # 0x0064 -> LATIN SMALL LETTER D + 'e' # 0x0065 -> LATIN SMALL LETTER E + 'f' # 0x0066 -> LATIN SMALL LETTER F + 'g' # 0x0067 -> LATIN SMALL LETTER G + 'h' # 0x0068 -> LATIN SMALL LETTER H + 'i' # 0x0069 -> LATIN SMALL LETTER I + 'j' # 0x006a -> LATIN SMALL LETTER J + 'k' # 0x006b -> LATIN SMALL LETTER K + 'l' # 0x006c -> LATIN SMALL LETTER L + 'm' # 0x006d -> LATIN SMALL LETTER M + 'n' # 0x006e -> LATIN SMALL LETTER N + 'o' # 0x006f -> LATIN SMALL LETTER O + 'p' # 0x0070 -> LATIN SMALL LETTER P + 'q' # 0x0071 -> LATIN SMALL LETTER Q + 'r' # 0x0072 -> LATIN SMALL LETTER R + 's' # 0x0073 -> LATIN SMALL LETTER S + 't' # 0x0074 -> LATIN SMALL LETTER T + 'u' # 0x0075 -> LATIN SMALL LETTER U + 'v' # 0x0076 -> LATIN SMALL LETTER V + 'w' # 0x0077 -> LATIN SMALL LETTER W + 'x' # 0x0078 -> LATIN SMALL LETTER X + 'y' # 0x0079 -> LATIN SMALL LETTER Y + 'z' # 0x007a -> LATIN SMALL LETTER Z + '{' # 0x007b -> LEFT CURLY BRACKET + '|' # 0x007c -> VERTICAL LINE + '}' # 0x007d -> RIGHT CURLY BRACKET + '~' # 0x007e -> TILDE + '\x7f' # 0x007f -> DELETE + '\u05d0' # 0x0080 -> HEBREW LETTER ALEF + '\u05d1' # 0x0081 -> HEBREW LETTER BET + '\u05d2' # 0x0082 -> HEBREW LETTER GIMEL + '\u05d3' # 0x0083 -> HEBREW LETTER DALET + '\u05d4' # 0x0084 -> HEBREW LETTER HE + '\u05d5' # 0x0085 -> HEBREW LETTER VAV + '\u05d6' # 0x0086 -> HEBREW LETTER ZAYIN + '\u05d7' # 0x0087 -> HEBREW LETTER HET + '\u05d8' # 0x0088 -> HEBREW LETTER TET + '\u05d9' # 0x0089 -> HEBREW LETTER YOD + '\u05da' # 0x008a -> HEBREW LETTER FINAL KAF + '\u05db' # 0x008b -> HEBREW LETTER KAF + '\u05dc' # 0x008c -> HEBREW LETTER LAMED + '\u05dd' # 0x008d -> HEBREW LETTER FINAL MEM + '\u05de' # 0x008e -> HEBREW LETTER MEM + '\u05df' # 0x008f -> HEBREW LETTER FINAL NUN + '\u05e0' # 0x0090 -> HEBREW LETTER NUN + '\u05e1' # 0x0091 -> HEBREW LETTER SAMEKH + '\u05e2' # 0x0092 -> HEBREW LETTER AYIN + '\u05e3' # 0x0093 -> HEBREW LETTER FINAL PE + '\u05e4' # 0x0094 -> HEBREW LETTER PE + '\u05e5' # 0x0095 -> HEBREW LETTER FINAL TSADI + '\u05e6' # 0x0096 -> HEBREW LETTER TSADI + '\u05e7' # 0x0097 -> HEBREW LETTER QOF + '\u05e8' # 0x0098 -> 
HEBREW LETTER RESH + '\u05e9' # 0x0099 -> HEBREW LETTER SHIN + '\u05ea' # 0x009a -> HEBREW LETTER TAV + '\xa2' # 0x009b -> CENT SIGN + '\xa3' # 0x009c -> POUND SIGN + '\xa5' # 0x009d -> YEN SIGN + '\u20a7' # 0x009e -> PESETA SIGN + '\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK + '\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE + '\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE + '\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE + '\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE + '\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE + '\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE + '\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR + '\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR + '\xbf' # 0x00a8 -> INVERTED QUESTION MARK + '\u2310' # 0x00a9 -> REVERSED NOT SIGN + '\xac' # 0x00aa -> NOT SIGN + '\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF + '\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER + '\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK + '\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\u2591' # 0x00b0 -> LIGHT SHADE + '\u2592' # 0x00b1 -> MEDIUM SHADE + '\u2593' # 0x00b2 -> DARK SHADE + '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL + '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT + '\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE + '\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE + '\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE + '\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE + '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT + '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL + '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT + '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT + '\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE + '\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE + '\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT + '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT + '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL + '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + '\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT + '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL + '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + '\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE + '\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE + '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT + '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT + '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL + '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL + '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + '\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE + '\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE + '\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE + '\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE + '\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE + '\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE + '\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE + '\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE + '\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE + '\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND 
HORIZONTAL DOUBLE + '\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT + '\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT + '\u2588' # 0x00db -> FULL BLOCK + '\u2584' # 0x00dc -> LOWER HALF BLOCK + '\u258c' # 0x00dd -> LEFT HALF BLOCK + '\u2590' # 0x00de -> RIGHT HALF BLOCK + '\u2580' # 0x00df -> UPPER HALF BLOCK + '\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA + '\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S (GERMAN) + '\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA + '\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI + '\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA + '\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA + '\xb5' # 0x00e6 -> MICRO SIGN + '\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU + '\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI + '\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA + '\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA + '\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA + '\u221e' # 0x00ec -> INFINITY + '\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI + '\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON + '\u2229' # 0x00ef -> INTERSECTION + '\u2261' # 0x00f0 -> IDENTICAL TO + '\xb1' # 0x00f1 -> PLUS-MINUS SIGN + '\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO + '\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO + '\u2320' # 0x00f4 -> TOP HALF INTEGRAL + '\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL + '\xf7' # 0x00f6 -> DIVISION SIGN + '\u2248' # 0x00f7 -> ALMOST EQUAL TO + '\xb0' # 0x00f8 -> DEGREE SIGN + '\u2219' # 0x00f9 -> BULLET OPERATOR + '\xb7' # 0x00fa -> MIDDLE DOT + '\u221a' # 0x00fb -> SQUARE ROOT + '\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N + '\xb2' # 0x00fd -> SUPERSCRIPT TWO + '\u25a0' # 0x00fe -> BLACK SQUARE + '\xa0' # 0x00ff -> NO-BREAK SPACE +) + +### Encoding Map + +encoding_map = { + 0x0000: 0x0000, # NULL + 0x0001: 0x0001, # START OF HEADING + 0x0002: 0x0002, # START OF TEXT + 0x0003: 0x0003, # END OF TEXT + 0x0004: 0x0004, # END OF TRANSMISSION + 0x0005: 0x0005, # ENQUIRY + 0x0006: 0x0006, # ACKNOWLEDGE + 0x0007: 0x0007, # BELL + 0x0008: 0x0008, # BACKSPACE + 0x0009: 0x0009, # HORIZONTAL TABULATION + 0x000a: 0x000a, # LINE FEED + 0x000b: 0x000b, # VERTICAL TABULATION + 0x000c: 0x000c, # FORM FEED + 0x000d: 0x000d, # CARRIAGE RETURN + 0x000e: 0x000e, # SHIFT OUT + 0x000f: 0x000f, # SHIFT IN + 0x0010: 0x0010, # DATA LINK ESCAPE + 0x0011: 0x0011, # DEVICE CONTROL ONE + 0x0012: 0x0012, # DEVICE CONTROL TWO + 0x0013: 0x0013, # DEVICE CONTROL THREE + 0x0014: 0x0014, # DEVICE CONTROL FOUR + 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE + 0x0016: 0x0016, # SYNCHRONOUS IDLE + 0x0017: 0x0017, # END OF TRANSMISSION BLOCK + 0x0018: 0x0018, # CANCEL + 0x0019: 0x0019, # END OF MEDIUM + 0x001a: 0x001a, # SUBSTITUTE + 0x001b: 0x001b, # ESCAPE + 0x001c: 0x001c, # FILE SEPARATOR + 0x001d: 0x001d, # GROUP SEPARATOR + 0x001e: 0x001e, # RECORD SEPARATOR + 0x001f: 0x001f, # UNIT SEPARATOR + 0x0020: 0x0020, # SPACE + 0x0021: 0x0021, # EXCLAMATION MARK + 0x0022: 0x0022, # QUOTATION MARK + 0x0023: 0x0023, # NUMBER SIGN + 0x0024: 0x0024, # DOLLAR SIGN + 0x0025: 0x0025, # PERCENT SIGN + 0x0026: 0x0026, # AMPERSAND + 0x0027: 0x0027, # APOSTROPHE + 0x0028: 0x0028, # LEFT PARENTHESIS + 0x0029: 0x0029, # RIGHT PARENTHESIS + 0x002a: 0x002a, # ASTERISK + 0x002b: 0x002b, # PLUS SIGN + 0x002c: 0x002c, # COMMA + 0x002d: 0x002d, # HYPHEN-MINUS + 0x002e: 0x002e, # FULL STOP + 0x002f: 0x002f, # SOLIDUS + 0x0030: 0x0030, # DIGIT ZERO + 0x0031: 0x0031, # DIGIT ONE + 0x0032: 0x0032, # DIGIT TWO + 0x0033: 0x0033, # DIGIT THREE + 0x0034: 0x0034, # DIGIT FOUR + 0x0035: 0x0035, # DIGIT FIVE + 0x0036: 0x0036, # DIGIT SIX 
+ 0x0037: 0x0037, # DIGIT SEVEN + 0x0038: 0x0038, # DIGIT EIGHT + 0x0039: 0x0039, # DIGIT NINE + 0x003a: 0x003a, # COLON + 0x003b: 0x003b, # SEMICOLON + 0x003c: 0x003c, # LESS-THAN SIGN + 0x003d: 0x003d, # EQUALS SIGN + 0x003e: 0x003e, # GREATER-THAN SIGN + 0x003f: 0x003f, # QUESTION MARK + 0x0040: 0x0040, # COMMERCIAL AT + 0x0041: 0x0041, # LATIN CAPITAL LETTER A + 0x0042: 0x0042, # LATIN CAPITAL LETTER B + 0x0043: 0x0043, # LATIN CAPITAL LETTER C + 0x0044: 0x0044, # LATIN CAPITAL LETTER D + 0x0045: 0x0045, # LATIN CAPITAL LETTER E + 0x0046: 0x0046, # LATIN CAPITAL LETTER F + 0x0047: 0x0047, # LATIN CAPITAL LETTER G + 0x0048: 0x0048, # LATIN CAPITAL LETTER H + 0x0049: 0x0049, # LATIN CAPITAL LETTER I + 0x004a: 0x004a, # LATIN CAPITAL LETTER J + 0x004b: 0x004b, # LATIN CAPITAL LETTER K + 0x004c: 0x004c, # LATIN CAPITAL LETTER L + 0x004d: 0x004d, # LATIN CAPITAL LETTER M + 0x004e: 0x004e, # LATIN CAPITAL LETTER N + 0x004f: 0x004f, # LATIN CAPITAL LETTER O + 0x0050: 0x0050, # LATIN CAPITAL LETTER P + 0x0051: 0x0051, # LATIN CAPITAL LETTER Q + 0x0052: 0x0052, # LATIN CAPITAL LETTER R + 0x0053: 0x0053, # LATIN CAPITAL LETTER S + 0x0054: 0x0054, # LATIN CAPITAL LETTER T + 0x0055: 0x0055, # LATIN CAPITAL LETTER U + 0x0056: 0x0056, # LATIN CAPITAL LETTER V + 0x0057: 0x0057, # LATIN CAPITAL LETTER W + 0x0058: 0x0058, # LATIN CAPITAL LETTER X + 0x0059: 0x0059, # LATIN CAPITAL LETTER Y + 0x005a: 0x005a, # LATIN CAPITAL LETTER Z + 0x005b: 0x005b, # LEFT SQUARE BRACKET + 0x005c: 0x005c, # REVERSE SOLIDUS + 0x005d: 0x005d, # RIGHT SQUARE BRACKET + 0x005e: 0x005e, # CIRCUMFLEX ACCENT + 0x005f: 0x005f, # LOW LINE + 0x0060: 0x0060, # GRAVE ACCENT + 0x0061: 0x0061, # LATIN SMALL LETTER A + 0x0062: 0x0062, # LATIN SMALL LETTER B + 0x0063: 0x0063, # LATIN SMALL LETTER C + 0x0064: 0x0064, # LATIN SMALL LETTER D + 0x0065: 0x0065, # LATIN SMALL LETTER E + 0x0066: 0x0066, # LATIN SMALL LETTER F + 0x0067: 0x0067, # LATIN SMALL LETTER G + 0x0068: 0x0068, # LATIN SMALL LETTER H + 0x0069: 0x0069, # LATIN SMALL LETTER I + 0x006a: 0x006a, # LATIN SMALL LETTER J + 0x006b: 0x006b, # LATIN SMALL LETTER K + 0x006c: 0x006c, # LATIN SMALL LETTER L + 0x006d: 0x006d, # LATIN SMALL LETTER M + 0x006e: 0x006e, # LATIN SMALL LETTER N + 0x006f: 0x006f, # LATIN SMALL LETTER O + 0x0070: 0x0070, # LATIN SMALL LETTER P + 0x0071: 0x0071, # LATIN SMALL LETTER Q + 0x0072: 0x0072, # LATIN SMALL LETTER R + 0x0073: 0x0073, # LATIN SMALL LETTER S + 0x0074: 0x0074, # LATIN SMALL LETTER T + 0x0075: 0x0075, # LATIN SMALL LETTER U + 0x0076: 0x0076, # LATIN SMALL LETTER V + 0x0077: 0x0077, # LATIN SMALL LETTER W + 0x0078: 0x0078, # LATIN SMALL LETTER X + 0x0079: 0x0079, # LATIN SMALL LETTER Y + 0x007a: 0x007a, # LATIN SMALL LETTER Z + 0x007b: 0x007b, # LEFT CURLY BRACKET + 0x007c: 0x007c, # VERTICAL LINE + 0x007d: 0x007d, # RIGHT CURLY BRACKET + 0x007e: 0x007e, # TILDE + 0x007f: 0x007f, # DELETE + 0x00a0: 0x00ff, # NO-BREAK SPACE + 0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK + 0x00a2: 0x009b, # CENT SIGN + 0x00a3: 0x009c, # POUND SIGN + 0x00a5: 0x009d, # YEN SIGN + 0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR + 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00ac: 0x00aa, # NOT SIGN + 0x00b0: 0x00f8, # DEGREE SIGN + 0x00b1: 0x00f1, # PLUS-MINUS SIGN + 0x00b2: 0x00fd, # SUPERSCRIPT TWO + 0x00b5: 0x00e6, # MICRO SIGN + 0x00b7: 0x00fa, # MIDDLE DOT + 0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR + 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER + 0x00bd: 0x00ab, # VULGAR 
FRACTION ONE HALF + 0x00bf: 0x00a8, # INVERTED QUESTION MARK + 0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE + 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S (GERMAN) + 0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE + 0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE + 0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE + 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE + 0x00f7: 0x00f6, # DIVISION SIGN + 0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE + 0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK + 0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA + 0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA + 0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA + 0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI + 0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA + 0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA + 0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA + 0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON + 0x03c0: 0x00e3, # GREEK SMALL LETTER PI + 0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA + 0x03c4: 0x00e7, # GREEK SMALL LETTER TAU + 0x03c6: 0x00ed, # GREEK SMALL LETTER PHI + 0x05d0: 0x0080, # HEBREW LETTER ALEF + 0x05d1: 0x0081, # HEBREW LETTER BET + 0x05d2: 0x0082, # HEBREW LETTER GIMEL + 0x05d3: 0x0083, # HEBREW LETTER DALET + 0x05d4: 0x0084, # HEBREW LETTER HE + 0x05d5: 0x0085, # HEBREW LETTER VAV + 0x05d6: 0x0086, # HEBREW LETTER ZAYIN + 0x05d7: 0x0087, # HEBREW LETTER HET + 0x05d8: 0x0088, # HEBREW LETTER TET + 0x05d9: 0x0089, # HEBREW LETTER YOD + 0x05da: 0x008a, # HEBREW LETTER FINAL KAF + 0x05db: 0x008b, # HEBREW LETTER KAF + 0x05dc: 0x008c, # HEBREW LETTER LAMED + 0x05dd: 0x008d, # HEBREW LETTER FINAL MEM + 0x05de: 0x008e, # HEBREW LETTER MEM + 0x05df: 0x008f, # HEBREW LETTER FINAL NUN + 0x05e0: 0x0090, # HEBREW LETTER NUN + 0x05e1: 0x0091, # HEBREW LETTER SAMEKH + 0x05e2: 0x0092, # HEBREW LETTER AYIN + 0x05e3: 0x0093, # HEBREW LETTER FINAL PE + 0x05e4: 0x0094, # HEBREW LETTER PE + 0x05e5: 0x0095, # HEBREW LETTER FINAL TSADI + 0x05e6: 0x0096, # HEBREW LETTER TSADI + 0x05e7: 0x0097, # HEBREW LETTER QOF + 0x05e8: 0x0098, # HEBREW LETTER RESH + 0x05e9: 0x0099, # HEBREW LETTER SHIN + 0x05ea: 0x009a, # HEBREW LETTER TAV + 0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N + 0x20a7: 0x009e, # PESETA SIGN + 0x2219: 0x00f9, # BULLET OPERATOR + 0x221a: 0x00fb, # SQUARE ROOT + 0x221e: 0x00ec, # INFINITY + 0x2229: 0x00ef, # INTERSECTION + 0x2248: 0x00f7, # ALMOST EQUAL TO + 0x2261: 0x00f0, # IDENTICAL TO + 0x2264: 0x00f3, # LESS-THAN OR EQUAL TO + 0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO + 0x2310: 0x00a9, # REVERSED NOT SIGN + 0x2320: 0x00f4, # TOP HALF INTEGRAL + 0x2321: 0x00f5, # BOTTOM HALF INTEGRAL + 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL + 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL + 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT + 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT + 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT + 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT + 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT + 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT + 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL + 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL + 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL + 0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE + 0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE + 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT + 0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT 
DOUBLE + 0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE + 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT + 0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE + 0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE + 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT + 0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE + 0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE + 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT + 0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE + 0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE + 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + 0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE + 0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE + 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT + 0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE + 0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE + 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + 0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE + 0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE + 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL + 0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE + 0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE + 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + 0x2580: 0x00df, # UPPER HALF BLOCK + 0x2584: 0x00dc, # LOWER HALF BLOCK + 0x2588: 0x00db, # FULL BLOCK + 0x258c: 0x00dd, # LEFT HALF BLOCK + 0x2590: 0x00de, # RIGHT HALF BLOCK + 0x2591: 0x00b0, # LIGHT SHADE + 0x2592: 0x00b1, # MEDIUM SHADE + 0x2593: 0x00b2, # DARK SHADE + 0x25a0: 0x00fe, # BLACK SQUARE +} diff --git a/venv/Lib/encodings/cp863.py b/venv/Lib/encodings/cp863.py new file mode 100644 index 00000000..764180b6 --- /dev/null +++ b/venv/Lib/encodings/cp863.py @@ -0,0 +1,698 @@ +""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP863.TXT' with gencodec.py. 
+ +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_map) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_map)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='cp863', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + +### Decoding Map + +decoding_map = codecs.make_identity_dict(range(256)) +decoding_map.update({ + 0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA + 0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS + 0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE + 0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX + 0x0084: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX + 0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE + 0x0086: 0x00b6, # PILCROW SIGN + 0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA + 0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX + 0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS + 0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE + 0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS + 0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX + 0x008d: 0x2017, # DOUBLE LOW LINE + 0x008e: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE + 0x008f: 0x00a7, # SECTION SIGN + 0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE + 0x0091: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE + 0x0092: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX + 0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX + 0x0094: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS + 0x0095: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS + 0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX + 0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE + 0x0098: 0x00a4, # CURRENCY SIGN + 0x0099: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX + 0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS + 0x009b: 0x00a2, # CENT SIGN + 0x009c: 0x00a3, # POUND SIGN + 0x009d: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE + 0x009e: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX + 0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK + 0x00a0: 0x00a6, # BROKEN BAR + 0x00a1: 0x00b4, # ACUTE ACCENT + 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE + 0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE + 0x00a4: 0x00a8, # DIAERESIS + 0x00a5: 0x00b8, # CEDILLA + 0x00a6: 0x00b3, # SUPERSCRIPT THREE + 0x00a7: 0x00af, # MACRON + 0x00a8: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX + 0x00a9: 0x2310, # REVERSED NOT SIGN + 0x00aa: 0x00ac, # NOT SIGN + 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF + 0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER + 0x00ad: 0x00be, # VULGAR FRACTION THREE QUARTERS + 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00b0: 0x2591, # LIGHT SHADE + 0x00b1: 0x2592, # MEDIUM SHADE + 0x00b2: 0x2593, # DARK SHADE + 0x00b3: 0x2502, # 
BOX DRAWINGS LIGHT VERTICAL + 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT + 0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE + 0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE + 0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE + 0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE + 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT + 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL + 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT + 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT + 0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE + 0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE + 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT + 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT + 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL + 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT + 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL + 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + 0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE + 0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE + 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT + 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT + 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL + 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL + 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + 0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE + 0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE + 0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE + 0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE + 0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE + 0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE + 0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE + 0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE + 0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE + 0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE + 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT + 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT + 0x00db: 0x2588, # FULL BLOCK + 0x00dc: 0x2584, # LOWER HALF BLOCK + 0x00dd: 0x258c, # LEFT HALF BLOCK + 0x00de: 0x2590, # RIGHT HALF BLOCK + 0x00df: 0x2580, # UPPER HALF BLOCK + 0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA + 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S + 0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA + 0x00e3: 0x03c0, # GREEK SMALL LETTER PI + 0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA + 0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA + 0x00e6: 0x00b5, # MICRO SIGN + 0x00e7: 0x03c4, # GREEK SMALL LETTER TAU + 0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI + 0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA + 0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA + 0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA + 0x00ec: 0x221e, # INFINITY + 0x00ed: 0x03c6, # GREEK SMALL LETTER PHI + 0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON + 0x00ef: 0x2229, # INTERSECTION + 0x00f0: 0x2261, # IDENTICAL TO + 0x00f1: 0x00b1, # PLUS-MINUS SIGN + 0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO + 0x00f3: 0x2264, # LESS-THAN OR EQUAL TO + 0x00f4: 0x2320, # TOP HALF INTEGRAL + 0x00f5: 0x2321, # BOTTOM HALF INTEGRAL + 0x00f6: 0x00f7, # DIVISION SIGN + 0x00f7: 0x2248, # ALMOST EQUAL TO + 0x00f8: 0x00b0, # DEGREE SIGN + 0x00f9: 
0x2219, # BULLET OPERATOR + 0x00fa: 0x00b7, # MIDDLE DOT + 0x00fb: 0x221a, # SQUARE ROOT + 0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N + 0x00fd: 0x00b2, # SUPERSCRIPT TWO + 0x00fe: 0x25a0, # BLACK SQUARE + 0x00ff: 0x00a0, # NO-BREAK SPACE +}) + +### Decoding Table + +decoding_table = ( + '\x00' # 0x0000 -> NULL + '\x01' # 0x0001 -> START OF HEADING + '\x02' # 0x0002 -> START OF TEXT + '\x03' # 0x0003 -> END OF TEXT + '\x04' # 0x0004 -> END OF TRANSMISSION + '\x05' # 0x0005 -> ENQUIRY + '\x06' # 0x0006 -> ACKNOWLEDGE + '\x07' # 0x0007 -> BELL + '\x08' # 0x0008 -> BACKSPACE + '\t' # 0x0009 -> HORIZONTAL TABULATION + '\n' # 0x000a -> LINE FEED + '\x0b' # 0x000b -> VERTICAL TABULATION + '\x0c' # 0x000c -> FORM FEED + '\r' # 0x000d -> CARRIAGE RETURN + '\x0e' # 0x000e -> SHIFT OUT + '\x0f' # 0x000f -> SHIFT IN + '\x10' # 0x0010 -> DATA LINK ESCAPE + '\x11' # 0x0011 -> DEVICE CONTROL ONE + '\x12' # 0x0012 -> DEVICE CONTROL TWO + '\x13' # 0x0013 -> DEVICE CONTROL THREE + '\x14' # 0x0014 -> DEVICE CONTROL FOUR + '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x0016 -> SYNCHRONOUS IDLE + '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK + '\x18' # 0x0018 -> CANCEL + '\x19' # 0x0019 -> END OF MEDIUM + '\x1a' # 0x001a -> SUBSTITUTE + '\x1b' # 0x001b -> ESCAPE + '\x1c' # 0x001c -> FILE SEPARATOR + '\x1d' # 0x001d -> GROUP SEPARATOR + '\x1e' # 0x001e -> RECORD SEPARATOR + '\x1f' # 0x001f -> UNIT SEPARATOR + ' ' # 0x0020 -> SPACE + '!' # 0x0021 -> EXCLAMATION MARK + '"' # 0x0022 -> QUOTATION MARK + '#' # 0x0023 -> NUMBER SIGN + '$' # 0x0024 -> DOLLAR SIGN + '%' # 0x0025 -> PERCENT SIGN + '&' # 0x0026 -> AMPERSAND + "'" # 0x0027 -> APOSTROPHE + '(' # 0x0028 -> LEFT PARENTHESIS + ')' # 0x0029 -> RIGHT PARENTHESIS + '*' # 0x002a -> ASTERISK + '+' # 0x002b -> PLUS SIGN + ',' # 0x002c -> COMMA + '-' # 0x002d -> HYPHEN-MINUS + '.' # 0x002e -> FULL STOP + '/' # 0x002f -> SOLIDUS + '0' # 0x0030 -> DIGIT ZERO + '1' # 0x0031 -> DIGIT ONE + '2' # 0x0032 -> DIGIT TWO + '3' # 0x0033 -> DIGIT THREE + '4' # 0x0034 -> DIGIT FOUR + '5' # 0x0035 -> DIGIT FIVE + '6' # 0x0036 -> DIGIT SIX + '7' # 0x0037 -> DIGIT SEVEN + '8' # 0x0038 -> DIGIT EIGHT + '9' # 0x0039 -> DIGIT NINE + ':' # 0x003a -> COLON + ';' # 0x003b -> SEMICOLON + '<' # 0x003c -> LESS-THAN SIGN + '=' # 0x003d -> EQUALS SIGN + '>' # 0x003e -> GREATER-THAN SIGN + '?' 
# 0x003f -> QUESTION MARK + '@' # 0x0040 -> COMMERCIAL AT + 'A' # 0x0041 -> LATIN CAPITAL LETTER A + 'B' # 0x0042 -> LATIN CAPITAL LETTER B + 'C' # 0x0043 -> LATIN CAPITAL LETTER C + 'D' # 0x0044 -> LATIN CAPITAL LETTER D + 'E' # 0x0045 -> LATIN CAPITAL LETTER E + 'F' # 0x0046 -> LATIN CAPITAL LETTER F + 'G' # 0x0047 -> LATIN CAPITAL LETTER G + 'H' # 0x0048 -> LATIN CAPITAL LETTER H + 'I' # 0x0049 -> LATIN CAPITAL LETTER I + 'J' # 0x004a -> LATIN CAPITAL LETTER J + 'K' # 0x004b -> LATIN CAPITAL LETTER K + 'L' # 0x004c -> LATIN CAPITAL LETTER L + 'M' # 0x004d -> LATIN CAPITAL LETTER M + 'N' # 0x004e -> LATIN CAPITAL LETTER N + 'O' # 0x004f -> LATIN CAPITAL LETTER O + 'P' # 0x0050 -> LATIN CAPITAL LETTER P + 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q + 'R' # 0x0052 -> LATIN CAPITAL LETTER R + 'S' # 0x0053 -> LATIN CAPITAL LETTER S + 'T' # 0x0054 -> LATIN CAPITAL LETTER T + 'U' # 0x0055 -> LATIN CAPITAL LETTER U + 'V' # 0x0056 -> LATIN CAPITAL LETTER V + 'W' # 0x0057 -> LATIN CAPITAL LETTER W + 'X' # 0x0058 -> LATIN CAPITAL LETTER X + 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y + 'Z' # 0x005a -> LATIN CAPITAL LETTER Z + '[' # 0x005b -> LEFT SQUARE BRACKET + '\\' # 0x005c -> REVERSE SOLIDUS + ']' # 0x005d -> RIGHT SQUARE BRACKET + '^' # 0x005e -> CIRCUMFLEX ACCENT + '_' # 0x005f -> LOW LINE + '`' # 0x0060 -> GRAVE ACCENT + 'a' # 0x0061 -> LATIN SMALL LETTER A + 'b' # 0x0062 -> LATIN SMALL LETTER B + 'c' # 0x0063 -> LATIN SMALL LETTER C + 'd' # 0x0064 -> LATIN SMALL LETTER D + 'e' # 0x0065 -> LATIN SMALL LETTER E + 'f' # 0x0066 -> LATIN SMALL LETTER F + 'g' # 0x0067 -> LATIN SMALL LETTER G + 'h' # 0x0068 -> LATIN SMALL LETTER H + 'i' # 0x0069 -> LATIN SMALL LETTER I + 'j' # 0x006a -> LATIN SMALL LETTER J + 'k' # 0x006b -> LATIN SMALL LETTER K + 'l' # 0x006c -> LATIN SMALL LETTER L + 'm' # 0x006d -> LATIN SMALL LETTER M + 'n' # 0x006e -> LATIN SMALL LETTER N + 'o' # 0x006f -> LATIN SMALL LETTER O + 'p' # 0x0070 -> LATIN SMALL LETTER P + 'q' # 0x0071 -> LATIN SMALL LETTER Q + 'r' # 0x0072 -> LATIN SMALL LETTER R + 's' # 0x0073 -> LATIN SMALL LETTER S + 't' # 0x0074 -> LATIN SMALL LETTER T + 'u' # 0x0075 -> LATIN SMALL LETTER U + 'v' # 0x0076 -> LATIN SMALL LETTER V + 'w' # 0x0077 -> LATIN SMALL LETTER W + 'x' # 0x0078 -> LATIN SMALL LETTER X + 'y' # 0x0079 -> LATIN SMALL LETTER Y + 'z' # 0x007a -> LATIN SMALL LETTER Z + '{' # 0x007b -> LEFT CURLY BRACKET + '|' # 0x007c -> VERTICAL LINE + '}' # 0x007d -> RIGHT CURLY BRACKET + '~' # 0x007e -> TILDE + '\x7f' # 0x007f -> DELETE + '\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA + '\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS + '\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE + '\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX + '\xc2' # 0x0084 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX + '\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE + '\xb6' # 0x0086 -> PILCROW SIGN + '\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA + '\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX + '\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS + '\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE + '\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS + '\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX + '\u2017' # 0x008d -> DOUBLE LOW LINE + '\xc0' # 0x008e -> LATIN CAPITAL LETTER A WITH GRAVE + '\xa7' # 0x008f -> SECTION SIGN + '\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE + '\xc8' # 0x0091 -> LATIN CAPITAL LETTER E WITH GRAVE + '\xca' # 0x0092 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX + '\xf4' # 0x0093 -> LATIN SMALL LETTER O 
WITH CIRCUMFLEX + '\xcb' # 0x0094 -> LATIN CAPITAL LETTER E WITH DIAERESIS + '\xcf' # 0x0095 -> LATIN CAPITAL LETTER I WITH DIAERESIS + '\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX + '\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE + '\xa4' # 0x0098 -> CURRENCY SIGN + '\xd4' # 0x0099 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX + '\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\xa2' # 0x009b -> CENT SIGN + '\xa3' # 0x009c -> POUND SIGN + '\xd9' # 0x009d -> LATIN CAPITAL LETTER U WITH GRAVE + '\xdb' # 0x009e -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX + '\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK + '\xa6' # 0x00a0 -> BROKEN BAR + '\xb4' # 0x00a1 -> ACUTE ACCENT + '\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE + '\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE + '\xa8' # 0x00a4 -> DIAERESIS + '\xb8' # 0x00a5 -> CEDILLA + '\xb3' # 0x00a6 -> SUPERSCRIPT THREE + '\xaf' # 0x00a7 -> MACRON + '\xce' # 0x00a8 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX + '\u2310' # 0x00a9 -> REVERSED NOT SIGN + '\xac' # 0x00aa -> NOT SIGN + '\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF + '\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER + '\xbe' # 0x00ad -> VULGAR FRACTION THREE QUARTERS + '\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\u2591' # 0x00b0 -> LIGHT SHADE + '\u2592' # 0x00b1 -> MEDIUM SHADE + '\u2593' # 0x00b2 -> DARK SHADE + '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL + '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT + '\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE + '\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE + '\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE + '\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE + '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT + '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL + '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT + '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT + '\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE + '\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE + '\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT + '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT + '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL + '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + '\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT + '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL + '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + '\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE + '\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE + '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT + '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT + '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL + '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL + '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + '\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE + '\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE + '\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE + '\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE + '\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE + '\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE + '\u2552' # 
0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE + '\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE + '\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE + '\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE + '\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT + '\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT + '\u2588' # 0x00db -> FULL BLOCK + '\u2584' # 0x00dc -> LOWER HALF BLOCK + '\u258c' # 0x00dd -> LEFT HALF BLOCK + '\u2590' # 0x00de -> RIGHT HALF BLOCK + '\u2580' # 0x00df -> UPPER HALF BLOCK + '\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA + '\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S + '\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA + '\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI + '\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA + '\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA + '\xb5' # 0x00e6 -> MICRO SIGN + '\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU + '\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI + '\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA + '\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA + '\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA + '\u221e' # 0x00ec -> INFINITY + '\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI + '\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON + '\u2229' # 0x00ef -> INTERSECTION + '\u2261' # 0x00f0 -> IDENTICAL TO + '\xb1' # 0x00f1 -> PLUS-MINUS SIGN + '\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO + '\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO + '\u2320' # 0x00f4 -> TOP HALF INTEGRAL + '\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL + '\xf7' # 0x00f6 -> DIVISION SIGN + '\u2248' # 0x00f7 -> ALMOST EQUAL TO + '\xb0' # 0x00f8 -> DEGREE SIGN + '\u2219' # 0x00f9 -> BULLET OPERATOR + '\xb7' # 0x00fa -> MIDDLE DOT + '\u221a' # 0x00fb -> SQUARE ROOT + '\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N + '\xb2' # 0x00fd -> SUPERSCRIPT TWO + '\u25a0' # 0x00fe -> BLACK SQUARE + '\xa0' # 0x00ff -> NO-BREAK SPACE +) + +### Encoding Map + +encoding_map = { + 0x0000: 0x0000, # NULL + 0x0001: 0x0001, # START OF HEADING + 0x0002: 0x0002, # START OF TEXT + 0x0003: 0x0003, # END OF TEXT + 0x0004: 0x0004, # END OF TRANSMISSION + 0x0005: 0x0005, # ENQUIRY + 0x0006: 0x0006, # ACKNOWLEDGE + 0x0007: 0x0007, # BELL + 0x0008: 0x0008, # BACKSPACE + 0x0009: 0x0009, # HORIZONTAL TABULATION + 0x000a: 0x000a, # LINE FEED + 0x000b: 0x000b, # VERTICAL TABULATION + 0x000c: 0x000c, # FORM FEED + 0x000d: 0x000d, # CARRIAGE RETURN + 0x000e: 0x000e, # SHIFT OUT + 0x000f: 0x000f, # SHIFT IN + 0x0010: 0x0010, # DATA LINK ESCAPE + 0x0011: 0x0011, # DEVICE CONTROL ONE + 0x0012: 0x0012, # DEVICE CONTROL TWO + 0x0013: 0x0013, # DEVICE CONTROL THREE + 0x0014: 0x0014, # DEVICE CONTROL FOUR + 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE + 0x0016: 0x0016, # SYNCHRONOUS IDLE + 0x0017: 0x0017, # END OF TRANSMISSION BLOCK + 0x0018: 0x0018, # CANCEL + 0x0019: 0x0019, # END OF MEDIUM + 0x001a: 0x001a, # SUBSTITUTE + 0x001b: 0x001b, # ESCAPE + 0x001c: 0x001c, # FILE SEPARATOR + 0x001d: 0x001d, # GROUP SEPARATOR + 0x001e: 0x001e, # RECORD SEPARATOR + 0x001f: 0x001f, # UNIT SEPARATOR + 0x0020: 0x0020, # SPACE + 0x0021: 0x0021, # EXCLAMATION MARK + 0x0022: 0x0022, # QUOTATION MARK + 0x0023: 0x0023, # NUMBER SIGN + 0x0024: 0x0024, # DOLLAR SIGN + 0x0025: 0x0025, # PERCENT SIGN + 0x0026: 0x0026, # AMPERSAND + 0x0027: 0x0027, # APOSTROPHE + 0x0028: 0x0028, # LEFT PARENTHESIS + 0x0029: 0x0029, # RIGHT PARENTHESIS + 0x002a: 0x002a, # ASTERISK + 0x002b: 0x002b, # PLUS SIGN + 0x002c: 0x002c, # COMMA + 0x002d: 0x002d, # HYPHEN-MINUS + 0x002e: 0x002e, # FULL STOP + 
0x002f: 0x002f, # SOLIDUS + 0x0030: 0x0030, # DIGIT ZERO + 0x0031: 0x0031, # DIGIT ONE + 0x0032: 0x0032, # DIGIT TWO + 0x0033: 0x0033, # DIGIT THREE + 0x0034: 0x0034, # DIGIT FOUR + 0x0035: 0x0035, # DIGIT FIVE + 0x0036: 0x0036, # DIGIT SIX + 0x0037: 0x0037, # DIGIT SEVEN + 0x0038: 0x0038, # DIGIT EIGHT + 0x0039: 0x0039, # DIGIT NINE + 0x003a: 0x003a, # COLON + 0x003b: 0x003b, # SEMICOLON + 0x003c: 0x003c, # LESS-THAN SIGN + 0x003d: 0x003d, # EQUALS SIGN + 0x003e: 0x003e, # GREATER-THAN SIGN + 0x003f: 0x003f, # QUESTION MARK + 0x0040: 0x0040, # COMMERCIAL AT + 0x0041: 0x0041, # LATIN CAPITAL LETTER A + 0x0042: 0x0042, # LATIN CAPITAL LETTER B + 0x0043: 0x0043, # LATIN CAPITAL LETTER C + 0x0044: 0x0044, # LATIN CAPITAL LETTER D + 0x0045: 0x0045, # LATIN CAPITAL LETTER E + 0x0046: 0x0046, # LATIN CAPITAL LETTER F + 0x0047: 0x0047, # LATIN CAPITAL LETTER G + 0x0048: 0x0048, # LATIN CAPITAL LETTER H + 0x0049: 0x0049, # LATIN CAPITAL LETTER I + 0x004a: 0x004a, # LATIN CAPITAL LETTER J + 0x004b: 0x004b, # LATIN CAPITAL LETTER K + 0x004c: 0x004c, # LATIN CAPITAL LETTER L + 0x004d: 0x004d, # LATIN CAPITAL LETTER M + 0x004e: 0x004e, # LATIN CAPITAL LETTER N + 0x004f: 0x004f, # LATIN CAPITAL LETTER O + 0x0050: 0x0050, # LATIN CAPITAL LETTER P + 0x0051: 0x0051, # LATIN CAPITAL LETTER Q + 0x0052: 0x0052, # LATIN CAPITAL LETTER R + 0x0053: 0x0053, # LATIN CAPITAL LETTER S + 0x0054: 0x0054, # LATIN CAPITAL LETTER T + 0x0055: 0x0055, # LATIN CAPITAL LETTER U + 0x0056: 0x0056, # LATIN CAPITAL LETTER V + 0x0057: 0x0057, # LATIN CAPITAL LETTER W + 0x0058: 0x0058, # LATIN CAPITAL LETTER X + 0x0059: 0x0059, # LATIN CAPITAL LETTER Y + 0x005a: 0x005a, # LATIN CAPITAL LETTER Z + 0x005b: 0x005b, # LEFT SQUARE BRACKET + 0x005c: 0x005c, # REVERSE SOLIDUS + 0x005d: 0x005d, # RIGHT SQUARE BRACKET + 0x005e: 0x005e, # CIRCUMFLEX ACCENT + 0x005f: 0x005f, # LOW LINE + 0x0060: 0x0060, # GRAVE ACCENT + 0x0061: 0x0061, # LATIN SMALL LETTER A + 0x0062: 0x0062, # LATIN SMALL LETTER B + 0x0063: 0x0063, # LATIN SMALL LETTER C + 0x0064: 0x0064, # LATIN SMALL LETTER D + 0x0065: 0x0065, # LATIN SMALL LETTER E + 0x0066: 0x0066, # LATIN SMALL LETTER F + 0x0067: 0x0067, # LATIN SMALL LETTER G + 0x0068: 0x0068, # LATIN SMALL LETTER H + 0x0069: 0x0069, # LATIN SMALL LETTER I + 0x006a: 0x006a, # LATIN SMALL LETTER J + 0x006b: 0x006b, # LATIN SMALL LETTER K + 0x006c: 0x006c, # LATIN SMALL LETTER L + 0x006d: 0x006d, # LATIN SMALL LETTER M + 0x006e: 0x006e, # LATIN SMALL LETTER N + 0x006f: 0x006f, # LATIN SMALL LETTER O + 0x0070: 0x0070, # LATIN SMALL LETTER P + 0x0071: 0x0071, # LATIN SMALL LETTER Q + 0x0072: 0x0072, # LATIN SMALL LETTER R + 0x0073: 0x0073, # LATIN SMALL LETTER S + 0x0074: 0x0074, # LATIN SMALL LETTER T + 0x0075: 0x0075, # LATIN SMALL LETTER U + 0x0076: 0x0076, # LATIN SMALL LETTER V + 0x0077: 0x0077, # LATIN SMALL LETTER W + 0x0078: 0x0078, # LATIN SMALL LETTER X + 0x0079: 0x0079, # LATIN SMALL LETTER Y + 0x007a: 0x007a, # LATIN SMALL LETTER Z + 0x007b: 0x007b, # LEFT CURLY BRACKET + 0x007c: 0x007c, # VERTICAL LINE + 0x007d: 0x007d, # RIGHT CURLY BRACKET + 0x007e: 0x007e, # TILDE + 0x007f: 0x007f, # DELETE + 0x00a0: 0x00ff, # NO-BREAK SPACE + 0x00a2: 0x009b, # CENT SIGN + 0x00a3: 0x009c, # POUND SIGN + 0x00a4: 0x0098, # CURRENCY SIGN + 0x00a6: 0x00a0, # BROKEN BAR + 0x00a7: 0x008f, # SECTION SIGN + 0x00a8: 0x00a4, # DIAERESIS + 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00ac: 0x00aa, # NOT SIGN + 0x00af: 0x00a7, # MACRON + 0x00b0: 0x00f8, # DEGREE SIGN + 0x00b1: 0x00f1, # PLUS-MINUS SIGN + 0x00b2: 
0x00fd, # SUPERSCRIPT TWO + 0x00b3: 0x00a6, # SUPERSCRIPT THREE + 0x00b4: 0x00a1, # ACUTE ACCENT + 0x00b5: 0x00e6, # MICRO SIGN + 0x00b6: 0x0086, # PILCROW SIGN + 0x00b7: 0x00fa, # MIDDLE DOT + 0x00b8: 0x00a5, # CEDILLA + 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER + 0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF + 0x00be: 0x00ad, # VULGAR FRACTION THREE QUARTERS + 0x00c0: 0x008e, # LATIN CAPITAL LETTER A WITH GRAVE + 0x00c2: 0x0084, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX + 0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA + 0x00c8: 0x0091, # LATIN CAPITAL LETTER E WITH GRAVE + 0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE + 0x00ca: 0x0092, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX + 0x00cb: 0x0094, # LATIN CAPITAL LETTER E WITH DIAERESIS + 0x00ce: 0x00a8, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX + 0x00cf: 0x0095, # LATIN CAPITAL LETTER I WITH DIAERESIS + 0x00d4: 0x0099, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX + 0x00d9: 0x009d, # LATIN CAPITAL LETTER U WITH GRAVE + 0x00db: 0x009e, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX + 0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS + 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S + 0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE + 0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX + 0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA + 0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE + 0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE + 0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX + 0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS + 0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX + 0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS + 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE + 0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX + 0x00f7: 0x00f6, # DIVISION SIGN + 0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE + 0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE + 0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX + 0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS + 0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK + 0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA + 0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA + 0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA + 0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI + 0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA + 0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA + 0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA + 0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON + 0x03c0: 0x00e3, # GREEK SMALL LETTER PI + 0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA + 0x03c4: 0x00e7, # GREEK SMALL LETTER TAU + 0x03c6: 0x00ed, # GREEK SMALL LETTER PHI + 0x2017: 0x008d, # DOUBLE LOW LINE + 0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N + 0x2219: 0x00f9, # BULLET OPERATOR + 0x221a: 0x00fb, # SQUARE ROOT + 0x221e: 0x00ec, # INFINITY + 0x2229: 0x00ef, # INTERSECTION + 0x2248: 0x00f7, # ALMOST EQUAL TO + 0x2261: 0x00f0, # IDENTICAL TO + 0x2264: 0x00f3, # LESS-THAN OR EQUAL TO + 0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO + 0x2310: 0x00a9, # REVERSED NOT SIGN + 0x2320: 0x00f4, # TOP HALF INTEGRAL + 0x2321: 0x00f5, # BOTTOM HALF INTEGRAL + 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL + 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL + 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT + 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT + 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT + 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT + 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT + 0x2524: 0x00b4, # BOX DRAWINGS 
LIGHT VERTICAL AND LEFT + 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL + 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL + 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL + 0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE + 0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE + 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT + 0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE + 0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE + 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT + 0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE + 0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE + 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT + 0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE + 0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE + 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT + 0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE + 0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE + 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + 0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE + 0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE + 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT + 0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE + 0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE + 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + 0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE + 0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE + 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL + 0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE + 0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE + 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + 0x2580: 0x00df, # UPPER HALF BLOCK + 0x2584: 0x00dc, # LOWER HALF BLOCK + 0x2588: 0x00db, # FULL BLOCK + 0x258c: 0x00dd, # LEFT HALF BLOCK + 0x2590: 0x00de, # RIGHT HALF BLOCK + 0x2591: 0x00b0, # LIGHT SHADE + 0x2592: 0x00b1, # MEDIUM SHADE + 0x2593: 0x00b2, # DARK SHADE + 0x25a0: 0x00fe, # BLACK SQUARE +} diff --git a/venv/Lib/encodings/cp864.py b/venv/Lib/encodings/cp864.py new file mode 100644 index 00000000..53df482d --- /dev/null +++ b/venv/Lib/encodings/cp864.py @@ -0,0 +1,690 @@ +""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP864.TXT' with gencodec.py. 
+ +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_map) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_map)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='cp864', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + +### Decoding Map + +decoding_map = codecs.make_identity_dict(range(256)) +decoding_map.update({ + 0x0025: 0x066a, # ARABIC PERCENT SIGN + 0x0080: 0x00b0, # DEGREE SIGN + 0x0081: 0x00b7, # MIDDLE DOT + 0x0082: 0x2219, # BULLET OPERATOR + 0x0083: 0x221a, # SQUARE ROOT + 0x0084: 0x2592, # MEDIUM SHADE + 0x0085: 0x2500, # FORMS LIGHT HORIZONTAL + 0x0086: 0x2502, # FORMS LIGHT VERTICAL + 0x0087: 0x253c, # FORMS LIGHT VERTICAL AND HORIZONTAL + 0x0088: 0x2524, # FORMS LIGHT VERTICAL AND LEFT + 0x0089: 0x252c, # FORMS LIGHT DOWN AND HORIZONTAL + 0x008a: 0x251c, # FORMS LIGHT VERTICAL AND RIGHT + 0x008b: 0x2534, # FORMS LIGHT UP AND HORIZONTAL + 0x008c: 0x2510, # FORMS LIGHT DOWN AND LEFT + 0x008d: 0x250c, # FORMS LIGHT DOWN AND RIGHT + 0x008e: 0x2514, # FORMS LIGHT UP AND RIGHT + 0x008f: 0x2518, # FORMS LIGHT UP AND LEFT + 0x0090: 0x03b2, # GREEK SMALL BETA + 0x0091: 0x221e, # INFINITY + 0x0092: 0x03c6, # GREEK SMALL PHI + 0x0093: 0x00b1, # PLUS-OR-MINUS SIGN + 0x0094: 0x00bd, # FRACTION 1/2 + 0x0095: 0x00bc, # FRACTION 1/4 + 0x0096: 0x2248, # ALMOST EQUAL TO + 0x0097: 0x00ab, # LEFT POINTING GUILLEMET + 0x0098: 0x00bb, # RIGHT POINTING GUILLEMET + 0x0099: 0xfef7, # ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE ISOLATED FORM + 0x009a: 0xfef8, # ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE FINAL FORM + 0x009b: None, # UNDEFINED + 0x009c: None, # UNDEFINED + 0x009d: 0xfefb, # ARABIC LIGATURE LAM WITH ALEF ISOLATED FORM + 0x009e: 0xfefc, # ARABIC LIGATURE LAM WITH ALEF FINAL FORM + 0x009f: None, # UNDEFINED + 0x00a1: 0x00ad, # SOFT HYPHEN + 0x00a2: 0xfe82, # ARABIC LETTER ALEF WITH MADDA ABOVE FINAL FORM + 0x00a5: 0xfe84, # ARABIC LETTER ALEF WITH HAMZA ABOVE FINAL FORM + 0x00a6: None, # UNDEFINED + 0x00a7: None, # UNDEFINED + 0x00a8: 0xfe8e, # ARABIC LETTER ALEF FINAL FORM + 0x00a9: 0xfe8f, # ARABIC LETTER BEH ISOLATED FORM + 0x00aa: 0xfe95, # ARABIC LETTER TEH ISOLATED FORM + 0x00ab: 0xfe99, # ARABIC LETTER THEH ISOLATED FORM + 0x00ac: 0x060c, # ARABIC COMMA + 0x00ad: 0xfe9d, # ARABIC LETTER JEEM ISOLATED FORM + 0x00ae: 0xfea1, # ARABIC LETTER HAH ISOLATED FORM + 0x00af: 0xfea5, # ARABIC LETTER KHAH ISOLATED FORM + 0x00b0: 0x0660, # ARABIC-INDIC DIGIT ZERO + 0x00b1: 0x0661, # ARABIC-INDIC DIGIT ONE + 0x00b2: 0x0662, # ARABIC-INDIC DIGIT TWO + 0x00b3: 0x0663, # ARABIC-INDIC DIGIT THREE + 0x00b4: 0x0664, # ARABIC-INDIC DIGIT FOUR + 0x00b5: 0x0665, # ARABIC-INDIC DIGIT FIVE + 0x00b6: 0x0666, # ARABIC-INDIC DIGIT SIX + 0x00b7: 0x0667, # ARABIC-INDIC DIGIT SEVEN + 0x00b8: 0x0668, # ARABIC-INDIC DIGIT EIGHT + 
0x00b9: 0x0669, # ARABIC-INDIC DIGIT NINE + 0x00ba: 0xfed1, # ARABIC LETTER FEH ISOLATED FORM + 0x00bb: 0x061b, # ARABIC SEMICOLON + 0x00bc: 0xfeb1, # ARABIC LETTER SEEN ISOLATED FORM + 0x00bd: 0xfeb5, # ARABIC LETTER SHEEN ISOLATED FORM + 0x00be: 0xfeb9, # ARABIC LETTER SAD ISOLATED FORM + 0x00bf: 0x061f, # ARABIC QUESTION MARK + 0x00c0: 0x00a2, # CENT SIGN + 0x00c1: 0xfe80, # ARABIC LETTER HAMZA ISOLATED FORM + 0x00c2: 0xfe81, # ARABIC LETTER ALEF WITH MADDA ABOVE ISOLATED FORM + 0x00c3: 0xfe83, # ARABIC LETTER ALEF WITH HAMZA ABOVE ISOLATED FORM + 0x00c4: 0xfe85, # ARABIC LETTER WAW WITH HAMZA ABOVE ISOLATED FORM + 0x00c5: 0xfeca, # ARABIC LETTER AIN FINAL FORM + 0x00c6: 0xfe8b, # ARABIC LETTER YEH WITH HAMZA ABOVE INITIAL FORM + 0x00c7: 0xfe8d, # ARABIC LETTER ALEF ISOLATED FORM + 0x00c8: 0xfe91, # ARABIC LETTER BEH INITIAL FORM + 0x00c9: 0xfe93, # ARABIC LETTER TEH MARBUTA ISOLATED FORM + 0x00ca: 0xfe97, # ARABIC LETTER TEH INITIAL FORM + 0x00cb: 0xfe9b, # ARABIC LETTER THEH INITIAL FORM + 0x00cc: 0xfe9f, # ARABIC LETTER JEEM INITIAL FORM + 0x00cd: 0xfea3, # ARABIC LETTER HAH INITIAL FORM + 0x00ce: 0xfea7, # ARABIC LETTER KHAH INITIAL FORM + 0x00cf: 0xfea9, # ARABIC LETTER DAL ISOLATED FORM + 0x00d0: 0xfeab, # ARABIC LETTER THAL ISOLATED FORM + 0x00d1: 0xfead, # ARABIC LETTER REH ISOLATED FORM + 0x00d2: 0xfeaf, # ARABIC LETTER ZAIN ISOLATED FORM + 0x00d3: 0xfeb3, # ARABIC LETTER SEEN INITIAL FORM + 0x00d4: 0xfeb7, # ARABIC LETTER SHEEN INITIAL FORM + 0x00d5: 0xfebb, # ARABIC LETTER SAD INITIAL FORM + 0x00d6: 0xfebf, # ARABIC LETTER DAD INITIAL FORM + 0x00d7: 0xfec1, # ARABIC LETTER TAH ISOLATED FORM + 0x00d8: 0xfec5, # ARABIC LETTER ZAH ISOLATED FORM + 0x00d9: 0xfecb, # ARABIC LETTER AIN INITIAL FORM + 0x00da: 0xfecf, # ARABIC LETTER GHAIN INITIAL FORM + 0x00db: 0x00a6, # BROKEN VERTICAL BAR + 0x00dc: 0x00ac, # NOT SIGN + 0x00dd: 0x00f7, # DIVISION SIGN + 0x00de: 0x00d7, # MULTIPLICATION SIGN + 0x00df: 0xfec9, # ARABIC LETTER AIN ISOLATED FORM + 0x00e0: 0x0640, # ARABIC TATWEEL + 0x00e1: 0xfed3, # ARABIC LETTER FEH INITIAL FORM + 0x00e2: 0xfed7, # ARABIC LETTER QAF INITIAL FORM + 0x00e3: 0xfedb, # ARABIC LETTER KAF INITIAL FORM + 0x00e4: 0xfedf, # ARABIC LETTER LAM INITIAL FORM + 0x00e5: 0xfee3, # ARABIC LETTER MEEM INITIAL FORM + 0x00e6: 0xfee7, # ARABIC LETTER NOON INITIAL FORM + 0x00e7: 0xfeeb, # ARABIC LETTER HEH INITIAL FORM + 0x00e8: 0xfeed, # ARABIC LETTER WAW ISOLATED FORM + 0x00e9: 0xfeef, # ARABIC LETTER ALEF MAKSURA ISOLATED FORM + 0x00ea: 0xfef3, # ARABIC LETTER YEH INITIAL FORM + 0x00eb: 0xfebd, # ARABIC LETTER DAD ISOLATED FORM + 0x00ec: 0xfecc, # ARABIC LETTER AIN MEDIAL FORM + 0x00ed: 0xfece, # ARABIC LETTER GHAIN FINAL FORM + 0x00ee: 0xfecd, # ARABIC LETTER GHAIN ISOLATED FORM + 0x00ef: 0xfee1, # ARABIC LETTER MEEM ISOLATED FORM + 0x00f0: 0xfe7d, # ARABIC SHADDA MEDIAL FORM + 0x00f1: 0x0651, # ARABIC SHADDAH + 0x00f2: 0xfee5, # ARABIC LETTER NOON ISOLATED FORM + 0x00f3: 0xfee9, # ARABIC LETTER HEH ISOLATED FORM + 0x00f4: 0xfeec, # ARABIC LETTER HEH MEDIAL FORM + 0x00f5: 0xfef0, # ARABIC LETTER ALEF MAKSURA FINAL FORM + 0x00f6: 0xfef2, # ARABIC LETTER YEH FINAL FORM + 0x00f7: 0xfed0, # ARABIC LETTER GHAIN MEDIAL FORM + 0x00f8: 0xfed5, # ARABIC LETTER QAF ISOLATED FORM + 0x00f9: 0xfef5, # ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE ISOLATED FORM + 0x00fa: 0xfef6, # ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE FINAL FORM + 0x00fb: 0xfedd, # ARABIC LETTER LAM ISOLATED FORM + 0x00fc: 0xfed9, # ARABIC LETTER KAF ISOLATED FORM + 0x00fd: 0xfef1, # ARABIC LETTER YEH 
ISOLATED FORM + 0x00fe: 0x25a0, # BLACK SQUARE + 0x00ff: None, # UNDEFINED +}) + +### Decoding Table + +decoding_table = ( + '\x00' # 0x0000 -> NULL + '\x01' # 0x0001 -> START OF HEADING + '\x02' # 0x0002 -> START OF TEXT + '\x03' # 0x0003 -> END OF TEXT + '\x04' # 0x0004 -> END OF TRANSMISSION + '\x05' # 0x0005 -> ENQUIRY + '\x06' # 0x0006 -> ACKNOWLEDGE + '\x07' # 0x0007 -> BELL + '\x08' # 0x0008 -> BACKSPACE + '\t' # 0x0009 -> HORIZONTAL TABULATION + '\n' # 0x000a -> LINE FEED + '\x0b' # 0x000b -> VERTICAL TABULATION + '\x0c' # 0x000c -> FORM FEED + '\r' # 0x000d -> CARRIAGE RETURN + '\x0e' # 0x000e -> SHIFT OUT + '\x0f' # 0x000f -> SHIFT IN + '\x10' # 0x0010 -> DATA LINK ESCAPE + '\x11' # 0x0011 -> DEVICE CONTROL ONE + '\x12' # 0x0012 -> DEVICE CONTROL TWO + '\x13' # 0x0013 -> DEVICE CONTROL THREE + '\x14' # 0x0014 -> DEVICE CONTROL FOUR + '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x0016 -> SYNCHRONOUS IDLE + '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK + '\x18' # 0x0018 -> CANCEL + '\x19' # 0x0019 -> END OF MEDIUM + '\x1a' # 0x001a -> SUBSTITUTE + '\x1b' # 0x001b -> ESCAPE + '\x1c' # 0x001c -> FILE SEPARATOR + '\x1d' # 0x001d -> GROUP SEPARATOR + '\x1e' # 0x001e -> RECORD SEPARATOR + '\x1f' # 0x001f -> UNIT SEPARATOR + ' ' # 0x0020 -> SPACE + '!' # 0x0021 -> EXCLAMATION MARK + '"' # 0x0022 -> QUOTATION MARK + '#' # 0x0023 -> NUMBER SIGN + '$' # 0x0024 -> DOLLAR SIGN + '\u066a' # 0x0025 -> ARABIC PERCENT SIGN + '&' # 0x0026 -> AMPERSAND + "'" # 0x0027 -> APOSTROPHE + '(' # 0x0028 -> LEFT PARENTHESIS + ')' # 0x0029 -> RIGHT PARENTHESIS + '*' # 0x002a -> ASTERISK + '+' # 0x002b -> PLUS SIGN + ',' # 0x002c -> COMMA + '-' # 0x002d -> HYPHEN-MINUS + '.' # 0x002e -> FULL STOP + '/' # 0x002f -> SOLIDUS + '0' # 0x0030 -> DIGIT ZERO + '1' # 0x0031 -> DIGIT ONE + '2' # 0x0032 -> DIGIT TWO + '3' # 0x0033 -> DIGIT THREE + '4' # 0x0034 -> DIGIT FOUR + '5' # 0x0035 -> DIGIT FIVE + '6' # 0x0036 -> DIGIT SIX + '7' # 0x0037 -> DIGIT SEVEN + '8' # 0x0038 -> DIGIT EIGHT + '9' # 0x0039 -> DIGIT NINE + ':' # 0x003a -> COLON + ';' # 0x003b -> SEMICOLON + '<' # 0x003c -> LESS-THAN SIGN + '=' # 0x003d -> EQUALS SIGN + '>' # 0x003e -> GREATER-THAN SIGN + '?' 
# 0x003f -> QUESTION MARK + '@' # 0x0040 -> COMMERCIAL AT + 'A' # 0x0041 -> LATIN CAPITAL LETTER A + 'B' # 0x0042 -> LATIN CAPITAL LETTER B + 'C' # 0x0043 -> LATIN CAPITAL LETTER C + 'D' # 0x0044 -> LATIN CAPITAL LETTER D + 'E' # 0x0045 -> LATIN CAPITAL LETTER E + 'F' # 0x0046 -> LATIN CAPITAL LETTER F + 'G' # 0x0047 -> LATIN CAPITAL LETTER G + 'H' # 0x0048 -> LATIN CAPITAL LETTER H + 'I' # 0x0049 -> LATIN CAPITAL LETTER I + 'J' # 0x004a -> LATIN CAPITAL LETTER J + 'K' # 0x004b -> LATIN CAPITAL LETTER K + 'L' # 0x004c -> LATIN CAPITAL LETTER L + 'M' # 0x004d -> LATIN CAPITAL LETTER M + 'N' # 0x004e -> LATIN CAPITAL LETTER N + 'O' # 0x004f -> LATIN CAPITAL LETTER O + 'P' # 0x0050 -> LATIN CAPITAL LETTER P + 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q + 'R' # 0x0052 -> LATIN CAPITAL LETTER R + 'S' # 0x0053 -> LATIN CAPITAL LETTER S + 'T' # 0x0054 -> LATIN CAPITAL LETTER T + 'U' # 0x0055 -> LATIN CAPITAL LETTER U + 'V' # 0x0056 -> LATIN CAPITAL LETTER V + 'W' # 0x0057 -> LATIN CAPITAL LETTER W + 'X' # 0x0058 -> LATIN CAPITAL LETTER X + 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y + 'Z' # 0x005a -> LATIN CAPITAL LETTER Z + '[' # 0x005b -> LEFT SQUARE BRACKET + '\\' # 0x005c -> REVERSE SOLIDUS + ']' # 0x005d -> RIGHT SQUARE BRACKET + '^' # 0x005e -> CIRCUMFLEX ACCENT + '_' # 0x005f -> LOW LINE + '`' # 0x0060 -> GRAVE ACCENT + 'a' # 0x0061 -> LATIN SMALL LETTER A + 'b' # 0x0062 -> LATIN SMALL LETTER B + 'c' # 0x0063 -> LATIN SMALL LETTER C + 'd' # 0x0064 -> LATIN SMALL LETTER D + 'e' # 0x0065 -> LATIN SMALL LETTER E + 'f' # 0x0066 -> LATIN SMALL LETTER F + 'g' # 0x0067 -> LATIN SMALL LETTER G + 'h' # 0x0068 -> LATIN SMALL LETTER H + 'i' # 0x0069 -> LATIN SMALL LETTER I + 'j' # 0x006a -> LATIN SMALL LETTER J + 'k' # 0x006b -> LATIN SMALL LETTER K + 'l' # 0x006c -> LATIN SMALL LETTER L + 'm' # 0x006d -> LATIN SMALL LETTER M + 'n' # 0x006e -> LATIN SMALL LETTER N + 'o' # 0x006f -> LATIN SMALL LETTER O + 'p' # 0x0070 -> LATIN SMALL LETTER P + 'q' # 0x0071 -> LATIN SMALL LETTER Q + 'r' # 0x0072 -> LATIN SMALL LETTER R + 's' # 0x0073 -> LATIN SMALL LETTER S + 't' # 0x0074 -> LATIN SMALL LETTER T + 'u' # 0x0075 -> LATIN SMALL LETTER U + 'v' # 0x0076 -> LATIN SMALL LETTER V + 'w' # 0x0077 -> LATIN SMALL LETTER W + 'x' # 0x0078 -> LATIN SMALL LETTER X + 'y' # 0x0079 -> LATIN SMALL LETTER Y + 'z' # 0x007a -> LATIN SMALL LETTER Z + '{' # 0x007b -> LEFT CURLY BRACKET + '|' # 0x007c -> VERTICAL LINE + '}' # 0x007d -> RIGHT CURLY BRACKET + '~' # 0x007e -> TILDE + '\x7f' # 0x007f -> DELETE + '\xb0' # 0x0080 -> DEGREE SIGN + '\xb7' # 0x0081 -> MIDDLE DOT + '\u2219' # 0x0082 -> BULLET OPERATOR + '\u221a' # 0x0083 -> SQUARE ROOT + '\u2592' # 0x0084 -> MEDIUM SHADE + '\u2500' # 0x0085 -> FORMS LIGHT HORIZONTAL + '\u2502' # 0x0086 -> FORMS LIGHT VERTICAL + '\u253c' # 0x0087 -> FORMS LIGHT VERTICAL AND HORIZONTAL + '\u2524' # 0x0088 -> FORMS LIGHT VERTICAL AND LEFT + '\u252c' # 0x0089 -> FORMS LIGHT DOWN AND HORIZONTAL + '\u251c' # 0x008a -> FORMS LIGHT VERTICAL AND RIGHT + '\u2534' # 0x008b -> FORMS LIGHT UP AND HORIZONTAL + '\u2510' # 0x008c -> FORMS LIGHT DOWN AND LEFT + '\u250c' # 0x008d -> FORMS LIGHT DOWN AND RIGHT + '\u2514' # 0x008e -> FORMS LIGHT UP AND RIGHT + '\u2518' # 0x008f -> FORMS LIGHT UP AND LEFT + '\u03b2' # 0x0090 -> GREEK SMALL BETA + '\u221e' # 0x0091 -> INFINITY + '\u03c6' # 0x0092 -> GREEK SMALL PHI + '\xb1' # 0x0093 -> PLUS-OR-MINUS SIGN + '\xbd' # 0x0094 -> FRACTION 1/2 + '\xbc' # 0x0095 -> FRACTION 1/4 + '\u2248' # 0x0096 -> ALMOST EQUAL TO + '\xab' # 0x0097 -> LEFT POINTING GUILLEMET + '\xbb' # 
0x0098 -> RIGHT POINTING GUILLEMET + '\ufef7' # 0x0099 -> ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE ISOLATED FORM + '\ufef8' # 0x009a -> ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE FINAL FORM + '\ufffe' # 0x009b -> UNDEFINED + '\ufffe' # 0x009c -> UNDEFINED + '\ufefb' # 0x009d -> ARABIC LIGATURE LAM WITH ALEF ISOLATED FORM + '\ufefc' # 0x009e -> ARABIC LIGATURE LAM WITH ALEF FINAL FORM + '\ufffe' # 0x009f -> UNDEFINED + '\xa0' # 0x00a0 -> NON-BREAKING SPACE + '\xad' # 0x00a1 -> SOFT HYPHEN + '\ufe82' # 0x00a2 -> ARABIC LETTER ALEF WITH MADDA ABOVE FINAL FORM + '\xa3' # 0x00a3 -> POUND SIGN + '\xa4' # 0x00a4 -> CURRENCY SIGN + '\ufe84' # 0x00a5 -> ARABIC LETTER ALEF WITH HAMZA ABOVE FINAL FORM + '\ufffe' # 0x00a6 -> UNDEFINED + '\ufffe' # 0x00a7 -> UNDEFINED + '\ufe8e' # 0x00a8 -> ARABIC LETTER ALEF FINAL FORM + '\ufe8f' # 0x00a9 -> ARABIC LETTER BEH ISOLATED FORM + '\ufe95' # 0x00aa -> ARABIC LETTER TEH ISOLATED FORM + '\ufe99' # 0x00ab -> ARABIC LETTER THEH ISOLATED FORM + '\u060c' # 0x00ac -> ARABIC COMMA + '\ufe9d' # 0x00ad -> ARABIC LETTER JEEM ISOLATED FORM + '\ufea1' # 0x00ae -> ARABIC LETTER HAH ISOLATED FORM + '\ufea5' # 0x00af -> ARABIC LETTER KHAH ISOLATED FORM + '\u0660' # 0x00b0 -> ARABIC-INDIC DIGIT ZERO + '\u0661' # 0x00b1 -> ARABIC-INDIC DIGIT ONE + '\u0662' # 0x00b2 -> ARABIC-INDIC DIGIT TWO + '\u0663' # 0x00b3 -> ARABIC-INDIC DIGIT THREE + '\u0664' # 0x00b4 -> ARABIC-INDIC DIGIT FOUR + '\u0665' # 0x00b5 -> ARABIC-INDIC DIGIT FIVE + '\u0666' # 0x00b6 -> ARABIC-INDIC DIGIT SIX + '\u0667' # 0x00b7 -> ARABIC-INDIC DIGIT SEVEN + '\u0668' # 0x00b8 -> ARABIC-INDIC DIGIT EIGHT + '\u0669' # 0x00b9 -> ARABIC-INDIC DIGIT NINE + '\ufed1' # 0x00ba -> ARABIC LETTER FEH ISOLATED FORM + '\u061b' # 0x00bb -> ARABIC SEMICOLON + '\ufeb1' # 0x00bc -> ARABIC LETTER SEEN ISOLATED FORM + '\ufeb5' # 0x00bd -> ARABIC LETTER SHEEN ISOLATED FORM + '\ufeb9' # 0x00be -> ARABIC LETTER SAD ISOLATED FORM + '\u061f' # 0x00bf -> ARABIC QUESTION MARK + '\xa2' # 0x00c0 -> CENT SIGN + '\ufe80' # 0x00c1 -> ARABIC LETTER HAMZA ISOLATED FORM + '\ufe81' # 0x00c2 -> ARABIC LETTER ALEF WITH MADDA ABOVE ISOLATED FORM + '\ufe83' # 0x00c3 -> ARABIC LETTER ALEF WITH HAMZA ABOVE ISOLATED FORM + '\ufe85' # 0x00c4 -> ARABIC LETTER WAW WITH HAMZA ABOVE ISOLATED FORM + '\ufeca' # 0x00c5 -> ARABIC LETTER AIN FINAL FORM + '\ufe8b' # 0x00c6 -> ARABIC LETTER YEH WITH HAMZA ABOVE INITIAL FORM + '\ufe8d' # 0x00c7 -> ARABIC LETTER ALEF ISOLATED FORM + '\ufe91' # 0x00c8 -> ARABIC LETTER BEH INITIAL FORM + '\ufe93' # 0x00c9 -> ARABIC LETTER TEH MARBUTA ISOLATED FORM + '\ufe97' # 0x00ca -> ARABIC LETTER TEH INITIAL FORM + '\ufe9b' # 0x00cb -> ARABIC LETTER THEH INITIAL FORM + '\ufe9f' # 0x00cc -> ARABIC LETTER JEEM INITIAL FORM + '\ufea3' # 0x00cd -> ARABIC LETTER HAH INITIAL FORM + '\ufea7' # 0x00ce -> ARABIC LETTER KHAH INITIAL FORM + '\ufea9' # 0x00cf -> ARABIC LETTER DAL ISOLATED FORM + '\ufeab' # 0x00d0 -> ARABIC LETTER THAL ISOLATED FORM + '\ufead' # 0x00d1 -> ARABIC LETTER REH ISOLATED FORM + '\ufeaf' # 0x00d2 -> ARABIC LETTER ZAIN ISOLATED FORM + '\ufeb3' # 0x00d3 -> ARABIC LETTER SEEN INITIAL FORM + '\ufeb7' # 0x00d4 -> ARABIC LETTER SHEEN INITIAL FORM + '\ufebb' # 0x00d5 -> ARABIC LETTER SAD INITIAL FORM + '\ufebf' # 0x00d6 -> ARABIC LETTER DAD INITIAL FORM + '\ufec1' # 0x00d7 -> ARABIC LETTER TAH ISOLATED FORM + '\ufec5' # 0x00d8 -> ARABIC LETTER ZAH ISOLATED FORM + '\ufecb' # 0x00d9 -> ARABIC LETTER AIN INITIAL FORM + '\ufecf' # 0x00da -> ARABIC LETTER GHAIN INITIAL FORM + '\xa6' # 0x00db -> BROKEN VERTICAL BAR + 
'\xac' # 0x00dc -> NOT SIGN + '\xf7' # 0x00dd -> DIVISION SIGN + '\xd7' # 0x00de -> MULTIPLICATION SIGN + '\ufec9' # 0x00df -> ARABIC LETTER AIN ISOLATED FORM + '\u0640' # 0x00e0 -> ARABIC TATWEEL + '\ufed3' # 0x00e1 -> ARABIC LETTER FEH INITIAL FORM + '\ufed7' # 0x00e2 -> ARABIC LETTER QAF INITIAL FORM + '\ufedb' # 0x00e3 -> ARABIC LETTER KAF INITIAL FORM + '\ufedf' # 0x00e4 -> ARABIC LETTER LAM INITIAL FORM + '\ufee3' # 0x00e5 -> ARABIC LETTER MEEM INITIAL FORM + '\ufee7' # 0x00e6 -> ARABIC LETTER NOON INITIAL FORM + '\ufeeb' # 0x00e7 -> ARABIC LETTER HEH INITIAL FORM + '\ufeed' # 0x00e8 -> ARABIC LETTER WAW ISOLATED FORM + '\ufeef' # 0x00e9 -> ARABIC LETTER ALEF MAKSURA ISOLATED FORM + '\ufef3' # 0x00ea -> ARABIC LETTER YEH INITIAL FORM + '\ufebd' # 0x00eb -> ARABIC LETTER DAD ISOLATED FORM + '\ufecc' # 0x00ec -> ARABIC LETTER AIN MEDIAL FORM + '\ufece' # 0x00ed -> ARABIC LETTER GHAIN FINAL FORM + '\ufecd' # 0x00ee -> ARABIC LETTER GHAIN ISOLATED FORM + '\ufee1' # 0x00ef -> ARABIC LETTER MEEM ISOLATED FORM + '\ufe7d' # 0x00f0 -> ARABIC SHADDA MEDIAL FORM + '\u0651' # 0x00f1 -> ARABIC SHADDAH + '\ufee5' # 0x00f2 -> ARABIC LETTER NOON ISOLATED FORM + '\ufee9' # 0x00f3 -> ARABIC LETTER HEH ISOLATED FORM + '\ufeec' # 0x00f4 -> ARABIC LETTER HEH MEDIAL FORM + '\ufef0' # 0x00f5 -> ARABIC LETTER ALEF MAKSURA FINAL FORM + '\ufef2' # 0x00f6 -> ARABIC LETTER YEH FINAL FORM + '\ufed0' # 0x00f7 -> ARABIC LETTER GHAIN MEDIAL FORM + '\ufed5' # 0x00f8 -> ARABIC LETTER QAF ISOLATED FORM + '\ufef5' # 0x00f9 -> ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE ISOLATED FORM + '\ufef6' # 0x00fa -> ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE FINAL FORM + '\ufedd' # 0x00fb -> ARABIC LETTER LAM ISOLATED FORM + '\ufed9' # 0x00fc -> ARABIC LETTER KAF ISOLATED FORM + '\ufef1' # 0x00fd -> ARABIC LETTER YEH ISOLATED FORM + '\u25a0' # 0x00fe -> BLACK SQUARE + '\ufffe' # 0x00ff -> UNDEFINED +) + +### Encoding Map + +encoding_map = { + 0x0000: 0x0000, # NULL + 0x0001: 0x0001, # START OF HEADING + 0x0002: 0x0002, # START OF TEXT + 0x0003: 0x0003, # END OF TEXT + 0x0004: 0x0004, # END OF TRANSMISSION + 0x0005: 0x0005, # ENQUIRY + 0x0006: 0x0006, # ACKNOWLEDGE + 0x0007: 0x0007, # BELL + 0x0008: 0x0008, # BACKSPACE + 0x0009: 0x0009, # HORIZONTAL TABULATION + 0x000a: 0x000a, # LINE FEED + 0x000b: 0x000b, # VERTICAL TABULATION + 0x000c: 0x000c, # FORM FEED + 0x000d: 0x000d, # CARRIAGE RETURN + 0x000e: 0x000e, # SHIFT OUT + 0x000f: 0x000f, # SHIFT IN + 0x0010: 0x0010, # DATA LINK ESCAPE + 0x0011: 0x0011, # DEVICE CONTROL ONE + 0x0012: 0x0012, # DEVICE CONTROL TWO + 0x0013: 0x0013, # DEVICE CONTROL THREE + 0x0014: 0x0014, # DEVICE CONTROL FOUR + 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE + 0x0016: 0x0016, # SYNCHRONOUS IDLE + 0x0017: 0x0017, # END OF TRANSMISSION BLOCK + 0x0018: 0x0018, # CANCEL + 0x0019: 0x0019, # END OF MEDIUM + 0x001a: 0x001a, # SUBSTITUTE + 0x001b: 0x001b, # ESCAPE + 0x001c: 0x001c, # FILE SEPARATOR + 0x001d: 0x001d, # GROUP SEPARATOR + 0x001e: 0x001e, # RECORD SEPARATOR + 0x001f: 0x001f, # UNIT SEPARATOR + 0x0020: 0x0020, # SPACE + 0x0021: 0x0021, # EXCLAMATION MARK + 0x0022: 0x0022, # QUOTATION MARK + 0x0023: 0x0023, # NUMBER SIGN + 0x0024: 0x0024, # DOLLAR SIGN + 0x0026: 0x0026, # AMPERSAND + 0x0027: 0x0027, # APOSTROPHE + 0x0028: 0x0028, # LEFT PARENTHESIS + 0x0029: 0x0029, # RIGHT PARENTHESIS + 0x002a: 0x002a, # ASTERISK + 0x002b: 0x002b, # PLUS SIGN + 0x002c: 0x002c, # COMMA + 0x002d: 0x002d, # HYPHEN-MINUS + 0x002e: 0x002e, # FULL STOP + 0x002f: 0x002f, # SOLIDUS + 0x0030: 0x0030, # DIGIT ZERO + 
0x0031: 0x0031, # DIGIT ONE + 0x0032: 0x0032, # DIGIT TWO + 0x0033: 0x0033, # DIGIT THREE + 0x0034: 0x0034, # DIGIT FOUR + 0x0035: 0x0035, # DIGIT FIVE + 0x0036: 0x0036, # DIGIT SIX + 0x0037: 0x0037, # DIGIT SEVEN + 0x0038: 0x0038, # DIGIT EIGHT + 0x0039: 0x0039, # DIGIT NINE + 0x003a: 0x003a, # COLON + 0x003b: 0x003b, # SEMICOLON + 0x003c: 0x003c, # LESS-THAN SIGN + 0x003d: 0x003d, # EQUALS SIGN + 0x003e: 0x003e, # GREATER-THAN SIGN + 0x003f: 0x003f, # QUESTION MARK + 0x0040: 0x0040, # COMMERCIAL AT + 0x0041: 0x0041, # LATIN CAPITAL LETTER A + 0x0042: 0x0042, # LATIN CAPITAL LETTER B + 0x0043: 0x0043, # LATIN CAPITAL LETTER C + 0x0044: 0x0044, # LATIN CAPITAL LETTER D + 0x0045: 0x0045, # LATIN CAPITAL LETTER E + 0x0046: 0x0046, # LATIN CAPITAL LETTER F + 0x0047: 0x0047, # LATIN CAPITAL LETTER G + 0x0048: 0x0048, # LATIN CAPITAL LETTER H + 0x0049: 0x0049, # LATIN CAPITAL LETTER I + 0x004a: 0x004a, # LATIN CAPITAL LETTER J + 0x004b: 0x004b, # LATIN CAPITAL LETTER K + 0x004c: 0x004c, # LATIN CAPITAL LETTER L + 0x004d: 0x004d, # LATIN CAPITAL LETTER M + 0x004e: 0x004e, # LATIN CAPITAL LETTER N + 0x004f: 0x004f, # LATIN CAPITAL LETTER O + 0x0050: 0x0050, # LATIN CAPITAL LETTER P + 0x0051: 0x0051, # LATIN CAPITAL LETTER Q + 0x0052: 0x0052, # LATIN CAPITAL LETTER R + 0x0053: 0x0053, # LATIN CAPITAL LETTER S + 0x0054: 0x0054, # LATIN CAPITAL LETTER T + 0x0055: 0x0055, # LATIN CAPITAL LETTER U + 0x0056: 0x0056, # LATIN CAPITAL LETTER V + 0x0057: 0x0057, # LATIN CAPITAL LETTER W + 0x0058: 0x0058, # LATIN CAPITAL LETTER X + 0x0059: 0x0059, # LATIN CAPITAL LETTER Y + 0x005a: 0x005a, # LATIN CAPITAL LETTER Z + 0x005b: 0x005b, # LEFT SQUARE BRACKET + 0x005c: 0x005c, # REVERSE SOLIDUS + 0x005d: 0x005d, # RIGHT SQUARE BRACKET + 0x005e: 0x005e, # CIRCUMFLEX ACCENT + 0x005f: 0x005f, # LOW LINE + 0x0060: 0x0060, # GRAVE ACCENT + 0x0061: 0x0061, # LATIN SMALL LETTER A + 0x0062: 0x0062, # LATIN SMALL LETTER B + 0x0063: 0x0063, # LATIN SMALL LETTER C + 0x0064: 0x0064, # LATIN SMALL LETTER D + 0x0065: 0x0065, # LATIN SMALL LETTER E + 0x0066: 0x0066, # LATIN SMALL LETTER F + 0x0067: 0x0067, # LATIN SMALL LETTER G + 0x0068: 0x0068, # LATIN SMALL LETTER H + 0x0069: 0x0069, # LATIN SMALL LETTER I + 0x006a: 0x006a, # LATIN SMALL LETTER J + 0x006b: 0x006b, # LATIN SMALL LETTER K + 0x006c: 0x006c, # LATIN SMALL LETTER L + 0x006d: 0x006d, # LATIN SMALL LETTER M + 0x006e: 0x006e, # LATIN SMALL LETTER N + 0x006f: 0x006f, # LATIN SMALL LETTER O + 0x0070: 0x0070, # LATIN SMALL LETTER P + 0x0071: 0x0071, # LATIN SMALL LETTER Q + 0x0072: 0x0072, # LATIN SMALL LETTER R + 0x0073: 0x0073, # LATIN SMALL LETTER S + 0x0074: 0x0074, # LATIN SMALL LETTER T + 0x0075: 0x0075, # LATIN SMALL LETTER U + 0x0076: 0x0076, # LATIN SMALL LETTER V + 0x0077: 0x0077, # LATIN SMALL LETTER W + 0x0078: 0x0078, # LATIN SMALL LETTER X + 0x0079: 0x0079, # LATIN SMALL LETTER Y + 0x007a: 0x007a, # LATIN SMALL LETTER Z + 0x007b: 0x007b, # LEFT CURLY BRACKET + 0x007c: 0x007c, # VERTICAL LINE + 0x007d: 0x007d, # RIGHT CURLY BRACKET + 0x007e: 0x007e, # TILDE + 0x007f: 0x007f, # DELETE + 0x00a0: 0x00a0, # NON-BREAKING SPACE + 0x00a2: 0x00c0, # CENT SIGN + 0x00a3: 0x00a3, # POUND SIGN + 0x00a4: 0x00a4, # CURRENCY SIGN + 0x00a6: 0x00db, # BROKEN VERTICAL BAR + 0x00ab: 0x0097, # LEFT POINTING GUILLEMET + 0x00ac: 0x00dc, # NOT SIGN + 0x00ad: 0x00a1, # SOFT HYPHEN + 0x00b0: 0x0080, # DEGREE SIGN + 0x00b1: 0x0093, # PLUS-OR-MINUS SIGN + 0x00b7: 0x0081, # MIDDLE DOT + 0x00bb: 0x0098, # RIGHT POINTING GUILLEMET + 0x00bc: 0x0095, # FRACTION 1/4 + 0x00bd: 0x0094, # 
FRACTION 1/2 + 0x00d7: 0x00de, # MULTIPLICATION SIGN + 0x00f7: 0x00dd, # DIVISION SIGN + 0x03b2: 0x0090, # GREEK SMALL BETA + 0x03c6: 0x0092, # GREEK SMALL PHI + 0x060c: 0x00ac, # ARABIC COMMA + 0x061b: 0x00bb, # ARABIC SEMICOLON + 0x061f: 0x00bf, # ARABIC QUESTION MARK + 0x0640: 0x00e0, # ARABIC TATWEEL + 0x0651: 0x00f1, # ARABIC SHADDAH + 0x0660: 0x00b0, # ARABIC-INDIC DIGIT ZERO + 0x0661: 0x00b1, # ARABIC-INDIC DIGIT ONE + 0x0662: 0x00b2, # ARABIC-INDIC DIGIT TWO + 0x0663: 0x00b3, # ARABIC-INDIC DIGIT THREE + 0x0664: 0x00b4, # ARABIC-INDIC DIGIT FOUR + 0x0665: 0x00b5, # ARABIC-INDIC DIGIT FIVE + 0x0666: 0x00b6, # ARABIC-INDIC DIGIT SIX + 0x0667: 0x00b7, # ARABIC-INDIC DIGIT SEVEN + 0x0668: 0x00b8, # ARABIC-INDIC DIGIT EIGHT + 0x0669: 0x00b9, # ARABIC-INDIC DIGIT NINE + 0x066a: 0x0025, # ARABIC PERCENT SIGN + 0x2219: 0x0082, # BULLET OPERATOR + 0x221a: 0x0083, # SQUARE ROOT + 0x221e: 0x0091, # INFINITY + 0x2248: 0x0096, # ALMOST EQUAL TO + 0x2500: 0x0085, # FORMS LIGHT HORIZONTAL + 0x2502: 0x0086, # FORMS LIGHT VERTICAL + 0x250c: 0x008d, # FORMS LIGHT DOWN AND RIGHT + 0x2510: 0x008c, # FORMS LIGHT DOWN AND LEFT + 0x2514: 0x008e, # FORMS LIGHT UP AND RIGHT + 0x2518: 0x008f, # FORMS LIGHT UP AND LEFT + 0x251c: 0x008a, # FORMS LIGHT VERTICAL AND RIGHT + 0x2524: 0x0088, # FORMS LIGHT VERTICAL AND LEFT + 0x252c: 0x0089, # FORMS LIGHT DOWN AND HORIZONTAL + 0x2534: 0x008b, # FORMS LIGHT UP AND HORIZONTAL + 0x253c: 0x0087, # FORMS LIGHT VERTICAL AND HORIZONTAL + 0x2592: 0x0084, # MEDIUM SHADE + 0x25a0: 0x00fe, # BLACK SQUARE + 0xfe7d: 0x00f0, # ARABIC SHADDA MEDIAL FORM + 0xfe80: 0x00c1, # ARABIC LETTER HAMZA ISOLATED FORM + 0xfe81: 0x00c2, # ARABIC LETTER ALEF WITH MADDA ABOVE ISOLATED FORM + 0xfe82: 0x00a2, # ARABIC LETTER ALEF WITH MADDA ABOVE FINAL FORM + 0xfe83: 0x00c3, # ARABIC LETTER ALEF WITH HAMZA ABOVE ISOLATED FORM + 0xfe84: 0x00a5, # ARABIC LETTER ALEF WITH HAMZA ABOVE FINAL FORM + 0xfe85: 0x00c4, # ARABIC LETTER WAW WITH HAMZA ABOVE ISOLATED FORM + 0xfe8b: 0x00c6, # ARABIC LETTER YEH WITH HAMZA ABOVE INITIAL FORM + 0xfe8d: 0x00c7, # ARABIC LETTER ALEF ISOLATED FORM + 0xfe8e: 0x00a8, # ARABIC LETTER ALEF FINAL FORM + 0xfe8f: 0x00a9, # ARABIC LETTER BEH ISOLATED FORM + 0xfe91: 0x00c8, # ARABIC LETTER BEH INITIAL FORM + 0xfe93: 0x00c9, # ARABIC LETTER TEH MARBUTA ISOLATED FORM + 0xfe95: 0x00aa, # ARABIC LETTER TEH ISOLATED FORM + 0xfe97: 0x00ca, # ARABIC LETTER TEH INITIAL FORM + 0xfe99: 0x00ab, # ARABIC LETTER THEH ISOLATED FORM + 0xfe9b: 0x00cb, # ARABIC LETTER THEH INITIAL FORM + 0xfe9d: 0x00ad, # ARABIC LETTER JEEM ISOLATED FORM + 0xfe9f: 0x00cc, # ARABIC LETTER JEEM INITIAL FORM + 0xfea1: 0x00ae, # ARABIC LETTER HAH ISOLATED FORM + 0xfea3: 0x00cd, # ARABIC LETTER HAH INITIAL FORM + 0xfea5: 0x00af, # ARABIC LETTER KHAH ISOLATED FORM + 0xfea7: 0x00ce, # ARABIC LETTER KHAH INITIAL FORM + 0xfea9: 0x00cf, # ARABIC LETTER DAL ISOLATED FORM + 0xfeab: 0x00d0, # ARABIC LETTER THAL ISOLATED FORM + 0xfead: 0x00d1, # ARABIC LETTER REH ISOLATED FORM + 0xfeaf: 0x00d2, # ARABIC LETTER ZAIN ISOLATED FORM + 0xfeb1: 0x00bc, # ARABIC LETTER SEEN ISOLATED FORM + 0xfeb3: 0x00d3, # ARABIC LETTER SEEN INITIAL FORM + 0xfeb5: 0x00bd, # ARABIC LETTER SHEEN ISOLATED FORM + 0xfeb7: 0x00d4, # ARABIC LETTER SHEEN INITIAL FORM + 0xfeb9: 0x00be, # ARABIC LETTER SAD ISOLATED FORM + 0xfebb: 0x00d5, # ARABIC LETTER SAD INITIAL FORM + 0xfebd: 0x00eb, # ARABIC LETTER DAD ISOLATED FORM + 0xfebf: 0x00d6, # ARABIC LETTER DAD INITIAL FORM + 0xfec1: 0x00d7, # ARABIC LETTER TAH ISOLATED FORM + 0xfec5: 0x00d8, # ARABIC 
LETTER ZAH ISOLATED FORM + 0xfec9: 0x00df, # ARABIC LETTER AIN ISOLATED FORM + 0xfeca: 0x00c5, # ARABIC LETTER AIN FINAL FORM + 0xfecb: 0x00d9, # ARABIC LETTER AIN INITIAL FORM + 0xfecc: 0x00ec, # ARABIC LETTER AIN MEDIAL FORM + 0xfecd: 0x00ee, # ARABIC LETTER GHAIN ISOLATED FORM + 0xfece: 0x00ed, # ARABIC LETTER GHAIN FINAL FORM + 0xfecf: 0x00da, # ARABIC LETTER GHAIN INITIAL FORM + 0xfed0: 0x00f7, # ARABIC LETTER GHAIN MEDIAL FORM + 0xfed1: 0x00ba, # ARABIC LETTER FEH ISOLATED FORM + 0xfed3: 0x00e1, # ARABIC LETTER FEH INITIAL FORM + 0xfed5: 0x00f8, # ARABIC LETTER QAF ISOLATED FORM + 0xfed7: 0x00e2, # ARABIC LETTER QAF INITIAL FORM + 0xfed9: 0x00fc, # ARABIC LETTER KAF ISOLATED FORM + 0xfedb: 0x00e3, # ARABIC LETTER KAF INITIAL FORM + 0xfedd: 0x00fb, # ARABIC LETTER LAM ISOLATED FORM + 0xfedf: 0x00e4, # ARABIC LETTER LAM INITIAL FORM + 0xfee1: 0x00ef, # ARABIC LETTER MEEM ISOLATED FORM + 0xfee3: 0x00e5, # ARABIC LETTER MEEM INITIAL FORM + 0xfee5: 0x00f2, # ARABIC LETTER NOON ISOLATED FORM + 0xfee7: 0x00e6, # ARABIC LETTER NOON INITIAL FORM + 0xfee9: 0x00f3, # ARABIC LETTER HEH ISOLATED FORM + 0xfeeb: 0x00e7, # ARABIC LETTER HEH INITIAL FORM + 0xfeec: 0x00f4, # ARABIC LETTER HEH MEDIAL FORM + 0xfeed: 0x00e8, # ARABIC LETTER WAW ISOLATED FORM + 0xfeef: 0x00e9, # ARABIC LETTER ALEF MAKSURA ISOLATED FORM + 0xfef0: 0x00f5, # ARABIC LETTER ALEF MAKSURA FINAL FORM + 0xfef1: 0x00fd, # ARABIC LETTER YEH ISOLATED FORM + 0xfef2: 0x00f6, # ARABIC LETTER YEH FINAL FORM + 0xfef3: 0x00ea, # ARABIC LETTER YEH INITIAL FORM + 0xfef5: 0x00f9, # ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE ISOLATED FORM + 0xfef6: 0x00fa, # ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE FINAL FORM + 0xfef7: 0x0099, # ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE ISOLATED FORM + 0xfef8: 0x009a, # ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE FINAL FORM + 0xfefb: 0x009d, # ARABIC LIGATURE LAM WITH ALEF ISOLATED FORM + 0xfefc: 0x009e, # ARABIC LIGATURE LAM WITH ALEF FINAL FORM +} diff --git a/venv/Lib/encodings/cp865.py b/venv/Lib/encodings/cp865.py new file mode 100644 index 00000000..6726cf3f --- /dev/null +++ b/venv/Lib/encodings/cp865.py @@ -0,0 +1,698 @@ +""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP865.TXT' with gencodec.py. 
+ +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_map) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_map)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='cp865', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + +### Decoding Map + +decoding_map = codecs.make_identity_dict(range(256)) +decoding_map.update({ + 0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA + 0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS + 0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE + 0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX + 0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS + 0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE + 0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE + 0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA + 0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX + 0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS + 0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE + 0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS + 0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX + 0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE + 0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS + 0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE + 0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE + 0x0091: 0x00e6, # LATIN SMALL LIGATURE AE + 0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE + 0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX + 0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS + 0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE + 0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX + 0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE + 0x0098: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS + 0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS + 0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS + 0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE + 0x009c: 0x00a3, # POUND SIGN + 0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE + 0x009e: 0x20a7, # PESETA SIGN + 0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK + 0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE + 0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE + 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE + 0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE + 0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE + 0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE + 0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR + 0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR + 0x00a8: 0x00bf, # INVERTED QUESTION MARK + 0x00a9: 0x2310, # REVERSED NOT SIGN + 0x00aa: 0x00ac, # NOT SIGN + 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF + 0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER + 0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK + 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00af: 0x00a4, # CURRENCY 
SIGN + 0x00b0: 0x2591, # LIGHT SHADE + 0x00b1: 0x2592, # MEDIUM SHADE + 0x00b2: 0x2593, # DARK SHADE + 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL + 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT + 0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE + 0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE + 0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE + 0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE + 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT + 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL + 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT + 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT + 0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE + 0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE + 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT + 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT + 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL + 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT + 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL + 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + 0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE + 0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE + 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT + 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT + 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL + 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL + 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + 0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE + 0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE + 0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE + 0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE + 0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE + 0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE + 0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE + 0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE + 0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE + 0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE + 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT + 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT + 0x00db: 0x2588, # FULL BLOCK + 0x00dc: 0x2584, # LOWER HALF BLOCK + 0x00dd: 0x258c, # LEFT HALF BLOCK + 0x00de: 0x2590, # RIGHT HALF BLOCK + 0x00df: 0x2580, # UPPER HALF BLOCK + 0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA + 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S + 0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA + 0x00e3: 0x03c0, # GREEK SMALL LETTER PI + 0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA + 0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA + 0x00e6: 0x00b5, # MICRO SIGN + 0x00e7: 0x03c4, # GREEK SMALL LETTER TAU + 0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI + 0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA + 0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA + 0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA + 0x00ec: 0x221e, # INFINITY + 0x00ed: 0x03c6, # GREEK SMALL LETTER PHI + 0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON + 0x00ef: 0x2229, # INTERSECTION + 0x00f0: 0x2261, # IDENTICAL TO + 0x00f1: 0x00b1, # PLUS-MINUS SIGN + 0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO + 0x00f3: 0x2264, # LESS-THAN OR EQUAL TO + 0x00f4: 0x2320, # TOP HALF INTEGRAL + 0x00f5: 0x2321, # BOTTOM HALF 
INTEGRAL + 0x00f6: 0x00f7, # DIVISION SIGN + 0x00f7: 0x2248, # ALMOST EQUAL TO + 0x00f8: 0x00b0, # DEGREE SIGN + 0x00f9: 0x2219, # BULLET OPERATOR + 0x00fa: 0x00b7, # MIDDLE DOT + 0x00fb: 0x221a, # SQUARE ROOT + 0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N + 0x00fd: 0x00b2, # SUPERSCRIPT TWO + 0x00fe: 0x25a0, # BLACK SQUARE + 0x00ff: 0x00a0, # NO-BREAK SPACE +}) + +### Decoding Table + +decoding_table = ( + '\x00' # 0x0000 -> NULL + '\x01' # 0x0001 -> START OF HEADING + '\x02' # 0x0002 -> START OF TEXT + '\x03' # 0x0003 -> END OF TEXT + '\x04' # 0x0004 -> END OF TRANSMISSION + '\x05' # 0x0005 -> ENQUIRY + '\x06' # 0x0006 -> ACKNOWLEDGE + '\x07' # 0x0007 -> BELL + '\x08' # 0x0008 -> BACKSPACE + '\t' # 0x0009 -> HORIZONTAL TABULATION + '\n' # 0x000a -> LINE FEED + '\x0b' # 0x000b -> VERTICAL TABULATION + '\x0c' # 0x000c -> FORM FEED + '\r' # 0x000d -> CARRIAGE RETURN + '\x0e' # 0x000e -> SHIFT OUT + '\x0f' # 0x000f -> SHIFT IN + '\x10' # 0x0010 -> DATA LINK ESCAPE + '\x11' # 0x0011 -> DEVICE CONTROL ONE + '\x12' # 0x0012 -> DEVICE CONTROL TWO + '\x13' # 0x0013 -> DEVICE CONTROL THREE + '\x14' # 0x0014 -> DEVICE CONTROL FOUR + '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x0016 -> SYNCHRONOUS IDLE + '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK + '\x18' # 0x0018 -> CANCEL + '\x19' # 0x0019 -> END OF MEDIUM + '\x1a' # 0x001a -> SUBSTITUTE + '\x1b' # 0x001b -> ESCAPE + '\x1c' # 0x001c -> FILE SEPARATOR + '\x1d' # 0x001d -> GROUP SEPARATOR + '\x1e' # 0x001e -> RECORD SEPARATOR + '\x1f' # 0x001f -> UNIT SEPARATOR + ' ' # 0x0020 -> SPACE + '!' # 0x0021 -> EXCLAMATION MARK + '"' # 0x0022 -> QUOTATION MARK + '#' # 0x0023 -> NUMBER SIGN + '$' # 0x0024 -> DOLLAR SIGN + '%' # 0x0025 -> PERCENT SIGN + '&' # 0x0026 -> AMPERSAND + "'" # 0x0027 -> APOSTROPHE + '(' # 0x0028 -> LEFT PARENTHESIS + ')' # 0x0029 -> RIGHT PARENTHESIS + '*' # 0x002a -> ASTERISK + '+' # 0x002b -> PLUS SIGN + ',' # 0x002c -> COMMA + '-' # 0x002d -> HYPHEN-MINUS + '.' # 0x002e -> FULL STOP + '/' # 0x002f -> SOLIDUS + '0' # 0x0030 -> DIGIT ZERO + '1' # 0x0031 -> DIGIT ONE + '2' # 0x0032 -> DIGIT TWO + '3' # 0x0033 -> DIGIT THREE + '4' # 0x0034 -> DIGIT FOUR + '5' # 0x0035 -> DIGIT FIVE + '6' # 0x0036 -> DIGIT SIX + '7' # 0x0037 -> DIGIT SEVEN + '8' # 0x0038 -> DIGIT EIGHT + '9' # 0x0039 -> DIGIT NINE + ':' # 0x003a -> COLON + ';' # 0x003b -> SEMICOLON + '<' # 0x003c -> LESS-THAN SIGN + '=' # 0x003d -> EQUALS SIGN + '>' # 0x003e -> GREATER-THAN SIGN + '?' 
# 0x003f -> QUESTION MARK + '@' # 0x0040 -> COMMERCIAL AT + 'A' # 0x0041 -> LATIN CAPITAL LETTER A + 'B' # 0x0042 -> LATIN CAPITAL LETTER B + 'C' # 0x0043 -> LATIN CAPITAL LETTER C + 'D' # 0x0044 -> LATIN CAPITAL LETTER D + 'E' # 0x0045 -> LATIN CAPITAL LETTER E + 'F' # 0x0046 -> LATIN CAPITAL LETTER F + 'G' # 0x0047 -> LATIN CAPITAL LETTER G + 'H' # 0x0048 -> LATIN CAPITAL LETTER H + 'I' # 0x0049 -> LATIN CAPITAL LETTER I + 'J' # 0x004a -> LATIN CAPITAL LETTER J + 'K' # 0x004b -> LATIN CAPITAL LETTER K + 'L' # 0x004c -> LATIN CAPITAL LETTER L + 'M' # 0x004d -> LATIN CAPITAL LETTER M + 'N' # 0x004e -> LATIN CAPITAL LETTER N + 'O' # 0x004f -> LATIN CAPITAL LETTER O + 'P' # 0x0050 -> LATIN CAPITAL LETTER P + 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q + 'R' # 0x0052 -> LATIN CAPITAL LETTER R + 'S' # 0x0053 -> LATIN CAPITAL LETTER S + 'T' # 0x0054 -> LATIN CAPITAL LETTER T + 'U' # 0x0055 -> LATIN CAPITAL LETTER U + 'V' # 0x0056 -> LATIN CAPITAL LETTER V + 'W' # 0x0057 -> LATIN CAPITAL LETTER W + 'X' # 0x0058 -> LATIN CAPITAL LETTER X + 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y + 'Z' # 0x005a -> LATIN CAPITAL LETTER Z + '[' # 0x005b -> LEFT SQUARE BRACKET + '\\' # 0x005c -> REVERSE SOLIDUS + ']' # 0x005d -> RIGHT SQUARE BRACKET + '^' # 0x005e -> CIRCUMFLEX ACCENT + '_' # 0x005f -> LOW LINE + '`' # 0x0060 -> GRAVE ACCENT + 'a' # 0x0061 -> LATIN SMALL LETTER A + 'b' # 0x0062 -> LATIN SMALL LETTER B + 'c' # 0x0063 -> LATIN SMALL LETTER C + 'd' # 0x0064 -> LATIN SMALL LETTER D + 'e' # 0x0065 -> LATIN SMALL LETTER E + 'f' # 0x0066 -> LATIN SMALL LETTER F + 'g' # 0x0067 -> LATIN SMALL LETTER G + 'h' # 0x0068 -> LATIN SMALL LETTER H + 'i' # 0x0069 -> LATIN SMALL LETTER I + 'j' # 0x006a -> LATIN SMALL LETTER J + 'k' # 0x006b -> LATIN SMALL LETTER K + 'l' # 0x006c -> LATIN SMALL LETTER L + 'm' # 0x006d -> LATIN SMALL LETTER M + 'n' # 0x006e -> LATIN SMALL LETTER N + 'o' # 0x006f -> LATIN SMALL LETTER O + 'p' # 0x0070 -> LATIN SMALL LETTER P + 'q' # 0x0071 -> LATIN SMALL LETTER Q + 'r' # 0x0072 -> LATIN SMALL LETTER R + 's' # 0x0073 -> LATIN SMALL LETTER S + 't' # 0x0074 -> LATIN SMALL LETTER T + 'u' # 0x0075 -> LATIN SMALL LETTER U + 'v' # 0x0076 -> LATIN SMALL LETTER V + 'w' # 0x0077 -> LATIN SMALL LETTER W + 'x' # 0x0078 -> LATIN SMALL LETTER X + 'y' # 0x0079 -> LATIN SMALL LETTER Y + 'z' # 0x007a -> LATIN SMALL LETTER Z + '{' # 0x007b -> LEFT CURLY BRACKET + '|' # 0x007c -> VERTICAL LINE + '}' # 0x007d -> RIGHT CURLY BRACKET + '~' # 0x007e -> TILDE + '\x7f' # 0x007f -> DELETE + '\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA + '\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS + '\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE + '\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX + '\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS + '\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE + '\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE + '\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA + '\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX + '\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS + '\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE + '\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS + '\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX + '\xec' # 0x008d -> LATIN SMALL LETTER I WITH GRAVE + '\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS + '\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE + '\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE + '\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE + '\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE + 
'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX + '\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS + '\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE + '\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX + '\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE + '\xff' # 0x0098 -> LATIN SMALL LETTER Y WITH DIAERESIS + '\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS + '\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE + '\xa3' # 0x009c -> POUND SIGN + '\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE + '\u20a7' # 0x009e -> PESETA SIGN + '\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK + '\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE + '\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE + '\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE + '\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE + '\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE + '\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE + '\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR + '\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR + '\xbf' # 0x00a8 -> INVERTED QUESTION MARK + '\u2310' # 0x00a9 -> REVERSED NOT SIGN + '\xac' # 0x00aa -> NOT SIGN + '\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF + '\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER + '\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK + '\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xa4' # 0x00af -> CURRENCY SIGN + '\u2591' # 0x00b0 -> LIGHT SHADE + '\u2592' # 0x00b1 -> MEDIUM SHADE + '\u2593' # 0x00b2 -> DARK SHADE + '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL + '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT + '\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE + '\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE + '\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE + '\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE + '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT + '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL + '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT + '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT + '\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE + '\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE + '\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT + '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT + '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL + '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + '\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT + '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL + '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + '\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE + '\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE + '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT + '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT + '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL + '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL + '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + '\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE + '\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE + '\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE + '\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE + '\u2559' # 0x00d3 -> 
BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE + '\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE + '\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE + '\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE + '\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE + '\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE + '\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT + '\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT + '\u2588' # 0x00db -> FULL BLOCK + '\u2584' # 0x00dc -> LOWER HALF BLOCK + '\u258c' # 0x00dd -> LEFT HALF BLOCK + '\u2590' # 0x00de -> RIGHT HALF BLOCK + '\u2580' # 0x00df -> UPPER HALF BLOCK + '\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA + '\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S + '\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA + '\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI + '\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA + '\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA + '\xb5' # 0x00e6 -> MICRO SIGN + '\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU + '\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI + '\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA + '\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA + '\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA + '\u221e' # 0x00ec -> INFINITY + '\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI + '\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON + '\u2229' # 0x00ef -> INTERSECTION + '\u2261' # 0x00f0 -> IDENTICAL TO + '\xb1' # 0x00f1 -> PLUS-MINUS SIGN + '\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO + '\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO + '\u2320' # 0x00f4 -> TOP HALF INTEGRAL + '\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL + '\xf7' # 0x00f6 -> DIVISION SIGN + '\u2248' # 0x00f7 -> ALMOST EQUAL TO + '\xb0' # 0x00f8 -> DEGREE SIGN + '\u2219' # 0x00f9 -> BULLET OPERATOR + '\xb7' # 0x00fa -> MIDDLE DOT + '\u221a' # 0x00fb -> SQUARE ROOT + '\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N + '\xb2' # 0x00fd -> SUPERSCRIPT TWO + '\u25a0' # 0x00fe -> BLACK SQUARE + '\xa0' # 0x00ff -> NO-BREAK SPACE +) + +### Encoding Map + +encoding_map = { + 0x0000: 0x0000, # NULL + 0x0001: 0x0001, # START OF HEADING + 0x0002: 0x0002, # START OF TEXT + 0x0003: 0x0003, # END OF TEXT + 0x0004: 0x0004, # END OF TRANSMISSION + 0x0005: 0x0005, # ENQUIRY + 0x0006: 0x0006, # ACKNOWLEDGE + 0x0007: 0x0007, # BELL + 0x0008: 0x0008, # BACKSPACE + 0x0009: 0x0009, # HORIZONTAL TABULATION + 0x000a: 0x000a, # LINE FEED + 0x000b: 0x000b, # VERTICAL TABULATION + 0x000c: 0x000c, # FORM FEED + 0x000d: 0x000d, # CARRIAGE RETURN + 0x000e: 0x000e, # SHIFT OUT + 0x000f: 0x000f, # SHIFT IN + 0x0010: 0x0010, # DATA LINK ESCAPE + 0x0011: 0x0011, # DEVICE CONTROL ONE + 0x0012: 0x0012, # DEVICE CONTROL TWO + 0x0013: 0x0013, # DEVICE CONTROL THREE + 0x0014: 0x0014, # DEVICE CONTROL FOUR + 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE + 0x0016: 0x0016, # SYNCHRONOUS IDLE + 0x0017: 0x0017, # END OF TRANSMISSION BLOCK + 0x0018: 0x0018, # CANCEL + 0x0019: 0x0019, # END OF MEDIUM + 0x001a: 0x001a, # SUBSTITUTE + 0x001b: 0x001b, # ESCAPE + 0x001c: 0x001c, # FILE SEPARATOR + 0x001d: 0x001d, # GROUP SEPARATOR + 0x001e: 0x001e, # RECORD SEPARATOR + 0x001f: 0x001f, # UNIT SEPARATOR + 0x0020: 0x0020, # SPACE + 0x0021: 0x0021, # EXCLAMATION MARK + 0x0022: 0x0022, # QUOTATION MARK + 0x0023: 0x0023, # NUMBER SIGN + 0x0024: 0x0024, # DOLLAR SIGN + 0x0025: 0x0025, # PERCENT SIGN + 0x0026: 0x0026, # AMPERSAND + 0x0027: 0x0027, # APOSTROPHE + 0x0028: 0x0028, # LEFT PARENTHESIS + 0x0029: 0x0029, # RIGHT PARENTHESIS + 0x002a: 0x002a, # ASTERISK + 
0x002b: 0x002b, # PLUS SIGN + 0x002c: 0x002c, # COMMA + 0x002d: 0x002d, # HYPHEN-MINUS + 0x002e: 0x002e, # FULL STOP + 0x002f: 0x002f, # SOLIDUS + 0x0030: 0x0030, # DIGIT ZERO + 0x0031: 0x0031, # DIGIT ONE + 0x0032: 0x0032, # DIGIT TWO + 0x0033: 0x0033, # DIGIT THREE + 0x0034: 0x0034, # DIGIT FOUR + 0x0035: 0x0035, # DIGIT FIVE + 0x0036: 0x0036, # DIGIT SIX + 0x0037: 0x0037, # DIGIT SEVEN + 0x0038: 0x0038, # DIGIT EIGHT + 0x0039: 0x0039, # DIGIT NINE + 0x003a: 0x003a, # COLON + 0x003b: 0x003b, # SEMICOLON + 0x003c: 0x003c, # LESS-THAN SIGN + 0x003d: 0x003d, # EQUALS SIGN + 0x003e: 0x003e, # GREATER-THAN SIGN + 0x003f: 0x003f, # QUESTION MARK + 0x0040: 0x0040, # COMMERCIAL AT + 0x0041: 0x0041, # LATIN CAPITAL LETTER A + 0x0042: 0x0042, # LATIN CAPITAL LETTER B + 0x0043: 0x0043, # LATIN CAPITAL LETTER C + 0x0044: 0x0044, # LATIN CAPITAL LETTER D + 0x0045: 0x0045, # LATIN CAPITAL LETTER E + 0x0046: 0x0046, # LATIN CAPITAL LETTER F + 0x0047: 0x0047, # LATIN CAPITAL LETTER G + 0x0048: 0x0048, # LATIN CAPITAL LETTER H + 0x0049: 0x0049, # LATIN CAPITAL LETTER I + 0x004a: 0x004a, # LATIN CAPITAL LETTER J + 0x004b: 0x004b, # LATIN CAPITAL LETTER K + 0x004c: 0x004c, # LATIN CAPITAL LETTER L + 0x004d: 0x004d, # LATIN CAPITAL LETTER M + 0x004e: 0x004e, # LATIN CAPITAL LETTER N + 0x004f: 0x004f, # LATIN CAPITAL LETTER O + 0x0050: 0x0050, # LATIN CAPITAL LETTER P + 0x0051: 0x0051, # LATIN CAPITAL LETTER Q + 0x0052: 0x0052, # LATIN CAPITAL LETTER R + 0x0053: 0x0053, # LATIN CAPITAL LETTER S + 0x0054: 0x0054, # LATIN CAPITAL LETTER T + 0x0055: 0x0055, # LATIN CAPITAL LETTER U + 0x0056: 0x0056, # LATIN CAPITAL LETTER V + 0x0057: 0x0057, # LATIN CAPITAL LETTER W + 0x0058: 0x0058, # LATIN CAPITAL LETTER X + 0x0059: 0x0059, # LATIN CAPITAL LETTER Y + 0x005a: 0x005a, # LATIN CAPITAL LETTER Z + 0x005b: 0x005b, # LEFT SQUARE BRACKET + 0x005c: 0x005c, # REVERSE SOLIDUS + 0x005d: 0x005d, # RIGHT SQUARE BRACKET + 0x005e: 0x005e, # CIRCUMFLEX ACCENT + 0x005f: 0x005f, # LOW LINE + 0x0060: 0x0060, # GRAVE ACCENT + 0x0061: 0x0061, # LATIN SMALL LETTER A + 0x0062: 0x0062, # LATIN SMALL LETTER B + 0x0063: 0x0063, # LATIN SMALL LETTER C + 0x0064: 0x0064, # LATIN SMALL LETTER D + 0x0065: 0x0065, # LATIN SMALL LETTER E + 0x0066: 0x0066, # LATIN SMALL LETTER F + 0x0067: 0x0067, # LATIN SMALL LETTER G + 0x0068: 0x0068, # LATIN SMALL LETTER H + 0x0069: 0x0069, # LATIN SMALL LETTER I + 0x006a: 0x006a, # LATIN SMALL LETTER J + 0x006b: 0x006b, # LATIN SMALL LETTER K + 0x006c: 0x006c, # LATIN SMALL LETTER L + 0x006d: 0x006d, # LATIN SMALL LETTER M + 0x006e: 0x006e, # LATIN SMALL LETTER N + 0x006f: 0x006f, # LATIN SMALL LETTER O + 0x0070: 0x0070, # LATIN SMALL LETTER P + 0x0071: 0x0071, # LATIN SMALL LETTER Q + 0x0072: 0x0072, # LATIN SMALL LETTER R + 0x0073: 0x0073, # LATIN SMALL LETTER S + 0x0074: 0x0074, # LATIN SMALL LETTER T + 0x0075: 0x0075, # LATIN SMALL LETTER U + 0x0076: 0x0076, # LATIN SMALL LETTER V + 0x0077: 0x0077, # LATIN SMALL LETTER W + 0x0078: 0x0078, # LATIN SMALL LETTER X + 0x0079: 0x0079, # LATIN SMALL LETTER Y + 0x007a: 0x007a, # LATIN SMALL LETTER Z + 0x007b: 0x007b, # LEFT CURLY BRACKET + 0x007c: 0x007c, # VERTICAL LINE + 0x007d: 0x007d, # RIGHT CURLY BRACKET + 0x007e: 0x007e, # TILDE + 0x007f: 0x007f, # DELETE + 0x00a0: 0x00ff, # NO-BREAK SPACE + 0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK + 0x00a3: 0x009c, # POUND SIGN + 0x00a4: 0x00af, # CURRENCY SIGN + 0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR + 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00ac: 0x00aa, # NOT SIGN + 0x00b0: 0x00f8, # 
DEGREE SIGN + 0x00b1: 0x00f1, # PLUS-MINUS SIGN + 0x00b2: 0x00fd, # SUPERSCRIPT TWO + 0x00b5: 0x00e6, # MICRO SIGN + 0x00b7: 0x00fa, # MIDDLE DOT + 0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR + 0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER + 0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF + 0x00bf: 0x00a8, # INVERTED QUESTION MARK + 0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS + 0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE + 0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE + 0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA + 0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE + 0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE + 0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS + 0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE + 0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS + 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S + 0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE + 0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE + 0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX + 0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS + 0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE + 0x00e6: 0x0091, # LATIN SMALL LIGATURE AE + 0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA + 0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE + 0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE + 0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX + 0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS + 0x00ec: 0x008d, # LATIN SMALL LETTER I WITH GRAVE + 0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE + 0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX + 0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS + 0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE + 0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE + 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE + 0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX + 0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS + 0x00f7: 0x00f6, # DIVISION SIGN + 0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE + 0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE + 0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE + 0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX + 0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS + 0x00ff: 0x0098, # LATIN SMALL LETTER Y WITH DIAERESIS + 0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK + 0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA + 0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA + 0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA + 0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI + 0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA + 0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA + 0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA + 0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON + 0x03c0: 0x00e3, # GREEK SMALL LETTER PI + 0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA + 0x03c4: 0x00e7, # GREEK SMALL LETTER TAU + 0x03c6: 0x00ed, # GREEK SMALL LETTER PHI + 0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N + 0x20a7: 0x009e, # PESETA SIGN + 0x2219: 0x00f9, # BULLET OPERATOR + 0x221a: 0x00fb, # SQUARE ROOT + 0x221e: 0x00ec, # INFINITY + 0x2229: 0x00ef, # INTERSECTION + 0x2248: 0x00f7, # ALMOST EQUAL TO + 0x2261: 0x00f0, # IDENTICAL TO + 0x2264: 0x00f3, # LESS-THAN OR EQUAL TO + 0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO + 0x2310: 0x00a9, # REVERSED NOT SIGN + 0x2320: 0x00f4, # TOP HALF INTEGRAL + 0x2321: 0x00f5, # BOTTOM HALF INTEGRAL + 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL + 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL + 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT + 0x2510: 
0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT + 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT + 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT + 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT + 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT + 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL + 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL + 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL + 0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE + 0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE + 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT + 0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE + 0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE + 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT + 0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE + 0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE + 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT + 0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE + 0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE + 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT + 0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE + 0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE + 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + 0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE + 0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE + 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT + 0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE + 0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE + 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + 0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE + 0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE + 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL + 0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE + 0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE + 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + 0x2580: 0x00df, # UPPER HALF BLOCK + 0x2584: 0x00dc, # LOWER HALF BLOCK + 0x2588: 0x00db, # FULL BLOCK + 0x258c: 0x00dd, # LEFT HALF BLOCK + 0x2590: 0x00de, # RIGHT HALF BLOCK + 0x2591: 0x00b0, # LIGHT SHADE + 0x2592: 0x00b1, # MEDIUM SHADE + 0x2593: 0x00b2, # DARK SHADE + 0x25a0: 0x00fe, # BLACK SQUARE +} diff --git a/venv/Lib/encodings/cp866.py b/venv/Lib/encodings/cp866.py new file mode 100644 index 00000000..bec7ae39 --- /dev/null +++ b/venv/Lib/encodings/cp866.py @@ -0,0 +1,698 @@ +""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP866.TXT' with gencodec.py. 
+ +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_map) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_map)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='cp866', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + +### Decoding Map + +decoding_map = codecs.make_identity_dict(range(256)) +decoding_map.update({ + 0x0080: 0x0410, # CYRILLIC CAPITAL LETTER A + 0x0081: 0x0411, # CYRILLIC CAPITAL LETTER BE + 0x0082: 0x0412, # CYRILLIC CAPITAL LETTER VE + 0x0083: 0x0413, # CYRILLIC CAPITAL LETTER GHE + 0x0084: 0x0414, # CYRILLIC CAPITAL LETTER DE + 0x0085: 0x0415, # CYRILLIC CAPITAL LETTER IE + 0x0086: 0x0416, # CYRILLIC CAPITAL LETTER ZHE + 0x0087: 0x0417, # CYRILLIC CAPITAL LETTER ZE + 0x0088: 0x0418, # CYRILLIC CAPITAL LETTER I + 0x0089: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I + 0x008a: 0x041a, # CYRILLIC CAPITAL LETTER KA + 0x008b: 0x041b, # CYRILLIC CAPITAL LETTER EL + 0x008c: 0x041c, # CYRILLIC CAPITAL LETTER EM + 0x008d: 0x041d, # CYRILLIC CAPITAL LETTER EN + 0x008e: 0x041e, # CYRILLIC CAPITAL LETTER O + 0x008f: 0x041f, # CYRILLIC CAPITAL LETTER PE + 0x0090: 0x0420, # CYRILLIC CAPITAL LETTER ER + 0x0091: 0x0421, # CYRILLIC CAPITAL LETTER ES + 0x0092: 0x0422, # CYRILLIC CAPITAL LETTER TE + 0x0093: 0x0423, # CYRILLIC CAPITAL LETTER U + 0x0094: 0x0424, # CYRILLIC CAPITAL LETTER EF + 0x0095: 0x0425, # CYRILLIC CAPITAL LETTER HA + 0x0096: 0x0426, # CYRILLIC CAPITAL LETTER TSE + 0x0097: 0x0427, # CYRILLIC CAPITAL LETTER CHE + 0x0098: 0x0428, # CYRILLIC CAPITAL LETTER SHA + 0x0099: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA + 0x009a: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN + 0x009b: 0x042b, # CYRILLIC CAPITAL LETTER YERU + 0x009c: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN + 0x009d: 0x042d, # CYRILLIC CAPITAL LETTER E + 0x009e: 0x042e, # CYRILLIC CAPITAL LETTER YU + 0x009f: 0x042f, # CYRILLIC CAPITAL LETTER YA + 0x00a0: 0x0430, # CYRILLIC SMALL LETTER A + 0x00a1: 0x0431, # CYRILLIC SMALL LETTER BE + 0x00a2: 0x0432, # CYRILLIC SMALL LETTER VE + 0x00a3: 0x0433, # CYRILLIC SMALL LETTER GHE + 0x00a4: 0x0434, # CYRILLIC SMALL LETTER DE + 0x00a5: 0x0435, # CYRILLIC SMALL LETTER IE + 0x00a6: 0x0436, # CYRILLIC SMALL LETTER ZHE + 0x00a7: 0x0437, # CYRILLIC SMALL LETTER ZE + 0x00a8: 0x0438, # CYRILLIC SMALL LETTER I + 0x00a9: 0x0439, # CYRILLIC SMALL LETTER SHORT I + 0x00aa: 0x043a, # CYRILLIC SMALL LETTER KA + 0x00ab: 0x043b, # CYRILLIC SMALL LETTER EL + 0x00ac: 0x043c, # CYRILLIC SMALL LETTER EM + 0x00ad: 0x043d, # CYRILLIC SMALL LETTER EN + 0x00ae: 0x043e, # CYRILLIC SMALL LETTER O + 0x00af: 0x043f, # CYRILLIC SMALL LETTER PE + 0x00b0: 0x2591, # LIGHT SHADE + 0x00b1: 0x2592, # MEDIUM SHADE + 0x00b2: 0x2593, # DARK SHADE + 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL + 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND 
LEFT + 0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE + 0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE + 0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE + 0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE + 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT + 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL + 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT + 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT + 0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE + 0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE + 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT + 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT + 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL + 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT + 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL + 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + 0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE + 0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE + 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT + 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT + 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL + 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL + 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + 0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE + 0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE + 0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE + 0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE + 0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE + 0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE + 0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE + 0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE + 0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE + 0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE + 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT + 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT + 0x00db: 0x2588, # FULL BLOCK + 0x00dc: 0x2584, # LOWER HALF BLOCK + 0x00dd: 0x258c, # LEFT HALF BLOCK + 0x00de: 0x2590, # RIGHT HALF BLOCK + 0x00df: 0x2580, # UPPER HALF BLOCK + 0x00e0: 0x0440, # CYRILLIC SMALL LETTER ER + 0x00e1: 0x0441, # CYRILLIC SMALL LETTER ES + 0x00e2: 0x0442, # CYRILLIC SMALL LETTER TE + 0x00e3: 0x0443, # CYRILLIC SMALL LETTER U + 0x00e4: 0x0444, # CYRILLIC SMALL LETTER EF + 0x00e5: 0x0445, # CYRILLIC SMALL LETTER HA + 0x00e6: 0x0446, # CYRILLIC SMALL LETTER TSE + 0x00e7: 0x0447, # CYRILLIC SMALL LETTER CHE + 0x00e8: 0x0448, # CYRILLIC SMALL LETTER SHA + 0x00e9: 0x0449, # CYRILLIC SMALL LETTER SHCHA + 0x00ea: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN + 0x00eb: 0x044b, # CYRILLIC SMALL LETTER YERU + 0x00ec: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN + 0x00ed: 0x044d, # CYRILLIC SMALL LETTER E + 0x00ee: 0x044e, # CYRILLIC SMALL LETTER YU + 0x00ef: 0x044f, # CYRILLIC SMALL LETTER YA + 0x00f0: 0x0401, # CYRILLIC CAPITAL LETTER IO + 0x00f1: 0x0451, # CYRILLIC SMALL LETTER IO + 0x00f2: 0x0404, # CYRILLIC CAPITAL LETTER UKRAINIAN IE + 0x00f3: 0x0454, # CYRILLIC SMALL LETTER UKRAINIAN IE + 0x00f4: 0x0407, # CYRILLIC CAPITAL LETTER YI + 0x00f5: 0x0457, # CYRILLIC SMALL LETTER YI + 0x00f6: 0x040e, # CYRILLIC CAPITAL LETTER SHORT U + 0x00f7: 0x045e, # 
CYRILLIC SMALL LETTER SHORT U + 0x00f8: 0x00b0, # DEGREE SIGN + 0x00f9: 0x2219, # BULLET OPERATOR + 0x00fa: 0x00b7, # MIDDLE DOT + 0x00fb: 0x221a, # SQUARE ROOT + 0x00fc: 0x2116, # NUMERO SIGN + 0x00fd: 0x00a4, # CURRENCY SIGN + 0x00fe: 0x25a0, # BLACK SQUARE + 0x00ff: 0x00a0, # NO-BREAK SPACE +}) + +### Decoding Table + +decoding_table = ( + '\x00' # 0x0000 -> NULL + '\x01' # 0x0001 -> START OF HEADING + '\x02' # 0x0002 -> START OF TEXT + '\x03' # 0x0003 -> END OF TEXT + '\x04' # 0x0004 -> END OF TRANSMISSION + '\x05' # 0x0005 -> ENQUIRY + '\x06' # 0x0006 -> ACKNOWLEDGE + '\x07' # 0x0007 -> BELL + '\x08' # 0x0008 -> BACKSPACE + '\t' # 0x0009 -> HORIZONTAL TABULATION + '\n' # 0x000a -> LINE FEED + '\x0b' # 0x000b -> VERTICAL TABULATION + '\x0c' # 0x000c -> FORM FEED + '\r' # 0x000d -> CARRIAGE RETURN + '\x0e' # 0x000e -> SHIFT OUT + '\x0f' # 0x000f -> SHIFT IN + '\x10' # 0x0010 -> DATA LINK ESCAPE + '\x11' # 0x0011 -> DEVICE CONTROL ONE + '\x12' # 0x0012 -> DEVICE CONTROL TWO + '\x13' # 0x0013 -> DEVICE CONTROL THREE + '\x14' # 0x0014 -> DEVICE CONTROL FOUR + '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x0016 -> SYNCHRONOUS IDLE + '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK + '\x18' # 0x0018 -> CANCEL + '\x19' # 0x0019 -> END OF MEDIUM + '\x1a' # 0x001a -> SUBSTITUTE + '\x1b' # 0x001b -> ESCAPE + '\x1c' # 0x001c -> FILE SEPARATOR + '\x1d' # 0x001d -> GROUP SEPARATOR + '\x1e' # 0x001e -> RECORD SEPARATOR + '\x1f' # 0x001f -> UNIT SEPARATOR + ' ' # 0x0020 -> SPACE + '!' # 0x0021 -> EXCLAMATION MARK + '"' # 0x0022 -> QUOTATION MARK + '#' # 0x0023 -> NUMBER SIGN + '$' # 0x0024 -> DOLLAR SIGN + '%' # 0x0025 -> PERCENT SIGN + '&' # 0x0026 -> AMPERSAND + "'" # 0x0027 -> APOSTROPHE + '(' # 0x0028 -> LEFT PARENTHESIS + ')' # 0x0029 -> RIGHT PARENTHESIS + '*' # 0x002a -> ASTERISK + '+' # 0x002b -> PLUS SIGN + ',' # 0x002c -> COMMA + '-' # 0x002d -> HYPHEN-MINUS + '.' # 0x002e -> FULL STOP + '/' # 0x002f -> SOLIDUS + '0' # 0x0030 -> DIGIT ZERO + '1' # 0x0031 -> DIGIT ONE + '2' # 0x0032 -> DIGIT TWO + '3' # 0x0033 -> DIGIT THREE + '4' # 0x0034 -> DIGIT FOUR + '5' # 0x0035 -> DIGIT FIVE + '6' # 0x0036 -> DIGIT SIX + '7' # 0x0037 -> DIGIT SEVEN + '8' # 0x0038 -> DIGIT EIGHT + '9' # 0x0039 -> DIGIT NINE + ':' # 0x003a -> COLON + ';' # 0x003b -> SEMICOLON + '<' # 0x003c -> LESS-THAN SIGN + '=' # 0x003d -> EQUALS SIGN + '>' # 0x003e -> GREATER-THAN SIGN + '?' 
# 0x003f -> QUESTION MARK + '@' # 0x0040 -> COMMERCIAL AT + 'A' # 0x0041 -> LATIN CAPITAL LETTER A + 'B' # 0x0042 -> LATIN CAPITAL LETTER B + 'C' # 0x0043 -> LATIN CAPITAL LETTER C + 'D' # 0x0044 -> LATIN CAPITAL LETTER D + 'E' # 0x0045 -> LATIN CAPITAL LETTER E + 'F' # 0x0046 -> LATIN CAPITAL LETTER F + 'G' # 0x0047 -> LATIN CAPITAL LETTER G + 'H' # 0x0048 -> LATIN CAPITAL LETTER H + 'I' # 0x0049 -> LATIN CAPITAL LETTER I + 'J' # 0x004a -> LATIN CAPITAL LETTER J + 'K' # 0x004b -> LATIN CAPITAL LETTER K + 'L' # 0x004c -> LATIN CAPITAL LETTER L + 'M' # 0x004d -> LATIN CAPITAL LETTER M + 'N' # 0x004e -> LATIN CAPITAL LETTER N + 'O' # 0x004f -> LATIN CAPITAL LETTER O + 'P' # 0x0050 -> LATIN CAPITAL LETTER P + 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q + 'R' # 0x0052 -> LATIN CAPITAL LETTER R + 'S' # 0x0053 -> LATIN CAPITAL LETTER S + 'T' # 0x0054 -> LATIN CAPITAL LETTER T + 'U' # 0x0055 -> LATIN CAPITAL LETTER U + 'V' # 0x0056 -> LATIN CAPITAL LETTER V + 'W' # 0x0057 -> LATIN CAPITAL LETTER W + 'X' # 0x0058 -> LATIN CAPITAL LETTER X + 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y + 'Z' # 0x005a -> LATIN CAPITAL LETTER Z + '[' # 0x005b -> LEFT SQUARE BRACKET + '\\' # 0x005c -> REVERSE SOLIDUS + ']' # 0x005d -> RIGHT SQUARE BRACKET + '^' # 0x005e -> CIRCUMFLEX ACCENT + '_' # 0x005f -> LOW LINE + '`' # 0x0060 -> GRAVE ACCENT + 'a' # 0x0061 -> LATIN SMALL LETTER A + 'b' # 0x0062 -> LATIN SMALL LETTER B + 'c' # 0x0063 -> LATIN SMALL LETTER C + 'd' # 0x0064 -> LATIN SMALL LETTER D + 'e' # 0x0065 -> LATIN SMALL LETTER E + 'f' # 0x0066 -> LATIN SMALL LETTER F + 'g' # 0x0067 -> LATIN SMALL LETTER G + 'h' # 0x0068 -> LATIN SMALL LETTER H + 'i' # 0x0069 -> LATIN SMALL LETTER I + 'j' # 0x006a -> LATIN SMALL LETTER J + 'k' # 0x006b -> LATIN SMALL LETTER K + 'l' # 0x006c -> LATIN SMALL LETTER L + 'm' # 0x006d -> LATIN SMALL LETTER M + 'n' # 0x006e -> LATIN SMALL LETTER N + 'o' # 0x006f -> LATIN SMALL LETTER O + 'p' # 0x0070 -> LATIN SMALL LETTER P + 'q' # 0x0071 -> LATIN SMALL LETTER Q + 'r' # 0x0072 -> LATIN SMALL LETTER R + 's' # 0x0073 -> LATIN SMALL LETTER S + 't' # 0x0074 -> LATIN SMALL LETTER T + 'u' # 0x0075 -> LATIN SMALL LETTER U + 'v' # 0x0076 -> LATIN SMALL LETTER V + 'w' # 0x0077 -> LATIN SMALL LETTER W + 'x' # 0x0078 -> LATIN SMALL LETTER X + 'y' # 0x0079 -> LATIN SMALL LETTER Y + 'z' # 0x007a -> LATIN SMALL LETTER Z + '{' # 0x007b -> LEFT CURLY BRACKET + '|' # 0x007c -> VERTICAL LINE + '}' # 0x007d -> RIGHT CURLY BRACKET + '~' # 0x007e -> TILDE + '\x7f' # 0x007f -> DELETE + '\u0410' # 0x0080 -> CYRILLIC CAPITAL LETTER A + '\u0411' # 0x0081 -> CYRILLIC CAPITAL LETTER BE + '\u0412' # 0x0082 -> CYRILLIC CAPITAL LETTER VE + '\u0413' # 0x0083 -> CYRILLIC CAPITAL LETTER GHE + '\u0414' # 0x0084 -> CYRILLIC CAPITAL LETTER DE + '\u0415' # 0x0085 -> CYRILLIC CAPITAL LETTER IE + '\u0416' # 0x0086 -> CYRILLIC CAPITAL LETTER ZHE + '\u0417' # 0x0087 -> CYRILLIC CAPITAL LETTER ZE + '\u0418' # 0x0088 -> CYRILLIC CAPITAL LETTER I + '\u0419' # 0x0089 -> CYRILLIC CAPITAL LETTER SHORT I + '\u041a' # 0x008a -> CYRILLIC CAPITAL LETTER KA + '\u041b' # 0x008b -> CYRILLIC CAPITAL LETTER EL + '\u041c' # 0x008c -> CYRILLIC CAPITAL LETTER EM + '\u041d' # 0x008d -> CYRILLIC CAPITAL LETTER EN + '\u041e' # 0x008e -> CYRILLIC CAPITAL LETTER O + '\u041f' # 0x008f -> CYRILLIC CAPITAL LETTER PE + '\u0420' # 0x0090 -> CYRILLIC CAPITAL LETTER ER + '\u0421' # 0x0091 -> CYRILLIC CAPITAL LETTER ES + '\u0422' # 0x0092 -> CYRILLIC CAPITAL LETTER TE + '\u0423' # 0x0093 -> CYRILLIC CAPITAL LETTER U + '\u0424' # 0x0094 -> CYRILLIC CAPITAL LETTER 
EF + '\u0425' # 0x0095 -> CYRILLIC CAPITAL LETTER HA + '\u0426' # 0x0096 -> CYRILLIC CAPITAL LETTER TSE + '\u0427' # 0x0097 -> CYRILLIC CAPITAL LETTER CHE + '\u0428' # 0x0098 -> CYRILLIC CAPITAL LETTER SHA + '\u0429' # 0x0099 -> CYRILLIC CAPITAL LETTER SHCHA + '\u042a' # 0x009a -> CYRILLIC CAPITAL LETTER HARD SIGN + '\u042b' # 0x009b -> CYRILLIC CAPITAL LETTER YERU + '\u042c' # 0x009c -> CYRILLIC CAPITAL LETTER SOFT SIGN + '\u042d' # 0x009d -> CYRILLIC CAPITAL LETTER E + '\u042e' # 0x009e -> CYRILLIC CAPITAL LETTER YU + '\u042f' # 0x009f -> CYRILLIC CAPITAL LETTER YA + '\u0430' # 0x00a0 -> CYRILLIC SMALL LETTER A + '\u0431' # 0x00a1 -> CYRILLIC SMALL LETTER BE + '\u0432' # 0x00a2 -> CYRILLIC SMALL LETTER VE + '\u0433' # 0x00a3 -> CYRILLIC SMALL LETTER GHE + '\u0434' # 0x00a4 -> CYRILLIC SMALL LETTER DE + '\u0435' # 0x00a5 -> CYRILLIC SMALL LETTER IE + '\u0436' # 0x00a6 -> CYRILLIC SMALL LETTER ZHE + '\u0437' # 0x00a7 -> CYRILLIC SMALL LETTER ZE + '\u0438' # 0x00a8 -> CYRILLIC SMALL LETTER I + '\u0439' # 0x00a9 -> CYRILLIC SMALL LETTER SHORT I + '\u043a' # 0x00aa -> CYRILLIC SMALL LETTER KA + '\u043b' # 0x00ab -> CYRILLIC SMALL LETTER EL + '\u043c' # 0x00ac -> CYRILLIC SMALL LETTER EM + '\u043d' # 0x00ad -> CYRILLIC SMALL LETTER EN + '\u043e' # 0x00ae -> CYRILLIC SMALL LETTER O + '\u043f' # 0x00af -> CYRILLIC SMALL LETTER PE + '\u2591' # 0x00b0 -> LIGHT SHADE + '\u2592' # 0x00b1 -> MEDIUM SHADE + '\u2593' # 0x00b2 -> DARK SHADE + '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL + '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT + '\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE + '\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE + '\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE + '\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE + '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT + '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL + '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT + '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT + '\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE + '\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE + '\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT + '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT + '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL + '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + '\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT + '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL + '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + '\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE + '\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE + '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT + '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT + '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL + '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL + '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + '\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE + '\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE + '\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE + '\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE + '\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE + '\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE + 
'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE + '\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE + '\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE + '\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE + '\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT + '\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT + '\u2588' # 0x00db -> FULL BLOCK + '\u2584' # 0x00dc -> LOWER HALF BLOCK + '\u258c' # 0x00dd -> LEFT HALF BLOCK + '\u2590' # 0x00de -> RIGHT HALF BLOCK + '\u2580' # 0x00df -> UPPER HALF BLOCK + '\u0440' # 0x00e0 -> CYRILLIC SMALL LETTER ER + '\u0441' # 0x00e1 -> CYRILLIC SMALL LETTER ES + '\u0442' # 0x00e2 -> CYRILLIC SMALL LETTER TE + '\u0443' # 0x00e3 -> CYRILLIC SMALL LETTER U + '\u0444' # 0x00e4 -> CYRILLIC SMALL LETTER EF + '\u0445' # 0x00e5 -> CYRILLIC SMALL LETTER HA + '\u0446' # 0x00e6 -> CYRILLIC SMALL LETTER TSE + '\u0447' # 0x00e7 -> CYRILLIC SMALL LETTER CHE + '\u0448' # 0x00e8 -> CYRILLIC SMALL LETTER SHA + '\u0449' # 0x00e9 -> CYRILLIC SMALL LETTER SHCHA + '\u044a' # 0x00ea -> CYRILLIC SMALL LETTER HARD SIGN + '\u044b' # 0x00eb -> CYRILLIC SMALL LETTER YERU + '\u044c' # 0x00ec -> CYRILLIC SMALL LETTER SOFT SIGN + '\u044d' # 0x00ed -> CYRILLIC SMALL LETTER E + '\u044e' # 0x00ee -> CYRILLIC SMALL LETTER YU + '\u044f' # 0x00ef -> CYRILLIC SMALL LETTER YA + '\u0401' # 0x00f0 -> CYRILLIC CAPITAL LETTER IO + '\u0451' # 0x00f1 -> CYRILLIC SMALL LETTER IO + '\u0404' # 0x00f2 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE + '\u0454' # 0x00f3 -> CYRILLIC SMALL LETTER UKRAINIAN IE + '\u0407' # 0x00f4 -> CYRILLIC CAPITAL LETTER YI + '\u0457' # 0x00f5 -> CYRILLIC SMALL LETTER YI + '\u040e' # 0x00f6 -> CYRILLIC CAPITAL LETTER SHORT U + '\u045e' # 0x00f7 -> CYRILLIC SMALL LETTER SHORT U + '\xb0' # 0x00f8 -> DEGREE SIGN + '\u2219' # 0x00f9 -> BULLET OPERATOR + '\xb7' # 0x00fa -> MIDDLE DOT + '\u221a' # 0x00fb -> SQUARE ROOT + '\u2116' # 0x00fc -> NUMERO SIGN + '\xa4' # 0x00fd -> CURRENCY SIGN + '\u25a0' # 0x00fe -> BLACK SQUARE + '\xa0' # 0x00ff -> NO-BREAK SPACE +) + +### Encoding Map + +encoding_map = { + 0x0000: 0x0000, # NULL + 0x0001: 0x0001, # START OF HEADING + 0x0002: 0x0002, # START OF TEXT + 0x0003: 0x0003, # END OF TEXT + 0x0004: 0x0004, # END OF TRANSMISSION + 0x0005: 0x0005, # ENQUIRY + 0x0006: 0x0006, # ACKNOWLEDGE + 0x0007: 0x0007, # BELL + 0x0008: 0x0008, # BACKSPACE + 0x0009: 0x0009, # HORIZONTAL TABULATION + 0x000a: 0x000a, # LINE FEED + 0x000b: 0x000b, # VERTICAL TABULATION + 0x000c: 0x000c, # FORM FEED + 0x000d: 0x000d, # CARRIAGE RETURN + 0x000e: 0x000e, # SHIFT OUT + 0x000f: 0x000f, # SHIFT IN + 0x0010: 0x0010, # DATA LINK ESCAPE + 0x0011: 0x0011, # DEVICE CONTROL ONE + 0x0012: 0x0012, # DEVICE CONTROL TWO + 0x0013: 0x0013, # DEVICE CONTROL THREE + 0x0014: 0x0014, # DEVICE CONTROL FOUR + 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE + 0x0016: 0x0016, # SYNCHRONOUS IDLE + 0x0017: 0x0017, # END OF TRANSMISSION BLOCK + 0x0018: 0x0018, # CANCEL + 0x0019: 0x0019, # END OF MEDIUM + 0x001a: 0x001a, # SUBSTITUTE + 0x001b: 0x001b, # ESCAPE + 0x001c: 0x001c, # FILE SEPARATOR + 0x001d: 0x001d, # GROUP SEPARATOR + 0x001e: 0x001e, # RECORD SEPARATOR + 0x001f: 0x001f, # UNIT SEPARATOR + 0x0020: 0x0020, # SPACE + 0x0021: 0x0021, # EXCLAMATION MARK + 0x0022: 0x0022, # QUOTATION MARK + 0x0023: 0x0023, # NUMBER SIGN + 0x0024: 0x0024, # DOLLAR SIGN + 0x0025: 0x0025, # PERCENT SIGN + 0x0026: 0x0026, # AMPERSAND + 0x0027: 0x0027, # APOSTROPHE + 0x0028: 0x0028, # LEFT PARENTHESIS + 0x0029: 0x0029, # RIGHT PARENTHESIS + 
0x002a: 0x002a, # ASTERISK + 0x002b: 0x002b, # PLUS SIGN + 0x002c: 0x002c, # COMMA + 0x002d: 0x002d, # HYPHEN-MINUS + 0x002e: 0x002e, # FULL STOP + 0x002f: 0x002f, # SOLIDUS + 0x0030: 0x0030, # DIGIT ZERO + 0x0031: 0x0031, # DIGIT ONE + 0x0032: 0x0032, # DIGIT TWO + 0x0033: 0x0033, # DIGIT THREE + 0x0034: 0x0034, # DIGIT FOUR + 0x0035: 0x0035, # DIGIT FIVE + 0x0036: 0x0036, # DIGIT SIX + 0x0037: 0x0037, # DIGIT SEVEN + 0x0038: 0x0038, # DIGIT EIGHT + 0x0039: 0x0039, # DIGIT NINE + 0x003a: 0x003a, # COLON + 0x003b: 0x003b, # SEMICOLON + 0x003c: 0x003c, # LESS-THAN SIGN + 0x003d: 0x003d, # EQUALS SIGN + 0x003e: 0x003e, # GREATER-THAN SIGN + 0x003f: 0x003f, # QUESTION MARK + 0x0040: 0x0040, # COMMERCIAL AT + 0x0041: 0x0041, # LATIN CAPITAL LETTER A + 0x0042: 0x0042, # LATIN CAPITAL LETTER B + 0x0043: 0x0043, # LATIN CAPITAL LETTER C + 0x0044: 0x0044, # LATIN CAPITAL LETTER D + 0x0045: 0x0045, # LATIN CAPITAL LETTER E + 0x0046: 0x0046, # LATIN CAPITAL LETTER F + 0x0047: 0x0047, # LATIN CAPITAL LETTER G + 0x0048: 0x0048, # LATIN CAPITAL LETTER H + 0x0049: 0x0049, # LATIN CAPITAL LETTER I + 0x004a: 0x004a, # LATIN CAPITAL LETTER J + 0x004b: 0x004b, # LATIN CAPITAL LETTER K + 0x004c: 0x004c, # LATIN CAPITAL LETTER L + 0x004d: 0x004d, # LATIN CAPITAL LETTER M + 0x004e: 0x004e, # LATIN CAPITAL LETTER N + 0x004f: 0x004f, # LATIN CAPITAL LETTER O + 0x0050: 0x0050, # LATIN CAPITAL LETTER P + 0x0051: 0x0051, # LATIN CAPITAL LETTER Q + 0x0052: 0x0052, # LATIN CAPITAL LETTER R + 0x0053: 0x0053, # LATIN CAPITAL LETTER S + 0x0054: 0x0054, # LATIN CAPITAL LETTER T + 0x0055: 0x0055, # LATIN CAPITAL LETTER U + 0x0056: 0x0056, # LATIN CAPITAL LETTER V + 0x0057: 0x0057, # LATIN CAPITAL LETTER W + 0x0058: 0x0058, # LATIN CAPITAL LETTER X + 0x0059: 0x0059, # LATIN CAPITAL LETTER Y + 0x005a: 0x005a, # LATIN CAPITAL LETTER Z + 0x005b: 0x005b, # LEFT SQUARE BRACKET + 0x005c: 0x005c, # REVERSE SOLIDUS + 0x005d: 0x005d, # RIGHT SQUARE BRACKET + 0x005e: 0x005e, # CIRCUMFLEX ACCENT + 0x005f: 0x005f, # LOW LINE + 0x0060: 0x0060, # GRAVE ACCENT + 0x0061: 0x0061, # LATIN SMALL LETTER A + 0x0062: 0x0062, # LATIN SMALL LETTER B + 0x0063: 0x0063, # LATIN SMALL LETTER C + 0x0064: 0x0064, # LATIN SMALL LETTER D + 0x0065: 0x0065, # LATIN SMALL LETTER E + 0x0066: 0x0066, # LATIN SMALL LETTER F + 0x0067: 0x0067, # LATIN SMALL LETTER G + 0x0068: 0x0068, # LATIN SMALL LETTER H + 0x0069: 0x0069, # LATIN SMALL LETTER I + 0x006a: 0x006a, # LATIN SMALL LETTER J + 0x006b: 0x006b, # LATIN SMALL LETTER K + 0x006c: 0x006c, # LATIN SMALL LETTER L + 0x006d: 0x006d, # LATIN SMALL LETTER M + 0x006e: 0x006e, # LATIN SMALL LETTER N + 0x006f: 0x006f, # LATIN SMALL LETTER O + 0x0070: 0x0070, # LATIN SMALL LETTER P + 0x0071: 0x0071, # LATIN SMALL LETTER Q + 0x0072: 0x0072, # LATIN SMALL LETTER R + 0x0073: 0x0073, # LATIN SMALL LETTER S + 0x0074: 0x0074, # LATIN SMALL LETTER T + 0x0075: 0x0075, # LATIN SMALL LETTER U + 0x0076: 0x0076, # LATIN SMALL LETTER V + 0x0077: 0x0077, # LATIN SMALL LETTER W + 0x0078: 0x0078, # LATIN SMALL LETTER X + 0x0079: 0x0079, # LATIN SMALL LETTER Y + 0x007a: 0x007a, # LATIN SMALL LETTER Z + 0x007b: 0x007b, # LEFT CURLY BRACKET + 0x007c: 0x007c, # VERTICAL LINE + 0x007d: 0x007d, # RIGHT CURLY BRACKET + 0x007e: 0x007e, # TILDE + 0x007f: 0x007f, # DELETE + 0x00a0: 0x00ff, # NO-BREAK SPACE + 0x00a4: 0x00fd, # CURRENCY SIGN + 0x00b0: 0x00f8, # DEGREE SIGN + 0x00b7: 0x00fa, # MIDDLE DOT + 0x0401: 0x00f0, # CYRILLIC CAPITAL LETTER IO + 0x0404: 0x00f2, # CYRILLIC CAPITAL LETTER UKRAINIAN IE + 0x0407: 0x00f4, # CYRILLIC CAPITAL 
LETTER YI + 0x040e: 0x00f6, # CYRILLIC CAPITAL LETTER SHORT U + 0x0410: 0x0080, # CYRILLIC CAPITAL LETTER A + 0x0411: 0x0081, # CYRILLIC CAPITAL LETTER BE + 0x0412: 0x0082, # CYRILLIC CAPITAL LETTER VE + 0x0413: 0x0083, # CYRILLIC CAPITAL LETTER GHE + 0x0414: 0x0084, # CYRILLIC CAPITAL LETTER DE + 0x0415: 0x0085, # CYRILLIC CAPITAL LETTER IE + 0x0416: 0x0086, # CYRILLIC CAPITAL LETTER ZHE + 0x0417: 0x0087, # CYRILLIC CAPITAL LETTER ZE + 0x0418: 0x0088, # CYRILLIC CAPITAL LETTER I + 0x0419: 0x0089, # CYRILLIC CAPITAL LETTER SHORT I + 0x041a: 0x008a, # CYRILLIC CAPITAL LETTER KA + 0x041b: 0x008b, # CYRILLIC CAPITAL LETTER EL + 0x041c: 0x008c, # CYRILLIC CAPITAL LETTER EM + 0x041d: 0x008d, # CYRILLIC CAPITAL LETTER EN + 0x041e: 0x008e, # CYRILLIC CAPITAL LETTER O + 0x041f: 0x008f, # CYRILLIC CAPITAL LETTER PE + 0x0420: 0x0090, # CYRILLIC CAPITAL LETTER ER + 0x0421: 0x0091, # CYRILLIC CAPITAL LETTER ES + 0x0422: 0x0092, # CYRILLIC CAPITAL LETTER TE + 0x0423: 0x0093, # CYRILLIC CAPITAL LETTER U + 0x0424: 0x0094, # CYRILLIC CAPITAL LETTER EF + 0x0425: 0x0095, # CYRILLIC CAPITAL LETTER HA + 0x0426: 0x0096, # CYRILLIC CAPITAL LETTER TSE + 0x0427: 0x0097, # CYRILLIC CAPITAL LETTER CHE + 0x0428: 0x0098, # CYRILLIC CAPITAL LETTER SHA + 0x0429: 0x0099, # CYRILLIC CAPITAL LETTER SHCHA + 0x042a: 0x009a, # CYRILLIC CAPITAL LETTER HARD SIGN + 0x042b: 0x009b, # CYRILLIC CAPITAL LETTER YERU + 0x042c: 0x009c, # CYRILLIC CAPITAL LETTER SOFT SIGN + 0x042d: 0x009d, # CYRILLIC CAPITAL LETTER E + 0x042e: 0x009e, # CYRILLIC CAPITAL LETTER YU + 0x042f: 0x009f, # CYRILLIC CAPITAL LETTER YA + 0x0430: 0x00a0, # CYRILLIC SMALL LETTER A + 0x0431: 0x00a1, # CYRILLIC SMALL LETTER BE + 0x0432: 0x00a2, # CYRILLIC SMALL LETTER VE + 0x0433: 0x00a3, # CYRILLIC SMALL LETTER GHE + 0x0434: 0x00a4, # CYRILLIC SMALL LETTER DE + 0x0435: 0x00a5, # CYRILLIC SMALL LETTER IE + 0x0436: 0x00a6, # CYRILLIC SMALL LETTER ZHE + 0x0437: 0x00a7, # CYRILLIC SMALL LETTER ZE + 0x0438: 0x00a8, # CYRILLIC SMALL LETTER I + 0x0439: 0x00a9, # CYRILLIC SMALL LETTER SHORT I + 0x043a: 0x00aa, # CYRILLIC SMALL LETTER KA + 0x043b: 0x00ab, # CYRILLIC SMALL LETTER EL + 0x043c: 0x00ac, # CYRILLIC SMALL LETTER EM + 0x043d: 0x00ad, # CYRILLIC SMALL LETTER EN + 0x043e: 0x00ae, # CYRILLIC SMALL LETTER O + 0x043f: 0x00af, # CYRILLIC SMALL LETTER PE + 0x0440: 0x00e0, # CYRILLIC SMALL LETTER ER + 0x0441: 0x00e1, # CYRILLIC SMALL LETTER ES + 0x0442: 0x00e2, # CYRILLIC SMALL LETTER TE + 0x0443: 0x00e3, # CYRILLIC SMALL LETTER U + 0x0444: 0x00e4, # CYRILLIC SMALL LETTER EF + 0x0445: 0x00e5, # CYRILLIC SMALL LETTER HA + 0x0446: 0x00e6, # CYRILLIC SMALL LETTER TSE + 0x0447: 0x00e7, # CYRILLIC SMALL LETTER CHE + 0x0448: 0x00e8, # CYRILLIC SMALL LETTER SHA + 0x0449: 0x00e9, # CYRILLIC SMALL LETTER SHCHA + 0x044a: 0x00ea, # CYRILLIC SMALL LETTER HARD SIGN + 0x044b: 0x00eb, # CYRILLIC SMALL LETTER YERU + 0x044c: 0x00ec, # CYRILLIC SMALL LETTER SOFT SIGN + 0x044d: 0x00ed, # CYRILLIC SMALL LETTER E + 0x044e: 0x00ee, # CYRILLIC SMALL LETTER YU + 0x044f: 0x00ef, # CYRILLIC SMALL LETTER YA + 0x0451: 0x00f1, # CYRILLIC SMALL LETTER IO + 0x0454: 0x00f3, # CYRILLIC SMALL LETTER UKRAINIAN IE + 0x0457: 0x00f5, # CYRILLIC SMALL LETTER YI + 0x045e: 0x00f7, # CYRILLIC SMALL LETTER SHORT U + 0x2116: 0x00fc, # NUMERO SIGN + 0x2219: 0x00f9, # BULLET OPERATOR + 0x221a: 0x00fb, # SQUARE ROOT + 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL + 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL + 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT + 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT 
+ 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT + 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT + 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT + 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT + 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL + 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL + 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL + 0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE + 0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE + 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT + 0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE + 0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE + 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT + 0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE + 0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE + 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT + 0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE + 0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE + 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT + 0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE + 0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE + 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + 0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE + 0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE + 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT + 0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE + 0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE + 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + 0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE + 0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE + 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL + 0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE + 0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE + 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + 0x2580: 0x00df, # UPPER HALF BLOCK + 0x2584: 0x00dc, # LOWER HALF BLOCK + 0x2588: 0x00db, # FULL BLOCK + 0x258c: 0x00dd, # LEFT HALF BLOCK + 0x2590: 0x00de, # RIGHT HALF BLOCK + 0x2591: 0x00b0, # LIGHT SHADE + 0x2592: 0x00b1, # MEDIUM SHADE + 0x2593: 0x00b2, # DARK SHADE + 0x25a0: 0x00fe, # BLACK SQUARE +} diff --git a/venv/Lib/encodings/cp869.py b/venv/Lib/encodings/cp869.py new file mode 100644 index 00000000..8d8a29b1 --- /dev/null +++ b/venv/Lib/encodings/cp869.py @@ -0,0 +1,689 @@ +""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP869.TXT' with gencodec.py. 
+ +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_map) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_map)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='cp869', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + +### Decoding Map + +decoding_map = codecs.make_identity_dict(range(256)) +decoding_map.update({ + 0x0080: None, # UNDEFINED + 0x0081: None, # UNDEFINED + 0x0082: None, # UNDEFINED + 0x0083: None, # UNDEFINED + 0x0084: None, # UNDEFINED + 0x0085: None, # UNDEFINED + 0x0086: 0x0386, # GREEK CAPITAL LETTER ALPHA WITH TONOS + 0x0087: None, # UNDEFINED + 0x0088: 0x00b7, # MIDDLE DOT + 0x0089: 0x00ac, # NOT SIGN + 0x008a: 0x00a6, # BROKEN BAR + 0x008b: 0x2018, # LEFT SINGLE QUOTATION MARK + 0x008c: 0x2019, # RIGHT SINGLE QUOTATION MARK + 0x008d: 0x0388, # GREEK CAPITAL LETTER EPSILON WITH TONOS + 0x008e: 0x2015, # HORIZONTAL BAR + 0x008f: 0x0389, # GREEK CAPITAL LETTER ETA WITH TONOS + 0x0090: 0x038a, # GREEK CAPITAL LETTER IOTA WITH TONOS + 0x0091: 0x03aa, # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA + 0x0092: 0x038c, # GREEK CAPITAL LETTER OMICRON WITH TONOS + 0x0093: None, # UNDEFINED + 0x0094: None, # UNDEFINED + 0x0095: 0x038e, # GREEK CAPITAL LETTER UPSILON WITH TONOS + 0x0096: 0x03ab, # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA + 0x0097: 0x00a9, # COPYRIGHT SIGN + 0x0098: 0x038f, # GREEK CAPITAL LETTER OMEGA WITH TONOS + 0x0099: 0x00b2, # SUPERSCRIPT TWO + 0x009a: 0x00b3, # SUPERSCRIPT THREE + 0x009b: 0x03ac, # GREEK SMALL LETTER ALPHA WITH TONOS + 0x009c: 0x00a3, # POUND SIGN + 0x009d: 0x03ad, # GREEK SMALL LETTER EPSILON WITH TONOS + 0x009e: 0x03ae, # GREEK SMALL LETTER ETA WITH TONOS + 0x009f: 0x03af, # GREEK SMALL LETTER IOTA WITH TONOS + 0x00a0: 0x03ca, # GREEK SMALL LETTER IOTA WITH DIALYTIKA + 0x00a1: 0x0390, # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS + 0x00a2: 0x03cc, # GREEK SMALL LETTER OMICRON WITH TONOS + 0x00a3: 0x03cd, # GREEK SMALL LETTER UPSILON WITH TONOS + 0x00a4: 0x0391, # GREEK CAPITAL LETTER ALPHA + 0x00a5: 0x0392, # GREEK CAPITAL LETTER BETA + 0x00a6: 0x0393, # GREEK CAPITAL LETTER GAMMA + 0x00a7: 0x0394, # GREEK CAPITAL LETTER DELTA + 0x00a8: 0x0395, # GREEK CAPITAL LETTER EPSILON + 0x00a9: 0x0396, # GREEK CAPITAL LETTER ZETA + 0x00aa: 0x0397, # GREEK CAPITAL LETTER ETA + 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF + 0x00ac: 0x0398, # GREEK CAPITAL LETTER THETA + 0x00ad: 0x0399, # GREEK CAPITAL LETTER IOTA + 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00b0: 0x2591, # LIGHT SHADE + 0x00b1: 0x2592, # MEDIUM SHADE + 0x00b2: 0x2593, # DARK SHADE + 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL + 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT + 0x00b5: 0x039a, # GREEK CAPITAL LETTER 
KAPPA + 0x00b6: 0x039b, # GREEK CAPITAL LETTER LAMDA + 0x00b7: 0x039c, # GREEK CAPITAL LETTER MU + 0x00b8: 0x039d, # GREEK CAPITAL LETTER NU + 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT + 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL + 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT + 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT + 0x00bd: 0x039e, # GREEK CAPITAL LETTER XI + 0x00be: 0x039f, # GREEK CAPITAL LETTER OMICRON + 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT + 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT + 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL + 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT + 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL + 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + 0x00c6: 0x03a0, # GREEK CAPITAL LETTER PI + 0x00c7: 0x03a1, # GREEK CAPITAL LETTER RHO + 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT + 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT + 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL + 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL + 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + 0x00cf: 0x03a3, # GREEK CAPITAL LETTER SIGMA + 0x00d0: 0x03a4, # GREEK CAPITAL LETTER TAU + 0x00d1: 0x03a5, # GREEK CAPITAL LETTER UPSILON + 0x00d2: 0x03a6, # GREEK CAPITAL LETTER PHI + 0x00d3: 0x03a7, # GREEK CAPITAL LETTER CHI + 0x00d4: 0x03a8, # GREEK CAPITAL LETTER PSI + 0x00d5: 0x03a9, # GREEK CAPITAL LETTER OMEGA + 0x00d6: 0x03b1, # GREEK SMALL LETTER ALPHA + 0x00d7: 0x03b2, # GREEK SMALL LETTER BETA + 0x00d8: 0x03b3, # GREEK SMALL LETTER GAMMA + 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT + 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT + 0x00db: 0x2588, # FULL BLOCK + 0x00dc: 0x2584, # LOWER HALF BLOCK + 0x00dd: 0x03b4, # GREEK SMALL LETTER DELTA + 0x00de: 0x03b5, # GREEK SMALL LETTER EPSILON + 0x00df: 0x2580, # UPPER HALF BLOCK + 0x00e0: 0x03b6, # GREEK SMALL LETTER ZETA + 0x00e1: 0x03b7, # GREEK SMALL LETTER ETA + 0x00e2: 0x03b8, # GREEK SMALL LETTER THETA + 0x00e3: 0x03b9, # GREEK SMALL LETTER IOTA + 0x00e4: 0x03ba, # GREEK SMALL LETTER KAPPA + 0x00e5: 0x03bb, # GREEK SMALL LETTER LAMDA + 0x00e6: 0x03bc, # GREEK SMALL LETTER MU + 0x00e7: 0x03bd, # GREEK SMALL LETTER NU + 0x00e8: 0x03be, # GREEK SMALL LETTER XI + 0x00e9: 0x03bf, # GREEK SMALL LETTER OMICRON + 0x00ea: 0x03c0, # GREEK SMALL LETTER PI + 0x00eb: 0x03c1, # GREEK SMALL LETTER RHO + 0x00ec: 0x03c3, # GREEK SMALL LETTER SIGMA + 0x00ed: 0x03c2, # GREEK SMALL LETTER FINAL SIGMA + 0x00ee: 0x03c4, # GREEK SMALL LETTER TAU + 0x00ef: 0x0384, # GREEK TONOS + 0x00f0: 0x00ad, # SOFT HYPHEN + 0x00f1: 0x00b1, # PLUS-MINUS SIGN + 0x00f2: 0x03c5, # GREEK SMALL LETTER UPSILON + 0x00f3: 0x03c6, # GREEK SMALL LETTER PHI + 0x00f4: 0x03c7, # GREEK SMALL LETTER CHI + 0x00f5: 0x00a7, # SECTION SIGN + 0x00f6: 0x03c8, # GREEK SMALL LETTER PSI + 0x00f7: 0x0385, # GREEK DIALYTIKA TONOS + 0x00f8: 0x00b0, # DEGREE SIGN + 0x00f9: 0x00a8, # DIAERESIS + 0x00fa: 0x03c9, # GREEK SMALL LETTER OMEGA + 0x00fb: 0x03cb, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA + 0x00fc: 0x03b0, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS + 0x00fd: 0x03ce, # GREEK SMALL LETTER OMEGA WITH TONOS + 0x00fe: 0x25a0, # BLACK SQUARE + 0x00ff: 0x00a0, # NO-BREAK SPACE +}) + +### Decoding Table + +decoding_table = ( + '\x00' # 0x0000 -> NULL + '\x01' # 
0x0001 -> START OF HEADING + '\x02' # 0x0002 -> START OF TEXT + '\x03' # 0x0003 -> END OF TEXT + '\x04' # 0x0004 -> END OF TRANSMISSION + '\x05' # 0x0005 -> ENQUIRY + '\x06' # 0x0006 -> ACKNOWLEDGE + '\x07' # 0x0007 -> BELL + '\x08' # 0x0008 -> BACKSPACE + '\t' # 0x0009 -> HORIZONTAL TABULATION + '\n' # 0x000a -> LINE FEED + '\x0b' # 0x000b -> VERTICAL TABULATION + '\x0c' # 0x000c -> FORM FEED + '\r' # 0x000d -> CARRIAGE RETURN + '\x0e' # 0x000e -> SHIFT OUT + '\x0f' # 0x000f -> SHIFT IN + '\x10' # 0x0010 -> DATA LINK ESCAPE + '\x11' # 0x0011 -> DEVICE CONTROL ONE + '\x12' # 0x0012 -> DEVICE CONTROL TWO + '\x13' # 0x0013 -> DEVICE CONTROL THREE + '\x14' # 0x0014 -> DEVICE CONTROL FOUR + '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x0016 -> SYNCHRONOUS IDLE + '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK + '\x18' # 0x0018 -> CANCEL + '\x19' # 0x0019 -> END OF MEDIUM + '\x1a' # 0x001a -> SUBSTITUTE + '\x1b' # 0x001b -> ESCAPE + '\x1c' # 0x001c -> FILE SEPARATOR + '\x1d' # 0x001d -> GROUP SEPARATOR + '\x1e' # 0x001e -> RECORD SEPARATOR + '\x1f' # 0x001f -> UNIT SEPARATOR + ' ' # 0x0020 -> SPACE + '!' # 0x0021 -> EXCLAMATION MARK + '"' # 0x0022 -> QUOTATION MARK + '#' # 0x0023 -> NUMBER SIGN + '$' # 0x0024 -> DOLLAR SIGN + '%' # 0x0025 -> PERCENT SIGN + '&' # 0x0026 -> AMPERSAND + "'" # 0x0027 -> APOSTROPHE + '(' # 0x0028 -> LEFT PARENTHESIS + ')' # 0x0029 -> RIGHT PARENTHESIS + '*' # 0x002a -> ASTERISK + '+' # 0x002b -> PLUS SIGN + ',' # 0x002c -> COMMA + '-' # 0x002d -> HYPHEN-MINUS + '.' # 0x002e -> FULL STOP + '/' # 0x002f -> SOLIDUS + '0' # 0x0030 -> DIGIT ZERO + '1' # 0x0031 -> DIGIT ONE + '2' # 0x0032 -> DIGIT TWO + '3' # 0x0033 -> DIGIT THREE + '4' # 0x0034 -> DIGIT FOUR + '5' # 0x0035 -> DIGIT FIVE + '6' # 0x0036 -> DIGIT SIX + '7' # 0x0037 -> DIGIT SEVEN + '8' # 0x0038 -> DIGIT EIGHT + '9' # 0x0039 -> DIGIT NINE + ':' # 0x003a -> COLON + ';' # 0x003b -> SEMICOLON + '<' # 0x003c -> LESS-THAN SIGN + '=' # 0x003d -> EQUALS SIGN + '>' # 0x003e -> GREATER-THAN SIGN + '?' 
# 0x003f -> QUESTION MARK + '@' # 0x0040 -> COMMERCIAL AT + 'A' # 0x0041 -> LATIN CAPITAL LETTER A + 'B' # 0x0042 -> LATIN CAPITAL LETTER B + 'C' # 0x0043 -> LATIN CAPITAL LETTER C + 'D' # 0x0044 -> LATIN CAPITAL LETTER D + 'E' # 0x0045 -> LATIN CAPITAL LETTER E + 'F' # 0x0046 -> LATIN CAPITAL LETTER F + 'G' # 0x0047 -> LATIN CAPITAL LETTER G + 'H' # 0x0048 -> LATIN CAPITAL LETTER H + 'I' # 0x0049 -> LATIN CAPITAL LETTER I + 'J' # 0x004a -> LATIN CAPITAL LETTER J + 'K' # 0x004b -> LATIN CAPITAL LETTER K + 'L' # 0x004c -> LATIN CAPITAL LETTER L + 'M' # 0x004d -> LATIN CAPITAL LETTER M + 'N' # 0x004e -> LATIN CAPITAL LETTER N + 'O' # 0x004f -> LATIN CAPITAL LETTER O + 'P' # 0x0050 -> LATIN CAPITAL LETTER P + 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q + 'R' # 0x0052 -> LATIN CAPITAL LETTER R + 'S' # 0x0053 -> LATIN CAPITAL LETTER S + 'T' # 0x0054 -> LATIN CAPITAL LETTER T + 'U' # 0x0055 -> LATIN CAPITAL LETTER U + 'V' # 0x0056 -> LATIN CAPITAL LETTER V + 'W' # 0x0057 -> LATIN CAPITAL LETTER W + 'X' # 0x0058 -> LATIN CAPITAL LETTER X + 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y + 'Z' # 0x005a -> LATIN CAPITAL LETTER Z + '[' # 0x005b -> LEFT SQUARE BRACKET + '\\' # 0x005c -> REVERSE SOLIDUS + ']' # 0x005d -> RIGHT SQUARE BRACKET + '^' # 0x005e -> CIRCUMFLEX ACCENT + '_' # 0x005f -> LOW LINE + '`' # 0x0060 -> GRAVE ACCENT + 'a' # 0x0061 -> LATIN SMALL LETTER A + 'b' # 0x0062 -> LATIN SMALL LETTER B + 'c' # 0x0063 -> LATIN SMALL LETTER C + 'd' # 0x0064 -> LATIN SMALL LETTER D + 'e' # 0x0065 -> LATIN SMALL LETTER E + 'f' # 0x0066 -> LATIN SMALL LETTER F + 'g' # 0x0067 -> LATIN SMALL LETTER G + 'h' # 0x0068 -> LATIN SMALL LETTER H + 'i' # 0x0069 -> LATIN SMALL LETTER I + 'j' # 0x006a -> LATIN SMALL LETTER J + 'k' # 0x006b -> LATIN SMALL LETTER K + 'l' # 0x006c -> LATIN SMALL LETTER L + 'm' # 0x006d -> LATIN SMALL LETTER M + 'n' # 0x006e -> LATIN SMALL LETTER N + 'o' # 0x006f -> LATIN SMALL LETTER O + 'p' # 0x0070 -> LATIN SMALL LETTER P + 'q' # 0x0071 -> LATIN SMALL LETTER Q + 'r' # 0x0072 -> LATIN SMALL LETTER R + 's' # 0x0073 -> LATIN SMALL LETTER S + 't' # 0x0074 -> LATIN SMALL LETTER T + 'u' # 0x0075 -> LATIN SMALL LETTER U + 'v' # 0x0076 -> LATIN SMALL LETTER V + 'w' # 0x0077 -> LATIN SMALL LETTER W + 'x' # 0x0078 -> LATIN SMALL LETTER X + 'y' # 0x0079 -> LATIN SMALL LETTER Y + 'z' # 0x007a -> LATIN SMALL LETTER Z + '{' # 0x007b -> LEFT CURLY BRACKET + '|' # 0x007c -> VERTICAL LINE + '}' # 0x007d -> RIGHT CURLY BRACKET + '~' # 0x007e -> TILDE + '\x7f' # 0x007f -> DELETE + '\ufffe' # 0x0080 -> UNDEFINED + '\ufffe' # 0x0081 -> UNDEFINED + '\ufffe' # 0x0082 -> UNDEFINED + '\ufffe' # 0x0083 -> UNDEFINED + '\ufffe' # 0x0084 -> UNDEFINED + '\ufffe' # 0x0085 -> UNDEFINED + '\u0386' # 0x0086 -> GREEK CAPITAL LETTER ALPHA WITH TONOS + '\ufffe' # 0x0087 -> UNDEFINED + '\xb7' # 0x0088 -> MIDDLE DOT + '\xac' # 0x0089 -> NOT SIGN + '\xa6' # 0x008a -> BROKEN BAR + '\u2018' # 0x008b -> LEFT SINGLE QUOTATION MARK + '\u2019' # 0x008c -> RIGHT SINGLE QUOTATION MARK + '\u0388' # 0x008d -> GREEK CAPITAL LETTER EPSILON WITH TONOS + '\u2015' # 0x008e -> HORIZONTAL BAR + '\u0389' # 0x008f -> GREEK CAPITAL LETTER ETA WITH TONOS + '\u038a' # 0x0090 -> GREEK CAPITAL LETTER IOTA WITH TONOS + '\u03aa' # 0x0091 -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA + '\u038c' # 0x0092 -> GREEK CAPITAL LETTER OMICRON WITH TONOS + '\ufffe' # 0x0093 -> UNDEFINED + '\ufffe' # 0x0094 -> UNDEFINED + '\u038e' # 0x0095 -> GREEK CAPITAL LETTER UPSILON WITH TONOS + '\u03ab' # 0x0096 -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA + '\xa9' # 0x0097 -> 
COPYRIGHT SIGN + '\u038f' # 0x0098 -> GREEK CAPITAL LETTER OMEGA WITH TONOS + '\xb2' # 0x0099 -> SUPERSCRIPT TWO + '\xb3' # 0x009a -> SUPERSCRIPT THREE + '\u03ac' # 0x009b -> GREEK SMALL LETTER ALPHA WITH TONOS + '\xa3' # 0x009c -> POUND SIGN + '\u03ad' # 0x009d -> GREEK SMALL LETTER EPSILON WITH TONOS + '\u03ae' # 0x009e -> GREEK SMALL LETTER ETA WITH TONOS + '\u03af' # 0x009f -> GREEK SMALL LETTER IOTA WITH TONOS + '\u03ca' # 0x00a0 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA + '\u0390' # 0x00a1 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS + '\u03cc' # 0x00a2 -> GREEK SMALL LETTER OMICRON WITH TONOS + '\u03cd' # 0x00a3 -> GREEK SMALL LETTER UPSILON WITH TONOS + '\u0391' # 0x00a4 -> GREEK CAPITAL LETTER ALPHA + '\u0392' # 0x00a5 -> GREEK CAPITAL LETTER BETA + '\u0393' # 0x00a6 -> GREEK CAPITAL LETTER GAMMA + '\u0394' # 0x00a7 -> GREEK CAPITAL LETTER DELTA + '\u0395' # 0x00a8 -> GREEK CAPITAL LETTER EPSILON + '\u0396' # 0x00a9 -> GREEK CAPITAL LETTER ZETA + '\u0397' # 0x00aa -> GREEK CAPITAL LETTER ETA + '\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF + '\u0398' # 0x00ac -> GREEK CAPITAL LETTER THETA + '\u0399' # 0x00ad -> GREEK CAPITAL LETTER IOTA + '\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\u2591' # 0x00b0 -> LIGHT SHADE + '\u2592' # 0x00b1 -> MEDIUM SHADE + '\u2593' # 0x00b2 -> DARK SHADE + '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL + '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT + '\u039a' # 0x00b5 -> GREEK CAPITAL LETTER KAPPA + '\u039b' # 0x00b6 -> GREEK CAPITAL LETTER LAMDA + '\u039c' # 0x00b7 -> GREEK CAPITAL LETTER MU + '\u039d' # 0x00b8 -> GREEK CAPITAL LETTER NU + '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT + '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL + '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT + '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT + '\u039e' # 0x00bd -> GREEK CAPITAL LETTER XI + '\u039f' # 0x00be -> GREEK CAPITAL LETTER OMICRON + '\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT + '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT + '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL + '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + '\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT + '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL + '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + '\u03a0' # 0x00c6 -> GREEK CAPITAL LETTER PI + '\u03a1' # 0x00c7 -> GREEK CAPITAL LETTER RHO + '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT + '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT + '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL + '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL + '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + '\u03a3' # 0x00cf -> GREEK CAPITAL LETTER SIGMA + '\u03a4' # 0x00d0 -> GREEK CAPITAL LETTER TAU + '\u03a5' # 0x00d1 -> GREEK CAPITAL LETTER UPSILON + '\u03a6' # 0x00d2 -> GREEK CAPITAL LETTER PHI + '\u03a7' # 0x00d3 -> GREEK CAPITAL LETTER CHI + '\u03a8' # 0x00d4 -> GREEK CAPITAL LETTER PSI + '\u03a9' # 0x00d5 -> GREEK CAPITAL LETTER OMEGA + '\u03b1' # 0x00d6 -> GREEK SMALL LETTER ALPHA + '\u03b2' # 0x00d7 -> GREEK SMALL LETTER BETA + '\u03b3' # 0x00d8 -> GREEK SMALL LETTER GAMMA + '\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT + '\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND 
RIGHT + '\u2588' # 0x00db -> FULL BLOCK + '\u2584' # 0x00dc -> LOWER HALF BLOCK + '\u03b4' # 0x00dd -> GREEK SMALL LETTER DELTA + '\u03b5' # 0x00de -> GREEK SMALL LETTER EPSILON + '\u2580' # 0x00df -> UPPER HALF BLOCK + '\u03b6' # 0x00e0 -> GREEK SMALL LETTER ZETA + '\u03b7' # 0x00e1 -> GREEK SMALL LETTER ETA + '\u03b8' # 0x00e2 -> GREEK SMALL LETTER THETA + '\u03b9' # 0x00e3 -> GREEK SMALL LETTER IOTA + '\u03ba' # 0x00e4 -> GREEK SMALL LETTER KAPPA + '\u03bb' # 0x00e5 -> GREEK SMALL LETTER LAMDA + '\u03bc' # 0x00e6 -> GREEK SMALL LETTER MU + '\u03bd' # 0x00e7 -> GREEK SMALL LETTER NU + '\u03be' # 0x00e8 -> GREEK SMALL LETTER XI + '\u03bf' # 0x00e9 -> GREEK SMALL LETTER OMICRON + '\u03c0' # 0x00ea -> GREEK SMALL LETTER PI + '\u03c1' # 0x00eb -> GREEK SMALL LETTER RHO + '\u03c3' # 0x00ec -> GREEK SMALL LETTER SIGMA + '\u03c2' # 0x00ed -> GREEK SMALL LETTER FINAL SIGMA + '\u03c4' # 0x00ee -> GREEK SMALL LETTER TAU + '\u0384' # 0x00ef -> GREEK TONOS + '\xad' # 0x00f0 -> SOFT HYPHEN + '\xb1' # 0x00f1 -> PLUS-MINUS SIGN + '\u03c5' # 0x00f2 -> GREEK SMALL LETTER UPSILON + '\u03c6' # 0x00f3 -> GREEK SMALL LETTER PHI + '\u03c7' # 0x00f4 -> GREEK SMALL LETTER CHI + '\xa7' # 0x00f5 -> SECTION SIGN + '\u03c8' # 0x00f6 -> GREEK SMALL LETTER PSI + '\u0385' # 0x00f7 -> GREEK DIALYTIKA TONOS + '\xb0' # 0x00f8 -> DEGREE SIGN + '\xa8' # 0x00f9 -> DIAERESIS + '\u03c9' # 0x00fa -> GREEK SMALL LETTER OMEGA + '\u03cb' # 0x00fb -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA + '\u03b0' # 0x00fc -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS + '\u03ce' # 0x00fd -> GREEK SMALL LETTER OMEGA WITH TONOS + '\u25a0' # 0x00fe -> BLACK SQUARE + '\xa0' # 0x00ff -> NO-BREAK SPACE +) + +### Encoding Map + +encoding_map = { + 0x0000: 0x0000, # NULL + 0x0001: 0x0001, # START OF HEADING + 0x0002: 0x0002, # START OF TEXT + 0x0003: 0x0003, # END OF TEXT + 0x0004: 0x0004, # END OF TRANSMISSION + 0x0005: 0x0005, # ENQUIRY + 0x0006: 0x0006, # ACKNOWLEDGE + 0x0007: 0x0007, # BELL + 0x0008: 0x0008, # BACKSPACE + 0x0009: 0x0009, # HORIZONTAL TABULATION + 0x000a: 0x000a, # LINE FEED + 0x000b: 0x000b, # VERTICAL TABULATION + 0x000c: 0x000c, # FORM FEED + 0x000d: 0x000d, # CARRIAGE RETURN + 0x000e: 0x000e, # SHIFT OUT + 0x000f: 0x000f, # SHIFT IN + 0x0010: 0x0010, # DATA LINK ESCAPE + 0x0011: 0x0011, # DEVICE CONTROL ONE + 0x0012: 0x0012, # DEVICE CONTROL TWO + 0x0013: 0x0013, # DEVICE CONTROL THREE + 0x0014: 0x0014, # DEVICE CONTROL FOUR + 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE + 0x0016: 0x0016, # SYNCHRONOUS IDLE + 0x0017: 0x0017, # END OF TRANSMISSION BLOCK + 0x0018: 0x0018, # CANCEL + 0x0019: 0x0019, # END OF MEDIUM + 0x001a: 0x001a, # SUBSTITUTE + 0x001b: 0x001b, # ESCAPE + 0x001c: 0x001c, # FILE SEPARATOR + 0x001d: 0x001d, # GROUP SEPARATOR + 0x001e: 0x001e, # RECORD SEPARATOR + 0x001f: 0x001f, # UNIT SEPARATOR + 0x0020: 0x0020, # SPACE + 0x0021: 0x0021, # EXCLAMATION MARK + 0x0022: 0x0022, # QUOTATION MARK + 0x0023: 0x0023, # NUMBER SIGN + 0x0024: 0x0024, # DOLLAR SIGN + 0x0025: 0x0025, # PERCENT SIGN + 0x0026: 0x0026, # AMPERSAND + 0x0027: 0x0027, # APOSTROPHE + 0x0028: 0x0028, # LEFT PARENTHESIS + 0x0029: 0x0029, # RIGHT PARENTHESIS + 0x002a: 0x002a, # ASTERISK + 0x002b: 0x002b, # PLUS SIGN + 0x002c: 0x002c, # COMMA + 0x002d: 0x002d, # HYPHEN-MINUS + 0x002e: 0x002e, # FULL STOP + 0x002f: 0x002f, # SOLIDUS + 0x0030: 0x0030, # DIGIT ZERO + 0x0031: 0x0031, # DIGIT ONE + 0x0032: 0x0032, # DIGIT TWO + 0x0033: 0x0033, # DIGIT THREE + 0x0034: 0x0034, # DIGIT FOUR + 0x0035: 0x0035, # DIGIT FIVE + 0x0036: 0x0036, # DIGIT SIX + 
0x0037: 0x0037, # DIGIT SEVEN + 0x0038: 0x0038, # DIGIT EIGHT + 0x0039: 0x0039, # DIGIT NINE + 0x003a: 0x003a, # COLON + 0x003b: 0x003b, # SEMICOLON + 0x003c: 0x003c, # LESS-THAN SIGN + 0x003d: 0x003d, # EQUALS SIGN + 0x003e: 0x003e, # GREATER-THAN SIGN + 0x003f: 0x003f, # QUESTION MARK + 0x0040: 0x0040, # COMMERCIAL AT + 0x0041: 0x0041, # LATIN CAPITAL LETTER A + 0x0042: 0x0042, # LATIN CAPITAL LETTER B + 0x0043: 0x0043, # LATIN CAPITAL LETTER C + 0x0044: 0x0044, # LATIN CAPITAL LETTER D + 0x0045: 0x0045, # LATIN CAPITAL LETTER E + 0x0046: 0x0046, # LATIN CAPITAL LETTER F + 0x0047: 0x0047, # LATIN CAPITAL LETTER G + 0x0048: 0x0048, # LATIN CAPITAL LETTER H + 0x0049: 0x0049, # LATIN CAPITAL LETTER I + 0x004a: 0x004a, # LATIN CAPITAL LETTER J + 0x004b: 0x004b, # LATIN CAPITAL LETTER K + 0x004c: 0x004c, # LATIN CAPITAL LETTER L + 0x004d: 0x004d, # LATIN CAPITAL LETTER M + 0x004e: 0x004e, # LATIN CAPITAL LETTER N + 0x004f: 0x004f, # LATIN CAPITAL LETTER O + 0x0050: 0x0050, # LATIN CAPITAL LETTER P + 0x0051: 0x0051, # LATIN CAPITAL LETTER Q + 0x0052: 0x0052, # LATIN CAPITAL LETTER R + 0x0053: 0x0053, # LATIN CAPITAL LETTER S + 0x0054: 0x0054, # LATIN CAPITAL LETTER T + 0x0055: 0x0055, # LATIN CAPITAL LETTER U + 0x0056: 0x0056, # LATIN CAPITAL LETTER V + 0x0057: 0x0057, # LATIN CAPITAL LETTER W + 0x0058: 0x0058, # LATIN CAPITAL LETTER X + 0x0059: 0x0059, # LATIN CAPITAL LETTER Y + 0x005a: 0x005a, # LATIN CAPITAL LETTER Z + 0x005b: 0x005b, # LEFT SQUARE BRACKET + 0x005c: 0x005c, # REVERSE SOLIDUS + 0x005d: 0x005d, # RIGHT SQUARE BRACKET + 0x005e: 0x005e, # CIRCUMFLEX ACCENT + 0x005f: 0x005f, # LOW LINE + 0x0060: 0x0060, # GRAVE ACCENT + 0x0061: 0x0061, # LATIN SMALL LETTER A + 0x0062: 0x0062, # LATIN SMALL LETTER B + 0x0063: 0x0063, # LATIN SMALL LETTER C + 0x0064: 0x0064, # LATIN SMALL LETTER D + 0x0065: 0x0065, # LATIN SMALL LETTER E + 0x0066: 0x0066, # LATIN SMALL LETTER F + 0x0067: 0x0067, # LATIN SMALL LETTER G + 0x0068: 0x0068, # LATIN SMALL LETTER H + 0x0069: 0x0069, # LATIN SMALL LETTER I + 0x006a: 0x006a, # LATIN SMALL LETTER J + 0x006b: 0x006b, # LATIN SMALL LETTER K + 0x006c: 0x006c, # LATIN SMALL LETTER L + 0x006d: 0x006d, # LATIN SMALL LETTER M + 0x006e: 0x006e, # LATIN SMALL LETTER N + 0x006f: 0x006f, # LATIN SMALL LETTER O + 0x0070: 0x0070, # LATIN SMALL LETTER P + 0x0071: 0x0071, # LATIN SMALL LETTER Q + 0x0072: 0x0072, # LATIN SMALL LETTER R + 0x0073: 0x0073, # LATIN SMALL LETTER S + 0x0074: 0x0074, # LATIN SMALL LETTER T + 0x0075: 0x0075, # LATIN SMALL LETTER U + 0x0076: 0x0076, # LATIN SMALL LETTER V + 0x0077: 0x0077, # LATIN SMALL LETTER W + 0x0078: 0x0078, # LATIN SMALL LETTER X + 0x0079: 0x0079, # LATIN SMALL LETTER Y + 0x007a: 0x007a, # LATIN SMALL LETTER Z + 0x007b: 0x007b, # LEFT CURLY BRACKET + 0x007c: 0x007c, # VERTICAL LINE + 0x007d: 0x007d, # RIGHT CURLY BRACKET + 0x007e: 0x007e, # TILDE + 0x007f: 0x007f, # DELETE + 0x00a0: 0x00ff, # NO-BREAK SPACE + 0x00a3: 0x009c, # POUND SIGN + 0x00a6: 0x008a, # BROKEN BAR + 0x00a7: 0x00f5, # SECTION SIGN + 0x00a8: 0x00f9, # DIAERESIS + 0x00a9: 0x0097, # COPYRIGHT SIGN + 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00ac: 0x0089, # NOT SIGN + 0x00ad: 0x00f0, # SOFT HYPHEN + 0x00b0: 0x00f8, # DEGREE SIGN + 0x00b1: 0x00f1, # PLUS-MINUS SIGN + 0x00b2: 0x0099, # SUPERSCRIPT TWO + 0x00b3: 0x009a, # SUPERSCRIPT THREE + 0x00b7: 0x0088, # MIDDLE DOT + 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF + 0x0384: 0x00ef, # GREEK TONOS + 0x0385: 0x00f7, # GREEK 
DIALYTIKA TONOS + 0x0386: 0x0086, # GREEK CAPITAL LETTER ALPHA WITH TONOS + 0x0388: 0x008d, # GREEK CAPITAL LETTER EPSILON WITH TONOS + 0x0389: 0x008f, # GREEK CAPITAL LETTER ETA WITH TONOS + 0x038a: 0x0090, # GREEK CAPITAL LETTER IOTA WITH TONOS + 0x038c: 0x0092, # GREEK CAPITAL LETTER OMICRON WITH TONOS + 0x038e: 0x0095, # GREEK CAPITAL LETTER UPSILON WITH TONOS + 0x038f: 0x0098, # GREEK CAPITAL LETTER OMEGA WITH TONOS + 0x0390: 0x00a1, # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS + 0x0391: 0x00a4, # GREEK CAPITAL LETTER ALPHA + 0x0392: 0x00a5, # GREEK CAPITAL LETTER BETA + 0x0393: 0x00a6, # GREEK CAPITAL LETTER GAMMA + 0x0394: 0x00a7, # GREEK CAPITAL LETTER DELTA + 0x0395: 0x00a8, # GREEK CAPITAL LETTER EPSILON + 0x0396: 0x00a9, # GREEK CAPITAL LETTER ZETA + 0x0397: 0x00aa, # GREEK CAPITAL LETTER ETA + 0x0398: 0x00ac, # GREEK CAPITAL LETTER THETA + 0x0399: 0x00ad, # GREEK CAPITAL LETTER IOTA + 0x039a: 0x00b5, # GREEK CAPITAL LETTER KAPPA + 0x039b: 0x00b6, # GREEK CAPITAL LETTER LAMDA + 0x039c: 0x00b7, # GREEK CAPITAL LETTER MU + 0x039d: 0x00b8, # GREEK CAPITAL LETTER NU + 0x039e: 0x00bd, # GREEK CAPITAL LETTER XI + 0x039f: 0x00be, # GREEK CAPITAL LETTER OMICRON + 0x03a0: 0x00c6, # GREEK CAPITAL LETTER PI + 0x03a1: 0x00c7, # GREEK CAPITAL LETTER RHO + 0x03a3: 0x00cf, # GREEK CAPITAL LETTER SIGMA + 0x03a4: 0x00d0, # GREEK CAPITAL LETTER TAU + 0x03a5: 0x00d1, # GREEK CAPITAL LETTER UPSILON + 0x03a6: 0x00d2, # GREEK CAPITAL LETTER PHI + 0x03a7: 0x00d3, # GREEK CAPITAL LETTER CHI + 0x03a8: 0x00d4, # GREEK CAPITAL LETTER PSI + 0x03a9: 0x00d5, # GREEK CAPITAL LETTER OMEGA + 0x03aa: 0x0091, # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA + 0x03ab: 0x0096, # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA + 0x03ac: 0x009b, # GREEK SMALL LETTER ALPHA WITH TONOS + 0x03ad: 0x009d, # GREEK SMALL LETTER EPSILON WITH TONOS + 0x03ae: 0x009e, # GREEK SMALL LETTER ETA WITH TONOS + 0x03af: 0x009f, # GREEK SMALL LETTER IOTA WITH TONOS + 0x03b0: 0x00fc, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS + 0x03b1: 0x00d6, # GREEK SMALL LETTER ALPHA + 0x03b2: 0x00d7, # GREEK SMALL LETTER BETA + 0x03b3: 0x00d8, # GREEK SMALL LETTER GAMMA + 0x03b4: 0x00dd, # GREEK SMALL LETTER DELTA + 0x03b5: 0x00de, # GREEK SMALL LETTER EPSILON + 0x03b6: 0x00e0, # GREEK SMALL LETTER ZETA + 0x03b7: 0x00e1, # GREEK SMALL LETTER ETA + 0x03b8: 0x00e2, # GREEK SMALL LETTER THETA + 0x03b9: 0x00e3, # GREEK SMALL LETTER IOTA + 0x03ba: 0x00e4, # GREEK SMALL LETTER KAPPA + 0x03bb: 0x00e5, # GREEK SMALL LETTER LAMDA + 0x03bc: 0x00e6, # GREEK SMALL LETTER MU + 0x03bd: 0x00e7, # GREEK SMALL LETTER NU + 0x03be: 0x00e8, # GREEK SMALL LETTER XI + 0x03bf: 0x00e9, # GREEK SMALL LETTER OMICRON + 0x03c0: 0x00ea, # GREEK SMALL LETTER PI + 0x03c1: 0x00eb, # GREEK SMALL LETTER RHO + 0x03c2: 0x00ed, # GREEK SMALL LETTER FINAL SIGMA + 0x03c3: 0x00ec, # GREEK SMALL LETTER SIGMA + 0x03c4: 0x00ee, # GREEK SMALL LETTER TAU + 0x03c5: 0x00f2, # GREEK SMALL LETTER UPSILON + 0x03c6: 0x00f3, # GREEK SMALL LETTER PHI + 0x03c7: 0x00f4, # GREEK SMALL LETTER CHI + 0x03c8: 0x00f6, # GREEK SMALL LETTER PSI + 0x03c9: 0x00fa, # GREEK SMALL LETTER OMEGA + 0x03ca: 0x00a0, # GREEK SMALL LETTER IOTA WITH DIALYTIKA + 0x03cb: 0x00fb, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA + 0x03cc: 0x00a2, # GREEK SMALL LETTER OMICRON WITH TONOS + 0x03cd: 0x00a3, # GREEK SMALL LETTER UPSILON WITH TONOS + 0x03ce: 0x00fd, # GREEK SMALL LETTER OMEGA WITH TONOS + 0x2015: 0x008e, # HORIZONTAL BAR + 0x2018: 0x008b, # LEFT SINGLE QUOTATION MARK + 0x2019: 0x008c, # RIGHT SINGLE QUOTATION MARK 
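    # The entries that follow map the box-drawing and shading characters back to
    # their single-byte positions.  Together with decoding_table above, this dict
    # drives codecs.charmap_encode()/charmap_decode() for this codec, e.g.
    # (values taken from the tables above):
    #   codecs.charmap_decode(b'\xa4', 'strict', decoding_table)  -> ('\u0391', 1)
    #   codecs.charmap_encode('\u0391', 'strict', encoding_map)   -> (b'\xa4', 1)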
+ 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL + 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL + 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT + 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT + 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT + 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT + 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT + 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT + 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL + 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL + 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL + 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT + 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT + 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT + 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT + 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT + 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL + 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + 0x2580: 0x00df, # UPPER HALF BLOCK + 0x2584: 0x00dc, # LOWER HALF BLOCK + 0x2588: 0x00db, # FULL BLOCK + 0x2591: 0x00b0, # LIGHT SHADE + 0x2592: 0x00b1, # MEDIUM SHADE + 0x2593: 0x00b2, # DARK SHADE + 0x25a0: 0x00fe, # BLACK SQUARE +} diff --git a/venv/Lib/encodings/cp874.py b/venv/Lib/encodings/cp874.py new file mode 100644 index 00000000..59bfcbc9 --- /dev/null +++ b/venv/Lib/encodings/cp874.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec cp874 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP874.TXT' with gencodec.py. + +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='cp874', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x04' # 0x04 -> END OF TRANSMISSION + '\x05' # 0x05 -> ENQUIRY + '\x06' # 0x06 -> ACKNOWLEDGE + '\x07' # 0x07 -> BELL + '\x08' # 0x08 -> BACKSPACE + '\t' # 0x09 -> HORIZONTAL TABULATION + '\n' # 0x0A -> LINE FEED + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x14' # 0x14 -> DEVICE CONTROL FOUR + '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE + '\x16' # 
0x16 -> SYNCHRONOUS IDLE + '\x17' # 0x17 -> END OF TRANSMISSION BLOCK + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x1a' # 0x1A -> SUBSTITUTE + '\x1b' # 0x1B -> ESCAPE + '\x1c' # 0x1C -> FILE SEPARATOR + '\x1d' # 0x1D -> GROUP SEPARATOR + '\x1e' # 0x1E -> RECORD SEPARATOR + '\x1f' # 0x1F -> UNIT SEPARATOR + ' ' # 0x20 -> SPACE + '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' # 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' # 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> 
DELETE + '\u20ac' # 0x80 -> EURO SIGN + '\ufffe' # 0x81 -> UNDEFINED + '\ufffe' # 0x82 -> UNDEFINED + '\ufffe' # 0x83 -> UNDEFINED + '\ufffe' # 0x84 -> UNDEFINED + '\u2026' # 0x85 -> HORIZONTAL ELLIPSIS + '\ufffe' # 0x86 -> UNDEFINED + '\ufffe' # 0x87 -> UNDEFINED + '\ufffe' # 0x88 -> UNDEFINED + '\ufffe' # 0x89 -> UNDEFINED + '\ufffe' # 0x8A -> UNDEFINED + '\ufffe' # 0x8B -> UNDEFINED + '\ufffe' # 0x8C -> UNDEFINED + '\ufffe' # 0x8D -> UNDEFINED + '\ufffe' # 0x8E -> UNDEFINED + '\ufffe' # 0x8F -> UNDEFINED + '\ufffe' # 0x90 -> UNDEFINED + '\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK + '\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK + '\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK + '\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK + '\u2022' # 0x95 -> BULLET + '\u2013' # 0x96 -> EN DASH + '\u2014' # 0x97 -> EM DASH + '\ufffe' # 0x98 -> UNDEFINED + '\ufffe' # 0x99 -> UNDEFINED + '\ufffe' # 0x9A -> UNDEFINED + '\ufffe' # 0x9B -> UNDEFINED + '\ufffe' # 0x9C -> UNDEFINED + '\ufffe' # 0x9D -> UNDEFINED + '\ufffe' # 0x9E -> UNDEFINED + '\ufffe' # 0x9F -> UNDEFINED + '\xa0' # 0xA0 -> NO-BREAK SPACE + '\u0e01' # 0xA1 -> THAI CHARACTER KO KAI + '\u0e02' # 0xA2 -> THAI CHARACTER KHO KHAI + '\u0e03' # 0xA3 -> THAI CHARACTER KHO KHUAT + '\u0e04' # 0xA4 -> THAI CHARACTER KHO KHWAI + '\u0e05' # 0xA5 -> THAI CHARACTER KHO KHON + '\u0e06' # 0xA6 -> THAI CHARACTER KHO RAKHANG + '\u0e07' # 0xA7 -> THAI CHARACTER NGO NGU + '\u0e08' # 0xA8 -> THAI CHARACTER CHO CHAN + '\u0e09' # 0xA9 -> THAI CHARACTER CHO CHING + '\u0e0a' # 0xAA -> THAI CHARACTER CHO CHANG + '\u0e0b' # 0xAB -> THAI CHARACTER SO SO + '\u0e0c' # 0xAC -> THAI CHARACTER CHO CHOE + '\u0e0d' # 0xAD -> THAI CHARACTER YO YING + '\u0e0e' # 0xAE -> THAI CHARACTER DO CHADA + '\u0e0f' # 0xAF -> THAI CHARACTER TO PATAK + '\u0e10' # 0xB0 -> THAI CHARACTER THO THAN + '\u0e11' # 0xB1 -> THAI CHARACTER THO NANGMONTHO + '\u0e12' # 0xB2 -> THAI CHARACTER THO PHUTHAO + '\u0e13' # 0xB3 -> THAI CHARACTER NO NEN + '\u0e14' # 0xB4 -> THAI CHARACTER DO DEK + '\u0e15' # 0xB5 -> THAI CHARACTER TO TAO + '\u0e16' # 0xB6 -> THAI CHARACTER THO THUNG + '\u0e17' # 0xB7 -> THAI CHARACTER THO THAHAN + '\u0e18' # 0xB8 -> THAI CHARACTER THO THONG + '\u0e19' # 0xB9 -> THAI CHARACTER NO NU + '\u0e1a' # 0xBA -> THAI CHARACTER BO BAIMAI + '\u0e1b' # 0xBB -> THAI CHARACTER PO PLA + '\u0e1c' # 0xBC -> THAI CHARACTER PHO PHUNG + '\u0e1d' # 0xBD -> THAI CHARACTER FO FA + '\u0e1e' # 0xBE -> THAI CHARACTER PHO PHAN + '\u0e1f' # 0xBF -> THAI CHARACTER FO FAN + '\u0e20' # 0xC0 -> THAI CHARACTER PHO SAMPHAO + '\u0e21' # 0xC1 -> THAI CHARACTER MO MA + '\u0e22' # 0xC2 -> THAI CHARACTER YO YAK + '\u0e23' # 0xC3 -> THAI CHARACTER RO RUA + '\u0e24' # 0xC4 -> THAI CHARACTER RU + '\u0e25' # 0xC5 -> THAI CHARACTER LO LING + '\u0e26' # 0xC6 -> THAI CHARACTER LU + '\u0e27' # 0xC7 -> THAI CHARACTER WO WAEN + '\u0e28' # 0xC8 -> THAI CHARACTER SO SALA + '\u0e29' # 0xC9 -> THAI CHARACTER SO RUSI + '\u0e2a' # 0xCA -> THAI CHARACTER SO SUA + '\u0e2b' # 0xCB -> THAI CHARACTER HO HIP + '\u0e2c' # 0xCC -> THAI CHARACTER LO CHULA + '\u0e2d' # 0xCD -> THAI CHARACTER O ANG + '\u0e2e' # 0xCE -> THAI CHARACTER HO NOKHUK + '\u0e2f' # 0xCF -> THAI CHARACTER PAIYANNOI + '\u0e30' # 0xD0 -> THAI CHARACTER SARA A + '\u0e31' # 0xD1 -> THAI CHARACTER MAI HAN-AKAT + '\u0e32' # 0xD2 -> THAI CHARACTER SARA AA + '\u0e33' # 0xD3 -> THAI CHARACTER SARA AM + '\u0e34' # 0xD4 -> THAI CHARACTER SARA I + '\u0e35' # 0xD5 -> THAI CHARACTER SARA II + '\u0e36' # 0xD6 -> THAI CHARACTER SARA UE + '\u0e37' # 0xD7 -> THAI CHARACTER SARA UEE + 
'\u0e38' # 0xD8 -> THAI CHARACTER SARA U + '\u0e39' # 0xD9 -> THAI CHARACTER SARA UU + '\u0e3a' # 0xDA -> THAI CHARACTER PHINTHU + '\ufffe' # 0xDB -> UNDEFINED + '\ufffe' # 0xDC -> UNDEFINED + '\ufffe' # 0xDD -> UNDEFINED + '\ufffe' # 0xDE -> UNDEFINED + '\u0e3f' # 0xDF -> THAI CURRENCY SYMBOL BAHT + '\u0e40' # 0xE0 -> THAI CHARACTER SARA E + '\u0e41' # 0xE1 -> THAI CHARACTER SARA AE + '\u0e42' # 0xE2 -> THAI CHARACTER SARA O + '\u0e43' # 0xE3 -> THAI CHARACTER SARA AI MAIMUAN + '\u0e44' # 0xE4 -> THAI CHARACTER SARA AI MAIMALAI + '\u0e45' # 0xE5 -> THAI CHARACTER LAKKHANGYAO + '\u0e46' # 0xE6 -> THAI CHARACTER MAIYAMOK + '\u0e47' # 0xE7 -> THAI CHARACTER MAITAIKHU + '\u0e48' # 0xE8 -> THAI CHARACTER MAI EK + '\u0e49' # 0xE9 -> THAI CHARACTER MAI THO + '\u0e4a' # 0xEA -> THAI CHARACTER MAI TRI + '\u0e4b' # 0xEB -> THAI CHARACTER MAI CHATTAWA + '\u0e4c' # 0xEC -> THAI CHARACTER THANTHAKHAT + '\u0e4d' # 0xED -> THAI CHARACTER NIKHAHIT + '\u0e4e' # 0xEE -> THAI CHARACTER YAMAKKAN + '\u0e4f' # 0xEF -> THAI CHARACTER FONGMAN + '\u0e50' # 0xF0 -> THAI DIGIT ZERO + '\u0e51' # 0xF1 -> THAI DIGIT ONE + '\u0e52' # 0xF2 -> THAI DIGIT TWO + '\u0e53' # 0xF3 -> THAI DIGIT THREE + '\u0e54' # 0xF4 -> THAI DIGIT FOUR + '\u0e55' # 0xF5 -> THAI DIGIT FIVE + '\u0e56' # 0xF6 -> THAI DIGIT SIX + '\u0e57' # 0xF7 -> THAI DIGIT SEVEN + '\u0e58' # 0xF8 -> THAI DIGIT EIGHT + '\u0e59' # 0xF9 -> THAI DIGIT NINE + '\u0e5a' # 0xFA -> THAI CHARACTER ANGKHANKHU + '\u0e5b' # 0xFB -> THAI CHARACTER KHOMUT + '\ufffe' # 0xFC -> UNDEFINED + '\ufffe' # 0xFD -> UNDEFINED + '\ufffe' # 0xFE -> UNDEFINED + '\ufffe' # 0xFF -> UNDEFINED +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/cp875.py b/venv/Lib/encodings/cp875.py new file mode 100644 index 00000000..c25a5a43 --- /dev/null +++ b/venv/Lib/encodings/cp875.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec cp875 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP875.TXT' with gencodec.py. 
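cp874 above, and cp875 which begins here, follow the stdlib charmap pattern: a 256-entry decoding_table string is passed to codecs.charmap_build() to derive the encoding table, and both are consumed by codecs.charmap_decode()/charmap_encode(). A minimal round-trip sketch (illustrative only; assumes a standard CPython install where these codecs are already registered by name):

import codecs

# 0xA1 decodes to THAI CHARACTER KO KAI per the cp874 table above.
assert b'\xa1'.decode('cp874') == '\u0e01'
assert '\u0e01'.encode('cp874') == b'\xa1'

# The same machinery can be driven directly with a decoding-table string;
# '\ufffe' marks undefined positions, exactly as in the tables above.
toy_table = '\ufffe' * 0x41 + 'A'
assert codecs.charmap_decode(b'A', 'strict', toy_table) == ('A', 1)
assert codecs.charmap_encode('A', 'strict', codecs.charmap_build(toy_table)) == (b'A', 1)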
+ +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='cp875', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x9c' # 0x04 -> CONTROL + '\t' # 0x05 -> HORIZONTAL TABULATION + '\x86' # 0x06 -> CONTROL + '\x7f' # 0x07 -> DELETE + '\x97' # 0x08 -> CONTROL + '\x8d' # 0x09 -> CONTROL + '\x8e' # 0x0A -> CONTROL + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x9d' # 0x14 -> CONTROL + '\x85' # 0x15 -> CONTROL + '\x08' # 0x16 -> BACKSPACE + '\x87' # 0x17 -> CONTROL + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x92' # 0x1A -> CONTROL + '\x8f' # 0x1B -> CONTROL + '\x1c' # 0x1C -> FILE SEPARATOR + '\x1d' # 0x1D -> GROUP SEPARATOR + '\x1e' # 0x1E -> RECORD SEPARATOR + '\x1f' # 0x1F -> UNIT SEPARATOR + '\x80' # 0x20 -> CONTROL + '\x81' # 0x21 -> CONTROL + '\x82' # 0x22 -> CONTROL + '\x83' # 0x23 -> CONTROL + '\x84' # 0x24 -> CONTROL + '\n' # 0x25 -> LINE FEED + '\x17' # 0x26 -> END OF TRANSMISSION BLOCK + '\x1b' # 0x27 -> ESCAPE + '\x88' # 0x28 -> CONTROL + '\x89' # 0x29 -> CONTROL + '\x8a' # 0x2A -> CONTROL + '\x8b' # 0x2B -> CONTROL + '\x8c' # 0x2C -> CONTROL + '\x05' # 0x2D -> ENQUIRY + '\x06' # 0x2E -> ACKNOWLEDGE + '\x07' # 0x2F -> BELL + '\x90' # 0x30 -> CONTROL + '\x91' # 0x31 -> CONTROL + '\x16' # 0x32 -> SYNCHRONOUS IDLE + '\x93' # 0x33 -> CONTROL + '\x94' # 0x34 -> CONTROL + '\x95' # 0x35 -> CONTROL + '\x96' # 0x36 -> CONTROL + '\x04' # 0x37 -> END OF TRANSMISSION + '\x98' # 0x38 -> CONTROL + '\x99' # 0x39 -> CONTROL + '\x9a' # 0x3A -> CONTROL + '\x9b' # 0x3B -> CONTROL + '\x14' # 0x3C -> DEVICE CONTROL FOUR + '\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE + '\x9e' # 0x3E -> CONTROL + '\x1a' # 0x3F -> SUBSTITUTE + ' ' # 0x40 -> SPACE + '\u0391' # 0x41 -> GREEK CAPITAL LETTER ALPHA + '\u0392' # 0x42 -> GREEK CAPITAL LETTER BETA + '\u0393' # 0x43 -> GREEK CAPITAL LETTER GAMMA + '\u0394' # 0x44 -> GREEK CAPITAL LETTER DELTA + '\u0395' # 0x45 -> GREEK CAPITAL LETTER EPSILON + '\u0396' # 0x46 -> GREEK CAPITAL LETTER ZETA + '\u0397' # 0x47 -> GREEK CAPITAL LETTER ETA + '\u0398' # 0x48 -> GREEK CAPITAL LETTER THETA + '\u0399' # 0x49 -> GREEK CAPITAL LETTER IOTA + '[' # 0x4A -> LEFT SQUARE BRACKET + '.' 
# 0x4B -> FULL STOP + '<' # 0x4C -> LESS-THAN SIGN + '(' # 0x4D -> LEFT PARENTHESIS + '+' # 0x4E -> PLUS SIGN + '!' # 0x4F -> EXCLAMATION MARK + '&' # 0x50 -> AMPERSAND + '\u039a' # 0x51 -> GREEK CAPITAL LETTER KAPPA + '\u039b' # 0x52 -> GREEK CAPITAL LETTER LAMDA + '\u039c' # 0x53 -> GREEK CAPITAL LETTER MU + '\u039d' # 0x54 -> GREEK CAPITAL LETTER NU + '\u039e' # 0x55 -> GREEK CAPITAL LETTER XI + '\u039f' # 0x56 -> GREEK CAPITAL LETTER OMICRON + '\u03a0' # 0x57 -> GREEK CAPITAL LETTER PI + '\u03a1' # 0x58 -> GREEK CAPITAL LETTER RHO + '\u03a3' # 0x59 -> GREEK CAPITAL LETTER SIGMA + ']' # 0x5A -> RIGHT SQUARE BRACKET + '$' # 0x5B -> DOLLAR SIGN + '*' # 0x5C -> ASTERISK + ')' # 0x5D -> RIGHT PARENTHESIS + ';' # 0x5E -> SEMICOLON + '^' # 0x5F -> CIRCUMFLEX ACCENT + '-' # 0x60 -> HYPHEN-MINUS + '/' # 0x61 -> SOLIDUS + '\u03a4' # 0x62 -> GREEK CAPITAL LETTER TAU + '\u03a5' # 0x63 -> GREEK CAPITAL LETTER UPSILON + '\u03a6' # 0x64 -> GREEK CAPITAL LETTER PHI + '\u03a7' # 0x65 -> GREEK CAPITAL LETTER CHI + '\u03a8' # 0x66 -> GREEK CAPITAL LETTER PSI + '\u03a9' # 0x67 -> GREEK CAPITAL LETTER OMEGA + '\u03aa' # 0x68 -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA + '\u03ab' # 0x69 -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA + '|' # 0x6A -> VERTICAL LINE + ',' # 0x6B -> COMMA + '%' # 0x6C -> PERCENT SIGN + '_' # 0x6D -> LOW LINE + '>' # 0x6E -> GREATER-THAN SIGN + '?' # 0x6F -> QUESTION MARK + '\xa8' # 0x70 -> DIAERESIS + '\u0386' # 0x71 -> GREEK CAPITAL LETTER ALPHA WITH TONOS + '\u0388' # 0x72 -> GREEK CAPITAL LETTER EPSILON WITH TONOS + '\u0389' # 0x73 -> GREEK CAPITAL LETTER ETA WITH TONOS + '\xa0' # 0x74 -> NO-BREAK SPACE + '\u038a' # 0x75 -> GREEK CAPITAL LETTER IOTA WITH TONOS + '\u038c' # 0x76 -> GREEK CAPITAL LETTER OMICRON WITH TONOS + '\u038e' # 0x77 -> GREEK CAPITAL LETTER UPSILON WITH TONOS + '\u038f' # 0x78 -> GREEK CAPITAL LETTER OMEGA WITH TONOS + '`' # 0x79 -> GRAVE ACCENT + ':' # 0x7A -> COLON + '#' # 0x7B -> NUMBER SIGN + '@' # 0x7C -> COMMERCIAL AT + "'" # 0x7D -> APOSTROPHE + '=' # 0x7E -> EQUALS SIGN + '"' # 0x7F -> QUOTATION MARK + '\u0385' # 0x80 -> GREEK DIALYTIKA TONOS + 'a' # 0x81 -> LATIN SMALL LETTER A + 'b' # 0x82 -> LATIN SMALL LETTER B + 'c' # 0x83 -> LATIN SMALL LETTER C + 'd' # 0x84 -> LATIN SMALL LETTER D + 'e' # 0x85 -> LATIN SMALL LETTER E + 'f' # 0x86 -> LATIN SMALL LETTER F + 'g' # 0x87 -> LATIN SMALL LETTER G + 'h' # 0x88 -> LATIN SMALL LETTER H + 'i' # 0x89 -> LATIN SMALL LETTER I + '\u03b1' # 0x8A -> GREEK SMALL LETTER ALPHA + '\u03b2' # 0x8B -> GREEK SMALL LETTER BETA + '\u03b3' # 0x8C -> GREEK SMALL LETTER GAMMA + '\u03b4' # 0x8D -> GREEK SMALL LETTER DELTA + '\u03b5' # 0x8E -> GREEK SMALL LETTER EPSILON + '\u03b6' # 0x8F -> GREEK SMALL LETTER ZETA + '\xb0' # 0x90 -> DEGREE SIGN + 'j' # 0x91 -> LATIN SMALL LETTER J + 'k' # 0x92 -> LATIN SMALL LETTER K + 'l' # 0x93 -> LATIN SMALL LETTER L + 'm' # 0x94 -> LATIN SMALL LETTER M + 'n' # 0x95 -> LATIN SMALL LETTER N + 'o' # 0x96 -> LATIN SMALL LETTER O + 'p' # 0x97 -> LATIN SMALL LETTER P + 'q' # 0x98 -> LATIN SMALL LETTER Q + 'r' # 0x99 -> LATIN SMALL LETTER R + '\u03b7' # 0x9A -> GREEK SMALL LETTER ETA + '\u03b8' # 0x9B -> GREEK SMALL LETTER THETA + '\u03b9' # 0x9C -> GREEK SMALL LETTER IOTA + '\u03ba' # 0x9D -> GREEK SMALL LETTER KAPPA + '\u03bb' # 0x9E -> GREEK SMALL LETTER LAMDA + '\u03bc' # 0x9F -> GREEK SMALL LETTER MU + '\xb4' # 0xA0 -> ACUTE ACCENT + '~' # 0xA1 -> TILDE + 's' # 0xA2 -> LATIN SMALL LETTER S + 't' # 0xA3 -> LATIN SMALL LETTER T + 'u' # 0xA4 -> LATIN SMALL LETTER U + 'v' # 0xA5 -> LATIN 
SMALL LETTER V + 'w' # 0xA6 -> LATIN SMALL LETTER W + 'x' # 0xA7 -> LATIN SMALL LETTER X + 'y' # 0xA8 -> LATIN SMALL LETTER Y + 'z' # 0xA9 -> LATIN SMALL LETTER Z + '\u03bd' # 0xAA -> GREEK SMALL LETTER NU + '\u03be' # 0xAB -> GREEK SMALL LETTER XI + '\u03bf' # 0xAC -> GREEK SMALL LETTER OMICRON + '\u03c0' # 0xAD -> GREEK SMALL LETTER PI + '\u03c1' # 0xAE -> GREEK SMALL LETTER RHO + '\u03c3' # 0xAF -> GREEK SMALL LETTER SIGMA + '\xa3' # 0xB0 -> POUND SIGN + '\u03ac' # 0xB1 -> GREEK SMALL LETTER ALPHA WITH TONOS + '\u03ad' # 0xB2 -> GREEK SMALL LETTER EPSILON WITH TONOS + '\u03ae' # 0xB3 -> GREEK SMALL LETTER ETA WITH TONOS + '\u03ca' # 0xB4 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA + '\u03af' # 0xB5 -> GREEK SMALL LETTER IOTA WITH TONOS + '\u03cc' # 0xB6 -> GREEK SMALL LETTER OMICRON WITH TONOS + '\u03cd' # 0xB7 -> GREEK SMALL LETTER UPSILON WITH TONOS + '\u03cb' # 0xB8 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA + '\u03ce' # 0xB9 -> GREEK SMALL LETTER OMEGA WITH TONOS + '\u03c2' # 0xBA -> GREEK SMALL LETTER FINAL SIGMA + '\u03c4' # 0xBB -> GREEK SMALL LETTER TAU + '\u03c5' # 0xBC -> GREEK SMALL LETTER UPSILON + '\u03c6' # 0xBD -> GREEK SMALL LETTER PHI + '\u03c7' # 0xBE -> GREEK SMALL LETTER CHI + '\u03c8' # 0xBF -> GREEK SMALL LETTER PSI + '{' # 0xC0 -> LEFT CURLY BRACKET + 'A' # 0xC1 -> LATIN CAPITAL LETTER A + 'B' # 0xC2 -> LATIN CAPITAL LETTER B + 'C' # 0xC3 -> LATIN CAPITAL LETTER C + 'D' # 0xC4 -> LATIN CAPITAL LETTER D + 'E' # 0xC5 -> LATIN CAPITAL LETTER E + 'F' # 0xC6 -> LATIN CAPITAL LETTER F + 'G' # 0xC7 -> LATIN CAPITAL LETTER G + 'H' # 0xC8 -> LATIN CAPITAL LETTER H + 'I' # 0xC9 -> LATIN CAPITAL LETTER I + '\xad' # 0xCA -> SOFT HYPHEN + '\u03c9' # 0xCB -> GREEK SMALL LETTER OMEGA + '\u0390' # 0xCC -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS + '\u03b0' # 0xCD -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS + '\u2018' # 0xCE -> LEFT SINGLE QUOTATION MARK + '\u2015' # 0xCF -> HORIZONTAL BAR + '}' # 0xD0 -> RIGHT CURLY BRACKET + 'J' # 0xD1 -> LATIN CAPITAL LETTER J + 'K' # 0xD2 -> LATIN CAPITAL LETTER K + 'L' # 0xD3 -> LATIN CAPITAL LETTER L + 'M' # 0xD4 -> LATIN CAPITAL LETTER M + 'N' # 0xD5 -> LATIN CAPITAL LETTER N + 'O' # 0xD6 -> LATIN CAPITAL LETTER O + 'P' # 0xD7 -> LATIN CAPITAL LETTER P + 'Q' # 0xD8 -> LATIN CAPITAL LETTER Q + 'R' # 0xD9 -> LATIN CAPITAL LETTER R + '\xb1' # 0xDA -> PLUS-MINUS SIGN + '\xbd' # 0xDB -> VULGAR FRACTION ONE HALF + '\x1a' # 0xDC -> SUBSTITUTE + '\u0387' # 0xDD -> GREEK ANO TELEIA + '\u2019' # 0xDE -> RIGHT SINGLE QUOTATION MARK + '\xa6' # 0xDF -> BROKEN BAR + '\\' # 0xE0 -> REVERSE SOLIDUS + '\x1a' # 0xE1 -> SUBSTITUTE + 'S' # 0xE2 -> LATIN CAPITAL LETTER S + 'T' # 0xE3 -> LATIN CAPITAL LETTER T + 'U' # 0xE4 -> LATIN CAPITAL LETTER U + 'V' # 0xE5 -> LATIN CAPITAL LETTER V + 'W' # 0xE6 -> LATIN CAPITAL LETTER W + 'X' # 0xE7 -> LATIN CAPITAL LETTER X + 'Y' # 0xE8 -> LATIN CAPITAL LETTER Y + 'Z' # 0xE9 -> LATIN CAPITAL LETTER Z + '\xb2' # 0xEA -> SUPERSCRIPT TWO + '\xa7' # 0xEB -> SECTION SIGN + '\x1a' # 0xEC -> SUBSTITUTE + '\x1a' # 0xED -> SUBSTITUTE + '\xab' # 0xEE -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xac' # 0xEF -> NOT SIGN + '0' # 0xF0 -> DIGIT ZERO + '1' # 0xF1 -> DIGIT ONE + '2' # 0xF2 -> DIGIT TWO + '3' # 0xF3 -> DIGIT THREE + '4' # 0xF4 -> DIGIT FOUR + '5' # 0xF5 -> DIGIT FIVE + '6' # 0xF6 -> DIGIT SIX + '7' # 0xF7 -> DIGIT SEVEN + '8' # 0xF8 -> DIGIT EIGHT + '9' # 0xF9 -> DIGIT NINE + '\xb3' # 0xFA -> SUPERSCRIPT THREE + '\xa9' # 0xFB -> COPYRIGHT SIGN + '\x1a' # 0xFC -> SUBSTITUTE + '\x1a' # 0xFD -> 
SUBSTITUTE + '\xbb' # 0xFE -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\x9f' # 0xFF -> CONTROL +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/cp932.py b/venv/Lib/encodings/cp932.py new file mode 100644 index 00000000..e01f59b7 --- /dev/null +++ b/venv/Lib/encodings/cp932.py @@ -0,0 +1,39 @@ +# +# cp932.py: Python Unicode Codec for CP932 +# +# Written by Hye-Shik Chang +# + +import _codecs_jp, codecs +import _multibytecodec as mbc + +codec = _codecs_jp.getcodec('cp932') + +class Codec(codecs.Codec): + encode = codec.encode + decode = codec.decode + +class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, + codecs.IncrementalEncoder): + codec = codec + +class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, + codecs.IncrementalDecoder): + codec = codec + +class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader): + codec = codec + +class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter): + codec = codec + +def getregentry(): + return codecs.CodecInfo( + name='cp932', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) diff --git a/venv/Lib/encodings/cp949.py b/venv/Lib/encodings/cp949.py new file mode 100644 index 00000000..627c8712 --- /dev/null +++ b/venv/Lib/encodings/cp949.py @@ -0,0 +1,39 @@ +# +# cp949.py: Python Unicode Codec for CP949 +# +# Written by Hye-Shik Chang +# + +import _codecs_kr, codecs +import _multibytecodec as mbc + +codec = _codecs_kr.getcodec('cp949') + +class Codec(codecs.Codec): + encode = codec.encode + decode = codec.decode + +class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, + codecs.IncrementalEncoder): + codec = codec + +class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, + codecs.IncrementalDecoder): + codec = codec + +class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader): + codec = codec + +class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter): + codec = codec + +def getregentry(): + return codecs.CodecInfo( + name='cp949', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) diff --git a/venv/Lib/encodings/cp950.py b/venv/Lib/encodings/cp950.py new file mode 100644 index 00000000..39eec5ed --- /dev/null +++ b/venv/Lib/encodings/cp950.py @@ -0,0 +1,39 @@ +# +# cp950.py: Python Unicode Codec for CP950 +# +# Written by Hye-Shik Chang +# + +import _codecs_tw, codecs +import _multibytecodec as mbc + +codec = _codecs_tw.getcodec('cp950') + +class Codec(codecs.Codec): + encode = codec.encode + decode = codec.decode + +class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, + codecs.IncrementalEncoder): + codec = codec + +class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, + codecs.IncrementalDecoder): + codec = codec + +class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader): + codec = codec + +class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter): + codec = codec + +def getregentry(): + return codecs.CodecInfo( + name='cp950', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) diff --git a/venv/Lib/encodings/euc_jis_2004.py 
b/venv/Lib/encodings/euc_jis_2004.py new file mode 100644 index 00000000..72b87aea --- /dev/null +++ b/venv/Lib/encodings/euc_jis_2004.py @@ -0,0 +1,39 @@ +# +# euc_jis_2004.py: Python Unicode Codec for EUC_JIS_2004 +# +# Written by Hye-Shik Chang +# + +import _codecs_jp, codecs +import _multibytecodec as mbc + +codec = _codecs_jp.getcodec('euc_jis_2004') + +class Codec(codecs.Codec): + encode = codec.encode + decode = codec.decode + +class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, + codecs.IncrementalEncoder): + codec = codec + +class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, + codecs.IncrementalDecoder): + codec = codec + +class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader): + codec = codec + +class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter): + codec = codec + +def getregentry(): + return codecs.CodecInfo( + name='euc_jis_2004', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) diff --git a/venv/Lib/encodings/euc_jisx0213.py b/venv/Lib/encodings/euc_jisx0213.py new file mode 100644 index 00000000..cc47d041 --- /dev/null +++ b/venv/Lib/encodings/euc_jisx0213.py @@ -0,0 +1,39 @@ +# +# euc_jisx0213.py: Python Unicode Codec for EUC_JISX0213 +# +# Written by Hye-Shik Chang +# + +import _codecs_jp, codecs +import _multibytecodec as mbc + +codec = _codecs_jp.getcodec('euc_jisx0213') + +class Codec(codecs.Codec): + encode = codec.encode + decode = codec.decode + +class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, + codecs.IncrementalEncoder): + codec = codec + +class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, + codecs.IncrementalDecoder): + codec = codec + +class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader): + codec = codec + +class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter): + codec = codec + +def getregentry(): + return codecs.CodecInfo( + name='euc_jisx0213', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) diff --git a/venv/Lib/encodings/euc_jp.py b/venv/Lib/encodings/euc_jp.py new file mode 100644 index 00000000..7bcbe414 --- /dev/null +++ b/venv/Lib/encodings/euc_jp.py @@ -0,0 +1,39 @@ +# +# euc_jp.py: Python Unicode Codec for EUC_JP +# +# Written by Hye-Shik Chang +# + +import _codecs_jp, codecs +import _multibytecodec as mbc + +codec = _codecs_jp.getcodec('euc_jp') + +class Codec(codecs.Codec): + encode = codec.encode + decode = codec.decode + +class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, + codecs.IncrementalEncoder): + codec = codec + +class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, + codecs.IncrementalDecoder): + codec = codec + +class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader): + codec = codec + +class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter): + codec = codec + +def getregentry(): + return codecs.CodecInfo( + name='euc_jp', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) diff --git a/venv/Lib/encodings/euc_kr.py b/venv/Lib/encodings/euc_kr.py new file mode 100644 index 00000000..c1fb1260 --- /dev/null +++ b/venv/Lib/encodings/euc_kr.py @@ -0,0 +1,39 @@ +# 
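The CJK codec modules in this directory (cp932, cp949, cp950, the euc_* variants, gb2312, gbk, gb18030, hz) are all the same thin wrapper: a C codec fetched from _codecs_jp, _codecs_kr or _codecs_cn is exposed through _multibytecodec's incremental and stream classes, and getregentry() hands the pieces to the codec registry. A short sketch of how such a codec behaves once registered (standard CPython behaviour; the byte values are a known EUC-KR example):

import codecs

data = '한국'.encode('euc_kr')                 # -> b'\xc7\xd1\xb1\xb9'
assert data.decode('euc_kr') == '한국'

# The MultibyteIncrementalDecoder wrapper lets bytes arrive in arbitrary chunks
# and buffers incomplete multi-byte sequences between calls.
dec = codecs.getincrementaldecoder('euc_kr')()
out = dec.decode(data[:1]) + dec.decode(data[1:], final=True)
assert out == '한국'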
+# euc_kr.py: Python Unicode Codec for EUC_KR +# +# Written by Hye-Shik Chang +# + +import _codecs_kr, codecs +import _multibytecodec as mbc + +codec = _codecs_kr.getcodec('euc_kr') + +class Codec(codecs.Codec): + encode = codec.encode + decode = codec.decode + +class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, + codecs.IncrementalEncoder): + codec = codec + +class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, + codecs.IncrementalDecoder): + codec = codec + +class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader): + codec = codec + +class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter): + codec = codec + +def getregentry(): + return codecs.CodecInfo( + name='euc_kr', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) diff --git a/venv/Lib/encodings/gb18030.py b/venv/Lib/encodings/gb18030.py new file mode 100644 index 00000000..34fb6c36 --- /dev/null +++ b/venv/Lib/encodings/gb18030.py @@ -0,0 +1,39 @@ +# +# gb18030.py: Python Unicode Codec for GB18030 +# +# Written by Hye-Shik Chang +# + +import _codecs_cn, codecs +import _multibytecodec as mbc + +codec = _codecs_cn.getcodec('gb18030') + +class Codec(codecs.Codec): + encode = codec.encode + decode = codec.decode + +class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, + codecs.IncrementalEncoder): + codec = codec + +class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, + codecs.IncrementalDecoder): + codec = codec + +class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader): + codec = codec + +class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter): + codec = codec + +def getregentry(): + return codecs.CodecInfo( + name='gb18030', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) diff --git a/venv/Lib/encodings/gb2312.py b/venv/Lib/encodings/gb2312.py new file mode 100644 index 00000000..3c3b837d --- /dev/null +++ b/venv/Lib/encodings/gb2312.py @@ -0,0 +1,39 @@ +# +# gb2312.py: Python Unicode Codec for GB2312 +# +# Written by Hye-Shik Chang +# + +import _codecs_cn, codecs +import _multibytecodec as mbc + +codec = _codecs_cn.getcodec('gb2312') + +class Codec(codecs.Codec): + encode = codec.encode + decode = codec.decode + +class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, + codecs.IncrementalEncoder): + codec = codec + +class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, + codecs.IncrementalDecoder): + codec = codec + +class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader): + codec = codec + +class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter): + codec = codec + +def getregentry(): + return codecs.CodecInfo( + name='gb2312', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) diff --git a/venv/Lib/encodings/gbk.py b/venv/Lib/encodings/gbk.py new file mode 100644 index 00000000..1b45db89 --- /dev/null +++ b/venv/Lib/encodings/gbk.py @@ -0,0 +1,39 @@ +# +# gbk.py: Python Unicode Codec for GBK +# +# Written by Hye-Shik Chang +# + +import _codecs_cn, codecs +import _multibytecodec as mbc + +codec = _codecs_cn.getcodec('gbk') + +class Codec(codecs.Codec): + encode = codec.encode 
+ decode = codec.decode + +class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, + codecs.IncrementalEncoder): + codec = codec + +class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, + codecs.IncrementalDecoder): + codec = codec + +class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader): + codec = codec + +class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter): + codec = codec + +def getregentry(): + return codecs.CodecInfo( + name='gbk', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) diff --git a/venv/Lib/encodings/hex_codec.py b/venv/Lib/encodings/hex_codec.py new file mode 100644 index 00000000..9fb10728 --- /dev/null +++ b/venv/Lib/encodings/hex_codec.py @@ -0,0 +1,55 @@ +"""Python 'hex_codec' Codec - 2-digit hex content transfer encoding. + +This codec de/encodes from bytes to bytes. + +Written by Marc-Andre Lemburg (mal@lemburg.com). +""" + +import codecs +import binascii + +### Codec APIs + +def hex_encode(input, errors='strict'): + assert errors == 'strict' + return (binascii.b2a_hex(input), len(input)) + +def hex_decode(input, errors='strict'): + assert errors == 'strict' + return (binascii.a2b_hex(input), len(input)) + +class Codec(codecs.Codec): + def encode(self, input, errors='strict'): + return hex_encode(input, errors) + def decode(self, input, errors='strict'): + return hex_decode(input, errors) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + assert self.errors == 'strict' + return binascii.b2a_hex(input) + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + assert self.errors == 'strict' + return binascii.a2b_hex(input) + +class StreamWriter(Codec, codecs.StreamWriter): + charbuffertype = bytes + +class StreamReader(Codec, codecs.StreamReader): + charbuffertype = bytes + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='hex', + encode=hex_encode, + decode=hex_decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamwriter=StreamWriter, + streamreader=StreamReader, + _is_text_encoding=False, + ) diff --git a/venv/Lib/encodings/hp_roman8.py b/venv/Lib/encodings/hp_roman8.py new file mode 100644 index 00000000..58de1033 --- /dev/null +++ b/venv/Lib/encodings/hp_roman8.py @@ -0,0 +1,314 @@ +""" Python Character Mapping Codec generated from 'hp_roman8.txt' with gencodec.py. + + Based on data from ftp://dkuug.dk/i18n/charmaps/HP-ROMAN8 (Keld Simonsen) + + Original source: LaserJet IIP Printer User's Manual HP part no + 33471-90901, Hewlet-Packard, June 1989. 
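hex_codec above is a bytes-to-bytes transform (note _is_text_encoding=False in its getregentry()), so it is used through codecs.encode()/codecs.decode() rather than str.encode(). A small sketch of the expected behaviour on a standard CPython install:

import binascii
import codecs

encoded = codecs.encode(b'\x00\xff', 'hex')    # -> b'00ff'
assert codecs.decode(encoded, 'hex') == b'\x00\xff'

# The codec simply delegates to binascii, as the module above shows:
assert binascii.b2a_hex(b'\x00\xff') == encoded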
+ + (Used with permission) + +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='hp-roman8', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamwriter=StreamWriter, + streamreader=StreamReader, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x04' # 0x04 -> END OF TRANSMISSION + '\x05' # 0x05 -> ENQUIRY + '\x06' # 0x06 -> ACKNOWLEDGE + '\x07' # 0x07 -> BELL + '\x08' # 0x08 -> BACKSPACE + '\t' # 0x09 -> HORIZONTAL TABULATION + '\n' # 0x0A -> LINE FEED + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x14' # 0x14 -> DEVICE CONTROL FOUR + '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x16 -> SYNCHRONOUS IDLE + '\x17' # 0x17 -> END OF TRANSMISSION BLOCK + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x1a' # 0x1A -> SUBSTITUTE + '\x1b' # 0x1B -> ESCAPE + '\x1c' # 0x1C -> FILE SEPARATOR + '\x1d' # 0x1D -> GROUP SEPARATOR + '\x1e' # 0x1E -> RECORD SEPARATOR + '\x1f' # 0x1F -> UNIT SEPARATOR + ' ' # 0x20 -> SPACE + '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' # 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' 
# 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> DELETE + '\x80' # 0x80 -> + '\x81' # 0x81 -> + '\x82' # 0x82 -> + '\x83' # 0x83 -> + '\x84' # 0x84 -> + '\x85' # 0x85 -> + '\x86' # 0x86 -> + '\x87' # 0x87 -> + '\x88' # 0x88 -> + '\x89' # 0x89 -> + '\x8a' # 0x8A -> + '\x8b' # 0x8B -> + '\x8c' # 0x8C -> + '\x8d' # 0x8D -> + '\x8e' # 0x8E -> + '\x8f' # 0x8F -> + '\x90' # 0x90 -> + '\x91' # 0x91 -> + '\x92' # 0x92 -> + '\x93' # 0x93 -> + '\x94' # 0x94 -> + '\x95' # 0x95 -> + '\x96' # 0x96 -> + '\x97' # 0x97 -> + '\x98' # 0x98 -> + '\x99' # 0x99 -> + '\x9a' # 0x9A -> + '\x9b' # 0x9B -> + '\x9c' # 0x9C -> + '\x9d' # 0x9D -> + '\x9e' # 0x9E -> + '\x9f' # 0x9F -> + '\xa0' # 0xA0 -> NO-BREAK SPACE + '\xc0' # 0xA1 -> LATIN CAPITAL LETTER A WITH GRAVE + '\xc2' # 0xA2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX + '\xc8' # 0xA3 -> LATIN CAPITAL LETTER E WITH GRAVE + '\xca' # 0xA4 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX + '\xcb' # 0xA5 -> LATIN CAPITAL LETTER E WITH DIAERESIS + '\xce' # 0xA6 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX + '\xcf' # 0xA7 -> LATIN CAPITAL LETTER I WITH DIAERESIS + '\xb4' # 0xA8 -> ACUTE ACCENT + '\u02cb' # 0xA9 -> MODIFIER LETTER GRAVE ACCENT (MANDARIN CHINESE FOURTH TONE) + '\u02c6' # 0xAA -> MODIFIER 
LETTER CIRCUMFLEX ACCENT + '\xa8' # 0xAB -> DIAERESIS + '\u02dc' # 0xAC -> SMALL TILDE + '\xd9' # 0xAD -> LATIN CAPITAL LETTER U WITH GRAVE + '\xdb' # 0xAE -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX + '\u20a4' # 0xAF -> LIRA SIGN + '\xaf' # 0xB0 -> MACRON + '\xdd' # 0xB1 -> LATIN CAPITAL LETTER Y WITH ACUTE + '\xfd' # 0xB2 -> LATIN SMALL LETTER Y WITH ACUTE + '\xb0' # 0xB3 -> DEGREE SIGN + '\xc7' # 0xB4 -> LATIN CAPITAL LETTER C WITH CEDILLA + '\xe7' # 0xB5 -> LATIN SMALL LETTER C WITH CEDILLA + '\xd1' # 0xB6 -> LATIN CAPITAL LETTER N WITH TILDE + '\xf1' # 0xB7 -> LATIN SMALL LETTER N WITH TILDE + '\xa1' # 0xB8 -> INVERTED EXCLAMATION MARK + '\xbf' # 0xB9 -> INVERTED QUESTION MARK + '\xa4' # 0xBA -> CURRENCY SIGN + '\xa3' # 0xBB -> POUND SIGN + '\xa5' # 0xBC -> YEN SIGN + '\xa7' # 0xBD -> SECTION SIGN + '\u0192' # 0xBE -> LATIN SMALL LETTER F WITH HOOK + '\xa2' # 0xBF -> CENT SIGN + '\xe2' # 0xC0 -> LATIN SMALL LETTER A WITH CIRCUMFLEX + '\xea' # 0xC1 -> LATIN SMALL LETTER E WITH CIRCUMFLEX + '\xf4' # 0xC2 -> LATIN SMALL LETTER O WITH CIRCUMFLEX + '\xfb' # 0xC3 -> LATIN SMALL LETTER U WITH CIRCUMFLEX + '\xe1' # 0xC4 -> LATIN SMALL LETTER A WITH ACUTE + '\xe9' # 0xC5 -> LATIN SMALL LETTER E WITH ACUTE + '\xf3' # 0xC6 -> LATIN SMALL LETTER O WITH ACUTE + '\xfa' # 0xC7 -> LATIN SMALL LETTER U WITH ACUTE + '\xe0' # 0xC8 -> LATIN SMALL LETTER A WITH GRAVE + '\xe8' # 0xC9 -> LATIN SMALL LETTER E WITH GRAVE + '\xf2' # 0xCA -> LATIN SMALL LETTER O WITH GRAVE + '\xf9' # 0xCB -> LATIN SMALL LETTER U WITH GRAVE + '\xe4' # 0xCC -> LATIN SMALL LETTER A WITH DIAERESIS + '\xeb' # 0xCD -> LATIN SMALL LETTER E WITH DIAERESIS + '\xf6' # 0xCE -> LATIN SMALL LETTER O WITH DIAERESIS + '\xfc' # 0xCF -> LATIN SMALL LETTER U WITH DIAERESIS + '\xc5' # 0xD0 -> LATIN CAPITAL LETTER A WITH RING ABOVE + '\xee' # 0xD1 -> LATIN SMALL LETTER I WITH CIRCUMFLEX + '\xd8' # 0xD2 -> LATIN CAPITAL LETTER O WITH STROKE + '\xc6' # 0xD3 -> LATIN CAPITAL LETTER AE + '\xe5' # 0xD4 -> LATIN SMALL LETTER A WITH RING ABOVE + '\xed' # 0xD5 -> LATIN SMALL LETTER I WITH ACUTE + '\xf8' # 0xD6 -> LATIN SMALL LETTER O WITH STROKE + '\xe6' # 0xD7 -> LATIN SMALL LETTER AE + '\xc4' # 0xD8 -> LATIN CAPITAL LETTER A WITH DIAERESIS + '\xec' # 0xD9 -> LATIN SMALL LETTER I WITH GRAVE + '\xd6' # 0xDA -> LATIN CAPITAL LETTER O WITH DIAERESIS + '\xdc' # 0xDB -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\xc9' # 0xDC -> LATIN CAPITAL LETTER E WITH ACUTE + '\xef' # 0xDD -> LATIN SMALL LETTER I WITH DIAERESIS + '\xdf' # 0xDE -> LATIN SMALL LETTER SHARP S (GERMAN) + '\xd4' # 0xDF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX + '\xc1' # 0xE0 -> LATIN CAPITAL LETTER A WITH ACUTE + '\xc3' # 0xE1 -> LATIN CAPITAL LETTER A WITH TILDE + '\xe3' # 0xE2 -> LATIN SMALL LETTER A WITH TILDE + '\xd0' # 0xE3 -> LATIN CAPITAL LETTER ETH (ICELANDIC) + '\xf0' # 0xE4 -> LATIN SMALL LETTER ETH (ICELANDIC) + '\xcd' # 0xE5 -> LATIN CAPITAL LETTER I WITH ACUTE + '\xcc' # 0xE6 -> LATIN CAPITAL LETTER I WITH GRAVE + '\xd3' # 0xE7 -> LATIN CAPITAL LETTER O WITH ACUTE + '\xd2' # 0xE8 -> LATIN CAPITAL LETTER O WITH GRAVE + '\xd5' # 0xE9 -> LATIN CAPITAL LETTER O WITH TILDE + '\xf5' # 0xEA -> LATIN SMALL LETTER O WITH TILDE + '\u0160' # 0xEB -> LATIN CAPITAL LETTER S WITH CARON + '\u0161' # 0xEC -> LATIN SMALL LETTER S WITH CARON + '\xda' # 0xED -> LATIN CAPITAL LETTER U WITH ACUTE + '\u0178' # 0xEE -> LATIN CAPITAL LETTER Y WITH DIAERESIS + '\xff' # 0xEF -> LATIN SMALL LETTER Y WITH DIAERESIS + '\xde' # 0xF0 -> LATIN CAPITAL LETTER THORN (ICELANDIC) + '\xfe' # 0xF1 -> LATIN SMALL LETTER 
THORN (ICELANDIC) + '\xb7' # 0xF2 -> MIDDLE DOT + '\xb5' # 0xF3 -> MICRO SIGN + '\xb6' # 0xF4 -> PILCROW SIGN + '\xbe' # 0xF5 -> VULGAR FRACTION THREE QUARTERS + '\u2014' # 0xF6 -> EM DASH + '\xbc' # 0xF7 -> VULGAR FRACTION ONE QUARTER + '\xbd' # 0xF8 -> VULGAR FRACTION ONE HALF + '\xaa' # 0xF9 -> FEMININE ORDINAL INDICATOR + '\xba' # 0xFA -> MASCULINE ORDINAL INDICATOR + '\xab' # 0xFB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\u25a0' # 0xFC -> BLACK SQUARE + '\xbb' # 0xFD -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xb1' # 0xFE -> PLUS-MINUS SIGN + '\ufffe' +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/hz.py b/venv/Lib/encodings/hz.py new file mode 100644 index 00000000..383442a3 --- /dev/null +++ b/venv/Lib/encodings/hz.py @@ -0,0 +1,39 @@ +# +# hz.py: Python Unicode Codec for HZ +# +# Written by Hye-Shik Chang +# + +import _codecs_cn, codecs +import _multibytecodec as mbc + +codec = _codecs_cn.getcodec('hz') + +class Codec(codecs.Codec): + encode = codec.encode + decode = codec.decode + +class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, + codecs.IncrementalEncoder): + codec = codec + +class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, + codecs.IncrementalDecoder): + codec = codec + +class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader): + codec = codec + +class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter): + codec = codec + +def getregentry(): + return codecs.CodecInfo( + name='hz', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) diff --git a/venv/Lib/encodings/idna.py b/venv/Lib/encodings/idna.py new file mode 100644 index 00000000..ea405851 --- /dev/null +++ b/venv/Lib/encodings/idna.py @@ -0,0 +1,309 @@ +# This module implements the RFCs 3490 (IDNA) and 3491 (Nameprep) + +import stringprep, re, codecs +from unicodedata import ucd_3_2_0 as unicodedata + +# IDNA section 3.1 +dots = re.compile("[\u002E\u3002\uFF0E\uFF61]") + +# IDNA section 5 +ace_prefix = b"xn--" +sace_prefix = "xn--" + +# This assumes query strings, so AllowUnassigned is true +def nameprep(label): + # Map + newlabel = [] + for c in label: + if stringprep.in_table_b1(c): + # Map to nothing + continue + newlabel.append(stringprep.map_table_b2(c)) + label = "".join(newlabel) + + # Normalize + label = unicodedata.normalize("NFKC", label) + + # Prohibit + for c in label: + if stringprep.in_table_c12(c) or \ + stringprep.in_table_c22(c) or \ + stringprep.in_table_c3(c) or \ + stringprep.in_table_c4(c) or \ + stringprep.in_table_c5(c) or \ + stringprep.in_table_c6(c) or \ + stringprep.in_table_c7(c) or \ + stringprep.in_table_c8(c) or \ + stringprep.in_table_c9(c): + raise UnicodeError("Invalid character %r" % c) + + # Check bidi + RandAL = [stringprep.in_table_d1(x) for x in label] + for c in RandAL: + if c: + # There is a RandAL char in the string. Must perform further + # tests: + # 1) The characters in section 5.8 MUST be prohibited. + # This is table C.8, which was already checked + # 2) If a string contains any RandALCat character, the string + # MUST NOT contain any LCat character. 
+ if any(stringprep.in_table_d2(x) for x in label): + raise UnicodeError("Violation of BIDI requirement 2") + + # 3) If a string contains any RandALCat character, a + # RandALCat character MUST be the first character of the + # string, and a RandALCat character MUST be the last + # character of the string. + if not RandAL[0] or not RandAL[-1]: + raise UnicodeError("Violation of BIDI requirement 3") + + return label + +def ToASCII(label): + try: + # Step 1: try ASCII + label = label.encode("ascii") + except UnicodeError: + pass + else: + # Skip to step 3: UseSTD3ASCIIRules is false, so + # Skip to step 8. + if 0 < len(label) < 64: + return label + raise UnicodeError("label empty or too long") + + # Step 2: nameprep + label = nameprep(label) + + # Step 3: UseSTD3ASCIIRules is false + # Step 4: try ASCII + try: + label = label.encode("ascii") + except UnicodeError: + pass + else: + # Skip to step 8. + if 0 < len(label) < 64: + return label + raise UnicodeError("label empty or too long") + + # Step 5: Check ACE prefix + if label.startswith(sace_prefix): + raise UnicodeError("Label starts with ACE prefix") + + # Step 6: Encode with PUNYCODE + label = label.encode("punycode") + + # Step 7: Prepend ACE prefix + label = ace_prefix + label + + # Step 8: Check size + if 0 < len(label) < 64: + return label + raise UnicodeError("label empty or too long") + +def ToUnicode(label): + # Step 1: Check for ASCII + if isinstance(label, bytes): + pure_ascii = True + else: + try: + label = label.encode("ascii") + pure_ascii = True + except UnicodeError: + pure_ascii = False + if not pure_ascii: + # Step 2: Perform nameprep + label = nameprep(label) + # It doesn't say this, but apparently, it should be ASCII now + try: + label = label.encode("ascii") + except UnicodeError: + raise UnicodeError("Invalid character in IDN label") + # Step 3: Check for ACE prefix + if not label.startswith(ace_prefix): + return str(label, "ascii") + + # Step 4: Remove ACE prefix + label1 = label[len(ace_prefix):] + + # Step 5: Decode using PUNYCODE + result = label1.decode("punycode") + + # Step 6: Apply ToASCII + label2 = ToASCII(result) + + # Step 7: Compare the result of step 6 with the one of step 3 + # label2 will already be in lower case. + if str(label, "ascii").lower() != str(label2, "ascii"): + raise UnicodeError("IDNA does not round-trip", label, label2) + + # Step 8: return the result of step 5 + return result + +### Codec APIs + +class Codec(codecs.Codec): + def encode(self, input, errors='strict'): + + if errors != 'strict': + # IDNA is quite clear that implementations must be strict + raise UnicodeError("unsupported error handling "+errors) + + if not input: + return b'', 0 + + try: + result = input.encode('ascii') + except UnicodeEncodeError: + pass + else: + # ASCII name: fast path + labels = result.split(b'.') + for label in labels[:-1]: + if not (0 < len(label) < 64): + raise UnicodeError("label empty or too long") + if len(labels[-1]) >= 64: + raise UnicodeError("label too long") + return result, len(input) + + result = bytearray() + labels = dots.split(input) + if labels and not labels[-1]: + trailing_dot = b'.' 
+ del labels[-1] + else: + trailing_dot = b'' + for label in labels: + if result: + # Join with U+002E + result.extend(b'.') + result.extend(ToASCII(label)) + return bytes(result+trailing_dot), len(input) + + def decode(self, input, errors='strict'): + + if errors != 'strict': + raise UnicodeError("Unsupported error handling "+errors) + + if not input: + return "", 0 + + # IDNA allows decoding to operate on Unicode strings, too. + if not isinstance(input, bytes): + # XXX obviously wrong, see #3232 + input = bytes(input) + + if ace_prefix not in input: + # Fast path + try: + return input.decode('ascii'), len(input) + except UnicodeDecodeError: + pass + + labels = input.split(b".") + + if labels and len(labels[-1]) == 0: + trailing_dot = '.' + del labels[-1] + else: + trailing_dot = '' + + result = [] + for label in labels: + result.append(ToUnicode(label)) + + return ".".join(result)+trailing_dot, len(input) + +class IncrementalEncoder(codecs.BufferedIncrementalEncoder): + def _buffer_encode(self, input, errors, final): + if errors != 'strict': + # IDNA is quite clear that implementations must be strict + raise UnicodeError("unsupported error handling "+errors) + + if not input: + return (b'', 0) + + labels = dots.split(input) + trailing_dot = b'' + if labels: + if not labels[-1]: + trailing_dot = b'.' + del labels[-1] + elif not final: + # Keep potentially unfinished label until the next call + del labels[-1] + if labels: + trailing_dot = b'.' + + result = bytearray() + size = 0 + for label in labels: + if size: + # Join with U+002E + result.extend(b'.') + size += 1 + result.extend(ToASCII(label)) + size += len(label) + + result += trailing_dot + size += len(trailing_dot) + return (bytes(result), size) + +class IncrementalDecoder(codecs.BufferedIncrementalDecoder): + def _buffer_decode(self, input, errors, final): + if errors != 'strict': + raise UnicodeError("Unsupported error handling "+errors) + + if not input: + return ("", 0) + + # IDNA allows decoding to operate on Unicode strings, too. + if isinstance(input, str): + labels = dots.split(input) + else: + # Must be ASCII string + input = str(input, "ascii") + labels = input.split(".") + + trailing_dot = '' + if labels: + if not labels[-1]: + trailing_dot = '.' + del labels[-1] + elif not final: + # Keep potentially unfinished label until the next call + del labels[-1] + if labels: + trailing_dot = '.' 
+ + result = [] + size = 0 + for label in labels: + result.append(ToUnicode(label)) + if size: + size += 1 + size += len(label) + + result = ".".join(result) + trailing_dot + size += len(trailing_dot) + return (result, size) + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='idna', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamwriter=StreamWriter, + streamreader=StreamReader, + ) diff --git a/venv/Lib/encodings/iso2022_jp.py b/venv/Lib/encodings/iso2022_jp.py new file mode 100644 index 00000000..ab040606 --- /dev/null +++ b/venv/Lib/encodings/iso2022_jp.py @@ -0,0 +1,39 @@ +# +# iso2022_jp.py: Python Unicode Codec for ISO2022_JP +# +# Written by Hye-Shik Chang +# + +import _codecs_iso2022, codecs +import _multibytecodec as mbc + +codec = _codecs_iso2022.getcodec('iso2022_jp') + +class Codec(codecs.Codec): + encode = codec.encode + decode = codec.decode + +class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, + codecs.IncrementalEncoder): + codec = codec + +class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, + codecs.IncrementalDecoder): + codec = codec + +class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader): + codec = codec + +class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter): + codec = codec + +def getregentry(): + return codecs.CodecInfo( + name='iso2022_jp', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) diff --git a/venv/Lib/encodings/iso2022_jp_1.py b/venv/Lib/encodings/iso2022_jp_1.py new file mode 100644 index 00000000..997044dc --- /dev/null +++ b/venv/Lib/encodings/iso2022_jp_1.py @@ -0,0 +1,39 @@ +# +# iso2022_jp_1.py: Python Unicode Codec for ISO2022_JP_1 +# +# Written by Hye-Shik Chang +# + +import _codecs_iso2022, codecs +import _multibytecodec as mbc + +codec = _codecs_iso2022.getcodec('iso2022_jp_1') + +class Codec(codecs.Codec): + encode = codec.encode + decode = codec.decode + +class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, + codecs.IncrementalEncoder): + codec = codec + +class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, + codecs.IncrementalDecoder): + codec = codec + +class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader): + codec = codec + +class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter): + codec = codec + +def getregentry(): + return codecs.CodecInfo( + name='iso2022_jp_1', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) diff --git a/venv/Lib/encodings/iso2022_jp_2.py b/venv/Lib/encodings/iso2022_jp_2.py new file mode 100644 index 00000000..9106bf76 --- /dev/null +++ b/venv/Lib/encodings/iso2022_jp_2.py @@ -0,0 +1,39 @@ +# +# iso2022_jp_2.py: Python Unicode Codec for ISO2022_JP_2 +# +# Written by Hye-Shik Chang +# + +import _codecs_iso2022, codecs +import _multibytecodec as mbc + +codec = _codecs_iso2022.getcodec('iso2022_jp_2') + +class Codec(codecs.Codec): + encode = codec.encode + decode = codec.decode + +class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, + codecs.IncrementalEncoder): + codec = codec + +class 
IncrementalDecoder(mbc.MultibyteIncrementalDecoder, + codecs.IncrementalDecoder): + codec = codec + +class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader): + codec = codec + +class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter): + codec = codec + +def getregentry(): + return codecs.CodecInfo( + name='iso2022_jp_2', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) diff --git a/venv/Lib/encodings/iso2022_jp_2004.py b/venv/Lib/encodings/iso2022_jp_2004.py new file mode 100644 index 00000000..40198bf0 --- /dev/null +++ b/venv/Lib/encodings/iso2022_jp_2004.py @@ -0,0 +1,39 @@ +# +# iso2022_jp_2004.py: Python Unicode Codec for ISO2022_JP_2004 +# +# Written by Hye-Shik Chang +# + +import _codecs_iso2022, codecs +import _multibytecodec as mbc + +codec = _codecs_iso2022.getcodec('iso2022_jp_2004') + +class Codec(codecs.Codec): + encode = codec.encode + decode = codec.decode + +class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, + codecs.IncrementalEncoder): + codec = codec + +class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, + codecs.IncrementalDecoder): + codec = codec + +class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader): + codec = codec + +class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter): + codec = codec + +def getregentry(): + return codecs.CodecInfo( + name='iso2022_jp_2004', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) diff --git a/venv/Lib/encodings/iso2022_jp_3.py b/venv/Lib/encodings/iso2022_jp_3.py new file mode 100644 index 00000000..346e08be --- /dev/null +++ b/venv/Lib/encodings/iso2022_jp_3.py @@ -0,0 +1,39 @@ +# +# iso2022_jp_3.py: Python Unicode Codec for ISO2022_JP_3 +# +# Written by Hye-Shik Chang +# + +import _codecs_iso2022, codecs +import _multibytecodec as mbc + +codec = _codecs_iso2022.getcodec('iso2022_jp_3') + +class Codec(codecs.Codec): + encode = codec.encode + decode = codec.decode + +class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, + codecs.IncrementalEncoder): + codec = codec + +class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, + codecs.IncrementalDecoder): + codec = codec + +class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader): + codec = codec + +class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter): + codec = codec + +def getregentry(): + return codecs.CodecInfo( + name='iso2022_jp_3', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) diff --git a/venv/Lib/encodings/iso2022_jp_ext.py b/venv/Lib/encodings/iso2022_jp_ext.py new file mode 100644 index 00000000..752bab98 --- /dev/null +++ b/venv/Lib/encodings/iso2022_jp_ext.py @@ -0,0 +1,39 @@ +# +# iso2022_jp_ext.py: Python Unicode Codec for ISO2022_JP_EXT +# +# Written by Hye-Shik Chang +# + +import _codecs_iso2022, codecs +import _multibytecodec as mbc + +codec = _codecs_iso2022.getcodec('iso2022_jp_ext') + +class Codec(codecs.Codec): + encode = codec.encode + decode = codec.decode + +class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, + codecs.IncrementalEncoder): + codec = codec + +class 
IncrementalDecoder(mbc.MultibyteIncrementalDecoder, + codecs.IncrementalDecoder): + codec = codec + +class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader): + codec = codec + +class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter): + codec = codec + +def getregentry(): + return codecs.CodecInfo( + name='iso2022_jp_ext', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) diff --git a/venv/Lib/encodings/iso2022_kr.py b/venv/Lib/encodings/iso2022_kr.py new file mode 100644 index 00000000..bf701876 --- /dev/null +++ b/venv/Lib/encodings/iso2022_kr.py @@ -0,0 +1,39 @@ +# +# iso2022_kr.py: Python Unicode Codec for ISO2022_KR +# +# Written by Hye-Shik Chang +# + +import _codecs_iso2022, codecs +import _multibytecodec as mbc + +codec = _codecs_iso2022.getcodec('iso2022_kr') + +class Codec(codecs.Codec): + encode = codec.encode + decode = codec.decode + +class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, + codecs.IncrementalEncoder): + codec = codec + +class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, + codecs.IncrementalDecoder): + codec = codec + +class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader): + codec = codec + +class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter): + codec = codec + +def getregentry(): + return codecs.CodecInfo( + name='iso2022_kr', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) diff --git a/venv/Lib/encodings/iso8859_1.py b/venv/Lib/encodings/iso8859_1.py new file mode 100644 index 00000000..8cfc01fe --- /dev/null +++ b/venv/Lib/encodings/iso8859_1.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec iso8859_1 generated from 'MAPPINGS/ISO8859/8859-1.TXT' with gencodec.py. 
+ +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='iso8859-1', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x04' # 0x04 -> END OF TRANSMISSION + '\x05' # 0x05 -> ENQUIRY + '\x06' # 0x06 -> ACKNOWLEDGE + '\x07' # 0x07 -> BELL + '\x08' # 0x08 -> BACKSPACE + '\t' # 0x09 -> HORIZONTAL TABULATION + '\n' # 0x0A -> LINE FEED + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x14' # 0x14 -> DEVICE CONTROL FOUR + '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x16 -> SYNCHRONOUS IDLE + '\x17' # 0x17 -> END OF TRANSMISSION BLOCK + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x1a' # 0x1A -> SUBSTITUTE + '\x1b' # 0x1B -> ESCAPE + '\x1c' # 0x1C -> FILE SEPARATOR + '\x1d' # 0x1D -> GROUP SEPARATOR + '\x1e' # 0x1E -> RECORD SEPARATOR + '\x1f' # 0x1F -> UNIT SEPARATOR + ' ' # 0x20 -> SPACE + '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' # 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' 
# 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> DELETE + '\x80' # 0x80 -> + '\x81' # 0x81 -> + '\x82' # 0x82 -> + '\x83' # 0x83 -> + '\x84' # 0x84 -> + '\x85' # 0x85 -> + '\x86' # 0x86 -> + '\x87' # 0x87 -> + '\x88' # 0x88 -> + '\x89' # 0x89 -> + '\x8a' # 0x8A -> + '\x8b' # 0x8B -> + '\x8c' # 0x8C -> + '\x8d' # 0x8D -> + '\x8e' # 0x8E -> + '\x8f' # 0x8F -> + '\x90' # 0x90 -> + '\x91' # 0x91 -> + '\x92' # 0x92 -> + '\x93' # 0x93 -> + '\x94' # 0x94 -> + '\x95' # 0x95 -> + '\x96' # 0x96 -> + '\x97' # 0x97 -> + '\x98' # 0x98 -> + '\x99' # 0x99 -> + '\x9a' # 0x9A -> + '\x9b' # 0x9B -> + '\x9c' # 0x9C -> + '\x9d' # 0x9D -> + '\x9e' # 0x9E -> + '\x9f' # 0x9F -> + '\xa0' # 0xA0 -> NO-BREAK SPACE + '\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK + '\xa2' # 0xA2 -> CENT SIGN + '\xa3' # 0xA3 -> POUND SIGN + '\xa4' # 0xA4 -> CURRENCY SIGN + '\xa5' # 0xA5 -> YEN SIGN + '\xa6' # 0xA6 -> BROKEN BAR + '\xa7' # 0xA7 -> SECTION SIGN + '\xa8' # 0xA8 -> DIAERESIS + '\xa9' # 0xA9 -> COPYRIGHT SIGN + '\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR + '\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xac' # 0xAC -> NOT SIGN + '\xad' # 0xAD -> SOFT HYPHEN + '\xae' # 0xAE -> REGISTERED SIGN + '\xaf' # 0xAF -> MACRON + '\xb0' # 0xB0 -> 
DEGREE SIGN + '\xb1' # 0xB1 -> PLUS-MINUS SIGN + '\xb2' # 0xB2 -> SUPERSCRIPT TWO + '\xb3' # 0xB3 -> SUPERSCRIPT THREE + '\xb4' # 0xB4 -> ACUTE ACCENT + '\xb5' # 0xB5 -> MICRO SIGN + '\xb6' # 0xB6 -> PILCROW SIGN + '\xb7' # 0xB7 -> MIDDLE DOT + '\xb8' # 0xB8 -> CEDILLA + '\xb9' # 0xB9 -> SUPERSCRIPT ONE + '\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR + '\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER + '\xbd' # 0xBD -> VULGAR FRACTION ONE HALF + '\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS + '\xbf' # 0xBF -> INVERTED QUESTION MARK + '\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE + '\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE + '\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX + '\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE + '\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS + '\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE + '\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE + '\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA + '\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE + '\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE + '\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX + '\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS + '\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE + '\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE + '\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX + '\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS + '\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH (Icelandic) + '\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE + '\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE + '\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE + '\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX + '\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE + '\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS + '\xd7' # 0xD7 -> MULTIPLICATION SIGN + '\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE + '\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE + '\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE + '\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX + '\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE + '\xde' # 0xDE -> LATIN CAPITAL LETTER THORN (Icelandic) + '\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S (German) + '\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE + '\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE + '\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX + '\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE + '\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS + '\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE + '\xe6' # 0xE6 -> LATIN SMALL LETTER AE + '\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA + '\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE + '\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE + '\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX + '\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS + '\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE + '\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE + '\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX + '\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS + '\xf0' # 0xF0 -> LATIN SMALL LETTER ETH (Icelandic) + '\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE + '\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE + '\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE + '\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX + '\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE + '\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS + '\xf7' # 
0xF7 -> DIVISION SIGN + '\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE + '\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE + '\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE + '\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX + '\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS + '\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE + '\xfe' # 0xFE -> LATIN SMALL LETTER THORN (Icelandic) + '\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/iso8859_10.py b/venv/Lib/encodings/iso8859_10.py new file mode 100644 index 00000000..b4fb0419 --- /dev/null +++ b/venv/Lib/encodings/iso8859_10.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec iso8859_10 generated from 'MAPPINGS/ISO8859/8859-10.TXT' with gencodec.py. + +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='iso8859-10', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x04' # 0x04 -> END OF TRANSMISSION + '\x05' # 0x05 -> ENQUIRY + '\x06' # 0x06 -> ACKNOWLEDGE + '\x07' # 0x07 -> BELL + '\x08' # 0x08 -> BACKSPACE + '\t' # 0x09 -> HORIZONTAL TABULATION + '\n' # 0x0A -> LINE FEED + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x14' # 0x14 -> DEVICE CONTROL FOUR + '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x16 -> SYNCHRONOUS IDLE + '\x17' # 0x17 -> END OF TRANSMISSION BLOCK + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x1a' # 0x1A -> SUBSTITUTE + '\x1b' # 0x1B -> ESCAPE + '\x1c' # 0x1C -> FILE SEPARATOR + '\x1d' # 0x1D -> GROUP SEPARATOR + '\x1e' # 0x1E -> RECORD SEPARATOR + '\x1f' # 0x1F -> UNIT SEPARATOR + ' ' # 0x20 -> SPACE + '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' 
# 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' # 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> DELETE + '\x80' # 0x80 -> + '\x81' # 0x81 -> + '\x82' # 0x82 -> + '\x83' # 0x83 -> + '\x84' # 0x84 -> + '\x85' # 0x85 -> + '\x86' # 0x86 -> + '\x87' # 0x87 -> + '\x88' # 0x88 -> + '\x89' # 0x89 -> + '\x8a' # 0x8A -> + '\x8b' # 0x8B -> + '\x8c' # 0x8C -> + '\x8d' # 0x8D -> + '\x8e' # 0x8E -> + '\x8f' # 0x8F -> + '\x90' # 0x90 -> + '\x91' # 0x91 -> + '\x92' # 0x92 -> + '\x93' # 0x93 -> + '\x94' # 0x94 -> + '\x95' # 0x95 -> + '\x96' # 0x96 -> + '\x97' # 0x97 -> + '\x98' # 0x98 -> + '\x99' # 0x99 -> + '\x9a' # 0x9A -> + '\x9b' # 0x9B -> + '\x9c' # 0x9C -> + '\x9d' # 0x9D -> + '\x9e' # 0x9E -> + '\x9f' # 0x9F -> + '\xa0' # 0xA0 -> NO-BREAK SPACE + '\u0104' # 0xA1 -> LATIN CAPITAL LETTER A WITH OGONEK + '\u0112' # 0xA2 -> 
LATIN CAPITAL LETTER E WITH MACRON + '\u0122' # 0xA3 -> LATIN CAPITAL LETTER G WITH CEDILLA + '\u012a' # 0xA4 -> LATIN CAPITAL LETTER I WITH MACRON + '\u0128' # 0xA5 -> LATIN CAPITAL LETTER I WITH TILDE + '\u0136' # 0xA6 -> LATIN CAPITAL LETTER K WITH CEDILLA + '\xa7' # 0xA7 -> SECTION SIGN + '\u013b' # 0xA8 -> LATIN CAPITAL LETTER L WITH CEDILLA + '\u0110' # 0xA9 -> LATIN CAPITAL LETTER D WITH STROKE + '\u0160' # 0xAA -> LATIN CAPITAL LETTER S WITH CARON + '\u0166' # 0xAB -> LATIN CAPITAL LETTER T WITH STROKE + '\u017d' # 0xAC -> LATIN CAPITAL LETTER Z WITH CARON + '\xad' # 0xAD -> SOFT HYPHEN + '\u016a' # 0xAE -> LATIN CAPITAL LETTER U WITH MACRON + '\u014a' # 0xAF -> LATIN CAPITAL LETTER ENG + '\xb0' # 0xB0 -> DEGREE SIGN + '\u0105' # 0xB1 -> LATIN SMALL LETTER A WITH OGONEK + '\u0113' # 0xB2 -> LATIN SMALL LETTER E WITH MACRON + '\u0123' # 0xB3 -> LATIN SMALL LETTER G WITH CEDILLA + '\u012b' # 0xB4 -> LATIN SMALL LETTER I WITH MACRON + '\u0129' # 0xB5 -> LATIN SMALL LETTER I WITH TILDE + '\u0137' # 0xB6 -> LATIN SMALL LETTER K WITH CEDILLA + '\xb7' # 0xB7 -> MIDDLE DOT + '\u013c' # 0xB8 -> LATIN SMALL LETTER L WITH CEDILLA + '\u0111' # 0xB9 -> LATIN SMALL LETTER D WITH STROKE + '\u0161' # 0xBA -> LATIN SMALL LETTER S WITH CARON + '\u0167' # 0xBB -> LATIN SMALL LETTER T WITH STROKE + '\u017e' # 0xBC -> LATIN SMALL LETTER Z WITH CARON + '\u2015' # 0xBD -> HORIZONTAL BAR + '\u016b' # 0xBE -> LATIN SMALL LETTER U WITH MACRON + '\u014b' # 0xBF -> LATIN SMALL LETTER ENG + '\u0100' # 0xC0 -> LATIN CAPITAL LETTER A WITH MACRON + '\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE + '\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX + '\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE + '\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS + '\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE + '\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE + '\u012e' # 0xC7 -> LATIN CAPITAL LETTER I WITH OGONEK + '\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON + '\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE + '\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK + '\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS + '\u0116' # 0xCC -> LATIN CAPITAL LETTER E WITH DOT ABOVE + '\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE + '\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX + '\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS + '\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH (Icelandic) + '\u0145' # 0xD1 -> LATIN CAPITAL LETTER N WITH CEDILLA + '\u014c' # 0xD2 -> LATIN CAPITAL LETTER O WITH MACRON + '\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE + '\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX + '\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE + '\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS + '\u0168' # 0xD7 -> LATIN CAPITAL LETTER U WITH TILDE + '\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE + '\u0172' # 0xD9 -> LATIN CAPITAL LETTER U WITH OGONEK + '\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE + '\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX + '\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE + '\xde' # 0xDE -> LATIN CAPITAL LETTER THORN (Icelandic) + '\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S (German) + '\u0101' # 0xE0 -> LATIN SMALL LETTER A WITH MACRON + '\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE + '\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX + '\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE + '\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS + '\xe5' # 0xE5 -> 
LATIN SMALL LETTER A WITH RING ABOVE + '\xe6' # 0xE6 -> LATIN SMALL LETTER AE + '\u012f' # 0xE7 -> LATIN SMALL LETTER I WITH OGONEK + '\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON + '\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE + '\u0119' # 0xEA -> LATIN SMALL LETTER E WITH OGONEK + '\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS + '\u0117' # 0xEC -> LATIN SMALL LETTER E WITH DOT ABOVE + '\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE + '\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX + '\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS + '\xf0' # 0xF0 -> LATIN SMALL LETTER ETH (Icelandic) + '\u0146' # 0xF1 -> LATIN SMALL LETTER N WITH CEDILLA + '\u014d' # 0xF2 -> LATIN SMALL LETTER O WITH MACRON + '\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE + '\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX + '\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE + '\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS + '\u0169' # 0xF7 -> LATIN SMALL LETTER U WITH TILDE + '\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE + '\u0173' # 0xF9 -> LATIN SMALL LETTER U WITH OGONEK + '\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE + '\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX + '\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS + '\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE + '\xfe' # 0xFE -> LATIN SMALL LETTER THORN (Icelandic) + '\u0138' # 0xFF -> LATIN SMALL LETTER KRA +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/iso8859_11.py b/venv/Lib/encodings/iso8859_11.py new file mode 100644 index 00000000..c7258ecf --- /dev/null +++ b/venv/Lib/encodings/iso8859_11.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec iso8859_11 generated from 'MAPPINGS/ISO8859/8859-11.TXT' with gencodec.py. 
+ +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='iso8859-11', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x04' # 0x04 -> END OF TRANSMISSION + '\x05' # 0x05 -> ENQUIRY + '\x06' # 0x06 -> ACKNOWLEDGE + '\x07' # 0x07 -> BELL + '\x08' # 0x08 -> BACKSPACE + '\t' # 0x09 -> HORIZONTAL TABULATION + '\n' # 0x0A -> LINE FEED + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x14' # 0x14 -> DEVICE CONTROL FOUR + '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x16 -> SYNCHRONOUS IDLE + '\x17' # 0x17 -> END OF TRANSMISSION BLOCK + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x1a' # 0x1A -> SUBSTITUTE + '\x1b' # 0x1B -> ESCAPE + '\x1c' # 0x1C -> FILE SEPARATOR + '\x1d' # 0x1D -> GROUP SEPARATOR + '\x1e' # 0x1E -> RECORD SEPARATOR + '\x1f' # 0x1F -> UNIT SEPARATOR + ' ' # 0x20 -> SPACE + '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' # 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' 
# 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> DELETE + '\x80' # 0x80 -> + '\x81' # 0x81 -> + '\x82' # 0x82 -> + '\x83' # 0x83 -> + '\x84' # 0x84 -> + '\x85' # 0x85 -> + '\x86' # 0x86 -> + '\x87' # 0x87 -> + '\x88' # 0x88 -> + '\x89' # 0x89 -> + '\x8a' # 0x8A -> + '\x8b' # 0x8B -> + '\x8c' # 0x8C -> + '\x8d' # 0x8D -> + '\x8e' # 0x8E -> + '\x8f' # 0x8F -> + '\x90' # 0x90 -> + '\x91' # 0x91 -> + '\x92' # 0x92 -> + '\x93' # 0x93 -> + '\x94' # 0x94 -> + '\x95' # 0x95 -> + '\x96' # 0x96 -> + '\x97' # 0x97 -> + '\x98' # 0x98 -> + '\x99' # 0x99 -> + '\x9a' # 0x9A -> + '\x9b' # 0x9B -> + '\x9c' # 0x9C -> + '\x9d' # 0x9D -> + '\x9e' # 0x9E -> + '\x9f' # 0x9F -> + '\xa0' # 0xA0 -> NO-BREAK SPACE + '\u0e01' # 0xA1 -> THAI CHARACTER KO KAI + '\u0e02' # 0xA2 -> THAI CHARACTER KHO KHAI + '\u0e03' # 0xA3 -> THAI CHARACTER KHO KHUAT + '\u0e04' # 0xA4 -> THAI CHARACTER KHO KHWAI + '\u0e05' # 0xA5 -> THAI CHARACTER KHO KHON + '\u0e06' # 0xA6 -> THAI CHARACTER KHO RAKHANG + '\u0e07' # 0xA7 -> THAI CHARACTER NGO NGU + '\u0e08' # 0xA8 -> THAI CHARACTER CHO CHAN + '\u0e09' # 0xA9 -> THAI CHARACTER CHO CHING + '\u0e0a' # 0xAA -> THAI CHARACTER CHO CHANG + '\u0e0b' # 0xAB -> THAI CHARACTER SO SO + '\u0e0c' # 0xAC -> THAI CHARACTER CHO 
CHOE + '\u0e0d' # 0xAD -> THAI CHARACTER YO YING + '\u0e0e' # 0xAE -> THAI CHARACTER DO CHADA + '\u0e0f' # 0xAF -> THAI CHARACTER TO PATAK + '\u0e10' # 0xB0 -> THAI CHARACTER THO THAN + '\u0e11' # 0xB1 -> THAI CHARACTER THO NANGMONTHO + '\u0e12' # 0xB2 -> THAI CHARACTER THO PHUTHAO + '\u0e13' # 0xB3 -> THAI CHARACTER NO NEN + '\u0e14' # 0xB4 -> THAI CHARACTER DO DEK + '\u0e15' # 0xB5 -> THAI CHARACTER TO TAO + '\u0e16' # 0xB6 -> THAI CHARACTER THO THUNG + '\u0e17' # 0xB7 -> THAI CHARACTER THO THAHAN + '\u0e18' # 0xB8 -> THAI CHARACTER THO THONG + '\u0e19' # 0xB9 -> THAI CHARACTER NO NU + '\u0e1a' # 0xBA -> THAI CHARACTER BO BAIMAI + '\u0e1b' # 0xBB -> THAI CHARACTER PO PLA + '\u0e1c' # 0xBC -> THAI CHARACTER PHO PHUNG + '\u0e1d' # 0xBD -> THAI CHARACTER FO FA + '\u0e1e' # 0xBE -> THAI CHARACTER PHO PHAN + '\u0e1f' # 0xBF -> THAI CHARACTER FO FAN + '\u0e20' # 0xC0 -> THAI CHARACTER PHO SAMPHAO + '\u0e21' # 0xC1 -> THAI CHARACTER MO MA + '\u0e22' # 0xC2 -> THAI CHARACTER YO YAK + '\u0e23' # 0xC3 -> THAI CHARACTER RO RUA + '\u0e24' # 0xC4 -> THAI CHARACTER RU + '\u0e25' # 0xC5 -> THAI CHARACTER LO LING + '\u0e26' # 0xC6 -> THAI CHARACTER LU + '\u0e27' # 0xC7 -> THAI CHARACTER WO WAEN + '\u0e28' # 0xC8 -> THAI CHARACTER SO SALA + '\u0e29' # 0xC9 -> THAI CHARACTER SO RUSI + '\u0e2a' # 0xCA -> THAI CHARACTER SO SUA + '\u0e2b' # 0xCB -> THAI CHARACTER HO HIP + '\u0e2c' # 0xCC -> THAI CHARACTER LO CHULA + '\u0e2d' # 0xCD -> THAI CHARACTER O ANG + '\u0e2e' # 0xCE -> THAI CHARACTER HO NOKHUK + '\u0e2f' # 0xCF -> THAI CHARACTER PAIYANNOI + '\u0e30' # 0xD0 -> THAI CHARACTER SARA A + '\u0e31' # 0xD1 -> THAI CHARACTER MAI HAN-AKAT + '\u0e32' # 0xD2 -> THAI CHARACTER SARA AA + '\u0e33' # 0xD3 -> THAI CHARACTER SARA AM + '\u0e34' # 0xD4 -> THAI CHARACTER SARA I + '\u0e35' # 0xD5 -> THAI CHARACTER SARA II + '\u0e36' # 0xD6 -> THAI CHARACTER SARA UE + '\u0e37' # 0xD7 -> THAI CHARACTER SARA UEE + '\u0e38' # 0xD8 -> THAI CHARACTER SARA U + '\u0e39' # 0xD9 -> THAI CHARACTER SARA UU + '\u0e3a' # 0xDA -> THAI CHARACTER PHINTHU + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\u0e3f' # 0xDF -> THAI CURRENCY SYMBOL BAHT + '\u0e40' # 0xE0 -> THAI CHARACTER SARA E + '\u0e41' # 0xE1 -> THAI CHARACTER SARA AE + '\u0e42' # 0xE2 -> THAI CHARACTER SARA O + '\u0e43' # 0xE3 -> THAI CHARACTER SARA AI MAIMUAN + '\u0e44' # 0xE4 -> THAI CHARACTER SARA AI MAIMALAI + '\u0e45' # 0xE5 -> THAI CHARACTER LAKKHANGYAO + '\u0e46' # 0xE6 -> THAI CHARACTER MAIYAMOK + '\u0e47' # 0xE7 -> THAI CHARACTER MAITAIKHU + '\u0e48' # 0xE8 -> THAI CHARACTER MAI EK + '\u0e49' # 0xE9 -> THAI CHARACTER MAI THO + '\u0e4a' # 0xEA -> THAI CHARACTER MAI TRI + '\u0e4b' # 0xEB -> THAI CHARACTER MAI CHATTAWA + '\u0e4c' # 0xEC -> THAI CHARACTER THANTHAKHAT + '\u0e4d' # 0xED -> THAI CHARACTER NIKHAHIT + '\u0e4e' # 0xEE -> THAI CHARACTER YAMAKKAN + '\u0e4f' # 0xEF -> THAI CHARACTER FONGMAN + '\u0e50' # 0xF0 -> THAI DIGIT ZERO + '\u0e51' # 0xF1 -> THAI DIGIT ONE + '\u0e52' # 0xF2 -> THAI DIGIT TWO + '\u0e53' # 0xF3 -> THAI DIGIT THREE + '\u0e54' # 0xF4 -> THAI DIGIT FOUR + '\u0e55' # 0xF5 -> THAI DIGIT FIVE + '\u0e56' # 0xF6 -> THAI DIGIT SIX + '\u0e57' # 0xF7 -> THAI DIGIT SEVEN + '\u0e58' # 0xF8 -> THAI DIGIT EIGHT + '\u0e59' # 0xF9 -> THAI DIGIT NINE + '\u0e5a' # 0xFA -> THAI CHARACTER ANGKHANKHU + '\u0e5b' # 0xFB -> THAI CHARACTER KHOMUT + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/iso8859_13.py b/venv/Lib/encodings/iso8859_13.py new file mode 100644 
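Each iso8859_* module in this batch is generated by gencodec.py from the same template: a 256-entry decoding_table string, an encoding_table built with codecs.charmap_build(), and thin wrappers around codecs.charmap_encode()/charmap_decode(); positions the standard leaves undefined are marked '\ufffe'. A small illustrative round trip against the iso8859-1 and iso8859-11 tables above, not part of the diff and using assumed sample characters:

# Latin-1 (iso8859-1): every byte value is defined, so single bytes always round-trip.
assert "é".encode("iso8859-1") == b"\xe9"
assert b"\xe9".decode("iso8859-1") == "é"

# Thai (iso8859-11): 0xA1 maps to THAI CHARACTER KO KAI in the decoding_table above.
assert b"\xa1".decode("iso8859-11") == "\u0e01"
assert "\u0e01".encode("iso8859-11") == b"\xa1"

# Bytes whose table entry is '\ufffe' (e.g. 0xDB in iso8859-11) are undefined,
# so strict decoding raises instead of guessing.
try:
    b"\xdb".decode("iso8859-11")
except UnicodeDecodeError as exc:
    print(exc)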
index 00000000..6f8eab2b --- /dev/null +++ b/venv/Lib/encodings/iso8859_13.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec iso8859_13 generated from 'MAPPINGS/ISO8859/8859-13.TXT' with gencodec.py. + +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='iso8859-13', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x04' # 0x04 -> END OF TRANSMISSION + '\x05' # 0x05 -> ENQUIRY + '\x06' # 0x06 -> ACKNOWLEDGE + '\x07' # 0x07 -> BELL + '\x08' # 0x08 -> BACKSPACE + '\t' # 0x09 -> HORIZONTAL TABULATION + '\n' # 0x0A -> LINE FEED + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x14' # 0x14 -> DEVICE CONTROL FOUR + '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x16 -> SYNCHRONOUS IDLE + '\x17' # 0x17 -> END OF TRANSMISSION BLOCK + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x1a' # 0x1A -> SUBSTITUTE + '\x1b' # 0x1B -> ESCAPE + '\x1c' # 0x1C -> FILE SEPARATOR + '\x1d' # 0x1D -> GROUP SEPARATOR + '\x1e' # 0x1E -> RECORD SEPARATOR + '\x1f' # 0x1F -> UNIT SEPARATOR + ' ' # 0x20 -> SPACE + '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' # 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' 
# 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> DELETE + '\x80' # 0x80 -> + '\x81' # 0x81 -> + '\x82' # 0x82 -> + '\x83' # 0x83 -> + '\x84' # 0x84 -> + '\x85' # 0x85 -> + '\x86' # 0x86 -> + '\x87' # 0x87 -> + '\x88' # 0x88 -> + '\x89' # 0x89 -> + '\x8a' # 0x8A -> + '\x8b' # 0x8B -> + '\x8c' # 0x8C -> + '\x8d' # 0x8D -> + '\x8e' # 0x8E -> + '\x8f' # 0x8F -> + '\x90' # 0x90 -> + '\x91' # 0x91 -> + '\x92' # 0x92 -> + '\x93' # 0x93 -> + '\x94' # 0x94 -> + '\x95' # 0x95 -> + '\x96' # 0x96 -> + '\x97' # 0x97 -> + '\x98' # 0x98 -> + '\x99' # 0x99 -> + '\x9a' # 0x9A -> + '\x9b' # 0x9B -> + '\x9c' # 0x9C -> + '\x9d' # 0x9D -> + '\x9e' # 0x9E -> + '\x9f' # 0x9F -> + '\xa0' # 0xA0 -> NO-BREAK SPACE + '\u201d' # 0xA1 -> RIGHT DOUBLE QUOTATION MARK + '\xa2' # 0xA2 -> CENT SIGN + '\xa3' # 0xA3 -> POUND SIGN + '\xa4' # 0xA4 -> CURRENCY SIGN + '\u201e' # 0xA5 -> DOUBLE LOW-9 QUOTATION MARK + '\xa6' # 0xA6 -> BROKEN BAR + '\xa7' # 0xA7 -> SECTION SIGN + '\xd8' # 0xA8 -> LATIN CAPITAL LETTER O WITH STROKE + '\xa9' # 0xA9 -> COPYRIGHT SIGN + '\u0156' # 0xAA -> LATIN CAPITAL LETTER R WITH CEDILLA + '\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xac' # 0xAC -> NOT SIGN + '\xad' # 0xAD -> SOFT HYPHEN + '\xae' # 0xAE -> 
REGISTERED SIGN + '\xc6' # 0xAF -> LATIN CAPITAL LETTER AE + '\xb0' # 0xB0 -> DEGREE SIGN + '\xb1' # 0xB1 -> PLUS-MINUS SIGN + '\xb2' # 0xB2 -> SUPERSCRIPT TWO + '\xb3' # 0xB3 -> SUPERSCRIPT THREE + '\u201c' # 0xB4 -> LEFT DOUBLE QUOTATION MARK + '\xb5' # 0xB5 -> MICRO SIGN + '\xb6' # 0xB6 -> PILCROW SIGN + '\xb7' # 0xB7 -> MIDDLE DOT + '\xf8' # 0xB8 -> LATIN SMALL LETTER O WITH STROKE + '\xb9' # 0xB9 -> SUPERSCRIPT ONE + '\u0157' # 0xBA -> LATIN SMALL LETTER R WITH CEDILLA + '\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER + '\xbd' # 0xBD -> VULGAR FRACTION ONE HALF + '\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS + '\xe6' # 0xBF -> LATIN SMALL LETTER AE + '\u0104' # 0xC0 -> LATIN CAPITAL LETTER A WITH OGONEK + '\u012e' # 0xC1 -> LATIN CAPITAL LETTER I WITH OGONEK + '\u0100' # 0xC2 -> LATIN CAPITAL LETTER A WITH MACRON + '\u0106' # 0xC3 -> LATIN CAPITAL LETTER C WITH ACUTE + '\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS + '\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE + '\u0118' # 0xC6 -> LATIN CAPITAL LETTER E WITH OGONEK + '\u0112' # 0xC7 -> LATIN CAPITAL LETTER E WITH MACRON + '\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON + '\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE + '\u0179' # 0xCA -> LATIN CAPITAL LETTER Z WITH ACUTE + '\u0116' # 0xCB -> LATIN CAPITAL LETTER E WITH DOT ABOVE + '\u0122' # 0xCC -> LATIN CAPITAL LETTER G WITH CEDILLA + '\u0136' # 0xCD -> LATIN CAPITAL LETTER K WITH CEDILLA + '\u012a' # 0xCE -> LATIN CAPITAL LETTER I WITH MACRON + '\u013b' # 0xCF -> LATIN CAPITAL LETTER L WITH CEDILLA + '\u0160' # 0xD0 -> LATIN CAPITAL LETTER S WITH CARON + '\u0143' # 0xD1 -> LATIN CAPITAL LETTER N WITH ACUTE + '\u0145' # 0xD2 -> LATIN CAPITAL LETTER N WITH CEDILLA + '\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE + '\u014c' # 0xD4 -> LATIN CAPITAL LETTER O WITH MACRON + '\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE + '\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS + '\xd7' # 0xD7 -> MULTIPLICATION SIGN + '\u0172' # 0xD8 -> LATIN CAPITAL LETTER U WITH OGONEK + '\u0141' # 0xD9 -> LATIN CAPITAL LETTER L WITH STROKE + '\u015a' # 0xDA -> LATIN CAPITAL LETTER S WITH ACUTE + '\u016a' # 0xDB -> LATIN CAPITAL LETTER U WITH MACRON + '\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\u017b' # 0xDD -> LATIN CAPITAL LETTER Z WITH DOT ABOVE + '\u017d' # 0xDE -> LATIN CAPITAL LETTER Z WITH CARON + '\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S (German) + '\u0105' # 0xE0 -> LATIN SMALL LETTER A WITH OGONEK + '\u012f' # 0xE1 -> LATIN SMALL LETTER I WITH OGONEK + '\u0101' # 0xE2 -> LATIN SMALL LETTER A WITH MACRON + '\u0107' # 0xE3 -> LATIN SMALL LETTER C WITH ACUTE + '\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS + '\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE + '\u0119' # 0xE6 -> LATIN SMALL LETTER E WITH OGONEK + '\u0113' # 0xE7 -> LATIN SMALL LETTER E WITH MACRON + '\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON + '\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE + '\u017a' # 0xEA -> LATIN SMALL LETTER Z WITH ACUTE + '\u0117' # 0xEB -> LATIN SMALL LETTER E WITH DOT ABOVE + '\u0123' # 0xEC -> LATIN SMALL LETTER G WITH CEDILLA + '\u0137' # 0xED -> LATIN SMALL LETTER K WITH CEDILLA + '\u012b' # 0xEE -> LATIN SMALL LETTER I WITH MACRON + '\u013c' # 0xEF -> LATIN SMALL LETTER L WITH CEDILLA + '\u0161' # 0xF0 -> LATIN SMALL LETTER S WITH CARON + '\u0144' # 0xF1 -> LATIN SMALL LETTER N WITH ACUTE + '\u0146' # 0xF2 -> LATIN SMALL LETTER N WITH CEDILLA + '\xf3' # 0xF3 -> LATIN SMALL 
LETTER O WITH ACUTE + '\u014d' # 0xF4 -> LATIN SMALL LETTER O WITH MACRON + '\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE + '\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS + '\xf7' # 0xF7 -> DIVISION SIGN + '\u0173' # 0xF8 -> LATIN SMALL LETTER U WITH OGONEK + '\u0142' # 0xF9 -> LATIN SMALL LETTER L WITH STROKE + '\u015b' # 0xFA -> LATIN SMALL LETTER S WITH ACUTE + '\u016b' # 0xFB -> LATIN SMALL LETTER U WITH MACRON + '\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS + '\u017c' # 0xFD -> LATIN SMALL LETTER Z WITH DOT ABOVE + '\u017e' # 0xFE -> LATIN SMALL LETTER Z WITH CARON + '\u2019' # 0xFF -> RIGHT SINGLE QUOTATION MARK +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/iso8859_14.py b/venv/Lib/encodings/iso8859_14.py new file mode 100644 index 00000000..7568d4ee --- /dev/null +++ b/venv/Lib/encodings/iso8859_14.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec iso8859_14 generated from 'MAPPINGS/ISO8859/8859-14.TXT' with gencodec.py. + +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='iso8859-14', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x04' # 0x04 -> END OF TRANSMISSION + '\x05' # 0x05 -> ENQUIRY + '\x06' # 0x06 -> ACKNOWLEDGE + '\x07' # 0x07 -> BELL + '\x08' # 0x08 -> BACKSPACE + '\t' # 0x09 -> HORIZONTAL TABULATION + '\n' # 0x0A -> LINE FEED + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x14' # 0x14 -> DEVICE CONTROL FOUR + '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x16 -> SYNCHRONOUS IDLE + '\x17' # 0x17 -> END OF TRANSMISSION BLOCK + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x1a' # 0x1A -> SUBSTITUTE + '\x1b' # 0x1B -> ESCAPE + '\x1c' # 0x1C -> FILE SEPARATOR + '\x1d' # 0x1D -> GROUP SEPARATOR + '\x1e' # 0x1E -> RECORD SEPARATOR + '\x1f' # 0x1F -> UNIT SEPARATOR + ' ' # 0x20 -> SPACE + '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' 
# 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' # 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> DELETE + '\x80' # 0x80 -> + '\x81' # 0x81 -> + '\x82' # 0x82 -> + '\x83' # 0x83 -> + '\x84' # 0x84 -> + '\x85' # 0x85 -> + '\x86' # 0x86 -> + '\x87' # 0x87 -> + '\x88' # 0x88 -> + '\x89' # 0x89 -> + '\x8a' # 0x8A -> + '\x8b' # 0x8B -> + '\x8c' # 0x8C -> + '\x8d' # 0x8D -> + '\x8e' # 0x8E -> + '\x8f' # 0x8F -> + '\x90' # 0x90 -> + '\x91' # 0x91 -> + '\x92' # 0x92 -> + '\x93' # 0x93 -> + '\x94' # 0x94 -> + '\x95' # 0x95 -> + '\x96' # 0x96 -> + '\x97' # 0x97 -> + '\x98' # 0x98 -> + '\x99' # 0x99 -> + '\x9a' # 0x9A -> + '\x9b' # 0x9B -> + '\x9c' # 0x9C -> + '\x9d' # 0x9D -> + '\x9e' # 0x9E -> + '\x9f' # 0x9F -> + '\xa0' # 0xA0 -> NO-BREAK SPACE + '\u1e02' # 0xA1 -> LATIN CAPITAL LETTER B WITH DOT ABOVE + '\u1e03' # 0xA2 
-> LATIN SMALL LETTER B WITH DOT ABOVE + '\xa3' # 0xA3 -> POUND SIGN + '\u010a' # 0xA4 -> LATIN CAPITAL LETTER C WITH DOT ABOVE + '\u010b' # 0xA5 -> LATIN SMALL LETTER C WITH DOT ABOVE + '\u1e0a' # 0xA6 -> LATIN CAPITAL LETTER D WITH DOT ABOVE + '\xa7' # 0xA7 -> SECTION SIGN + '\u1e80' # 0xA8 -> LATIN CAPITAL LETTER W WITH GRAVE + '\xa9' # 0xA9 -> COPYRIGHT SIGN + '\u1e82' # 0xAA -> LATIN CAPITAL LETTER W WITH ACUTE + '\u1e0b' # 0xAB -> LATIN SMALL LETTER D WITH DOT ABOVE + '\u1ef2' # 0xAC -> LATIN CAPITAL LETTER Y WITH GRAVE + '\xad' # 0xAD -> SOFT HYPHEN + '\xae' # 0xAE -> REGISTERED SIGN + '\u0178' # 0xAF -> LATIN CAPITAL LETTER Y WITH DIAERESIS + '\u1e1e' # 0xB0 -> LATIN CAPITAL LETTER F WITH DOT ABOVE + '\u1e1f' # 0xB1 -> LATIN SMALL LETTER F WITH DOT ABOVE + '\u0120' # 0xB2 -> LATIN CAPITAL LETTER G WITH DOT ABOVE + '\u0121' # 0xB3 -> LATIN SMALL LETTER G WITH DOT ABOVE + '\u1e40' # 0xB4 -> LATIN CAPITAL LETTER M WITH DOT ABOVE + '\u1e41' # 0xB5 -> LATIN SMALL LETTER M WITH DOT ABOVE + '\xb6' # 0xB6 -> PILCROW SIGN + '\u1e56' # 0xB7 -> LATIN CAPITAL LETTER P WITH DOT ABOVE + '\u1e81' # 0xB8 -> LATIN SMALL LETTER W WITH GRAVE + '\u1e57' # 0xB9 -> LATIN SMALL LETTER P WITH DOT ABOVE + '\u1e83' # 0xBA -> LATIN SMALL LETTER W WITH ACUTE + '\u1e60' # 0xBB -> LATIN CAPITAL LETTER S WITH DOT ABOVE + '\u1ef3' # 0xBC -> LATIN SMALL LETTER Y WITH GRAVE + '\u1e84' # 0xBD -> LATIN CAPITAL LETTER W WITH DIAERESIS + '\u1e85' # 0xBE -> LATIN SMALL LETTER W WITH DIAERESIS + '\u1e61' # 0xBF -> LATIN SMALL LETTER S WITH DOT ABOVE + '\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE + '\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE + '\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX + '\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE + '\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS + '\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE + '\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE + '\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA + '\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE + '\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE + '\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX + '\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS + '\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE + '\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE + '\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX + '\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS + '\u0174' # 0xD0 -> LATIN CAPITAL LETTER W WITH CIRCUMFLEX + '\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE + '\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE + '\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE + '\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX + '\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE + '\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS + '\u1e6a' # 0xD7 -> LATIN CAPITAL LETTER T WITH DOT ABOVE + '\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE + '\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE + '\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE + '\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX + '\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE + '\u0176' # 0xDE -> LATIN CAPITAL LETTER Y WITH CIRCUMFLEX + '\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S + '\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE + '\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE + '\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX + '\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE + '\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH 
DIAERESIS + '\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE + '\xe6' # 0xE6 -> LATIN SMALL LETTER AE + '\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA + '\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE + '\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE + '\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX + '\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS + '\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE + '\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE + '\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX + '\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS + '\u0175' # 0xF0 -> LATIN SMALL LETTER W WITH CIRCUMFLEX + '\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE + '\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE + '\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE + '\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX + '\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE + '\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS + '\u1e6b' # 0xF7 -> LATIN SMALL LETTER T WITH DOT ABOVE + '\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE + '\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE + '\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE + '\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX + '\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS + '\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE + '\u0177' # 0xFE -> LATIN SMALL LETTER Y WITH CIRCUMFLEX + '\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/iso8859_15.py b/venv/Lib/encodings/iso8859_15.py new file mode 100644 index 00000000..43bdecd4 --- /dev/null +++ b/venv/Lib/encodings/iso8859_15.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec iso8859_15 generated from 'MAPPINGS/ISO8859/8859-15.TXT' with gencodec.py. 
+ +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='iso8859-15', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x04' # 0x04 -> END OF TRANSMISSION + '\x05' # 0x05 -> ENQUIRY + '\x06' # 0x06 -> ACKNOWLEDGE + '\x07' # 0x07 -> BELL + '\x08' # 0x08 -> BACKSPACE + '\t' # 0x09 -> HORIZONTAL TABULATION + '\n' # 0x0A -> LINE FEED + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x14' # 0x14 -> DEVICE CONTROL FOUR + '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x16 -> SYNCHRONOUS IDLE + '\x17' # 0x17 -> END OF TRANSMISSION BLOCK + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x1a' # 0x1A -> SUBSTITUTE + '\x1b' # 0x1B -> ESCAPE + '\x1c' # 0x1C -> FILE SEPARATOR + '\x1d' # 0x1D -> GROUP SEPARATOR + '\x1e' # 0x1E -> RECORD SEPARATOR + '\x1f' # 0x1F -> UNIT SEPARATOR + ' ' # 0x20 -> SPACE + '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' # 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' 
# 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> DELETE + '\x80' # 0x80 -> + '\x81' # 0x81 -> + '\x82' # 0x82 -> + '\x83' # 0x83 -> + '\x84' # 0x84 -> + '\x85' # 0x85 -> + '\x86' # 0x86 -> + '\x87' # 0x87 -> + '\x88' # 0x88 -> + '\x89' # 0x89 -> + '\x8a' # 0x8A -> + '\x8b' # 0x8B -> + '\x8c' # 0x8C -> + '\x8d' # 0x8D -> + '\x8e' # 0x8E -> + '\x8f' # 0x8F -> + '\x90' # 0x90 -> + '\x91' # 0x91 -> + '\x92' # 0x92 -> + '\x93' # 0x93 -> + '\x94' # 0x94 -> + '\x95' # 0x95 -> + '\x96' # 0x96 -> + '\x97' # 0x97 -> + '\x98' # 0x98 -> + '\x99' # 0x99 -> + '\x9a' # 0x9A -> + '\x9b' # 0x9B -> + '\x9c' # 0x9C -> + '\x9d' # 0x9D -> + '\x9e' # 0x9E -> + '\x9f' # 0x9F -> + '\xa0' # 0xA0 -> NO-BREAK SPACE + '\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK + '\xa2' # 0xA2 -> CENT SIGN + '\xa3' # 0xA3 -> POUND SIGN + '\u20ac' # 0xA4 -> EURO SIGN + '\xa5' # 0xA5 -> YEN SIGN + '\u0160' # 0xA6 -> LATIN CAPITAL LETTER S WITH CARON + '\xa7' # 0xA7 -> SECTION SIGN + '\u0161' # 0xA8 -> LATIN SMALL LETTER S WITH CARON + '\xa9' # 0xA9 -> COPYRIGHT SIGN + '\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR + '\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xac' # 0xAC -> NOT SIGN + '\xad' # 0xAD -> SOFT HYPHEN + '\xae' # 0xAE -> REGISTERED SIGN + 
'\xaf' # 0xAF -> MACRON + '\xb0' # 0xB0 -> DEGREE SIGN + '\xb1' # 0xB1 -> PLUS-MINUS SIGN + '\xb2' # 0xB2 -> SUPERSCRIPT TWO + '\xb3' # 0xB3 -> SUPERSCRIPT THREE + '\u017d' # 0xB4 -> LATIN CAPITAL LETTER Z WITH CARON + '\xb5' # 0xB5 -> MICRO SIGN + '\xb6' # 0xB6 -> PILCROW SIGN + '\xb7' # 0xB7 -> MIDDLE DOT + '\u017e' # 0xB8 -> LATIN SMALL LETTER Z WITH CARON + '\xb9' # 0xB9 -> SUPERSCRIPT ONE + '\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR + '\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\u0152' # 0xBC -> LATIN CAPITAL LIGATURE OE + '\u0153' # 0xBD -> LATIN SMALL LIGATURE OE + '\u0178' # 0xBE -> LATIN CAPITAL LETTER Y WITH DIAERESIS + '\xbf' # 0xBF -> INVERTED QUESTION MARK + '\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE + '\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE + '\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX + '\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE + '\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS + '\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE + '\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE + '\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA + '\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE + '\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE + '\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX + '\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS + '\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE + '\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE + '\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX + '\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS + '\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH + '\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE + '\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE + '\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE + '\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX + '\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE + '\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS + '\xd7' # 0xD7 -> MULTIPLICATION SIGN + '\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE + '\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE + '\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE + '\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX + '\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE + '\xde' # 0xDE -> LATIN CAPITAL LETTER THORN + '\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S + '\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE + '\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE + '\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX + '\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE + '\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS + '\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE + '\xe6' # 0xE6 -> LATIN SMALL LETTER AE + '\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA + '\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE + '\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE + '\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX + '\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS + '\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE + '\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE + '\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX + '\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS + '\xf0' # 0xF0 -> LATIN SMALL LETTER ETH + '\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE + '\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE + '\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE + '\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX + '\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE + '\xf6' # 
0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS + '\xf7' # 0xF7 -> DIVISION SIGN + '\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE + '\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE + '\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE + '\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX + '\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS + '\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE + '\xfe' # 0xFE -> LATIN SMALL LETTER THORN + '\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/iso8859_16.py b/venv/Lib/encodings/iso8859_16.py new file mode 100644 index 00000000..e70c96e3 --- /dev/null +++ b/venv/Lib/encodings/iso8859_16.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec iso8859_16 generated from 'MAPPINGS/ISO8859/8859-16.TXT' with gencodec.py. + +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='iso8859-16', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x04' # 0x04 -> END OF TRANSMISSION + '\x05' # 0x05 -> ENQUIRY + '\x06' # 0x06 -> ACKNOWLEDGE + '\x07' # 0x07 -> BELL + '\x08' # 0x08 -> BACKSPACE + '\t' # 0x09 -> HORIZONTAL TABULATION + '\n' # 0x0A -> LINE FEED + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x14' # 0x14 -> DEVICE CONTROL FOUR + '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x16 -> SYNCHRONOUS IDLE + '\x17' # 0x17 -> END OF TRANSMISSION BLOCK + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x1a' # 0x1A -> SUBSTITUTE + '\x1b' # 0x1B -> ESCAPE + '\x1c' # 0x1C -> FILE SEPARATOR + '\x1d' # 0x1D -> GROUP SEPARATOR + '\x1e' # 0x1E -> RECORD SEPARATOR + '\x1f' # 0x1F -> UNIT SEPARATOR + ' ' # 0x20 -> SPACE + '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' 
# 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' # 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> DELETE + '\x80' # 0x80 -> + '\x81' # 0x81 -> + '\x82' # 0x82 -> + '\x83' # 0x83 -> + '\x84' # 0x84 -> + '\x85' # 0x85 -> + '\x86' # 0x86 -> + '\x87' # 0x87 -> + '\x88' # 0x88 -> + '\x89' # 0x89 -> + '\x8a' # 0x8A -> + '\x8b' # 0x8B -> + '\x8c' # 0x8C -> + '\x8d' # 0x8D -> + '\x8e' # 0x8E -> + '\x8f' # 0x8F -> + '\x90' # 0x90 -> + '\x91' # 0x91 -> + '\x92' # 0x92 -> + '\x93' # 0x93 -> + '\x94' # 0x94 -> + '\x95' # 0x95 -> + '\x96' # 0x96 -> + '\x97' # 0x97 -> + '\x98' # 0x98 -> + '\x99' # 0x99 -> + '\x9a' # 0x9A -> + '\x9b' # 0x9B -> + '\x9c' # 0x9C -> + '\x9d' # 0x9D -> + '\x9e' # 0x9E -> + '\x9f' # 0x9F -> + '\xa0' # 0xA0 -> NO-BREAK SPACE + '\u0104' # 0xA1 -> LATIN CAPITAL LETTER A WITH OGONEK + '\u0105' # 0xA2 -> 
LATIN SMALL LETTER A WITH OGONEK + '\u0141' # 0xA3 -> LATIN CAPITAL LETTER L WITH STROKE + '\u20ac' # 0xA4 -> EURO SIGN + '\u201e' # 0xA5 -> DOUBLE LOW-9 QUOTATION MARK + '\u0160' # 0xA6 -> LATIN CAPITAL LETTER S WITH CARON + '\xa7' # 0xA7 -> SECTION SIGN + '\u0161' # 0xA8 -> LATIN SMALL LETTER S WITH CARON + '\xa9' # 0xA9 -> COPYRIGHT SIGN + '\u0218' # 0xAA -> LATIN CAPITAL LETTER S WITH COMMA BELOW + '\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\u0179' # 0xAC -> LATIN CAPITAL LETTER Z WITH ACUTE + '\xad' # 0xAD -> SOFT HYPHEN + '\u017a' # 0xAE -> LATIN SMALL LETTER Z WITH ACUTE + '\u017b' # 0xAF -> LATIN CAPITAL LETTER Z WITH DOT ABOVE + '\xb0' # 0xB0 -> DEGREE SIGN + '\xb1' # 0xB1 -> PLUS-MINUS SIGN + '\u010c' # 0xB2 -> LATIN CAPITAL LETTER C WITH CARON + '\u0142' # 0xB3 -> LATIN SMALL LETTER L WITH STROKE + '\u017d' # 0xB4 -> LATIN CAPITAL LETTER Z WITH CARON + '\u201d' # 0xB5 -> RIGHT DOUBLE QUOTATION MARK + '\xb6' # 0xB6 -> PILCROW SIGN + '\xb7' # 0xB7 -> MIDDLE DOT + '\u017e' # 0xB8 -> LATIN SMALL LETTER Z WITH CARON + '\u010d' # 0xB9 -> LATIN SMALL LETTER C WITH CARON + '\u0219' # 0xBA -> LATIN SMALL LETTER S WITH COMMA BELOW + '\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\u0152' # 0xBC -> LATIN CAPITAL LIGATURE OE + '\u0153' # 0xBD -> LATIN SMALL LIGATURE OE + '\u0178' # 0xBE -> LATIN CAPITAL LETTER Y WITH DIAERESIS + '\u017c' # 0xBF -> LATIN SMALL LETTER Z WITH DOT ABOVE + '\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE + '\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE + '\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX + '\u0102' # 0xC3 -> LATIN CAPITAL LETTER A WITH BREVE + '\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS + '\u0106' # 0xC5 -> LATIN CAPITAL LETTER C WITH ACUTE + '\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE + '\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA + '\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE + '\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE + '\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX + '\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS + '\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE + '\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE + '\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX + '\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS + '\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE + '\u0143' # 0xD1 -> LATIN CAPITAL LETTER N WITH ACUTE + '\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE + '\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE + '\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX + '\u0150' # 0xD5 -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE + '\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS + '\u015a' # 0xD7 -> LATIN CAPITAL LETTER S WITH ACUTE + '\u0170' # 0xD8 -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE + '\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE + '\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE + '\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX + '\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\u0118' # 0xDD -> LATIN CAPITAL LETTER E WITH OGONEK + '\u021a' # 0xDE -> LATIN CAPITAL LETTER T WITH COMMA BELOW + '\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S + '\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE + '\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE + '\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX + '\u0103' # 0xE3 -> LATIN SMALL LETTER A WITH BREVE + '\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS + '\u0107' # 0xE5 -> LATIN SMALL LETTER C WITH ACUTE + '\xe6' # 0xE6 -> LATIN SMALL 
LETTER AE + '\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA + '\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE + '\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE + '\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX + '\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS + '\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE + '\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE + '\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX + '\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS + '\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE + '\u0144' # 0xF1 -> LATIN SMALL LETTER N WITH ACUTE + '\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE + '\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE + '\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX + '\u0151' # 0xF5 -> LATIN SMALL LETTER O WITH DOUBLE ACUTE + '\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS + '\u015b' # 0xF7 -> LATIN SMALL LETTER S WITH ACUTE + '\u0171' # 0xF8 -> LATIN SMALL LETTER U WITH DOUBLE ACUTE + '\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE + '\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE + '\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX + '\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS + '\u0119' # 0xFD -> LATIN SMALL LETTER E WITH OGONEK + '\u021b' # 0xFE -> LATIN SMALL LETTER T WITH COMMA BELOW + '\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/iso8859_2.py b/venv/Lib/encodings/iso8859_2.py new file mode 100644 index 00000000..3698747b --- /dev/null +++ b/venv/Lib/encodings/iso8859_2.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec iso8859_2 generated from 'MAPPINGS/ISO8859/8859-2.TXT' with gencodec.py. + +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='iso8859-2', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x04' # 0x04 -> END OF TRANSMISSION + '\x05' # 0x05 -> ENQUIRY + '\x06' # 0x06 -> ACKNOWLEDGE + '\x07' # 0x07 -> BELL + '\x08' # 0x08 -> BACKSPACE + '\t' # 0x09 -> HORIZONTAL TABULATION + '\n' # 0x0A -> LINE FEED + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x14' # 0x14 -> DEVICE CONTROL FOUR + '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x16 -> 
SYNCHRONOUS IDLE + '\x17' # 0x17 -> END OF TRANSMISSION BLOCK + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x1a' # 0x1A -> SUBSTITUTE + '\x1b' # 0x1B -> ESCAPE + '\x1c' # 0x1C -> FILE SEPARATOR + '\x1d' # 0x1D -> GROUP SEPARATOR + '\x1e' # 0x1E -> RECORD SEPARATOR + '\x1f' # 0x1F -> UNIT SEPARATOR + ' ' # 0x20 -> SPACE + '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' # 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' # 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> DELETE + 
'\x80' # 0x80 -> + '\x81' # 0x81 -> + '\x82' # 0x82 -> + '\x83' # 0x83 -> + '\x84' # 0x84 -> + '\x85' # 0x85 -> + '\x86' # 0x86 -> + '\x87' # 0x87 -> + '\x88' # 0x88 -> + '\x89' # 0x89 -> + '\x8a' # 0x8A -> + '\x8b' # 0x8B -> + '\x8c' # 0x8C -> + '\x8d' # 0x8D -> + '\x8e' # 0x8E -> + '\x8f' # 0x8F -> + '\x90' # 0x90 -> + '\x91' # 0x91 -> + '\x92' # 0x92 -> + '\x93' # 0x93 -> + '\x94' # 0x94 -> + '\x95' # 0x95 -> + '\x96' # 0x96 -> + '\x97' # 0x97 -> + '\x98' # 0x98 -> + '\x99' # 0x99 -> + '\x9a' # 0x9A -> + '\x9b' # 0x9B -> + '\x9c' # 0x9C -> + '\x9d' # 0x9D -> + '\x9e' # 0x9E -> + '\x9f' # 0x9F -> + '\xa0' # 0xA0 -> NO-BREAK SPACE + '\u0104' # 0xA1 -> LATIN CAPITAL LETTER A WITH OGONEK + '\u02d8' # 0xA2 -> BREVE + '\u0141' # 0xA3 -> LATIN CAPITAL LETTER L WITH STROKE + '\xa4' # 0xA4 -> CURRENCY SIGN + '\u013d' # 0xA5 -> LATIN CAPITAL LETTER L WITH CARON + '\u015a' # 0xA6 -> LATIN CAPITAL LETTER S WITH ACUTE + '\xa7' # 0xA7 -> SECTION SIGN + '\xa8' # 0xA8 -> DIAERESIS + '\u0160' # 0xA9 -> LATIN CAPITAL LETTER S WITH CARON + '\u015e' # 0xAA -> LATIN CAPITAL LETTER S WITH CEDILLA + '\u0164' # 0xAB -> LATIN CAPITAL LETTER T WITH CARON + '\u0179' # 0xAC -> LATIN CAPITAL LETTER Z WITH ACUTE + '\xad' # 0xAD -> SOFT HYPHEN + '\u017d' # 0xAE -> LATIN CAPITAL LETTER Z WITH CARON + '\u017b' # 0xAF -> LATIN CAPITAL LETTER Z WITH DOT ABOVE + '\xb0' # 0xB0 -> DEGREE SIGN + '\u0105' # 0xB1 -> LATIN SMALL LETTER A WITH OGONEK + '\u02db' # 0xB2 -> OGONEK + '\u0142' # 0xB3 -> LATIN SMALL LETTER L WITH STROKE + '\xb4' # 0xB4 -> ACUTE ACCENT + '\u013e' # 0xB5 -> LATIN SMALL LETTER L WITH CARON + '\u015b' # 0xB6 -> LATIN SMALL LETTER S WITH ACUTE + '\u02c7' # 0xB7 -> CARON + '\xb8' # 0xB8 -> CEDILLA + '\u0161' # 0xB9 -> LATIN SMALL LETTER S WITH CARON + '\u015f' # 0xBA -> LATIN SMALL LETTER S WITH CEDILLA + '\u0165' # 0xBB -> LATIN SMALL LETTER T WITH CARON + '\u017a' # 0xBC -> LATIN SMALL LETTER Z WITH ACUTE + '\u02dd' # 0xBD -> DOUBLE ACUTE ACCENT + '\u017e' # 0xBE -> LATIN SMALL LETTER Z WITH CARON + '\u017c' # 0xBF -> LATIN SMALL LETTER Z WITH DOT ABOVE + '\u0154' # 0xC0 -> LATIN CAPITAL LETTER R WITH ACUTE + '\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE + '\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX + '\u0102' # 0xC3 -> LATIN CAPITAL LETTER A WITH BREVE + '\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS + '\u0139' # 0xC5 -> LATIN CAPITAL LETTER L WITH ACUTE + '\u0106' # 0xC6 -> LATIN CAPITAL LETTER C WITH ACUTE + '\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA + '\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON + '\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE + '\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK + '\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS + '\u011a' # 0xCC -> LATIN CAPITAL LETTER E WITH CARON + '\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE + '\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX + '\u010e' # 0xCF -> LATIN CAPITAL LETTER D WITH CARON + '\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE + '\u0143' # 0xD1 -> LATIN CAPITAL LETTER N WITH ACUTE + '\u0147' # 0xD2 -> LATIN CAPITAL LETTER N WITH CARON + '\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE + '\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX + '\u0150' # 0xD5 -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE + '\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS + '\xd7' # 0xD7 -> MULTIPLICATION SIGN + '\u0158' # 0xD8 -> LATIN CAPITAL LETTER R WITH CARON + '\u016e' # 0xD9 -> LATIN CAPITAL LETTER U WITH RING ABOVE + '\xda' # 0xDA -> LATIN CAPITAL LETTER U 
WITH ACUTE + '\u0170' # 0xDB -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE + '\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE + '\u0162' # 0xDE -> LATIN CAPITAL LETTER T WITH CEDILLA + '\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S + '\u0155' # 0xE0 -> LATIN SMALL LETTER R WITH ACUTE + '\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE + '\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX + '\u0103' # 0xE3 -> LATIN SMALL LETTER A WITH BREVE + '\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS + '\u013a' # 0xE5 -> LATIN SMALL LETTER L WITH ACUTE + '\u0107' # 0xE6 -> LATIN SMALL LETTER C WITH ACUTE + '\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA + '\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON + '\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE + '\u0119' # 0xEA -> LATIN SMALL LETTER E WITH OGONEK + '\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS + '\u011b' # 0xEC -> LATIN SMALL LETTER E WITH CARON + '\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE + '\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX + '\u010f' # 0xEF -> LATIN SMALL LETTER D WITH CARON + '\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE + '\u0144' # 0xF1 -> LATIN SMALL LETTER N WITH ACUTE + '\u0148' # 0xF2 -> LATIN SMALL LETTER N WITH CARON + '\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE + '\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX + '\u0151' # 0xF5 -> LATIN SMALL LETTER O WITH DOUBLE ACUTE + '\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS + '\xf7' # 0xF7 -> DIVISION SIGN + '\u0159' # 0xF8 -> LATIN SMALL LETTER R WITH CARON + '\u016f' # 0xF9 -> LATIN SMALL LETTER U WITH RING ABOVE + '\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE + '\u0171' # 0xFB -> LATIN SMALL LETTER U WITH DOUBLE ACUTE + '\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS + '\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE + '\u0163' # 0xFE -> LATIN SMALL LETTER T WITH CEDILLA + '\u02d9' # 0xFF -> DOT ABOVE +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/iso8859_3.py b/venv/Lib/encodings/iso8859_3.py new file mode 100644 index 00000000..96d3063e --- /dev/null +++ b/venv/Lib/encodings/iso8859_3.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec iso8859_3 generated from 'MAPPINGS/ISO8859/8859-3.TXT' with gencodec.py. 
+ +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='iso8859-3', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x04' # 0x04 -> END OF TRANSMISSION + '\x05' # 0x05 -> ENQUIRY + '\x06' # 0x06 -> ACKNOWLEDGE + '\x07' # 0x07 -> BELL + '\x08' # 0x08 -> BACKSPACE + '\t' # 0x09 -> HORIZONTAL TABULATION + '\n' # 0x0A -> LINE FEED + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x14' # 0x14 -> DEVICE CONTROL FOUR + '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x16 -> SYNCHRONOUS IDLE + '\x17' # 0x17 -> END OF TRANSMISSION BLOCK + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x1a' # 0x1A -> SUBSTITUTE + '\x1b' # 0x1B -> ESCAPE + '\x1c' # 0x1C -> FILE SEPARATOR + '\x1d' # 0x1D -> GROUP SEPARATOR + '\x1e' # 0x1E -> RECORD SEPARATOR + '\x1f' # 0x1F -> UNIT SEPARATOR + ' ' # 0x20 -> SPACE + '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' # 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' 
# 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> DELETE + '\x80' # 0x80 -> + '\x81' # 0x81 -> + '\x82' # 0x82 -> + '\x83' # 0x83 -> + '\x84' # 0x84 -> + '\x85' # 0x85 -> + '\x86' # 0x86 -> + '\x87' # 0x87 -> + '\x88' # 0x88 -> + '\x89' # 0x89 -> + '\x8a' # 0x8A -> + '\x8b' # 0x8B -> + '\x8c' # 0x8C -> + '\x8d' # 0x8D -> + '\x8e' # 0x8E -> + '\x8f' # 0x8F -> + '\x90' # 0x90 -> + '\x91' # 0x91 -> + '\x92' # 0x92 -> + '\x93' # 0x93 -> + '\x94' # 0x94 -> + '\x95' # 0x95 -> + '\x96' # 0x96 -> + '\x97' # 0x97 -> + '\x98' # 0x98 -> + '\x99' # 0x99 -> + '\x9a' # 0x9A -> + '\x9b' # 0x9B -> + '\x9c' # 0x9C -> + '\x9d' # 0x9D -> + '\x9e' # 0x9E -> + '\x9f' # 0x9F -> + '\xa0' # 0xA0 -> NO-BREAK SPACE + '\u0126' # 0xA1 -> LATIN CAPITAL LETTER H WITH STROKE + '\u02d8' # 0xA2 -> BREVE + '\xa3' # 0xA3 -> POUND SIGN + '\xa4' # 0xA4 -> CURRENCY SIGN + '\ufffe' + '\u0124' # 0xA6 -> LATIN CAPITAL LETTER H WITH CIRCUMFLEX + '\xa7' # 0xA7 -> SECTION SIGN + '\xa8' # 0xA8 -> DIAERESIS + '\u0130' # 0xA9 -> LATIN CAPITAL LETTER I WITH DOT ABOVE + '\u015e' # 0xAA -> LATIN CAPITAL LETTER S WITH CEDILLA + '\u011e' # 0xAB -> LATIN CAPITAL LETTER G WITH BREVE + '\u0134' # 0xAC -> LATIN CAPITAL LETTER J WITH CIRCUMFLEX + '\xad' # 0xAD -> SOFT HYPHEN 
+ '\ufffe' + '\u017b' # 0xAF -> LATIN CAPITAL LETTER Z WITH DOT ABOVE + '\xb0' # 0xB0 -> DEGREE SIGN + '\u0127' # 0xB1 -> LATIN SMALL LETTER H WITH STROKE + '\xb2' # 0xB2 -> SUPERSCRIPT TWO + '\xb3' # 0xB3 -> SUPERSCRIPT THREE + '\xb4' # 0xB4 -> ACUTE ACCENT + '\xb5' # 0xB5 -> MICRO SIGN + '\u0125' # 0xB6 -> LATIN SMALL LETTER H WITH CIRCUMFLEX + '\xb7' # 0xB7 -> MIDDLE DOT + '\xb8' # 0xB8 -> CEDILLA + '\u0131' # 0xB9 -> LATIN SMALL LETTER DOTLESS I + '\u015f' # 0xBA -> LATIN SMALL LETTER S WITH CEDILLA + '\u011f' # 0xBB -> LATIN SMALL LETTER G WITH BREVE + '\u0135' # 0xBC -> LATIN SMALL LETTER J WITH CIRCUMFLEX + '\xbd' # 0xBD -> VULGAR FRACTION ONE HALF + '\ufffe' + '\u017c' # 0xBF -> LATIN SMALL LETTER Z WITH DOT ABOVE + '\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE + '\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE + '\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX + '\ufffe' + '\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS + '\u010a' # 0xC5 -> LATIN CAPITAL LETTER C WITH DOT ABOVE + '\u0108' # 0xC6 -> LATIN CAPITAL LETTER C WITH CIRCUMFLEX + '\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA + '\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE + '\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE + '\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX + '\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS + '\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE + '\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE + '\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX + '\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS + '\ufffe' + '\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE + '\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE + '\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE + '\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX + '\u0120' # 0xD5 -> LATIN CAPITAL LETTER G WITH DOT ABOVE + '\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS + '\xd7' # 0xD7 -> MULTIPLICATION SIGN + '\u011c' # 0xD8 -> LATIN CAPITAL LETTER G WITH CIRCUMFLEX + '\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE + '\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE + '\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX + '\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\u016c' # 0xDD -> LATIN CAPITAL LETTER U WITH BREVE + '\u015c' # 0xDE -> LATIN CAPITAL LETTER S WITH CIRCUMFLEX + '\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S + '\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE + '\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE + '\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX + '\ufffe' + '\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS + '\u010b' # 0xE5 -> LATIN SMALL LETTER C WITH DOT ABOVE + '\u0109' # 0xE6 -> LATIN SMALL LETTER C WITH CIRCUMFLEX + '\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA + '\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE + '\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE + '\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX + '\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS + '\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE + '\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE + '\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX + '\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS + '\ufffe' + '\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE + '\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE + '\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE + '\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX + '\u0121' # 0xF5 -> LATIN SMALL LETTER G WITH DOT ABOVE + '\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS 
+ '\xf7' # 0xF7 -> DIVISION SIGN + '\u011d' # 0xF8 -> LATIN SMALL LETTER G WITH CIRCUMFLEX + '\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE + '\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE + '\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX + '\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS + '\u016d' # 0xFD -> LATIN SMALL LETTER U WITH BREVE + '\u015d' # 0xFE -> LATIN SMALL LETTER S WITH CIRCUMFLEX + '\u02d9' # 0xFF -> DOT ABOVE +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/iso8859_4.py b/venv/Lib/encodings/iso8859_4.py new file mode 100644 index 00000000..65c1e00f --- /dev/null +++ b/venv/Lib/encodings/iso8859_4.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec iso8859_4 generated from 'MAPPINGS/ISO8859/8859-4.TXT' with gencodec.py. + +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='iso8859-4', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x04' # 0x04 -> END OF TRANSMISSION + '\x05' # 0x05 -> ENQUIRY + '\x06' # 0x06 -> ACKNOWLEDGE + '\x07' # 0x07 -> BELL + '\x08' # 0x08 -> BACKSPACE + '\t' # 0x09 -> HORIZONTAL TABULATION + '\n' # 0x0A -> LINE FEED + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x14' # 0x14 -> DEVICE CONTROL FOUR + '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x16 -> SYNCHRONOUS IDLE + '\x17' # 0x17 -> END OF TRANSMISSION BLOCK + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x1a' # 0x1A -> SUBSTITUTE + '\x1b' # 0x1B -> ESCAPE + '\x1c' # 0x1C -> FILE SEPARATOR + '\x1d' # 0x1D -> GROUP SEPARATOR + '\x1e' # 0x1E -> RECORD SEPARATOR + '\x1f' # 0x1F -> UNIT SEPARATOR + ' ' # 0x20 -> SPACE + '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' 
# 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' # 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> DELETE + '\x80' # 0x80 -> + '\x81' # 0x81 -> + '\x82' # 0x82 -> + '\x83' # 0x83 -> + '\x84' # 0x84 -> + '\x85' # 0x85 -> + '\x86' # 0x86 -> + '\x87' # 0x87 -> + '\x88' # 0x88 -> + '\x89' # 0x89 -> + '\x8a' # 0x8A -> + '\x8b' # 0x8B -> + '\x8c' # 0x8C -> + '\x8d' # 0x8D -> + '\x8e' # 0x8E -> + '\x8f' # 0x8F -> + '\x90' # 0x90 -> + '\x91' # 0x91 -> + '\x92' # 0x92 -> + '\x93' # 0x93 -> + '\x94' # 0x94 -> + '\x95' # 0x95 -> + '\x96' # 0x96 -> + '\x97' # 0x97 -> + '\x98' # 0x98 -> + '\x99' # 0x99 -> + '\x9a' # 0x9A -> + '\x9b' # 0x9B -> + '\x9c' # 0x9C -> + '\x9d' # 0x9D -> + '\x9e' # 0x9E -> + '\x9f' # 0x9F -> + '\xa0' # 0xA0 -> NO-BREAK SPACE + '\u0104' # 0xA1 -> LATIN CAPITAL LETTER A WITH OGONEK + '\u0138' # 0xA2 -> 
LATIN SMALL LETTER KRA + '\u0156' # 0xA3 -> LATIN CAPITAL LETTER R WITH CEDILLA + '\xa4' # 0xA4 -> CURRENCY SIGN + '\u0128' # 0xA5 -> LATIN CAPITAL LETTER I WITH TILDE + '\u013b' # 0xA6 -> LATIN CAPITAL LETTER L WITH CEDILLA + '\xa7' # 0xA7 -> SECTION SIGN + '\xa8' # 0xA8 -> DIAERESIS + '\u0160' # 0xA9 -> LATIN CAPITAL LETTER S WITH CARON + '\u0112' # 0xAA -> LATIN CAPITAL LETTER E WITH MACRON + '\u0122' # 0xAB -> LATIN CAPITAL LETTER G WITH CEDILLA + '\u0166' # 0xAC -> LATIN CAPITAL LETTER T WITH STROKE + '\xad' # 0xAD -> SOFT HYPHEN + '\u017d' # 0xAE -> LATIN CAPITAL LETTER Z WITH CARON + '\xaf' # 0xAF -> MACRON + '\xb0' # 0xB0 -> DEGREE SIGN + '\u0105' # 0xB1 -> LATIN SMALL LETTER A WITH OGONEK + '\u02db' # 0xB2 -> OGONEK + '\u0157' # 0xB3 -> LATIN SMALL LETTER R WITH CEDILLA + '\xb4' # 0xB4 -> ACUTE ACCENT + '\u0129' # 0xB5 -> LATIN SMALL LETTER I WITH TILDE + '\u013c' # 0xB6 -> LATIN SMALL LETTER L WITH CEDILLA + '\u02c7' # 0xB7 -> CARON + '\xb8' # 0xB8 -> CEDILLA + '\u0161' # 0xB9 -> LATIN SMALL LETTER S WITH CARON + '\u0113' # 0xBA -> LATIN SMALL LETTER E WITH MACRON + '\u0123' # 0xBB -> LATIN SMALL LETTER G WITH CEDILLA + '\u0167' # 0xBC -> LATIN SMALL LETTER T WITH STROKE + '\u014a' # 0xBD -> LATIN CAPITAL LETTER ENG + '\u017e' # 0xBE -> LATIN SMALL LETTER Z WITH CARON + '\u014b' # 0xBF -> LATIN SMALL LETTER ENG + '\u0100' # 0xC0 -> LATIN CAPITAL LETTER A WITH MACRON + '\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE + '\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX + '\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE + '\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS + '\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE + '\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE + '\u012e' # 0xC7 -> LATIN CAPITAL LETTER I WITH OGONEK + '\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON + '\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE + '\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK + '\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS + '\u0116' # 0xCC -> LATIN CAPITAL LETTER E WITH DOT ABOVE + '\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE + '\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX + '\u012a' # 0xCF -> LATIN CAPITAL LETTER I WITH MACRON + '\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE + '\u0145' # 0xD1 -> LATIN CAPITAL LETTER N WITH CEDILLA + '\u014c' # 0xD2 -> LATIN CAPITAL LETTER O WITH MACRON + '\u0136' # 0xD3 -> LATIN CAPITAL LETTER K WITH CEDILLA + '\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX + '\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE + '\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS + '\xd7' # 0xD7 -> MULTIPLICATION SIGN + '\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE + '\u0172' # 0xD9 -> LATIN CAPITAL LETTER U WITH OGONEK + '\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE + '\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX + '\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\u0168' # 0xDD -> LATIN CAPITAL LETTER U WITH TILDE + '\u016a' # 0xDE -> LATIN CAPITAL LETTER U WITH MACRON + '\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S + '\u0101' # 0xE0 -> LATIN SMALL LETTER A WITH MACRON + '\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE + '\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX + '\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE + '\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS + '\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE + '\xe6' # 0xE6 -> LATIN SMALL LETTER AE + '\u012f' # 0xE7 -> LATIN SMALL LETTER I WITH OGONEK + '\u010d' # 0xE8 -> LATIN SMALL LETTER C 
WITH CARON + '\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE + '\u0119' # 0xEA -> LATIN SMALL LETTER E WITH OGONEK + '\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS + '\u0117' # 0xEC -> LATIN SMALL LETTER E WITH DOT ABOVE + '\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE + '\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX + '\u012b' # 0xEF -> LATIN SMALL LETTER I WITH MACRON + '\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE + '\u0146' # 0xF1 -> LATIN SMALL LETTER N WITH CEDILLA + '\u014d' # 0xF2 -> LATIN SMALL LETTER O WITH MACRON + '\u0137' # 0xF3 -> LATIN SMALL LETTER K WITH CEDILLA + '\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX + '\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE + '\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS + '\xf7' # 0xF7 -> DIVISION SIGN + '\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE + '\u0173' # 0xF9 -> LATIN SMALL LETTER U WITH OGONEK + '\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE + '\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX + '\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS + '\u0169' # 0xFD -> LATIN SMALL LETTER U WITH TILDE + '\u016b' # 0xFE -> LATIN SMALL LETTER U WITH MACRON + '\u02d9' # 0xFF -> DOT ABOVE +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/iso8859_5.py b/venv/Lib/encodings/iso8859_5.py new file mode 100644 index 00000000..a3c868a5 --- /dev/null +++ b/venv/Lib/encodings/iso8859_5.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec iso8859_5 generated from 'MAPPINGS/ISO8859/8859-5.TXT' with gencodec.py. + +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='iso8859-5', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x04' # 0x04 -> END OF TRANSMISSION + '\x05' # 0x05 -> ENQUIRY + '\x06' # 0x06 -> ACKNOWLEDGE + '\x07' # 0x07 -> BELL + '\x08' # 0x08 -> BACKSPACE + '\t' # 0x09 -> HORIZONTAL TABULATION + '\n' # 0x0A -> LINE FEED + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x14' # 0x14 -> DEVICE CONTROL FOUR + '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x16 -> SYNCHRONOUS IDLE + '\x17' # 0x17 -> END OF TRANSMISSION BLOCK + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x1a' # 0x1A -> SUBSTITUTE + 
'\x1b' # 0x1B -> ESCAPE + '\x1c' # 0x1C -> FILE SEPARATOR + '\x1d' # 0x1D -> GROUP SEPARATOR + '\x1e' # 0x1E -> RECORD SEPARATOR + '\x1f' # 0x1F -> UNIT SEPARATOR + ' ' # 0x20 -> SPACE + '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' # 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' # 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> DELETE + '\x80' # 0x80 -> + '\x81' # 0x81 -> + '\x82' # 0x82 -> + '\x83' # 0x83 -> + '\x84' # 0x84 -> + '\x85' # 0x85 -> + '\x86' # 0x86 -> + '\x87' # 0x87 -> + 
'\x88' # 0x88 -> + '\x89' # 0x89 -> + '\x8a' # 0x8A -> + '\x8b' # 0x8B -> + '\x8c' # 0x8C -> + '\x8d' # 0x8D -> + '\x8e' # 0x8E -> + '\x8f' # 0x8F -> + '\x90' # 0x90 -> + '\x91' # 0x91 -> + '\x92' # 0x92 -> + '\x93' # 0x93 -> + '\x94' # 0x94 -> + '\x95' # 0x95 -> + '\x96' # 0x96 -> + '\x97' # 0x97 -> + '\x98' # 0x98 -> + '\x99' # 0x99 -> + '\x9a' # 0x9A -> + '\x9b' # 0x9B -> + '\x9c' # 0x9C -> + '\x9d' # 0x9D -> + '\x9e' # 0x9E -> + '\x9f' # 0x9F -> + '\xa0' # 0xA0 -> NO-BREAK SPACE + '\u0401' # 0xA1 -> CYRILLIC CAPITAL LETTER IO + '\u0402' # 0xA2 -> CYRILLIC CAPITAL LETTER DJE + '\u0403' # 0xA3 -> CYRILLIC CAPITAL LETTER GJE + '\u0404' # 0xA4 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE + '\u0405' # 0xA5 -> CYRILLIC CAPITAL LETTER DZE + '\u0406' # 0xA6 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I + '\u0407' # 0xA7 -> CYRILLIC CAPITAL LETTER YI + '\u0408' # 0xA8 -> CYRILLIC CAPITAL LETTER JE + '\u0409' # 0xA9 -> CYRILLIC CAPITAL LETTER LJE + '\u040a' # 0xAA -> CYRILLIC CAPITAL LETTER NJE + '\u040b' # 0xAB -> CYRILLIC CAPITAL LETTER TSHE + '\u040c' # 0xAC -> CYRILLIC CAPITAL LETTER KJE + '\xad' # 0xAD -> SOFT HYPHEN + '\u040e' # 0xAE -> CYRILLIC CAPITAL LETTER SHORT U + '\u040f' # 0xAF -> CYRILLIC CAPITAL LETTER DZHE + '\u0410' # 0xB0 -> CYRILLIC CAPITAL LETTER A + '\u0411' # 0xB1 -> CYRILLIC CAPITAL LETTER BE + '\u0412' # 0xB2 -> CYRILLIC CAPITAL LETTER VE + '\u0413' # 0xB3 -> CYRILLIC CAPITAL LETTER GHE + '\u0414' # 0xB4 -> CYRILLIC CAPITAL LETTER DE + '\u0415' # 0xB5 -> CYRILLIC CAPITAL LETTER IE + '\u0416' # 0xB6 -> CYRILLIC CAPITAL LETTER ZHE + '\u0417' # 0xB7 -> CYRILLIC CAPITAL LETTER ZE + '\u0418' # 0xB8 -> CYRILLIC CAPITAL LETTER I + '\u0419' # 0xB9 -> CYRILLIC CAPITAL LETTER SHORT I + '\u041a' # 0xBA -> CYRILLIC CAPITAL LETTER KA + '\u041b' # 0xBB -> CYRILLIC CAPITAL LETTER EL + '\u041c' # 0xBC -> CYRILLIC CAPITAL LETTER EM + '\u041d' # 0xBD -> CYRILLIC CAPITAL LETTER EN + '\u041e' # 0xBE -> CYRILLIC CAPITAL LETTER O + '\u041f' # 0xBF -> CYRILLIC CAPITAL LETTER PE + '\u0420' # 0xC0 -> CYRILLIC CAPITAL LETTER ER + '\u0421' # 0xC1 -> CYRILLIC CAPITAL LETTER ES + '\u0422' # 0xC2 -> CYRILLIC CAPITAL LETTER TE + '\u0423' # 0xC3 -> CYRILLIC CAPITAL LETTER U + '\u0424' # 0xC4 -> CYRILLIC CAPITAL LETTER EF + '\u0425' # 0xC5 -> CYRILLIC CAPITAL LETTER HA + '\u0426' # 0xC6 -> CYRILLIC CAPITAL LETTER TSE + '\u0427' # 0xC7 -> CYRILLIC CAPITAL LETTER CHE + '\u0428' # 0xC8 -> CYRILLIC CAPITAL LETTER SHA + '\u0429' # 0xC9 -> CYRILLIC CAPITAL LETTER SHCHA + '\u042a' # 0xCA -> CYRILLIC CAPITAL LETTER HARD SIGN + '\u042b' # 0xCB -> CYRILLIC CAPITAL LETTER YERU + '\u042c' # 0xCC -> CYRILLIC CAPITAL LETTER SOFT SIGN + '\u042d' # 0xCD -> CYRILLIC CAPITAL LETTER E + '\u042e' # 0xCE -> CYRILLIC CAPITAL LETTER YU + '\u042f' # 0xCF -> CYRILLIC CAPITAL LETTER YA + '\u0430' # 0xD0 -> CYRILLIC SMALL LETTER A + '\u0431' # 0xD1 -> CYRILLIC SMALL LETTER BE + '\u0432' # 0xD2 -> CYRILLIC SMALL LETTER VE + '\u0433' # 0xD3 -> CYRILLIC SMALL LETTER GHE + '\u0434' # 0xD4 -> CYRILLIC SMALL LETTER DE + '\u0435' # 0xD5 -> CYRILLIC SMALL LETTER IE + '\u0436' # 0xD6 -> CYRILLIC SMALL LETTER ZHE + '\u0437' # 0xD7 -> CYRILLIC SMALL LETTER ZE + '\u0438' # 0xD8 -> CYRILLIC SMALL LETTER I + '\u0439' # 0xD9 -> CYRILLIC SMALL LETTER SHORT I + '\u043a' # 0xDA -> CYRILLIC SMALL LETTER KA + '\u043b' # 0xDB -> CYRILLIC SMALL LETTER EL + '\u043c' # 0xDC -> CYRILLIC SMALL LETTER EM + '\u043d' # 0xDD -> CYRILLIC SMALL LETTER EN + '\u043e' # 0xDE -> CYRILLIC SMALL LETTER O + '\u043f' # 0xDF -> CYRILLIC SMALL LETTER PE + '\u0440' # 
0xE0 -> CYRILLIC SMALL LETTER ER + '\u0441' # 0xE1 -> CYRILLIC SMALL LETTER ES + '\u0442' # 0xE2 -> CYRILLIC SMALL LETTER TE + '\u0443' # 0xE3 -> CYRILLIC SMALL LETTER U + '\u0444' # 0xE4 -> CYRILLIC SMALL LETTER EF + '\u0445' # 0xE5 -> CYRILLIC SMALL LETTER HA + '\u0446' # 0xE6 -> CYRILLIC SMALL LETTER TSE + '\u0447' # 0xE7 -> CYRILLIC SMALL LETTER CHE + '\u0448' # 0xE8 -> CYRILLIC SMALL LETTER SHA + '\u0449' # 0xE9 -> CYRILLIC SMALL LETTER SHCHA + '\u044a' # 0xEA -> CYRILLIC SMALL LETTER HARD SIGN + '\u044b' # 0xEB -> CYRILLIC SMALL LETTER YERU + '\u044c' # 0xEC -> CYRILLIC SMALL LETTER SOFT SIGN + '\u044d' # 0xED -> CYRILLIC SMALL LETTER E + '\u044e' # 0xEE -> CYRILLIC SMALL LETTER YU + '\u044f' # 0xEF -> CYRILLIC SMALL LETTER YA + '\u2116' # 0xF0 -> NUMERO SIGN + '\u0451' # 0xF1 -> CYRILLIC SMALL LETTER IO + '\u0452' # 0xF2 -> CYRILLIC SMALL LETTER DJE + '\u0453' # 0xF3 -> CYRILLIC SMALL LETTER GJE + '\u0454' # 0xF4 -> CYRILLIC SMALL LETTER UKRAINIAN IE + '\u0455' # 0xF5 -> CYRILLIC SMALL LETTER DZE + '\u0456' # 0xF6 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I + '\u0457' # 0xF7 -> CYRILLIC SMALL LETTER YI + '\u0458' # 0xF8 -> CYRILLIC SMALL LETTER JE + '\u0459' # 0xF9 -> CYRILLIC SMALL LETTER LJE + '\u045a' # 0xFA -> CYRILLIC SMALL LETTER NJE + '\u045b' # 0xFB -> CYRILLIC SMALL LETTER TSHE + '\u045c' # 0xFC -> CYRILLIC SMALL LETTER KJE + '\xa7' # 0xFD -> SECTION SIGN + '\u045e' # 0xFE -> CYRILLIC SMALL LETTER SHORT U + '\u045f' # 0xFF -> CYRILLIC SMALL LETTER DZHE +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/iso8859_6.py b/venv/Lib/encodings/iso8859_6.py new file mode 100644 index 00000000..b02ade6e --- /dev/null +++ b/venv/Lib/encodings/iso8859_6.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec iso8859_6 generated from 'MAPPINGS/ISO8859/8859-6.TXT' with gencodec.py. 
+ +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='iso8859-6', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x04' # 0x04 -> END OF TRANSMISSION + '\x05' # 0x05 -> ENQUIRY + '\x06' # 0x06 -> ACKNOWLEDGE + '\x07' # 0x07 -> BELL + '\x08' # 0x08 -> BACKSPACE + '\t' # 0x09 -> HORIZONTAL TABULATION + '\n' # 0x0A -> LINE FEED + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x14' # 0x14 -> DEVICE CONTROL FOUR + '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x16 -> SYNCHRONOUS IDLE + '\x17' # 0x17 -> END OF TRANSMISSION BLOCK + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x1a' # 0x1A -> SUBSTITUTE + '\x1b' # 0x1B -> ESCAPE + '\x1c' # 0x1C -> FILE SEPARATOR + '\x1d' # 0x1D -> GROUP SEPARATOR + '\x1e' # 0x1E -> RECORD SEPARATOR + '\x1f' # 0x1F -> UNIT SEPARATOR + ' ' # 0x20 -> SPACE + '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' # 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' 
# 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> DELETE + '\x80' # 0x80 -> + '\x81' # 0x81 -> + '\x82' # 0x82 -> + '\x83' # 0x83 -> + '\x84' # 0x84 -> + '\x85' # 0x85 -> + '\x86' # 0x86 -> + '\x87' # 0x87 -> + '\x88' # 0x88 -> + '\x89' # 0x89 -> + '\x8a' # 0x8A -> + '\x8b' # 0x8B -> + '\x8c' # 0x8C -> + '\x8d' # 0x8D -> + '\x8e' # 0x8E -> + '\x8f' # 0x8F -> + '\x90' # 0x90 -> + '\x91' # 0x91 -> + '\x92' # 0x92 -> + '\x93' # 0x93 -> + '\x94' # 0x94 -> + '\x95' # 0x95 -> + '\x96' # 0x96 -> + '\x97' # 0x97 -> + '\x98' # 0x98 -> + '\x99' # 0x99 -> + '\x9a' # 0x9A -> + '\x9b' # 0x9B -> + '\x9c' # 0x9C -> + '\x9d' # 0x9D -> + '\x9e' # 0x9E -> + '\x9f' # 0x9F -> + '\xa0' # 0xA0 -> NO-BREAK SPACE + '\ufffe' + '\ufffe' + '\ufffe' + '\xa4' # 0xA4 -> CURRENCY SIGN + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\u060c' # 0xAC -> ARABIC COMMA + '\xad' # 0xAD -> SOFT HYPHEN + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\u061b' # 0xBB -> ARABIC SEMICOLON + '\ufffe' + '\ufffe' + '\ufffe' + '\u061f' # 0xBF -> ARABIC QUESTION MARK + '\ufffe' + '\u0621' # 0xC1 -> ARABIC LETTER HAMZA + '\u0622' # 0xC2 -> 
ARABIC LETTER ALEF WITH MADDA ABOVE + '\u0623' # 0xC3 -> ARABIC LETTER ALEF WITH HAMZA ABOVE + '\u0624' # 0xC4 -> ARABIC LETTER WAW WITH HAMZA ABOVE + '\u0625' # 0xC5 -> ARABIC LETTER ALEF WITH HAMZA BELOW + '\u0626' # 0xC6 -> ARABIC LETTER YEH WITH HAMZA ABOVE + '\u0627' # 0xC7 -> ARABIC LETTER ALEF + '\u0628' # 0xC8 -> ARABIC LETTER BEH + '\u0629' # 0xC9 -> ARABIC LETTER TEH MARBUTA + '\u062a' # 0xCA -> ARABIC LETTER TEH + '\u062b' # 0xCB -> ARABIC LETTER THEH + '\u062c' # 0xCC -> ARABIC LETTER JEEM + '\u062d' # 0xCD -> ARABIC LETTER HAH + '\u062e' # 0xCE -> ARABIC LETTER KHAH + '\u062f' # 0xCF -> ARABIC LETTER DAL + '\u0630' # 0xD0 -> ARABIC LETTER THAL + '\u0631' # 0xD1 -> ARABIC LETTER REH + '\u0632' # 0xD2 -> ARABIC LETTER ZAIN + '\u0633' # 0xD3 -> ARABIC LETTER SEEN + '\u0634' # 0xD4 -> ARABIC LETTER SHEEN + '\u0635' # 0xD5 -> ARABIC LETTER SAD + '\u0636' # 0xD6 -> ARABIC LETTER DAD + '\u0637' # 0xD7 -> ARABIC LETTER TAH + '\u0638' # 0xD8 -> ARABIC LETTER ZAH + '\u0639' # 0xD9 -> ARABIC LETTER AIN + '\u063a' # 0xDA -> ARABIC LETTER GHAIN + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\u0640' # 0xE0 -> ARABIC TATWEEL + '\u0641' # 0xE1 -> ARABIC LETTER FEH + '\u0642' # 0xE2 -> ARABIC LETTER QAF + '\u0643' # 0xE3 -> ARABIC LETTER KAF + '\u0644' # 0xE4 -> ARABIC LETTER LAM + '\u0645' # 0xE5 -> ARABIC LETTER MEEM + '\u0646' # 0xE6 -> ARABIC LETTER NOON + '\u0647' # 0xE7 -> ARABIC LETTER HEH + '\u0648' # 0xE8 -> ARABIC LETTER WAW + '\u0649' # 0xE9 -> ARABIC LETTER ALEF MAKSURA + '\u064a' # 0xEA -> ARABIC LETTER YEH + '\u064b' # 0xEB -> ARABIC FATHATAN + '\u064c' # 0xEC -> ARABIC DAMMATAN + '\u064d' # 0xED -> ARABIC KASRATAN + '\u064e' # 0xEE -> ARABIC FATHA + '\u064f' # 0xEF -> ARABIC DAMMA + '\u0650' # 0xF0 -> ARABIC KASRA + '\u0651' # 0xF1 -> ARABIC SHADDA + '\u0652' # 0xF2 -> ARABIC SUKUN + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/iso8859_7.py b/venv/Lib/encodings/iso8859_7.py new file mode 100644 index 00000000..d7b39cbc --- /dev/null +++ b/venv/Lib/encodings/iso8859_7.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec iso8859_7 generated from 'MAPPINGS/ISO8859/8859-7.TXT' with gencodec.py. 
+ +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='iso8859-7', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x04' # 0x04 -> END OF TRANSMISSION + '\x05' # 0x05 -> ENQUIRY + '\x06' # 0x06 -> ACKNOWLEDGE + '\x07' # 0x07 -> BELL + '\x08' # 0x08 -> BACKSPACE + '\t' # 0x09 -> HORIZONTAL TABULATION + '\n' # 0x0A -> LINE FEED + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x14' # 0x14 -> DEVICE CONTROL FOUR + '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x16 -> SYNCHRONOUS IDLE + '\x17' # 0x17 -> END OF TRANSMISSION BLOCK + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x1a' # 0x1A -> SUBSTITUTE + '\x1b' # 0x1B -> ESCAPE + '\x1c' # 0x1C -> FILE SEPARATOR + '\x1d' # 0x1D -> GROUP SEPARATOR + '\x1e' # 0x1E -> RECORD SEPARATOR + '\x1f' # 0x1F -> UNIT SEPARATOR + ' ' # 0x20 -> SPACE + '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' # 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' 
# 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> DELETE + '\x80' # 0x80 -> + '\x81' # 0x81 -> + '\x82' # 0x82 -> + '\x83' # 0x83 -> + '\x84' # 0x84 -> + '\x85' # 0x85 -> + '\x86' # 0x86 -> + '\x87' # 0x87 -> + '\x88' # 0x88 -> + '\x89' # 0x89 -> + '\x8a' # 0x8A -> + '\x8b' # 0x8B -> + '\x8c' # 0x8C -> + '\x8d' # 0x8D -> + '\x8e' # 0x8E -> + '\x8f' # 0x8F -> + '\x90' # 0x90 -> + '\x91' # 0x91 -> + '\x92' # 0x92 -> + '\x93' # 0x93 -> + '\x94' # 0x94 -> + '\x95' # 0x95 -> + '\x96' # 0x96 -> + '\x97' # 0x97 -> + '\x98' # 0x98 -> + '\x99' # 0x99 -> + '\x9a' # 0x9A -> + '\x9b' # 0x9B -> + '\x9c' # 0x9C -> + '\x9d' # 0x9D -> + '\x9e' # 0x9E -> + '\x9f' # 0x9F -> + '\xa0' # 0xA0 -> NO-BREAK SPACE + '\u2018' # 0xA1 -> LEFT SINGLE QUOTATION MARK + '\u2019' # 0xA2 -> RIGHT SINGLE QUOTATION MARK + '\xa3' # 0xA3 -> POUND SIGN + '\u20ac' # 0xA4 -> EURO SIGN + '\u20af' # 0xA5 -> DRACHMA SIGN + '\xa6' # 0xA6 -> BROKEN BAR + '\xa7' # 0xA7 -> SECTION SIGN + '\xa8' # 0xA8 -> DIAERESIS + '\xa9' # 0xA9 -> COPYRIGHT SIGN + '\u037a' # 0xAA -> GREEK YPOGEGRAMMENI + '\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xac' # 0xAC -> NOT SIGN + '\xad' # 0xAD -> SOFT HYPHEN + '\ufffe' + '\u2015' # 0xAF -> HORIZONTAL BAR + '\xb0' # 
0xB0 -> DEGREE SIGN + '\xb1' # 0xB1 -> PLUS-MINUS SIGN + '\xb2' # 0xB2 -> SUPERSCRIPT TWO + '\xb3' # 0xB3 -> SUPERSCRIPT THREE + '\u0384' # 0xB4 -> GREEK TONOS + '\u0385' # 0xB5 -> GREEK DIALYTIKA TONOS + '\u0386' # 0xB6 -> GREEK CAPITAL LETTER ALPHA WITH TONOS + '\xb7' # 0xB7 -> MIDDLE DOT + '\u0388' # 0xB8 -> GREEK CAPITAL LETTER EPSILON WITH TONOS + '\u0389' # 0xB9 -> GREEK CAPITAL LETTER ETA WITH TONOS + '\u038a' # 0xBA -> GREEK CAPITAL LETTER IOTA WITH TONOS + '\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\u038c' # 0xBC -> GREEK CAPITAL LETTER OMICRON WITH TONOS + '\xbd' # 0xBD -> VULGAR FRACTION ONE HALF + '\u038e' # 0xBE -> GREEK CAPITAL LETTER UPSILON WITH TONOS + '\u038f' # 0xBF -> GREEK CAPITAL LETTER OMEGA WITH TONOS + '\u0390' # 0xC0 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS + '\u0391' # 0xC1 -> GREEK CAPITAL LETTER ALPHA + '\u0392' # 0xC2 -> GREEK CAPITAL LETTER BETA + '\u0393' # 0xC3 -> GREEK CAPITAL LETTER GAMMA + '\u0394' # 0xC4 -> GREEK CAPITAL LETTER DELTA + '\u0395' # 0xC5 -> GREEK CAPITAL LETTER EPSILON + '\u0396' # 0xC6 -> GREEK CAPITAL LETTER ZETA + '\u0397' # 0xC7 -> GREEK CAPITAL LETTER ETA + '\u0398' # 0xC8 -> GREEK CAPITAL LETTER THETA + '\u0399' # 0xC9 -> GREEK CAPITAL LETTER IOTA + '\u039a' # 0xCA -> GREEK CAPITAL LETTER KAPPA + '\u039b' # 0xCB -> GREEK CAPITAL LETTER LAMDA + '\u039c' # 0xCC -> GREEK CAPITAL LETTER MU + '\u039d' # 0xCD -> GREEK CAPITAL LETTER NU + '\u039e' # 0xCE -> GREEK CAPITAL LETTER XI + '\u039f' # 0xCF -> GREEK CAPITAL LETTER OMICRON + '\u03a0' # 0xD0 -> GREEK CAPITAL LETTER PI + '\u03a1' # 0xD1 -> GREEK CAPITAL LETTER RHO + '\ufffe' + '\u03a3' # 0xD3 -> GREEK CAPITAL LETTER SIGMA + '\u03a4' # 0xD4 -> GREEK CAPITAL LETTER TAU + '\u03a5' # 0xD5 -> GREEK CAPITAL LETTER UPSILON + '\u03a6' # 0xD6 -> GREEK CAPITAL LETTER PHI + '\u03a7' # 0xD7 -> GREEK CAPITAL LETTER CHI + '\u03a8' # 0xD8 -> GREEK CAPITAL LETTER PSI + '\u03a9' # 0xD9 -> GREEK CAPITAL LETTER OMEGA + '\u03aa' # 0xDA -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA + '\u03ab' # 0xDB -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA + '\u03ac' # 0xDC -> GREEK SMALL LETTER ALPHA WITH TONOS + '\u03ad' # 0xDD -> GREEK SMALL LETTER EPSILON WITH TONOS + '\u03ae' # 0xDE -> GREEK SMALL LETTER ETA WITH TONOS + '\u03af' # 0xDF -> GREEK SMALL LETTER IOTA WITH TONOS + '\u03b0' # 0xE0 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS + '\u03b1' # 0xE1 -> GREEK SMALL LETTER ALPHA + '\u03b2' # 0xE2 -> GREEK SMALL LETTER BETA + '\u03b3' # 0xE3 -> GREEK SMALL LETTER GAMMA + '\u03b4' # 0xE4 -> GREEK SMALL LETTER DELTA + '\u03b5' # 0xE5 -> GREEK SMALL LETTER EPSILON + '\u03b6' # 0xE6 -> GREEK SMALL LETTER ZETA + '\u03b7' # 0xE7 -> GREEK SMALL LETTER ETA + '\u03b8' # 0xE8 -> GREEK SMALL LETTER THETA + '\u03b9' # 0xE9 -> GREEK SMALL LETTER IOTA + '\u03ba' # 0xEA -> GREEK SMALL LETTER KAPPA + '\u03bb' # 0xEB -> GREEK SMALL LETTER LAMDA + '\u03bc' # 0xEC -> GREEK SMALL LETTER MU + '\u03bd' # 0xED -> GREEK SMALL LETTER NU + '\u03be' # 0xEE -> GREEK SMALL LETTER XI + '\u03bf' # 0xEF -> GREEK SMALL LETTER OMICRON + '\u03c0' # 0xF0 -> GREEK SMALL LETTER PI + '\u03c1' # 0xF1 -> GREEK SMALL LETTER RHO + '\u03c2' # 0xF2 -> GREEK SMALL LETTER FINAL SIGMA + '\u03c3' # 0xF3 -> GREEK SMALL LETTER SIGMA + '\u03c4' # 0xF4 -> GREEK SMALL LETTER TAU + '\u03c5' # 0xF5 -> GREEK SMALL LETTER UPSILON + '\u03c6' # 0xF6 -> GREEK SMALL LETTER PHI + '\u03c7' # 0xF7 -> GREEK SMALL LETTER CHI + '\u03c8' # 0xF8 -> GREEK SMALL LETTER PSI + '\u03c9' # 0xF9 -> GREEK SMALL LETTER OMEGA + '\u03ca' # 0xFA -> 
GREEK SMALL LETTER IOTA WITH DIALYTIKA + '\u03cb' # 0xFB -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA + '\u03cc' # 0xFC -> GREEK SMALL LETTER OMICRON WITH TONOS + '\u03cd' # 0xFD -> GREEK SMALL LETTER UPSILON WITH TONOS + '\u03ce' # 0xFE -> GREEK SMALL LETTER OMEGA WITH TONOS + '\ufffe' +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/iso8859_8.py b/venv/Lib/encodings/iso8859_8.py new file mode 100644 index 00000000..81849027 --- /dev/null +++ b/venv/Lib/encodings/iso8859_8.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec iso8859_8 generated from 'MAPPINGS/ISO8859/8859-8.TXT' with gencodec.py. + +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='iso8859-8', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x04' # 0x04 -> END OF TRANSMISSION + '\x05' # 0x05 -> ENQUIRY + '\x06' # 0x06 -> ACKNOWLEDGE + '\x07' # 0x07 -> BELL + '\x08' # 0x08 -> BACKSPACE + '\t' # 0x09 -> HORIZONTAL TABULATION + '\n' # 0x0A -> LINE FEED + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x14' # 0x14 -> DEVICE CONTROL FOUR + '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x16 -> SYNCHRONOUS IDLE + '\x17' # 0x17 -> END OF TRANSMISSION BLOCK + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x1a' # 0x1A -> SUBSTITUTE + '\x1b' # 0x1B -> ESCAPE + '\x1c' # 0x1C -> FILE SEPARATOR + '\x1d' # 0x1D -> GROUP SEPARATOR + '\x1e' # 0x1E -> RECORD SEPARATOR + '\x1f' # 0x1F -> UNIT SEPARATOR + ' ' # 0x20 -> SPACE + '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' 
# 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' # 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> DELETE + '\x80' # 0x80 -> + '\x81' # 0x81 -> + '\x82' # 0x82 -> + '\x83' # 0x83 -> + '\x84' # 0x84 -> + '\x85' # 0x85 -> + '\x86' # 0x86 -> + '\x87' # 0x87 -> + '\x88' # 0x88 -> + '\x89' # 0x89 -> + '\x8a' # 0x8A -> + '\x8b' # 0x8B -> + '\x8c' # 0x8C -> + '\x8d' # 0x8D -> + '\x8e' # 0x8E -> + '\x8f' # 0x8F -> + '\x90' # 0x90 -> + '\x91' # 0x91 -> + '\x92' # 0x92 -> + '\x93' # 0x93 -> + '\x94' # 0x94 -> + '\x95' # 0x95 -> + '\x96' # 0x96 -> + '\x97' # 0x97 -> + '\x98' # 0x98 -> + '\x99' # 0x99 -> + '\x9a' # 0x9A -> + '\x9b' # 0x9B -> + '\x9c' # 0x9C -> + '\x9d' # 0x9D -> + '\x9e' # 0x9E -> + '\x9f' # 0x9F -> + '\xa0' # 0xA0 -> NO-BREAK SPACE + '\ufffe' + '\xa2' # 0xA2 -> CENT SIGN + '\xa3' # 0xA3 -> POUND SIGN + '\xa4' 
# 0xA4 -> CURRENCY SIGN + '\xa5' # 0xA5 -> YEN SIGN + '\xa6' # 0xA6 -> BROKEN BAR + '\xa7' # 0xA7 -> SECTION SIGN + '\xa8' # 0xA8 -> DIAERESIS + '\xa9' # 0xA9 -> COPYRIGHT SIGN + '\xd7' # 0xAA -> MULTIPLICATION SIGN + '\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xac' # 0xAC -> NOT SIGN + '\xad' # 0xAD -> SOFT HYPHEN + '\xae' # 0xAE -> REGISTERED SIGN + '\xaf' # 0xAF -> MACRON + '\xb0' # 0xB0 -> DEGREE SIGN + '\xb1' # 0xB1 -> PLUS-MINUS SIGN + '\xb2' # 0xB2 -> SUPERSCRIPT TWO + '\xb3' # 0xB3 -> SUPERSCRIPT THREE + '\xb4' # 0xB4 -> ACUTE ACCENT + '\xb5' # 0xB5 -> MICRO SIGN + '\xb6' # 0xB6 -> PILCROW SIGN + '\xb7' # 0xB7 -> MIDDLE DOT + '\xb8' # 0xB8 -> CEDILLA + '\xb9' # 0xB9 -> SUPERSCRIPT ONE + '\xf7' # 0xBA -> DIVISION SIGN + '\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER + '\xbd' # 0xBD -> VULGAR FRACTION ONE HALF + '\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\u2017' # 0xDF -> DOUBLE LOW LINE + '\u05d0' # 0xE0 -> HEBREW LETTER ALEF + '\u05d1' # 0xE1 -> HEBREW LETTER BET + '\u05d2' # 0xE2 -> HEBREW LETTER GIMEL + '\u05d3' # 0xE3 -> HEBREW LETTER DALET + '\u05d4' # 0xE4 -> HEBREW LETTER HE + '\u05d5' # 0xE5 -> HEBREW LETTER VAV + '\u05d6' # 0xE6 -> HEBREW LETTER ZAYIN + '\u05d7' # 0xE7 -> HEBREW LETTER HET + '\u05d8' # 0xE8 -> HEBREW LETTER TET + '\u05d9' # 0xE9 -> HEBREW LETTER YOD + '\u05da' # 0xEA -> HEBREW LETTER FINAL KAF + '\u05db' # 0xEB -> HEBREW LETTER KAF + '\u05dc' # 0xEC -> HEBREW LETTER LAMED + '\u05dd' # 0xED -> HEBREW LETTER FINAL MEM + '\u05de' # 0xEE -> HEBREW LETTER MEM + '\u05df' # 0xEF -> HEBREW LETTER FINAL NUN + '\u05e0' # 0xF0 -> HEBREW LETTER NUN + '\u05e1' # 0xF1 -> HEBREW LETTER SAMEKH + '\u05e2' # 0xF2 -> HEBREW LETTER AYIN + '\u05e3' # 0xF3 -> HEBREW LETTER FINAL PE + '\u05e4' # 0xF4 -> HEBREW LETTER PE + '\u05e5' # 0xF5 -> HEBREW LETTER FINAL TSADI + '\u05e6' # 0xF6 -> HEBREW LETTER TSADI + '\u05e7' # 0xF7 -> HEBREW LETTER QOF + '\u05e8' # 0xF8 -> HEBREW LETTER RESH + '\u05e9' # 0xF9 -> HEBREW LETTER SHIN + '\u05ea' # 0xFA -> HEBREW LETTER TAV + '\ufffe' + '\ufffe' + '\u200e' # 0xFD -> LEFT-TO-RIGHT MARK + '\u200f' # 0xFE -> RIGHT-TO-LEFT MARK + '\ufffe' +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/iso8859_9.py b/venv/Lib/encodings/iso8859_9.py new file mode 100644 index 00000000..e539fdda --- /dev/null +++ b/venv/Lib/encodings/iso8859_9.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec iso8859_9 generated from 'MAPPINGS/ISO8859/8859-9.TXT' with gencodec.py. 
+ +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='iso8859-9', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x04' # 0x04 -> END OF TRANSMISSION + '\x05' # 0x05 -> ENQUIRY + '\x06' # 0x06 -> ACKNOWLEDGE + '\x07' # 0x07 -> BELL + '\x08' # 0x08 -> BACKSPACE + '\t' # 0x09 -> HORIZONTAL TABULATION + '\n' # 0x0A -> LINE FEED + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x14' # 0x14 -> DEVICE CONTROL FOUR + '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x16 -> SYNCHRONOUS IDLE + '\x17' # 0x17 -> END OF TRANSMISSION BLOCK + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x1a' # 0x1A -> SUBSTITUTE + '\x1b' # 0x1B -> ESCAPE + '\x1c' # 0x1C -> FILE SEPARATOR + '\x1d' # 0x1D -> GROUP SEPARATOR + '\x1e' # 0x1E -> RECORD SEPARATOR + '\x1f' # 0x1F -> UNIT SEPARATOR + ' ' # 0x20 -> SPACE + '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' # 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' 
# 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> DELETE + '\x80' # 0x80 -> + '\x81' # 0x81 -> + '\x82' # 0x82 -> + '\x83' # 0x83 -> + '\x84' # 0x84 -> + '\x85' # 0x85 -> + '\x86' # 0x86 -> + '\x87' # 0x87 -> + '\x88' # 0x88 -> + '\x89' # 0x89 -> + '\x8a' # 0x8A -> + '\x8b' # 0x8B -> + '\x8c' # 0x8C -> + '\x8d' # 0x8D -> + '\x8e' # 0x8E -> + '\x8f' # 0x8F -> + '\x90' # 0x90 -> + '\x91' # 0x91 -> + '\x92' # 0x92 -> + '\x93' # 0x93 -> + '\x94' # 0x94 -> + '\x95' # 0x95 -> + '\x96' # 0x96 -> + '\x97' # 0x97 -> + '\x98' # 0x98 -> + '\x99' # 0x99 -> + '\x9a' # 0x9A -> + '\x9b' # 0x9B -> + '\x9c' # 0x9C -> + '\x9d' # 0x9D -> + '\x9e' # 0x9E -> + '\x9f' # 0x9F -> + '\xa0' # 0xA0 -> NO-BREAK SPACE + '\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK + '\xa2' # 0xA2 -> CENT SIGN + '\xa3' # 0xA3 -> POUND SIGN + '\xa4' # 0xA4 -> CURRENCY SIGN + '\xa5' # 0xA5 -> YEN SIGN + '\xa6' # 0xA6 -> BROKEN BAR + '\xa7' # 0xA7 -> SECTION SIGN + '\xa8' # 0xA8 -> DIAERESIS + '\xa9' # 0xA9 -> COPYRIGHT SIGN + '\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR + '\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xac' # 0xAC -> NOT SIGN + '\xad' # 0xAD -> SOFT HYPHEN + '\xae' # 0xAE -> REGISTERED SIGN + '\xaf' # 0xAF -> MACRON + '\xb0' # 0xB0 -> 
DEGREE SIGN + '\xb1' # 0xB1 -> PLUS-MINUS SIGN + '\xb2' # 0xB2 -> SUPERSCRIPT TWO + '\xb3' # 0xB3 -> SUPERSCRIPT THREE + '\xb4' # 0xB4 -> ACUTE ACCENT + '\xb5' # 0xB5 -> MICRO SIGN + '\xb6' # 0xB6 -> PILCROW SIGN + '\xb7' # 0xB7 -> MIDDLE DOT + '\xb8' # 0xB8 -> CEDILLA + '\xb9' # 0xB9 -> SUPERSCRIPT ONE + '\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR + '\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER + '\xbd' # 0xBD -> VULGAR FRACTION ONE HALF + '\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS + '\xbf' # 0xBF -> INVERTED QUESTION MARK + '\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE + '\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE + '\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX + '\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE + '\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS + '\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE + '\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE + '\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA + '\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE + '\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE + '\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX + '\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS + '\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE + '\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE + '\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX + '\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS + '\u011e' # 0xD0 -> LATIN CAPITAL LETTER G WITH BREVE + '\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE + '\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE + '\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE + '\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX + '\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE + '\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS + '\xd7' # 0xD7 -> MULTIPLICATION SIGN + '\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE + '\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE + '\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE + '\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX + '\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\u0130' # 0xDD -> LATIN CAPITAL LETTER I WITH DOT ABOVE + '\u015e' # 0xDE -> LATIN CAPITAL LETTER S WITH CEDILLA + '\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S + '\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE + '\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE + '\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX + '\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE + '\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS + '\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE + '\xe6' # 0xE6 -> LATIN SMALL LETTER AE + '\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA + '\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE + '\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE + '\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX + '\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS + '\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE + '\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE + '\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX + '\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS + '\u011f' # 0xF0 -> LATIN SMALL LETTER G WITH BREVE + '\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE + '\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE + '\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE + '\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX + '\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE + '\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS + '\xf7' # 0xF7 -> 
DIVISION SIGN + '\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE + '\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE + '\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE + '\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX + '\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS + '\u0131' # 0xFD -> LATIN SMALL LETTER DOTLESS I + '\u015f' # 0xFE -> LATIN SMALL LETTER S WITH CEDILLA + '\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/johab.py b/venv/Lib/encodings/johab.py new file mode 100644 index 00000000..512aeeb7 --- /dev/null +++ b/venv/Lib/encodings/johab.py @@ -0,0 +1,39 @@ +# +# johab.py: Python Unicode Codec for JOHAB +# +# Written by Hye-Shik Chang +# + +import _codecs_kr, codecs +import _multibytecodec as mbc + +codec = _codecs_kr.getcodec('johab') + +class Codec(codecs.Codec): + encode = codec.encode + decode = codec.decode + +class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, + codecs.IncrementalEncoder): + codec = codec + +class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, + codecs.IncrementalDecoder): + codec = codec + +class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader): + codec = codec + +class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter): + codec = codec + +def getregentry(): + return codecs.CodecInfo( + name='johab', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) diff --git a/venv/Lib/encodings/koi8_r.py b/venv/Lib/encodings/koi8_r.py new file mode 100644 index 00000000..41ddde85 --- /dev/null +++ b/venv/Lib/encodings/koi8_r.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec koi8_r generated from 'MAPPINGS/VENDORS/MISC/KOI8-R.TXT' with gencodec.py. 
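
All of the charmap modules being added under `venv/Lib/encodings/` follow the same `gencodec.py` template: a 256-character `decoding_table` (byte value = index, Unicode character = entry), an `encoding_table` produced by `codecs.charmap_build()`, thin `Codec`/`Incremental*`/`Stream*` wrappers, and a `getregentry()` hook for the `encodings` package. (`johab.py` above is the exception; it delegates to the C `_codecs_kr`/`_multibytecodec` machinery instead of a table.) Below is an illustrative sketch of the round trip through the ISO 8859-9 tables just shown; it is not part of the committed diff, and the byte values are taken from the table comments above.

```python
import codecs
from encodings import iso8859_9  # same module content as the venv copy added above

# decoding_table[0xD0] is LATIN CAPITAL LETTER G WITH BREVE per the comments above.
assert iso8859_9.decoding_table[0xD0] == '\u011e'

# charmap_decode/charmap_encode drive both directions from the two tables.
assert codecs.charmap_decode(b'\xd0', 'strict', iso8859_9.decoding_table)[0] == '\u011e'
assert codecs.charmap_encode('\u011e', 'strict', iso8859_9.encoding_table)[0] == b'\xd0'

# In normal use the same tables are reached through the codec registry:
assert 'Ğ'.encode('iso8859_9') == b'\xd0'
assert b'\xd0'.decode('iso8859_9') == 'Ğ'
```
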
+ +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='koi8-r', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x04' # 0x04 -> END OF TRANSMISSION + '\x05' # 0x05 -> ENQUIRY + '\x06' # 0x06 -> ACKNOWLEDGE + '\x07' # 0x07 -> BELL + '\x08' # 0x08 -> BACKSPACE + '\t' # 0x09 -> HORIZONTAL TABULATION + '\n' # 0x0A -> LINE FEED + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x14' # 0x14 -> DEVICE CONTROL FOUR + '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x16 -> SYNCHRONOUS IDLE + '\x17' # 0x17 -> END OF TRANSMISSION BLOCK + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x1a' # 0x1A -> SUBSTITUTE + '\x1b' # 0x1B -> ESCAPE + '\x1c' # 0x1C -> FILE SEPARATOR + '\x1d' # 0x1D -> GROUP SEPARATOR + '\x1e' # 0x1E -> RECORD SEPARATOR + '\x1f' # 0x1F -> UNIT SEPARATOR + ' ' # 0x20 -> SPACE + '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' # 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' 
# 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> DELETE + '\u2500' # 0x80 -> BOX DRAWINGS LIGHT HORIZONTAL + '\u2502' # 0x81 -> BOX DRAWINGS LIGHT VERTICAL + '\u250c' # 0x82 -> BOX DRAWINGS LIGHT DOWN AND RIGHT + '\u2510' # 0x83 -> BOX DRAWINGS LIGHT DOWN AND LEFT + '\u2514' # 0x84 -> BOX DRAWINGS LIGHT UP AND RIGHT + '\u2518' # 0x85 -> BOX DRAWINGS LIGHT UP AND LEFT + '\u251c' # 0x86 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT + '\u2524' # 0x87 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT + '\u252c' # 0x88 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + '\u2534' # 0x89 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL + '\u253c' # 0x8A -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + '\u2580' # 0x8B -> UPPER HALF BLOCK + '\u2584' # 0x8C -> LOWER HALF BLOCK + '\u2588' # 0x8D -> FULL BLOCK + '\u258c' # 0x8E -> LEFT HALF BLOCK + '\u2590' # 0x8F -> RIGHT HALF BLOCK + '\u2591' # 0x90 -> LIGHT SHADE + '\u2592' # 0x91 -> MEDIUM SHADE + '\u2593' # 0x92 -> DARK SHADE + '\u2320' # 0x93 -> TOP HALF INTEGRAL + '\u25a0' # 0x94 -> BLACK SQUARE + '\u2219' # 0x95 -> BULLET OPERATOR + '\u221a' # 0x96 -> SQUARE ROOT + '\u2248' # 0x97 -> ALMOST EQUAL TO + '\u2264' # 0x98 -> LESS-THAN OR EQUAL TO + '\u2265' # 0x99 -> GREATER-THAN OR EQUAL TO + '\xa0' # 0x9A 
-> NO-BREAK SPACE + '\u2321' # 0x9B -> BOTTOM HALF INTEGRAL + '\xb0' # 0x9C -> DEGREE SIGN + '\xb2' # 0x9D -> SUPERSCRIPT TWO + '\xb7' # 0x9E -> MIDDLE DOT + '\xf7' # 0x9F -> DIVISION SIGN + '\u2550' # 0xA0 -> BOX DRAWINGS DOUBLE HORIZONTAL + '\u2551' # 0xA1 -> BOX DRAWINGS DOUBLE VERTICAL + '\u2552' # 0xA2 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE + '\u0451' # 0xA3 -> CYRILLIC SMALL LETTER IO + '\u2553' # 0xA4 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE + '\u2554' # 0xA5 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT + '\u2555' # 0xA6 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE + '\u2556' # 0xA7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE + '\u2557' # 0xA8 -> BOX DRAWINGS DOUBLE DOWN AND LEFT + '\u2558' # 0xA9 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE + '\u2559' # 0xAA -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE + '\u255a' # 0xAB -> BOX DRAWINGS DOUBLE UP AND RIGHT + '\u255b' # 0xAC -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE + '\u255c' # 0xAD -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE + '\u255d' # 0xAE -> BOX DRAWINGS DOUBLE UP AND LEFT + '\u255e' # 0xAF -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE + '\u255f' # 0xB0 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE + '\u2560' # 0xB1 -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + '\u2561' # 0xB2 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE + '\u0401' # 0xB3 -> CYRILLIC CAPITAL LETTER IO + '\u2562' # 0xB4 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE + '\u2563' # 0xB5 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT + '\u2564' # 0xB6 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE + '\u2565' # 0xB7 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE + '\u2566' # 0xB8 -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + '\u2567' # 0xB9 -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE + '\u2568' # 0xBA -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE + '\u2569' # 0xBB -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL + '\u256a' # 0xBC -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE + '\u256b' # 0xBD -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE + '\u256c' # 0xBE -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + '\xa9' # 0xBF -> COPYRIGHT SIGN + '\u044e' # 0xC0 -> CYRILLIC SMALL LETTER YU + '\u0430' # 0xC1 -> CYRILLIC SMALL LETTER A + '\u0431' # 0xC2 -> CYRILLIC SMALL LETTER BE + '\u0446' # 0xC3 -> CYRILLIC SMALL LETTER TSE + '\u0434' # 0xC4 -> CYRILLIC SMALL LETTER DE + '\u0435' # 0xC5 -> CYRILLIC SMALL LETTER IE + '\u0444' # 0xC6 -> CYRILLIC SMALL LETTER EF + '\u0433' # 0xC7 -> CYRILLIC SMALL LETTER GHE + '\u0445' # 0xC8 -> CYRILLIC SMALL LETTER HA + '\u0438' # 0xC9 -> CYRILLIC SMALL LETTER I + '\u0439' # 0xCA -> CYRILLIC SMALL LETTER SHORT I + '\u043a' # 0xCB -> CYRILLIC SMALL LETTER KA + '\u043b' # 0xCC -> CYRILLIC SMALL LETTER EL + '\u043c' # 0xCD -> CYRILLIC SMALL LETTER EM + '\u043d' # 0xCE -> CYRILLIC SMALL LETTER EN + '\u043e' # 0xCF -> CYRILLIC SMALL LETTER O + '\u043f' # 0xD0 -> CYRILLIC SMALL LETTER PE + '\u044f' # 0xD1 -> CYRILLIC SMALL LETTER YA + '\u0440' # 0xD2 -> CYRILLIC SMALL LETTER ER + '\u0441' # 0xD3 -> CYRILLIC SMALL LETTER ES + '\u0442' # 0xD4 -> CYRILLIC SMALL LETTER TE + '\u0443' # 0xD5 -> CYRILLIC SMALL LETTER U + '\u0436' # 0xD6 -> CYRILLIC SMALL LETTER ZHE + '\u0432' # 0xD7 -> CYRILLIC SMALL LETTER VE + '\u044c' # 0xD8 -> CYRILLIC SMALL LETTER SOFT SIGN + '\u044b' # 0xD9 -> CYRILLIC SMALL LETTER YERU + '\u0437' # 0xDA -> CYRILLIC SMALL LETTER ZE + '\u0448' # 0xDB -> CYRILLIC SMALL LETTER SHA + '\u044d' # 0xDC -> CYRILLIC SMALL LETTER E + '\u0449' # 0xDD -> CYRILLIC SMALL LETTER SHCHA + '\u0447' # 0xDE -> CYRILLIC SMALL LETTER CHE 
+ '\u044a' # 0xDF -> CYRILLIC SMALL LETTER HARD SIGN + '\u042e' # 0xE0 -> CYRILLIC CAPITAL LETTER YU + '\u0410' # 0xE1 -> CYRILLIC CAPITAL LETTER A + '\u0411' # 0xE2 -> CYRILLIC CAPITAL LETTER BE + '\u0426' # 0xE3 -> CYRILLIC CAPITAL LETTER TSE + '\u0414' # 0xE4 -> CYRILLIC CAPITAL LETTER DE + '\u0415' # 0xE5 -> CYRILLIC CAPITAL LETTER IE + '\u0424' # 0xE6 -> CYRILLIC CAPITAL LETTER EF + '\u0413' # 0xE7 -> CYRILLIC CAPITAL LETTER GHE + '\u0425' # 0xE8 -> CYRILLIC CAPITAL LETTER HA + '\u0418' # 0xE9 -> CYRILLIC CAPITAL LETTER I + '\u0419' # 0xEA -> CYRILLIC CAPITAL LETTER SHORT I + '\u041a' # 0xEB -> CYRILLIC CAPITAL LETTER KA + '\u041b' # 0xEC -> CYRILLIC CAPITAL LETTER EL + '\u041c' # 0xED -> CYRILLIC CAPITAL LETTER EM + '\u041d' # 0xEE -> CYRILLIC CAPITAL LETTER EN + '\u041e' # 0xEF -> CYRILLIC CAPITAL LETTER O + '\u041f' # 0xF0 -> CYRILLIC CAPITAL LETTER PE + '\u042f' # 0xF1 -> CYRILLIC CAPITAL LETTER YA + '\u0420' # 0xF2 -> CYRILLIC CAPITAL LETTER ER + '\u0421' # 0xF3 -> CYRILLIC CAPITAL LETTER ES + '\u0422' # 0xF4 -> CYRILLIC CAPITAL LETTER TE + '\u0423' # 0xF5 -> CYRILLIC CAPITAL LETTER U + '\u0416' # 0xF6 -> CYRILLIC CAPITAL LETTER ZHE + '\u0412' # 0xF7 -> CYRILLIC CAPITAL LETTER VE + '\u042c' # 0xF8 -> CYRILLIC CAPITAL LETTER SOFT SIGN + '\u042b' # 0xF9 -> CYRILLIC CAPITAL LETTER YERU + '\u0417' # 0xFA -> CYRILLIC CAPITAL LETTER ZE + '\u0428' # 0xFB -> CYRILLIC CAPITAL LETTER SHA + '\u042d' # 0xFC -> CYRILLIC CAPITAL LETTER E + '\u0429' # 0xFD -> CYRILLIC CAPITAL LETTER SHCHA + '\u0427' # 0xFE -> CYRILLIC CAPITAL LETTER CHE + '\u042a' # 0xFF -> CYRILLIC CAPITAL LETTER HARD SIGN +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/koi8_t.py b/venv/Lib/encodings/koi8_t.py new file mode 100644 index 00000000..b5415ba3 --- /dev/null +++ b/venv/Lib/encodings/koi8_t.py @@ -0,0 +1,308 @@ +""" Python Character Mapping Codec koi8_t +""" +# http://ru.wikipedia.org/wiki/КОИ-8 +# http://www.opensource.apple.com/source/libiconv/libiconv-4/libiconv/tests/KOI8-T.TXT + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='koi8-t', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x04' # 0x04 -> END OF TRANSMISSION + '\x05' # 0x05 -> ENQUIRY + '\x06' # 0x06 -> ACKNOWLEDGE + '\x07' # 0x07 -> BELL + '\x08' # 0x08 -> BACKSPACE + '\t' # 0x09 -> HORIZONTAL TABULATION + '\n' # 0x0A -> LINE FEED + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 
0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x14' # 0x14 -> DEVICE CONTROL FOUR + '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x16 -> SYNCHRONOUS IDLE + '\x17' # 0x17 -> END OF TRANSMISSION BLOCK + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x1a' # 0x1A -> SUBSTITUTE + '\x1b' # 0x1B -> ESCAPE + '\x1c' # 0x1C -> FILE SEPARATOR + '\x1d' # 0x1D -> GROUP SEPARATOR + '\x1e' # 0x1E -> RECORD SEPARATOR + '\x1f' # 0x1F -> UNIT SEPARATOR + ' ' # 0x20 -> SPACE + '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' # 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' # 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' 
# 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> DELETE + '\u049b' # 0x80 -> CYRILLIC SMALL LETTER KA WITH DESCENDER + '\u0493' # 0x81 -> CYRILLIC SMALL LETTER GHE WITH STROKE + '\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK + '\u0492' # 0x83 -> CYRILLIC CAPITAL LETTER GHE WITH STROKE + '\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK + '\u2026' # 0x85 -> HORIZONTAL ELLIPSIS + '\u2020' # 0x86 -> DAGGER + '\u2021' # 0x87 -> DOUBLE DAGGER + '\ufffe' # 0x88 -> UNDEFINED + '\u2030' # 0x89 -> PER MILLE SIGN + '\u04b3' # 0x8A -> CYRILLIC SMALL LETTER HA WITH DESCENDER + '\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK + '\u04b2' # 0x8C -> CYRILLIC CAPITAL LETTER HA WITH DESCENDER + '\u04b7' # 0x8D -> CYRILLIC SMALL LETTER CHE WITH DESCENDER + '\u04b6' # 0x8E -> CYRILLIC CAPITAL LETTER CHE WITH DESCENDER + '\ufffe' # 0x8F -> UNDEFINED + '\u049a' # 0x90 -> CYRILLIC CAPITAL LETTER KA WITH DESCENDER + '\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK + '\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK + '\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK + '\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK + '\u2022' # 0x95 -> BULLET + '\u2013' # 0x96 -> EN DASH + '\u2014' # 0x97 -> EM DASH + '\ufffe' # 0x98 -> UNDEFINED + '\u2122' # 0x99 -> TRADE MARK SIGN + '\ufffe' # 0x9A -> UNDEFINED + '\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK + '\ufffe' # 0x9C -> UNDEFINED + '\ufffe' # 0x9D -> UNDEFINED + '\ufffe' # 0x9E -> UNDEFINED + '\ufffe' # 0x9F -> UNDEFINED + '\ufffe' # 0xA0 -> UNDEFINED + '\u04ef' # 0xA1 -> CYRILLIC SMALL LETTER U WITH MACRON + '\u04ee' # 0xA2 -> CYRILLIC CAPITAL LETTER U WITH MACRON + '\u0451' # 0xA3 -> CYRILLIC SMALL LETTER IO + '\xa4' # 0xA4 -> CURRENCY SIGN + '\u04e3' # 0xA5 -> CYRILLIC SMALL LETTER I WITH MACRON + '\xa6' # 0xA6 -> BROKEN BAR + '\xa7' # 0xA7 -> SECTION SIGN + '\ufffe' # 0xA8 -> UNDEFINED + '\ufffe' # 0xA9 -> UNDEFINED + '\ufffe' # 0xAA -> UNDEFINED + '\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xac' # 0xAC -> NOT SIGN + '\xad' # 0xAD -> SOFT HYPHEN + '\xae' # 0xAE -> REGISTERED SIGN + '\ufffe' # 0xAF -> UNDEFINED + '\xb0' # 0xB0 -> DEGREE SIGN + '\xb1' # 0xB1 -> PLUS-MINUS SIGN + '\xb2' # 0xB2 -> SUPERSCRIPT TWO + '\u0401' # 0xB3 -> CYRILLIC CAPITAL LETTER IO + '\ufffe' # 0xB4 -> UNDEFINED + '\u04e2' # 0xB5 -> CYRILLIC CAPITAL LETTER I WITH MACRON + '\xb6' # 0xB6 -> PILCROW SIGN + '\xb7' # 0xB7 -> MIDDLE DOT + '\ufffe' # 0xB8 -> UNDEFINED + '\u2116' # 0xB9 -> NUMERO SIGN + '\ufffe' # 0xBA -> UNDEFINED + '\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\ufffe' # 0xBC -> UNDEFINED + '\ufffe' # 0xBD -> UNDEFINED + '\ufffe' # 0xBE -> UNDEFINED + '\xa9' # 0xBF -> COPYRIGHT SIGN + '\u044e' # 0xC0 -> CYRILLIC SMALL LETTER YU + '\u0430' # 0xC1 -> CYRILLIC SMALL LETTER A + '\u0431' # 0xC2 -> CYRILLIC SMALL LETTER BE + '\u0446' # 0xC3 -> CYRILLIC SMALL LETTER TSE + '\u0434' # 0xC4 -> CYRILLIC SMALL LETTER DE + '\u0435' # 0xC5 -> CYRILLIC SMALL LETTER IE + '\u0444' # 0xC6 -> CYRILLIC SMALL LETTER EF + '\u0433' # 0xC7 -> CYRILLIC SMALL LETTER GHE + '\u0445' # 0xC8 -> CYRILLIC SMALL LETTER HA + '\u0438' # 0xC9 -> CYRILLIC SMALL LETTER I + '\u0439' # 0xCA -> CYRILLIC SMALL LETTER SHORT I + '\u043a' # 0xCB -> CYRILLIC SMALL LETTER KA + '\u043b' # 0xCC -> CYRILLIC SMALL LETTER EL + '\u043c' # 0xCD -> 
CYRILLIC SMALL LETTER EM + '\u043d' # 0xCE -> CYRILLIC SMALL LETTER EN + '\u043e' # 0xCF -> CYRILLIC SMALL LETTER O + '\u043f' # 0xD0 -> CYRILLIC SMALL LETTER PE + '\u044f' # 0xD1 -> CYRILLIC SMALL LETTER YA + '\u0440' # 0xD2 -> CYRILLIC SMALL LETTER ER + '\u0441' # 0xD3 -> CYRILLIC SMALL LETTER ES + '\u0442' # 0xD4 -> CYRILLIC SMALL LETTER TE + '\u0443' # 0xD5 -> CYRILLIC SMALL LETTER U + '\u0436' # 0xD6 -> CYRILLIC SMALL LETTER ZHE + '\u0432' # 0xD7 -> CYRILLIC SMALL LETTER VE + '\u044c' # 0xD8 -> CYRILLIC SMALL LETTER SOFT SIGN + '\u044b' # 0xD9 -> CYRILLIC SMALL LETTER YERU + '\u0437' # 0xDA -> CYRILLIC SMALL LETTER ZE + '\u0448' # 0xDB -> CYRILLIC SMALL LETTER SHA + '\u044d' # 0xDC -> CYRILLIC SMALL LETTER E + '\u0449' # 0xDD -> CYRILLIC SMALL LETTER SHCHA + '\u0447' # 0xDE -> CYRILLIC SMALL LETTER CHE + '\u044a' # 0xDF -> CYRILLIC SMALL LETTER HARD SIGN + '\u042e' # 0xE0 -> CYRILLIC CAPITAL LETTER YU + '\u0410' # 0xE1 -> CYRILLIC CAPITAL LETTER A + '\u0411' # 0xE2 -> CYRILLIC CAPITAL LETTER BE + '\u0426' # 0xE3 -> CYRILLIC CAPITAL LETTER TSE + '\u0414' # 0xE4 -> CYRILLIC CAPITAL LETTER DE + '\u0415' # 0xE5 -> CYRILLIC CAPITAL LETTER IE + '\u0424' # 0xE6 -> CYRILLIC CAPITAL LETTER EF + '\u0413' # 0xE7 -> CYRILLIC CAPITAL LETTER GHE + '\u0425' # 0xE8 -> CYRILLIC CAPITAL LETTER HA + '\u0418' # 0xE9 -> CYRILLIC CAPITAL LETTER I + '\u0419' # 0xEA -> CYRILLIC CAPITAL LETTER SHORT I + '\u041a' # 0xEB -> CYRILLIC CAPITAL LETTER KA + '\u041b' # 0xEC -> CYRILLIC CAPITAL LETTER EL + '\u041c' # 0xED -> CYRILLIC CAPITAL LETTER EM + '\u041d' # 0xEE -> CYRILLIC CAPITAL LETTER EN + '\u041e' # 0xEF -> CYRILLIC CAPITAL LETTER O + '\u041f' # 0xF0 -> CYRILLIC CAPITAL LETTER PE + '\u042f' # 0xF1 -> CYRILLIC CAPITAL LETTER YA + '\u0420' # 0xF2 -> CYRILLIC CAPITAL LETTER ER + '\u0421' # 0xF3 -> CYRILLIC CAPITAL LETTER ES + '\u0422' # 0xF4 -> CYRILLIC CAPITAL LETTER TE + '\u0423' # 0xF5 -> CYRILLIC CAPITAL LETTER U + '\u0416' # 0xF6 -> CYRILLIC CAPITAL LETTER ZHE + '\u0412' # 0xF7 -> CYRILLIC CAPITAL LETTER VE + '\u042c' # 0xF8 -> CYRILLIC CAPITAL LETTER SOFT SIGN + '\u042b' # 0xF9 -> CYRILLIC CAPITAL LETTER YERU + '\u0417' # 0xFA -> CYRILLIC CAPITAL LETTER ZE + '\u0428' # 0xFB -> CYRILLIC CAPITAL LETTER SHA + '\u042d' # 0xFC -> CYRILLIC CAPITAL LETTER E + '\u0429' # 0xFD -> CYRILLIC CAPITAL LETTER SHCHA + '\u0427' # 0xFE -> CYRILLIC CAPITAL LETTER CHE + '\u042a' # 0xFF -> CYRILLIC CAPITAL LETTER HARD SIGN +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/koi8_u.py b/venv/Lib/encodings/koi8_u.py new file mode 100644 index 00000000..f9e3fae5 --- /dev/null +++ b/venv/Lib/encodings/koi8_u.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec koi8_u generated from 'python-mappings/KOI8-U.TXT' with gencodec.py. 
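
Several positions in the KOI8-T table above are marked `'\ufffe' # UNDEFINED`; `gencodec.py` uses U+FFFE as a sentinel, and `codecs.charmap_decode()` raises for those bytes unless a non-strict error handler is supplied. A small sketch of that behaviour, illustrative only and not part of the diff:

```python
# 0x88 is one of the positions listed as '\ufffe' # UNDEFINED in koi8_t above;
# 0xC1 maps to CYRILLIC SMALL LETTER A.
try:
    b'\x88'.decode('koi8_t')
except UnicodeDecodeError as exc:
    print(exc.reason)                                    # 'character maps to <undefined>'

# Non-strict handlers work through the same table:
print(b'\x88\xc1'.decode('koi8_t', errors='replace'))    # '\ufffdа'
```
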
+ +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='koi8-u', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x04' # 0x04 -> END OF TRANSMISSION + '\x05' # 0x05 -> ENQUIRY + '\x06' # 0x06 -> ACKNOWLEDGE + '\x07' # 0x07 -> BELL + '\x08' # 0x08 -> BACKSPACE + '\t' # 0x09 -> HORIZONTAL TABULATION + '\n' # 0x0A -> LINE FEED + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x14' # 0x14 -> DEVICE CONTROL FOUR + '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x16 -> SYNCHRONOUS IDLE + '\x17' # 0x17 -> END OF TRANSMISSION BLOCK + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x1a' # 0x1A -> SUBSTITUTE + '\x1b' # 0x1B -> ESCAPE + '\x1c' # 0x1C -> FILE SEPARATOR + '\x1d' # 0x1D -> GROUP SEPARATOR + '\x1e' # 0x1E -> RECORD SEPARATOR + '\x1f' # 0x1F -> UNIT SEPARATOR + ' ' # 0x20 -> SPACE + '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' # 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' 
# 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> DELETE + '\u2500' # 0x80 -> BOX DRAWINGS LIGHT HORIZONTAL + '\u2502' # 0x81 -> BOX DRAWINGS LIGHT VERTICAL + '\u250c' # 0x82 -> BOX DRAWINGS LIGHT DOWN AND RIGHT + '\u2510' # 0x83 -> BOX DRAWINGS LIGHT DOWN AND LEFT + '\u2514' # 0x84 -> BOX DRAWINGS LIGHT UP AND RIGHT + '\u2518' # 0x85 -> BOX DRAWINGS LIGHT UP AND LEFT + '\u251c' # 0x86 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT + '\u2524' # 0x87 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT + '\u252c' # 0x88 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + '\u2534' # 0x89 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL + '\u253c' # 0x8A -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + '\u2580' # 0x8B -> UPPER HALF BLOCK + '\u2584' # 0x8C -> LOWER HALF BLOCK + '\u2588' # 0x8D -> FULL BLOCK + '\u258c' # 0x8E -> LEFT HALF BLOCK + '\u2590' # 0x8F -> RIGHT HALF BLOCK + '\u2591' # 0x90 -> LIGHT SHADE + '\u2592' # 0x91 -> MEDIUM SHADE + '\u2593' # 0x92 -> DARK SHADE + '\u2320' # 0x93 -> TOP HALF INTEGRAL + '\u25a0' # 0x94 -> BLACK SQUARE + '\u2219' # 0x95 -> BULLET OPERATOR + '\u221a' # 0x96 -> SQUARE ROOT + '\u2248' # 0x97 -> ALMOST EQUAL TO + '\u2264' # 0x98 -> LESS-THAN OR EQUAL TO + '\u2265' # 0x99 -> GREATER-THAN OR EQUAL TO + '\xa0' # 0x9A 
-> NO-BREAK SPACE + '\u2321' # 0x9B -> BOTTOM HALF INTEGRAL + '\xb0' # 0x9C -> DEGREE SIGN + '\xb2' # 0x9D -> SUPERSCRIPT TWO + '\xb7' # 0x9E -> MIDDLE DOT + '\xf7' # 0x9F -> DIVISION SIGN + '\u2550' # 0xA0 -> BOX DRAWINGS DOUBLE HORIZONTAL + '\u2551' # 0xA1 -> BOX DRAWINGS DOUBLE VERTICAL + '\u2552' # 0xA2 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE + '\u0451' # 0xA3 -> CYRILLIC SMALL LETTER IO + '\u0454' # 0xA4 -> CYRILLIC SMALL LETTER UKRAINIAN IE + '\u2554' # 0xA5 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT + '\u0456' # 0xA6 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I + '\u0457' # 0xA7 -> CYRILLIC SMALL LETTER YI (UKRAINIAN) + '\u2557' # 0xA8 -> BOX DRAWINGS DOUBLE DOWN AND LEFT + '\u2558' # 0xA9 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE + '\u2559' # 0xAA -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE + '\u255a' # 0xAB -> BOX DRAWINGS DOUBLE UP AND RIGHT + '\u255b' # 0xAC -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE + '\u0491' # 0xAD -> CYRILLIC SMALL LETTER UKRAINIAN GHE WITH UPTURN + '\u255d' # 0xAE -> BOX DRAWINGS DOUBLE UP AND LEFT + '\u255e' # 0xAF -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE + '\u255f' # 0xB0 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE + '\u2560' # 0xB1 -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + '\u2561' # 0xB2 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE + '\u0401' # 0xB3 -> CYRILLIC CAPITAL LETTER IO + '\u0404' # 0xB4 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE + '\u2563' # 0xB5 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT + '\u0406' # 0xB6 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I + '\u0407' # 0xB7 -> CYRILLIC CAPITAL LETTER YI (UKRAINIAN) + '\u2566' # 0xB8 -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + '\u2567' # 0xB9 -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE + '\u2568' # 0xBA -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE + '\u2569' # 0xBB -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL + '\u256a' # 0xBC -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE + '\u0490' # 0xBD -> CYRILLIC CAPITAL LETTER UKRAINIAN GHE WITH UPTURN + '\u256c' # 0xBE -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + '\xa9' # 0xBF -> COPYRIGHT SIGN + '\u044e' # 0xC0 -> CYRILLIC SMALL LETTER YU + '\u0430' # 0xC1 -> CYRILLIC SMALL LETTER A + '\u0431' # 0xC2 -> CYRILLIC SMALL LETTER BE + '\u0446' # 0xC3 -> CYRILLIC SMALL LETTER TSE + '\u0434' # 0xC4 -> CYRILLIC SMALL LETTER DE + '\u0435' # 0xC5 -> CYRILLIC SMALL LETTER IE + '\u0444' # 0xC6 -> CYRILLIC SMALL LETTER EF + '\u0433' # 0xC7 -> CYRILLIC SMALL LETTER GHE + '\u0445' # 0xC8 -> CYRILLIC SMALL LETTER HA + '\u0438' # 0xC9 -> CYRILLIC SMALL LETTER I + '\u0439' # 0xCA -> CYRILLIC SMALL LETTER SHORT I + '\u043a' # 0xCB -> CYRILLIC SMALL LETTER KA + '\u043b' # 0xCC -> CYRILLIC SMALL LETTER EL + '\u043c' # 0xCD -> CYRILLIC SMALL LETTER EM + '\u043d' # 0xCE -> CYRILLIC SMALL LETTER EN + '\u043e' # 0xCF -> CYRILLIC SMALL LETTER O + '\u043f' # 0xD0 -> CYRILLIC SMALL LETTER PE + '\u044f' # 0xD1 -> CYRILLIC SMALL LETTER YA + '\u0440' # 0xD2 -> CYRILLIC SMALL LETTER ER + '\u0441' # 0xD3 -> CYRILLIC SMALL LETTER ES + '\u0442' # 0xD4 -> CYRILLIC SMALL LETTER TE + '\u0443' # 0xD5 -> CYRILLIC SMALL LETTER U + '\u0436' # 0xD6 -> CYRILLIC SMALL LETTER ZHE + '\u0432' # 0xD7 -> CYRILLIC SMALL LETTER VE + '\u044c' # 0xD8 -> CYRILLIC SMALL LETTER SOFT SIGN + '\u044b' # 0xD9 -> CYRILLIC SMALL LETTER YERU + '\u0437' # 0xDA -> CYRILLIC SMALL LETTER ZE + '\u0448' # 0xDB -> CYRILLIC SMALL LETTER SHA + '\u044d' # 0xDC -> CYRILLIC SMALL LETTER E + '\u0449' # 0xDD -> CYRILLIC SMALL LETTER SHCHA + '\u0447' # 0xDE -> CYRILLIC SMALL LETTER CHE + '\u044a' 
# 0xDF -> CYRILLIC SMALL LETTER HARD SIGN + '\u042e' # 0xE0 -> CYRILLIC CAPITAL LETTER YU + '\u0410' # 0xE1 -> CYRILLIC CAPITAL LETTER A + '\u0411' # 0xE2 -> CYRILLIC CAPITAL LETTER BE + '\u0426' # 0xE3 -> CYRILLIC CAPITAL LETTER TSE + '\u0414' # 0xE4 -> CYRILLIC CAPITAL LETTER DE + '\u0415' # 0xE5 -> CYRILLIC CAPITAL LETTER IE + '\u0424' # 0xE6 -> CYRILLIC CAPITAL LETTER EF + '\u0413' # 0xE7 -> CYRILLIC CAPITAL LETTER GHE + '\u0425' # 0xE8 -> CYRILLIC CAPITAL LETTER HA + '\u0418' # 0xE9 -> CYRILLIC CAPITAL LETTER I + '\u0419' # 0xEA -> CYRILLIC CAPITAL LETTER SHORT I + '\u041a' # 0xEB -> CYRILLIC CAPITAL LETTER KA + '\u041b' # 0xEC -> CYRILLIC CAPITAL LETTER EL + '\u041c' # 0xED -> CYRILLIC CAPITAL LETTER EM + '\u041d' # 0xEE -> CYRILLIC CAPITAL LETTER EN + '\u041e' # 0xEF -> CYRILLIC CAPITAL LETTER O + '\u041f' # 0xF0 -> CYRILLIC CAPITAL LETTER PE + '\u042f' # 0xF1 -> CYRILLIC CAPITAL LETTER YA + '\u0420' # 0xF2 -> CYRILLIC CAPITAL LETTER ER + '\u0421' # 0xF3 -> CYRILLIC CAPITAL LETTER ES + '\u0422' # 0xF4 -> CYRILLIC CAPITAL LETTER TE + '\u0423' # 0xF5 -> CYRILLIC CAPITAL LETTER U + '\u0416' # 0xF6 -> CYRILLIC CAPITAL LETTER ZHE + '\u0412' # 0xF7 -> CYRILLIC CAPITAL LETTER VE + '\u042c' # 0xF8 -> CYRILLIC CAPITAL LETTER SOFT SIGN + '\u042b' # 0xF9 -> CYRILLIC CAPITAL LETTER YERU + '\u0417' # 0xFA -> CYRILLIC CAPITAL LETTER ZE + '\u0428' # 0xFB -> CYRILLIC CAPITAL LETTER SHA + '\u042d' # 0xFC -> CYRILLIC CAPITAL LETTER E + '\u0429' # 0xFD -> CYRILLIC CAPITAL LETTER SHCHA + '\u0427' # 0xFE -> CYRILLIC CAPITAL LETTER CHE + '\u042a' # 0xFF -> CYRILLIC CAPITAL LETTER HARD SIGN +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/kz1048.py b/venv/Lib/encodings/kz1048.py new file mode 100644 index 00000000..712aee6e --- /dev/null +++ b/venv/Lib/encodings/kz1048.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec kz1048 generated from 'MAPPINGS/VENDORS/MISC/KZ1048.TXT' with gencodec.py. 
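
The `IncrementalEncoder`/`IncrementalDecoder` and `Stream*` classes in each of these modules add no logic of their own; they simply point the generic `codecs` machinery at the same two tables, so chunked decoding works out of the box. A sketch against the KOI8-U table above (illustrative, not part of the diff):

```python
import codecs

# Feed a KOI8-U byte stream one byte at a time through the incremental decoder.
# Per the table above: 0xCD -> м, 0xC9 -> и, 0xD2 -> р.
dec = codecs.getincrementaldecoder('koi8_u')()
text = ''.join(dec.decode(chunk) for chunk in [b'\xcd', b'\xc9', b'\xd2'])
print(text)   # 'мир'
```
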
+ +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self, input, errors='strict'): + return codecs.charmap_encode(input, errors, encoding_table) + + def decode(self, input, errors='strict'): + return codecs.charmap_decode(input, errors, decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input, self.errors, encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input, self.errors, decoding_table)[0] + +class StreamWriter(Codec, codecs.StreamWriter): + pass + +class StreamReader(Codec, codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='kz1048', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x04' # 0x04 -> END OF TRANSMISSION + '\x05' # 0x05 -> ENQUIRY + '\x06' # 0x06 -> ACKNOWLEDGE + '\x07' # 0x07 -> BELL + '\x08' # 0x08 -> BACKSPACE + '\t' # 0x09 -> HORIZONTAL TABULATION + '\n' # 0x0A -> LINE FEED + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x14' # 0x14 -> DEVICE CONTROL FOUR + '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x16 -> SYNCHRONOUS IDLE + '\x17' # 0x17 -> END OF TRANSMISSION BLOCK + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x1a' # 0x1A -> SUBSTITUTE + '\x1b' # 0x1B -> ESCAPE + '\x1c' # 0x1C -> FILE SEPARATOR + '\x1d' # 0x1D -> GROUP SEPARATOR + '\x1e' # 0x1E -> RECORD SEPARATOR + '\x1f' # 0x1F -> UNIT SEPARATOR + ' ' # 0x20 -> SPACE + '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' # 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' 
# 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> DELETE + '\u0402' # 0x80 -> CYRILLIC CAPITAL LETTER DJE + '\u0403' # 0x81 -> CYRILLIC CAPITAL LETTER GJE + '\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK + '\u0453' # 0x83 -> CYRILLIC SMALL LETTER GJE + '\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK + '\u2026' # 0x85 -> HORIZONTAL ELLIPSIS + '\u2020' # 0x86 -> DAGGER + '\u2021' # 0x87 -> DOUBLE DAGGER + '\u20ac' # 0x88 -> EURO SIGN + '\u2030' # 0x89 -> PER MILLE SIGN + '\u0409' # 0x8A -> CYRILLIC CAPITAL LETTER LJE + '\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK + '\u040a' # 0x8C -> CYRILLIC CAPITAL LETTER NJE + '\u049a' # 0x8D -> CYRILLIC CAPITAL LETTER KA WITH DESCENDER + '\u04ba' # 0x8E -> CYRILLIC CAPITAL LETTER SHHA + '\u040f' # 0x8F -> CYRILLIC CAPITAL LETTER DZHE + '\u0452' # 0x90 -> CYRILLIC SMALL LETTER DJE + '\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK + '\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK + '\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK + '\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK + '\u2022' # 0x95 -> BULLET + '\u2013' # 0x96 -> EN DASH + '\u2014' # 0x97 -> EM DASH + '\ufffe' # 0x98 -> UNDEFINED + '\u2122' # 0x99 -> TRADE MARK SIGN + '\u0459' # 0x9A -> CYRILLIC SMALL LETTER LJE + 
'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK + '\u045a' # 0x9C -> CYRILLIC SMALL LETTER NJE + '\u049b' # 0x9D -> CYRILLIC SMALL LETTER KA WITH DESCENDER + '\u04bb' # 0x9E -> CYRILLIC SMALL LETTER SHHA + '\u045f' # 0x9F -> CYRILLIC SMALL LETTER DZHE + '\xa0' # 0xA0 -> NO-BREAK SPACE + '\u04b0' # 0xA1 -> CYRILLIC CAPITAL LETTER STRAIGHT U WITH STROKE + '\u04b1' # 0xA2 -> CYRILLIC SMALL LETTER STRAIGHT U WITH STROKE + '\u04d8' # 0xA3 -> CYRILLIC CAPITAL LETTER SCHWA + '\xa4' # 0xA4 -> CURRENCY SIGN + '\u04e8' # 0xA5 -> CYRILLIC CAPITAL LETTER BARRED O + '\xa6' # 0xA6 -> BROKEN BAR + '\xa7' # 0xA7 -> SECTION SIGN + '\u0401' # 0xA8 -> CYRILLIC CAPITAL LETTER IO + '\xa9' # 0xA9 -> COPYRIGHT SIGN + '\u0492' # 0xAA -> CYRILLIC CAPITAL LETTER GHE WITH STROKE + '\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xac' # 0xAC -> NOT SIGN + '\xad' # 0xAD -> SOFT HYPHEN + '\xae' # 0xAE -> REGISTERED SIGN + '\u04ae' # 0xAF -> CYRILLIC CAPITAL LETTER STRAIGHT U + '\xb0' # 0xB0 -> DEGREE SIGN + '\xb1' # 0xB1 -> PLUS-MINUS SIGN + '\u0406' # 0xB2 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I + '\u0456' # 0xB3 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I + '\u04e9' # 0xB4 -> CYRILLIC SMALL LETTER BARRED O + '\xb5' # 0xB5 -> MICRO SIGN + '\xb6' # 0xB6 -> PILCROW SIGN + '\xb7' # 0xB7 -> MIDDLE DOT + '\u0451' # 0xB8 -> CYRILLIC SMALL LETTER IO + '\u2116' # 0xB9 -> NUMERO SIGN + '\u0493' # 0xBA -> CYRILLIC SMALL LETTER GHE WITH STROKE + '\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\u04d9' # 0xBC -> CYRILLIC SMALL LETTER SCHWA + '\u04a2' # 0xBD -> CYRILLIC CAPITAL LETTER EN WITH DESCENDER + '\u04a3' # 0xBE -> CYRILLIC SMALL LETTER EN WITH DESCENDER + '\u04af' # 0xBF -> CYRILLIC SMALL LETTER STRAIGHT U + '\u0410' # 0xC0 -> CYRILLIC CAPITAL LETTER A + '\u0411' # 0xC1 -> CYRILLIC CAPITAL LETTER BE + '\u0412' # 0xC2 -> CYRILLIC CAPITAL LETTER VE + '\u0413' # 0xC3 -> CYRILLIC CAPITAL LETTER GHE + '\u0414' # 0xC4 -> CYRILLIC CAPITAL LETTER DE + '\u0415' # 0xC5 -> CYRILLIC CAPITAL LETTER IE + '\u0416' # 0xC6 -> CYRILLIC CAPITAL LETTER ZHE + '\u0417' # 0xC7 -> CYRILLIC CAPITAL LETTER ZE + '\u0418' # 0xC8 -> CYRILLIC CAPITAL LETTER I + '\u0419' # 0xC9 -> CYRILLIC CAPITAL LETTER SHORT I + '\u041a' # 0xCA -> CYRILLIC CAPITAL LETTER KA + '\u041b' # 0xCB -> CYRILLIC CAPITAL LETTER EL + '\u041c' # 0xCC -> CYRILLIC CAPITAL LETTER EM + '\u041d' # 0xCD -> CYRILLIC CAPITAL LETTER EN + '\u041e' # 0xCE -> CYRILLIC CAPITAL LETTER O + '\u041f' # 0xCF -> CYRILLIC CAPITAL LETTER PE + '\u0420' # 0xD0 -> CYRILLIC CAPITAL LETTER ER + '\u0421' # 0xD1 -> CYRILLIC CAPITAL LETTER ES + '\u0422' # 0xD2 -> CYRILLIC CAPITAL LETTER TE + '\u0423' # 0xD3 -> CYRILLIC CAPITAL LETTER U + '\u0424' # 0xD4 -> CYRILLIC CAPITAL LETTER EF + '\u0425' # 0xD5 -> CYRILLIC CAPITAL LETTER HA + '\u0426' # 0xD6 -> CYRILLIC CAPITAL LETTER TSE + '\u0427' # 0xD7 -> CYRILLIC CAPITAL LETTER CHE + '\u0428' # 0xD8 -> CYRILLIC CAPITAL LETTER SHA + '\u0429' # 0xD9 -> CYRILLIC CAPITAL LETTER SHCHA + '\u042a' # 0xDA -> CYRILLIC CAPITAL LETTER HARD SIGN + '\u042b' # 0xDB -> CYRILLIC CAPITAL LETTER YERU + '\u042c' # 0xDC -> CYRILLIC CAPITAL LETTER SOFT SIGN + '\u042d' # 0xDD -> CYRILLIC CAPITAL LETTER E + '\u042e' # 0xDE -> CYRILLIC CAPITAL LETTER YU + '\u042f' # 0xDF -> CYRILLIC CAPITAL LETTER YA + '\u0430' # 0xE0 -> CYRILLIC SMALL LETTER A + '\u0431' # 0xE1 -> CYRILLIC SMALL LETTER BE + '\u0432' # 0xE2 -> CYRILLIC SMALL LETTER VE + '\u0433' # 0xE3 -> CYRILLIC SMALL LETTER GHE + '\u0434' # 0xE4 -> CYRILLIC SMALL LETTER DE + 
'\u0435' # 0xE5 -> CYRILLIC SMALL LETTER IE + '\u0436' # 0xE6 -> CYRILLIC SMALL LETTER ZHE + '\u0437' # 0xE7 -> CYRILLIC SMALL LETTER ZE + '\u0438' # 0xE8 -> CYRILLIC SMALL LETTER I + '\u0439' # 0xE9 -> CYRILLIC SMALL LETTER SHORT I + '\u043a' # 0xEA -> CYRILLIC SMALL LETTER KA + '\u043b' # 0xEB -> CYRILLIC SMALL LETTER EL + '\u043c' # 0xEC -> CYRILLIC SMALL LETTER EM + '\u043d' # 0xED -> CYRILLIC SMALL LETTER EN + '\u043e' # 0xEE -> CYRILLIC SMALL LETTER O + '\u043f' # 0xEF -> CYRILLIC SMALL LETTER PE + '\u0440' # 0xF0 -> CYRILLIC SMALL LETTER ER + '\u0441' # 0xF1 -> CYRILLIC SMALL LETTER ES + '\u0442' # 0xF2 -> CYRILLIC SMALL LETTER TE + '\u0443' # 0xF3 -> CYRILLIC SMALL LETTER U + '\u0444' # 0xF4 -> CYRILLIC SMALL LETTER EF + '\u0445' # 0xF5 -> CYRILLIC SMALL LETTER HA + '\u0446' # 0xF6 -> CYRILLIC SMALL LETTER TSE + '\u0447' # 0xF7 -> CYRILLIC SMALL LETTER CHE + '\u0448' # 0xF8 -> CYRILLIC SMALL LETTER SHA + '\u0449' # 0xF9 -> CYRILLIC SMALL LETTER SHCHA + '\u044a' # 0xFA -> CYRILLIC SMALL LETTER HARD SIGN + '\u044b' # 0xFB -> CYRILLIC SMALL LETTER YERU + '\u044c' # 0xFC -> CYRILLIC SMALL LETTER SOFT SIGN + '\u044d' # 0xFD -> CYRILLIC SMALL LETTER E + '\u044e' # 0xFE -> CYRILLIC SMALL LETTER YU + '\u044f' # 0xFF -> CYRILLIC SMALL LETTER YA +) + +### Encoding table +encoding_table = codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/latin_1.py b/venv/Lib/encodings/latin_1.py new file mode 100644 index 00000000..370160c0 --- /dev/null +++ b/venv/Lib/encodings/latin_1.py @@ -0,0 +1,50 @@ +""" Python 'latin-1' Codec + + +Written by Marc-Andre Lemburg (mal@lemburg.com). + +(c) Copyright CNRI, All Rights Reserved. NO WARRANTY. + +""" +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + # Note: Binding these as C functions will result in the class not + # converting them to methods. This is intended. + encode = codecs.latin_1_encode + decode = codecs.latin_1_decode + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.latin_1_encode(input,self.errors)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.latin_1_decode(input,self.errors)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +class StreamConverter(StreamWriter,StreamReader): + + encode = codecs.latin_1_decode + decode = codecs.latin_1_encode + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='iso8859-1', + encode=Codec.encode, + decode=Codec.decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) diff --git a/venv/Lib/encodings/mac_arabic.py b/venv/Lib/encodings/mac_arabic.py new file mode 100644 index 00000000..72847e85 --- /dev/null +++ b/venv/Lib/encodings/mac_arabic.py @@ -0,0 +1,698 @@ +""" Python Character Mapping Codec generated from 'VENDORS/APPLE/ARABIC.TXT' with gencodec.py. 
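
`getregentry()` is what ties each module into the interpreter: `codecs.lookup()` hands the normalized encoding name to the `encodings` package, which imports the matching module and caches the returned `CodecInfo`. `latin_1.py` above is the one file in this batch that skips charmap tables entirely and binds the dedicated C functions `codecs.latin_1_encode`/`codecs.latin_1_decode` directly. An illustrative sketch of that lookup path, not part of the diff:

```python
import codecs

# Lookup goes through encodings.__init__, which calls kz1048.getregentry().
info = codecs.lookup('kz1048')
print(info.name)                          # 'kz1048'

# Round trip through the KZ-1048 charmap tables defined above:
# 0xAA -> CYRILLIC CAPITAL LETTER GHE WITH STROKE per the table comments.
assert '\u0492'.encode('kz1048') == b'\xaa'
assert b'\xaa'.decode('kz1048') == '\u0492'

# latin-1, by contrast, maps straight onto the C codec functions:
assert 'é'.encode('latin_1') == b'\xe9'
assert b'\xe9'.decode('latin_1') == 'é'
```
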
+ +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_map) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_map)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='mac-arabic', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + +### Decoding Map + +decoding_map = codecs.make_identity_dict(range(256)) +decoding_map.update({ + 0x0080: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS + 0x0081: 0x00a0, # NO-BREAK SPACE, right-left + 0x0082: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA + 0x0083: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE + 0x0084: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE + 0x0085: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS + 0x0086: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS + 0x0087: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE + 0x0088: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE + 0x0089: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX + 0x008a: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS + 0x008b: 0x06ba, # ARABIC LETTER NOON GHUNNA + 0x008c: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left + 0x008d: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA + 0x008e: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE + 0x008f: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE + 0x0090: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX + 0x0091: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS + 0x0092: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE + 0x0093: 0x2026, # HORIZONTAL ELLIPSIS, right-left + 0x0094: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX + 0x0095: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS + 0x0096: 0x00f1, # LATIN SMALL LETTER N WITH TILDE + 0x0097: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE + 0x0098: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left + 0x0099: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX + 0x009a: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS + 0x009b: 0x00f7, # DIVISION SIGN, right-left + 0x009c: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE + 0x009d: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE + 0x009e: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX + 0x009f: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS + 0x00a0: 0x0020, # SPACE, right-left + 0x00a1: 0x0021, # EXCLAMATION MARK, right-left + 0x00a2: 0x0022, # QUOTATION MARK, right-left + 0x00a3: 0x0023, # NUMBER SIGN, right-left + 0x00a4: 0x0024, # DOLLAR SIGN, right-left + 0x00a5: 0x066a, # ARABIC PERCENT SIGN + 0x00a6: 0x0026, # AMPERSAND, right-left + 0x00a7: 0x0027, # APOSTROPHE, right-left + 0x00a8: 0x0028, # LEFT PARENTHESIS, right-left + 0x00a9: 0x0029, # RIGHT PARENTHESIS, right-left + 0x00aa: 0x002a, # ASTERISK, right-left + 0x00ab: 0x002b, # PLUS SIGN, right-left + 0x00ac: 0x060c, # ARABIC COMMA + 0x00ad: 0x002d, # HYPHEN-MINUS, right-left + 0x00ae: 0x002e, # FULL STOP, right-left + 0x00af: 0x002f, # SOLIDUS, 
right-left + 0x00b0: 0x0660, # ARABIC-INDIC DIGIT ZERO, right-left (need override) + 0x00b1: 0x0661, # ARABIC-INDIC DIGIT ONE, right-left (need override) + 0x00b2: 0x0662, # ARABIC-INDIC DIGIT TWO, right-left (need override) + 0x00b3: 0x0663, # ARABIC-INDIC DIGIT THREE, right-left (need override) + 0x00b4: 0x0664, # ARABIC-INDIC DIGIT FOUR, right-left (need override) + 0x00b5: 0x0665, # ARABIC-INDIC DIGIT FIVE, right-left (need override) + 0x00b6: 0x0666, # ARABIC-INDIC DIGIT SIX, right-left (need override) + 0x00b7: 0x0667, # ARABIC-INDIC DIGIT SEVEN, right-left (need override) + 0x00b8: 0x0668, # ARABIC-INDIC DIGIT EIGHT, right-left (need override) + 0x00b9: 0x0669, # ARABIC-INDIC DIGIT NINE, right-left (need override) + 0x00ba: 0x003a, # COLON, right-left + 0x00bb: 0x061b, # ARABIC SEMICOLON + 0x00bc: 0x003c, # LESS-THAN SIGN, right-left + 0x00bd: 0x003d, # EQUALS SIGN, right-left + 0x00be: 0x003e, # GREATER-THAN SIGN, right-left + 0x00bf: 0x061f, # ARABIC QUESTION MARK + 0x00c0: 0x274a, # EIGHT TEARDROP-SPOKED PROPELLER ASTERISK, right-left + 0x00c1: 0x0621, # ARABIC LETTER HAMZA + 0x00c2: 0x0622, # ARABIC LETTER ALEF WITH MADDA ABOVE + 0x00c3: 0x0623, # ARABIC LETTER ALEF WITH HAMZA ABOVE + 0x00c4: 0x0624, # ARABIC LETTER WAW WITH HAMZA ABOVE + 0x00c5: 0x0625, # ARABIC LETTER ALEF WITH HAMZA BELOW + 0x00c6: 0x0626, # ARABIC LETTER YEH WITH HAMZA ABOVE + 0x00c7: 0x0627, # ARABIC LETTER ALEF + 0x00c8: 0x0628, # ARABIC LETTER BEH + 0x00c9: 0x0629, # ARABIC LETTER TEH MARBUTA + 0x00ca: 0x062a, # ARABIC LETTER TEH + 0x00cb: 0x062b, # ARABIC LETTER THEH + 0x00cc: 0x062c, # ARABIC LETTER JEEM + 0x00cd: 0x062d, # ARABIC LETTER HAH + 0x00ce: 0x062e, # ARABIC LETTER KHAH + 0x00cf: 0x062f, # ARABIC LETTER DAL + 0x00d0: 0x0630, # ARABIC LETTER THAL + 0x00d1: 0x0631, # ARABIC LETTER REH + 0x00d2: 0x0632, # ARABIC LETTER ZAIN + 0x00d3: 0x0633, # ARABIC LETTER SEEN + 0x00d4: 0x0634, # ARABIC LETTER SHEEN + 0x00d5: 0x0635, # ARABIC LETTER SAD + 0x00d6: 0x0636, # ARABIC LETTER DAD + 0x00d7: 0x0637, # ARABIC LETTER TAH + 0x00d8: 0x0638, # ARABIC LETTER ZAH + 0x00d9: 0x0639, # ARABIC LETTER AIN + 0x00da: 0x063a, # ARABIC LETTER GHAIN + 0x00db: 0x005b, # LEFT SQUARE BRACKET, right-left + 0x00dc: 0x005c, # REVERSE SOLIDUS, right-left + 0x00dd: 0x005d, # RIGHT SQUARE BRACKET, right-left + 0x00de: 0x005e, # CIRCUMFLEX ACCENT, right-left + 0x00df: 0x005f, # LOW LINE, right-left + 0x00e0: 0x0640, # ARABIC TATWEEL + 0x00e1: 0x0641, # ARABIC LETTER FEH + 0x00e2: 0x0642, # ARABIC LETTER QAF + 0x00e3: 0x0643, # ARABIC LETTER KAF + 0x00e4: 0x0644, # ARABIC LETTER LAM + 0x00e5: 0x0645, # ARABIC LETTER MEEM + 0x00e6: 0x0646, # ARABIC LETTER NOON + 0x00e7: 0x0647, # ARABIC LETTER HEH + 0x00e8: 0x0648, # ARABIC LETTER WAW + 0x00e9: 0x0649, # ARABIC LETTER ALEF MAKSURA + 0x00ea: 0x064a, # ARABIC LETTER YEH + 0x00eb: 0x064b, # ARABIC FATHATAN + 0x00ec: 0x064c, # ARABIC DAMMATAN + 0x00ed: 0x064d, # ARABIC KASRATAN + 0x00ee: 0x064e, # ARABIC FATHA + 0x00ef: 0x064f, # ARABIC DAMMA + 0x00f0: 0x0650, # ARABIC KASRA + 0x00f1: 0x0651, # ARABIC SHADDA + 0x00f2: 0x0652, # ARABIC SUKUN + 0x00f3: 0x067e, # ARABIC LETTER PEH + 0x00f4: 0x0679, # ARABIC LETTER TTEH + 0x00f5: 0x0686, # ARABIC LETTER TCHEH + 0x00f6: 0x06d5, # ARABIC LETTER AE + 0x00f7: 0x06a4, # ARABIC LETTER VEH + 0x00f8: 0x06af, # ARABIC LETTER GAF + 0x00f9: 0x0688, # ARABIC LETTER DDAL + 0x00fa: 0x0691, # ARABIC LETTER RREH + 0x00fb: 0x007b, # LEFT CURLY BRACKET, right-left + 0x00fc: 0x007c, # VERTICAL LINE, right-left + 0x00fd: 0x007d, # RIGHT CURLY BRACKET, 
right-left + 0x00fe: 0x0698, # ARABIC LETTER JEH + 0x00ff: 0x06d2, # ARABIC LETTER YEH BARREE +}) + +### Decoding Table + +decoding_table = ( + '\x00' # 0x0000 -> CONTROL CHARACTER + '\x01' # 0x0001 -> CONTROL CHARACTER + '\x02' # 0x0002 -> CONTROL CHARACTER + '\x03' # 0x0003 -> CONTROL CHARACTER + '\x04' # 0x0004 -> CONTROL CHARACTER + '\x05' # 0x0005 -> CONTROL CHARACTER + '\x06' # 0x0006 -> CONTROL CHARACTER + '\x07' # 0x0007 -> CONTROL CHARACTER + '\x08' # 0x0008 -> CONTROL CHARACTER + '\t' # 0x0009 -> CONTROL CHARACTER + '\n' # 0x000a -> CONTROL CHARACTER + '\x0b' # 0x000b -> CONTROL CHARACTER + '\x0c' # 0x000c -> CONTROL CHARACTER + '\r' # 0x000d -> CONTROL CHARACTER + '\x0e' # 0x000e -> CONTROL CHARACTER + '\x0f' # 0x000f -> CONTROL CHARACTER + '\x10' # 0x0010 -> CONTROL CHARACTER + '\x11' # 0x0011 -> CONTROL CHARACTER + '\x12' # 0x0012 -> CONTROL CHARACTER + '\x13' # 0x0013 -> CONTROL CHARACTER + '\x14' # 0x0014 -> CONTROL CHARACTER + '\x15' # 0x0015 -> CONTROL CHARACTER + '\x16' # 0x0016 -> CONTROL CHARACTER + '\x17' # 0x0017 -> CONTROL CHARACTER + '\x18' # 0x0018 -> CONTROL CHARACTER + '\x19' # 0x0019 -> CONTROL CHARACTER + '\x1a' # 0x001a -> CONTROL CHARACTER + '\x1b' # 0x001b -> CONTROL CHARACTER + '\x1c' # 0x001c -> CONTROL CHARACTER + '\x1d' # 0x001d -> CONTROL CHARACTER + '\x1e' # 0x001e -> CONTROL CHARACTER + '\x1f' # 0x001f -> CONTROL CHARACTER + ' ' # 0x0020 -> SPACE, left-right + '!' # 0x0021 -> EXCLAMATION MARK, left-right + '"' # 0x0022 -> QUOTATION MARK, left-right + '#' # 0x0023 -> NUMBER SIGN, left-right + '$' # 0x0024 -> DOLLAR SIGN, left-right + '%' # 0x0025 -> PERCENT SIGN, left-right + '&' # 0x0026 -> AMPERSAND, left-right + "'" # 0x0027 -> APOSTROPHE, left-right + '(' # 0x0028 -> LEFT PARENTHESIS, left-right + ')' # 0x0029 -> RIGHT PARENTHESIS, left-right + '*' # 0x002a -> ASTERISK, left-right + '+' # 0x002b -> PLUS SIGN, left-right + ',' # 0x002c -> COMMA, left-right; in Arabic-script context, displayed as 0x066C ARABIC THOUSANDS SEPARATOR + '-' # 0x002d -> HYPHEN-MINUS, left-right + '.' # 0x002e -> FULL STOP, left-right; in Arabic-script context, displayed as 0x066B ARABIC DECIMAL SEPARATOR + '/' # 0x002f -> SOLIDUS, left-right + '0' # 0x0030 -> DIGIT ZERO; in Arabic-script context, displayed as 0x0660 ARABIC-INDIC DIGIT ZERO + '1' # 0x0031 -> DIGIT ONE; in Arabic-script context, displayed as 0x0661 ARABIC-INDIC DIGIT ONE + '2' # 0x0032 -> DIGIT TWO; in Arabic-script context, displayed as 0x0662 ARABIC-INDIC DIGIT TWO + '3' # 0x0033 -> DIGIT THREE; in Arabic-script context, displayed as 0x0663 ARABIC-INDIC DIGIT THREE + '4' # 0x0034 -> DIGIT FOUR; in Arabic-script context, displayed as 0x0664 ARABIC-INDIC DIGIT FOUR + '5' # 0x0035 -> DIGIT FIVE; in Arabic-script context, displayed as 0x0665 ARABIC-INDIC DIGIT FIVE + '6' # 0x0036 -> DIGIT SIX; in Arabic-script context, displayed as 0x0666 ARABIC-INDIC DIGIT SIX + '7' # 0x0037 -> DIGIT SEVEN; in Arabic-script context, displayed as 0x0667 ARABIC-INDIC DIGIT SEVEN + '8' # 0x0038 -> DIGIT EIGHT; in Arabic-script context, displayed as 0x0668 ARABIC-INDIC DIGIT EIGHT + '9' # 0x0039 -> DIGIT NINE; in Arabic-script context, displayed as 0x0669 ARABIC-INDIC DIGIT NINE + ':' # 0x003a -> COLON, left-right + ';' # 0x003b -> SEMICOLON, left-right + '<' # 0x003c -> LESS-THAN SIGN, left-right + '=' # 0x003d -> EQUALS SIGN, left-right + '>' # 0x003e -> GREATER-THAN SIGN, left-right + '?' 
# 0x003f -> QUESTION MARK, left-right + '@' # 0x0040 -> COMMERCIAL AT + 'A' # 0x0041 -> LATIN CAPITAL LETTER A + 'B' # 0x0042 -> LATIN CAPITAL LETTER B + 'C' # 0x0043 -> LATIN CAPITAL LETTER C + 'D' # 0x0044 -> LATIN CAPITAL LETTER D + 'E' # 0x0045 -> LATIN CAPITAL LETTER E + 'F' # 0x0046 -> LATIN CAPITAL LETTER F + 'G' # 0x0047 -> LATIN CAPITAL LETTER G + 'H' # 0x0048 -> LATIN CAPITAL LETTER H + 'I' # 0x0049 -> LATIN CAPITAL LETTER I + 'J' # 0x004a -> LATIN CAPITAL LETTER J + 'K' # 0x004b -> LATIN CAPITAL LETTER K + 'L' # 0x004c -> LATIN CAPITAL LETTER L + 'M' # 0x004d -> LATIN CAPITAL LETTER M + 'N' # 0x004e -> LATIN CAPITAL LETTER N + 'O' # 0x004f -> LATIN CAPITAL LETTER O + 'P' # 0x0050 -> LATIN CAPITAL LETTER P + 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q + 'R' # 0x0052 -> LATIN CAPITAL LETTER R + 'S' # 0x0053 -> LATIN CAPITAL LETTER S + 'T' # 0x0054 -> LATIN CAPITAL LETTER T + 'U' # 0x0055 -> LATIN CAPITAL LETTER U + 'V' # 0x0056 -> LATIN CAPITAL LETTER V + 'W' # 0x0057 -> LATIN CAPITAL LETTER W + 'X' # 0x0058 -> LATIN CAPITAL LETTER X + 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y + 'Z' # 0x005a -> LATIN CAPITAL LETTER Z + '[' # 0x005b -> LEFT SQUARE BRACKET, left-right + '\\' # 0x005c -> REVERSE SOLIDUS, left-right + ']' # 0x005d -> RIGHT SQUARE BRACKET, left-right + '^' # 0x005e -> CIRCUMFLEX ACCENT, left-right + '_' # 0x005f -> LOW LINE, left-right + '`' # 0x0060 -> GRAVE ACCENT + 'a' # 0x0061 -> LATIN SMALL LETTER A + 'b' # 0x0062 -> LATIN SMALL LETTER B + 'c' # 0x0063 -> LATIN SMALL LETTER C + 'd' # 0x0064 -> LATIN SMALL LETTER D + 'e' # 0x0065 -> LATIN SMALL LETTER E + 'f' # 0x0066 -> LATIN SMALL LETTER F + 'g' # 0x0067 -> LATIN SMALL LETTER G + 'h' # 0x0068 -> LATIN SMALL LETTER H + 'i' # 0x0069 -> LATIN SMALL LETTER I + 'j' # 0x006a -> LATIN SMALL LETTER J + 'k' # 0x006b -> LATIN SMALL LETTER K + 'l' # 0x006c -> LATIN SMALL LETTER L + 'm' # 0x006d -> LATIN SMALL LETTER M + 'n' # 0x006e -> LATIN SMALL LETTER N + 'o' # 0x006f -> LATIN SMALL LETTER O + 'p' # 0x0070 -> LATIN SMALL LETTER P + 'q' # 0x0071 -> LATIN SMALL LETTER Q + 'r' # 0x0072 -> LATIN SMALL LETTER R + 's' # 0x0073 -> LATIN SMALL LETTER S + 't' # 0x0074 -> LATIN SMALL LETTER T + 'u' # 0x0075 -> LATIN SMALL LETTER U + 'v' # 0x0076 -> LATIN SMALL LETTER V + 'w' # 0x0077 -> LATIN SMALL LETTER W + 'x' # 0x0078 -> LATIN SMALL LETTER X + 'y' # 0x0079 -> LATIN SMALL LETTER Y + 'z' # 0x007a -> LATIN SMALL LETTER Z + '{' # 0x007b -> LEFT CURLY BRACKET, left-right + '|' # 0x007c -> VERTICAL LINE, left-right + '}' # 0x007d -> RIGHT CURLY BRACKET, left-right + '~' # 0x007e -> TILDE + '\x7f' # 0x007f -> CONTROL CHARACTER + '\xc4' # 0x0080 -> LATIN CAPITAL LETTER A WITH DIAERESIS + '\xa0' # 0x0081 -> NO-BREAK SPACE, right-left + '\xc7' # 0x0082 -> LATIN CAPITAL LETTER C WITH CEDILLA + '\xc9' # 0x0083 -> LATIN CAPITAL LETTER E WITH ACUTE + '\xd1' # 0x0084 -> LATIN CAPITAL LETTER N WITH TILDE + '\xd6' # 0x0085 -> LATIN CAPITAL LETTER O WITH DIAERESIS + '\xdc' # 0x0086 -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\xe1' # 0x0087 -> LATIN SMALL LETTER A WITH ACUTE + '\xe0' # 0x0088 -> LATIN SMALL LETTER A WITH GRAVE + '\xe2' # 0x0089 -> LATIN SMALL LETTER A WITH CIRCUMFLEX + '\xe4' # 0x008a -> LATIN SMALL LETTER A WITH DIAERESIS + '\u06ba' # 0x008b -> ARABIC LETTER NOON GHUNNA + '\xab' # 0x008c -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left + '\xe7' # 0x008d -> LATIN SMALL LETTER C WITH CEDILLA + '\xe9' # 0x008e -> LATIN SMALL LETTER E WITH ACUTE + '\xe8' # 0x008f -> LATIN SMALL LETTER E WITH GRAVE + '\xea' # 0x0090 -> LATIN SMALL 
LETTER E WITH CIRCUMFLEX + '\xeb' # 0x0091 -> LATIN SMALL LETTER E WITH DIAERESIS + '\xed' # 0x0092 -> LATIN SMALL LETTER I WITH ACUTE + '\u2026' # 0x0093 -> HORIZONTAL ELLIPSIS, right-left + '\xee' # 0x0094 -> LATIN SMALL LETTER I WITH CIRCUMFLEX + '\xef' # 0x0095 -> LATIN SMALL LETTER I WITH DIAERESIS + '\xf1' # 0x0096 -> LATIN SMALL LETTER N WITH TILDE + '\xf3' # 0x0097 -> LATIN SMALL LETTER O WITH ACUTE + '\xbb' # 0x0098 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left + '\xf4' # 0x0099 -> LATIN SMALL LETTER O WITH CIRCUMFLEX + '\xf6' # 0x009a -> LATIN SMALL LETTER O WITH DIAERESIS + '\xf7' # 0x009b -> DIVISION SIGN, right-left + '\xfa' # 0x009c -> LATIN SMALL LETTER U WITH ACUTE + '\xf9' # 0x009d -> LATIN SMALL LETTER U WITH GRAVE + '\xfb' # 0x009e -> LATIN SMALL LETTER U WITH CIRCUMFLEX + '\xfc' # 0x009f -> LATIN SMALL LETTER U WITH DIAERESIS + ' ' # 0x00a0 -> SPACE, right-left + '!' # 0x00a1 -> EXCLAMATION MARK, right-left + '"' # 0x00a2 -> QUOTATION MARK, right-left + '#' # 0x00a3 -> NUMBER SIGN, right-left + '$' # 0x00a4 -> DOLLAR SIGN, right-left + '\u066a' # 0x00a5 -> ARABIC PERCENT SIGN + '&' # 0x00a6 -> AMPERSAND, right-left + "'" # 0x00a7 -> APOSTROPHE, right-left + '(' # 0x00a8 -> LEFT PARENTHESIS, right-left + ')' # 0x00a9 -> RIGHT PARENTHESIS, right-left + '*' # 0x00aa -> ASTERISK, right-left + '+' # 0x00ab -> PLUS SIGN, right-left + '\u060c' # 0x00ac -> ARABIC COMMA + '-' # 0x00ad -> HYPHEN-MINUS, right-left + '.' # 0x00ae -> FULL STOP, right-left + '/' # 0x00af -> SOLIDUS, right-left + '\u0660' # 0x00b0 -> ARABIC-INDIC DIGIT ZERO, right-left (need override) + '\u0661' # 0x00b1 -> ARABIC-INDIC DIGIT ONE, right-left (need override) + '\u0662' # 0x00b2 -> ARABIC-INDIC DIGIT TWO, right-left (need override) + '\u0663' # 0x00b3 -> ARABIC-INDIC DIGIT THREE, right-left (need override) + '\u0664' # 0x00b4 -> ARABIC-INDIC DIGIT FOUR, right-left (need override) + '\u0665' # 0x00b5 -> ARABIC-INDIC DIGIT FIVE, right-left (need override) + '\u0666' # 0x00b6 -> ARABIC-INDIC DIGIT SIX, right-left (need override) + '\u0667' # 0x00b7 -> ARABIC-INDIC DIGIT SEVEN, right-left (need override) + '\u0668' # 0x00b8 -> ARABIC-INDIC DIGIT EIGHT, right-left (need override) + '\u0669' # 0x00b9 -> ARABIC-INDIC DIGIT NINE, right-left (need override) + ':' # 0x00ba -> COLON, right-left + '\u061b' # 0x00bb -> ARABIC SEMICOLON + '<' # 0x00bc -> LESS-THAN SIGN, right-left + '=' # 0x00bd -> EQUALS SIGN, right-left + '>' # 0x00be -> GREATER-THAN SIGN, right-left + '\u061f' # 0x00bf -> ARABIC QUESTION MARK + '\u274a' # 0x00c0 -> EIGHT TEARDROP-SPOKED PROPELLER ASTERISK, right-left + '\u0621' # 0x00c1 -> ARABIC LETTER HAMZA + '\u0622' # 0x00c2 -> ARABIC LETTER ALEF WITH MADDA ABOVE + '\u0623' # 0x00c3 -> ARABIC LETTER ALEF WITH HAMZA ABOVE + '\u0624' # 0x00c4 -> ARABIC LETTER WAW WITH HAMZA ABOVE + '\u0625' # 0x00c5 -> ARABIC LETTER ALEF WITH HAMZA BELOW + '\u0626' # 0x00c6 -> ARABIC LETTER YEH WITH HAMZA ABOVE + '\u0627' # 0x00c7 -> ARABIC LETTER ALEF + '\u0628' # 0x00c8 -> ARABIC LETTER BEH + '\u0629' # 0x00c9 -> ARABIC LETTER TEH MARBUTA + '\u062a' # 0x00ca -> ARABIC LETTER TEH + '\u062b' # 0x00cb -> ARABIC LETTER THEH + '\u062c' # 0x00cc -> ARABIC LETTER JEEM + '\u062d' # 0x00cd -> ARABIC LETTER HAH + '\u062e' # 0x00ce -> ARABIC LETTER KHAH + '\u062f' # 0x00cf -> ARABIC LETTER DAL + '\u0630' # 0x00d0 -> ARABIC LETTER THAL + '\u0631' # 0x00d1 -> ARABIC LETTER REH + '\u0632' # 0x00d2 -> ARABIC LETTER ZAIN + '\u0633' # 0x00d3 -> ARABIC LETTER SEEN + '\u0634' # 0x00d4 -> ARABIC LETTER SHEEN + 
'\u0635' # 0x00d5 -> ARABIC LETTER SAD + '\u0636' # 0x00d6 -> ARABIC LETTER DAD + '\u0637' # 0x00d7 -> ARABIC LETTER TAH + '\u0638' # 0x00d8 -> ARABIC LETTER ZAH + '\u0639' # 0x00d9 -> ARABIC LETTER AIN + '\u063a' # 0x00da -> ARABIC LETTER GHAIN + '[' # 0x00db -> LEFT SQUARE BRACKET, right-left + '\\' # 0x00dc -> REVERSE SOLIDUS, right-left + ']' # 0x00dd -> RIGHT SQUARE BRACKET, right-left + '^' # 0x00de -> CIRCUMFLEX ACCENT, right-left + '_' # 0x00df -> LOW LINE, right-left + '\u0640' # 0x00e0 -> ARABIC TATWEEL + '\u0641' # 0x00e1 -> ARABIC LETTER FEH + '\u0642' # 0x00e2 -> ARABIC LETTER QAF + '\u0643' # 0x00e3 -> ARABIC LETTER KAF + '\u0644' # 0x00e4 -> ARABIC LETTER LAM + '\u0645' # 0x00e5 -> ARABIC LETTER MEEM + '\u0646' # 0x00e6 -> ARABIC LETTER NOON + '\u0647' # 0x00e7 -> ARABIC LETTER HEH + '\u0648' # 0x00e8 -> ARABIC LETTER WAW + '\u0649' # 0x00e9 -> ARABIC LETTER ALEF MAKSURA + '\u064a' # 0x00ea -> ARABIC LETTER YEH + '\u064b' # 0x00eb -> ARABIC FATHATAN + '\u064c' # 0x00ec -> ARABIC DAMMATAN + '\u064d' # 0x00ed -> ARABIC KASRATAN + '\u064e' # 0x00ee -> ARABIC FATHA + '\u064f' # 0x00ef -> ARABIC DAMMA + '\u0650' # 0x00f0 -> ARABIC KASRA + '\u0651' # 0x00f1 -> ARABIC SHADDA + '\u0652' # 0x00f2 -> ARABIC SUKUN + '\u067e' # 0x00f3 -> ARABIC LETTER PEH + '\u0679' # 0x00f4 -> ARABIC LETTER TTEH + '\u0686' # 0x00f5 -> ARABIC LETTER TCHEH + '\u06d5' # 0x00f6 -> ARABIC LETTER AE + '\u06a4' # 0x00f7 -> ARABIC LETTER VEH + '\u06af' # 0x00f8 -> ARABIC LETTER GAF + '\u0688' # 0x00f9 -> ARABIC LETTER DDAL + '\u0691' # 0x00fa -> ARABIC LETTER RREH + '{' # 0x00fb -> LEFT CURLY BRACKET, right-left + '|' # 0x00fc -> VERTICAL LINE, right-left + '}' # 0x00fd -> RIGHT CURLY BRACKET, right-left + '\u0698' # 0x00fe -> ARABIC LETTER JEH + '\u06d2' # 0x00ff -> ARABIC LETTER YEH BARREE +) + +### Encoding Map + +encoding_map = { + 0x0000: 0x0000, # CONTROL CHARACTER + 0x0001: 0x0001, # CONTROL CHARACTER + 0x0002: 0x0002, # CONTROL CHARACTER + 0x0003: 0x0003, # CONTROL CHARACTER + 0x0004: 0x0004, # CONTROL CHARACTER + 0x0005: 0x0005, # CONTROL CHARACTER + 0x0006: 0x0006, # CONTROL CHARACTER + 0x0007: 0x0007, # CONTROL CHARACTER + 0x0008: 0x0008, # CONTROL CHARACTER + 0x0009: 0x0009, # CONTROL CHARACTER + 0x000a: 0x000a, # CONTROL CHARACTER + 0x000b: 0x000b, # CONTROL CHARACTER + 0x000c: 0x000c, # CONTROL CHARACTER + 0x000d: 0x000d, # CONTROL CHARACTER + 0x000e: 0x000e, # CONTROL CHARACTER + 0x000f: 0x000f, # CONTROL CHARACTER + 0x0010: 0x0010, # CONTROL CHARACTER + 0x0011: 0x0011, # CONTROL CHARACTER + 0x0012: 0x0012, # CONTROL CHARACTER + 0x0013: 0x0013, # CONTROL CHARACTER + 0x0014: 0x0014, # CONTROL CHARACTER + 0x0015: 0x0015, # CONTROL CHARACTER + 0x0016: 0x0016, # CONTROL CHARACTER + 0x0017: 0x0017, # CONTROL CHARACTER + 0x0018: 0x0018, # CONTROL CHARACTER + 0x0019: 0x0019, # CONTROL CHARACTER + 0x001a: 0x001a, # CONTROL CHARACTER + 0x001b: 0x001b, # CONTROL CHARACTER + 0x001c: 0x001c, # CONTROL CHARACTER + 0x001d: 0x001d, # CONTROL CHARACTER + 0x001e: 0x001e, # CONTROL CHARACTER + 0x001f: 0x001f, # CONTROL CHARACTER + 0x0020: 0x0020, # SPACE, left-right + 0x0020: 0x00a0, # SPACE, right-left + 0x0021: 0x0021, # EXCLAMATION MARK, left-right + 0x0021: 0x00a1, # EXCLAMATION MARK, right-left + 0x0022: 0x0022, # QUOTATION MARK, left-right + 0x0022: 0x00a2, # QUOTATION MARK, right-left + 0x0023: 0x0023, # NUMBER SIGN, left-right + 0x0023: 0x00a3, # NUMBER SIGN, right-left + 0x0024: 0x0024, # DOLLAR SIGN, left-right + 0x0024: 0x00a4, # DOLLAR SIGN, right-left + 0x0025: 0x0025, # PERCENT SIGN, left-right + 
0x0026: 0x0026, # AMPERSAND, left-right + 0x0026: 0x00a6, # AMPERSAND, right-left + 0x0027: 0x0027, # APOSTROPHE, left-right + 0x0027: 0x00a7, # APOSTROPHE, right-left + 0x0028: 0x0028, # LEFT PARENTHESIS, left-right + 0x0028: 0x00a8, # LEFT PARENTHESIS, right-left + 0x0029: 0x0029, # RIGHT PARENTHESIS, left-right + 0x0029: 0x00a9, # RIGHT PARENTHESIS, right-left + 0x002a: 0x002a, # ASTERISK, left-right + 0x002a: 0x00aa, # ASTERISK, right-left + 0x002b: 0x002b, # PLUS SIGN, left-right + 0x002b: 0x00ab, # PLUS SIGN, right-left + 0x002c: 0x002c, # COMMA, left-right; in Arabic-script context, displayed as 0x066C ARABIC THOUSANDS SEPARATOR + 0x002d: 0x002d, # HYPHEN-MINUS, left-right + 0x002d: 0x00ad, # HYPHEN-MINUS, right-left + 0x002e: 0x002e, # FULL STOP, left-right; in Arabic-script context, displayed as 0x066B ARABIC DECIMAL SEPARATOR + 0x002e: 0x00ae, # FULL STOP, right-left + 0x002f: 0x002f, # SOLIDUS, left-right + 0x002f: 0x00af, # SOLIDUS, right-left + 0x0030: 0x0030, # DIGIT ZERO; in Arabic-script context, displayed as 0x0660 ARABIC-INDIC DIGIT ZERO + 0x0031: 0x0031, # DIGIT ONE; in Arabic-script context, displayed as 0x0661 ARABIC-INDIC DIGIT ONE + 0x0032: 0x0032, # DIGIT TWO; in Arabic-script context, displayed as 0x0662 ARABIC-INDIC DIGIT TWO + 0x0033: 0x0033, # DIGIT THREE; in Arabic-script context, displayed as 0x0663 ARABIC-INDIC DIGIT THREE + 0x0034: 0x0034, # DIGIT FOUR; in Arabic-script context, displayed as 0x0664 ARABIC-INDIC DIGIT FOUR + 0x0035: 0x0035, # DIGIT FIVE; in Arabic-script context, displayed as 0x0665 ARABIC-INDIC DIGIT FIVE + 0x0036: 0x0036, # DIGIT SIX; in Arabic-script context, displayed as 0x0666 ARABIC-INDIC DIGIT SIX + 0x0037: 0x0037, # DIGIT SEVEN; in Arabic-script context, displayed as 0x0667 ARABIC-INDIC DIGIT SEVEN + 0x0038: 0x0038, # DIGIT EIGHT; in Arabic-script context, displayed as 0x0668 ARABIC-INDIC DIGIT EIGHT + 0x0039: 0x0039, # DIGIT NINE; in Arabic-script context, displayed as 0x0669 ARABIC-INDIC DIGIT NINE + 0x003a: 0x003a, # COLON, left-right + 0x003a: 0x00ba, # COLON, right-left + 0x003b: 0x003b, # SEMICOLON, left-right + 0x003c: 0x003c, # LESS-THAN SIGN, left-right + 0x003c: 0x00bc, # LESS-THAN SIGN, right-left + 0x003d: 0x003d, # EQUALS SIGN, left-right + 0x003d: 0x00bd, # EQUALS SIGN, right-left + 0x003e: 0x003e, # GREATER-THAN SIGN, left-right + 0x003e: 0x00be, # GREATER-THAN SIGN, right-left + 0x003f: 0x003f, # QUESTION MARK, left-right + 0x0040: 0x0040, # COMMERCIAL AT + 0x0041: 0x0041, # LATIN CAPITAL LETTER A + 0x0042: 0x0042, # LATIN CAPITAL LETTER B + 0x0043: 0x0043, # LATIN CAPITAL LETTER C + 0x0044: 0x0044, # LATIN CAPITAL LETTER D + 0x0045: 0x0045, # LATIN CAPITAL LETTER E + 0x0046: 0x0046, # LATIN CAPITAL LETTER F + 0x0047: 0x0047, # LATIN CAPITAL LETTER G + 0x0048: 0x0048, # LATIN CAPITAL LETTER H + 0x0049: 0x0049, # LATIN CAPITAL LETTER I + 0x004a: 0x004a, # LATIN CAPITAL LETTER J + 0x004b: 0x004b, # LATIN CAPITAL LETTER K + 0x004c: 0x004c, # LATIN CAPITAL LETTER L + 0x004d: 0x004d, # LATIN CAPITAL LETTER M + 0x004e: 0x004e, # LATIN CAPITAL LETTER N + 0x004f: 0x004f, # LATIN CAPITAL LETTER O + 0x0050: 0x0050, # LATIN CAPITAL LETTER P + 0x0051: 0x0051, # LATIN CAPITAL LETTER Q + 0x0052: 0x0052, # LATIN CAPITAL LETTER R + 0x0053: 0x0053, # LATIN CAPITAL LETTER S + 0x0054: 0x0054, # LATIN CAPITAL LETTER T + 0x0055: 0x0055, # LATIN CAPITAL LETTER U + 0x0056: 0x0056, # LATIN CAPITAL LETTER V + 0x0057: 0x0057, # LATIN CAPITAL LETTER W + 0x0058: 0x0058, # LATIN CAPITAL LETTER X + 0x0059: 0x0059, # LATIN CAPITAL LETTER Y + 0x005a: 
0x005a, # LATIN CAPITAL LETTER Z + 0x005b: 0x005b, # LEFT SQUARE BRACKET, left-right + 0x005b: 0x00db, # LEFT SQUARE BRACKET, right-left + 0x005c: 0x005c, # REVERSE SOLIDUS, left-right + 0x005c: 0x00dc, # REVERSE SOLIDUS, right-left + 0x005d: 0x005d, # RIGHT SQUARE BRACKET, left-right + 0x005d: 0x00dd, # RIGHT SQUARE BRACKET, right-left + 0x005e: 0x005e, # CIRCUMFLEX ACCENT, left-right + 0x005e: 0x00de, # CIRCUMFLEX ACCENT, right-left + 0x005f: 0x005f, # LOW LINE, left-right + 0x005f: 0x00df, # LOW LINE, right-left + 0x0060: 0x0060, # GRAVE ACCENT + 0x0061: 0x0061, # LATIN SMALL LETTER A + 0x0062: 0x0062, # LATIN SMALL LETTER B + 0x0063: 0x0063, # LATIN SMALL LETTER C + 0x0064: 0x0064, # LATIN SMALL LETTER D + 0x0065: 0x0065, # LATIN SMALL LETTER E + 0x0066: 0x0066, # LATIN SMALL LETTER F + 0x0067: 0x0067, # LATIN SMALL LETTER G + 0x0068: 0x0068, # LATIN SMALL LETTER H + 0x0069: 0x0069, # LATIN SMALL LETTER I + 0x006a: 0x006a, # LATIN SMALL LETTER J + 0x006b: 0x006b, # LATIN SMALL LETTER K + 0x006c: 0x006c, # LATIN SMALL LETTER L + 0x006d: 0x006d, # LATIN SMALL LETTER M + 0x006e: 0x006e, # LATIN SMALL LETTER N + 0x006f: 0x006f, # LATIN SMALL LETTER O + 0x0070: 0x0070, # LATIN SMALL LETTER P + 0x0071: 0x0071, # LATIN SMALL LETTER Q + 0x0072: 0x0072, # LATIN SMALL LETTER R + 0x0073: 0x0073, # LATIN SMALL LETTER S + 0x0074: 0x0074, # LATIN SMALL LETTER T + 0x0075: 0x0075, # LATIN SMALL LETTER U + 0x0076: 0x0076, # LATIN SMALL LETTER V + 0x0077: 0x0077, # LATIN SMALL LETTER W + 0x0078: 0x0078, # LATIN SMALL LETTER X + 0x0079: 0x0079, # LATIN SMALL LETTER Y + 0x007a: 0x007a, # LATIN SMALL LETTER Z + 0x007b: 0x007b, # LEFT CURLY BRACKET, left-right + 0x007b: 0x00fb, # LEFT CURLY BRACKET, right-left + 0x007c: 0x007c, # VERTICAL LINE, left-right + 0x007c: 0x00fc, # VERTICAL LINE, right-left + 0x007d: 0x007d, # RIGHT CURLY BRACKET, left-right + 0x007d: 0x00fd, # RIGHT CURLY BRACKET, right-left + 0x007e: 0x007e, # TILDE + 0x007f: 0x007f, # CONTROL CHARACTER + 0x00a0: 0x0081, # NO-BREAK SPACE, right-left + 0x00ab: 0x008c, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left + 0x00bb: 0x0098, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left + 0x00c4: 0x0080, # LATIN CAPITAL LETTER A WITH DIAERESIS + 0x00c7: 0x0082, # LATIN CAPITAL LETTER C WITH CEDILLA + 0x00c9: 0x0083, # LATIN CAPITAL LETTER E WITH ACUTE + 0x00d1: 0x0084, # LATIN CAPITAL LETTER N WITH TILDE + 0x00d6: 0x0085, # LATIN CAPITAL LETTER O WITH DIAERESIS + 0x00dc: 0x0086, # LATIN CAPITAL LETTER U WITH DIAERESIS + 0x00e0: 0x0088, # LATIN SMALL LETTER A WITH GRAVE + 0x00e1: 0x0087, # LATIN SMALL LETTER A WITH ACUTE + 0x00e2: 0x0089, # LATIN SMALL LETTER A WITH CIRCUMFLEX + 0x00e4: 0x008a, # LATIN SMALL LETTER A WITH DIAERESIS + 0x00e7: 0x008d, # LATIN SMALL LETTER C WITH CEDILLA + 0x00e8: 0x008f, # LATIN SMALL LETTER E WITH GRAVE + 0x00e9: 0x008e, # LATIN SMALL LETTER E WITH ACUTE + 0x00ea: 0x0090, # LATIN SMALL LETTER E WITH CIRCUMFLEX + 0x00eb: 0x0091, # LATIN SMALL LETTER E WITH DIAERESIS + 0x00ed: 0x0092, # LATIN SMALL LETTER I WITH ACUTE + 0x00ee: 0x0094, # LATIN SMALL LETTER I WITH CIRCUMFLEX + 0x00ef: 0x0095, # LATIN SMALL LETTER I WITH DIAERESIS + 0x00f1: 0x0096, # LATIN SMALL LETTER N WITH TILDE + 0x00f3: 0x0097, # LATIN SMALL LETTER O WITH ACUTE + 0x00f4: 0x0099, # LATIN SMALL LETTER O WITH CIRCUMFLEX + 0x00f6: 0x009a, # LATIN SMALL LETTER O WITH DIAERESIS + 0x00f7: 0x009b, # DIVISION SIGN, right-left + 0x00f9: 0x009d, # LATIN SMALL LETTER U WITH GRAVE + 0x00fa: 0x009c, # LATIN SMALL LETTER U WITH ACUTE + 0x00fb: 0x009e, 
# LATIN SMALL LETTER U WITH CIRCUMFLEX + 0x00fc: 0x009f, # LATIN SMALL LETTER U WITH DIAERESIS + 0x060c: 0x00ac, # ARABIC COMMA + 0x061b: 0x00bb, # ARABIC SEMICOLON + 0x061f: 0x00bf, # ARABIC QUESTION MARK + 0x0621: 0x00c1, # ARABIC LETTER HAMZA + 0x0622: 0x00c2, # ARABIC LETTER ALEF WITH MADDA ABOVE + 0x0623: 0x00c3, # ARABIC LETTER ALEF WITH HAMZA ABOVE + 0x0624: 0x00c4, # ARABIC LETTER WAW WITH HAMZA ABOVE + 0x0625: 0x00c5, # ARABIC LETTER ALEF WITH HAMZA BELOW + 0x0626: 0x00c6, # ARABIC LETTER YEH WITH HAMZA ABOVE + 0x0627: 0x00c7, # ARABIC LETTER ALEF + 0x0628: 0x00c8, # ARABIC LETTER BEH + 0x0629: 0x00c9, # ARABIC LETTER TEH MARBUTA + 0x062a: 0x00ca, # ARABIC LETTER TEH + 0x062b: 0x00cb, # ARABIC LETTER THEH + 0x062c: 0x00cc, # ARABIC LETTER JEEM + 0x062d: 0x00cd, # ARABIC LETTER HAH + 0x062e: 0x00ce, # ARABIC LETTER KHAH + 0x062f: 0x00cf, # ARABIC LETTER DAL + 0x0630: 0x00d0, # ARABIC LETTER THAL + 0x0631: 0x00d1, # ARABIC LETTER REH + 0x0632: 0x00d2, # ARABIC LETTER ZAIN + 0x0633: 0x00d3, # ARABIC LETTER SEEN + 0x0634: 0x00d4, # ARABIC LETTER SHEEN + 0x0635: 0x00d5, # ARABIC LETTER SAD + 0x0636: 0x00d6, # ARABIC LETTER DAD + 0x0637: 0x00d7, # ARABIC LETTER TAH + 0x0638: 0x00d8, # ARABIC LETTER ZAH + 0x0639: 0x00d9, # ARABIC LETTER AIN + 0x063a: 0x00da, # ARABIC LETTER GHAIN + 0x0640: 0x00e0, # ARABIC TATWEEL + 0x0641: 0x00e1, # ARABIC LETTER FEH + 0x0642: 0x00e2, # ARABIC LETTER QAF + 0x0643: 0x00e3, # ARABIC LETTER KAF + 0x0644: 0x00e4, # ARABIC LETTER LAM + 0x0645: 0x00e5, # ARABIC LETTER MEEM + 0x0646: 0x00e6, # ARABIC LETTER NOON + 0x0647: 0x00e7, # ARABIC LETTER HEH + 0x0648: 0x00e8, # ARABIC LETTER WAW + 0x0649: 0x00e9, # ARABIC LETTER ALEF MAKSURA + 0x064a: 0x00ea, # ARABIC LETTER YEH + 0x064b: 0x00eb, # ARABIC FATHATAN + 0x064c: 0x00ec, # ARABIC DAMMATAN + 0x064d: 0x00ed, # ARABIC KASRATAN + 0x064e: 0x00ee, # ARABIC FATHA + 0x064f: 0x00ef, # ARABIC DAMMA + 0x0650: 0x00f0, # ARABIC KASRA + 0x0651: 0x00f1, # ARABIC SHADDA + 0x0652: 0x00f2, # ARABIC SUKUN + 0x0660: 0x00b0, # ARABIC-INDIC DIGIT ZERO, right-left (need override) + 0x0661: 0x00b1, # ARABIC-INDIC DIGIT ONE, right-left (need override) + 0x0662: 0x00b2, # ARABIC-INDIC DIGIT TWO, right-left (need override) + 0x0663: 0x00b3, # ARABIC-INDIC DIGIT THREE, right-left (need override) + 0x0664: 0x00b4, # ARABIC-INDIC DIGIT FOUR, right-left (need override) + 0x0665: 0x00b5, # ARABIC-INDIC DIGIT FIVE, right-left (need override) + 0x0666: 0x00b6, # ARABIC-INDIC DIGIT SIX, right-left (need override) + 0x0667: 0x00b7, # ARABIC-INDIC DIGIT SEVEN, right-left (need override) + 0x0668: 0x00b8, # ARABIC-INDIC DIGIT EIGHT, right-left (need override) + 0x0669: 0x00b9, # ARABIC-INDIC DIGIT NINE, right-left (need override) + 0x066a: 0x00a5, # ARABIC PERCENT SIGN + 0x0679: 0x00f4, # ARABIC LETTER TTEH + 0x067e: 0x00f3, # ARABIC LETTER PEH + 0x0686: 0x00f5, # ARABIC LETTER TCHEH + 0x0688: 0x00f9, # ARABIC LETTER DDAL + 0x0691: 0x00fa, # ARABIC LETTER RREH + 0x0698: 0x00fe, # ARABIC LETTER JEH + 0x06a4: 0x00f7, # ARABIC LETTER VEH + 0x06af: 0x00f8, # ARABIC LETTER GAF + 0x06ba: 0x008b, # ARABIC LETTER NOON GHUNNA + 0x06d2: 0x00ff, # ARABIC LETTER YEH BARREE + 0x06d5: 0x00f6, # ARABIC LETTER AE + 0x2026: 0x0093, # HORIZONTAL ELLIPSIS, right-left + 0x274a: 0x00c0, # EIGHT TEARDROP-SPOKED PROPELLER ASTERISK, right-left +} diff --git a/venv/Lib/encodings/mac_centeuro.py b/venv/Lib/encodings/mac_centeuro.py new file mode 100644 index 00000000..5785a0ec --- /dev/null +++ b/venv/Lib/encodings/mac_centeuro.py @@ -0,0 +1,307 @@ +""" Python Character 
Mapping Codec mac_centeuro generated from 'MAPPINGS/VENDORS/APPLE/CENTEURO.TXT' with gencodec.py. + +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='mac-centeuro', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> CONTROL CHARACTER + '\x01' # 0x01 -> CONTROL CHARACTER + '\x02' # 0x02 -> CONTROL CHARACTER + '\x03' # 0x03 -> CONTROL CHARACTER + '\x04' # 0x04 -> CONTROL CHARACTER + '\x05' # 0x05 -> CONTROL CHARACTER + '\x06' # 0x06 -> CONTROL CHARACTER + '\x07' # 0x07 -> CONTROL CHARACTER + '\x08' # 0x08 -> CONTROL CHARACTER + '\t' # 0x09 -> CONTROL CHARACTER + '\n' # 0x0A -> CONTROL CHARACTER + '\x0b' # 0x0B -> CONTROL CHARACTER + '\x0c' # 0x0C -> CONTROL CHARACTER + '\r' # 0x0D -> CONTROL CHARACTER + '\x0e' # 0x0E -> CONTROL CHARACTER + '\x0f' # 0x0F -> CONTROL CHARACTER + '\x10' # 0x10 -> CONTROL CHARACTER + '\x11' # 0x11 -> CONTROL CHARACTER + '\x12' # 0x12 -> CONTROL CHARACTER + '\x13' # 0x13 -> CONTROL CHARACTER + '\x14' # 0x14 -> CONTROL CHARACTER + '\x15' # 0x15 -> CONTROL CHARACTER + '\x16' # 0x16 -> CONTROL CHARACTER + '\x17' # 0x17 -> CONTROL CHARACTER + '\x18' # 0x18 -> CONTROL CHARACTER + '\x19' # 0x19 -> CONTROL CHARACTER + '\x1a' # 0x1A -> CONTROL CHARACTER + '\x1b' # 0x1B -> CONTROL CHARACTER + '\x1c' # 0x1C -> CONTROL CHARACTER + '\x1d' # 0x1D -> CONTROL CHARACTER + '\x1e' # 0x1E -> CONTROL CHARACTER + '\x1f' # 0x1F -> CONTROL CHARACTER + ' ' # 0x20 -> SPACE + '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' # 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' 
# 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> CONTROL CHARACTER + '\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS + '\u0100' # 0x81 -> LATIN CAPITAL LETTER A WITH MACRON + '\u0101' # 0x82 -> LATIN SMALL LETTER A WITH MACRON + '\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE + '\u0104' # 0x84 -> LATIN CAPITAL LETTER A WITH OGONEK + '\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS + '\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE + '\u0105' # 0x88 -> LATIN SMALL LETTER A WITH OGONEK + '\u010c' # 0x89 -> LATIN CAPITAL LETTER C WITH CARON + '\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS + '\u010d' # 0x8B -> LATIN SMALL LETTER C WITH CARON + '\u0106' # 0x8C -> LATIN CAPITAL LETTER C WITH ACUTE + '\u0107' # 0x8D -> LATIN SMALL LETTER C WITH ACUTE + '\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE + '\u0179' # 0x8F -> LATIN CAPITAL LETTER Z WITH ACUTE + '\u017a' # 0x90 -> LATIN SMALL LETTER Z WITH ACUTE + '\u010e' # 0x91 -> LATIN CAPITAL LETTER D WITH CARON + '\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE + '\u010f' # 0x93 -> LATIN SMALL LETTER D WITH CARON + '\u0112' # 0x94 -> LATIN CAPITAL LETTER E WITH MACRON + '\u0113' # 0x95 -> LATIN 
SMALL LETTER E WITH MACRON + '\u0116' # 0x96 -> LATIN CAPITAL LETTER E WITH DOT ABOVE + '\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE + '\u0117' # 0x98 -> LATIN SMALL LETTER E WITH DOT ABOVE + '\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX + '\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS + '\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE + '\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE + '\u011a' # 0x9D -> LATIN CAPITAL LETTER E WITH CARON + '\u011b' # 0x9E -> LATIN SMALL LETTER E WITH CARON + '\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS + '\u2020' # 0xA0 -> DAGGER + '\xb0' # 0xA1 -> DEGREE SIGN + '\u0118' # 0xA2 -> LATIN CAPITAL LETTER E WITH OGONEK + '\xa3' # 0xA3 -> POUND SIGN + '\xa7' # 0xA4 -> SECTION SIGN + '\u2022' # 0xA5 -> BULLET + '\xb6' # 0xA6 -> PILCROW SIGN + '\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S + '\xae' # 0xA8 -> REGISTERED SIGN + '\xa9' # 0xA9 -> COPYRIGHT SIGN + '\u2122' # 0xAA -> TRADE MARK SIGN + '\u0119' # 0xAB -> LATIN SMALL LETTER E WITH OGONEK + '\xa8' # 0xAC -> DIAERESIS + '\u2260' # 0xAD -> NOT EQUAL TO + '\u0123' # 0xAE -> LATIN SMALL LETTER G WITH CEDILLA + '\u012e' # 0xAF -> LATIN CAPITAL LETTER I WITH OGONEK + '\u012f' # 0xB0 -> LATIN SMALL LETTER I WITH OGONEK + '\u012a' # 0xB1 -> LATIN CAPITAL LETTER I WITH MACRON + '\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO + '\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO + '\u012b' # 0xB4 -> LATIN SMALL LETTER I WITH MACRON + '\u0136' # 0xB5 -> LATIN CAPITAL LETTER K WITH CEDILLA + '\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL + '\u2211' # 0xB7 -> N-ARY SUMMATION + '\u0142' # 0xB8 -> LATIN SMALL LETTER L WITH STROKE + '\u013b' # 0xB9 -> LATIN CAPITAL LETTER L WITH CEDILLA + '\u013c' # 0xBA -> LATIN SMALL LETTER L WITH CEDILLA + '\u013d' # 0xBB -> LATIN CAPITAL LETTER L WITH CARON + '\u013e' # 0xBC -> LATIN SMALL LETTER L WITH CARON + '\u0139' # 0xBD -> LATIN CAPITAL LETTER L WITH ACUTE + '\u013a' # 0xBE -> LATIN SMALL LETTER L WITH ACUTE + '\u0145' # 0xBF -> LATIN CAPITAL LETTER N WITH CEDILLA + '\u0146' # 0xC0 -> LATIN SMALL LETTER N WITH CEDILLA + '\u0143' # 0xC1 -> LATIN CAPITAL LETTER N WITH ACUTE + '\xac' # 0xC2 -> NOT SIGN + '\u221a' # 0xC3 -> SQUARE ROOT + '\u0144' # 0xC4 -> LATIN SMALL LETTER N WITH ACUTE + '\u0147' # 0xC5 -> LATIN CAPITAL LETTER N WITH CARON + '\u2206' # 0xC6 -> INCREMENT + '\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS + '\xa0' # 0xCA -> NO-BREAK SPACE + '\u0148' # 0xCB -> LATIN SMALL LETTER N WITH CARON + '\u0150' # 0xCC -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE + '\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE + '\u0151' # 0xCE -> LATIN SMALL LETTER O WITH DOUBLE ACUTE + '\u014c' # 0xCF -> LATIN CAPITAL LETTER O WITH MACRON + '\u2013' # 0xD0 -> EN DASH + '\u2014' # 0xD1 -> EM DASH + '\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK + '\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK + '\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK + '\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK + '\xf7' # 0xD6 -> DIVISION SIGN + '\u25ca' # 0xD7 -> LOZENGE + '\u014d' # 0xD8 -> LATIN SMALL LETTER O WITH MACRON + '\u0154' # 0xD9 -> LATIN CAPITAL LETTER R WITH ACUTE + '\u0155' # 0xDA -> LATIN SMALL LETTER R WITH ACUTE + '\u0158' # 0xDB -> LATIN CAPITAL LETTER R WITH CARON + '\u2039' # 0xDC -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK + '\u203a' # 0xDD -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK + '\u0159' # 0xDE -> LATIN SMALL LETTER R WITH CARON + '\u0156' # 0xDF -> LATIN CAPITAL 
LETTER R WITH CEDILLA + '\u0157' # 0xE0 -> LATIN SMALL LETTER R WITH CEDILLA + '\u0160' # 0xE1 -> LATIN CAPITAL LETTER S WITH CARON + '\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK + '\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK + '\u0161' # 0xE4 -> LATIN SMALL LETTER S WITH CARON + '\u015a' # 0xE5 -> LATIN CAPITAL LETTER S WITH ACUTE + '\u015b' # 0xE6 -> LATIN SMALL LETTER S WITH ACUTE + '\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE + '\u0164' # 0xE8 -> LATIN CAPITAL LETTER T WITH CARON + '\u0165' # 0xE9 -> LATIN SMALL LETTER T WITH CARON + '\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE + '\u017d' # 0xEB -> LATIN CAPITAL LETTER Z WITH CARON + '\u017e' # 0xEC -> LATIN SMALL LETTER Z WITH CARON + '\u016a' # 0xED -> LATIN CAPITAL LETTER U WITH MACRON + '\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE + '\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX + '\u016b' # 0xF0 -> LATIN SMALL LETTER U WITH MACRON + '\u016e' # 0xF1 -> LATIN CAPITAL LETTER U WITH RING ABOVE + '\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE + '\u016f' # 0xF3 -> LATIN SMALL LETTER U WITH RING ABOVE + '\u0170' # 0xF4 -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE + '\u0171' # 0xF5 -> LATIN SMALL LETTER U WITH DOUBLE ACUTE + '\u0172' # 0xF6 -> LATIN CAPITAL LETTER U WITH OGONEK + '\u0173' # 0xF7 -> LATIN SMALL LETTER U WITH OGONEK + '\xdd' # 0xF8 -> LATIN CAPITAL LETTER Y WITH ACUTE + '\xfd' # 0xF9 -> LATIN SMALL LETTER Y WITH ACUTE + '\u0137' # 0xFA -> LATIN SMALL LETTER K WITH CEDILLA + '\u017b' # 0xFB -> LATIN CAPITAL LETTER Z WITH DOT ABOVE + '\u0141' # 0xFC -> LATIN CAPITAL LETTER L WITH STROKE + '\u017c' # 0xFD -> LATIN SMALL LETTER Z WITH DOT ABOVE + '\u0122' # 0xFE -> LATIN CAPITAL LETTER G WITH CEDILLA + '\u02c7' # 0xFF -> CARON +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/mac_croatian.py b/venv/Lib/encodings/mac_croatian.py new file mode 100644 index 00000000..4a92fe61 --- /dev/null +++ b/venv/Lib/encodings/mac_croatian.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec mac_croatian generated from 'MAPPINGS/VENDORS/APPLE/CROATIAN.TXT' with gencodec.py. 
+ +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='mac-croatian', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> CONTROL CHARACTER + '\x01' # 0x01 -> CONTROL CHARACTER + '\x02' # 0x02 -> CONTROL CHARACTER + '\x03' # 0x03 -> CONTROL CHARACTER + '\x04' # 0x04 -> CONTROL CHARACTER + '\x05' # 0x05 -> CONTROL CHARACTER + '\x06' # 0x06 -> CONTROL CHARACTER + '\x07' # 0x07 -> CONTROL CHARACTER + '\x08' # 0x08 -> CONTROL CHARACTER + '\t' # 0x09 -> CONTROL CHARACTER + '\n' # 0x0A -> CONTROL CHARACTER + '\x0b' # 0x0B -> CONTROL CHARACTER + '\x0c' # 0x0C -> CONTROL CHARACTER + '\r' # 0x0D -> CONTROL CHARACTER + '\x0e' # 0x0E -> CONTROL CHARACTER + '\x0f' # 0x0F -> CONTROL CHARACTER + '\x10' # 0x10 -> CONTROL CHARACTER + '\x11' # 0x11 -> CONTROL CHARACTER + '\x12' # 0x12 -> CONTROL CHARACTER + '\x13' # 0x13 -> CONTROL CHARACTER + '\x14' # 0x14 -> CONTROL CHARACTER + '\x15' # 0x15 -> CONTROL CHARACTER + '\x16' # 0x16 -> CONTROL CHARACTER + '\x17' # 0x17 -> CONTROL CHARACTER + '\x18' # 0x18 -> CONTROL CHARACTER + '\x19' # 0x19 -> CONTROL CHARACTER + '\x1a' # 0x1A -> CONTROL CHARACTER + '\x1b' # 0x1B -> CONTROL CHARACTER + '\x1c' # 0x1C -> CONTROL CHARACTER + '\x1d' # 0x1D -> CONTROL CHARACTER + '\x1e' # 0x1E -> CONTROL CHARACTER + '\x1f' # 0x1F -> CONTROL CHARACTER + ' ' # 0x20 -> SPACE + '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' # 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' 
# 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> CONTROL CHARACTER + '\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS + '\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE + '\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA + '\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE + '\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE + '\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS + '\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE + '\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE + '\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX + '\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS + '\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE + '\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE + '\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA + '\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE + '\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE + '\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX + '\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS + '\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE + '\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE + '\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX + '\xef' # 0x95 -> LATIN SMALL 
LETTER I WITH DIAERESIS + '\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE + '\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE + '\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE + '\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX + '\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS + '\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE + '\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE + '\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE + '\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX + '\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS + '\u2020' # 0xA0 -> DAGGER + '\xb0' # 0xA1 -> DEGREE SIGN + '\xa2' # 0xA2 -> CENT SIGN + '\xa3' # 0xA3 -> POUND SIGN + '\xa7' # 0xA4 -> SECTION SIGN + '\u2022' # 0xA5 -> BULLET + '\xb6' # 0xA6 -> PILCROW SIGN + '\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S + '\xae' # 0xA8 -> REGISTERED SIGN + '\u0160' # 0xA9 -> LATIN CAPITAL LETTER S WITH CARON + '\u2122' # 0xAA -> TRADE MARK SIGN + '\xb4' # 0xAB -> ACUTE ACCENT + '\xa8' # 0xAC -> DIAERESIS + '\u2260' # 0xAD -> NOT EQUAL TO + '\u017d' # 0xAE -> LATIN CAPITAL LETTER Z WITH CARON + '\xd8' # 0xAF -> LATIN CAPITAL LETTER O WITH STROKE + '\u221e' # 0xB0 -> INFINITY + '\xb1' # 0xB1 -> PLUS-MINUS SIGN + '\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO + '\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO + '\u2206' # 0xB4 -> INCREMENT + '\xb5' # 0xB5 -> MICRO SIGN + '\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL + '\u2211' # 0xB7 -> N-ARY SUMMATION + '\u220f' # 0xB8 -> N-ARY PRODUCT + '\u0161' # 0xB9 -> LATIN SMALL LETTER S WITH CARON + '\u222b' # 0xBA -> INTEGRAL + '\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR + '\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR + '\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA + '\u017e' # 0xBE -> LATIN SMALL LETTER Z WITH CARON + '\xf8' # 0xBF -> LATIN SMALL LETTER O WITH STROKE + '\xbf' # 0xC0 -> INVERTED QUESTION MARK + '\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK + '\xac' # 0xC2 -> NOT SIGN + '\u221a' # 0xC3 -> SQUARE ROOT + '\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK + '\u2248' # 0xC5 -> ALMOST EQUAL TO + '\u0106' # 0xC6 -> LATIN CAPITAL LETTER C WITH ACUTE + '\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON + '\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS + '\xa0' # 0xCA -> NO-BREAK SPACE + '\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE + '\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE + '\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE + '\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE + '\u0153' # 0xCF -> LATIN SMALL LIGATURE OE + '\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE + '\u2014' # 0xD1 -> EM DASH + '\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK + '\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK + '\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK + '\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK + '\xf7' # 0xD6 -> DIVISION SIGN + '\u25ca' # 0xD7 -> LOZENGE + '\uf8ff' # 0xD8 -> Apple logo + '\xa9' # 0xD9 -> COPYRIGHT SIGN + '\u2044' # 0xDA -> FRACTION SLASH + '\u20ac' # 0xDB -> EURO SIGN + '\u2039' # 0xDC -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK + '\u203a' # 0xDD -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK + '\xc6' # 0xDE -> LATIN CAPITAL LETTER AE + '\xbb' # 0xDF -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\u2013' # 0xE0 -> EN DASH + '\xb7' # 0xE1 -> MIDDLE DOT + '\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK + '\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK + '\u2030' # 0xE4 -> PER MILLE SIGN + '\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX + '\u0107' # 0xE6 -> LATIN SMALL LETTER C WITH ACUTE + '\xc1' # 
0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE + '\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON + '\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE + '\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE + '\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX + '\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS + '\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE + '\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE + '\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX + '\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE + '\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE + '\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE + '\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX + '\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE + '\u0131' # 0xF5 -> LATIN SMALL LETTER DOTLESS I + '\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT + '\u02dc' # 0xF7 -> SMALL TILDE + '\xaf' # 0xF8 -> MACRON + '\u03c0' # 0xF9 -> GREEK SMALL LETTER PI + '\xcb' # 0xFA -> LATIN CAPITAL LETTER E WITH DIAERESIS + '\u02da' # 0xFB -> RING ABOVE + '\xb8' # 0xFC -> CEDILLA + '\xca' # 0xFD -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX + '\xe6' # 0xFE -> LATIN SMALL LETTER AE + '\u02c7' # 0xFF -> CARON +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/mac_cyrillic.py b/venv/Lib/encodings/mac_cyrillic.py new file mode 100644 index 00000000..d20272ac --- /dev/null +++ b/venv/Lib/encodings/mac_cyrillic.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec mac_cyrillic generated from 'MAPPINGS/VENDORS/APPLE/CYRILLIC.TXT' with gencodec.py. + +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='mac-cyrillic', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> CONTROL CHARACTER + '\x01' # 0x01 -> CONTROL CHARACTER + '\x02' # 0x02 -> CONTROL CHARACTER + '\x03' # 0x03 -> CONTROL CHARACTER + '\x04' # 0x04 -> CONTROL CHARACTER + '\x05' # 0x05 -> CONTROL CHARACTER + '\x06' # 0x06 -> CONTROL CHARACTER + '\x07' # 0x07 -> CONTROL CHARACTER + '\x08' # 0x08 -> CONTROL CHARACTER + '\t' # 0x09 -> CONTROL CHARACTER + '\n' # 0x0A -> CONTROL CHARACTER + '\x0b' # 0x0B -> CONTROL CHARACTER + '\x0c' # 0x0C -> CONTROL CHARACTER + '\r' # 0x0D -> CONTROL CHARACTER + '\x0e' # 0x0E -> CONTROL CHARACTER + '\x0f' # 0x0F -> CONTROL CHARACTER + '\x10' # 0x10 -> CONTROL CHARACTER + '\x11' # 0x11 -> CONTROL CHARACTER + '\x12' # 0x12 -> CONTROL CHARACTER + '\x13' # 0x13 -> CONTROL CHARACTER + '\x14' # 0x14 -> CONTROL CHARACTER + '\x15' # 0x15 -> CONTROL CHARACTER + '\x16' # 0x16 -> CONTROL CHARACTER + '\x17' # 0x17 -> CONTROL CHARACTER + 
'\x18' # 0x18 -> CONTROL CHARACTER + '\x19' # 0x19 -> CONTROL CHARACTER + '\x1a' # 0x1A -> CONTROL CHARACTER + '\x1b' # 0x1B -> CONTROL CHARACTER + '\x1c' # 0x1C -> CONTROL CHARACTER + '\x1d' # 0x1D -> CONTROL CHARACTER + '\x1e' # 0x1E -> CONTROL CHARACTER + '\x1f' # 0x1F -> CONTROL CHARACTER + ' ' # 0x20 -> SPACE + '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' # 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' # 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> CONTROL CHARACTER + '\u0410' # 
0x80 -> CYRILLIC CAPITAL LETTER A + '\u0411' # 0x81 -> CYRILLIC CAPITAL LETTER BE + '\u0412' # 0x82 -> CYRILLIC CAPITAL LETTER VE + '\u0413' # 0x83 -> CYRILLIC CAPITAL LETTER GHE + '\u0414' # 0x84 -> CYRILLIC CAPITAL LETTER DE + '\u0415' # 0x85 -> CYRILLIC CAPITAL LETTER IE + '\u0416' # 0x86 -> CYRILLIC CAPITAL LETTER ZHE + '\u0417' # 0x87 -> CYRILLIC CAPITAL LETTER ZE + '\u0418' # 0x88 -> CYRILLIC CAPITAL LETTER I + '\u0419' # 0x89 -> CYRILLIC CAPITAL LETTER SHORT I + '\u041a' # 0x8A -> CYRILLIC CAPITAL LETTER KA + '\u041b' # 0x8B -> CYRILLIC CAPITAL LETTER EL + '\u041c' # 0x8C -> CYRILLIC CAPITAL LETTER EM + '\u041d' # 0x8D -> CYRILLIC CAPITAL LETTER EN + '\u041e' # 0x8E -> CYRILLIC CAPITAL LETTER O + '\u041f' # 0x8F -> CYRILLIC CAPITAL LETTER PE + '\u0420' # 0x90 -> CYRILLIC CAPITAL LETTER ER + '\u0421' # 0x91 -> CYRILLIC CAPITAL LETTER ES + '\u0422' # 0x92 -> CYRILLIC CAPITAL LETTER TE + '\u0423' # 0x93 -> CYRILLIC CAPITAL LETTER U + '\u0424' # 0x94 -> CYRILLIC CAPITAL LETTER EF + '\u0425' # 0x95 -> CYRILLIC CAPITAL LETTER HA + '\u0426' # 0x96 -> CYRILLIC CAPITAL LETTER TSE + '\u0427' # 0x97 -> CYRILLIC CAPITAL LETTER CHE + '\u0428' # 0x98 -> CYRILLIC CAPITAL LETTER SHA + '\u0429' # 0x99 -> CYRILLIC CAPITAL LETTER SHCHA + '\u042a' # 0x9A -> CYRILLIC CAPITAL LETTER HARD SIGN + '\u042b' # 0x9B -> CYRILLIC CAPITAL LETTER YERU + '\u042c' # 0x9C -> CYRILLIC CAPITAL LETTER SOFT SIGN + '\u042d' # 0x9D -> CYRILLIC CAPITAL LETTER E + '\u042e' # 0x9E -> CYRILLIC CAPITAL LETTER YU + '\u042f' # 0x9F -> CYRILLIC CAPITAL LETTER YA + '\u2020' # 0xA0 -> DAGGER + '\xb0' # 0xA1 -> DEGREE SIGN + '\u0490' # 0xA2 -> CYRILLIC CAPITAL LETTER GHE WITH UPTURN + '\xa3' # 0xA3 -> POUND SIGN + '\xa7' # 0xA4 -> SECTION SIGN + '\u2022' # 0xA5 -> BULLET + '\xb6' # 0xA6 -> PILCROW SIGN + '\u0406' # 0xA7 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I + '\xae' # 0xA8 -> REGISTERED SIGN + '\xa9' # 0xA9 -> COPYRIGHT SIGN + '\u2122' # 0xAA -> TRADE MARK SIGN + '\u0402' # 0xAB -> CYRILLIC CAPITAL LETTER DJE + '\u0452' # 0xAC -> CYRILLIC SMALL LETTER DJE + '\u2260' # 0xAD -> NOT EQUAL TO + '\u0403' # 0xAE -> CYRILLIC CAPITAL LETTER GJE + '\u0453' # 0xAF -> CYRILLIC SMALL LETTER GJE + '\u221e' # 0xB0 -> INFINITY + '\xb1' # 0xB1 -> PLUS-MINUS SIGN + '\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO + '\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO + '\u0456' # 0xB4 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I + '\xb5' # 0xB5 -> MICRO SIGN + '\u0491' # 0xB6 -> CYRILLIC SMALL LETTER GHE WITH UPTURN + '\u0408' # 0xB7 -> CYRILLIC CAPITAL LETTER JE + '\u0404' # 0xB8 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE + '\u0454' # 0xB9 -> CYRILLIC SMALL LETTER UKRAINIAN IE + '\u0407' # 0xBA -> CYRILLIC CAPITAL LETTER YI + '\u0457' # 0xBB -> CYRILLIC SMALL LETTER YI + '\u0409' # 0xBC -> CYRILLIC CAPITAL LETTER LJE + '\u0459' # 0xBD -> CYRILLIC SMALL LETTER LJE + '\u040a' # 0xBE -> CYRILLIC CAPITAL LETTER NJE + '\u045a' # 0xBF -> CYRILLIC SMALL LETTER NJE + '\u0458' # 0xC0 -> CYRILLIC SMALL LETTER JE + '\u0405' # 0xC1 -> CYRILLIC CAPITAL LETTER DZE + '\xac' # 0xC2 -> NOT SIGN + '\u221a' # 0xC3 -> SQUARE ROOT + '\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK + '\u2248' # 0xC5 -> ALMOST EQUAL TO + '\u2206' # 0xC6 -> INCREMENT + '\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS + '\xa0' # 0xCA -> NO-BREAK SPACE + '\u040b' # 0xCB -> CYRILLIC CAPITAL LETTER TSHE + '\u045b' # 0xCC -> CYRILLIC SMALL LETTER TSHE + '\u040c' # 0xCD -> CYRILLIC 
CAPITAL LETTER KJE + '\u045c' # 0xCE -> CYRILLIC SMALL LETTER KJE + '\u0455' # 0xCF -> CYRILLIC SMALL LETTER DZE + '\u2013' # 0xD0 -> EN DASH + '\u2014' # 0xD1 -> EM DASH + '\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK + '\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK + '\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK + '\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK + '\xf7' # 0xD6 -> DIVISION SIGN + '\u201e' # 0xD7 -> DOUBLE LOW-9 QUOTATION MARK + '\u040e' # 0xD8 -> CYRILLIC CAPITAL LETTER SHORT U + '\u045e' # 0xD9 -> CYRILLIC SMALL LETTER SHORT U + '\u040f' # 0xDA -> CYRILLIC CAPITAL LETTER DZHE + '\u045f' # 0xDB -> CYRILLIC SMALL LETTER DZHE + '\u2116' # 0xDC -> NUMERO SIGN + '\u0401' # 0xDD -> CYRILLIC CAPITAL LETTER IO + '\u0451' # 0xDE -> CYRILLIC SMALL LETTER IO + '\u044f' # 0xDF -> CYRILLIC SMALL LETTER YA + '\u0430' # 0xE0 -> CYRILLIC SMALL LETTER A + '\u0431' # 0xE1 -> CYRILLIC SMALL LETTER BE + '\u0432' # 0xE2 -> CYRILLIC SMALL LETTER VE + '\u0433' # 0xE3 -> CYRILLIC SMALL LETTER GHE + '\u0434' # 0xE4 -> CYRILLIC SMALL LETTER DE + '\u0435' # 0xE5 -> CYRILLIC SMALL LETTER IE + '\u0436' # 0xE6 -> CYRILLIC SMALL LETTER ZHE + '\u0437' # 0xE7 -> CYRILLIC SMALL LETTER ZE + '\u0438' # 0xE8 -> CYRILLIC SMALL LETTER I + '\u0439' # 0xE9 -> CYRILLIC SMALL LETTER SHORT I + '\u043a' # 0xEA -> CYRILLIC SMALL LETTER KA + '\u043b' # 0xEB -> CYRILLIC SMALL LETTER EL + '\u043c' # 0xEC -> CYRILLIC SMALL LETTER EM + '\u043d' # 0xED -> CYRILLIC SMALL LETTER EN + '\u043e' # 0xEE -> CYRILLIC SMALL LETTER O + '\u043f' # 0xEF -> CYRILLIC SMALL LETTER PE + '\u0440' # 0xF0 -> CYRILLIC SMALL LETTER ER + '\u0441' # 0xF1 -> CYRILLIC SMALL LETTER ES + '\u0442' # 0xF2 -> CYRILLIC SMALL LETTER TE + '\u0443' # 0xF3 -> CYRILLIC SMALL LETTER U + '\u0444' # 0xF4 -> CYRILLIC SMALL LETTER EF + '\u0445' # 0xF5 -> CYRILLIC SMALL LETTER HA + '\u0446' # 0xF6 -> CYRILLIC SMALL LETTER TSE + '\u0447' # 0xF7 -> CYRILLIC SMALL LETTER CHE + '\u0448' # 0xF8 -> CYRILLIC SMALL LETTER SHA + '\u0449' # 0xF9 -> CYRILLIC SMALL LETTER SHCHA + '\u044a' # 0xFA -> CYRILLIC SMALL LETTER HARD SIGN + '\u044b' # 0xFB -> CYRILLIC SMALL LETTER YERU + '\u044c' # 0xFC -> CYRILLIC SMALL LETTER SOFT SIGN + '\u044d' # 0xFD -> CYRILLIC SMALL LETTER E + '\u044e' # 0xFE -> CYRILLIC SMALL LETTER YU + '\u20ac' # 0xFF -> EURO SIGN +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/mac_farsi.py b/venv/Lib/encodings/mac_farsi.py new file mode 100644 index 00000000..e357d435 --- /dev/null +++ b/venv/Lib/encodings/mac_farsi.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec mac_farsi generated from 'MAPPINGS/VENDORS/APPLE/FARSI.TXT' with gencodec.py. 
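The mac_cyrillic module added above follows the same pattern as every codec generated by gencodec.py: a 256-character decoding_table, an encoding_table built from it with codecs.charmap_build(), and thin Codec/Incremental/Stream wrappers around codecs.charmap_encode() and codecs.charmap_decode(). A minimal sketch of that mapping in use, assuming a standard CPython install where this codec is already registered:

    import codecs

    # str.encode / bytes.decode resolve to the charmap tables shown above.
    raw = "яблоко".encode("mac_cyrillic")
    assert raw[0] == 0xDF                        # decoding_table maps 0xDF -> U+044F (small ya)
    assert raw.decode("mac_cyrillic") == "яблоко"

    # The name declared in getregentry() ('mac-cyrillic') resolves to the same codec.
    assert codecs.lookup("mac-cyrillic").name == "mac-cyrillic"
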
+ +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='mac-farsi', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> CONTROL CHARACTER + '\x01' # 0x01 -> CONTROL CHARACTER + '\x02' # 0x02 -> CONTROL CHARACTER + '\x03' # 0x03 -> CONTROL CHARACTER + '\x04' # 0x04 -> CONTROL CHARACTER + '\x05' # 0x05 -> CONTROL CHARACTER + '\x06' # 0x06 -> CONTROL CHARACTER + '\x07' # 0x07 -> CONTROL CHARACTER + '\x08' # 0x08 -> CONTROL CHARACTER + '\t' # 0x09 -> CONTROL CHARACTER + '\n' # 0x0A -> CONTROL CHARACTER + '\x0b' # 0x0B -> CONTROL CHARACTER + '\x0c' # 0x0C -> CONTROL CHARACTER + '\r' # 0x0D -> CONTROL CHARACTER + '\x0e' # 0x0E -> CONTROL CHARACTER + '\x0f' # 0x0F -> CONTROL CHARACTER + '\x10' # 0x10 -> CONTROL CHARACTER + '\x11' # 0x11 -> CONTROL CHARACTER + '\x12' # 0x12 -> CONTROL CHARACTER + '\x13' # 0x13 -> CONTROL CHARACTER + '\x14' # 0x14 -> CONTROL CHARACTER + '\x15' # 0x15 -> CONTROL CHARACTER + '\x16' # 0x16 -> CONTROL CHARACTER + '\x17' # 0x17 -> CONTROL CHARACTER + '\x18' # 0x18 -> CONTROL CHARACTER + '\x19' # 0x19 -> CONTROL CHARACTER + '\x1a' # 0x1A -> CONTROL CHARACTER + '\x1b' # 0x1B -> CONTROL CHARACTER + '\x1c' # 0x1C -> CONTROL CHARACTER + '\x1d' # 0x1D -> CONTROL CHARACTER + '\x1e' # 0x1E -> CONTROL CHARACTER + '\x1f' # 0x1F -> CONTROL CHARACTER + ' ' # 0x20 -> SPACE, left-right + '!' # 0x21 -> EXCLAMATION MARK, left-right + '"' # 0x22 -> QUOTATION MARK, left-right + '#' # 0x23 -> NUMBER SIGN, left-right + '$' # 0x24 -> DOLLAR SIGN, left-right + '%' # 0x25 -> PERCENT SIGN, left-right + '&' # 0x26 -> AMPERSAND, left-right + "'" # 0x27 -> APOSTROPHE, left-right + '(' # 0x28 -> LEFT PARENTHESIS, left-right + ')' # 0x29 -> RIGHT PARENTHESIS, left-right + '*' # 0x2A -> ASTERISK, left-right + '+' # 0x2B -> PLUS SIGN, left-right + ',' # 0x2C -> COMMA, left-right; in Arabic-script context, displayed as 0x066C ARABIC THOUSANDS SEPARATOR + '-' # 0x2D -> HYPHEN-MINUS, left-right + '.' 
# 0x2E -> FULL STOP, left-right; in Arabic-script context, displayed as 0x066B ARABIC DECIMAL SEPARATOR + '/' # 0x2F -> SOLIDUS, left-right + '0' # 0x30 -> DIGIT ZERO; in Arabic-script context, displayed as 0x06F0 EXTENDED ARABIC-INDIC DIGIT ZERO + '1' # 0x31 -> DIGIT ONE; in Arabic-script context, displayed as 0x06F1 EXTENDED ARABIC-INDIC DIGIT ONE + '2' # 0x32 -> DIGIT TWO; in Arabic-script context, displayed as 0x06F2 EXTENDED ARABIC-INDIC DIGIT TWO + '3' # 0x33 -> DIGIT THREE; in Arabic-script context, displayed as 0x06F3 EXTENDED ARABIC-INDIC DIGIT THREE + '4' # 0x34 -> DIGIT FOUR; in Arabic-script context, displayed as 0x06F4 EXTENDED ARABIC-INDIC DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE; in Arabic-script context, displayed as 0x06F5 EXTENDED ARABIC-INDIC DIGIT FIVE + '6' # 0x36 -> DIGIT SIX; in Arabic-script context, displayed as 0x06F6 EXTENDED ARABIC-INDIC DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN; in Arabic-script context, displayed as 0x06F7 EXTENDED ARABIC-INDIC DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT; in Arabic-script context, displayed as 0x06F8 EXTENDED ARABIC-INDIC DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE; in Arabic-script context, displayed as 0x06F9 EXTENDED ARABIC-INDIC DIGIT NINE + ':' # 0x3A -> COLON, left-right + ';' # 0x3B -> SEMICOLON, left-right + '<' # 0x3C -> LESS-THAN SIGN, left-right + '=' # 0x3D -> EQUALS SIGN, left-right + '>' # 0x3E -> GREATER-THAN SIGN, left-right + '?' # 0x3F -> QUESTION MARK, left-right + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET, left-right + '\\' # 0x5C -> REVERSE SOLIDUS, left-right + ']' # 0x5D -> RIGHT SQUARE BRACKET, left-right + '^' # 0x5E -> CIRCUMFLEX ACCENT, left-right + '_' # 0x5F -> LOW LINE, left-right + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN 
SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET, left-right + '|' # 0x7C -> VERTICAL LINE, left-right + '}' # 0x7D -> RIGHT CURLY BRACKET, left-right + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> CONTROL CHARACTER + '\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS + '\xa0' # 0x81 -> NO-BREAK SPACE, right-left + '\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA + '\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE + '\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE + '\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS + '\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE + '\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE + '\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX + '\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS + '\u06ba' # 0x8B -> ARABIC LETTER NOON GHUNNA + '\xab' # 0x8C -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left + '\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA + '\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE + '\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE + '\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX + '\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS + '\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE + '\u2026' # 0x93 -> HORIZONTAL ELLIPSIS, right-left + '\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX + '\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS + '\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE + '\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE + '\xbb' # 0x98 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left + '\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX + '\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS + '\xf7' # 0x9B -> DIVISION SIGN, right-left + '\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE + '\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE + '\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX + '\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS + ' ' # 0xA0 -> SPACE, right-left + '!' # 0xA1 -> EXCLAMATION MARK, right-left + '"' # 0xA2 -> QUOTATION MARK, right-left + '#' # 0xA3 -> NUMBER SIGN, right-left + '$' # 0xA4 -> DOLLAR SIGN, right-left + '\u066a' # 0xA5 -> ARABIC PERCENT SIGN + '&' # 0xA6 -> AMPERSAND, right-left + "'" # 0xA7 -> APOSTROPHE, right-left + '(' # 0xA8 -> LEFT PARENTHESIS, right-left + ')' # 0xA9 -> RIGHT PARENTHESIS, right-left + '*' # 0xAA -> ASTERISK, right-left + '+' # 0xAB -> PLUS SIGN, right-left + '\u060c' # 0xAC -> ARABIC COMMA + '-' # 0xAD -> HYPHEN-MINUS, right-left + '.' 
# 0xAE -> FULL STOP, right-left + '/' # 0xAF -> SOLIDUS, right-left + '\u06f0' # 0xB0 -> EXTENDED ARABIC-INDIC DIGIT ZERO, right-left (need override) + '\u06f1' # 0xB1 -> EXTENDED ARABIC-INDIC DIGIT ONE, right-left (need override) + '\u06f2' # 0xB2 -> EXTENDED ARABIC-INDIC DIGIT TWO, right-left (need override) + '\u06f3' # 0xB3 -> EXTENDED ARABIC-INDIC DIGIT THREE, right-left (need override) + '\u06f4' # 0xB4 -> EXTENDED ARABIC-INDIC DIGIT FOUR, right-left (need override) + '\u06f5' # 0xB5 -> EXTENDED ARABIC-INDIC DIGIT FIVE, right-left (need override) + '\u06f6' # 0xB6 -> EXTENDED ARABIC-INDIC DIGIT SIX, right-left (need override) + '\u06f7' # 0xB7 -> EXTENDED ARABIC-INDIC DIGIT SEVEN, right-left (need override) + '\u06f8' # 0xB8 -> EXTENDED ARABIC-INDIC DIGIT EIGHT, right-left (need override) + '\u06f9' # 0xB9 -> EXTENDED ARABIC-INDIC DIGIT NINE, right-left (need override) + ':' # 0xBA -> COLON, right-left + '\u061b' # 0xBB -> ARABIC SEMICOLON + '<' # 0xBC -> LESS-THAN SIGN, right-left + '=' # 0xBD -> EQUALS SIGN, right-left + '>' # 0xBE -> GREATER-THAN SIGN, right-left + '\u061f' # 0xBF -> ARABIC QUESTION MARK + '\u274a' # 0xC0 -> EIGHT TEARDROP-SPOKED PROPELLER ASTERISK, right-left + '\u0621' # 0xC1 -> ARABIC LETTER HAMZA + '\u0622' # 0xC2 -> ARABIC LETTER ALEF WITH MADDA ABOVE + '\u0623' # 0xC3 -> ARABIC LETTER ALEF WITH HAMZA ABOVE + '\u0624' # 0xC4 -> ARABIC LETTER WAW WITH HAMZA ABOVE + '\u0625' # 0xC5 -> ARABIC LETTER ALEF WITH HAMZA BELOW + '\u0626' # 0xC6 -> ARABIC LETTER YEH WITH HAMZA ABOVE + '\u0627' # 0xC7 -> ARABIC LETTER ALEF + '\u0628' # 0xC8 -> ARABIC LETTER BEH + '\u0629' # 0xC9 -> ARABIC LETTER TEH MARBUTA + '\u062a' # 0xCA -> ARABIC LETTER TEH + '\u062b' # 0xCB -> ARABIC LETTER THEH + '\u062c' # 0xCC -> ARABIC LETTER JEEM + '\u062d' # 0xCD -> ARABIC LETTER HAH + '\u062e' # 0xCE -> ARABIC LETTER KHAH + '\u062f' # 0xCF -> ARABIC LETTER DAL + '\u0630' # 0xD0 -> ARABIC LETTER THAL + '\u0631' # 0xD1 -> ARABIC LETTER REH + '\u0632' # 0xD2 -> ARABIC LETTER ZAIN + '\u0633' # 0xD3 -> ARABIC LETTER SEEN + '\u0634' # 0xD4 -> ARABIC LETTER SHEEN + '\u0635' # 0xD5 -> ARABIC LETTER SAD + '\u0636' # 0xD6 -> ARABIC LETTER DAD + '\u0637' # 0xD7 -> ARABIC LETTER TAH + '\u0638' # 0xD8 -> ARABIC LETTER ZAH + '\u0639' # 0xD9 -> ARABIC LETTER AIN + '\u063a' # 0xDA -> ARABIC LETTER GHAIN + '[' # 0xDB -> LEFT SQUARE BRACKET, right-left + '\\' # 0xDC -> REVERSE SOLIDUS, right-left + ']' # 0xDD -> RIGHT SQUARE BRACKET, right-left + '^' # 0xDE -> CIRCUMFLEX ACCENT, right-left + '_' # 0xDF -> LOW LINE, right-left + '\u0640' # 0xE0 -> ARABIC TATWEEL + '\u0641' # 0xE1 -> ARABIC LETTER FEH + '\u0642' # 0xE2 -> ARABIC LETTER QAF + '\u0643' # 0xE3 -> ARABIC LETTER KAF + '\u0644' # 0xE4 -> ARABIC LETTER LAM + '\u0645' # 0xE5 -> ARABIC LETTER MEEM + '\u0646' # 0xE6 -> ARABIC LETTER NOON + '\u0647' # 0xE7 -> ARABIC LETTER HEH + '\u0648' # 0xE8 -> ARABIC LETTER WAW + '\u0649' # 0xE9 -> ARABIC LETTER ALEF MAKSURA + '\u064a' # 0xEA -> ARABIC LETTER YEH + '\u064b' # 0xEB -> ARABIC FATHATAN + '\u064c' # 0xEC -> ARABIC DAMMATAN + '\u064d' # 0xED -> ARABIC KASRATAN + '\u064e' # 0xEE -> ARABIC FATHA + '\u064f' # 0xEF -> ARABIC DAMMA + '\u0650' # 0xF0 -> ARABIC KASRA + '\u0651' # 0xF1 -> ARABIC SHADDA + '\u0652' # 0xF2 -> ARABIC SUKUN + '\u067e' # 0xF3 -> ARABIC LETTER PEH + '\u0679' # 0xF4 -> ARABIC LETTER TTEH + '\u0686' # 0xF5 -> ARABIC LETTER TCHEH + '\u06d5' # 0xF6 -> ARABIC LETTER AE + '\u06a4' # 0xF7 -> ARABIC LETTER VEH + '\u06af' # 0xF8 -> ARABIC LETTER GAF + '\u0688' # 0xF9 -> ARABIC LETTER DDAL + 
'\u0691' # 0xFA -> ARABIC LETTER RREH + '{' # 0xFB -> LEFT CURLY BRACKET, right-left + '|' # 0xFC -> VERTICAL LINE, right-left + '}' # 0xFD -> RIGHT CURLY BRACKET, right-left + '\u0698' # 0xFE -> ARABIC LETTER JEH + '\u06d2' # 0xFF -> ARABIC LETTER YEH BARREE +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/mac_greek.py b/venv/Lib/encodings/mac_greek.py new file mode 100644 index 00000000..d3d0c4f0 --- /dev/null +++ b/venv/Lib/encodings/mac_greek.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec mac_greek generated from 'MAPPINGS/VENDORS/APPLE/GREEK.TXT' with gencodec.py. + +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='mac-greek', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> CONTROL CHARACTER + '\x01' # 0x01 -> CONTROL CHARACTER + '\x02' # 0x02 -> CONTROL CHARACTER + '\x03' # 0x03 -> CONTROL CHARACTER + '\x04' # 0x04 -> CONTROL CHARACTER + '\x05' # 0x05 -> CONTROL CHARACTER + '\x06' # 0x06 -> CONTROL CHARACTER + '\x07' # 0x07 -> CONTROL CHARACTER + '\x08' # 0x08 -> CONTROL CHARACTER + '\t' # 0x09 -> CONTROL CHARACTER + '\n' # 0x0A -> CONTROL CHARACTER + '\x0b' # 0x0B -> CONTROL CHARACTER + '\x0c' # 0x0C -> CONTROL CHARACTER + '\r' # 0x0D -> CONTROL CHARACTER + '\x0e' # 0x0E -> CONTROL CHARACTER + '\x0f' # 0x0F -> CONTROL CHARACTER + '\x10' # 0x10 -> CONTROL CHARACTER + '\x11' # 0x11 -> CONTROL CHARACTER + '\x12' # 0x12 -> CONTROL CHARACTER + '\x13' # 0x13 -> CONTROL CHARACTER + '\x14' # 0x14 -> CONTROL CHARACTER + '\x15' # 0x15 -> CONTROL CHARACTER + '\x16' # 0x16 -> CONTROL CHARACTER + '\x17' # 0x17 -> CONTROL CHARACTER + '\x18' # 0x18 -> CONTROL CHARACTER + '\x19' # 0x19 -> CONTROL CHARACTER + '\x1a' # 0x1A -> CONTROL CHARACTER + '\x1b' # 0x1B -> CONTROL CHARACTER + '\x1c' # 0x1C -> CONTROL CHARACTER + '\x1d' # 0x1D -> CONTROL CHARACTER + '\x1e' # 0x1E -> CONTROL CHARACTER + '\x1f' # 0x1F -> CONTROL CHARACTER + ' ' # 0x20 -> SPACE + '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' 
# 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' # 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> CONTROL CHARACTER + '\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS + '\xb9' # 0x81 -> SUPERSCRIPT ONE + '\xb2' # 0x82 -> SUPERSCRIPT TWO + '\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE + '\xb3' # 0x84 -> SUPERSCRIPT THREE + '\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS + '\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\u0385' # 0x87 -> GREEK DIALYTIKA TONOS + '\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE + '\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX + '\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS + '\u0384' # 0x8B -> GREEK TONOS + '\xa8' # 0x8C -> DIAERESIS + '\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA + '\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE + 
'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE + '\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX + '\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS + '\xa3' # 0x92 -> POUND SIGN + '\u2122' # 0x93 -> TRADE MARK SIGN + '\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX + '\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS + '\u2022' # 0x96 -> BULLET + '\xbd' # 0x97 -> VULGAR FRACTION ONE HALF + '\u2030' # 0x98 -> PER MILLE SIGN + '\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX + '\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS + '\xa6' # 0x9B -> BROKEN BAR + '\u20ac' # 0x9C -> EURO SIGN # before Mac OS 9.2.2, was SOFT HYPHEN + '\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE + '\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX + '\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS + '\u2020' # 0xA0 -> DAGGER + '\u0393' # 0xA1 -> GREEK CAPITAL LETTER GAMMA + '\u0394' # 0xA2 -> GREEK CAPITAL LETTER DELTA + '\u0398' # 0xA3 -> GREEK CAPITAL LETTER THETA + '\u039b' # 0xA4 -> GREEK CAPITAL LETTER LAMDA + '\u039e' # 0xA5 -> GREEK CAPITAL LETTER XI + '\u03a0' # 0xA6 -> GREEK CAPITAL LETTER PI + '\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S + '\xae' # 0xA8 -> REGISTERED SIGN + '\xa9' # 0xA9 -> COPYRIGHT SIGN + '\u03a3' # 0xAA -> GREEK CAPITAL LETTER SIGMA + '\u03aa' # 0xAB -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA + '\xa7' # 0xAC -> SECTION SIGN + '\u2260' # 0xAD -> NOT EQUAL TO + '\xb0' # 0xAE -> DEGREE SIGN + '\xb7' # 0xAF -> MIDDLE DOT + '\u0391' # 0xB0 -> GREEK CAPITAL LETTER ALPHA + '\xb1' # 0xB1 -> PLUS-MINUS SIGN + '\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO + '\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO + '\xa5' # 0xB4 -> YEN SIGN + '\u0392' # 0xB5 -> GREEK CAPITAL LETTER BETA + '\u0395' # 0xB6 -> GREEK CAPITAL LETTER EPSILON + '\u0396' # 0xB7 -> GREEK CAPITAL LETTER ZETA + '\u0397' # 0xB8 -> GREEK CAPITAL LETTER ETA + '\u0399' # 0xB9 -> GREEK CAPITAL LETTER IOTA + '\u039a' # 0xBA -> GREEK CAPITAL LETTER KAPPA + '\u039c' # 0xBB -> GREEK CAPITAL LETTER MU + '\u03a6' # 0xBC -> GREEK CAPITAL LETTER PHI + '\u03ab' # 0xBD -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA + '\u03a8' # 0xBE -> GREEK CAPITAL LETTER PSI + '\u03a9' # 0xBF -> GREEK CAPITAL LETTER OMEGA + '\u03ac' # 0xC0 -> GREEK SMALL LETTER ALPHA WITH TONOS + '\u039d' # 0xC1 -> GREEK CAPITAL LETTER NU + '\xac' # 0xC2 -> NOT SIGN + '\u039f' # 0xC3 -> GREEK CAPITAL LETTER OMICRON + '\u03a1' # 0xC4 -> GREEK CAPITAL LETTER RHO + '\u2248' # 0xC5 -> ALMOST EQUAL TO + '\u03a4' # 0xC6 -> GREEK CAPITAL LETTER TAU + '\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS + '\xa0' # 0xCA -> NO-BREAK SPACE + '\u03a5' # 0xCB -> GREEK CAPITAL LETTER UPSILON + '\u03a7' # 0xCC -> GREEK CAPITAL LETTER CHI + '\u0386' # 0xCD -> GREEK CAPITAL LETTER ALPHA WITH TONOS + '\u0388' # 0xCE -> GREEK CAPITAL LETTER EPSILON WITH TONOS + '\u0153' # 0xCF -> LATIN SMALL LIGATURE OE + '\u2013' # 0xD0 -> EN DASH + '\u2015' # 0xD1 -> HORIZONTAL BAR + '\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK + '\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK + '\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK + '\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK + '\xf7' # 0xD6 -> DIVISION SIGN + '\u0389' # 0xD7 -> GREEK CAPITAL LETTER ETA WITH TONOS + '\u038a' # 0xD8 -> GREEK CAPITAL LETTER IOTA WITH TONOS + '\u038c' # 0xD9 -> GREEK CAPITAL LETTER OMICRON WITH TONOS + '\u038e' # 0xDA -> GREEK CAPITAL LETTER UPSILON WITH TONOS + '\u03ad' # 0xDB -> GREEK SMALL LETTER 
EPSILON WITH TONOS + '\u03ae' # 0xDC -> GREEK SMALL LETTER ETA WITH TONOS + '\u03af' # 0xDD -> GREEK SMALL LETTER IOTA WITH TONOS + '\u03cc' # 0xDE -> GREEK SMALL LETTER OMICRON WITH TONOS + '\u038f' # 0xDF -> GREEK CAPITAL LETTER OMEGA WITH TONOS + '\u03cd' # 0xE0 -> GREEK SMALL LETTER UPSILON WITH TONOS + '\u03b1' # 0xE1 -> GREEK SMALL LETTER ALPHA + '\u03b2' # 0xE2 -> GREEK SMALL LETTER BETA + '\u03c8' # 0xE3 -> GREEK SMALL LETTER PSI + '\u03b4' # 0xE4 -> GREEK SMALL LETTER DELTA + '\u03b5' # 0xE5 -> GREEK SMALL LETTER EPSILON + '\u03c6' # 0xE6 -> GREEK SMALL LETTER PHI + '\u03b3' # 0xE7 -> GREEK SMALL LETTER GAMMA + '\u03b7' # 0xE8 -> GREEK SMALL LETTER ETA + '\u03b9' # 0xE9 -> GREEK SMALL LETTER IOTA + '\u03be' # 0xEA -> GREEK SMALL LETTER XI + '\u03ba' # 0xEB -> GREEK SMALL LETTER KAPPA + '\u03bb' # 0xEC -> GREEK SMALL LETTER LAMDA + '\u03bc' # 0xED -> GREEK SMALL LETTER MU + '\u03bd' # 0xEE -> GREEK SMALL LETTER NU + '\u03bf' # 0xEF -> GREEK SMALL LETTER OMICRON + '\u03c0' # 0xF0 -> GREEK SMALL LETTER PI + '\u03ce' # 0xF1 -> GREEK SMALL LETTER OMEGA WITH TONOS + '\u03c1' # 0xF2 -> GREEK SMALL LETTER RHO + '\u03c3' # 0xF3 -> GREEK SMALL LETTER SIGMA + '\u03c4' # 0xF4 -> GREEK SMALL LETTER TAU + '\u03b8' # 0xF5 -> GREEK SMALL LETTER THETA + '\u03c9' # 0xF6 -> GREEK SMALL LETTER OMEGA + '\u03c2' # 0xF7 -> GREEK SMALL LETTER FINAL SIGMA + '\u03c7' # 0xF8 -> GREEK SMALL LETTER CHI + '\u03c5' # 0xF9 -> GREEK SMALL LETTER UPSILON + '\u03b6' # 0xFA -> GREEK SMALL LETTER ZETA + '\u03ca' # 0xFB -> GREEK SMALL LETTER IOTA WITH DIALYTIKA + '\u03cb' # 0xFC -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA + '\u0390' # 0xFD -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS + '\u03b0' # 0xFE -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS + '\xad' # 0xFF -> SOFT HYPHEN # before Mac OS 9.2.2, was undefined +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/mac_iceland.py b/venv/Lib/encodings/mac_iceland.py new file mode 100644 index 00000000..add10f4e --- /dev/null +++ b/venv/Lib/encodings/mac_iceland.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec mac_iceland generated from 'MAPPINGS/VENDORS/APPLE/ICELAND.TXT' with gencodec.py. 
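Each of these modules, mac_greek included, is just table data plus the generic charmap hooks; the incremental classes are stateless wrappers that call codecs.charmap_decode()/charmap_encode() with self.errors. A short sketch of the same machinery driven directly, with a toy two-entry table alongside the vendored mac_greek decoder (byte values taken from the table above):

    import codecs
    from encodings import mac_greek

    # The incremental classes simply forward to the charmap functions.
    assert mac_greek.IncrementalDecoder().decode(b"\xe1\xe2") == "\u03b1\u03b2"  # alpha, beta

    # charmap_build() inverts a decoding table; the same call ends every module in this diff.
    table = "AB"                                   # byte 0x00 -> 'A', byte 0x01 -> 'B'
    enc = codecs.charmap_build(table)
    assert codecs.charmap_encode("BA", "strict", enc) == (b"\x01\x00", 2)
    assert codecs.charmap_decode(b"\x00\x01", "strict", table) == ("AB", 2)
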
+ +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='mac-iceland', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> CONTROL CHARACTER + '\x01' # 0x01 -> CONTROL CHARACTER + '\x02' # 0x02 -> CONTROL CHARACTER + '\x03' # 0x03 -> CONTROL CHARACTER + '\x04' # 0x04 -> CONTROL CHARACTER + '\x05' # 0x05 -> CONTROL CHARACTER + '\x06' # 0x06 -> CONTROL CHARACTER + '\x07' # 0x07 -> CONTROL CHARACTER + '\x08' # 0x08 -> CONTROL CHARACTER + '\t' # 0x09 -> CONTROL CHARACTER + '\n' # 0x0A -> CONTROL CHARACTER + '\x0b' # 0x0B -> CONTROL CHARACTER + '\x0c' # 0x0C -> CONTROL CHARACTER + '\r' # 0x0D -> CONTROL CHARACTER + '\x0e' # 0x0E -> CONTROL CHARACTER + '\x0f' # 0x0F -> CONTROL CHARACTER + '\x10' # 0x10 -> CONTROL CHARACTER + '\x11' # 0x11 -> CONTROL CHARACTER + '\x12' # 0x12 -> CONTROL CHARACTER + '\x13' # 0x13 -> CONTROL CHARACTER + '\x14' # 0x14 -> CONTROL CHARACTER + '\x15' # 0x15 -> CONTROL CHARACTER + '\x16' # 0x16 -> CONTROL CHARACTER + '\x17' # 0x17 -> CONTROL CHARACTER + '\x18' # 0x18 -> CONTROL CHARACTER + '\x19' # 0x19 -> CONTROL CHARACTER + '\x1a' # 0x1A -> CONTROL CHARACTER + '\x1b' # 0x1B -> CONTROL CHARACTER + '\x1c' # 0x1C -> CONTROL CHARACTER + '\x1d' # 0x1D -> CONTROL CHARACTER + '\x1e' # 0x1E -> CONTROL CHARACTER + '\x1f' # 0x1F -> CONTROL CHARACTER + ' ' # 0x20 -> SPACE + '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' # 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' 
# 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> CONTROL CHARACTER + '\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS + '\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE + '\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA + '\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE + '\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE + '\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS + '\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE + '\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE + '\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX + '\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS + '\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE + '\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE + '\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA + '\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE + '\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE + '\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX + '\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS + '\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE + '\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE + '\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX + '\xef' # 0x95 -> LATIN SMALL 
LETTER I WITH DIAERESIS + '\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE + '\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE + '\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE + '\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX + '\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS + '\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE + '\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE + '\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE + '\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX + '\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS + '\xdd' # 0xA0 -> LATIN CAPITAL LETTER Y WITH ACUTE + '\xb0' # 0xA1 -> DEGREE SIGN + '\xa2' # 0xA2 -> CENT SIGN + '\xa3' # 0xA3 -> POUND SIGN + '\xa7' # 0xA4 -> SECTION SIGN + '\u2022' # 0xA5 -> BULLET + '\xb6' # 0xA6 -> PILCROW SIGN + '\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S + '\xae' # 0xA8 -> REGISTERED SIGN + '\xa9' # 0xA9 -> COPYRIGHT SIGN + '\u2122' # 0xAA -> TRADE MARK SIGN + '\xb4' # 0xAB -> ACUTE ACCENT + '\xa8' # 0xAC -> DIAERESIS + '\u2260' # 0xAD -> NOT EQUAL TO + '\xc6' # 0xAE -> LATIN CAPITAL LETTER AE + '\xd8' # 0xAF -> LATIN CAPITAL LETTER O WITH STROKE + '\u221e' # 0xB0 -> INFINITY + '\xb1' # 0xB1 -> PLUS-MINUS SIGN + '\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO + '\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO + '\xa5' # 0xB4 -> YEN SIGN + '\xb5' # 0xB5 -> MICRO SIGN + '\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL + '\u2211' # 0xB7 -> N-ARY SUMMATION + '\u220f' # 0xB8 -> N-ARY PRODUCT + '\u03c0' # 0xB9 -> GREEK SMALL LETTER PI + '\u222b' # 0xBA -> INTEGRAL + '\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR + '\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR + '\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA + '\xe6' # 0xBE -> LATIN SMALL LETTER AE + '\xf8' # 0xBF -> LATIN SMALL LETTER O WITH STROKE + '\xbf' # 0xC0 -> INVERTED QUESTION MARK + '\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK + '\xac' # 0xC2 -> NOT SIGN + '\u221a' # 0xC3 -> SQUARE ROOT + '\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK + '\u2248' # 0xC5 -> ALMOST EQUAL TO + '\u2206' # 0xC6 -> INCREMENT + '\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS + '\xa0' # 0xCA -> NO-BREAK SPACE + '\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE + '\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE + '\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE + '\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE + '\u0153' # 0xCF -> LATIN SMALL LIGATURE OE + '\u2013' # 0xD0 -> EN DASH + '\u2014' # 0xD1 -> EM DASH + '\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK + '\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK + '\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK + '\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK + '\xf7' # 0xD6 -> DIVISION SIGN + '\u25ca' # 0xD7 -> LOZENGE + '\xff' # 0xD8 -> LATIN SMALL LETTER Y WITH DIAERESIS + '\u0178' # 0xD9 -> LATIN CAPITAL LETTER Y WITH DIAERESIS + '\u2044' # 0xDA -> FRACTION SLASH + '\u20ac' # 0xDB -> EURO SIGN + '\xd0' # 0xDC -> LATIN CAPITAL LETTER ETH + '\xf0' # 0xDD -> LATIN SMALL LETTER ETH + '\xde' # 0xDE -> LATIN CAPITAL LETTER THORN + '\xfe' # 0xDF -> LATIN SMALL LETTER THORN + '\xfd' # 0xE0 -> LATIN SMALL LETTER Y WITH ACUTE + '\xb7' # 0xE1 -> MIDDLE DOT + '\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK + '\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK + '\u2030' # 0xE4 -> PER MILLE SIGN + '\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX + '\xca' # 0xE6 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX + '\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE + '\xcb' # 0xE8 -> 
LATIN CAPITAL LETTER E WITH DIAERESIS + '\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE + '\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE + '\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX + '\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS + '\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE + '\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE + '\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX + '\uf8ff' # 0xF0 -> Apple logo + '\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE + '\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE + '\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX + '\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE + '\u0131' # 0xF5 -> LATIN SMALL LETTER DOTLESS I + '\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT + '\u02dc' # 0xF7 -> SMALL TILDE + '\xaf' # 0xF8 -> MACRON + '\u02d8' # 0xF9 -> BREVE + '\u02d9' # 0xFA -> DOT ABOVE + '\u02da' # 0xFB -> RING ABOVE + '\xb8' # 0xFC -> CEDILLA + '\u02dd' # 0xFD -> DOUBLE ACUTE ACCENT + '\u02db' # 0xFE -> OGONEK + '\u02c7' # 0xFF -> CARON +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/mac_latin2.py b/venv/Lib/encodings/mac_latin2.py new file mode 100644 index 00000000..da9d4b13 --- /dev/null +++ b/venv/Lib/encodings/mac_latin2.py @@ -0,0 +1,312 @@ +""" Python Character Mapping Codec mac_latin2 generated from 'MAPPINGS/VENDORS/MICSFT/MAC/LATIN2.TXT' with gencodec.py. + +Written by Marc-Andre Lemburg (mal@lemburg.com). + +(c) Copyright CNRI, All Rights Reserved. NO WARRANTY. +(c) Copyright 2000 Guido van Rossum. + +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='mac-latin2', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x04' # 0x04 -> END OF TRANSMISSION + '\x05' # 0x05 -> ENQUIRY + '\x06' # 0x06 -> ACKNOWLEDGE + '\x07' # 0x07 -> BELL + '\x08' # 0x08 -> BACKSPACE + '\t' # 0x09 -> HORIZONTAL TABULATION + '\n' # 0x0A -> LINE FEED + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x14' # 0x14 -> DEVICE CONTROL FOUR + '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x16 -> SYNCHRONOUS IDLE + '\x17' # 0x17 -> END OF TRANSMISSION BLOCK + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x1a' # 0x1A -> SUBSTITUTE 
+ '\x1b' # 0x1B -> ESCAPE + '\x1c' # 0x1C -> FILE SEPARATOR + '\x1d' # 0x1D -> GROUP SEPARATOR + '\x1e' # 0x1E -> RECORD SEPARATOR + '\x1f' # 0x1F -> UNIT SEPARATOR + ' ' # 0x20 -> SPACE + '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' # 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' # 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> DELETE + '\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS + '\u0100' # 0x81 -> LATIN CAPITAL LETTER A WITH MACRON + '\u0101' # 0x82 -> LATIN SMALL LETTER A 
WITH MACRON + '\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE + '\u0104' # 0x84 -> LATIN CAPITAL LETTER A WITH OGONEK + '\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS + '\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE + '\u0105' # 0x88 -> LATIN SMALL LETTER A WITH OGONEK + '\u010c' # 0x89 -> LATIN CAPITAL LETTER C WITH CARON + '\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS + '\u010d' # 0x8B -> LATIN SMALL LETTER C WITH CARON + '\u0106' # 0x8C -> LATIN CAPITAL LETTER C WITH ACUTE + '\u0107' # 0x8D -> LATIN SMALL LETTER C WITH ACUTE + '\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE + '\u0179' # 0x8F -> LATIN CAPITAL LETTER Z WITH ACUTE + '\u017a' # 0x90 -> LATIN SMALL LETTER Z WITH ACUTE + '\u010e' # 0x91 -> LATIN CAPITAL LETTER D WITH CARON + '\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE + '\u010f' # 0x93 -> LATIN SMALL LETTER D WITH CARON + '\u0112' # 0x94 -> LATIN CAPITAL LETTER E WITH MACRON + '\u0113' # 0x95 -> LATIN SMALL LETTER E WITH MACRON + '\u0116' # 0x96 -> LATIN CAPITAL LETTER E WITH DOT ABOVE + '\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE + '\u0117' # 0x98 -> LATIN SMALL LETTER E WITH DOT ABOVE + '\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX + '\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS + '\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE + '\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE + '\u011a' # 0x9D -> LATIN CAPITAL LETTER E WITH CARON + '\u011b' # 0x9E -> LATIN SMALL LETTER E WITH CARON + '\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS + '\u2020' # 0xA0 -> DAGGER + '\xb0' # 0xA1 -> DEGREE SIGN + '\u0118' # 0xA2 -> LATIN CAPITAL LETTER E WITH OGONEK + '\xa3' # 0xA3 -> POUND SIGN + '\xa7' # 0xA4 -> SECTION SIGN + '\u2022' # 0xA5 -> BULLET + '\xb6' # 0xA6 -> PILCROW SIGN + '\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S + '\xae' # 0xA8 -> REGISTERED SIGN + '\xa9' # 0xA9 -> COPYRIGHT SIGN + '\u2122' # 0xAA -> TRADE MARK SIGN + '\u0119' # 0xAB -> LATIN SMALL LETTER E WITH OGONEK + '\xa8' # 0xAC -> DIAERESIS + '\u2260' # 0xAD -> NOT EQUAL TO + '\u0123' # 0xAE -> LATIN SMALL LETTER G WITH CEDILLA + '\u012e' # 0xAF -> LATIN CAPITAL LETTER I WITH OGONEK + '\u012f' # 0xB0 -> LATIN SMALL LETTER I WITH OGONEK + '\u012a' # 0xB1 -> LATIN CAPITAL LETTER I WITH MACRON + '\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO + '\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO + '\u012b' # 0xB4 -> LATIN SMALL LETTER I WITH MACRON + '\u0136' # 0xB5 -> LATIN CAPITAL LETTER K WITH CEDILLA + '\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL + '\u2211' # 0xB7 -> N-ARY SUMMATION + '\u0142' # 0xB8 -> LATIN SMALL LETTER L WITH STROKE + '\u013b' # 0xB9 -> LATIN CAPITAL LETTER L WITH CEDILLA + '\u013c' # 0xBA -> LATIN SMALL LETTER L WITH CEDILLA + '\u013d' # 0xBB -> LATIN CAPITAL LETTER L WITH CARON + '\u013e' # 0xBC -> LATIN SMALL LETTER L WITH CARON + '\u0139' # 0xBD -> LATIN CAPITAL LETTER L WITH ACUTE + '\u013a' # 0xBE -> LATIN SMALL LETTER L WITH ACUTE + '\u0145' # 0xBF -> LATIN CAPITAL LETTER N WITH CEDILLA + '\u0146' # 0xC0 -> LATIN SMALL LETTER N WITH CEDILLA + '\u0143' # 0xC1 -> LATIN CAPITAL LETTER N WITH ACUTE + '\xac' # 0xC2 -> NOT SIGN + '\u221a' # 0xC3 -> SQUARE ROOT + '\u0144' # 0xC4 -> LATIN SMALL LETTER N WITH ACUTE + '\u0147' # 0xC5 -> LATIN CAPITAL LETTER N WITH CARON + '\u2206' # 0xC6 -> INCREMENT + '\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS + '\xa0' # 0xCA -> NO-BREAK SPACE + '\u0148' # 0xCB 
-> LATIN SMALL LETTER N WITH CARON + '\u0150' # 0xCC -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE + '\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE + '\u0151' # 0xCE -> LATIN SMALL LETTER O WITH DOUBLE ACUTE + '\u014c' # 0xCF -> LATIN CAPITAL LETTER O WITH MACRON + '\u2013' # 0xD0 -> EN DASH + '\u2014' # 0xD1 -> EM DASH + '\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK + '\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK + '\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK + '\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK + '\xf7' # 0xD6 -> DIVISION SIGN + '\u25ca' # 0xD7 -> LOZENGE + '\u014d' # 0xD8 -> LATIN SMALL LETTER O WITH MACRON + '\u0154' # 0xD9 -> LATIN CAPITAL LETTER R WITH ACUTE + '\u0155' # 0xDA -> LATIN SMALL LETTER R WITH ACUTE + '\u0158' # 0xDB -> LATIN CAPITAL LETTER R WITH CARON + '\u2039' # 0xDC -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK + '\u203a' # 0xDD -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK + '\u0159' # 0xDE -> LATIN SMALL LETTER R WITH CARON + '\u0156' # 0xDF -> LATIN CAPITAL LETTER R WITH CEDILLA + '\u0157' # 0xE0 -> LATIN SMALL LETTER R WITH CEDILLA + '\u0160' # 0xE1 -> LATIN CAPITAL LETTER S WITH CARON + '\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK + '\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK + '\u0161' # 0xE4 -> LATIN SMALL LETTER S WITH CARON + '\u015a' # 0xE5 -> LATIN CAPITAL LETTER S WITH ACUTE + '\u015b' # 0xE6 -> LATIN SMALL LETTER S WITH ACUTE + '\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE + '\u0164' # 0xE8 -> LATIN CAPITAL LETTER T WITH CARON + '\u0165' # 0xE9 -> LATIN SMALL LETTER T WITH CARON + '\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE + '\u017d' # 0xEB -> LATIN CAPITAL LETTER Z WITH CARON + '\u017e' # 0xEC -> LATIN SMALL LETTER Z WITH CARON + '\u016a' # 0xED -> LATIN CAPITAL LETTER U WITH MACRON + '\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE + '\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX + '\u016b' # 0xF0 -> LATIN SMALL LETTER U WITH MACRON + '\u016e' # 0xF1 -> LATIN CAPITAL LETTER U WITH RING ABOVE + '\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE + '\u016f' # 0xF3 -> LATIN SMALL LETTER U WITH RING ABOVE + '\u0170' # 0xF4 -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE + '\u0171' # 0xF5 -> LATIN SMALL LETTER U WITH DOUBLE ACUTE + '\u0172' # 0xF6 -> LATIN CAPITAL LETTER U WITH OGONEK + '\u0173' # 0xF7 -> LATIN SMALL LETTER U WITH OGONEK + '\xdd' # 0xF8 -> LATIN CAPITAL LETTER Y WITH ACUTE + '\xfd' # 0xF9 -> LATIN SMALL LETTER Y WITH ACUTE + '\u0137' # 0xFA -> LATIN SMALL LETTER K WITH CEDILLA + '\u017b' # 0xFB -> LATIN CAPITAL LETTER Z WITH DOT ABOVE + '\u0141' # 0xFC -> LATIN CAPITAL LETTER L WITH STROKE + '\u017c' # 0xFD -> LATIN SMALL LETTER Z WITH DOT ABOVE + '\u0122' # 0xFE -> LATIN CAPITAL LETTER G WITH CEDILLA + '\u02c7' # 0xFF -> CARON +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/mac_roman.py b/venv/Lib/encodings/mac_roman.py new file mode 100644 index 00000000..b74b0021 --- /dev/null +++ b/venv/Lib/encodings/mac_roman.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec mac_roman generated from 'MAPPINGS/VENDORS/APPLE/ROMAN.TXT' with gencodec.py. 
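getregentry() in each of these files only packages the parts into a codecs.CodecInfo; the encodings package's search function performs the actual registration the first time the codec is looked up. A hedged sketch of the same registration done by hand for a made-up codec (the name demo_charmap and its identity-style table are invented for illustration, not part of the stdlib or this diff):

    import codecs

    # Toy 256-entry decoding table (Latin-1 order); the real modules ship generated tables.
    DECODING_TABLE = "".join(chr(i) for i in range(256))
    ENCODING_TABLE = codecs.charmap_build(DECODING_TABLE)

    def _search(name):
        if name != "demo_charmap":                 # hypothetical name for this sketch only
            return None
        return codecs.CodecInfo(
            name="demo_charmap",
            encode=lambda s, errors="strict": codecs.charmap_encode(s, errors, ENCODING_TABLE),
            decode=lambda b, errors="strict": codecs.charmap_decode(b, errors, DECODING_TABLE),
        )

    codecs.register(_search)
    assert "é".encode("demo_charmap") == b"\xe9"
    assert b"\xe9".decode("demo_charmap") == "é"
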
+ +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='mac-roman', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> CONTROL CHARACTER + '\x01' # 0x01 -> CONTROL CHARACTER + '\x02' # 0x02 -> CONTROL CHARACTER + '\x03' # 0x03 -> CONTROL CHARACTER + '\x04' # 0x04 -> CONTROL CHARACTER + '\x05' # 0x05 -> CONTROL CHARACTER + '\x06' # 0x06 -> CONTROL CHARACTER + '\x07' # 0x07 -> CONTROL CHARACTER + '\x08' # 0x08 -> CONTROL CHARACTER + '\t' # 0x09 -> CONTROL CHARACTER + '\n' # 0x0A -> CONTROL CHARACTER + '\x0b' # 0x0B -> CONTROL CHARACTER + '\x0c' # 0x0C -> CONTROL CHARACTER + '\r' # 0x0D -> CONTROL CHARACTER + '\x0e' # 0x0E -> CONTROL CHARACTER + '\x0f' # 0x0F -> CONTROL CHARACTER + '\x10' # 0x10 -> CONTROL CHARACTER + '\x11' # 0x11 -> CONTROL CHARACTER + '\x12' # 0x12 -> CONTROL CHARACTER + '\x13' # 0x13 -> CONTROL CHARACTER + '\x14' # 0x14 -> CONTROL CHARACTER + '\x15' # 0x15 -> CONTROL CHARACTER + '\x16' # 0x16 -> CONTROL CHARACTER + '\x17' # 0x17 -> CONTROL CHARACTER + '\x18' # 0x18 -> CONTROL CHARACTER + '\x19' # 0x19 -> CONTROL CHARACTER + '\x1a' # 0x1A -> CONTROL CHARACTER + '\x1b' # 0x1B -> CONTROL CHARACTER + '\x1c' # 0x1C -> CONTROL CHARACTER + '\x1d' # 0x1D -> CONTROL CHARACTER + '\x1e' # 0x1E -> CONTROL CHARACTER + '\x1f' # 0x1F -> CONTROL CHARACTER + ' ' # 0x20 -> SPACE + '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' # 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' 
# 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> CONTROL CHARACTER + '\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS + '\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE + '\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA + '\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE + '\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE + '\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS + '\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE + '\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE + '\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX + '\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS + '\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE + '\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE + '\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA + '\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE + '\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE + '\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX + '\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS + '\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE + '\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE + '\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX + '\xef' # 0x95 -> LATIN SMALL 
LETTER I WITH DIAERESIS + '\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE + '\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE + '\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE + '\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX + '\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS + '\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE + '\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE + '\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE + '\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX + '\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS + '\u2020' # 0xA0 -> DAGGER + '\xb0' # 0xA1 -> DEGREE SIGN + '\xa2' # 0xA2 -> CENT SIGN + '\xa3' # 0xA3 -> POUND SIGN + '\xa7' # 0xA4 -> SECTION SIGN + '\u2022' # 0xA5 -> BULLET + '\xb6' # 0xA6 -> PILCROW SIGN + '\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S + '\xae' # 0xA8 -> REGISTERED SIGN + '\xa9' # 0xA9 -> COPYRIGHT SIGN + '\u2122' # 0xAA -> TRADE MARK SIGN + '\xb4' # 0xAB -> ACUTE ACCENT + '\xa8' # 0xAC -> DIAERESIS + '\u2260' # 0xAD -> NOT EQUAL TO + '\xc6' # 0xAE -> LATIN CAPITAL LETTER AE + '\xd8' # 0xAF -> LATIN CAPITAL LETTER O WITH STROKE + '\u221e' # 0xB0 -> INFINITY + '\xb1' # 0xB1 -> PLUS-MINUS SIGN + '\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO + '\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO + '\xa5' # 0xB4 -> YEN SIGN + '\xb5' # 0xB5 -> MICRO SIGN + '\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL + '\u2211' # 0xB7 -> N-ARY SUMMATION + '\u220f' # 0xB8 -> N-ARY PRODUCT + '\u03c0' # 0xB9 -> GREEK SMALL LETTER PI + '\u222b' # 0xBA -> INTEGRAL + '\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR + '\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR + '\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA + '\xe6' # 0xBE -> LATIN SMALL LETTER AE + '\xf8' # 0xBF -> LATIN SMALL LETTER O WITH STROKE + '\xbf' # 0xC0 -> INVERTED QUESTION MARK + '\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK + '\xac' # 0xC2 -> NOT SIGN + '\u221a' # 0xC3 -> SQUARE ROOT + '\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK + '\u2248' # 0xC5 -> ALMOST EQUAL TO + '\u2206' # 0xC6 -> INCREMENT + '\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS + '\xa0' # 0xCA -> NO-BREAK SPACE + '\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE + '\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE + '\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE + '\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE + '\u0153' # 0xCF -> LATIN SMALL LIGATURE OE + '\u2013' # 0xD0 -> EN DASH + '\u2014' # 0xD1 -> EM DASH + '\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK + '\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK + '\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK + '\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK + '\xf7' # 0xD6 -> DIVISION SIGN + '\u25ca' # 0xD7 -> LOZENGE + '\xff' # 0xD8 -> LATIN SMALL LETTER Y WITH DIAERESIS + '\u0178' # 0xD9 -> LATIN CAPITAL LETTER Y WITH DIAERESIS + '\u2044' # 0xDA -> FRACTION SLASH + '\u20ac' # 0xDB -> EURO SIGN + '\u2039' # 0xDC -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK + '\u203a' # 0xDD -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK + '\ufb01' # 0xDE -> LATIN SMALL LIGATURE FI + '\ufb02' # 0xDF -> LATIN SMALL LIGATURE FL + '\u2021' # 0xE0 -> DOUBLE DAGGER + '\xb7' # 0xE1 -> MIDDLE DOT + '\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK + '\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK + '\u2030' # 0xE4 -> PER MILLE SIGN + '\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX + '\xca' # 0xE6 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX + '\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE + '\xcb' # 0xE8 -> 
LATIN CAPITAL LETTER E WITH DIAERESIS + '\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE + '\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE + '\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX + '\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS + '\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE + '\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE + '\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX + '\uf8ff' # 0xF0 -> Apple logo + '\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE + '\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE + '\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX + '\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE + '\u0131' # 0xF5 -> LATIN SMALL LETTER DOTLESS I + '\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT + '\u02dc' # 0xF7 -> SMALL TILDE + '\xaf' # 0xF8 -> MACRON + '\u02d8' # 0xF9 -> BREVE + '\u02d9' # 0xFA -> DOT ABOVE + '\u02da' # 0xFB -> RING ABOVE + '\xb8' # 0xFC -> CEDILLA + '\u02dd' # 0xFD -> DOUBLE ACUTE ACCENT + '\u02db' # 0xFE -> OGONEK + '\u02c7' # 0xFF -> CARON +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/mac_romanian.py b/venv/Lib/encodings/mac_romanian.py new file mode 100644 index 00000000..d141b4c5 --- /dev/null +++ b/venv/Lib/encodings/mac_romanian.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec mac_romanian generated from 'MAPPINGS/VENDORS/APPLE/ROMANIAN.TXT' with gencodec.py. + +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='mac-romanian', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> CONTROL CHARACTER + '\x01' # 0x01 -> CONTROL CHARACTER + '\x02' # 0x02 -> CONTROL CHARACTER + '\x03' # 0x03 -> CONTROL CHARACTER + '\x04' # 0x04 -> CONTROL CHARACTER + '\x05' # 0x05 -> CONTROL CHARACTER + '\x06' # 0x06 -> CONTROL CHARACTER + '\x07' # 0x07 -> CONTROL CHARACTER + '\x08' # 0x08 -> CONTROL CHARACTER + '\t' # 0x09 -> CONTROL CHARACTER + '\n' # 0x0A -> CONTROL CHARACTER + '\x0b' # 0x0B -> CONTROL CHARACTER + '\x0c' # 0x0C -> CONTROL CHARACTER + '\r' # 0x0D -> CONTROL CHARACTER + '\x0e' # 0x0E -> CONTROL CHARACTER + '\x0f' # 0x0F -> CONTROL CHARACTER + '\x10' # 0x10 -> CONTROL CHARACTER + '\x11' # 0x11 -> CONTROL CHARACTER + '\x12' # 0x12 -> CONTROL CHARACTER + '\x13' # 0x13 -> CONTROL CHARACTER + '\x14' # 0x14 -> CONTROL CHARACTER + '\x15' # 0x15 -> CONTROL CHARACTER + '\x16' # 0x16 -> CONTROL CHARACTER + '\x17' # 0x17 -> CONTROL CHARACTER + '\x18' # 0x18 -> CONTROL CHARACTER + '\x19' # 0x19 -> CONTROL CHARACTER + '\x1a' # 0x1A -> CONTROL CHARACTER + '\x1b' # 0x1B -> CONTROL CHARACTER + '\x1c' 
# 0x1C -> CONTROL CHARACTER + '\x1d' # 0x1D -> CONTROL CHARACTER + '\x1e' # 0x1E -> CONTROL CHARACTER + '\x1f' # 0x1F -> CONTROL CHARACTER + ' ' # 0x20 -> SPACE + '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' # 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' # 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> CONTROL CHARACTER + '\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS + '\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE + '\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA + 
'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE + '\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE + '\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS + '\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE + '\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE + '\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX + '\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS + '\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE + '\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE + '\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA + '\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE + '\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE + '\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX + '\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS + '\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE + '\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE + '\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX + '\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS + '\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE + '\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE + '\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE + '\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX + '\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS + '\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE + '\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE + '\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE + '\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX + '\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS + '\u2020' # 0xA0 -> DAGGER + '\xb0' # 0xA1 -> DEGREE SIGN + '\xa2' # 0xA2 -> CENT SIGN + '\xa3' # 0xA3 -> POUND SIGN + '\xa7' # 0xA4 -> SECTION SIGN + '\u2022' # 0xA5 -> BULLET + '\xb6' # 0xA6 -> PILCROW SIGN + '\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S + '\xae' # 0xA8 -> REGISTERED SIGN + '\xa9' # 0xA9 -> COPYRIGHT SIGN + '\u2122' # 0xAA -> TRADE MARK SIGN + '\xb4' # 0xAB -> ACUTE ACCENT + '\xa8' # 0xAC -> DIAERESIS + '\u2260' # 0xAD -> NOT EQUAL TO + '\u0102' # 0xAE -> LATIN CAPITAL LETTER A WITH BREVE + '\u0218' # 0xAF -> LATIN CAPITAL LETTER S WITH COMMA BELOW # for Unicode 3.0 and later + '\u221e' # 0xB0 -> INFINITY + '\xb1' # 0xB1 -> PLUS-MINUS SIGN + '\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO + '\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO + '\xa5' # 0xB4 -> YEN SIGN + '\xb5' # 0xB5 -> MICRO SIGN + '\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL + '\u2211' # 0xB7 -> N-ARY SUMMATION + '\u220f' # 0xB8 -> N-ARY PRODUCT + '\u03c0' # 0xB9 -> GREEK SMALL LETTER PI + '\u222b' # 0xBA -> INTEGRAL + '\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR + '\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR + '\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA + '\u0103' # 0xBE -> LATIN SMALL LETTER A WITH BREVE + '\u0219' # 0xBF -> LATIN SMALL LETTER S WITH COMMA BELOW # for Unicode 3.0 and later + '\xbf' # 0xC0 -> INVERTED QUESTION MARK + '\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK + '\xac' # 0xC2 -> NOT SIGN + '\u221a' # 0xC3 -> SQUARE ROOT + '\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK + '\u2248' # 0xC5 -> ALMOST EQUAL TO + '\u2206' # 0xC6 -> INCREMENT + '\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS + '\xa0' # 0xCA -> NO-BREAK SPACE + '\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE + '\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE + '\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE + '\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE + '\u0153' # 0xCF -> LATIN SMALL LIGATURE OE + '\u2013' 
# 0xD0 -> EN DASH + '\u2014' # 0xD1 -> EM DASH + '\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK + '\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK + '\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK + '\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK + '\xf7' # 0xD6 -> DIVISION SIGN + '\u25ca' # 0xD7 -> LOZENGE + '\xff' # 0xD8 -> LATIN SMALL LETTER Y WITH DIAERESIS + '\u0178' # 0xD9 -> LATIN CAPITAL LETTER Y WITH DIAERESIS + '\u2044' # 0xDA -> FRACTION SLASH + '\u20ac' # 0xDB -> EURO SIGN + '\u2039' # 0xDC -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK + '\u203a' # 0xDD -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK + '\u021a' # 0xDE -> LATIN CAPITAL LETTER T WITH COMMA BELOW # for Unicode 3.0 and later + '\u021b' # 0xDF -> LATIN SMALL LETTER T WITH COMMA BELOW # for Unicode 3.0 and later + '\u2021' # 0xE0 -> DOUBLE DAGGER + '\xb7' # 0xE1 -> MIDDLE DOT + '\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK + '\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK + '\u2030' # 0xE4 -> PER MILLE SIGN + '\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX + '\xca' # 0xE6 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX + '\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE + '\xcb' # 0xE8 -> LATIN CAPITAL LETTER E WITH DIAERESIS + '\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE + '\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE + '\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX + '\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS + '\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE + '\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE + '\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX + '\uf8ff' # 0xF0 -> Apple logo + '\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE + '\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE + '\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX + '\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE + '\u0131' # 0xF5 -> LATIN SMALL LETTER DOTLESS I + '\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT + '\u02dc' # 0xF7 -> SMALL TILDE + '\xaf' # 0xF8 -> MACRON + '\u02d8' # 0xF9 -> BREVE + '\u02d9' # 0xFA -> DOT ABOVE + '\u02da' # 0xFB -> RING ABOVE + '\xb8' # 0xFC -> CEDILLA + '\u02dd' # 0xFD -> DOUBLE ACUTE ACCENT + '\u02db' # 0xFE -> OGONEK + '\u02c7' # 0xFF -> CARON +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/mac_turkish.py b/venv/Lib/encodings/mac_turkish.py new file mode 100644 index 00000000..044d4cb5 --- /dev/null +++ b/venv/Lib/encodings/mac_turkish.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec mac_turkish generated from 'MAPPINGS/VENDORS/APPLE/TURKISH.TXT' with gencodec.py. 
+ +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='mac-turkish', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> CONTROL CHARACTER + '\x01' # 0x01 -> CONTROL CHARACTER + '\x02' # 0x02 -> CONTROL CHARACTER + '\x03' # 0x03 -> CONTROL CHARACTER + '\x04' # 0x04 -> CONTROL CHARACTER + '\x05' # 0x05 -> CONTROL CHARACTER + '\x06' # 0x06 -> CONTROL CHARACTER + '\x07' # 0x07 -> CONTROL CHARACTER + '\x08' # 0x08 -> CONTROL CHARACTER + '\t' # 0x09 -> CONTROL CHARACTER + '\n' # 0x0A -> CONTROL CHARACTER + '\x0b' # 0x0B -> CONTROL CHARACTER + '\x0c' # 0x0C -> CONTROL CHARACTER + '\r' # 0x0D -> CONTROL CHARACTER + '\x0e' # 0x0E -> CONTROL CHARACTER + '\x0f' # 0x0F -> CONTROL CHARACTER + '\x10' # 0x10 -> CONTROL CHARACTER + '\x11' # 0x11 -> CONTROL CHARACTER + '\x12' # 0x12 -> CONTROL CHARACTER + '\x13' # 0x13 -> CONTROL CHARACTER + '\x14' # 0x14 -> CONTROL CHARACTER + '\x15' # 0x15 -> CONTROL CHARACTER + '\x16' # 0x16 -> CONTROL CHARACTER + '\x17' # 0x17 -> CONTROL CHARACTER + '\x18' # 0x18 -> CONTROL CHARACTER + '\x19' # 0x19 -> CONTROL CHARACTER + '\x1a' # 0x1A -> CONTROL CHARACTER + '\x1b' # 0x1B -> CONTROL CHARACTER + '\x1c' # 0x1C -> CONTROL CHARACTER + '\x1d' # 0x1D -> CONTROL CHARACTER + '\x1e' # 0x1E -> CONTROL CHARACTER + '\x1f' # 0x1F -> CONTROL CHARACTER + ' ' # 0x20 -> SPACE + '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' # 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' 
# 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> CONTROL CHARACTER + '\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS + '\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE + '\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA + '\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE + '\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE + '\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS + '\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE + '\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE + '\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX + '\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS + '\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE + '\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE + '\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA + '\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE + '\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE + '\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX + '\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS + '\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE + '\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE + '\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX + '\xef' # 0x95 -> LATIN SMALL 
LETTER I WITH DIAERESIS + '\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE + '\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE + '\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE + '\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX + '\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS + '\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE + '\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE + '\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE + '\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX + '\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS + '\u2020' # 0xA0 -> DAGGER + '\xb0' # 0xA1 -> DEGREE SIGN + '\xa2' # 0xA2 -> CENT SIGN + '\xa3' # 0xA3 -> POUND SIGN + '\xa7' # 0xA4 -> SECTION SIGN + '\u2022' # 0xA5 -> BULLET + '\xb6' # 0xA6 -> PILCROW SIGN + '\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S + '\xae' # 0xA8 -> REGISTERED SIGN + '\xa9' # 0xA9 -> COPYRIGHT SIGN + '\u2122' # 0xAA -> TRADE MARK SIGN + '\xb4' # 0xAB -> ACUTE ACCENT + '\xa8' # 0xAC -> DIAERESIS + '\u2260' # 0xAD -> NOT EQUAL TO + '\xc6' # 0xAE -> LATIN CAPITAL LETTER AE + '\xd8' # 0xAF -> LATIN CAPITAL LETTER O WITH STROKE + '\u221e' # 0xB0 -> INFINITY + '\xb1' # 0xB1 -> PLUS-MINUS SIGN + '\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO + '\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO + '\xa5' # 0xB4 -> YEN SIGN + '\xb5' # 0xB5 -> MICRO SIGN + '\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL + '\u2211' # 0xB7 -> N-ARY SUMMATION + '\u220f' # 0xB8 -> N-ARY PRODUCT + '\u03c0' # 0xB9 -> GREEK SMALL LETTER PI + '\u222b' # 0xBA -> INTEGRAL + '\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR + '\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR + '\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA + '\xe6' # 0xBE -> LATIN SMALL LETTER AE + '\xf8' # 0xBF -> LATIN SMALL LETTER O WITH STROKE + '\xbf' # 0xC0 -> INVERTED QUESTION MARK + '\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK + '\xac' # 0xC2 -> NOT SIGN + '\u221a' # 0xC3 -> SQUARE ROOT + '\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK + '\u2248' # 0xC5 -> ALMOST EQUAL TO + '\u2206' # 0xC6 -> INCREMENT + '\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS + '\xa0' # 0xCA -> NO-BREAK SPACE + '\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE + '\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE + '\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE + '\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE + '\u0153' # 0xCF -> LATIN SMALL LIGATURE OE + '\u2013' # 0xD0 -> EN DASH + '\u2014' # 0xD1 -> EM DASH + '\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK + '\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK + '\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK + '\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK + '\xf7' # 0xD6 -> DIVISION SIGN + '\u25ca' # 0xD7 -> LOZENGE + '\xff' # 0xD8 -> LATIN SMALL LETTER Y WITH DIAERESIS + '\u0178' # 0xD9 -> LATIN CAPITAL LETTER Y WITH DIAERESIS + '\u011e' # 0xDA -> LATIN CAPITAL LETTER G WITH BREVE + '\u011f' # 0xDB -> LATIN SMALL LETTER G WITH BREVE + '\u0130' # 0xDC -> LATIN CAPITAL LETTER I WITH DOT ABOVE + '\u0131' # 0xDD -> LATIN SMALL LETTER DOTLESS I + '\u015e' # 0xDE -> LATIN CAPITAL LETTER S WITH CEDILLA + '\u015f' # 0xDF -> LATIN SMALL LETTER S WITH CEDILLA + '\u2021' # 0xE0 -> DOUBLE DAGGER + '\xb7' # 0xE1 -> MIDDLE DOT + '\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK + '\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK + '\u2030' # 0xE4 -> PER MILLE SIGN + '\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX + '\xca' # 0xE6 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX + '\xc1' # 0xE7 -> LATIN 
CAPITAL LETTER A WITH ACUTE + '\xcb' # 0xE8 -> LATIN CAPITAL LETTER E WITH DIAERESIS + '\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE + '\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE + '\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX + '\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS + '\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE + '\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE + '\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX + '\uf8ff' # 0xF0 -> Apple logo + '\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE + '\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE + '\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX + '\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE + '\uf8a0' # 0xF5 -> undefined1 + '\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT + '\u02dc' # 0xF7 -> SMALL TILDE + '\xaf' # 0xF8 -> MACRON + '\u02d8' # 0xF9 -> BREVE + '\u02d9' # 0xFA -> DOT ABOVE + '\u02da' # 0xFB -> RING ABOVE + '\xb8' # 0xFC -> CEDILLA + '\u02dd' # 0xFD -> DOUBLE ACUTE ACCENT + '\u02db' # 0xFE -> OGONEK + '\u02c7' # 0xFF -> CARON +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/mbcs.py b/venv/Lib/encodings/mbcs.py new file mode 100644 index 00000000..baf46cbd --- /dev/null +++ b/venv/Lib/encodings/mbcs.py @@ -0,0 +1,47 @@ +""" Python 'mbcs' Codec for Windows + + +Cloned by Mark Hammond (mhammond@skippinet.com.au) from ascii.py, +which was written by Marc-Andre Lemburg (mal@lemburg.com). + +(c) Copyright CNRI, All Rights Reserved. NO WARRANTY. + +""" +# Import them explicitly to cause an ImportError +# on non-Windows systems +from codecs import mbcs_encode, mbcs_decode +# for IncrementalDecoder, IncrementalEncoder, ... +import codecs + +### Codec APIs + +encode = mbcs_encode + +def decode(input, errors='strict'): + return mbcs_decode(input, errors, True) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return mbcs_encode(input, self.errors)[0] + +class IncrementalDecoder(codecs.BufferedIncrementalDecoder): + _buffer_decode = mbcs_decode + +class StreamWriter(codecs.StreamWriter): + encode = mbcs_encode + +class StreamReader(codecs.StreamReader): + decode = mbcs_decode + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='mbcs', + encode=encode, + decode=decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) diff --git a/venv/Lib/encodings/oem.py b/venv/Lib/encodings/oem.py new file mode 100644 index 00000000..2c3426ba --- /dev/null +++ b/venv/Lib/encodings/oem.py @@ -0,0 +1,41 @@ +""" Python 'oem' Codec for Windows + +""" +# Import them explicitly to cause an ImportError +# on non-Windows systems +from codecs import oem_encode, oem_decode +# for IncrementalDecoder, IncrementalEncoder, ... 
+import codecs + +### Codec APIs + +encode = oem_encode + +def decode(input, errors='strict'): + return oem_decode(input, errors, True) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return oem_encode(input, self.errors)[0] + +class IncrementalDecoder(codecs.BufferedIncrementalDecoder): + _buffer_decode = oem_decode + +class StreamWriter(codecs.StreamWriter): + encode = oem_encode + +class StreamReader(codecs.StreamReader): + decode = oem_decode + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='oem', + encode=encode, + decode=decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) diff --git a/venv/Lib/encodings/palmos.py b/venv/Lib/encodings/palmos.py new file mode 100644 index 00000000..c506d654 --- /dev/null +++ b/venv/Lib/encodings/palmos.py @@ -0,0 +1,308 @@ +""" Python Character Mapping Codec for PalmOS 3.5. + +Written by Sjoerd Mullender (sjoerd@acm.org); based on iso8859_15.py. + +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='palmos', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x04' # 0x04 -> END OF TRANSMISSION + '\x05' # 0x05 -> ENQUIRY + '\x06' # 0x06 -> ACKNOWLEDGE + '\x07' # 0x07 -> BELL + '\x08' # 0x08 -> BACKSPACE + '\t' # 0x09 -> HORIZONTAL TABULATION + '\n' # 0x0A -> LINE FEED + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x14' # 0x14 -> DEVICE CONTROL FOUR + '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x16 -> SYNCHRONOUS IDLE + '\x17' # 0x17 -> END OF TRANSMISSION BLOCK + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x1a' # 0x1A -> SUBSTITUTE + '\x1b' # 0x1B -> ESCAPE + '\x1c' # 0x1C -> FILE SEPARATOR + '\x1d' # 0x1D -> GROUP SEPARATOR + '\x1e' # 0x1E -> RECORD SEPARATOR + '\x1f' # 0x1F -> UNIT SEPARATOR + ' ' # 0x20 -> SPACE + '!' 
# 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' # 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' # 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> DELETE + '\u20ac' # 0x80 -> EURO SIGN + '\x81' # 0x81 -> + '\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK + '\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK + '\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK + '\u2026' # 0x85 -> HORIZONTAL ELLIPSIS + '\u2020' # 0x86 -> DAGGER + '\u2021' # 0x87 -> DOUBLE DAGGER + '\u02c6' # 0x88 -> MODIFIER LETTER 
CIRCUMFLEX ACCENT + '\u2030' # 0x89 -> PER MILLE SIGN + '\u0160' # 0x8A -> LATIN CAPITAL LETTER S WITH CARON + '\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK + '\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE + '\u2666' # 0x8D -> BLACK DIAMOND SUIT + '\u2663' # 0x8E -> BLACK CLUB SUIT + '\u2665' # 0x8F -> BLACK HEART SUIT + '\u2660' # 0x90 -> BLACK SPADE SUIT + '\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK + '\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK + '\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK + '\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK + '\u2022' # 0x95 -> BULLET + '\u2013' # 0x96 -> EN DASH + '\u2014' # 0x97 -> EM DASH + '\u02dc' # 0x98 -> SMALL TILDE + '\u2122' # 0x99 -> TRADE MARK SIGN + '\u0161' # 0x9A -> LATIN SMALL LETTER S WITH CARON + '\x9b' # 0x9B -> + '\u0153' # 0x9C -> LATIN SMALL LIGATURE OE + '\x9d' # 0x9D -> + '\x9e' # 0x9E -> + '\u0178' # 0x9F -> LATIN CAPITAL LETTER Y WITH DIAERESIS + '\xa0' # 0xA0 -> NO-BREAK SPACE + '\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK + '\xa2' # 0xA2 -> CENT SIGN + '\xa3' # 0xA3 -> POUND SIGN + '\xa4' # 0xA4 -> CURRENCY SIGN + '\xa5' # 0xA5 -> YEN SIGN + '\xa6' # 0xA6 -> BROKEN BAR + '\xa7' # 0xA7 -> SECTION SIGN + '\xa8' # 0xA8 -> DIAERESIS + '\xa9' # 0xA9 -> COPYRIGHT SIGN + '\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR + '\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xac' # 0xAC -> NOT SIGN + '\xad' # 0xAD -> SOFT HYPHEN + '\xae' # 0xAE -> REGISTERED SIGN + '\xaf' # 0xAF -> MACRON + '\xb0' # 0xB0 -> DEGREE SIGN + '\xb1' # 0xB1 -> PLUS-MINUS SIGN + '\xb2' # 0xB2 -> SUPERSCRIPT TWO + '\xb3' # 0xB3 -> SUPERSCRIPT THREE + '\xb4' # 0xB4 -> ACUTE ACCENT + '\xb5' # 0xB5 -> MICRO SIGN + '\xb6' # 0xB6 -> PILCROW SIGN + '\xb7' # 0xB7 -> MIDDLE DOT + '\xb8' # 0xB8 -> CEDILLA + '\xb9' # 0xB9 -> SUPERSCRIPT ONE + '\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR + '\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER + '\xbd' # 0xBD -> VULGAR FRACTION ONE HALF + '\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS + '\xbf' # 0xBF -> INVERTED QUESTION MARK + '\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE + '\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE + '\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX + '\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE + '\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS + '\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE + '\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE + '\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA + '\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE + '\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE + '\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX + '\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS + '\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE + '\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE + '\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX + '\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS + '\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH (Icelandic) + '\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE + '\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE + '\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE + '\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX + '\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE + '\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS + '\xd7' # 0xD7 -> MULTIPLICATION SIGN + '\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE + '\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE + '\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH 
ACUTE + '\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX + '\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE + '\xde' # 0xDE -> LATIN CAPITAL LETTER THORN (Icelandic) + '\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S (German) + '\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE + '\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE + '\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX + '\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE + '\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS + '\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE + '\xe6' # 0xE6 -> LATIN SMALL LETTER AE + '\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA + '\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE + '\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE + '\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX + '\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS + '\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE + '\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE + '\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX + '\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS + '\xf0' # 0xF0 -> LATIN SMALL LETTER ETH (Icelandic) + '\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE + '\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE + '\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE + '\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX + '\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE + '\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS + '\xf7' # 0xF7 -> DIVISION SIGN + '\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE + '\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE + '\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE + '\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX + '\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS + '\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE + '\xfe' # 0xFE -> LATIN SMALL LETTER THORN (Icelandic) + '\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/ptcp154.py b/venv/Lib/encodings/ptcp154.py new file mode 100644 index 00000000..656b79d6 --- /dev/null +++ b/venv/Lib/encodings/ptcp154.py @@ -0,0 +1,312 @@ +""" Python Character Mapping Codec generated from 'PTCP154.txt' with gencodec.py. + +Written by Marc-Andre Lemburg (mal@lemburg.com). + +(c) Copyright CNRI, All Rights Reserved. NO WARRANTY. +(c) Copyright 2000 Guido van Rossum. 
+ +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='ptcp154', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x04' # 0x04 -> END OF TRANSMISSION + '\x05' # 0x05 -> ENQUIRY + '\x06' # 0x06 -> ACKNOWLEDGE + '\x07' # 0x07 -> BELL + '\x08' # 0x08 -> BACKSPACE + '\t' # 0x09 -> HORIZONTAL TABULATION + '\n' # 0x0A -> LINE FEED + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x14' # 0x14 -> DEVICE CONTROL FOUR + '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x16 -> SYNCHRONOUS IDLE + '\x17' # 0x17 -> END OF TRANSMISSION BLOCK + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x1a' # 0x1A -> SUBSTITUTE + '\x1b' # 0x1B -> ESCAPE + '\x1c' # 0x1C -> FILE SEPARATOR + '\x1d' # 0x1D -> GROUP SEPARATOR + '\x1e' # 0x1E -> RECORD SEPARATOR + '\x1f' # 0x1F -> UNIT SEPARATOR + ' ' # 0x20 -> SPACE + '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' # 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' 
# 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> DELETE (DEL) + '\u0496' # 0x80 -> CYRILLIC CAPITAL LETTER ZHE WITH DESCENDER + '\u0492' # 0x81 -> CYRILLIC CAPITAL LETTER GHE WITH STROKE + '\u04ee' # 0x82 -> CYRILLIC CAPITAL LETTER U WITH MACRON + '\u0493' # 0x83 -> CYRILLIC SMALL LETTER GHE WITH STROKE + '\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK + '\u2026' # 0x85 -> HORIZONTAL ELLIPSIS + '\u04b6' # 0x86 -> CYRILLIC CAPITAL LETTER CHE WITH DESCENDER + '\u04ae' # 0x87 -> CYRILLIC CAPITAL LETTER STRAIGHT U + '\u04b2' # 0x88 -> CYRILLIC CAPITAL LETTER HA WITH DESCENDER + '\u04af' # 0x89 -> CYRILLIC SMALL LETTER STRAIGHT U + '\u04a0' # 0x8A -> CYRILLIC CAPITAL LETTER BASHKIR KA + '\u04e2' # 0x8B -> CYRILLIC CAPITAL LETTER I WITH MACRON + '\u04a2' # 0x8C -> CYRILLIC CAPITAL LETTER EN WITH DESCENDER + '\u049a' # 0x8D -> CYRILLIC CAPITAL LETTER KA WITH DESCENDER + '\u04ba' # 0x8E -> CYRILLIC CAPITAL LETTER SHHA + '\u04b8' # 0x8F -> CYRILLIC CAPITAL LETTER CHE WITH VERTICAL STROKE + '\u0497' # 0x90 -> CYRILLIC SMALL LETTER ZHE WITH DESCENDER + '\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK + '\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK + '\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK + '\u201d' # 0x94 -> RIGHT DOUBLE 
QUOTATION MARK + '\u2022' # 0x95 -> BULLET + '\u2013' # 0x96 -> EN DASH + '\u2014' # 0x97 -> EM DASH + '\u04b3' # 0x98 -> CYRILLIC SMALL LETTER HA WITH DESCENDER + '\u04b7' # 0x99 -> CYRILLIC SMALL LETTER CHE WITH DESCENDER + '\u04a1' # 0x9A -> CYRILLIC SMALL LETTER BASHKIR KA + '\u04e3' # 0x9B -> CYRILLIC SMALL LETTER I WITH MACRON + '\u04a3' # 0x9C -> CYRILLIC SMALL LETTER EN WITH DESCENDER + '\u049b' # 0x9D -> CYRILLIC SMALL LETTER KA WITH DESCENDER + '\u04bb' # 0x9E -> CYRILLIC SMALL LETTER SHHA + '\u04b9' # 0x9F -> CYRILLIC SMALL LETTER CHE WITH VERTICAL STROKE + '\xa0' # 0xA0 -> NO-BREAK SPACE + '\u040e' # 0xA1 -> CYRILLIC CAPITAL LETTER SHORT U (Byelorussian) + '\u045e' # 0xA2 -> CYRILLIC SMALL LETTER SHORT U (Byelorussian) + '\u0408' # 0xA3 -> CYRILLIC CAPITAL LETTER JE + '\u04e8' # 0xA4 -> CYRILLIC CAPITAL LETTER BARRED O + '\u0498' # 0xA5 -> CYRILLIC CAPITAL LETTER ZE WITH DESCENDER + '\u04b0' # 0xA6 -> CYRILLIC CAPITAL LETTER STRAIGHT U WITH STROKE + '\xa7' # 0xA7 -> SECTION SIGN + '\u0401' # 0xA8 -> CYRILLIC CAPITAL LETTER IO + '\xa9' # 0xA9 -> COPYRIGHT SIGN + '\u04d8' # 0xAA -> CYRILLIC CAPITAL LETTER SCHWA + '\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xac' # 0xAC -> NOT SIGN + '\u04ef' # 0xAD -> CYRILLIC SMALL LETTER U WITH MACRON + '\xae' # 0xAE -> REGISTERED SIGN + '\u049c' # 0xAF -> CYRILLIC CAPITAL LETTER KA WITH VERTICAL STROKE + '\xb0' # 0xB0 -> DEGREE SIGN + '\u04b1' # 0xB1 -> CYRILLIC SMALL LETTER STRAIGHT U WITH STROKE + '\u0406' # 0xB2 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I + '\u0456' # 0xB3 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I + '\u0499' # 0xB4 -> CYRILLIC SMALL LETTER ZE WITH DESCENDER + '\u04e9' # 0xB5 -> CYRILLIC SMALL LETTER BARRED O + '\xb6' # 0xB6 -> PILCROW SIGN + '\xb7' # 0xB7 -> MIDDLE DOT + '\u0451' # 0xB8 -> CYRILLIC SMALL LETTER IO + '\u2116' # 0xB9 -> NUMERO SIGN + '\u04d9' # 0xBA -> CYRILLIC SMALL LETTER SCHWA + '\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\u0458' # 0xBC -> CYRILLIC SMALL LETTER JE + '\u04aa' # 0xBD -> CYRILLIC CAPITAL LETTER ES WITH DESCENDER + '\u04ab' # 0xBE -> CYRILLIC SMALL LETTER ES WITH DESCENDER + '\u049d' # 0xBF -> CYRILLIC SMALL LETTER KA WITH VERTICAL STROKE + '\u0410' # 0xC0 -> CYRILLIC CAPITAL LETTER A + '\u0411' # 0xC1 -> CYRILLIC CAPITAL LETTER BE + '\u0412' # 0xC2 -> CYRILLIC CAPITAL LETTER VE + '\u0413' # 0xC3 -> CYRILLIC CAPITAL LETTER GHE + '\u0414' # 0xC4 -> CYRILLIC CAPITAL LETTER DE + '\u0415' # 0xC5 -> CYRILLIC CAPITAL LETTER IE + '\u0416' # 0xC6 -> CYRILLIC CAPITAL LETTER ZHE + '\u0417' # 0xC7 -> CYRILLIC CAPITAL LETTER ZE + '\u0418' # 0xC8 -> CYRILLIC CAPITAL LETTER I + '\u0419' # 0xC9 -> CYRILLIC CAPITAL LETTER SHORT I + '\u041a' # 0xCA -> CYRILLIC CAPITAL LETTER KA + '\u041b' # 0xCB -> CYRILLIC CAPITAL LETTER EL + '\u041c' # 0xCC -> CYRILLIC CAPITAL LETTER EM + '\u041d' # 0xCD -> CYRILLIC CAPITAL LETTER EN + '\u041e' # 0xCE -> CYRILLIC CAPITAL LETTER O + '\u041f' # 0xCF -> CYRILLIC CAPITAL LETTER PE + '\u0420' # 0xD0 -> CYRILLIC CAPITAL LETTER ER + '\u0421' # 0xD1 -> CYRILLIC CAPITAL LETTER ES + '\u0422' # 0xD2 -> CYRILLIC CAPITAL LETTER TE + '\u0423' # 0xD3 -> CYRILLIC CAPITAL LETTER U + '\u0424' # 0xD4 -> CYRILLIC CAPITAL LETTER EF + '\u0425' # 0xD5 -> CYRILLIC CAPITAL LETTER HA + '\u0426' # 0xD6 -> CYRILLIC CAPITAL LETTER TSE + '\u0427' # 0xD7 -> CYRILLIC CAPITAL LETTER CHE + '\u0428' # 0xD8 -> CYRILLIC CAPITAL LETTER SHA + '\u0429' # 0xD9 -> CYRILLIC CAPITAL LETTER SHCHA + '\u042a' # 0xDA -> CYRILLIC CAPITAL LETTER HARD SIGN + '\u042b' # 
0xDB -> CYRILLIC CAPITAL LETTER YERU + '\u042c' # 0xDC -> CYRILLIC CAPITAL LETTER SOFT SIGN + '\u042d' # 0xDD -> CYRILLIC CAPITAL LETTER E + '\u042e' # 0xDE -> CYRILLIC CAPITAL LETTER YU + '\u042f' # 0xDF -> CYRILLIC CAPITAL LETTER YA + '\u0430' # 0xE0 -> CYRILLIC SMALL LETTER A + '\u0431' # 0xE1 -> CYRILLIC SMALL LETTER BE + '\u0432' # 0xE2 -> CYRILLIC SMALL LETTER VE + '\u0433' # 0xE3 -> CYRILLIC SMALL LETTER GHE + '\u0434' # 0xE4 -> CYRILLIC SMALL LETTER DE + '\u0435' # 0xE5 -> CYRILLIC SMALL LETTER IE + '\u0436' # 0xE6 -> CYRILLIC SMALL LETTER ZHE + '\u0437' # 0xE7 -> CYRILLIC SMALL LETTER ZE + '\u0438' # 0xE8 -> CYRILLIC SMALL LETTER I + '\u0439' # 0xE9 -> CYRILLIC SMALL LETTER SHORT I + '\u043a' # 0xEA -> CYRILLIC SMALL LETTER KA + '\u043b' # 0xEB -> CYRILLIC SMALL LETTER EL + '\u043c' # 0xEC -> CYRILLIC SMALL LETTER EM + '\u043d' # 0xED -> CYRILLIC SMALL LETTER EN + '\u043e' # 0xEE -> CYRILLIC SMALL LETTER O + '\u043f' # 0xEF -> CYRILLIC SMALL LETTER PE + '\u0440' # 0xF0 -> CYRILLIC SMALL LETTER ER + '\u0441' # 0xF1 -> CYRILLIC SMALL LETTER ES + '\u0442' # 0xF2 -> CYRILLIC SMALL LETTER TE + '\u0443' # 0xF3 -> CYRILLIC SMALL LETTER U + '\u0444' # 0xF4 -> CYRILLIC SMALL LETTER EF + '\u0445' # 0xF5 -> CYRILLIC SMALL LETTER HA + '\u0446' # 0xF6 -> CYRILLIC SMALL LETTER TSE + '\u0447' # 0xF7 -> CYRILLIC SMALL LETTER CHE + '\u0448' # 0xF8 -> CYRILLIC SMALL LETTER SHA + '\u0449' # 0xF9 -> CYRILLIC SMALL LETTER SHCHA + '\u044a' # 0xFA -> CYRILLIC SMALL LETTER HARD SIGN + '\u044b' # 0xFB -> CYRILLIC SMALL LETTER YERU + '\u044c' # 0xFC -> CYRILLIC SMALL LETTER SOFT SIGN + '\u044d' # 0xFD -> CYRILLIC SMALL LETTER E + '\u044e' # 0xFE -> CYRILLIC SMALL LETTER YU + '\u044f' # 0xFF -> CYRILLIC SMALL LETTER YA +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/punycode.py b/venv/Lib/encodings/punycode.py new file mode 100644 index 00000000..66c51013 --- /dev/null +++ b/venv/Lib/encodings/punycode.py @@ -0,0 +1,237 @@ +""" Codec for the Punicode encoding, as specified in RFC 3492 + +Written by Martin v. Löwis. +""" + +import codecs + +##################### Encoding ##################################### + +def segregate(str): + """3.1 Basic code point segregation""" + base = bytearray() + extended = set() + for c in str: + if ord(c) < 128: + base.append(ord(c)) + else: + extended.add(c) + extended = sorted(extended) + return bytes(base), extended + +def selective_len(str, max): + """Return the length of str, considering only characters below max.""" + res = 0 + for c in str: + if ord(c) < max: + res += 1 + return res + +def selective_find(str, char, index, pos): + """Return a pair (index, pos), indicating the next occurrence of + char in str. index is the position of the character considering + only ordinals up to and including char, and pos is the position in + the full string. 
index/pos is the starting position in the full + string.""" + + l = len(str) + while 1: + pos += 1 + if pos == l: + return (-1, -1) + c = str[pos] + if c == char: + return index+1, pos + elif c < char: + index += 1 + +def insertion_unsort(str, extended): + """3.2 Insertion unsort coding""" + oldchar = 0x80 + result = [] + oldindex = -1 + for c in extended: + index = pos = -1 + char = ord(c) + curlen = selective_len(str, char) + delta = (curlen+1) * (char - oldchar) + while 1: + index,pos = selective_find(str,c,index,pos) + if index == -1: + break + delta += index - oldindex + result.append(delta-1) + oldindex = index + delta = 0 + oldchar = char + + return result + +def T(j, bias): + # Punycode parameters: tmin = 1, tmax = 26, base = 36 + res = 36 * (j + 1) - bias + if res < 1: return 1 + if res > 26: return 26 + return res + +digits = b"abcdefghijklmnopqrstuvwxyz0123456789" +def generate_generalized_integer(N, bias): + """3.3 Generalized variable-length integers""" + result = bytearray() + j = 0 + while 1: + t = T(j, bias) + if N < t: + result.append(digits[N]) + return bytes(result) + result.append(digits[t + ((N - t) % (36 - t))]) + N = (N - t) // (36 - t) + j += 1 + +def adapt(delta, first, numchars): + if first: + delta //= 700 + else: + delta //= 2 + delta += delta // numchars + # ((base - tmin) * tmax) // 2 == 455 + divisions = 0 + while delta > 455: + delta = delta // 35 # base - tmin + divisions += 36 + bias = divisions + (36 * delta // (delta + 38)) + return bias + + +def generate_integers(baselen, deltas): + """3.4 Bias adaptation""" + # Punycode parameters: initial bias = 72, damp = 700, skew = 38 + result = bytearray() + bias = 72 + for points, delta in enumerate(deltas): + s = generate_generalized_integer(delta, bias) + result.extend(s) + bias = adapt(delta, points==0, baselen+points+1) + return bytes(result) + +def punycode_encode(text): + base, extended = segregate(text) + deltas = insertion_unsort(text, extended) + extended = generate_integers(len(base), deltas) + if base: + return base + b"-" + extended + return extended + +##################### Decoding ##################################### + +def decode_generalized_number(extended, extpos, bias, errors): + """3.3 Generalized variable-length integers""" + result = 0 + w = 1 + j = 0 + while 1: + try: + char = ord(extended[extpos]) + except IndexError: + if errors == "strict": + raise UnicodeError("incomplete punicode string") + return extpos + 1, None + extpos += 1 + if 0x41 <= char <= 0x5A: # A-Z + digit = char - 0x41 + elif 0x30 <= char <= 0x39: + digit = char - 22 # 0x30-26 + elif errors == "strict": + raise UnicodeError("Invalid extended code point '%s'" + % extended[extpos]) + else: + return extpos, None + t = T(j, bias) + result += digit * w + if digit < t: + return extpos, result + w = w * (36 - t) + j += 1 + + +def insertion_sort(base, extended, errors): + """3.2 Insertion unsort coding""" + char = 0x80 + pos = -1 + bias = 72 + extpos = 0 + while extpos < len(extended): + newpos, delta = decode_generalized_number(extended, extpos, + bias, errors) + if delta is None: + # There was an error in decoding. We can't continue because + # synchronization is lost. 
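`punycode_encode` above composes `segregate`, `insertion_unsort`, and `generate_integers`; the decoder continuing below reverses the process. Once the codec is registered it is reachable through the ordinary `str.encode`/`bytes.decode` machinery, as in this small round trip (standard CPython behaviour, nothing project-specific):

```python
# Punycode keeps the ASCII code points as-is and appends the non-ASCII
# insertions as generalized variable-length integers after the last '-'.
encoded = 'bücher'.encode('punycode')
print(encoded)                        # b'bcher-kva'
print(encoded.decode('punycode'))     # 'bücher'

# IDNA domain labels apply the same transform behind the 'xn--' prefix.
print('münchen'.encode('punycode'))   # b'mnchen-3ya'
```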
+ return base + pos += delta+1 + char += pos // (len(base) + 1) + if char > 0x10FFFF: + if errors == "strict": + raise UnicodeError("Invalid character U+%x" % char) + char = ord('?') + pos = pos % (len(base) + 1) + base = base[:pos] + chr(char) + base[pos:] + bias = adapt(delta, (extpos == 0), len(base)) + extpos = newpos + return base + +def punycode_decode(text, errors): + if isinstance(text, str): + text = text.encode("ascii") + if isinstance(text, memoryview): + text = bytes(text) + pos = text.rfind(b"-") + if pos == -1: + base = "" + extended = str(text, "ascii").upper() + else: + base = str(text[:pos], "ascii", errors) + extended = str(text[pos+1:], "ascii").upper() + return insertion_sort(base, extended, errors) + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self, input, errors='strict'): + res = punycode_encode(input) + return res, len(input) + + def decode(self, input, errors='strict'): + if errors not in ('strict', 'replace', 'ignore'): + raise UnicodeError("Unsupported error handling "+errors) + res = punycode_decode(input, errors) + return res, len(input) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return punycode_encode(input) + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + if self.errors not in ('strict', 'replace', 'ignore'): + raise UnicodeError("Unsupported error handling "+self.errors) + return punycode_decode(input, self.errors) + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='punycode', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamwriter=StreamWriter, + streamreader=StreamReader, + ) diff --git a/venv/Lib/encodings/quopri_codec.py b/venv/Lib/encodings/quopri_codec.py new file mode 100644 index 00000000..496cb765 --- /dev/null +++ b/venv/Lib/encodings/quopri_codec.py @@ -0,0 +1,56 @@ +"""Codec for quoted-printable encoding. + +This codec de/encodes from bytes to bytes. 
+""" + +import codecs +import quopri +from io import BytesIO + +def quopri_encode(input, errors='strict'): + assert errors == 'strict' + f = BytesIO(input) + g = BytesIO() + quopri.encode(f, g, quotetabs=True) + return (g.getvalue(), len(input)) + +def quopri_decode(input, errors='strict'): + assert errors == 'strict' + f = BytesIO(input) + g = BytesIO() + quopri.decode(f, g) + return (g.getvalue(), len(input)) + +class Codec(codecs.Codec): + def encode(self, input, errors='strict'): + return quopri_encode(input, errors) + def decode(self, input, errors='strict'): + return quopri_decode(input, errors) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return quopri_encode(input, self.errors)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return quopri_decode(input, self.errors)[0] + +class StreamWriter(Codec, codecs.StreamWriter): + charbuffertype = bytes + +class StreamReader(Codec, codecs.StreamReader): + charbuffertype = bytes + +# encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='quopri', + encode=quopri_encode, + decode=quopri_decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamwriter=StreamWriter, + streamreader=StreamReader, + _is_text_encoding=False, + ) diff --git a/venv/Lib/encodings/raw_unicode_escape.py b/venv/Lib/encodings/raw_unicode_escape.py new file mode 100644 index 00000000..2b919b40 --- /dev/null +++ b/venv/Lib/encodings/raw_unicode_escape.py @@ -0,0 +1,45 @@ +""" Python 'raw-unicode-escape' Codec + + +Written by Marc-Andre Lemburg (mal@lemburg.com). + +(c) Copyright CNRI, All Rights Reserved. NO WARRANTY. + +""" +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + # Note: Binding these as C functions will result in the class not + # converting them to methods. This is intended. + encode = codecs.raw_unicode_escape_encode + decode = codecs.raw_unicode_escape_decode + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.raw_unicode_escape_encode(input, self.errors)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.raw_unicode_escape_decode(input, self.errors)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='raw-unicode-escape', + encode=Codec.encode, + decode=Codec.decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamwriter=StreamWriter, + streamreader=StreamReader, + ) diff --git a/venv/Lib/encodings/rot_13.py b/venv/Lib/encodings/rot_13.py new file mode 100644 index 00000000..5627bfbc --- /dev/null +++ b/venv/Lib/encodings/rot_13.py @@ -0,0 +1,113 @@ +#!/usr/bin/env python +""" Python Character Mapping Codec for ROT13. + +This codec de/encodes from str to str. + +Written by Marc-Andre Lemburg (mal@lemburg.com). 
+""" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + def encode(self, input, errors='strict'): + return (str.translate(input, rot13_map), len(input)) + + def decode(self, input, errors='strict'): + return (str.translate(input, rot13_map), len(input)) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return str.translate(input, rot13_map) + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return str.translate(input, rot13_map) + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='rot-13', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamwriter=StreamWriter, + streamreader=StreamReader, + _is_text_encoding=False, + ) + +### Map + +rot13_map = codecs.make_identity_dict(range(256)) +rot13_map.update({ + 0x0041: 0x004e, + 0x0042: 0x004f, + 0x0043: 0x0050, + 0x0044: 0x0051, + 0x0045: 0x0052, + 0x0046: 0x0053, + 0x0047: 0x0054, + 0x0048: 0x0055, + 0x0049: 0x0056, + 0x004a: 0x0057, + 0x004b: 0x0058, + 0x004c: 0x0059, + 0x004d: 0x005a, + 0x004e: 0x0041, + 0x004f: 0x0042, + 0x0050: 0x0043, + 0x0051: 0x0044, + 0x0052: 0x0045, + 0x0053: 0x0046, + 0x0054: 0x0047, + 0x0055: 0x0048, + 0x0056: 0x0049, + 0x0057: 0x004a, + 0x0058: 0x004b, + 0x0059: 0x004c, + 0x005a: 0x004d, + 0x0061: 0x006e, + 0x0062: 0x006f, + 0x0063: 0x0070, + 0x0064: 0x0071, + 0x0065: 0x0072, + 0x0066: 0x0073, + 0x0067: 0x0074, + 0x0068: 0x0075, + 0x0069: 0x0076, + 0x006a: 0x0077, + 0x006b: 0x0078, + 0x006c: 0x0079, + 0x006d: 0x007a, + 0x006e: 0x0061, + 0x006f: 0x0062, + 0x0070: 0x0063, + 0x0071: 0x0064, + 0x0072: 0x0065, + 0x0073: 0x0066, + 0x0074: 0x0067, + 0x0075: 0x0068, + 0x0076: 0x0069, + 0x0077: 0x006a, + 0x0078: 0x006b, + 0x0079: 0x006c, + 0x007a: 0x006d, +}) + +### Filter API + +def rot13(infile, outfile): + outfile.write(codecs.encode(infile.read(), 'rot-13')) + +if __name__ == '__main__': + import sys + rot13(sys.stdin, sys.stdout) diff --git a/venv/Lib/encodings/shift_jis.py b/venv/Lib/encodings/shift_jis.py new file mode 100644 index 00000000..83381172 --- /dev/null +++ b/venv/Lib/encodings/shift_jis.py @@ -0,0 +1,39 @@ +# +# shift_jis.py: Python Unicode Codec for SHIFT_JIS +# +# Written by Hye-Shik Chang +# + +import _codecs_jp, codecs +import _multibytecodec as mbc + +codec = _codecs_jp.getcodec('shift_jis') + +class Codec(codecs.Codec): + encode = codec.encode + decode = codec.decode + +class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, + codecs.IncrementalEncoder): + codec = codec + +class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, + codecs.IncrementalDecoder): + codec = codec + +class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader): + codec = codec + +class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter): + codec = codec + +def getregentry(): + return codecs.CodecInfo( + name='shift_jis', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) diff --git a/venv/Lib/encodings/shift_jis_2004.py b/venv/Lib/encodings/shift_jis_2004.py new file mode 100644 index 00000000..161b1e86 --- /dev/null +++ b/venv/Lib/encodings/shift_jis_2004.py @@ -0,0 +1,39 @@ +# +# shift_jis_2004.py: Python 
Unicode Codec for SHIFT_JIS_2004 +# +# Written by Hye-Shik Chang +# + +import _codecs_jp, codecs +import _multibytecodec as mbc + +codec = _codecs_jp.getcodec('shift_jis_2004') + +class Codec(codecs.Codec): + encode = codec.encode + decode = codec.decode + +class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, + codecs.IncrementalEncoder): + codec = codec + +class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, + codecs.IncrementalDecoder): + codec = codec + +class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader): + codec = codec + +class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter): + codec = codec + +def getregentry(): + return codecs.CodecInfo( + name='shift_jis_2004', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) diff --git a/venv/Lib/encodings/shift_jisx0213.py b/venv/Lib/encodings/shift_jisx0213.py new file mode 100644 index 00000000..cb653f53 --- /dev/null +++ b/venv/Lib/encodings/shift_jisx0213.py @@ -0,0 +1,39 @@ +# +# shift_jisx0213.py: Python Unicode Codec for SHIFT_JISX0213 +# +# Written by Hye-Shik Chang +# + +import _codecs_jp, codecs +import _multibytecodec as mbc + +codec = _codecs_jp.getcodec('shift_jisx0213') + +class Codec(codecs.Codec): + encode = codec.encode + decode = codec.decode + +class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, + codecs.IncrementalEncoder): + codec = codec + +class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, + codecs.IncrementalDecoder): + codec = codec + +class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader): + codec = codec + +class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter): + codec = codec + +def getregentry(): + return codecs.CodecInfo( + name='shift_jisx0213', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) diff --git a/venv/Lib/encodings/tis_620.py b/venv/Lib/encodings/tis_620.py new file mode 100644 index 00000000..e2883863 --- /dev/null +++ b/venv/Lib/encodings/tis_620.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec tis_620 generated from 'python-mappings/TIS-620.TXT' with gencodec.py. 
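The Shift JIS, Shift_JIS-2004, and Shift_JISX0213 modules above are thin wrappers: the actual conversion tables live in the C extension `_codecs_jp`, and the classes only plug that codec object into the standard incremental and stream APIs. A round-trip sketch (assuming a normal CPython build, which ships the CJK codecs):

```python
text = 'こんにちは'                       # "hello" in hiragana
data = text.encode('shift_jis')
print(data)                               # b'\x82\xb1\x82\xf1\x82\xc9\x82\xbf\x82\xcd'
print(data.decode('shift_jis') == text)   # True

# The 2004/X0213 variants extend the repertoire but share the same API;
# plain JIS X 0208 hiragana typically encode to the same bytes.
print(text.encode('shift_jis_2004') == data)
```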
+ +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='tis-620', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x04' # 0x04 -> END OF TRANSMISSION + '\x05' # 0x05 -> ENQUIRY + '\x06' # 0x06 -> ACKNOWLEDGE + '\x07' # 0x07 -> BELL + '\x08' # 0x08 -> BACKSPACE + '\t' # 0x09 -> HORIZONTAL TABULATION + '\n' # 0x0A -> LINE FEED + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x14' # 0x14 -> DEVICE CONTROL FOUR + '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x16 -> SYNCHRONOUS IDLE + '\x17' # 0x17 -> END OF TRANSMISSION BLOCK + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x1a' # 0x1A -> SUBSTITUTE + '\x1b' # 0x1B -> ESCAPE + '\x1c' # 0x1C -> FILE SEPARATOR + '\x1d' # 0x1D -> GROUP SEPARATOR + '\x1e' # 0x1E -> RECORD SEPARATOR + '\x1f' # 0x1F -> UNIT SEPARATOR + ' ' # 0x20 -> SPACE + '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' # 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' 
# 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> DELETE + '\x80' # 0x80 -> + '\x81' # 0x81 -> + '\x82' # 0x82 -> + '\x83' # 0x83 -> + '\x84' # 0x84 -> + '\x85' # 0x85 -> + '\x86' # 0x86 -> + '\x87' # 0x87 -> + '\x88' # 0x88 -> + '\x89' # 0x89 -> + '\x8a' # 0x8A -> + '\x8b' # 0x8B -> + '\x8c' # 0x8C -> + '\x8d' # 0x8D -> + '\x8e' # 0x8E -> + '\x8f' # 0x8F -> + '\x90' # 0x90 -> + '\x91' # 0x91 -> + '\x92' # 0x92 -> + '\x93' # 0x93 -> + '\x94' # 0x94 -> + '\x95' # 0x95 -> + '\x96' # 0x96 -> + '\x97' # 0x97 -> + '\x98' # 0x98 -> + '\x99' # 0x99 -> + '\x9a' # 0x9A -> + '\x9b' # 0x9B -> + '\x9c' # 0x9C -> + '\x9d' # 0x9D -> + '\x9e' # 0x9E -> + '\x9f' # 0x9F -> + '\ufffe' + '\u0e01' # 0xA1 -> THAI CHARACTER KO KAI + '\u0e02' # 0xA2 -> THAI CHARACTER KHO KHAI + '\u0e03' # 0xA3 -> THAI CHARACTER KHO KHUAT + '\u0e04' # 0xA4 -> THAI CHARACTER KHO KHWAI + '\u0e05' # 0xA5 -> THAI CHARACTER KHO KHON + '\u0e06' # 0xA6 -> THAI CHARACTER KHO RAKHANG + '\u0e07' # 0xA7 -> THAI CHARACTER NGO NGU + '\u0e08' # 0xA8 -> THAI CHARACTER CHO CHAN + '\u0e09' # 0xA9 -> THAI CHARACTER CHO CHING + '\u0e0a' # 0xAA -> THAI CHARACTER CHO CHANG + '\u0e0b' # 0xAB -> THAI CHARACTER SO SO + '\u0e0c' # 0xAC -> THAI CHARACTER CHO CHOE + '\u0e0d' # 0xAD -> 
THAI CHARACTER YO YING + '\u0e0e' # 0xAE -> THAI CHARACTER DO CHADA + '\u0e0f' # 0xAF -> THAI CHARACTER TO PATAK + '\u0e10' # 0xB0 -> THAI CHARACTER THO THAN + '\u0e11' # 0xB1 -> THAI CHARACTER THO NANGMONTHO + '\u0e12' # 0xB2 -> THAI CHARACTER THO PHUTHAO + '\u0e13' # 0xB3 -> THAI CHARACTER NO NEN + '\u0e14' # 0xB4 -> THAI CHARACTER DO DEK + '\u0e15' # 0xB5 -> THAI CHARACTER TO TAO + '\u0e16' # 0xB6 -> THAI CHARACTER THO THUNG + '\u0e17' # 0xB7 -> THAI CHARACTER THO THAHAN + '\u0e18' # 0xB8 -> THAI CHARACTER THO THONG + '\u0e19' # 0xB9 -> THAI CHARACTER NO NU + '\u0e1a' # 0xBA -> THAI CHARACTER BO BAIMAI + '\u0e1b' # 0xBB -> THAI CHARACTER PO PLA + '\u0e1c' # 0xBC -> THAI CHARACTER PHO PHUNG + '\u0e1d' # 0xBD -> THAI CHARACTER FO FA + '\u0e1e' # 0xBE -> THAI CHARACTER PHO PHAN + '\u0e1f' # 0xBF -> THAI CHARACTER FO FAN + '\u0e20' # 0xC0 -> THAI CHARACTER PHO SAMPHAO + '\u0e21' # 0xC1 -> THAI CHARACTER MO MA + '\u0e22' # 0xC2 -> THAI CHARACTER YO YAK + '\u0e23' # 0xC3 -> THAI CHARACTER RO RUA + '\u0e24' # 0xC4 -> THAI CHARACTER RU + '\u0e25' # 0xC5 -> THAI CHARACTER LO LING + '\u0e26' # 0xC6 -> THAI CHARACTER LU + '\u0e27' # 0xC7 -> THAI CHARACTER WO WAEN + '\u0e28' # 0xC8 -> THAI CHARACTER SO SALA + '\u0e29' # 0xC9 -> THAI CHARACTER SO RUSI + '\u0e2a' # 0xCA -> THAI CHARACTER SO SUA + '\u0e2b' # 0xCB -> THAI CHARACTER HO HIP + '\u0e2c' # 0xCC -> THAI CHARACTER LO CHULA + '\u0e2d' # 0xCD -> THAI CHARACTER O ANG + '\u0e2e' # 0xCE -> THAI CHARACTER HO NOKHUK + '\u0e2f' # 0xCF -> THAI CHARACTER PAIYANNOI + '\u0e30' # 0xD0 -> THAI CHARACTER SARA A + '\u0e31' # 0xD1 -> THAI CHARACTER MAI HAN-AKAT + '\u0e32' # 0xD2 -> THAI CHARACTER SARA AA + '\u0e33' # 0xD3 -> THAI CHARACTER SARA AM + '\u0e34' # 0xD4 -> THAI CHARACTER SARA I + '\u0e35' # 0xD5 -> THAI CHARACTER SARA II + '\u0e36' # 0xD6 -> THAI CHARACTER SARA UE + '\u0e37' # 0xD7 -> THAI CHARACTER SARA UEE + '\u0e38' # 0xD8 -> THAI CHARACTER SARA U + '\u0e39' # 0xD9 -> THAI CHARACTER SARA UU + '\u0e3a' # 0xDA -> THAI CHARACTER PHINTHU + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' + '\u0e3f' # 0xDF -> THAI CURRENCY SYMBOL BAHT + '\u0e40' # 0xE0 -> THAI CHARACTER SARA E + '\u0e41' # 0xE1 -> THAI CHARACTER SARA AE + '\u0e42' # 0xE2 -> THAI CHARACTER SARA O + '\u0e43' # 0xE3 -> THAI CHARACTER SARA AI MAIMUAN + '\u0e44' # 0xE4 -> THAI CHARACTER SARA AI MAIMALAI + '\u0e45' # 0xE5 -> THAI CHARACTER LAKKHANGYAO + '\u0e46' # 0xE6 -> THAI CHARACTER MAIYAMOK + '\u0e47' # 0xE7 -> THAI CHARACTER MAITAIKHU + '\u0e48' # 0xE8 -> THAI CHARACTER MAI EK + '\u0e49' # 0xE9 -> THAI CHARACTER MAI THO + '\u0e4a' # 0xEA -> THAI CHARACTER MAI TRI + '\u0e4b' # 0xEB -> THAI CHARACTER MAI CHATTAWA + '\u0e4c' # 0xEC -> THAI CHARACTER THANTHAKHAT + '\u0e4d' # 0xED -> THAI CHARACTER NIKHAHIT + '\u0e4e' # 0xEE -> THAI CHARACTER YAMAKKAN + '\u0e4f' # 0xEF -> THAI CHARACTER FONGMAN + '\u0e50' # 0xF0 -> THAI DIGIT ZERO + '\u0e51' # 0xF1 -> THAI DIGIT ONE + '\u0e52' # 0xF2 -> THAI DIGIT TWO + '\u0e53' # 0xF3 -> THAI DIGIT THREE + '\u0e54' # 0xF4 -> THAI DIGIT FOUR + '\u0e55' # 0xF5 -> THAI DIGIT FIVE + '\u0e56' # 0xF6 -> THAI DIGIT SIX + '\u0e57' # 0xF7 -> THAI DIGIT SEVEN + '\u0e58' # 0xF8 -> THAI DIGIT EIGHT + '\u0e59' # 0xF9 -> THAI DIGIT NINE + '\u0e5a' # 0xFA -> THAI CHARACTER ANGKHANKHU + '\u0e5b' # 0xFB -> THAI CHARACTER KHOMUT + '\ufffe' + '\ufffe' + '\ufffe' + '\ufffe' +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) diff --git a/venv/Lib/encodings/undefined.py b/venv/Lib/encodings/undefined.py new file mode 100644 index 00000000..46902883 --- 
/dev/null +++ b/venv/Lib/encodings/undefined.py @@ -0,0 +1,49 @@ +""" Python 'undefined' Codec + + This codec will always raise a ValueError exception when being + used. It is intended for use by the site.py file to switch off + automatic string to Unicode coercion. + +Written by Marc-Andre Lemburg (mal@lemburg.com). + +(c) Copyright CNRI, All Rights Reserved. NO WARRANTY. + +""" +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + raise UnicodeError("undefined encoding") + + def decode(self,input,errors='strict'): + raise UnicodeError("undefined encoding") + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + raise UnicodeError("undefined encoding") + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + raise UnicodeError("undefined encoding") + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='undefined', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamwriter=StreamWriter, + streamreader=StreamReader, + ) diff --git a/venv/Lib/encodings/unicode_escape.py b/venv/Lib/encodings/unicode_escape.py new file mode 100644 index 00000000..817f9326 --- /dev/null +++ b/venv/Lib/encodings/unicode_escape.py @@ -0,0 +1,45 @@ +""" Python 'unicode-escape' Codec + + +Written by Marc-Andre Lemburg (mal@lemburg.com). + +(c) Copyright CNRI, All Rights Reserved. NO WARRANTY. + +""" +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + # Note: Binding these as C functions will result in the class not + # converting them to methods. This is intended. + encode = codecs.unicode_escape_encode + decode = codecs.unicode_escape_decode + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.unicode_escape_encode(input, self.errors)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.unicode_escape_decode(input, self.errors)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='unicode-escape', + encode=Codec.encode, + decode=Codec.decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamwriter=StreamWriter, + streamreader=StreamReader, + ) diff --git a/venv/Lib/encodings/unicode_internal.py b/venv/Lib/encodings/unicode_internal.py new file mode 100644 index 00000000..df3e7752 --- /dev/null +++ b/venv/Lib/encodings/unicode_internal.py @@ -0,0 +1,45 @@ +""" Python 'unicode-internal' Codec + + +Written by Marc-Andre Lemburg (mal@lemburg.com). + +(c) Copyright CNRI, All Rights Reserved. NO WARRANTY. + +""" +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + # Note: Binding these as C functions will result in the class not + # converting them to methods. This is intended. 
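`unicode-escape` and `raw-unicode-escape` above bind the C implementations directly as class attributes (the comment about not converting them to methods is deliberate), while the `undefined` codec simply raises `UnicodeError` on any use. Their observable behaviour, sketched:

```python
print('a\nß€'.encode('unicode_escape'))      # b'a\\n\\xdf\\u20ac'
print('a\nß€'.encode('raw_unicode_escape'))  # b'a\n\xdf\\u20ac'  (only \uXXXX escapes)
print(rb'\u20ac'.decode('unicode_escape'))   # '€'
```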
+ encode = codecs.unicode_internal_encode + decode = codecs.unicode_internal_decode + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.unicode_internal_encode(input, self.errors)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.unicode_internal_decode(input, self.errors)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='unicode-internal', + encode=Codec.encode, + decode=Codec.decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamwriter=StreamWriter, + streamreader=StreamReader, + ) diff --git a/venv/Lib/encodings/utf_16.py b/venv/Lib/encodings/utf_16.py new file mode 100644 index 00000000..c6124824 --- /dev/null +++ b/venv/Lib/encodings/utf_16.py @@ -0,0 +1,155 @@ +""" Python 'utf-16' Codec + + +Written by Marc-Andre Lemburg (mal@lemburg.com). + +(c) Copyright CNRI, All Rights Reserved. NO WARRANTY. + +""" +import codecs, sys + +### Codec APIs + +encode = codecs.utf_16_encode + +def decode(input, errors='strict'): + return codecs.utf_16_decode(input, errors, True) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def __init__(self, errors='strict'): + codecs.IncrementalEncoder.__init__(self, errors) + self.encoder = None + + def encode(self, input, final=False): + if self.encoder is None: + result = codecs.utf_16_encode(input, self.errors)[0] + if sys.byteorder == 'little': + self.encoder = codecs.utf_16_le_encode + else: + self.encoder = codecs.utf_16_be_encode + return result + return self.encoder(input, self.errors)[0] + + def reset(self): + codecs.IncrementalEncoder.reset(self) + self.encoder = None + + def getstate(self): + # state info we return to the caller: + # 0: stream is in natural order for this platform + # 2: endianness hasn't been determined yet + # (we're never writing in unnatural order) + return (2 if self.encoder is None else 0) + + def setstate(self, state): + if state: + self.encoder = None + else: + if sys.byteorder == 'little': + self.encoder = codecs.utf_16_le_encode + else: + self.encoder = codecs.utf_16_be_encode + +class IncrementalDecoder(codecs.BufferedIncrementalDecoder): + def __init__(self, errors='strict'): + codecs.BufferedIncrementalDecoder.__init__(self, errors) + self.decoder = None + + def _buffer_decode(self, input, errors, final): + if self.decoder is None: + (output, consumed, byteorder) = \ + codecs.utf_16_ex_decode(input, errors, 0, final) + if byteorder == -1: + self.decoder = codecs.utf_16_le_decode + elif byteorder == 1: + self.decoder = codecs.utf_16_be_decode + elif consumed >= 2: + raise UnicodeError("UTF-16 stream does not start with BOM") + return (output, consumed) + return self.decoder(input, self.errors, final) + + def reset(self): + codecs.BufferedIncrementalDecoder.reset(self) + self.decoder = None + + def getstate(self): + # additional state info from the base class must be None here, + # as it isn't passed along to the caller + state = codecs.BufferedIncrementalDecoder.getstate(self)[0] + # additional state info we pass to the caller: + # 0: stream is in natural order for this platform + # 1: stream is in unnatural order + # 2: endianness hasn't been determined yet + if self.decoder is None: + return (state, 2) + addstate = int((sys.byteorder == "big") != + (self.decoder is codecs.utf_16_be_decode)) + 
return (state, addstate) + + def setstate(self, state): + # state[1] will be ignored by BufferedIncrementalDecoder.setstate() + codecs.BufferedIncrementalDecoder.setstate(self, state) + state = state[1] + if state == 0: + self.decoder = (codecs.utf_16_be_decode + if sys.byteorder == "big" + else codecs.utf_16_le_decode) + elif state == 1: + self.decoder = (codecs.utf_16_le_decode + if sys.byteorder == "big" + else codecs.utf_16_be_decode) + else: + self.decoder = None + +class StreamWriter(codecs.StreamWriter): + def __init__(self, stream, errors='strict'): + codecs.StreamWriter.__init__(self, stream, errors) + self.encoder = None + + def reset(self): + codecs.StreamWriter.reset(self) + self.encoder = None + + def encode(self, input, errors='strict'): + if self.encoder is None: + result = codecs.utf_16_encode(input, errors) + if sys.byteorder == 'little': + self.encoder = codecs.utf_16_le_encode + else: + self.encoder = codecs.utf_16_be_encode + return result + else: + return self.encoder(input, errors) + +class StreamReader(codecs.StreamReader): + + def reset(self): + codecs.StreamReader.reset(self) + try: + del self.decode + except AttributeError: + pass + + def decode(self, input, errors='strict'): + (object, consumed, byteorder) = \ + codecs.utf_16_ex_decode(input, errors, 0, False) + if byteorder == -1: + self.decode = codecs.utf_16_le_decode + elif byteorder == 1: + self.decode = codecs.utf_16_be_decode + elif consumed>=2: + raise UnicodeError("UTF-16 stream does not start with BOM") + return (object, consumed) + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='utf-16', + encode=encode, + decode=decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) diff --git a/venv/Lib/encodings/utf_16_be.py b/venv/Lib/encodings/utf_16_be.py new file mode 100644 index 00000000..86b458eb --- /dev/null +++ b/venv/Lib/encodings/utf_16_be.py @@ -0,0 +1,42 @@ +""" Python 'utf-16-be' Codec + + +Written by Marc-Andre Lemburg (mal@lemburg.com). + +(c) Copyright CNRI, All Rights Reserved. NO WARRANTY. + +""" +import codecs + +### Codec APIs + +encode = codecs.utf_16_be_encode + +def decode(input, errors='strict'): + return codecs.utf_16_be_decode(input, errors, True) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.utf_16_be_encode(input, self.errors)[0] + +class IncrementalDecoder(codecs.BufferedIncrementalDecoder): + _buffer_decode = codecs.utf_16_be_decode + +class StreamWriter(codecs.StreamWriter): + encode = codecs.utf_16_be_encode + +class StreamReader(codecs.StreamReader): + decode = codecs.utf_16_be_decode + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='utf-16-be', + encode=encode, + decode=decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) diff --git a/venv/Lib/encodings/utf_16_le.py b/venv/Lib/encodings/utf_16_le.py new file mode 100644 index 00000000..ec454142 --- /dev/null +++ b/venv/Lib/encodings/utf_16_le.py @@ -0,0 +1,42 @@ +""" Python 'utf-16-le' Codec + + +Written by Marc-Andre Lemburg (mal@lemburg.com). + +(c) Copyright CNRI, All Rights Reserved. NO WARRANTY. 
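All of the UTF-16 classes above implement a single policy: plain `utf-16` writes a BOM and then commits to one byte order, while `utf-16-le`/`utf-16-be` are fixed-order and never emit or expect a BOM. A sketch:

```python
import codecs, sys

data = 'hi'.encode('utf-16')              # BOM + native-order code units
native = 'utf-16-le' if sys.byteorder == 'little' else 'utf-16-be'
print(data == codecs.BOM_UTF16 + 'hi'.encode(native))   # True

# Decoding plain 'utf-16' consumes the BOM; the fixed-order variants do not.
print(data.decode('utf-16'))              # 'hi'
print('hi'.encode('utf-16-be'))           # b'\x00h\x00i'
```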
+ +""" +import codecs + +### Codec APIs + +encode = codecs.utf_16_le_encode + +def decode(input, errors='strict'): + return codecs.utf_16_le_decode(input, errors, True) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.utf_16_le_encode(input, self.errors)[0] + +class IncrementalDecoder(codecs.BufferedIncrementalDecoder): + _buffer_decode = codecs.utf_16_le_decode + +class StreamWriter(codecs.StreamWriter): + encode = codecs.utf_16_le_encode + +class StreamReader(codecs.StreamReader): + decode = codecs.utf_16_le_decode + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='utf-16-le', + encode=encode, + decode=decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) diff --git a/venv/Lib/encodings/utf_32.py b/venv/Lib/encodings/utf_32.py new file mode 100644 index 00000000..cdf84d14 --- /dev/null +++ b/venv/Lib/encodings/utf_32.py @@ -0,0 +1,150 @@ +""" +Python 'utf-32' Codec +""" +import codecs, sys + +### Codec APIs + +encode = codecs.utf_32_encode + +def decode(input, errors='strict'): + return codecs.utf_32_decode(input, errors, True) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def __init__(self, errors='strict'): + codecs.IncrementalEncoder.__init__(self, errors) + self.encoder = None + + def encode(self, input, final=False): + if self.encoder is None: + result = codecs.utf_32_encode(input, self.errors)[0] + if sys.byteorder == 'little': + self.encoder = codecs.utf_32_le_encode + else: + self.encoder = codecs.utf_32_be_encode + return result + return self.encoder(input, self.errors)[0] + + def reset(self): + codecs.IncrementalEncoder.reset(self) + self.encoder = None + + def getstate(self): + # state info we return to the caller: + # 0: stream is in natural order for this platform + # 2: endianness hasn't been determined yet + # (we're never writing in unnatural order) + return (2 if self.encoder is None else 0) + + def setstate(self, state): + if state: + self.encoder = None + else: + if sys.byteorder == 'little': + self.encoder = codecs.utf_32_le_encode + else: + self.encoder = codecs.utf_32_be_encode + +class IncrementalDecoder(codecs.BufferedIncrementalDecoder): + def __init__(self, errors='strict'): + codecs.BufferedIncrementalDecoder.__init__(self, errors) + self.decoder = None + + def _buffer_decode(self, input, errors, final): + if self.decoder is None: + (output, consumed, byteorder) = \ + codecs.utf_32_ex_decode(input, errors, 0, final) + if byteorder == -1: + self.decoder = codecs.utf_32_le_decode + elif byteorder == 1: + self.decoder = codecs.utf_32_be_decode + elif consumed >= 4: + raise UnicodeError("UTF-32 stream does not start with BOM") + return (output, consumed) + return self.decoder(input, self.errors, final) + + def reset(self): + codecs.BufferedIncrementalDecoder.reset(self) + self.decoder = None + + def getstate(self): + # additional state info from the base class must be None here, + # as it isn't passed along to the caller + state = codecs.BufferedIncrementalDecoder.getstate(self)[0] + # additional state info we pass to the caller: + # 0: stream is in natural order for this platform + # 1: stream is in unnatural order + # 2: endianness hasn't been determined yet + if self.decoder is None: + return (state, 2) + addstate = int((sys.byteorder == "big") != + (self.decoder is codecs.utf_32_be_decode)) + return (state, addstate) + + def setstate(self, state): + # 
state[1] will be ignored by BufferedIncrementalDecoder.setstate() + codecs.BufferedIncrementalDecoder.setstate(self, state) + state = state[1] + if state == 0: + self.decoder = (codecs.utf_32_be_decode + if sys.byteorder == "big" + else codecs.utf_32_le_decode) + elif state == 1: + self.decoder = (codecs.utf_32_le_decode + if sys.byteorder == "big" + else codecs.utf_32_be_decode) + else: + self.decoder = None + +class StreamWriter(codecs.StreamWriter): + def __init__(self, stream, errors='strict'): + self.encoder = None + codecs.StreamWriter.__init__(self, stream, errors) + + def reset(self): + codecs.StreamWriter.reset(self) + self.encoder = None + + def encode(self, input, errors='strict'): + if self.encoder is None: + result = codecs.utf_32_encode(input, errors) + if sys.byteorder == 'little': + self.encoder = codecs.utf_32_le_encode + else: + self.encoder = codecs.utf_32_be_encode + return result + else: + return self.encoder(input, errors) + +class StreamReader(codecs.StreamReader): + + def reset(self): + codecs.StreamReader.reset(self) + try: + del self.decode + except AttributeError: + pass + + def decode(self, input, errors='strict'): + (object, consumed, byteorder) = \ + codecs.utf_32_ex_decode(input, errors, 0, False) + if byteorder == -1: + self.decode = codecs.utf_32_le_decode + elif byteorder == 1: + self.decode = codecs.utf_32_be_decode + elif consumed>=4: + raise UnicodeError("UTF-32 stream does not start with BOM") + return (object, consumed) + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='utf-32', + encode=encode, + decode=decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) diff --git a/venv/Lib/encodings/utf_32_be.py b/venv/Lib/encodings/utf_32_be.py new file mode 100644 index 00000000..fe272b5f --- /dev/null +++ b/venv/Lib/encodings/utf_32_be.py @@ -0,0 +1,37 @@ +""" +Python 'utf-32-be' Codec +""" +import codecs + +### Codec APIs + +encode = codecs.utf_32_be_encode + +def decode(input, errors='strict'): + return codecs.utf_32_be_decode(input, errors, True) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.utf_32_be_encode(input, self.errors)[0] + +class IncrementalDecoder(codecs.BufferedIncrementalDecoder): + _buffer_decode = codecs.utf_32_be_decode + +class StreamWriter(codecs.StreamWriter): + encode = codecs.utf_32_be_encode + +class StreamReader(codecs.StreamReader): + decode = codecs.utf_32_be_decode + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='utf-32-be', + encode=encode, + decode=decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) diff --git a/venv/Lib/encodings/utf_32_le.py b/venv/Lib/encodings/utf_32_le.py new file mode 100644 index 00000000..9e482109 --- /dev/null +++ b/venv/Lib/encodings/utf_32_le.py @@ -0,0 +1,37 @@ +""" +Python 'utf-32-le' Codec +""" +import codecs + +### Codec APIs + +encode = codecs.utf_32_le_encode + +def decode(input, errors='strict'): + return codecs.utf_32_le_decode(input, errors, True) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.utf_32_le_encode(input, self.errors)[0] + +class IncrementalDecoder(codecs.BufferedIncrementalDecoder): + _buffer_decode = codecs.utf_32_le_decode + +class StreamWriter(codecs.StreamWriter): + encode = 
codecs.utf_32_le_encode + +class StreamReader(codecs.StreamReader): + decode = codecs.utf_32_le_decode + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='utf-32-le', + encode=encode, + decode=decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) diff --git a/venv/Lib/encodings/utf_7.py b/venv/Lib/encodings/utf_7.py new file mode 100644 index 00000000..8e0567f2 --- /dev/null +++ b/venv/Lib/encodings/utf_7.py @@ -0,0 +1,38 @@ +""" Python 'utf-7' Codec + +Written by Brian Quinlan (brian@sweetapp.com). +""" +import codecs + +### Codec APIs + +encode = codecs.utf_7_encode + +def decode(input, errors='strict'): + return codecs.utf_7_decode(input, errors, True) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.utf_7_encode(input, self.errors)[0] + +class IncrementalDecoder(codecs.BufferedIncrementalDecoder): + _buffer_decode = codecs.utf_7_decode + +class StreamWriter(codecs.StreamWriter): + encode = codecs.utf_7_encode + +class StreamReader(codecs.StreamReader): + decode = codecs.utf_7_decode + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='utf-7', + encode=encode, + decode=decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) diff --git a/venv/Lib/encodings/utf_8.py b/venv/Lib/encodings/utf_8.py new file mode 100644 index 00000000..1bf63365 --- /dev/null +++ b/venv/Lib/encodings/utf_8.py @@ -0,0 +1,42 @@ +""" Python 'utf-8' Codec + + +Written by Marc-Andre Lemburg (mal@lemburg.com). + +(c) Copyright CNRI, All Rights Reserved. NO WARRANTY. + +""" +import codecs + +### Codec APIs + +encode = codecs.utf_8_encode + +def decode(input, errors='strict'): + return codecs.utf_8_decode(input, errors, True) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.utf_8_encode(input, self.errors)[0] + +class IncrementalDecoder(codecs.BufferedIncrementalDecoder): + _buffer_decode = codecs.utf_8_decode + +class StreamWriter(codecs.StreamWriter): + encode = codecs.utf_8_encode + +class StreamReader(codecs.StreamReader): + decode = codecs.utf_8_decode + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='utf-8', + encode=encode, + decode=decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) diff --git a/venv/Lib/encodings/utf_8_sig.py b/venv/Lib/encodings/utf_8_sig.py new file mode 100644 index 00000000..1bb47920 --- /dev/null +++ b/venv/Lib/encodings/utf_8_sig.py @@ -0,0 +1,130 @@ +""" Python 'utf-8-sig' Codec +This work similar to UTF-8 with the following changes: + +* On encoding/writing a UTF-8 encoded BOM will be prepended/written as the + first three bytes. + +* On decoding/reading if the first three bytes are a UTF-8 encoded BOM, these + bytes will be skipped. 
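In short, `utf-8-sig` (defined just below) is UTF-8 plus automatic BOM handling, which is mostly useful for files written by BOM-prepending tools. The observable behaviour:

```python
import codecs

data = 'hello'.encode('utf-8-sig')
print(data)                              # b'\xef\xbb\xbfhello'
print(data.decode('utf-8-sig'))          # 'hello'        (BOM stripped)
print(data.decode('utf-8'))              # '\ufeffhello'  (plain utf-8 keeps it)
print(data.startswith(codecs.BOM_UTF8))  # True
```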
+""" +import codecs + +### Codec APIs + +def encode(input, errors='strict'): + return (codecs.BOM_UTF8 + codecs.utf_8_encode(input, errors)[0], + len(input)) + +def decode(input, errors='strict'): + prefix = 0 + if input[:3] == codecs.BOM_UTF8: + input = input[3:] + prefix = 3 + (output, consumed) = codecs.utf_8_decode(input, errors, True) + return (output, consumed+prefix) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def __init__(self, errors='strict'): + codecs.IncrementalEncoder.__init__(self, errors) + self.first = 1 + + def encode(self, input, final=False): + if self.first: + self.first = 0 + return codecs.BOM_UTF8 + \ + codecs.utf_8_encode(input, self.errors)[0] + else: + return codecs.utf_8_encode(input, self.errors)[0] + + def reset(self): + codecs.IncrementalEncoder.reset(self) + self.first = 1 + + def getstate(self): + return self.first + + def setstate(self, state): + self.first = state + +class IncrementalDecoder(codecs.BufferedIncrementalDecoder): + def __init__(self, errors='strict'): + codecs.BufferedIncrementalDecoder.__init__(self, errors) + self.first = 1 + + def _buffer_decode(self, input, errors, final): + if self.first: + if len(input) < 3: + if codecs.BOM_UTF8.startswith(input): + # not enough data to decide if this really is a BOM + # => try again on the next call + return ("", 0) + else: + self.first = 0 + else: + self.first = 0 + if input[:3] == codecs.BOM_UTF8: + (output, consumed) = \ + codecs.utf_8_decode(input[3:], errors, final) + return (output, consumed+3) + return codecs.utf_8_decode(input, errors, final) + + def reset(self): + codecs.BufferedIncrementalDecoder.reset(self) + self.first = 1 + + def getstate(self): + state = codecs.BufferedIncrementalDecoder.getstate(self) + # state[1] must be 0 here, as it isn't passed along to the caller + return (state[0], self.first) + + def setstate(self, state): + # state[1] will be ignored by BufferedIncrementalDecoder.setstate() + codecs.BufferedIncrementalDecoder.setstate(self, state) + self.first = state[1] + +class StreamWriter(codecs.StreamWriter): + def reset(self): + codecs.StreamWriter.reset(self) + try: + del self.encode + except AttributeError: + pass + + def encode(self, input, errors='strict'): + self.encode = codecs.utf_8_encode + return encode(input, errors) + +class StreamReader(codecs.StreamReader): + def reset(self): + codecs.StreamReader.reset(self) + try: + del self.decode + except AttributeError: + pass + + def decode(self, input, errors='strict'): + if len(input) < 3: + if codecs.BOM_UTF8.startswith(input): + # not enough data to decide if this is a BOM + # => try again on the next call + return ("", 0) + elif input[:3] == codecs.BOM_UTF8: + self.decode = codecs.utf_8_decode + (output, consumed) = codecs.utf_8_decode(input[3:],errors) + return (output, consumed+3) + # (else) no BOM present + self.decode = codecs.utf_8_decode + return codecs.utf_8_decode(input, errors) + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='utf-8-sig', + encode=encode, + decode=decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) diff --git a/venv/Lib/encodings/uu_codec.py b/venv/Lib/encodings/uu_codec.py new file mode 100644 index 00000000..2a5728fb --- /dev/null +++ b/venv/Lib/encodings/uu_codec.py @@ -0,0 +1,99 @@ +"""Python 'uu_codec' Codec - UU content transfer encoding. + +This codec de/encodes from bytes to bytes. + +Written by Marc-Andre Lemburg (mal@lemburg.com). 
Some details were +adapted from uu.py which was written by Lance Ellinghouse and +modified by Jack Jansen and Fredrik Lundh. +""" + +import codecs +import binascii +from io import BytesIO + +### Codec APIs + +def uu_encode(input, errors='strict', filename='', mode=0o666): + assert errors == 'strict' + infile = BytesIO(input) + outfile = BytesIO() + read = infile.read + write = outfile.write + + # Encode + write(('begin %o %s\n' % (mode & 0o777, filename)).encode('ascii')) + chunk = read(45) + while chunk: + write(binascii.b2a_uu(chunk)) + chunk = read(45) + write(b' \nend\n') + + return (outfile.getvalue(), len(input)) + +def uu_decode(input, errors='strict'): + assert errors == 'strict' + infile = BytesIO(input) + outfile = BytesIO() + readline = infile.readline + write = outfile.write + + # Find start of encoded data + while 1: + s = readline() + if not s: + raise ValueError('Missing "begin" line in input data') + if s[:5] == b'begin': + break + + # Decode + while True: + s = readline() + if not s or s == b'end\n': + break + try: + data = binascii.a2b_uu(s) + except binascii.Error as v: + # Workaround for broken uuencoders by /Fredrik Lundh + nbytes = (((s[0]-32) & 63) * 4 + 5) // 3 + data = binascii.a2b_uu(s[:nbytes]) + #sys.stderr.write("Warning: %s\n" % str(v)) + write(data) + if not s: + raise ValueError('Truncated input data') + + return (outfile.getvalue(), len(input)) + +class Codec(codecs.Codec): + def encode(self, input, errors='strict'): + return uu_encode(input, errors) + + def decode(self, input, errors='strict'): + return uu_decode(input, errors) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return uu_encode(input, self.errors)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return uu_decode(input, self.errors)[0] + +class StreamWriter(Codec, codecs.StreamWriter): + charbuffertype = bytes + +class StreamReader(Codec, codecs.StreamReader): + charbuffertype = bytes + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='uu', + encode=uu_encode, + decode=uu_decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + _is_text_encoding=False, + ) diff --git a/venv/Lib/encodings/zlib_codec.py b/venv/Lib/encodings/zlib_codec.py new file mode 100644 index 00000000..95908a4b --- /dev/null +++ b/venv/Lib/encodings/zlib_codec.py @@ -0,0 +1,77 @@ +"""Python 'zlib_codec' Codec - zlib compression encoding. + +This codec de/encodes from bytes to bytes. + +Written by Marc-Andre Lemburg (mal@lemburg.com). +""" + +import codecs +import zlib # this codec needs the optional zlib module ! 
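Like `quopri`, the uu codec above and the zlib codec that follows are bytes-to-bytes transforms flagged `_is_text_encoding=False`, so they go through `codecs.encode`/`codecs.decode`. A round-trip sketch (the payload bytes are only a hypothetical illustration):

```python
import codecs

payload = b'store-manager payload\n'

uu = codecs.encode(payload, 'uu')            # starts with b'begin 666 \n'
print(codecs.decode(uu, 'uu') == payload)    # True

packed = codecs.encode(payload * 100, 'zlib')           # zlib-compressed bytes
print(len(packed) < len(payload) * 100)                 # True
print(codecs.decode(packed, 'zlib') == payload * 100)   # True
```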
+ +### Codec APIs + +def zlib_encode(input, errors='strict'): + assert errors == 'strict' + return (zlib.compress(input), len(input)) + +def zlib_decode(input, errors='strict'): + assert errors == 'strict' + return (zlib.decompress(input), len(input)) + +class Codec(codecs.Codec): + def encode(self, input, errors='strict'): + return zlib_encode(input, errors) + def decode(self, input, errors='strict'): + return zlib_decode(input, errors) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def __init__(self, errors='strict'): + assert errors == 'strict' + self.errors = errors + self.compressobj = zlib.compressobj() + + def encode(self, input, final=False): + if final: + c = self.compressobj.compress(input) + return c + self.compressobj.flush() + else: + return self.compressobj.compress(input) + + def reset(self): + self.compressobj = zlib.compressobj() + +class IncrementalDecoder(codecs.IncrementalDecoder): + def __init__(self, errors='strict'): + assert errors == 'strict' + self.errors = errors + self.decompressobj = zlib.decompressobj() + + def decode(self, input, final=False): + if final: + c = self.decompressobj.decompress(input) + return c + self.decompressobj.flush() + else: + return self.decompressobj.decompress(input) + + def reset(self): + self.decompressobj = zlib.decompressobj() + +class StreamWriter(Codec, codecs.StreamWriter): + charbuffertype = bytes + +class StreamReader(Codec, codecs.StreamReader): + charbuffertype = bytes + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='zlib', + encode=zlib_encode, + decode=zlib_decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + _is_text_encoding=False, + ) diff --git a/venv/Lib/enum.py b/venv/Lib/enum.py new file mode 100644 index 00000000..576de031 --- /dev/null +++ b/venv/Lib/enum.py @@ -0,0 +1,901 @@ +import sys +from types import MappingProxyType, DynamicClassAttribute + +# try _collections first to reduce startup cost +try: + from _collections import OrderedDict +except ImportError: + from collections import OrderedDict + + +__all__ = [ + 'EnumMeta', + 'Enum', 'IntEnum', 'Flag', 'IntFlag', + 'auto', 'unique', + ] + + +def _is_descriptor(obj): + """Returns True if obj is a descriptor, False otherwise.""" + return ( + hasattr(obj, '__get__') or + hasattr(obj, '__set__') or + hasattr(obj, '__delete__')) + + +def _is_dunder(name): + """Returns True if a __dunder__ name, False otherwise.""" + return (name[:2] == name[-2:] == '__' and + name[2:3] != '_' and + name[-3:-2] != '_' and + len(name) > 4) + + +def _is_sunder(name): + """Returns True if a _sunder_ name, False otherwise.""" + return (name[0] == name[-1] == '_' and + name[1:2] != '_' and + name[-2:-1] != '_' and + len(name) > 2) + +def _make_class_unpicklable(cls): + """Make the given class un-picklable.""" + def _break_on_call_reduce(self, proto): + raise TypeError('%r cannot be pickled' % self) + cls.__reduce_ex__ = _break_on_call_reduce + cls.__module__ = '' + +_auto_null = object() +class auto: + """ + Instances are replaced with an appropriate value in Enum class suites. + """ + value = _auto_null + + +class _EnumDict(dict): + """Track enum member order and ensure member names are not reused. + + EnumMeta will use the names found in self._member_names as the + enumeration member names. 
+ + """ + def __init__(self): + super().__init__() + self._member_names = [] + self._last_values = [] + self._ignore = [] + + def __setitem__(self, key, value): + """Changes anything not dundered or not a descriptor. + + If an enum member name is used twice, an error is raised; duplicate + values are not checked for. + + Single underscore (sunder) names are reserved. + + """ + if _is_sunder(key): + if key not in ( + '_order_', '_create_pseudo_member_', + '_generate_next_value_', '_missing_', '_ignore_', + ): + raise ValueError('_names_ are reserved for future Enum use') + if key == '_generate_next_value_': + setattr(self, '_generate_next_value', value) + elif key == '_ignore_': + if isinstance(value, str): + value = value.replace(',',' ').split() + else: + value = list(value) + self._ignore = value + already = set(value) & set(self._member_names) + if already: + raise ValueError('_ignore_ cannot specify already set names: %r' % (already, )) + elif _is_dunder(key): + if key == '__order__': + key = '_order_' + elif key in self._member_names: + # descriptor overwriting an enum? + raise TypeError('Attempted to reuse key: %r' % key) + elif key in self._ignore: + pass + elif not _is_descriptor(value): + if key in self: + # enum overwriting a descriptor? + raise TypeError('%r already defined as: %r' % (key, self[key])) + if isinstance(value, auto): + if value.value == _auto_null: + value.value = self._generate_next_value(key, 1, len(self._member_names), self._last_values[:]) + value = value.value + self._member_names.append(key) + self._last_values.append(value) + super().__setitem__(key, value) + + +# Dummy value for Enum as EnumMeta explicitly checks for it, but of course +# until EnumMeta finishes running the first time the Enum class doesn't exist. +# This is also why there are checks in EnumMeta like `if Enum is not None` +Enum = None + + +class EnumMeta(type): + """Metaclass for Enum""" + @classmethod + def __prepare__(metacls, cls, bases): + # create the namespace dict + enum_dict = _EnumDict() + # inherit previous flags and _generate_next_value_ function + member_type, first_enum = metacls._get_mixins_(bases) + if first_enum is not None: + enum_dict['_generate_next_value_'] = getattr(first_enum, '_generate_next_value_', None) + return enum_dict + + def __new__(metacls, cls, bases, classdict): + # an Enum class is final once enumeration items have been defined; it + # cannot be mixed with other types (int, float, etc.) if it has an + # inherited __new__ unless a new __new__ is defined (or the resulting + # class will fail). + # + # remove any keys listed in _ignore_ + classdict.setdefault('_ignore_', []).append('_ignore_') + ignore = classdict['_ignore_'] + for key in ignore: + classdict.pop(key, None) + member_type, first_enum = metacls._get_mixins_(bases) + __new__, save_new, use_args = metacls._find_new_(classdict, member_type, + first_enum) + + # save enum items into separate mapping so they don't get baked into + # the new class + enum_members = {k: classdict[k] for k in classdict._member_names} + for name in classdict._member_names: + del classdict[name] + + # adjust the sunders + _order_ = classdict.pop('_order_', None) + + # check for illegal enum names (any others?) + invalid_names = set(enum_members) & {'mro', } + if invalid_names: + raise ValueError('Invalid enum member name: {0}'.format( + ','.join(invalid_names))) + + # create a default docstring if one has not been provided + if '__doc__' not in classdict: + classdict['__doc__'] = 'An enumeration.' 
+ + # create our new Enum type + enum_class = super().__new__(metacls, cls, bases, classdict) + enum_class._member_names_ = [] # names in definition order + enum_class._member_map_ = OrderedDict() # name->value map + enum_class._member_type_ = member_type + + # save attributes from super classes so we know if we can take + # the shortcut of storing members in the class dict + base_attributes = {a for b in enum_class.mro() for a in b.__dict__} + + # Reverse value->name map for hashable values. + enum_class._value2member_map_ = {} + + # If a custom type is mixed into the Enum, and it does not know how + # to pickle itself, pickle.dumps will succeed but pickle.loads will + # fail. Rather than have the error show up later and possibly far + # from the source, sabotage the pickle protocol for this class so + # that pickle.dumps also fails. + # + # However, if the new class implements its own __reduce_ex__, do not + # sabotage -- it's on them to make sure it works correctly. We use + # __reduce_ex__ instead of any of the others as it is preferred by + # pickle over __reduce__, and it handles all pickle protocols. + if '__reduce_ex__' not in classdict: + if member_type is not object: + methods = ('__getnewargs_ex__', '__getnewargs__', + '__reduce_ex__', '__reduce__') + if not any(m in member_type.__dict__ for m in methods): + _make_class_unpicklable(enum_class) + + # instantiate them, checking for duplicates as we go + # we instantiate first instead of checking for duplicates first in case + # a custom __new__ is doing something funky with the values -- such as + # auto-numbering ;) + for member_name in classdict._member_names: + value = enum_members[member_name] + if not isinstance(value, tuple): + args = (value, ) + else: + args = value + if member_type is tuple: # special case for tuple enums + args = (args, ) # wrap it one more time + if not use_args: + enum_member = __new__(enum_class) + if not hasattr(enum_member, '_value_'): + enum_member._value_ = value + else: + enum_member = __new__(enum_class, *args) + if not hasattr(enum_member, '_value_'): + if member_type is object: + enum_member._value_ = value + else: + enum_member._value_ = member_type(*args) + value = enum_member._value_ + enum_member._name_ = member_name + enum_member.__objclass__ = enum_class + enum_member.__init__(*args) + # If another member with the same value was already defined, the + # new member becomes an alias to the existing one. + for name, canonical_member in enum_class._member_map_.items(): + if canonical_member._value_ == enum_member._value_: + enum_member = canonical_member + break + else: + # Aliases don't appear in member names (only in __members__). + enum_class._member_names_.append(member_name) + # performance boost for any member that would not shadow + # a DynamicClassAttribute + if member_name not in base_attributes: + setattr(enum_class, member_name, enum_member) + # now add to _member_map_ + enum_class._member_map_[member_name] = enum_member + try: + # This may fail if value is not hashable. We can't add the value + # to the map, and by-value lookups for this value will be + # linear. 
+ enum_class._value2member_map_[value] = enum_member + except TypeError: + pass + + # double check that repr and friends are not the mixin's or various + # things break (such as pickle) + for name in ('__repr__', '__str__', '__format__', '__reduce_ex__'): + class_method = getattr(enum_class, name) + obj_method = getattr(member_type, name, None) + enum_method = getattr(first_enum, name, None) + if obj_method is not None and obj_method is class_method: + setattr(enum_class, name, enum_method) + + # replace any other __new__ with our own (as long as Enum is not None, + # anyway) -- again, this is to support pickle + if Enum is not None: + # if the user defined their own __new__, save it before it gets + # clobbered in case they subclass later + if save_new: + enum_class.__new_member__ = __new__ + enum_class.__new__ = Enum.__new__ + + # py3 support for definition order (helps keep py2/py3 code in sync) + if _order_ is not None: + if isinstance(_order_, str): + _order_ = _order_.replace(',', ' ').split() + if _order_ != enum_class._member_names_: + raise TypeError('member order does not match _order_') + + return enum_class + + def __bool__(self): + """ + classes/types should always be True. + """ + return True + + def __call__(cls, value, names=None, *, module=None, qualname=None, type=None, start=1): + """Either returns an existing member, or creates a new enum class. + + This method is used both when an enum class is given a value to match + to an enumeration member (i.e. Color(3)) and for the functional API + (i.e. Color = Enum('Color', names='RED GREEN BLUE')). + + When used for the functional API: + + `value` will be the name of the new class. + + `names` should be either a string of white-space/comma delimited names + (values will start at `start`), or an iterator/mapping of name, value pairs. + + `module` should be set to the module this class is being created in; + if it is not set, an attempt to find that module will be made, but if + it fails the class will not be picklable. + + `qualname` should be set to the actual location this class can be found + at in its module; by default it is set to the global scope. If this is + not correct, unpickling will fail in some circumstances. + + `type`, if set, will be mixed in as the first base class. + + """ + if names is None: # simple value lookup + return cls.__new__(cls, value) + # otherwise, functional API: we're creating a new Enum type + return cls._create_(value, names, module=module, qualname=qualname, type=type, start=start) + + def __contains__(cls, member): + if not isinstance(member, Enum): + import warnings + warnings.warn( + "using non-Enums in containment checks will raise " + "TypeError in Python 3.8", + DeprecationWarning, 2) + return isinstance(member, cls) and member._name_ in cls._member_map_ + + def __delattr__(cls, attr): + # nicer error message when someone tries to delete an attribute + # (see issue19025). + if attr in cls._member_map_: + raise AttributeError( + "%s: cannot delete Enum member." % cls.__name__) + super().__delattr__(attr) + + def __dir__(self): + return (['__class__', '__doc__', '__members__', '__module__'] + + self._member_names_) + + def __getattr__(cls, name): + """Return the enum member matching `name` + + We use __getattr__ instead of descriptors or inserting into the enum + class' __dict__ in order to support `name` and `value` being both + properties for enum members (which live in the class' __dict__) and + enum members themselves. 
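The member-lookup machinery described in this hunk (attribute access through `__getattr__`, item access through `_member_map_`, by-value lookup through `_value2member_map_`, and alias resolution) can be exercised with a short sketch. This is illustrative only and not part of the vendored file; the `Color` class and its members are made up for the example.

```python
from enum import Enum

class Color(Enum):
    RED = 1
    GREEN = 2
    CRIMSON = 1          # duplicate value: becomes an alias for RED, not a new member

# Lookup paths provided by the metaclass:
assert Color.RED is Color['RED']      # attribute access and item access
assert Color(2) is Color.GREEN        # by-value lookup via the reverse map
assert Color.RED.name == 'RED' and Color.RED.value == 1

# Aliases resolve to the canonical member and are hidden from iteration,
# but remain visible in the read-only __members__ mapping.
assert Color.CRIMSON is Color.RED
assert [m.name for m in Color] == ['RED', 'GREEN']
assert 'CRIMSON' in Color.__members__
```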
+ + """ + if _is_dunder(name): + raise AttributeError(name) + try: + return cls._member_map_[name] + except KeyError: + raise AttributeError(name) from None + + def __getitem__(cls, name): + return cls._member_map_[name] + + def __iter__(cls): + return (cls._member_map_[name] for name in cls._member_names_) + + def __len__(cls): + return len(cls._member_names_) + + @property + def __members__(cls): + """Returns a mapping of member name->value. + + This mapping lists all enum members, including aliases. Note that this + is a read-only view of the internal mapping. + + """ + return MappingProxyType(cls._member_map_) + + def __repr__(cls): + return "" % cls.__name__ + + def __reversed__(cls): + return (cls._member_map_[name] for name in reversed(cls._member_names_)) + + def __setattr__(cls, name, value): + """Block attempts to reassign Enum members. + + A simple assignment to the class namespace only changes one of the + several possible ways to get an Enum member from the Enum class, + resulting in an inconsistent Enumeration. + + """ + member_map = cls.__dict__.get('_member_map_', {}) + if name in member_map: + raise AttributeError('Cannot reassign members.') + super().__setattr__(name, value) + + def _create_(cls, class_name, names, *, module=None, qualname=None, type=None, start=1): + """Convenience method to create a new Enum class. + + `names` can be: + + * A string containing member names, separated either with spaces or + commas. Values are incremented by 1 from `start`. + * An iterable of member names. Values are incremented by 1 from `start`. + * An iterable of (member name, value) pairs. + * A mapping of member name -> value pairs. + + """ + metacls = cls.__class__ + bases = (cls, ) if type is None else (type, cls) + _, first_enum = cls._get_mixins_(bases) + classdict = metacls.__prepare__(class_name, bases) + + # special processing needed for names? + if isinstance(names, str): + names = names.replace(',', ' ').split() + if isinstance(names, (tuple, list)) and names and isinstance(names[0], str): + original_names, names = names, [] + last_values = [] + for count, name in enumerate(original_names): + value = first_enum._generate_next_value_(name, start, count, last_values[:]) + last_values.append(value) + names.append((name, value)) + + # Here, names is either an iterable of (name, value) or a mapping. + for item in names: + if isinstance(item, str): + member_name, member_value = item, names[item] + else: + member_name, member_value = item + classdict[member_name] = member_value + enum_class = metacls.__new__(metacls, class_name, bases, classdict) + + # TODO: replace the frame hack if a blessed way to know the calling + # module is ever developed + if module is None: + try: + module = sys._getframe(2).f_globals['__name__'] + except (AttributeError, ValueError) as exc: + pass + if module is None: + _make_class_unpicklable(enum_class) + else: + enum_class.__module__ = module + if qualname is not None: + enum_class.__qualname__ = qualname + + return enum_class + + @staticmethod + def _get_mixins_(bases): + """Returns the type for creating enum members, and the first inherited + enum class. 
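To make the "member type" returned by `_get_mixins_` concrete, here is a minimal sketch of a mixed-in enumeration, assuming the standard `enum` API; the `HttpStatus` class is invented for illustration and does not appear in this patch.

```python
from enum import Enum

class HttpStatus(int, Enum):      # member_type is int, first_enum is Enum
    OK = 200
    NOT_FOUND = 404

# Members are created through int.__new__, so they compare and format as ints.
assert HttpStatus.OK == 200
assert HttpStatus.OK + 1 == 201
assert '{:d}'.format(HttpStatus.NOT_FOUND) == '404'
```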
+ + bases: the tuple of bases that was given to __new__ + + """ + if not bases: + return object, Enum + + # double check that we are not subclassing a class with existing + # enumeration members; while we're at it, see if any other data + # type has been mixed in so we can use the correct __new__ + member_type = first_enum = None + for base in bases: + if (base is not Enum and + issubclass(base, Enum) and + base._member_names_): + raise TypeError("Cannot extend enumerations") + # base is now the last base in bases + if not issubclass(base, Enum): + raise TypeError("new enumerations must be created as " + "`ClassName([mixin_type,] enum_type)`") + + # get correct mix-in type (either mix-in type of Enum subclass, or + # first base if last base is Enum) + if not issubclass(bases[0], Enum): + member_type = bases[0] # first data type + first_enum = bases[-1] # enum type + else: + for base in bases[0].__mro__: + # most common: (IntEnum, int, Enum, object) + # possible: (, , + # , , + # ) + if issubclass(base, Enum): + if first_enum is None: + first_enum = base + else: + if member_type is None: + member_type = base + + return member_type, first_enum + + @staticmethod + def _find_new_(classdict, member_type, first_enum): + """Returns the __new__ to be used for creating the enum members. + + classdict: the class dictionary given to __new__ + member_type: the data type whose __new__ will be used by default + first_enum: enumeration to check for an overriding __new__ + + """ + # now find the correct __new__, checking to see of one was defined + # by the user; also check earlier enum classes in case a __new__ was + # saved as __new_member__ + __new__ = classdict.get('__new__', None) + + # should __new__ be saved as __new_member__ later? + save_new = __new__ is not None + + if __new__ is None: + # check all possibles for __new_member__ before falling back to + # __new__ + for method in ('__new_member__', '__new__'): + for possible in (member_type, first_enum): + target = getattr(possible, method, None) + if target not in { + None, + None.__new__, + object.__new__, + Enum.__new__, + }: + __new__ = target + break + if __new__ is not None: + break + else: + __new__ = object.__new__ + + # if a non-object.__new__ is used then whatever value/tuple was + # assigned to the enum member name will be passed to __new__ and to the + # new enum member's __init__ + if __new__ is object.__new__: + use_args = False + else: + use_args = True + + return __new__, save_new, use_args + + +class Enum(metaclass=EnumMeta): + """Generic enumeration. + + Derive from this class to define new enumerations. + + """ + def __new__(cls, value): + # all enum instances are actually created during class construction + # without calling this method; this method is called by the metaclass' + # __call__ (i.e. 
Color(3) ), and by pickle + if type(value) is cls: + # For lookups like Color(Color.RED) + return value + # by-value search for a matching enum member + # see if it's in the reverse mapping (for hashable values) + try: + if value in cls._value2member_map_: + return cls._value2member_map_[value] + except TypeError: + # not there, now do long search -- O(n) behavior + for member in cls._member_map_.values(): + if member._value_ == value: + return member + # still not found -- try _missing_ hook + return cls._missing_(value) + + def _generate_next_value_(name, start, count, last_values): + for last_value in reversed(last_values): + try: + return last_value + 1 + except TypeError: + pass + else: + return start + + @classmethod + def _missing_(cls, value): + raise ValueError("%r is not a valid %s" % (value, cls.__name__)) + + def __repr__(self): + return "<%s.%s: %r>" % ( + self.__class__.__name__, self._name_, self._value_) + + def __str__(self): + return "%s.%s" % (self.__class__.__name__, self._name_) + + def __dir__(self): + added_behavior = [ + m + for cls in self.__class__.mro() + for m in cls.__dict__ + if m[0] != '_' and m not in self._member_map_ + ] + return (['__class__', '__doc__', '__module__'] + added_behavior) + + def __format__(self, format_spec): + # mixed-in Enums should use the mixed-in type's __format__, otherwise + # we can get strange results with the Enum name showing up instead of + # the value + + # pure Enum branch + if self._member_type_ is object: + cls = str + val = str(self) + # mix-in branch + else: + cls = self._member_type_ + val = self._value_ + return cls.__format__(val, format_spec) + + def __hash__(self): + return hash(self._name_) + + def __reduce_ex__(self, proto): + return self.__class__, (self._value_, ) + + # DynamicClassAttribute is used to provide access to the `name` and + # `value` properties of enum members while keeping some measure of + # protection from modification, while still allowing for an enumeration + # to have members named `name` and `value`. This works because enumeration + # members are not set directly on the enum class -- __getattr__ is + # used to look them up. + + @DynamicClassAttribute + def name(self): + """The name of the Enum member.""" + return self._name_ + + @DynamicClassAttribute + def value(self): + """The value of the Enum member.""" + return self._value_ + + @classmethod + def _convert(cls, name, module, filter, source=None): + """ + Create a new Enum subclass that replaces a collection of global constants + """ + # convert all constants from source (or module) that pass filter() to + # a new Enum called name, and export the enum and its members back to + # module; + # also, replace the __reduce_ex__ method so unpickling works in + # previous Python versions + module_globals = vars(sys.modules[module]) + if source: + source = vars(source) + else: + source = module_globals + # We use an OrderedDict of sorted source keys so that the + # _value2member_map is populated in the same order every time + # for a consistent reverse mapping of number to name when there + # are multiple names for the same number rather than varying + # between runs due to hash randomization of the module dictionary. 
+ members = [ + (name, source[name]) + for name in source.keys() + if filter(name)] + try: + # sort by value + members.sort(key=lambda t: (t[1], t[0])) + except TypeError: + # unless some values aren't comparable, in which case sort by name + members.sort(key=lambda t: t[0]) + cls = cls(name, members, module=module) + cls.__reduce_ex__ = _reduce_ex_by_name + module_globals.update(cls.__members__) + module_globals[name] = cls + return cls + + +class IntEnum(int, Enum): + """Enum where members are also (and must be) ints""" + + +def _reduce_ex_by_name(self, proto): + return self.name + +class Flag(Enum): + """Support for flags""" + + def _generate_next_value_(name, start, count, last_values): + """ + Generate the next value when not given. + + name: the name of the member + start: the initital start value or None + count: the number of existing members + last_value: the last value assigned or None + """ + if not count: + return start if start is not None else 1 + for last_value in reversed(last_values): + try: + high_bit = _high_bit(last_value) + break + except Exception: + raise TypeError('Invalid Flag value: %r' % last_value) from None + return 2 ** (high_bit+1) + + @classmethod + def _missing_(cls, value): + original_value = value + if value < 0: + value = ~value + possible_member = cls._create_pseudo_member_(value) + if original_value < 0: + possible_member = ~possible_member + return possible_member + + @classmethod + def _create_pseudo_member_(cls, value): + """ + Create a composite member iff value contains only members. + """ + pseudo_member = cls._value2member_map_.get(value, None) + if pseudo_member is None: + # verify all bits are accounted for + _, extra_flags = _decompose(cls, value) + if extra_flags: + raise ValueError("%r is not a valid %s" % (value, cls.__name__)) + # construct a singleton enum pseudo-member + pseudo_member = object.__new__(cls) + pseudo_member._name_ = None + pseudo_member._value_ = value + # use setdefault in case another thread already created a composite + # with this value + pseudo_member = cls._value2member_map_.setdefault(value, pseudo_member) + return pseudo_member + + def __contains__(self, other): + if not isinstance(other, self.__class__): + import warnings + warnings.warn( + "using non-Flags in containment checks will raise " + "TypeError in Python 3.8", + DeprecationWarning, 2) + return False + return other._value_ & self._value_ == other._value_ + + def __repr__(self): + cls = self.__class__ + if self._name_ is not None: + return '<%s.%s: %r>' % (cls.__name__, self._name_, self._value_) + members, uncovered = _decompose(cls, self._value_) + return '<%s.%s: %r>' % ( + cls.__name__, + '|'.join([str(m._name_ or m._value_) for m in members]), + self._value_, + ) + + def __str__(self): + cls = self.__class__ + if self._name_ is not None: + return '%s.%s' % (cls.__name__, self._name_) + members, uncovered = _decompose(cls, self._value_) + if len(members) == 1 and members[0]._name_ is None: + return '%s.%r' % (cls.__name__, members[0]._value_) + else: + return '%s.%s' % ( + cls.__name__, + '|'.join([str(m._name_ or m._value_) for m in members]), + ) + + def __bool__(self): + return bool(self._value_) + + def __or__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return self.__class__(self._value_ | other._value_) + + def __and__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return self.__class__(self._value_ & other._value_) + + def __xor__(self, other): + if not 
isinstance(other, self.__class__): + return NotImplemented + return self.__class__(self._value_ ^ other._value_) + + def __invert__(self): + members, uncovered = _decompose(self.__class__, self._value_) + inverted = self.__class__(0) + for m in self.__class__: + if m not in members and not (m._value_ & self._value_): + inverted = inverted | m + return self.__class__(inverted) + + +class IntFlag(int, Flag): + """Support for integer-based Flags""" + + @classmethod + def _missing_(cls, value): + if not isinstance(value, int): + raise ValueError("%r is not a valid %s" % (value, cls.__name__)) + new_member = cls._create_pseudo_member_(value) + return new_member + + @classmethod + def _create_pseudo_member_(cls, value): + pseudo_member = cls._value2member_map_.get(value, None) + if pseudo_member is None: + need_to_create = [value] + # get unaccounted for bits + _, extra_flags = _decompose(cls, value) + # timer = 10 + while extra_flags: + # timer -= 1 + bit = _high_bit(extra_flags) + flag_value = 2 ** bit + if (flag_value not in cls._value2member_map_ and + flag_value not in need_to_create + ): + need_to_create.append(flag_value) + if extra_flags == -flag_value: + extra_flags = 0 + else: + extra_flags ^= flag_value + for value in reversed(need_to_create): + # construct singleton pseudo-members + pseudo_member = int.__new__(cls, value) + pseudo_member._name_ = None + pseudo_member._value_ = value + # use setdefault in case another thread already created a composite + # with this value + pseudo_member = cls._value2member_map_.setdefault(value, pseudo_member) + return pseudo_member + + def __or__(self, other): + if not isinstance(other, (self.__class__, int)): + return NotImplemented + result = self.__class__(self._value_ | self.__class__(other)._value_) + return result + + def __and__(self, other): + if not isinstance(other, (self.__class__, int)): + return NotImplemented + return self.__class__(self._value_ & self.__class__(other)._value_) + + def __xor__(self, other): + if not isinstance(other, (self.__class__, int)): + return NotImplemented + return self.__class__(self._value_ ^ self.__class__(other)._value_) + + __ror__ = __or__ + __rand__ = __and__ + __rxor__ = __xor__ + + def __invert__(self): + result = self.__class__(~self._value_) + return result + + +def _high_bit(value): + """returns index of highest bit, or -1 if value is zero or negative""" + return value.bit_length() - 1 + +def unique(enumeration): + """Class decorator for enumerations ensuring unique member values.""" + duplicates = [] + for name, member in enumeration.__members__.items(): + if name != member.name: + duplicates.append((name, member.name)) + if duplicates: + alias_details = ', '.join( + ["%s -> %s" % (alias, name) for (alias, name) in duplicates]) + raise ValueError('duplicate values found in %r: %s' % + (enumeration, alias_details)) + return enumeration + +def _decompose(flag, value): + """Extract all members from the value.""" + # _decompose is only called if the value is not named + not_covered = value + negative = value < 0 + # issue29167: wrap accesses to _value2member_map_ in a list to avoid race + # conditions between iterating over it and having more pseudo- + # members added to it + if negative: + # only check for named flags + flags_to_check = [ + (m, v) + for v, m in list(flag._value2member_map_.items()) + if m.name is not None + ] + else: + # check for named flags and powers-of-two flags + flags_to_check = [ + (m, v) + for v, m in list(flag._value2member_map_.items()) + if m.name is not None or 
_power_of_two(v) + ] + members = [] + for member, member_value in flags_to_check: + if member_value and member_value & value == member_value: + members.append(member) + not_covered &= ~member_value + if not members and value in flag._value2member_map_: + members.append(flag._value2member_map_[value]) + members.sort(key=lambda m: m._value_, reverse=True) + if len(members) > 1 and members[0].value == value: + # we have the breakdown, don't need the value member itself + members.pop(0) + return members, not_covered + +def _power_of_two(value): + if value < 1: + return False + return value == 2 ** _high_bit(value) diff --git a/venv/Lib/fnmatch.py b/venv/Lib/fnmatch.py new file mode 100644 index 00000000..b98e6413 --- /dev/null +++ b/venv/Lib/fnmatch.py @@ -0,0 +1,128 @@ +"""Filename matching with shell patterns. + +fnmatch(FILENAME, PATTERN) matches according to the local convention. +fnmatchcase(FILENAME, PATTERN) always takes case in account. + +The functions operate by translating the pattern into a regular +expression. They cache the compiled regular expressions for speed. + +The function translate(PATTERN) returns a regular expression +corresponding to PATTERN. (It does not compile it.) +""" +import os +import posixpath +import re +import functools + +__all__ = ["filter", "fnmatch", "fnmatchcase", "translate"] + +def fnmatch(name, pat): + """Test whether FILENAME matches PATTERN. + + Patterns are Unix shell style: + + * matches everything + ? matches any single character + [seq] matches any character in seq + [!seq] matches any char not in seq + + An initial period in FILENAME is not special. + Both FILENAME and PATTERN are first case-normalized + if the operating system requires it. + If you don't want this, use fnmatchcase(FILENAME, PATTERN). + """ + name = os.path.normcase(name) + pat = os.path.normcase(pat) + return fnmatchcase(name, pat) + +@functools.lru_cache(maxsize=256, typed=True) +def _compile_pattern(pat): + if isinstance(pat, bytes): + pat_str = str(pat, 'ISO-8859-1') + res_str = translate(pat_str) + res = bytes(res_str, 'ISO-8859-1') + else: + res = translate(pat) + return re.compile(res).match + +def filter(names, pat): + """Return the subset of the list NAMES that match PAT.""" + result = [] + pat = os.path.normcase(pat) + match = _compile_pattern(pat) + if os.path is posixpath: + # normcase on posix is NOP. Optimize it away from the loop. + for name in names: + if match(name): + result.append(name) + else: + for name in names: + if match(os.path.normcase(name)): + result.append(name) + return result + +def fnmatchcase(name, pat): + """Test whether FILENAME matches PATTERN, including case. + + This is a version of fnmatch() which doesn't case-normalize + its arguments. + """ + match = _compile_pattern(pat) + return match(name) is not None + + +def translate(pat): + """Translate a shell PATTERN to a regular expression. + + There is no way to quote meta-characters. + """ + + i, n = 0, len(pat) + res = '' + while i < n: + c = pat[i] + i = i+1 + if c == '*': + res = res + '.*' + elif c == '?': + res = res + '.' + elif c == '[': + j = i + if j < n and pat[j] == '!': + j = j+1 + if j < n and pat[j] == ']': + j = j+1 + while j < n and pat[j] != ']': + j = j+1 + if j >= n: + res = res + '\\[' + else: + stuff = pat[i:j] + if '--' not in stuff: + stuff = stuff.replace('\\', r'\\') + else: + chunks = [] + k = i+2 if pat[i] == '!' 
else i+1 + while True: + k = pat.find('-', k, j) + if k < 0: + break + chunks.append(pat[i:k]) + i = k+1 + k = k+3 + chunks.append(pat[i:j]) + # Escape backslashes and hyphens for set difference (--). + # Hyphens that create ranges shouldn't be escaped. + stuff = '-'.join(s.replace('\\', r'\\').replace('-', r'\-') + for s in chunks) + # Escape set operations (&&, ~~ and ||). + stuff = re.sub(r'([&~|])', r'\\\1', stuff) + i = j+1 + if stuff[0] == '!': + stuff = '^' + stuff[1:] + elif stuff[0] in ('^', '['): + stuff = '\\' + stuff + res = '%s[%s]' % (res, stuff) + else: + res = res + re.escape(c) + return r'(?s:%s)\Z' % res diff --git a/venv/Lib/functools.py b/venv/Lib/functools.py new file mode 100644 index 00000000..c8b79c2a --- /dev/null +++ b/venv/Lib/functools.py @@ -0,0 +1,828 @@ +"""functools.py - Tools for working with functions and callable objects +""" +# Python module wrapper for _functools C module +# to allow utilities written in Python to be added +# to the functools module. +# Written by Nick Coghlan , +# Raymond Hettinger , +# and Åukasz Langa . +# Copyright (C) 2006-2013 Python Software Foundation. +# See C source code for _functools credits/copyright + +__all__ = ['update_wrapper', 'wraps', 'WRAPPER_ASSIGNMENTS', 'WRAPPER_UPDATES', + 'total_ordering', 'cmp_to_key', 'lru_cache', 'reduce', 'partial', + 'partialmethod', 'singledispatch'] + +try: + from _functools import reduce +except ImportError: + pass +from abc import get_cache_token +from collections import namedtuple +# import types, weakref # Deferred to single_dispatch() +from reprlib import recursive_repr +from _thread import RLock + + +################################################################################ +### update_wrapper() and wraps() decorator +################################################################################ + +# update_wrapper() and wraps() are tools to help write +# wrapper functions that can handle naive introspection + +WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__qualname__', '__doc__', + '__annotations__') +WRAPPER_UPDATES = ('__dict__',) +def update_wrapper(wrapper, + wrapped, + assigned = WRAPPER_ASSIGNMENTS, + updated = WRAPPER_UPDATES): + """Update a wrapper function to look like the wrapped function + + wrapper is the function to be updated + wrapped is the original function + assigned is a tuple naming the attributes assigned directly + from the wrapped function to the wrapper function (defaults to + functools.WRAPPER_ASSIGNMENTS) + updated is a tuple naming the attributes of the wrapper that + are updated with the corresponding attribute from the wrapped + function (defaults to functools.WRAPPER_UPDATES) + """ + for attr in assigned: + try: + value = getattr(wrapped, attr) + except AttributeError: + pass + else: + setattr(wrapper, attr, value) + for attr in updated: + getattr(wrapper, attr).update(getattr(wrapped, attr, {})) + # Issue #17482: set __wrapped__ last so we don't inadvertently copy it + # from the wrapped function when updating __dict__ + wrapper.__wrapped__ = wrapped + # Return the wrapper so this can be used as a decorator via partial() + return wrapper + +def wraps(wrapped, + assigned = WRAPPER_ASSIGNMENTS, + updated = WRAPPER_UPDATES): + """Decorator factory to apply update_wrapper() to a wrapper function + + Returns a decorator that invokes update_wrapper() with the decorated + function as the wrapper argument and the arguments to wraps() as the + remaining arguments. Default arguments are as for update_wrapper(). 
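As a quick, hedged illustration of the `wraps` decorator factory described in this docstring (the `log_calls` and `greet` functions below are made up for the example, not part of the patch):

```python
import functools

def log_calls(func):
    @functools.wraps(func)          # copies __name__, __doc__, etc. onto the wrapper
    def wrapper(*args, **kwargs):
        print('calling', func.__name__)
        return func(*args, **kwargs)
    return wrapper

@log_calls
def greet(name):
    """Return a greeting."""
    return 'hello ' + name

assert greet.__name__ == 'greet'                  # not 'wrapper'
assert greet.__doc__ == 'Return a greeting.'
assert greet('world') == 'hello world'            # prints "calling greet"
```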
+ This is a convenience function to simplify applying partial() to + update_wrapper(). + """ + return partial(update_wrapper, wrapped=wrapped, + assigned=assigned, updated=updated) + + +################################################################################ +### total_ordering class decorator +################################################################################ + +# The total ordering functions all invoke the root magic method directly +# rather than using the corresponding operator. This avoids possible +# infinite recursion that could occur when the operator dispatch logic +# detects a NotImplemented result and then calls a reflected method. + +def _gt_from_lt(self, other, NotImplemented=NotImplemented): + 'Return a > b. Computed by @total_ordering from (not a < b) and (a != b).' + op_result = self.__lt__(other) + if op_result is NotImplemented: + return op_result + return not op_result and self != other + +def _le_from_lt(self, other, NotImplemented=NotImplemented): + 'Return a <= b. Computed by @total_ordering from (a < b) or (a == b).' + op_result = self.__lt__(other) + return op_result or self == other + +def _ge_from_lt(self, other, NotImplemented=NotImplemented): + 'Return a >= b. Computed by @total_ordering from (not a < b).' + op_result = self.__lt__(other) + if op_result is NotImplemented: + return op_result + return not op_result + +def _ge_from_le(self, other, NotImplemented=NotImplemented): + 'Return a >= b. Computed by @total_ordering from (not a <= b) or (a == b).' + op_result = self.__le__(other) + if op_result is NotImplemented: + return op_result + return not op_result or self == other + +def _lt_from_le(self, other, NotImplemented=NotImplemented): + 'Return a < b. Computed by @total_ordering from (a <= b) and (a != b).' + op_result = self.__le__(other) + if op_result is NotImplemented: + return op_result + return op_result and self != other + +def _gt_from_le(self, other, NotImplemented=NotImplemented): + 'Return a > b. Computed by @total_ordering from (not a <= b).' + op_result = self.__le__(other) + if op_result is NotImplemented: + return op_result + return not op_result + +def _lt_from_gt(self, other, NotImplemented=NotImplemented): + 'Return a < b. Computed by @total_ordering from (not a > b) and (a != b).' + op_result = self.__gt__(other) + if op_result is NotImplemented: + return op_result + return not op_result and self != other + +def _ge_from_gt(self, other, NotImplemented=NotImplemented): + 'Return a >= b. Computed by @total_ordering from (a > b) or (a == b).' + op_result = self.__gt__(other) + return op_result or self == other + +def _le_from_gt(self, other, NotImplemented=NotImplemented): + 'Return a <= b. Computed by @total_ordering from (not a > b).' + op_result = self.__gt__(other) + if op_result is NotImplemented: + return op_result + return not op_result + +def _le_from_ge(self, other, NotImplemented=NotImplemented): + 'Return a <= b. Computed by @total_ordering from (not a >= b) or (a == b).' + op_result = self.__ge__(other) + if op_result is NotImplemented: + return op_result + return not op_result or self == other + +def _gt_from_ge(self, other, NotImplemented=NotImplemented): + 'Return a > b. Computed by @total_ordering from (a >= b) and (a != b).' + op_result = self.__ge__(other) + if op_result is NotImplemented: + return op_result + return op_result and self != other + +def _lt_from_ge(self, other, NotImplemented=NotImplemented): + 'Return a < b. Computed by @total_ordering from (not a >= b).' 
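The derived comparison helpers in this section are wired in by the `@total_ordering` class decorator. A small usage sketch follows, assuming only the documented behaviour; the `Version` class is hypothetical.

```python
from functools import total_ordering

@total_ordering
class Version:
    def __init__(self, major, minor):
        self.major, self.minor = major, minor
    def __eq__(self, other):
        return (self.major, self.minor) == (other.major, other.minor)
    def __lt__(self, other):
        return (self.major, self.minor) < (other.major, other.minor)

# __le__, __gt__ and __ge__ are filled in from __lt__ (and __eq__).
assert Version(1, 4) <= Version(1, 4)
assert Version(2, 0) > Version(1, 9)
assert Version(1, 0) >= Version(0, 9)
```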
+ op_result = self.__ge__(other) + if op_result is NotImplemented: + return op_result + return not op_result + +_convert = { + '__lt__': [('__gt__', _gt_from_lt), + ('__le__', _le_from_lt), + ('__ge__', _ge_from_lt)], + '__le__': [('__ge__', _ge_from_le), + ('__lt__', _lt_from_le), + ('__gt__', _gt_from_le)], + '__gt__': [('__lt__', _lt_from_gt), + ('__ge__', _ge_from_gt), + ('__le__', _le_from_gt)], + '__ge__': [('__le__', _le_from_ge), + ('__gt__', _gt_from_ge), + ('__lt__', _lt_from_ge)] +} + +def total_ordering(cls): + """Class decorator that fills in missing ordering methods""" + # Find user-defined comparisons (not those inherited from object). + roots = {op for op in _convert if getattr(cls, op, None) is not getattr(object, op, None)} + if not roots: + raise ValueError('must define at least one ordering operation: < > <= >=') + root = max(roots) # prefer __lt__ to __le__ to __gt__ to __ge__ + for opname, opfunc in _convert[root]: + if opname not in roots: + opfunc.__name__ = opname + setattr(cls, opname, opfunc) + return cls + + +################################################################################ +### cmp_to_key() function converter +################################################################################ + +def cmp_to_key(mycmp): + """Convert a cmp= function into a key= function""" + class K(object): + __slots__ = ['obj'] + def __init__(self, obj): + self.obj = obj + def __lt__(self, other): + return mycmp(self.obj, other.obj) < 0 + def __gt__(self, other): + return mycmp(self.obj, other.obj) > 0 + def __eq__(self, other): + return mycmp(self.obj, other.obj) == 0 + def __le__(self, other): + return mycmp(self.obj, other.obj) <= 0 + def __ge__(self, other): + return mycmp(self.obj, other.obj) >= 0 + __hash__ = None + return K + +try: + from _functools import cmp_to_key +except ImportError: + pass + + +################################################################################ +### partial() argument application +################################################################################ + +# Purely functional, no descriptor behaviour +class partial: + """New function with partial application of the given arguments + and keywords. 
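A short sketch of `partial` in use, including the flattening of nested partials performed in `__new__` above; the names `parse_binary`, `add` and `add_1_2` are illustrative only.

```python
from functools import partial

# Pre-bind keyword arguments; remaining arguments are supplied at call time.
parse_binary = partial(int, base=2)
assert parse_binary('1010') == 10

# Positional arguments are prepended, and nested partials are flattened.
add = lambda a, b, c: a + b + c
add_1_2 = partial(partial(add, 1), 2)
assert add_1_2.func is add            # a single underlying callable remains
assert add_1_2(3) == 6
```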
+ """ + + __slots__ = "func", "args", "keywords", "__dict__", "__weakref__" + + def __new__(*args, **keywords): + if not args: + raise TypeError("descriptor '__new__' of partial needs an argument") + if len(args) < 2: + raise TypeError("type 'partial' takes at least one argument") + cls, func, *args = args + if not callable(func): + raise TypeError("the first argument must be callable") + args = tuple(args) + + if hasattr(func, "func"): + args = func.args + args + tmpkw = func.keywords.copy() + tmpkw.update(keywords) + keywords = tmpkw + del tmpkw + func = func.func + + self = super(partial, cls).__new__(cls) + + self.func = func + self.args = args + self.keywords = keywords + return self + + def __call__(*args, **keywords): + if not args: + raise TypeError("descriptor '__call__' of partial needs an argument") + self, *args = args + newkeywords = self.keywords.copy() + newkeywords.update(keywords) + return self.func(*self.args, *args, **newkeywords) + + @recursive_repr() + def __repr__(self): + qualname = type(self).__qualname__ + args = [repr(self.func)] + args.extend(repr(x) for x in self.args) + args.extend(f"{k}={v!r}" for (k, v) in self.keywords.items()) + if type(self).__module__ == "functools": + return f"functools.{qualname}({', '.join(args)})" + return f"{qualname}({', '.join(args)})" + + def __reduce__(self): + return type(self), (self.func,), (self.func, self.args, + self.keywords or None, self.__dict__ or None) + + def __setstate__(self, state): + if not isinstance(state, tuple): + raise TypeError("argument to __setstate__ must be a tuple") + if len(state) != 4: + raise TypeError(f"expected 4 items in state, got {len(state)}") + func, args, kwds, namespace = state + if (not callable(func) or not isinstance(args, tuple) or + (kwds is not None and not isinstance(kwds, dict)) or + (namespace is not None and not isinstance(namespace, dict))): + raise TypeError("invalid partial state") + + args = tuple(args) # just in case it's a subclass + if kwds is None: + kwds = {} + elif type(kwds) is not dict: # XXX does it need to be *exactly* dict? + kwds = dict(kwds) + if namespace is None: + namespace = {} + + self.__dict__ = namespace + self.func = func + self.args = args + self.keywords = kwds + +try: + from _functools import partial +except ImportError: + pass + +# Descriptor version +class partialmethod(object): + """Method descriptor with partial application of the given arguments + and keywords. + + Supports wrapping existing descriptors and handles non-descriptor + callables as instance methods. 
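For the descriptor variant, a minimal sketch of `partialmethod` bound inside a class body (the `Cell` class is a made-up example, not code from this patch):

```python
from functools import partialmethod

class Cell:
    def __init__(self):
        self._alive = False

    def set_state(self, state):
        self._alive = bool(state)

    # Bound versions with the `state` argument pre-filled.
    set_alive = partialmethod(set_state, True)
    set_dead = partialmethod(set_state, False)

c = Cell()
c.set_alive()
assert c._alive is True
c.set_dead()
assert c._alive is False
```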
+ """ + + def __init__(self, func, *args, **keywords): + if not callable(func) and not hasattr(func, "__get__"): + raise TypeError("{!r} is not callable or a descriptor" + .format(func)) + + # func could be a descriptor like classmethod which isn't callable, + # so we can't inherit from partial (it verifies func is callable) + if isinstance(func, partialmethod): + # flattening is mandatory in order to place cls/self before all + # other arguments + # it's also more efficient since only one function will be called + self.func = func.func + self.args = func.args + args + self.keywords = func.keywords.copy() + self.keywords.update(keywords) + else: + self.func = func + self.args = args + self.keywords = keywords + + def __repr__(self): + args = ", ".join(map(repr, self.args)) + keywords = ", ".join("{}={!r}".format(k, v) + for k, v in self.keywords.items()) + format_string = "{module}.{cls}({func}, {args}, {keywords})" + return format_string.format(module=self.__class__.__module__, + cls=self.__class__.__qualname__, + func=self.func, + args=args, + keywords=keywords) + + def _make_unbound_method(self): + def _method(*args, **keywords): + call_keywords = self.keywords.copy() + call_keywords.update(keywords) + cls_or_self, *rest = args + call_args = (cls_or_self,) + self.args + tuple(rest) + return self.func(*call_args, **call_keywords) + _method.__isabstractmethod__ = self.__isabstractmethod__ + _method._partialmethod = self + return _method + + def __get__(self, obj, cls): + get = getattr(self.func, "__get__", None) + result = None + if get is not None: + new_func = get(obj, cls) + if new_func is not self.func: + # Assume __get__ returning something new indicates the + # creation of an appropriate callable + result = partial(new_func, *self.args, **self.keywords) + try: + result.__self__ = new_func.__self__ + except AttributeError: + pass + if result is None: + # If the underlying descriptor didn't do anything, treat this + # like an instance method + result = self._make_unbound_method().__get__(obj, cls) + return result + + @property + def __isabstractmethod__(self): + return getattr(self.func, "__isabstractmethod__", False) + + +################################################################################ +### LRU Cache function decorator +################################################################################ + +_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"]) + +class _HashedSeq(list): + """ This class guarantees that hash() will be called no more than once + per element. This is important because the lru_cache() will hash + the key multiple times on a cache miss. + + """ + + __slots__ = 'hashvalue' + + def __init__(self, tup, hash=hash): + self[:] = tup + self.hashvalue = hash(tup) + + def __hash__(self): + return self.hashvalue + +def _make_key(args, kwds, typed, + kwd_mark = (object(),), + fasttypes = {int, str, frozenset, type(None)}, + tuple=tuple, type=type, len=len): + """Make a cache key from optionally typed positional and keyword arguments + + The key is constructed in a way that is flat as possible rather than + as a nested structure that would take more memory. + + If there is only a single argument and its data type is known to cache + its hash value, then that argument is returned without a wrapper. This + saves space and improves lookup speed. + + """ + # All of code below relies on kwds preserving the order input by the user. + # Formerly, we sorted() the kwds before looping. 
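To ground the key-building and caching machinery in something runnable, here is a hedged memoization sketch using the public `lru_cache` API; the `fib` function and the exact hit/miss counts shown are for this example only.

```python
from functools import lru_cache

@lru_cache(maxsize=32)
def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)

fib(10)
info = fib.cache_info()   # expected: CacheInfo(hits=8, misses=11, maxsize=32, currsize=11)
assert info.misses == 11 and info.currsize == 11

fib.cache_clear()
assert fib.cache_info().currsize == 0
```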
The new way is *much* + # faster; however, it means that f(x=1, y=2) will now be treated as a + # distinct call from f(y=2, x=1) which will be cached separately. + key = args + if kwds: + key += kwd_mark + for item in kwds.items(): + key += item + if typed: + key += tuple(type(v) for v in args) + if kwds: + key += tuple(type(v) for v in kwds.values()) + elif len(key) == 1 and type(key[0]) in fasttypes: + return key[0] + return _HashedSeq(key) + +def lru_cache(maxsize=128, typed=False): + """Least-recently-used cache decorator. + + If *maxsize* is set to None, the LRU features are disabled and the cache + can grow without bound. + + If *typed* is True, arguments of different types will be cached separately. + For example, f(3.0) and f(3) will be treated as distinct calls with + distinct results. + + Arguments to the cached function must be hashable. + + View the cache statistics named tuple (hits, misses, maxsize, currsize) + with f.cache_info(). Clear the cache and statistics with f.cache_clear(). + Access the underlying function with f.__wrapped__. + + See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used + + """ + + # Users should only access the lru_cache through its public API: + # cache_info, cache_clear, and f.__wrapped__ + # The internals of the lru_cache are encapsulated for thread safety and + # to allow the implementation to change (including a possible C version). + + # Early detection of an erroneous call to @lru_cache without any arguments + # resulting in the inner function being passed to maxsize instead of an + # integer or None. + if maxsize is not None and not isinstance(maxsize, int): + raise TypeError('Expected maxsize to be an integer or None') + + def decorating_function(user_function): + wrapper = _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo) + return update_wrapper(wrapper, user_function) + + return decorating_function + +def _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo): + # Constants shared by all lru cache instances: + sentinel = object() # unique object used to signal cache misses + make_key = _make_key # build a key from the function arguments + PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields + + cache = {} + hits = misses = 0 + full = False + cache_get = cache.get # bound method to lookup a key or return None + cache_len = cache.__len__ # get cache size without calling len() + lock = RLock() # because linkedlist updates aren't threadsafe + root = [] # root of the circular doubly linked list + root[:] = [root, root, None, None] # initialize by pointing to self + + if maxsize == 0: + + def wrapper(*args, **kwds): + # No caching -- just a statistics update after a successful call + nonlocal misses + result = user_function(*args, **kwds) + misses += 1 + return result + + elif maxsize is None: + + def wrapper(*args, **kwds): + # Simple caching without ordering or size limit + nonlocal hits, misses + key = make_key(args, kwds, typed) + result = cache_get(key, sentinel) + if result is not sentinel: + hits += 1 + return result + result = user_function(*args, **kwds) + cache[key] = result + misses += 1 + return result + + else: + + def wrapper(*args, **kwds): + # Size limited caching that tracks accesses by recency + nonlocal root, hits, misses, full + key = make_key(args, kwds, typed) + with lock: + link = cache_get(key) + if link is not None: + # Move the link to the front of the circular queue + link_prev, link_next, _key, result = link + link_prev[NEXT] = link_next + link_next[PREV] = link_prev + 
last = root[PREV] + last[NEXT] = root[PREV] = link + link[PREV] = last + link[NEXT] = root + hits += 1 + return result + result = user_function(*args, **kwds) + with lock: + if key in cache: + # Getting here means that this same key was added to the + # cache while the lock was released. Since the link + # update is already done, we need only return the + # computed result and update the count of misses. + pass + elif full: + # Use the old root to store the new key and result. + oldroot = root + oldroot[KEY] = key + oldroot[RESULT] = result + # Empty the oldest link and make it the new root. + # Keep a reference to the old key and old result to + # prevent their ref counts from going to zero during the + # update. That will prevent potentially arbitrary object + # clean-up code (i.e. __del__) from running while we're + # still adjusting the links. + root = oldroot[NEXT] + oldkey = root[KEY] + oldresult = root[RESULT] + root[KEY] = root[RESULT] = None + # Now update the cache dictionary. + del cache[oldkey] + # Save the potentially reentrant cache[key] assignment + # for last, after the root and links have been put in + # a consistent state. + cache[key] = oldroot + else: + # Put result in a new link at the front of the queue. + last = root[PREV] + link = [last, root, key, result] + last[NEXT] = root[PREV] = cache[key] = link + # Use the cache_len bound method instead of the len() function + # which could potentially be wrapped in an lru_cache itself. + full = (cache_len() >= maxsize) + misses += 1 + return result + + def cache_info(): + """Report cache statistics""" + with lock: + return _CacheInfo(hits, misses, maxsize, cache_len()) + + def cache_clear(): + """Clear the cache and cache statistics""" + nonlocal hits, misses, full + with lock: + cache.clear() + root[:] = [root, root, None, None] + hits = misses = 0 + full = False + + wrapper.cache_info = cache_info + wrapper.cache_clear = cache_clear + return wrapper + +try: + from _functools import _lru_cache_wrapper +except ImportError: + pass + + +################################################################################ +### singledispatch() - single-dispatch generic function decorator +################################################################################ + +def _c3_merge(sequences): + """Merges MROs in *sequences* to a single MRO using the C3 algorithm. + + Adapted from http://www.python.org/download/releases/2.3/mro/. + + """ + result = [] + while True: + sequences = [s for s in sequences if s] # purge empty sequences + if not sequences: + return result + for s1 in sequences: # find merge candidates among seq heads + candidate = s1[0] + for s2 in sequences: + if candidate in s2[1:]: + candidate = None + break # reject the current head, it appears later + else: + break + if candidate is None: + raise RuntimeError("Inconsistent hierarchy") + result.append(candidate) + # remove the chosen candidate + for seq in sequences: + if seq[0] == candidate: + del seq[0] + +def _c3_mro(cls, abcs=None): + """Computes the method resolution order using extended C3 linearization. + + If no *abcs* are given, the algorithm works exactly like the built-in C3 + linearization used for method resolution. + + If given, *abcs* is a list of abstract base classes that should be inserted + into the resulting MRO. Unrelated ABCs are ignored and don't end up in the + result. The algorithm inserts ABCs where their functionality is introduced, + i.e. 
issubclass(cls, abc) returns True for the class itself but returns + False for all its direct base classes. Implicit ABCs for a given class + (either registered or inferred from the presence of a special method like + __len__) are inserted directly after the last ABC explicitly listed in the + MRO of said class. If two implicit ABCs end up next to each other in the + resulting MRO, their ordering depends on the order of types in *abcs*. + + """ + for i, base in enumerate(reversed(cls.__bases__)): + if hasattr(base, '__abstractmethods__'): + boundary = len(cls.__bases__) - i + break # Bases up to the last explicit ABC are considered first. + else: + boundary = 0 + abcs = list(abcs) if abcs else [] + explicit_bases = list(cls.__bases__[:boundary]) + abstract_bases = [] + other_bases = list(cls.__bases__[boundary:]) + for base in abcs: + if issubclass(cls, base) and not any( + issubclass(b, base) for b in cls.__bases__ + ): + # If *cls* is the class that introduces behaviour described by + # an ABC *base*, insert said ABC to its MRO. + abstract_bases.append(base) + for base in abstract_bases: + abcs.remove(base) + explicit_c3_mros = [_c3_mro(base, abcs=abcs) for base in explicit_bases] + abstract_c3_mros = [_c3_mro(base, abcs=abcs) for base in abstract_bases] + other_c3_mros = [_c3_mro(base, abcs=abcs) for base in other_bases] + return _c3_merge( + [[cls]] + + explicit_c3_mros + abstract_c3_mros + other_c3_mros + + [explicit_bases] + [abstract_bases] + [other_bases] + ) + +def _compose_mro(cls, types): + """Calculates the method resolution order for a given class *cls*. + + Includes relevant abstract base classes (with their respective bases) from + the *types* iterable. Uses a modified C3 linearization algorithm. + + """ + bases = set(cls.__mro__) + # Remove entries which are already present in the __mro__ or unrelated. + def is_related(typ): + return (typ not in bases and hasattr(typ, '__mro__') + and issubclass(cls, typ)) + types = [n for n in types if is_related(n)] + # Remove entries which are strict bases of other entries (they will end up + # in the MRO anyway. + def is_strict_base(typ): + for other in types: + if typ != other and typ in other.__mro__: + return True + return False + types = [n for n in types if not is_strict_base(n)] + # Subclasses of the ABCs in *types* which are also implemented by + # *cls* can be used to stabilize ABC ordering. + type_set = set(types) + mro = [] + for typ in types: + found = [] + for sub in typ.__subclasses__(): + if sub not in bases and issubclass(cls, sub): + found.append([s for s in sub.__mro__ if s in type_set]) + if not found: + mro.append(typ) + continue + # Favor subclasses with the biggest number of useful bases + found.sort(key=len, reverse=True) + for sub in found: + for subcls in sub: + if subcls not in mro: + mro.append(subcls) + return _c3_mro(cls, abcs=mro) + +def _find_impl(cls, registry): + """Returns the best matching implementation from *registry* for type *cls*. + + Where there is no registered implementation for a specific type, its method + resolution order is used to find a more generic implementation. + + Note: if *registry* does not contain an implementation for the base + *object* type, this function may return None. + + """ + mro = _compose_mro(cls, registry.keys()) + match = None + for t in mro: + if match is not None: + # If *match* is an implicit ABC but there is another unrelated, + # equally matching implicit ABC, refuse the temptation to guess. 
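A small sketch of how registration and dispatch fit together from the caller's side, assuming only the public `singledispatch` API; the `describe` function and its implementations are invented for illustration.

```python
from functools import singledispatch

@singledispatch
def describe(obj):
    return 'object: {!r}'.format(obj)

@describe.register(int)
def _(obj):
    return 'int: {:d}'.format(obj)

@describe.register(list)
def _(obj):
    return 'list of {} items'.format(len(obj))

assert describe(3) == 'int: 3'
assert describe([1, 2]) == 'list of 2 items'
assert describe('x') == "object: 'x'"                      # default implementation
assert describe.dispatch(bool) is describe.registry[int]   # bool resolves to int via the MRO
```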
+ if (t in registry and t not in cls.__mro__ + and match not in cls.__mro__ + and not issubclass(match, t)): + raise RuntimeError("Ambiguous dispatch: {} or {}".format( + match, t)) + break + if t in registry: + match = t + return registry.get(match) + +def singledispatch(func): + """Single-dispatch generic function decorator. + + Transforms a function into a generic function, which can have different + behaviours depending upon the type of its first argument. The decorated + function acts as the default implementation, and additional + implementations can be registered using the register() attribute of the + generic function. + """ + # There are many programs that use functools without singledispatch, so we + # trade-off making singledispatch marginally slower for the benefit of + # making start-up of such applications slightly faster. + import types, weakref + + registry = {} + dispatch_cache = weakref.WeakKeyDictionary() + cache_token = None + + def dispatch(cls): + """generic_func.dispatch(cls) -> + + Runs the dispatch algorithm to return the best available implementation + for the given *cls* registered on *generic_func*. + + """ + nonlocal cache_token + if cache_token is not None: + current_token = get_cache_token() + if cache_token != current_token: + dispatch_cache.clear() + cache_token = current_token + try: + impl = dispatch_cache[cls] + except KeyError: + try: + impl = registry[cls] + except KeyError: + impl = _find_impl(cls, registry) + dispatch_cache[cls] = impl + return impl + + def register(cls, func=None): + """generic_func.register(cls, func) -> func + + Registers a new implementation for the given *cls* on a *generic_func*. + + """ + nonlocal cache_token + if func is None: + if isinstance(cls, type): + return lambda f: register(cls, f) + ann = getattr(cls, '__annotations__', {}) + if not ann: + raise TypeError( + f"Invalid first argument to `register()`: {cls!r}. " + f"Use either `@register(some_class)` or plain `@register` " + f"on an annotated function." + ) + func = cls + + # only import typing if annotation parsing is necessary + from typing import get_type_hints + argname, cls = next(iter(get_type_hints(func).items())) + assert isinstance(cls, type), ( + f"Invalid annotation for {argname!r}. {cls!r} is not a class." + ) + registry[cls] = func + if cache_token is None and hasattr(cls, '__abstractmethods__'): + cache_token = get_cache_token() + dispatch_cache.clear() + return func + + def wrapper(*args, **kw): + return dispatch(args[0].__class__)(*args, **kw) + + registry[object] = func + wrapper.register = register + wrapper.dispatch = dispatch + wrapper.registry = types.MappingProxyType(registry) + wrapper._clear_cache = dispatch_cache.clear + update_wrapper(wrapper, func) + return wrapper diff --git a/venv/Lib/genericpath.py b/venv/Lib/genericpath.py new file mode 100644 index 00000000..303b3b34 --- /dev/null +++ b/venv/Lib/genericpath.py @@ -0,0 +1,151 @@ +""" +Path operations common to more than one OS +Do not use directly. The OS specific modules import the appropriate +functions from this module themselves. +""" +import os +import stat + +__all__ = ['commonprefix', 'exists', 'getatime', 'getctime', 'getmtime', + 'getsize', 'isdir', 'isfile', 'samefile', 'sameopenfile', + 'samestat'] + + +# Does a path exist? +# This is false for dangling symbolic links on systems that support them. +def exists(path): + """Test whether a path exists. 
Returns False for broken symbolic links""" + try: + os.stat(path) + except OSError: + return False + return True + + +# This follows symbolic links, so both islink() and isdir() can be true +# for the same path on systems that support symlinks +def isfile(path): + """Test whether a path is a regular file""" + try: + st = os.stat(path) + except OSError: + return False + return stat.S_ISREG(st.st_mode) + + +# Is a path a directory? +# This follows symbolic links, so both islink() and isdir() +# can be true for the same path on systems that support symlinks +def isdir(s): + """Return true if the pathname refers to an existing directory.""" + try: + st = os.stat(s) + except OSError: + return False + return stat.S_ISDIR(st.st_mode) + + +def getsize(filename): + """Return the size of a file, reported by os.stat().""" + return os.stat(filename).st_size + + +def getmtime(filename): + """Return the last modification time of a file, reported by os.stat().""" + return os.stat(filename).st_mtime + + +def getatime(filename): + """Return the last access time of a file, reported by os.stat().""" + return os.stat(filename).st_atime + + +def getctime(filename): + """Return the metadata change time of a file, reported by os.stat().""" + return os.stat(filename).st_ctime + + +# Return the longest prefix of all list elements. +def commonprefix(m): + "Given a list of pathnames, returns the longest common leading component" + if not m: return '' + # Some people pass in a list of pathname parts to operate in an OS-agnostic + # fashion; don't try to translate in that case as that's an abuse of the + # API and they are already doing what they need to be OS-agnostic and so + # they most likely won't be using an os.PathLike object in the sublists. + if not isinstance(m[0], (list, tuple)): + m = tuple(map(os.fspath, m)) + s1 = min(m) + s2 = max(m) + for i, c in enumerate(s1): + if c != s2[i]: + return s1[:i] + return s1 + +# Are two stat buffers (obtained from stat, fstat or lstat) +# describing the same file? +def samestat(s1, s2): + """Test whether two stat buffers reference the same file""" + return (s1.st_ino == s2.st_ino and + s1.st_dev == s2.st_dev) + + +# Are two filenames really pointing to the same file? +def samefile(f1, f2): + """Test whether two pathnames reference the same actual file""" + s1 = os.stat(f1) + s2 = os.stat(f2) + return samestat(s1, s2) + + +# Are two open files really referencing the same file? +# (Not necessarily the same file descriptor!) +def sameopenfile(fp1, fp2): + """Test whether two open file objects reference the same file""" + s1 = os.fstat(fp1) + s2 = os.fstat(fp2) + return samestat(s1, s2) + + +# Split a path in root and extension. +# The extension is everything starting at the last dot in the last +# pathname component; the root is everything before that. +# It is always true that root + ext == p. + +# Generic implementation of splitext, to be parametrized with +# the separators +def _splitext(p, sep, altsep, extsep): + """Split the extension from a pathname. + + Extension is everything from the last dot to the end, ignoring + leading dots. Returns "(root, ext)"; ext may be empty.""" + # NOTE: This code must work for text and bytes strings. 
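The `_splitext` helper here is what the public `os.path.splitext` wrappers are parametrized from; a brief behavioural sketch of that public API (paths chosen arbitrarily for illustration):

```python
import os.path

# The extension starts at the last dot of the final path component...
assert os.path.splitext('archive.tar.gz') == ('archive.tar', '.gz')

# ...but leading dots are skipped, so hidden files keep their full name as the root.
assert os.path.splitext('.bashrc') == ('.bashrc', '')
assert os.path.splitext('/tmp/.hidden.txt') == ('/tmp/.hidden', '.txt')

# Invariant stated above: root + ext always reconstructs the original path.
root, ext = os.path.splitext('report.final.pdf')
assert root + ext == 'report.final.pdf'
```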
+ + sepIndex = p.rfind(sep) + if altsep: + altsepIndex = p.rfind(altsep) + sepIndex = max(sepIndex, altsepIndex) + + dotIndex = p.rfind(extsep) + if dotIndex > sepIndex: + # skip all leading dots + filenameIndex = sepIndex + 1 + while filenameIndex < dotIndex: + if p[filenameIndex:filenameIndex+1] != extsep: + return p[:dotIndex], p[dotIndex:] + filenameIndex += 1 + + return p, p[:0] + +def _check_arg_types(funcname, *args): + hasstr = hasbytes = False + for s in args: + if isinstance(s, str): + hasstr = True + elif isinstance(s, bytes): + hasbytes = True + else: + raise TypeError('%s() argument must be str or bytes, not %r' % + (funcname, s.__class__.__name__)) from None + if hasstr and hasbytes: + raise TypeError("Can't mix strings and bytes in path components") from None diff --git a/venv/Lib/hashlib.py b/venv/Lib/hashlib.py new file mode 100644 index 00000000..e30c6100 --- /dev/null +++ b/venv/Lib/hashlib.py @@ -0,0 +1,251 @@ +#. Copyright (C) 2005-2010 Gregory P. Smith (greg@krypto.org) +# Licensed to PSF under a Contributor Agreement. +# + +__doc__ = """hashlib module - A common interface to many hash functions. + +new(name, data=b'', **kwargs) - returns a new hash object implementing the + given hash function; initializing the hash + using the given binary data. + +Named constructor functions are also available, these are faster +than using new(name): + +md5(), sha1(), sha224(), sha256(), sha384(), sha512(), blake2b(), blake2s(), +sha3_224, sha3_256, sha3_384, sha3_512, shake_128, and shake_256. + +More algorithms may be available on your platform but the above are guaranteed +to exist. See the algorithms_guaranteed and algorithms_available attributes +to find out what algorithm names can be passed to new(). + +NOTE: If you want the adler32 or crc32 hash functions they are available in +the zlib module. + +Choose your hash function wisely. Some have known collision weaknesses. +sha384 and sha512 will be slow on 32 bit platforms. + +Hash objects have these methods: + - update(arg): Update the hash object with the bytes in arg. Repeated calls + are equivalent to a single call with the concatenation of all + the arguments. + - digest(): Return the digest of the bytes passed to the update() method + so far. + - hexdigest(): Like digest() except the digest is returned as a unicode + object of double length, containing only hexadecimal digits. + - copy(): Return a copy (clone) of the hash object. This can be used to + efficiently compute the digests of strings that share a common + initial substring. + +For example, to obtain the digest of the string 'Nobody inspects the +spammish repetition': + + >>> import hashlib + >>> m = hashlib.md5() + >>> m.update(b"Nobody inspects") + >>> m.update(b" the spammish repetition") + >>> m.digest() + b'\\xbbd\\x9c\\x83\\xdd\\x1e\\xa5\\xc9\\xd9\\xde\\xc9\\xa1\\x8d\\xf0\\xff\\xe9' + +More condensed: + + >>> hashlib.sha224(b"Nobody inspects the spammish repetition").hexdigest() + 'a4337bc45a8fc544c03f52dc550cd6e1e87021bc896588bd79e901e2' + +""" + +# This tuple and __get_builtin_constructor() must be modified if a new +# always available algorithm is added. 
+__always_supported = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512', + 'blake2b', 'blake2s', + 'sha3_224', 'sha3_256', 'sha3_384', 'sha3_512', + 'shake_128', 'shake_256') + + +algorithms_guaranteed = set(__always_supported) +algorithms_available = set(__always_supported) + +__all__ = __always_supported + ('new', 'algorithms_guaranteed', + 'algorithms_available', 'pbkdf2_hmac') + + +__builtin_constructor_cache = {} + +def __get_builtin_constructor(name): + cache = __builtin_constructor_cache + constructor = cache.get(name) + if constructor is not None: + return constructor + try: + if name in ('SHA1', 'sha1'): + import _sha1 + cache['SHA1'] = cache['sha1'] = _sha1.sha1 + elif name in ('MD5', 'md5'): + import _md5 + cache['MD5'] = cache['md5'] = _md5.md5 + elif name in ('SHA256', 'sha256', 'SHA224', 'sha224'): + import _sha256 + cache['SHA224'] = cache['sha224'] = _sha256.sha224 + cache['SHA256'] = cache['sha256'] = _sha256.sha256 + elif name in ('SHA512', 'sha512', 'SHA384', 'sha384'): + import _sha512 + cache['SHA384'] = cache['sha384'] = _sha512.sha384 + cache['SHA512'] = cache['sha512'] = _sha512.sha512 + elif name in ('blake2b', 'blake2s'): + import _blake2 + cache['blake2b'] = _blake2.blake2b + cache['blake2s'] = _blake2.blake2s + elif name in {'sha3_224', 'sha3_256', 'sha3_384', 'sha3_512', + 'shake_128', 'shake_256'}: + import _sha3 + cache['sha3_224'] = _sha3.sha3_224 + cache['sha3_256'] = _sha3.sha3_256 + cache['sha3_384'] = _sha3.sha3_384 + cache['sha3_512'] = _sha3.sha3_512 + cache['shake_128'] = _sha3.shake_128 + cache['shake_256'] = _sha3.shake_256 + except ImportError: + pass # no extension module, this hash is unsupported. + + constructor = cache.get(name) + if constructor is not None: + return constructor + + raise ValueError('unsupported hash type ' + name) + + +def __get_openssl_constructor(name): + if name in {'blake2b', 'blake2s'}: + # Prefer our blake2 implementation. + return __get_builtin_constructor(name) + try: + f = getattr(_hashlib, 'openssl_' + name) + # Allow the C module to raise ValueError. The function will be + # defined but the hash not actually available thanks to OpenSSL. + f() + # Use the C function directly (very fast) + return f + except (AttributeError, ValueError): + return __get_builtin_constructor(name) + + +def __py_new(name, data=b'', **kwargs): + """new(name, data=b'', **kwargs) - Return a new hashing object using the + named algorithm; optionally initialized with data (which must be bytes). + """ + return __get_builtin_constructor(name)(data, **kwargs) + + +def __hash_new(name, data=b'', **kwargs): + """new(name, data=b'') - Return a new hashing object using the named algorithm; + optionally initialized with data (which must be bytes). + """ + if name in {'blake2b', 'blake2s'}: + # Prefer our blake2 implementation. + # OpenSSL 1.1.0 comes with a limited implementation of blake2b/s. + # It does neither support keyed blake2 nor advanced features like + # salt, personal, tree hashing or SSE. + return __get_builtin_constructor(name)(data, **kwargs) + try: + return _hashlib.new(name, data) + except ValueError: + # If the _hashlib module (OpenSSL) doesn't support the named + # hash, try using our builtin implementations. + # This allows for SHA224/256 and SHA384/512 support even though + # the OpenSSL library prior to 0.9.8 doesn't provide them. 
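+        # For example, hashlib.new('sha3_256', b'data') keeps working on a
+        # build whose OpenSSL lacks SHA-3: the ValueError is caught here and
+        # the bundled _sha3 constructor is used instead.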
+ return __get_builtin_constructor(name)(data) + + +try: + import _hashlib + new = __hash_new + __get_hash = __get_openssl_constructor + algorithms_available = algorithms_available.union( + _hashlib.openssl_md_meth_names) +except ImportError: + new = __py_new + __get_hash = __get_builtin_constructor + +try: + # OpenSSL's PKCS5_PBKDF2_HMAC requires OpenSSL 1.0+ with HMAC and SHA + from _hashlib import pbkdf2_hmac +except ImportError: + _trans_5C = bytes((x ^ 0x5C) for x in range(256)) + _trans_36 = bytes((x ^ 0x36) for x in range(256)) + + def pbkdf2_hmac(hash_name, password, salt, iterations, dklen=None): + """Password based key derivation function 2 (PKCS #5 v2.0) + + This Python implementations based on the hmac module about as fast + as OpenSSL's PKCS5_PBKDF2_HMAC for short passwords and much faster + for long passwords. + """ + if not isinstance(hash_name, str): + raise TypeError(hash_name) + + if not isinstance(password, (bytes, bytearray)): + password = bytes(memoryview(password)) + if not isinstance(salt, (bytes, bytearray)): + salt = bytes(memoryview(salt)) + + # Fast inline HMAC implementation + inner = new(hash_name) + outer = new(hash_name) + blocksize = getattr(inner, 'block_size', 64) + if len(password) > blocksize: + password = new(hash_name, password).digest() + password = password + b'\x00' * (blocksize - len(password)) + inner.update(password.translate(_trans_36)) + outer.update(password.translate(_trans_5C)) + + def prf(msg, inner=inner, outer=outer): + # PBKDF2_HMAC uses the password as key. We can re-use the same + # digest objects and just update copies to skip initialization. + icpy = inner.copy() + ocpy = outer.copy() + icpy.update(msg) + ocpy.update(icpy.digest()) + return ocpy.digest() + + if iterations < 1: + raise ValueError(iterations) + if dklen is None: + dklen = outer.digest_size + if dklen < 1: + raise ValueError(dklen) + + dkey = b'' + loop = 1 + from_bytes = int.from_bytes + while len(dkey) < dklen: + prev = prf(salt + loop.to_bytes(4, 'big')) + # endianness doesn't matter here as long to / from use the same + rkey = int.from_bytes(prev, 'big') + for i in range(iterations - 1): + prev = prf(prev) + # rkey = rkey ^ prev + rkey ^= from_bytes(prev, 'big') + loop += 1 + dkey += rkey.to_bytes(inner.digest_size, 'big') + + return dkey[:dklen] + +try: + # OpenSSL's scrypt requires OpenSSL 1.1+ + from _hashlib import scrypt +except ImportError: + pass + + +for __func_name in __always_supported: + # try them all, some may not work due to the OpenSSL + # version not supporting that algorithm. + try: + globals()[__func_name] = __get_hash(__func_name) + except ValueError: + import logging + logging.exception('code for hash %s was not found.', __func_name) + + +# Cleanup locals() +del __always_supported, __func_name, __get_hash +del __py_new, __hash_new, __get_openssl_constructor diff --git a/venv/Lib/heapq.py b/venv/Lib/heapq.py new file mode 100644 index 00000000..b31f4186 --- /dev/null +++ b/venv/Lib/heapq.py @@ -0,0 +1,607 @@ +"""Heap queue algorithm (a.k.a. priority queue). + +Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for +all k, counting elements from 0. For the sake of comparison, +non-existing elements are considered to be infinite. The interesting +property of a heap is that a[0] is always its smallest element. 
+ +Usage: + +heap = [] # creates an empty heap +heappush(heap, item) # pushes a new item on the heap +item = heappop(heap) # pops the smallest item from the heap +item = heap[0] # smallest item on the heap without popping it +heapify(x) # transforms list into a heap, in-place, in linear time +item = heapreplace(heap, item) # pops and returns smallest item, and adds + # new item; the heap size is unchanged + +Our API differs from textbook heap algorithms as follows: + +- We use 0-based indexing. This makes the relationship between the + index for a node and the indexes for its children slightly less + obvious, but is more suitable since Python uses 0-based indexing. + +- Our heappop() method returns the smallest item, not the largest. + +These two make it possible to view the heap as a regular Python list +without surprises: heap[0] is the smallest item, and heap.sort() +maintains the heap invariant! +""" + +# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger + +__about__ = """Heap queues + +[explanation by François Pinard] + +Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for +all k, counting elements from 0. For the sake of comparison, +non-existing elements are considered to be infinite. The interesting +property of a heap is that a[0] is always its smallest element. + +The strange invariant above is meant to be an efficient memory +representation for a tournament. The numbers below are `k', not a[k]: + + 0 + + 1 2 + + 3 4 5 6 + + 7 8 9 10 11 12 13 14 + + 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 + + +In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In +a usual binary tournament we see in sports, each cell is the winner +over the two cells it tops, and we can trace the winner down the tree +to see all opponents s/he had. However, in many computer applications +of such tournaments, we do not need to trace the history of a winner. +To be more memory efficient, when a winner is promoted, we try to +replace it by something else at a lower level, and the rule becomes +that a cell and the two cells it tops contain three different items, +but the top cell "wins" over the two topped cells. + +If this heap invariant is protected at all time, index 0 is clearly +the overall winner. The simplest algorithmic way to remove it and +find the "next" winner is to move some loser (let's say cell 30 in the +diagram above) into the 0 position, and then percolate this new 0 down +the tree, exchanging values, until the invariant is re-established. +This is clearly logarithmic on the total number of items in the tree. +By iterating over all items, you get an O(n ln n) sort. + +A nice feature of this sort is that you can efficiently insert new +items while the sort is going on, provided that the inserted items are +not "better" than the last 0'th element you extracted. This is +especially useful in simulation contexts, where the tree holds all +incoming events, and the "win" condition means the smallest scheduled +time. When an event schedule other events for execution, they are +scheduled into the future, so they can easily go into the heap. So, a +heap is a good structure for implementing schedulers (this is what I +used for my MIDI sequencer :-). + +Various structures for implementing schedulers have been extensively +studied, and heaps are good for this, as they are reasonably speedy, +the speed is almost constant, and the worst case is not much different +than the average case. 
However, there are other representations which +are more efficient overall, yet the worst cases might be terrible. + +Heaps are also very useful in big disk sorts. You most probably all +know that a big sort implies producing "runs" (which are pre-sorted +sequences, which size is usually related to the amount of CPU memory), +followed by a merging passes for these runs, which merging is often +very cleverly organised[1]. It is very important that the initial +sort produces the longest runs possible. Tournaments are a good way +to that. If, using all the memory available to hold a tournament, you +replace and percolate items that happen to fit the current run, you'll +produce runs which are twice the size of the memory for random input, +and much better for input fuzzily ordered. + +Moreover, if you output the 0'th item on disk and get an input which +may not fit in the current tournament (because the value "wins" over +the last output value), it cannot fit in the heap, so the size of the +heap decreases. The freed memory could be cleverly reused immediately +for progressively building a second heap, which grows at exactly the +same rate the first heap is melting. When the first heap completely +vanishes, you switch heaps and start a new run. Clever and quite +effective! + +In a word, heaps are useful memory structures to know. I use them in +a few applications, and I think it is good to keep a `heap' module +around. :-) + +-------------------- +[1] The disk balancing algorithms which are current, nowadays, are +more annoying than clever, and this is a consequence of the seeking +capabilities of the disks. On devices which cannot seek, like big +tape drives, the story was quite different, and one had to be very +clever to ensure (far in advance) that each tape movement will be the +most effective possible (that is, will best participate at +"progressing" the merge). Some tapes were even able to read +backwards, and this was also used to avoid the rewinding time. +Believe me, real good tape sorts were quite spectacular to watch! +From all times, sorting has always been a Great Art! :-) +""" + +__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge', + 'nlargest', 'nsmallest', 'heappushpop'] + +def heappush(heap, item): + """Push item onto heap, maintaining the heap invariant.""" + heap.append(item) + _siftdown(heap, 0, len(heap)-1) + +def heappop(heap): + """Pop the smallest item off the heap, maintaining the heap invariant.""" + lastelt = heap.pop() # raises appropriate IndexError if heap is empty + if heap: + returnitem = heap[0] + heap[0] = lastelt + _siftup(heap, 0) + return returnitem + return lastelt + +def heapreplace(heap, item): + """Pop and return the current smallest value, and add the new item. + + This is more efficient than heappop() followed by heappush(), and can be + more appropriate when using a fixed-size heap. Note that the value + returned may be larger than item! That constrains reasonable uses of + this routine unless written as part of a conditional replacement: + + if item > heap[0]: + item = heapreplace(heap, item) + """ + returnitem = heap[0] # raises appropriate IndexError if heap is empty + heap[0] = item + _siftup(heap, 0) + return returnitem + +def heappushpop(heap, item): + """Fast version of a heappush followed by a heappop.""" + if heap and heap[0] < item: + item, heap[0] = heap[0], item + _siftup(heap, 0) + return item + +def heapify(x): + """Transform list into a heap, in-place, in O(len(x)) time.""" + n = len(x) + # Transform bottom-up. 
The largest index there's any point to looking at + # is the largest with a child index in-range, so must have 2*i + 1 < n, + # or i < (n-1)/2. If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so + # j-1 is the largest, which is n//2 - 1. If n is odd = 2*j+1, this is + # (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1. + for i in reversed(range(n//2)): + _siftup(x, i) + +def _heappop_max(heap): + """Maxheap version of a heappop.""" + lastelt = heap.pop() # raises appropriate IndexError if heap is empty + if heap: + returnitem = heap[0] + heap[0] = lastelt + _siftup_max(heap, 0) + return returnitem + return lastelt + +def _heapreplace_max(heap, item): + """Maxheap version of a heappop followed by a heappush.""" + returnitem = heap[0] # raises appropriate IndexError if heap is empty + heap[0] = item + _siftup_max(heap, 0) + return returnitem + +def _heapify_max(x): + """Transform list into a maxheap, in-place, in O(len(x)) time.""" + n = len(x) + for i in reversed(range(n//2)): + _siftup_max(x, i) + +# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos +# is the index of a leaf with a possibly out-of-order value. Restore the +# heap invariant. +def _siftdown(heap, startpos, pos): + newitem = heap[pos] + # Follow the path to the root, moving parents down until finding a place + # newitem fits. + while pos > startpos: + parentpos = (pos - 1) >> 1 + parent = heap[parentpos] + if newitem < parent: + heap[pos] = parent + pos = parentpos + continue + break + heap[pos] = newitem + +# The child indices of heap index pos are already heaps, and we want to make +# a heap at index pos too. We do this by bubbling the smaller child of +# pos up (and so on with that child's children, etc) until hitting a leaf, +# then using _siftdown to move the oddball originally at index pos into place. +# +# We *could* break out of the loop as soon as we find a pos where newitem <= +# both its children, but turns out that's not a good idea, and despite that +# many books write the algorithm that way. During a heap pop, the last array +# element is sifted in, and that tends to be large, so that comparing it +# against values starting from the root usually doesn't pay (= usually doesn't +# get us out of the loop early). See Knuth, Volume 3, where this is +# explained and quantified in an exercise. +# +# Cutting the # of comparisons is important, since these routines have no +# way to extract "the priority" from an array element, so that intelligence +# is likely to be hiding in custom comparison methods, or in array elements +# storing (priority, record) tuples. Comparisons are thus potentially +# expensive. +# +# On random arrays of length 1000, making this change cut the number of +# comparisons made by heapify() a little, and those made by exhaustive +# heappop() a lot, in accord with theory. Here are typical results from 3 +# runs (3 just to demonstrate how small the variance is): +# +# Compares needed by heapify Compares needed by 1000 heappops +# -------------------------- -------------------------------- +# 1837 cut to 1663 14996 cut to 8680 +# 1855 cut to 1659 14966 cut to 8678 +# 1847 cut to 1660 15024 cut to 8703 +# +# Building the heap by using heappush() 1000 times instead required +# 2198, 2148, and 2219 compares: heapify() is more efficient, when +# you can use it. +# +# The total compares needed by list.sort() on the same lists were 8627, +# 8627, and 8632 (this should be compared to the sum of heapify() and +# heappop() compares): list.sort() is (unsurprisingly!) 
more efficient +# for sorting. + +def _siftup(heap, pos): + endpos = len(heap) + startpos = pos + newitem = heap[pos] + # Bubble up the smaller child until hitting a leaf. + childpos = 2*pos + 1 # leftmost child position + while childpos < endpos: + # Set childpos to index of smaller child. + rightpos = childpos + 1 + if rightpos < endpos and not heap[childpos] < heap[rightpos]: + childpos = rightpos + # Move the smaller child up. + heap[pos] = heap[childpos] + pos = childpos + childpos = 2*pos + 1 + # The leaf at pos is empty now. Put newitem there, and bubble it up + # to its final resting place (by sifting its parents down). + heap[pos] = newitem + _siftdown(heap, startpos, pos) + +def _siftdown_max(heap, startpos, pos): + 'Maxheap variant of _siftdown' + newitem = heap[pos] + # Follow the path to the root, moving parents down until finding a place + # newitem fits. + while pos > startpos: + parentpos = (pos - 1) >> 1 + parent = heap[parentpos] + if parent < newitem: + heap[pos] = parent + pos = parentpos + continue + break + heap[pos] = newitem + +def _siftup_max(heap, pos): + 'Maxheap variant of _siftup' + endpos = len(heap) + startpos = pos + newitem = heap[pos] + # Bubble up the larger child until hitting a leaf. + childpos = 2*pos + 1 # leftmost child position + while childpos < endpos: + # Set childpos to index of larger child. + rightpos = childpos + 1 + if rightpos < endpos and not heap[rightpos] < heap[childpos]: + childpos = rightpos + # Move the larger child up. + heap[pos] = heap[childpos] + pos = childpos + childpos = 2*pos + 1 + # The leaf at pos is empty now. Put newitem there, and bubble it up + # to its final resting place (by sifting its parents down). + heap[pos] = newitem + _siftdown_max(heap, startpos, pos) + +def merge(*iterables, key=None, reverse=False): + '''Merge multiple sorted inputs into a single sorted output. + + Similar to sorted(itertools.chain(*iterables)) but returns a generator, + does not pull the data into memory all at once, and assumes that each of + the input streams is already sorted (smallest to largest). + + >>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25])) + [0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25] + + If *key* is not None, applies a key function to each element to determine + its sort order. 
+ + >>> list(merge(['dog', 'horse'], ['cat', 'fish', 'kangaroo'], key=len)) + ['dog', 'cat', 'fish', 'horse', 'kangaroo'] + + ''' + + h = [] + h_append = h.append + + if reverse: + _heapify = _heapify_max + _heappop = _heappop_max + _heapreplace = _heapreplace_max + direction = -1 + else: + _heapify = heapify + _heappop = heappop + _heapreplace = heapreplace + direction = 1 + + if key is None: + for order, it in enumerate(map(iter, iterables)): + try: + next = it.__next__ + h_append([next(), order * direction, next]) + except StopIteration: + pass + _heapify(h) + while len(h) > 1: + try: + while True: + value, order, next = s = h[0] + yield value + s[0] = next() # raises StopIteration when exhausted + _heapreplace(h, s) # restore heap condition + except StopIteration: + _heappop(h) # remove empty iterator + if h: + # fast case when only a single iterator remains + value, order, next = h[0] + yield value + yield from next.__self__ + return + + for order, it in enumerate(map(iter, iterables)): + try: + next = it.__next__ + value = next() + h_append([key(value), order * direction, value, next]) + except StopIteration: + pass + _heapify(h) + while len(h) > 1: + try: + while True: + key_value, order, value, next = s = h[0] + yield value + value = next() + s[0] = key(value) + s[2] = value + _heapreplace(h, s) + except StopIteration: + _heappop(h) + if h: + key_value, order, value, next = h[0] + yield value + yield from next.__self__ + + +# Algorithm notes for nlargest() and nsmallest() +# ============================================== +# +# Make a single pass over the data while keeping the k most extreme values +# in a heap. Memory consumption is limited to keeping k values in a list. +# +# Measured performance for random inputs: +# +# number of comparisons +# n inputs k-extreme values (average of 5 trials) % more than min() +# ------------- ---------------- --------------------- ----------------- +# 1,000 100 3,317 231.7% +# 10,000 100 14,046 40.5% +# 100,000 100 105,749 5.7% +# 1,000,000 100 1,007,751 0.8% +# 10,000,000 100 10,009,401 0.1% +# +# Theoretical number of comparisons for k smallest of n random inputs: +# +# Step Comparisons Action +# ---- -------------------------- --------------------------- +# 1 1.66 * k heapify the first k-inputs +# 2 n - k compare remaining elements to top of heap +# 3 k * (1 + lg2(k)) * ln(n/k) replace the topmost value on the heap +# 4 k * lg2(k) - (k/2) final sort of the k most extreme values +# +# Combining and simplifying for a rough estimate gives: +# +# comparisons = n + k * (log(k, 2) * log(n/k) + log(k, 2) + log(n/k)) +# +# Computing the number of comparisons for step 3: +# ----------------------------------------------- +# * For the i-th new value from the iterable, the probability of being in the +# k most extreme values is k/i. For example, the probability of the 101st +# value seen being in the 100 most extreme values is 100/101. +# * If the value is a new extreme value, the cost of inserting it into the +# heap is 1 + log(k, 2). 
+# * The probability times the cost gives: +# (k/i) * (1 + log(k, 2)) +# * Summing across the remaining n-k elements gives: +# sum((k/i) * (1 + log(k, 2)) for i in range(k+1, n+1)) +# * This reduces to: +# (H(n) - H(k)) * k * (1 + log(k, 2)) +# * Where H(n) is the n-th harmonic number estimated by: +# gamma = 0.5772156649 +# H(n) = log(n, e) + gamma + 1 / (2 * n) +# http://en.wikipedia.org/wiki/Harmonic_series_(mathematics)#Rate_of_divergence +# * Substituting the H(n) formula: +# comparisons = k * (1 + log(k, 2)) * (log(n/k, e) + (1/n - 1/k) / 2) +# +# Worst-case for step 3: +# ---------------------- +# In the worst case, the input data is reversed sorted so that every new element +# must be inserted in the heap: +# +# comparisons = 1.66 * k + log(k, 2) * (n - k) +# +# Alternative Algorithms +# ---------------------- +# Other algorithms were not used because they: +# 1) Took much more auxiliary memory, +# 2) Made multiple passes over the data. +# 3) Made more comparisons in common cases (small k, large n, semi-random input). +# See the more detailed comparison of approach at: +# http://code.activestate.com/recipes/577573-compare-algorithms-for-heapqsmallest + +def nsmallest(n, iterable, key=None): + """Find the n smallest elements in a dataset. + + Equivalent to: sorted(iterable, key=key)[:n] + """ + + # Short-cut for n==1 is to use min() + if n == 1: + it = iter(iterable) + sentinel = object() + if key is None: + result = min(it, default=sentinel) + else: + result = min(it, default=sentinel, key=key) + return [] if result is sentinel else [result] + + # When n>=size, it's faster to use sorted() + try: + size = len(iterable) + except (TypeError, AttributeError): + pass + else: + if n >= size: + return sorted(iterable, key=key)[:n] + + # When key is none, use simpler decoration + if key is None: + it = iter(iterable) + # put the range(n) first so that zip() doesn't + # consume one too many elements from the iterator + result = [(elem, i) for i, elem in zip(range(n), it)] + if not result: + return result + _heapify_max(result) + top = result[0][0] + order = n + _heapreplace = _heapreplace_max + for elem in it: + if elem < top: + _heapreplace(result, (elem, order)) + top, _order = result[0] + order += 1 + result.sort() + return [elem for (elem, order) in result] + + # General case, slowest method + it = iter(iterable) + result = [(key(elem), i, elem) for i, elem in zip(range(n), it)] + if not result: + return result + _heapify_max(result) + top = result[0][0] + order = n + _heapreplace = _heapreplace_max + for elem in it: + k = key(elem) + if k < top: + _heapreplace(result, (k, order, elem)) + top, _order, _elem = result[0] + order += 1 + result.sort() + return [elem for (k, order, elem) in result] + +def nlargest(n, iterable, key=None): + """Find the n largest elements in a dataset. 
+ + Equivalent to: sorted(iterable, key=key, reverse=True)[:n] + """ + + # Short-cut for n==1 is to use max() + if n == 1: + it = iter(iterable) + sentinel = object() + if key is None: + result = max(it, default=sentinel) + else: + result = max(it, default=sentinel, key=key) + return [] if result is sentinel else [result] + + # When n>=size, it's faster to use sorted() + try: + size = len(iterable) + except (TypeError, AttributeError): + pass + else: + if n >= size: + return sorted(iterable, key=key, reverse=True)[:n] + + # When key is none, use simpler decoration + if key is None: + it = iter(iterable) + result = [(elem, i) for i, elem in zip(range(0, -n, -1), it)] + if not result: + return result + heapify(result) + top = result[0][0] + order = -n + _heapreplace = heapreplace + for elem in it: + if top < elem: + _heapreplace(result, (elem, order)) + top, _order = result[0] + order -= 1 + result.sort(reverse=True) + return [elem for (elem, order) in result] + + # General case, slowest method + it = iter(iterable) + result = [(key(elem), i, elem) for i, elem in zip(range(0, -n, -1), it)] + if not result: + return result + heapify(result) + top = result[0][0] + order = -n + _heapreplace = heapreplace + for elem in it: + k = key(elem) + if top < k: + _heapreplace(result, (k, order, elem)) + top, _order, _elem = result[0] + order -= 1 + result.sort(reverse=True) + return [elem for (k, order, elem) in result] + +# If available, use C implementation +try: + from _heapq import * +except ImportError: + pass +try: + from _heapq import _heapreplace_max +except ImportError: + pass +try: + from _heapq import _heapify_max +except ImportError: + pass +try: + from _heapq import _heappop_max +except ImportError: + pass + + +if __name__ == "__main__": + + import doctest + print(doctest.testmod()) diff --git a/venv/Lib/hmac.py b/venv/Lib/hmac.py new file mode 100644 index 00000000..43b72129 --- /dev/null +++ b/venv/Lib/hmac.py @@ -0,0 +1,188 @@ +"""HMAC (Keyed-Hashing for Message Authentication) Python module. + +Implements the HMAC algorithm as described by RFC 2104. +""" + +import warnings as _warnings +from _operator import _compare_digest as compare_digest +try: + import _hashlib as _hashopenssl +except ImportError: + _hashopenssl = None + _openssl_md_meths = None +else: + _openssl_md_meths = frozenset(_hashopenssl.openssl_md_meth_names) +import hashlib as _hashlib + +trans_5C = bytes((x ^ 0x5C) for x in range(256)) +trans_36 = bytes((x ^ 0x36) for x in range(256)) + +# The size of the digests returned by HMAC depends on the underlying +# hashing module used. Use digest_size from the instance of HMAC instead. +digest_size = None + + + +class HMAC: + """RFC 2104 HMAC class. Also complies with RFC 4231. + + This supports the API for Cryptographic Hash Functions (PEP 247). + """ + blocksize = 64 # 512-bit HMAC; can be changed in subclasses. + + def __init__(self, key, msg = None, digestmod = None): + """Create a new HMAC object. + + key: key for the keyed hash object. + msg: Initial input for the hash, if provided. + digestmod: A module supporting PEP 247. *OR* + A hashlib constructor returning a new hash object. *OR* + A hash name suitable for hashlib.new(). + Defaults to hashlib.md5. + Implicit default to hashlib.md5 is deprecated since Python + 3.4 and will be removed in Python 3.8. + + Note: key and msg must be a bytes or bytearray objects. 
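+
+        A minimal usage sketch (key, message and digest name are illustrative):
+
+            h = HMAC(b'secret-key', b'payload', digestmod='sha256')
+            tag = h.hexdigest()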
+ """ + + if not isinstance(key, (bytes, bytearray)): + raise TypeError("key: expected bytes or bytearray, but got %r" % type(key).__name__) + + if digestmod is None: + _warnings.warn("HMAC() without an explicit digestmod argument " + "is deprecated since Python 3.4, and will be removed " + "in 3.8", + DeprecationWarning, 2) + digestmod = _hashlib.md5 + + if callable(digestmod): + self.digest_cons = digestmod + elif isinstance(digestmod, str): + self.digest_cons = lambda d=b'': _hashlib.new(digestmod, d) + else: + self.digest_cons = lambda d=b'': digestmod.new(d) + + self.outer = self.digest_cons() + self.inner = self.digest_cons() + self.digest_size = self.inner.digest_size + + if hasattr(self.inner, 'block_size'): + blocksize = self.inner.block_size + if blocksize < 16: + _warnings.warn('block_size of %d seems too small; using our ' + 'default of %d.' % (blocksize, self.blocksize), + RuntimeWarning, 2) + blocksize = self.blocksize + else: + _warnings.warn('No block_size attribute on given digest object; ' + 'Assuming %d.' % (self.blocksize), + RuntimeWarning, 2) + blocksize = self.blocksize + + # self.blocksize is the default blocksize. self.block_size is + # effective block size as well as the public API attribute. + self.block_size = blocksize + + if len(key) > blocksize: + key = self.digest_cons(key).digest() + + key = key.ljust(blocksize, b'\0') + self.outer.update(key.translate(trans_5C)) + self.inner.update(key.translate(trans_36)) + if msg is not None: + self.update(msg) + + @property + def name(self): + return "hmac-" + self.inner.name + + def update(self, msg): + """Update this hashing object with the string msg. + """ + self.inner.update(msg) + + def copy(self): + """Return a separate copy of this hashing object. + + An update to this copy won't affect the original object. + """ + # Call __new__ directly to avoid the expensive __init__. + other = self.__class__.__new__(self.__class__) + other.digest_cons = self.digest_cons + other.digest_size = self.digest_size + other.inner = self.inner.copy() + other.outer = self.outer.copy() + return other + + def _current(self): + """Return a hash object for the current state. + + To be used only internally with digest() and hexdigest(). + """ + h = self.outer.copy() + h.update(self.inner.digest()) + return h + + def digest(self): + """Return the hash value of this hashing object. + + This returns a string containing 8-bit data. The object is + not altered in any way by this function; you can continue + updating the object after calling this function. + """ + h = self._current() + return h.digest() + + def hexdigest(self): + """Like digest(), but returns a string of hexadecimal digits instead. + """ + h = self._current() + return h.hexdigest() + +def new(key, msg = None, digestmod = None): + """Create a new hashing object and return it. + + key: The starting key for the hash. + msg: if available, will immediately be hashed into the object's starting + state. + + You can now feed arbitrary strings into the object using its update() + method, and can ask for the hash value at any time by calling its digest() + method. + """ + return HMAC(key, msg, digestmod) + + +def digest(key, msg, digest): + """Fast inline implementation of HMAC + + key: key for the keyed hash object. + msg: input message + digest: A hash name suitable for hashlib.new() for best performance. *OR* + A hashlib constructor returning a new hash object. *OR* + A module supporting PEP 247. + + Note: key and msg must be a bytes or bytearray objects. 
+ """ + if (_hashopenssl is not None and + isinstance(digest, str) and digest in _openssl_md_meths): + return _hashopenssl.hmac_digest(key, msg, digest) + + if callable(digest): + digest_cons = digest + elif isinstance(digest, str): + digest_cons = lambda d=b'': _hashlib.new(digest, d) + else: + digest_cons = lambda d=b'': digest.new(d) + + inner = digest_cons() + outer = digest_cons() + blocksize = getattr(inner, 'block_size', 64) + if len(key) > blocksize: + key = digest_cons(key).digest() + key = key + b'\x00' * (blocksize - len(key)) + inner.update(key.translate(trans_36)) + outer.update(key.translate(trans_5C)) + inner.update(msg) + outer.update(inner.digest()) + return outer.digest() diff --git a/venv/Lib/imp.py b/venv/Lib/imp.py new file mode 100644 index 00000000..866464b2 --- /dev/null +++ b/venv/Lib/imp.py @@ -0,0 +1,346 @@ +"""This module provides the components needed to build your own __import__ +function. Undocumented functions are obsolete. + +In most cases it is preferred you consider using the importlib module's +functionality over this module. + +""" +# (Probably) need to stay in _imp +from _imp import (lock_held, acquire_lock, release_lock, + get_frozen_object, is_frozen_package, + init_frozen, is_builtin, is_frozen, + _fix_co_filename) +try: + from _imp import create_dynamic +except ImportError: + # Platform doesn't support dynamic loading. + create_dynamic = None + +from importlib._bootstrap import _ERR_MSG, _exec, _load, _builtin_from_name +from importlib._bootstrap_external import SourcelessFileLoader + +from importlib import machinery +from importlib import util +import importlib +import os +import sys +import tokenize +import types +import warnings + +warnings.warn("the imp module is deprecated in favour of importlib; " + "see the module's documentation for alternative uses", + DeprecationWarning, stacklevel=2) + +# DEPRECATED +SEARCH_ERROR = 0 +PY_SOURCE = 1 +PY_COMPILED = 2 +C_EXTENSION = 3 +PY_RESOURCE = 4 +PKG_DIRECTORY = 5 +C_BUILTIN = 6 +PY_FROZEN = 7 +PY_CODERESOURCE = 8 +IMP_HOOK = 9 + + +def new_module(name): + """**DEPRECATED** + + Create a new module. + + The module is not entered into sys.modules. + + """ + return types.ModuleType(name) + + +def get_magic(): + """**DEPRECATED** + + Return the magic number for .pyc files. + """ + return util.MAGIC_NUMBER + + +def get_tag(): + """Return the magic tag for .pyc files.""" + return sys.implementation.cache_tag + + +def cache_from_source(path, debug_override=None): + """**DEPRECATED** + + Given the path to a .py file, return the path to its .pyc file. + + The .py file does not need to exist; this simply returns the path to the + .pyc file calculated as if the .py file were imported. + + If debug_override is not None, then it must be a boolean and is used in + place of sys.flags.optimize. + + If sys.implementation.cache_tag is None then NotImplementedError is raised. + + """ + with warnings.catch_warnings(): + warnings.simplefilter('ignore') + return util.cache_from_source(path, debug_override) + + +def source_from_cache(path): + """**DEPRECATED** + + Given the path to a .pyc. file, return the path to its .py file. + + The .pyc file does not need to exist; this simply returns the path to + the .py file calculated to correspond to the .pyc file. If path does + not conform to PEP 3147 format, ValueError will be raised. If + sys.implementation.cache_tag is None then NotImplementedError is raised. 
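+
+    For example, 'pkg/__pycache__/mod.cpython-37.pyc' maps back to 'pkg/mod.py'
+    (a sketch; the actual tag in the filename depends on the interpreter).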
+ + """ + return util.source_from_cache(path) + + +def get_suffixes(): + """**DEPRECATED**""" + extensions = [(s, 'rb', C_EXTENSION) for s in machinery.EXTENSION_SUFFIXES] + source = [(s, 'r', PY_SOURCE) for s in machinery.SOURCE_SUFFIXES] + bytecode = [(s, 'rb', PY_COMPILED) for s in machinery.BYTECODE_SUFFIXES] + + return extensions + source + bytecode + + +class NullImporter: + + """**DEPRECATED** + + Null import object. + + """ + + def __init__(self, path): + if path == '': + raise ImportError('empty pathname', path='') + elif os.path.isdir(path): + raise ImportError('existing directory', path=path) + + def find_module(self, fullname): + """Always returns None.""" + return None + + +class _HackedGetData: + + """Compatibility support for 'file' arguments of various load_*() + functions.""" + + def __init__(self, fullname, path, file=None): + super().__init__(fullname, path) + self.file = file + + def get_data(self, path): + """Gross hack to contort loader to deal w/ load_*()'s bad API.""" + if self.file and path == self.path: + if not self.file.closed: + file = self.file + else: + self.file = file = open(self.path, 'r') + + with file: + # Technically should be returning bytes, but + # SourceLoader.get_code() just passed what is returned to + # compile() which can handle str. And converting to bytes would + # require figuring out the encoding to decode to and + # tokenize.detect_encoding() only accepts bytes. + return file.read() + else: + return super().get_data(path) + + +class _LoadSourceCompatibility(_HackedGetData, machinery.SourceFileLoader): + + """Compatibility support for implementing load_source().""" + + +def load_source(name, pathname, file=None): + loader = _LoadSourceCompatibility(name, pathname, file) + spec = util.spec_from_file_location(name, pathname, loader=loader) + if name in sys.modules: + module = _exec(spec, sys.modules[name]) + else: + module = _load(spec) + # To allow reloading to potentially work, use a non-hacked loader which + # won't rely on a now-closed file object. + module.__loader__ = machinery.SourceFileLoader(name, pathname) + module.__spec__.loader = module.__loader__ + return module + + +class _LoadCompiledCompatibility(_HackedGetData, SourcelessFileLoader): + + """Compatibility support for implementing load_compiled().""" + + +def load_compiled(name, pathname, file=None): + """**DEPRECATED**""" + loader = _LoadCompiledCompatibility(name, pathname, file) + spec = util.spec_from_file_location(name, pathname, loader=loader) + if name in sys.modules: + module = _exec(spec, sys.modules[name]) + else: + module = _load(spec) + # To allow reloading to potentially work, use a non-hacked loader which + # won't rely on a now-closed file object. + module.__loader__ = SourcelessFileLoader(name, pathname) + module.__spec__.loader = module.__loader__ + return module + + +def load_package(name, path): + """**DEPRECATED**""" + if os.path.isdir(path): + extensions = (machinery.SOURCE_SUFFIXES[:] + + machinery.BYTECODE_SUFFIXES[:]) + for extension in extensions: + init_path = os.path.join(path, '__init__' + extension) + if os.path.exists(init_path): + path = init_path + break + else: + raise ValueError('{!r} is not a package'.format(path)) + spec = util.spec_from_file_location(name, path, + submodule_search_locations=[]) + if name in sys.modules: + return _exec(spec, sys.modules[name]) + else: + return _load(spec) + + +def load_module(name, file, filename, details): + """**DEPRECATED** + + Load a module, given information returned by find_module(). 
+ + The module name must include the full package name, if any. + + """ + suffix, mode, type_ = details + if mode and (not mode.startswith(('r', 'U')) or '+' in mode): + raise ValueError('invalid file open mode {!r}'.format(mode)) + elif file is None and type_ in {PY_SOURCE, PY_COMPILED}: + msg = 'file object required for import (type code {})'.format(type_) + raise ValueError(msg) + elif type_ == PY_SOURCE: + return load_source(name, filename, file) + elif type_ == PY_COMPILED: + return load_compiled(name, filename, file) + elif type_ == C_EXTENSION and load_dynamic is not None: + if file is None: + with open(filename, 'rb') as opened_file: + return load_dynamic(name, filename, opened_file) + else: + return load_dynamic(name, filename, file) + elif type_ == PKG_DIRECTORY: + return load_package(name, filename) + elif type_ == C_BUILTIN: + return init_builtin(name) + elif type_ == PY_FROZEN: + return init_frozen(name) + else: + msg = "Don't know how to import {} (type code {})".format(name, type_) + raise ImportError(msg, name=name) + + +def find_module(name, path=None): + """**DEPRECATED** + + Search for a module. + + If path is omitted or None, search for a built-in, frozen or special + module and continue search in sys.path. The module name cannot + contain '.'; to search for a submodule of a package, pass the + submodule name and the package's __path__. + + """ + if not isinstance(name, str): + raise TypeError("'name' must be a str, not {}".format(type(name))) + elif not isinstance(path, (type(None), list)): + # Backwards-compatibility + raise RuntimeError("'path' must be None or a list, " + "not {}".format(type(path))) + + if path is None: + if is_builtin(name): + return None, None, ('', '', C_BUILTIN) + elif is_frozen(name): + return None, None, ('', '', PY_FROZEN) + else: + path = sys.path + + for entry in path: + package_directory = os.path.join(entry, name) + for suffix in ['.py', machinery.BYTECODE_SUFFIXES[0]]: + package_file_name = '__init__' + suffix + file_path = os.path.join(package_directory, package_file_name) + if os.path.isfile(file_path): + return None, package_directory, ('', '', PKG_DIRECTORY) + for suffix, mode, type_ in get_suffixes(): + file_name = name + suffix + file_path = os.path.join(entry, file_name) + if os.path.isfile(file_path): + break + else: + continue + break # Break out of outer loop when breaking out of inner loop. + else: + raise ImportError(_ERR_MSG.format(name), name=name) + + encoding = None + if 'b' not in mode: + with open(file_path, 'rb') as file: + encoding = tokenize.detect_encoding(file.readline)[0] + file = open(file_path, mode, encoding=encoding) + return file, file_path, (suffix, mode, type_) + + +def reload(module): + """**DEPRECATED** + + Reload the module and return it. + + The module must have been successfully imported before. + + """ + return importlib.reload(module) + + +def init_builtin(name): + """**DEPRECATED** + + Load and return a built-in module by name, or None is such module doesn't + exist + """ + try: + return _builtin_from_name(name) + except ImportError: + return None + + +if create_dynamic: + def load_dynamic(name, path, file=None): + """**DEPRECATED** + + Load an extension module. 
+ """ + import importlib.machinery + loader = importlib.machinery.ExtensionFileLoader(name, path) + + # Issue #24748: Skip the sys.modules check in _load_module_shim; + # always load new extension + spec = importlib.machinery.ModuleSpec( + name=name, loader=loader, origin=path) + return _load(spec) + +else: + load_dynamic = None diff --git a/venv/Lib/importlib/__init__.py b/venv/Lib/importlib/__init__.py new file mode 100644 index 00000000..cb37d246 --- /dev/null +++ b/venv/Lib/importlib/__init__.py @@ -0,0 +1,176 @@ +"""A pure Python implementation of import.""" +__all__ = ['__import__', 'import_module', 'invalidate_caches', 'reload'] + +# Bootstrap help ##################################################### + +# Until bootstrapping is complete, DO NOT import any modules that attempt +# to import importlib._bootstrap (directly or indirectly). Since this +# partially initialised package would be present in sys.modules, those +# modules would get an uninitialised copy of the source version, instead +# of a fully initialised version (either the frozen one or the one +# initialised below if the frozen one is not available). +import _imp # Just the builtin component, NOT the full Python module +import sys + +try: + import _frozen_importlib as _bootstrap +except ImportError: + from . import _bootstrap + _bootstrap._setup(sys, _imp) +else: + # importlib._bootstrap is the built-in import, ensure we don't create + # a second copy of the module. + _bootstrap.__name__ = 'importlib._bootstrap' + _bootstrap.__package__ = 'importlib' + try: + _bootstrap.__file__ = __file__.replace('__init__.py', '_bootstrap.py') + except NameError: + # __file__ is not guaranteed to be defined, e.g. if this code gets + # frozen by a tool like cx_Freeze. + pass + sys.modules['importlib._bootstrap'] = _bootstrap + +try: + import _frozen_importlib_external as _bootstrap_external +except ImportError: + from . import _bootstrap_external + _bootstrap_external._setup(_bootstrap) + _bootstrap._bootstrap_external = _bootstrap_external +else: + _bootstrap_external.__name__ = 'importlib._bootstrap_external' + _bootstrap_external.__package__ = 'importlib' + try: + _bootstrap_external.__file__ = __file__.replace('__init__.py', '_bootstrap_external.py') + except NameError: + # __file__ is not guaranteed to be defined, e.g. if this code gets + # frozen by a tool like cx_Freeze. + pass + sys.modules['importlib._bootstrap_external'] = _bootstrap_external + +# To simplify imports in test code +_w_long = _bootstrap_external._w_long +_r_long = _bootstrap_external._r_long + +# Fully bootstrapped at this point, import whatever you like, circular +# dependencies and startup overhead minimisation permitting :) + +import types +import warnings + + +# Public API ######################################################### + +from ._bootstrap import __import__ + + +def invalidate_caches(): + """Call the invalidate_caches() method on all meta path finders stored in + sys.meta_path (where implemented).""" + for finder in sys.meta_path: + if hasattr(finder, 'invalidate_caches'): + finder.invalidate_caches() + + +def find_loader(name, path=None): + """Return the loader for the specified module. + + This is a backward-compatible wrapper around find_spec(). + + This function is deprecated in favor of importlib.util.find_spec(). + + """ + warnings.warn('Deprecated since Python 3.4. 
' + 'Use importlib.util.find_spec() instead.', + DeprecationWarning, stacklevel=2) + try: + loader = sys.modules[name].__loader__ + if loader is None: + raise ValueError('{}.__loader__ is None'.format(name)) + else: + return loader + except KeyError: + pass + except AttributeError: + raise ValueError('{}.__loader__ is not set'.format(name)) from None + + spec = _bootstrap._find_spec(name, path) + # We won't worry about malformed specs (missing attributes). + if spec is None: + return None + if spec.loader is None: + if spec.submodule_search_locations is None: + raise ImportError('spec for {} missing loader'.format(name), + name=name) + raise ImportError('namespace packages do not have loaders', + name=name) + return spec.loader + + +def import_module(name, package=None): + """Import a module. + + The 'package' argument is required when performing a relative import. It + specifies the package to use as the anchor point from which to resolve the + relative import to an absolute import. + + """ + level = 0 + if name.startswith('.'): + if not package: + msg = ("the 'package' argument is required to perform a relative " + "import for {!r}") + raise TypeError(msg.format(name)) + for character in name: + if character != '.': + break + level += 1 + return _bootstrap._gcd_import(name[level:], package, level) + + +_RELOADING = {} + + +def reload(module): + """Reload the module and return it. + + The module must have been successfully imported before. + + """ + if not module or not isinstance(module, types.ModuleType): + raise TypeError("reload() argument must be a module") + try: + name = module.__spec__.name + except AttributeError: + name = module.__name__ + + if sys.modules.get(name) is not module: + msg = "module {} not in sys.modules" + raise ImportError(msg.format(name), name=name) + if name in _RELOADING: + return _RELOADING[name] + _RELOADING[name] = module + try: + parent_name = name.rpartition('.')[0] + if parent_name: + try: + parent = sys.modules[parent_name] + except KeyError: + msg = "parent {!r} not in sys.modules" + raise ImportError(msg.format(parent_name), + name=parent_name) from None + else: + pkgpath = parent.__path__ + else: + pkgpath = None + target = module + spec = module.__spec__ = _bootstrap._find_spec(name, pkgpath, target) + if spec is None: + raise ModuleNotFoundError(f"spec not found for the module {name!r}", name=name) + _bootstrap._exec(spec, module) + # The module may have replaced itself in sys.modules! 
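+        # (A module can, for example, put a wrapper object in its place with
+        # sys.modules[__name__] = wrapper while executing; returning the
+        # current sys.modules entry honours that replacement.)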
+ return sys.modules[name] + finally: + try: + del _RELOADING[name] + except KeyError: + pass diff --git a/venv/Lib/importlib/__pycache__/__init__.cpython-37.opt-1.pyc b/venv/Lib/importlib/__pycache__/__init__.cpython-37.opt-1.pyc new file mode 100644 index 00000000..9ea0eb8e Binary files /dev/null and b/venv/Lib/importlib/__pycache__/__init__.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/importlib/__pycache__/__init__.cpython-37.opt-2.pyc b/venv/Lib/importlib/__pycache__/__init__.cpython-37.opt-2.pyc new file mode 100644 index 00000000..75f29bfe Binary files /dev/null and b/venv/Lib/importlib/__pycache__/__init__.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/importlib/__pycache__/__init__.cpython-37.pyc b/venv/Lib/importlib/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 00000000..9ea0eb8e Binary files /dev/null and b/venv/Lib/importlib/__pycache__/__init__.cpython-37.pyc differ diff --git a/venv/Lib/importlib/__pycache__/_bootstrap.cpython-37.opt-1.pyc b/venv/Lib/importlib/__pycache__/_bootstrap.cpython-37.opt-1.pyc new file mode 100644 index 00000000..930e9550 Binary files /dev/null and b/venv/Lib/importlib/__pycache__/_bootstrap.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/importlib/__pycache__/_bootstrap.cpython-37.opt-2.pyc b/venv/Lib/importlib/__pycache__/_bootstrap.cpython-37.opt-2.pyc new file mode 100644 index 00000000..45cd557b Binary files /dev/null and b/venv/Lib/importlib/__pycache__/_bootstrap.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/importlib/__pycache__/_bootstrap.cpython-37.pyc b/venv/Lib/importlib/__pycache__/_bootstrap.cpython-37.pyc new file mode 100644 index 00000000..e18572ce Binary files /dev/null and b/venv/Lib/importlib/__pycache__/_bootstrap.cpython-37.pyc differ diff --git a/venv/Lib/importlib/__pycache__/_bootstrap_external.cpython-37.opt-1.pyc b/venv/Lib/importlib/__pycache__/_bootstrap_external.cpython-37.opt-1.pyc new file mode 100644 index 00000000..7d39c7aa Binary files /dev/null and b/venv/Lib/importlib/__pycache__/_bootstrap_external.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/importlib/__pycache__/_bootstrap_external.cpython-37.opt-2.pyc b/venv/Lib/importlib/__pycache__/_bootstrap_external.cpython-37.opt-2.pyc new file mode 100644 index 00000000..d663d112 Binary files /dev/null and b/venv/Lib/importlib/__pycache__/_bootstrap_external.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/importlib/__pycache__/_bootstrap_external.cpython-37.pyc b/venv/Lib/importlib/__pycache__/_bootstrap_external.cpython-37.pyc new file mode 100644 index 00000000..79040138 Binary files /dev/null and b/venv/Lib/importlib/__pycache__/_bootstrap_external.cpython-37.pyc differ diff --git a/venv/Lib/importlib/__pycache__/abc.cpython-37.opt-1.pyc b/venv/Lib/importlib/__pycache__/abc.cpython-37.opt-1.pyc new file mode 100644 index 00000000..4bbd06de Binary files /dev/null and b/venv/Lib/importlib/__pycache__/abc.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/importlib/__pycache__/abc.cpython-37.opt-2.pyc b/venv/Lib/importlib/__pycache__/abc.cpython-37.opt-2.pyc new file mode 100644 index 00000000..a7dd5bdc Binary files /dev/null and b/venv/Lib/importlib/__pycache__/abc.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/importlib/__pycache__/abc.cpython-37.pyc b/venv/Lib/importlib/__pycache__/abc.cpython-37.pyc new file mode 100644 index 00000000..4bbd06de Binary files /dev/null and b/venv/Lib/importlib/__pycache__/abc.cpython-37.pyc differ diff --git a/venv/Lib/importlib/__pycache__/machinery.cpython-37.opt-1.pyc 
b/venv/Lib/importlib/__pycache__/machinery.cpython-37.opt-1.pyc new file mode 100644 index 00000000..85aaa719 Binary files /dev/null and b/venv/Lib/importlib/__pycache__/machinery.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/importlib/__pycache__/machinery.cpython-37.opt-2.pyc b/venv/Lib/importlib/__pycache__/machinery.cpython-37.opt-2.pyc new file mode 100644 index 00000000..79b9fe34 Binary files /dev/null and b/venv/Lib/importlib/__pycache__/machinery.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/importlib/__pycache__/machinery.cpython-37.pyc b/venv/Lib/importlib/__pycache__/machinery.cpython-37.pyc new file mode 100644 index 00000000..85aaa719 Binary files /dev/null and b/venv/Lib/importlib/__pycache__/machinery.cpython-37.pyc differ diff --git a/venv/Lib/importlib/__pycache__/resources.cpython-37.opt-1.pyc b/venv/Lib/importlib/__pycache__/resources.cpython-37.opt-1.pyc new file mode 100644 index 00000000..f9f03275 Binary files /dev/null and b/venv/Lib/importlib/__pycache__/resources.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/importlib/__pycache__/resources.cpython-37.opt-2.pyc b/venv/Lib/importlib/__pycache__/resources.cpython-37.opt-2.pyc new file mode 100644 index 00000000..8d7821d1 Binary files /dev/null and b/venv/Lib/importlib/__pycache__/resources.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/importlib/__pycache__/resources.cpython-37.pyc b/venv/Lib/importlib/__pycache__/resources.cpython-37.pyc new file mode 100644 index 00000000..1849abe4 Binary files /dev/null and b/venv/Lib/importlib/__pycache__/resources.cpython-37.pyc differ diff --git a/venv/Lib/importlib/__pycache__/util.cpython-37.opt-1.pyc b/venv/Lib/importlib/__pycache__/util.cpython-37.opt-1.pyc new file mode 100644 index 00000000..50851062 Binary files /dev/null and b/venv/Lib/importlib/__pycache__/util.cpython-37.opt-1.pyc differ diff --git a/venv/Lib/importlib/__pycache__/util.cpython-37.opt-2.pyc b/venv/Lib/importlib/__pycache__/util.cpython-37.opt-2.pyc new file mode 100644 index 00000000..914c431f Binary files /dev/null and b/venv/Lib/importlib/__pycache__/util.cpython-37.opt-2.pyc differ diff --git a/venv/Lib/importlib/__pycache__/util.cpython-37.pyc b/venv/Lib/importlib/__pycache__/util.cpython-37.pyc new file mode 100644 index 00000000..50851062 Binary files /dev/null and b/venv/Lib/importlib/__pycache__/util.cpython-37.pyc differ diff --git a/venv/Lib/importlib/_bootstrap.py b/venv/Lib/importlib/_bootstrap.py new file mode 100644 index 00000000..2bdd1929 --- /dev/null +++ b/venv/Lib/importlib/_bootstrap.py @@ -0,0 +1,1164 @@ +"""Core implementation of import. + +This module is NOT meant to be directly imported! It has been designed such +that it can be bootstrapped into Python as the implementation of import. As +such it requires the injection of specific modules and attributes in order to +work. One should use importlib as the public-facing version of this module. + +""" +# +# IMPORTANT: Whenever making changes to this module, be sure to run +# a top-level make in order to get the frozen version of the module +# updated. Not doing so will result in the Makefile to fail for +# all others who don't have a ./python around to freeze the module +# in the early stages of compilation. +# + +# See importlib._setup() for what is injected into the global namespace. + +# When editing this code be aware that code executed at import time CANNOT +# reference any injected objects! This includes not only global code but also +# anything specified at the class level. 
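+# (For instance, names such as _imp, sys, _thread, _weakref and _warnings are
+# only injected later by importlib._setup(), so referencing them from code
+# that runs at import time would fail.)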
+ +# Bootstrap-related code ###################################################### + +_bootstrap_external = None + +def _wrap(new, old): + """Simple substitute for functools.update_wrapper.""" + for replace in ['__module__', '__name__', '__qualname__', '__doc__']: + if hasattr(old, replace): + setattr(new, replace, getattr(old, replace)) + new.__dict__.update(old.__dict__) + + +def _new_module(name): + return type(sys)(name) + + +# Module-level locking ######################################################## + +# A dict mapping module names to weakrefs of _ModuleLock instances +# Dictionary protected by the global import lock +_module_locks = {} +# A dict mapping thread ids to _ModuleLock instances +_blocking_on = {} + + +class _DeadlockError(RuntimeError): + pass + + +class _ModuleLock: + """A recursive lock implementation which is able to detect deadlocks + (e.g. thread 1 trying to take locks A then B, and thread 2 trying to + take locks B then A). + """ + + def __init__(self, name): + self.lock = _thread.allocate_lock() + self.wakeup = _thread.allocate_lock() + self.name = name + self.owner = None + self.count = 0 + self.waiters = 0 + + def has_deadlock(self): + # Deadlock avoidance for concurrent circular imports. + me = _thread.get_ident() + tid = self.owner + while True: + lock = _blocking_on.get(tid) + if lock is None: + return False + tid = lock.owner + if tid == me: + return True + + def acquire(self): + """ + Acquire the module lock. If a potential deadlock is detected, + a _DeadlockError is raised. + Otherwise, the lock is always acquired and True is returned. + """ + tid = _thread.get_ident() + _blocking_on[tid] = self + try: + while True: + with self.lock: + if self.count == 0 or self.owner == tid: + self.owner = tid + self.count += 1 + return True + if self.has_deadlock(): + raise _DeadlockError('deadlock detected by %r' % self) + if self.wakeup.acquire(False): + self.waiters += 1 + # Wait for a release() call + self.wakeup.acquire() + self.wakeup.release() + finally: + del _blocking_on[tid] + + def release(self): + tid = _thread.get_ident() + with self.lock: + if self.owner != tid: + raise RuntimeError('cannot release un-acquired lock') + assert self.count > 0 + self.count -= 1 + if self.count == 0: + self.owner = None + if self.waiters: + self.waiters -= 1 + self.wakeup.release() + + def __repr__(self): + return '_ModuleLock({!r}) at {}'.format(self.name, id(self)) + + +class _DummyModuleLock: + """A simple _ModuleLock equivalent for Python builds without + multi-threading support.""" + + def __init__(self, name): + self.name = name + self.count = 0 + + def acquire(self): + self.count += 1 + return True + + def release(self): + if self.count == 0: + raise RuntimeError('cannot release un-acquired lock') + self.count -= 1 + + def __repr__(self): + return '_DummyModuleLock({!r}) at {}'.format(self.name, id(self)) + + +class _ModuleLockManager: + + def __init__(self, name): + self._name = name + self._lock = None + + def __enter__(self): + self._lock = _get_module_lock(self._name) + self._lock.acquire() + + def __exit__(self, *args, **kwargs): + self._lock.release() + + +# The following two functions are for consumption by Python/import.c. + +def _get_module_lock(name): + """Get or create the module lock for a given module name. 
+ + Acquire/release internally the global import lock to protect + _module_locks.""" + + _imp.acquire_lock() + try: + try: + lock = _module_locks[name]() + except KeyError: + lock = None + + if lock is None: + if _thread is None: + lock = _DummyModuleLock(name) + else: + lock = _ModuleLock(name) + + def cb(ref, name=name): + _imp.acquire_lock() + try: + # bpo-31070: Check if another thread created a new lock + # after the previous lock was destroyed + # but before the weakref callback was called. + if _module_locks.get(name) is ref: + del _module_locks[name] + finally: + _imp.release_lock() + + _module_locks[name] = _weakref.ref(lock, cb) + finally: + _imp.release_lock() + + return lock + + +def _lock_unlock_module(name): + """Acquires then releases the module lock for a given module name. + + This is used to ensure a module is completely initialized, in the + event it is being imported by another thread. + """ + lock = _get_module_lock(name) + try: + lock.acquire() + except _DeadlockError: + # Concurrent circular import, we'll accept a partially initialized + # module object. + pass + else: + lock.release() + +# Frame stripping magic ############################################### +def _call_with_frames_removed(f, *args, **kwds): + """remove_importlib_frames in import.c will always remove sequences + of importlib frames that end with a call to this function + + Use it instead of a normal call in places where including the importlib + frames introduces unwanted noise into the traceback (e.g. when executing + module code) + """ + return f(*args, **kwds) + + +def _verbose_message(message, *args, verbosity=1): + """Print the message to stderr if -v/PYTHONVERBOSE is turned on.""" + if sys.flags.verbose >= verbosity: + if not message.startswith(('#', 'import ')): + message = '# ' + message + print(message.format(*args), file=sys.stderr) + + +def _requires_builtin(fxn): + """Decorator to verify the named module is built-in.""" + def _requires_builtin_wrapper(self, fullname): + if fullname not in sys.builtin_module_names: + raise ImportError('{!r} is not a built-in module'.format(fullname), + name=fullname) + return fxn(self, fullname) + _wrap(_requires_builtin_wrapper, fxn) + return _requires_builtin_wrapper + + +def _requires_frozen(fxn): + """Decorator to verify the named module is frozen.""" + def _requires_frozen_wrapper(self, fullname): + if not _imp.is_frozen(fullname): + raise ImportError('{!r} is not a frozen module'.format(fullname), + name=fullname) + return fxn(self, fullname) + _wrap(_requires_frozen_wrapper, fxn) + return _requires_frozen_wrapper + + +# Typically used by loader classes as a method replacement. +def _load_module_shim(self, fullname): + """Load the specified module into sys.modules and return it. + + This method is deprecated. Use loader.exec_module instead. + + """ + spec = spec_from_loader(fullname, self) + if fullname in sys.modules: + module = sys.modules[fullname] + _exec(spec, module) + return sys.modules[fullname] + else: + return _load(spec) + +# Module specifications ####################################################### + +def _module_repr(module): + # The implementation of ModuleType.__repr__(). + loader = getattr(module, '__loader__', None) + if hasattr(loader, 'module_repr'): + # As soon as BuiltinImporter, FrozenImporter, and NamespaceLoader + # drop their implementations for module_repr. we can add a + # deprecation warning here. 
+ try: + return loader.module_repr(module) + except Exception: + pass + try: + spec = module.__spec__ + except AttributeError: + pass + else: + if spec is not None: + return _module_repr_from_spec(spec) + + # We could use module.__class__.__name__ instead of 'module' in the + # various repr permutations. + try: + name = module.__name__ + except AttributeError: + name = '?' + try: + filename = module.__file__ + except AttributeError: + if loader is None: + return '<module {!r}>'.format(name) + else: + return '<module {!r} ({!r})>'.format(name, loader) + else: + return '<module {!r} from {!r}>'.format(name, filename) + + +class _installed_safely: + + def __init__(self, module): + self._module = module + self._spec = module.__spec__ + + def __enter__(self): + # This must be done before putting the module in sys.modules + # (otherwise an optimization shortcut in import.c becomes + # wrong) + self._spec._initializing = True + sys.modules[self._spec.name] = self._module + + def __exit__(self, *args): + try: + spec = self._spec + if any(arg is not None for arg in args): + try: + del sys.modules[spec.name] + except KeyError: + pass + else: + _verbose_message('import {!r} # {!r}', spec.name, spec.loader) + finally: + self._spec._initializing = False + + +class ModuleSpec: + """The specification for a module, used for loading. + + A module's spec is the source for information about the module. For + data associated with the module, including source, use the spec's + loader. + + `name` is the absolute name of the module. `loader` is the loader + to use when loading the module. `parent` is the name of the + package the module is in. The parent is derived from the name. + + `is_package` determines if the module is considered a package or + not. On modules this is reflected by the `__path__` attribute. + + `origin` is the specific location used by the loader from which to + load the module, if that information is available. When filename is + set, origin will match. + + `has_location` indicates that a spec's "origin" reflects a location. + When this is True, `__file__` attribute of the module is set. + + `cached` is the location of the cached bytecode file, if any. It + corresponds to the `__cached__` attribute. + + `submodule_search_locations` is the sequence of path entries to + search when importing submodules. If set, is_package should be + True--and False otherwise. + + Packages are simply modules that (may) have submodules. If a spec + has a non-None value in `submodule_search_locations`, the import + system will consider modules loaded from the spec as packages. + + Only finders (see importlib.abc.MetaPathFinder and + importlib.abc.PathEntryFinder) should modify ModuleSpec instances.
+ + """ + + def __init__(self, name, loader, *, origin=None, loader_state=None, + is_package=None): + self.name = name + self.loader = loader + self.origin = origin + self.loader_state = loader_state + self.submodule_search_locations = [] if is_package else None + + # file-location attributes + self._set_fileattr = False + self._cached = None + + def __repr__(self): + args = ['name={!r}'.format(self.name), + 'loader={!r}'.format(self.loader)] + if self.origin is not None: + args.append('origin={!r}'.format(self.origin)) + if self.submodule_search_locations is not None: + args.append('submodule_search_locations={}' + .format(self.submodule_search_locations)) + return '{}({})'.format(self.__class__.__name__, ', '.join(args)) + + def __eq__(self, other): + smsl = self.submodule_search_locations + try: + return (self.name == other.name and + self.loader == other.loader and + self.origin == other.origin and + smsl == other.submodule_search_locations and + self.cached == other.cached and + self.has_location == other.has_location) + except AttributeError: + return False + + @property + def cached(self): + if self._cached is None: + if self.origin is not None and self._set_fileattr: + if _bootstrap_external is None: + raise NotImplementedError + self._cached = _bootstrap_external._get_cached(self.origin) + return self._cached + + @cached.setter + def cached(self, cached): + self._cached = cached + + @property + def parent(self): + """The name of the module's parent.""" + if self.submodule_search_locations is None: + return self.name.rpartition('.')[0] + else: + return self.name + + @property + def has_location(self): + return self._set_fileattr + + @has_location.setter + def has_location(self, value): + self._set_fileattr = bool(value) + + +def spec_from_loader(name, loader, *, origin=None, is_package=None): + """Return a module spec based on various loader methods.""" + if hasattr(loader, 'get_filename'): + if _bootstrap_external is None: + raise NotImplementedError + spec_from_file_location = _bootstrap_external.spec_from_file_location + + if is_package is None: + return spec_from_file_location(name, loader=loader) + search = [] if is_package else None + return spec_from_file_location(name, loader=loader, + submodule_search_locations=search) + + if is_package is None: + if hasattr(loader, 'is_package'): + try: + is_package = loader.is_package(name) + except ImportError: + is_package = None # aka, undefined + else: + # the default + is_package = False + + return ModuleSpec(name, loader, origin=origin, is_package=is_package) + + +def _spec_from_module(module, loader=None, origin=None): + # This function is meant for use in _setup(). + try: + spec = module.__spec__ + except AttributeError: + pass + else: + if spec is not None: + return spec + + name = module.__name__ + if loader is None: + try: + loader = module.__loader__ + except AttributeError: + # loader will stay None. 
+ pass + try: + location = module.__file__ + except AttributeError: + location = None + if origin is None: + if location is None: + try: + origin = loader._ORIGIN + except AttributeError: + origin = None + else: + origin = location + try: + cached = module.__cached__ + except AttributeError: + cached = None + try: + submodule_search_locations = list(module.__path__) + except AttributeError: + submodule_search_locations = None + + spec = ModuleSpec(name, loader, origin=origin) + spec._set_fileattr = False if location is None else True + spec.cached = cached + spec.submodule_search_locations = submodule_search_locations + return spec + + +def _init_module_attrs(spec, module, *, override=False): + # The passed-in module may be not support attribute assignment, + # in which case we simply don't set the attributes. + # __name__ + if (override or getattr(module, '__name__', None) is None): + try: + module.__name__ = spec.name + except AttributeError: + pass + # __loader__ + if override or getattr(module, '__loader__', None) is None: + loader = spec.loader + if loader is None: + # A backward compatibility hack. + if spec.submodule_search_locations is not None: + if _bootstrap_external is None: + raise NotImplementedError + _NamespaceLoader = _bootstrap_external._NamespaceLoader + + loader = _NamespaceLoader.__new__(_NamespaceLoader) + loader._path = spec.submodule_search_locations + spec.loader = loader + # While the docs say that module.__file__ is not set for + # built-in modules, and the code below will avoid setting it if + # spec.has_location is false, this is incorrect for namespace + # packages. Namespace packages have no location, but their + # __spec__.origin is None, and thus their module.__file__ + # should also be None for consistency. While a bit of a hack, + # this is the best place to ensure this consistency. + # + # See # https://docs.python.org/3/library/importlib.html#importlib.abc.Loader.load_module + # and bpo-32305 + module.__file__ = None + try: + module.__loader__ = loader + except AttributeError: + pass + # __package__ + if override or getattr(module, '__package__', None) is None: + try: + module.__package__ = spec.parent + except AttributeError: + pass + # __spec__ + try: + module.__spec__ = spec + except AttributeError: + pass + # __path__ + if override or getattr(module, '__path__', None) is None: + if spec.submodule_search_locations is not None: + try: + module.__path__ = spec.submodule_search_locations + except AttributeError: + pass + # __file__/__cached__ + if spec.has_location: + if override or getattr(module, '__file__', None) is None: + try: + module.__file__ = spec.origin + except AttributeError: + pass + + if override or getattr(module, '__cached__', None) is None: + if spec.cached is not None: + try: + module.__cached__ = spec.cached + except AttributeError: + pass + return module + + +def module_from_spec(spec): + """Create a module based on the provided spec.""" + # Typically loaders will not implement create_module(). + module = None + if hasattr(spec.loader, 'create_module'): + # If create_module() returns `None` then it means default + # module creation should be used. 
+ module = spec.loader.create_module(spec) + elif hasattr(spec.loader, 'exec_module'): + raise ImportError('loaders that define exec_module() ' + 'must also define create_module()') + if module is None: + module = _new_module(spec.name) + _init_module_attrs(spec, module) + return module + + +def _module_repr_from_spec(spec): + """Return the repr to use for the module.""" + # We mostly replicate _module_repr() using the spec attributes. + name = '?' if spec.name is None else spec.name + if spec.origin is None: + if spec.loader is None: + return '<module {!r}>'.format(name) + else: + return '<module {!r} ({!r})>'.format(name, spec.loader) + else: + if spec.has_location: + return '<module {!r} from {!r}>'.format(name, spec.origin) + else: + return '<module {!r} ({})>'.format(spec.name, spec.origin) + + +# Used by importlib.reload() and _load_module_shim(). +def _exec(spec, module): + """Execute the spec's specified module in an existing module's namespace.""" + name = spec.name + with _ModuleLockManager(name): + if sys.modules.get(name) is not module: + msg = 'module {!r} not in sys.modules'.format(name) + raise ImportError(msg, name=name) + if spec.loader is None: + if spec.submodule_search_locations is None: + raise ImportError('missing loader', name=spec.name) + # namespace package + _init_module_attrs(spec, module, override=True) + return module + _init_module_attrs(spec, module, override=True) + if not hasattr(spec.loader, 'exec_module'): + # (issue19713) Once BuiltinImporter and ExtensionFileLoader + # have exec_module() implemented, we can add a deprecation + # warning here. + spec.loader.load_module(name) + else: + spec.loader.exec_module(module) + return sys.modules[name] + + +def _load_backward_compatible(spec): + # (issue19713) Once BuiltinImporter and ExtensionFileLoader + # have exec_module() implemented, we can add a deprecation + # warning here. + spec.loader.load_module(spec.name) + # The module must be in sys.modules at this point! + module = sys.modules[spec.name] + if getattr(module, '__loader__', None) is None: + try: + module.__loader__ = spec.loader + except AttributeError: + pass + if getattr(module, '__package__', None) is None: + try: + # Since module.__path__ may not line up with + # spec.submodule_search_paths, we can't necessarily rely + # on spec.parent here. + module.__package__ = module.__name__ + if not hasattr(module, '__path__'): + module.__package__ = spec.name.rpartition('.')[0] + except AttributeError: + pass + if getattr(module, '__spec__', None) is None: + try: + module.__spec__ = spec + except AttributeError: + pass + return module + +def _load_unlocked(spec): + # A helper for direct use by the import system. + if spec.loader is not None: + # not a namespace package + if not hasattr(spec.loader, 'exec_module'): + return _load_backward_compatible(spec) + + module = module_from_spec(spec) + with _installed_safely(module): + if spec.loader is None: + if spec.submodule_search_locations is None: + raise ImportError('missing loader', name=spec.name) + # A namespace package so do nothing. + else: + spec.loader.exec_module(module) + + # We don't ensure that the import-related module attributes get + # set in the sys.modules replacement case. Such modules are on + # their own. + return sys.modules[spec.name] + +# A method used during testing of _load_unlocked() and by +# _load_module_shim(). +def _load(spec): + """Return a new module object, loaded by the spec's loader. + + The module is not added to its parent. + + If a module is already in sys.modules, that existing module gets + clobbered.
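# Illustrative aside, not part of this diff: the pipeline above (build a spec,
# module_from_spec(), then loader.exec_module()) is normally driven through the
# public importlib.util helpers. A small runnable sketch; the temporary file and
# the module name 'demo_mod' are made up for the example:

import importlib.util
import sys
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.py', delete=False) as f:
    f.write("VALUE = 42\n")
    source_path = f.name

spec = importlib.util.spec_from_file_location('demo_mod', source_path)  # a ModuleSpec
module = importlib.util.module_from_spec(spec)     # created but not yet executed
sys.modules['demo_mod'] = module                   # what _installed_safely does internally
spec.loader.exec_module(module)                    # run the module body
print(module.VALUE)                                # -> 42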
+ + """ + with _ModuleLockManager(spec.name): + return _load_unlocked(spec) + + +# Loaders ##################################################################### + +class BuiltinImporter: + + """Meta path import for built-in modules. + + All methods are either class or static methods to avoid the need to + instantiate the class. + + """ + + @staticmethod + def module_repr(module): + """Return repr for the module. + + The method is deprecated. The import machinery does the job itself. + + """ + return ''.format(module.__name__) + + @classmethod + def find_spec(cls, fullname, path=None, target=None): + if path is not None: + return None + if _imp.is_builtin(fullname): + return spec_from_loader(fullname, cls, origin='built-in') + else: + return None + + @classmethod + def find_module(cls, fullname, path=None): + """Find the built-in module. + + If 'path' is ever specified then the search is considered a failure. + + This method is deprecated. Use find_spec() instead. + + """ + spec = cls.find_spec(fullname, path) + return spec.loader if spec is not None else None + + @classmethod + def create_module(self, spec): + """Create a built-in module""" + if spec.name not in sys.builtin_module_names: + raise ImportError('{!r} is not a built-in module'.format(spec.name), + name=spec.name) + return _call_with_frames_removed(_imp.create_builtin, spec) + + @classmethod + def exec_module(self, module): + """Exec a built-in module""" + _call_with_frames_removed(_imp.exec_builtin, module) + + @classmethod + @_requires_builtin + def get_code(cls, fullname): + """Return None as built-in modules do not have code objects.""" + return None + + @classmethod + @_requires_builtin + def get_source(cls, fullname): + """Return None as built-in modules do not have source code.""" + return None + + @classmethod + @_requires_builtin + def is_package(cls, fullname): + """Return False as built-in modules are never packages.""" + return False + + load_module = classmethod(_load_module_shim) + + +class FrozenImporter: + + """Meta path import for frozen modules. + + All methods are either class or static methods to avoid the need to + instantiate the class. + + """ + + @staticmethod + def module_repr(m): + """Return repr for the module. + + The method is deprecated. The import machinery does the job itself. + + """ + return ''.format(m.__name__) + + @classmethod + def find_spec(cls, fullname, path=None, target=None): + if _imp.is_frozen(fullname): + return spec_from_loader(fullname, cls, origin='frozen') + else: + return None + + @classmethod + def find_module(cls, fullname, path=None): + """Find a frozen module. + + This method is deprecated. Use find_spec() instead. + + """ + return cls if _imp.is_frozen(fullname) else None + + @classmethod + def create_module(cls, spec): + """Use default semantics for module creation.""" + + @staticmethod + def exec_module(module): + name = module.__spec__.name + if not _imp.is_frozen(name): + raise ImportError('{!r} is not a frozen module'.format(name), + name=name) + code = _call_with_frames_removed(_imp.get_frozen_object, name) + exec(code, module.__dict__) + + @classmethod + def load_module(cls, fullname): + """Load a frozen module. + + This method is deprecated. Use exec_module() instead. 
+ + """ + return _load_module_shim(cls, fullname) + + @classmethod + @_requires_frozen + def get_code(cls, fullname): + """Return the code object for the frozen module.""" + return _imp.get_frozen_object(fullname) + + @classmethod + @_requires_frozen + def get_source(cls, fullname): + """Return None as frozen modules do not have source code.""" + return None + + @classmethod + @_requires_frozen + def is_package(cls, fullname): + """Return True if the frozen module is a package.""" + return _imp.is_frozen_package(fullname) + + +# Import itself ############################################################### + +class _ImportLockContext: + + """Context manager for the import lock.""" + + def __enter__(self): + """Acquire the import lock.""" + _imp.acquire_lock() + + def __exit__(self, exc_type, exc_value, exc_traceback): + """Release the import lock regardless of any raised exceptions.""" + _imp.release_lock() + + +def _resolve_name(name, package, level): + """Resolve a relative module name to an absolute one.""" + bits = package.rsplit('.', level - 1) + if len(bits) < level: + raise ValueError('attempted relative import beyond top-level package') + base = bits[0] + return '{}.{}'.format(base, name) if name else base + + +def _find_spec_legacy(finder, name, path): + # This would be a good place for a DeprecationWarning if + # we ended up going that route. + loader = finder.find_module(name, path) + if loader is None: + return None + return spec_from_loader(name, loader) + + +def _find_spec(name, path, target=None): + """Find a module's spec.""" + meta_path = sys.meta_path + if meta_path is None: + # PyImport_Cleanup() is running or has been called. + raise ImportError("sys.meta_path is None, Python is likely " + "shutting down") + + if not meta_path: + _warnings.warn('sys.meta_path is empty', ImportWarning) + + # We check sys.modules here for the reload case. While a passed-in + # target will usually indicate a reload there is no guarantee, whereas + # sys.modules provides one. + is_reload = name in sys.modules + for finder in meta_path: + with _ImportLockContext(): + try: + find_spec = finder.find_spec + except AttributeError: + spec = _find_spec_legacy(finder, name, path) + if spec is None: + continue + else: + spec = find_spec(name, path, target) + if spec is not None: + # The parent import may have already imported this module. + if not is_reload and name in sys.modules: + module = sys.modules[name] + try: + __spec__ = module.__spec__ + except AttributeError: + # We use the found spec since that is the one that + # we would have used if the parent module hadn't + # beaten us to the punch. 
+ return spec + else: + if __spec__ is None: + return spec + else: + return __spec__ + else: + return spec + else: + return None + + +def _sanity_check(name, package, level): + """Verify arguments are "sane".""" + if not isinstance(name, str): + raise TypeError('module name must be str, not {}'.format(type(name))) + if level < 0: + raise ValueError('level must be >= 0') + if level > 0: + if not isinstance(package, str): + raise TypeError('__package__ not set to a string') + elif not package: + raise ImportError('attempted relative import with no known parent ' + 'package') + if not name and level == 0: + raise ValueError('Empty module name') + + +_ERR_MSG_PREFIX = 'No module named ' +_ERR_MSG = _ERR_MSG_PREFIX + '{!r}' + +def _find_and_load_unlocked(name, import_): + path = None + parent = name.rpartition('.')[0] + if parent: + if parent not in sys.modules: + _call_with_frames_removed(import_, parent) + # Crazy side-effects! + if name in sys.modules: + return sys.modules[name] + parent_module = sys.modules[parent] + try: + path = parent_module.__path__ + except AttributeError: + msg = (_ERR_MSG + '; {!r} is not a package').format(name, parent) + raise ModuleNotFoundError(msg, name=name) from None + spec = _find_spec(name, path) + if spec is None: + raise ModuleNotFoundError(_ERR_MSG.format(name), name=name) + else: + module = _load_unlocked(spec) + if parent: + # Set the module as an attribute on its parent. + parent_module = sys.modules[parent] + setattr(parent_module, name.rpartition('.')[2], module) + return module + + +_NEEDS_LOADING = object() + + +def _find_and_load(name, import_): + """Find and load the module.""" + with _ModuleLockManager(name): + module = sys.modules.get(name, _NEEDS_LOADING) + if module is _NEEDS_LOADING: + return _find_and_load_unlocked(name, import_) + + if module is None: + message = ('import of {} halted; ' + 'None in sys.modules'.format(name)) + raise ModuleNotFoundError(message, name=name) + + _lock_unlock_module(name) + return module + + +def _gcd_import(name, package=None, level=0): + """Import and return the module based on its name, the package the call is + being made from, and the level adjustment. + + This function represents the greatest common denominator of functionality + between import_module and __import__. This includes setting __package__ if + the loader did not. + + """ + _sanity_check(name, package, level) + if level > 0: + name = _resolve_name(name, package, level) + return _find_and_load(name, _gcd_import) + + +def _handle_fromlist(module, fromlist, import_, *, recursive=False): + """Figure out what __import__ should return. + + The import_ parameter is a callable which takes the name of module to + import. It is required to decouple the function from assuming importlib's + import implementation is desired. + + """ + # The hell that is fromlist ... + # If a package was imported, try to import stuff from fromlist. 
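# Illustrative aside, not part of this diff: _find_spec() above simply walks
# sys.meta_path and asks each finder for a spec. A tiny observer finder makes that
# visible; it always returns None, so real imports still fall through to the
# default finders ('csv' is an arbitrary module -- nothing is printed if it is
# already in sys.modules):

import importlib
import sys

class LoggingFinder:
    def find_spec(self, fullname, path=None, target=None):
        print('looking for', fullname, 'path =', path)
        return None    # defer to the remaining finders on sys.meta_path

sys.meta_path.insert(0, LoggingFinder())
importlib.import_module('csv')
sys.meta_path.pop(0)    # remove the demo finder again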
+ if hasattr(module, '__path__'): + for x in fromlist: + if not isinstance(x, str): + if recursive: + where = module.__name__ + '.__all__' + else: + where = "``from list''" + raise TypeError(f"Item in {where} must be str, " + f"not {type(x).__name__}") + elif x == '*': + if not recursive and hasattr(module, '__all__'): + _handle_fromlist(module, module.__all__, import_, + recursive=True) + elif not hasattr(module, x): + from_name = '{}.{}'.format(module.__name__, x) + try: + _call_with_frames_removed(import_, from_name) + except ModuleNotFoundError as exc: + # Backwards-compatibility dictates we ignore failed + # imports triggered by fromlist for modules that don't + # exist. + if (exc.name == from_name and + sys.modules.get(from_name, _NEEDS_LOADING) is not None): + continue + raise + return module + + +def _calc___package__(globals): + """Calculate what __package__ should be. + + __package__ is not guaranteed to be defined or could be set to None + to represent that its proper value is unknown. + + """ + package = globals.get('__package__') + spec = globals.get('__spec__') + if package is not None: + if spec is not None and package != spec.parent: + _warnings.warn("__package__ != __spec__.parent " + f"({package!r} != {spec.parent!r})", + ImportWarning, stacklevel=3) + return package + elif spec is not None: + return spec.parent + else: + _warnings.warn("can't resolve package from __spec__ or __package__, " + "falling back on __name__ and __path__", + ImportWarning, stacklevel=3) + package = globals['__name__'] + if '__path__' not in globals: + package = package.rpartition('.')[0] + return package + + +def __import__(name, globals=None, locals=None, fromlist=(), level=0): + """Import a module. + + The 'globals' argument is used to infer where the import is occurring from + to handle relative imports. The 'locals' argument is ignored. The + 'fromlist' argument specifies what should exist as attributes on the module + being imported (e.g. ``from module import <fromlist>``). The 'level' + argument represents the package location to import from in a relative + import (e.g. ``from ..pkg import mod`` would have a 'level' of 2). + + """ + if level == 0: + module = _gcd_import(name) + else: + globals_ = globals if globals is not None else {} + package = _calc___package__(globals_) + module = _gcd_import(name, package, level) + if not fromlist: + # Return up to the first dot in 'name'. This is complicated by the fact + # that 'name' may be relative. + if level == 0: + return _gcd_import(name.partition('.')[0]) + elif not name: + return module + else: + # Figure out where to slice the module's name up to the first dot + # in 'name'. + cut_off = len(name) - len(name.partition('.')[0]) + # Slice end needs to be positive to alleviate need to special-case + # when ``'.' not in name``. + return sys.modules[module.__name__[:len(module.__name__)-cut_off]] + else: + return _handle_fromlist(module, fromlist, _gcd_import) + + +def _builtin_from_name(name): + spec = BuiltinImporter.find_spec(name) + if spec is None: + raise ImportError('no built-in module named ' + name) + return _load_unlocked(spec) + + +def _setup(sys_module, _imp_module): + """Setup importlib by importing needed built-in modules and injecting them + into the global namespace. + + As sys is needed for sys.modules access and _imp is needed to load built-in + modules, those two modules must be explicitly passed in. + + """ + global _imp, sys + _imp = _imp_module + sys = sys_module + + # Set up the spec for existing builtin/frozen modules.
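# Illustrative aside, not part of this diff: a worked example of the __import__
# contract documented above ('logging.handlers' is an arbitrary stdlib package).
# With an empty fromlist the top-level package is returned; with a non-empty
# fromlist the named submodule itself is returned. A relative form such as
# `from ..pkg import mod` would instead pass level=2 plus the caller's globals.

import sys

top = __import__('logging.handlers')                               # like: import logging.handlers
print(top.__name__)                                                # 'logging'

sub = __import__('logging.handlers', fromlist=['SysLogHandler'])   # like: from logging.handlers import SysLogHandler
print(sub.__name__)                                                # 'logging.handlers'
print(sys.modules['logging.handlers'] is sub)                      # True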
+ module_type = type(sys) + for name, module in sys.modules.items(): + if isinstance(module, module_type): + if name in sys.builtin_module_names: + loader = BuiltinImporter + elif _imp.is_frozen(name): + loader = FrozenImporter + else: + continue + spec = _spec_from_module(module, loader) + _init_module_attrs(spec, module) + + # Directly load built-in modules needed during bootstrap. + self_module = sys.modules[__name__] + for builtin_name in ('_thread', '_warnings', '_weakref'): + if builtin_name not in sys.modules: + builtin_module = _builtin_from_name(builtin_name) + else: + builtin_module = sys.modules[builtin_name] + setattr(self_module, builtin_name, builtin_module) + + +def _install(sys_module, _imp_module): + """Install importers for builtin and frozen modules""" + _setup(sys_module, _imp_module) + + sys.meta_path.append(BuiltinImporter) + sys.meta_path.append(FrozenImporter) + + +def _install_external_importers(): + """Install importers that require external filesystem access""" + global _bootstrap_external + import _frozen_importlib_external + _bootstrap_external = _frozen_importlib_external + _frozen_importlib_external._install(sys.modules[__name__]) diff --git a/venv/Lib/importlib/_bootstrap_external.py b/venv/Lib/importlib/_bootstrap_external.py new file mode 100644 index 00000000..53b24ff1 --- /dev/null +++ b/venv/Lib/importlib/_bootstrap_external.py @@ -0,0 +1,1562 @@ +"""Core implementation of path-based import. + +This module is NOT meant to be directly imported! It has been designed such +that it can be bootstrapped into Python as the implementation of import. As +such it requires the injection of specific modules and attributes in order to +work. One should use importlib as the public-facing version of this module. + +""" +# IMPORTANT: Whenever making changes to this module, be sure to run a top-level +# `make regen-importlib` followed by `make` in order to get the frozen version +# of the module updated. Not doing so will result in the Makefile to fail for +# all others who don't have a ./python around to freeze the module in the early +# stages of compilation. +# + +# See importlib._setup() for what is injected into the global namespace. + +# When editing this code be aware that code executed at import time CANNOT +# reference any injected objects! This includes not only global code but also +# anything specified at the class level. 
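# Illustrative aside, not part of this diff: the path-based machinery implemented
# below is what gives ordinary source modules their __spec__.origin, __file__ and
# __cached__ attributes. Quick check against an arbitrary pure-Python stdlib
# module ('argparse'):

import argparse

print(argparse.__spec__.origin)              # path to argparse.py
print(argparse.__spec__.cached)              # path to the .pyc under __pycache__
print(type(argparse.__loader__).__name__)    # typically 'SourceFileLoader'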
+ +# Bootstrap-related code ###################################################### +_CASE_INSENSITIVE_PLATFORMS_STR_KEY = 'win', +_CASE_INSENSITIVE_PLATFORMS_BYTES_KEY = 'cygwin', 'darwin' +_CASE_INSENSITIVE_PLATFORMS = (_CASE_INSENSITIVE_PLATFORMS_BYTES_KEY + + _CASE_INSENSITIVE_PLATFORMS_STR_KEY) + + +def _make_relax_case(): + if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS): + if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS_STR_KEY): + key = 'PYTHONCASEOK' + else: + key = b'PYTHONCASEOK' + + def _relax_case(): + """True if filenames must be checked case-insensitively.""" + return key in _os.environ + else: + def _relax_case(): + """True if filenames must be checked case-insensitively.""" + return False + return _relax_case + + +def _w_long(x): + """Convert a 32-bit integer to little-endian.""" + return (int(x) & 0xFFFFFFFF).to_bytes(4, 'little') + + +def _r_long(int_bytes): + """Convert 4 bytes in little-endian to an integer.""" + return int.from_bytes(int_bytes, 'little') + + +def _path_join(*path_parts): + """Replacement for os.path.join().""" + return path_sep.join([part.rstrip(path_separators) + for part in path_parts if part]) + + +def _path_split(path): + """Replacement for os.path.split().""" + if len(path_separators) == 1: + front, _, tail = path.rpartition(path_sep) + return front, tail + for x in reversed(path): + if x in path_separators: + front, tail = path.rsplit(x, maxsplit=1) + return front, tail + return '', path + + +def _path_stat(path): + """Stat the path. + + Made a separate function to make it easier to override in experiments + (e.g. cache stat results). + + """ + return _os.stat(path) + + +def _path_is_mode_type(path, mode): + """Test whether the path is the specified mode type.""" + try: + stat_info = _path_stat(path) + except OSError: + return False + return (stat_info.st_mode & 0o170000) == mode + + +def _path_isfile(path): + """Replacement for os.path.isfile.""" + return _path_is_mode_type(path, 0o100000) + + +def _path_isdir(path): + """Replacement for os.path.isdir.""" + if not path: + path = _os.getcwd() + return _path_is_mode_type(path, 0o040000) + + +def _write_atomic(path, data, mode=0o666): + """Best-effort function to write data to a path atomically. + Be prepared to handle a FileExistsError if concurrent writing of the + temporary file is attempted.""" + # id() is used to generate a pseudo-random filename. + path_tmp = '{}.{}'.format(path, id(path)) + fd = _os.open(path_tmp, + _os.O_EXCL | _os.O_CREAT | _os.O_WRONLY, mode & 0o666) + try: + # We first write data to a temporary file, and then use os.replace() to + # perform an atomic rename. + with _io.FileIO(fd, 'wb') as file: + file.write(data) + _os.replace(path_tmp, path) + except OSError: + try: + _os.unlink(path_tmp) + except OSError: + pass + raise + + +_code_type = type(_write_atomic.__code__) + + +# Finder/loader utility code ############################################### + +# Magic word to reject .pyc files generated by other Python versions. +# It should change for each incompatible change to the bytecode. +# +# The value of CR and LF is incorporated so if you ever read or write +# a .pyc file in text mode the magic number will be wrong; also, the +# Apple MPW compiler swaps their values, botching string constants. +# +# There were a variety of old schemes for setting the magic number. +# The current working scheme is to increment the previous value by +# 10. 
+# +# Starting with the adoption of PEP 3147 in Python 3.2, every bump in magic +# number also includes a new "magic tag", i.e. a human readable string used +# to represent the magic number in __pycache__ directories. When you change +# the magic number, you must also set a new unique magic tag. Generally this +# can be named after the Python major version of the magic number bump, but +# it can really be anything, as long as it's different than anything else +# that's come before. The tags are included in the following table, starting +# with Python 3.2a0. +# +# Known values: +# Python 1.5: 20121 +# Python 1.5.1: 20121 +# Python 1.5.2: 20121 +# Python 1.6: 50428 +# Python 2.0: 50823 +# Python 2.0.1: 50823 +# Python 2.1: 60202 +# Python 2.1.1: 60202 +# Python 2.1.2: 60202 +# Python 2.2: 60717 +# Python 2.3a0: 62011 +# Python 2.3a0: 62021 +# Python 2.3a0: 62011 (!) +# Python 2.4a0: 62041 +# Python 2.4a3: 62051 +# Python 2.4b1: 62061 +# Python 2.5a0: 62071 +# Python 2.5a0: 62081 (ast-branch) +# Python 2.5a0: 62091 (with) +# Python 2.5a0: 62092 (changed WITH_CLEANUP opcode) +# Python 2.5b3: 62101 (fix wrong code: for x, in ...) +# Python 2.5b3: 62111 (fix wrong code: x += yield) +# Python 2.5c1: 62121 (fix wrong lnotab with for loops and +# storing constants that should have been removed) +# Python 2.5c2: 62131 (fix wrong code: for x, in ... in listcomp/genexp) +# Python 2.6a0: 62151 (peephole optimizations and STORE_MAP opcode) +# Python 2.6a1: 62161 (WITH_CLEANUP optimization) +# Python 2.7a0: 62171 (optimize list comprehensions/change LIST_APPEND) +# Python 2.7a0: 62181 (optimize conditional branches: +# introduce POP_JUMP_IF_FALSE and POP_JUMP_IF_TRUE) +# Python 2.7a0 62191 (introduce SETUP_WITH) +# Python 2.7a0 62201 (introduce BUILD_SET) +# Python 2.7a0 62211 (introduce MAP_ADD and SET_ADD) +# Python 3000: 3000 +# 3010 (removed UNARY_CONVERT) +# 3020 (added BUILD_SET) +# 3030 (added keyword-only parameters) +# 3040 (added signature annotations) +# 3050 (print becomes a function) +# 3060 (PEP 3115 metaclass syntax) +# 3061 (string literals become unicode) +# 3071 (PEP 3109 raise changes) +# 3081 (PEP 3137 make __file__ and __name__ unicode) +# 3091 (kill str8 interning) +# 3101 (merge from 2.6a0, see 62151) +# 3103 (__file__ points to source file) +# Python 3.0a4: 3111 (WITH_CLEANUP optimization). 
+# Python 3.0b1: 3131 (lexical exception stacking, including POP_EXCEPT + #3021) +# Python 3.1a1: 3141 (optimize list, set and dict comprehensions: +# change LIST_APPEND and SET_ADD, add MAP_ADD #2183) +# Python 3.1a1: 3151 (optimize conditional branches: +# introduce POP_JUMP_IF_FALSE and POP_JUMP_IF_TRUE + #4715) +# Python 3.2a1: 3160 (add SETUP_WITH #6101) +# tag: cpython-32 +# Python 3.2a2: 3170 (add DUP_TOP_TWO, remove DUP_TOPX and ROT_FOUR #9225) +# tag: cpython-32 +# Python 3.2a3 3180 (add DELETE_DEREF #4617) +# Python 3.3a1 3190 (__class__ super closure changed) +# Python 3.3a1 3200 (PEP 3155 __qualname__ added #13448) +# Python 3.3a1 3210 (added size modulo 2**32 to the pyc header #13645) +# Python 3.3a2 3220 (changed PEP 380 implementation #14230) +# Python 3.3a4 3230 (revert changes to implicit __class__ closure #14857) +# Python 3.4a1 3250 (evaluate positional default arguments before +# keyword-only defaults #16967) +# Python 3.4a1 3260 (add LOAD_CLASSDEREF; allow locals of class to override +# free vars #17853) +# Python 3.4a1 3270 (various tweaks to the __class__ closure #12370) +# Python 3.4a1 3280 (remove implicit class argument) +# Python 3.4a4 3290 (changes to __qualname__ computation #19301) +# Python 3.4a4 3300 (more changes to __qualname__ computation #19301) +# Python 3.4rc2 3310 (alter __qualname__ computation #20625) +# Python 3.5a1 3320 (PEP 465: Matrix multiplication operator #21176) +# Python 3.5b1 3330 (PEP 448: Additional Unpacking Generalizations #2292) +# Python 3.5b2 3340 (fix dictionary display evaluation order #11205) +# Python 3.5b3 3350 (add GET_YIELD_FROM_ITER opcode #24400) +# Python 3.5.2 3351 (fix BUILD_MAP_UNPACK_WITH_CALL opcode #27286) +# Python 3.6a0 3360 (add FORMAT_VALUE opcode #25483) +# Python 3.6a1 3361 (lineno delta of code.co_lnotab becomes signed #26107) +# Python 3.6a2 3370 (16 bit wordcode #26647) +# Python 3.6a2 3371 (add BUILD_CONST_KEY_MAP opcode #27140) +# Python 3.6a2 3372 (MAKE_FUNCTION simplification, remove MAKE_CLOSURE +# #27095) +# Python 3.6b1 3373 (add BUILD_STRING opcode #27078) +# Python 3.6b1 3375 (add SETUP_ANNOTATIONS and STORE_ANNOTATION opcodes +# #27985) +# Python 3.6b1 3376 (simplify CALL_FUNCTIONs & BUILD_MAP_UNPACK_WITH_CALL + #27213) +# Python 3.6b1 3377 (set __class__ cell from type.__new__ #23722) +# Python 3.6b2 3378 (add BUILD_TUPLE_UNPACK_WITH_CALL #28257) +# Python 3.6rc1 3379 (more thorough __class__ validation #23722) +# Python 3.7a1 3390 (add LOAD_METHOD and CALL_METHOD opcodes #26110) +# Python 3.7a2 3391 (update GET_AITER #31709) +# Python 3.7a4 3392 (PEP 552: Deterministic pycs #31650) +# Python 3.7b1 3393 (remove STORE_ANNOTATION opcode #32550) +# Python 3.7b5 3394 (restored docstring as the firts stmt in the body; +# this might affected the first line number #32911) +# +# MAGIC must change whenever the bytecode emitted by the compiler may no +# longer be understood by older implementations of the eval loop (usually +# due to the addition of new opcodes). +# +# Whenever MAGIC_NUMBER is changed, the ranges in the magic_values array +# in PC/launcher.c must also be updated. + +MAGIC_NUMBER = (3394).to_bytes(2, 'little') + b'\r\n' +_RAW_MAGIC_NUMBER = int.from_bytes(MAGIC_NUMBER, 'little') # For import.c + +_PYCACHE = '__pycache__' +_OPT = 'opt-' + +SOURCE_SUFFIXES = ['.py'] # _setup() adds .pyw as needed. + +BYTECODE_SUFFIXES = ['.pyc'] +# Deprecated. 
+DEBUG_BYTECODE_SUFFIXES = OPTIMIZED_BYTECODE_SUFFIXES = BYTECODE_SUFFIXES + +def cache_from_source(path, debug_override=None, *, optimization=None): + """Given the path to a .py file, return the path to its .pyc file. + + The .py file does not need to exist; this simply returns the path to the + .pyc file calculated as if the .py file were imported. + + The 'optimization' parameter controls the presumed optimization level of + the bytecode file. If 'optimization' is not None, the string representation + of the argument is taken and verified to be alphanumeric (else ValueError + is raised). + + The debug_override parameter is deprecated. If debug_override is not None, + a True value is the same as setting 'optimization' to the empty string + while a False value is equivalent to setting 'optimization' to '1'. + + If sys.implementation.cache_tag is None then NotImplementedError is raised. + + """ + if debug_override is not None: + _warnings.warn('the debug_override parameter is deprecated; use ' + "'optimization' instead", DeprecationWarning) + if optimization is not None: + message = 'debug_override or optimization must be set to None' + raise TypeError(message) + optimization = '' if debug_override else 1 + path = _os.fspath(path) + head, tail = _path_split(path) + base, sep, rest = tail.rpartition('.') + tag = sys.implementation.cache_tag + if tag is None: + raise NotImplementedError('sys.implementation.cache_tag is None') + almost_filename = ''.join([(base if base else rest), sep, tag]) + if optimization is None: + if sys.flags.optimize == 0: + optimization = '' + else: + optimization = sys.flags.optimize + optimization = str(optimization) + if optimization != '': + if not optimization.isalnum(): + raise ValueError('{!r} is not alphanumeric'.format(optimization)) + almost_filename = '{}.{}{}'.format(almost_filename, _OPT, optimization) + return _path_join(head, _PYCACHE, almost_filename + BYTECODE_SUFFIXES[0]) + + +def source_from_cache(path): + """Given the path to a .pyc. file, return the path to its .py file. + + The .pyc file does not need to exist; this simply returns the path to + the .py file calculated to correspond to the .pyc file. If path does + not conform to PEP 3147/488 format, ValueError will be raised. If + sys.implementation.cache_tag is None then NotImplementedError is raised. + + """ + if sys.implementation.cache_tag is None: + raise NotImplementedError('sys.implementation.cache_tag is None') + path = _os.fspath(path) + head, pycache_filename = _path_split(path) + head, pycache = _path_split(head) + if pycache != _PYCACHE: + raise ValueError('{} not bottom-level directory in ' + '{!r}'.format(_PYCACHE, path)) + dot_count = pycache_filename.count('.') + if dot_count not in {2, 3}: + raise ValueError('expected only 2 or 3 dots in ' + '{!r}'.format(pycache_filename)) + elif dot_count == 3: + optimization = pycache_filename.rsplit('.', 2)[-2] + if not optimization.startswith(_OPT): + raise ValueError("optimization portion of filename does not start " + "with {!r}".format(_OPT)) + opt_level = optimization[len(_OPT):] + if not opt_level.isalnum(): + raise ValueError("optimization level {!r} is not an alphanumeric " + "value".format(optimization)) + base_filename = pycache_filename.partition('.')[0] + return _path_join(head, base_filename + SOURCE_SUFFIXES[0]) + + +def _get_sourcefile(bytecode_path): + """Convert a bytecode file path to a source path (if possible). 
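# Illustrative aside, not part of this diff: cache_from_source() and
# source_from_cache() above are also exposed via importlib.util, and neither
# requires the file to exist. Using this repository's app/api/v1/views.py path
# purely as an example (the cache tag shown assumes CPython 3.7):

import importlib.util

pyc = importlib.util.cache_from_source('app/api/v1/views.py')
print(pyc)                                     # e.g. app/api/v1/__pycache__/views.cpython-37.pyc
print(importlib.util.source_from_cache(pyc))   # back to app/api/v1/views.py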
+ + This function exists purely for backwards-compatibility for + PyImport_ExecCodeModuleWithFilenames() in the C API. + + """ + if len(bytecode_path) == 0: + return None + rest, _, extension = bytecode_path.rpartition('.') + if not rest or extension.lower()[-3:-1] != 'py': + return bytecode_path + try: + source_path = source_from_cache(bytecode_path) + except (NotImplementedError, ValueError): + source_path = bytecode_path[:-1] + return source_path if _path_isfile(source_path) else bytecode_path + + +def _get_cached(filename): + if filename.endswith(tuple(SOURCE_SUFFIXES)): + try: + return cache_from_source(filename) + except NotImplementedError: + pass + elif filename.endswith(tuple(BYTECODE_SUFFIXES)): + return filename + else: + return None + + +def _calc_mode(path): + """Calculate the mode permissions for a bytecode file.""" + try: + mode = _path_stat(path).st_mode + except OSError: + mode = 0o666 + # We always ensure write access so we can update cached files + # later even when the source files are read-only on Windows (#6074) + mode |= 0o200 + return mode + + +def _check_name(method): + """Decorator to verify that the module being requested matches the one the + loader can handle. + + The first argument (self) must define _name which the second argument is + compared against. If the comparison fails then ImportError is raised. + + """ + def _check_name_wrapper(self, name=None, *args, **kwargs): + if name is None: + name = self.name + elif self.name != name: + raise ImportError('loader for %s cannot handle %s' % + (self.name, name), name=name) + return method(self, name, *args, **kwargs) + try: + _wrap = _bootstrap._wrap + except NameError: + # XXX yuck + def _wrap(new, old): + for replace in ['__module__', '__name__', '__qualname__', '__doc__']: + if hasattr(old, replace): + setattr(new, replace, getattr(old, replace)) + new.__dict__.update(old.__dict__) + _wrap(_check_name_wrapper, method) + return _check_name_wrapper + + +def _find_module_shim(self, fullname): + """Try to find a loader for the specified module by delegating to + self.find_loader(). + + This method is deprecated in favor of finder.find_spec(). + + """ + # Call find_loader(). If it returns a string (indicating this + # is a namespace package portion), generate a warning and + # return None. + loader, portions = self.find_loader(fullname) + if loader is None and len(portions): + msg = 'Not importing directory {}: missing __init__' + _warnings.warn(msg.format(portions[0]), ImportWarning) + return loader + + +def _classify_pyc(data, name, exc_details): + """Perform basic validity checking of a pyc header and return the flags field, + which determines how the pyc should be further validated against the source. + + *data* is the contents of the pyc file. (Only the first 16 bytes are + required, though.) + + *name* is the name of the module being imported. It is used for logging. + + *exc_details* is a dictionary passed to ImportError if it raised for + improved debugging. + + ImportError is raised when the magic number is incorrect or when the flags + field is invalid. EOFError is raised when the data is found to be truncated. 
+ + """ + magic = data[:4] + if magic != MAGIC_NUMBER: + message = f'bad magic number in {name!r}: {magic!r}' + _bootstrap._verbose_message('{}', message) + raise ImportError(message, **exc_details) + if len(data) < 16: + message = f'reached EOF while reading pyc header of {name!r}' + _bootstrap._verbose_message('{}', message) + raise EOFError(message) + flags = _r_long(data[4:8]) + # Only the first two flags are defined. + if flags & ~0b11: + message = f'invalid flags {flags!r} in {name!r}' + raise ImportError(message, **exc_details) + return flags + + +def _validate_timestamp_pyc(data, source_mtime, source_size, name, + exc_details): + """Validate a pyc against the source last-modified time. + + *data* is the contents of the pyc file. (Only the first 16 bytes are + required.) + + *source_mtime* is the last modified timestamp of the source file. + + *source_size* is None or the size of the source file in bytes. + + *name* is the name of the module being imported. It is used for logging. + + *exc_details* is a dictionary passed to ImportError if it raised for + improved debugging. + + An ImportError is raised if the bytecode is stale. + + """ + if _r_long(data[8:12]) != (source_mtime & 0xFFFFFFFF): + message = f'bytecode is stale for {name!r}' + _bootstrap._verbose_message('{}', message) + raise ImportError(message, **exc_details) + if (source_size is not None and + _r_long(data[12:16]) != (source_size & 0xFFFFFFFF)): + raise ImportError(f'bytecode is stale for {name!r}', **exc_details) + + +def _validate_hash_pyc(data, source_hash, name, exc_details): + """Validate a hash-based pyc by checking the real source hash against the one in + the pyc header. + + *data* is the contents of the pyc file. (Only the first 16 bytes are + required.) + + *source_hash* is the importlib.util.source_hash() of the source file. + + *name* is the name of the module being imported. It is used for logging. + + *exc_details* is a dictionary passed to ImportError if it raised for + improved debugging. + + An ImportError is raised if the bytecode is stale. + + """ + if data[8:16] != source_hash: + raise ImportError( + f'hash in bytecode doesn\'t match hash of source {name!r}', + **exc_details, + ) + + +def _compile_bytecode(data, name=None, bytecode_path=None, source_path=None): + """Compile bytecode as found in a pyc.""" + code = marshal.loads(data) + if isinstance(code, _code_type): + _bootstrap._verbose_message('code object from {!r}', bytecode_path) + if source_path is not None: + _imp._fix_co_filename(code, source_path) + return code + else: + raise ImportError('Non-code object in {!r}'.format(bytecode_path), + name=name, path=bytecode_path) + + +def _code_to_timestamp_pyc(code, mtime=0, source_size=0): + "Produce the data for a timestamp-based pyc." + data = bytearray(MAGIC_NUMBER) + data.extend(_w_long(0)) + data.extend(_w_long(mtime)) + data.extend(_w_long(source_size)) + data.extend(marshal.dumps(code)) + return data + + +def _code_to_hash_pyc(code, source_hash, checked=True): + "Produce the data for a hash-based pyc." + data = bytearray(MAGIC_NUMBER) + flags = 0b1 | checked << 1 + data.extend(_w_long(flags)) + assert len(source_hash) == 8 + data.extend(source_hash) + data.extend(marshal.dumps(code)) + return data + + +def decode_source(source_bytes): + """Decode bytes representing source code and return the string. + + Universal newline support is used in the decoding. + """ + import tokenize # To avoid bootstrap issues. 
+ source_bytes_readline = _io.BytesIO(source_bytes).readline + encoding = tokenize.detect_encoding(source_bytes_readline) + newline_decoder = _io.IncrementalNewlineDecoder(None, True) + return newline_decoder.decode(source_bytes.decode(encoding[0])) + + +# Module specifications ####################################################### + +_POPULATE = object() + + +def spec_from_file_location(name, location=None, *, loader=None, + submodule_search_locations=_POPULATE): + """Return a module spec based on a file location. + + To indicate that the module is a package, set + submodule_search_locations to a list of directory paths. An + empty list is sufficient, though its not otherwise useful to the + import system. + + The loader must take a spec as its only __init__() arg. + + """ + if location is None: + # The caller may simply want a partially populated location- + # oriented spec. So we set the location to a bogus value and + # fill in as much as we can. + location = '' + if hasattr(loader, 'get_filename'): + # ExecutionLoader + try: + location = loader.get_filename(name) + except ImportError: + pass + else: + location = _os.fspath(location) + + # If the location is on the filesystem, but doesn't actually exist, + # we could return None here, indicating that the location is not + # valid. However, we don't have a good way of testing since an + # indirect location (e.g. a zip file or URL) will look like a + # non-existent file relative to the filesystem. + + spec = _bootstrap.ModuleSpec(name, loader, origin=location) + spec._set_fileattr = True + + # Pick a loader if one wasn't provided. + if loader is None: + for loader_class, suffixes in _get_supported_file_loaders(): + if location.endswith(tuple(suffixes)): + loader = loader_class(name, location) + spec.loader = loader + break + else: + return None + + # Set submodule_search_paths appropriately. + if submodule_search_locations is _POPULATE: + # Check the loader. 
+ if hasattr(loader, 'is_package'): + try: + is_package = loader.is_package(name) + except ImportError: + pass + else: + if is_package: + spec.submodule_search_locations = [] + else: + spec.submodule_search_locations = submodule_search_locations + if spec.submodule_search_locations == []: + if location: + dirname = _path_split(location)[0] + spec.submodule_search_locations.append(dirname) + + return spec + + +# Loaders ##################################################################### + +class WindowsRegistryFinder: + + """Meta path finder for modules declared in the Windows registry.""" + + REGISTRY_KEY = ( + 'Software\\Python\\PythonCore\\{sys_version}' + '\\Modules\\{fullname}') + REGISTRY_KEY_DEBUG = ( + 'Software\\Python\\PythonCore\\{sys_version}' + '\\Modules\\{fullname}\\Debug') + DEBUG_BUILD = False # Changed in _setup() + + @classmethod + def _open_registry(cls, key): + try: + return _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, key) + except OSError: + return _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, key) + + @classmethod + def _search_registry(cls, fullname): + if cls.DEBUG_BUILD: + registry_key = cls.REGISTRY_KEY_DEBUG + else: + registry_key = cls.REGISTRY_KEY + key = registry_key.format(fullname=fullname, + sys_version='%d.%d' % sys.version_info[:2]) + try: + with cls._open_registry(key) as hkey: + filepath = _winreg.QueryValue(hkey, '') + except OSError: + return None + return filepath + + @classmethod + def find_spec(cls, fullname, path=None, target=None): + filepath = cls._search_registry(fullname) + if filepath is None: + return None + try: + _path_stat(filepath) + except OSError: + return None + for loader, suffixes in _get_supported_file_loaders(): + if filepath.endswith(tuple(suffixes)): + spec = _bootstrap.spec_from_loader(fullname, + loader(fullname, filepath), + origin=filepath) + return spec + + @classmethod + def find_module(cls, fullname, path=None): + """Find module named in the registry. + + This method is deprecated. Use exec_module() instead. + + """ + spec = cls.find_spec(fullname, path) + if spec is not None: + return spec.loader + else: + return None + + +class _LoaderBasics: + + """Base class of common code needed by both SourceLoader and + SourcelessFileLoader.""" + + def is_package(self, fullname): + """Concrete implementation of InspectLoader.is_package by checking if + the path returned by get_filename has a filename of '__init__.py'.""" + filename = _path_split(self.get_filename(fullname))[1] + filename_base = filename.rsplit('.', 1)[0] + tail_name = fullname.rpartition('.')[2] + return filename_base == '__init__' and tail_name != '__init__' + + def create_module(self, spec): + """Use default semantics for module creation.""" + + def exec_module(self, module): + """Execute the module.""" + code = self.get_code(module.__name__) + if code is None: + raise ImportError('cannot load module {!r} when get_code() ' + 'returns None'.format(module.__name__)) + _bootstrap._call_with_frames_removed(exec, code, module.__dict__) + + def load_module(self, fullname): + """This module is deprecated.""" + return _bootstrap._load_module_shim(self, fullname) + + +class SourceLoader(_LoaderBasics): + + def path_mtime(self, path): + """Optional method that returns the modification time (an int) for the + specified path, where path is a str. + + Raises OSError when the path cannot be handled. + """ + raise OSError + + def path_stats(self, path): + """Optional method returning a metadata dict for the specified path + to by the path (str). 
+ Possible keys: + - 'mtime' (mandatory) is the numeric timestamp of last source + code modification; + - 'size' (optional) is the size in bytes of the source code. + + Implementing this method allows the loader to read bytecode files. + Raises OSError when the path cannot be handled. + """ + return {'mtime': self.path_mtime(path)} + + def _cache_bytecode(self, source_path, cache_path, data): + """Optional method which writes data (bytes) to a file path (a str). + + Implementing this method allows for the writing of bytecode files. + + The source path is needed in order to correctly transfer permissions + """ + # For backwards compatibility, we delegate to set_data() + return self.set_data(cache_path, data) + + def set_data(self, path, data): + """Optional method which writes data (bytes) to a file path (a str). + + Implementing this method allows for the writing of bytecode files. + """ + + + def get_source(self, fullname): + """Concrete implementation of InspectLoader.get_source.""" + path = self.get_filename(fullname) + try: + source_bytes = self.get_data(path) + except OSError as exc: + raise ImportError('source not available through get_data()', + name=fullname) from exc + return decode_source(source_bytes) + + def source_to_code(self, data, path, *, _optimize=-1): + """Return the code object compiled from source. + + The 'data' argument can be any object type that compile() supports. + """ + return _bootstrap._call_with_frames_removed(compile, data, path, 'exec', + dont_inherit=True, optimize=_optimize) + + def get_code(self, fullname): + """Concrete implementation of InspectLoader.get_code. + + Reading of bytecode requires path_stats to be implemented. To write + bytecode, set_data must also be implemented. + + """ + source_path = self.get_filename(fullname) + source_mtime = None + source_bytes = None + source_hash = None + hash_based = False + check_source = True + try: + bytecode_path = cache_from_source(source_path) + except NotImplementedError: + bytecode_path = None + else: + try: + st = self.path_stats(source_path) + except OSError: + pass + else: + source_mtime = int(st['mtime']) + try: + data = self.get_data(bytecode_path) + except OSError: + pass + else: + exc_details = { + 'name': fullname, + 'path': bytecode_path, + } + try: + flags = _classify_pyc(data, fullname, exc_details) + bytes_data = memoryview(data)[16:] + hash_based = flags & 0b1 != 0 + if hash_based: + check_source = flags & 0b10 != 0 + if (_imp.check_hash_based_pycs != 'never' and + (check_source or + _imp.check_hash_based_pycs == 'always')): + source_bytes = self.get_data(source_path) + source_hash = _imp.source_hash( + _RAW_MAGIC_NUMBER, + source_bytes, + ) + _validate_hash_pyc(data, source_hash, fullname, + exc_details) + else: + _validate_timestamp_pyc( + data, + source_mtime, + st['size'], + fullname, + exc_details, + ) + except (ImportError, EOFError): + pass + else: + _bootstrap._verbose_message('{} matches {}', bytecode_path, + source_path) + return _compile_bytecode(bytes_data, name=fullname, + bytecode_path=bytecode_path, + source_path=source_path) + if source_bytes is None: + source_bytes = self.get_data(source_path) + code_object = self.source_to_code(source_bytes, source_path) + _bootstrap._verbose_message('code object from {}', source_path) + if (not sys.dont_write_bytecode and bytecode_path is not None and + source_mtime is not None): + if hash_based: + if source_hash is None: + source_hash = _imp.source_hash(source_bytes) + data = _code_to_hash_pyc(code_object, source_hash, check_source) + 
else: + data = _code_to_timestamp_pyc(code_object, source_mtime, + len(source_bytes)) + try: + self._cache_bytecode(source_path, bytecode_path, data) + _bootstrap._verbose_message('wrote {!r}', bytecode_path) + except NotImplementedError: + pass + return code_object + + +class FileLoader: + + """Base file loader class which implements the loader protocol methods that + require file system usage.""" + + def __init__(self, fullname, path): + """Cache the module name and the path to the file found by the + finder.""" + self.name = fullname + self.path = path + + def __eq__(self, other): + return (self.__class__ == other.__class__ and + self.__dict__ == other.__dict__) + + def __hash__(self): + return hash(self.name) ^ hash(self.path) + + @_check_name + def load_module(self, fullname): + """Load a module from a file. + + This method is deprecated. Use exec_module() instead. + + """ + # The only reason for this method is for the name check. + # Issue #14857: Avoid the zero-argument form of super so the implementation + # of that form can be updated without breaking the frozen module + return super(FileLoader, self).load_module(fullname) + + @_check_name + def get_filename(self, fullname): + """Return the path to the source file as found by the finder.""" + return self.path + + def get_data(self, path): + """Return the data from path as raw bytes.""" + with _io.FileIO(path, 'r') as file: + return file.read() + + # ResourceReader ABC API. + + @_check_name + def get_resource_reader(self, module): + if self.is_package(module): + return self + return None + + def open_resource(self, resource): + path = _path_join(_path_split(self.path)[0], resource) + return _io.FileIO(path, 'r') + + def resource_path(self, resource): + if not self.is_resource(resource): + raise FileNotFoundError + path = _path_join(_path_split(self.path)[0], resource) + return path + + def is_resource(self, name): + if path_sep in name: + return False + path = _path_join(_path_split(self.path)[0], name) + return _path_isfile(path) + + def contents(self): + return iter(_os.listdir(_path_split(self.path)[0])) + + +class SourceFileLoader(FileLoader, SourceLoader): + + """Concrete implementation of SourceLoader using the file system.""" + + def path_stats(self, path): + """Return the metadata for the path.""" + st = _path_stat(path) + return {'mtime': st.st_mtime, 'size': st.st_size} + + def _cache_bytecode(self, source_path, bytecode_path, data): + # Adapt between the two APIs + mode = _calc_mode(source_path) + return self.set_data(bytecode_path, data, _mode=mode) + + def set_data(self, path, data, *, _mode=0o666): + """Write bytes data to a file.""" + parent, filename = _path_split(path) + path_parts = [] + # Figure out what directories are missing. + while parent and not _path_isdir(parent): + parent, part = _path_split(parent) + path_parts.append(part) + # Create needed directories. + for part in reversed(path_parts): + parent = _path_join(parent, part) + try: + _os.mkdir(parent) + except FileExistsError: + # Probably another Python process already created the dir. + continue + except OSError as exc: + # Could be a permission error, read-only filesystem: just forget + # about writing the data. + _bootstrap._verbose_message('could not create {!r}: {!r}', + parent, exc) + return + try: + _write_atomic(path, data, _mode) + _bootstrap._verbose_message('created {!r}', path) + except OSError as exc: + # Same as above: just don't write the bytecode. 
+ _bootstrap._verbose_message('could not create {!r}: {!r}', path, + exc) + + +class SourcelessFileLoader(FileLoader, _LoaderBasics): + + """Loader which handles sourceless file imports.""" + + def get_code(self, fullname): + path = self.get_filename(fullname) + data = self.get_data(path) + # Call _classify_pyc to do basic validation of the pyc but ignore the + # result. There's no source to check against. + exc_details = { + 'name': fullname, + 'path': path, + } + _classify_pyc(data, fullname, exc_details) + return _compile_bytecode( + memoryview(data)[16:], + name=fullname, + bytecode_path=path, + ) + + def get_source(self, fullname): + """Return None as there is no source code.""" + return None + + +# Filled in by _setup(). +EXTENSION_SUFFIXES = [] + + +class ExtensionFileLoader(FileLoader, _LoaderBasics): + + """Loader for extension modules. + + The constructor is designed to work with FileFinder. + + """ + + def __init__(self, name, path): + self.name = name + self.path = path + + def __eq__(self, other): + return (self.__class__ == other.__class__ and + self.__dict__ == other.__dict__) + + def __hash__(self): + return hash(self.name) ^ hash(self.path) + + def create_module(self, spec): + """Create an unitialized extension module""" + module = _bootstrap._call_with_frames_removed( + _imp.create_dynamic, spec) + _bootstrap._verbose_message('extension module {!r} loaded from {!r}', + spec.name, self.path) + return module + + def exec_module(self, module): + """Initialize an extension module""" + _bootstrap._call_with_frames_removed(_imp.exec_dynamic, module) + _bootstrap._verbose_message('extension module {!r} executed from {!r}', + self.name, self.path) + + def is_package(self, fullname): + """Return True if the extension module is a package.""" + file_name = _path_split(self.path)[1] + return any(file_name == '__init__' + suffix + for suffix in EXTENSION_SUFFIXES) + + def get_code(self, fullname): + """Return None as an extension module cannot create a code object.""" + return None + + def get_source(self, fullname): + """Return None as extension modules have no source code.""" + return None + + @_check_name + def get_filename(self, fullname): + """Return the path to the source file as found by the finder.""" + return self.path + + +class _NamespacePath: + """Represents a namespace package's path. It uses the module name + to find its parent module, and from there it looks up the parent's + __path__. When this changes, the module's own path is recomputed, + using path_finder. For top-level modules, the parent module's path + is sys.path.""" + + def __init__(self, name, path, path_finder): + self._name = name + self._path = path + self._last_parent_path = tuple(self._get_parent_path()) + self._path_finder = path_finder + + def _find_parent_path_names(self): + """Returns a tuple of (parent-module-name, parent-path-attr-name)""" + parent, dot, me = self._name.rpartition('.') + if dot == '': + # This is a top-level module. sys.path contains the parent path. + return 'sys', 'path' + # Not a top-level module. parent-module.__path__ contains the + # parent path. 
+ return parent, '__path__' + + def _get_parent_path(self): + parent_module_name, path_attr_name = self._find_parent_path_names() + return getattr(sys.modules[parent_module_name], path_attr_name) + + def _recalculate(self): + # If the parent's path has changed, recalculate _path + parent_path = tuple(self._get_parent_path()) # Make a copy + if parent_path != self._last_parent_path: + spec = self._path_finder(self._name, parent_path) + # Note that no changes are made if a loader is returned, but we + # do remember the new parent path + if spec is not None and spec.loader is None: + if spec.submodule_search_locations: + self._path = spec.submodule_search_locations + self._last_parent_path = parent_path # Save the copy + return self._path + + def __iter__(self): + return iter(self._recalculate()) + + def __setitem__(self, index, path): + self._path[index] = path + + def __len__(self): + return len(self._recalculate()) + + def __repr__(self): + return '_NamespacePath({!r})'.format(self._path) + + def __contains__(self, item): + return item in self._recalculate() + + def append(self, item): + self._path.append(item) + + +# We use this exclusively in module_from_spec() for backward-compatibility. +class _NamespaceLoader: + def __init__(self, name, path, path_finder): + self._path = _NamespacePath(name, path, path_finder) + + @classmethod + def module_repr(cls, module): + """Return repr for the module. + + The method is deprecated. The import machinery does the job itself. + + """ + return ''.format(module.__name__) + + def is_package(self, fullname): + return True + + def get_source(self, fullname): + return '' + + def get_code(self, fullname): + return compile('', '', 'exec', dont_inherit=True) + + def create_module(self, spec): + """Use default semantics for module creation.""" + + def exec_module(self, module): + pass + + def load_module(self, fullname): + """Load a namespace module. + + This method is deprecated. Use exec_module() instead. + + """ + # The import system never calls this method. + _bootstrap._verbose_message('namespace module loaded with path {!r}', + self._path) + return _bootstrap._load_module_shim(self, fullname) + + +# Finders ##################################################################### + +class PathFinder: + + """Meta path finder for sys.path and package __path__ attributes.""" + + @classmethod + def invalidate_caches(cls): + """Call the invalidate_caches() method on all path entry finders + stored in sys.path_importer_caches (where implemented).""" + for name, finder in list(sys.path_importer_cache.items()): + if finder is None: + del sys.path_importer_cache[name] + elif hasattr(finder, 'invalidate_caches'): + finder.invalidate_caches() + + @classmethod + def _path_hooks(cls, path): + """Search sys.path_hooks for a finder for 'path'.""" + if sys.path_hooks is not None and not sys.path_hooks: + _warnings.warn('sys.path_hooks is empty', ImportWarning) + for hook in sys.path_hooks: + try: + return hook(path) + except ImportError: + continue + else: + return None + + @classmethod + def _path_importer_cache(cls, path): + """Get the finder for the path entry from sys.path_importer_cache. + + If the path entry is not in the cache, find the appropriate finder + and cache it. If no finder is available, store None. + + """ + if path == '': + try: + path = _os.getcwd() + except FileNotFoundError: + # Don't cache the failure as the cwd can easily change to + # a valid directory later on. 
+ return None + try: + finder = sys.path_importer_cache[path] + except KeyError: + finder = cls._path_hooks(path) + sys.path_importer_cache[path] = finder + return finder + + @classmethod + def _legacy_get_spec(cls, fullname, finder): + # This would be a good place for a DeprecationWarning if + # we ended up going that route. + if hasattr(finder, 'find_loader'): + loader, portions = finder.find_loader(fullname) + else: + loader = finder.find_module(fullname) + portions = [] + if loader is not None: + return _bootstrap.spec_from_loader(fullname, loader) + spec = _bootstrap.ModuleSpec(fullname, None) + spec.submodule_search_locations = portions + return spec + + @classmethod + def _get_spec(cls, fullname, path, target=None): + """Find the loader or namespace_path for this module/package name.""" + # If this ends up being a namespace package, namespace_path is + # the list of paths that will become its __path__ + namespace_path = [] + for entry in path: + if not isinstance(entry, (str, bytes)): + continue + finder = cls._path_importer_cache(entry) + if finder is not None: + if hasattr(finder, 'find_spec'): + spec = finder.find_spec(fullname, target) + else: + spec = cls._legacy_get_spec(fullname, finder) + if spec is None: + continue + if spec.loader is not None: + return spec + portions = spec.submodule_search_locations + if portions is None: + raise ImportError('spec missing loader') + # This is possibly part of a namespace package. + # Remember these path entries (if any) for when we + # create a namespace package, and continue iterating + # on path. + namespace_path.extend(portions) + else: + spec = _bootstrap.ModuleSpec(fullname, None) + spec.submodule_search_locations = namespace_path + return spec + + @classmethod + def find_spec(cls, fullname, path=None, target=None): + """Try to find a spec for 'fullname' on sys.path or 'path'. + + The search is based on sys.path_hooks and sys.path_importer_cache. + """ + if path is None: + path = sys.path + spec = cls._get_spec(fullname, path, target) + if spec is None: + return None + elif spec.loader is None: + namespace_path = spec.submodule_search_locations + if namespace_path: + # We found at least one namespace path. Return a spec which + # can create the namespace package. + spec.origin = None + spec.submodule_search_locations = _NamespacePath(fullname, namespace_path, cls._get_spec) + return spec + else: + return None + else: + return spec + + @classmethod + def find_module(cls, fullname, path=None): + """find the module on sys.path or 'path' based on sys.path_hooks and + sys.path_importer_cache. + + This method is deprecated. Use find_spec() instead. + + """ + spec = cls.find_spec(fullname, path) + if spec is None: + return None + return spec.loader + + +class FileFinder: + + """File-based finder. + + Interactions with the file system are cached for performance, being + refreshed when the directory the finder is handling has been modified. + + """ + + def __init__(self, path, *loader_details): + """Initialize with the path to search on and a variable number of + 2-tuples containing the loader and the file suffixes the loader + recognizes.""" + loaders = [] + for loader, suffixes in loader_details: + loaders.extend((suffix, loader) for suffix in suffixes) + self._loaders = loaders + # Base (directory) path + self.path = path or '.' 
+ self._path_mtime = -1 + self._path_cache = set() + self._relaxed_path_cache = set() + + def invalidate_caches(self): + """Invalidate the directory mtime.""" + self._path_mtime = -1 + + find_module = _find_module_shim + + def find_loader(self, fullname): + """Try to find a loader for the specified module, or the namespace + package portions. Returns (loader, list-of-portions). + + This method is deprecated. Use find_spec() instead. + + """ + spec = self.find_spec(fullname) + if spec is None: + return None, [] + return spec.loader, spec.submodule_search_locations or [] + + def _get_spec(self, loader_class, fullname, path, smsl, target): + loader = loader_class(fullname, path) + return spec_from_file_location(fullname, path, loader=loader, + submodule_search_locations=smsl) + + def find_spec(self, fullname, target=None): + """Try to find a spec for the specified module. + + Returns the matching spec, or None if not found. + """ + is_namespace = False + tail_module = fullname.rpartition('.')[2] + try: + mtime = _path_stat(self.path or _os.getcwd()).st_mtime + except OSError: + mtime = -1 + if mtime != self._path_mtime: + self._fill_cache() + self._path_mtime = mtime + # tail_module keeps the original casing, for __file__ and friends + if _relax_case(): + cache = self._relaxed_path_cache + cache_module = tail_module.lower() + else: + cache = self._path_cache + cache_module = tail_module + # Check if the module is the name of a directory (and thus a package). + if cache_module in cache: + base_path = _path_join(self.path, tail_module) + for suffix, loader_class in self._loaders: + init_filename = '__init__' + suffix + full_path = _path_join(base_path, init_filename) + if _path_isfile(full_path): + return self._get_spec(loader_class, fullname, full_path, [base_path], target) + else: + # If a namespace package, return the path if we don't + # find a module in the next section. + is_namespace = _path_isdir(base_path) + # Check for a file w/ a proper suffix exists. + for suffix, loader_class in self._loaders: + full_path = _path_join(self.path, tail_module + suffix) + _bootstrap._verbose_message('trying {}', full_path, verbosity=2) + if cache_module + suffix in cache: + if _path_isfile(full_path): + return self._get_spec(loader_class, fullname, full_path, + None, target) + if is_namespace: + _bootstrap._verbose_message('possible namespace for {}', base_path) + spec = _bootstrap.ModuleSpec(fullname, None) + spec.submodule_search_locations = [base_path] + return spec + return None + + def _fill_cache(self): + """Fill the cache of potential modules and packages for this directory.""" + path = self.path + try: + contents = _os.listdir(path or _os.getcwd()) + except (FileNotFoundError, PermissionError, NotADirectoryError): + # Directory has either been removed, turned into a file, or made + # unreadable. + contents = [] + # We store two cached versions, to handle runtime changes of the + # PYTHONCASEOK environment variable. + if not sys.platform.startswith('win'): + self._path_cache = set(contents) + else: + # Windows users can import modules with case-insensitive file + # suffixes (for legacy reasons). Make the suffix lowercase here + # so it's done once instead of for every import. This is safe as + # the specified suffixes to check against are always specified in a + # case-sensitive manner. 
+ lower_suffix_contents = set() + for item in contents: + name, dot, suffix = item.partition('.') + if dot: + new_name = '{}.{}'.format(name, suffix.lower()) + else: + new_name = name + lower_suffix_contents.add(new_name) + self._path_cache = lower_suffix_contents + if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS): + self._relaxed_path_cache = {fn.lower() for fn in contents} + + @classmethod + def path_hook(cls, *loader_details): + """A class method which returns a closure to use on sys.path_hook + which will return an instance using the specified loaders and the path + called on the closure. + + If the path called on the closure is not a directory, ImportError is + raised. + + """ + def path_hook_for_FileFinder(path): + """Path hook for importlib.machinery.FileFinder.""" + if not _path_isdir(path): + raise ImportError('only directories are supported', path=path) + return cls(path, *loader_details) + + return path_hook_for_FileFinder + + def __repr__(self): + return 'FileFinder({!r})'.format(self.path) + + +# Import setup ############################################################### + +def _fix_up_module(ns, name, pathname, cpathname=None): + # This function is used by PyImport_ExecCodeModuleObject(). + loader = ns.get('__loader__') + spec = ns.get('__spec__') + if not loader: + if spec: + loader = spec.loader + elif pathname == cpathname: + loader = SourcelessFileLoader(name, pathname) + else: + loader = SourceFileLoader(name, pathname) + if not spec: + spec = spec_from_file_location(name, pathname, loader=loader) + try: + ns['__spec__'] = spec + ns['__loader__'] = loader + ns['__file__'] = pathname + ns['__cached__'] = cpathname + except Exception: + # Not important enough to report. + pass + + +def _get_supported_file_loaders(): + """Returns a list of file-based module loaders. + + Each item is a tuple (loader, suffixes). + """ + extensions = ExtensionFileLoader, _imp.extension_suffixes() + source = SourceFileLoader, SOURCE_SUFFIXES + bytecode = SourcelessFileLoader, BYTECODE_SUFFIXES + return [extensions, source, bytecode] + + +def _setup(_bootstrap_module): + """Setup the path-based importers for importlib by importing needed + built-in modules and injecting them into the global namespace. + + Other components are extracted from the core bootstrap module. + + """ + global sys, _imp, _bootstrap + _bootstrap = _bootstrap_module + sys = _bootstrap.sys + _imp = _bootstrap._imp + + # Directly load built-in modules needed during bootstrap. + self_module = sys.modules[__name__] + for builtin_name in ('_io', '_warnings', 'builtins', 'marshal'): + if builtin_name not in sys.modules: + builtin_module = _bootstrap._builtin_from_name(builtin_name) + else: + builtin_module = sys.modules[builtin_name] + setattr(self_module, builtin_name, builtin_module) + + # Directly load the os module (needed during bootstrap). 
+ os_details = ('posix', ['/']), ('nt', ['\\', '/']) + for builtin_os, path_separators in os_details: + # Assumption made in _path_join() + assert all(len(sep) == 1 for sep in path_separators) + path_sep = path_separators[0] + if builtin_os in sys.modules: + os_module = sys.modules[builtin_os] + break + else: + try: + os_module = _bootstrap._builtin_from_name(builtin_os) + break + except ImportError: + continue + else: + raise ImportError('importlib requires posix or nt') + setattr(self_module, '_os', os_module) + setattr(self_module, 'path_sep', path_sep) + setattr(self_module, 'path_separators', ''.join(path_separators)) + + # Directly load the _thread module (needed during bootstrap). + thread_module = _bootstrap._builtin_from_name('_thread') + setattr(self_module, '_thread', thread_module) + + # Directly load the _weakref module (needed during bootstrap). + weakref_module = _bootstrap._builtin_from_name('_weakref') + setattr(self_module, '_weakref', weakref_module) + + # Directly load the winreg module (needed during bootstrap). + if builtin_os == 'nt': + winreg_module = _bootstrap._builtin_from_name('winreg') + setattr(self_module, '_winreg', winreg_module) + + # Constants + setattr(self_module, '_relax_case', _make_relax_case()) + EXTENSION_SUFFIXES.extend(_imp.extension_suffixes()) + if builtin_os == 'nt': + SOURCE_SUFFIXES.append('.pyw') + if '_d.pyd' in EXTENSION_SUFFIXES: + WindowsRegistryFinder.DEBUG_BUILD = True + + +def _install(_bootstrap_module): + """Install the path-based import components.""" + _setup(_bootstrap_module) + supported_loaders = _get_supported_file_loaders() + sys.path_hooks.extend([FileFinder.path_hook(*supported_loaders)]) + sys.meta_path.append(PathFinder) diff --git a/venv/Lib/importlib/abc.py b/venv/Lib/importlib/abc.py new file mode 100644 index 00000000..dbdd5bf6 --- /dev/null +++ b/venv/Lib/importlib/abc.py @@ -0,0 +1,388 @@ +"""Abstract base classes related to import.""" +from . import _bootstrap +from . import _bootstrap_external +from . import machinery +try: + import _frozen_importlib +except ImportError as exc: + if exc.name != '_frozen_importlib': + raise + _frozen_importlib = None +try: + import _frozen_importlib_external +except ImportError as exc: + _frozen_importlib_external = _bootstrap_external +import abc +import warnings + + +def _register(abstract_cls, *classes): + for cls in classes: + abstract_cls.register(cls) + if _frozen_importlib is not None: + try: + frozen_cls = getattr(_frozen_importlib, cls.__name__) + except AttributeError: + frozen_cls = getattr(_frozen_importlib_external, cls.__name__) + abstract_cls.register(frozen_cls) + + +class Finder(metaclass=abc.ABCMeta): + + """Legacy abstract base class for import finders. + + It may be subclassed for compatibility with legacy third party + reimplementations of the import system. Otherwise, finder + implementations should derive from the more specific MetaPathFinder + or PathEntryFinder ABCs. + + Deprecated since Python 3.3 + """ + + @abc.abstractmethod + def find_module(self, fullname, path=None): + """An abstract method that should find a module. + The fullname is a str and the optional path is a str or None. + Returns a Loader object or None. + """ + + +class MetaPathFinder(Finder): + + """Abstract base class for import finders on sys.meta_path.""" + + # We don't define find_spec() here since that would break + # hasattr checks we do to support backward compatibility. + + def find_module(self, fullname, path): + """Return a loader for the module. 
+ + If no module is found, return None. The fullname is a str and + the path is a list of strings or None. + + This method is deprecated since Python 3.4 in favor of + finder.find_spec(). If find_spec() exists then backwards-compatible + functionality is provided for this method. + + """ + warnings.warn("MetaPathFinder.find_module() is deprecated since Python " + "3.4 in favor of MetaPathFinder.find_spec()" + "(available since 3.4)", + DeprecationWarning, + stacklevel=2) + if not hasattr(self, 'find_spec'): + return None + found = self.find_spec(fullname, path) + return found.loader if found is not None else None + + def invalidate_caches(self): + """An optional method for clearing the finder's cache, if any. + This method is used by importlib.invalidate_caches(). + """ + +_register(MetaPathFinder, machinery.BuiltinImporter, machinery.FrozenImporter, + machinery.PathFinder, machinery.WindowsRegistryFinder) + + +class PathEntryFinder(Finder): + + """Abstract base class for path entry finders used by PathFinder.""" + + # We don't define find_spec() here since that would break + # hasattr checks we do to support backward compatibility. + + def find_loader(self, fullname): + """Return (loader, namespace portion) for the path entry. + + The fullname is a str. The namespace portion is a sequence of + path entries contributing to part of a namespace package. The + sequence may be empty. If loader is not None, the portion will + be ignored. + + The portion will be discarded if another path entry finder + locates the module as a normal module or package. + + This method is deprecated since Python 3.4 in favor of + finder.find_spec(). If find_spec() is provided than backwards-compatible + functionality is provided. + """ + warnings.warn("PathEntryFinder.find_loader() is deprecated since Python " + "3.4 in favor of PathEntryFinder.find_spec() " + "(available since 3.4)", + DeprecationWarning, + stacklevel=2) + if not hasattr(self, 'find_spec'): + return None, [] + found = self.find_spec(fullname) + if found is not None: + if not found.submodule_search_locations: + portions = [] + else: + portions = found.submodule_search_locations + return found.loader, portions + else: + return None, [] + + find_module = _bootstrap_external._find_module_shim + + def invalidate_caches(self): + """An optional method for clearing the finder's cache, if any. + This method is used by PathFinder.invalidate_caches(). + """ + +_register(PathEntryFinder, machinery.FileFinder) + + +class Loader(metaclass=abc.ABCMeta): + + """Abstract base class for import loaders.""" + + def create_module(self, spec): + """Return a module to initialize and into which to load. + + This method should raise ImportError if anything prevents it + from creating a new module. It may return None to indicate + that the spec should create the new module. + """ + # By default, defer to default semantics for the new module. + return None + + # We don't define exec_module() here since that would break + # hasattr checks we do to support backward compatibility. + + def load_module(self, fullname): + """Return the loaded module. + + The module must be added to sys.modules and have import-related + attributes set properly. The fullname is a str. + + ImportError is raised on failure. + + This method is deprecated in favor of loader.exec_module(). If + exec_module() exists then it is used to provide a backwards-compatible + functionality for this method. 
+ + """ + if not hasattr(self, 'exec_module'): + raise ImportError + return _bootstrap._load_module_shim(self, fullname) + + def module_repr(self, module): + """Return a module's repr. + + Used by the module type when the method does not raise + NotImplementedError. + + This method is deprecated. + + """ + # The exception will cause ModuleType.__repr__ to ignore this method. + raise NotImplementedError + + +class ResourceLoader(Loader): + + """Abstract base class for loaders which can return data from their + back-end storage. + + This ABC represents one of the optional protocols specified by PEP 302. + + """ + + @abc.abstractmethod + def get_data(self, path): + """Abstract method which when implemented should return the bytes for + the specified path. The path must be a str.""" + raise OSError + + +class InspectLoader(Loader): + + """Abstract base class for loaders which support inspection about the + modules they can load. + + This ABC represents one of the optional protocols specified by PEP 302. + + """ + + def is_package(self, fullname): + """Optional method which when implemented should return whether the + module is a package. The fullname is a str. Returns a bool. + + Raises ImportError if the module cannot be found. + """ + raise ImportError + + def get_code(self, fullname): + """Method which returns the code object for the module. + + The fullname is a str. Returns a types.CodeType if possible, else + returns None if a code object does not make sense + (e.g. built-in module). Raises ImportError if the module cannot be + found. + """ + source = self.get_source(fullname) + if source is None: + return None + return self.source_to_code(source) + + @abc.abstractmethod + def get_source(self, fullname): + """Abstract method which should return the source code for the + module. The fullname is a str. Returns a str. + + Raises ImportError if the module cannot be found. + """ + raise ImportError + + @staticmethod + def source_to_code(data, path=''): + """Compile 'data' into a code object. + + The 'data' argument can be anything that compile() can handle. The'path' + argument should be where the data was retrieved (when applicable).""" + return compile(data, path, 'exec', dont_inherit=True) + + exec_module = _bootstrap_external._LoaderBasics.exec_module + load_module = _bootstrap_external._LoaderBasics.load_module + +_register(InspectLoader, machinery.BuiltinImporter, machinery.FrozenImporter) + + +class ExecutionLoader(InspectLoader): + + """Abstract base class for loaders that wish to support the execution of + modules as scripts. + + This ABC represents one of the optional protocols specified in PEP 302. + + """ + + @abc.abstractmethod + def get_filename(self, fullname): + """Abstract method which should return the value that __file__ is to be + set to. + + Raises ImportError if the module cannot be found. + """ + raise ImportError + + def get_code(self, fullname): + """Method to return the code object for fullname. + + Should return None if not applicable (e.g. built-in module). + Raise ImportError if the module cannot be found. 
+ """ + source = self.get_source(fullname) + if source is None: + return None + try: + path = self.get_filename(fullname) + except ImportError: + return self.source_to_code(source) + else: + return self.source_to_code(source, path) + +_register(ExecutionLoader, machinery.ExtensionFileLoader) + + +class FileLoader(_bootstrap_external.FileLoader, ResourceLoader, ExecutionLoader): + + """Abstract base class partially implementing the ResourceLoader and + ExecutionLoader ABCs.""" + +_register(FileLoader, machinery.SourceFileLoader, + machinery.SourcelessFileLoader) + + +class SourceLoader(_bootstrap_external.SourceLoader, ResourceLoader, ExecutionLoader): + + """Abstract base class for loading source code (and optionally any + corresponding bytecode). + + To support loading from source code, the abstractmethods inherited from + ResourceLoader and ExecutionLoader need to be implemented. To also support + loading from bytecode, the optional methods specified directly by this ABC + is required. + + Inherited abstractmethods not implemented in this ABC: + + * ResourceLoader.get_data + * ExecutionLoader.get_filename + + """ + + def path_mtime(self, path): + """Return the (int) modification time for the path (str).""" + if self.path_stats.__func__ is SourceLoader.path_stats: + raise OSError + return int(self.path_stats(path)['mtime']) + + def path_stats(self, path): + """Return a metadata dict for the source pointed to by the path (str). + Possible keys: + - 'mtime' (mandatory) is the numeric timestamp of last source + code modification; + - 'size' (optional) is the size in bytes of the source code. + """ + if self.path_mtime.__func__ is SourceLoader.path_mtime: + raise OSError + return {'mtime': self.path_mtime(path)} + + def set_data(self, path, data): + """Write the bytes to the path (if possible). + + Accepts a str path and data as bytes. + + Any needed intermediary directories are to be created. If for some + reason the file cannot be written because of permissions, fail + silently. + """ + +_register(SourceLoader, machinery.SourceFileLoader) + + +class ResourceReader(metaclass=abc.ABCMeta): + + """Abstract base class to provide resource-reading support. + + Loaders that support resource reading are expected to implement + the ``get_resource_reader(fullname)`` method and have it either return None + or an object compatible with this ABC. + """ + + @abc.abstractmethod + def open_resource(self, resource): + """Return an opened, file-like object for binary reading. + + The 'resource' argument is expected to represent only a file name + and thus not contain any subdirectory components. + + If the resource cannot be found, FileNotFoundError is raised. + """ + raise FileNotFoundError + + @abc.abstractmethod + def resource_path(self, resource): + """Return the file system path to the specified resource. + + The 'resource' argument is expected to represent only a file name + and thus not contain any subdirectory components. + + If the resource does not exist on the file system, raise + FileNotFoundError. 
+ """ + raise FileNotFoundError + + @abc.abstractmethod + def is_resource(self, name): + """Return True if the named 'name' is consider a resource.""" + raise FileNotFoundError + + @abc.abstractmethod + def contents(self): + """Return an iterable of strings over the contents of the package.""" + return [] + + +_register(ResourceReader, machinery.SourceFileLoader) diff --git a/venv/Lib/importlib/machinery.py b/venv/Lib/importlib/machinery.py new file mode 100644 index 00000000..1b2b5c9b --- /dev/null +++ b/venv/Lib/importlib/machinery.py @@ -0,0 +1,21 @@ +"""The machinery of importlib: finders, loaders, hooks, etc.""" + +import _imp + +from ._bootstrap import ModuleSpec +from ._bootstrap import BuiltinImporter +from ._bootstrap import FrozenImporter +from ._bootstrap_external import (SOURCE_SUFFIXES, DEBUG_BYTECODE_SUFFIXES, + OPTIMIZED_BYTECODE_SUFFIXES, BYTECODE_SUFFIXES, + EXTENSION_SUFFIXES) +from ._bootstrap_external import WindowsRegistryFinder +from ._bootstrap_external import PathFinder +from ._bootstrap_external import FileFinder +from ._bootstrap_external import SourceFileLoader +from ._bootstrap_external import SourcelessFileLoader +from ._bootstrap_external import ExtensionFileLoader + + +def all_suffixes(): + """Returns a list of all recognized module suffixes for this process""" + return SOURCE_SUFFIXES + BYTECODE_SUFFIXES + EXTENSION_SUFFIXES diff --git a/venv/Lib/importlib/resources.py b/venv/Lib/importlib/resources.py new file mode 100644 index 00000000..cbefdd54 --- /dev/null +++ b/venv/Lib/importlib/resources.py @@ -0,0 +1,343 @@ +import os +import tempfile + +from . import abc as resources_abc +from contextlib import contextmanager, suppress +from importlib import import_module +from importlib.abc import ResourceLoader +from io import BytesIO, TextIOWrapper +from pathlib import Path +from types import ModuleType +from typing import Iterable, Iterator, Optional, Set, Union # noqa: F401 +from typing import cast +from typing.io import BinaryIO, TextIO +from zipimport import ZipImportError + + +__all__ = [ + 'Package', + 'Resource', + 'contents', + 'is_resource', + 'open_binary', + 'open_text', + 'path', + 'read_binary', + 'read_text', + ] + + +Package = Union[str, ModuleType] +Resource = Union[str, os.PathLike] + + +def _get_package(package) -> ModuleType: + """Take a package name or module object and return the module. + + If a name, the module is imported. If the passed or imported module + object is not a package, raise an exception. + """ + if hasattr(package, '__spec__'): + if package.__spec__.submodule_search_locations is None: + raise TypeError('{!r} is not a package'.format( + package.__spec__.name)) + else: + return package + else: + module = import_module(package) + if module.__spec__.submodule_search_locations is None: + raise TypeError('{!r} is not a package'.format(package)) + else: + return module + + +def _normalize_path(path) -> str: + """Normalize a path by ensuring it is a string. + + If the resulting string contains path separators, an exception is raised. + """ + parent, file_name = os.path.split(path) + if parent: + raise ValueError('{!r} must be only a file name'.format(path)) + else: + return file_name + + +def _get_resource_reader( + package: ModuleType) -> Optional[resources_abc.ResourceReader]: + # Return the package's loader if it's a ResourceReader. 
We can't use + # a issubclass() check here because apparently abc.'s __subclasscheck__() + # hook wants to create a weak reference to the object, but + # zipimport.zipimporter does not support weak references, resulting in a + # TypeError. That seems terrible. + spec = package.__spec__ + if hasattr(spec.loader, 'get_resource_reader'): + return cast(resources_abc.ResourceReader, + spec.loader.get_resource_reader(spec.name)) + return None + + +def _check_location(package): + if package.__spec__.origin is None or not package.__spec__.has_location: + raise FileNotFoundError(f'Package has no location {package!r}') + + +def open_binary(package: Package, resource: Resource) -> BinaryIO: + """Return a file-like object opened for binary reading of the resource.""" + resource = _normalize_path(resource) + package = _get_package(package) + reader = _get_resource_reader(package) + if reader is not None: + return reader.open_resource(resource) + _check_location(package) + absolute_package_path = os.path.abspath(package.__spec__.origin) + package_path = os.path.dirname(absolute_package_path) + full_path = os.path.join(package_path, resource) + try: + return open(full_path, mode='rb') + except OSError: + # Just assume the loader is a resource loader; all the relevant + # importlib.machinery loaders are and an AttributeError for + # get_data() will make it clear what is needed from the loader. + loader = cast(ResourceLoader, package.__spec__.loader) + data = None + if hasattr(package.__spec__.loader, 'get_data'): + with suppress(OSError): + data = loader.get_data(full_path) + if data is None: + package_name = package.__spec__.name + message = '{!r} resource not found in {!r}'.format( + resource, package_name) + raise FileNotFoundError(message) + else: + return BytesIO(data) + + +def open_text(package: Package, + resource: Resource, + encoding: str = 'utf-8', + errors: str = 'strict') -> TextIO: + """Return a file-like object opened for text reading of the resource.""" + resource = _normalize_path(resource) + package = _get_package(package) + reader = _get_resource_reader(package) + if reader is not None: + return TextIOWrapper(reader.open_resource(resource), encoding, errors) + _check_location(package) + absolute_package_path = os.path.abspath(package.__spec__.origin) + package_path = os.path.dirname(absolute_package_path) + full_path = os.path.join(package_path, resource) + try: + return open(full_path, mode='r', encoding=encoding, errors=errors) + except OSError: + # Just assume the loader is a resource loader; all the relevant + # importlib.machinery loaders are and an AttributeError for + # get_data() will make it clear what is needed from the loader. + loader = cast(ResourceLoader, package.__spec__.loader) + data = None + if hasattr(package.__spec__.loader, 'get_data'): + with suppress(OSError): + data = loader.get_data(full_path) + if data is None: + package_name = package.__spec__.name + message = '{!r} resource not found in {!r}'.format( + resource, package_name) + raise FileNotFoundError(message) + else: + return TextIOWrapper(BytesIO(data), encoding, errors) + + +def read_binary(package: Package, resource: Resource) -> bytes: + """Return the binary contents of the resource.""" + resource = _normalize_path(resource) + package = _get_package(package) + with open_binary(package, resource) as fp: + return fp.read() + + +def read_text(package: Package, + resource: Resource, + encoding: str = 'utf-8', + errors: str = 'strict') -> str: + """Return the decoded string of the resource. 
+ + The decoding-related arguments have the same semantics as those of + bytes.decode(). + """ + resource = _normalize_path(resource) + package = _get_package(package) + with open_text(package, resource, encoding, errors) as fp: + return fp.read() + + +@contextmanager +def path(package: Package, resource: Resource) -> Iterator[Path]: + """A context manager providing a file path object to the resource. + + If the resource does not already exist on its own on the file system, + a temporary file will be created. If the file was created, the file + will be deleted upon exiting the context manager (no exception is + raised if the file was deleted prior to the context manager + exiting). + """ + resource = _normalize_path(resource) + package = _get_package(package) + reader = _get_resource_reader(package) + if reader is not None: + try: + yield Path(reader.resource_path(resource)) + return + except FileNotFoundError: + pass + else: + _check_location(package) + # Fall-through for both the lack of resource_path() *and* if + # resource_path() raises FileNotFoundError. + package_directory = Path(package.__spec__.origin).parent + file_path = package_directory / resource + if file_path.exists(): + yield file_path + else: + with open_binary(package, resource) as fp: + data = fp.read() + # Not using tempfile.NamedTemporaryFile as it leads to deeper 'try' + # blocks due to the need to close the temporary file to work on + # Windows properly. + fd, raw_path = tempfile.mkstemp() + try: + os.write(fd, data) + os.close(fd) + yield Path(raw_path) + finally: + try: + os.remove(raw_path) + except FileNotFoundError: + pass + + +def is_resource(package: Package, name: str) -> bool: + """True if 'name' is a resource inside 'package'. + + Directories are *not* resources. + """ + package = _get_package(package) + _normalize_path(name) + reader = _get_resource_reader(package) + if reader is not None: + return reader.is_resource(name) + try: + package_contents = set(contents(package)) + except (NotADirectoryError, FileNotFoundError): + return False + if name not in package_contents: + return False + # Just because the given file_name lives as an entry in the package's + # contents doesn't necessarily mean it's a resource. Directories are not + # resources, so let's try to find out if it's a directory or not. + path = Path(package.__spec__.origin).parent / name + return path.is_file() + + +def contents(package: Package) -> Iterable[str]: + """Return an iterable of entries in 'package'. + + Note that not all entries are resources. Specifically, directories are + not considered resources. Use `is_resource()` on each entry returned here + to check if it is a resource or not. + """ + package = _get_package(package) + reader = _get_resource_reader(package) + if reader is not None: + return reader.contents() + # Is the package a namespace package? By definition, namespace packages + # cannot have resources. We could use _check_location() and catch the + # exception, but that's extra work, so just inline the check. + elif package.__spec__.origin is None or not package.__spec__.has_location: + return () + else: + package_directory = Path(package.__spec__.origin).parent + return os.listdir(package_directory) + + +# Private implementation of ResourceReader and get_resource_reader() called +# from zipimport.c. Don't use these directly! We're implementing these in +# Python because 1) it's easier, 2) zipimport may get rewritten in Python +# itself at some point, so doing this all in C would difficult and a waste of +# effort. 
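The public helpers defined above (`contents()`, `is_resource()`, `read_text()`, `path()` and friends) are easiest to follow from a caller's point of view. A minimal usage sketch, using the standard-library `json` package purely as a convenient example package (illustrative only, not part of the committed diff):

```python
# Any importable package with file resources would work equally well.
from importlib import resources

print(sorted(resources.contents('json')))              # entries include '__init__.py', 'decoder.py', ...
print(resources.is_resource('json', '__init__.py'))    # True; directories are never resources
text = resources.read_text('json', '__init__.py')      # decoded as UTF-8 by default
with resources.path('json', '__init__.py') as p:       # concrete filesystem path (a temp copy if needed)
    print(p)
print(len(text))
```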
+ +class _ZipImportResourceReader(resources_abc.ResourceReader): + """Private class used to support ZipImport.get_resource_reader(). + + This class is allowed to reference all the innards and private parts of + the zipimporter. + """ + + def __init__(self, zipimporter, fullname): + self.zipimporter = zipimporter + self.fullname = fullname + + def open_resource(self, resource): + fullname_as_path = self.fullname.replace('.', '/') + path = f'{fullname_as_path}/{resource}' + try: + return BytesIO(self.zipimporter.get_data(path)) + except OSError: + raise FileNotFoundError(path) + + def resource_path(self, resource): + # All resources are in the zip file, so there is no path to the file. + # Raising FileNotFoundError tells the higher level API to extract the + # binary data and create a temporary file. + raise FileNotFoundError + + def is_resource(self, name): + # Maybe we could do better, but if we can get the data, it's a + # resource. Otherwise it isn't. + fullname_as_path = self.fullname.replace('.', '/') + path = f'{fullname_as_path}/{name}' + try: + self.zipimporter.get_data(path) + except OSError: + return False + return True + + def contents(self): + # This is a bit convoluted, because fullname will be a module path, + # but _files is a list of file names relative to the top of the + # archive's namespace. We want to compare file paths to find all the + # names of things inside the module represented by fullname. So we + # turn the module path of fullname into a file path relative to the + # top of the archive, and then we iterate through _files looking for + # names inside that "directory". + fullname_path = Path(self.zipimporter.get_filename(self.fullname)) + relative_path = fullname_path.relative_to(self.zipimporter.archive) + # Don't forget that fullname names a package, so its path will include + # __init__.py, which we want to ignore. + assert relative_path.name == '__init__.py' + package_path = relative_path.parent + subdirs_seen = set() + for filename in self.zipimporter._files: + try: + relative = Path(filename).relative_to(package_path) + except ValueError: + continue + # If the path of the file (which is relative to the top of the zip + # namespace), relative to the package given when the resource + # reader was created, has a parent, then it's a name in a + # subdirectory and thus we skip it. + parent_name = relative.parent.name + if len(parent_name) == 0: + yield relative.name + elif parent_name not in subdirs_seen: + subdirs_seen.add(parent_name) + yield parent_name + + +# Called from zipimport.c +def _zipimport_get_resource_reader(zipimporter, fullname): + try: + if not zipimporter.is_package(fullname): + return None + except ZipImportError: + return None + return _ZipImportResourceReader(zipimporter, fullname) diff --git a/venv/Lib/importlib/util.py b/venv/Lib/importlib/util.py new file mode 100644 index 00000000..201e0f4c --- /dev/null +++ b/venv/Lib/importlib/util.py @@ -0,0 +1,300 @@ +"""Utility code for constructing importers, etc.""" +from . 
import abc +from ._bootstrap import module_from_spec +from ._bootstrap import _resolve_name +from ._bootstrap import spec_from_loader +from ._bootstrap import _find_spec +from ._bootstrap_external import MAGIC_NUMBER +from ._bootstrap_external import _RAW_MAGIC_NUMBER +from ._bootstrap_external import cache_from_source +from ._bootstrap_external import decode_source +from ._bootstrap_external import source_from_cache +from ._bootstrap_external import spec_from_file_location + +from contextlib import contextmanager +import _imp +import functools +import sys +import types +import warnings + + +def source_hash(source_bytes): + "Return the hash of *source_bytes* as used in hash-based pyc files." + return _imp.source_hash(_RAW_MAGIC_NUMBER, source_bytes) + + +def resolve_name(name, package): + """Resolve a relative module name to an absolute one.""" + if not name.startswith('.'): + return name + elif not package: + raise ValueError(f'no package specified for {repr(name)} ' + '(required for relative module names)') + level = 0 + for character in name: + if character != '.': + break + level += 1 + return _resolve_name(name[level:], package, level) + + +def _find_spec_from_path(name, path=None): + """Return the spec for the specified module. + + First, sys.modules is checked to see if the module was already imported. If + so, then sys.modules[name].__spec__ is returned. If that happens to be + set to None, then ValueError is raised. If the module is not in + sys.modules, then sys.meta_path is searched for a suitable spec with the + value of 'path' given to the finders. None is returned if no spec could + be found. + + Dotted names do not have their parent packages implicitly imported. You will + most likely need to explicitly import all parent packages in the proper + order for a submodule to get the correct spec. + + """ + if name not in sys.modules: + return _find_spec(name, path) + else: + module = sys.modules[name] + if module is None: + return None + try: + spec = module.__spec__ + except AttributeError: + raise ValueError('{}.__spec__ is not set'.format(name)) from None + else: + if spec is None: + raise ValueError('{}.__spec__ is None'.format(name)) + return spec + + +def find_spec(name, package=None): + """Return the spec for the specified module. + + First, sys.modules is checked to see if the module was already imported. If + so, then sys.modules[name].__spec__ is returned. If that happens to be + set to None, then ValueError is raised. If the module is not in + sys.modules, then sys.meta_path is searched for a suitable spec with the + value of 'path' given to the finders. None is returned if no spec could + be found. + + If the name is for submodule (contains a dot), the parent module is + automatically imported. + + The name and package arguments work the same as importlib.import_module(). + In other words, relative module names (with leading dots) work. 
+ + """ + fullname = resolve_name(name, package) if name.startswith('.') else name + if fullname not in sys.modules: + parent_name = fullname.rpartition('.')[0] + if parent_name: + parent = __import__(parent_name, fromlist=['__path__']) + try: + parent_path = parent.__path__ + except AttributeError as e: + raise ModuleNotFoundError( + f"__path__ attribute not found on {parent_name!r} " + f"while trying to find {fullname!r}", name=fullname) from e + else: + parent_path = None + return _find_spec(fullname, parent_path) + else: + module = sys.modules[fullname] + if module is None: + return None + try: + spec = module.__spec__ + except AttributeError: + raise ValueError('{}.__spec__ is not set'.format(name)) from None + else: + if spec is None: + raise ValueError('{}.__spec__ is None'.format(name)) + return spec + + +@contextmanager +def _module_to_load(name): + is_reload = name in sys.modules + + module = sys.modules.get(name) + if not is_reload: + # This must be done before open() is called as the 'io' module + # implicitly imports 'locale' and would otherwise trigger an + # infinite loop. + module = type(sys)(name) + # This must be done before putting the module in sys.modules + # (otherwise an optimization shortcut in import.c becomes wrong) + module.__initializing__ = True + sys.modules[name] = module + try: + yield module + except Exception: + if not is_reload: + try: + del sys.modules[name] + except KeyError: + pass + finally: + module.__initializing__ = False + + +def set_package(fxn): + """Set __package__ on the returned module. + + This function is deprecated. + + """ + @functools.wraps(fxn) + def set_package_wrapper(*args, **kwargs): + warnings.warn('The import system now takes care of this automatically.', + DeprecationWarning, stacklevel=2) + module = fxn(*args, **kwargs) + if getattr(module, '__package__', None) is None: + module.__package__ = module.__name__ + if not hasattr(module, '__path__'): + module.__package__ = module.__package__.rpartition('.')[0] + return module + return set_package_wrapper + + +def set_loader(fxn): + """Set __loader__ on the returned module. + + This function is deprecated. + + """ + @functools.wraps(fxn) + def set_loader_wrapper(self, *args, **kwargs): + warnings.warn('The import system now takes care of this automatically.', + DeprecationWarning, stacklevel=2) + module = fxn(self, *args, **kwargs) + if getattr(module, '__loader__', None) is None: + module.__loader__ = self + return module + return set_loader_wrapper + + +def module_for_loader(fxn): + """Decorator to handle selecting the proper module for loaders. + + The decorated function is passed the module to use instead of the module + name. The module passed in to the function is either from sys.modules if + it already exists or is a new module. If the module is new, then __name__ + is set the first argument to the method, __loader__ is set to self, and + __package__ is set accordingly (if self.is_package() is defined) will be set + before it is passed to the decorated function (if self.is_package() does + not work for the module it will be set post-load). + + If an exception is raised and the decorator created the module it is + subsequently removed from sys.modules. + + The decorator assumes that the decorated function takes the module name as + the second argument. 
+ + """ + warnings.warn('The import system now takes care of this automatically.', + DeprecationWarning, stacklevel=2) + @functools.wraps(fxn) + def module_for_loader_wrapper(self, fullname, *args, **kwargs): + with _module_to_load(fullname) as module: + module.__loader__ = self + try: + is_package = self.is_package(fullname) + except (ImportError, AttributeError): + pass + else: + if is_package: + module.__package__ = fullname + else: + module.__package__ = fullname.rpartition('.')[0] + # If __package__ was not set above, __import__() will do it later. + return fxn(self, module, *args, **kwargs) + + return module_for_loader_wrapper + + +class _LazyModule(types.ModuleType): + + """A subclass of the module type which triggers loading upon attribute access.""" + + def __getattribute__(self, attr): + """Trigger the load of the module and return the attribute.""" + # All module metadata must be garnered from __spec__ in order to avoid + # using mutated values. + # Stop triggering this method. + self.__class__ = types.ModuleType + # Get the original name to make sure no object substitution occurred + # in sys.modules. + original_name = self.__spec__.name + # Figure out exactly what attributes were mutated between the creation + # of the module and now. + attrs_then = self.__spec__.loader_state['__dict__'] + original_type = self.__spec__.loader_state['__class__'] + attrs_now = self.__dict__ + attrs_updated = {} + for key, value in attrs_now.items(): + # Code that set the attribute may have kept a reference to the + # assigned object, making identity more important than equality. + if key not in attrs_then: + attrs_updated[key] = value + elif id(attrs_now[key]) != id(attrs_then[key]): + attrs_updated[key] = value + self.__spec__.loader.exec_module(self) + # If exec_module() was used directly there is no guarantee the module + # object was put into sys.modules. + if original_name in sys.modules: + if id(self) != id(sys.modules[original_name]): + raise ValueError(f"module object for {original_name!r} " + "substituted in sys.modules during a lazy " + "load") + # Update after loading since that's what would happen in an eager + # loading situation. + self.__dict__.update(attrs_updated) + return getattr(self, attr) + + def __delattr__(self, attr): + """Trigger the load and then perform the deletion.""" + # To trigger the load and raise an exception if the attribute + # doesn't exist. + self.__getattribute__(attr) + delattr(self, attr) + + +class LazyLoader(abc.Loader): + + """A loader that creates a module which defers loading until attribute access.""" + + @staticmethod + def __check_eager_loader(loader): + if not hasattr(loader, 'exec_module'): + raise TypeError('loader must define exec_module()') + + @classmethod + def factory(cls, loader): + """Construct a callable which returns the eager loader made lazy.""" + cls.__check_eager_loader(loader) + return lambda *args, **kwargs: cls(loader(*args, **kwargs)) + + def __init__(self, loader): + self.__check_eager_loader(loader) + self.loader = loader + + def create_module(self, spec): + return self.loader.create_module(spec) + + def exec_module(self, module): + """Make the module load lazily.""" + module.__spec__.loader = self.loader + module.__loader__ = self.loader + # Don't need to worry about deep-copying as trying to set an attribute + # on an object would have triggered the load, + # e.g. ``module.__spec__.loader = None`` would trigger a load from + # trying to access module.__spec__. 
+ loader_state = {} + loader_state['__dict__'] = module.__dict__.copy() + loader_state['__class__'] = module.__class__ + module.__spec__.loader_state = loader_state + module.__class__ = _LazyModule diff --git a/venv/Lib/io.py b/venv/Lib/io.py new file mode 100644 index 00000000..968ee507 --- /dev/null +++ b/venv/Lib/io.py @@ -0,0 +1,99 @@ +"""The io module provides the Python interfaces to stream handling. The +builtin open function is defined in this module. + +At the top of the I/O hierarchy is the abstract base class IOBase. It +defines the basic interface to a stream. Note, however, that there is no +separation between reading and writing to streams; implementations are +allowed to raise an OSError if they do not support a given operation. + +Extending IOBase is RawIOBase which deals simply with the reading and +writing of raw bytes to a stream. FileIO subclasses RawIOBase to provide +an interface to OS files. + +BufferedIOBase deals with buffering on a raw byte stream (RawIOBase). Its +subclasses, BufferedWriter, BufferedReader, and BufferedRWPair buffer +streams that are readable, writable, and both respectively. +BufferedRandom provides a buffered interface to random access +streams. BytesIO is a simple stream of in-memory bytes. + +Another IOBase subclass, TextIOBase, deals with the encoding and decoding +of streams into text. TextIOWrapper, which extends it, is a buffered text +interface to a buffered raw stream (`BufferedIOBase`). Finally, StringIO +is an in-memory stream for text. + +Argument names are not part of the specification, and only the arguments +of open() are intended to be used as keyword arguments. + +data: + +DEFAULT_BUFFER_SIZE + + An int containing the default buffer size used by the module's buffered + I/O classes. open() uses the file's blksize (as obtained by os.stat) if + possible. +""" +# New I/O library conforming to PEP 3116. + +__author__ = ("Guido van Rossum , " + "Mike Verdone , " + "Mark Russell , " + "Antoine Pitrou , " + "Amaury Forgeot d'Arc , " + "Benjamin Peterson ") + +__all__ = ["BlockingIOError", "open", "IOBase", "RawIOBase", "FileIO", + "BytesIO", "StringIO", "BufferedIOBase", + "BufferedReader", "BufferedWriter", "BufferedRWPair", + "BufferedRandom", "TextIOBase", "TextIOWrapper", + "UnsupportedOperation", "SEEK_SET", "SEEK_CUR", "SEEK_END"] + + +import _io +import abc + +from _io import (DEFAULT_BUFFER_SIZE, BlockingIOError, UnsupportedOperation, + open, FileIO, BytesIO, StringIO, BufferedReader, + BufferedWriter, BufferedRWPair, BufferedRandom, + IncrementalNewlineDecoder, TextIOWrapper) + +OpenWrapper = _io.open # for compatibility with _pyio + +# Pretend this exception was created here. +UnsupportedOperation.__module__ = "io" + +# for seek() +SEEK_SET = 0 +SEEK_CUR = 1 +SEEK_END = 2 + +# Declaring ABCs in C is tricky so we do it here. +# Method descriptions and default implementations are inherited from the C +# version however. 
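For orientation, a minimal editorial sketch (not part of the committed venv file) of the stream hierarchy the io docstring above describes; it assumes nothing beyond the standard io module.

import io

raw = io.BytesIO(b"store manager")                             # in-memory binary stream
text = io.TextIOWrapper(io.BytesIO(b"abc"), encoding="utf-8")  # buffered text interface over bytes

# BytesIO is registered as a BufferedIOBase and TextIOWrapper as a TextIOBase
# (see the register() calls that follow), so the ABC checks below hold.
assert isinstance(raw, io.BufferedIOBase)
assert isinstance(text, io.TextIOBase)
assert raw.read(5) == b"store"
assert text.read() == "abc"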
+class IOBase(_io._IOBase, metaclass=abc.ABCMeta): + __doc__ = _io._IOBase.__doc__ + +class RawIOBase(_io._RawIOBase, IOBase): + __doc__ = _io._RawIOBase.__doc__ + +class BufferedIOBase(_io._BufferedIOBase, IOBase): + __doc__ = _io._BufferedIOBase.__doc__ + +class TextIOBase(_io._TextIOBase, IOBase): + __doc__ = _io._TextIOBase.__doc__ + +RawIOBase.register(FileIO) + +for klass in (BytesIO, BufferedReader, BufferedWriter, BufferedRandom, + BufferedRWPair): + BufferedIOBase.register(klass) + +for klass in (StringIO, TextIOWrapper): + TextIOBase.register(klass) +del klass + +try: + from _io import _WindowsConsoleIO +except ImportError: + pass +else: + RawIOBase.register(_WindowsConsoleIO) diff --git a/venv/Lib/keyword.py b/venv/Lib/keyword.py new file mode 100644 index 00000000..431991dc --- /dev/null +++ b/venv/Lib/keyword.py @@ -0,0 +1,96 @@ +#! /usr/bin/env python3 + +"""Keywords (from "graminit.c") + +This file is automatically generated; please don't muck it up! + +To update the symbols in this file, 'cd' to the top directory of +the python source tree after building the interpreter and run: + + ./python Lib/keyword.py +""" + +__all__ = ["iskeyword", "kwlist"] + +kwlist = [ +#--start keywords-- + 'False', + 'None', + 'True', + 'and', + 'as', + 'assert', + 'async', + 'await', + 'break', + 'class', + 'continue', + 'def', + 'del', + 'elif', + 'else', + 'except', + 'finally', + 'for', + 'from', + 'global', + 'if', + 'import', + 'in', + 'is', + 'lambda', + 'nonlocal', + 'not', + 'or', + 'pass', + 'raise', + 'return', + 'try', + 'while', + 'with', + 'yield', +#--end keywords-- + ] + +iskeyword = frozenset(kwlist).__contains__ + +def main(): + import sys, re + + args = sys.argv[1:] + iptfile = args and args[0] or "Python/graminit.c" + if len(args) > 1: optfile = args[1] + else: optfile = "Lib/keyword.py" + + # load the output skeleton from the target, taking care to preserve its + # newline convention. + with open(optfile, newline='') as fp: + format = fp.readlines() + nl = format[0][len(format[0].strip()):] if format else '\n' + + # scan the source file for keywords + with open(iptfile) as fp: + strprog = re.compile('"([^"]+)"') + lines = [] + for line in fp: + if '{1, "' in line: + match = strprog.search(line) + if match: + lines.append(" '" + match.group(1) + "'," + nl) + lines.sort() + + # insert the lines of keywords into the skeleton + try: + start = format.index("#--start keywords--" + nl) + 1 + end = format.index("#--end keywords--" + nl) + format[start:end] = lines + except ValueError: + sys.stderr.write("target does not contain format markers\n") + sys.exit(1) + + # write the output file + with open(optfile, 'w', newline='') as fp: + fp.writelines(format) + +if __name__ == "__main__": + main() diff --git a/venv/Lib/linecache.py b/venv/Lib/linecache.py new file mode 100644 index 00000000..3afcce1f --- /dev/null +++ b/venv/Lib/linecache.py @@ -0,0 +1,177 @@ +"""Cache lines from Python source files. + +This is intended to read lines from modules imported -- hence if a filename +is not found, it will look down the module search path for a file by +that name. +""" + +import functools +import sys +import os +import tokenize + +__all__ = ["getline", "clearcache", "checkcache"] + +def getline(filename, lineno, module_globals=None): + lines = getlines(filename, module_globals) + if 1 <= lineno <= len(lines): + return lines[lineno-1] + else: + return '' + + +# The cache + +# The cache. 
Maps filenames to either a thunk which will provide source code, +# or a tuple (size, mtime, lines, fullname) once loaded. +cache = {} + + +def clearcache(): + """Clear the cache entirely.""" + + global cache + cache = {} + + +def getlines(filename, module_globals=None): + """Get the lines for a Python source file from the cache. + Update the cache if it doesn't contain an entry for this file already.""" + + if filename in cache: + entry = cache[filename] + if len(entry) != 1: + return cache[filename][2] + + try: + return updatecache(filename, module_globals) + except MemoryError: + clearcache() + return [] + + +def checkcache(filename=None): + """Discard cache entries that are out of date. + (This is not checked upon each call!)""" + + if filename is None: + filenames = list(cache.keys()) + else: + if filename in cache: + filenames = [filename] + else: + return + + for filename in filenames: + entry = cache[filename] + if len(entry) == 1: + # lazy cache entry, leave it lazy. + continue + size, mtime, lines, fullname = entry + if mtime is None: + continue # no-op for files loaded via a __loader__ + try: + stat = os.stat(fullname) + except OSError: + del cache[filename] + continue + if size != stat.st_size or mtime != stat.st_mtime: + del cache[filename] + + +def updatecache(filename, module_globals=None): + """Update a cache entry and return its list of lines. + If something's wrong, print a message, discard the cache entry, + and return an empty list.""" + + if filename in cache: + if len(cache[filename]) != 1: + del cache[filename] + if not filename or (filename.startswith('<') and filename.endswith('>')): + return [] + + fullname = filename + try: + stat = os.stat(fullname) + except OSError: + basename = filename + + # Realise a lazy loader based lookup if there is one + # otherwise try to lookup right now. + if lazycache(filename, module_globals): + try: + data = cache[filename][0]() + except (ImportError, OSError): + pass + else: + if data is None: + # No luck, the PEP302 loader cannot find the source + # for this module. + return [] + cache[filename] = ( + len(data), None, + [line+'\n' for line in data.splitlines()], fullname + ) + return cache[filename][2] + + # Try looking through the module search path, which is only useful + # when handling a relative filename. + if os.path.isabs(filename): + return [] + + for dirname in sys.path: + try: + fullname = os.path.join(dirname, basename) + except (TypeError, AttributeError): + # Not sufficiently string-like to do anything useful with. + continue + try: + stat = os.stat(fullname) + break + except OSError: + pass + else: + return [] + try: + with tokenize.open(fullname) as fp: + lines = fp.readlines() + except OSError: + return [] + if lines and not lines[-1].endswith('\n'): + lines[-1] += '\n' + size, mtime = stat.st_size, stat.st_mtime + cache[filename] = size, mtime, lines, fullname + return lines + + +def lazycache(filename, module_globals): + """Seed the cache for filename with module_globals. + + The module loader will be asked for the source only when getlines is + called, not immediately. + + If there is an entry in the cache already, it is not altered. + + :return: True if a lazy load is registered in the cache, + otherwise False. To register such a load a module loader with a + get_source method must be found, the filename must be a cachable + filename, and the filename must not be already cached. 
+ """ + if filename in cache: + if len(cache[filename]) == 1: + return True + else: + return False + if not filename or (filename.startswith('<') and filename.endswith('>')): + return False + # Try for a __loader__, if available + if module_globals and '__loader__' in module_globals: + name = module_globals.get('__name__') + loader = module_globals['__loader__'] + get_source = getattr(loader, 'get_source', None) + + if name and get_source: + get_lines = functools.partial(get_source, name) + cache[filename] = (get_lines,) + return True + return False diff --git a/venv/Lib/locale.py b/venv/Lib/locale.py new file mode 100644 index 00000000..f3d3973d --- /dev/null +++ b/venv/Lib/locale.py @@ -0,0 +1,1749 @@ +"""Locale support module. + +The module provides low-level access to the C lib's locale APIs and adds high +level number formatting APIs as well as a locale aliasing engine to complement +these. + +The aliasing engine includes support for many commonly used locale names and +maps them to values suitable for passing to the C lib's setlocale() function. It +also includes default encodings for all supported locale names. + +""" + +import sys +import encodings +import encodings.aliases +import re +import _collections_abc +from builtins import str as _builtin_str +import functools + +# Try importing the _locale module. +# +# If this fails, fall back on a basic 'C' locale emulation. + +# Yuck: LC_MESSAGES is non-standard: can't tell whether it exists before +# trying the import. So __all__ is also fiddled at the end of the file. +__all__ = ["getlocale", "getdefaultlocale", "getpreferredencoding", "Error", + "setlocale", "resetlocale", "localeconv", "strcoll", "strxfrm", + "str", "atof", "atoi", "format", "format_string", "currency", + "normalize", "LC_CTYPE", "LC_COLLATE", "LC_TIME", "LC_MONETARY", + "LC_NUMERIC", "LC_ALL", "CHAR_MAX"] + +def _strcoll(a,b): + """ strcoll(string,string) -> int. + Compares two strings according to the locale. + """ + return (a > b) - (a < b) + +def _strxfrm(s): + """ strxfrm(string) -> string. + Returns a string that behaves for cmp locale-aware. + """ + return s + +try: + + from _locale import * + +except ImportError: + + # Locale emulation + + CHAR_MAX = 127 + LC_ALL = 6 + LC_COLLATE = 3 + LC_CTYPE = 0 + LC_MESSAGES = 5 + LC_MONETARY = 4 + LC_NUMERIC = 1 + LC_TIME = 2 + Error = ValueError + + def localeconv(): + """ localeconv() -> dict. + Returns numeric and monetary locale-specific parameters. + """ + # 'C' locale default values + return {'grouping': [127], + 'currency_symbol': '', + 'n_sign_posn': 127, + 'p_cs_precedes': 127, + 'n_cs_precedes': 127, + 'mon_grouping': [], + 'n_sep_by_space': 127, + 'decimal_point': '.', + 'negative_sign': '', + 'positive_sign': '', + 'p_sep_by_space': 127, + 'int_curr_symbol': '', + 'p_sign_posn': 127, + 'thousands_sep': '', + 'mon_thousands_sep': '', + 'frac_digits': 127, + 'mon_decimal_point': '', + 'int_frac_digits': 127} + + def setlocale(category, value=None): + """ setlocale(integer,string=None) -> string. + Activates/queries locale processing. + """ + if value not in (None, '', 'C'): + raise Error('_locale emulation only supports "C" locale') + return 'C' + +# These may or may not exist in _locale, so be sure to set them. +if 'strxfrm' not in globals(): + strxfrm = _strxfrm +if 'strcoll' not in globals(): + strcoll = _strcoll + + +_localeconv = localeconv + +# With this dict, you can override some items of localeconv's return value. +# This is useful for testing purposes. 
+_override_localeconv = {} + +@functools.wraps(_localeconv) +def localeconv(): + d = _localeconv() + if _override_localeconv: + d.update(_override_localeconv) + return d + + +### Number formatting APIs + +# Author: Martin von Loewis +# improved by Georg Brandl + +# Iterate over grouping intervals +def _grouping_intervals(grouping): + last_interval = None + for interval in grouping: + # if grouping is -1, we are done + if interval == CHAR_MAX: + return + # 0: re-use last group ad infinitum + if interval == 0: + if last_interval is None: + raise ValueError("invalid grouping") + while True: + yield last_interval + yield interval + last_interval = interval + +#perform the grouping from right to left +def _group(s, monetary=False): + conv = localeconv() + thousands_sep = conv[monetary and 'mon_thousands_sep' or 'thousands_sep'] + grouping = conv[monetary and 'mon_grouping' or 'grouping'] + if not grouping: + return (s, 0) + if s[-1] == ' ': + stripped = s.rstrip() + right_spaces = s[len(stripped):] + s = stripped + else: + right_spaces = '' + left_spaces = '' + groups = [] + for interval in _grouping_intervals(grouping): + if not s or s[-1] not in "0123456789": + # only non-digit characters remain (sign, spaces) + left_spaces = s + s = '' + break + groups.append(s[-interval:]) + s = s[:-interval] + if s: + groups.append(s) + groups.reverse() + return ( + left_spaces + thousands_sep.join(groups) + right_spaces, + len(thousands_sep) * (len(groups) - 1) + ) + +# Strip a given amount of excess padding from the given string +def _strip_padding(s, amount): + lpos = 0 + while amount and s[lpos] == ' ': + lpos += 1 + amount -= 1 + rpos = len(s) - 1 + while amount and s[rpos] == ' ': + rpos -= 1 + amount -= 1 + return s[lpos:rpos+1] + +_percent_re = re.compile(r'%(?:\((?P.*?)\))?' + r'(?P[-#0-9 +*.hlL]*?)[eEfFgGdiouxXcrs%]') + +def _format(percent, value, grouping=False, monetary=False, *additional): + if additional: + formatted = percent % ((value,) + additional) + else: + formatted = percent % value + # floats and decimal ints need special action! + if percent[-1] in 'eEfFgG': + seps = 0 + parts = formatted.split('.') + if grouping: + parts[0], seps = _group(parts[0], monetary=monetary) + decimal_point = localeconv()[monetary and 'mon_decimal_point' + or 'decimal_point'] + formatted = decimal_point.join(parts) + if seps: + formatted = _strip_padding(formatted, seps) + elif percent[-1] in 'diu': + seps = 0 + if grouping: + formatted, seps = _group(formatted, monetary=monetary) + if seps: + formatted = _strip_padding(formatted, seps) + return formatted + +def format_string(f, val, grouping=False, monetary=False): + """Formats a string in the same way that the % formatting would use, + but takes the current locale into account. + + Grouping is applied if the third parameter is true. 
+ Conversion uses monetary thousands separator and grouping strings if + forth parameter monetary is true.""" + percents = list(_percent_re.finditer(f)) + new_f = _percent_re.sub('%s', f) + + if isinstance(val, _collections_abc.Mapping): + new_val = [] + for perc in percents: + if perc.group()[-1]=='%': + new_val.append('%') + else: + new_val.append(_format(perc.group(), val, grouping, monetary)) + else: + if not isinstance(val, tuple): + val = (val,) + new_val = [] + i = 0 + for perc in percents: + if perc.group()[-1]=='%': + new_val.append('%') + else: + starcount = perc.group('modifiers').count('*') + new_val.append(_format(perc.group(), + val[i], + grouping, + monetary, + *val[i+1:i+1+starcount])) + i += (1 + starcount) + val = tuple(new_val) + + return new_f % val + +def format(percent, value, grouping=False, monetary=False, *additional): + """Deprecated, use format_string instead.""" + import warnings + warnings.warn( + "This method will be removed in a future version of Python. " + "Use 'locale.format_string()' instead.", + DeprecationWarning, stacklevel=2 + ) + + match = _percent_re.match(percent) + if not match or len(match.group())!= len(percent): + raise ValueError(("format() must be given exactly one %%char " + "format specifier, %s not valid") % repr(percent)) + return _format(percent, value, grouping, monetary, *additional) + +def currency(val, symbol=True, grouping=False, international=False): + """Formats val according to the currency settings + in the current locale.""" + conv = localeconv() + + # check for illegal values + digits = conv[international and 'int_frac_digits' or 'frac_digits'] + if digits == 127: + raise ValueError("Currency formatting is not possible using " + "the 'C' locale.") + + s = _format('%%.%if' % digits, abs(val), grouping, monetary=True) + # '<' and '>' are markers if the sign must be inserted between symbol and value + s = '<' + s + '>' + + if symbol: + smb = conv[international and 'int_curr_symbol' or 'currency_symbol'] + precedes = conv[val<0 and 'n_cs_precedes' or 'p_cs_precedes'] + separated = conv[val<0 and 'n_sep_by_space' or 'p_sep_by_space'] + + if precedes: + s = smb + (separated and ' ' or '') + s + else: + s = s + (separated and ' ' or '') + smb + + sign_pos = conv[val<0 and 'n_sign_posn' or 'p_sign_posn'] + sign = conv[val<0 and 'negative_sign' or 'positive_sign'] + + if sign_pos == 0: + s = '(' + s + ')' + elif sign_pos == 1: + s = sign + s + elif sign_pos == 2: + s = s + sign + elif sign_pos == 3: + s = s.replace('<', sign) + elif sign_pos == 4: + s = s.replace('>', sign) + else: + # the default if nothing specified; + # this should be the most fitting sign position + s = sign + s + + return s.replace('<', '').replace('>', '') + +def str(val): + """Convert float to string, taking the locale into account.""" + return _format("%.12g", val) + +def delocalize(string): + "Parses a string as a normalized number according to the locale settings." + + conv = localeconv() + + #First, get rid of the grouping + ts = conv['thousands_sep'] + if ts: + string = string.replace(ts, '') + + #next, replace the decimal point with a dot + dd = conv['decimal_point'] + if dd: + string = string.replace(dd, '.') + return string + +def atof(string, func=float): + "Parses a string as a float according to the locale settings." + return func(delocalize(string)) + +def atoi(string): + "Converts a string to an integer according to the locale settings." 
+ return int(delocalize(string)) + +def _test(): + setlocale(LC_ALL, "") + #do grouping + s1 = format_string("%d", 123456789,1) + print(s1, "is", atoi(s1)) + #standard formatting + s1 = str(3.14) + print(s1, "is", atof(s1)) + +### Locale name aliasing engine + +# Author: Marc-Andre Lemburg, mal@lemburg.com +# Various tweaks by Fredrik Lundh + +# store away the low-level version of setlocale (it's +# overridden below) +_setlocale = setlocale + +def _replace_encoding(code, encoding): + if '.' in code: + langname = code[:code.index('.')] + else: + langname = code + # Convert the encoding to a C lib compatible encoding string + norm_encoding = encodings.normalize_encoding(encoding) + #print('norm encoding: %r' % norm_encoding) + norm_encoding = encodings.aliases.aliases.get(norm_encoding.lower(), + norm_encoding) + #print('aliased encoding: %r' % norm_encoding) + encoding = norm_encoding + norm_encoding = norm_encoding.lower() + if norm_encoding in locale_encoding_alias: + encoding = locale_encoding_alias[norm_encoding] + else: + norm_encoding = norm_encoding.replace('_', '') + norm_encoding = norm_encoding.replace('-', '') + if norm_encoding in locale_encoding_alias: + encoding = locale_encoding_alias[norm_encoding] + #print('found encoding %r' % encoding) + return langname + '.' + encoding + +def _append_modifier(code, modifier): + if modifier == 'euro': + if '.' not in code: + return code + '.ISO8859-15' + _, _, encoding = code.partition('.') + if encoding in ('ISO8859-15', 'UTF-8'): + return code + if encoding == 'ISO8859-1': + return _replace_encoding(code, 'ISO8859-15') + return code + '@' + modifier + +def normalize(localename): + + """ Returns a normalized locale code for the given locale + name. + + The returned locale code is formatted for use with + setlocale(). + + If normalization fails, the original name is returned + unchanged. + + If the given encoding is not known, the function defaults to + the default encoding for the locale code just like setlocale() + does. + + """ + # Normalize the locale name and extract the encoding and modifier + code = localename.lower() + if ':' in code: + # ':' is sometimes used as encoding delimiter. + code = code.replace(':', '.') + if '@' in code: + code, modifier = code.split('@', 1) + else: + modifier = '' + if '.' in code: + langname, encoding = code.split('.')[:2] + else: + langname = code + encoding = '' + + # First lookup: fullname (possibly with encoding and modifier) + lang_enc = langname + if encoding: + norm_encoding = encoding.replace('-', '') + norm_encoding = norm_encoding.replace('_', '') + lang_enc += '.' 
+ norm_encoding + lookup_name = lang_enc + if modifier: + lookup_name += '@' + modifier + code = locale_alias.get(lookup_name, None) + if code is not None: + return code + #print('first lookup failed') + + if modifier: + # Second try: fullname without modifier (possibly with encoding) + code = locale_alias.get(lang_enc, None) + if code is not None: + #print('lookup without modifier succeeded') + if '@' not in code: + return _append_modifier(code, modifier) + if code.split('@', 1)[1].lower() == modifier: + return code + #print('second lookup failed') + + if encoding: + # Third try: langname (without encoding, possibly with modifier) + lookup_name = langname + if modifier: + lookup_name += '@' + modifier + code = locale_alias.get(lookup_name, None) + if code is not None: + #print('lookup without encoding succeeded') + if '@' not in code: + return _replace_encoding(code, encoding) + code, modifier = code.split('@', 1) + return _replace_encoding(code, encoding) + '@' + modifier + + if modifier: + # Fourth try: langname (without encoding and modifier) + code = locale_alias.get(langname, None) + if code is not None: + #print('lookup without modifier and encoding succeeded') + if '@' not in code: + code = _replace_encoding(code, encoding) + return _append_modifier(code, modifier) + code, defmod = code.split('@', 1) + if defmod.lower() == modifier: + return _replace_encoding(code, encoding) + '@' + defmod + + return localename + +def _parse_localename(localename): + + """ Parses the locale code for localename and returns the + result as tuple (language code, encoding). + + The localename is normalized and passed through the locale + alias engine. A ValueError is raised in case the locale name + cannot be parsed. + + The language code corresponds to RFC 1766. code and encoding + can be None in case the values cannot be determined or are + unknown to this implementation. + + """ + code = normalize(localename) + if '@' in code: + # Deal with locale modifiers + code, modifier = code.split('@', 1) + if modifier == 'euro' and '.' not in code: + # Assume Latin-9 for @euro locales. This is bogus, + # since some systems may use other encodings for these + # locales. Also, we ignore other modifiers. + return code, 'iso-8859-15' + + if '.' in code: + return tuple(code.split('.')[:2]) + elif code == 'C': + return None, None + raise ValueError('unknown locale: %s' % localename) + +def _build_localename(localetuple): + + """ Builds a locale code from the given tuple (language code, + encoding). + + No aliasing or normalizing takes place. + + """ + try: + language, encoding = localetuple + + if language is None: + language = 'C' + if encoding is None: + return language + else: + return language + '.' + encoding + except (TypeError, ValueError): + raise TypeError('Locale must be None, a string, or an iterable of ' + 'two strings -- language code, encoding.') from None + +def getdefaultlocale(envvars=('LC_ALL', 'LC_CTYPE', 'LANG', 'LANGUAGE')): + + """ Tries to determine the default locale settings and returns + them as tuple (language code, encoding). + + According to POSIX, a program which has not called + setlocale(LC_ALL, "") runs using the portable 'C' locale. + Calling setlocale(LC_ALL, "") lets it use the default locale as + defined by the LANG variable. Since we don't want to interfere + with the current locale setting we thus emulate the behavior + in the way described above. 
+ + To maintain compatibility with other platforms, not only the + LANG variable is tested, but a list of variables given as + envvars parameter. The first found to be defined will be + used. envvars defaults to the search path used in GNU gettext; + it must always contain the variable name 'LANG'. + + Except for the code 'C', the language code corresponds to RFC + 1766. code and encoding can be None in case the values cannot + be determined. + + """ + + try: + # check if it's supported by the _locale module + import _locale + code, encoding = _locale._getdefaultlocale() + except (ImportError, AttributeError): + pass + else: + # make sure the code/encoding values are valid + if sys.platform == "win32" and code and code[:2] == "0x": + # map windows language identifier to language name + code = windows_locale.get(int(code, 0)) + # ...add other platform-specific processing here, if + # necessary... + return code, encoding + + # fall back on POSIX behaviour + import os + lookup = os.environ.get + for variable in envvars: + localename = lookup(variable,None) + if localename: + if variable == 'LANGUAGE': + localename = localename.split(':')[0] + break + else: + localename = 'C' + return _parse_localename(localename) + + +def getlocale(category=LC_CTYPE): + + """ Returns the current setting for the given locale category as + tuple (language code, encoding). + + category may be one of the LC_* value except LC_ALL. It + defaults to LC_CTYPE. + + Except for the code 'C', the language code corresponds to RFC + 1766. code and encoding can be None in case the values cannot + be determined. + + """ + localename = _setlocale(category) + if category == LC_ALL and ';' in localename: + raise TypeError('category LC_ALL is not supported') + return _parse_localename(localename) + +def setlocale(category, locale=None): + + """ Set the locale for the given category. The locale can be + a string, an iterable of two strings (language code and encoding), + or None. + + Iterables are converted to strings using the locale aliasing + engine. Locale strings are passed directly to the C lib. + + category may be given as one of the LC_* values. + + """ + if locale and not isinstance(locale, _builtin_str): + # convert to string + locale = normalize(_build_localename(locale)) + return _setlocale(category, locale) + +def resetlocale(category=LC_ALL): + + """ Sets the locale for category to the default setting. + + The default setting is determined by calling + getdefaultlocale(). category defaults to LC_ALL. + + """ + _setlocale(category, _build_localename(getdefaultlocale())) + +if sys.platform.startswith("win"): + # On Win32, this will return the ANSI code page + def getpreferredencoding(do_setlocale = True): + """Return the charset that the user is likely using.""" + if sys.flags.utf8_mode: + return 'UTF-8' + import _bootlocale + return _bootlocale.getpreferredencoding(False) +else: + # On Unix, if CODESET is available, use that. + try: + CODESET + except NameError: + if hasattr(sys, 'getandroidapilevel'): + # On Android langinfo.h and CODESET are missing, and UTF-8 is + # always used in mbstowcs() and wcstombs(). 
+ def getpreferredencoding(do_setlocale = True): + return 'UTF-8' + else: + # Fall back to parsing environment variables :-( + def getpreferredencoding(do_setlocale = True): + """Return the charset that the user is likely using, + by looking at environment variables.""" + if sys.flags.utf8_mode: + return 'UTF-8' + res = getdefaultlocale()[1] + if res is None: + # LANG not set, default conservatively to ASCII + res = 'ascii' + return res + else: + def getpreferredencoding(do_setlocale = True): + """Return the charset that the user is likely using, + according to the system configuration.""" + if sys.flags.utf8_mode: + return 'UTF-8' + import _bootlocale + if do_setlocale: + oldloc = setlocale(LC_CTYPE) + try: + setlocale(LC_CTYPE, "") + except Error: + pass + result = _bootlocale.getpreferredencoding(False) + if do_setlocale: + setlocale(LC_CTYPE, oldloc) + return result + + +### Database +# +# The following data was extracted from the locale.alias file which +# comes with X11 and then hand edited removing the explicit encoding +# definitions and adding some more aliases. The file is usually +# available as /usr/lib/X11/locale/locale.alias. +# + +# +# The local_encoding_alias table maps lowercase encoding alias names +# to C locale encoding names (case-sensitive). Note that normalize() +# first looks up the encoding in the encodings.aliases dictionary and +# then applies this mapping to find the correct C lib name for the +# encoding. +# +locale_encoding_alias = { + + # Mappings for non-standard encoding names used in locale names + '437': 'C', + 'c': 'C', + 'en': 'ISO8859-1', + 'jis': 'JIS7', + 'jis7': 'JIS7', + 'ajec': 'eucJP', + 'koi8c': 'KOI8-C', + 'microsoftcp1251': 'CP1251', + 'microsoftcp1255': 'CP1255', + 'microsoftcp1256': 'CP1256', + '88591': 'ISO8859-1', + '88592': 'ISO8859-2', + '88595': 'ISO8859-5', + '885915': 'ISO8859-15', + + # Mappings from Python codec names to C lib encoding names + 'ascii': 'ISO8859-1', + 'latin_1': 'ISO8859-1', + 'iso8859_1': 'ISO8859-1', + 'iso8859_10': 'ISO8859-10', + 'iso8859_11': 'ISO8859-11', + 'iso8859_13': 'ISO8859-13', + 'iso8859_14': 'ISO8859-14', + 'iso8859_15': 'ISO8859-15', + 'iso8859_16': 'ISO8859-16', + 'iso8859_2': 'ISO8859-2', + 'iso8859_3': 'ISO8859-3', + 'iso8859_4': 'ISO8859-4', + 'iso8859_5': 'ISO8859-5', + 'iso8859_6': 'ISO8859-6', + 'iso8859_7': 'ISO8859-7', + 'iso8859_8': 'ISO8859-8', + 'iso8859_9': 'ISO8859-9', + 'iso2022_jp': 'JIS7', + 'shift_jis': 'SJIS', + 'tactis': 'TACTIS', + 'euc_jp': 'eucJP', + 'euc_kr': 'eucKR', + 'utf_8': 'UTF-8', + 'koi8_r': 'KOI8-R', + 'koi8_t': 'KOI8-T', + 'koi8_u': 'KOI8-U', + 'kz1048': 'RK1048', + 'cp1251': 'CP1251', + 'cp1255': 'CP1255', + 'cp1256': 'CP1256', + + # XXX This list is still incomplete. If you know more + # mappings, please file a bug report. Thanks. +} + +for k, v in sorted(locale_encoding_alias.items()): + k = k.replace('_', '') + locale_encoding_alias.setdefault(k, v) + +# +# The locale_alias table maps lowercase alias names to C locale names +# (case-sensitive). Encodings are always separated from the locale +# name using a dot ('.'); they should only be given in case the +# language name is needed to interpret the given encoding alias +# correctly (CJK codes often have this need). +# +# Note that the normalize() function which uses this tables +# removes '_' and '-' characters from the encoding part of the +# locale name before doing the lookup. This saves a lot of +# space in the table. 
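A brief, hedged illustration of the aliasing/normalization behaviour described above; exact results depend on the alias tables shipped with the interpreter.

import locale

print(locale.normalize("en_US"))           # e.g. 'en_US.ISO8859-1'
print(locale.normalize("de_de.utf-8"))     # encoding alias cleaned up, e.g. 'de_DE.UTF-8'
print(locale.normalize("no-such-locale"))  # unknown names come back unchanged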
+# +# MAL 2004-12-10: +# Updated alias mapping to most recent locale.alias file +# from X.org distribution using makelocalealias.py. +# +# These are the differences compared to the old mapping (Python 2.4 +# and older): +# +# updated 'bg' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251' +# updated 'bg_bg' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251' +# updated 'bulgarian' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251' +# updated 'cz' -> 'cz_CZ.ISO8859-2' to 'cs_CZ.ISO8859-2' +# updated 'cz_cz' -> 'cz_CZ.ISO8859-2' to 'cs_CZ.ISO8859-2' +# updated 'czech' -> 'cs_CS.ISO8859-2' to 'cs_CZ.ISO8859-2' +# updated 'dutch' -> 'nl_BE.ISO8859-1' to 'nl_NL.ISO8859-1' +# updated 'et' -> 'et_EE.ISO8859-4' to 'et_EE.ISO8859-15' +# updated 'et_ee' -> 'et_EE.ISO8859-4' to 'et_EE.ISO8859-15' +# updated 'fi' -> 'fi_FI.ISO8859-1' to 'fi_FI.ISO8859-15' +# updated 'fi_fi' -> 'fi_FI.ISO8859-1' to 'fi_FI.ISO8859-15' +# updated 'iw' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8' +# updated 'iw_il' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8' +# updated 'japanese' -> 'ja_JP.SJIS' to 'ja_JP.eucJP' +# updated 'lt' -> 'lt_LT.ISO8859-4' to 'lt_LT.ISO8859-13' +# updated 'lv' -> 'lv_LV.ISO8859-4' to 'lv_LV.ISO8859-13' +# updated 'sl' -> 'sl_CS.ISO8859-2' to 'sl_SI.ISO8859-2' +# updated 'slovene' -> 'sl_CS.ISO8859-2' to 'sl_SI.ISO8859-2' +# updated 'th_th' -> 'th_TH.TACTIS' to 'th_TH.ISO8859-11' +# updated 'zh_cn' -> 'zh_CN.eucCN' to 'zh_CN.gb2312' +# updated 'zh_cn.big5' -> 'zh_TW.eucTW' to 'zh_TW.big5' +# updated 'zh_tw' -> 'zh_TW.eucTW' to 'zh_TW.big5' +# +# MAL 2008-05-30: +# Updated alias mapping to most recent locale.alias file +# from X.org distribution using makelocalealias.py. +# +# These are the differences compared to the old mapping (Python 2.5 +# and older): +# +# updated 'cs_cs.iso88592' -> 'cs_CZ.ISO8859-2' to 'cs_CS.ISO8859-2' +# updated 'serbocroatian' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2' +# updated 'sh' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2' +# updated 'sh_hr.iso88592' -> 'sh_HR.ISO8859-2' to 'hr_HR.ISO8859-2' +# updated 'sh_sp' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2' +# updated 'sh_yu' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2' +# updated 'sp' -> 'sp_YU.ISO8859-5' to 'sr_CS.ISO8859-5' +# updated 'sp_yu' -> 'sp_YU.ISO8859-5' to 'sr_CS.ISO8859-5' +# updated 'sr' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' +# updated 'sr@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' +# updated 'sr_sp' -> 'sr_SP.ISO8859-2' to 'sr_CS.ISO8859-2' +# updated 'sr_yu' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' +# updated 'sr_yu.cp1251@cyrillic' -> 'sr_YU.CP1251' to 'sr_CS.CP1251' +# updated 'sr_yu.iso88592' -> 'sr_YU.ISO8859-2' to 'sr_CS.ISO8859-2' +# updated 'sr_yu.iso88595' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' +# updated 'sr_yu.iso88595@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' +# updated 'sr_yu.microsoftcp1251@cyrillic' -> 'sr_YU.CP1251' to 'sr_CS.CP1251' +# updated 'sr_yu.utf8@cyrillic' -> 'sr_YU.UTF-8' to 'sr_CS.UTF-8' +# updated 'sr_yu@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' +# +# AP 2010-04-12: +# Updated alias mapping to most recent locale.alias file +# from X.org distribution using makelocalealias.py. 
+# +# These are the differences compared to the old mapping (Python 2.6.5 +# and older): +# +# updated 'ru' -> 'ru_RU.ISO8859-5' to 'ru_RU.UTF-8' +# updated 'ru_ru' -> 'ru_RU.ISO8859-5' to 'ru_RU.UTF-8' +# updated 'serbocroatian' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' +# updated 'sh' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' +# updated 'sh_yu' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' +# updated 'sr' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8' +# updated 'sr@cyrillic' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8' +# updated 'sr@latn' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' +# updated 'sr_cs.utf8@latn' -> 'sr_CS.UTF-8' to 'sr_RS.UTF-8@latin' +# updated 'sr_cs@latn' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' +# updated 'sr_yu' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8@latin' +# updated 'sr_yu.utf8@cyrillic' -> 'sr_CS.UTF-8' to 'sr_RS.UTF-8' +# updated 'sr_yu@cyrillic' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8' +# +# SS 2013-12-20: +# Updated alias mapping to most recent locale.alias file +# from X.org distribution using makelocalealias.py. +# +# These are the differences compared to the old mapping (Python 3.3.3 +# and older): +# +# updated 'a3' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C' +# updated 'a3_az' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C' +# updated 'a3_az.koi8c' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C' +# updated 'cs_cs.iso88592' -> 'cs_CS.ISO8859-2' to 'cs_CZ.ISO8859-2' +# updated 'hebrew' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8' +# updated 'hebrew.iso88598' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8' +# updated 'sd' -> 'sd_IN@devanagari.UTF-8' to 'sd_IN.UTF-8' +# updated 'sr@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin' +# updated 'sr_cs' -> 'sr_RS.UTF-8' to 'sr_CS.UTF-8' +# updated 'sr_cs.utf8@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin' +# updated 'sr_cs@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin' +# +# SS 2014-10-01: +# Updated alias mapping with glibc 2.19 supported locales. +# +# SS 2018-05-05: +# Updated alias mapping with glibc 2.27 supported locales. 
+# +# These are the differences compared to the old mapping (Python 3.6.5 +# and older): +# +# updated 'ca_es@valencia' -> 'ca_ES.ISO8859-15@valencia' to 'ca_ES.UTF-8@valencia' +# updated 'kk_kz' -> 'kk_KZ.RK1048' to 'kk_KZ.ptcp154' +# updated 'russian' -> 'ru_RU.ISO8859-5' to 'ru_RU.KOI8-R' + +locale_alias = { + 'a3': 'az_AZ.KOI8-C', + 'a3_az': 'az_AZ.KOI8-C', + 'a3_az.koic': 'az_AZ.KOI8-C', + 'aa_dj': 'aa_DJ.ISO8859-1', + 'aa_er': 'aa_ER.UTF-8', + 'aa_et': 'aa_ET.UTF-8', + 'af': 'af_ZA.ISO8859-1', + 'af_za': 'af_ZA.ISO8859-1', + 'agr_pe': 'agr_PE.UTF-8', + 'ak_gh': 'ak_GH.UTF-8', + 'am': 'am_ET.UTF-8', + 'am_et': 'am_ET.UTF-8', + 'american': 'en_US.ISO8859-1', + 'an_es': 'an_ES.ISO8859-15', + 'anp_in': 'anp_IN.UTF-8', + 'ar': 'ar_AA.ISO8859-6', + 'ar_aa': 'ar_AA.ISO8859-6', + 'ar_ae': 'ar_AE.ISO8859-6', + 'ar_bh': 'ar_BH.ISO8859-6', + 'ar_dz': 'ar_DZ.ISO8859-6', + 'ar_eg': 'ar_EG.ISO8859-6', + 'ar_in': 'ar_IN.UTF-8', + 'ar_iq': 'ar_IQ.ISO8859-6', + 'ar_jo': 'ar_JO.ISO8859-6', + 'ar_kw': 'ar_KW.ISO8859-6', + 'ar_lb': 'ar_LB.ISO8859-6', + 'ar_ly': 'ar_LY.ISO8859-6', + 'ar_ma': 'ar_MA.ISO8859-6', + 'ar_om': 'ar_OM.ISO8859-6', + 'ar_qa': 'ar_QA.ISO8859-6', + 'ar_sa': 'ar_SA.ISO8859-6', + 'ar_sd': 'ar_SD.ISO8859-6', + 'ar_ss': 'ar_SS.UTF-8', + 'ar_sy': 'ar_SY.ISO8859-6', + 'ar_tn': 'ar_TN.ISO8859-6', + 'ar_ye': 'ar_YE.ISO8859-6', + 'arabic': 'ar_AA.ISO8859-6', + 'as': 'as_IN.UTF-8', + 'as_in': 'as_IN.UTF-8', + 'ast_es': 'ast_ES.ISO8859-15', + 'ayc_pe': 'ayc_PE.UTF-8', + 'az': 'az_AZ.ISO8859-9E', + 'az_az': 'az_AZ.ISO8859-9E', + 'az_az.iso88599e': 'az_AZ.ISO8859-9E', + 'az_ir': 'az_IR.UTF-8', + 'be': 'be_BY.CP1251', + 'be@latin': 'be_BY.UTF-8@latin', + 'be_bg.utf8': 'bg_BG.UTF-8', + 'be_by': 'be_BY.CP1251', + 'be_by@latin': 'be_BY.UTF-8@latin', + 'bem_zm': 'bem_ZM.UTF-8', + 'ber_dz': 'ber_DZ.UTF-8', + 'ber_ma': 'ber_MA.UTF-8', + 'bg': 'bg_BG.CP1251', + 'bg_bg': 'bg_BG.CP1251', + 'bhb_in.utf8': 'bhb_IN.UTF-8', + 'bho_in': 'bho_IN.UTF-8', + 'bho_np': 'bho_NP.UTF-8', + 'bi_vu': 'bi_VU.UTF-8', + 'bn_bd': 'bn_BD.UTF-8', + 'bn_in': 'bn_IN.UTF-8', + 'bo_cn': 'bo_CN.UTF-8', + 'bo_in': 'bo_IN.UTF-8', + 'bokmal': 'nb_NO.ISO8859-1', + 'bokm\xe5l': 'nb_NO.ISO8859-1', + 'br': 'br_FR.ISO8859-1', + 'br_fr': 'br_FR.ISO8859-1', + 'brx_in': 'brx_IN.UTF-8', + 'bs': 'bs_BA.ISO8859-2', + 'bs_ba': 'bs_BA.ISO8859-2', + 'bulgarian': 'bg_BG.CP1251', + 'byn_er': 'byn_ER.UTF-8', + 'c': 'C', + 'c-french': 'fr_CA.ISO8859-1', + 'c.ascii': 'C', + 'c.en': 'C', + 'c.iso88591': 'en_US.ISO8859-1', + 'c.utf8': 'en_US.UTF-8', + 'c_c': 'C', + 'c_c.c': 'C', + 'ca': 'ca_ES.ISO8859-1', + 'ca_ad': 'ca_AD.ISO8859-1', + 'ca_es': 'ca_ES.ISO8859-1', + 'ca_es@valencia': 'ca_ES.UTF-8@valencia', + 'ca_fr': 'ca_FR.ISO8859-1', + 'ca_it': 'ca_IT.ISO8859-1', + 'catalan': 'ca_ES.ISO8859-1', + 'ce_ru': 'ce_RU.UTF-8', + 'cextend': 'en_US.ISO8859-1', + 'chinese-s': 'zh_CN.eucCN', + 'chinese-t': 'zh_TW.eucTW', + 'chr_us': 'chr_US.UTF-8', + 'ckb_iq': 'ckb_IQ.UTF-8', + 'cmn_tw': 'cmn_TW.UTF-8', + 'crh_ua': 'crh_UA.UTF-8', + 'croatian': 'hr_HR.ISO8859-2', + 'cs': 'cs_CZ.ISO8859-2', + 'cs_cs': 'cs_CZ.ISO8859-2', + 'cs_cz': 'cs_CZ.ISO8859-2', + 'csb_pl': 'csb_PL.UTF-8', + 'cv_ru': 'cv_RU.UTF-8', + 'cy': 'cy_GB.ISO8859-1', + 'cy_gb': 'cy_GB.ISO8859-1', + 'cz': 'cs_CZ.ISO8859-2', + 'cz_cz': 'cs_CZ.ISO8859-2', + 'czech': 'cs_CZ.ISO8859-2', + 'da': 'da_DK.ISO8859-1', + 'da_dk': 'da_DK.ISO8859-1', + 'danish': 'da_DK.ISO8859-1', + 'dansk': 'da_DK.ISO8859-1', + 'de': 'de_DE.ISO8859-1', + 'de_at': 'de_AT.ISO8859-1', + 'de_be': 'de_BE.ISO8859-1', + 'de_ch': 
'de_CH.ISO8859-1', + 'de_de': 'de_DE.ISO8859-1', + 'de_it': 'de_IT.ISO8859-1', + 'de_li.utf8': 'de_LI.UTF-8', + 'de_lu': 'de_LU.ISO8859-1', + 'deutsch': 'de_DE.ISO8859-1', + 'doi_in': 'doi_IN.UTF-8', + 'dutch': 'nl_NL.ISO8859-1', + 'dutch.iso88591': 'nl_BE.ISO8859-1', + 'dv_mv': 'dv_MV.UTF-8', + 'dz_bt': 'dz_BT.UTF-8', + 'ee': 'ee_EE.ISO8859-4', + 'ee_ee': 'ee_EE.ISO8859-4', + 'eesti': 'et_EE.ISO8859-1', + 'el': 'el_GR.ISO8859-7', + 'el_cy': 'el_CY.ISO8859-7', + 'el_gr': 'el_GR.ISO8859-7', + 'el_gr@euro': 'el_GR.ISO8859-15', + 'en': 'en_US.ISO8859-1', + 'en_ag': 'en_AG.UTF-8', + 'en_au': 'en_AU.ISO8859-1', + 'en_be': 'en_BE.ISO8859-1', + 'en_bw': 'en_BW.ISO8859-1', + 'en_ca': 'en_CA.ISO8859-1', + 'en_dk': 'en_DK.ISO8859-1', + 'en_dl.utf8': 'en_DL.UTF-8', + 'en_gb': 'en_GB.ISO8859-1', + 'en_hk': 'en_HK.ISO8859-1', + 'en_ie': 'en_IE.ISO8859-1', + 'en_il': 'en_IL.UTF-8', + 'en_in': 'en_IN.ISO8859-1', + 'en_ng': 'en_NG.UTF-8', + 'en_nz': 'en_NZ.ISO8859-1', + 'en_ph': 'en_PH.ISO8859-1', + 'en_sc.utf8': 'en_SC.UTF-8', + 'en_sg': 'en_SG.ISO8859-1', + 'en_uk': 'en_GB.ISO8859-1', + 'en_us': 'en_US.ISO8859-1', + 'en_us@euro@euro': 'en_US.ISO8859-15', + 'en_za': 'en_ZA.ISO8859-1', + 'en_zm': 'en_ZM.UTF-8', + 'en_zw': 'en_ZW.ISO8859-1', + 'en_zw.utf8': 'en_ZS.UTF-8', + 'eng_gb': 'en_GB.ISO8859-1', + 'english': 'en_EN.ISO8859-1', + 'english.iso88591': 'en_US.ISO8859-1', + 'english_uk': 'en_GB.ISO8859-1', + 'english_united-states': 'en_US.ISO8859-1', + 'english_united-states.437': 'C', + 'english_us': 'en_US.ISO8859-1', + 'eo': 'eo_XX.ISO8859-3', + 'eo.utf8': 'eo.UTF-8', + 'eo_eo': 'eo_EO.ISO8859-3', + 'eo_us.utf8': 'eo_US.UTF-8', + 'eo_xx': 'eo_XX.ISO8859-3', + 'es': 'es_ES.ISO8859-1', + 'es_ar': 'es_AR.ISO8859-1', + 'es_bo': 'es_BO.ISO8859-1', + 'es_cl': 'es_CL.ISO8859-1', + 'es_co': 'es_CO.ISO8859-1', + 'es_cr': 'es_CR.ISO8859-1', + 'es_cu': 'es_CU.UTF-8', + 'es_do': 'es_DO.ISO8859-1', + 'es_ec': 'es_EC.ISO8859-1', + 'es_es': 'es_ES.ISO8859-1', + 'es_gt': 'es_GT.ISO8859-1', + 'es_hn': 'es_HN.ISO8859-1', + 'es_mx': 'es_MX.ISO8859-1', + 'es_ni': 'es_NI.ISO8859-1', + 'es_pa': 'es_PA.ISO8859-1', + 'es_pe': 'es_PE.ISO8859-1', + 'es_pr': 'es_PR.ISO8859-1', + 'es_py': 'es_PY.ISO8859-1', + 'es_sv': 'es_SV.ISO8859-1', + 'es_us': 'es_US.ISO8859-1', + 'es_uy': 'es_UY.ISO8859-1', + 'es_ve': 'es_VE.ISO8859-1', + 'estonian': 'et_EE.ISO8859-1', + 'et': 'et_EE.ISO8859-15', + 'et_ee': 'et_EE.ISO8859-15', + 'eu': 'eu_ES.ISO8859-1', + 'eu_es': 'eu_ES.ISO8859-1', + 'eu_fr': 'eu_FR.ISO8859-1', + 'fa': 'fa_IR.UTF-8', + 'fa_ir': 'fa_IR.UTF-8', + 'fa_ir.isiri3342': 'fa_IR.ISIRI-3342', + 'ff_sn': 'ff_SN.UTF-8', + 'fi': 'fi_FI.ISO8859-15', + 'fi_fi': 'fi_FI.ISO8859-15', + 'fil_ph': 'fil_PH.UTF-8', + 'finnish': 'fi_FI.ISO8859-1', + 'fo': 'fo_FO.ISO8859-1', + 'fo_fo': 'fo_FO.ISO8859-1', + 'fr': 'fr_FR.ISO8859-1', + 'fr_be': 'fr_BE.ISO8859-1', + 'fr_ca': 'fr_CA.ISO8859-1', + 'fr_ch': 'fr_CH.ISO8859-1', + 'fr_fr': 'fr_FR.ISO8859-1', + 'fr_lu': 'fr_LU.ISO8859-1', + 'fran\xe7ais': 'fr_FR.ISO8859-1', + 'fre_fr': 'fr_FR.ISO8859-1', + 'french': 'fr_FR.ISO8859-1', + 'french.iso88591': 'fr_CH.ISO8859-1', + 'french_france': 'fr_FR.ISO8859-1', + 'fur_it': 'fur_IT.UTF-8', + 'fy_de': 'fy_DE.UTF-8', + 'fy_nl': 'fy_NL.UTF-8', + 'ga': 'ga_IE.ISO8859-1', + 'ga_ie': 'ga_IE.ISO8859-1', + 'galego': 'gl_ES.ISO8859-1', + 'galician': 'gl_ES.ISO8859-1', + 'gd': 'gd_GB.ISO8859-1', + 'gd_gb': 'gd_GB.ISO8859-1', + 'ger_de': 'de_DE.ISO8859-1', + 'german': 'de_DE.ISO8859-1', + 'german.iso88591': 'de_CH.ISO8859-1', + 'german_germany': 'de_DE.ISO8859-1', + 
'gez_er': 'gez_ER.UTF-8', + 'gez_et': 'gez_ET.UTF-8', + 'gl': 'gl_ES.ISO8859-1', + 'gl_es': 'gl_ES.ISO8859-1', + 'greek': 'el_GR.ISO8859-7', + 'gu_in': 'gu_IN.UTF-8', + 'gv': 'gv_GB.ISO8859-1', + 'gv_gb': 'gv_GB.ISO8859-1', + 'ha_ng': 'ha_NG.UTF-8', + 'hak_tw': 'hak_TW.UTF-8', + 'he': 'he_IL.ISO8859-8', + 'he_il': 'he_IL.ISO8859-8', + 'hebrew': 'he_IL.ISO8859-8', + 'hi': 'hi_IN.ISCII-DEV', + 'hi_in': 'hi_IN.ISCII-DEV', + 'hi_in.isciidev': 'hi_IN.ISCII-DEV', + 'hif_fj': 'hif_FJ.UTF-8', + 'hne': 'hne_IN.UTF-8', + 'hne_in': 'hne_IN.UTF-8', + 'hr': 'hr_HR.ISO8859-2', + 'hr_hr': 'hr_HR.ISO8859-2', + 'hrvatski': 'hr_HR.ISO8859-2', + 'hsb_de': 'hsb_DE.ISO8859-2', + 'ht_ht': 'ht_HT.UTF-8', + 'hu': 'hu_HU.ISO8859-2', + 'hu_hu': 'hu_HU.ISO8859-2', + 'hungarian': 'hu_HU.ISO8859-2', + 'hy_am': 'hy_AM.UTF-8', + 'hy_am.armscii8': 'hy_AM.ARMSCII_8', + 'ia': 'ia.UTF-8', + 'ia_fr': 'ia_FR.UTF-8', + 'icelandic': 'is_IS.ISO8859-1', + 'id': 'id_ID.ISO8859-1', + 'id_id': 'id_ID.ISO8859-1', + 'ig_ng': 'ig_NG.UTF-8', + 'ik_ca': 'ik_CA.UTF-8', + 'in': 'id_ID.ISO8859-1', + 'in_id': 'id_ID.ISO8859-1', + 'is': 'is_IS.ISO8859-1', + 'is_is': 'is_IS.ISO8859-1', + 'iso-8859-1': 'en_US.ISO8859-1', + 'iso-8859-15': 'en_US.ISO8859-15', + 'iso8859-1': 'en_US.ISO8859-1', + 'iso8859-15': 'en_US.ISO8859-15', + 'iso_8859_1': 'en_US.ISO8859-1', + 'iso_8859_15': 'en_US.ISO8859-15', + 'it': 'it_IT.ISO8859-1', + 'it_ch': 'it_CH.ISO8859-1', + 'it_it': 'it_IT.ISO8859-1', + 'italian': 'it_IT.ISO8859-1', + 'iu': 'iu_CA.NUNACOM-8', + 'iu_ca': 'iu_CA.NUNACOM-8', + 'iu_ca.nunacom8': 'iu_CA.NUNACOM-8', + 'iw': 'he_IL.ISO8859-8', + 'iw_il': 'he_IL.ISO8859-8', + 'iw_il.utf8': 'iw_IL.UTF-8', + 'ja': 'ja_JP.eucJP', + 'ja_jp': 'ja_JP.eucJP', + 'ja_jp.euc': 'ja_JP.eucJP', + 'ja_jp.mscode': 'ja_JP.SJIS', + 'ja_jp.pck': 'ja_JP.SJIS', + 'japan': 'ja_JP.eucJP', + 'japanese': 'ja_JP.eucJP', + 'japanese-euc': 'ja_JP.eucJP', + 'japanese.euc': 'ja_JP.eucJP', + 'jp_jp': 'ja_JP.eucJP', + 'ka': 'ka_GE.GEORGIAN-ACADEMY', + 'ka_ge': 'ka_GE.GEORGIAN-ACADEMY', + 'ka_ge.georgianacademy': 'ka_GE.GEORGIAN-ACADEMY', + 'ka_ge.georgianps': 'ka_GE.GEORGIAN-PS', + 'ka_ge.georgianrs': 'ka_GE.GEORGIAN-ACADEMY', + 'kab_dz': 'kab_DZ.UTF-8', + 'kk_kz': 'kk_KZ.ptcp154', + 'kl': 'kl_GL.ISO8859-1', + 'kl_gl': 'kl_GL.ISO8859-1', + 'km_kh': 'km_KH.UTF-8', + 'kn': 'kn_IN.UTF-8', + 'kn_in': 'kn_IN.UTF-8', + 'ko': 'ko_KR.eucKR', + 'ko_kr': 'ko_KR.eucKR', + 'ko_kr.euc': 'ko_KR.eucKR', + 'kok_in': 'kok_IN.UTF-8', + 'korean': 'ko_KR.eucKR', + 'korean.euc': 'ko_KR.eucKR', + 'ks': 'ks_IN.UTF-8', + 'ks_in': 'ks_IN.UTF-8', + 'ks_in@devanagari.utf8': 'ks_IN.UTF-8@devanagari', + 'ku_tr': 'ku_TR.ISO8859-9', + 'kw': 'kw_GB.ISO8859-1', + 'kw_gb': 'kw_GB.ISO8859-1', + 'ky': 'ky_KG.UTF-8', + 'ky_kg': 'ky_KG.UTF-8', + 'lb_lu': 'lb_LU.UTF-8', + 'lg_ug': 'lg_UG.ISO8859-10', + 'li_be': 'li_BE.UTF-8', + 'li_nl': 'li_NL.UTF-8', + 'lij_it': 'lij_IT.UTF-8', + 'lithuanian': 'lt_LT.ISO8859-13', + 'ln_cd': 'ln_CD.UTF-8', + 'lo': 'lo_LA.MULELAO-1', + 'lo_la': 'lo_LA.MULELAO-1', + 'lo_la.cp1133': 'lo_LA.IBM-CP1133', + 'lo_la.ibmcp1133': 'lo_LA.IBM-CP1133', + 'lo_la.mulelao1': 'lo_LA.MULELAO-1', + 'lt': 'lt_LT.ISO8859-13', + 'lt_lt': 'lt_LT.ISO8859-13', + 'lv': 'lv_LV.ISO8859-13', + 'lv_lv': 'lv_LV.ISO8859-13', + 'lzh_tw': 'lzh_TW.UTF-8', + 'mag_in': 'mag_IN.UTF-8', + 'mai': 'mai_IN.UTF-8', + 'mai_in': 'mai_IN.UTF-8', + 'mai_np': 'mai_NP.UTF-8', + 'mfe_mu': 'mfe_MU.UTF-8', + 'mg_mg': 'mg_MG.ISO8859-15', + 'mhr_ru': 'mhr_RU.UTF-8', + 'mi': 'mi_NZ.ISO8859-1', + 'mi_nz': 'mi_NZ.ISO8859-1', + 'miq_ni': 
'miq_NI.UTF-8', + 'mjw_in': 'mjw_IN.UTF-8', + 'mk': 'mk_MK.ISO8859-5', + 'mk_mk': 'mk_MK.ISO8859-5', + 'ml': 'ml_IN.UTF-8', + 'ml_in': 'ml_IN.UTF-8', + 'mn_mn': 'mn_MN.UTF-8', + 'mni_in': 'mni_IN.UTF-8', + 'mr': 'mr_IN.UTF-8', + 'mr_in': 'mr_IN.UTF-8', + 'ms': 'ms_MY.ISO8859-1', + 'ms_my': 'ms_MY.ISO8859-1', + 'mt': 'mt_MT.ISO8859-3', + 'mt_mt': 'mt_MT.ISO8859-3', + 'my_mm': 'my_MM.UTF-8', + 'nan_tw': 'nan_TW.UTF-8', + 'nb': 'nb_NO.ISO8859-1', + 'nb_no': 'nb_NO.ISO8859-1', + 'nds_de': 'nds_DE.UTF-8', + 'nds_nl': 'nds_NL.UTF-8', + 'ne_np': 'ne_NP.UTF-8', + 'nhn_mx': 'nhn_MX.UTF-8', + 'niu_nu': 'niu_NU.UTF-8', + 'niu_nz': 'niu_NZ.UTF-8', + 'nl': 'nl_NL.ISO8859-1', + 'nl_aw': 'nl_AW.UTF-8', + 'nl_be': 'nl_BE.ISO8859-1', + 'nl_nl': 'nl_NL.ISO8859-1', + 'nn': 'nn_NO.ISO8859-1', + 'nn_no': 'nn_NO.ISO8859-1', + 'no': 'no_NO.ISO8859-1', + 'no@nynorsk': 'ny_NO.ISO8859-1', + 'no_no': 'no_NO.ISO8859-1', + 'no_no.iso88591@bokmal': 'no_NO.ISO8859-1', + 'no_no.iso88591@nynorsk': 'no_NO.ISO8859-1', + 'norwegian': 'no_NO.ISO8859-1', + 'nr': 'nr_ZA.ISO8859-1', + 'nr_za': 'nr_ZA.ISO8859-1', + 'nso': 'nso_ZA.ISO8859-15', + 'nso_za': 'nso_ZA.ISO8859-15', + 'ny': 'ny_NO.ISO8859-1', + 'ny_no': 'ny_NO.ISO8859-1', + 'nynorsk': 'nn_NO.ISO8859-1', + 'oc': 'oc_FR.ISO8859-1', + 'oc_fr': 'oc_FR.ISO8859-1', + 'om_et': 'om_ET.UTF-8', + 'om_ke': 'om_KE.ISO8859-1', + 'or': 'or_IN.UTF-8', + 'or_in': 'or_IN.UTF-8', + 'os_ru': 'os_RU.UTF-8', + 'pa': 'pa_IN.UTF-8', + 'pa_in': 'pa_IN.UTF-8', + 'pa_pk': 'pa_PK.UTF-8', + 'pap_an': 'pap_AN.UTF-8', + 'pap_aw': 'pap_AW.UTF-8', + 'pap_cw': 'pap_CW.UTF-8', + 'pd': 'pd_US.ISO8859-1', + 'pd_de': 'pd_DE.ISO8859-1', + 'pd_us': 'pd_US.ISO8859-1', + 'ph': 'ph_PH.ISO8859-1', + 'ph_ph': 'ph_PH.ISO8859-1', + 'pl': 'pl_PL.ISO8859-2', + 'pl_pl': 'pl_PL.ISO8859-2', + 'polish': 'pl_PL.ISO8859-2', + 'portuguese': 'pt_PT.ISO8859-1', + 'portuguese_brazil': 'pt_BR.ISO8859-1', + 'posix': 'C', + 'posix-utf2': 'C', + 'pp': 'pp_AN.ISO8859-1', + 'pp_an': 'pp_AN.ISO8859-1', + 'ps_af': 'ps_AF.UTF-8', + 'pt': 'pt_PT.ISO8859-1', + 'pt_br': 'pt_BR.ISO8859-1', + 'pt_pt': 'pt_PT.ISO8859-1', + 'quz_pe': 'quz_PE.UTF-8', + 'raj_in': 'raj_IN.UTF-8', + 'ro': 'ro_RO.ISO8859-2', + 'ro_ro': 'ro_RO.ISO8859-2', + 'romanian': 'ro_RO.ISO8859-2', + 'ru': 'ru_RU.UTF-8', + 'ru_ru': 'ru_RU.UTF-8', + 'ru_ua': 'ru_UA.KOI8-U', + 'rumanian': 'ro_RO.ISO8859-2', + 'russian': 'ru_RU.KOI8-R', + 'rw': 'rw_RW.ISO8859-1', + 'rw_rw': 'rw_RW.ISO8859-1', + 'sa_in': 'sa_IN.UTF-8', + 'sat_in': 'sat_IN.UTF-8', + 'sc_it': 'sc_IT.UTF-8', + 'sd': 'sd_IN.UTF-8', + 'sd_in': 'sd_IN.UTF-8', + 'sd_in@devanagari.utf8': 'sd_IN.UTF-8@devanagari', + 'sd_pk': 'sd_PK.UTF-8', + 'se_no': 'se_NO.UTF-8', + 'serbocroatian': 'sr_RS.UTF-8@latin', + 'sgs_lt': 'sgs_LT.UTF-8', + 'sh': 'sr_RS.UTF-8@latin', + 'sh_ba.iso88592@bosnia': 'sr_CS.ISO8859-2', + 'sh_hr': 'sh_HR.ISO8859-2', + 'sh_hr.iso88592': 'hr_HR.ISO8859-2', + 'sh_sp': 'sr_CS.ISO8859-2', + 'sh_yu': 'sr_RS.UTF-8@latin', + 'shn_mm': 'shn_MM.UTF-8', + 'shs_ca': 'shs_CA.UTF-8', + 'si': 'si_LK.UTF-8', + 'si_lk': 'si_LK.UTF-8', + 'sid_et': 'sid_ET.UTF-8', + 'sinhala': 'si_LK.UTF-8', + 'sk': 'sk_SK.ISO8859-2', + 'sk_sk': 'sk_SK.ISO8859-2', + 'sl': 'sl_SI.ISO8859-2', + 'sl_cs': 'sl_CS.ISO8859-2', + 'sl_si': 'sl_SI.ISO8859-2', + 'slovak': 'sk_SK.ISO8859-2', + 'slovene': 'sl_SI.ISO8859-2', + 'slovenian': 'sl_SI.ISO8859-2', + 'sm_ws': 'sm_WS.UTF-8', + 'so_dj': 'so_DJ.ISO8859-1', + 'so_et': 'so_ET.UTF-8', + 'so_ke': 'so_KE.ISO8859-1', + 'so_so': 'so_SO.ISO8859-1', + 'sp': 'sr_CS.ISO8859-5', + 'sp_yu': 'sr_CS.ISO8859-5', 
+ 'spanish': 'es_ES.ISO8859-1', + 'spanish_spain': 'es_ES.ISO8859-1', + 'sq': 'sq_AL.ISO8859-2', + 'sq_al': 'sq_AL.ISO8859-2', + 'sq_mk': 'sq_MK.UTF-8', + 'sr': 'sr_RS.UTF-8', + 'sr@cyrillic': 'sr_RS.UTF-8', + 'sr@latn': 'sr_CS.UTF-8@latin', + 'sr_cs': 'sr_CS.UTF-8', + 'sr_cs.iso88592@latn': 'sr_CS.ISO8859-2', + 'sr_cs@latn': 'sr_CS.UTF-8@latin', + 'sr_me': 'sr_ME.UTF-8', + 'sr_rs': 'sr_RS.UTF-8', + 'sr_rs@latn': 'sr_RS.UTF-8@latin', + 'sr_sp': 'sr_CS.ISO8859-2', + 'sr_yu': 'sr_RS.UTF-8@latin', + 'sr_yu.cp1251@cyrillic': 'sr_CS.CP1251', + 'sr_yu.iso88592': 'sr_CS.ISO8859-2', + 'sr_yu.iso88595': 'sr_CS.ISO8859-5', + 'sr_yu.iso88595@cyrillic': 'sr_CS.ISO8859-5', + 'sr_yu.microsoftcp1251@cyrillic': 'sr_CS.CP1251', + 'sr_yu.utf8': 'sr_RS.UTF-8', + 'sr_yu.utf8@cyrillic': 'sr_RS.UTF-8', + 'sr_yu@cyrillic': 'sr_RS.UTF-8', + 'ss': 'ss_ZA.ISO8859-1', + 'ss_za': 'ss_ZA.ISO8859-1', + 'st': 'st_ZA.ISO8859-1', + 'st_za': 'st_ZA.ISO8859-1', + 'sv': 'sv_SE.ISO8859-1', + 'sv_fi': 'sv_FI.ISO8859-1', + 'sv_se': 'sv_SE.ISO8859-1', + 'sw_ke': 'sw_KE.UTF-8', + 'sw_tz': 'sw_TZ.UTF-8', + 'swedish': 'sv_SE.ISO8859-1', + 'szl_pl': 'szl_PL.UTF-8', + 'ta': 'ta_IN.TSCII-0', + 'ta_in': 'ta_IN.TSCII-0', + 'ta_in.tscii': 'ta_IN.TSCII-0', + 'ta_in.tscii0': 'ta_IN.TSCII-0', + 'ta_lk': 'ta_LK.UTF-8', + 'tcy_in.utf8': 'tcy_IN.UTF-8', + 'te': 'te_IN.UTF-8', + 'te_in': 'te_IN.UTF-8', + 'tg': 'tg_TJ.KOI8-C', + 'tg_tj': 'tg_TJ.KOI8-C', + 'th': 'th_TH.ISO8859-11', + 'th_th': 'th_TH.ISO8859-11', + 'th_th.tactis': 'th_TH.TIS620', + 'th_th.tis620': 'th_TH.TIS620', + 'thai': 'th_TH.ISO8859-11', + 'the_np': 'the_NP.UTF-8', + 'ti_er': 'ti_ER.UTF-8', + 'ti_et': 'ti_ET.UTF-8', + 'tig_er': 'tig_ER.UTF-8', + 'tk_tm': 'tk_TM.UTF-8', + 'tl': 'tl_PH.ISO8859-1', + 'tl_ph': 'tl_PH.ISO8859-1', + 'tn': 'tn_ZA.ISO8859-15', + 'tn_za': 'tn_ZA.ISO8859-15', + 'to_to': 'to_TO.UTF-8', + 'tpi_pg': 'tpi_PG.UTF-8', + 'tr': 'tr_TR.ISO8859-9', + 'tr_cy': 'tr_CY.ISO8859-9', + 'tr_tr': 'tr_TR.ISO8859-9', + 'ts': 'ts_ZA.ISO8859-1', + 'ts_za': 'ts_ZA.ISO8859-1', + 'tt': 'tt_RU.TATAR-CYR', + 'tt_ru': 'tt_RU.TATAR-CYR', + 'tt_ru.tatarcyr': 'tt_RU.TATAR-CYR', + 'tt_ru@iqtelif': 'tt_RU.UTF-8@iqtelif', + 'turkish': 'tr_TR.ISO8859-9', + 'ug_cn': 'ug_CN.UTF-8', + 'uk': 'uk_UA.KOI8-U', + 'uk_ua': 'uk_UA.KOI8-U', + 'univ': 'en_US.utf', + 'universal': 'en_US.utf', + 'universal.utf8@ucs4': 'en_US.UTF-8', + 'unm_us': 'unm_US.UTF-8', + 'ur': 'ur_PK.CP1256', + 'ur_in': 'ur_IN.UTF-8', + 'ur_pk': 'ur_PK.CP1256', + 'uz': 'uz_UZ.UTF-8', + 'uz_uz': 'uz_UZ.UTF-8', + 'uz_uz@cyrillic': 'uz_UZ.UTF-8', + 've': 've_ZA.UTF-8', + 've_za': 've_ZA.UTF-8', + 'vi': 'vi_VN.TCVN', + 'vi_vn': 'vi_VN.TCVN', + 'vi_vn.tcvn': 'vi_VN.TCVN', + 'vi_vn.tcvn5712': 'vi_VN.TCVN', + 'vi_vn.viscii': 'vi_VN.VISCII', + 'vi_vn.viscii111': 'vi_VN.VISCII', + 'wa': 'wa_BE.ISO8859-1', + 'wa_be': 'wa_BE.ISO8859-1', + 'wae_ch': 'wae_CH.UTF-8', + 'wal_et': 'wal_ET.UTF-8', + 'wo_sn': 'wo_SN.UTF-8', + 'xh': 'xh_ZA.ISO8859-1', + 'xh_za': 'xh_ZA.ISO8859-1', + 'yi': 'yi_US.CP1255', + 'yi_us': 'yi_US.CP1255', + 'yo_ng': 'yo_NG.UTF-8', + 'yue_hk': 'yue_HK.UTF-8', + 'yuw_pg': 'yuw_PG.UTF-8', + 'zh': 'zh_CN.eucCN', + 'zh_cn': 'zh_CN.gb2312', + 'zh_cn.big5': 'zh_TW.big5', + 'zh_cn.euc': 'zh_CN.eucCN', + 'zh_hk': 'zh_HK.big5hkscs', + 'zh_hk.big5hk': 'zh_HK.big5hkscs', + 'zh_sg': 'zh_SG.GB2312', + 'zh_sg.gbk': 'zh_SG.GBK', + 'zh_tw': 'zh_TW.big5', + 'zh_tw.euc': 'zh_TW.eucTW', + 'zh_tw.euctw': 'zh_TW.eucTW', + 'zu': 'zu_ZA.ISO8859-1', + 'zu_za': 'zu_ZA.ISO8859-1', +} + +# +# This maps Windows language identifiers to locale 
strings. +# +# This list has been updated from +# http://msdn.microsoft.com/library/default.asp?url=/library/en-us/intl/nls_238z.asp +# to include every locale up to Windows Vista. +# +# NOTE: this mapping is incomplete. If your language is missing, please +# submit a bug report to the Python bug tracker at http://bugs.python.org/ +# Make sure you include the missing language identifier and the suggested +# locale code. +# + +windows_locale = { + 0x0436: "af_ZA", # Afrikaans + 0x041c: "sq_AL", # Albanian + 0x0484: "gsw_FR",# Alsatian - France + 0x045e: "am_ET", # Amharic - Ethiopia + 0x0401: "ar_SA", # Arabic - Saudi Arabia + 0x0801: "ar_IQ", # Arabic - Iraq + 0x0c01: "ar_EG", # Arabic - Egypt + 0x1001: "ar_LY", # Arabic - Libya + 0x1401: "ar_DZ", # Arabic - Algeria + 0x1801: "ar_MA", # Arabic - Morocco + 0x1c01: "ar_TN", # Arabic - Tunisia + 0x2001: "ar_OM", # Arabic - Oman + 0x2401: "ar_YE", # Arabic - Yemen + 0x2801: "ar_SY", # Arabic - Syria + 0x2c01: "ar_JO", # Arabic - Jordan + 0x3001: "ar_LB", # Arabic - Lebanon + 0x3401: "ar_KW", # Arabic - Kuwait + 0x3801: "ar_AE", # Arabic - United Arab Emirates + 0x3c01: "ar_BH", # Arabic - Bahrain + 0x4001: "ar_QA", # Arabic - Qatar + 0x042b: "hy_AM", # Armenian + 0x044d: "as_IN", # Assamese - India + 0x042c: "az_AZ", # Azeri - Latin + 0x082c: "az_AZ", # Azeri - Cyrillic + 0x046d: "ba_RU", # Bashkir + 0x042d: "eu_ES", # Basque - Russia + 0x0423: "be_BY", # Belarusian + 0x0445: "bn_IN", # Begali + 0x201a: "bs_BA", # Bosnian - Cyrillic + 0x141a: "bs_BA", # Bosnian - Latin + 0x047e: "br_FR", # Breton - France + 0x0402: "bg_BG", # Bulgarian +# 0x0455: "my_MM", # Burmese - Not supported + 0x0403: "ca_ES", # Catalan + 0x0004: "zh_CHS",# Chinese - Simplified + 0x0404: "zh_TW", # Chinese - Taiwan + 0x0804: "zh_CN", # Chinese - PRC + 0x0c04: "zh_HK", # Chinese - Hong Kong S.A.R. + 0x1004: "zh_SG", # Chinese - Singapore + 0x1404: "zh_MO", # Chinese - Macao S.A.R. 
+ 0x7c04: "zh_CHT",# Chinese - Traditional + 0x0483: "co_FR", # Corsican - France + 0x041a: "hr_HR", # Croatian + 0x101a: "hr_BA", # Croatian - Bosnia + 0x0405: "cs_CZ", # Czech + 0x0406: "da_DK", # Danish + 0x048c: "gbz_AF",# Dari - Afghanistan + 0x0465: "div_MV",# Divehi - Maldives + 0x0413: "nl_NL", # Dutch - The Netherlands + 0x0813: "nl_BE", # Dutch - Belgium + 0x0409: "en_US", # English - United States + 0x0809: "en_GB", # English - United Kingdom + 0x0c09: "en_AU", # English - Australia + 0x1009: "en_CA", # English - Canada + 0x1409: "en_NZ", # English - New Zealand + 0x1809: "en_IE", # English - Ireland + 0x1c09: "en_ZA", # English - South Africa + 0x2009: "en_JA", # English - Jamaica + 0x2409: "en_CB", # English - Caribbean + 0x2809: "en_BZ", # English - Belize + 0x2c09: "en_TT", # English - Trinidad + 0x3009: "en_ZW", # English - Zimbabwe + 0x3409: "en_PH", # English - Philippines + 0x4009: "en_IN", # English - India + 0x4409: "en_MY", # English - Malaysia + 0x4809: "en_IN", # English - Singapore + 0x0425: "et_EE", # Estonian + 0x0438: "fo_FO", # Faroese + 0x0464: "fil_PH",# Filipino + 0x040b: "fi_FI", # Finnish + 0x040c: "fr_FR", # French - France + 0x080c: "fr_BE", # French - Belgium + 0x0c0c: "fr_CA", # French - Canada + 0x100c: "fr_CH", # French - Switzerland + 0x140c: "fr_LU", # French - Luxembourg + 0x180c: "fr_MC", # French - Monaco + 0x0462: "fy_NL", # Frisian - Netherlands + 0x0456: "gl_ES", # Galician + 0x0437: "ka_GE", # Georgian + 0x0407: "de_DE", # German - Germany + 0x0807: "de_CH", # German - Switzerland + 0x0c07: "de_AT", # German - Austria + 0x1007: "de_LU", # German - Luxembourg + 0x1407: "de_LI", # German - Liechtenstein + 0x0408: "el_GR", # Greek + 0x046f: "kl_GL", # Greenlandic - Greenland + 0x0447: "gu_IN", # Gujarati + 0x0468: "ha_NG", # Hausa - Latin + 0x040d: "he_IL", # Hebrew + 0x0439: "hi_IN", # Hindi + 0x040e: "hu_HU", # Hungarian + 0x040f: "is_IS", # Icelandic + 0x0421: "id_ID", # Indonesian + 0x045d: "iu_CA", # Inuktitut - Syllabics + 0x085d: "iu_CA", # Inuktitut - Latin + 0x083c: "ga_IE", # Irish - Ireland + 0x0410: "it_IT", # Italian - Italy + 0x0810: "it_CH", # Italian - Switzerland + 0x0411: "ja_JP", # Japanese + 0x044b: "kn_IN", # Kannada - India + 0x043f: "kk_KZ", # Kazakh + 0x0453: "kh_KH", # Khmer - Cambodia + 0x0486: "qut_GT",# K'iche - Guatemala + 0x0487: "rw_RW", # Kinyarwanda - Rwanda + 0x0457: "kok_IN",# Konkani + 0x0412: "ko_KR", # Korean + 0x0440: "ky_KG", # Kyrgyz + 0x0454: "lo_LA", # Lao - Lao PDR + 0x0426: "lv_LV", # Latvian + 0x0427: "lt_LT", # Lithuanian + 0x082e: "dsb_DE",# Lower Sorbian - Germany + 0x046e: "lb_LU", # Luxembourgish + 0x042f: "mk_MK", # FYROM Macedonian + 0x043e: "ms_MY", # Malay - Malaysia + 0x083e: "ms_BN", # Malay - Brunei Darussalam + 0x044c: "ml_IN", # Malayalam - India + 0x043a: "mt_MT", # Maltese + 0x0481: "mi_NZ", # Maori + 0x047a: "arn_CL",# Mapudungun + 0x044e: "mr_IN", # Marathi + 0x047c: "moh_CA",# Mohawk - Canada + 0x0450: "mn_MN", # Mongolian - Cyrillic + 0x0850: "mn_CN", # Mongolian - PRC + 0x0461: "ne_NP", # Nepali + 0x0414: "nb_NO", # Norwegian - Bokmal + 0x0814: "nn_NO", # Norwegian - Nynorsk + 0x0482: "oc_FR", # Occitan - France + 0x0448: "or_IN", # Oriya - India + 0x0463: "ps_AF", # Pashto - Afghanistan + 0x0429: "fa_IR", # Persian + 0x0415: "pl_PL", # Polish + 0x0416: "pt_BR", # Portuguese - Brazil + 0x0816: "pt_PT", # Portuguese - Portugal + 0x0446: "pa_IN", # Punjabi + 0x046b: "quz_BO",# Quechua (Bolivia) + 0x086b: "quz_EC",# Quechua (Ecuador) + 0x0c6b: "quz_PE",# Quechua (Peru) + 0x0418: 
"ro_RO", # Romanian - Romania + 0x0417: "rm_CH", # Romansh + 0x0419: "ru_RU", # Russian + 0x243b: "smn_FI",# Sami Finland + 0x103b: "smj_NO",# Sami Norway + 0x143b: "smj_SE",# Sami Sweden + 0x043b: "se_NO", # Sami Northern Norway + 0x083b: "se_SE", # Sami Northern Sweden + 0x0c3b: "se_FI", # Sami Northern Finland + 0x203b: "sms_FI",# Sami Skolt + 0x183b: "sma_NO",# Sami Southern Norway + 0x1c3b: "sma_SE",# Sami Southern Sweden + 0x044f: "sa_IN", # Sanskrit + 0x0c1a: "sr_SP", # Serbian - Cyrillic + 0x1c1a: "sr_BA", # Serbian - Bosnia Cyrillic + 0x081a: "sr_SP", # Serbian - Latin + 0x181a: "sr_BA", # Serbian - Bosnia Latin + 0x045b: "si_LK", # Sinhala - Sri Lanka + 0x046c: "ns_ZA", # Northern Sotho + 0x0432: "tn_ZA", # Setswana - Southern Africa + 0x041b: "sk_SK", # Slovak + 0x0424: "sl_SI", # Slovenian + 0x040a: "es_ES", # Spanish - Spain + 0x080a: "es_MX", # Spanish - Mexico + 0x0c0a: "es_ES", # Spanish - Spain (Modern) + 0x100a: "es_GT", # Spanish - Guatemala + 0x140a: "es_CR", # Spanish - Costa Rica + 0x180a: "es_PA", # Spanish - Panama + 0x1c0a: "es_DO", # Spanish - Dominican Republic + 0x200a: "es_VE", # Spanish - Venezuela + 0x240a: "es_CO", # Spanish - Colombia + 0x280a: "es_PE", # Spanish - Peru + 0x2c0a: "es_AR", # Spanish - Argentina + 0x300a: "es_EC", # Spanish - Ecuador + 0x340a: "es_CL", # Spanish - Chile + 0x380a: "es_UR", # Spanish - Uruguay + 0x3c0a: "es_PY", # Spanish - Paraguay + 0x400a: "es_BO", # Spanish - Bolivia + 0x440a: "es_SV", # Spanish - El Salvador + 0x480a: "es_HN", # Spanish - Honduras + 0x4c0a: "es_NI", # Spanish - Nicaragua + 0x500a: "es_PR", # Spanish - Puerto Rico + 0x540a: "es_US", # Spanish - United States +# 0x0430: "", # Sutu - Not supported + 0x0441: "sw_KE", # Swahili + 0x041d: "sv_SE", # Swedish - Sweden + 0x081d: "sv_FI", # Swedish - Finland + 0x045a: "syr_SY",# Syriac + 0x0428: "tg_TJ", # Tajik - Cyrillic + 0x085f: "tmz_DZ",# Tamazight - Latin + 0x0449: "ta_IN", # Tamil + 0x0444: "tt_RU", # Tatar + 0x044a: "te_IN", # Telugu + 0x041e: "th_TH", # Thai + 0x0851: "bo_BT", # Tibetan - Bhutan + 0x0451: "bo_CN", # Tibetan - PRC + 0x041f: "tr_TR", # Turkish + 0x0442: "tk_TM", # Turkmen - Cyrillic + 0x0480: "ug_CN", # Uighur - Arabic + 0x0422: "uk_UA", # Ukrainian + 0x042e: "wen_DE",# Upper Sorbian - Germany + 0x0420: "ur_PK", # Urdu + 0x0820: "ur_IN", # Urdu - India + 0x0443: "uz_UZ", # Uzbek - Latin + 0x0843: "uz_UZ", # Uzbek - Cyrillic + 0x042a: "vi_VN", # Vietnamese + 0x0452: "cy_GB", # Welsh + 0x0488: "wo_SN", # Wolof - Senegal + 0x0434: "xh_ZA", # Xhosa - South Africa + 0x0485: "sah_RU",# Yakut - Cyrillic + 0x0478: "ii_CN", # Yi - PRC + 0x046a: "yo_NG", # Yoruba - Nigeria + 0x0435: "zu_ZA", # Zulu +} + +def _print_locale(): + + """ Test function. 
+ """ + categories = {} + def _init_categories(categories=categories): + for k,v in globals().items(): + if k[:3] == 'LC_': + categories[k] = v + _init_categories() + del categories['LC_ALL'] + + print('Locale defaults as determined by getdefaultlocale():') + print('-'*72) + lang, enc = getdefaultlocale() + print('Language: ', lang or '(undefined)') + print('Encoding: ', enc or '(undefined)') + print() + + print('Locale settings on startup:') + print('-'*72) + for name,category in categories.items(): + print(name, '...') + lang, enc = getlocale(category) + print(' Language: ', lang or '(undefined)') + print(' Encoding: ', enc or '(undefined)') + print() + + print() + print('Locale settings after calling resetlocale():') + print('-'*72) + resetlocale() + for name,category in categories.items(): + print(name, '...') + lang, enc = getlocale(category) + print(' Language: ', lang or '(undefined)') + print(' Encoding: ', enc or '(undefined)') + print() + + try: + setlocale(LC_ALL, "") + except: + print('NOTE:') + print('setlocale(LC_ALL, "") does not support the default locale') + print('given in the OS environment variables.') + else: + print() + print('Locale settings after calling setlocale(LC_ALL, ""):') + print('-'*72) + for name,category in categories.items(): + print(name, '...') + lang, enc = getlocale(category) + print(' Language: ', lang or '(undefined)') + print(' Encoding: ', enc or '(undefined)') + print() + +### + +try: + LC_MESSAGES +except NameError: + pass +else: + __all__.append("LC_MESSAGES") + +if __name__=='__main__': + print('Locale aliasing:') + print() + _print_locale() + print() + print('Number formatting:') + print() + _test() diff --git a/venv/Lib/no-global-site-packages.txt b/venv/Lib/no-global-site-packages.txt new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/ntpath.py b/venv/Lib/ntpath.py new file mode 100644 index 00000000..2182ec77 --- /dev/null +++ b/venv/Lib/ntpath.py @@ -0,0 +1,671 @@ +# Module 'ntpath' -- common operations on WinNT/Win95 pathnames +"""Common pathname manipulations, WindowsNT/95 version. + +Instead of importing this module directly, import os and refer to this +module as os.path. +""" + +# strings representing various path-related bits and pieces +# These are primarily for export; internally, they are hardcoded. +# Should be set before imports for resolving cyclic dependency. +curdir = '.' +pardir = '..' +extsep = '.' +sep = '\\' +pathsep = ';' +altsep = '/' +defpath = '.;C:\\bin' +devnull = 'nul' + +import os +import sys +import stat +import genericpath +from genericpath import * + +__all__ = ["normcase","isabs","join","splitdrive","split","splitext", + "basename","dirname","commonprefix","getsize","getmtime", + "getatime","getctime", "islink","exists","lexists","isdir","isfile", + "ismount", "expanduser","expandvars","normpath","abspath", + "curdir","pardir","sep","pathsep","defpath","altsep", + "extsep","devnull","realpath","supports_unicode_filenames","relpath", + "samefile", "sameopenfile", "samestat", "commonpath"] + +def _get_bothseps(path): + if isinstance(path, bytes): + return b'\\/' + else: + return '\\/' + +# Normalize the case of a pathname and map slashes to backslashes. +# Other normalizations (such as optimizing '../' away) are not done +# (this is done by normpath). + +def normcase(s): + """Normalize case of pathname. 
+ + Makes all characters lowercase and all slashes into backslashes.""" + s = os.fspath(s) + try: + if isinstance(s, bytes): + return s.replace(b'/', b'\\').lower() + else: + return s.replace('/', '\\').lower() + except (TypeError, AttributeError): + if not isinstance(s, (bytes, str)): + raise TypeError("normcase() argument must be str or bytes, " + "not %r" % s.__class__.__name__) from None + raise + + +# Return whether a path is absolute. +# Trivial in Posix, harder on Windows. +# For Windows it is absolute if it starts with a slash or backslash (current +# volume), or if a pathname after the volume-letter-and-colon or UNC-resource +# starts with a slash or backslash. + +def isabs(s): + """Test whether a path is absolute""" + s = os.fspath(s) + s = splitdrive(s)[1] + return len(s) > 0 and s[0] in _get_bothseps(s) + + +# Join two (or more) paths. +def join(path, *paths): + path = os.fspath(path) + if isinstance(path, bytes): + sep = b'\\' + seps = b'\\/' + colon = b':' + else: + sep = '\\' + seps = '\\/' + colon = ':' + try: + if not paths: + path[:0] + sep #23780: Ensure compatible data type even if p is null. + result_drive, result_path = splitdrive(path) + for p in map(os.fspath, paths): + p_drive, p_path = splitdrive(p) + if p_path and p_path[0] in seps: + # Second path is absolute + if p_drive or not result_drive: + result_drive = p_drive + result_path = p_path + continue + elif p_drive and p_drive != result_drive: + if p_drive.lower() != result_drive.lower(): + # Different drives => ignore the first path entirely + result_drive = p_drive + result_path = p_path + continue + # Same drive in different case + result_drive = p_drive + # Second path is relative to the first + if result_path and result_path[-1] not in seps: + result_path = result_path + sep + result_path = result_path + p_path + ## add separator between UNC and non-absolute path + if (result_path and result_path[0] not in seps and + result_drive and result_drive[-1:] != colon): + return result_drive + sep + result_path + return result_drive + result_path + except (TypeError, AttributeError, BytesWarning): + genericpath._check_arg_types('join', path, *paths) + raise + + +# Split a path in a drive specification (a drive letter followed by a +# colon) and the path specification. +# It is always true that drivespec + pathspec == p +def splitdrive(p): + """Split a pathname into drive/UNC sharepoint and relative path specifiers. + Returns a 2-tuple (drive_or_unc, path); either part may be empty. + + If you assign + result = splitdrive(p) + It is always true that: + result[0] + result[1] == p + + If the path contained a drive letter, drive_or_unc will contain everything + up to and including the colon. e.g. splitdrive("c:/dir") returns ("c:", "/dir") + + If the path contained a UNC path, the drive_or_unc will contain the host name + and share up to but not including the fourth directory separator character. + e.g. splitdrive("//host/computer/dir") returns ("//host/computer", "/dir") + + Paths cannot contain both a drive letter and a UNC path. + + """ + p = os.fspath(p) + if len(p) >= 2: + if isinstance(p, bytes): + sep = b'\\' + altsep = b'/' + colon = b':' + else: + sep = '\\' + altsep = '/' + colon = ':' + normp = p.replace(altsep, sep) + if (normp[0:2] == sep*2) and (normp[2:3] != sep): + # is a UNC path: + # vvvvvvvvvvvvvvvvvvvv drive letter or UNC path + # \\machine\mountpoint\directory\etc\... 
+ # directory ^^^^^^^^^^^^^^^ + index = normp.find(sep, 2) + if index == -1: + return p[:0], p + index2 = normp.find(sep, index + 1) + # a UNC path can't have two slashes in a row + # (after the initial two) + if index2 == index + 1: + return p[:0], p + if index2 == -1: + index2 = len(p) + return p[:index2], p[index2:] + if normp[1:2] == colon: + return p[:2], p[2:] + return p[:0], p + + +# Split a path in head (everything up to the last '/') and tail (the +# rest). After the trailing '/' is stripped, the invariant +# join(head, tail) == p holds. +# The resulting head won't end in '/' unless it is the root. + +def split(p): + """Split a pathname. + + Return tuple (head, tail) where tail is everything after the final slash. + Either part may be empty.""" + p = os.fspath(p) + seps = _get_bothseps(p) + d, p = splitdrive(p) + # set i to index beyond p's last slash + i = len(p) + while i and p[i-1] not in seps: + i -= 1 + head, tail = p[:i], p[i:] # now tail has no slashes + # remove trailing slashes from head, unless it's all slashes + head = head.rstrip(seps) or head + return d + head, tail + + +# Split a path in root and extension. +# The extension is everything starting at the last dot in the last +# pathname component; the root is everything before that. +# It is always true that root + ext == p. + +def splitext(p): + p = os.fspath(p) + if isinstance(p, bytes): + return genericpath._splitext(p, b'\\', b'/', b'.') + else: + return genericpath._splitext(p, '\\', '/', '.') +splitext.__doc__ = genericpath._splitext.__doc__ + + +# Return the tail (basename) part of a path. + +def basename(p): + """Returns the final component of a pathname""" + return split(p)[1] + + +# Return the head (dirname) part of a path. + +def dirname(p): + """Returns the directory component of a pathname""" + return split(p)[0] + +# Is a path a symbolic link? +# This will always return false on systems where os.lstat doesn't exist. + +def islink(path): + """Test whether a path is a symbolic link. + This will always return false for Windows prior to 6.0. + """ + try: + st = os.lstat(path) + except (OSError, AttributeError): + return False + return stat.S_ISLNK(st.st_mode) + +# Being true for dangling symbolic links is also useful. + +def lexists(path): + """Test whether a path exists. Returns True for broken symbolic links""" + try: + st = os.lstat(path) + except OSError: + return False + return True + +# Is a path a mount point? +# Any drive letter root (eg c:\) +# Any share UNC (eg \\server\share) +# Any volume mounted on a filesystem folder +# +# No one method detects all three situations. Historically we've lexically +# detected drive letter roots and share UNCs. The canonical approach to +# detecting mounted volumes (querying the reparse tag) fails for the most +# common case: drive letter roots. The alternative which uses GetVolumePathName +# fails if the drive letter is the result of a SUBST. +try: + from nt import _getvolumepathname +except ImportError: + _getvolumepathname = None +def ismount(path): + """Test whether a path is a mount point (a drive root, the root of a + share, or a mounted volume)""" + path = os.fspath(path) + seps = _get_bothseps(path) + path = abspath(path) + root, rest = splitdrive(path) + if root and root[0] in seps: + return (not rest) or (rest in seps) + if rest in seps: + return True + + if _getvolumepathname: + return path.rstrip(seps) == _getvolumepathname(path).rstrip(seps) + else: + return False + + +# Expand paths beginning with '~' or '~user'. 
+# '~' means $HOME; '~user' means that user's home directory. +# If the path doesn't begin with '~', or if the user or $HOME is unknown, +# the path is returned unchanged (leaving error reporting to whatever +# function is called with the expanded path as argument). +# See also module 'glob' for expansion of *, ? and [...] in pathnames. +# (A function should also be defined to do full *sh-style environment +# variable expansion.) + +def expanduser(path): + """Expand ~ and ~user constructs. + + If user or $HOME is unknown, do nothing.""" + path = os.fspath(path) + if isinstance(path, bytes): + tilde = b'~' + else: + tilde = '~' + if not path.startswith(tilde): + return path + i, n = 1, len(path) + while i < n and path[i] not in _get_bothseps(path): + i += 1 + + if 'HOME' in os.environ: + userhome = os.environ['HOME'] + elif 'USERPROFILE' in os.environ: + userhome = os.environ['USERPROFILE'] + elif not 'HOMEPATH' in os.environ: + return path + else: + try: + drive = os.environ['HOMEDRIVE'] + except KeyError: + drive = '' + userhome = join(drive, os.environ['HOMEPATH']) + + if isinstance(path, bytes): + userhome = os.fsencode(userhome) + + if i != 1: #~user + userhome = join(dirname(userhome), path[1:i]) + + return userhome + path[i:] + + +# Expand paths containing shell variable substitutions. +# The following rules apply: +# - no expansion within single quotes +# - '$$' is translated into '$' +# - '%%' is translated into '%' if '%%' are not seen in %var1%%var2% +# - ${varname} is accepted. +# - $varname is accepted. +# - %varname% is accepted. +# - varnames can be made out of letters, digits and the characters '_-' +# (though is not verified in the ${varname} and %varname% cases) +# XXX With COMMAND.COM you can use any characters in a variable name, +# XXX except '^|<>='. + +def expandvars(path): + """Expand shell variables of the forms $var, ${var} and %var%. 
+ + Unknown variables are left unchanged.""" + path = os.fspath(path) + if isinstance(path, bytes): + if b'$' not in path and b'%' not in path: + return path + import string + varchars = bytes(string.ascii_letters + string.digits + '_-', 'ascii') + quote = b'\'' + percent = b'%' + brace = b'{' + rbrace = b'}' + dollar = b'$' + environ = getattr(os, 'environb', None) + else: + if '$' not in path and '%' not in path: + return path + import string + varchars = string.ascii_letters + string.digits + '_-' + quote = '\'' + percent = '%' + brace = '{' + rbrace = '}' + dollar = '$' + environ = os.environ + res = path[:0] + index = 0 + pathlen = len(path) + while index < pathlen: + c = path[index:index+1] + if c == quote: # no expansion within single quotes + path = path[index + 1:] + pathlen = len(path) + try: + index = path.index(c) + res += c + path[:index + 1] + except ValueError: + res += c + path + index = pathlen - 1 + elif c == percent: # variable or '%' + if path[index + 1:index + 2] == percent: + res += c + index += 1 + else: + path = path[index+1:] + pathlen = len(path) + try: + index = path.index(percent) + except ValueError: + res += percent + path + index = pathlen - 1 + else: + var = path[:index] + try: + if environ is None: + value = os.fsencode(os.environ[os.fsdecode(var)]) + else: + value = environ[var] + except KeyError: + value = percent + var + percent + res += value + elif c == dollar: # variable or '$$' + if path[index + 1:index + 2] == dollar: + res += c + index += 1 + elif path[index + 1:index + 2] == brace: + path = path[index+2:] + pathlen = len(path) + try: + index = path.index(rbrace) + except ValueError: + res += dollar + brace + path + index = pathlen - 1 + else: + var = path[:index] + try: + if environ is None: + value = os.fsencode(os.environ[os.fsdecode(var)]) + else: + value = environ[var] + except KeyError: + value = dollar + brace + var + rbrace + res += value + else: + var = path[:0] + index += 1 + c = path[index:index + 1] + while c and c in varchars: + var += c + index += 1 + c = path[index:index + 1] + try: + if environ is None: + value = os.fsencode(os.environ[os.fsdecode(var)]) + else: + value = environ[var] + except KeyError: + value = dollar + var + res += value + if c: + index -= 1 + else: + res += c + index += 1 + return res + + +# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A\B. +# Previously, this function also truncated pathnames to 8+3 format, +# but as this module is called "ntpath", that's obviously wrong! + +def normpath(path): + """Normalize path, eliminating double slashes, etc.""" + path = os.fspath(path) + if isinstance(path, bytes): + sep = b'\\' + altsep = b'/' + curdir = b'.' + pardir = b'..' + special_prefixes = (b'\\\\.\\', b'\\\\?\\') + else: + sep = '\\' + altsep = '/' + curdir = '.' + pardir = '..' 
+ special_prefixes = ('\\\\.\\', '\\\\?\\') + if path.startswith(special_prefixes): + # in the case of paths with these prefixes: + # \\.\ -> device names + # \\?\ -> literal paths + # do not do any normalization, but return the path unchanged + return path + path = path.replace(altsep, sep) + prefix, path = splitdrive(path) + + # collapse initial backslashes + if path.startswith(sep): + prefix += sep + path = path.lstrip(sep) + + comps = path.split(sep) + i = 0 + while i < len(comps): + if not comps[i] or comps[i] == curdir: + del comps[i] + elif comps[i] == pardir: + if i > 0 and comps[i-1] != pardir: + del comps[i-1:i+1] + i -= 1 + elif i == 0 and prefix.endswith(sep): + del comps[i] + else: + i += 1 + else: + i += 1 + # If the path is now empty, substitute '.' + if not prefix and not comps: + comps.append(curdir) + return prefix + sep.join(comps) + + +# Return an absolute path. +try: + from nt import _getfullpathname + +except ImportError: # not running on Windows - mock up something sensible + def abspath(path): + """Return the absolute version of a path.""" + path = os.fspath(path) + if not isabs(path): + if isinstance(path, bytes): + cwd = os.getcwdb() + else: + cwd = os.getcwd() + path = join(cwd, path) + return normpath(path) + +else: # use native Windows method on Windows + def abspath(path): + """Return the absolute version of a path.""" + + if path: # Empty path must return current working directory. + path = os.fspath(path) + try: + path = _getfullpathname(path) + except OSError: + pass # Bad path - return unchanged. + elif isinstance(path, bytes): + path = os.getcwdb() + else: + path = os.getcwd() + return normpath(path) + +# realpath is a no-op on systems without islink support +realpath = abspath +# Win9x family and earlier have no Unicode filename support. +supports_unicode_filenames = (hasattr(sys, "getwindowsversion") and + sys.getwindowsversion()[3] >= 2) + +def relpath(path, start=None): + """Return a relative version of a path""" + path = os.fspath(path) + if isinstance(path, bytes): + sep = b'\\' + curdir = b'.' + pardir = b'..' + else: + sep = '\\' + curdir = '.' + pardir = '..' + + if start is None: + start = curdir + + if not path: + raise ValueError("no path specified") + + start = os.fspath(start) + try: + start_abs = abspath(normpath(start)) + path_abs = abspath(normpath(path)) + start_drive, start_rest = splitdrive(start_abs) + path_drive, path_rest = splitdrive(path_abs) + if normcase(start_drive) != normcase(path_drive): + raise ValueError("path is on mount %r, start on mount %r" % ( + path_drive, start_drive)) + + start_list = [x for x in start_rest.split(sep) if x] + path_list = [x for x in path_rest.split(sep) if x] + # Work out how much of the filepath is shared by start and path. + i = 0 + for e1, e2 in zip(start_list, path_list): + if normcase(e1) != normcase(e2): + break + i += 1 + + rel_list = [pardir] * (len(start_list)-i) + path_list[i:] + if not rel_list: + return curdir + return join(*rel_list) + except (TypeError, ValueError, AttributeError, BytesWarning, DeprecationWarning): + genericpath._check_arg_types('relpath', path, start) + raise + + +# Return the longest common sub-path of the sequence of paths given as input. +# The function is case-insensitive and 'separator-insensitive', i.e. if the +# only difference between two paths is the use of '\' versus '/' as separator, +# they are deemed to be equal. 
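+#
+# A minimal illustration (hypothetical paths): commonpath() compares its
+# inputs case- and separator-insensitively, so
+#     commonpath([r'C:\Repo\app\api', 'C:/repo/app/tests'])
+# returns C:\Repo\app, taking its casing from the first argument.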
+# +# However, the returned path will have the standard '\' separator (even if the +# given paths had the alternative '/' separator) and will have the case of the +# first path given in the sequence. Additionally, any trailing separator is +# stripped from the returned path. + +def commonpath(paths): + """Given a sequence of path names, returns the longest common sub-path.""" + + if not paths: + raise ValueError('commonpath() arg is an empty sequence') + + paths = tuple(map(os.fspath, paths)) + if isinstance(paths[0], bytes): + sep = b'\\' + altsep = b'/' + curdir = b'.' + else: + sep = '\\' + altsep = '/' + curdir = '.' + + try: + drivesplits = [splitdrive(p.replace(altsep, sep).lower()) for p in paths] + split_paths = [p.split(sep) for d, p in drivesplits] + + try: + isabs, = set(p[:1] == sep for d, p in drivesplits) + except ValueError: + raise ValueError("Can't mix absolute and relative paths") from None + + # Check that all drive letters or UNC paths match. The check is made only + # now otherwise type errors for mixing strings and bytes would not be + # caught. + if len(set(d for d, p in drivesplits)) != 1: + raise ValueError("Paths don't have the same drive") + + drive, path = splitdrive(paths[0].replace(altsep, sep)) + common = path.split(sep) + common = [c for c in common if c and c != curdir] + + split_paths = [[c for c in s if c and c != curdir] for s in split_paths] + s1 = min(split_paths) + s2 = max(split_paths) + for i, c in enumerate(s1): + if c != s2[i]: + common = common[:i] + break + else: + common = common[:len(s1)] + + prefix = drive + sep if isabs else drive + return prefix + sep.join(common) + except (TypeError, AttributeError): + genericpath._check_arg_types('commonpath', *paths) + raise + + +# determine if two files are in fact the same file +try: + # GetFinalPathNameByHandle is available starting with Windows 6.0. + # Windows XP and non-Windows OS'es will mock _getfinalpathname. + if sys.getwindowsversion()[:2] >= (6, 0): + from nt import _getfinalpathname + else: + raise ImportError +except (AttributeError, ImportError): + # On Windows XP and earlier, two files are the same if their absolute + # pathnames are the same. + # Non-Windows operating systems fake this method with an XP + # approximation. + def _getfinalpathname(f): + return normcase(abspath(f)) + + +try: + # The genericpath.isdir implementation uses os.stat and checks the mode + # attribute to tell whether or not the path is a directory. + # This is overkill on Windows - just pass the path to GetFileAttributes + # and check the attribute from there. + from nt import _isdir as isdir +except ImportError: + # Use genericpath.isdir as imported above. + pass diff --git a/venv/Lib/operator.py b/venv/Lib/operator.py new file mode 100644 index 00000000..0e2e53ef --- /dev/null +++ b/venv/Lib/operator.py @@ -0,0 +1,464 @@ +""" +Operator Interface + +This module exports a set of functions corresponding to the intrinsic +operators of Python. For example, operator.add(x, y) is equivalent +to the expression x+y. The function names are those used for special +methods; variants without leading and trailing '__' are also provided +for convenience. + +This is the pure Python implementation of the module. 
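+
+For example (illustrative only):
+
+    operator.itemgetter(1)(['a', 'b', 'c'])   # -> 'b'
+    operator.methodcaller('upper')('abc')     # -> 'ABC'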
+""" + +__all__ = ['abs', 'add', 'and_', 'attrgetter', 'concat', 'contains', 'countOf', + 'delitem', 'eq', 'floordiv', 'ge', 'getitem', 'gt', 'iadd', 'iand', + 'iconcat', 'ifloordiv', 'ilshift', 'imatmul', 'imod', 'imul', + 'index', 'indexOf', 'inv', 'invert', 'ior', 'ipow', 'irshift', + 'is_', 'is_not', 'isub', 'itemgetter', 'itruediv', 'ixor', 'le', + 'length_hint', 'lshift', 'lt', 'matmul', 'methodcaller', 'mod', + 'mul', 'ne', 'neg', 'not_', 'or_', 'pos', 'pow', 'rshift', + 'setitem', 'sub', 'truediv', 'truth', 'xor'] + +from builtins import abs as _abs + + +# Comparison Operations *******************************************************# + +def lt(a, b): + "Same as a < b." + return a < b + +def le(a, b): + "Same as a <= b." + return a <= b + +def eq(a, b): + "Same as a == b." + return a == b + +def ne(a, b): + "Same as a != b." + return a != b + +def ge(a, b): + "Same as a >= b." + return a >= b + +def gt(a, b): + "Same as a > b." + return a > b + +# Logical Operations **********************************************************# + +def not_(a): + "Same as not a." + return not a + +def truth(a): + "Return True if a is true, False otherwise." + return True if a else False + +def is_(a, b): + "Same as a is b." + return a is b + +def is_not(a, b): + "Same as a is not b." + return a is not b + +# Mathematical/Bitwise Operations *********************************************# + +def abs(a): + "Same as abs(a)." + return _abs(a) + +def add(a, b): + "Same as a + b." + return a + b + +def and_(a, b): + "Same as a & b." + return a & b + +def floordiv(a, b): + "Same as a // b." + return a // b + +def index(a): + "Same as a.__index__()." + return a.__index__() + +def inv(a): + "Same as ~a." + return ~a +invert = inv + +def lshift(a, b): + "Same as a << b." + return a << b + +def mod(a, b): + "Same as a % b." + return a % b + +def mul(a, b): + "Same as a * b." + return a * b + +def matmul(a, b): + "Same as a @ b." + return a @ b + +def neg(a): + "Same as -a." + return -a + +def or_(a, b): + "Same as a | b." + return a | b + +def pos(a): + "Same as +a." + return +a + +def pow(a, b): + "Same as a ** b." + return a ** b + +def rshift(a, b): + "Same as a >> b." + return a >> b + +def sub(a, b): + "Same as a - b." + return a - b + +def truediv(a, b): + "Same as a / b." + return a / b + +def xor(a, b): + "Same as a ^ b." + return a ^ b + +# Sequence Operations *********************************************************# + +def concat(a, b): + "Same as a + b, for a and b sequences." + if not hasattr(a, '__getitem__'): + msg = "'%s' object can't be concatenated" % type(a).__name__ + raise TypeError(msg) + return a + b + +def contains(a, b): + "Same as b in a (note reversed operands)." + return b in a + +def countOf(a, b): + "Return the number of times b occurs in a." + count = 0 + for i in a: + if i == b: + count += 1 + return count + +def delitem(a, b): + "Same as del a[b]." + del a[b] + +def getitem(a, b): + "Same as a[b]." + return a[b] + +def indexOf(a, b): + "Return the first index of b in a." + for i, j in enumerate(a): + if j == b: + return i + else: + raise ValueError('sequence.index(x): x not in sequence') + +def setitem(a, b, c): + "Same as a[b] = c." + a[b] = c + +def length_hint(obj, default=0): + """ + Return an estimate of the number of items in obj. + This is useful for presizing containers when building from an iterable. + + If the object supports len(), the result will be exact. Otherwise, it may + over- or under-estimate by an arbitrary amount. The result will be an + integer >= 0. 
+ """ + if not isinstance(default, int): + msg = ("'%s' object cannot be interpreted as an integer" % + type(default).__name__) + raise TypeError(msg) + + try: + return len(obj) + except TypeError: + pass + + try: + hint = type(obj).__length_hint__ + except AttributeError: + return default + + try: + val = hint(obj) + except TypeError: + return default + if val is NotImplemented: + return default + if not isinstance(val, int): + msg = ('__length_hint__ must be integer, not %s' % + type(val).__name__) + raise TypeError(msg) + if val < 0: + msg = '__length_hint__() should return >= 0' + raise ValueError(msg) + return val + +# Generalized Lookup Objects **************************************************# + +class attrgetter: + """ + Return a callable object that fetches the given attribute(s) from its operand. + After f = attrgetter('name'), the call f(r) returns r.name. + After g = attrgetter('name', 'date'), the call g(r) returns (r.name, r.date). + After h = attrgetter('name.first', 'name.last'), the call h(r) returns + (r.name.first, r.name.last). + """ + __slots__ = ('_attrs', '_call') + + def __init__(self, attr, *attrs): + if not attrs: + if not isinstance(attr, str): + raise TypeError('attribute name must be a string') + self._attrs = (attr,) + names = attr.split('.') + def func(obj): + for name in names: + obj = getattr(obj, name) + return obj + self._call = func + else: + self._attrs = (attr,) + attrs + getters = tuple(map(attrgetter, self._attrs)) + def func(obj): + return tuple(getter(obj) for getter in getters) + self._call = func + + def __call__(self, obj): + return self._call(obj) + + def __repr__(self): + return '%s.%s(%s)' % (self.__class__.__module__, + self.__class__.__qualname__, + ', '.join(map(repr, self._attrs))) + + def __reduce__(self): + return self.__class__, self._attrs + +class itemgetter: + """ + Return a callable object that fetches the given item(s) from its operand. + After f = itemgetter(2), the call f(r) returns r[2]. + After g = itemgetter(2, 5, 3), the call g(r) returns (r[2], r[5], r[3]) + """ + __slots__ = ('_items', '_call') + + def __init__(self, item, *items): + if not items: + self._items = (item,) + def func(obj): + return obj[item] + self._call = func + else: + self._items = items = (item,) + items + def func(obj): + return tuple(obj[i] for i in items) + self._call = func + + def __call__(self, obj): + return self._call(obj) + + def __repr__(self): + return '%s.%s(%s)' % (self.__class__.__module__, + self.__class__.__name__, + ', '.join(map(repr, self._items))) + + def __reduce__(self): + return self.__class__, self._items + +class methodcaller: + """ + Return a callable object that calls the given method on its operand. + After f = methodcaller('name'), the call f(r) returns r.name(). + After g = methodcaller('name', 'date', foo=1), the call g(r) returns + r.name('date', foo=1). 
+ """ + __slots__ = ('_name', '_args', '_kwargs') + + def __init__(*args, **kwargs): + if len(args) < 2: + msg = "methodcaller needs at least one argument, the method name" + raise TypeError(msg) + self = args[0] + self._name = args[1] + if not isinstance(self._name, str): + raise TypeError('method name must be a string') + self._args = args[2:] + self._kwargs = kwargs + + def __call__(self, obj): + return getattr(obj, self._name)(*self._args, **self._kwargs) + + def __repr__(self): + args = [repr(self._name)] + args.extend(map(repr, self._args)) + args.extend('%s=%r' % (k, v) for k, v in self._kwargs.items()) + return '%s.%s(%s)' % (self.__class__.__module__, + self.__class__.__name__, + ', '.join(args)) + + def __reduce__(self): + if not self._kwargs: + return self.__class__, (self._name,) + self._args + else: + from functools import partial + return partial(self.__class__, self._name, **self._kwargs), self._args + + +# In-place Operations *********************************************************# + +def iadd(a, b): + "Same as a += b." + a += b + return a + +def iand(a, b): + "Same as a &= b." + a &= b + return a + +def iconcat(a, b): + "Same as a += b, for a and b sequences." + if not hasattr(a, '__getitem__'): + msg = "'%s' object can't be concatenated" % type(a).__name__ + raise TypeError(msg) + a += b + return a + +def ifloordiv(a, b): + "Same as a //= b." + a //= b + return a + +def ilshift(a, b): + "Same as a <<= b." + a <<= b + return a + +def imod(a, b): + "Same as a %= b." + a %= b + return a + +def imul(a, b): + "Same as a *= b." + a *= b + return a + +def imatmul(a, b): + "Same as a @= b." + a @= b + return a + +def ior(a, b): + "Same as a |= b." + a |= b + return a + +def ipow(a, b): + "Same as a **= b." + a **=b + return a + +def irshift(a, b): + "Same as a >>= b." + a >>= b + return a + +def isub(a, b): + "Same as a -= b." + a -= b + return a + +def itruediv(a, b): + "Same as a /= b." + a /= b + return a + +def ixor(a, b): + "Same as a ^= b." + a ^= b + return a + + +try: + from _operator import * +except ImportError: + pass +else: + from _operator import __doc__ + +# All of these "__func__ = func" assignments have to happen after importing +# from _operator to make sure they're set to the right function +__lt__ = lt +__le__ = le +__eq__ = eq +__ne__ = ne +__ge__ = ge +__gt__ = gt +__not__ = not_ +__abs__ = abs +__add__ = add +__and__ = and_ +__floordiv__ = floordiv +__index__ = index +__inv__ = inv +__invert__ = invert +__lshift__ = lshift +__mod__ = mod +__mul__ = mul +__matmul__ = matmul +__neg__ = neg +__or__ = or_ +__pos__ = pos +__pow__ = pow +__rshift__ = rshift +__sub__ = sub +__truediv__ = truediv +__xor__ = xor +__concat__ = concat +__contains__ = contains +__delitem__ = delitem +__getitem__ = getitem +__setitem__ = setitem +__iadd__ = iadd +__iand__ = iand +__iconcat__ = iconcat +__ifloordiv__ = ifloordiv +__ilshift__ = ilshift +__imod__ = imod +__imul__ = imul +__imatmul__ = imatmul +__ior__ = ior +__ipow__ = ipow +__irshift__ = irshift +__isub__ = isub +__itruediv__ = itruediv +__ixor__ = ixor diff --git a/venv/Lib/orig-prefix.txt b/venv/Lib/orig-prefix.txt new file mode 100644 index 00000000..37a012cd --- /dev/null +++ b/venv/Lib/orig-prefix.txt @@ -0,0 +1 @@ +c:\python37 \ No newline at end of file diff --git a/venv/Lib/os.py b/venv/Lib/os.py new file mode 100644 index 00000000..499e6285 --- /dev/null +++ b/venv/Lib/os.py @@ -0,0 +1,1078 @@ +r"""OS routines for NT or Posix depending on what system we're on. 
+ +This exports: + - all functions from posix or nt, e.g. unlink, stat, etc. + - os.path is either posixpath or ntpath + - os.name is either 'posix' or 'nt' + - os.curdir is a string representing the current directory (always '.') + - os.pardir is a string representing the parent directory (always '..') + - os.sep is the (or a most common) pathname separator ('/' or '\\') + - os.extsep is the extension separator (always '.') + - os.altsep is the alternate pathname separator (None or '/') + - os.pathsep is the component separator used in $PATH etc + - os.linesep is the line separator in text files ('\r' or '\n' or '\r\n') + - os.defpath is the default search path for executables + - os.devnull is the file path of the null device ('/dev/null', etc.) + +Programs that import and use 'os' stand a better chance of being +portable between different platforms. Of course, they must then +only use functions that are defined by all platforms (e.g., unlink +and opendir), and leave all pathname manipulation to os.path +(e.g., split and join). +""" + +#' +import abc +import sys +import stat as st + +_names = sys.builtin_module_names + +# Note: more names are added to __all__ later. +__all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep", + "defpath", "name", "path", "devnull", "SEEK_SET", "SEEK_CUR", + "SEEK_END", "fsencode", "fsdecode", "get_exec_path", "fdopen", + "popen", "extsep"] + +def _exists(name): + return name in globals() + +def _get_exports_list(module): + try: + return list(module.__all__) + except AttributeError: + return [n for n in dir(module) if n[0] != '_'] + +# Any new dependencies of the os module and/or changes in path separator +# requires updating importlib as well. +if 'posix' in _names: + name = 'posix' + linesep = '\n' + from posix import * + try: + from posix import _exit + __all__.append('_exit') + except ImportError: + pass + import posixpath as path + + try: + from posix import _have_functions + except ImportError: + pass + + import posix + __all__.extend(_get_exports_list(posix)) + del posix + +elif 'nt' in _names: + name = 'nt' + linesep = '\r\n' + from nt import * + try: + from nt import _exit + __all__.append('_exit') + except ImportError: + pass + import ntpath as path + + import nt + __all__.extend(_get_exports_list(nt)) + del nt + + try: + from nt import _have_functions + except ImportError: + pass + +else: + raise ImportError('no os specific module found') + +sys.modules['os.path'] = path +from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep, + devnull) + +del _names + + +if _exists("_have_functions"): + _globals = globals() + def _add(str, fn): + if (fn in _globals) and (str in _have_functions): + _set.add(_globals[fn]) + + _set = set() + _add("HAVE_FACCESSAT", "access") + _add("HAVE_FCHMODAT", "chmod") + _add("HAVE_FCHOWNAT", "chown") + _add("HAVE_FSTATAT", "stat") + _add("HAVE_FUTIMESAT", "utime") + _add("HAVE_LINKAT", "link") + _add("HAVE_MKDIRAT", "mkdir") + _add("HAVE_MKFIFOAT", "mkfifo") + _add("HAVE_MKNODAT", "mknod") + _add("HAVE_OPENAT", "open") + _add("HAVE_READLINKAT", "readlink") + _add("HAVE_RENAMEAT", "rename") + _add("HAVE_SYMLINKAT", "symlink") + _add("HAVE_UNLINKAT", "unlink") + _add("HAVE_UNLINKAT", "rmdir") + _add("HAVE_UTIMENSAT", "utime") + supports_dir_fd = _set + + _set = set() + _add("HAVE_FACCESSAT", "access") + supports_effective_ids = _set + + _set = set() + _add("HAVE_FCHDIR", "chdir") + _add("HAVE_FCHMOD", "chmod") + _add("HAVE_FCHOWN", "chown") + _add("HAVE_FDOPENDIR", "listdir") + 
_add("HAVE_FDOPENDIR", "scandir") + _add("HAVE_FEXECVE", "execve") + _set.add(stat) # fstat always works + _add("HAVE_FTRUNCATE", "truncate") + _add("HAVE_FUTIMENS", "utime") + _add("HAVE_FUTIMES", "utime") + _add("HAVE_FPATHCONF", "pathconf") + if _exists("statvfs") and _exists("fstatvfs"): # mac os x10.3 + _add("HAVE_FSTATVFS", "statvfs") + supports_fd = _set + + _set = set() + _add("HAVE_FACCESSAT", "access") + # Some platforms don't support lchmod(). Often the function exists + # anyway, as a stub that always returns ENOSUP or perhaps EOPNOTSUPP. + # (No, I don't know why that's a good design.) ./configure will detect + # this and reject it--so HAVE_LCHMOD still won't be defined on such + # platforms. This is Very Helpful. + # + # However, sometimes platforms without a working lchmod() *do* have + # fchmodat(). (Examples: Linux kernel 3.2 with glibc 2.15, + # OpenIndiana 3.x.) And fchmodat() has a flag that theoretically makes + # it behave like lchmod(). So in theory it would be a suitable + # replacement for lchmod(). But when lchmod() doesn't work, fchmodat()'s + # flag doesn't work *either*. Sadly ./configure isn't sophisticated + # enough to detect this condition--it only determines whether or not + # fchmodat() minimally works. + # + # Therefore we simply ignore fchmodat() when deciding whether or not + # os.chmod supports follow_symlinks. Just checking lchmod() is + # sufficient. After all--if you have a working fchmodat(), your + # lchmod() almost certainly works too. + # + # _add("HAVE_FCHMODAT", "chmod") + _add("HAVE_FCHOWNAT", "chown") + _add("HAVE_FSTATAT", "stat") + _add("HAVE_LCHFLAGS", "chflags") + _add("HAVE_LCHMOD", "chmod") + if _exists("lchown"): # mac os x10.3 + _add("HAVE_LCHOWN", "chown") + _add("HAVE_LINKAT", "link") + _add("HAVE_LUTIMES", "utime") + _add("HAVE_LSTAT", "stat") + _add("HAVE_FSTATAT", "stat") + _add("HAVE_UTIMENSAT", "utime") + _add("MS_WINDOWS", "stat") + supports_follow_symlinks = _set + + del _set + del _have_functions + del _globals + del _add + + +# Python uses fixed values for the SEEK_ constants; they are mapped +# to native constants if necessary in posixmodule.c +# Other possible SEEK values are directly imported from posixmodule.c +SEEK_SET = 0 +SEEK_CUR = 1 +SEEK_END = 2 + +# Super directory utilities. +# (Inspired by Eric Raymond; the doc strings are mostly his) + +def makedirs(name, mode=0o777, exist_ok=False): + """makedirs(name [, mode=0o777][, exist_ok=False]) + + Super-mkdir; create a leaf directory and all intermediate ones. Works like + mkdir, except that any intermediate path segment (not just the rightmost) + will be created if it does not exist. If the target directory already + exists, raise an OSError if exist_ok is False. Otherwise no exception is + raised. This is recursive. + + """ + head, tail = path.split(name) + if not tail: + head, tail = path.split(head) + if head and tail and not path.exists(head): + try: + makedirs(head, exist_ok=exist_ok) + except FileExistsError: + # Defeats race condition when another thread created the path + pass + cdir = curdir + if isinstance(tail, bytes): + cdir = bytes(curdir, 'ASCII') + if tail == cdir: # xxx/newdir/. 
exists if xxx/newdir exists + return + try: + mkdir(name, mode) + except OSError: + # Cannot rely on checking for EEXIST, since the operating system + # could give priority to other errors like EACCES or EROFS + if not exist_ok or not path.isdir(name): + raise + +def removedirs(name): + """removedirs(name) + + Super-rmdir; remove a leaf directory and all empty intermediate + ones. Works like rmdir except that, if the leaf directory is + successfully removed, directories corresponding to rightmost path + segments will be pruned away until either the whole path is + consumed or an error occurs. Errors during this latter phase are + ignored -- they generally mean that a directory was not empty. + + """ + rmdir(name) + head, tail = path.split(name) + if not tail: + head, tail = path.split(head) + while head and tail: + try: + rmdir(head) + except OSError: + break + head, tail = path.split(head) + +def renames(old, new): + """renames(old, new) + + Super-rename; create directories as necessary and delete any left + empty. Works like rename, except creation of any intermediate + directories needed to make the new pathname good is attempted + first. After the rename, directories corresponding to rightmost + path segments of the old name will be pruned until either the + whole path is consumed or a nonempty directory is found. + + Note: this function can fail with the new directory structure made + if you lack permissions needed to unlink the leaf directory or + file. + + """ + head, tail = path.split(new) + if head and tail and not path.exists(head): + makedirs(head) + rename(old, new) + head, tail = path.split(old) + if head and tail: + try: + removedirs(head) + except OSError: + pass + +__all__.extend(["makedirs", "removedirs", "renames"]) + +def walk(top, topdown=True, onerror=None, followlinks=False): + """Directory tree generator. + + For each directory in the directory tree rooted at top (including top + itself, but excluding '.' and '..'), yields a 3-tuple + + dirpath, dirnames, filenames + + dirpath is a string, the path to the directory. dirnames is a list of + the names of the subdirectories in dirpath (excluding '.' and '..'). + filenames is a list of the names of the non-directory files in dirpath. + Note that the names in the lists are just names, with no path components. + To get a full path (which begins with top) to a file or directory in + dirpath, do os.path.join(dirpath, name). + + If optional arg 'topdown' is true or not specified, the triple for a + directory is generated before the triples for any of its subdirectories + (directories are generated top down). If topdown is false, the triple + for a directory is generated after the triples for all of its + subdirectories (directories are generated bottom up). + + When topdown is true, the caller can modify the dirnames list in-place + (e.g., via del or slice assignment), and walk will only recurse into the + subdirectories whose names remain in dirnames; this can be used to prune the + search, or to impose a specific order of visiting. Modifying dirnames when + topdown is false is ineffective, since the directories in dirnames have + already been generated by the time dirnames itself is generated. No matter + the value of topdown, the list of subdirectories is retrieved before the + tuples for the directory and its subdirectories are generated. + + By default errors from the os.scandir() call are ignored. 
If + optional arg 'onerror' is specified, it should be a function; it + will be called with one argument, an OSError instance. It can + report the error to continue with the walk, or raise the exception + to abort the walk. Note that the filename is available as the + filename attribute of the exception object. + + By default, os.walk does not follow symbolic links to subdirectories on + systems that support them. In order to get this functionality, set the + optional argument 'followlinks' to true. + + Caution: if you pass a relative pathname for top, don't change the + current working directory between resumptions of walk. walk never + changes the current directory, and assumes that the client doesn't + either. + + Example: + + import os + from os.path import join, getsize + for root, dirs, files in os.walk('python/Lib/email'): + print(root, "consumes", end="") + print(sum([getsize(join(root, name)) for name in files]), end="") + print("bytes in", len(files), "non-directory files") + if 'CVS' in dirs: + dirs.remove('CVS') # don't visit CVS directories + + """ + top = fspath(top) + dirs = [] + nondirs = [] + walk_dirs = [] + + # We may not have read permission for top, in which case we can't + # get a list of the files the directory contains. os.walk + # always suppressed the exception then, rather than blow up for a + # minor reason when (say) a thousand readable directories are still + # left to visit. That logic is copied here. + try: + # Note that scandir is global in this module due + # to earlier import-*. + scandir_it = scandir(top) + except OSError as error: + if onerror is not None: + onerror(error) + return + + with scandir_it: + while True: + try: + try: + entry = next(scandir_it) + except StopIteration: + break + except OSError as error: + if onerror is not None: + onerror(error) + return + + try: + is_dir = entry.is_dir() + except OSError: + # If is_dir() raises an OSError, consider that the entry is not + # a directory, same behaviour than os.path.isdir(). + is_dir = False + + if is_dir: + dirs.append(entry.name) + else: + nondirs.append(entry.name) + + if not topdown and is_dir: + # Bottom-up: recurse into sub-directory, but exclude symlinks to + # directories if followlinks is False + if followlinks: + walk_into = True + else: + try: + is_symlink = entry.is_symlink() + except OSError: + # If is_symlink() raises an OSError, consider that the + # entry is not a symbolic link, same behaviour than + # os.path.islink(). + is_symlink = False + walk_into = not is_symlink + + if walk_into: + walk_dirs.append(entry.path) + + # Yield before recursion if going top down + if topdown: + yield top, dirs, nondirs + + # Recurse into sub-directories + islink, join = path.islink, path.join + for dirname in dirs: + new_path = join(top, dirname) + # Issue #23605: os.path.islink() is used instead of caching + # entry.is_symlink() result during the loop on os.scandir() because + # the caller can replace the directory entry during the "yield" + # above. + if followlinks or not islink(new_path): + yield from walk(new_path, topdown, onerror, followlinks) + else: + # Recurse into sub-directories + for new_path in walk_dirs: + yield from walk(new_path, topdown, onerror, followlinks) + # Yield after recursion if going bottom up + yield top, dirs, nondirs + +__all__.append("walk") + +if {open, stat} <= supports_dir_fd and {scandir, stat} <= supports_fd: + + def fwalk(top=".", topdown=True, onerror=None, *, follow_symlinks=False, dir_fd=None): + """Directory tree generator. 
+ + This behaves exactly like walk(), except that it yields a 4-tuple + + dirpath, dirnames, filenames, dirfd + + `dirpath`, `dirnames` and `filenames` are identical to walk() output, + and `dirfd` is a file descriptor referring to the directory `dirpath`. + + The advantage of fwalk() over walk() is that it's safe against symlink + races (when follow_symlinks is False). + + If dir_fd is not None, it should be a file descriptor open to a directory, + and top should be relative; top will then be relative to that directory. + (dir_fd is always supported for fwalk.) + + Caution: + Since fwalk() yields file descriptors, those are only valid until the + next iteration step, so you should dup() them if you want to keep them + for a longer period. + + Example: + + import os + for root, dirs, files, rootfd in os.fwalk('python/Lib/email'): + print(root, "consumes", end="") + print(sum([os.stat(name, dir_fd=rootfd).st_size for name in files]), + end="") + print("bytes in", len(files), "non-directory files") + if 'CVS' in dirs: + dirs.remove('CVS') # don't visit CVS directories + """ + if not isinstance(top, int) or not hasattr(top, '__index__'): + top = fspath(top) + # Note: To guard against symlink races, we use the standard + # lstat()/open()/fstat() trick. + if not follow_symlinks: + orig_st = stat(top, follow_symlinks=False, dir_fd=dir_fd) + topfd = open(top, O_RDONLY, dir_fd=dir_fd) + try: + if (follow_symlinks or (st.S_ISDIR(orig_st.st_mode) and + path.samestat(orig_st, stat(topfd)))): + yield from _fwalk(topfd, top, isinstance(top, bytes), + topdown, onerror, follow_symlinks) + finally: + close(topfd) + + def _fwalk(topfd, toppath, isbytes, topdown, onerror, follow_symlinks): + # Note: This uses O(depth of the directory tree) file descriptors: if + # necessary, it can be adapted to only require O(1) FDs, see issue + # #13734. + + scandir_it = scandir(topfd) + dirs = [] + nondirs = [] + entries = None if topdown or follow_symlinks else [] + for entry in scandir_it: + name = entry.name + if isbytes: + name = fsencode(name) + try: + if entry.is_dir(): + dirs.append(name) + if entries is not None: + entries.append(entry) + else: + nondirs.append(name) + except OSError: + try: + # Add dangling symlinks, ignore disappeared files + if entry.is_symlink(): + nondirs.append(name) + except OSError: + pass + + if topdown: + yield toppath, dirs, nondirs, topfd + + for name in dirs if entries is None else zip(dirs, entries): + try: + if not follow_symlinks: + if topdown: + orig_st = stat(name, dir_fd=topfd, follow_symlinks=False) + else: + assert entries is not None + name, entry = name + orig_st = entry.stat(follow_symlinks=False) + dirfd = open(name, O_RDONLY, dir_fd=topfd) + except OSError as err: + if onerror is not None: + onerror(err) + continue + try: + if follow_symlinks or path.samestat(orig_st, stat(dirfd)): + dirpath = path.join(toppath, name) + yield from _fwalk(dirfd, dirpath, isbytes, + topdown, onerror, follow_symlinks) + finally: + close(dirfd) + + if not topdown: + yield toppath, dirs, nondirs, topfd + + __all__.append("fwalk") + +# Make sure os.environ exists, at least +try: + environ +except NameError: + environ = {} + +def execl(file, *args): + """execl(file, *args) + + Execute the executable file with argument list args, replacing the + current process. """ + execv(file, args) + +def execle(file, *args): + """execle(file, *args, env) + + Execute the executable file with argument list args and + environment env, replacing the current process. 
""" + env = args[-1] + execve(file, args[:-1], env) + +def execlp(file, *args): + """execlp(file, *args) + + Execute the executable file (which is searched for along $PATH) + with argument list args, replacing the current process. """ + execvp(file, args) + +def execlpe(file, *args): + """execlpe(file, *args, env) + + Execute the executable file (which is searched for along $PATH) + with argument list args and environment env, replacing the current + process. """ + env = args[-1] + execvpe(file, args[:-1], env) + +def execvp(file, args): + """execvp(file, args) + + Execute the executable file (which is searched for along $PATH) + with argument list args, replacing the current process. + args may be a list or tuple of strings. """ + _execvpe(file, args) + +def execvpe(file, args, env): + """execvpe(file, args, env) + + Execute the executable file (which is searched for along $PATH) + with argument list args and environment env , replacing the + current process. + args may be a list or tuple of strings. """ + _execvpe(file, args, env) + +__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"]) + +def _execvpe(file, args, env=None): + if env is not None: + exec_func = execve + argrest = (args, env) + else: + exec_func = execv + argrest = (args,) + env = environ + + if path.dirname(file): + exec_func(file, *argrest) + return + saved_exc = None + path_list = get_exec_path(env) + if name != 'nt': + file = fsencode(file) + path_list = map(fsencode, path_list) + for dir in path_list: + fullname = path.join(dir, file) + try: + exec_func(fullname, *argrest) + except (FileNotFoundError, NotADirectoryError) as e: + last_exc = e + except OSError as e: + last_exc = e + if saved_exc is None: + saved_exc = e + if saved_exc is not None: + raise saved_exc + raise last_exc + + +def get_exec_path(env=None): + """Returns the sequence of directories that will be searched for the + named executable (similar to a shell) when launching a process. + + *env* must be an environment variable dict or None. If *env* is None, + os.environ will be used. + """ + # Use a local import instead of a global import to limit the number of + # modules loaded at startup: the os module is always loaded at startup by + # Python. It may also avoid a bootstrap issue. + import warnings + + if env is None: + env = environ + + # {b'PATH': ...}.get('PATH') and {'PATH': ...}.get(b'PATH') emit a + # BytesWarning when using python -b or python -bb: ignore the warning + with warnings.catch_warnings(): + warnings.simplefilter("ignore", BytesWarning) + + try: + path_list = env.get('PATH') + except TypeError: + path_list = None + + if supports_bytes_environ: + try: + path_listb = env[b'PATH'] + except (KeyError, TypeError): + pass + else: + if path_list is not None: + raise ValueError( + "env cannot contain 'PATH' and b'PATH' keys") + path_list = path_listb + + if path_list is not None and isinstance(path_list, bytes): + path_list = fsdecode(path_list) + + if path_list is None: + path_list = defpath + return path_list.split(pathsep) + + +# Change environ to automatically call putenv(), unsetenv if they exist. 
+from _collections_abc import MutableMapping + +class _Environ(MutableMapping): + def __init__(self, data, encodekey, decodekey, encodevalue, decodevalue, putenv, unsetenv): + self.encodekey = encodekey + self.decodekey = decodekey + self.encodevalue = encodevalue + self.decodevalue = decodevalue + self.putenv = putenv + self.unsetenv = unsetenv + self._data = data + + def __getitem__(self, key): + try: + value = self._data[self.encodekey(key)] + except KeyError: + # raise KeyError with the original key value + raise KeyError(key) from None + return self.decodevalue(value) + + def __setitem__(self, key, value): + key = self.encodekey(key) + value = self.encodevalue(value) + self.putenv(key, value) + self._data[key] = value + + def __delitem__(self, key): + encodedkey = self.encodekey(key) + self.unsetenv(encodedkey) + try: + del self._data[encodedkey] + except KeyError: + # raise KeyError with the original key value + raise KeyError(key) from None + + def __iter__(self): + # list() from dict object is an atomic operation + keys = list(self._data) + for key in keys: + yield self.decodekey(key) + + def __len__(self): + return len(self._data) + + def __repr__(self): + return 'environ({{{}}})'.format(', '.join( + ('{!r}: {!r}'.format(self.decodekey(key), self.decodevalue(value)) + for key, value in self._data.items()))) + + def copy(self): + return dict(self) + + def setdefault(self, key, value): + if key not in self: + self[key] = value + return self[key] + +try: + _putenv = putenv +except NameError: + _putenv = lambda key, value: None +else: + if "putenv" not in __all__: + __all__.append("putenv") + +try: + _unsetenv = unsetenv +except NameError: + _unsetenv = lambda key: _putenv(key, "") +else: + if "unsetenv" not in __all__: + __all__.append("unsetenv") + +def _createenviron(): + if name == 'nt': + # Where Env Var Names Must Be UPPERCASE + def check_str(value): + if not isinstance(value, str): + raise TypeError("str expected, not %s" % type(value).__name__) + return value + encode = check_str + decode = str + def encodekey(key): + return encode(key).upper() + data = {} + for key, value in environ.items(): + data[encodekey(key)] = value + else: + # Where Env Var Names Can Be Mixed Case + encoding = sys.getfilesystemencoding() + def encode(value): + if not isinstance(value, str): + raise TypeError("str expected, not %s" % type(value).__name__) + return value.encode(encoding, 'surrogateescape') + def decode(value): + return value.decode(encoding, 'surrogateescape') + encodekey = encode + data = environ + return _Environ(data, + encodekey, decode, + encode, decode, + _putenv, _unsetenv) + +# unicode environ +environ = _createenviron() +del _createenviron + + +def getenv(key, default=None): + """Get an environment variable, return None if it doesn't exist. + The optional second argument can specify an alternate default. + key, default and the result are str.""" + return environ.get(key, default) + +supports_bytes_environ = (name != 'nt') +__all__.extend(("getenv", "supports_bytes_environ")) + +if supports_bytes_environ: + def _check_bytes(value): + if not isinstance(value, bytes): + raise TypeError("bytes expected, not %s" % type(value).__name__) + return value + + # bytes environ + environb = _Environ(environ._data, + _check_bytes, bytes, + _check_bytes, bytes, + _putenv, _unsetenv) + del _check_bytes + + def getenvb(key, default=None): + """Get an environment variable, return None if it doesn't exist. + The optional second argument can specify an alternate default. 
+ key, default and the result are bytes.""" + return environb.get(key, default) + + __all__.extend(("environb", "getenvb")) + +def _fscodec(): + encoding = sys.getfilesystemencoding() + errors = sys.getfilesystemencodeerrors() + + def fsencode(filename): + """Encode filename (an os.PathLike, bytes, or str) to the filesystem + encoding with 'surrogateescape' error handler, return bytes unchanged. + On Windows, use 'strict' error handler if the file system encoding is + 'mbcs' (which is the default encoding). + """ + filename = fspath(filename) # Does type-checking of `filename`. + if isinstance(filename, str): + return filename.encode(encoding, errors) + else: + return filename + + def fsdecode(filename): + """Decode filename (an os.PathLike, bytes, or str) from the filesystem + encoding with 'surrogateescape' error handler, return str unchanged. On + Windows, use 'strict' error handler if the file system encoding is + 'mbcs' (which is the default encoding). + """ + filename = fspath(filename) # Does type-checking of `filename`. + if isinstance(filename, bytes): + return filename.decode(encoding, errors) + else: + return filename + + return fsencode, fsdecode + +fsencode, fsdecode = _fscodec() +del _fscodec + +# Supply spawn*() (probably only for Unix) +if _exists("fork") and not _exists("spawnv") and _exists("execv"): + + P_WAIT = 0 + P_NOWAIT = P_NOWAITO = 1 + + __all__.extend(["P_WAIT", "P_NOWAIT", "P_NOWAITO"]) + + # XXX Should we support P_DETACH? I suppose it could fork()**2 + # and close the std I/O streams. Also, P_OVERLAY is the same + # as execv*()? + + def _spawnvef(mode, file, args, env, func): + # Internal helper; func is the exec*() function to use + if not isinstance(args, (tuple, list)): + raise TypeError('argv must be a tuple or a list') + if not args or not args[0]: + raise ValueError('argv first element cannot be empty') + pid = fork() + if not pid: + # Child + try: + if env is None: + func(file, args) + else: + func(file, args, env) + except: + _exit(127) + else: + # Parent + if mode == P_NOWAIT: + return pid # Caller is responsible for waiting! + while 1: + wpid, sts = waitpid(pid, 0) + if WIFSTOPPED(sts): + continue + elif WIFSIGNALED(sts): + return -WTERMSIG(sts) + elif WIFEXITED(sts): + return WEXITSTATUS(sts) + else: + raise OSError("Not stopped, signaled or exited???") + + def spawnv(mode, file, args): + """spawnv(mode, file, args) -> integer + +Execute file with arguments from args in a subprocess. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + return _spawnvef(mode, file, args, None, execv) + + def spawnve(mode, file, args, env): + """spawnve(mode, file, args, env) -> integer + +Execute file with arguments from args in a subprocess with the +specified environment. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + return _spawnvef(mode, file, args, env, execve) + + # Note: spawnvp[e] isn't currently supported on Windows + + def spawnvp(mode, file, args): + """spawnvp(mode, file, args) -> integer + +Execute file (which is looked for along $PATH) with arguments from +args in a subprocess. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. 
""" + return _spawnvef(mode, file, args, None, execvp) + + def spawnvpe(mode, file, args, env): + """spawnvpe(mode, file, args, env) -> integer + +Execute file (which is looked for along $PATH) with arguments from +args in a subprocess with the supplied environment. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + return _spawnvef(mode, file, args, env, execvpe) + + + __all__.extend(["spawnv", "spawnve", "spawnvp", "spawnvpe"]) + + +if _exists("spawnv"): + # These aren't supplied by the basic Windows code + # but can be easily implemented in Python + + def spawnl(mode, file, *args): + """spawnl(mode, file, *args) -> integer + +Execute file with arguments from args in a subprocess. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + return spawnv(mode, file, args) + + def spawnle(mode, file, *args): + """spawnle(mode, file, *args, env) -> integer + +Execute file with arguments from args in a subprocess with the +supplied environment. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + env = args[-1] + return spawnve(mode, file, args[:-1], env) + + + __all__.extend(["spawnl", "spawnle"]) + + +if _exists("spawnvp"): + # At the moment, Windows doesn't implement spawnvp[e], + # so it won't have spawnlp[e] either. + def spawnlp(mode, file, *args): + """spawnlp(mode, file, *args) -> integer + +Execute file (which is looked for along $PATH) with arguments from +args in a subprocess with the supplied environment. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + return spawnvp(mode, file, args) + + def spawnlpe(mode, file, *args): + """spawnlpe(mode, file, *args, env) -> integer + +Execute file (which is looked for along $PATH) with arguments from +args in a subprocess with the supplied environment. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. 
""" + env = args[-1] + return spawnvpe(mode, file, args[:-1], env) + + + __all__.extend(["spawnlp", "spawnlpe"]) + + +# Supply os.popen() +def popen(cmd, mode="r", buffering=-1): + if not isinstance(cmd, str): + raise TypeError("invalid cmd type (%s, expected string)" % type(cmd)) + if mode not in ("r", "w"): + raise ValueError("invalid mode %r" % mode) + if buffering == 0 or buffering is None: + raise ValueError("popen() does not support unbuffered streams") + import subprocess, io + if mode == "r": + proc = subprocess.Popen(cmd, + shell=True, + stdout=subprocess.PIPE, + bufsize=buffering) + return _wrap_close(io.TextIOWrapper(proc.stdout), proc) + else: + proc = subprocess.Popen(cmd, + shell=True, + stdin=subprocess.PIPE, + bufsize=buffering) + return _wrap_close(io.TextIOWrapper(proc.stdin), proc) + +# Helper for popen() -- a proxy for a file whose close waits for the process +class _wrap_close: + def __init__(self, stream, proc): + self._stream = stream + self._proc = proc + def close(self): + self._stream.close() + returncode = self._proc.wait() + if returncode == 0: + return None + if name == 'nt': + return returncode + else: + return returncode << 8 # Shift left to match old behavior + def __enter__(self): + return self + def __exit__(self, *args): + self.close() + def __getattr__(self, name): + return getattr(self._stream, name) + def __iter__(self): + return iter(self._stream) + +# Supply os.fdopen() +def fdopen(fd, *args, **kwargs): + if not isinstance(fd, int): + raise TypeError("invalid fd type (%s, expected integer)" % type(fd)) + import io + return io.open(fd, *args, **kwargs) + + +# For testing purposes, make sure the function is available when the C +# implementation exists. +def _fspath(path): + """Return the path representation of a path-like object. + + If str or bytes is passed in, it is returned unchanged. Otherwise the + os.PathLike interface is used to get the path representation. If the + path representation is not str or bytes, TypeError is raised. If the + provided path is not str, bytes, or os.PathLike, TypeError is raised. + """ + if isinstance(path, (str, bytes)): + return path + + # Work from the object's type to match method resolution of other magic + # methods. + path_type = type(path) + try: + path_repr = path_type.__fspath__(path) + except AttributeError: + if hasattr(path_type, '__fspath__'): + raise + else: + raise TypeError("expected str, bytes or os.PathLike object, " + "not " + path_type.__name__) + if isinstance(path_repr, (str, bytes)): + return path_repr + else: + raise TypeError("expected {}.__fspath__() to return str or bytes, " + "not {}".format(path_type.__name__, + type(path_repr).__name__)) + +# If there is no C implementation, make the pure Python version the +# implementation as transparently as possible. +if not _exists('fspath'): + fspath = _fspath + fspath.__name__ = "fspath" + + +class PathLike(abc.ABC): + + """Abstract base class for implementing the file system path protocol.""" + + @abc.abstractmethod + def __fspath__(self): + """Return the file system path representation of the object.""" + raise NotImplementedError + + @classmethod + def __subclasshook__(cls, subclass): + return hasattr(subclass, '__fspath__') diff --git a/venv/Lib/posixpath.py b/venv/Lib/posixpath.py new file mode 100644 index 00000000..e92186c6 --- /dev/null +++ b/venv/Lib/posixpath.py @@ -0,0 +1,522 @@ +"""Common operations on Posix pathnames. + +Instead of importing this module directly, import os and refer to +this module as os.path. 
The "os.path" name is an alias for this +module on Posix systems; on other systems (e.g. Mac, Windows), +os.path provides the same operations in a manner specific to that +platform, and is an alias to another module (e.g. macpath, ntpath). + +Some of this can actually be useful on non-Posix systems too, e.g. +for manipulation of the pathname component of URLs. +""" + +# Strings representing various path-related bits and pieces. +# These are primarily for export; internally, they are hardcoded. +# Should be set before imports for resolving cyclic dependency. +curdir = '.' +pardir = '..' +extsep = '.' +sep = '/' +pathsep = ':' +defpath = ':/bin:/usr/bin' +altsep = None +devnull = '/dev/null' + +import os +import sys +import stat +import genericpath +from genericpath import * + +__all__ = ["normcase","isabs","join","splitdrive","split","splitext", + "basename","dirname","commonprefix","getsize","getmtime", + "getatime","getctime","islink","exists","lexists","isdir","isfile", + "ismount", "expanduser","expandvars","normpath","abspath", + "samefile","sameopenfile","samestat", + "curdir","pardir","sep","pathsep","defpath","altsep","extsep", + "devnull","realpath","supports_unicode_filenames","relpath", + "commonpath"] + + +def _get_sep(path): + if isinstance(path, bytes): + return b'/' + else: + return '/' + +# Normalize the case of a pathname. Trivial in Posix, string.lower on Mac. +# On MS-DOS this may also turn slashes into backslashes; however, other +# normalizations (such as optimizing '../' away) are not allowed +# (another function should be defined to do that). + +def normcase(s): + """Normalize case of pathname. Has no effect under Posix""" + s = os.fspath(s) + if not isinstance(s, (bytes, str)): + raise TypeError("normcase() argument must be str or bytes, " + "not '{}'".format(s.__class__.__name__)) + return s + + +# Return whether a path is absolute. +# Trivial in Posix, harder on the Mac or MS-DOS. + +def isabs(s): + """Test whether a path is absolute""" + s = os.fspath(s) + sep = _get_sep(s) + return s.startswith(sep) + + +# Join pathnames. +# Ignore the previous parts if a part is absolute. +# Insert a '/' unless the first part is empty or already ends in '/'. + +def join(a, *p): + """Join two or more pathname components, inserting '/' as needed. + If any component is an absolute path, all previous path components + will be discarded. An empty last part will result in a path that + ends with a separator.""" + a = os.fspath(a) + sep = _get_sep(a) + path = a + try: + if not p: + path[:0] + sep #23780: Ensure compatible data type even if p is null. + for b in map(os.fspath, p): + if b.startswith(sep): + path = b + elif not path or path.endswith(sep): + path += b + else: + path += sep + b + except (TypeError, AttributeError, BytesWarning): + genericpath._check_arg_types('join', a, *p) + raise + return path + + +# Split a path in head (everything up to the last '/') and tail (the +# rest). If the path ends in '/', tail will be empty. If there is no +# '/' in the path, head will be empty. +# Trailing '/'es are stripped from head unless it is the root. + +def split(p): + """Split a pathname. Returns tuple "(head, tail)" where "tail" is + everything after the final slash. Either part may be empty.""" + p = os.fspath(p) + sep = _get_sep(p) + i = p.rfind(sep) + 1 + head, tail = p[:i], p[i:] + if head and head != sep*len(head): + head = head.rstrip(sep) + return head, tail + + +# Split a path in root and extension. 
+# The extension is everything starting at the last dot in the last +# pathname component; the root is everything before that. +# It is always true that root + ext == p. + +def splitext(p): + p = os.fspath(p) + if isinstance(p, bytes): + sep = b'/' + extsep = b'.' + else: + sep = '/' + extsep = '.' + return genericpath._splitext(p, sep, None, extsep) +splitext.__doc__ = genericpath._splitext.__doc__ + +# Split a pathname into a drive specification and the rest of the +# path. Useful on DOS/Windows/NT; on Unix, the drive is always empty. + +def splitdrive(p): + """Split a pathname into drive and path. On Posix, drive is always + empty.""" + p = os.fspath(p) + return p[:0], p + + +# Return the tail (basename) part of a path, same as split(path)[1]. + +def basename(p): + """Returns the final component of a pathname""" + p = os.fspath(p) + sep = _get_sep(p) + i = p.rfind(sep) + 1 + return p[i:] + + +# Return the head (dirname) part of a path, same as split(path)[0]. + +def dirname(p): + """Returns the directory component of a pathname""" + p = os.fspath(p) + sep = _get_sep(p) + i = p.rfind(sep) + 1 + head = p[:i] + if head and head != sep*len(head): + head = head.rstrip(sep) + return head + + +# Is a path a symbolic link? +# This will always return false on systems where os.lstat doesn't exist. + +def islink(path): + """Test whether a path is a symbolic link""" + try: + st = os.lstat(path) + except (OSError, AttributeError): + return False + return stat.S_ISLNK(st.st_mode) + +# Being true for dangling symbolic links is also useful. + +def lexists(path): + """Test whether a path exists. Returns True for broken symbolic links""" + try: + os.lstat(path) + except OSError: + return False + return True + + +# Is a path a mount point? +# (Does this work for all UNIXes? Is it even guaranteed to work by Posix?) + +def ismount(path): + """Test whether a path is a mount point""" + try: + s1 = os.lstat(path) + except OSError: + # It doesn't exist -- so not a mount point. :-) + return False + else: + # A symlink can never be a mount point + if stat.S_ISLNK(s1.st_mode): + return False + + if isinstance(path, bytes): + parent = join(path, b'..') + else: + parent = join(path, '..') + parent = realpath(parent) + try: + s2 = os.lstat(parent) + except OSError: + return False + + dev1 = s1.st_dev + dev2 = s2.st_dev + if dev1 != dev2: + return True # path/.. on a different device as path + ino1 = s1.st_ino + ino2 = s2.st_ino + if ino1 == ino2: + return True # path/.. is the same i-node as path + return False + + +# Expand paths beginning with '~' or '~user'. +# '~' means $HOME; '~user' means that user's home directory. +# If the path doesn't begin with '~', or if the user or $HOME is unknown, +# the path is returned unchanged (leaving error reporting to whatever +# function is called with the expanded path as argument). +# See also module 'glob' for expansion of *, ? and [...] in pathnames. +# (A function should also be defined to do full *sh-style environment +# variable expansion.) + +def expanduser(path): + """Expand ~ and ~user constructions. 
If user or $HOME is unknown, + do nothing.""" + path = os.fspath(path) + if isinstance(path, bytes): + tilde = b'~' + else: + tilde = '~' + if not path.startswith(tilde): + return path + sep = _get_sep(path) + i = path.find(sep, 1) + if i < 0: + i = len(path) + if i == 1: + if 'HOME' not in os.environ: + import pwd + userhome = pwd.getpwuid(os.getuid()).pw_dir + else: + userhome = os.environ['HOME'] + else: + import pwd + name = path[1:i] + if isinstance(name, bytes): + name = str(name, 'ASCII') + try: + pwent = pwd.getpwnam(name) + except KeyError: + return path + userhome = pwent.pw_dir + if isinstance(path, bytes): + userhome = os.fsencode(userhome) + root = b'/' + else: + root = '/' + userhome = userhome.rstrip(root) + return (userhome + path[i:]) or root + + +# Expand paths containing shell variable substitutions. +# This expands the forms $variable and ${variable} only. +# Non-existent variables are left unchanged. + +_varprog = None +_varprogb = None + +def expandvars(path): + """Expand shell variables of form $var and ${var}. Unknown variables + are left unchanged.""" + path = os.fspath(path) + global _varprog, _varprogb + if isinstance(path, bytes): + if b'$' not in path: + return path + if not _varprogb: + import re + _varprogb = re.compile(br'\$(\w+|\{[^}]*\})', re.ASCII) + search = _varprogb.search + start = b'{' + end = b'}' + environ = getattr(os, 'environb', None) + else: + if '$' not in path: + return path + if not _varprog: + import re + _varprog = re.compile(r'\$(\w+|\{[^}]*\})', re.ASCII) + search = _varprog.search + start = '{' + end = '}' + environ = os.environ + i = 0 + while True: + m = search(path, i) + if not m: + break + i, j = m.span(0) + name = m.group(1) + if name.startswith(start) and name.endswith(end): + name = name[1:-1] + try: + if environ is None: + value = os.fsencode(os.environ[os.fsdecode(name)]) + else: + value = environ[name] + except KeyError: + i = j + else: + tail = path[j:] + path = path[:i] + value + i = len(path) + path += tail + return path + + +# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A/B. +# It should be understood that this may change the meaning of the path +# if it contains symbolic links! + +def normpath(path): + """Normalize path, eliminating double slashes, etc.""" + path = os.fspath(path) + if isinstance(path, bytes): + sep = b'/' + empty = b'' + dot = b'.' + dotdot = b'..' + else: + sep = '/' + empty = '' + dot = '.' + dotdot = '..' + if path == empty: + return dot + initial_slashes = path.startswith(sep) + # POSIX allows one or two initial slashes, but treats three or more + # as single slash. + if (initial_slashes and + path.startswith(sep*2) and not path.startswith(sep*3)): + initial_slashes = 2 + comps = path.split(sep) + new_comps = [] + for comp in comps: + if comp in (empty, dot): + continue + if (comp != dotdot or (not initial_slashes and not new_comps) or + (new_comps and new_comps[-1] == dotdot)): + new_comps.append(comp) + elif new_comps: + new_comps.pop() + comps = new_comps + path = sep.join(comps) + if initial_slashes: + path = sep*initial_slashes + path + return path or dot + + +def abspath(path): + """Return an absolute path.""" + path = os.fspath(path) + if not isabs(path): + if isinstance(path, bytes): + cwd = os.getcwdb() + else: + cwd = os.getcwd() + path = join(cwd, path) + return normpath(path) + + +# Return a canonical path (i.e. the absolute location of a file on the +# filesystem). 
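A short, hedged illustration of the path helpers defined above; the environment-variable names and paths are made up for the example and assume a POSIX environment where HOME is set:

```python
import os
import posixpath

# normpath() collapses duplicate separators and '.'/'..' components.
print(posixpath.normpath('A//B/./C/../D'))           # A/B/D

# expandvars() substitutes $var and ${var}; unknown names are kept as-is.
os.environ['PROJECT_DIR'] = '/srv/app'
print(posixpath.expandvars('$PROJECT_DIR/logs/${MISSING}'))
# /srv/app/logs/${MISSING}

# expanduser() rewrites a leading '~' using $HOME (or the pwd database).
print(posixpath.expanduser('~/notes.txt'))
```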
+ +def realpath(filename): + """Return the canonical path of the specified filename, eliminating any +symbolic links encountered in the path.""" + filename = os.fspath(filename) + path, ok = _joinrealpath(filename[:0], filename, {}) + return abspath(path) + +# Join two paths, normalizing and eliminating any symbolic links +# encountered in the second path. +def _joinrealpath(path, rest, seen): + if isinstance(path, bytes): + sep = b'/' + curdir = b'.' + pardir = b'..' + else: + sep = '/' + curdir = '.' + pardir = '..' + + if isabs(rest): + rest = rest[1:] + path = sep + + while rest: + name, _, rest = rest.partition(sep) + if not name or name == curdir: + # current dir + continue + if name == pardir: + # parent dir + if path: + path, name = split(path) + if name == pardir: + path = join(path, pardir, pardir) + else: + path = pardir + continue + newpath = join(path, name) + if not islink(newpath): + path = newpath + continue + # Resolve the symbolic link + if newpath in seen: + # Already seen this path + path = seen[newpath] + if path is not None: + # use cached value + continue + # The symlink is not resolved, so we must have a symlink loop. + # Return already resolved part + rest of the path unchanged. + return join(newpath, rest), False + seen[newpath] = None # not resolved symlink + path, ok = _joinrealpath(path, os.readlink(newpath), seen) + if not ok: + return join(path, rest), False + seen[newpath] = path # resolved symlink + + return path, True + + +supports_unicode_filenames = (sys.platform == 'darwin') + +def relpath(path, start=None): + """Return a relative version of a path""" + + if not path: + raise ValueError("no path specified") + + path = os.fspath(path) + if isinstance(path, bytes): + curdir = b'.' + sep = b'/' + pardir = b'..' + else: + curdir = '.' + sep = '/' + pardir = '..' + + if start is None: + start = curdir + else: + start = os.fspath(start) + + try: + start_list = [x for x in abspath(start).split(sep) if x] + path_list = [x for x in abspath(path).split(sep) if x] + # Work out how much of the filepath is shared by start and path. + i = len(commonprefix([start_list, path_list])) + + rel_list = [pardir] * (len(start_list)-i) + path_list[i:] + if not rel_list: + return curdir + return join(*rel_list) + except (TypeError, AttributeError, BytesWarning, DeprecationWarning): + genericpath._check_arg_types('relpath', path, start) + raise + + +# Return the longest common sub-path of the sequence of paths given as input. +# The paths are not normalized before comparing them (this is the +# responsibility of the caller). Any trailing separator is stripped from the +# returned path. + +def commonpath(paths): + """Given a sequence of path names, returns the longest common sub-path.""" + + if not paths: + raise ValueError('commonpath() arg is an empty sequence') + + paths = tuple(map(os.fspath, paths)) + if isinstance(paths[0], bytes): + sep = b'/' + curdir = b'.' + else: + sep = '/' + curdir = '.' 
+ + try: + split_paths = [path.split(sep) for path in paths] + + try: + isabs, = set(p[:1] == sep for p in paths) + except ValueError: + raise ValueError("Can't mix absolute and relative paths") from None + + split_paths = [[c for c in s if c and c != curdir] for s in split_paths] + s1 = min(split_paths) + s2 = max(split_paths) + common = s1 + for i, c in enumerate(s1): + if c != s2[i]: + common = s1[:i] + break + + prefix = sep if isabs else sep[:0] + return prefix + sep.join(common) + except (TypeError, AttributeError): + genericpath._check_arg_types('commonpath', *paths) + raise diff --git a/venv/Lib/random.py b/venv/Lib/random.py new file mode 100644 index 00000000..0bc24174 --- /dev/null +++ b/venv/Lib/random.py @@ -0,0 +1,775 @@ +"""Random variable generators. + + integers + -------- + uniform within range + + sequences + --------- + pick random element + pick random sample + pick weighted random sample + generate random permutation + + distributions on the real line: + ------------------------------ + uniform + triangular + normal (Gaussian) + lognormal + negative exponential + gamma + beta + pareto + Weibull + + distributions on the circle (angles 0 to 2pi) + --------------------------------------------- + circular uniform + von Mises + +General notes on the underlying Mersenne Twister core generator: + +* The period is 2**19937-1. +* It is one of the most extensively tested generators in existence. +* The random() method is implemented in C, executes in a single Python step, + and is, therefore, threadsafe. + +""" + +from warnings import warn as _warn +from types import MethodType as _MethodType, BuiltinMethodType as _BuiltinMethodType +from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil +from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin +from os import urandom as _urandom +from _collections_abc import Set as _Set, Sequence as _Sequence +from hashlib import sha512 as _sha512 +import itertools as _itertools +import bisect as _bisect +import os as _os + +__all__ = ["Random","seed","random","uniform","randint","choice","sample", + "randrange","shuffle","normalvariate","lognormvariate", + "expovariate","vonmisesvariate","gammavariate","triangular", + "gauss","betavariate","paretovariate","weibullvariate", + "getstate","setstate", "getrandbits", "choices", + "SystemRandom"] + +NV_MAGICCONST = 4 * _exp(-0.5)/_sqrt(2.0) +TWOPI = 2.0*_pi +LOG4 = _log(4.0) +SG_MAGICCONST = 1.0 + _log(4.5) +BPF = 53 # Number of bits in a float +RECIP_BPF = 2**-BPF + + +# Translated by Guido van Rossum from C source provided by +# Adrian Baddeley. Adapted by Raymond Hettinger for use with +# the Mersenne Twister and os.urandom() core generators. + +import _random + +class Random(_random.Random): + """Random number generator base class used by bound module functions. + + Used to instantiate instances of Random to get generators that don't + share state. + + Class Random can also be subclassed if you want to use a different basic + generator of your own devising: in that case, override the following + methods: random(), seed(), getstate(), and setstate(). + Optionally, implement a getrandbits() method so that randrange() + can cover arbitrarily large ranges. + + """ + + VERSION = 3 # used by getstate/setstate + + def __init__(self, x=None): + """Initialize an instance. + + Optional argument x controls seeding, as for Random.seed(). 
+ """ + + self.seed(x) + self.gauss_next = None + + def seed(self, a=None, version=2): + """Initialize internal state from hashable object. + + None or no argument seeds from current time or from an operating + system specific randomness source if available. + + If *a* is an int, all bits are used. + + For version 2 (the default), all of the bits are used if *a* is a str, + bytes, or bytearray. For version 1 (provided for reproducing random + sequences from older versions of Python), the algorithm for str and + bytes generates a narrower range of seeds. + + """ + + if version == 1 and isinstance(a, (str, bytes)): + a = a.decode('latin-1') if isinstance(a, bytes) else a + x = ord(a[0]) << 7 if a else 0 + for c in map(ord, a): + x = ((1000003 * x) ^ c) & 0xFFFFFFFFFFFFFFFF + x ^= len(a) + a = -2 if x == -1 else x + + if version == 2 and isinstance(a, (str, bytes, bytearray)): + if isinstance(a, str): + a = a.encode() + a += _sha512(a).digest() + a = int.from_bytes(a, 'big') + + super().seed(a) + self.gauss_next = None + + def getstate(self): + """Return internal state; can be passed to setstate() later.""" + return self.VERSION, super().getstate(), self.gauss_next + + def setstate(self, state): + """Restore internal state from object returned by getstate().""" + version = state[0] + if version == 3: + version, internalstate, self.gauss_next = state + super().setstate(internalstate) + elif version == 2: + version, internalstate, self.gauss_next = state + # In version 2, the state was saved as signed ints, which causes + # inconsistencies between 32/64-bit systems. The state is + # really unsigned 32-bit ints, so we convert negative ints from + # version 2 to positive longs for version 3. + try: + internalstate = tuple(x % (2**32) for x in internalstate) + except ValueError as e: + raise TypeError from e + super().setstate(internalstate) + else: + raise ValueError("state with version %s passed to " + "Random.setstate() of version %s" % + (version, self.VERSION)) + +## ---- Methods below this point do not need to be overridden when +## ---- subclassing for the purpose of using a different core generator. + +## -------------------- pickle support ------------------- + + # Issue 17489: Since __reduce__ was defined to fix #759889 this is no + # longer called; we leave it here because it has been here since random was + # rewritten back in 2001 and why risk breaking something. + def __getstate__(self): # for pickle + return self.getstate() + + def __setstate__(self, state): # for pickle + self.setstate(state) + + def __reduce__(self): + return self.__class__, (), self.getstate() + +## -------------------- integer methods ------------------- + + def randrange(self, start, stop=None, step=1, _int=int): + """Choose a random item from range(start, stop[, step]). + + This fixes the problem with randint() which includes the + endpoint; in Python this is usually not what you want. + + """ + + # This code is a bit messy to make it fast for the + # common case while still doing adequate error checking. + istart = _int(start) + if istart != start: + raise ValueError("non-integer arg 1 for randrange()") + if stop is None: + if istart > 0: + return self._randbelow(istart) + raise ValueError("empty range for randrange()") + + # stop argument supplied. 
+        istop = _int(stop)
+        if istop != stop:
+            raise ValueError("non-integer stop for randrange()")
+        width = istop - istart
+        if step == 1 and width > 0:
+            return istart + self._randbelow(width)
+        if step == 1:
+            raise ValueError("empty range for randrange() (%d,%d, %d)" % (istart, istop, width))
+
+        # Non-unit step argument supplied.
+        istep = _int(step)
+        if istep != step:
+            raise ValueError("non-integer step for randrange()")
+        if istep > 0:
+            n = (width + istep - 1) // istep
+        elif istep < 0:
+            n = (width + istep + 1) // istep
+        else:
+            raise ValueError("zero step for randrange()")
+
+        if n <= 0:
+            raise ValueError("empty range for randrange()")
+
+        return istart + istep*self._randbelow(n)
+
+    def randint(self, a, b):
+        """Return random integer in range [a, b], including both end points.
+        """
+
+        return self.randrange(a, b+1)
+
+    def _randbelow(self, n, int=int, maxsize=1<<BPF, type=type,
+                   Method=_MethodType, BuiltinMethod=_BuiltinMethodType):
+        "Return a random int in the range [0,n).  Raises ValueError if n==0."
+
+        random = self.random
+        getrandbits = self.getrandbits
+        # Only call self.getrandbits if the original random() builtin method
+        # has not been overridden or if a new getrandbits() was supplied.
+        # This assures that the two methods correspond.
+        if type(random) is BuiltinMethod or type(getrandbits) is Method:
+            k = n.bit_length()  # don't use (n-1) here because n can be 1
+            r = getrandbits(k)          # 0 <= r < 2**k
+            while r >= n:
+                r = getrandbits(k)
+            return r
+        # There's an overridden random() method but no new getrandbits() method,
+        # so we can only use random() from here.
+        if n >= maxsize:
+            _warn("Underlying random() generator does not supply \n"
+                "enough bits to choose from a population range this large.\n"
+                "To remove the range limitation, add a getrandbits() method.")
+            return int(random() * n)
+        if n == 0:
+            raise ValueError("Boundary cannot be zero")
+        rem = maxsize % n
+        limit = (maxsize - rem) / maxsize   # int(limit * maxsize) % n == 0
+        r = random()
+        while r >= limit:
+            r = random()
+        return int(r*maxsize) % n
+
+## -------------------- sequence methods -------------------
+
+    def choice(self, seq):
+        """Choose a random element from a non-empty sequence."""
+        try:
+            i = self._randbelow(len(seq))
+        except ValueError:
+            raise IndexError('Cannot choose from an empty sequence') from None
+        return seq[i]
+
+    def shuffle(self, x, random=None):
+        """Shuffle list x in place, and return None.
+
+        Optional argument random is a 0-argument function returning a
+        random float in [0.0, 1.0); if it is the default None, the
+        standard random.random will be used.
+
+        """
+
+        if random is None:
+            randbelow = self._randbelow
+            for i in reversed(range(1, len(x))):
+                # pick an element in x[:i+1] with which to exchange x[i]
+                j = randbelow(i+1)
+                x[i], x[j] = x[j], x[i]
+        else:
+            _int = int
+            for i in reversed(range(1, len(x))):
+                # pick an element in x[:i+1] with which to exchange x[i]
+                j = _int(random() * (i+1))
+                x[i], x[j] = x[j], x[i]
+
+    def sample(self, population, k):
+        """Chooses k unique random elements from a population sequence or set.
+
+        Returns a new list containing elements from the population while
+        leaving the original population unchanged.  The resulting list is
+        in selection order so that all sub-slices will also be valid random
+        samples.  This allows raffle winners (the sample) to be partitioned
+        into grand prize and second place winners (the subslices).
+
+        Members of the population need not be hashable or unique.  If the
+        population contains repeats, then each occurrence is a possible
+        selection in the sample.
+
+        To choose a sample in a range of integers, use range as an argument.
+        This is especially fast and space efficient for sampling from a
+        large population:   sample(range(10000000), 60)
+        """
+
+        # Sampling without replacement entails tracking either potential
+        # selections (the pool) in a list or previous selections in a set.
+
+        # When the number of selections is small compared to the
+        # population, then tracking selections is efficient, requiring
+        # only a small set and an occasional reselection.
For + # a larger number of selections, the pool tracking method is + # preferred since the list takes less space than the + # set and it doesn't suffer from frequent reselections. + + if isinstance(population, _Set): + population = tuple(population) + if not isinstance(population, _Sequence): + raise TypeError("Population must be a sequence or set. For dicts, use list(d).") + randbelow = self._randbelow + n = len(population) + if not 0 <= k <= n: + raise ValueError("Sample larger than population or is negative") + result = [None] * k + setsize = 21 # size of a small set minus size of an empty list + if k > 5: + setsize += 4 ** _ceil(_log(k * 3, 4)) # table size for big sets + if n <= setsize: + # An n-length list is smaller than a k-length set + pool = list(population) + for i in range(k): # invariant: non-selected at [0,n-i) + j = randbelow(n-i) + result[i] = pool[j] + pool[j] = pool[n-i-1] # move non-selected item into vacancy + else: + selected = set() + selected_add = selected.add + for i in range(k): + j = randbelow(n) + while j in selected: + j = randbelow(n) + selected_add(j) + result[i] = population[j] + return result + + def choices(self, population, weights=None, *, cum_weights=None, k=1): + """Return a k sized list of population elements chosen with replacement. + + If the relative weights or cumulative weights are not specified, + the selections are made with equal probability. + + """ + random = self.random + if cum_weights is None: + if weights is None: + _int = int + total = len(population) + return [population[_int(random() * total)] for i in range(k)] + cum_weights = list(_itertools.accumulate(weights)) + elif weights is not None: + raise TypeError('Cannot specify both weights and cumulative weights') + if len(cum_weights) != len(population): + raise ValueError('The number of weights does not match the population') + bisect = _bisect.bisect + total = cum_weights[-1] + return [population[bisect(cum_weights, random() * total)] for i in range(k)] + +## -------------------- real-valued distributions ------------------- + +## -------------------- uniform distribution ------------------- + + def uniform(self, a, b): + "Get a random number in the range [a, b) or [a, b] depending on rounding." + return a + (b-a) * self.random() + +## -------------------- triangular -------------------- + + def triangular(self, low=0.0, high=1.0, mode=None): + """Triangular distribution. + + Continuous distribution bounded by given lower and upper limits, + and having a given mode value in-between. + + http://en.wikipedia.org/wiki/Triangular_distribution + + """ + u = self.random() + try: + c = 0.5 if mode is None else (mode - low) / (high - low) + except ZeroDivisionError: + return low + if u > c: + u = 1.0 - u + c = 1.0 - c + low, high = high, low + return low + (high - low) * _sqrt(u * c) + +## -------------------- normal distribution -------------------- + + def normalvariate(self, mu, sigma): + """Normal distribution. + + mu is the mean, and sigma is the standard deviation. + + """ + # mu = mean, sigma = standard deviation + + # Uses Kinderman and Monahan method. Reference: Kinderman, + # A.J. and Monahan, J.F., "Computer generation of random + # variables using the ratio of uniform deviates", ACM Trans + # Math Software, 3, (1977), pp257-260. 
+ + random = self.random + while 1: + u1 = random() + u2 = 1.0 - random() + z = NV_MAGICCONST*(u1-0.5)/u2 + zz = z*z/4.0 + if zz <= -_log(u2): + break + return mu + z*sigma + +## -------------------- lognormal distribution -------------------- + + def lognormvariate(self, mu, sigma): + """Log normal distribution. + + If you take the natural logarithm of this distribution, you'll get a + normal distribution with mean mu and standard deviation sigma. + mu can have any value, and sigma must be greater than zero. + + """ + return _exp(self.normalvariate(mu, sigma)) + +## -------------------- exponential distribution -------------------- + + def expovariate(self, lambd): + """Exponential distribution. + + lambd is 1.0 divided by the desired mean. It should be + nonzero. (The parameter would be called "lambda", but that is + a reserved word in Python.) Returned values range from 0 to + positive infinity if lambd is positive, and from negative + infinity to 0 if lambd is negative. + + """ + # lambd: rate lambd = 1/mean + # ('lambda' is a Python reserved word) + + # we use 1-random() instead of random() to preclude the + # possibility of taking the log of zero. + return -_log(1.0 - self.random())/lambd + +## -------------------- von Mises distribution -------------------- + + def vonmisesvariate(self, mu, kappa): + """Circular data distribution. + + mu is the mean angle, expressed in radians between 0 and 2*pi, and + kappa is the concentration parameter, which must be greater than or + equal to zero. If kappa is equal to zero, this distribution reduces + to a uniform random angle over the range 0 to 2*pi. + + """ + # mu: mean angle (in radians between 0 and 2*pi) + # kappa: concentration parameter kappa (>= 0) + # if kappa = 0 generate uniform random angle + + # Based upon an algorithm published in: Fisher, N.I., + # "Statistical Analysis of Circular Data", Cambridge + # University Press, 1993. + + # Thanks to Magnus Kessler for a correction to the + # implementation of step 4. + + random = self.random + if kappa <= 1e-6: + return TWOPI * random() + + s = 0.5 / kappa + r = s + _sqrt(1.0 + s * s) + + while 1: + u1 = random() + z = _cos(_pi * u1) + + d = z / (r + z) + u2 = random() + if u2 < 1.0 - d * d or u2 <= (1.0 - d) * _exp(d): + break + + q = 1.0 / r + f = (q + z) / (1.0 + q * z) + u3 = random() + if u3 > 0.5: + theta = (mu + _acos(f)) % TWOPI + else: + theta = (mu - _acos(f)) % TWOPI + + return theta + +## -------------------- gamma distribution -------------------- + + def gammavariate(self, alpha, beta): + """Gamma distribution. Not the gamma function! + + Conditions on the parameters are alpha > 0 and beta > 0. + + The probability distribution function is: + + x ** (alpha - 1) * math.exp(-x / beta) + pdf(x) = -------------------------------------- + math.gamma(alpha) * beta ** alpha + + """ + + # alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2 + + # Warning: a few older sources define the gamma distribution in terms + # of alpha > -1.0 + if alpha <= 0.0 or beta <= 0.0: + raise ValueError('gammavariate: alpha and beta must be > 0.0') + + random = self.random + if alpha > 1.0: + + # Uses R.C.H. Cheng, "The generation of Gamma + # variables with non-integral shape parameters", + # Applied Statistics, (1977), 26, No. 
1, p71-74 + + ainv = _sqrt(2.0 * alpha - 1.0) + bbb = alpha - LOG4 + ccc = alpha + ainv + + while 1: + u1 = random() + if not 1e-7 < u1 < .9999999: + continue + u2 = 1.0 - random() + v = _log(u1/(1.0-u1))/ainv + x = alpha*_exp(v) + z = u1*u1*u2 + r = bbb+ccc*v-x + if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z): + return x * beta + + elif alpha == 1.0: + # expovariate(1/beta) + u = random() + while u <= 1e-7: + u = random() + return -_log(u) * beta + + else: # alpha is between 0 and 1 (exclusive) + + # Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle + + while 1: + u = random() + b = (_e + alpha)/_e + p = b*u + if p <= 1.0: + x = p ** (1.0/alpha) + else: + x = -_log((b-p)/alpha) + u1 = random() + if p > 1.0: + if u1 <= x ** (alpha - 1.0): + break + elif u1 <= _exp(-x): + break + return x * beta + +## -------------------- Gauss (faster alternative) -------------------- + + def gauss(self, mu, sigma): + """Gaussian distribution. + + mu is the mean, and sigma is the standard deviation. This is + slightly faster than the normalvariate() function. + + Not thread-safe without a lock around calls. + + """ + + # When x and y are two variables from [0, 1), uniformly + # distributed, then + # + # cos(2*pi*x)*sqrt(-2*log(1-y)) + # sin(2*pi*x)*sqrt(-2*log(1-y)) + # + # are two *independent* variables with normal distribution + # (mu = 0, sigma = 1). + # (Lambert Meertens) + # (corrected version; bug discovered by Mike Miller, fixed by LM) + + # Multithreading note: When two threads call this function + # simultaneously, it is possible that they will receive the + # same return value. The window is very small though. To + # avoid this, you have to use a lock around all calls. (I + # didn't want to slow this down in the serial case by using a + # lock here.) + + random = self.random + z = self.gauss_next + self.gauss_next = None + if z is None: + x2pi = random() * TWOPI + g2rad = _sqrt(-2.0 * _log(1.0 - random())) + z = _cos(x2pi) * g2rad + self.gauss_next = _sin(x2pi) * g2rad + + return mu + z*sigma + +## -------------------- beta -------------------- +## See +## http://mail.python.org/pipermail/python-bugs-list/2001-January/003752.html +## for Ivan Frohne's insightful analysis of why the original implementation: +## +## def betavariate(self, alpha, beta): +## # Discrete Event Simulation in C, pp 87-88. +## +## y = self.expovariate(alpha) +## z = self.expovariate(1.0/beta) +## return z/(y+z) +## +## was dead wrong, and how it probably got that way. + + def betavariate(self, alpha, beta): + """Beta distribution. + + Conditions on the parameters are alpha > 0 and beta > 0. + Returned values range between 0 and 1. + + """ + + # This version due to Janne Sinkkonen, and matches all the std + # texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution"). + y = self.gammavariate(alpha, 1.0) + if y == 0: + return 0.0 + else: + return y / (y + self.gammavariate(beta, 1.0)) + +## -------------------- Pareto -------------------- + + def paretovariate(self, alpha): + """Pareto distribution. alpha is the shape parameter.""" + # Jain, pg. 495 + + u = 1.0 - self.random() + return 1.0 / u ** (1.0/alpha) + +## -------------------- Weibull -------------------- + + def weibullvariate(self, alpha, beta): + """Weibull distribution. + + alpha is the scale parameter and beta is the shape parameter. + + """ + # Jain, pg. 
499; bug fix courtesy Bill Arms + + u = 1.0 - self.random() + return alpha * (-_log(u)) ** (1.0/beta) + +## --------------- Operating System Random Source ------------------ + +class SystemRandom(Random): + """Alternate random number generator using sources provided + by the operating system (such as /dev/urandom on Unix or + CryptGenRandom on Windows). + + Not available on all systems (see os.urandom() for details). + """ + + def random(self): + """Get the next random number in the range [0.0, 1.0).""" + return (int.from_bytes(_urandom(7), 'big') >> 3) * RECIP_BPF + + def getrandbits(self, k): + """getrandbits(k) -> x. Generates an int with k random bits.""" + if k <= 0: + raise ValueError('number of bits must be greater than zero') + if k != int(k): + raise TypeError('number of bits should be an integer') + numbytes = (k + 7) // 8 # bits / 8 and rounded up + x = int.from_bytes(_urandom(numbytes), 'big') + return x >> (numbytes * 8 - k) # trim excess bits + + def seed(self, *args, **kwds): + "Stub method. Not used for a system random number generator." + return None + + def _notimplemented(self, *args, **kwds): + "Method should not be called for a system random number generator." + raise NotImplementedError('System entropy source does not have state.') + getstate = setstate = _notimplemented + +## -------------------- test program -------------------- + +def _test_generator(n, func, args): + import time + print(n, 'times', func.__name__) + total = 0.0 + sqsum = 0.0 + smallest = 1e10 + largest = -1e10 + t0 = time.time() + for i in range(n): + x = func(*args) + total += x + sqsum = sqsum + x*x + smallest = min(x, smallest) + largest = max(x, largest) + t1 = time.time() + print(round(t1-t0, 3), 'sec,', end=' ') + avg = total/n + stddev = _sqrt(sqsum/n - avg*avg) + print('avg %g, stddev %g, min %g, max %g\n' % \ + (avg, stddev, smallest, largest)) + + +def _test(N=2000): + _test_generator(N, random, ()) + _test_generator(N, normalvariate, (0.0, 1.0)) + _test_generator(N, lognormvariate, (0.0, 1.0)) + _test_generator(N, vonmisesvariate, (0.0, 1.0)) + _test_generator(N, gammavariate, (0.01, 1.0)) + _test_generator(N, gammavariate, (0.1, 1.0)) + _test_generator(N, gammavariate, (0.1, 2.0)) + _test_generator(N, gammavariate, (0.5, 1.0)) + _test_generator(N, gammavariate, (0.9, 1.0)) + _test_generator(N, gammavariate, (1.0, 1.0)) + _test_generator(N, gammavariate, (2.0, 1.0)) + _test_generator(N, gammavariate, (20.0, 1.0)) + _test_generator(N, gammavariate, (200.0, 1.0)) + _test_generator(N, gauss, (0.0, 1.0)) + _test_generator(N, betavariate, (3.0, 3.0)) + _test_generator(N, triangular, (0.0, 1.0, 1.0/3.0)) + +# Create one instance, seeded from current time, and export its methods +# as module-level functions. The functions share state across all uses +#(both in the user's code and in the Python libraries), but that's fine +# for most programs and is easier for the casual user than making them +# instantiate their own Random() instance. 
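A brief sketch contrasting the shared module-level generator bound below with independent instances; the seed values and ranges are arbitrary examples:

```python
import random

random.seed(42)                  # reseeds the hidden module-level Random()
print(random.randrange(10))      # draws from the shared Mersenne Twister state

rng = random.Random(42)          # an independent generator with its own state
print(rng.sample(range(100), 3))

sec = random.SystemRandom()      # OS entropy; seed() is a no-op, getstate() is unsupported
print(sec.randint(1, 6))
```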
+ +_inst = Random() +seed = _inst.seed +random = _inst.random +uniform = _inst.uniform +triangular = _inst.triangular +randint = _inst.randint +choice = _inst.choice +randrange = _inst.randrange +sample = _inst.sample +shuffle = _inst.shuffle +choices = _inst.choices +normalvariate = _inst.normalvariate +lognormvariate = _inst.lognormvariate +expovariate = _inst.expovariate +vonmisesvariate = _inst.vonmisesvariate +gammavariate = _inst.gammavariate +gauss = _inst.gauss +betavariate = _inst.betavariate +paretovariate = _inst.paretovariate +weibullvariate = _inst.weibullvariate +getstate = _inst.getstate +setstate = _inst.setstate +getrandbits = _inst.getrandbits + +if hasattr(_os, "fork"): + _os.register_at_fork(after_in_child=_inst.seed) + + +if __name__ == '__main__': + _test() diff --git a/venv/Lib/re.py b/venv/Lib/re.py new file mode 100644 index 00000000..94d48657 --- /dev/null +++ b/venv/Lib/re.py @@ -0,0 +1,366 @@ +# +# Secret Labs' Regular Expression Engine +# +# re-compatible interface for the sre matching engine +# +# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved. +# +# This version of the SRE library can be redistributed under CNRI's +# Python 1.6 license. For any other use, please contact Secret Labs +# AB (info@pythonware.com). +# +# Portions of this engine have been developed in cooperation with +# CNRI. Hewlett-Packard provided funding for 1.6 integration and +# other compatibility work. +# + +r"""Support for regular expressions (RE). + +This module provides regular expression matching operations similar to +those found in Perl. It supports both 8-bit and Unicode strings; both +the pattern and the strings being processed can contain null bytes and +characters outside the US ASCII range. + +Regular expressions can contain both special and ordinary characters. +Most ordinary characters, like "A", "a", or "0", are the simplest +regular expressions; they simply match themselves. You can +concatenate ordinary characters, so last matches the string 'last'. + +The special characters are: + "." Matches any character except a newline. + "^" Matches the start of the string. + "$" Matches the end of the string or just before the newline at + the end of the string. + "*" Matches 0 or more (greedy) repetitions of the preceding RE. + Greedy means that it will match as many repetitions as possible. + "+" Matches 1 or more (greedy) repetitions of the preceding RE. + "?" Matches 0 or 1 (greedy) of the preceding RE. + *?,+?,?? Non-greedy versions of the previous three special characters. + {m,n} Matches from m to n repetitions of the preceding RE. + {m,n}? Non-greedy version of the above. + "\\" Either escapes special characters or signals a special sequence. + [] Indicates a set of characters. + A "^" as the first character indicates a complementing set. + "|" A|B, creates an RE that will match either A or B. + (...) Matches the RE inside the parentheses. + The contents can be retrieved or matched later in the string. + (?aiLmsux) Set the A, I, L, M, S, U, or X flag for the RE (see below). + (?:...) Non-grouping version of regular parentheses. + (?P...) The substring matched by the group is accessible by name. + (?P=name) Matches the text matched earlier by the group named name. + (?#...) A comment; ignored. + (?=...) Matches if ... matches next, but doesn't consume the string. + (?!...) Matches if ... doesn't match next. + (?<=...) Matches if preceded by ... (must be fixed length). 
+ (?= _MAXCACHE: + # Drop the oldest item + try: + del _cache[next(iter(_cache))] + except (StopIteration, RuntimeError, KeyError): + pass + _cache[type(pattern), pattern, flags] = p + return p + +@functools.lru_cache(_MAXCACHE) +def _compile_repl(repl, pattern): + # internal: compile replacement pattern + return sre_parse.parse_template(repl, pattern) + +def _expand(pattern, match, template): + # internal: Match.expand implementation hook + template = sre_parse.parse_template(template, pattern) + return sre_parse.expand_template(template, match) + +def _subx(pattern, template): + # internal: Pattern.sub/subn implementation helper + template = _compile_repl(template, pattern) + if not template[0] and len(template[1]) == 1: + # literal replacement + return template[1][0] + def filter(match, template=template): + return sre_parse.expand_template(template, match) + return filter + +# register myself for pickling + +import copyreg + +def _pickle(p): + return _compile, (p.pattern, p.flags) + +copyreg.pickle(Pattern, _pickle, _compile) + +# -------------------------------------------------------------------- +# experimental stuff (see python-dev discussions for details) + +class Scanner: + def __init__(self, lexicon, flags=0): + from sre_constants import BRANCH, SUBPATTERN + if isinstance(flags, RegexFlag): + flags = flags.value + self.lexicon = lexicon + # combine phrases into a compound pattern + p = [] + s = sre_parse.Pattern() + s.flags = flags + for phrase, action in lexicon: + gid = s.opengroup() + p.append(sre_parse.SubPattern(s, [ + (SUBPATTERN, (gid, 0, 0, sre_parse.parse(phrase, flags))), + ])) + s.closegroup(gid, p[-1]) + p = sre_parse.SubPattern(s, [(BRANCH, (None, p))]) + self.scanner = sre_compile.compile(p) + def scan(self, string): + result = [] + append = result.append + match = self.scanner.scanner(string).match + i = 0 + while True: + m = match() + if not m: + break + j = m.end() + if i == j: + break + action = self.lexicon[m.lastindex-1][1] + if callable(action): + self.match = m + action = action(self, m.group()) + if action is not None: + append(action) + i = j + return result, string[i:] diff --git a/venv/Lib/reprlib.py b/venv/Lib/reprlib.py new file mode 100644 index 00000000..616b3439 --- /dev/null +++ b/venv/Lib/reprlib.py @@ -0,0 +1,161 @@ +"""Redo the builtin repr() (representation) but with limits on most sizes.""" + +__all__ = ["Repr", "repr", "recursive_repr"] + +import builtins +from itertools import islice +from _thread import get_ident + +def recursive_repr(fillvalue='...'): + 'Decorator to make a repr function return fillvalue for a recursive call' + + def decorating_function(user_function): + repr_running = set() + + def wrapper(self): + key = id(self), get_ident() + if key in repr_running: + return fillvalue + repr_running.add(key) + try: + result = user_function(self) + finally: + repr_running.discard(key) + return result + + # Can't use functools.wraps() here because of bootstrap issues + wrapper.__module__ = getattr(user_function, '__module__') + wrapper.__doc__ = getattr(user_function, '__doc__') + wrapper.__name__ = getattr(user_function, '__name__') + wrapper.__qualname__ = getattr(user_function, '__qualname__') + wrapper.__annotations__ = getattr(user_function, '__annotations__', {}) + return wrapper + + return decorating_function + +class Repr: + + def __init__(self): + self.maxlevel = 6 + self.maxtuple = 6 + self.maxlist = 6 + self.maxarray = 5 + self.maxdict = 4 + self.maxset = 6 + self.maxfrozenset = 6 + self.maxdeque = 6 + self.maxstring = 30 + 
self.maxlong = 40 + self.maxother = 30 + + def repr(self, x): + return self.repr1(x, self.maxlevel) + + def repr1(self, x, level): + typename = type(x).__name__ + if ' ' in typename: + parts = typename.split() + typename = '_'.join(parts) + if hasattr(self, 'repr_' + typename): + return getattr(self, 'repr_' + typename)(x, level) + else: + return self.repr_instance(x, level) + + def _repr_iterable(self, x, level, left, right, maxiter, trail=''): + n = len(x) + if level <= 0 and n: + s = '...' + else: + newlevel = level - 1 + repr1 = self.repr1 + pieces = [repr1(elem, newlevel) for elem in islice(x, maxiter)] + if n > maxiter: pieces.append('...') + s = ', '.join(pieces) + if n == 1 and trail: right = trail + right + return '%s%s%s' % (left, s, right) + + def repr_tuple(self, x, level): + return self._repr_iterable(x, level, '(', ')', self.maxtuple, ',') + + def repr_list(self, x, level): + return self._repr_iterable(x, level, '[', ']', self.maxlist) + + def repr_array(self, x, level): + if not x: + return "array('%s')" % x.typecode + header = "array('%s', [" % x.typecode + return self._repr_iterable(x, level, header, '])', self.maxarray) + + def repr_set(self, x, level): + if not x: + return 'set()' + x = _possibly_sorted(x) + return self._repr_iterable(x, level, '{', '}', self.maxset) + + def repr_frozenset(self, x, level): + if not x: + return 'frozenset()' + x = _possibly_sorted(x) + return self._repr_iterable(x, level, 'frozenset({', '})', + self.maxfrozenset) + + def repr_deque(self, x, level): + return self._repr_iterable(x, level, 'deque([', '])', self.maxdeque) + + def repr_dict(self, x, level): + n = len(x) + if n == 0: return '{}' + if level <= 0: return '{...}' + newlevel = level - 1 + repr1 = self.repr1 + pieces = [] + for key in islice(_possibly_sorted(x), self.maxdict): + keyrepr = repr1(key, newlevel) + valrepr = repr1(x[key], newlevel) + pieces.append('%s: %s' % (keyrepr, valrepr)) + if n > self.maxdict: pieces.append('...') + s = ', '.join(pieces) + return '{%s}' % (s,) + + def repr_str(self, x, level): + s = builtins.repr(x[:self.maxstring]) + if len(s) > self.maxstring: + i = max(0, (self.maxstring-3)//2) + j = max(0, self.maxstring-3-i) + s = builtins.repr(x[:i] + x[len(x)-j:]) + s = s[:i] + '...' + s[len(s)-j:] + return s + + def repr_int(self, x, level): + s = builtins.repr(x) # XXX Hope this isn't too slow... + if len(s) > self.maxlong: + i = max(0, (self.maxlong-3)//2) + j = max(0, self.maxlong-3-i) + s = s[:i] + '...' + s[len(s)-j:] + return s + + def repr_instance(self, x, level): + try: + s = builtins.repr(x) + # Bugs in x.__repr__() can cause arbitrary + # exceptions -- then make up something + except Exception: + return '<%s instance at %#x>' % (x.__class__.__name__, id(x)) + if len(s) > self.maxother: + i = max(0, (self.maxother-3)//2) + j = max(0, self.maxother-3-i) + s = s[:i] + '...' + s[len(s)-j:] + return s + + +def _possibly_sorted(x): + # Since not all sequences of items can be sorted and comparison + # functions may raise arbitrary exceptions, return an unsorted + # sequence in that case. + try: + return sorted(x) + except Exception: + return list(x) + +aRepr = Repr() +repr = aRepr.repr diff --git a/venv/Lib/rlcompleter.py b/venv/Lib/rlcompleter.py new file mode 100644 index 00000000..bca4a7bc --- /dev/null +++ b/venv/Lib/rlcompleter.py @@ -0,0 +1,205 @@ +"""Word completion for GNU readline. + +The completer completes keywords, built-ins and globals in a selectable +namespace (which defaults to __main__); when completing NAME.NAME..., it +evaluates (!) 
the expression up to the last dot and completes its attributes. + +It's very cool to do "import sys" type "sys.", hit the completion key (twice), +and see the list of names defined by the sys module! + +Tip: to use the tab key as the completion key, call + + readline.parse_and_bind("tab: complete") + +Notes: + +- Exceptions raised by the completer function are *ignored* (and generally cause + the completion to fail). This is a feature -- since readline sets the tty + device in raw (or cbreak) mode, printing a traceback wouldn't work well + without some complicated hoopla to save, reset and restore the tty state. + +- The evaluation of the NAME.NAME... form may cause arbitrary application + defined code to be executed if an object with a __getattr__ hook is found. + Since it is the responsibility of the application (or the user) to enable this + feature, I consider this an acceptable risk. More complicated expressions + (e.g. function calls or indexing operations) are *not* evaluated. + +- When the original stdin is not a tty device, GNU readline is never + used, and this module (and the readline module) are silently inactive. + +""" + +import atexit +import builtins +import __main__ + +__all__ = ["Completer"] + +class Completer: + def __init__(self, namespace = None): + """Create a new completer for the command line. + + Completer([namespace]) -> completer instance. + + If unspecified, the default namespace where completions are performed + is __main__ (technically, __main__.__dict__). Namespaces should be + given as dictionaries. + + Completer instances should be used as the completion mechanism of + readline via the set_completer() call: + + readline.set_completer(Completer(my_namespace).complete) + """ + + if namespace and not isinstance(namespace, dict): + raise TypeError('namespace must be a dictionary') + + # Don't bind to namespace quite yet, but flag whether the user wants a + # specific namespace or to use __main__.__dict__. This will allow us + # to bind to __main__.__dict__ at completion time, not now. + if namespace is None: + self.use_main_ns = 1 + else: + self.use_main_ns = 0 + self.namespace = namespace + + def complete(self, text, state): + """Return the next possible completion for 'text'. + + This is called successively with state == 0, 1, 2, ... until it + returns None. The completion should begin with 'text'. + + """ + if self.use_main_ns: + self.namespace = __main__.__dict__ + + if not text.strip(): + if state == 0: + if _readline_available: + readline.insert_text('\t') + readline.redisplay() + return '' + else: + return '\t' + else: + return None + + if state == 0: + if "." in text: + self.matches = self.attr_matches(text) + else: + self.matches = self.global_matches(text) + try: + return self.matches[state] + except IndexError: + return None + + def _callable_postfix(self, val, word): + if callable(val): + word = word + "(" + return word + + def global_matches(self, text): + """Compute matches when text is a simple name. + + Return a list of all keywords, built-in functions and names currently + defined in self.namespace that match. 
+ + """ + import keyword + matches = [] + seen = {"__builtins__"} + n = len(text) + for word in keyword.kwlist: + if word[:n] == text: + seen.add(word) + if word in {'finally', 'try'}: + word = word + ':' + elif word not in {'False', 'None', 'True', + 'break', 'continue', 'pass', + 'else'}: + word = word + ' ' + matches.append(word) + for nspace in [self.namespace, builtins.__dict__]: + for word, val in nspace.items(): + if word[:n] == text and word not in seen: + seen.add(word) + matches.append(self._callable_postfix(val, word)) + return matches + + def attr_matches(self, text): + """Compute matches when text contains a dot. + + Assuming the text is of the form NAME.NAME....[NAME], and is + evaluable in self.namespace, it will be evaluated and its attributes + (as revealed by dir()) are used as possible completions. (For class + instances, class members are also considered.) + + WARNING: this can still invoke arbitrary C code, if an object + with a __getattr__ hook is evaluated. + + """ + import re + m = re.match(r"(\w+(\.\w+)*)\.(\w*)", text) + if not m: + return [] + expr, attr = m.group(1, 3) + try: + thisobject = eval(expr, self.namespace) + except Exception: + return [] + + # get the content of the object, except __builtins__ + words = set(dir(thisobject)) + words.discard("__builtins__") + + if hasattr(thisobject, '__class__'): + words.add('__class__') + words.update(get_class_members(thisobject.__class__)) + matches = [] + n = len(attr) + if attr == '': + noprefix = '_' + elif attr == '_': + noprefix = '__' + else: + noprefix = None + while True: + for word in words: + if (word[:n] == attr and + not (noprefix and word[:n+1] == noprefix)): + match = "%s.%s" % (expr, word) + try: + val = getattr(thisobject, word) + except Exception: + pass # Include even if attribute not set + else: + match = self._callable_postfix(val, match) + matches.append(match) + if matches or not noprefix: + break + if noprefix == '_': + noprefix = '__' + else: + noprefix = None + matches.sort() + return matches + +def get_class_members(klass): + ret = dir(klass) + if hasattr(klass,'__bases__'): + for base in klass.__bases__: + ret = ret + get_class_members(base) + return ret + +try: + import readline +except ImportError: + _readline_available = False +else: + readline.set_completer(Completer().complete) + # Release references early at shutdown (the readline module's + # contents are quasi-immortal, and the completer function holds a + # reference to globals). + atexit.register(lambda: readline.set_completer(None)) + _readline_available = True diff --git a/venv/Lib/shutil.py b/venv/Lib/shutil.py new file mode 100644 index 00000000..3c02776a --- /dev/null +++ b/venv/Lib/shutil.py @@ -0,0 +1,1169 @@ +"""Utility functions for copying and archiving files and directory trees. + +XXX The functions here don't copy the resource fork or other metadata on Mac. 
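The completer module above wires itself to readline on import; as an illustrative, non-authoritative sketch (the namespace dict and the sample variable name are assumptions, and this only works where GNU readline is available), a custom namespace can be completed like this:

import readline
import rlcompleter

# Hypothetical namespace for completion; "inventory" is an assumed example name.
namespace = {"inventory": {"apples": 10, "bananas": 4}}
readline.set_completer(rlcompleter.Completer(namespace).complete)
readline.parse_and_bind("tab: complete")  # use Tab as the completion key
# At the interactive prompt, "inv<Tab>" would now complete to "inventory".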
+ +""" + +import os +import sys +import stat +import fnmatch +import collections +import errno + +try: + import zlib + del zlib + _ZLIB_SUPPORTED = True +except ImportError: + _ZLIB_SUPPORTED = False + +try: + import bz2 + del bz2 + _BZ2_SUPPORTED = True +except ImportError: + _BZ2_SUPPORTED = False + +try: + import lzma + del lzma + _LZMA_SUPPORTED = True +except ImportError: + _LZMA_SUPPORTED = False + +try: + from pwd import getpwnam +except ImportError: + getpwnam = None + +try: + from grp import getgrnam +except ImportError: + getgrnam = None + +__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2", + "copytree", "move", "rmtree", "Error", "SpecialFileError", + "ExecError", "make_archive", "get_archive_formats", + "register_archive_format", "unregister_archive_format", + "get_unpack_formats", "register_unpack_format", + "unregister_unpack_format", "unpack_archive", + "ignore_patterns", "chown", "which", "get_terminal_size", + "SameFileError"] + # disk_usage is added later, if available on the platform + +class Error(OSError): + pass + +class SameFileError(Error): + """Raised when source and destination are the same file.""" + +class SpecialFileError(OSError): + """Raised when trying to do a kind of operation (e.g. copying) which is + not supported on a special file (e.g. a named pipe)""" + +class ExecError(OSError): + """Raised when a command could not be executed""" + +class ReadError(OSError): + """Raised when an archive cannot be read""" + +class RegistryError(Exception): + """Raised when a registry operation with the archiving + and unpacking registries fails""" + + +def copyfileobj(fsrc, fdst, length=16*1024): + """copy data from file-like object fsrc to file-like object fdst""" + while 1: + buf = fsrc.read(length) + if not buf: + break + fdst.write(buf) + +def _samefile(src, dst): + # Macintosh, Unix. + if hasattr(os.path, 'samefile'): + try: + return os.path.samefile(src, dst) + except OSError: + return False + + # All other platforms: check for same pathname. + return (os.path.normcase(os.path.abspath(src)) == + os.path.normcase(os.path.abspath(dst))) + +def copyfile(src, dst, *, follow_symlinks=True): + """Copy data from src to dst. + + If follow_symlinks is not set and src is a symbolic link, a new + symlink will be created instead of copying the file it points to. + + """ + if _samefile(src, dst): + raise SameFileError("{!r} and {!r} are the same file".format(src, dst)) + + for fn in [src, dst]: + try: + st = os.stat(fn) + except OSError: + # File most likely does not exist + pass + else: + # XXX What about other special files? (sockets, devices...) + if stat.S_ISFIFO(st.st_mode): + raise SpecialFileError("`%s` is a named pipe" % fn) + + if not follow_symlinks and os.path.islink(src): + os.symlink(os.readlink(src), dst) + else: + with open(src, 'rb') as fsrc: + with open(dst, 'wb') as fdst: + copyfileobj(fsrc, fdst) + return dst + +def copymode(src, dst, *, follow_symlinks=True): + """Copy mode bits from src to dst. + + If follow_symlinks is not set, symlinks aren't followed if and only + if both `src` and `dst` are symlinks. If `lchmod` isn't available + (e.g. Linux) this method does nothing. 
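As a small usage sketch for the copyfile/copymode pair defined above (the paths are hypothetical and this is not part of the committed diff):

import os
import shutil

os.makedirs("backup", exist_ok=True)                 # assumed target directory
shutil.copyfile("report.txt", "backup/report.txt")   # copy the file data only
shutil.copymode("report.txt", "backup/report.txt")   # then copy the permission bits
# shutil.copy("report.txt", "backup/") performs both steps in one call.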
+ + """ + if not follow_symlinks and os.path.islink(src) and os.path.islink(dst): + if hasattr(os, 'lchmod'): + stat_func, chmod_func = os.lstat, os.lchmod + else: + return + elif hasattr(os, 'chmod'): + stat_func, chmod_func = os.stat, os.chmod + else: + return + + st = stat_func(src) + chmod_func(dst, stat.S_IMODE(st.st_mode)) + +if hasattr(os, 'listxattr'): + def _copyxattr(src, dst, *, follow_symlinks=True): + """Copy extended filesystem attributes from `src` to `dst`. + + Overwrite existing attributes. + + If `follow_symlinks` is false, symlinks won't be followed. + + """ + + try: + names = os.listxattr(src, follow_symlinks=follow_symlinks) + except OSError as e: + if e.errno not in (errno.ENOTSUP, errno.ENODATA): + raise + return + for name in names: + try: + value = os.getxattr(src, name, follow_symlinks=follow_symlinks) + os.setxattr(dst, name, value, follow_symlinks=follow_symlinks) + except OSError as e: + if e.errno not in (errno.EPERM, errno.ENOTSUP, errno.ENODATA): + raise +else: + def _copyxattr(*args, **kwargs): + pass + +def copystat(src, dst, *, follow_symlinks=True): + """Copy all stat info (mode bits, atime, mtime, flags) from src to dst. + + If the optional flag `follow_symlinks` is not set, symlinks aren't followed if and + only if both `src` and `dst` are symlinks. + + """ + def _nop(*args, ns=None, follow_symlinks=None): + pass + + # follow symlinks (aka don't not follow symlinks) + follow = follow_symlinks or not (os.path.islink(src) and os.path.islink(dst)) + if follow: + # use the real function if it exists + def lookup(name): + return getattr(os, name, _nop) + else: + # use the real function only if it exists + # *and* it supports follow_symlinks + def lookup(name): + fn = getattr(os, name, _nop) + if fn in os.supports_follow_symlinks: + return fn + return _nop + + st = lookup("stat")(src, follow_symlinks=follow) + mode = stat.S_IMODE(st.st_mode) + lookup("utime")(dst, ns=(st.st_atime_ns, st.st_mtime_ns), + follow_symlinks=follow) + try: + lookup("chmod")(dst, mode, follow_symlinks=follow) + except NotImplementedError: + # if we got a NotImplementedError, it's because + # * follow_symlinks=False, + # * lchown() is unavailable, and + # * either + # * fchownat() is unavailable or + # * fchownat() doesn't implement AT_SYMLINK_NOFOLLOW. + # (it returned ENOSUP.) + # therefore we're out of options--we simply cannot chown the + # symlink. give up, suppress the error. + # (which is what shutil always did in this circumstance.) + pass + if hasattr(st, 'st_flags'): + try: + lookup("chflags")(dst, st.st_flags, follow_symlinks=follow) + except OSError as why: + for err in 'EOPNOTSUPP', 'ENOTSUP': + if hasattr(errno, err) and why.errno == getattr(errno, err): + break + else: + raise + _copyxattr(src, dst, follow_symlinks=follow) + +def copy(src, dst, *, follow_symlinks=True): + """Copy data and mode bits ("cp src dst"). Return the file's destination. + + The destination may be a directory. + + If follow_symlinks is false, symlinks won't be followed. This + resembles GNU's "cp -P src dst". + + If source and destination are the same file, a SameFileError will be + raised. + + """ + if os.path.isdir(dst): + dst = os.path.join(dst, os.path.basename(src)) + copyfile(src, dst, follow_symlinks=follow_symlinks) + copymode(src, dst, follow_symlinks=follow_symlinks) + return dst + +def copy2(src, dst, *, follow_symlinks=True): + """Copy data and all stat info ("cp -p src dst"). Return the file's + destination." + + The destination may be a directory. 
+ + If follow_symlinks is false, symlinks won't be followed. This + resembles GNU's "cp -P src dst". + + """ + if os.path.isdir(dst): + dst = os.path.join(dst, os.path.basename(src)) + copyfile(src, dst, follow_symlinks=follow_symlinks) + copystat(src, dst, follow_symlinks=follow_symlinks) + return dst + +def ignore_patterns(*patterns): + """Function that can be used as copytree() ignore parameter. + + Patterns is a sequence of glob-style patterns + that are used to exclude files""" + def _ignore_patterns(path, names): + ignored_names = [] + for pattern in patterns: + ignored_names.extend(fnmatch.filter(names, pattern)) + return set(ignored_names) + return _ignore_patterns + +def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2, + ignore_dangling_symlinks=False): + """Recursively copy a directory tree. + + The destination directory must not already exist. + If exception(s) occur, an Error is raised with a list of reasons. + + If the optional symlinks flag is true, symbolic links in the + source tree result in symbolic links in the destination tree; if + it is false, the contents of the files pointed to by symbolic + links are copied. If the file pointed by the symlink doesn't + exist, an exception will be added in the list of errors raised in + an Error exception at the end of the copy process. + + You can set the optional ignore_dangling_symlinks flag to true if you + want to silence this exception. Notice that this has no effect on + platforms that don't support os.symlink. + + The optional ignore argument is a callable. If given, it + is called with the `src` parameter, which is the directory + being visited by copytree(), and `names` which is the list of + `src` contents, as returned by os.listdir(): + + callable(src, names) -> ignored_names + + Since copytree() is called recursively, the callable will be + called once for each directory that is copied. It returns a + list of names relative to the `src` directory that should + not be copied. + + The optional copy_function argument is a callable that will be used + to copy each file. It will be called with the source path and the + destination path as arguments. By default, copy2() is used, but any + function that supports the same signature (like copy()) can be used. + + """ + names = os.listdir(src) + if ignore is not None: + ignored_names = ignore(src, names) + else: + ignored_names = set() + + os.makedirs(dst) + errors = [] + for name in names: + if name in ignored_names: + continue + srcname = os.path.join(src, name) + dstname = os.path.join(dst, name) + try: + if os.path.islink(srcname): + linkto = os.readlink(srcname) + if symlinks: + # We can't just leave it to `copy_function` because legacy + # code with a custom `copy_function` may rely on copytree + # doing the right thing. + os.symlink(linkto, dstname) + copystat(srcname, dstname, follow_symlinks=not symlinks) + else: + # ignore dangling symlink if the flag is on + if not os.path.exists(linkto) and ignore_dangling_symlinks: + continue + # otherwise let the copy occurs. 
copy2 will raise an error + if os.path.isdir(srcname): + copytree(srcname, dstname, symlinks, ignore, + copy_function) + else: + copy_function(srcname, dstname) + elif os.path.isdir(srcname): + copytree(srcname, dstname, symlinks, ignore, copy_function) + else: + # Will raise a SpecialFileError for unsupported file types + copy_function(srcname, dstname) + # catch the Error from the recursive copytree so that we can + # continue with other files + except Error as err: + errors.extend(err.args[0]) + except OSError as why: + errors.append((srcname, dstname, str(why))) + try: + copystat(src, dst) + except OSError as why: + # Copying file access times may fail on Windows + if getattr(why, 'winerror', None) is None: + errors.append((src, dst, str(why))) + if errors: + raise Error(errors) + return dst + +# version vulnerable to race conditions +def _rmtree_unsafe(path, onerror): + try: + with os.scandir(path) as scandir_it: + entries = list(scandir_it) + except OSError: + onerror(os.scandir, path, sys.exc_info()) + entries = [] + for entry in entries: + fullname = entry.path + try: + is_dir = entry.is_dir(follow_symlinks=False) + except OSError: + is_dir = False + if is_dir: + try: + if entry.is_symlink(): + # This can only happen if someone replaces + # a directory with a symlink after the call to + # os.scandir or entry.is_dir above. + raise OSError("Cannot call rmtree on a symbolic link") + except OSError: + onerror(os.path.islink, fullname, sys.exc_info()) + continue + _rmtree_unsafe(fullname, onerror) + else: + try: + os.unlink(fullname) + except OSError: + onerror(os.unlink, fullname, sys.exc_info()) + try: + os.rmdir(path) + except OSError: + onerror(os.rmdir, path, sys.exc_info()) + +# Version using fd-based APIs to protect against races +def _rmtree_safe_fd(topfd, path, onerror): + try: + with os.scandir(topfd) as scandir_it: + entries = list(scandir_it) + except OSError as err: + err.filename = path + onerror(os.scandir, path, sys.exc_info()) + return + for entry in entries: + fullname = os.path.join(path, entry.name) + try: + is_dir = entry.is_dir(follow_symlinks=False) + if is_dir: + orig_st = entry.stat(follow_symlinks=False) + is_dir = stat.S_ISDIR(orig_st.st_mode) + except OSError: + is_dir = False + if is_dir: + try: + dirfd = os.open(entry.name, os.O_RDONLY, dir_fd=topfd) + except OSError: + onerror(os.open, fullname, sys.exc_info()) + else: + try: + if os.path.samestat(orig_st, os.fstat(dirfd)): + _rmtree_safe_fd(dirfd, fullname, onerror) + try: + os.rmdir(entry.name, dir_fd=topfd) + except OSError: + onerror(os.rmdir, fullname, sys.exc_info()) + else: + try: + # This can only happen if someone replaces + # a directory with a symlink after the call to + # os.scandir or stat.S_ISDIR above. + raise OSError("Cannot call rmtree on a symbolic " + "link") + except OSError: + onerror(os.path.islink, fullname, sys.exc_info()) + finally: + os.close(dirfd) + else: + try: + os.unlink(entry.name, dir_fd=topfd) + except OSError: + onerror(os.unlink, fullname, sys.exc_info()) + +_use_fd_functions = ({os.open, os.stat, os.unlink, os.rmdir} <= + os.supports_dir_fd and + os.scandir in os.supports_fd and + os.stat in os.supports_follow_symlinks) + +def rmtree(path, ignore_errors=False, onerror=None): + """Recursively delete a directory tree. 
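A minimal sketch combining copytree, ignore_patterns and rmtree as documented above; the directory names are assumptions:

import shutil

# Mirror a source tree while skipping byte-code caches; the destination must not exist yet.
shutil.copytree(
    "src_project",
    "build_copy",
    ignore=shutil.ignore_patterns("*.pyc", "__pycache__"),
)
shutil.rmtree("build_copy", ignore_errors=True)  # remove the mirror again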
+ + If ignore_errors is set, errors are ignored; otherwise, if onerror + is set, it is called to handle the error with arguments (func, + path, exc_info) where func is platform and implementation dependent; + path is the argument to that function that caused it to fail; and + exc_info is a tuple returned by sys.exc_info(). If ignore_errors + is false and onerror is None, an exception is raised. + + """ + if ignore_errors: + def onerror(*args): + pass + elif onerror is None: + def onerror(*args): + raise + if _use_fd_functions: + # While the unsafe rmtree works fine on bytes, the fd based does not. + if isinstance(path, bytes): + path = os.fsdecode(path) + # Note: To guard against symlink races, we use the standard + # lstat()/open()/fstat() trick. + try: + orig_st = os.lstat(path) + except Exception: + onerror(os.lstat, path, sys.exc_info()) + return + try: + fd = os.open(path, os.O_RDONLY) + except Exception: + onerror(os.lstat, path, sys.exc_info()) + return + try: + if os.path.samestat(orig_st, os.fstat(fd)): + _rmtree_safe_fd(fd, path, onerror) + try: + os.rmdir(path) + except OSError: + onerror(os.rmdir, path, sys.exc_info()) + else: + try: + # symlinks to directories are forbidden, see bug #1669 + raise OSError("Cannot call rmtree on a symbolic link") + except OSError: + onerror(os.path.islink, path, sys.exc_info()) + finally: + os.close(fd) + else: + try: + if os.path.islink(path): + # symlinks to directories are forbidden, see bug #1669 + raise OSError("Cannot call rmtree on a symbolic link") + except OSError: + onerror(os.path.islink, path, sys.exc_info()) + # can't continue even if onerror hook returns + return + return _rmtree_unsafe(path, onerror) + +# Allow introspection of whether or not the hardening against symlink +# attacks is supported on the current platform +rmtree.avoids_symlink_attacks = _use_fd_functions + +def _basename(path): + # A basename() variant which first strips the trailing slash, if present. + # Thus we always get the last component of the path, even for directories. + sep = os.path.sep + (os.path.altsep or '') + return os.path.basename(path.rstrip(sep)) + +def move(src, dst, copy_function=copy2): + """Recursively move a file or directory to another location. This is + similar to the Unix "mv" command. Return the file or directory's + destination. + + If the destination is a directory or a symlink to a directory, the source + is moved inside the directory. The destination path must not already + exist. + + If the destination already exists but is not a directory, it may be + overwritten depending on os.rename() semantics. + + If the destination is on our current filesystem, then rename() is used. + Otherwise, src is copied to the destination and then removed. Symlinks are + recreated under the new name if os.rename() fails because of cross + filesystem renames. + + The optional `copy_function` argument is a callable that will be used + to copy the source or it will be delegated to `copytree`. + By default, copy2() is used, but any function that supports the same + signature (like copy()) can be used. + + A lot more could be done here... A look at a mv.c shows a lot of + the issues this implementation glosses over. + + """ + real_dst = dst + if os.path.isdir(dst): + if _samefile(src, dst): + # We might be on a case insensitive filesystem, + # perform the rename anyway. 
+ os.rename(src, dst) + return + + real_dst = os.path.join(dst, _basename(src)) + if os.path.exists(real_dst): + raise Error("Destination path '%s' already exists" % real_dst) + try: + os.rename(src, real_dst) + except OSError: + if os.path.islink(src): + linkto = os.readlink(src) + os.symlink(linkto, real_dst) + os.unlink(src) + elif os.path.isdir(src): + if _destinsrc(src, dst): + raise Error("Cannot move a directory '%s' into itself" + " '%s'." % (src, dst)) + copytree(src, real_dst, copy_function=copy_function, + symlinks=True) + rmtree(src) + else: + copy_function(src, real_dst) + os.unlink(src) + return real_dst + +def _destinsrc(src, dst): + src = os.path.abspath(src) + dst = os.path.abspath(dst) + if not src.endswith(os.path.sep): + src += os.path.sep + if not dst.endswith(os.path.sep): + dst += os.path.sep + return dst.startswith(src) + +def _get_gid(name): + """Returns a gid, given a group name.""" + if getgrnam is None or name is None: + return None + try: + result = getgrnam(name) + except KeyError: + result = None + if result is not None: + return result[2] + return None + +def _get_uid(name): + """Returns an uid, given a user name.""" + if getpwnam is None or name is None: + return None + try: + result = getpwnam(name) + except KeyError: + result = None + if result is not None: + return result[2] + return None + +def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0, + owner=None, group=None, logger=None): + """Create a (possibly compressed) tar file from all the files under + 'base_dir'. + + 'compress' must be "gzip" (the default), "bzip2", "xz", or None. + + 'owner' and 'group' can be used to define an owner and a group for the + archive that is being built. If not provided, the current owner and group + will be used. + + The output tar file will be named 'base_name' + ".tar", possibly plus + the appropriate compression extension (".gz", ".bz2", or ".xz"). + + Returns the output filename. + """ + if compress is None: + tar_compression = '' + elif _ZLIB_SUPPORTED and compress == 'gzip': + tar_compression = 'gz' + elif _BZ2_SUPPORTED and compress == 'bzip2': + tar_compression = 'bz2' + elif _LZMA_SUPPORTED and compress == 'xz': + tar_compression = 'xz' + else: + raise ValueError("bad value for 'compress', or compression format not " + "supported : {0}".format(compress)) + + import tarfile # late import for breaking circular dependency + + compress_ext = '.' + tar_compression if compress else '' + archive_name = base_name + '.tar' + compress_ext + archive_dir = os.path.dirname(archive_name) + + if archive_dir and not os.path.exists(archive_dir): + if logger is not None: + logger.info("creating %s", archive_dir) + if not dry_run: + os.makedirs(archive_dir) + + # creating the tarball + if logger is not None: + logger.info('Creating tar archive') + + uid = _get_uid(owner) + gid = _get_gid(group) + + def _set_uid_gid(tarinfo): + if gid is not None: + tarinfo.gid = gid + tarinfo.gname = group + if uid is not None: + tarinfo.uid = uid + tarinfo.uname = owner + return tarinfo + + if not dry_run: + tar = tarfile.open(archive_name, 'w|%s' % tar_compression) + try: + tar.add(base_dir, filter=_set_uid_gid) + finally: + tar.close() + + return archive_name + +def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None): + """Create a zip file from all the files under 'base_dir'. + + The output zip file will be named 'base_name' + ".zip". Returns the + name of the output zip file. 
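A hedged sketch of shutil.move() as described above (paths assumed; move falls back to copy-and-delete when the destination is on another filesystem):

import os
import shutil

os.makedirs("archive", exist_ok=True)                  # hypothetical destination directory
new_path = shutil.move("old_report.txt", "archive")    # returns the final destination path
print(new_path)                                        # e.g. "archive/old_report.txt"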
+ """ + import zipfile # late import for breaking circular dependency + + zip_filename = base_name + ".zip" + archive_dir = os.path.dirname(base_name) + + if archive_dir and not os.path.exists(archive_dir): + if logger is not None: + logger.info("creating %s", archive_dir) + if not dry_run: + os.makedirs(archive_dir) + + if logger is not None: + logger.info("creating '%s' and adding '%s' to it", + zip_filename, base_dir) + + if not dry_run: + with zipfile.ZipFile(zip_filename, "w", + compression=zipfile.ZIP_DEFLATED) as zf: + path = os.path.normpath(base_dir) + if path != os.curdir: + zf.write(path, path) + if logger is not None: + logger.info("adding '%s'", path) + for dirpath, dirnames, filenames in os.walk(base_dir): + for name in sorted(dirnames): + path = os.path.normpath(os.path.join(dirpath, name)) + zf.write(path, path) + if logger is not None: + logger.info("adding '%s'", path) + for name in filenames: + path = os.path.normpath(os.path.join(dirpath, name)) + if os.path.isfile(path): + zf.write(path, path) + if logger is not None: + logger.info("adding '%s'", path) + + return zip_filename + +_ARCHIVE_FORMATS = { + 'tar': (_make_tarball, [('compress', None)], "uncompressed tar file"), +} + +if _ZLIB_SUPPORTED: + _ARCHIVE_FORMATS['gztar'] = (_make_tarball, [('compress', 'gzip')], + "gzip'ed tar-file") + _ARCHIVE_FORMATS['zip'] = (_make_zipfile, [], "ZIP file") + +if _BZ2_SUPPORTED: + _ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')], + "bzip2'ed tar-file") + +if _LZMA_SUPPORTED: + _ARCHIVE_FORMATS['xztar'] = (_make_tarball, [('compress', 'xz')], + "xz'ed tar-file") + +def get_archive_formats(): + """Returns a list of supported formats for archiving and unarchiving. + + Each element of the returned sequence is a tuple (name, description) + """ + formats = [(name, registry[2]) for name, registry in + _ARCHIVE_FORMATS.items()] + formats.sort() + return formats + +def register_archive_format(name, function, extra_args=None, description=''): + """Registers an archive format. + + name is the name of the format. function is the callable that will be + used to create archives. If provided, extra_args is a sequence of + (name, value) tuples that will be passed as arguments to the callable. + description can be provided to describe the format, and will be returned + by the get_archive_formats() function. + """ + if extra_args is None: + extra_args = [] + if not callable(function): + raise TypeError('The %s object is not callable' % function) + if not isinstance(extra_args, (tuple, list)): + raise TypeError('extra_args needs to be a sequence') + for element in extra_args: + if not isinstance(element, (tuple, list)) or len(element) !=2: + raise TypeError('extra_args elements are : (arg_name, value)') + + _ARCHIVE_FORMATS[name] = (function, extra_args, description) + +def unregister_archive_format(name): + del _ARCHIVE_FORMATS[name] + +def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0, + dry_run=0, owner=None, group=None, logger=None): + """Create an archive file (eg. zip or tar). + + 'base_name' is the name of the file to create, minus any format-specific + extension; 'format' is the archive format: one of "zip", "tar", "gztar", + "bztar", or "xztar". Or any other registered format. + + 'root_dir' is a directory that will be the root directory of the + archive; ie. we typically chdir into 'root_dir' before creating the + archive. 'base_dir' is the directory where we start archiving from; + ie. 
'base_dir' will be the common prefix of all files and + directories in the archive. 'root_dir' and 'base_dir' both default + to the current directory. Returns the name of the archive file. + + 'owner' and 'group' are used when creating a tar archive. By default, + uses the current owner and group. + """ + save_cwd = os.getcwd() + if root_dir is not None: + if logger is not None: + logger.debug("changing into '%s'", root_dir) + base_name = os.path.abspath(base_name) + if not dry_run: + os.chdir(root_dir) + + if base_dir is None: + base_dir = os.curdir + + kwargs = {'dry_run': dry_run, 'logger': logger} + + try: + format_info = _ARCHIVE_FORMATS[format] + except KeyError: + raise ValueError("unknown archive format '%s'" % format) from None + + func = format_info[0] + for arg, val in format_info[1]: + kwargs[arg] = val + + if format != 'zip': + kwargs['owner'] = owner + kwargs['group'] = group + + try: + filename = func(base_name, base_dir, **kwargs) + finally: + if root_dir is not None: + if logger is not None: + logger.debug("changing back to '%s'", save_cwd) + os.chdir(save_cwd) + + return filename + + +def get_unpack_formats(): + """Returns a list of supported formats for unpacking. + + Each element of the returned sequence is a tuple + (name, extensions, description) + """ + formats = [(name, info[0], info[3]) for name, info in + _UNPACK_FORMATS.items()] + formats.sort() + return formats + +def _check_unpack_options(extensions, function, extra_args): + """Checks what gets registered as an unpacker.""" + # first make sure no other unpacker is registered for this extension + existing_extensions = {} + for name, info in _UNPACK_FORMATS.items(): + for ext in info[0]: + existing_extensions[ext] = name + + for extension in extensions: + if extension in existing_extensions: + msg = '%s is already registered for "%s"' + raise RegistryError(msg % (extension, + existing_extensions[extension])) + + if not callable(function): + raise TypeError('The registered function must be a callable') + + +def register_unpack_format(name, extensions, function, extra_args=None, + description=''): + """Registers an unpack format. + + `name` is the name of the format. `extensions` is a list of extensions + corresponding to the format. + + `function` is the callable that will be + used to unpack archives. The callable will receive archives to unpack. + If it's unable to handle an archive, it needs to raise a ReadError + exception. + + If provided, `extra_args` is a sequence of + (name, value) tuples that will be passed as arguments to the callable. + description can be provided to describe the format, and will be returned + by the get_unpack_formats() function. 
+ """ + if extra_args is None: + extra_args = [] + _check_unpack_options(extensions, function, extra_args) + _UNPACK_FORMATS[name] = extensions, function, extra_args, description + +def unregister_unpack_format(name): + """Removes the pack format from the registry.""" + del _UNPACK_FORMATS[name] + +def _ensure_directory(path): + """Ensure that the parent directory of `path` exists""" + dirname = os.path.dirname(path) + if not os.path.isdir(dirname): + os.makedirs(dirname) + +def _unpack_zipfile(filename, extract_dir): + """Unpack zip `filename` to `extract_dir` + """ + import zipfile # late import for breaking circular dependency + + if not zipfile.is_zipfile(filename): + raise ReadError("%s is not a zip file" % filename) + + zip = zipfile.ZipFile(filename) + try: + for info in zip.infolist(): + name = info.filename + + # don't extract absolute paths or ones with .. in them + if name.startswith('/') or '..' in name: + continue + + target = os.path.join(extract_dir, *name.split('/')) + if not target: + continue + + _ensure_directory(target) + if not name.endswith('/'): + # file + data = zip.read(info.filename) + f = open(target, 'wb') + try: + f.write(data) + finally: + f.close() + del data + finally: + zip.close() + +def _unpack_tarfile(filename, extract_dir): + """Unpack tar/tar.gz/tar.bz2/tar.xz `filename` to `extract_dir` + """ + import tarfile # late import for breaking circular dependency + try: + tarobj = tarfile.open(filename) + except tarfile.TarError: + raise ReadError( + "%s is not a compressed or uncompressed tar file" % filename) + try: + tarobj.extractall(extract_dir) + finally: + tarobj.close() + +_UNPACK_FORMATS = { + 'tar': (['.tar'], _unpack_tarfile, [], "uncompressed tar file"), + 'zip': (['.zip'], _unpack_zipfile, [], "ZIP file"), +} + +if _ZLIB_SUPPORTED: + _UNPACK_FORMATS['gztar'] = (['.tar.gz', '.tgz'], _unpack_tarfile, [], + "gzip'ed tar-file") + +if _BZ2_SUPPORTED: + _UNPACK_FORMATS['bztar'] = (['.tar.bz2', '.tbz2'], _unpack_tarfile, [], + "bzip2'ed tar-file") + +if _LZMA_SUPPORTED: + _UNPACK_FORMATS['xztar'] = (['.tar.xz', '.txz'], _unpack_tarfile, [], + "xz'ed tar-file") + +def _find_unpack_format(filename): + for name, info in _UNPACK_FORMATS.items(): + for extension in info[0]: + if filename.endswith(extension): + return name + return None + +def unpack_archive(filename, extract_dir=None, format=None): + """Unpack an archive. + + `filename` is the name of the archive. + + `extract_dir` is the name of the target directory, where the archive + is unpacked. If not provided, the current working directory is used. + + `format` is the archive format: one of "zip", "tar", "gztar", "bztar", + or "xztar". Or any other registered format. If not provided, + unpack_archive will use the filename extension and see if an unpacker + was registered for that extension. + + In case none is found, a ValueError is raised. 
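To round off the archiving helpers, a small round-trip sketch using make_archive and unpack_archive (directory names assumed; "gztar" requires zlib support):

import shutil

# Create data_backup.tar.gz from the contents of "data_dir", then unpack it elsewhere.
archive_path = shutil.make_archive("data_backup", "gztar", root_dir="data_dir")
shutil.unpack_archive(archive_path, extract_dir="restored")  # format inferred from ".tar.gz"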
+ """ + if extract_dir is None: + extract_dir = os.getcwd() + + extract_dir = os.fspath(extract_dir) + filename = os.fspath(filename) + + if format is not None: + try: + format_info = _UNPACK_FORMATS[format] + except KeyError: + raise ValueError("Unknown unpack format '{0}'".format(format)) from None + + func = format_info[1] + func(filename, extract_dir, **dict(format_info[2])) + else: + # we need to look at the registered unpackers supported extensions + format = _find_unpack_format(filename) + if format is None: + raise ReadError("Unknown archive format '{0}'".format(filename)) + + func = _UNPACK_FORMATS[format][1] + kwargs = dict(_UNPACK_FORMATS[format][2]) + func(filename, extract_dir, **kwargs) + + +if hasattr(os, 'statvfs'): + + __all__.append('disk_usage') + _ntuple_diskusage = collections.namedtuple('usage', 'total used free') + _ntuple_diskusage.total.__doc__ = 'Total space in bytes' + _ntuple_diskusage.used.__doc__ = 'Used space in bytes' + _ntuple_diskusage.free.__doc__ = 'Free space in bytes' + + def disk_usage(path): + """Return disk usage statistics about the given path. + + Returned value is a named tuple with attributes 'total', 'used' and + 'free', which are the amount of total, used and free space, in bytes. + """ + st = os.statvfs(path) + free = st.f_bavail * st.f_frsize + total = st.f_blocks * st.f_frsize + used = (st.f_blocks - st.f_bfree) * st.f_frsize + return _ntuple_diskusage(total, used, free) + +elif os.name == 'nt': + + import nt + __all__.append('disk_usage') + _ntuple_diskusage = collections.namedtuple('usage', 'total used free') + + def disk_usage(path): + """Return disk usage statistics about the given path. + + Returned values is a named tuple with attributes 'total', 'used' and + 'free', which are the amount of total, used and free space, in bytes. + """ + total, free = nt._getdiskusage(path) + used = total - free + return _ntuple_diskusage(total, used, free) + + +def chown(path, user=None, group=None): + """Change owner user and group of the given path. + + user and group can be the uid/gid or the user/group names, and in that case, + they are converted to their respective uid/gid. + """ + + if user is None and group is None: + raise ValueError("user and/or group must be set") + + _user = user + _group = group + + # -1 means don't change it + if user is None: + _user = -1 + # user can either be an int (the uid) or a string (the system username) + elif isinstance(user, str): + _user = _get_uid(user) + if _user is None: + raise LookupError("no such user: {!r}".format(user)) + + if group is None: + _group = -1 + elif not isinstance(group, int): + _group = _get_gid(group) + if _group is None: + raise LookupError("no such group: {!r}".format(group)) + + os.chown(path, _user, _group) + +def get_terminal_size(fallback=(80, 24)): + """Get the size of the terminal window. + + For each of the two dimensions, the environment variable, COLUMNS + and LINES respectively, is checked. If the variable is defined and + the value is a positive integer, it is used. + + When COLUMNS or LINES is not defined, which is the common case, + the terminal connected to sys.__stdout__ is queried + by invoking os.get_terminal_size. + + If the terminal size cannot be successfully queried, either because + the system doesn't support querying, or because we are not + connected to a terminal, the value given in fallback parameter + is used. Fallback defaults to (80, 24) which is the default + size used by many terminal emulators. 
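Lastly, an illustrative sketch of the query helpers in this part of the file, disk_usage and get_terminal_size (the path queried is an assumption):

import shutil

usage = shutil.disk_usage(".")                       # named tuple: total, used, free (bytes)
print("free MiB:", usage.free // (1024 * 1024))

size = shutil.get_terminal_size(fallback=(80, 24))   # falls back when not attached to a tty
print("terminal:", size.columns, "x", size.lines)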
+ + The value returned is a named tuple of type os.terminal_size. + """ + # columns, lines are the working values + try: + columns = int(os.environ['COLUMNS']) + except (KeyError, ValueError): + columns = 0 + + try: + lines = int(os.environ['LINES']) + except (KeyError, ValueError): + lines = 0 + + # only query if necessary + if columns <= 0 or lines <= 0: + try: + size = os.get_terminal_size(sys.__stdout__.fileno()) + except (AttributeError, ValueError, OSError): + # stdout is None, closed, detached, or not a terminal, or + # os.get_terminal_size() is unsupported + size = os.terminal_size(fallback) + if columns <= 0: + columns = size.columns + if lines <= 0: + lines = size.lines + + return os.terminal_size((columns, lines)) + +def which(cmd, mode=os.F_OK | os.X_OK, path=None): + """Given a command, mode, and a PATH string, return the path which + conforms to the given mode on the PATH, or None if there is no such + file. + + `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result + of os.environ.get("PATH"), or can be overridden with a custom search + path. + + """ + # Check that a given file can be accessed with the correct mode. + # Additionally check that `file` is not a directory, as on Windows + # directories pass the os.access check. + def _access_check(fn, mode): + return (os.path.exists(fn) and os.access(fn, mode) + and not os.path.isdir(fn)) + + # If we're given a path with a directory part, look it up directly rather + # than referring to PATH directories. This includes checking relative to the + # current directory, e.g. ./script + if os.path.dirname(cmd): + if _access_check(cmd, mode): + return cmd + return None + + if path is None: + path = os.environ.get("PATH", os.defpath) + if not path: + return None + path = path.split(os.pathsep) + + if sys.platform == "win32": + # The current directory takes precedence on Windows. + if not os.curdir in path: + path.insert(0, os.curdir) + + # PATHEXT is necessary to check on Windows. + pathext = os.environ.get("PATHEXT", "").split(os.pathsep) + # See if the given file matches any of the expected path extensions. + # This will allow us to short circuit when given "python.exe". + # If it does match, only test that one, otherwise we have to try + # others. + if any(cmd.lower().endswith(ext.lower()) for ext in pathext): + files = [cmd] + else: + files = [cmd + ext for ext in pathext] + else: + # On other platforms you don't have things like PATHEXT to tell you + # what file suffixes are executable, so just pass on cmd as-is. + files = [cmd] + + seen = set() + for dir in path: + normdir = os.path.normcase(dir) + if not normdir in seen: + seen.add(normdir) + for thefile in files: + name = os.path.join(dir, thefile) + if _access_check(name, mode): + return name + return None diff --git a/venv/Lib/site-packages/Click-7.0.dist-info/INSTALLER b/venv/Lib/site-packages/Click-7.0.dist-info/INSTALLER new file mode 100644 index 00000000..a1b589e3 --- /dev/null +++ b/venv/Lib/site-packages/Click-7.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/Lib/site-packages/Click-7.0.dist-info/LICENSE.txt b/venv/Lib/site-packages/Click-7.0.dist-info/LICENSE.txt new file mode 100644 index 00000000..87ce152a --- /dev/null +++ b/venv/Lib/site-packages/Click-7.0.dist-info/LICENSE.txt @@ -0,0 +1,39 @@ +Copyright © 2014 by the Pallets team. + +Some rights reserved. 
+ +Redistribution and use in source and binary forms of the software as +well as documentation, with or without modification, are permitted +provided that the following conditions are met: + +- Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +- Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +- Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND +CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, +BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF +USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE AND DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + +---- + +Click uses parts of optparse written by Gregory P. Ward and maintained +by the Python Software Foundation. This is limited to code in parser.py. + +Copyright © 2001-2006 Gregory P. Ward. All rights reserved. +Copyright © 2002-2006 Python Software Foundation. All rights reserved. diff --git a/venv/Lib/site-packages/Click-7.0.dist-info/METADATA b/venv/Lib/site-packages/Click-7.0.dist-info/METADATA new file mode 100644 index 00000000..625bdadd --- /dev/null +++ b/venv/Lib/site-packages/Click-7.0.dist-info/METADATA @@ -0,0 +1,121 @@ +Metadata-Version: 2.1 +Name: Click +Version: 7.0 +Summary: Composable command line interface toolkit +Home-page: https://palletsprojects.com/p/click/ +Author: Armin Ronacher +Author-email: armin.ronacher@active-4.com +Maintainer: Pallets Team +Maintainer-email: contact@palletsprojects.com +License: BSD +Project-URL: Documentation, https://click.palletsprojects.com/ +Project-URL: Code, https://github.com/pallets/click +Project-URL: Issue tracker, https://github.com/pallets/click/issues +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.* + +\$ click\_ +========== + +Click is a Python package for creating beautiful command line interfaces +in a composable way with as little code as necessary. It's the "Command +Line Interface Creation Kit". It's highly configurable but comes with +sensible defaults out of the box. 
+ +It aims to make the process of writing command line tools quick and fun +while also preventing any frustration caused by the inability to +implement an intended CLI API. + +Click in three points: + +- Arbitrary nesting of commands +- Automatic help page generation +- Supports lazy loading of subcommands at runtime + + +Installing +---------- + +Install and update using `pip`_: + +.. code-block:: text + + $ pip install click + +Click supports Python 3.4 and newer, Python 2.7, and PyPy. + +.. _pip: https://pip.pypa.io/en/stable/quickstart/ + + +A Simple Example +---------------- + +What does it look like? Here is an example of a simple Click program: + +.. code-block:: python + + import click + + @click.command() + @click.option("--count", default=1, help="Number of greetings.") + @click.option("--name", prompt="Your name", + help="The person to greet.") + def hello(count, name): + """Simple program that greets NAME for a total of COUNT times.""" + for _ in range(count): + click.echo("Hello, %s!" % name) + + if __name__ == '__main__': + hello() + +And what it looks like when run: + +.. code-block:: text + + $ python hello.py --count=3 + Your name: Click + Hello, Click! + Hello, Click! + Hello, Click! + + +Donate +------ + +The Pallets organization develops and supports Click and other popular +packages. In order to grow the community of contributors and users, and +allow the maintainers to devote more time to the projects, `please +donate today`_. + +.. _please donate today: https://palletsprojects.com/donate + + +Links +----- + +* Website: https://palletsprojects.com/p/click/ +* Documentation: https://click.palletsprojects.com/ +* License: `BSD `_ +* Releases: https://pypi.org/project/click/ +* Code: https://github.com/pallets/click +* Issue tracker: https://github.com/pallets/click/issues +* Test status: + + * Linux, Mac: https://travis-ci.org/pallets/click + * Windows: https://ci.appveyor.com/project/pallets/click + +* Test coverage: https://codecov.io/gh/pallets/click + + diff --git a/venv/Lib/site-packages/Click-7.0.dist-info/RECORD b/venv/Lib/site-packages/Click-7.0.dist-info/RECORD new file mode 100644 index 00000000..ecf17c15 --- /dev/null +++ b/venv/Lib/site-packages/Click-7.0.dist-info/RECORD @@ -0,0 +1,40 @@ +Click-7.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +Click-7.0.dist-info/LICENSE.txt,sha256=4hIxn676T0Wcisk3_chVcECjyrivKTZsoqSNI5AlIlw,1876 +Click-7.0.dist-info/METADATA,sha256=-r8jeke3Zer4diRvT1MjFZuiJ6yTT_qFP39svLqdaLI,3516 +Click-7.0.dist-info/RECORD,, +Click-7.0.dist-info/WHEEL,sha256=gduuPyBvFJQSQ0zdyxF7k0zynDXbIbvg5ZBHoXum5uk,110 +Click-7.0.dist-info/top_level.txt,sha256=J1ZQogalYS4pphY_lPECoNMfw0HzTSrZglC4Yfwo4xA,6 +click/__init__.py,sha256=HjGThQ7tef9kkwCV371TBnrf0SAi6fKfU_jtEnbYTvQ,2789 +click/__pycache__/__init__.cpython-37.pyc,, +click/__pycache__/_bashcomplete.cpython-37.pyc,, +click/__pycache__/_compat.cpython-37.pyc,, +click/__pycache__/_termui_impl.cpython-37.pyc,, +click/__pycache__/_textwrap.cpython-37.pyc,, +click/__pycache__/_unicodefun.cpython-37.pyc,, +click/__pycache__/_winconsole.cpython-37.pyc,, +click/__pycache__/core.cpython-37.pyc,, +click/__pycache__/decorators.cpython-37.pyc,, +click/__pycache__/exceptions.cpython-37.pyc,, +click/__pycache__/formatting.cpython-37.pyc,, +click/__pycache__/globals.cpython-37.pyc,, +click/__pycache__/parser.cpython-37.pyc,, +click/__pycache__/termui.cpython-37.pyc,, +click/__pycache__/testing.cpython-37.pyc,, +click/__pycache__/types.cpython-37.pyc,, 
+click/__pycache__/utils.cpython-37.pyc,, +click/_bashcomplete.py,sha256=iaNUmtxag0YPfxba3TDYCNietiTMQIrvhRLj-H8okFU,11014 +click/_compat.py,sha256=vYmvoj4opPxo-c-2GMQQjYT_r_QkOKybkfGoeVrt0dA,23399 +click/_termui_impl.py,sha256=xHmLtOJhKUCVD6168yucJ9fknUJPAMs0eUTPgVUO-GQ,19611 +click/_textwrap.py,sha256=gwS4m7bdQiJnzaDG8osFcRb-5vn4t4l2qSCy-5csCEc,1198 +click/_unicodefun.py,sha256=QHy2_5jYlX-36O-JVrTHNnHOqg8tquUR0HmQFev7Ics,4364 +click/_winconsole.py,sha256=PPWVak8Iikm_gAPsxMrzwsVFCvHgaW3jPaDWZ1JBl3U,8965 +click/core.py,sha256=q8FLcDZsagBGSRe5Y9Hi_FGvAeZvusNfoO5EkhkSQ8Y,75305 +click/decorators.py,sha256=idKt6duLUUfAFftrHoREi8MJSd39XW36pUVHthdglwk,11226 +click/exceptions.py,sha256=CNpAjBAE7qjaV4WChxQeak95e5yUOau8AsvT-8m6wss,7663 +click/formatting.py,sha256=eh-cypTUAhpI3HD-K4ZpR3vCiURIO62xXvKkR3tNUTM,8889 +click/globals.py,sha256=oQkou3ZQ5DgrbVM6BwIBirwiqozbjfirzsLGAlLRRdg,1514 +click/parser.py,sha256=m-nGZz4VwprM42_qtFlWFGo7yRJQxkBlRcZodoH593Y,15510 +click/termui.py,sha256=o_ZXB2jyvL2Rce7P_bFGq452iyBq9ykJyRApIPMCZO0,23207 +click/testing.py,sha256=aYGqY_iWLu2p4k7lkuJ6t3fqpf6aPGqTsyLzNY_ngKg,13062 +click/types.py,sha256=2Q929p-aBP_ZYuMFJqJR-Ipucofv3fmDc5JzBDPmzJU,23287 +click/utils.py,sha256=6-D0WkAxvv9FkgHXSHwDIv0l9Gdx9Mm6Z5vuKNLIfZI,15763 diff --git a/venv/Lib/site-packages/Click-7.0.dist-info/WHEEL b/venv/Lib/site-packages/Click-7.0.dist-info/WHEEL new file mode 100644 index 00000000..1316c41d --- /dev/null +++ b/venv/Lib/site-packages/Click-7.0.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.31.1) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/venv/Lib/site-packages/Click-7.0.dist-info/top_level.txt b/venv/Lib/site-packages/Click-7.0.dist-info/top_level.txt new file mode 100644 index 00000000..dca9a909 --- /dev/null +++ b/venv/Lib/site-packages/Click-7.0.dist-info/top_level.txt @@ -0,0 +1 @@ +click diff --git a/venv/Lib/site-packages/Flask-1.0.2.dist-info/INSTALLER b/venv/Lib/site-packages/Flask-1.0.2.dist-info/INSTALLER new file mode 100644 index 00000000..a1b589e3 --- /dev/null +++ b/venv/Lib/site-packages/Flask-1.0.2.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/Lib/site-packages/Flask-1.0.2.dist-info/LICENSE.txt b/venv/Lib/site-packages/Flask-1.0.2.dist-info/LICENSE.txt new file mode 100644 index 00000000..8f9252f4 --- /dev/null +++ b/venv/Lib/site-packages/Flask-1.0.2.dist-info/LICENSE.txt @@ -0,0 +1,31 @@ +Copyright © 2010 by the Pallets team. + +Some rights reserved. + +Redistribution and use in source and binary forms of the software as +well as documentation, with or without modification, are permitted +provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND +CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, +BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF +USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE AND DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. diff --git a/venv/Lib/site-packages/Flask-1.0.2.dist-info/METADATA b/venv/Lib/site-packages/Flask-1.0.2.dist-info/METADATA new file mode 100644 index 00000000..c600e730 --- /dev/null +++ b/venv/Lib/site-packages/Flask-1.0.2.dist-info/METADATA @@ -0,0 +1,130 @@ +Metadata-Version: 2.1 +Name: Flask +Version: 1.0.2 +Summary: A simple framework for building complex web applications. +Home-page: https://www.palletsprojects.com/p/flask/ +Author: Armin Ronacher +Author-email: armin.ronacher@active-4.com +Maintainer: Pallets team +Maintainer-email: contact@palletsprojects.com +License: BSD +Project-URL: Documentation, http://flask.pocoo.org/docs/ +Project-URL: Code, https://github.com/pallets/flask +Project-URL: Issue tracker, https://github.com/pallets/flask/issues +Platform: any +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Web Environment +Classifier: Framework :: Flask +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content +Classifier: Topic :: Internet :: WWW/HTTP :: WSGI :: Application +Classifier: Topic :: Software Development :: Libraries :: Application Frameworks +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Provides-Extra: dev +Provides-Extra: docs +Provides-Extra: dotenv +Requires-Dist: Werkzeug (>=0.14) +Requires-Dist: Jinja2 (>=2.10) +Requires-Dist: itsdangerous (>=0.24) +Requires-Dist: click (>=5.1) +Provides-Extra: dev +Requires-Dist: pytest (>=3); extra == 'dev' +Requires-Dist: coverage; extra == 'dev' +Requires-Dist: tox; extra == 'dev' +Requires-Dist: sphinx; extra == 'dev' +Requires-Dist: pallets-sphinx-themes; extra == 'dev' +Requires-Dist: sphinxcontrib-log-cabinet; extra == 'dev' +Provides-Extra: docs +Requires-Dist: sphinx; extra == 'docs' +Requires-Dist: pallets-sphinx-themes; extra == 'docs' +Requires-Dist: sphinxcontrib-log-cabinet; extra == 'docs' +Provides-Extra: dotenv +Requires-Dist: python-dotenv; extra == 'dotenv' + +Flask +===== + +Flask is a lightweight `WSGI`_ web application framework. It is designed +to make getting started quick and easy, with the ability to scale up to +complex applications. It began as a simple wrapper around `Werkzeug`_ +and `Jinja`_ and has become one of the most popular Python web +application frameworks. + +Flask offers suggestions, but doesn't enforce any dependencies or +project layout. It is up to the developer to choose the tools and +libraries they want to use. 
There are many extensions provided by the +community that make adding new functionality easy. + + +Installing +---------- + +Install and update using `pip`_: + +.. code-block:: text + + pip install -U Flask + + +A Simple Example +---------------- + +.. code-block:: python + + from flask import Flask + + app = Flask(__name__) + + @app.route('/') + def hello(): + return 'Hello, World!' + +.. code-block:: text + + $ FLASK_APP=hello.py flask run + * Serving Flask app "hello" + * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit) + + +Donate +------ + +The Pallets organization develops and supports Flask and the libraries +it uses. In order to grow the community of contributors and users, and +allow the maintainers to devote more time to the projects, `please +donate today`_. + +.. _please donate today: https://psfmember.org/civicrm/contribute/transact?reset=1&id=20 + + +Links +----- + +* Website: https://www.palletsprojects.com/p/flask/ +* Documentation: http://flask.pocoo.org/docs/ +* License: `BSD `_ +* Releases: https://pypi.org/project/Flask/ +* Code: https://github.com/pallets/flask +* Issue tracker: https://github.com/pallets/flask/issues +* Test status: + + * Linux, Mac: https://travis-ci.org/pallets/flask + * Windows: https://ci.appveyor.com/project/pallets/flask + +* Test coverage: https://codecov.io/gh/pallets/flask + +.. _WSGI: https://wsgi.readthedocs.io +.. _Werkzeug: https://www.palletsprojects.com/p/werkzeug/ +.. _Jinja: https://www.palletsprojects.com/p/jinja/ +.. _pip: https://pip.pypa.io/en/stable/quickstart/ + + diff --git a/venv/Lib/site-packages/Flask-1.0.2.dist-info/RECORD b/venv/Lib/site-packages/Flask-1.0.2.dist-info/RECORD new file mode 100644 index 00000000..8e53c32b --- /dev/null +++ b/venv/Lib/site-packages/Flask-1.0.2.dist-info/RECORD @@ -0,0 +1,48 @@ +../../Scripts/flask.exe,sha256=3AeCqU3L6fZALi7DvMP_MVAMHlU_voofdeJMdAyKh8E,93083 +Flask-1.0.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +Flask-1.0.2.dist-info/LICENSE.txt,sha256=ziEXA3AIuaiUn1qe4cd1XxCESWTYrk4TjN7Qb06J3l8,1575 +Flask-1.0.2.dist-info/METADATA,sha256=iA5tiNWzTtgCVe80aTZGNWsckj853fJyfvHs9U-WZRk,4182 +Flask-1.0.2.dist-info/RECORD,, +Flask-1.0.2.dist-info/WHEEL,sha256=J3CsTk7Mf2JNUyhImI-mjX-fmI4oDjyiXgWT4qgZiCE,110 +Flask-1.0.2.dist-info/entry_points.txt,sha256=gBLA1aKg0OYR8AhbAfg8lnburHtKcgJLDU52BBctN0k,42 +Flask-1.0.2.dist-info/top_level.txt,sha256=dvi65F6AeGWVU0TBpYiC04yM60-FX1gJFkK31IKQr5c,6 +flask/__init__.py,sha256=qq8lK6QQbxJALf1igz7qsvUwOTAoKvFGfdLm7jPNsso,1673 +flask/__main__.py,sha256=pgIXrHhxM5MAMvgzAqWpw_t6AXZ1zG38us4JRgJKtxk,291 +flask/__pycache__/__init__.cpython-37.pyc,, +flask/__pycache__/__main__.cpython-37.pyc,, +flask/__pycache__/_compat.cpython-37.pyc,, +flask/__pycache__/app.cpython-37.pyc,, +flask/__pycache__/blueprints.cpython-37.pyc,, +flask/__pycache__/cli.cpython-37.pyc,, +flask/__pycache__/config.cpython-37.pyc,, +flask/__pycache__/ctx.cpython-37.pyc,, +flask/__pycache__/debughelpers.cpython-37.pyc,, +flask/__pycache__/globals.cpython-37.pyc,, +flask/__pycache__/helpers.cpython-37.pyc,, +flask/__pycache__/logging.cpython-37.pyc,, +flask/__pycache__/sessions.cpython-37.pyc,, +flask/__pycache__/signals.cpython-37.pyc,, +flask/__pycache__/templating.cpython-37.pyc,, +flask/__pycache__/testing.cpython-37.pyc,, +flask/__pycache__/views.cpython-37.pyc,, +flask/__pycache__/wrappers.cpython-37.pyc,, +flask/_compat.py,sha256=UDFGhosh6mOdNB-4evKPuneHum1OpcAlwTNJCRm0irQ,2892 +flask/app.py,sha256=ahpe3T8w98rQd_Er5d7uDxK57S1nnqGQx3V3hirBovU,94147 
+flask/blueprints.py,sha256=Cyhl_x99tgwqEZPtNDJUFneAfVJxWfEU4bQA7zWS6VU,18331 +flask/cli.py,sha256=30QYAO10Do9LbZYCLgfI_xhKjASdLopL8wKKVUGS2oA,29442 +flask/config.py,sha256=kznUhj4DLYxsTF_4kfDG8GEHto1oZG_kqblyrLFtpqQ,9951 +flask/ctx.py,sha256=leFzS9fzmo0uaLCdxpHc5_iiJZ1H0X_Ig4yPCOvT--g,16224 +flask/debughelpers.py,sha256=1ceC-UyqZTd4KsJkf0OObHPsVt5R3T6vnmYhiWBjV-w,6479 +flask/globals.py,sha256=pGg72QW_-4xUfsI33I5L_y76c21AeqfSqXDcbd8wvXU,1649 +flask/helpers.py,sha256=YCl8D1plTO1evEYP4KIgaY3H8Izww5j4EdgRJ89oHTw,40106 +flask/json/__init__.py,sha256=Ns1Hj805XIxuBMh2z0dYnMVfb_KUgLzDmP3WoUYaPhw,10729 +flask/json/__pycache__/__init__.cpython-37.pyc,, +flask/json/__pycache__/tag.cpython-37.pyc,, +flask/json/tag.py,sha256=9ehzrmt5k7hxf7ZEK0NOs3swvQyU9fWNe-pnYe69N60,8223 +flask/logging.py,sha256=qV9h0vt7NIRkKM9OHDWndzO61E5CeBMlqPJyTt-W2Wc,2231 +flask/sessions.py,sha256=2XHV4ASREhSEZ8bsPQW6pNVNuFtbR-04BzfKg0AfvHo,14452 +flask/signals.py,sha256=BGQbVyCYXnzKK2DVCzppKFyWN1qmrtW1QMAYUs-1Nr8,2211 +flask/templating.py,sha256=FDfWMbpgpC3qObW8GGXRAVrkHFF8K4CHOJymB1wvULI,4914 +flask/testing.py,sha256=XD3gWNvLUV8dqVHwKd9tZzsj81fSHtjOphQ1wTNtlMs,9379 +flask/views.py,sha256=Wy-_WkUVtCfE2zCXYeJehNgHuEtviE4v3HYfJ--MpbY,5733 +flask/wrappers.py,sha256=1Z9hF5-hXQajn_58XITQFRY8efv3Vy3uZ0avBfZu6XI,7511 diff --git a/venv/Lib/site-packages/Flask-1.0.2.dist-info/WHEEL b/venv/Lib/site-packages/Flask-1.0.2.dist-info/WHEEL new file mode 100644 index 00000000..f21b51cd --- /dev/null +++ b/venv/Lib/site-packages/Flask-1.0.2.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.31.0) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/venv/Lib/site-packages/Flask-1.0.2.dist-info/entry_points.txt b/venv/Lib/site-packages/Flask-1.0.2.dist-info/entry_points.txt new file mode 100644 index 00000000..1eb02520 --- /dev/null +++ b/venv/Lib/site-packages/Flask-1.0.2.dist-info/entry_points.txt @@ -0,0 +1,3 @@ +[console_scripts] +flask = flask.cli:main + diff --git a/venv/Lib/site-packages/Flask-1.0.2.dist-info/top_level.txt b/venv/Lib/site-packages/Flask-1.0.2.dist-info/top_level.txt new file mode 100644 index 00000000..7e106024 --- /dev/null +++ b/venv/Lib/site-packages/Flask-1.0.2.dist-info/top_level.txt @@ -0,0 +1 @@ +flask diff --git a/venv/Lib/site-packages/Flask_RESTful-0.3.6.dist-info/DESCRIPTION.rst b/venv/Lib/site-packages/Flask_RESTful-0.3.6.dist-info/DESCRIPTION.rst new file mode 100644 index 00000000..e1187231 --- /dev/null +++ b/venv/Lib/site-packages/Flask_RESTful-0.3.6.dist-info/DESCRIPTION.rst @@ -0,0 +1,3 @@ +UNKNOWN + + diff --git a/venv/Lib/site-packages/Flask_RESTful-0.3.6.dist-info/INSTALLER b/venv/Lib/site-packages/Flask_RESTful-0.3.6.dist-info/INSTALLER new file mode 100644 index 00000000..a1b589e3 --- /dev/null +++ b/venv/Lib/site-packages/Flask_RESTful-0.3.6.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/Lib/site-packages/Flask_RESTful-0.3.6.dist-info/METADATA b/venv/Lib/site-packages/Flask_RESTful-0.3.6.dist-info/METADATA new file mode 100644 index 00000000..529c46e1 --- /dev/null +++ b/venv/Lib/site-packages/Flask_RESTful-0.3.6.dist-info/METADATA @@ -0,0 +1,31 @@ +Metadata-Version: 2.0 +Name: Flask-RESTful +Version: 0.3.6 +Summary: Simple framework for creating REST APIs +Home-page: https://www.github.com/flask-restful/flask-restful/ +Author: Twilio API Team +Author-email: help@twilio.com +License: BSD +Platform: any +Classifier: Framework :: Flask +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.6 
+Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: License :: OSI Approved :: BSD License +Requires-Dist: Flask (>=0.8) +Requires-Dist: aniso8601 (>=0.82) +Requires-Dist: pytz +Requires-Dist: six (>=1.3.0) +Provides-Extra: docs +Requires-Dist: sphinx; extra == 'docs' +Provides-Extra: paging +Requires-Dist: pycrypto (>=2.6); extra == 'paging' + +UNKNOWN + + diff --git a/venv/Lib/site-packages/Flask_RESTful-0.3.6.dist-info/RECORD b/venv/Lib/site-packages/Flask_RESTful-0.3.6.dist-info/RECORD new file mode 100644 index 00000000..b698a54a --- /dev/null +++ b/venv/Lib/site-packages/Flask_RESTful-0.3.6.dist-info/RECORD @@ -0,0 +1,29 @@ +Flask_RESTful-0.3.6.dist-info/DESCRIPTION.rst,sha256=OCTuuN6LcWulhHS3d5rfjdsQtW22n7HENFRh6jC6ego,10 +Flask_RESTful-0.3.6.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +Flask_RESTful-0.3.6.dist-info/METADATA,sha256=THyvrIu4SUuC9fqpfDBP-m6kdZcesFu2-Gm2oxgJnWM,985 +Flask_RESTful-0.3.6.dist-info/RECORD,, +Flask_RESTful-0.3.6.dist-info/WHEEL,sha256=o2k-Qa-RMNIJmUdIc7KU6VWR_ErNRbWNlxDIpl7lm34,110 +Flask_RESTful-0.3.6.dist-info/metadata.json,sha256=1zKAyvYdpplkpX_NJU8vKCZ9nvy0Y_paxRiMJXCNiXU,1178 +Flask_RESTful-0.3.6.dist-info/top_level.txt,sha256=lNpWPlejgBAtMhCUwz_FTyJH12ul1mBZ-Uv3ZK1HiGg,14 +flask_restful/__init__.py,sha256=dpZuahv5GMo2Rof4OWkGpQvIhnJIeOakL_1EHkkzUA4,28510 +flask_restful/__pycache__/__init__.cpython-37.pyc,, +flask_restful/__pycache__/__version__.cpython-37.pyc,, +flask_restful/__pycache__/fields.cpython-37.pyc,, +flask_restful/__pycache__/inputs.cpython-37.pyc,, +flask_restful/__pycache__/paging.cpython-37.pyc,, +flask_restful/__pycache__/reqparse.cpython-37.pyc,, +flask_restful/__version__.py,sha256=-RHjzBqfdsudCsVzQ9u-AoX7n-_90gcO1RlvPEVk7d4,45 +flask_restful/fields.py,sha256=9cbc0vXaGzt1Ur8gUf3sHlMjNSy1qOSV_qvi-3YQAL8,13051 +flask_restful/inputs.py,sha256=AOBF_1BpB8snY3XJT_vca_uWYzkCP2nla01lFU7xqLE,9114 +flask_restful/paging.py,sha256=H_nL-UlViLSHJhS7NbHJurHMwzxv7IsaAnuTC4L5Kec,1207 +flask_restful/representations/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1 +flask_restful/representations/__pycache__/__init__.cpython-37.pyc,, +flask_restful/representations/__pycache__/json.cpython-37.pyc,, +flask_restful/representations/json.py,sha256=swKwnbt7v2ioHfHkqhqbzIu_yrcP0ComlSl49IGFJOo,873 +flask_restful/reqparse.py,sha256=I9qxtSqjVzrDTxQtfSHM4yGx3XsiVoa-wMPlA1mod9s,13475 +flask_restful/utils/__init__.py,sha256=Qh5pyCIT2dfHmrUdS6lsMbBLjZmAhz1fl7vWyJ_n4XQ,719 +flask_restful/utils/__pycache__/__init__.cpython-37.pyc,, +flask_restful/utils/__pycache__/cors.cpython-37.pyc,, +flask_restful/utils/__pycache__/crypto.cpython-37.pyc,, +flask_restful/utils/cors.py,sha256=cZiqaHhIn0w66spRoSIdC-jIn4X_b6OlVms5eGF4Ess,2084 +flask_restful/utils/crypto.py,sha256=q3PBvAYMJYybbqqQlKNF_Pqeyo9h3x5jFJuVqtEA5bA,996 diff --git a/venv/Lib/site-packages/Flask_RESTful-0.3.6.dist-info/WHEEL b/venv/Lib/site-packages/Flask_RESTful-0.3.6.dist-info/WHEEL new file mode 100644 index 00000000..8b6dd1b5 --- /dev/null +++ b/venv/Lib/site-packages/Flask_RESTful-0.3.6.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.29.0) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git 
a/venv/Lib/site-packages/Flask_RESTful-0.3.6.dist-info/metadata.json b/venv/Lib/site-packages/Flask_RESTful-0.3.6.dist-info/metadata.json new file mode 100644 index 00000000..fd1720a5 --- /dev/null +++ b/venv/Lib/site-packages/Flask_RESTful-0.3.6.dist-info/metadata.json @@ -0,0 +1 @@ +{"classifiers": ["Framework :: Flask", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "License :: OSI Approved :: BSD License"], "extensions": {"python.details": {"contacts": [{"email": "help@twilio.com", "name": "Twilio API Team", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "https://www.github.com/flask-restful/flask-restful/"}}}, "extras": ["docs", "paging"], "generator": "bdist_wheel (0.29.0)", "license": "BSD", "metadata_version": "2.0", "name": "Flask-RESTful", "platform": "any", "run_requires": [{"requires": ["Flask (>=0.8)", "aniso8601 (>=0.82)", "pytz", "six (>=1.3.0)"]}, {"extra": "paging", "requires": ["pycrypto (>=2.6)"]}, {"extra": "docs", "requires": ["sphinx"]}], "summary": "Simple framework for creating REST APIs", "test_requires": [{"requires": ["Flask-RESTful[paging]", "blinker", "mock (>=0.8)"]}], "version": "0.3.6"} \ No newline at end of file diff --git a/venv/Lib/site-packages/Flask_RESTful-0.3.6.dist-info/top_level.txt b/venv/Lib/site-packages/Flask_RESTful-0.3.6.dist-info/top_level.txt new file mode 100644 index 00000000..f7b85270 --- /dev/null +++ b/venv/Lib/site-packages/Flask_RESTful-0.3.6.dist-info/top_level.txt @@ -0,0 +1 @@ +flask_restful diff --git a/venv/Lib/site-packages/ItsDangerous-1.0.0.dist-info/INSTALLER b/venv/Lib/site-packages/ItsDangerous-1.0.0.dist-info/INSTALLER new file mode 100644 index 00000000..a1b589e3 --- /dev/null +++ b/venv/Lib/site-packages/ItsDangerous-1.0.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/Lib/site-packages/ItsDangerous-1.0.0.dist-info/LICENSE.rst b/venv/Lib/site-packages/ItsDangerous-1.0.0.dist-info/LICENSE.rst new file mode 100644 index 00000000..e506dcaf --- /dev/null +++ b/venv/Lib/site-packages/ItsDangerous-1.0.0.dist-info/LICENSE.rst @@ -0,0 +1,47 @@ +`BSD 3-Clause `_ + +Copyright © 2011 by the Pallets team. + +Some rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +- Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +- Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +- Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +We kindly ask you to use these themes in an unmodified manner only with +Pallets and Pallets-related projects, not for unrelated projects. If you +like the visual style and want to use it for your own projects, please +consider making some larger changes to the themes (such as changing font +faces, sizes, colors or margins). 
+ +THIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND +CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, +BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF +USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE AND DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + +---- + +The initial implementation of It's Dangerous was inspired by Django's +signing module. + +Copyright © Django Software Foundation and individual contributors. +All rights reserved. diff --git a/venv/Lib/site-packages/ItsDangerous-1.0.0.dist-info/METADATA b/venv/Lib/site-packages/ItsDangerous-1.0.0.dist-info/METADATA new file mode 100644 index 00000000..0eeb6973 --- /dev/null +++ b/venv/Lib/site-packages/ItsDangerous-1.0.0.dist-info/METADATA @@ -0,0 +1,100 @@ +Metadata-Version: 2.1 +Name: ItsDangerous +Version: 1.0.0 +Summary: Various helpers to pass data to untrusted environments and back. +Home-page: https://palletsprojects.com/p/itsdangerous/ +Author: Armin Ronacher +Author-email: armin.ronacher@active-4.com +Maintainer: Pallets Team +Maintainer-email: contact@palletsprojects.com +License: BSD +Project-URL: Documentation, https://itsdangerous.palletsprojects.com/ +Project-URL: Code, https://github.com/pallets/itsdangerous +Project-URL: Issue tracker, https://github.com/pallets/itsdangerous/issues +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.* + +It's Dangerous +============== + +... so better sign this + +Various helpers to pass data to untrusted environments and to get it +back safe and sound. Data is cryptographically signed to ensure that a +token has not been tampered with. + +It's possible to customize how data is serialized. Data is compressed as +needed. A timestamp can be added and verified automatically while +loading a token. + + +Installing +---------- + +Install and update using `pip`_: + +.. code-block:: text + + pip install -U ItsDangerous + +.. _pip: https://pip.pypa.io/en/stable/quickstart/ + + +A Simple Example +---------------- + +Here's how you could generate a token for transmitting a user's id and +name between web requests. + +.. 
code-block:: python + + from itsdangerous import URLSafeSerializer + + auth_s = URLSafeSerializer("secret key", "auth") + token = auth_s.dumps({"id": 5, "name": "itsdangerous"}) + + print(token) + # eyJpZCI6NSwibmFtZSI6Iml0c2Rhbmdlcm91cyJ9.AmSPrPa_iZ6q-ERXXdQxt6ce8NEqt + # 3i2Uke3sIRnDG0riZD6OoqckqC72VJ9SBIu-vAf_XlwNHnt7dLEClT0JA + + data = auth_s.loads(token) + print(data["name"]) + # itsdangerous + + +Donate +------ + +The Pallets organization develops and supports ItsDangerous and other +popular packages. In order to grow the community of contributors and +users, and allow the maintainers to devote more time to the projects, +`please donate today`_. + +.. _please donate today: https://palletsprojects.com/donate + + +Links +----- + +* Website: https://palletsprojects.com/p/itsdangerous/ +* Documentation: https://itsdangerous.palletsprojects.com/ +* License: `BSD `_ +* Releases: https://pypi.org/project/itsdangerous/ +* Code: https://github.com/pallets/itsdangerous +* Issue tracker: https://github.com/pallets/itsdangerous/issues +* Test status: https://travis-ci.org/pallets/itsdangerous +* Test coverage: https://codecov.io/gh/pallets/itsdangerous + + diff --git a/venv/Lib/site-packages/ItsDangerous-1.0.0.dist-info/RECORD b/venv/Lib/site-packages/ItsDangerous-1.0.0.dist-info/RECORD new file mode 100644 index 00000000..e94f8571 --- /dev/null +++ b/venv/Lib/site-packages/ItsDangerous-1.0.0.dist-info/RECORD @@ -0,0 +1,26 @@ +ItsDangerous-1.0.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +ItsDangerous-1.0.0.dist-info/LICENSE.rst,sha256=GGya2eePFqkG_3vuL5-7B0uc3470ASxi8cDAegCH6wg,2122 +ItsDangerous-1.0.0.dist-info/METADATA,sha256=Ti_ra8uJlLhrfRd85G3t6xcFZ7CNqibvUJYtpo8MtMs,3133 +ItsDangerous-1.0.0.dist-info/RECORD,, +ItsDangerous-1.0.0.dist-info/WHEEL,sha256=8T8fxefr_r-A79qbOJ9d_AaEgkpCGmEPHc-gpCq5BRg,110 +ItsDangerous-1.0.0.dist-info/top_level.txt,sha256=gKN1OKLk81i7fbWWildJA88EQ9NhnGMSvZqhfz9ICjk,13 +itsdangerous/__init__.py,sha256=wGMrVAUt6j6i8NmKLiJduJXLFeJcW7hzZTV39XZWVUs,675 +itsdangerous/__pycache__/__init__.cpython-37.pyc,, +itsdangerous/__pycache__/_compat.cpython-37.pyc,, +itsdangerous/__pycache__/_json.cpython-37.pyc,, +itsdangerous/__pycache__/encoding.cpython-37.pyc,, +itsdangerous/__pycache__/exc.cpython-37.pyc,, +itsdangerous/__pycache__/jws.cpython-37.pyc,, +itsdangerous/__pycache__/serializer.cpython-37.pyc,, +itsdangerous/__pycache__/signer.cpython-37.pyc,, +itsdangerous/__pycache__/timed.cpython-37.pyc,, +itsdangerous/__pycache__/url_safe.cpython-37.pyc,, +itsdangerous/_compat.py,sha256=oAAMcQAjwQXQpIbuHT3o-aL56ztm_7Fe-4lD7IteF6A,1133 +itsdangerous/_json.py,sha256=W7BLL4RPnSOjNdo2gfKT3BeARMCIikY6O75rwWV0XoE,431 +itsdangerous/encoding.py,sha256=KhY85PsH3bGHe5JANN4LMZ_3b0IwUWRRnnw1wvLlaIg,1224 +itsdangerous/exc.py,sha256=KFxg7K2XMliMQAxL4jkRNgE8e73z2jcRaLrzwqVObnI,2959 +itsdangerous/jws.py,sha256=6Lh9W-Lu8D9s7bRazs0Zb35eyAZm3pzLeZqHmRELeII,7470 +itsdangerous/serializer.py,sha256=gxHzYl-Ji91UZeNhVmdPWEJ22mzSTE7Qw0GsP2TORVU,6406 +itsdangerous/signer.py,sha256=iGQM2pJu3EBEM8HqCOL9nAvR2-rGIBdFaxLrBz1KvQ4,6547 +itsdangerous/timed.py,sha256=W3rbEUopjtPBgUd6lQLYGq5iWzEnhUPj0rPfbBDWE4o,5166 +itsdangerous/url_safe.py,sha256=xnFTaukIPmW6Qwn6uNQLgzdau8RuAKnp5N7ukuXykj0,2275 diff --git a/venv/Lib/site-packages/ItsDangerous-1.0.0.dist-info/WHEEL b/venv/Lib/site-packages/ItsDangerous-1.0.0.dist-info/WHEEL new file mode 100644 index 00000000..10012355 --- /dev/null +++ b/venv/Lib/site-packages/ItsDangerous-1.0.0.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 
+Generator: bdist_wheel (0.32.1) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/venv/Lib/site-packages/ItsDangerous-1.0.0.dist-info/top_level.txt b/venv/Lib/site-packages/ItsDangerous-1.0.0.dist-info/top_level.txt new file mode 100644 index 00000000..e163955e --- /dev/null +++ b/venv/Lib/site-packages/ItsDangerous-1.0.0.dist-info/top_level.txt @@ -0,0 +1 @@ +itsdangerous diff --git a/venv/Lib/site-packages/Jinja2-2.10.dist-info/DESCRIPTION.rst b/venv/Lib/site-packages/Jinja2-2.10.dist-info/DESCRIPTION.rst new file mode 100644 index 00000000..1594da5c --- /dev/null +++ b/venv/Lib/site-packages/Jinja2-2.10.dist-info/DESCRIPTION.rst @@ -0,0 +1,37 @@ + +Jinja2 +~~~~~~ + +Jinja2 is a template engine written in pure Python. It provides a +`Django`_ inspired non-XML syntax but supports inline expressions and +an optional `sandboxed`_ environment. + +Nutshell +-------- + +Here a small example of a Jinja template:: + + {% extends 'base.html' %} + {% block title %}Memberlist{% endblock %} + {% block content %} + + {% endblock %} + +Philosophy +---------- + +Application logic is for the controller but don't try to make the life +for the template designer too hard by giving him too few functionality. + +For more informations visit the new `Jinja2 webpage`_ and `documentation`_. + +.. _sandboxed: https://en.wikipedia.org/wiki/Sandbox_(computer_security) +.. _Django: https://www.djangoproject.com/ +.. _Jinja2 webpage: http://jinja.pocoo.org/ +.. _documentation: http://jinja.pocoo.org/2/documentation/ + + diff --git a/venv/Lib/site-packages/Jinja2-2.10.dist-info/INSTALLER b/venv/Lib/site-packages/Jinja2-2.10.dist-info/INSTALLER new file mode 100644 index 00000000..a1b589e3 --- /dev/null +++ b/venv/Lib/site-packages/Jinja2-2.10.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/Lib/site-packages/Jinja2-2.10.dist-info/LICENSE.txt b/venv/Lib/site-packages/Jinja2-2.10.dist-info/LICENSE.txt new file mode 100644 index 00000000..31bf900e --- /dev/null +++ b/venv/Lib/site-packages/Jinja2-2.10.dist-info/LICENSE.txt @@ -0,0 +1,31 @@ +Copyright (c) 2009 by the Jinja Team, see AUTHORS for more details. + +Some rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * The names of the contributors may not be used to endorse or + promote products derived from this software without specific + prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/venv/Lib/site-packages/Jinja2-2.10.dist-info/METADATA b/venv/Lib/site-packages/Jinja2-2.10.dist-info/METADATA new file mode 100644 index 00000000..40f2b46b --- /dev/null +++ b/venv/Lib/site-packages/Jinja2-2.10.dist-info/METADATA @@ -0,0 +1,68 @@ +Metadata-Version: 2.0 +Name: Jinja2 +Version: 2.10 +Summary: A small but fast and easy to use stand-alone template engine written in pure python. +Home-page: http://jinja.pocoo.org/ +Author: Armin Ronacher +Author-email: armin.ronacher@active-4.com +License: BSD +Description-Content-Type: UNKNOWN +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Web Environment +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Text Processing :: Markup :: HTML +Requires-Dist: MarkupSafe (>=0.23) +Provides-Extra: i18n +Requires-Dist: Babel (>=0.8); extra == 'i18n' + + +Jinja2 +~~~~~~ + +Jinja2 is a template engine written in pure Python. It provides a +`Django`_ inspired non-XML syntax but supports inline expressions and +an optional `sandboxed`_ environment. + +Nutshell +-------- + +Here a small example of a Jinja template:: + + {% extends 'base.html' %} + {% block title %}Memberlist{% endblock %} + {% block content %} + + {% endblock %} + +Philosophy +---------- + +Application logic is for the controller but don't try to make the life +for the template designer too hard by giving him too few functionality. + +For more informations visit the new `Jinja2 webpage`_ and `documentation`_. + +.. _sandboxed: https://en.wikipedia.org/wiki/Sandbox_(computer_security) +.. _Django: https://www.djangoproject.com/ +.. _Jinja2 webpage: http://jinja.pocoo.org/ +.. 
_documentation: http://jinja.pocoo.org/2/documentation/ + + diff --git a/venv/Lib/site-packages/Jinja2-2.10.dist-info/RECORD b/venv/Lib/site-packages/Jinja2-2.10.dist-info/RECORD new file mode 100644 index 00000000..50b16fa5 --- /dev/null +++ b/venv/Lib/site-packages/Jinja2-2.10.dist-info/RECORD @@ -0,0 +1,63 @@ +Jinja2-2.10.dist-info/DESCRIPTION.rst,sha256=b5ckFDoM7vVtz_mAsJD4OPteFKCqE7beu353g4COoYI,978 +Jinja2-2.10.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +Jinja2-2.10.dist-info/LICENSE.txt,sha256=JvzUNv3Io51EiWrAPm8d_SXjhJnEjyDYvB3Tvwqqils,1554 +Jinja2-2.10.dist-info/METADATA,sha256=18EgU8zR6-av-0-5y_gXebzK4GnBB_76lALUsl-6QHM,2258 +Jinja2-2.10.dist-info/RECORD,, +Jinja2-2.10.dist-info/WHEEL,sha256=kdsN-5OJAZIiHN-iO4Rhl82KyS0bDWf4uBwMbkNafr8,110 +Jinja2-2.10.dist-info/entry_points.txt,sha256=NdzVcOrqyNyKDxD09aERj__3bFx2paZhizFDsKmVhiA,72 +Jinja2-2.10.dist-info/metadata.json,sha256=NPUJ9TMBxVQAv_kTJzvU8HwmP-4XZvbK9mz6_4YUVl4,1473 +Jinja2-2.10.dist-info/top_level.txt,sha256=PkeVWtLb3-CqjWi1fO29OCbj55EhX_chhKrCdrVe_zs,7 +jinja2/__init__.py,sha256=xJHjaMoy51_KXn1wf0cysH6tUUifUxZCwSOfcJGEYZw,2614 +jinja2/__pycache__/__init__.cpython-37.pyc,, +jinja2/__pycache__/_compat.cpython-37.pyc,, +jinja2/__pycache__/_identifier.cpython-37.pyc,, +jinja2/__pycache__/asyncfilters.cpython-37.pyc,, +jinja2/__pycache__/asyncsupport.cpython-37.pyc,, +jinja2/__pycache__/bccache.cpython-37.pyc,, +jinja2/__pycache__/compiler.cpython-37.pyc,, +jinja2/__pycache__/constants.cpython-37.pyc,, +jinja2/__pycache__/debug.cpython-37.pyc,, +jinja2/__pycache__/defaults.cpython-37.pyc,, +jinja2/__pycache__/environment.cpython-37.pyc,, +jinja2/__pycache__/exceptions.cpython-37.pyc,, +jinja2/__pycache__/ext.cpython-37.pyc,, +jinja2/__pycache__/filters.cpython-37.pyc,, +jinja2/__pycache__/idtracking.cpython-37.pyc,, +jinja2/__pycache__/lexer.cpython-37.pyc,, +jinja2/__pycache__/loaders.cpython-37.pyc,, +jinja2/__pycache__/meta.cpython-37.pyc,, +jinja2/__pycache__/nativetypes.cpython-37.pyc,, +jinja2/__pycache__/nodes.cpython-37.pyc,, +jinja2/__pycache__/optimizer.cpython-37.pyc,, +jinja2/__pycache__/parser.cpython-37.pyc,, +jinja2/__pycache__/runtime.cpython-37.pyc,, +jinja2/__pycache__/sandbox.cpython-37.pyc,, +jinja2/__pycache__/tests.cpython-37.pyc,, +jinja2/__pycache__/utils.cpython-37.pyc,, +jinja2/__pycache__/visitor.cpython-37.pyc,, +jinja2/_compat.py,sha256=xP60CE5Qr8FTYcDE1f54tbZLKGvMwYml4-8T7Q4KG9k,2596 +jinja2/_identifier.py,sha256=W1QBSY-iJsyt6oR_nKSuNNCzV95vLIOYgUNPUI1d5gU,1726 +jinja2/asyncfilters.py,sha256=cTDPvrS8Hp_IkwsZ1m9af_lr5nHysw7uTa5gV0NmZVE,4144 +jinja2/asyncsupport.py,sha256=UErQ3YlTLaSjFb94P4MVn08-aVD9jJxty2JVfMRb-1M,7878 +jinja2/bccache.py,sha256=nQldx0ZRYANMyfvOihRoYFKSlUdd5vJkS7BjxNwlOZM,12794 +jinja2/compiler.py,sha256=BqC5U6JxObSRhblyT_a6Tp5GtEU5z3US1a4jLQaxxgo,65386 +jinja2/constants.py,sha256=uwwV8ZUhHhacAuz5PTwckfsbqBaqM7aKfyJL7kGX5YQ,1626 +jinja2/debug.py,sha256=WTVeUFGUa4v6ReCsYv-iVPa3pkNB75OinJt3PfxNdXs,12045 +jinja2/defaults.py,sha256=Em-95hmsJxIenDCZFB1YSvf9CNhe9rBmytN3yUrBcWA,1400 +jinja2/environment.py,sha256=VnkAkqw8JbjZct4tAyHlpBrka2vqB-Z58RAP-32P1ZY,50849 +jinja2/exceptions.py,sha256=_Rj-NVi98Q6AiEjYQOsP8dEIdu5AlmRHzcSNOPdWix4,4428 +jinja2/ext.py,sha256=atMQydEC86tN1zUsdQiHw5L5cF62nDbqGue25Yiu3N4,24500 +jinja2/filters.py,sha256=yOAJk0MsH-_gEC0i0U6NweVQhbtYaC-uE8xswHFLF4w,36528 +jinja2/idtracking.py,sha256=2GbDSzIvGArEBGLkovLkqEfmYxmWsEf8c3QZwM4uNsw,9197 +jinja2/lexer.py,sha256=ySEPoXd1g7wRjsuw23uimS6nkGN5aqrYwcOKxCaVMBQ,28559 
+jinja2/loaders.py,sha256=xiTuURKAEObyym0nU8PCIXu_Qp8fn0AJ5oIADUUm-5Q,17382 +jinja2/meta.py,sha256=fmKHxkmZYAOm9QyWWy8EMd6eefAIh234rkBMW2X4ZR8,4340 +jinja2/nativetypes.py,sha256=_sJhS8f-8Q0QMIC0dm1YEdLyxEyoO-kch8qOL5xUDfE,7308 +jinja2/nodes.py,sha256=L10L_nQDfubLhO3XjpF9qz46FSh2clL-3e49ogVlMmA,30853 +jinja2/optimizer.py,sha256=MsdlFACJ0FRdPtjmCAdt7JQ9SGrXFaDNUaslsWQaG3M,1722 +jinja2/parser.py,sha256=lPzTEbcpTRBLw8ii6OYyExHeAhaZLMA05Hpv4ll3ULk,35875 +jinja2/runtime.py,sha256=DHdD38Pq8gj7uWQC5usJyWFoNWL317A9AvXOW_CLB34,27755 +jinja2/sandbox.py,sha256=TVyZHlNqqTzsv9fv2NvJNmSdWRHTguhyMHdxjWms32U,16708 +jinja2/tests.py,sha256=iJQLwbapZr-EKquTG_fVOVdwHUUKf3SX9eNkjQDF8oU,4237 +jinja2/utils.py,sha256=q24VupGZotQ-uOyrJxCaXtDWhZC1RgsQG7kcdmjck2Q,20629 +jinja2/visitor.py,sha256=JD1H1cANA29JcntFfN5fPyqQxB4bI4wC00BzZa-XHks,3316 diff --git a/venv/Lib/site-packages/Jinja2-2.10.dist-info/WHEEL b/venv/Lib/site-packages/Jinja2-2.10.dist-info/WHEEL new file mode 100644 index 00000000..7332a419 --- /dev/null +++ b/venv/Lib/site-packages/Jinja2-2.10.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.30.0) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/venv/Lib/site-packages/Jinja2-2.10.dist-info/entry_points.txt b/venv/Lib/site-packages/Jinja2-2.10.dist-info/entry_points.txt new file mode 100644 index 00000000..32e6b753 --- /dev/null +++ b/venv/Lib/site-packages/Jinja2-2.10.dist-info/entry_points.txt @@ -0,0 +1,4 @@ + + [babel.extractors] + jinja2 = jinja2.ext:babel_extract[i18n] + \ No newline at end of file diff --git a/venv/Lib/site-packages/Jinja2-2.10.dist-info/metadata.json b/venv/Lib/site-packages/Jinja2-2.10.dist-info/metadata.json new file mode 100644 index 00000000..7f5dc387 --- /dev/null +++ b/venv/Lib/site-packages/Jinja2-2.10.dist-info/metadata.json @@ -0,0 +1 @@ +{"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Topic :: Internet :: WWW/HTTP :: Dynamic Content", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Text Processing :: Markup :: HTML"], "description_content_type": "UNKNOWN", "extensions": {"python.details": {"contacts": [{"email": "armin.ronacher@active-4.com", "name": "Armin Ronacher", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst", "license": "LICENSE.txt"}, "project_urls": {"Home": "http://jinja.pocoo.org/"}}, "python.exports": {"babel.extractors": {"jinja2": "jinja2.ext:babel_extract [i18n]"}}}, "extras": ["i18n"], "generator": "bdist_wheel (0.30.0)", "license": "BSD", "metadata_version": "2.0", "name": "Jinja2", "run_requires": [{"extra": "i18n", "requires": ["Babel (>=0.8)"]}, {"requires": ["MarkupSafe (>=0.23)"]}], "summary": "A small but fast and easy to use stand-alone template engine written in pure python.", "version": "2.10"} \ No newline at end of file diff --git a/venv/Lib/site-packages/Jinja2-2.10.dist-info/top_level.txt b/venv/Lib/site-packages/Jinja2-2.10.dist-info/top_level.txt new file mode 100644 index 00000000..7f7afbf3 --- /dev/null +++ 
b/venv/Lib/site-packages/Jinja2-2.10.dist-info/top_level.txt @@ -0,0 +1 @@ +jinja2 diff --git a/venv/Lib/site-packages/MarkupSafe-1.0-py3.7.egg-info/PKG-INFO b/venv/Lib/site-packages/MarkupSafe-1.0-py3.7.egg-info/PKG-INFO new file mode 100644 index 00000000..6f2568f6 --- /dev/null +++ b/venv/Lib/site-packages/MarkupSafe-1.0-py3.7.egg-info/PKG-INFO @@ -0,0 +1,133 @@ +Metadata-Version: 1.1 +Name: MarkupSafe +Version: 1.0 +Summary: Implements a XML/HTML/XHTML Markup safe string for Python +Home-page: http://github.com/pallets/markupsafe +Author: Armin Ronacher +Author-email: armin.ronacher@active-4.com +License: BSD +Description: MarkupSafe + ========== + + Implements a unicode subclass that supports HTML strings: + + .. code-block:: python + + >>> from markupsafe import Markup, escape + >>> escape("") + Markup(u'<script>alert(document.cookie);</script>') + >>> tmpl = Markup("%s") + >>> tmpl % "Peter > Lustig" + Markup(u'Peter > Lustig') + + If you want to make an object unicode that is not yet unicode + but don't want to lose the taint information, you can use the + ``soft_unicode`` function. (On Python 3 you can also use ``soft_str`` which + is a different name for the same function). + + .. code-block:: python + + >>> from markupsafe import soft_unicode + >>> soft_unicode(42) + u'42' + >>> soft_unicode(Markup('foo')) + Markup(u'foo') + + HTML Representations + -------------------- + + Objects can customize their HTML markup equivalent by overriding + the ``__html__`` function: + + .. code-block:: python + + >>> class Foo(object): + ... def __html__(self): + ... return 'Nice' + ... + >>> escape(Foo()) + Markup(u'Nice') + >>> Markup(Foo()) + Markup(u'Nice') + + Silent Escapes + -------------- + + Since MarkupSafe 0.10 there is now also a separate escape function + called ``escape_silent`` that returns an empty string for ``None`` for + consistency with other systems that return empty strings for ``None`` + when escaping (for instance Pylons' webhelpers). + + If you also want to use this for the escape method of the Markup + object, you can create your own subclass that does that: + + .. code-block:: python + + from markupsafe import Markup, escape_silent as escape + + class SilentMarkup(Markup): + __slots__ = () + + @classmethod + def escape(cls, s): + return cls(escape(s)) + + New-Style String Formatting + --------------------------- + + Starting with MarkupSafe 0.21 new style string formats from Python 2.6 and + 3.x are now fully supported. Previously the escape behavior of those + functions was spotty at best. The new implementations operates under the + following algorithm: + + 1. if an object has an ``__html_format__`` method it is called as + replacement for ``__format__`` with the format specifier. It either + has to return a string or markup object. + 2. if an object has an ``__html__`` method it is called. + 3. otherwise the default format system of Python kicks in and the result + is HTML escaped. + + Here is how you can implement your own formatting: + + .. code-block:: python + + class User(object): + + def __init__(self, id, username): + self.id = id + self.username = username + + def __html_format__(self, format_spec): + if format_spec == 'link': + return Markup('{1}').format( + self.id, + self.__html__(), + ) + elif format_spec: + raise ValueError('Invalid format spec') + return self.__html__() + + def __html__(self): + return Markup('{0}').format(self.username) + + And to format that user: + + .. code-block:: python + + >>> user = User(1, 'foo') + >>> Markup('

<p>User: {0:link}').format(user) + Markup(u'<p>
User: foo') + + Markupsafe supports Python 2.6, 2.7 and Python 3.3 and higher. + +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Web Environment +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Text Processing :: Markup :: HTML diff --git a/venv/Lib/site-packages/MarkupSafe-1.0-py3.7.egg-info/SOURCES.txt b/venv/Lib/site-packages/MarkupSafe-1.0-py3.7.egg-info/SOURCES.txt new file mode 100644 index 00000000..210b339c --- /dev/null +++ b/venv/Lib/site-packages/MarkupSafe-1.0-py3.7.egg-info/SOURCES.txt @@ -0,0 +1,18 @@ +AUTHORS +CHANGES +LICENSE +MANIFEST.in +README.rst +setup.cfg +setup.py +tests.py +MarkupSafe.egg-info/PKG-INFO +MarkupSafe.egg-info/SOURCES.txt +MarkupSafe.egg-info/dependency_links.txt +MarkupSafe.egg-info/not-zip-safe +MarkupSafe.egg-info/top_level.txt +markupsafe/__init__.py +markupsafe/_compat.py +markupsafe/_constants.py +markupsafe/_native.py +markupsafe/_speedups.c \ No newline at end of file diff --git a/venv/Lib/site-packages/MarkupSafe-1.0-py3.7.egg-info/dependency_links.txt b/venv/Lib/site-packages/MarkupSafe-1.0-py3.7.egg-info/dependency_links.txt new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/venv/Lib/site-packages/MarkupSafe-1.0-py3.7.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/venv/Lib/site-packages/MarkupSafe-1.0-py3.7.egg-info/installed-files.txt b/venv/Lib/site-packages/MarkupSafe-1.0-py3.7.egg-info/installed-files.txt new file mode 100644 index 00000000..316db21c --- /dev/null +++ b/venv/Lib/site-packages/MarkupSafe-1.0-py3.7.egg-info/installed-files.txt @@ -0,0 +1,14 @@ +..\markupsafe\__init__.py +..\markupsafe\__pycache__\__init__.cpython-37.pyc +..\markupsafe\__pycache__\_compat.cpython-37.pyc +..\markupsafe\__pycache__\_constants.cpython-37.pyc +..\markupsafe\__pycache__\_native.cpython-37.pyc +..\markupsafe\_compat.py +..\markupsafe\_constants.py +..\markupsafe\_native.py +..\markupsafe\_speedups.c +PKG-INFO +SOURCES.txt +dependency_links.txt +not-zip-safe +top_level.txt diff --git a/venv/Lib/site-packages/MarkupSafe-1.0-py3.7.egg-info/not-zip-safe b/venv/Lib/site-packages/MarkupSafe-1.0-py3.7.egg-info/not-zip-safe new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/venv/Lib/site-packages/MarkupSafe-1.0-py3.7.egg-info/not-zip-safe @@ -0,0 +1 @@ + diff --git a/venv/Lib/site-packages/MarkupSafe-1.0-py3.7.egg-info/top_level.txt b/venv/Lib/site-packages/MarkupSafe-1.0-py3.7.egg-info/top_level.txt new file mode 100644 index 00000000..75bf7292 --- /dev/null +++ b/venv/Lib/site-packages/MarkupSafe-1.0-py3.7.egg-info/top_level.txt @@ -0,0 +1 @@ +markupsafe diff --git a/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/DESCRIPTION.rst b/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/DESCRIPTION.rst new file mode 100644 index 00000000..675f08d1 --- /dev/null +++ b/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/DESCRIPTION.rst @@ -0,0 +1,80 @@ +Werkzeug +======== + +Werkzeug is a comprehensive `WSGI`_ web application library. It began as +a simple collection of various utilities for WSGI applications and has +become one of the most advanced WSGI utility libraries. 
+ +It includes: + +* An interactive debugger that allows inspecting stack traces and source + code in the browser with an interactive interpreter for any frame in + the stack. +* A full-featured request object with objects to interact with headers, + query args, form data, files, and cookies. +* A response object that can wrap other WSGI applications and handle + streaming data. +* A routing system for matching URLs to endpoints and generating URLs + for endpoints, with an extensible system for capturing variables from + URLs. +* HTTP utilities to handle entity tags, cache control, dates, user + agents, cookies, files, and more. +* A threaded WSGI server for use while developing applications locally. +* A test client for simulating HTTP requests during testing without + requiring running a server. + +Werkzeug is Unicode aware and doesn't enforce any dependencies. It is up +to the developer to choose a template engine, database adapter, and even +how to handle requests. It can be used to build all sorts of end user +applications such as blogs, wikis, or bulletin boards. + +`Flask`_ wraps Werkzeug, using it to handle the details of WSGI while +providing more structure and patterns for defining powerful +applications. + + +Installing +---------- + +Install and update using `pip`_: + +.. code-block:: text + + pip install -U Werkzeug + + +A Simple Example +---------------- + +.. code-block:: python + + from werkzeug.wrappers import Request, Response + + @Request.application + def application(request): + return Response('Hello, World!') + + if __name__ == '__main__': + from werkzeug.serving import run_simple + run_simple('localhost', 4000, application) + + +Links +----- + +* Website: https://www.palletsprojects.com/p/werkzeug/ +* Releases: https://pypi.org/project/Werkzeug/ +* Code: https://github.com/pallets/werkzeug +* Issue tracker: https://github.com/pallets/werkzeug/issues +* Test status: + + * Linux, Mac: https://travis-ci.org/pallets/werkzeug + * Windows: https://ci.appveyor.com/project/davidism/werkzeug + +* Test coverage: https://codecov.io/gh/pallets/werkzeug + +.. _WSGI: https://wsgi.readthedocs.io/en/latest/ +.. _Flask: https://www.palletsprojects.com/p/flask/ +.. _pip: https://pip.pypa.io/en/stable/quickstart/ + + diff --git a/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/INSTALLER b/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/INSTALLER new file mode 100644 index 00000000..a1b589e3 --- /dev/null +++ b/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/LICENSE.txt b/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/LICENSE.txt new file mode 100644 index 00000000..1cc75bb0 --- /dev/null +++ b/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/LICENSE.txt @@ -0,0 +1,31 @@ +Copyright © 2007 by the Pallets team. + +Some rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND +CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, +BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF +USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE AND DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. diff --git a/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/METADATA b/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/METADATA new file mode 100644 index 00000000..bfc3c4e8 --- /dev/null +++ b/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/METADATA @@ -0,0 +1,116 @@ +Metadata-Version: 2.0 +Name: Werkzeug +Version: 0.14.1 +Summary: The comprehensive WSGI web application library. +Home-page: https://www.palletsprojects.org/p/werkzeug/ +Author: Armin Ronacher +Author-email: armin.ronacher@active-4.com +License: BSD +Description-Content-Type: UNKNOWN +Platform: any +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Web Environment +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Provides-Extra: dev +Requires-Dist: coverage; extra == 'dev' +Requires-Dist: pytest; extra == 'dev' +Requires-Dist: sphinx; extra == 'dev' +Requires-Dist: tox; extra == 'dev' +Provides-Extra: termcolor +Requires-Dist: termcolor; extra == 'termcolor' +Provides-Extra: watchdog +Requires-Dist: watchdog; extra == 'watchdog' + +Werkzeug +======== + +Werkzeug is a comprehensive `WSGI`_ web application library. It began as +a simple collection of various utilities for WSGI applications and has +become one of the most advanced WSGI utility libraries. + +It includes: + +* An interactive debugger that allows inspecting stack traces and source + code in the browser with an interactive interpreter for any frame in + the stack. +* A full-featured request object with objects to interact with headers, + query args, form data, files, and cookies. +* A response object that can wrap other WSGI applications and handle + streaming data. +* A routing system for matching URLs to endpoints and generating URLs + for endpoints, with an extensible system for capturing variables from + URLs. 
+* HTTP utilities to handle entity tags, cache control, dates, user + agents, cookies, files, and more. +* A threaded WSGI server for use while developing applications locally. +* A test client for simulating HTTP requests during testing without + requiring running a server. + +Werkzeug is Unicode aware and doesn't enforce any dependencies. It is up +to the developer to choose a template engine, database adapter, and even +how to handle requests. It can be used to build all sorts of end user +applications such as blogs, wikis, or bulletin boards. + +`Flask`_ wraps Werkzeug, using it to handle the details of WSGI while +providing more structure and patterns for defining powerful +applications. + + +Installing +---------- + +Install and update using `pip`_: + +.. code-block:: text + + pip install -U Werkzeug + + +A Simple Example +---------------- + +.. code-block:: python + + from werkzeug.wrappers import Request, Response + + @Request.application + def application(request): + return Response('Hello, World!') + + if __name__ == '__main__': + from werkzeug.serving import run_simple + run_simple('localhost', 4000, application) + + +Links +----- + +* Website: https://www.palletsprojects.com/p/werkzeug/ +* Releases: https://pypi.org/project/Werkzeug/ +* Code: https://github.com/pallets/werkzeug +* Issue tracker: https://github.com/pallets/werkzeug/issues +* Test status: + + * Linux, Mac: https://travis-ci.org/pallets/werkzeug + * Windows: https://ci.appveyor.com/project/davidism/werkzeug + +* Test coverage: https://codecov.io/gh/pallets/werkzeug + +.. _WSGI: https://wsgi.readthedocs.io/en/latest/ +.. _Flask: https://www.palletsprojects.com/p/flask/ +.. _pip: https://pip.pypa.io/en/stable/quickstart/ + + diff --git a/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/RECORD b/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/RECORD new file mode 100644 index 00000000..247ea821 --- /dev/null +++ b/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/RECORD @@ -0,0 +1,97 @@ +Werkzeug-0.14.1.dist-info/DESCRIPTION.rst,sha256=rOCN36jwsWtWsTpqPG96z7FMilB5qI1CIARSKRuUmz8,2452 +Werkzeug-0.14.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +Werkzeug-0.14.1.dist-info/LICENSE.txt,sha256=xndz_dD4m269AF9l_Xbl5V3tM1N3C1LoZC2PEPxWO-8,1534 +Werkzeug-0.14.1.dist-info/METADATA,sha256=FbfadrPdJNUWAxMOKxGUtHe5R3IDSBKYYmAz3FvI3uY,3872 +Werkzeug-0.14.1.dist-info/RECORD,, +Werkzeug-0.14.1.dist-info/WHEEL,sha256=GrqQvamwgBV4nLoJe0vhYRSWzWsx7xjlt74FT0SWYfE,110 +Werkzeug-0.14.1.dist-info/metadata.json,sha256=4489UTt6HBp2NQil95-pBkjU4Je93SMHvMxZ_rjOpqA,1452 +Werkzeug-0.14.1.dist-info/top_level.txt,sha256=QRyj2VjwJoQkrwjwFIOlB8Xg3r9un0NtqVHQF-15xaw,9 +werkzeug/__init__.py,sha256=NR0d4n_-U9BLVKlOISean3zUt2vBwhvK-AZE6M0sC0k,6842 +werkzeug/__pycache__/__init__.cpython-37.pyc,, +werkzeug/__pycache__/_compat.cpython-37.pyc,, +werkzeug/__pycache__/_internal.cpython-37.pyc,, +werkzeug/__pycache__/_reloader.cpython-37.pyc,, +werkzeug/__pycache__/datastructures.cpython-37.pyc,, +werkzeug/__pycache__/exceptions.cpython-37.pyc,, +werkzeug/__pycache__/filesystem.cpython-37.pyc,, +werkzeug/__pycache__/formparser.cpython-37.pyc,, +werkzeug/__pycache__/http.cpython-37.pyc,, +werkzeug/__pycache__/local.cpython-37.pyc,, +werkzeug/__pycache__/posixemulation.cpython-37.pyc,, +werkzeug/__pycache__/routing.cpython-37.pyc,, +werkzeug/__pycache__/script.cpython-37.pyc,, +werkzeug/__pycache__/security.cpython-37.pyc,, +werkzeug/__pycache__/serving.cpython-37.pyc,, +werkzeug/__pycache__/test.cpython-37.pyc,, 
+werkzeug/__pycache__/testapp.cpython-37.pyc,, +werkzeug/__pycache__/urls.cpython-37.pyc,, +werkzeug/__pycache__/useragents.cpython-37.pyc,, +werkzeug/__pycache__/utils.cpython-37.pyc,, +werkzeug/__pycache__/websocket.cpython-37.pyc,, +werkzeug/__pycache__/wrappers.cpython-37.pyc,, +werkzeug/__pycache__/wsgi.cpython-37.pyc,, +werkzeug/_compat.py,sha256=8c4U9o6A_TR9nKCcTbpZNxpqCXcXDVIbFawwKM2s92c,6311 +werkzeug/_internal.py,sha256=GhEyGMlsSz_tYjsDWO9TG35VN7304MM8gjKDrXLEdVc,13873 +werkzeug/_reloader.py,sha256=AyPphcOHPbu6qzW0UbrVvTDJdre5WgpxbhIJN_TqzUc,9264 +werkzeug/contrib/__init__.py,sha256=f7PfttZhbrImqpr5Ezre8CXgwvcGUJK7zWNpO34WWrw,623 +werkzeug/contrib/__pycache__/__init__.cpython-37.pyc,, +werkzeug/contrib/__pycache__/atom.cpython-37.pyc,, +werkzeug/contrib/__pycache__/cache.cpython-37.pyc,, +werkzeug/contrib/__pycache__/fixers.cpython-37.pyc,, +werkzeug/contrib/__pycache__/iterio.cpython-37.pyc,, +werkzeug/contrib/__pycache__/jsrouting.cpython-37.pyc,, +werkzeug/contrib/__pycache__/limiter.cpython-37.pyc,, +werkzeug/contrib/__pycache__/lint.cpython-37.pyc,, +werkzeug/contrib/__pycache__/profiler.cpython-37.pyc,, +werkzeug/contrib/__pycache__/securecookie.cpython-37.pyc,, +werkzeug/contrib/__pycache__/sessions.cpython-37.pyc,, +werkzeug/contrib/__pycache__/testtools.cpython-37.pyc,, +werkzeug/contrib/__pycache__/wrappers.cpython-37.pyc,, +werkzeug/contrib/atom.py,sha256=qqfJcfIn2RYY-3hO3Oz0aLq9YuNubcPQ_KZcNsDwVJo,15575 +werkzeug/contrib/cache.py,sha256=xBImHNj09BmX_7kC5NUCx8f_l4L8_O7zi0jCL21UZKE,32163 +werkzeug/contrib/fixers.py,sha256=gR06T-w71ur-tHQ_31kP_4jpOncPJ4Wc1dOqTvYusr8,10179 +werkzeug/contrib/iterio.py,sha256=RlqDvGhz0RneTpzE8dVc-yWCUv4nkPl1jEc_EDp2fH0,10814 +werkzeug/contrib/jsrouting.py,sha256=QTmgeDoKXvNK02KzXgx9lr3cAH6fAzpwF5bBdPNvJPs,8564 +werkzeug/contrib/limiter.py,sha256=iS8-ahPZ-JLRnmfIBzxpm7O_s3lPsiDMVWv7llAIDCI,1334 +werkzeug/contrib/lint.py,sha256=Mj9NeUN7s4zIUWeQOAVjrmtZIcl3Mm2yDe9BSIr9YGE,12558 +werkzeug/contrib/profiler.py,sha256=ISwCWvwVyGpDLRBRpLjo_qUWma6GXYBrTAco4PEQSHY,5151 +werkzeug/contrib/securecookie.py,sha256=uWMyHDHY3lkeBRiCSayGqWkAIy4a7xAbSE_Hln9ecqc,12196 +werkzeug/contrib/sessions.py,sha256=39LVNvLbm5JWpbxM79WC2l87MJFbqeISARjwYbkJatw,12577 +werkzeug/contrib/testtools.py,sha256=G9xN-qeihJlhExrIZMCahvQOIDxdL9NiX874jiiHFMs,2453 +werkzeug/contrib/wrappers.py,sha256=v7OYlz7wQtDlS9fey75UiRZ1IkUWqCpzbhsLy4k14Hw,10398 +werkzeug/datastructures.py,sha256=3IgNKNqrz-ZjmAG7y3YgEYK-enDiMT_b652PsypWcYg,90080 +werkzeug/debug/__init__.py,sha256=uSn9BqCZ5E3ySgpoZtundpROGsn-uYvZtSFiTfAX24M,17452 +werkzeug/debug/__pycache__/__init__.cpython-37.pyc,, +werkzeug/debug/__pycache__/console.cpython-37.pyc,, +werkzeug/debug/__pycache__/repr.cpython-37.pyc,, +werkzeug/debug/__pycache__/tbtools.cpython-37.pyc,, +werkzeug/debug/console.py,sha256=n3-dsKk1TsjnN-u4ZgmuWCU_HO0qw5IA7ttjhyyMM6I,5607 +werkzeug/debug/repr.py,sha256=bKqstDYGfECpeLerd48s_hxuqK4b6UWnjMu3d_DHO8I,9340 +werkzeug/debug/shared/FONT_LICENSE,sha256=LwAVEI1oYnvXiNMT9SnCH_TaLCxCpeHziDrMg0gPkAI,4673 +werkzeug/debug/shared/console.png,sha256=bxax6RXXlvOij_KeqvSNX0ojJf83YbnZ7my-3Gx9w2A,507 +werkzeug/debug/shared/debugger.js,sha256=PKPVYuyO4SX1hkqLOwCLvmIEO5154WatFYaXE-zIfKI,6264 +werkzeug/debug/shared/jquery.js,sha256=7LkWEzqTdpEfELxcZZlS6wAx5Ff13zZ83lYO2_ujj7g,95957 +werkzeug/debug/shared/less.png,sha256=-4-kNRaXJSONVLahrQKUxMwXGm9R4OnZ9SxDGpHlIR4,191 +werkzeug/debug/shared/more.png,sha256=GngN7CioHQoV58rH6ojnkYi8c_qED2Aka5FO5UXrReY,200 
+werkzeug/debug/shared/source.png,sha256=RoGcBTE4CyCB85GBuDGTFlAnUqxwTBiIfDqW15EpnUQ,818 +werkzeug/debug/shared/style.css,sha256=IEO0PC2pWmh2aEyGCaN--txuWsRCliuhlbEhPDFwh0A,6270 +werkzeug/debug/shared/ubuntu.ttf,sha256=1eaHFyepmy4FyDvjLVzpITrGEBu_CZYY94jE0nED1c0,70220 +werkzeug/debug/tbtools.py,sha256=rBudXCmkVdAKIcdhxANxgf09g6kQjJWW9_5bjSpr4OY,18451 +werkzeug/exceptions.py,sha256=3wp95Hqj9FqV8MdikV99JRcHse_fSMn27V8tgP5Hw2c,20505 +werkzeug/filesystem.py,sha256=hHWeWo_gqLMzTRfYt8-7n2wWcWUNTnDyudQDLOBEICE,2175 +werkzeug/formparser.py,sha256=mUuCwjzjb8_E4RzrAT2AioLuZSYpqR1KXTK6LScRYzA,21722 +werkzeug/http.py,sha256=RQg4MJuhRv2isNRiEh__Phh09ebpfT3Kuu_GfrZ54_c,40079 +werkzeug/local.py,sha256=QdQhWV5L8p1Y1CJ1CDStwxaUs24SuN5aebHwjVD08C8,14553 +werkzeug/posixemulation.py,sha256=xEF2Bxc-vUCPkiu4IbfWVd3LW7DROYAT-ExW6THqyzw,3519 +werkzeug/routing.py,sha256=2JVtdSgxKGeANy4Z_FP-dKESvKtkYGCZ1J2fARCLGCY,67214 +werkzeug/script.py,sha256=DwaVDcXdaOTffdNvlBdLitxWXjKaRVT32VbhDtljFPY,11365 +werkzeug/security.py,sha256=0m107exslz4QJLWQCpfQJ04z3re4eGHVggRvrQVAdWc,9193 +werkzeug/serving.py,sha256=A0flnIJHufdn2QJ9oeuHfrXwP3LzP8fn3rNW6hbxKUg,31926 +werkzeug/test.py,sha256=XmECSmnpASiYQTct4oMiWr0LT5jHWCtKqnpYKZd2ui8,36100 +werkzeug/testapp.py,sha256=3HQRW1sHZKXuAjCvFMet4KXtQG3loYTFnvn6LWt-4zI,9396 +werkzeug/urls.py,sha256=dUeLg2IeTm0WLmSvFeD4hBZWGdOs-uHudR5-t8n9zPo,36771 +werkzeug/useragents.py,sha256=BhYMf4cBTHyN4U0WsQedePIocmNlH_34C-UwqSThGCc,5865 +werkzeug/utils.py,sha256=BrY1j0DHQ8RTb0K1StIobKuMJhN9SQQkWEARbrh2qpk,22972 +werkzeug/websocket.py,sha256=PpSeDxXD_0UsPAa5hQhQNM6mxibeUgn8lA8eRqiS0vM,11344 +werkzeug/wrappers.py,sha256=kbyL_aFjxELwPgMwfNCYjKu-CR6kNkh-oO8wv3GXbk8,84511 +werkzeug/wsgi.py,sha256=1Nob-aeChWQf7MsiicO8RZt6J90iRzEcik44ev9Qu8s,49347 diff --git a/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/WHEEL b/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/WHEEL new file mode 100644 index 00000000..0de529b1 --- /dev/null +++ b/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.26.0) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/metadata.json b/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/metadata.json new file mode 100644 index 00000000..bca8d126 --- /dev/null +++ b/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/metadata.json @@ -0,0 +1 @@ +{"generator": "bdist_wheel (0.26.0)", "summary": "The comprehensive WSGI web application library.", "classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Topic :: Internet :: WWW/HTTP :: Dynamic Content", "Topic :: Software Development :: Libraries :: Python Modules"], "description_content_type": "UNKNOWN", "extensions": {"python.details": {"project_urls": {"Home": "https://www.palletsprojects.org/p/werkzeug/"}, "contacts": [{"email": "armin.ronacher@active-4.com", "name": "Armin Ronacher", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst", "license": "LICENSE.txt"}}}, "license": "BSD", 
"metadata_version": "2.0", "name": "Werkzeug", "platform": "any", "extras": ["dev", "termcolor", "watchdog"], "run_requires": [{"requires": ["coverage", "pytest", "sphinx", "tox"], "extra": "dev"}, {"requires": ["termcolor"], "extra": "termcolor"}, {"requires": ["watchdog"], "extra": "watchdog"}], "version": "0.14.1"} \ No newline at end of file diff --git a/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/top_level.txt b/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/top_level.txt new file mode 100644 index 00000000..6fe8da84 --- /dev/null +++ b/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/top_level.txt @@ -0,0 +1 @@ +werkzeug diff --git a/venv/Lib/site-packages/__pycache__/easy_install.cpython-37.pyc b/venv/Lib/site-packages/__pycache__/easy_install.cpython-37.pyc new file mode 100644 index 00000000..4fa21647 Binary files /dev/null and b/venv/Lib/site-packages/__pycache__/easy_install.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/__pycache__/mccabe.cpython-37.pyc b/venv/Lib/site-packages/__pycache__/mccabe.cpython-37.pyc new file mode 100644 index 00000000..8c2671e5 Binary files /dev/null and b/venv/Lib/site-packages/__pycache__/mccabe.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/__pycache__/pep8.cpython-37.pyc b/venv/Lib/site-packages/__pycache__/pep8.cpython-37.pyc new file mode 100644 index 00000000..91c91cd2 Binary files /dev/null and b/venv/Lib/site-packages/__pycache__/pep8.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/__pycache__/pytest.cpython-37.pyc b/venv/Lib/site-packages/__pycache__/pytest.cpython-37.pyc new file mode 100644 index 00000000..44de3da5 Binary files /dev/null and b/venv/Lib/site-packages/__pycache__/pytest.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/__pycache__/six.cpython-37.pyc b/venv/Lib/site-packages/__pycache__/six.cpython-37.pyc new file mode 100644 index 00000000..0f35a416 Binary files /dev/null and b/venv/Lib/site-packages/__pycache__/six.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__init__.py b/venv/Lib/site-packages/_pytest/__init__.py new file mode 100644 index 00000000..46c7827e --- /dev/null +++ b/venv/Lib/site-packages/_pytest/__init__.py @@ -0,0 +1,8 @@ +__all__ = ["__version__"] + +try: + from ._version import version as __version__ +except ImportError: + # broken installation, we don't even try + # unknown only works because we do poor mans version compare + __version__ = "unknown" diff --git a/venv/Lib/site-packages/_pytest/__pycache__/__init__.cpython-37.pyc b/venv/Lib/site-packages/_pytest/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 00000000..d2f83112 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/__init__.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/_argcomplete.cpython-37.pyc b/venv/Lib/site-packages/_pytest/__pycache__/_argcomplete.cpython-37.pyc new file mode 100644 index 00000000..9c2fb3d4 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/_argcomplete.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/_version.cpython-37.pyc b/venv/Lib/site-packages/_pytest/__pycache__/_version.cpython-37.pyc new file mode 100644 index 00000000..01dbfcf8 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/_version.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/cacheprovider.cpython-37.pyc b/venv/Lib/site-packages/_pytest/__pycache__/cacheprovider.cpython-37.pyc new file mode 100644 index 
00000000..450ef5a7 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/cacheprovider.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/capture.cpython-37.pyc b/venv/Lib/site-packages/_pytest/__pycache__/capture.cpython-37.pyc new file mode 100644 index 00000000..2a890d6b Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/capture.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/compat.cpython-37.pyc b/venv/Lib/site-packages/_pytest/__pycache__/compat.cpython-37.pyc new file mode 100644 index 00000000..987fee1f Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/compat.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/debugging.cpython-37.pyc b/venv/Lib/site-packages/_pytest/__pycache__/debugging.cpython-37.pyc new file mode 100644 index 00000000..332c6876 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/debugging.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/deprecated.cpython-37.pyc b/venv/Lib/site-packages/_pytest/__pycache__/deprecated.cpython-37.pyc new file mode 100644 index 00000000..a58dd80a Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/deprecated.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/doctest.cpython-37.pyc b/venv/Lib/site-packages/_pytest/__pycache__/doctest.cpython-37.pyc new file mode 100644 index 00000000..f9f33f9c Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/doctest.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/fixtures.cpython-37.pyc b/venv/Lib/site-packages/_pytest/__pycache__/fixtures.cpython-37.pyc new file mode 100644 index 00000000..e68b9ad9 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/fixtures.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/freeze_support.cpython-37.pyc b/venv/Lib/site-packages/_pytest/__pycache__/freeze_support.cpython-37.pyc new file mode 100644 index 00000000..373dd4ec Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/freeze_support.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/helpconfig.cpython-37.pyc b/venv/Lib/site-packages/_pytest/__pycache__/helpconfig.cpython-37.pyc new file mode 100644 index 00000000..ac52d5d4 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/helpconfig.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/hookspec.cpython-37.pyc b/venv/Lib/site-packages/_pytest/__pycache__/hookspec.cpython-37.pyc new file mode 100644 index 00000000..c5155242 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/hookspec.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/junitxml.cpython-37.pyc b/venv/Lib/site-packages/_pytest/__pycache__/junitxml.cpython-37.pyc new file mode 100644 index 00000000..c988311c Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/junitxml.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/logging.cpython-37.pyc b/venv/Lib/site-packages/_pytest/__pycache__/logging.cpython-37.pyc new file mode 100644 index 00000000..0a43f355 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/logging.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/main.cpython-37.pyc 
b/venv/Lib/site-packages/_pytest/__pycache__/main.cpython-37.pyc new file mode 100644 index 00000000..9bcde9b0 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/main.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/monkeypatch.cpython-37.pyc b/venv/Lib/site-packages/_pytest/__pycache__/monkeypatch.cpython-37.pyc new file mode 100644 index 00000000..f05aa6f0 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/monkeypatch.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/nodes.cpython-37.pyc b/venv/Lib/site-packages/_pytest/__pycache__/nodes.cpython-37.pyc new file mode 100644 index 00000000..cfb11488 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/nodes.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/nose.cpython-37.pyc b/venv/Lib/site-packages/_pytest/__pycache__/nose.cpython-37.pyc new file mode 100644 index 00000000..241b3b55 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/nose.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/outcomes.cpython-37.pyc b/venv/Lib/site-packages/_pytest/__pycache__/outcomes.cpython-37.pyc new file mode 100644 index 00000000..ccd87e50 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/outcomes.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/pastebin.cpython-37.pyc b/venv/Lib/site-packages/_pytest/__pycache__/pastebin.cpython-37.pyc new file mode 100644 index 00000000..071ce4d0 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/pastebin.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/pathlib.cpython-37.pyc b/venv/Lib/site-packages/_pytest/__pycache__/pathlib.cpython-37.pyc new file mode 100644 index 00000000..5d957b6a Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/pathlib.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/pytester.cpython-37.pyc b/venv/Lib/site-packages/_pytest/__pycache__/pytester.cpython-37.pyc new file mode 100644 index 00000000..4f80de5e Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/pytester.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/python.cpython-37.pyc b/venv/Lib/site-packages/_pytest/__pycache__/python.cpython-37.pyc new file mode 100644 index 00000000..1b45cef7 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/python.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/python_api.cpython-37.pyc b/venv/Lib/site-packages/_pytest/__pycache__/python_api.cpython-37.pyc new file mode 100644 index 00000000..90b26023 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/python_api.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/recwarn.cpython-37.pyc b/venv/Lib/site-packages/_pytest/__pycache__/recwarn.cpython-37.pyc new file mode 100644 index 00000000..73536981 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/recwarn.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/reports.cpython-37.pyc b/venv/Lib/site-packages/_pytest/__pycache__/reports.cpython-37.pyc new file mode 100644 index 00000000..cb9ce25a Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/reports.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/resultlog.cpython-37.pyc 
b/venv/Lib/site-packages/_pytest/__pycache__/resultlog.cpython-37.pyc new file mode 100644 index 00000000..09c007d8 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/resultlog.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/runner.cpython-37.pyc b/venv/Lib/site-packages/_pytest/__pycache__/runner.cpython-37.pyc new file mode 100644 index 00000000..4bd4a333 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/runner.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/setuponly.cpython-37.pyc b/venv/Lib/site-packages/_pytest/__pycache__/setuponly.cpython-37.pyc new file mode 100644 index 00000000..c5ca09c8 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/setuponly.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/setupplan.cpython-37.pyc b/venv/Lib/site-packages/_pytest/__pycache__/setupplan.cpython-37.pyc new file mode 100644 index 00000000..77a65462 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/setupplan.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/skipping.cpython-37.pyc b/venv/Lib/site-packages/_pytest/__pycache__/skipping.cpython-37.pyc new file mode 100644 index 00000000..602fbca8 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/skipping.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/terminal.cpython-37.pyc b/venv/Lib/site-packages/_pytest/__pycache__/terminal.cpython-37.pyc new file mode 100644 index 00000000..9eacc25b Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/terminal.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/tmpdir.cpython-37.pyc b/venv/Lib/site-packages/_pytest/__pycache__/tmpdir.cpython-37.pyc new file mode 100644 index 00000000..880d085a Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/tmpdir.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/unittest.cpython-37.pyc b/venv/Lib/site-packages/_pytest/__pycache__/unittest.cpython-37.pyc new file mode 100644 index 00000000..7c141bf9 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/unittest.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/warning_types.cpython-37.pyc b/venv/Lib/site-packages/_pytest/__pycache__/warning_types.cpython-37.pyc new file mode 100644 index 00000000..1193da10 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/warning_types.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/warnings.cpython-37.pyc b/venv/Lib/site-packages/_pytest/__pycache__/warnings.cpython-37.pyc new file mode 100644 index 00000000..8873046c Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/warnings.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/_argcomplete.py b/venv/Lib/site-packages/_pytest/_argcomplete.py new file mode 100644 index 00000000..0d1c2da6 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/_argcomplete.py @@ -0,0 +1,106 @@ +"""allow bash-completion for argparse with argcomplete if installed +needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail +to find the magic string, so _ARGCOMPLETE env. var is never set, and +this does not need special code. + +Function try_argcomplete(parser) should be called directly before +the call to ArgumentParser.parse_args(). 
+ +The filescompleter is what you normally would use on the positional +arguments specification, in order to get "dirname/" after "dirn" +instead of the default "dirname ": + + optparser.add_argument(Config._file_or_dir, nargs='*' + ).completer=filescompleter + +Other, application specific, completers should go in the file +doing the add_argument calls as they need to be specified as .completer +attributes as well. (If argcomplete is not installed, the function the +attribute points to will not be used). + +SPEEDUP +======= +The generic argcomplete script for bash-completion +(/etc/bash_completion.d/python-argcomplete.sh ) +uses a python program to determine startup script generated by pip. +You can speed up completion somewhat by changing this script to include + # PYTHON_ARGCOMPLETE_OK +so the the python-argcomplete-check-easy-install-script does not +need to be called to find the entry point of the code and see if that is +marked with PYTHON_ARGCOMPLETE_OK + +INSTALL/DEBUGGING +================= +To include this support in another application that has setup.py generated +scripts: +- add the line: + # PYTHON_ARGCOMPLETE_OK + near the top of the main python entry point +- include in the file calling parse_args(): + from _argcomplete import try_argcomplete, filescompleter + , call try_argcomplete just before parse_args(), and optionally add + filescompleter to the positional arguments' add_argument() +If things do not work right away: +- switch on argcomplete debugging with (also helpful when doing custom + completers): + export _ARC_DEBUG=1 +- run: + python-argcomplete-check-easy-install-script $(which appname) + echo $? + will echo 0 if the magic line has been found, 1 if not +- sometimes it helps to find early on errors using: + _ARGCOMPLETE=1 _ARC_DEBUG=1 appname + which should throw a KeyError: 'COMPLINE' (which is properly set by the + global argcomplete script). +""" +from __future__ import absolute_import, division, print_function +import sys +import os +from glob import glob + + +class FastFilesCompleter(object): + "Fast file completer class" + + def __init__(self, directories=True): + self.directories = directories + + def __call__(self, prefix, **kwargs): + """only called on non option completions""" + if os.path.sep in prefix[1:]: + prefix_dir = len(os.path.dirname(prefix) + os.path.sep) + else: + prefix_dir = 0 + completion = [] + globbed = [] + if "*" not in prefix and "?" 
not in prefix: + # we are on unix, otherwise no bash + if not prefix or prefix[-1] == os.path.sep: + globbed.extend(glob(prefix + ".*")) + prefix += "*" + globbed.extend(glob(prefix)) + for x in sorted(globbed): + if os.path.isdir(x): + x += "/" + # append stripping the prefix (like bash, not like compgen) + completion.append(x[prefix_dir:]) + return completion + + +if os.environ.get("_ARGCOMPLETE"): + try: + import argcomplete.completers + except ImportError: + sys.exit(-1) + filescompleter = FastFilesCompleter() + + def try_argcomplete(parser): + argcomplete.autocomplete(parser, always_complete_options=False) + + +else: + + def try_argcomplete(parser): + pass + + filescompleter = None diff --git a/venv/Lib/site-packages/_pytest/_code/__init__.py b/venv/Lib/site-packages/_pytest/_code/__init__.py new file mode 100644 index 00000000..7885d51d --- /dev/null +++ b/venv/Lib/site-packages/_pytest/_code/__init__.py @@ -0,0 +1,11 @@ +""" python inspection/code generation API """ +from __future__ import absolute_import, division, print_function +from .code import Code # noqa +from .code import ExceptionInfo # noqa +from .code import Frame # noqa +from .code import Traceback # noqa +from .code import filter_traceback # noqa +from .code import getrawcode # noqa +from .source import Source # noqa +from .source import compile_ as compile # noqa +from .source import getfslineno # noqa diff --git a/venv/Lib/site-packages/_pytest/_code/__pycache__/__init__.cpython-37.pyc b/venv/Lib/site-packages/_pytest/_code/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 00000000..745fdba5 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/_code/__pycache__/__init__.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/_code/__pycache__/_py2traceback.cpython-37.pyc b/venv/Lib/site-packages/_pytest/_code/__pycache__/_py2traceback.cpython-37.pyc new file mode 100644 index 00000000..d13f7c21 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/_code/__pycache__/_py2traceback.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/_code/__pycache__/code.cpython-37.pyc b/venv/Lib/site-packages/_pytest/_code/__pycache__/code.cpython-37.pyc new file mode 100644 index 00000000..ce666503 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/_code/__pycache__/code.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/_code/__pycache__/source.cpython-37.pyc b/venv/Lib/site-packages/_pytest/_code/__pycache__/source.cpython-37.pyc new file mode 100644 index 00000000..19e58f30 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/_code/__pycache__/source.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/_code/_py2traceback.py b/venv/Lib/site-packages/_pytest/_code/_py2traceback.py new file mode 100644 index 00000000..cceed40e --- /dev/null +++ b/venv/Lib/site-packages/_pytest/_code/_py2traceback.py @@ -0,0 +1,89 @@ +# copied from python-2.7.3's traceback.py +# CHANGES: +# - some_str is replaced, trying to create unicode strings +# +from __future__ import absolute_import, division, print_function, unicode_literals +import types +from six import text_type + + +def format_exception_only(etype, value): + """Format the exception part of a traceback. + + The arguments are the exception type and value such as given by + sys.last_type and sys.last_value. The return value is a list of + strings, each ending in a newline. 
+ + Normally, the list contains a single string; however, for + SyntaxError exceptions, it contains several lines that (when + printed) display detailed information about where the syntax + error occurred. + + The message indicating which exception occurred is always the last + string in the list. + + """ + + # An instance should not have a meaningful value parameter, but + # sometimes does, particularly for string exceptions, such as + # >>> raise string1, string2 # deprecated + # + # Clear these out first because issubtype(string1, SyntaxError) + # would throw another exception and mask the original problem. + if ( + isinstance(etype, BaseException) + or isinstance(etype, types.InstanceType) + or etype is None + or type(etype) is str + ): + return [_format_final_exc_line(etype, value)] + + stype = etype.__name__ + + if not issubclass(etype, SyntaxError): + return [_format_final_exc_line(stype, value)] + + # It was a syntax error; show exactly where the problem was found. + lines = [] + try: + msg, (filename, lineno, offset, badline) = value.args + except Exception: + pass + else: + filename = filename or "" + lines.append(' File "{}", line {}\n'.format(filename, lineno)) + if badline is not None: + if isinstance(badline, bytes): # python 2 only + badline = badline.decode("utf-8", "replace") + lines.append(" {}\n".format(badline.strip())) + if offset is not None: + caretspace = badline.rstrip("\n")[:offset].lstrip() + # non-space whitespace (likes tabs) must be kept for alignment + caretspace = ((c.isspace() and c or " ") for c in caretspace) + # only three spaces to account for offset1 == pos 0 + lines.append(" {}^\n".format("".join(caretspace))) + value = msg + + lines.append(_format_final_exc_line(stype, value)) + return lines + + +def _format_final_exc_line(etype, value): + """Return a list of a single line -- normal case for format_exception_only""" + valuestr = _some_str(value) + if value is None or not valuestr: + line = "{}\n".format(etype) + else: + line = "{}: {}\n".format(etype, valuestr) + return line + + +def _some_str(value): + try: + return text_type(value) + except Exception: + try: + return bytes(value).decode("UTF-8", "replace") + except Exception: + pass + return "".format(type(value).__name__) diff --git a/venv/Lib/site-packages/_pytest/_code/code.py b/venv/Lib/site-packages/_pytest/_code/code.py new file mode 100644 index 00000000..c0f6d21a --- /dev/null +++ b/venv/Lib/site-packages/_pytest/_code/code.py @@ -0,0 +1,1044 @@ +from __future__ import absolute_import, division, print_function +import inspect +import pprint +import sys +import traceback +from inspect import CO_VARARGS, CO_VARKEYWORDS + +import attr +import pluggy +import re +from weakref import ref +import _pytest +from _pytest.compat import _PY2, _PY3, PY35, safe_str +from six import text_type +import py +import six + +builtin_repr = repr + +if _PY3: + from traceback import format_exception_only +else: + from ._py2traceback import format_exception_only + + +class Code(object): + """ wrapper around Python code objects """ + + def __init__(self, rawcode): + if not hasattr(rawcode, "co_filename"): + rawcode = getrawcode(rawcode) + try: + self.filename = rawcode.co_filename + self.firstlineno = rawcode.co_firstlineno - 1 + self.name = rawcode.co_name + except AttributeError: + raise TypeError("not a code object: %r" % (rawcode,)) + self.raw = rawcode + + def __eq__(self, other): + return self.raw == other.raw + + __hash__ = None + + def __ne__(self, other): + return not self == other + + @property + def 
path(self): + """ return a path object pointing to source code (note that it + might not point to an actually existing file). """ + try: + p = py.path.local(self.raw.co_filename) + # maybe don't try this checking + if not p.check(): + raise OSError("py.path check failed.") + except OSError: + # XXX maybe try harder like the weird logic + # in the standard lib [linecache.updatecache] does? + p = self.raw.co_filename + + return p + + @property + def fullsource(self): + """ return a _pytest._code.Source object for the full source file of the code + """ + from _pytest._code import source + + full, _ = source.findsource(self.raw) + return full + + def source(self): + """ return a _pytest._code.Source object for the code object's source only + """ + # return source only for that part of code + import _pytest._code + + return _pytest._code.Source(self.raw) + + def getargs(self, var=False): + """ return a tuple with the argument names for the code object + + if 'var' is set True also return the names of the variable and + keyword arguments when present + """ + # handfull shortcut for getting args + raw = self.raw + argcount = raw.co_argcount + if var: + argcount += raw.co_flags & CO_VARARGS + argcount += raw.co_flags & CO_VARKEYWORDS + return raw.co_varnames[:argcount] + + +class Frame(object): + """Wrapper around a Python frame holding f_locals and f_globals + in which expressions can be evaluated.""" + + def __init__(self, frame): + self.lineno = frame.f_lineno - 1 + self.f_globals = frame.f_globals + self.f_locals = frame.f_locals + self.raw = frame + self.code = Code(frame.f_code) + + @property + def statement(self): + """ statement this frame is at """ + import _pytest._code + + if self.code.fullsource is None: + return _pytest._code.Source("") + return self.code.fullsource.getstatement(self.lineno) + + def eval(self, code, **vars): + """ evaluate 'code' in the frame + + 'vars' are optional additional local variables + + returns the result of the evaluation + """ + f_locals = self.f_locals.copy() + f_locals.update(vars) + return eval(code, self.f_globals, f_locals) + + def exec_(self, code, **vars): + """ exec 'code' in the frame + + 'vars' are optiona; additional local variables + """ + f_locals = self.f_locals.copy() + f_locals.update(vars) + six.exec_(code, self.f_globals, f_locals) + + def repr(self, object): + """ return a 'safe' (non-recursive, one-line) string repr for 'object' + """ + return py.io.saferepr(object) + + def is_true(self, object): + return object + + def getargs(self, var=False): + """ return a list of tuples (name, value) for all arguments + + if 'var' is set True also include the variable and keyword + arguments when present + """ + retval = [] + for arg in self.code.getargs(var): + try: + retval.append((arg, self.f_locals[arg])) + except KeyError: + pass # this can occur when using Psyco + return retval + + +class TracebackEntry(object): + """ a single entry in a traceback """ + + _repr_style = None + exprinfo = None + + def __init__(self, rawentry, excinfo=None): + self._excinfo = excinfo + self._rawentry = rawentry + self.lineno = rawentry.tb_lineno - 1 + + def set_repr_style(self, mode): + assert mode in ("short", "long") + self._repr_style = mode + + @property + def frame(self): + import _pytest._code + + return _pytest._code.Frame(self._rawentry.tb_frame) + + @property + def relline(self): + return self.lineno - self.frame.code.firstlineno + + def __repr__(self): + return "" % (self.frame.code.path, self.lineno + 1) + + @property + def statement(self): + """ 
_pytest._code.Source object for the current statement """ + source = self.frame.code.fullsource + return source.getstatement(self.lineno) + + @property + def path(self): + """ path to the source code """ + return self.frame.code.path + + def getlocals(self): + return self.frame.f_locals + + locals = property(getlocals, None, None, "locals of underlaying frame") + + def getfirstlinesource(self): + # on Jython this firstlineno can be -1 apparently + return max(self.frame.code.firstlineno, 0) + + def getsource(self, astcache=None): + """ return failing source code. """ + # we use the passed in astcache to not reparse asttrees + # within exception info printing + from _pytest._code.source import getstatementrange_ast + + source = self.frame.code.fullsource + if source is None: + return None + key = astnode = None + if astcache is not None: + key = self.frame.code.path + if key is not None: + astnode = astcache.get(key, None) + start = self.getfirstlinesource() + try: + astnode, _, end = getstatementrange_ast( + self.lineno, source, astnode=astnode + ) + except SyntaxError: + end = self.lineno + 1 + else: + if key is not None: + astcache[key] = astnode + return source[start:end] + + source = property(getsource) + + def ishidden(self): + """ return True if the current frame has a var __tracebackhide__ + resolving to True + + If __tracebackhide__ is a callable, it gets called with the + ExceptionInfo instance and can decide whether to hide the traceback. + + mostly for internal use + """ + try: + tbh = self.frame.f_locals["__tracebackhide__"] + except KeyError: + try: + tbh = self.frame.f_globals["__tracebackhide__"] + except KeyError: + return False + + if callable(tbh): + return tbh(None if self._excinfo is None else self._excinfo()) + else: + return tbh + + def __str__(self): + try: + fn = str(self.path) + except py.error.Error: + fn = "???" + name = self.frame.code.name + try: + line = str(self.statement).lstrip() + except KeyboardInterrupt: + raise + except: # noqa + line = "???" + return " File %r:%d in %s\n %s\n" % (fn, self.lineno + 1, name, line) + + def name(self): + return self.frame.code.raw.co_name + + name = property(name, None, None, "co_name of underlaying code") + + +class Traceback(list): + """ Traceback objects encapsulate and offer higher level + access to Traceback entries. + """ + + Entry = TracebackEntry + + def __init__(self, tb, excinfo=None): + """ initialize from given python traceback object and ExceptionInfo """ + self._excinfo = excinfo + if hasattr(tb, "tb_next"): + + def f(cur): + while cur is not None: + yield self.Entry(cur, excinfo=excinfo) + cur = cur.tb_next + + list.__init__(self, f(tb)) + else: + list.__init__(self, tb) + + def cut(self, path=None, lineno=None, firstlineno=None, excludepath=None): + """ return a Traceback instance wrapping part of this Traceback + + by provding any combination of path, lineno and firstlineno, the + first frame to start the to-be-returned traceback is determined + + this allows cutting the first part of a Traceback instance e.g. 
+ for formatting reasons (removing some uninteresting bits that deal + with handling of the exception/traceback) + """ + for x in self: + code = x.frame.code + codepath = code.path + if ( + (path is None or codepath == path) + and ( + excludepath is None + or not hasattr(codepath, "relto") + or not codepath.relto(excludepath) + ) + and (lineno is None or x.lineno == lineno) + and (firstlineno is None or x.frame.code.firstlineno == firstlineno) + ): + return Traceback(x._rawentry, self._excinfo) + return self + + def __getitem__(self, key): + val = super(Traceback, self).__getitem__(key) + if isinstance(key, type(slice(0))): + val = self.__class__(val) + return val + + def filter(self, fn=lambda x: not x.ishidden()): + """ return a Traceback instance with certain items removed + + fn is a function that gets a single argument, a TracebackEntry + instance, and should return True when the item should be added + to the Traceback, False when not + + by default this removes all the TracebackEntries which are hidden + (see ishidden() above) + """ + return Traceback(filter(fn, self), self._excinfo) + + def getcrashentry(self): + """ return last non-hidden traceback entry that lead + to the exception of a traceback. + """ + for i in range(-1, -len(self) - 1, -1): + entry = self[i] + if not entry.ishidden(): + return entry + return self[-1] + + def recursionindex(self): + """ return the index of the frame/TracebackEntry where recursion + originates if appropriate, None if no recursion occurred + """ + cache = {} + for i, entry in enumerate(self): + # id for the code.raw is needed to work around + # the strange metaprogramming in the decorator lib from pypi + # which generates code objects that have hash/value equality + # XXX needs a test + key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno + # print "checking for recursion at", key + values = cache.setdefault(key, []) + if values: + f = entry.frame + loc = f.f_locals + for otherloc in values: + if f.is_true( + f.eval( + co_equal, + __recursioncache_locals_1=loc, + __recursioncache_locals_2=otherloc, + ) + ): + return i + values.append(entry.frame.f_locals) + return None + + +co_equal = compile( + "__recursioncache_locals_1 == __recursioncache_locals_2", "?", "eval" +) + + +class ExceptionInfo(object): + """ wraps sys.exc_info() objects and offers + help for navigating the traceback. 
+ """ + + _striptext = "" + _assert_start_repr = ( + "AssertionError(u'assert " if _PY2 else "AssertionError('assert " + ) + + def __init__(self, tup=None, exprinfo=None): + import _pytest._code + + if tup is None: + tup = sys.exc_info() + if exprinfo is None and isinstance(tup[1], AssertionError): + exprinfo = getattr(tup[1], "msg", None) + if exprinfo is None: + exprinfo = py.io.saferepr(tup[1]) + if exprinfo and exprinfo.startswith(self._assert_start_repr): + self._striptext = "AssertionError: " + self._excinfo = tup + #: the exception class + self.type = tup[0] + #: the exception instance + self.value = tup[1] + #: the exception raw traceback + self.tb = tup[2] + #: the exception type name + self.typename = self.type.__name__ + #: the exception traceback (_pytest._code.Traceback instance) + self.traceback = _pytest._code.Traceback(self.tb, excinfo=ref(self)) + + def __repr__(self): + return "" % (self.typename, len(self.traceback)) + + def exconly(self, tryshort=False): + """ return the exception as a string + + when 'tryshort' resolves to True, and the exception is a + _pytest._code._AssertionError, only the actual exception part of + the exception representation is returned (so 'AssertionError: ' is + removed from the beginning) + """ + lines = format_exception_only(self.type, self.value) + text = "".join(lines) + text = text.rstrip() + if tryshort: + if text.startswith(self._striptext): + text = text[len(self._striptext) :] + return text + + def errisinstance(self, exc): + """ return True if the exception is an instance of exc """ + return isinstance(self.value, exc) + + def _getreprcrash(self): + exconly = self.exconly(tryshort=True) + entry = self.traceback.getcrashentry() + path, lineno = entry.frame.code.raw.co_filename, entry.lineno + return ReprFileLocation(path, lineno + 1, exconly) + + def getrepr( + self, + showlocals=False, + style="long", + abspath=False, + tbfilter=True, + funcargs=False, + truncate_locals=True, + chain=True, + ): + """ + Return str()able representation of this exception info. + + :param bool showlocals: + Show locals per traceback entry. + Ignored if ``style=="native"``. + + :param str style: long|short|no|native traceback style + + :param bool abspath: + If paths should be changed to absolute or left unchanged. + + :param bool tbfilter: + Hide entries that contain a local variable ``__tracebackhide__==True``. + Ignored if ``style=="native"``. + + :param bool funcargs: + Show fixtures ("funcargs" for legacy purposes) per traceback entry. + + :param bool truncate_locals: + With ``showlocals==True``, make sure locals can be safely represented as strings. + + :param bool chain: if chained exceptions in Python 3 should be shown. + + .. versionchanged:: 3.9 + + Added the ``chain`` parameter. 
+ """ + if style == "native": + return ReprExceptionInfo( + ReprTracebackNative( + traceback.format_exception( + self.type, self.value, self.traceback[0]._rawentry + ) + ), + self._getreprcrash(), + ) + + fmt = FormattedExcinfo( + showlocals=showlocals, + style=style, + abspath=abspath, + tbfilter=tbfilter, + funcargs=funcargs, + truncate_locals=truncate_locals, + chain=chain, + ) + return fmt.repr_excinfo(self) + + def __str__(self): + entry = self.traceback[-1] + loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly()) + return str(loc) + + def __unicode__(self): + entry = self.traceback[-1] + loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly()) + return text_type(loc) + + def match(self, regexp): + """ + Match the regular expression 'regexp' on the string representation of + the exception. If it matches then True is returned (so that it is + possible to write 'assert excinfo.match()'). If it doesn't match an + AssertionError is raised. + """ + __tracebackhide__ = True + if not re.search(regexp, str(self.value)): + assert 0, "Pattern '{!s}' not found in '{!s}'".format(regexp, self.value) + return True + + +@attr.s +class FormattedExcinfo(object): + """ presenting information about failing Functions and Generators. """ + + # for traceback entries + flow_marker = ">" + fail_marker = "E" + + showlocals = attr.ib(default=False) + style = attr.ib(default="long") + abspath = attr.ib(default=True) + tbfilter = attr.ib(default=True) + funcargs = attr.ib(default=False) + truncate_locals = attr.ib(default=True) + chain = attr.ib(default=True) + astcache = attr.ib(default=attr.Factory(dict), init=False, repr=False) + + def _getindent(self, source): + # figure out indent for given source + try: + s = str(source.getstatement(len(source) - 1)) + except KeyboardInterrupt: + raise + except: # noqa + try: + s = str(source[-1]) + except KeyboardInterrupt: + raise + except: # noqa + return 0 + return 4 + (len(s) - len(s.lstrip())) + + def _getentrysource(self, entry): + source = entry.getsource(self.astcache) + if source is not None: + source = source.deindent() + return source + + def _saferepr(self, obj): + return py.io.saferepr(obj) + + def repr_args(self, entry): + if self.funcargs: + args = [] + for argname, argvalue in entry.frame.getargs(var=True): + args.append((argname, self._saferepr(argvalue))) + return ReprFuncArgs(args) + + def get_source(self, source, line_index=-1, excinfo=None, short=False): + """ return formatted and marked up source lines. 
""" + import _pytest._code + + lines = [] + if source is None or line_index >= len(source.lines): + source = _pytest._code.Source("???") + line_index = 0 + if line_index < 0: + line_index += len(source) + space_prefix = " " + if short: + lines.append(space_prefix + source.lines[line_index].strip()) + else: + for line in source.lines[:line_index]: + lines.append(space_prefix + line) + lines.append(self.flow_marker + " " + source.lines[line_index]) + for line in source.lines[line_index + 1 :]: + lines.append(space_prefix + line) + if excinfo is not None: + indent = 4 if short else self._getindent(source) + lines.extend(self.get_exconly(excinfo, indent=indent, markall=True)) + return lines + + def get_exconly(self, excinfo, indent=4, markall=False): + lines = [] + indent = " " * indent + # get the real exception information out + exlines = excinfo.exconly(tryshort=True).split("\n") + failindent = self.fail_marker + indent[1:] + for line in exlines: + lines.append(failindent + line) + if not markall: + failindent = indent + return lines + + def repr_locals(self, locals): + if self.showlocals: + lines = [] + keys = [loc for loc in locals if loc[0] != "@"] + keys.sort() + for name in keys: + value = locals[name] + if name == "__builtins__": + lines.append("__builtins__ = ") + else: + # This formatting could all be handled by the + # _repr() function, which is only reprlib.Repr in + # disguise, so is very configurable. + if self.truncate_locals: + str_repr = self._saferepr(value) + else: + str_repr = pprint.pformat(value) + # if len(str_repr) < 70 or not isinstance(value, + # (list, tuple, dict)): + lines.append("%-10s = %s" % (name, str_repr)) + # else: + # self._line("%-10s =\\" % (name,)) + # # XXX + # pprint.pprint(value, stream=self.excinfowriter) + return ReprLocals(lines) + + def repr_traceback_entry(self, entry, excinfo=None): + import _pytest._code + + source = self._getentrysource(entry) + if source is None: + source = _pytest._code.Source("???") + line_index = 0 + else: + # entry.getfirstlinesource() can be -1, should be 0 on jython + line_index = entry.lineno - max(entry.getfirstlinesource(), 0) + + lines = [] + style = entry._repr_style + if style is None: + style = self.style + if style in ("short", "long"): + short = style == "short" + reprargs = self.repr_args(entry) if not short else None + s = self.get_source(source, line_index, excinfo, short=short) + lines.extend(s) + if short: + message = "in %s" % (entry.name) + else: + message = excinfo and excinfo.typename or "" + path = self._makepath(entry.path) + filelocrepr = ReprFileLocation(path, entry.lineno + 1, message) + localsrepr = None + if not short: + localsrepr = self.repr_locals(entry.locals) + return ReprEntry(lines, reprargs, localsrepr, filelocrepr, style) + if excinfo: + lines.extend(self.get_exconly(excinfo, indent=4)) + return ReprEntry(lines, None, None, None, style) + + def _makepath(self, path): + if not self.abspath: + try: + np = py.path.local().bestrelpath(path) + except OSError: + return path + if len(np) < len(str(path)): + path = np + return path + + def repr_traceback(self, excinfo): + traceback = excinfo.traceback + if self.tbfilter: + traceback = traceback.filter() + + if is_recursion_error(excinfo): + traceback, extraline = self._truncate_recursive_traceback(traceback) + else: + extraline = None + + last = traceback[-1] + entries = [] + for index, entry in enumerate(traceback): + einfo = (last == entry) and excinfo or None + reprentry = self.repr_traceback_entry(entry, einfo) + entries.append(reprentry) 
+ return ReprTraceback(entries, extraline, style=self.style) + + def _truncate_recursive_traceback(self, traceback): + """ + Truncate the given recursive traceback trying to find the starting point + of the recursion. + + The detection is done by going through each traceback entry and finding the + point in which the locals of the frame are equal to the locals of a previous frame (see ``recursionindex()``. + + Handle the situation where the recursion process might raise an exception (for example + comparing numpy arrays using equality raises a TypeError), in which case we do our best to + warn the user of the error and show a limited traceback. + """ + try: + recursionindex = traceback.recursionindex() + except Exception as e: + max_frames = 10 + extraline = ( + "!!! Recursion error detected, but an error occurred locating the origin of recursion.\n" + " The following exception happened when comparing locals in the stack frame:\n" + " {exc_type}: {exc_msg}\n" + " Displaying first and last {max_frames} stack frames out of {total}." + ).format( + exc_type=type(e).__name__, + exc_msg=safe_str(e), + max_frames=max_frames, + total=len(traceback), + ) + traceback = traceback[:max_frames] + traceback[-max_frames:] + else: + if recursionindex is not None: + extraline = "!!! Recursion detected (same locals & position)" + traceback = traceback[: recursionindex + 1] + else: + extraline = None + + return traceback, extraline + + def repr_excinfo(self, excinfo): + if _PY2: + reprtraceback = self.repr_traceback(excinfo) + reprcrash = excinfo._getreprcrash() + + return ReprExceptionInfo(reprtraceback, reprcrash) + else: + repr_chain = [] + e = excinfo.value + descr = None + seen = set() + while e is not None and id(e) not in seen: + seen.add(id(e)) + if excinfo: + reprtraceback = self.repr_traceback(excinfo) + reprcrash = excinfo._getreprcrash() + else: + # fallback to native repr if the exception doesn't have a traceback: + # ExceptionInfo objects require a full traceback to work + reprtraceback = ReprTracebackNative( + traceback.format_exception(type(e), e, None) + ) + reprcrash = None + + repr_chain += [(reprtraceback, reprcrash, descr)] + if e.__cause__ is not None and self.chain: + e = e.__cause__ + excinfo = ( + ExceptionInfo((type(e), e, e.__traceback__)) + if e.__traceback__ + else None + ) + descr = "The above exception was the direct cause of the following exception:" + elif ( + e.__context__ is not None + and not e.__suppress_context__ + and self.chain + ): + e = e.__context__ + excinfo = ( + ExceptionInfo((type(e), e, e.__traceback__)) + if e.__traceback__ + else None + ) + descr = "During handling of the above exception, another exception occurred:" + else: + e = None + repr_chain.reverse() + return ExceptionChainRepr(repr_chain) + + +class TerminalRepr(object): + def __str__(self): + s = self.__unicode__() + if _PY2: + s = s.encode("utf-8") + return s + + def __unicode__(self): + # FYI this is called from pytest-xdist's serialization of exception + # information. 
+ io = py.io.TextIO() + tw = py.io.TerminalWriter(file=io) + self.toterminal(tw) + return io.getvalue().strip() + + def __repr__(self): + return "<%s instance at %0x>" % (self.__class__, id(self)) + + +class ExceptionRepr(TerminalRepr): + def __init__(self): + self.sections = [] + + def addsection(self, name, content, sep="-"): + self.sections.append((name, content, sep)) + + def toterminal(self, tw): + for name, content, sep in self.sections: + tw.sep(sep, name) + tw.line(content) + + +class ExceptionChainRepr(ExceptionRepr): + def __init__(self, chain): + super(ExceptionChainRepr, self).__init__() + self.chain = chain + # reprcrash and reprtraceback of the outermost (the newest) exception + # in the chain + self.reprtraceback = chain[-1][0] + self.reprcrash = chain[-1][1] + + def toterminal(self, tw): + for element in self.chain: + element[0].toterminal(tw) + if element[2] is not None: + tw.line("") + tw.line(element[2], yellow=True) + super(ExceptionChainRepr, self).toterminal(tw) + + +class ReprExceptionInfo(ExceptionRepr): + def __init__(self, reprtraceback, reprcrash): + super(ReprExceptionInfo, self).__init__() + self.reprtraceback = reprtraceback + self.reprcrash = reprcrash + + def toterminal(self, tw): + self.reprtraceback.toterminal(tw) + super(ReprExceptionInfo, self).toterminal(tw) + + +class ReprTraceback(TerminalRepr): + entrysep = "_ " + + def __init__(self, reprentries, extraline, style): + self.reprentries = reprentries + self.extraline = extraline + self.style = style + + def toterminal(self, tw): + # the entries might have different styles + for i, entry in enumerate(self.reprentries): + if entry.style == "long": + tw.line("") + entry.toterminal(tw) + if i < len(self.reprentries) - 1: + next_entry = self.reprentries[i + 1] + if ( + entry.style == "long" + or entry.style == "short" + and next_entry.style == "long" + ): + tw.sep(self.entrysep) + + if self.extraline: + tw.line(self.extraline) + + +class ReprTracebackNative(ReprTraceback): + def __init__(self, tblines): + self.style = "native" + self.reprentries = [ReprEntryNative(tblines)] + self.extraline = None + + +class ReprEntryNative(TerminalRepr): + style = "native" + + def __init__(self, tblines): + self.lines = tblines + + def toterminal(self, tw): + tw.write("".join(self.lines)) + + +class ReprEntry(TerminalRepr): + localssep = "_ " + + def __init__(self, lines, reprfuncargs, reprlocals, filelocrepr, style): + self.lines = lines + self.reprfuncargs = reprfuncargs + self.reprlocals = reprlocals + self.reprfileloc = filelocrepr + self.style = style + + def toterminal(self, tw): + if self.style == "short": + self.reprfileloc.toterminal(tw) + for line in self.lines: + red = line.startswith("E ") + tw.line(line, bold=True, red=red) + # tw.line("") + return + if self.reprfuncargs: + self.reprfuncargs.toterminal(tw) + for line in self.lines: + red = line.startswith("E ") + tw.line(line, bold=True, red=red) + if self.reprlocals: + # tw.sep(self.localssep, "Locals") + tw.line("") + self.reprlocals.toterminal(tw) + if self.reprfileloc: + if self.lines: + tw.line("") + self.reprfileloc.toterminal(tw) + + def __str__(self): + return "%s\n%s\n%s" % ("\n".join(self.lines), self.reprlocals, self.reprfileloc) + + +class ReprFileLocation(TerminalRepr): + def __init__(self, path, lineno, message): + self.path = str(path) + self.lineno = lineno + self.message = message + + def toterminal(self, tw): + # filename and lineno output for each entry, + # using an output format that most editors unterstand + msg = self.message + i = 
msg.find("\n") + if i != -1: + msg = msg[:i] + tw.write(self.path, bold=True, red=True) + tw.line(":%s: %s" % (self.lineno, msg)) + + +class ReprLocals(TerminalRepr): + def __init__(self, lines): + self.lines = lines + + def toterminal(self, tw): + for line in self.lines: + tw.line(line) + + +class ReprFuncArgs(TerminalRepr): + def __init__(self, args): + self.args = args + + def toterminal(self, tw): + if self.args: + linesofar = "" + for name, value in self.args: + ns = "%s = %s" % (safe_str(name), safe_str(value)) + if len(ns) + len(linesofar) + 2 > tw.fullwidth: + if linesofar: + tw.line(linesofar) + linesofar = ns + else: + if linesofar: + linesofar += ", " + ns + else: + linesofar = ns + if linesofar: + tw.line(linesofar) + tw.line("") + + +def getrawcode(obj, trycall=True): + """ return code object for given function. """ + try: + return obj.__code__ + except AttributeError: + obj = getattr(obj, "im_func", obj) + obj = getattr(obj, "func_code", obj) + obj = getattr(obj, "f_code", obj) + obj = getattr(obj, "__code__", obj) + if trycall and not hasattr(obj, "co_firstlineno"): + if hasattr(obj, "__call__") and not inspect.isclass(obj): + x = getrawcode(obj.__call__, trycall=False) + if hasattr(x, "co_firstlineno"): + return x + return obj + + +if PY35: # RecursionError introduced in 3.5 + + def is_recursion_error(excinfo): + return excinfo.errisinstance(RecursionError) # noqa + + +else: + + def is_recursion_error(excinfo): + if not excinfo.errisinstance(RuntimeError): + return False + try: + return "maximum recursion depth exceeded" in str(excinfo.value) + except UnicodeError: + return False + + +# relative paths that we use to filter traceback entries from appearing to the user; +# see filter_traceback +# note: if we need to add more paths than what we have now we should probably use a list +# for better maintenance + +_PLUGGY_DIR = py.path.local(pluggy.__file__.rstrip("oc")) +# pluggy is either a package or a single module depending on the version +if _PLUGGY_DIR.basename == "__init__.py": + _PLUGGY_DIR = _PLUGGY_DIR.dirpath() +_PYTEST_DIR = py.path.local(_pytest.__file__).dirpath() +_PY_DIR = py.path.local(py.__file__).dirpath() + + +def filter_traceback(entry): + """Return True if a TracebackEntry instance should be removed from tracebacks: + * dynamically generated code (no code to show up for it); + * internal traceback from pytest or its internal libraries, py and pluggy. + """ + # entry.path might sometimes return a str object when the entry + # points to dynamically generated code + # see https://bitbucket.org/pytest-dev/py/issues/71 + raw_filename = entry.frame.code.raw.co_filename + is_generated = "<" in raw_filename and ">" in raw_filename + if is_generated: + return False + # entry.path might point to a non-existing file, in which case it will + # also return a str object. 
see #1133 + p = py.path.local(entry.path) + return ( + not p.relto(_PLUGGY_DIR) and not p.relto(_PYTEST_DIR) and not p.relto(_PY_DIR) + ) diff --git a/venv/Lib/site-packages/_pytest/_code/source.py b/venv/Lib/site-packages/_pytest/_code/source.py new file mode 100644 index 00000000..f78d8bef --- /dev/null +++ b/venv/Lib/site-packages/_pytest/_code/source.py @@ -0,0 +1,322 @@ +from __future__ import absolute_import, division, print_function + +import ast +from ast import PyCF_ONLY_AST as _AST_FLAG +from bisect import bisect_right +import linecache +import sys +import six +import inspect +import textwrap +import tokenize +import py + +cpy_compile = compile + + +class Source(object): + """ an immutable object holding a source code fragment, + possibly deindenting it. + """ + + _compilecounter = 0 + + def __init__(self, *parts, **kwargs): + self.lines = lines = [] + de = kwargs.get("deindent", True) + for part in parts: + if not part: + partlines = [] + elif isinstance(part, Source): + partlines = part.lines + elif isinstance(part, (tuple, list)): + partlines = [x.rstrip("\n") for x in part] + elif isinstance(part, six.string_types): + partlines = part.split("\n") + else: + partlines = getsource(part, deindent=de).lines + if de: + partlines = deindent(partlines) + lines.extend(partlines) + + def __eq__(self, other): + try: + return self.lines == other.lines + except AttributeError: + if isinstance(other, str): + return str(self) == other + return False + + __hash__ = None + + def __getitem__(self, key): + if isinstance(key, int): + return self.lines[key] + else: + if key.step not in (None, 1): + raise IndexError("cannot slice a Source with a step") + newsource = Source() + newsource.lines = self.lines[key.start : key.stop] + return newsource + + def __len__(self): + return len(self.lines) + + def strip(self): + """ return new source object with trailing + and leading blank lines removed. + """ + start, end = 0, len(self) + while start < end and not self.lines[start].strip(): + start += 1 + while end > start and not self.lines[end - 1].strip(): + end -= 1 + source = Source() + source.lines[:] = self.lines[start:end] + return source + + def putaround(self, before="", after="", indent=" " * 4): + """ return a copy of the source object with + 'before' and 'after' wrapped around it. + """ + before = Source(before) + after = Source(after) + newsource = Source() + lines = [(indent + line) for line in self.lines] + newsource.lines = before.lines + lines + after.lines + return newsource + + def indent(self, indent=" " * 4): + """ return a copy of the source object with + all lines indented by the given indent-string. + """ + newsource = Source() + newsource.lines = [(indent + line) for line in self.lines] + return newsource + + def getstatement(self, lineno): + """ return Source statement which contains the + given linenumber (counted from 0). + """ + start, end = self.getstatementrange(lineno) + return self[start:end] + + def getstatementrange(self, lineno): + """ return (start, end) tuple which spans the minimal + statement region which containing the given lineno. + """ + if not (0 <= lineno < len(self)): + raise IndexError("lineno out of range") + ast, start, end = getstatementrange_ast(lineno, self) + return start, end + + def deindent(self): + """return a new source object deindented.""" + newsource = Source() + newsource.lines[:] = deindent(self.lines) + return newsource + + def isparseable(self, deindent=True): + """ return True if source is parseable, heuristically + deindenting it by default. 
+ """ + from parser import suite as syntax_checker + + if deindent: + source = str(self.deindent()) + else: + source = str(self) + try: + # compile(source+'\n', "x", "exec") + syntax_checker(source + "\n") + except KeyboardInterrupt: + raise + except Exception: + return False + else: + return True + + def __str__(self): + return "\n".join(self.lines) + + def compile( + self, filename=None, mode="exec", flag=0, dont_inherit=0, _genframe=None + ): + """ return compiled code object. if filename is None + invent an artificial filename which displays + the source/line position of the caller frame. + """ + if not filename or py.path.local(filename).check(file=0): + if _genframe is None: + _genframe = sys._getframe(1) # the caller + fn, lineno = _genframe.f_code.co_filename, _genframe.f_lineno + base = "<%d-codegen " % self._compilecounter + self.__class__._compilecounter += 1 + if not filename: + filename = base + "%s:%d>" % (fn, lineno) + else: + filename = base + "%r %s:%d>" % (filename, fn, lineno) + source = "\n".join(self.lines) + "\n" + try: + co = cpy_compile(source, filename, mode, flag) + except SyntaxError: + ex = sys.exc_info()[1] + # re-represent syntax errors from parsing python strings + msglines = self.lines[: ex.lineno] + if ex.offset: + msglines.append(" " * ex.offset + "^") + msglines.append("(code was compiled probably from here: %s)" % filename) + newex = SyntaxError("\n".join(msglines)) + newex.offset = ex.offset + newex.lineno = ex.lineno + newex.text = ex.text + raise newex + else: + if flag & _AST_FLAG: + return co + lines = [(x + "\n") for x in self.lines] + linecache.cache[filename] = (1, None, lines, filename) + return co + + +# +# public API shortcut functions +# + + +def compile_(source, filename=None, mode="exec", flags=0, dont_inherit=0): + """ compile the given source to a raw code object, + and maintain an internal cache which allows later + retrieval of the source code for the code object + and any recursively created code objects. + """ + if isinstance(source, ast.AST): + # XXX should Source support having AST? + return cpy_compile(source, filename, mode, flags, dont_inherit) + _genframe = sys._getframe(1) # the caller + s = Source(source) + co = s.compile(filename, mode, flags, _genframe=_genframe) + return co + + +def getfslineno(obj): + """ Return source location (path, lineno) for the given object. 
+ If the source cannot be determined return ("", -1) + """ + from .code import Code + + try: + code = Code(obj) + except TypeError: + try: + fn = inspect.getsourcefile(obj) or inspect.getfile(obj) + except TypeError: + return "", -1 + + fspath = fn and py.path.local(fn) or None + lineno = -1 + if fspath: + try: + _, lineno = findsource(obj) + except IOError: + pass + else: + fspath = code.path + lineno = code.firstlineno + assert isinstance(lineno, int) + return fspath, lineno + + +# +# helper functions +# + + +def findsource(obj): + try: + sourcelines, lineno = inspect.findsource(obj) + except py.builtin._sysex: + raise + except: # noqa + return None, -1 + source = Source() + source.lines = [line.rstrip() for line in sourcelines] + return source, lineno + + +def getsource(obj, **kwargs): + from .code import getrawcode + + obj = getrawcode(obj) + try: + strsrc = inspect.getsource(obj) + except IndentationError: + strsrc = '"Buggy python version consider upgrading, cannot get source"' + assert isinstance(strsrc, str) + return Source(strsrc, **kwargs) + + +def deindent(lines): + return textwrap.dedent("\n".join(lines)).splitlines() + + +def get_statement_startend2(lineno, node): + import ast + + # flatten all statements and except handlers into one lineno-list + # AST's line numbers start indexing at 1 + values = [] + for x in ast.walk(node): + if isinstance(x, (ast.stmt, ast.ExceptHandler)): + values.append(x.lineno - 1) + for name in ("finalbody", "orelse"): + val = getattr(x, name, None) + if val: + # treat the finally/orelse part as its own statement + values.append(val[0].lineno - 1 - 1) + values.sort() + insert_index = bisect_right(values, lineno) + start = values[insert_index - 1] + if insert_index >= len(values): + end = None + else: + end = values[insert_index] + return start, end + + +def getstatementrange_ast(lineno, source, assertion=False, astnode=None): + if astnode is None: + content = str(source) + astnode = compile(content, "source", "exec", 1024) # 1024 for AST + + start, end = get_statement_startend2(lineno, astnode) + # we need to correct the end: + # - ast-parsing strips comments + # - there might be empty lines + # - we might have lesser indented code blocks at the end + if end is None: + end = len(source.lines) + + if end > start + 1: + # make sure we don't span differently indented code blocks + # by using the BlockFinder helper used which inspect.getsource() uses itself + block_finder = inspect.BlockFinder() + # if we start with an indented line, put blockfinder to "started" mode + block_finder.started = source.lines[start][0].isspace() + it = ((x + "\n") for x in source.lines[start:end]) + try: + for tok in tokenize.generate_tokens(lambda: next(it)): + block_finder.tokeneater(*tok) + except (inspect.EndOfBlock, IndentationError): + end = block_finder.last + start + except Exception: + pass + + # the end might still point to a comment or empty line, correct it + while end: + line = source.lines[end - 1].lstrip() + if line.startswith("#") or not line: + end -= 1 + else: + break + return astnode, start, end diff --git a/venv/Lib/site-packages/_pytest/_version.py b/venv/Lib/site-packages/_pytest/_version.py new file mode 100644 index 00000000..ba2ee94b --- /dev/null +++ b/venv/Lib/site-packages/_pytest/_version.py @@ -0,0 +1,4 @@ +# coding: utf-8 +# file generated by setuptools_scm +# don't change, don't track in version control +version = '3.9.2' diff --git a/venv/Lib/site-packages/_pytest/assertion/__init__.py b/venv/Lib/site-packages/_pytest/assertion/__init__.py new 
file mode 100644 index 00000000..2c9a8890 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/assertion/__init__.py @@ -0,0 +1,151 @@ +""" +support for presenting detailed information in failing assertions. +""" +from __future__ import absolute_import, division, print_function +import sys +import six + +from _pytest.assertion import util +from _pytest.assertion import rewrite +from _pytest.assertion import truncate + + +def pytest_addoption(parser): + group = parser.getgroup("debugconfig") + group.addoption( + "--assert", + action="store", + dest="assertmode", + choices=("rewrite", "plain"), + default="rewrite", + metavar="MODE", + help="""Control assertion debugging tools. 'plain' + performs no assertion debugging. 'rewrite' + (the default) rewrites assert statements in + test modules on import to provide assert + expression information.""", + ) + + +def register_assert_rewrite(*names): + """Register one or more module names to be rewritten on import. + + This function will make sure that this module or all modules inside + the package will get their assert statements rewritten. + Thus you should make sure to call this before the module is + actually imported, usually in your __init__.py if you are a plugin + using a package. + + :raise TypeError: if the given module names are not strings. + """ + for name in names: + if not isinstance(name, str): + msg = "expected module names as *args, got {0} instead" + raise TypeError(msg.format(repr(names))) + for hook in sys.meta_path: + if isinstance(hook, rewrite.AssertionRewritingHook): + importhook = hook + break + else: + importhook = DummyRewriteHook() + importhook.mark_rewrite(*names) + + +class DummyRewriteHook(object): + """A no-op import hook for when rewriting is disabled.""" + + def mark_rewrite(self, *names): + pass + + +class AssertionState(object): + """State for the assertion plugin.""" + + def __init__(self, config, mode): + self.mode = mode + self.trace = config.trace.root.get("assertion") + self.hook = None + + +def install_importhook(config): + """Try to install the rewrite hook, raise SystemError if it fails.""" + # Jython has an AST bug that make the assertion rewriting hook malfunction. + if sys.platform.startswith("java"): + raise SystemError("rewrite not supported") + + config._assertstate = AssertionState(config, "rewrite") + config._assertstate.hook = hook = rewrite.AssertionRewritingHook(config) + sys.meta_path.insert(0, hook) + config._assertstate.trace("installed rewrite import hook") + + def undo(): + hook = config._assertstate.hook + if hook is not None and hook in sys.meta_path: + sys.meta_path.remove(hook) + + config.add_cleanup(undo) + return hook + + +def pytest_collection(session): + # this hook is only called when test modules are collected + # so for example not in the master process of pytest-xdist + # (which does not collect test modules) + assertstate = getattr(session.config, "_assertstate", None) + if assertstate: + if assertstate.hook is not None: + assertstate.hook.set_session(session) + + +def pytest_runtest_setup(item): + """Setup the pytest_assertrepr_compare hook + + The newinterpret and rewrite modules will use util._reprcompare if + it exists to use custom reporting via the + pytest_assertrepr_compare hook. This sets up this custom + comparison for the test. 
+ """ + + def callbinrepr(op, left, right): + """Call the pytest_assertrepr_compare hook and prepare the result + + This uses the first result from the hook and then ensures the + following: + * Overly verbose explanations are truncated unless configured otherwise + (eg. if running in verbose mode). + * Embedded newlines are escaped to help util.format_explanation() + later. + * If the rewrite mode is used embedded %-characters are replaced + to protect later % formatting. + + The result can be formatted by util.format_explanation() for + pretty printing. + """ + hook_result = item.ihook.pytest_assertrepr_compare( + config=item.config, op=op, left=left, right=right + ) + for new_expl in hook_result: + if new_expl: + new_expl = truncate.truncate_if_required(new_expl, item) + new_expl = [line.replace("\n", "\\n") for line in new_expl] + res = six.text_type("\n~").join(new_expl) + if item.config.getvalue("assertmode") == "rewrite": + res = res.replace("%", "%%") + return res + + util._reprcompare = callbinrepr + + +def pytest_runtest_teardown(item): + util._reprcompare = None + + +def pytest_sessionfinish(session): + assertstate = getattr(session.config, "_assertstate", None) + if assertstate: + if assertstate.hook is not None: + assertstate.hook.set_session(None) + + +# Expose this plugin's implementation for the pytest_assertrepr_compare hook +pytest_assertrepr_compare = util.assertrepr_compare diff --git a/venv/Lib/site-packages/_pytest/assertion/__pycache__/__init__.cpython-37.pyc b/venv/Lib/site-packages/_pytest/assertion/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 00000000..6385c622 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/assertion/__pycache__/__init__.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/assertion/__pycache__/rewrite.cpython-37.pyc b/venv/Lib/site-packages/_pytest/assertion/__pycache__/rewrite.cpython-37.pyc new file mode 100644 index 00000000..6177deb3 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/assertion/__pycache__/rewrite.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/assertion/__pycache__/truncate.cpython-37.pyc b/venv/Lib/site-packages/_pytest/assertion/__pycache__/truncate.cpython-37.pyc new file mode 100644 index 00000000..5c438070 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/assertion/__pycache__/truncate.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/assertion/__pycache__/util.cpython-37.pyc b/venv/Lib/site-packages/_pytest/assertion/__pycache__/util.cpython-37.pyc new file mode 100644 index 00000000..987cabec Binary files /dev/null and b/venv/Lib/site-packages/_pytest/assertion/__pycache__/util.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/assertion/rewrite.py b/venv/Lib/site-packages/_pytest/assertion/rewrite.py new file mode 100644 index 00000000..5e76563d --- /dev/null +++ b/venv/Lib/site-packages/_pytest/assertion/rewrite.py @@ -0,0 +1,1033 @@ +"""Rewrite assertion AST to produce nice error messages""" +from __future__ import absolute_import, division, print_function +import ast +import errno +import itertools +import imp +import marshal +import os +import re +import six +import string +import struct +import sys +import types + +import atomicwrites +import py + +from _pytest.assertion import util +from _pytest.pathlib import PurePath +from _pytest.compat import spec_from_file_location +from _pytest.pathlib import fnmatch_ex + +# pytest caches rewritten pycs in __pycache__. 
+if hasattr(imp, "get_tag"): + PYTEST_TAG = imp.get_tag() + "-PYTEST" +else: + if hasattr(sys, "pypy_version_info"): + impl = "pypy" + elif sys.platform == "java": + impl = "jython" + else: + impl = "cpython" + ver = sys.version_info + PYTEST_TAG = "%s-%s%s-PYTEST" % (impl, ver[0], ver[1]) + del ver, impl + +PYC_EXT = ".py" + (__debug__ and "c" or "o") +PYC_TAIL = "." + PYTEST_TAG + PYC_EXT + +ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3 + +if sys.version_info >= (3, 5): + ast_Call = ast.Call +else: + + def ast_Call(a, b, c): + return ast.Call(a, b, c, None, None) + + +class AssertionRewritingHook(object): + """PEP302 Import hook which rewrites asserts.""" + + def __init__(self, config): + self.config = config + self.fnpats = config.getini("python_files") + self.session = None + self.modules = {} + self._rewritten_names = set() + self._register_with_pkg_resources() + self._must_rewrite = set() + # flag to guard against trying to rewrite a pyc file while we are already writing another pyc file, + # which might result in infinite recursion (#3506) + self._writing_pyc = False + self._basenames_to_check_rewrite = {"conftest"} + self._marked_for_rewrite_cache = {} + self._session_paths_checked = False + + def set_session(self, session): + self.session = session + self._session_paths_checked = False + + def _imp_find_module(self, name, path=None): + """Indirection so we can mock calls to find_module originated from the hook during testing""" + return imp.find_module(name, path) + + def find_module(self, name, path=None): + if self._writing_pyc: + return None + state = self.config._assertstate + if self._early_rewrite_bailout(name, state): + return None + state.trace("find_module called for: %s" % name) + names = name.rsplit(".", 1) + lastname = names[-1] + pth = None + if path is not None: + # Starting with Python 3.3, path is a _NamespacePath(), which + # causes problems if not converted to list. + path = list(path) + if len(path) == 1: + pth = path[0] + if pth is None: + try: + fd, fn, desc = self._imp_find_module(lastname, path) + except ImportError: + return None + if fd is not None: + fd.close() + tp = desc[2] + if tp == imp.PY_COMPILED: + if hasattr(imp, "source_from_cache"): + try: + fn = imp.source_from_cache(fn) + except ValueError: + # Python 3 doesn't like orphaned but still-importable + # .pyc files. + fn = fn[:-1] + else: + fn = fn[:-1] + elif tp != imp.PY_SOURCE: + # Don't know what this is. + return None + else: + fn = os.path.join(pth, name.rpartition(".")[2] + ".py") + + fn_pypath = py.path.local(fn) + if not self._should_rewrite(name, fn_pypath, state): + return None + + self._rewritten_names.add(name) + + # The requested module looks like a test file, so rewrite it. This is + # the most magical part of the process: load the source, rewrite the + # asserts, and load the rewritten source. We also cache the rewritten + # module code in a special pyc. We must be aware of the possibility of + # concurrent pytest processes rewriting and loading pycs. To avoid + # tricky race conditions, we maintain the following invariant: The + # cached pyc is always a complete, valid pyc. Operations on it must be + # atomic. POSIX's atomic rename comes in handy. + write = not sys.dont_write_bytecode + cache_dir = os.path.join(fn_pypath.dirname, "__pycache__") + if write: + try: + os.mkdir(cache_dir) + except OSError: + e = sys.exc_info()[1].errno + if e == errno.EEXIST: + # Either the __pycache__ directory already exists (the + # common case) or it's blocked by a non-dir node. 
In the + # latter case, we'll ignore it in _write_pyc. + pass + elif e in [errno.ENOENT, errno.ENOTDIR]: + # One of the path components was not a directory, likely + # because we're in a zip file. + write = False + elif e in [errno.EACCES, errno.EROFS, errno.EPERM]: + state.trace("read only directory: %r" % fn_pypath.dirname) + write = False + else: + raise + cache_name = fn_pypath.basename[:-3] + PYC_TAIL + pyc = os.path.join(cache_dir, cache_name) + # Notice that even if we're in a read-only directory, I'm going + # to check for a cached pyc. This may not be optimal... + co = _read_pyc(fn_pypath, pyc, state.trace) + if co is None: + state.trace("rewriting %r" % (fn,)) + source_stat, co = _rewrite_test(self.config, fn_pypath) + if co is None: + # Probably a SyntaxError in the test. + return None + if write: + self._writing_pyc = True + try: + _write_pyc(state, co, source_stat, pyc) + finally: + self._writing_pyc = False + else: + state.trace("found cached rewritten pyc for %r" % (fn,)) + self.modules[name] = co, pyc + return self + + def _early_rewrite_bailout(self, name, state): + """ + This is a fast way to get out of rewriting modules. Profiling has + shown that the call to imp.find_module (inside of the find_module + from this class) is a major slowdown, so, this method tries to + filter what we're sure won't be rewritten before getting to it. + """ + if self.session is not None and not self._session_paths_checked: + self._session_paths_checked = True + for path in self.session._initialpaths: + # Make something as c:/projects/my_project/path.py -> + # ['c:', 'projects', 'my_project', 'path.py'] + parts = str(path).split(os.path.sep) + # add 'path' to basenames to be checked. + self._basenames_to_check_rewrite.add(os.path.splitext(parts[-1])[0]) + + # Note: conftest already by default in _basenames_to_check_rewrite. + parts = name.split(".") + if parts[-1] in self._basenames_to_check_rewrite: + return False + + # For matching the name it must be as if it was a filename. 
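`_early_rewrite_bailout()` above only needs the dotted module name to rule most imports out. A rough standalone sketch of the same idea, assuming the default `python_files` patterns (this is not pytest's exact code):

```python
# Rough sketch of the early-bailout test: treat the dotted module name as a
# file name and check whether it could ever match the python_files patterns.
import fnmatch


def could_be_test_module(name, fnpats=("test_*.py", "*_test.py")):
    basename = name.rsplit(".", 1)[-1] + ".py"
    return any(fnmatch.fnmatch(basename, pat) for pat in fnpats)


print(could_be_test_module("tests.test_products"))  # True  -> keep checking
print(could_be_test_module("app.api.v1.views"))     # False -> skip rewriting
```

When a module does get rewritten, the compiled result is cached next to the source as something like `__pycache__/test_views.cpython-37-PYTEST.pyc`, i.e. the `PYTEST_TAG` computed above spliced into the normal `.pyc` name.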
+ path = PurePath(os.path.sep.join(parts) + ".py") + + for pat in self.fnpats: + # if the pattern contains subdirectories ("tests/**.py" for example) we can't bail out based + # on the name alone because we need to match against the full path + if os.path.dirname(pat): + return False + if fnmatch_ex(pat, path): + return False + + if self._is_marked_for_rewrite(name, state): + return False + + state.trace("early skip of rewriting module: %s" % (name,)) + return True + + def _should_rewrite(self, name, fn_pypath, state): + # always rewrite conftest files + fn = str(fn_pypath) + if fn_pypath.basename == "conftest.py": + state.trace("rewriting conftest file: %r" % (fn,)) + return True + + if self.session is not None: + if self.session.isinitpath(fn): + state.trace("matched test file (was specified on cmdline): %r" % (fn,)) + return True + + # modules not passed explicitly on the command line are only + # rewritten if they match the naming convention for test files + for pat in self.fnpats: + if fn_pypath.fnmatch(pat): + state.trace("matched test file %r" % (fn,)) + return True + + return self._is_marked_for_rewrite(name, state) + + def _is_marked_for_rewrite(self, name, state): + try: + return self._marked_for_rewrite_cache[name] + except KeyError: + for marked in self._must_rewrite: + if name == marked or name.startswith(marked + "."): + state.trace("matched marked file %r (from %r)" % (name, marked)) + self._marked_for_rewrite_cache[name] = True + return True + + self._marked_for_rewrite_cache[name] = False + return False + + def mark_rewrite(self, *names): + """Mark import names as needing to be rewritten. + + The named module or package as well as any nested modules will + be rewritten on import. + """ + already_imported = ( + set(names).intersection(sys.modules).difference(self._rewritten_names) + ) + for name in already_imported: + if not AssertionRewriter.is_rewrite_disabled( + sys.modules[name].__doc__ or "" + ): + self._warn_already_imported(name) + self._must_rewrite.update(names) + self._marked_for_rewrite_cache.clear() + + def _warn_already_imported(self, name): + from _pytest.warning_types import PytestWarning + from _pytest.warnings import _issue_config_warning + + _issue_config_warning( + PytestWarning("Module already imported so cannot be rewritten: %s" % name), + self.config, + ) + + def load_module(self, name): + co, pyc = self.modules.pop(name) + if name in sys.modules: + # If there is an existing module object named 'fullname' in + # sys.modules, the loader must use that existing module. (Otherwise, + # the reload() builtin will not work correctly.) + mod = sys.modules[name] + else: + # I wish I could just call imp.load_compiled here, but __file__ has to + # be set properly. In Python 3.2+, this all would be handled correctly + # by load_compiled. + mod = sys.modules[name] = imp.new_module(name) + try: + mod.__file__ = co.co_filename + # Normally, this attribute is 3.2+. 
+ mod.__cached__ = pyc + mod.__loader__ = self + # Normally, this attribute is 3.4+ + mod.__spec__ = spec_from_file_location(name, co.co_filename, loader=self) + six.exec_(co, mod.__dict__) + except: # noqa + if name in sys.modules: + del sys.modules[name] + raise + return sys.modules[name] + + def is_package(self, name): + try: + fd, fn, desc = self._imp_find_module(name) + except ImportError: + return False + if fd is not None: + fd.close() + tp = desc[2] + return tp == imp.PKG_DIRECTORY + + @classmethod + def _register_with_pkg_resources(cls): + """ + Ensure package resources can be loaded from this loader. May be called + multiple times, as the operation is idempotent. + """ + try: + import pkg_resources + + # access an attribute in case a deferred importer is present + pkg_resources.__name__ + except ImportError: + return + + # Since pytest tests are always located in the file system, the + # DefaultProvider is appropriate. + pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider) + + def get_data(self, pathname): + """Optional PEP302 get_data API. + """ + with open(pathname, "rb") as f: + return f.read() + + +def _write_pyc(state, co, source_stat, pyc): + # Technically, we don't have to have the same pyc format as + # (C)Python, since these "pycs" should never be seen by builtin + # import. However, there's little reason deviate, and I hope + # sometime to be able to use imp.load_compiled to load them. (See + # the comment in load_module above.) + try: + with atomicwrites.atomic_write(pyc, mode="wb", overwrite=True) as fp: + fp.write(imp.get_magic()) + mtime = int(source_stat.mtime) + size = source_stat.size & 0xFFFFFFFF + fp.write(struct.pack("<ll", mtime, size)) + fp.write(marshal.dumps(co)) + +unary_map = {ast.Not: "not %s", ast.Invert: "~%s", ast.USub: "-%s", ast.UAdd: "+%s"} + +binop_map = { + ast.BitOr: "|", + ast.BitXor: "^", + ast.BitAnd: "&", + ast.LShift: "<<", + ast.RShift: ">>", + ast.Add: "+", + ast.Sub: "-", + ast.Mult: "*", + ast.Div: "/", + ast.FloorDiv: "//", + ast.Mod: "%%", # escaped for string formatting + ast.Eq: "==", + ast.NotEq: "!=", + ast.Lt: "<", + ast.LtE: "<=", + ast.Gt: ">", + ast.GtE: ">=", + ast.Pow: "**", + ast.Is: "is", + ast.IsNot: "is not", + ast.In: "in", + ast.NotIn: "not in", +} +# Python 3.5+ compatibility +try: + binop_map[ast.MatMult] = "@" +except AttributeError: + pass + +# Python 3.4+ compatibility +if hasattr(ast, "NameConstant"): + _NameConstant = ast.NameConstant +else: + + def _NameConstant(c): + return ast.Name(str(c), ast.Load()) + + +def set_location(node, lineno, col_offset): + """Set node location information recursively.""" + + def _fix(node, lineno, col_offset): + if "lineno" in node._attributes: + node.lineno = lineno + if "col_offset" in node._attributes: + node.col_offset = col_offset + for child in ast.iter_child_nodes(node): + _fix(child, lineno, col_offset) + + _fix(node, lineno, col_offset) + return node + + +class AssertionRewriter(ast.NodeVisitor): + """Assertion rewriting implementation. + + The main entrypoint is to call .run() with an ast.Module instance, + this will then find all the assert statements and rewrite them to + provide intermediate values and a detailed assertion error. See + http://pybites.blogspot.be/2011/07/behind-scenes-of-pytests-new-assertion.html + for an overview of how this works. + + The entry point here is .run() which will iterate over all the + statements in an ast.Module and for each ast.Assert statement it + finds call .visit() with it.
Then .visit_Assert() takes over and + is responsible for creating new ast statements to replace the + original assert statement: it rewrites the test of an assertion + to provide intermediate values and replace it with an if statement + which raises an assertion error with a detailed explanation in + case the expression is false. + + For this .visit_Assert() uses the visitor pattern to visit all the + AST nodes of the ast.Assert.test field, each visit call returning + an AST node and the corresponding explanation string. During this + state is kept in several instance attributes: + + :statements: All the AST statements which will replace the assert + statement. + + :variables: This is populated by .variable() with each variable + used by the statements so that they can all be set to None at + the end of the statements. + + :variable_counter: Counter to create new unique variables needed + by statements. Variables are created using .variable() and + have the form of "@py_assert0". + + :on_failure: The AST statements which will be executed if the + assertion test fails. This is the code which will construct + the failure message and raises the AssertionError. + + :explanation_specifiers: A dict filled by .explanation_param() + with %-formatting placeholders and their corresponding + expressions to use in the building of an assertion message. + This is used by .pop_format_context() to build a message. + + :stack: A stack of the explanation_specifiers dicts maintained by + .push_format_context() and .pop_format_context() which allows + to build another %-formatted string while already building one. + + This state is reset on every new assert statement visited and used + by the other visitors. + + """ + + def __init__(self, module_path, config): + super(AssertionRewriter, self).__init__() + self.module_path = module_path + self.config = config + + def run(self, mod): + """Find all assert statements in *mod* and rewrite them.""" + if not mod.body: + # Nothing to do. + return + # Insert some special imports at the top of the module but after any + # docstrings and __future__ imports. + aliases = [ + ast.alias(py.builtin.builtins.__name__, "@py_builtins"), + ast.alias("_pytest.assertion.rewrite", "@pytest_ar"), + ] + doc = getattr(mod, "docstring", None) + expect_docstring = doc is None + if doc is not None and self.is_rewrite_disabled(doc): + return + pos = 0 + lineno = 1 + for item in mod.body: + if ( + expect_docstring + and isinstance(item, ast.Expr) + and isinstance(item.value, ast.Str) + ): + doc = item.value.s + if self.is_rewrite_disabled(doc): + return + expect_docstring = False + elif ( + not isinstance(item, ast.ImportFrom) + or item.level > 0 + or item.module != "__future__" + ): + lineno = item.lineno + break + pos += 1 + else: + lineno = item.lineno + imports = [ + ast.Import([alias], lineno=lineno, col_offset=0) for alias in aliases + ] + mod.body[pos:pos] = imports + # Collect asserts. + nodes = [mod] + while nodes: + node = nodes.pop() + for name, field in ast.iter_fields(node): + if isinstance(field, list): + new = [] + for i, child in enumerate(field): + if isinstance(child, ast.Assert): + # Transform assert. + new.extend(self.visit(child)) + else: + new.append(child) + if isinstance(child, ast.AST): + nodes.append(child) + setattr(node, name, new) + elif ( + isinstance(field, ast.AST) + and + # Don't recurse into expressions as they can't contain + # asserts. 
+ not isinstance(field, ast.expr) + ): + nodes.append(field) + + @staticmethod + def is_rewrite_disabled(docstring): + return "PYTEST_DONT_REWRITE" in docstring + + def variable(self): + """Get a new variable.""" + # Use a character invalid in python identifiers to avoid clashing. + name = "@py_assert" + str(next(self.variable_counter)) + self.variables.append(name) + return name + + def assign(self, expr): + """Give *expr* a name.""" + name = self.variable() + self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr)) + return ast.Name(name, ast.Load()) + + def display(self, expr): + """Call py.io.saferepr on the expression.""" + return self.helper("saferepr", expr) + + def helper(self, name, *args): + """Call a helper in this module.""" + py_name = ast.Name("@pytest_ar", ast.Load()) + attr = ast.Attribute(py_name, "_" + name, ast.Load()) + return ast_Call(attr, list(args), []) + + def builtin(self, name): + """Return the builtin called *name*.""" + builtin_name = ast.Name("@py_builtins", ast.Load()) + return ast.Attribute(builtin_name, name, ast.Load()) + + def explanation_param(self, expr): + """Return a new named %-formatting placeholder for expr. + + This creates a %-formatting placeholder for expr in the + current formatting context, e.g. ``%(py0)s``. The placeholder + and expr are placed in the current format context so that it + can be used on the next call to .pop_format_context(). + + """ + specifier = "py" + str(next(self.variable_counter)) + self.explanation_specifiers[specifier] = expr + return "%(" + specifier + ")s" + + def push_format_context(self): + """Create a new formatting context. + + The format context is used for when an explanation wants to + have a variable value formatted in the assertion message. In + this case the value required can be added using + .explanation_param(). Finally .pop_format_context() is used + to format a string of %-formatted values as added by + .explanation_param(). + + """ + self.explanation_specifiers = {} + self.stack.append(self.explanation_specifiers) + + def pop_format_context(self, expl_expr): + """Format the %-formatted string with current format context. + + The expl_expr should be an ast.Str instance constructed from + the %-placeholders created by .explanation_param(). This will + add the required code to format said string to .on_failure and + return the ast.Name instance of the formatted string. + + """ + current = self.stack.pop() + if self.stack: + self.explanation_specifiers = self.stack[-1] + keys = [ast.Str(key) for key in current.keys()] + format_dict = ast.Dict(keys, list(current.values())) + form = ast.BinOp(expl_expr, ast.Mod(), format_dict) + name = "@py_format" + str(next(self.variable_counter)) + self.on_failure.append(ast.Assign([ast.Name(name, ast.Store())], form)) + return ast.Name(name, ast.Load()) + + def generic_visit(self, node): + """Handle expressions we don't have custom code for.""" + assert isinstance(node, ast.expr) + res = self.assign(node) + return res, self.explanation_param(self.display(res)) + + def visit_Assert(self, assert_): + """Return the AST statements to replace the ast.Assert instance. + + This rewrites the test of an assertion to provide + intermediate values and replace it with an if statement which + raises an assertion error with a detailed explanation in case + the expression is false. 
+ + """ + if isinstance(assert_.test, ast.Tuple) and len(assert_.test.elts) >= 1: + from _pytest.warning_types import PytestWarning + import warnings + + warnings.warn_explicit( + PytestWarning("assertion is always true, perhaps remove parentheses?"), + category=None, + filename=str(self.module_path), + lineno=assert_.lineno, + ) + + self.statements = [] + self.variables = [] + self.variable_counter = itertools.count() + self.stack = [] + self.on_failure = [] + self.push_format_context() + # Rewrite assert into a bunch of statements. + top_condition, explanation = self.visit(assert_.test) + # Create failure message. + body = self.on_failure + negation = ast.UnaryOp(ast.Not(), top_condition) + self.statements.append(ast.If(negation, body, [])) + if assert_.msg: + assertmsg = self.helper("format_assertmsg", assert_.msg) + explanation = "\n>assert " + explanation + else: + assertmsg = ast.Str("") + explanation = "assert " + explanation + template = ast.BinOp(assertmsg, ast.Add(), ast.Str(explanation)) + msg = self.pop_format_context(template) + fmt = self.helper("format_explanation", msg) + err_name = ast.Name("AssertionError", ast.Load()) + exc = ast_Call(err_name, [fmt], []) + if sys.version_info[0] >= 3: + raise_ = ast.Raise(exc, None) + else: + raise_ = ast.Raise(exc, None, None) + body.append(raise_) + # Clear temporary variables by setting them to None. + if self.variables: + variables = [ast.Name(name, ast.Store()) for name in self.variables] + clear = ast.Assign(variables, _NameConstant(None)) + self.statements.append(clear) + # Fix line numbers. + for stmt in self.statements: + set_location(stmt, assert_.lineno, assert_.col_offset) + return self.statements + + def visit_Name(self, name): + # Display the repr of the name if it's a local variable or + # _should_repr_global_name() thinks it's acceptable. + locs = ast_Call(self.builtin("locals"), [], []) + inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs]) + dorepr = self.helper("should_repr_global_name", name) + test = ast.BoolOp(ast.Or(), [inlocs, dorepr]) + expr = ast.IfExp(test, self.display(name), ast.Str(name.id)) + return name, self.explanation_param(expr) + + def visit_BoolOp(self, boolop): + res_var = self.variable() + expl_list = self.assign(ast.List([], ast.Load())) + app = ast.Attribute(expl_list, "append", ast.Load()) + is_or = int(isinstance(boolop.op, ast.Or)) + body = save = self.statements + fail_save = self.on_failure + levels = len(boolop.values) - 1 + self.push_format_context() + # Process each operand, short-circuting if needed. 
+ for i, v in enumerate(boolop.values): + if i: + fail_inner = [] + # cond is set in a prior loop iteration below + self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa + self.on_failure = fail_inner + self.push_format_context() + res, expl = self.visit(v) + body.append(ast.Assign([ast.Name(res_var, ast.Store())], res)) + expl_format = self.pop_format_context(ast.Str(expl)) + call = ast_Call(app, [expl_format], []) + self.on_failure.append(ast.Expr(call)) + if i < levels: + cond = res + if is_or: + cond = ast.UnaryOp(ast.Not(), cond) + inner = [] + self.statements.append(ast.If(cond, inner, [])) + self.statements = body = inner + self.statements = save + self.on_failure = fail_save + expl_template = self.helper("format_boolop", expl_list, ast.Num(is_or)) + expl = self.pop_format_context(expl_template) + return ast.Name(res_var, ast.Load()), self.explanation_param(expl) + + def visit_UnaryOp(self, unary): + pattern = unary_map[unary.op.__class__] + operand_res, operand_expl = self.visit(unary.operand) + res = self.assign(ast.UnaryOp(unary.op, operand_res)) + return res, pattern % (operand_expl,) + + def visit_BinOp(self, binop): + symbol = binop_map[binop.op.__class__] + left_expr, left_expl = self.visit(binop.left) + right_expr, right_expl = self.visit(binop.right) + explanation = "(%s %s %s)" % (left_expl, symbol, right_expl) + res = self.assign(ast.BinOp(left_expr, binop.op, right_expr)) + return res, explanation + + def visit_Call_35(self, call): + """ + visit `ast.Call` nodes on Python3.5 and after + """ + new_func, func_expl = self.visit(call.func) + arg_expls = [] + new_args = [] + new_kwargs = [] + for arg in call.args: + res, expl = self.visit(arg) + arg_expls.append(expl) + new_args.append(res) + for keyword in call.keywords: + res, expl = self.visit(keyword.value) + new_kwargs.append(ast.keyword(keyword.arg, res)) + if keyword.arg: + arg_expls.append(keyword.arg + "=" + expl) + else: # **args have `arg` keywords with an .arg of None + arg_expls.append("**" + expl) + + expl = "%s(%s)" % (func_expl, ", ".join(arg_expls)) + new_call = ast.Call(new_func, new_args, new_kwargs) + res = self.assign(new_call) + res_expl = self.explanation_param(self.display(res)) + outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl) + return res, outer_expl + + def visit_Starred(self, starred): + # From Python 3.5, a Starred node can appear in a function call + res, expl = self.visit(starred.value) + return starred, "*" + expl + + def visit_Call_legacy(self, call): + """ + visit `ast.Call nodes on 3.4 and below` + """ + new_func, func_expl = self.visit(call.func) + arg_expls = [] + new_args = [] + new_kwargs = [] + new_star = new_kwarg = None + for arg in call.args: + res, expl = self.visit(arg) + new_args.append(res) + arg_expls.append(expl) + for keyword in call.keywords: + res, expl = self.visit(keyword.value) + new_kwargs.append(ast.keyword(keyword.arg, res)) + arg_expls.append(keyword.arg + "=" + expl) + if call.starargs: + new_star, expl = self.visit(call.starargs) + arg_expls.append("*" + expl) + if call.kwargs: + new_kwarg, expl = self.visit(call.kwargs) + arg_expls.append("**" + expl) + expl = "%s(%s)" % (func_expl, ", ".join(arg_expls)) + new_call = ast.Call(new_func, new_args, new_kwargs, new_star, new_kwarg) + res = self.assign(new_call) + res_expl = self.explanation_param(self.display(res)) + outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl) + return res, outer_expl + + # ast.Call signature changed on 3.5, + # conditionally change which methods is named + # 
visit_Call depending on Python version + if sys.version_info >= (3, 5): + visit_Call = visit_Call_35 + else: + visit_Call = visit_Call_legacy + + def visit_Attribute(self, attr): + if not isinstance(attr.ctx, ast.Load): + return self.generic_visit(attr) + value, value_expl = self.visit(attr.value) + res = self.assign(ast.Attribute(value, attr.attr, ast.Load())) + res_expl = self.explanation_param(self.display(res)) + pat = "%s\n{%s = %s.%s\n}" + expl = pat % (res_expl, res_expl, value_expl, attr.attr) + return res, expl + + def visit_Compare(self, comp): + self.push_format_context() + left_res, left_expl = self.visit(comp.left) + if isinstance(comp.left, (ast.Compare, ast.BoolOp)): + left_expl = "({})".format(left_expl) + res_variables = [self.variable() for i in range(len(comp.ops))] + load_names = [ast.Name(v, ast.Load()) for v in res_variables] + store_names = [ast.Name(v, ast.Store()) for v in res_variables] + it = zip(range(len(comp.ops)), comp.ops, comp.comparators) + expls = [] + syms = [] + results = [left_res] + for i, op, next_operand in it: + next_res, next_expl = self.visit(next_operand) + if isinstance(next_operand, (ast.Compare, ast.BoolOp)): + next_expl = "({})".format(next_expl) + results.append(next_res) + sym = binop_map[op.__class__] + syms.append(ast.Str(sym)) + expl = "%s %s %s" % (left_expl, sym, next_expl) + expls.append(ast.Str(expl)) + res_expr = ast.Compare(left_res, [op], [next_res]) + self.statements.append(ast.Assign([store_names[i]], res_expr)) + left_res, left_expl = next_res, next_expl + # Use pytest.assertion.util._reprcompare if that's available. + expl_call = self.helper( + "call_reprcompare", + ast.Tuple(syms, ast.Load()), + ast.Tuple(load_names, ast.Load()), + ast.Tuple(expls, ast.Load()), + ast.Tuple(results, ast.Load()), + ) + if len(comp.ops) > 1: + res = ast.BoolOp(ast.And(), load_names) + else: + res = load_names[0] + return res, self.explanation_param(self.pop_format_context(expl_call)) diff --git a/venv/Lib/site-packages/_pytest/assertion/truncate.py b/venv/Lib/site-packages/_pytest/assertion/truncate.py new file mode 100644 index 00000000..79adeca6 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/assertion/truncate.py @@ -0,0 +1,99 @@ +""" +Utilities for truncating assertion output. + +Current default behaviour is to truncate assertion explanations at +~8 terminal lines, unless running in "-vv" mode or running on CI. +""" +from __future__ import absolute_import, division, print_function +import os + +import six + + +DEFAULT_MAX_LINES = 8 +DEFAULT_MAX_CHARS = 8 * 80 +USAGE_MSG = "use '-vv' to show" + + +def truncate_if_required(explanation, item, max_length=None): + """ + Truncate this assertion explanation if the given test item is eligible. + """ + if _should_truncate_item(item): + return _truncate_explanation(explanation) + return explanation + + +def _should_truncate_item(item): + """ + Whether or not this test item is eligible for truncation. + """ + verbose = item.config.option.verbose + return verbose < 2 and not _running_on_ci() + + +def _running_on_ci(): + """Check if we're currently running on a CI system.""" + env_vars = ["CI", "BUILD_NUMBER"] + return any(var in os.environ for var in env_vars) + + +def _truncate_explanation(input_lines, max_lines=None, max_chars=None): + """ + Truncate given list of strings that makes up the assertion explanation. + + Truncates to either 8 lines, or 640 characters - whichever the input reaches + first. The remaining lines will be replaced by a usage message. 
+ """ + + if max_lines is None: + max_lines = DEFAULT_MAX_LINES + if max_chars is None: + max_chars = DEFAULT_MAX_CHARS + + # Check if truncation required + input_char_count = len("".join(input_lines)) + if len(input_lines) <= max_lines and input_char_count <= max_chars: + return input_lines + + # Truncate first to max_lines, and then truncate to max_chars if max_chars + # is exceeded. + truncated_explanation = input_lines[:max_lines] + truncated_explanation = _truncate_by_char_count(truncated_explanation, max_chars) + + # Add ellipsis to final line + truncated_explanation[-1] = truncated_explanation[-1] + "..." + + # Append useful message to explanation + truncated_line_count = len(input_lines) - len(truncated_explanation) + truncated_line_count += 1 # Account for the part-truncated final line + msg = "...Full output truncated" + if truncated_line_count == 1: + msg += " ({} line hidden)".format(truncated_line_count) + else: + msg += " ({} lines hidden)".format(truncated_line_count) + msg += ", {}".format(USAGE_MSG) + truncated_explanation.extend([six.text_type(""), six.text_type(msg)]) + return truncated_explanation + + +def _truncate_by_char_count(input_lines, max_chars): + # Check if truncation required + if len("".join(input_lines)) <= max_chars: + return input_lines + + # Find point at which input length exceeds total allowed length + iterated_char_count = 0 + for iterated_index, input_line in enumerate(input_lines): + if iterated_char_count + len(input_line) > max_chars: + break + iterated_char_count += len(input_line) + + # Create truncated explanation with modified final line + truncated_result = input_lines[:iterated_index] + final_line = input_lines[iterated_index] + if final_line: + final_line_truncate_point = max_chars - iterated_char_count + final_line = final_line[:final_line_truncate_point] + truncated_result.append(final_line) + return truncated_result diff --git a/venv/Lib/site-packages/_pytest/assertion/util.py b/venv/Lib/site-packages/_pytest/assertion/util.py new file mode 100644 index 00000000..15724ed3 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/assertion/util.py @@ -0,0 +1,338 @@ +"""Utilities for assertion debugging""" +from __future__ import absolute_import, division, print_function +import pprint + +import _pytest._code +import py +import six +from ..compat import Sequence + +u = six.text_type + +# The _reprcompare attribute on the util module is used by the new assertion +# interpretation code and assertion rewriter to detect this plugin was +# loaded and in turn call the hooks defined here as part of the +# DebugInterpreter. +_reprcompare = None + + +# the re-encoding is needed for python2 repr +# with non-ascii characters (see issue 877 and 1379) +def ecu(s): + try: + return u(s, "utf-8", "replace") + except TypeError: + return s + + +def format_explanation(explanation): + """This formats an explanation + + Normally all embedded newlines are escaped, however there are + three exceptions: \n{, \n} and \n~. The first two are intended + cover nested explanations, see function and attribute explanations + for examples (.visit_Call(), visit_Attribute()). The last one is + for when one explanation needs to span multiple lines, e.g. when + displaying diffs. 
+ """ + explanation = ecu(explanation) + lines = _split_explanation(explanation) + result = _format_lines(lines) + return u("\n").join(result) + + +def _split_explanation(explanation): + """Return a list of individual lines in the explanation + + This will return a list of lines split on '\n{', '\n}' and '\n~'. + Any other newlines will be escaped and appear in the line as the + literal '\n' characters. + """ + raw_lines = (explanation or u("")).split("\n") + lines = [raw_lines[0]] + for values in raw_lines[1:]: + if values and values[0] in ["{", "}", "~", ">"]: + lines.append(values) + else: + lines[-1] += "\\n" + values + return lines + + +def _format_lines(lines): + """Format the individual lines + + This will replace the '{', '}' and '~' characters of our mini + formatting language with the proper 'where ...', 'and ...' and ' + + ...' text, taking care of indentation along the way. + + Return a list of formatted lines. + """ + result = lines[:1] + stack = [0] + stackcnt = [0] + for line in lines[1:]: + if line.startswith("{"): + if stackcnt[-1]: + s = u("and ") + else: + s = u("where ") + stack.append(len(result)) + stackcnt[-1] += 1 + stackcnt.append(0) + result.append(u(" +") + u(" ") * (len(stack) - 1) + s + line[1:]) + elif line.startswith("}"): + stack.pop() + stackcnt.pop() + result[stack[-1]] += line[1:] + else: + assert line[0] in ["~", ">"] + stack[-1] += 1 + indent = len(stack) if line.startswith("~") else len(stack) - 1 + result.append(u(" ") * indent + line[1:]) + assert len(stack) == 1 + return result + + +# Provide basestring in python3 +try: + basestring = basestring +except NameError: + basestring = str + + +def assertrepr_compare(config, op, left, right): + """Return specialised explanations for some operators/operands""" + width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op + left_repr = py.io.saferepr(left, maxsize=int(width // 2)) + right_repr = py.io.saferepr(right, maxsize=width - len(left_repr)) + + summary = u("%s %s %s") % (ecu(left_repr), op, ecu(right_repr)) + + def issequence(x): + return isinstance(x, Sequence) and not isinstance(x, basestring) + + def istext(x): + return isinstance(x, basestring) + + def isdict(x): + return isinstance(x, dict) + + def isset(x): + return isinstance(x, (set, frozenset)) + + def isiterable(obj): + try: + iter(obj) + return not istext(obj) + except TypeError: + return False + + verbose = config.getoption("verbose") + explanation = None + try: + if op == "==": + if istext(left) and istext(right): + explanation = _diff_text(left, right, verbose) + else: + if issequence(left) and issequence(right): + explanation = _compare_eq_sequence(left, right, verbose) + elif isset(left) and isset(right): + explanation = _compare_eq_set(left, right, verbose) + elif isdict(left) and isdict(right): + explanation = _compare_eq_dict(left, right, verbose) + if isiterable(left) and isiterable(right): + expl = _compare_eq_iterable(left, right, verbose) + if explanation is not None: + explanation.extend(expl) + else: + explanation = expl + elif op == "not in": + if istext(left) and istext(right): + explanation = _notin_text(left, right, verbose) + except Exception: + explanation = [ + u( + "(pytest_assertion plugin: representation of details failed. 
" + "Probably an object has a faulty __repr__.)" + ), + u(_pytest._code.ExceptionInfo()), + ] + + if not explanation: + return None + + return [summary] + explanation + + +def _diff_text(left, right, verbose=False): + """Return the explanation for the diff between text or bytes + + Unless --verbose is used this will skip leading and trailing + characters which are identical to keep the diff minimal. + + If the input are bytes they will be safely converted to text. + """ + from difflib import ndiff + + explanation = [] + + def escape_for_readable_diff(binary_text): + """ + Ensures that the internal string is always valid unicode, converting any bytes safely to valid unicode. + This is done using repr() which then needs post-processing to fix the encompassing quotes and un-escape + newlines and carriage returns (#429). + """ + r = six.text_type(repr(binary_text)[1:-1]) + r = r.replace(r"\n", "\n") + r = r.replace(r"\r", "\r") + return r + + if isinstance(left, bytes): + left = escape_for_readable_diff(left) + if isinstance(right, bytes): + right = escape_for_readable_diff(right) + if not verbose: + i = 0 # just in case left or right has zero length + for i in range(min(len(left), len(right))): + if left[i] != right[i]: + break + if i > 42: + i -= 10 # Provide some context + explanation = [ + u("Skipping %s identical leading characters in diff, use -v to show") + % i + ] + left = left[i:] + right = right[i:] + if len(left) == len(right): + for i in range(len(left)): + if left[-i] != right[-i]: + break + if i > 42: + i -= 10 # Provide some context + explanation += [ + u( + "Skipping %s identical trailing " + "characters in diff, use -v to show" + ) + % i + ] + left = left[:-i] + right = right[:-i] + keepends = True + if left.isspace() or right.isspace(): + left = repr(str(left)) + right = repr(str(right)) + explanation += [u"Strings contain only whitespace, escaping them using repr()"] + explanation += [ + line.strip("\n") + for line in ndiff(left.splitlines(keepends), right.splitlines(keepends)) + ] + return explanation + + +def _compare_eq_iterable(left, right, verbose=False): + if not verbose: + return [u("Use -v to get the full diff")] + # dynamic import to speedup pytest + import difflib + + try: + left_formatting = pprint.pformat(left).splitlines() + right_formatting = pprint.pformat(right).splitlines() + explanation = [u("Full diff:")] + except Exception: + # hack: PrettyPrinter.pformat() in python 2 fails when formatting items that can't be sorted(), ie, calling + # sorted() on a list would raise. See issue #718. + # As a workaround, the full diff is generated by using the repr() string of each item of each container. 
+ left_formatting = sorted(repr(x) for x in left) + right_formatting = sorted(repr(x) for x in right) + explanation = [u("Full diff (fallback to calling repr on each item):")] + explanation.extend( + line.strip() for line in difflib.ndiff(left_formatting, right_formatting) + ) + return explanation + + +def _compare_eq_sequence(left, right, verbose=False): + explanation = [] + for i in range(min(len(left), len(right))): + if left[i] != right[i]: + explanation += [u("At index %s diff: %r != %r") % (i, left[i], right[i])] + break + if len(left) > len(right): + explanation += [ + u("Left contains more items, first extra item: %s") + % py.io.saferepr(left[len(right)]) + ] + elif len(left) < len(right): + explanation += [ + u("Right contains more items, first extra item: %s") + % py.io.saferepr(right[len(left)]) + ] + return explanation + + +def _compare_eq_set(left, right, verbose=False): + explanation = [] + diff_left = left - right + diff_right = right - left + if diff_left: + explanation.append(u("Extra items in the left set:")) + for item in diff_left: + explanation.append(py.io.saferepr(item)) + if diff_right: + explanation.append(u("Extra items in the right set:")) + for item in diff_right: + explanation.append(py.io.saferepr(item)) + return explanation + + +def _compare_eq_dict(left, right, verbose=False): + explanation = [] + common = set(left).intersection(set(right)) + same = {k: left[k] for k in common if left[k] == right[k]} + if same and verbose < 2: + explanation += [u("Omitting %s identical items, use -vv to show") % len(same)] + elif same: + explanation += [u("Common items:")] + explanation += pprint.pformat(same).splitlines() + diff = {k for k in common if left[k] != right[k]} + if diff: + explanation += [u("Differing items:")] + for k in diff: + explanation += [ + py.io.saferepr({k: left[k]}) + " != " + py.io.saferepr({k: right[k]}) + ] + extra_left = set(left) - set(right) + if extra_left: + explanation.append(u("Left contains more items:")) + explanation.extend( + pprint.pformat({k: left[k] for k in extra_left}).splitlines() + ) + extra_right = set(right) - set(left) + if extra_right: + explanation.append(u("Right contains more items:")) + explanation.extend( + pprint.pformat({k: right[k] for k in extra_right}).splitlines() + ) + return explanation + + +def _notin_text(term, text, verbose=False): + index = text.find(term) + head = text[:index] + tail = text[index + len(term) :] + correct_text = head + tail + diff = _diff_text(correct_text, text, verbose) + newdiff = [u("%s is contained here:") % py.io.saferepr(term, maxsize=42)] + for line in diff: + if line.startswith(u("Skipping")): + continue + if line.startswith(u("- ")): + continue + if line.startswith(u("+ ")): + newdiff.append(u(" ") + line[2:]) + else: + newdiff.append(line) + return newdiff diff --git a/venv/Lib/site-packages/_pytest/cacheprovider.py b/venv/Lib/site-packages/_pytest/cacheprovider.py new file mode 100644 index 00000000..94f42e8c --- /dev/null +++ b/venv/Lib/site-packages/_pytest/cacheprovider.py @@ -0,0 +1,362 @@ +""" +merged implementation of the cache provider + +the name cache was not chosen to ensure pluggy automatically +ignores the external pytest-cache +""" +from __future__ import absolute_import, division, print_function +from collections import OrderedDict + +import py +import six +import attr + +import pytest +import json + +from .compat import _PY2 as PY2 +from .pathlib import Path, resolve_from_str, rmtree + +README_CONTENT = u"""\ +# pytest cache directory # + +This directory contains 
data from the pytest's cache plugin, +which provides the `--lf` and `--ff` options, as well as the `cache` fixture. + +**Do not** commit this to version control. + +See [the docs](https://docs.pytest.org/en/latest/cache.html) for more information. +""" + + +@attr.s +class Cache(object): + _cachedir = attr.ib(repr=False) + _config = attr.ib(repr=False) + + @classmethod + def for_config(cls, config): + cachedir = cls.cache_dir_from_config(config) + if config.getoption("cacheclear") and cachedir.exists(): + rmtree(cachedir, force=True) + cachedir.mkdir() + return cls(cachedir, config) + + @staticmethod + def cache_dir_from_config(config): + return resolve_from_str(config.getini("cache_dir"), config.rootdir) + + def warn(self, fmt, **args): + from _pytest.warnings import _issue_config_warning + from _pytest.warning_types import PytestWarning + + _issue_config_warning( + PytestWarning(fmt.format(**args) if args else fmt), self._config + ) + + def makedir(self, name): + """ return a directory path object with the given name. If the + directory does not yet exist, it will be created. You can use it + to manage files likes e. g. store/retrieve database + dumps across test sessions. + + :param name: must be a string not containing a ``/`` separator. + Make sure the name contains your plugin or application + identifiers to prevent clashes with other cache users. + """ + name = Path(name) + if len(name.parts) > 1: + raise ValueError("name is not allowed to contain path separators") + res = self._cachedir.joinpath("d", name) + res.mkdir(exist_ok=True, parents=True) + return py.path.local(res) + + def _getvaluepath(self, key): + return self._cachedir.joinpath("v", Path(key)) + + def get(self, key, default): + """ return cached value for the given key. If no value + was yet cached or the value cannot be read, the specified + default is returned. + + :param key: must be a ``/`` separated value. Usually the first + name is the name of your plugin or your application. + :param default: must be provided in case of a cache-miss or + invalid cache values. + + """ + path = self._getvaluepath(key) + try: + with path.open("r") as f: + return json.load(f) + except (ValueError, IOError, OSError): + return default + + def set(self, key, value): + """ save value for the given key. + + :param key: must be a ``/`` separated value. Usually the first + name is the name of your plugin or your application. + :param value: must be of any combination of basic + python types, including nested types + like e. g. lists of dictionaries. 
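The `get()`/`set()` pair documented above is normally reached through the `cache` fixture defined later in this file. A hypothetical usage sketch for a test suite like this project's (the key name is made up; any JSON-serialisable value works):

```python
# Values written through the cache fixture survive between test runs because
# they are stored as JSON under .pytest_cache/v/<key>.
def test_counts_runs(cache):
    runs = cache.get("store_manager/run_count", 0)
    cache.set("store_manager/run_count", runs + 1)
    assert runs >= 0
```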
+ """ + path = self._getvaluepath(key) + try: + path.parent.mkdir(exist_ok=True, parents=True) + except (IOError, OSError): + self.warn("could not create cache path {path}", path=path) + return + try: + f = path.open("wb" if PY2 else "w") + except (IOError, OSError): + self.warn("cache could not write path {path}", path=path) + else: + with f: + json.dump(value, f, indent=2, sort_keys=True) + self._ensure_supporting_files() + + def _ensure_supporting_files(self): + """Create supporting files in the cache dir that are not really part of the cache.""" + if self._cachedir.is_dir(): + readme_path = self._cachedir / "README.md" + if not readme_path.is_file(): + readme_path.write_text(README_CONTENT) + + msg = u"# created by pytest automatically, do not change\n*" + self._cachedir.joinpath(".gitignore").write_text(msg, encoding="UTF-8") + + +class LFPlugin(object): + """ Plugin which implements the --lf (run last-failing) option """ + + def __init__(self, config): + self.config = config + active_keys = "lf", "failedfirst" + self.active = any(config.getoption(key) for key in active_keys) + self.lastfailed = config.cache.get("cache/lastfailed", {}) + self._previously_failed_count = None + self._no_failures_behavior = self.config.getoption("last_failed_no_failures") + + def pytest_report_collectionfinish(self): + if self.active and self.config.getoption("verbose") >= 0: + if not self._previously_failed_count: + return None + noun = "failure" if self._previously_failed_count == 1 else "failures" + suffix = " first" if self.config.getoption("failedfirst") else "" + mode = "rerun previous {count} {noun}{suffix}".format( + count=self._previously_failed_count, suffix=suffix, noun=noun + ) + return "run-last-failure: %s" % mode + + def pytest_runtest_logreport(self, report): + if (report.when == "call" and report.passed) or report.skipped: + self.lastfailed.pop(report.nodeid, None) + elif report.failed: + self.lastfailed[report.nodeid] = True + + def pytest_collectreport(self, report): + passed = report.outcome in ("passed", "skipped") + if passed: + if report.nodeid in self.lastfailed: + self.lastfailed.pop(report.nodeid) + self.lastfailed.update((item.nodeid, True) for item in report.result) + else: + self.lastfailed[report.nodeid] = True + + def pytest_collection_modifyitems(self, session, config, items): + if self.active: + if self.lastfailed: + previously_failed = [] + previously_passed = [] + for item in items: + if item.nodeid in self.lastfailed: + previously_failed.append(item) + else: + previously_passed.append(item) + self._previously_failed_count = len(previously_failed) + if not previously_failed: + # running a subset of all tests with recorded failures outside + # of the set of tests currently executing + return + if self.config.getoption("lf"): + items[:] = previously_failed + config.hook.pytest_deselected(items=previously_passed) + else: + items[:] = previously_failed + previously_passed + elif self._no_failures_behavior == "none": + config.hook.pytest_deselected(items=items) + items[:] = [] + + def pytest_sessionfinish(self, session): + config = self.config + if config.getoption("cacheshow") or hasattr(config, "slaveinput"): + return + + saved_lastfailed = config.cache.get("cache/lastfailed", {}) + if saved_lastfailed != self.lastfailed: + config.cache.set("cache/lastfailed", self.lastfailed) + + +class NFPlugin(object): + """ Plugin which implements the --nf (run new-first) option """ + + def __init__(self, config): + self.config = config + self.active = config.option.newfirst + 
self.cached_nodeids = config.cache.get("cache/nodeids", []) + + def pytest_collection_modifyitems(self, session, config, items): + if self.active: + new_items = OrderedDict() + other_items = OrderedDict() + for item in items: + if item.nodeid not in self.cached_nodeids: + new_items[item.nodeid] = item + else: + other_items[item.nodeid] = item + + items[:] = self._get_increasing_order( + six.itervalues(new_items) + ) + self._get_increasing_order(six.itervalues(other_items)) + self.cached_nodeids = [x.nodeid for x in items if isinstance(x, pytest.Item)] + + def _get_increasing_order(self, items): + return sorted(items, key=lambda item: item.fspath.mtime(), reverse=True) + + def pytest_sessionfinish(self, session): + config = self.config + if config.getoption("cacheshow") or hasattr(config, "slaveinput"): + return + + config.cache.set("cache/nodeids", self.cached_nodeids) + + +def pytest_addoption(parser): + group = parser.getgroup("general") + group.addoption( + "--lf", + "--last-failed", + action="store_true", + dest="lf", + help="rerun only the tests that failed " + "at the last run (or all if none failed)", + ) + group.addoption( + "--ff", + "--failed-first", + action="store_true", + dest="failedfirst", + help="run all tests but run the last failures first. " + "This may re-order tests and thus lead to " + "repeated fixture setup/teardown", + ) + group.addoption( + "--nf", + "--new-first", + action="store_true", + dest="newfirst", + help="run tests from new files first, then the rest of the tests " + "sorted by file mtime", + ) + group.addoption( + "--cache-show", + action="store_true", + dest="cacheshow", + help="show cache contents, don't perform collection or tests", + ) + group.addoption( + "--cache-clear", + action="store_true", + dest="cacheclear", + help="remove all cache contents at start of test run.", + ) + parser.addini("cache_dir", default=".pytest_cache", help="cache directory path.") + group.addoption( + "--lfnf", + "--last-failed-no-failures", + action="store", + dest="last_failed_no_failures", + choices=("all", "none"), + default="all", + help="change the behavior when no test failed in the last run or no " + "information about the last failures was found in the cache", + ) + + +def pytest_cmdline_main(config): + if config.option.cacheshow: + from _pytest.main import wrap_session + + return wrap_session(config, cacheshow) + + +@pytest.hookimpl(tryfirst=True) +def pytest_configure(config): + config.cache = Cache.for_config(config) + config.pluginmanager.register(LFPlugin(config), "lfplugin") + config.pluginmanager.register(NFPlugin(config), "nfplugin") + + +@pytest.fixture +def cache(request): + """ + Return a cache object that can persist state between testing sessions. + + cache.get(key, default) + cache.set(key, value) + + Keys must be a ``/`` separated value, where the first part is usually the + name of your plugin or application to avoid clashes with other cache users. + + Values can be any object handled by the json stdlib module. + """ + return request.config.cache + + +def pytest_report_header(config): + if config.option.verbose: + cachedir = config.cache._cachedir + # TODO: evaluate generating upward relative paths + # starting with .., ../.. 
if sensible + + try: + displaypath = cachedir.relative_to(config.rootdir) + except ValueError: + displaypath = cachedir + return "cachedir: {}".format(displaypath) + + +def cacheshow(config, session): + from pprint import pformat + + tw = py.io.TerminalWriter() + tw.line("cachedir: " + str(config.cache._cachedir)) + if not config.cache._cachedir.is_dir(): + tw.line("cache is empty") + return 0 + dummy = object() + basedir = config.cache._cachedir + vdir = basedir / "v" + tw.sep("-", "cache values") + for valpath in sorted(x for x in vdir.rglob("*") if x.is_file()): + key = valpath.relative_to(vdir) + val = config.cache.get(key, dummy) + if val is dummy: + tw.line("%s contains unreadable content, will be ignored" % key) + else: + tw.line("%s contains:" % key) + for line in pformat(val).splitlines(): + tw.line(" " + line) + + ddir = basedir / "d" + if ddir.is_dir(): + contents = sorted(ddir.rglob("*")) + tw.sep("-", "cache directories") + for p in contents: + # if p.check(dir=1): + # print("%s/" % p.relto(basedir)) + if p.is_file(): + key = p.relative_to(basedir) + tw.line("{} is a file of length {:d}".format(key, p.stat().st_size)) + return 0 diff --git a/venv/Lib/site-packages/_pytest/capture.py b/venv/Lib/site-packages/_pytest/capture.py new file mode 100644 index 00000000..dfb48896 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/capture.py @@ -0,0 +1,780 @@ +""" +per-test stdout/stderr capturing mechanism. + +""" +from __future__ import absolute_import, division, print_function + +import collections +import contextlib +import sys +import os +import io +from io import UnsupportedOperation +from tempfile import TemporaryFile + +import six +import pytest +from _pytest.compat import CaptureIO + +patchsysdict = {0: "stdin", 1: "stdout", 2: "stderr"} + + +def pytest_addoption(parser): + group = parser.getgroup("general") + group._addoption( + "--capture", + action="store", + default="fd" if hasattr(os, "dup") else "sys", + metavar="method", + choices=["fd", "sys", "no"], + help="per-test capturing method: one of fd|sys|no.", + ) + group._addoption( + "-s", + action="store_const", + const="no", + dest="capture", + help="shortcut for --capture=no.", + ) + + +@pytest.hookimpl(hookwrapper=True) +def pytest_load_initial_conftests(early_config, parser, args): + ns = early_config.known_args_namespace + if ns.capture == "fd": + _py36_windowsconsoleio_workaround(sys.stdout) + _colorama_workaround() + _readline_workaround() + pluginmanager = early_config.pluginmanager + capman = CaptureManager(ns.capture) + pluginmanager.register(capman, "capturemanager") + + # make sure that capturemanager is properly reset at final shutdown + early_config.add_cleanup(capman.stop_global_capturing) + + # make sure logging does not raise exceptions at the end + def silence_logging_at_shutdown(): + if "logging" in sys.modules: + sys.modules["logging"].raiseExceptions = False + + early_config.add_cleanup(silence_logging_at_shutdown) + + # finally trigger conftest loading but while capturing (issue93) + capman.start_global_capturing() + outcome = yield + capman.suspend_global_capture() + if outcome.excinfo is not None: + out, err = capman.read_global_capture() + sys.stdout.write(out) + sys.stderr.write(err) + + +class CaptureManager(object): + """ + Capture plugin, manages that the appropriate capture method is enabled/disabled during collection and each + test phase (setup, call, teardown). After each of those points, the captured output is obtained and + attached to the collection/runtest report. 
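The `--lf`/`--ff` and `--nf` plugins above keep their bookkeeping in ordinary cache keys, so the data they rely on can be read back like any other cached value; a sketch using the keys that `LFPlugin` and `NFPlugin` are shown writing:

```python
# Peek at the data behind --lf/--ff and --nf through the cache fixture.
def test_show_cache_bookkeeping(cache):
    lastfailed = cache.get("cache/lastfailed", {})  # nodeid -> True
    nodeids = cache.get("cache/nodeids", [])        # previously collected ids
    assert isinstance(lastfailed, dict)
    assert isinstance(nodeids, list)
```

`pytest --cache-show` prints the same information from the command line, and `--cache-clear` wipes it before a run.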
+ + There are two levels of capture: + * global: which is enabled by default and can be suppressed by the ``-s`` option. This is always enabled/disabled + during collection and each test phase. + * fixture: when a test function or one of its fixture depend on the ``capsys`` or ``capfd`` fixtures. In this + case special handling is needed to ensure the fixtures take precedence over the global capture. + """ + + def __init__(self, method): + self._method = method + self._global_capturing = None + self._current_item = None + + def _getcapture(self, method): + if method == "fd": + return MultiCapture(out=True, err=True, Capture=FDCapture) + elif method == "sys": + return MultiCapture(out=True, err=True, Capture=SysCapture) + elif method == "no": + return MultiCapture(out=False, err=False, in_=False) + else: + raise ValueError("unknown capturing method: %r" % method) + + # Global capturing control + + def start_global_capturing(self): + assert self._global_capturing is None + self._global_capturing = self._getcapture(self._method) + self._global_capturing.start_capturing() + + def stop_global_capturing(self): + if self._global_capturing is not None: + self._global_capturing.pop_outerr_to_orig() + self._global_capturing.stop_capturing() + self._global_capturing = None + + def resume_global_capture(self): + self._global_capturing.resume_capturing() + + def suspend_global_capture(self, in_=False): + cap = getattr(self, "_global_capturing", None) + if cap is not None: + cap.suspend_capturing(in_=in_) + + def read_global_capture(self): + return self._global_capturing.readouterr() + + # Fixture Control (its just forwarding, think about removing this later) + + def activate_fixture(self, item): + """If the current item is using ``capsys`` or ``capfd``, activate them so they take precedence over + the global capture. 
+ """ + fixture = getattr(item, "_capture_fixture", None) + if fixture is not None: + fixture._start() + + def deactivate_fixture(self, item): + """Deactivates the ``capsys`` or ``capfd`` fixture of this item, if any.""" + fixture = getattr(item, "_capture_fixture", None) + if fixture is not None: + fixture.close() + + def suspend_fixture(self, item): + fixture = getattr(item, "_capture_fixture", None) + if fixture is not None: + fixture._suspend() + + def resume_fixture(self, item): + fixture = getattr(item, "_capture_fixture", None) + if fixture is not None: + fixture._resume() + + # Helper context managers + + @contextlib.contextmanager + def global_and_fixture_disabled(self): + """Context manager to temporarily disables global and current fixture capturing.""" + # Need to undo local capsys-et-al if exists before disabling global capture + self.suspend_fixture(self._current_item) + self.suspend_global_capture(in_=False) + try: + yield + finally: + self.resume_global_capture() + self.resume_fixture(self._current_item) + + @contextlib.contextmanager + def item_capture(self, when, item): + self.resume_global_capture() + self.activate_fixture(item) + try: + yield + finally: + self.deactivate_fixture(item) + self.suspend_global_capture(in_=False) + + out, err = self.read_global_capture() + item.add_report_section(when, "stdout", out) + item.add_report_section(when, "stderr", err) + + # Hooks + + @pytest.hookimpl(hookwrapper=True) + def pytest_make_collect_report(self, collector): + if isinstance(collector, pytest.File): + self.resume_global_capture() + outcome = yield + self.suspend_global_capture() + out, err = self.read_global_capture() + rep = outcome.get_result() + if out: + rep.sections.append(("Captured stdout", out)) + if err: + rep.sections.append(("Captured stderr", err)) + else: + yield + + @pytest.hookimpl(hookwrapper=True) + def pytest_runtest_protocol(self, item): + self._current_item = item + yield + self._current_item = None + + @pytest.hookimpl(hookwrapper=True) + def pytest_runtest_setup(self, item): + with self.item_capture("setup", item): + yield + + @pytest.hookimpl(hookwrapper=True) + def pytest_runtest_call(self, item): + with self.item_capture("call", item): + yield + + @pytest.hookimpl(hookwrapper=True) + def pytest_runtest_teardown(self, item): + with self.item_capture("teardown", item): + yield + + @pytest.hookimpl(tryfirst=True) + def pytest_keyboard_interrupt(self, excinfo): + self.stop_global_capturing() + + @pytest.hookimpl(tryfirst=True) + def pytest_internalerror(self, excinfo): + self.stop_global_capturing() + + +capture_fixtures = {"capfd", "capfdbinary", "capsys", "capsysbinary"} + + +def _ensure_only_one_capture_fixture(request, name): + fixtures = set(request.fixturenames) & capture_fixtures - {name} + if fixtures: + fixtures = sorted(fixtures) + fixtures = fixtures[0] if len(fixtures) == 1 else fixtures + raise request.raiseerror( + "cannot use {} and {} at the same time".format(fixtures, name) + ) + + +@pytest.fixture +def capsys(request): + """Enable capturing of writes to ``sys.stdout`` and ``sys.stderr`` and make + captured output available via ``capsys.readouterr()`` method calls + which return a ``(out, err)`` namedtuple. ``out`` and ``err`` will be ``text`` + objects. 
+ """ + _ensure_only_one_capture_fixture(request, "capsys") + with _install_capture_fixture_on_item(request, SysCapture) as fixture: + yield fixture + + +@pytest.fixture +def capsysbinary(request): + """Enable capturing of writes to ``sys.stdout`` and ``sys.stderr`` and make + captured output available via ``capsys.readouterr()`` method calls + which return a ``(out, err)`` tuple. ``out`` and ``err`` will be ``bytes`` + objects. + """ + _ensure_only_one_capture_fixture(request, "capsysbinary") + # Currently, the implementation uses the python3 specific `.buffer` + # property of CaptureIO. + if sys.version_info < (3,): + raise request.raiseerror("capsysbinary is only supported on python 3") + with _install_capture_fixture_on_item(request, SysCaptureBinary) as fixture: + yield fixture + + +@pytest.fixture +def capfd(request): + """Enable capturing of writes to file descriptors ``1`` and ``2`` and make + captured output available via ``capfd.readouterr()`` method calls + which return a ``(out, err)`` tuple. ``out`` and ``err`` will be ``text`` + objects. + """ + _ensure_only_one_capture_fixture(request, "capfd") + if not hasattr(os, "dup"): + pytest.skip( + "capfd fixture needs os.dup function which is not available in this system" + ) + with _install_capture_fixture_on_item(request, FDCapture) as fixture: + yield fixture + + +@pytest.fixture +def capfdbinary(request): + """Enable capturing of write to file descriptors 1 and 2 and make + captured output available via ``capfdbinary.readouterr`` method calls + which return a ``(out, err)`` tuple. ``out`` and ``err`` will be + ``bytes`` objects. + """ + _ensure_only_one_capture_fixture(request, "capfdbinary") + if not hasattr(os, "dup"): + pytest.skip( + "capfdbinary fixture needs os.dup function which is not available in this system" + ) + with _install_capture_fixture_on_item(request, FDCaptureBinary) as fixture: + yield fixture + + +@contextlib.contextmanager +def _install_capture_fixture_on_item(request, capture_class): + """ + Context manager which creates a ``CaptureFixture`` instance and "installs" it on + the item/node of the given request. Used by ``capsys`` and ``capfd``. + + The CaptureFixture is added as attribute of the item because it needs to accessed + by ``CaptureManager`` during its ``pytest_runtest_*`` hooks. + """ + request.node._capture_fixture = fixture = CaptureFixture(capture_class, request) + capmanager = request.config.pluginmanager.getplugin("capturemanager") + # need to active this fixture right away in case it is being used by another fixture (setup phase) + # if this fixture is being used only by a test function (call phase), then we wouldn't need this + # activation, but it doesn't hurt + capmanager.activate_fixture(request.node) + yield fixture + fixture.close() + del request.node._capture_fixture + + +class CaptureFixture(object): + """ + Object returned by :py:func:`capsys`, :py:func:`capsysbinary`, :py:func:`capfd` and :py:func:`capfdbinary` + fixtures. 
+ """ + + def __init__(self, captureclass, request): + self.captureclass = captureclass + self.request = request + self._capture = None + self._captured_out = self.captureclass.EMPTY_BUFFER + self._captured_err = self.captureclass.EMPTY_BUFFER + + def _start(self): + # Start if not started yet + if getattr(self, "_capture", None) is None: + self._capture = MultiCapture( + out=True, err=True, in_=False, Capture=self.captureclass + ) + self._capture.start_capturing() + + def close(self): + if self._capture is not None: + out, err = self._capture.pop_outerr_to_orig() + self._captured_out += out + self._captured_err += err + self._capture.stop_capturing() + self._capture = None + + def readouterr(self): + """Read and return the captured output so far, resetting the internal buffer. + + :return: captured content as a namedtuple with ``out`` and ``err`` string attributes + """ + captured_out, captured_err = self._captured_out, self._captured_err + if self._capture is not None: + out, err = self._capture.readouterr() + captured_out += out + captured_err += err + self._captured_out = self.captureclass.EMPTY_BUFFER + self._captured_err = self.captureclass.EMPTY_BUFFER + return CaptureResult(captured_out, captured_err) + + def _suspend(self): + """Suspends this fixture's own capturing temporarily.""" + self._capture.suspend_capturing() + + def _resume(self): + """Resumes this fixture's own capturing temporarily.""" + self._capture.resume_capturing() + + @contextlib.contextmanager + def disabled(self): + """Temporarily disables capture while inside the 'with' block.""" + capmanager = self.request.config.pluginmanager.getplugin("capturemanager") + with capmanager.global_and_fixture_disabled(): + yield + + +def safe_text_dupfile(f, mode, default_encoding="UTF8"): + """ return an open text file object that's a duplicate of f on the + FD-level if possible. + """ + encoding = getattr(f, "encoding", None) + try: + fd = f.fileno() + except Exception: + if "b" not in getattr(f, "mode", "") and hasattr(f, "encoding"): + # we seem to have a text stream, let's just use it + return f + else: + newfd = os.dup(fd) + if "b" not in mode: + mode += "b" + f = os.fdopen(newfd, mode, 0) # no buffering + return EncodedFile(f, encoding or default_encoding) + + +class EncodedFile(object): + errors = "strict" # possibly needed by py3 code (issue555) + + def __init__(self, buffer, encoding): + self.buffer = buffer + self.encoding = encoding + + def write(self, obj): + if isinstance(obj, six.text_type): + obj = obj.encode(self.encoding, "replace") + self.buffer.write(obj) + + def writelines(self, linelist): + data = "".join(linelist) + self.write(data) + + @property + def name(self): + """Ensure that file.name is a string.""" + return repr(self.buffer) + + def __getattr__(self, name): + return getattr(object.__getattribute__(self, "buffer"), name) + + +CaptureResult = collections.namedtuple("CaptureResult", ["out", "err"]) + + +class MultiCapture(object): + out = err = in_ = None + + def __init__(self, out=True, err=True, in_=True, Capture=None): + if in_: + self.in_ = Capture(0) + if out: + self.out = Capture(1) + if err: + self.err = Capture(2) + + def start_capturing(self): + if self.in_: + self.in_.start() + if self.out: + self.out.start() + if self.err: + self.err.start() + + def pop_outerr_to_orig(self): + """ pop current snapshot out/err capture and flush to orig streams. 
""" + out, err = self.readouterr() + if out: + self.out.writeorg(out) + if err: + self.err.writeorg(err) + return out, err + + def suspend_capturing(self, in_=False): + if self.out: + self.out.suspend() + if self.err: + self.err.suspend() + if in_ and self.in_: + self.in_.suspend() + self._in_suspended = True + + def resume_capturing(self): + if self.out: + self.out.resume() + if self.err: + self.err.resume() + if hasattr(self, "_in_suspended"): + self.in_.resume() + del self._in_suspended + + def stop_capturing(self): + """ stop capturing and reset capturing streams """ + if hasattr(self, "_reset"): + raise ValueError("was already stopped") + self._reset = True + if self.out: + self.out.done() + if self.err: + self.err.done() + if self.in_: + self.in_.done() + + def readouterr(self): + """ return snapshot unicode value of stdout/stderr capturings. """ + return CaptureResult( + self.out.snap() if self.out is not None else "", + self.err.snap() if self.err is not None else "", + ) + + +class NoCapture(object): + EMPTY_BUFFER = None + __init__ = start = done = suspend = resume = lambda *args: None + + +class FDCaptureBinary(object): + """Capture IO to/from a given os-level filedescriptor. + + snap() produces `bytes` + """ + + EMPTY_BUFFER = bytes() + + def __init__(self, targetfd, tmpfile=None): + self.targetfd = targetfd + try: + self.targetfd_save = os.dup(self.targetfd) + except OSError: + self.start = lambda: None + self.done = lambda: None + else: + if targetfd == 0: + assert not tmpfile, "cannot set tmpfile with stdin" + tmpfile = open(os.devnull, "r") + self.syscapture = SysCapture(targetfd) + else: + if tmpfile is None: + f = TemporaryFile() + with f: + tmpfile = safe_text_dupfile(f, mode="wb+") + if targetfd in patchsysdict: + self.syscapture = SysCapture(targetfd, tmpfile) + else: + self.syscapture = NoCapture() + self.tmpfile = tmpfile + self.tmpfile_fd = tmpfile.fileno() + + def __repr__(self): + return "" % (self.targetfd, self.targetfd_save) + + def start(self): + """ Start capturing on targetfd using memorized tmpfile. """ + try: + os.fstat(self.targetfd_save) + except (AttributeError, OSError): + raise ValueError("saved filedescriptor not valid anymore") + os.dup2(self.tmpfile_fd, self.targetfd) + self.syscapture.start() + + def snap(self): + self.tmpfile.seek(0) + res = self.tmpfile.read() + self.tmpfile.seek(0) + self.tmpfile.truncate() + return res + + def done(self): + """ stop capturing, restore streams, return original capture file, + seeked to position zero. """ + targetfd_save = self.__dict__.pop("targetfd_save") + os.dup2(targetfd_save, self.targetfd) + os.close(targetfd_save) + self.syscapture.done() + _attempt_to_close_capture_file(self.tmpfile) + + def suspend(self): + self.syscapture.suspend() + os.dup2(self.targetfd_save, self.targetfd) + + def resume(self): + self.syscapture.resume() + os.dup2(self.tmpfile_fd, self.targetfd) + + def writeorg(self, data): + """ write to original file descriptor. """ + if isinstance(data, six.text_type): + data = data.encode("utf8") # XXX use encoding of original stream + os.write(self.targetfd_save, data) + + +class FDCapture(FDCaptureBinary): + """Capture IO to/from a given os-level filedescriptor. 
+ + snap() produces text + """ + + EMPTY_BUFFER = str() + + def snap(self): + res = FDCaptureBinary.snap(self) + enc = getattr(self.tmpfile, "encoding", None) + if enc and isinstance(res, bytes): + res = six.text_type(res, enc, "replace") + return res + + +class SysCapture(object): + + EMPTY_BUFFER = str() + + def __init__(self, fd, tmpfile=None): + name = patchsysdict[fd] + self._old = getattr(sys, name) + self.name = name + if tmpfile is None: + if name == "stdin": + tmpfile = DontReadFromInput() + else: + tmpfile = CaptureIO() + self.tmpfile = tmpfile + + def start(self): + setattr(sys, self.name, self.tmpfile) + + def snap(self): + res = self.tmpfile.getvalue() + self.tmpfile.seek(0) + self.tmpfile.truncate() + return res + + def done(self): + setattr(sys, self.name, self._old) + del self._old + _attempt_to_close_capture_file(self.tmpfile) + + def suspend(self): + setattr(sys, self.name, self._old) + + def resume(self): + setattr(sys, self.name, self.tmpfile) + + def writeorg(self, data): + self._old.write(data) + self._old.flush() + + +class SysCaptureBinary(SysCapture): + EMPTY_BUFFER = bytes() + + def snap(self): + res = self.tmpfile.buffer.getvalue() + self.tmpfile.seek(0) + self.tmpfile.truncate() + return res + + +class DontReadFromInput(six.Iterator): + """Temporary stub class. Ideally when stdin is accessed, the + capturing should be turned off, with possibly all data captured + so far sent to the screen. This should be configurable, though, + because in automated test runs it is better to crash than + hang indefinitely. + """ + + encoding = None + + def read(self, *args): + raise IOError("reading from stdin while output is captured") + + readline = read + readlines = read + __next__ = read + + def __iter__(self): + return self + + def fileno(self): + raise UnsupportedOperation("redirected stdin is pseudofile, has no fileno()") + + def isatty(self): + return False + + def close(self): + pass + + @property + def buffer(self): + if sys.version_info >= (3, 0): + return self + else: + raise AttributeError("redirected stdin has no attribute buffer") + + +def _colorama_workaround(): + """ + Ensure colorama is imported so that it attaches to the correct stdio + handles on Windows. + + colorama uses the terminal on import time. So if something does the + first import of colorama while I/O capture is active, colorama will + fail in various ways. + """ + + if not sys.platform.startswith("win32"): + return + try: + import colorama # noqa + except ImportError: + pass + + +def _readline_workaround(): + """ + Ensure readline is imported so that it attaches to the correct stdio + handles on Windows. + + Pdb uses readline support where available--when not running from the Python + prompt, the readline module is not imported until running the pdb REPL. If + running pytest with the --pdb option this means the readline module is not + imported until after I/O capture has been started. + + This is a problem for pyreadline, which is often used to implement readline + support on Windows, as it does not attach to the correct handles for stdout + and/or stdin if they have been redirected by the FDCapture mechanism. This + workaround ensures that readline is imported before I/O capture is setup so + that it can attach to the actual stdin/out for the console. 
+ + See https://github.com/pytest-dev/pytest/pull/1281 + """ + + if not sys.platform.startswith("win32"): + return + try: + import readline # noqa + except ImportError: + pass + + +def _py36_windowsconsoleio_workaround(stream): + """ + Python 3.6 implemented unicode console handling for Windows. This works + by reading/writing to the raw console handle using + ``{Read,Write}ConsoleW``. + + The problem is that we are going to ``dup2`` over the stdio file + descriptors when doing ``FDCapture`` and this will ``CloseHandle`` the + handles used by Python to write to the console. Though there is still some + weirdness and the console handle seems to only be closed randomly and not + on the first call to ``CloseHandle``, or maybe it gets reopened with the + same handle value when we suspend capturing. + + The workaround in this case will reopen stdio with a different fd which + also means a different handle by replicating the logic in + "Py_lifecycle.c:initstdio/create_stdio". + + :param stream: in practice ``sys.stdout`` or ``sys.stderr``, but given + here as parameter for unittesting purposes. + + See https://github.com/pytest-dev/py/issues/103 + """ + if not sys.platform.startswith("win32") or sys.version_info[:2] < (3, 6): + return + + # bail out if ``stream`` doesn't seem like a proper ``io`` stream (#2666) + if not hasattr(stream, "buffer"): + return + + buffered = hasattr(stream.buffer, "raw") + raw_stdout = stream.buffer.raw if buffered else stream.buffer + + if not isinstance(raw_stdout, io._WindowsConsoleIO): + return + + def _reopen_stdio(f, mode): + if not buffered and mode[0] == "w": + buffering = 0 + else: + buffering = -1 + + return io.TextIOWrapper( + open(os.dup(f.fileno()), mode, buffering), + f.encoding, + f.errors, + f.newlines, + f.line_buffering, + ) + + sys.__stdin__ = sys.stdin = _reopen_stdio(sys.stdin, "rb") + sys.__stdout__ = sys.stdout = _reopen_stdio(sys.stdout, "wb") + sys.__stderr__ = sys.stderr = _reopen_stdio(sys.stderr, "wb") + + +def _attempt_to_close_capture_file(f): + """Suppress IOError when closing the temporary file used for capturing streams in py27 (#2370)""" + if six.PY2: + try: + f.close() + except IOError: + pass + else: + f.close() diff --git a/venv/Lib/site-packages/_pytest/compat.py b/venv/Lib/site-packages/_pytest/compat.py new file mode 100644 index 00000000..78bf1bc0 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/compat.py @@ -0,0 +1,419 @@ +""" +python version compatibility code +""" +from __future__ import absolute_import, division, print_function + +import codecs +import functools +import inspect +import re +import sys +from contextlib import contextmanager + +import py + +import _pytest +from _pytest.outcomes import TEST_OUTCOME, fail +from six import text_type +import six + +try: + import enum +except ImportError: # pragma: no cover + # Only available in Python 3.4+ or as a backport + enum = None + +_PY3 = sys.version_info > (3, 0) +_PY2 = not _PY3 + + +if _PY3: + from inspect import signature, Parameter as Parameter +else: + from funcsigs import signature, Parameter as Parameter + +NoneType = type(None) +NOTSET = object() + +PY35 = sys.version_info[:2] >= (3, 5) +PY36 = sys.version_info[:2] >= (3, 6) +MODULE_NOT_FOUND_ERROR = "ModuleNotFoundError" if PY36 else "ImportError" + + +if _PY3: + from collections.abc import MutableMapping as MappingMixin + from collections.abc import Mapping, Sequence +else: + # those raise DeprecationWarnings in Python >=3.7 + from collections import MutableMapping as MappingMixin # noqa + from collections 
import Mapping, Sequence # noqa + + +if sys.version_info >= (3, 4): + from importlib.util import spec_from_file_location +else: + + def spec_from_file_location(*_, **__): + return None + + +def _format_args(func): + return str(signature(func)) + + +isfunction = inspect.isfunction +isclass = inspect.isclass +# used to work around a python2 exception info leak +exc_clear = getattr(sys, "exc_clear", lambda: None) +# The type of re.compile objects is not exposed in Python. +REGEX_TYPE = type(re.compile("")) + + +def is_generator(func): + genfunc = inspect.isgeneratorfunction(func) + return genfunc and not iscoroutinefunction(func) + + +def iscoroutinefunction(func): + """Return True if func is a decorated coroutine function. + + Note: copied and modified from Python 3.5's builtin couroutines.py to avoid import asyncio directly, + which in turns also initializes the "logging" module as side-effect (see issue #8). + """ + return getattr(func, "_is_coroutine", False) or ( + hasattr(inspect, "iscoroutinefunction") and inspect.iscoroutinefunction(func) + ) + + +def getlocation(function, curdir): + function = get_real_func(function) + fn = py.path.local(inspect.getfile(function)) + lineno = function.__code__.co_firstlineno + if fn.relto(curdir): + fn = fn.relto(curdir) + return "%s:%d" % (fn, lineno + 1) + + +def num_mock_patch_args(function): + """ return number of arguments used up by mock arguments (if any) """ + patchings = getattr(function, "patchings", None) + if not patchings: + return 0 + mock_modules = [sys.modules.get("mock"), sys.modules.get("unittest.mock")] + if any(mock_modules): + sentinels = [m.DEFAULT for m in mock_modules if m is not None] + return len( + [p for p in patchings if not p.attribute_name and p.new in sentinels] + ) + return len(patchings) + + +def getfuncargnames(function, is_method=False, cls=None): + """Returns the names of a function's mandatory arguments. + + This should return the names of all function arguments that: + * Aren't bound to an instance or type as in instance or class methods. + * Don't have default values. + * Aren't bound with functools.partial. + * Aren't replaced with mocks. + + The is_method and cls arguments indicate that the function should + be treated as a bound method even though it's not unless, only in + the case of cls, the function is a static method. + + @RonnyPfannschmidt: This function should be refactored when we + revisit fixtures. The fixture mechanism should ask the node for + the fixture names, and not try to obtain directly from the + function object well after collection has occurred. + + """ + # The parameters attribute of a Signature object contains an + # ordered mapping of parameter names to Parameter instances. This + # creates a tuple of the names of the parameters that don't have + # defaults. + try: + parameters = signature(function).parameters + except (ValueError, TypeError) as e: + fail( + "Could not determine arguments of {!r}: {}".format(function, e), + pytrace=False, + ) + + arg_names = tuple( + p.name + for p in parameters.values() + if ( + p.kind is Parameter.POSITIONAL_OR_KEYWORD + or p.kind is Parameter.KEYWORD_ONLY + ) + and p.default is Parameter.empty + ) + # If this function should be treated as a bound method even though + # it's passed as an unbound method or function, remove the first + # parameter name. + if is_method or ( + cls and not isinstance(cls.__dict__.get(function.__name__, None), staticmethod) + ): + arg_names = arg_names[1:] + # Remove any names that will be replaced with mocks. 
+ if hasattr(function, "__wrapped__"): + arg_names = arg_names[num_mock_patch_args(function) :] + return arg_names + + +@contextmanager +def dummy_context_manager(): + """Context manager that does nothing, useful in situations where you might need an actual context manager or not + depending on some condition. Using this allow to keep the same code""" + yield + + +def get_default_arg_names(function): + # Note: this code intentionally mirrors the code at the beginning of getfuncargnames, + # to get the arguments which were excluded from its result because they had default values + return tuple( + p.name + for p in signature(function).parameters.values() + if p.kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY) + and p.default is not Parameter.empty + ) + + +if _PY3: + STRING_TYPES = bytes, str + UNICODE_TYPES = six.text_type + + if PY35: + + def _bytes_to_ascii(val): + return val.decode("ascii", "backslashreplace") + + else: + + def _bytes_to_ascii(val): + if val: + # source: http://goo.gl/bGsnwC + encoded_bytes, _ = codecs.escape_encode(val) + return encoded_bytes.decode("ascii") + else: + # empty bytes crashes codecs.escape_encode (#1087) + return "" + + def ascii_escaped(val): + """If val is pure ascii, returns it as a str(). Otherwise, escapes + bytes objects into a sequence of escaped bytes: + + b'\xc3\xb4\xc5\xd6' -> u'\\xc3\\xb4\\xc5\\xd6' + + and escapes unicode objects into a sequence of escaped unicode + ids, e.g.: + + '4\\nV\\U00043efa\\x0eMXWB\\x1e\\u3028\\u15fd\\xcd\\U0007d944' + + note: + the obvious "v.decode('unicode-escape')" will return + valid utf-8 unicode if it finds them in bytes, but we + want to return escaped bytes for any byte, even if they match + a utf-8 string. + + """ + if isinstance(val, bytes): + return _bytes_to_ascii(val) + else: + return val.encode("unicode_escape").decode("ascii") + + +else: + STRING_TYPES = six.string_types + UNICODE_TYPES = six.text_type + + def ascii_escaped(val): + """In py2 bytes and str are the same type, so return if it's a bytes + object, return it unchanged if it is a full ascii string, + otherwise escape it into its binary form. + + If it's a unicode string, change the unicode characters into + unicode escapes. + + """ + if isinstance(val, bytes): + try: + return val.encode("ascii") + except UnicodeDecodeError: + return val.encode("string-escape") + else: + return val.encode("unicode-escape") + + +class _PytestWrapper(object): + """Dummy wrapper around a function object for internal use only. + + Used to correctly unwrap the underlying function object + when we are creating fixtures, because we wrap the function object ourselves with a decorator + to issue warnings when the fixture function is called directly. + """ + + def __init__(self, obj): + self.obj = obj + + +def get_real_func(obj): + """ gets the real function object of the (possibly) wrapped object by + functools.wraps or functools.partial. 
+ """ + start_obj = obj + for i in range(100): + # __pytest_wrapped__ is set by @pytest.fixture when wrapping the fixture function + # to trigger a warning if it gets called directly instead of by pytest: we don't + # want to unwrap further than this otherwise we lose useful wrappings like @mock.patch (#3774) + new_obj = getattr(obj, "__pytest_wrapped__", None) + if isinstance(new_obj, _PytestWrapper): + obj = new_obj.obj + break + new_obj = getattr(obj, "__wrapped__", None) + if new_obj is None: + break + obj = new_obj + else: + raise ValueError( + ("could not find real function of {start}\nstopped at {current}").format( + start=py.io.saferepr(start_obj), current=py.io.saferepr(obj) + ) + ) + if isinstance(obj, functools.partial): + obj = obj.func + return obj + + +def get_real_method(obj, holder): + """ + Attempts to obtain the real function object that might be wrapping ``obj``, while at the same time + returning a bound method to ``holder`` if the original object was a bound method. + """ + try: + is_method = hasattr(obj, "__func__") + obj = get_real_func(obj) + except Exception: + return obj + if is_method and hasattr(obj, "__get__") and callable(obj.__get__): + obj = obj.__get__(holder) + return obj + + +def getfslineno(obj): + # xxx let decorators etc specify a sane ordering + obj = get_real_func(obj) + if hasattr(obj, "place_as"): + obj = obj.place_as + fslineno = _pytest._code.getfslineno(obj) + assert isinstance(fslineno[1], int), obj + return fslineno + + +def getimfunc(func): + try: + return func.__func__ + except AttributeError: + return func + + +def safe_getattr(object, name, default): + """ Like getattr but return default upon any Exception or any OutcomeException. + + Attribute access can potentially fail for 'evil' Python objects. + See issue #214. + It catches OutcomeException because of #2490 (issue #580), new outcomes are derived from BaseException + instead of Exception (for more details check #2707) + """ + try: + return getattr(object, name, default) + except TEST_OUTCOME: + return default + + +def _is_unittest_unexpected_success_a_failure(): + """Return if the test suite should fail if an @expectedFailure unittest test PASSES. + + From https://docs.python.org/3/library/unittest.html?highlight=unittest#unittest.TestResult.wasSuccessful: + Changed in version 3.4: Returns False if there were any + unexpectedSuccesses from tests marked with the expectedFailure() decorator. + """ + return sys.version_info >= (3, 4) + + +if _PY3: + + def safe_str(v): + """returns v as string""" + return str(v) + + +else: + + def safe_str(v): + """returns v as string, converting to ascii if necessary""" + try: + return str(v) + except UnicodeError: + if not isinstance(v, text_type): + v = text_type(v) + errors = "replace" + return v.encode("utf-8", errors) + + +COLLECT_FAKEMODULE_ATTRIBUTES = ( + "Collector", + "Module", + "Generator", + "Function", + "Instance", + "Session", + "Item", + "Class", + "File", + "_fillfuncargs", +) + + +def _setup_collect_fakemodule(): + from types import ModuleType + import pytest + + pytest.collect = ModuleType("pytest.collect") + pytest.collect.__all__ = [] # used for setns + for attr in COLLECT_FAKEMODULE_ATTRIBUTES: + setattr(pytest.collect, attr, getattr(pytest, attr)) + + +if _PY2: + # Without this the test_dupfile_on_textio will fail, otherwise CaptureIO could directly inherit from StringIO. 
+ from py.io import TextIO + + class CaptureIO(TextIO): + @property + def encoding(self): + return getattr(self, "_encoding", "UTF-8") + + +else: + import io + + class CaptureIO(io.TextIOWrapper): + def __init__(self): + super(CaptureIO, self).__init__( + io.BytesIO(), encoding="UTF-8", newline="", write_through=True + ) + + def getvalue(self): + return self.buffer.getvalue().decode("UTF-8") + + +class FuncargnamesCompatAttr(object): + """ helper class so that Metafunc, Function and FixtureRequest + don't need to each define the "funcargnames" compatibility attribute. + """ + + @property + def funcargnames(self): + """ alias attribute for ``fixturenames`` for pre-2.3 compatibility""" + return self.fixturenames diff --git a/venv/Lib/site-packages/_pytest/config/__init__.py b/venv/Lib/site-packages/_pytest/config/__init__.py new file mode 100644 index 00000000..88cbf14b --- /dev/null +++ b/venv/Lib/site-packages/_pytest/config/__init__.py @@ -0,0 +1,1035 @@ +""" command line options, ini-file and conftest.py processing. """ +from __future__ import absolute_import, division, print_function +import argparse +import inspect +import shlex +import types +import warnings +import copy +import six +import py + +# DON't import pytest here because it causes import cycle troubles +import sys +import os +from _pytest.outcomes import Skipped + +import _pytest._code +import _pytest.hookspec # the extension point definitions +import _pytest.assertion +from pluggy import PluginManager, HookimplMarker, HookspecMarker +from _pytest._code import ExceptionInfo, filter_traceback +from _pytest.compat import safe_str +from .exceptions import UsageError, PrintHelp +from .findpaths import determine_setup, exists + +hookimpl = HookimplMarker("pytest") +hookspec = HookspecMarker("pytest") + + +class ConftestImportFailure(Exception): + def __init__(self, path, excinfo): + Exception.__init__(self, path, excinfo) + self.path = path + self.excinfo = excinfo + + +def main(args=None, plugins=None): + """ return exit code, after performing an in-process test run. + + :arg args: list of command line arguments. + + :arg plugins: list of plugin objects to be auto-registered during + initialization. + """ + from _pytest.main import EXIT_USAGEERROR + + try: + try: + config = _prepareconfig(args, plugins) + except ConftestImportFailure as e: + exc_info = ExceptionInfo(e.excinfo) + tw = py.io.TerminalWriter(sys.stderr) + tw.line( + "ImportError while loading conftest '{e.path}'.".format(e=e), red=True + ) + exc_info.traceback = exc_info.traceback.filter(filter_traceback) + exc_repr = ( + exc_info.getrepr(style="short", chain=False) + if exc_info.traceback + else exc_info.exconly() + ) + formatted_tb = safe_str(exc_repr) + for line in formatted_tb.splitlines(): + tw.line(line.rstrip(), red=True) + return 4 + else: + try: + return config.hook.pytest_cmdline_main(config=config) + finally: + config._ensure_unconfigure() + except UsageError as e: + tw = py.io.TerminalWriter(sys.stderr) + for msg in e.args: + tw.line("ERROR: {}\n".format(msg), red=True) + return EXIT_USAGEERROR + + +class cmdline(object): # compatibility namespace + main = staticmethod(main) + + +def filename_arg(path, optname): + """ Argparse type validator for filename arguments. + + :path: path of filename + :optname: name of the option + """ + if os.path.isdir(path): + raise UsageError("{} must be a filename, given: {}".format(optname, path)) + return path + + +def directory_arg(path, optname): + """Argparse type validator for directory arguments. 
+ + :path: path of directory + :optname: name of the option + """ + if not os.path.isdir(path): + raise UsageError("{} must be a directory, given: {}".format(optname, path)) + return path + + +default_plugins = ( + "mark", + "main", + "terminal", + "runner", + "python", + "fixtures", + "debugging", + "unittest", + "capture", + "skipping", + "tmpdir", + "monkeypatch", + "recwarn", + "pastebin", + "helpconfig", + "nose", + "assertion", + "junitxml", + "resultlog", + "doctest", + "cacheprovider", + "freeze_support", + "setuponly", + "setupplan", + "warnings", + "logging", +) + + +builtin_plugins = set(default_plugins) +builtin_plugins.add("pytester") + + +def get_config(): + # subsequent calls to main will create a fresh instance + pluginmanager = PytestPluginManager() + config = Config(pluginmanager) + for spec in default_plugins: + pluginmanager.import_plugin(spec) + return config + + +def get_plugin_manager(): + """ + Obtain a new instance of the + :py:class:`_pytest.config.PytestPluginManager`, with default plugins + already loaded. + + This function can be used by integration with other tools, like hooking + into pytest to run tests into an IDE. + """ + return get_config().pluginmanager + + +def _prepareconfig(args=None, plugins=None): + warning = None + if args is None: + args = sys.argv[1:] + elif isinstance(args, py.path.local): + args = [str(args)] + elif not isinstance(args, (tuple, list)): + if not isinstance(args, str): + raise ValueError("not a string or argument list: %r" % (args,)) + args = shlex.split(args, posix=sys.platform != "win32") + from _pytest import deprecated + + warning = deprecated.MAIN_STR_ARGS + config = get_config() + pluginmanager = config.pluginmanager + try: + if plugins: + for plugin in plugins: + if isinstance(plugin, six.string_types): + pluginmanager.consider_pluginarg(plugin) + else: + pluginmanager.register(plugin) + if warning: + from _pytest.warnings import _issue_config_warning + + _issue_config_warning(warning, config=config) + return pluginmanager.hook.pytest_cmdline_parse( + pluginmanager=pluginmanager, args=args + ) + except BaseException: + config._ensure_unconfigure() + raise + + +class PytestPluginManager(PluginManager): + """ + Overwrites :py:class:`pluggy.PluginManager ` to add pytest-specific + functionality: + + * loading plugins from the command line, ``PYTEST_PLUGINS`` env variable and + ``pytest_plugins`` global variables found in plugins being loaded; + * ``conftest.py`` loading during start-up; + """ + + def __init__(self): + super(PytestPluginManager, self).__init__("pytest") + self._conftest_plugins = set() + + # state related to local conftest plugins + self._path2confmods = {} + self._conftestpath2mod = {} + self._confcutdir = None + self._noconftest = False + self._duplicatepaths = set() + + self.add_hookspecs(_pytest.hookspec) + self.register(self) + if os.environ.get("PYTEST_DEBUG"): + err = sys.stderr + encoding = getattr(err, "encoding", "utf8") + try: + err = py.io.dupfile(err, encoding=encoding) + except Exception: + pass + self.trace.root.setwriter(err.write) + self.enable_tracing() + + # Config._consider_importhook will set a real object if required. + self.rewrite_hook = _pytest.assertion.DummyRewriteHook() + # Used to know when we are importing conftests after the pytest_configure stage + self._configured = False + + def addhooks(self, module_or_class): + """ + .. deprecated:: 2.8 + + Use :py:meth:`pluggy.PluginManager.add_hookspecs ` + instead. 
+ """ + warning = dict( + code="I2", + fslocation=_pytest._code.getfslineno(sys._getframe(1)), + nodeid=None, + message="use pluginmanager.add_hookspecs instead of " + "deprecated addhooks() method.", + ) + self._warn(warning) + return self.add_hookspecs(module_or_class) + + def parse_hookimpl_opts(self, plugin, name): + # pytest hooks are always prefixed with pytest_ + # so we avoid accessing possibly non-readable attributes + # (see issue #1073) + if not name.startswith("pytest_"): + return + # ignore some historic special names which can not be hooks anyway + if name == "pytest_plugins" or name.startswith("pytest_funcarg__"): + return + + method = getattr(plugin, name) + opts = super(PytestPluginManager, self).parse_hookimpl_opts(plugin, name) + + # consider only actual functions for hooks (#3775) + if not inspect.isroutine(method): + return + + # collect unmarked hooks as long as they have the `pytest_' prefix + if opts is None and name.startswith("pytest_"): + opts = {} + + if opts is not None: + for name in ("tryfirst", "trylast", "optionalhook", "hookwrapper"): + opts.setdefault(name, hasattr(method, name)) + return opts + + def parse_hookspec_opts(self, module_or_class, name): + opts = super(PytestPluginManager, self).parse_hookspec_opts( + module_or_class, name + ) + if opts is None: + method = getattr(module_or_class, name) + if name.startswith("pytest_"): + opts = { + "firstresult": hasattr(method, "firstresult"), + "historic": hasattr(method, "historic"), + } + return opts + + def register(self, plugin, name=None): + if name in ["pytest_catchlog", "pytest_capturelog"]: + self._warn( + "{} plugin has been merged into the core, " + "please remove it from your requirements.".format( + name.replace("_", "-") + ) + ) + return + ret = super(PytestPluginManager, self).register(plugin, name) + if ret: + self.hook.pytest_plugin_registered.call_historic( + kwargs=dict(plugin=plugin, manager=self) + ) + + if isinstance(plugin, types.ModuleType): + self.consider_module(plugin) + return ret + + def getplugin(self, name): + # support deprecated naming because plugins (xdist e.g.) use it + return self.get_plugin(name) + + def hasplugin(self, name): + """Return True if the plugin with the given name is registered.""" + return bool(self.get_plugin(name)) + + def pytest_configure(self, config): + # XXX now that the pluginmanager exposes hookimpl(tryfirst...) + # we should remove tryfirst/trylast as markers + config.addinivalue_line( + "markers", + "tryfirst: mark a hook implementation function such that the " + "plugin machinery will try to call it first/as early as possible.", + ) + config.addinivalue_line( + "markers", + "trylast: mark a hook implementation function such that the " + "plugin machinery will try to call it last/as late as possible.", + ) + self._configured = True + + def _warn(self, message): + kwargs = ( + message + if isinstance(message, dict) + else {"code": "I1", "message": message, "fslocation": None, "nodeid": None} + ) + self.hook.pytest_logwarning.call_historic(kwargs=kwargs) + + # + # internal API for local conftest plugin handling + # + def _set_initial_conftests(self, namespace): + """ load initial conftest files given a preparsed "namespace". + As conftest files may add their own command line options + which have arguments ('--my-opt somepath') we might get some + false positives. All builtin and 3rd party plugins will have + been loaded, however, so common options will not confuse our logic + here. 
+ """ + current = py.path.local() + self._confcutdir = ( + current.join(namespace.confcutdir, abs=True) + if namespace.confcutdir + else None + ) + self._noconftest = namespace.noconftest + self._using_pyargs = namespace.pyargs + testpaths = namespace.file_or_dir + foundanchor = False + for path in testpaths: + path = str(path) + # remove node-id syntax + i = path.find("::") + if i != -1: + path = path[:i] + anchor = current.join(path, abs=1) + if exists(anchor): # we found some file object + self._try_load_conftest(anchor) + foundanchor = True + if not foundanchor: + self._try_load_conftest(current) + + def _try_load_conftest(self, anchor): + self._getconftestmodules(anchor) + # let's also consider test* subdirs + if anchor.check(dir=1): + for x in anchor.listdir("test*"): + if x.check(dir=1): + self._getconftestmodules(x) + + def _getconftestmodules(self, path): + if self._noconftest: + return [] + + if path.isfile(): + directory = path.dirpath() + else: + directory = path + try: + return self._path2confmods[directory] + except KeyError: + # XXX these days we may rather want to use config.rootdir + # and allow users to opt into looking into the rootdir parent + # directories instead of requiring to specify confcutdir + clist = [] + for parent in directory.parts(): + if self._confcutdir and self._confcutdir.relto(parent): + continue + conftestpath = parent.join("conftest.py") + if conftestpath.isfile(): + mod = self._importconftest(conftestpath) + clist.append(mod) + + self._path2confmods[directory] = clist + return clist + + def _rget_with_confmod(self, name, path): + modules = self._getconftestmodules(path) + for mod in reversed(modules): + try: + return mod, getattr(mod, name) + except AttributeError: + continue + raise KeyError(name) + + def _importconftest(self, conftestpath): + try: + return self._conftestpath2mod[conftestpath] + except KeyError: + pkgpath = conftestpath.pypkgpath() + if pkgpath is None: + _ensure_removed_sysmodule(conftestpath.purebasename) + try: + mod = conftestpath.pyimport() + if ( + hasattr(mod, "pytest_plugins") + and self._configured + and not self._using_pyargs + ): + from _pytest.deprecated import ( + PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST + ) + + warnings.warn_explicit( + PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST, + category=None, + filename=str(conftestpath), + lineno=0, + ) + except Exception: + raise ConftestImportFailure(conftestpath, sys.exc_info()) + + self._conftest_plugins.add(mod) + self._conftestpath2mod[conftestpath] = mod + dirpath = conftestpath.dirpath() + if dirpath in self._path2confmods: + for path, mods in self._path2confmods.items(): + if path and path.relto(dirpath) or path == dirpath: + assert mod not in mods + mods.append(mod) + self.trace("loaded conftestmodule %r" % (mod)) + self.consider_conftest(mod) + return mod + + # + # API for bootstrapping plugin loading + # + # + + def consider_preparse(self, args): + for opt1, opt2 in zip(args, args[1:]): + if opt1 == "-p": + self.consider_pluginarg(opt2) + + def consider_pluginarg(self, arg): + if arg.startswith("no:"): + name = arg[3:] + self.set_blocked(name) + if not name.startswith("pytest_"): + self.set_blocked("pytest_" + name) + else: + self.import_plugin(arg) + + def consider_conftest(self, conftestmodule): + self.register(conftestmodule, name=conftestmodule.__file__) + + def consider_env(self): + self._import_plugin_specs(os.environ.get("PYTEST_PLUGINS")) + + def consider_module(self, mod): + self._import_plugin_specs(getattr(mod, "pytest_plugins", [])) + + def 
_import_plugin_specs(self, spec): + plugins = _get_plugin_specs_as_list(spec) + for import_spec in plugins: + self.import_plugin(import_spec) + + def import_plugin(self, modname): + # most often modname refers to builtin modules, e.g. "pytester", + # "terminal" or "capture". Those plugins are registered under their + # basename for historic purposes but must be imported with the + # _pytest prefix. + assert isinstance(modname, (six.text_type, str)), ( + "module name as text required, got %r" % modname + ) + modname = str(modname) + if self.is_blocked(modname) or self.get_plugin(modname) is not None: + return + if modname in builtin_plugins: + importspec = "_pytest." + modname + else: + importspec = modname + self.rewrite_hook.mark_rewrite(importspec) + try: + __import__(importspec) + except ImportError as e: + new_exc_type = ImportError + new_exc_message = 'Error importing plugin "%s": %s' % ( + modname, + safe_str(e.args[0]), + ) + new_exc = new_exc_type(new_exc_message) + + six.reraise(new_exc_type, new_exc, sys.exc_info()[2]) + + except Skipped as e: + self._warn("skipped plugin %r: %s" % ((modname, e.msg))) + else: + mod = sys.modules[importspec] + self.register(mod, modname) + + +def _get_plugin_specs_as_list(specs): + """ + Parses a list of "plugin specs" and returns a list of plugin names. + + Plugin specs can be given as a list of strings separated by "," or already as a list/tuple in + which case it is returned as a list. Specs can also be `None` in which case an + empty list is returned. + """ + if specs is not None: + if isinstance(specs, str): + specs = specs.split(",") if specs else [] + if not isinstance(specs, (list, tuple)): + raise UsageError( + "Plugin specs must be a ','-separated string or a " + "list/tuple of strings for plugin names. Given: %r" % specs + ) + return list(specs) + return [] + + +def _ensure_removed_sysmodule(modname): + try: + del sys.modules[modname] + except KeyError: + pass + + +class Notset(object): + def __repr__(self): + return "" + + +notset = Notset() + + +def _iter_rewritable_modules(package_files): + for fn in package_files: + is_simple_module = "/" not in fn and fn.endswith(".py") + is_package = fn.count("/") == 1 and fn.endswith("__init__.py") + if is_simple_module: + module_name, _ = os.path.splitext(fn) + yield module_name + elif is_package: + package_name = os.path.dirname(fn) + yield package_name + + +class Config(object): + """ access to configuration values, pluginmanager and plugin hooks. """ + + def __init__(self, pluginmanager): + #: access to command line option as attributes. 
+ #: (deprecated), use :py:func:`getoption() <_pytest.config.Config.getoption>` instead + self.option = argparse.Namespace() + from .argparsing import Parser, FILE_OR_DIR + + _a = FILE_OR_DIR + self._parser = Parser( + usage="%%(prog)s [options] [%s] [%s] [...]" % (_a, _a), + processopt=self._processopt, + ) + #: a pluginmanager instance + self.pluginmanager = pluginmanager + self.trace = self.pluginmanager.trace.root.get("config") + self.hook = self.pluginmanager.hook + self._inicache = {} + self._override_ini = () + self._opt2dest = {} + self._cleanup = [] + self._warn = self.pluginmanager._warn + self.pluginmanager.register(self, "pytestconfig") + self._configured = False + + def do_setns(dic): + import pytest + + setns(pytest, dic) + + self.hook.pytest_namespace.call_historic(do_setns, {}) + self.hook.pytest_addoption.call_historic(kwargs=dict(parser=self._parser)) + + def add_cleanup(self, func): + """ Add a function to be called when the config object gets out of + use (usually coninciding with pytest_unconfigure).""" + self._cleanup.append(func) + + def _do_configure(self): + assert not self._configured + self._configured = True + self.hook.pytest_configure.call_historic(kwargs=dict(config=self)) + + def _ensure_unconfigure(self): + if self._configured: + self._configured = False + self.hook.pytest_unconfigure(config=self) + self.hook.pytest_configure._call_history = [] + while self._cleanup: + fin = self._cleanup.pop() + fin() + + def warn(self, code, message, fslocation=None, nodeid=None): + """ + .. deprecated:: 3.8 + + Use :py:func:`warnings.warn` or :py:func:`warnings.warn_explicit` directly instead. + + Generate a warning for this test session. + """ + from _pytest.warning_types import RemovedInPytest4Warning + + if isinstance(fslocation, (tuple, list)) and len(fslocation) > 2: + filename, lineno = fslocation[:2] + else: + filename = "unknown file" + lineno = 0 + msg = "config.warn has been deprecated, use warnings.warn instead" + if nodeid: + msg = "{}: {}".format(nodeid, msg) + warnings.warn_explicit( + RemovedInPytest4Warning(msg), + category=None, + filename=filename, + lineno=lineno, + ) + self.hook.pytest_logwarning.call_historic( + kwargs=dict( + code=code, message=message, fslocation=fslocation, nodeid=nodeid + ) + ) + + def get_terminal_writer(self): + return self.pluginmanager.get_plugin("terminalreporter")._tw + + def pytest_cmdline_parse(self, pluginmanager, args): + # REF1 assert self == pluginmanager.config, (self, pluginmanager.config) + self.parse(args) + return self + + def notify_exception(self, excinfo, option=None): + if option and option.fulltrace: + style = "long" + else: + style = "native" + excrepr = excinfo.getrepr( + funcargs=True, showlocals=getattr(option, "showlocals", False), style=style + ) + res = self.hook.pytest_internalerror(excrepr=excrepr, excinfo=excinfo) + if not any(res): + for line in str(excrepr).split("\n"): + sys.stderr.write("INTERNALERROR> %s\n" % line) + sys.stderr.flush() + + def cwd_relative_nodeid(self, nodeid): + # nodeid's are relative to the rootpath, compute relative to cwd + if self.invocation_dir != self.rootdir: + fullpath = self.rootdir.join(nodeid) + nodeid = self.invocation_dir.bestrelpath(fullpath) + return nodeid + + @classmethod + def fromdictargs(cls, option_dict, args): + """ constructor useable for subprocesses. 
""" + config = get_config() + config.option.__dict__.update(option_dict) + config.parse(args, addopts=False) + for x in config.option.plugins: + config.pluginmanager.consider_pluginarg(x) + return config + + def _processopt(self, opt): + for name in opt._short_opts + opt._long_opts: + self._opt2dest[name] = opt.dest + + if hasattr(opt, "default") and opt.dest: + if not hasattr(self.option, opt.dest): + setattr(self.option, opt.dest, opt.default) + + @hookimpl(trylast=True) + def pytest_load_initial_conftests(self, early_config): + self.pluginmanager._set_initial_conftests(early_config.known_args_namespace) + + def _initini(self, args): + ns, unknown_args = self._parser.parse_known_and_unknown_args( + args, namespace=copy.copy(self.option) + ) + r = determine_setup( + ns.inifilename, + ns.file_or_dir + unknown_args, + rootdir_cmd_arg=ns.rootdir or None, + config=self, + ) + self.rootdir, self.inifile, self.inicfg = r + self._parser.extra_info["rootdir"] = self.rootdir + self._parser.extra_info["inifile"] = self.inifile + self.invocation_dir = py.path.local() + self._parser.addini("addopts", "extra command line options", "args") + self._parser.addini("minversion", "minimally required pytest version") + self._override_ini = ns.override_ini or () + + def _consider_importhook(self, args): + """Install the PEP 302 import hook if using assertion rewriting. + + Needs to parse the --assert= option from the commandline + and find all the installed plugins to mark them for rewriting + by the importhook. + """ + ns, unknown_args = self._parser.parse_known_and_unknown_args(args) + mode = ns.assertmode + if mode == "rewrite": + try: + hook = _pytest.assertion.install_importhook(self) + except SystemError: + mode = "plain" + else: + self._mark_plugins_for_rewrite(hook) + _warn_about_missing_assertion(mode) + + def _mark_plugins_for_rewrite(self, hook): + """ + Given an importhook, mark for rewrite any top-level + modules or packages in the distribution package for + all pytest plugins. + """ + import pkg_resources + + self.pluginmanager.rewrite_hook = hook + + if os.environ.get("PYTEST_DISABLE_PLUGIN_AUTOLOAD"): + # We don't autoload from setuptools entry points, no need to continue. + return + + # 'RECORD' available for plugins installed normally (pip install) + # 'SOURCES.txt' available for plugins installed in dev mode (pip install -e) + # for installed plugins 'SOURCES.txt' returns an empty list, and vice-versa + # so it shouldn't be an issue + metadata_files = "RECORD", "SOURCES.txt" + + package_files = ( + entry.split(",")[0] + for entrypoint in pkg_resources.iter_entry_points("pytest11") + for metadata in metadata_files + for entry in entrypoint.dist._get_metadata(metadata) + ) + + for name in _iter_rewritable_modules(package_files): + hook.mark_rewrite(name) + + def _preparse(self, args, addopts=True): + if addopts: + args[:] = shlex.split(os.environ.get("PYTEST_ADDOPTS", "")) + args + self._initini(args) + if addopts: + args[:] = self.getini("addopts") + args + self._checkversion() + self._consider_importhook(args) + self.pluginmanager.consider_preparse(args) + if not os.environ.get("PYTEST_DISABLE_PLUGIN_AUTOLOAD"): + # Don't autoload from setuptools entry point. Only explicitly specified + # plugins are going to be loaded. 
+ self.pluginmanager.load_setuptools_entrypoints("pytest11") + self.pluginmanager.consider_env() + self.known_args_namespace = ns = self._parser.parse_known_args( + args, namespace=copy.copy(self.option) + ) + if self.known_args_namespace.confcutdir is None and self.inifile: + confcutdir = py.path.local(self.inifile).dirname + self.known_args_namespace.confcutdir = confcutdir + try: + self.hook.pytest_load_initial_conftests( + early_config=self, args=args, parser=self._parser + ) + except ConftestImportFailure: + e = sys.exc_info()[1] + if ns.help or ns.version: + # we don't want to prevent --help/--version to work + # so just let is pass and print a warning at the end + self._warn("could not load initial conftests (%s)\n" % e.path) + else: + raise + + def _checkversion(self): + import pytest + + minver = self.inicfg.get("minversion", None) + if minver: + ver = minver.split(".") + myver = pytest.__version__.split(".") + if myver < ver: + raise pytest.UsageError( + "%s:%d: requires pytest-%s, actual pytest-%s'" + % ( + self.inicfg.config.path, + self.inicfg.lineof("minversion"), + minver, + pytest.__version__, + ) + ) + + def parse(self, args, addopts=True): + # parse given cmdline arguments into this config object. + assert not hasattr( + self, "args" + ), "can only parse cmdline args at most once per Config object" + self._origargs = args + self.hook.pytest_addhooks.call_historic( + kwargs=dict(pluginmanager=self.pluginmanager) + ) + self._preparse(args, addopts=addopts) + # XXX deprecated hook: + self.hook.pytest_cmdline_preparse(config=self, args=args) + self._parser.after_preparse = True + try: + args = self._parser.parse_setoption( + args, self.option, namespace=self.option + ) + if not args: + cwd = os.getcwd() + if cwd == self.rootdir: + args = self.getini("testpaths") + if not args: + args = [cwd] + self.args = args + except PrintHelp: + pass + + def addinivalue_line(self, name, line): + """ add a line to an ini-file option. The option must have been + declared but might not yet be set in which case the line becomes the + the first line in its value. """ + x = self.getini(name) + assert isinstance(x, list) + x.append(line) # modifies the cached list inline + + def getini(self, name): + """ return configuration value from an :ref:`ini file `. If the + specified name hasn't been registered through a prior + :py:func:`parser.addini <_pytest.config.Parser.addini>` + call (usually from a plugin), a ValueError is raised. 
""" + try: + return self._inicache[name] + except KeyError: + self._inicache[name] = val = self._getini(name) + return val + + def _getini(self, name): + try: + description, type, default = self._parser._inidict[name] + except KeyError: + raise ValueError("unknown configuration value: %r" % (name,)) + value = self._get_override_ini_value(name) + if value is None: + try: + value = self.inicfg[name] + except KeyError: + if default is not None: + return default + if type is None: + return "" + return [] + if type == "pathlist": + dp = py.path.local(self.inicfg.config.path).dirpath() + values = [] + for relpath in shlex.split(value): + values.append(dp.join(relpath, abs=True)) + return values + elif type == "args": + return shlex.split(value) + elif type == "linelist": + return [t for t in map(lambda x: x.strip(), value.split("\n")) if t] + elif type == "bool": + return bool(_strtobool(value.strip())) + else: + assert type is None + return value + + def _getconftest_pathlist(self, name, path): + try: + mod, relroots = self.pluginmanager._rget_with_confmod(name, path) + except KeyError: + return None + modpath = py.path.local(mod.__file__).dirpath() + values = [] + for relroot in relroots: + if not isinstance(relroot, py.path.local): + relroot = relroot.replace("/", py.path.local.sep) + relroot = modpath.join(relroot, abs=True) + values.append(relroot) + return values + + def _get_override_ini_value(self, name): + value = None + # override_ini is a list of "ini=value" options + # always use the last item if multiple values are set for same ini-name, + # e.g. -o foo=bar1 -o foo=bar2 will set foo to bar2 + for ini_config in self._override_ini: + try: + key, user_ini_value = ini_config.split("=", 1) + except ValueError: + raise UsageError("-o/--override-ini expects option=value style.") + else: + if key == name: + value = user_ini_value + return value + + def getoption(self, name, default=notset, skip=False): + """ return command line option value. + + :arg name: name of the option. You may also specify + the literal ``--OPT`` option instead of the "dest" option name. + :arg default: default value if no option of that name exists. + :arg skip: if True raise pytest.skip if option does not exists + or has a None value. + """ + name = self._opt2dest.get(name, name) + try: + val = getattr(self.option, name) + if val is None and skip: + raise AttributeError(name) + return val + except AttributeError: + if default is not notset: + return default + if skip: + import pytest + + pytest.skip("no %r option found" % (name,)) + raise ValueError("no option named %r" % (name,)) + + def getvalue(self, name, path=None): + """ (deprecated, use getoption()) """ + return self.getoption(name) + + def getvalueorskip(self, name, path=None): + """ (deprecated, use getoption(skip=True)) """ + return self.getoption(name, skip=True) + + +def _assertion_supported(): + try: + assert False + except AssertionError: + return True + else: + return False + + +def _warn_about_missing_assertion(mode): + if not _assertion_supported(): + if mode == "plain": + sys.stderr.write( + "WARNING: ASSERTIONS ARE NOT EXECUTED" + " and FAILING TESTS WILL PASS. Are you" + " using python -O?" 
+ ) + else: + sys.stderr.write( + "WARNING: assertions not in test modules or" + " plugins will be ignored" + " because assert statements are not executed " + "by the underlying Python interpreter " + "(are you using python -O?)\n" + ) + + +def setns(obj, dic): + import pytest + + for name, value in dic.items(): + if isinstance(value, dict): + mod = getattr(obj, name, None) + if mod is None: + modname = "pytest.%s" % name + mod = types.ModuleType(modname) + sys.modules[modname] = mod + mod.__all__ = [] + setattr(obj, name, mod) + obj.__all__.append(name) + setns(mod, value) + else: + setattr(obj, name, value) + obj.__all__.append(name) + # if obj != pytest: + # pytest.__all__.append(name) + setattr(pytest, name, value) + + +def create_terminal_writer(config, *args, **kwargs): + """Create a TerminalWriter instance configured according to the options + in the config object. Every code which requires a TerminalWriter object + and has access to a config object should use this function. + """ + tw = py.io.TerminalWriter(*args, **kwargs) + if config.option.color == "yes": + tw.hasmarkup = True + if config.option.color == "no": + tw.hasmarkup = False + return tw + + +def _strtobool(val): + """Convert a string representation of truth to true (1) or false (0). + + True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values + are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if + 'val' is anything else. + + .. note:: copied from distutils.util + """ + val = val.lower() + if val in ("y", "yes", "t", "true", "on", "1"): + return 1 + elif val in ("n", "no", "f", "false", "off", "0"): + return 0 + else: + raise ValueError("invalid truth value %r" % (val,)) diff --git a/venv/Lib/site-packages/_pytest/config/__pycache__/__init__.cpython-37.pyc b/venv/Lib/site-packages/_pytest/config/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 00000000..47bf4cf8 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/config/__pycache__/__init__.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/config/__pycache__/argparsing.cpython-37.pyc b/venv/Lib/site-packages/_pytest/config/__pycache__/argparsing.cpython-37.pyc new file mode 100644 index 00000000..ee55b4a7 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/config/__pycache__/argparsing.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/config/__pycache__/exceptions.cpython-37.pyc b/venv/Lib/site-packages/_pytest/config/__pycache__/exceptions.cpython-37.pyc new file mode 100644 index 00000000..ce01d606 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/config/__pycache__/exceptions.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/config/__pycache__/findpaths.cpython-37.pyc b/venv/Lib/site-packages/_pytest/config/__pycache__/findpaths.cpython-37.pyc new file mode 100644 index 00000000..dd7e935c Binary files /dev/null and b/venv/Lib/site-packages/_pytest/config/__pycache__/findpaths.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/config/argparsing.py b/venv/Lib/site-packages/_pytest/config/argparsing.py new file mode 100644 index 00000000..784b2b21 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/config/argparsing.py @@ -0,0 +1,410 @@ +import six +import warnings +import argparse + +from gettext import gettext as _ +import sys as _sys + +import py + +from ..main import EXIT_USAGEERROR + +FILE_OR_DIR = "file_or_dir" + + +class Parser(object): + """ Parser for command line arguments and ini-file values. 
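# Illustrative sketch of the truth-value parsing used above for "bool"-typed ini options;
# the import path assumes this vendored copy of _pytest/config/__init__.py.
from _pytest.config import _strtobool

assert _strtobool("yes") == 1
assert _strtobool("Off") == 0
try:
    _strtobool("maybe")
except ValueError:
    pass  # anything outside the known truthy/falsy strings raises ValueError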
+ + :ivar extra_info: dict of generic param -> value to display in case + there's an error processing the command line arguments. + """ + + def __init__(self, usage=None, processopt=None): + self._anonymous = OptionGroup("custom options", parser=self) + self._groups = [] + self._processopt = processopt + self._usage = usage + self._inidict = {} + self._ininames = [] + self.extra_info = {} + + def processoption(self, option): + if self._processopt: + if option.dest: + self._processopt(option) + + def getgroup(self, name, description="", after=None): + """ get (or create) a named option Group. + + :name: name of the option group. + :description: long description for --help output. + :after: name of other group, used for ordering --help output. + + The returned group object has an ``addoption`` method with the same + signature as :py:func:`parser.addoption + <_pytest.config.Parser.addoption>` but will be shown in the + respective group in the output of ``pytest. --help``. + """ + for group in self._groups: + if group.name == name: + return group + group = OptionGroup(name, description, parser=self) + i = 0 + for i, grp in enumerate(self._groups): + if grp.name == after: + break + self._groups.insert(i + 1, group) + return group + + def addoption(self, *opts, **attrs): + """ register a command line option. + + :opts: option names, can be short or long options. + :attrs: same attributes which the ``add_option()`` function of the + `argparse library + `_ + accepts. + + After command line parsing options are available on the pytest config + object via ``config.option.NAME`` where ``NAME`` is usually set + by passing a ``dest`` attribute, for example + ``addoption("--long", dest="NAME", ...)``. + """ + self._anonymous.addoption(*opts, **attrs) + + def parse(self, args, namespace=None): + from _pytest._argcomplete import try_argcomplete + + self.optparser = self._getparser() + try_argcomplete(self.optparser) + args = [str(x) if isinstance(x, py.path.local) else x for x in args] + return self.optparser.parse_args(args, namespace=namespace) + + def _getparser(self): + from _pytest._argcomplete import filescompleter + + optparser = MyOptionParser(self, self.extra_info) + groups = self._groups + [self._anonymous] + for group in groups: + if group.options: + desc = group.description or group.name + arggroup = optparser.add_argument_group(desc) + for option in group.options: + n = option.names() + a = option.attrs() + arggroup.add_argument(*n, **a) + # bash like autocompletion for dirs (appending '/') + optparser.add_argument(FILE_OR_DIR, nargs="*").completer = filescompleter + return optparser + + def parse_setoption(self, args, option, namespace=None): + parsedoption = self.parse(args, namespace=namespace) + for name, value in parsedoption.__dict__.items(): + setattr(option, name, value) + return getattr(parsedoption, FILE_OR_DIR) + + def parse_known_args(self, args, namespace=None): + """parses and returns a namespace object with known arguments at this + point. + """ + return self.parse_known_and_unknown_args(args, namespace=namespace)[0] + + def parse_known_and_unknown_args(self, args, namespace=None): + """parses and returns a namespace object with known arguments, and + the remaining arguments unknown at this point. + """ + optparser = self._getparser() + args = [str(x) if isinstance(x, py.path.local) else x for x in args] + return optparser.parse_known_args(args, namespace=namespace) + + def addini(self, name, help, type=None, default=None): + """ register an ini-file option. 
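# Illustrative sketch of the usual conftest.py registration that flows through
# Parser.getgroup()/OptionGroup.addoption()/Parser.addini() defined here; the option
# and ini names are hypothetical.
def pytest_addoption(parser):
    group = parser.getgroup("myproject", "myproject specific options")
    group.addoption(
        "--run-slow",          # dest defaults to "run_slow": first long option, dashes -> underscores
        action="store_true",
        default=False,
        help="also run tests marked as slow",
    )
    parser.addini("api_url", help="base URL used by the integration tests",
                  default="http://localhost:8000")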
+ + :name: name of the ini-variable + :type: type of the variable, can be ``pathlist``, ``args``, ``linelist`` + or ``bool``. + :default: default value if no ini-file option exists but is queried. + + The value of ini-variables can be retrieved via a call to + :py:func:`config.getini(name) <_pytest.config.Config.getini>`. + """ + assert type in (None, "pathlist", "args", "linelist", "bool") + self._inidict[name] = (help, type, default) + self._ininames.append(name) + + +class ArgumentError(Exception): + """ + Raised if an Argument instance is created with invalid or + inconsistent arguments. + """ + + def __init__(self, msg, option): + self.msg = msg + self.option_id = str(option) + + def __str__(self): + if self.option_id: + return "option %s: %s" % (self.option_id, self.msg) + else: + return self.msg + + +class Argument(object): + """class that mimics the necessary behaviour of optparse.Option + + its currently a least effort implementation + and ignoring choices and integer prefixes + https://docs.python.org/3/library/optparse.html#optparse-standard-option-types + """ + + _typ_map = {"int": int, "string": str, "float": float, "complex": complex} + + def __init__(self, *names, **attrs): + """store parms in private vars for use in add_argument""" + self._attrs = attrs + self._short_opts = [] + self._long_opts = [] + self.dest = attrs.get("dest") + if "%default" in (attrs.get("help") or ""): + warnings.warn( + 'pytest now uses argparse. "%default" should be' + ' changed to "%(default)s" ', + DeprecationWarning, + stacklevel=3, + ) + try: + typ = attrs["type"] + except KeyError: + pass + else: + # this might raise a keyerror as well, don't want to catch that + if isinstance(typ, six.string_types): + if typ == "choice": + warnings.warn( + "`type` argument to addoption() is the string %r." + " For choices this is optional and can be omitted, " + " but when supplied should be a type (for example `str` or `int`)." + " (options: %s)" % (typ, names), + DeprecationWarning, + stacklevel=4, + ) + # argparse expects a type here take it from + # the type of the first element + attrs["type"] = type(attrs["choices"][0]) + else: + warnings.warn( + "`type` argument to addoption() is the string %r, " + " but when supplied should be a type (for example `str` or `int`)." 
+ " (options: %s)" % (typ, names), + DeprecationWarning, + stacklevel=4, + ) + attrs["type"] = Argument._typ_map[typ] + # used in test_parseopt -> test_parse_defaultgetter + self.type = attrs["type"] + else: + self.type = typ + try: + # attribute existence is tested in Config._processopt + self.default = attrs["default"] + except KeyError: + pass + self._set_opt_strings(names) + if not self.dest: + if self._long_opts: + self.dest = self._long_opts[0][2:].replace("-", "_") + else: + try: + self.dest = self._short_opts[0][1:] + except IndexError: + raise ArgumentError("need a long or short option", self) + + def names(self): + return self._short_opts + self._long_opts + + def attrs(self): + # update any attributes set by processopt + attrs = "default dest help".split() + if self.dest: + attrs.append(self.dest) + for attr in attrs: + try: + self._attrs[attr] = getattr(self, attr) + except AttributeError: + pass + if self._attrs.get("help"): + a = self._attrs["help"] + a = a.replace("%default", "%(default)s") + # a = a.replace('%prog', '%(prog)s') + self._attrs["help"] = a + return self._attrs + + def _set_opt_strings(self, opts): + """directly from optparse + + might not be necessary as this is passed to argparse later on""" + for opt in opts: + if len(opt) < 2: + raise ArgumentError( + "invalid option string %r: " + "must be at least two characters long" % opt, + self, + ) + elif len(opt) == 2: + if not (opt[0] == "-" and opt[1] != "-"): + raise ArgumentError( + "invalid short option string %r: " + "must be of the form -x, (x any non-dash char)" % opt, + self, + ) + self._short_opts.append(opt) + else: + if not (opt[0:2] == "--" and opt[2] != "-"): + raise ArgumentError( + "invalid long option string %r: " + "must start with --, followed by non-dash" % opt, + self, + ) + self._long_opts.append(opt) + + def __repr__(self): + args = [] + if self._short_opts: + args += ["_short_opts: " + repr(self._short_opts)] + if self._long_opts: + args += ["_long_opts: " + repr(self._long_opts)] + args += ["dest: " + repr(self.dest)] + if hasattr(self, "type"): + args += ["type: " + repr(self.type)] + if hasattr(self, "default"): + args += ["default: " + repr(self.default)] + return "Argument({})".format(", ".join(args)) + + +class OptionGroup(object): + def __init__(self, name, description="", parser=None): + self.name = name + self.description = description + self.options = [] + self.parser = parser + + def addoption(self, *optnames, **attrs): + """ add an option to this group. + + if a shortened version of a long option is specified it will + be suppressed in the help. 
addoption('--twowords', '--two-words') + results in help showing '--two-words' only, but --twowords gets + accepted **and** the automatic destination is in args.twowords + """ + conflict = set(optnames).intersection( + name for opt in self.options for name in opt.names() + ) + if conflict: + raise ValueError("option names %s already added" % conflict) + option = Argument(*optnames, **attrs) + self._addoption_instance(option, shortupper=False) + + def _addoption(self, *optnames, **attrs): + option = Argument(*optnames, **attrs) + self._addoption_instance(option, shortupper=True) + + def _addoption_instance(self, option, shortupper=False): + if not shortupper: + for opt in option._short_opts: + if opt[0] == "-" and opt[1].islower(): + raise ValueError("lowercase shortoptions reserved") + if self.parser: + self.parser.processoption(option) + self.options.append(option) + + +class MyOptionParser(argparse.ArgumentParser): + def __init__(self, parser, extra_info=None): + if not extra_info: + extra_info = {} + self._parser = parser + argparse.ArgumentParser.__init__( + self, + usage=parser._usage, + add_help=False, + formatter_class=DropShorterLongHelpFormatter, + ) + # extra_info is a dict of (param -> value) to display if there's + # an usage error to provide more contextual information to the user + self.extra_info = extra_info + + def error(self, message): + """error(message: string) + + Prints a usage message incorporating the message to stderr and + exits. + Overrides the method in parent class to change exit code""" + self.print_usage(_sys.stderr) + args = {"prog": self.prog, "message": message} + self.exit(EXIT_USAGEERROR, _("%(prog)s: error: %(message)s\n") % args) + + def parse_args(self, args=None, namespace=None): + """allow splitting of positional arguments""" + args, argv = self.parse_known_args(args, namespace) + if argv: + for arg in argv: + if arg and arg[0] == "-": + lines = ["unrecognized arguments: %s" % (" ".join(argv))] + for k, v in sorted(self.extra_info.items()): + lines.append(" %s: %s" % (k, v)) + self.error("\n".join(lines)) + getattr(args, FILE_OR_DIR).extend(argv) + return args + + +class DropShorterLongHelpFormatter(argparse.HelpFormatter): + """shorten help for long options that differ only in extra hyphens + + - collapse **long** options that are the same except for extra hyphens + - special action attribute map_long_option allows surpressing additional + long options + - shortcut if there are only two options and one of them is a short one + - cache result on action object as this is called at least 2 times + """ + + def _format_action_invocation(self, action): + orgstr = argparse.HelpFormatter._format_action_invocation(self, action) + if orgstr and orgstr[0] != "-": # only optional arguments + return orgstr + res = getattr(action, "_formatted_action_invocation", None) + if res: + return res + options = orgstr.split(", ") + if len(options) == 2 and (len(options[0]) == 2 or len(options[1]) == 2): + # a shortcut for '-h, --help' or '--abc', '-a' + action._formatted_action_invocation = orgstr + return orgstr + return_list = [] + option_map = getattr(action, "map_long_option", {}) + if option_map is None: + option_map = {} + short_long = {} + for option in options: + if len(option) == 2 or option[2] == " ": + continue + if not option.startswith("--"): + raise ArgumentError( + 'long optional argument without "--": [%s]' % (option), self + ) + xxoption = option[2:] + if xxoption.split()[0] not in option_map: + shortened = xxoption.replace("-", "") + if shortened not in 
short_long or len(short_long[shortened]) < len( + xxoption + ): + short_long[shortened] = xxoption + # now short_long has been filled out to the longest with dashes + # **and** we keep the right option ordering from add_argument + for option in options: + if len(option) == 2 or option[2] == " ": + return_list.append(option) + if option[2:] == short_long.get(option.replace("-", "")): + return_list.append(option.replace(" ", "=", 1)) + action._formatted_action_invocation = ", ".join(return_list) + return action._formatted_action_invocation diff --git a/venv/Lib/site-packages/_pytest/config/exceptions.py b/venv/Lib/site-packages/_pytest/config/exceptions.py new file mode 100644 index 00000000..19fe5cb0 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/config/exceptions.py @@ -0,0 +1,9 @@ +class UsageError(Exception): + """ error in pytest usage or invocation""" + + +class PrintHelp(Exception): + """Raised when pytest should print it's help to skip the rest of the + argument parsing and validation.""" + + pass diff --git a/venv/Lib/site-packages/_pytest/config/findpaths.py b/venv/Lib/site-packages/_pytest/config/findpaths.py new file mode 100644 index 00000000..f9943019 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/config/findpaths.py @@ -0,0 +1,146 @@ +import py +import os +from .exceptions import UsageError + + +def exists(path, ignore=EnvironmentError): + try: + return path.check() + except ignore: + return False + + +def getcfg(args, config=None): + """ + Search the list of arguments for a valid ini-file for pytest, + and return a tuple of (rootdir, inifile, cfg-dict). + + note: config is optional and used only to issue warnings explicitly (#2891). + """ + from _pytest.deprecated import CFG_PYTEST_SECTION + + inibasenames = ["pytest.ini", "tox.ini", "setup.cfg"] + args = [x for x in args if not str(x).startswith("-")] + if not args: + args = [py.path.local()] + for arg in args: + arg = py.path.local(arg) + for base in arg.parts(reverse=True): + for inibasename in inibasenames: + p = base.join(inibasename) + if exists(p): + iniconfig = py.iniconfig.IniConfig(p) + if "pytest" in iniconfig.sections: + if inibasename == "setup.cfg" and config is not None: + from _pytest.warnings import _issue_config_warning + from _pytest.warning_types import RemovedInPytest4Warning + + _issue_config_warning( + RemovedInPytest4Warning( + CFG_PYTEST_SECTION.format(filename=inibasename) + ), + config=config, + ) + return base, p, iniconfig["pytest"] + if ( + inibasename == "setup.cfg" + and "tool:pytest" in iniconfig.sections + ): + return base, p, iniconfig["tool:pytest"] + elif inibasename == "pytest.ini": + # allowed to be empty + return base, p, {} + return None, None, None + + +def get_common_ancestor(paths): + common_ancestor = None + for path in paths: + if not path.exists(): + continue + if common_ancestor is None: + common_ancestor = path + else: + if path.relto(common_ancestor) or path == common_ancestor: + continue + elif common_ancestor.relto(path): + common_ancestor = path + else: + shared = path.common(common_ancestor) + if shared is not None: + common_ancestor = shared + if common_ancestor is None: + common_ancestor = py.path.local() + elif common_ancestor.isfile(): + common_ancestor = common_ancestor.dirpath() + return common_ancestor + + +def get_dirs_from_args(args): + def is_option(x): + return str(x).startswith("-") + + def get_file_part_from_node_id(x): + return str(x).split("::")[0] + + def get_dir_from_path(path): + if path.isdir(): + return path + return py.path.local(path.dirname) 
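# Illustrative sketch of the rootdir/inifile discovery implemented by getcfg() above and
# determine_setup() just below, for a layout with no ini file; the temporary layout is
# hypothetical and the expected result assumes no pytest.ini/tox.ini/setup.cfg exists in
# any ancestor of the temporary directory.
import tempfile
import py
from _pytest.config.findpaths import determine_setup  # internal helper added in this file

tmp = py.path.local(tempfile.mkdtemp())
proj = tmp.mkdir("proj")
proj.join("setup.py").write("")      # marks the project root for the fallback search
tests = proj.mkdir("tests")

rootdir, inifile, inicfg = determine_setup(None, [str(tests)])
print(rootdir, inifile, inicfg)      # expected: <proj> None {} -- no ini found, setup.py wins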
+ + # These look like paths but may not exist + possible_paths = ( + py.path.local(get_file_part_from_node_id(arg)) + for arg in args + if not is_option(arg) + ) + + return [get_dir_from_path(path) for path in possible_paths if path.exists()] + + +def determine_setup(inifile, args, rootdir_cmd_arg=None, config=None): + dirs = get_dirs_from_args(args) + if inifile: + iniconfig = py.iniconfig.IniConfig(inifile) + is_cfg_file = str(inifile).endswith(".cfg") + sections = ["tool:pytest", "pytest"] if is_cfg_file else ["pytest"] + for section in sections: + try: + inicfg = iniconfig[section] + if is_cfg_file and section == "pytest" and config is not None: + from _pytest.deprecated import CFG_PYTEST_SECTION + from _pytest.warnings import _issue_config_warning + + # TODO: [pytest] section in *.cfg files is deprecated. Need refactoring once + # the deprecation expires. + _issue_config_warning( + CFG_PYTEST_SECTION.format(filename=str(inifile)), config + ) + break + except KeyError: + inicfg = None + rootdir = get_common_ancestor(dirs) + else: + ancestor = get_common_ancestor(dirs) + rootdir, inifile, inicfg = getcfg([ancestor], config=config) + if rootdir is None: + for rootdir in ancestor.parts(reverse=True): + if rootdir.join("setup.py").exists(): + break + else: + rootdir, inifile, inicfg = getcfg(dirs, config=config) + if rootdir is None: + rootdir = get_common_ancestor([py.path.local(), ancestor]) + is_fs_root = os.path.splitdrive(str(rootdir))[1] == "/" + if is_fs_root: + rootdir = ancestor + if rootdir_cmd_arg: + rootdir_abs_path = py.path.local(os.path.expandvars(rootdir_cmd_arg)) + if not os.path.isdir(str(rootdir_abs_path)): + raise UsageError( + "Directory '{}' not found. Check your '--rootdir' option.".format( + rootdir_abs_path + ) + ) + rootdir = rootdir_abs_path + return rootdir, inifile, inicfg or {} diff --git a/venv/Lib/site-packages/_pytest/debugging.py b/venv/Lib/site-packages/_pytest/debugging.py new file mode 100644 index 00000000..cc9bf5c2 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/debugging.py @@ -0,0 +1,199 @@ +""" interactive debugging with PDB, the Python Debugger. """ +from __future__ import absolute_import, division, print_function + +import os +import pdb +import sys +from doctest import UnexpectedException + +from _pytest import outcomes +from _pytest.config import hookimpl + +try: + from builtins import breakpoint # noqa + + SUPPORTS_BREAKPOINT_BUILTIN = True +except ImportError: + SUPPORTS_BREAKPOINT_BUILTIN = False + + +def pytest_addoption(parser): + group = parser.getgroup("general") + group._addoption( + "--pdb", + dest="usepdb", + action="store_true", + help="start the interactive Python debugger on errors or KeyboardInterrupt.", + ) + group._addoption( + "--pdbcls", + dest="usepdb_cls", + metavar="modulename:classname", + help="start a custom interactive Python debugger on errors. 
" + "For example: --pdbcls=IPython.terminal.debugger:TerminalPdb", + ) + group._addoption( + "--trace", + dest="trace", + action="store_true", + help="Immediately break when running each test.", + ) + + +def pytest_configure(config): + if config.getvalue("usepdb_cls"): + modname, classname = config.getvalue("usepdb_cls").split(":") + __import__(modname) + pdb_cls = getattr(sys.modules[modname], classname) + else: + pdb_cls = pdb.Pdb + + if config.getvalue("trace"): + config.pluginmanager.register(PdbTrace(), "pdbtrace") + if config.getvalue("usepdb"): + config.pluginmanager.register(PdbInvoke(), "pdbinvoke") + + # Use custom Pdb class set_trace instead of default Pdb on breakpoint() call + if SUPPORTS_BREAKPOINT_BUILTIN: + _environ_pythonbreakpoint = os.environ.get("PYTHONBREAKPOINT", "") + if _environ_pythonbreakpoint == "": + sys.breakpointhook = pytestPDB.set_trace + + old = (pdb.set_trace, pytestPDB._pluginmanager) + + def fin(): + pdb.set_trace, pytestPDB._pluginmanager = old + pytestPDB._config = None + pytestPDB._pdb_cls = pdb.Pdb + if SUPPORTS_BREAKPOINT_BUILTIN: + sys.breakpointhook = sys.__breakpointhook__ + + pdb.set_trace = pytestPDB.set_trace + pytestPDB._pluginmanager = config.pluginmanager + pytestPDB._config = config + pytestPDB._pdb_cls = pdb_cls + config._cleanup.append(fin) + + +class pytestPDB(object): + """ Pseudo PDB that defers to the real pdb. """ + + _pluginmanager = None + _config = None + _pdb_cls = pdb.Pdb + + @classmethod + def set_trace(cls, set_break=True): + """ invoke PDB set_trace debugging, dropping any IO capturing. """ + import _pytest.config + + frame = sys._getframe().f_back + if cls._pluginmanager is not None: + capman = cls._pluginmanager.getplugin("capturemanager") + if capman: + capman.suspend_global_capture(in_=True) + tw = _pytest.config.create_terminal_writer(cls._config) + tw.line() + tw.sep(">", "PDB set_trace (IO-capturing turned off)") + cls._pluginmanager.hook.pytest_enter_pdb(config=cls._config) + if set_break: + cls._pdb_cls().set_trace(frame) + + +class PdbInvoke(object): + def pytest_exception_interact(self, node, call, report): + capman = node.config.pluginmanager.getplugin("capturemanager") + if capman: + capman.suspend_global_capture(in_=True) + out, err = capman.read_global_capture() + sys.stdout.write(out) + sys.stdout.write(err) + _enter_pdb(node, call.excinfo, report) + + def pytest_internalerror(self, excrepr, excinfo): + tb = _postmortem_traceback(excinfo) + post_mortem(tb) + + +class PdbTrace(object): + @hookimpl(hookwrapper=True) + def pytest_pyfunc_call(self, pyfuncitem): + _test_pytest_function(pyfuncitem) + yield + + +def _test_pytest_function(pyfuncitem): + pytestPDB.set_trace(set_break=False) + testfunction = pyfuncitem.obj + pyfuncitem.obj = pdb.runcall + if pyfuncitem._isyieldedfunction(): + arg_list = list(pyfuncitem._args) + arg_list.insert(0, testfunction) + pyfuncitem._args = tuple(arg_list) + else: + if "func" in pyfuncitem._fixtureinfo.argnames: + raise ValueError("--trace can't be used with a fixture named func!") + pyfuncitem.funcargs["func"] = testfunction + new_list = list(pyfuncitem._fixtureinfo.argnames) + new_list.append("func") + pyfuncitem._fixtureinfo.argnames = tuple(new_list) + + +def _enter_pdb(node, excinfo, rep): + # XXX we re-use the TerminalReporter's terminalwriter + # because this seems to avoid some encoding related troubles + # for not completely clear reasons. 
+ tw = node.config.pluginmanager.getplugin("terminalreporter")._tw + tw.line() + + showcapture = node.config.option.showcapture + + for sectionname, content in ( + ("stdout", rep.capstdout), + ("stderr", rep.capstderr), + ("log", rep.caplog), + ): + if showcapture in (sectionname, "all") and content: + tw.sep(">", "captured " + sectionname) + if content[-1:] == "\n": + content = content[:-1] + tw.line(content) + + tw.sep(">", "traceback") + rep.toterminal(tw) + tw.sep(">", "entering PDB") + tb = _postmortem_traceback(excinfo) + rep._pdbshown = True + if post_mortem(tb): + outcomes.exit("Quitting debugger") + return rep + + +def _postmortem_traceback(excinfo): + if isinstance(excinfo.value, UnexpectedException): + # A doctest.UnexpectedException is not useful for post_mortem. + # Use the underlying exception instead: + return excinfo.value.exc_info[2] + else: + return excinfo._excinfo[2] + + +def _find_last_non_hidden_frame(stack): + i = max(0, len(stack) - 1) + while i and stack[i][0].f_locals.get("__tracebackhide__", False): + i -= 1 + return i + + +def post_mortem(t): + class Pdb(pytestPDB._pdb_cls): + def get_stack(self, f, t): + stack, i = pdb.Pdb.get_stack(self, f, t) + if f is None: + i = _find_last_non_hidden_frame(stack) + return stack, i + + p = Pdb() + p.reset() + p.interaction(None, t) + return p.quitting diff --git a/venv/Lib/site-packages/_pytest/deprecated.py b/venv/Lib/site-packages/_pytest/deprecated.py new file mode 100644 index 00000000..54886c99 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/deprecated.py @@ -0,0 +1,116 @@ +""" +This module contains deprecation messages and bits of code used elsewhere in the codebase +that is planned to be removed in the next pytest release. + +Keeping it in a central location makes it easy to track what is deprecated and should +be removed when the time comes. + +All constants defined in this module should be either PytestWarning instances or UnformattedWarning +in case of warnings which need to format their messages. +""" +from __future__ import absolute_import, division, print_function + + +from _pytest.warning_types import UnformattedWarning, RemovedInPytest4Warning + + +MAIN_STR_ARGS = RemovedInPytest4Warning( + "passing a string to pytest.main() is deprecated, " + "pass a list of arguments instead." +) + +YIELD_TESTS = RemovedInPytest4Warning( + "yield tests are deprecated, and scheduled to be removed in pytest 4.0" +) + +CACHED_SETUP = RemovedInPytest4Warning( + "cached_setup is deprecated and will be removed in a future release. " + "Use standard fixture functions instead." +) + +COMPAT_PROPERTY = UnformattedWarning( + RemovedInPytest4Warning, + "usage of {owner}.{name} is deprecated, please use pytest.{name} instead", +) + +CUSTOM_CLASS = UnformattedWarning( + RemovedInPytest4Warning, + 'use of special named "{name}" objects in collectors of type "{type_name}" to ' + "customize the created nodes is deprecated. " + "Use pytest_pycollect_makeitem(...) to create custom " + "collection nodes instead.", +) + +FUNCARG_PREFIX = UnformattedWarning( + RemovedInPytest4Warning, + '{name}: declaring fixtures using "pytest_funcarg__" prefix is deprecated ' + "and scheduled to be removed in pytest 4.0. " + "Please remove the prefix and use the @pytest.fixture decorator instead.", +) + +FIXTURE_FUNCTION_CALL = UnformattedWarning( + RemovedInPytest4Warning, + 'Fixture "{name}" called directly. Fixtures are not meant to be called directly, ' + "are created automatically when test functions request them as parameters. 
" + "See https://docs.pytest.org/en/latest/fixture.html for more information.", +) + +CFG_PYTEST_SECTION = UnformattedWarning( + RemovedInPytest4Warning, + "[pytest] section in {filename} files is deprecated, use [tool:pytest] instead.", +) + +GETFUNCARGVALUE = RemovedInPytest4Warning( + "getfuncargvalue is deprecated, use getfixturevalue" +) + +RESULT_LOG = RemovedInPytest4Warning( + "--result-log is deprecated and scheduled for removal in pytest 4.0.\n" + "See https://docs.pytest.org/en/latest/usage.html#creating-resultlog-format-files for more information." +) + +MARK_INFO_ATTRIBUTE = RemovedInPytest4Warning( + "MarkInfo objects are deprecated as they contain merged marks which are hard to deal with correctly.\n" + "Please use node.get_closest_marker(name) or node.iter_markers(name).\n" + "Docs: https://docs.pytest.org/en/latest/mark.html#updating-code" +) + +MARK_PARAMETERSET_UNPACKING = RemovedInPytest4Warning( + "Applying marks directly to parameters is deprecated," + " please use pytest.param(..., marks=...) instead.\n" + "For more details, see: https://docs.pytest.org/en/latest/parametrize.html" +) + +NODE_WARN = RemovedInPytest4Warning( + "Node.warn(code, message) form has been deprecated, use Node.warn(warning_instance) instead." +) + +RECORD_XML_PROPERTY = RemovedInPytest4Warning( + 'Fixture renamed from "record_xml_property" to "record_property" as user ' + "properties are now available to all reporters.\n" + '"record_xml_property" is now deprecated.' +) + +COLLECTOR_MAKEITEM = RemovedInPytest4Warning( + "pycollector makeitem was removed as it is an accidentially leaked internal api" +) + +METAFUNC_ADD_CALL = RemovedInPytest4Warning( + "Metafunc.addcall is deprecated and scheduled to be removed in pytest 4.0.\n" + "Please use Metafunc.parametrize instead." +) + +PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST = RemovedInPytest4Warning( + "Defining pytest_plugins in a non-top-level conftest is deprecated, " + "because it affects the entire directory tree in a non-explicit way.\n" + "Please move it to the top level conftest file instead." 
+) + +PYTEST_NAMESPACE = RemovedInPytest4Warning( + "pytest_namespace is deprecated and will be removed soon" +) + +PYTEST_ENSURETEMP = RemovedInPytest4Warning( + "pytest/tmpdir_factory.ensuretemp is deprecated, \n" + "please use the tmp_path fixture or tmp_path_factory.mktemp" +) diff --git a/venv/Lib/site-packages/_pytest/doctest.py b/venv/Lib/site-packages/_pytest/doctest.py new file mode 100644 index 00000000..12b871f9 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/doctest.py @@ -0,0 +1,512 @@ +""" discover and run doctests in modules and test files.""" +from __future__ import absolute_import, division, print_function + +import traceback +import sys +import platform + +import pytest +from _pytest._code.code import ExceptionInfo, ReprFileLocation, TerminalRepr +from _pytest.fixtures import FixtureRequest + + +DOCTEST_REPORT_CHOICE_NONE = "none" +DOCTEST_REPORT_CHOICE_CDIFF = "cdiff" +DOCTEST_REPORT_CHOICE_NDIFF = "ndiff" +DOCTEST_REPORT_CHOICE_UDIFF = "udiff" +DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE = "only_first_failure" + +DOCTEST_REPORT_CHOICES = ( + DOCTEST_REPORT_CHOICE_NONE, + DOCTEST_REPORT_CHOICE_CDIFF, + DOCTEST_REPORT_CHOICE_NDIFF, + DOCTEST_REPORT_CHOICE_UDIFF, + DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE, +) + +# Lazy definition of runner class +RUNNER_CLASS = None + + +def pytest_addoption(parser): + parser.addini( + "doctest_optionflags", + "option flags for doctests", + type="args", + default=["ELLIPSIS"], + ) + parser.addini( + "doctest_encoding", "encoding used for doctest files", default="utf-8" + ) + group = parser.getgroup("collect") + group.addoption( + "--doctest-modules", + action="store_true", + default=False, + help="run doctests in all .py modules", + dest="doctestmodules", + ) + group.addoption( + "--doctest-report", + type=str.lower, + default="udiff", + help="choose another output format for diffs on doctest failure", + choices=DOCTEST_REPORT_CHOICES, + dest="doctestreport", + ) + group.addoption( + "--doctest-glob", + action="append", + default=[], + metavar="pat", + help="doctests file matching pattern, default: test*.txt", + dest="doctestglob", + ) + group.addoption( + "--doctest-ignore-import-errors", + action="store_true", + default=False, + help="ignore doctest ImportErrors", + dest="doctest_ignore_import_errors", + ) + group.addoption( + "--doctest-continue-on-failure", + action="store_true", + default=False, + help="for a given doctest, continue to run after the first failure", + dest="doctest_continue_on_failure", + ) + + +def pytest_collect_file(path, parent): + config = parent.config + if path.ext == ".py": + if config.option.doctestmodules and not _is_setup_py(config, path, parent): + return DoctestModule(path, parent) + elif _is_doctest(config, path, parent): + return DoctestTextfile(path, parent) + + +def _is_setup_py(config, path, parent): + if path.basename != "setup.py": + return False + contents = path.read() + return "setuptools" in contents or "distutils" in contents + + +def _is_doctest(config, path, parent): + if path.ext in (".txt", ".rst") and parent.session.isinitpath(path): + return True + globs = config.getoption("doctestglob") or ["test*.txt"] + for glob in globs: + if path.check(fnmatch=glob): + return True + return False + + +class ReprFailDoctest(TerminalRepr): + def __init__(self, reprlocation_lines): + # List of (reprlocation, lines) tuples + self.reprlocation_lines = reprlocation_lines + + def toterminal(self, tw): + for reprlocation, lines in self.reprlocation_lines: + for line in lines: + tw.line(line) + 
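# Illustrative sketch of a module whose docstring examples are collected once the options
# registered above are enabled, e.g. "pytest --doctest-modules" or "--doctest-glob='*.rst'",
# optionally combined with the "doctest_optionflags" ini value (e.g. ELLIPSIS NORMALIZE_WHITESPACE).
def add(a, b):
    """Return the sum of two values.

    >>> add(2, 3)
    5
    >>> add("spam ", "eggs")
    'spam eggs'
    """
    return a + b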
reprlocation.toterminal(tw) + + +class MultipleDoctestFailures(Exception): + def __init__(self, failures): + super(MultipleDoctestFailures, self).__init__() + self.failures = failures + + +def _init_runner_class(): + import doctest + + class PytestDoctestRunner(doctest.DebugRunner): + """ + Runner to collect failures. Note that the out variable in this case is + a list instead of a stdout-like object + """ + + def __init__( + self, checker=None, verbose=None, optionflags=0, continue_on_failure=True + ): + doctest.DebugRunner.__init__( + self, checker=checker, verbose=verbose, optionflags=optionflags + ) + self.continue_on_failure = continue_on_failure + + def report_failure(self, out, test, example, got): + failure = doctest.DocTestFailure(test, example, got) + if self.continue_on_failure: + out.append(failure) + else: + raise failure + + def report_unexpected_exception(self, out, test, example, exc_info): + failure = doctest.UnexpectedException(test, example, exc_info) + if self.continue_on_failure: + out.append(failure) + else: + raise failure + + return PytestDoctestRunner + + +def _get_runner(checker=None, verbose=None, optionflags=0, continue_on_failure=True): + # We need this in order to do a lazy import on doctest + global RUNNER_CLASS + if RUNNER_CLASS is None: + RUNNER_CLASS = _init_runner_class() + return RUNNER_CLASS( + checker=checker, + verbose=verbose, + optionflags=optionflags, + continue_on_failure=continue_on_failure, + ) + + +class DoctestItem(pytest.Item): + def __init__(self, name, parent, runner=None, dtest=None): + super(DoctestItem, self).__init__(name, parent) + self.runner = runner + self.dtest = dtest + self.obj = None + self.fixture_request = None + + def setup(self): + if self.dtest is not None: + self.fixture_request = _setup_fixtures(self) + globs = dict(getfixture=self.fixture_request.getfixturevalue) + for name, value in self.fixture_request.getfixturevalue( + "doctest_namespace" + ).items(): + globs[name] = value + self.dtest.globs.update(globs) + + def runtest(self): + _check_all_skipped(self.dtest) + self._disable_output_capturing_for_darwin() + failures = [] + self.runner.run(self.dtest, out=failures) + if failures: + raise MultipleDoctestFailures(failures) + + def _disable_output_capturing_for_darwin(self): + """ + Disable output capturing. 
Otherwise, stdout is lost to doctest (#985) + """ + if platform.system() != "Darwin": + return + capman = self.config.pluginmanager.getplugin("capturemanager") + if capman: + capman.suspend_global_capture(in_=True) + out, err = capman.read_global_capture() + sys.stdout.write(out) + sys.stderr.write(err) + + def repr_failure(self, excinfo): + import doctest + + failures = None + if excinfo.errisinstance((doctest.DocTestFailure, doctest.UnexpectedException)): + failures = [excinfo.value] + elif excinfo.errisinstance(MultipleDoctestFailures): + failures = excinfo.value.failures + + if failures is not None: + reprlocation_lines = [] + for failure in failures: + example = failure.example + test = failure.test + filename = test.filename + if test.lineno is None: + lineno = None + else: + lineno = test.lineno + example.lineno + 1 + message = type(failure).__name__ + reprlocation = ReprFileLocation(filename, lineno, message) + checker = _get_checker() + report_choice = _get_report_choice( + self.config.getoption("doctestreport") + ) + if lineno is not None: + lines = failure.test.docstring.splitlines(False) + # add line numbers to the left of the error message + lines = [ + "%03d %s" % (i + test.lineno + 1, x) + for (i, x) in enumerate(lines) + ] + # trim docstring error lines to 10 + lines = lines[max(example.lineno - 9, 0) : example.lineno + 1] + else: + lines = [ + "EXAMPLE LOCATION UNKNOWN, not showing all tests of that example" + ] + indent = ">>>" + for line in example.source.splitlines(): + lines.append("??? %s %s" % (indent, line)) + indent = "..." + if isinstance(failure, doctest.DocTestFailure): + lines += checker.output_difference( + example, failure.got, report_choice + ).split("\n") + else: + inner_excinfo = ExceptionInfo(failure.exc_info) + lines += ["UNEXPECTED EXCEPTION: %s" % repr(inner_excinfo.value)] + lines += traceback.format_exception(*failure.exc_info) + reprlocation_lines.append((reprlocation, lines)) + return ReprFailDoctest(reprlocation_lines) + else: + return super(DoctestItem, self).repr_failure(excinfo) + + def reportinfo(self): + return self.fspath, self.dtest.lineno, "[doctest] %s" % self.name + + +def _get_flag_lookup(): + import doctest + + return dict( + DONT_ACCEPT_TRUE_FOR_1=doctest.DONT_ACCEPT_TRUE_FOR_1, + DONT_ACCEPT_BLANKLINE=doctest.DONT_ACCEPT_BLANKLINE, + NORMALIZE_WHITESPACE=doctest.NORMALIZE_WHITESPACE, + ELLIPSIS=doctest.ELLIPSIS, + IGNORE_EXCEPTION_DETAIL=doctest.IGNORE_EXCEPTION_DETAIL, + COMPARISON_FLAGS=doctest.COMPARISON_FLAGS, + ALLOW_UNICODE=_get_allow_unicode_flag(), + ALLOW_BYTES=_get_allow_bytes_flag(), + ) + + +def get_optionflags(parent): + optionflags_str = parent.config.getini("doctest_optionflags") + flag_lookup_table = _get_flag_lookup() + flag_acc = 0 + for flag in optionflags_str: + flag_acc |= flag_lookup_table[flag] + return flag_acc + + +def _get_continue_on_failure(config): + continue_on_failure = config.getvalue("doctest_continue_on_failure") + if continue_on_failure: + # We need to turn off this if we use pdb since we should stop at + # the first failure + if config.getvalue("usepdb"): + continue_on_failure = False + return continue_on_failure + + +class DoctestTextfile(pytest.Module): + obj = None + + def collect(self): + import doctest + + # inspired by doctest.testfile; ideally we would use it directly, + # but it doesn't support passing a custom checker + encoding = self.config.getini("doctest_encoding") + text = self.fspath.read_text(encoding) + filename = str(self.fspath) + name = self.fspath.basename + globs = 
{"__name__": "__main__"} + + optionflags = get_optionflags(self) + + runner = _get_runner( + verbose=0, + optionflags=optionflags, + checker=_get_checker(), + continue_on_failure=_get_continue_on_failure(self.config), + ) + _fix_spoof_python2(runner, encoding) + + parser = doctest.DocTestParser() + test = parser.get_doctest(text, globs, name, filename, 0) + if test.examples: + yield DoctestItem(test.name, self, runner, test) + + +def _check_all_skipped(test): + """raises pytest.skip() if all examples in the given DocTest have the SKIP + option set. + """ + import doctest + + all_skipped = all(x.options.get(doctest.SKIP, False) for x in test.examples) + if all_skipped: + pytest.skip("all tests skipped by +SKIP option") + + +class DoctestModule(pytest.Module): + def collect(self): + import doctest + + if self.fspath.basename == "conftest.py": + module = self.config.pluginmanager._importconftest(self.fspath) + else: + try: + module = self.fspath.pyimport() + except ImportError: + if self.config.getvalue("doctest_ignore_import_errors"): + pytest.skip("unable to import module %r" % self.fspath) + else: + raise + # uses internal doctest module parsing mechanism + finder = doctest.DocTestFinder() + optionflags = get_optionflags(self) + runner = _get_runner( + verbose=0, + optionflags=optionflags, + checker=_get_checker(), + continue_on_failure=_get_continue_on_failure(self.config), + ) + + for test in finder.find(module, module.__name__): + if test.examples: # skip empty doctests + yield DoctestItem(test.name, self, runner, test) + + +def _setup_fixtures(doctest_item): + """ + Used by DoctestTextfile and DoctestItem to setup fixture information. + """ + + def func(): + pass + + doctest_item.funcargs = {} + fm = doctest_item.session._fixturemanager + doctest_item._fixtureinfo = fm.getfixtureinfo( + node=doctest_item, func=func, cls=None, funcargs=False + ) + fixture_request = FixtureRequest(doctest_item) + fixture_request._fillfixtures() + return fixture_request + + +def _get_checker(): + """ + Returns a doctest.OutputChecker subclass that takes in account the + ALLOW_UNICODE option to ignore u'' prefixes in strings and ALLOW_BYTES + to strip b'' prefixes. + Useful when the same doctest should run in Python 2 and Python 3. + + An inner class is used to avoid importing "doctest" at the module + level. + """ + if hasattr(_get_checker, "LiteralsOutputChecker"): + return _get_checker.LiteralsOutputChecker() + + import doctest + import re + + class LiteralsOutputChecker(doctest.OutputChecker): + """ + Copied from doctest_nose_plugin.py from the nltk project: + https://github.com/nltk/nltk + + Further extended to also support byte literals. 
+ """ + + _unicode_literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE) + _bytes_literal_re = re.compile(r"(\W|^)[bB]([rR]?[\'\"])", re.UNICODE) + + def check_output(self, want, got, optionflags): + res = doctest.OutputChecker.check_output(self, want, got, optionflags) + if res: + return True + + allow_unicode = optionflags & _get_allow_unicode_flag() + allow_bytes = optionflags & _get_allow_bytes_flag() + if not allow_unicode and not allow_bytes: + return False + + else: # pragma: no cover + + def remove_prefixes(regex, txt): + return re.sub(regex, r"\1\2", txt) + + if allow_unicode: + want = remove_prefixes(self._unicode_literal_re, want) + got = remove_prefixes(self._unicode_literal_re, got) + if allow_bytes: + want = remove_prefixes(self._bytes_literal_re, want) + got = remove_prefixes(self._bytes_literal_re, got) + res = doctest.OutputChecker.check_output(self, want, got, optionflags) + return res + + _get_checker.LiteralsOutputChecker = LiteralsOutputChecker + return _get_checker.LiteralsOutputChecker() + + +def _get_allow_unicode_flag(): + """ + Registers and returns the ALLOW_UNICODE flag. + """ + import doctest + + return doctest.register_optionflag("ALLOW_UNICODE") + + +def _get_allow_bytes_flag(): + """ + Registers and returns the ALLOW_BYTES flag. + """ + import doctest + + return doctest.register_optionflag("ALLOW_BYTES") + + +def _get_report_choice(key): + """ + This function returns the actual `doctest` module flag value, we want to do it as late as possible to avoid + importing `doctest` and all its dependencies when parsing options, as it adds overhead and breaks tests. + """ + import doctest + + return { + DOCTEST_REPORT_CHOICE_UDIFF: doctest.REPORT_UDIFF, + DOCTEST_REPORT_CHOICE_CDIFF: doctest.REPORT_CDIFF, + DOCTEST_REPORT_CHOICE_NDIFF: doctest.REPORT_NDIFF, + DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE: doctest.REPORT_ONLY_FIRST_FAILURE, + DOCTEST_REPORT_CHOICE_NONE: 0, + }[key] + + +def _fix_spoof_python2(runner, encoding): + """ + Installs a "SpoofOut" into the given DebugRunner so it properly deals with unicode output. This + should patch only doctests for text files because they don't have a way to declare their + encoding. Doctests in docstrings from Python modules don't have the same problem given that + Python already decoded the strings. + + This fixes the problem related in issue #2434. + """ + from _pytest.compat import _PY2 + + if not _PY2: + return + + from doctest import _SpoofOut + + class UnicodeSpoof(_SpoofOut): + def getvalue(self): + result = _SpoofOut.getvalue(self) + if encoding and isinstance(result, bytes): + result = result.decode(encoding) + return result + + runner._fakeout = UnicodeSpoof() + + +@pytest.fixture(scope="session") +def doctest_namespace(): + """ + Fixture that returns a :py:class:`dict` that will be injected into the namespace of doctests. 
+ """ + return dict() diff --git a/venv/Lib/site-packages/_pytest/fixtures.py b/venv/Lib/site-packages/_pytest/fixtures.py new file mode 100644 index 00000000..cfc187bc --- /dev/null +++ b/venv/Lib/site-packages/_pytest/fixtures.py @@ -0,0 +1,1384 @@ +from __future__ import absolute_import, division, print_function + +import functools +import inspect +import sys +import warnings +from collections import OrderedDict, deque, defaultdict + +import six +from more_itertools import flatten + +import attr +import py +from py._code.code import FormattedExcinfo + +import _pytest +from _pytest import nodes +from _pytest._code.code import TerminalRepr +from _pytest.compat import ( + NOTSET, + exc_clear, + _format_args, + getfslineno, + get_real_func, + is_generator, + isclass, + getimfunc, + getlocation, + getfuncargnames, + safe_getattr, + FuncargnamesCompatAttr, + get_real_method, + _PytestWrapper, +) +from _pytest.deprecated import FIXTURE_FUNCTION_CALL +from _pytest.outcomes import fail, TEST_OUTCOME + +FIXTURE_MSG = 'fixtures cannot have "pytest_funcarg__" prefix and be decorated with @pytest.fixture:\n{}' + + +@attr.s(frozen=True) +class PseudoFixtureDef(object): + cached_result = attr.ib() + scope = attr.ib() + + +def pytest_sessionstart(session): + import _pytest.python + import _pytest.nodes + + scopename2class.update( + { + "package": _pytest.python.Package, + "class": _pytest.python.Class, + "module": _pytest.python.Module, + "function": _pytest.nodes.Item, + "session": _pytest.main.Session, + } + ) + session._fixturemanager = FixtureManager(session) + + +scopename2class = {} + + +scope2props = dict(session=()) +scope2props["package"] = ("fspath",) +scope2props["module"] = ("fspath", "module") +scope2props["class"] = scope2props["module"] + ("cls",) +scope2props["instance"] = scope2props["class"] + ("instance",) +scope2props["function"] = scope2props["instance"] + ("function", "keywords") + + +def scopeproperty(name=None, doc=None): + def decoratescope(func): + scopename = name or func.__name__ + + def provide(self): + if func.__name__ in scope2props[self.scope]: + return func(self) + raise AttributeError( + "%s not available in %s-scoped context" % (scopename, self.scope) + ) + + return property(provide, None, None, func.__doc__) + + return decoratescope + + +def get_scope_package(node, fixturedef): + import pytest + + cls = pytest.Package + current = node + fixture_package_name = "%s/%s" % (fixturedef.baseid, "__init__.py") + while current and ( + type(current) is not cls or fixture_package_name != current.nodeid + ): + current = current.parent + if current is None: + return node.session + return current + + +def get_scope_node(node, scope): + cls = scopename2class.get(scope) + if cls is None: + raise ValueError("unknown scope") + return node.getparent(cls) + + +def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager): + # this function will transform all collected calls to a functions + # if they use direct funcargs (i.e. direct parametrization) + # because we want later test execution to be able to rely on + # an existing FixtureDef structure for all arguments. + # XXX we can probably avoid this algorithm if we modify CallSpec2 + # to directly care for creating the fixturedefs within its methods. 
+ if not metafunc._calls[0].funcargs: + return # this function call does not have direct parametrization + # collect funcargs of all callspecs into a list of values + arg2params = {} + arg2scope = {} + for callspec in metafunc._calls: + for argname, argvalue in callspec.funcargs.items(): + assert argname not in callspec.params + callspec.params[argname] = argvalue + arg2params_list = arg2params.setdefault(argname, []) + callspec.indices[argname] = len(arg2params_list) + arg2params_list.append(argvalue) + if argname not in arg2scope: + scopenum = callspec._arg2scopenum.get(argname, scopenum_function) + arg2scope[argname] = scopes[scopenum] + callspec.funcargs.clear() + + # register artificial FixtureDef's so that later at test execution + # time we can rely on a proper FixtureDef to exist for fixture setup. + arg2fixturedefs = metafunc._arg2fixturedefs + for argname, valuelist in arg2params.items(): + # if we have a scope that is higher than function we need + # to make sure we only ever create an according fixturedef on + # a per-scope basis. We thus store and cache the fixturedef on the + # node related to the scope. + scope = arg2scope[argname] + node = None + if scope != "function": + node = get_scope_node(collector, scope) + if node is None: + assert scope == "class" and isinstance(collector, _pytest.python.Module) + # use module-level collector for class-scope (for now) + node = collector + if node and argname in node._name2pseudofixturedef: + arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]] + else: + fixturedef = FixtureDef( + fixturemanager, + "", + argname, + get_direct_param_fixture_func, + arg2scope[argname], + valuelist, + False, + False, + ) + arg2fixturedefs[argname] = [fixturedef] + if node is not None: + node._name2pseudofixturedef[argname] = fixturedef + + +def getfixturemarker(obj): + """ return fixturemarker or None if it doesn't exist or raised + exceptions.""" + try: + return getattr(obj, "_pytestfixturefunction", None) + except TEST_OUTCOME: + # some objects raise errors like request (from flask import request) + # we don't expect them to be fixture functions + return None + + +def get_parametrized_fixture_keys(item, scopenum): + """ return list of keys for all parametrized arguments which match + the specified scope. """ + assert scopenum < scopenum_function # function + try: + cs = item.callspec + except AttributeError: + pass + else: + # cs.indices.items() is random order of argnames. Need to + # sort this so that different calls to + # get_parametrized_fixture_keys will be deterministic. 
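# Illustrative sketch of a module-scoped parametrized fixture; the (argname, param_index,
# scope node) keys computed here are what let the reordering below set each parameter up
# once per module instead of once per test. The backend names are hypothetical.
import pytest

@pytest.fixture(scope="module", params=["sqlite", "postgres"])
def db(request):
    conn = {"backend": request.param}  # stand-in for an expensive connection
    yield conn
    conn.clear()                       # teardown runs once per parameter per module

def test_backend_known(db):
    assert db["backend"] in ("sqlite", "postgres")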
+ for argname, param_index in sorted(cs.indices.items()): + if cs._arg2scopenum[argname] != scopenum: + continue + if scopenum == 0: # session + key = (argname, param_index) + elif scopenum == 1: # package + key = (argname, param_index, item.fspath.dirpath()) + elif scopenum == 2: # module + key = (argname, param_index, item.fspath) + elif scopenum == 3: # class + key = (argname, param_index, item.fspath, item.cls) + yield key + + +# algorithm for sorting on a per-parametrized resource setup basis +# it is called for scopenum==0 (session) first and performs sorting +# down to the lower scopes such as to minimize number of "high scope" +# setups and teardowns + + +def reorder_items(items): + argkeys_cache = {} + items_by_argkey = {} + for scopenum in range(0, scopenum_function): + argkeys_cache[scopenum] = d = {} + items_by_argkey[scopenum] = item_d = defaultdict(deque) + for item in items: + keys = OrderedDict.fromkeys(get_parametrized_fixture_keys(item, scopenum)) + if keys: + d[item] = keys + for key in keys: + item_d[key].append(item) + items = OrderedDict.fromkeys(items) + return list(reorder_items_atscope(items, argkeys_cache, items_by_argkey, 0)) + + +def fix_cache_order(item, argkeys_cache, items_by_argkey): + for scopenum in range(0, scopenum_function): + for key in argkeys_cache[scopenum].get(item, []): + items_by_argkey[scopenum][key].appendleft(item) + + +def reorder_items_atscope(items, argkeys_cache, items_by_argkey, scopenum): + if scopenum >= scopenum_function or len(items) < 3: + return items + ignore = set() + items_deque = deque(items) + items_done = OrderedDict() + scoped_items_by_argkey = items_by_argkey[scopenum] + scoped_argkeys_cache = argkeys_cache[scopenum] + while items_deque: + no_argkey_group = OrderedDict() + slicing_argkey = None + while items_deque: + item = items_deque.popleft() + if item in items_done or item in no_argkey_group: + continue + argkeys = OrderedDict.fromkeys( + k for k in scoped_argkeys_cache.get(item, []) if k not in ignore + ) + if not argkeys: + no_argkey_group[item] = None + else: + slicing_argkey, _ = argkeys.popitem() + # we don't have to remove relevant items from later in the deque because they'll just be ignored + matching_items = [ + i for i in scoped_items_by_argkey[slicing_argkey] if i in items + ] + for i in reversed(matching_items): + fix_cache_order(i, argkeys_cache, items_by_argkey) + items_deque.appendleft(i) + break + if no_argkey_group: + no_argkey_group = reorder_items_atscope( + no_argkey_group, argkeys_cache, items_by_argkey, scopenum + 1 + ) + for item in no_argkey_group: + items_done[item] = None + ignore.add(slicing_argkey) + return items_done + + +def fillfixtures(function): + """ fill missing funcargs for a test function. """ + try: + request = function._request + except AttributeError: + # XXX this special code path is only expected to execute + # with the oejskit plugin. It uses classes with funcargs + # and we thus have to work a bit to allow this. 
+ fm = function.session._fixturemanager + fi = fm.getfixtureinfo(function.parent, function.obj, None) + function._fixtureinfo = fi + request = function._request = FixtureRequest(function) + request._fillfixtures() + # prune out funcargs for jstests + newfuncargs = {} + for name in fi.argnames: + newfuncargs[name] = function.funcargs[name] + function.funcargs = newfuncargs + else: + request._fillfixtures() + + +def get_direct_param_fixture_func(request): + return request.param + + +@attr.s(slots=True) +class FuncFixtureInfo(object): + # original function argument names + argnames = attr.ib(type=tuple) + # argnames that function immediately requires. These include argnames + + # fixture names specified via usefixtures and via autouse=True in fixture + # definitions. + initialnames = attr.ib(type=tuple) + names_closure = attr.ib() # type: List[str] + name2fixturedefs = attr.ib() # type: List[str, List[FixtureDef]] + + def prune_dependency_tree(self): + """Recompute names_closure from initialnames and name2fixturedefs + + Can only reduce names_closure, which means that the new closure will + always be a subset of the old one. The order is preserved. + + This method is needed because direct parametrization may shadow some + of the fixtures that were included in the originally built dependency + tree. In this way the dependency tree can get pruned, and the closure + of argnames may get reduced. + """ + closure = set() + working_set = set(self.initialnames) + while working_set: + argname = working_set.pop() + # argname may be smth not included in the original names_closure, + # in which case we ignore it. This currently happens with pseudo + # FixtureDefs which wrap 'get_direct_param_fixture_func(request)'. + # So they introduce the new dependency 'request' which might have + # been missing in the original tree (closure). + if argname not in closure and argname in self.names_closure: + closure.add(argname) + if argname in self.name2fixturedefs: + working_set.update(self.name2fixturedefs[argname][-1].argnames) + + self.names_closure[:] = sorted(closure, key=self.names_closure.index) + + +class FixtureRequest(FuncargnamesCompatAttr): + """ A request for a fixture from a test or fixture function. + + A request object gives access to the requesting test context + and has an optional ``param`` attribute in case + the fixture is parametrized indirectly. 
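For orientation, this is roughly how the request object described here surfaces in user code; the fixture and parameter names below are invented for the sketch:

    import pytest

    @pytest.fixture(params=["sqlite", "postgres"])
    def backend(request):
        # request.param carries the current parametrization value,
        # request.fixturename is "backend" and request.scope is "function"
        return request.param

    def test_backend(backend):
        assert backend in ("sqlite", "postgres")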
+ """ + + def __init__(self, pyfuncitem): + self._pyfuncitem = pyfuncitem + #: fixture for which this request is being performed + self.fixturename = None + #: Scope string, one of "function", "class", "module", "session" + self.scope = "function" + self._fixture_defs = {} # argname -> FixtureDef + fixtureinfo = pyfuncitem._fixtureinfo + self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy() + self._arg2index = {} + self._fixturemanager = pyfuncitem.session._fixturemanager + + @property + def fixturenames(self): + """names of all active fixtures in this request""" + result = list(self._pyfuncitem._fixtureinfo.names_closure) + result.extend(set(self._fixture_defs).difference(result)) + return result + + @property + def node(self): + """ underlying collection node (depends on current request scope)""" + return self._getscopeitem(self.scope) + + def _getnextfixturedef(self, argname): + fixturedefs = self._arg2fixturedefs.get(argname, None) + if fixturedefs is None: + # we arrive here because of a dynamic call to + # getfixturevalue(argname) usage which was naturally + # not known at parsing/collection time + parentid = self._pyfuncitem.parent.nodeid + fixturedefs = self._fixturemanager.getfixturedefs(argname, parentid) + self._arg2fixturedefs[argname] = fixturedefs + # fixturedefs list is immutable so we maintain a decreasing index + index = self._arg2index.get(argname, 0) - 1 + if fixturedefs is None or (-index > len(fixturedefs)): + raise FixtureLookupError(argname, self) + self._arg2index[argname] = index + return fixturedefs[index] + + @property + def config(self): + """ the pytest config object associated with this request. """ + return self._pyfuncitem.config + + @scopeproperty() + def function(self): + """ test function object if the request has a per-function scope. """ + return self._pyfuncitem.obj + + @scopeproperty("class") + def cls(self): + """ class (can be None) where the test function was collected. """ + clscol = self._pyfuncitem.getparent(_pytest.python.Class) + if clscol: + return clscol.obj + + @property + def instance(self): + """ instance (can be None) on which test function was collected. """ + # unittest support hack, see _pytest.unittest.TestCaseFunction + try: + return self._pyfuncitem._testcase + except AttributeError: + function = getattr(self, "function", None) + return getattr(function, "__self__", None) + + @scopeproperty() + def module(self): + """ python module object where the test function was collected. """ + return self._pyfuncitem.getparent(_pytest.python.Module).obj + + @scopeproperty() + def fspath(self): + """ the file system path of the test module which collected this test. """ + return self._pyfuncitem.fspath + + @property + def keywords(self): + """ keywords/markers dictionary for the underlying node. """ + return self.node.keywords + + @property + def session(self): + """ pytest session object. """ + return self._pyfuncitem.session + + def addfinalizer(self, finalizer): + """ add finalizer/teardown function to be called after the + last test within the requesting test context finished + execution. """ + # XXX usually this method is shadowed by fixturedef specific ones + self._addfinalizer(finalizer, scope=self.scope) + + def _addfinalizer(self, finalizer, scope): + colitem = self._getscopeitem(scope) + self._pyfuncitem.session._setupstate.addfinalizer( + finalizer=finalizer, colitem=colitem + ) + + def applymarker(self, marker): + """ Apply a marker to a single test function invocation. 
+ This method is useful if you don't want to have a keyword/marker + on all function invocations. + + :arg marker: a :py:class:`_pytest.mark.MarkDecorator` object + created by a call to ``pytest.mark.NAME(...)``. + """ + self.node.add_marker(marker) + + def raiseerror(self, msg): + """ raise a FixtureLookupError with the given message. """ + raise self._fixturemanager.FixtureLookupError(None, self, msg) + + def _fillfixtures(self): + item = self._pyfuncitem + fixturenames = getattr(item, "fixturenames", self.fixturenames) + for argname in fixturenames: + if argname not in item.funcargs: + item.funcargs[argname] = self.getfixturevalue(argname) + + def cached_setup(self, setup, teardown=None, scope="module", extrakey=None): + """ (deprecated) Return a testing resource managed by ``setup`` & + ``teardown`` calls. ``scope`` and ``extrakey`` determine when the + ``teardown`` function will be called so that subsequent calls to + ``setup`` would recreate the resource. With pytest-2.3 you often + do not need ``cached_setup()`` as you can directly declare a scope + on a fixture function and register a finalizer through + ``request.addfinalizer()``. + + :arg teardown: function receiving a previously setup resource. + :arg setup: a no-argument function creating a resource. + :arg scope: a string value out of ``function``, ``class``, ``module`` + or ``session`` indicating the caching lifecycle of the resource. + :arg extrakey: added to internal caching key of (funcargname, scope). + """ + from _pytest.deprecated import CACHED_SETUP + + warnings.warn(CACHED_SETUP, stacklevel=2) + if not hasattr(self.config, "_setupcache"): + self.config._setupcache = {} # XXX weakref? + cachekey = (self.fixturename, self._getscopeitem(scope), extrakey) + cache = self.config._setupcache + try: + val = cache[cachekey] + except KeyError: + self._check_scope(self.fixturename, self.scope, scope) + val = setup() + cache[cachekey] = val + if teardown is not None: + + def finalizer(): + del cache[cachekey] + teardown(val) + + self._addfinalizer(finalizer, scope=scope) + return val + + def getfixturevalue(self, argname): + """ Dynamically run a named fixture function. + + Declaring fixtures via function argument is recommended where possible. + But if you can only decide whether to use another fixture at test + setup time, you may use this function to retrieve it inside a fixture + or test function body. + """ + return self._get_active_fixturedef(argname).cached_result[0] + + def getfuncargvalue(self, argname): + """ Deprecated, use getfixturevalue. 
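A small sketch of the ``getfixturevalue`` API documented above; the fixture and environment-variable names are hypothetical:

    import os

    import pytest

    @pytest.fixture
    def tmp_config(tmpdir):
        return {"root": str(tmpdir)}

    def test_dynamic_lookup(request):
        # the fixture to use is only known at run time, so it is pulled in dynamically
        name = os.environ.get("CONFIG_FIXTURE", "tmp_config")
        cfg = request.getfixturevalue(name)
        assert "root" in cfg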
""" + from _pytest import deprecated + + warnings.warn(deprecated.GETFUNCARGVALUE, stacklevel=2) + return self.getfixturevalue(argname) + + def _get_active_fixturedef(self, argname): + try: + return self._fixture_defs[argname] + except KeyError: + try: + fixturedef = self._getnextfixturedef(argname) + except FixtureLookupError: + if argname == "request": + cached_result = (self, [0], None) + scope = "function" + return PseudoFixtureDef(cached_result, scope) + raise + # remove indent to prevent the python3 exception + # from leaking into the call + self._compute_fixture_value(fixturedef) + self._fixture_defs[argname] = fixturedef + return fixturedef + + def _get_fixturestack(self): + current = self + values = [] + while 1: + fixturedef = getattr(current, "_fixturedef", None) + if fixturedef is None: + values.reverse() + return values + values.append(fixturedef) + current = current._parent_request + + def _compute_fixture_value(self, fixturedef): + """ + Creates a SubRequest based on "self" and calls the execute method of the given fixturedef object. This will + force the FixtureDef object to throw away any previous results and compute a new fixture value, which + will be stored into the FixtureDef object itself. + + :param FixtureDef fixturedef: + """ + # prepare a subrequest object before calling fixture function + # (latter managed by fixturedef) + argname = fixturedef.argname + funcitem = self._pyfuncitem + scope = fixturedef.scope + try: + param = funcitem.callspec.getparam(argname) + except (AttributeError, ValueError): + param = NOTSET + param_index = 0 + has_params = fixturedef.params is not None + fixtures_not_supported = getattr(funcitem, "nofuncargs", False) + if has_params and fixtures_not_supported: + msg = ( + "{name} does not support fixtures, maybe unittest.TestCase subclass?\n" + "Node id: {nodeid}\n" + "Function type: {typename}" + ).format( + name=funcitem.name, + nodeid=funcitem.nodeid, + typename=type(funcitem).__name__, + ) + fail(msg, pytrace=False) + if has_params: + frame = inspect.stack()[3] + frameinfo = inspect.getframeinfo(frame[0]) + source_path = frameinfo.filename + source_lineno = frameinfo.lineno + source_path = py.path.local(source_path) + if source_path.relto(funcitem.config.rootdir): + source_path = source_path.relto(funcitem.config.rootdir) + msg = ( + "The requested fixture has no parameter defined for test:\n" + " {}\n\n" + "Requested fixture '{}' defined in:\n{}" + "\n\nRequested here:\n{}:{}".format( + funcitem.nodeid, + fixturedef.argname, + getlocation(fixturedef.func, funcitem.config.rootdir), + source_path, + source_lineno, + ) + ) + fail(msg, pytrace=False) + else: + # indices might not be set if old-style metafunc.addcall() was used + param_index = funcitem.callspec.indices.get(argname, 0) + # if a parametrize invocation set a scope it will override + # the static scope defined with the fixture function + paramscopenum = funcitem.callspec._arg2scopenum.get(argname) + if paramscopenum is not None: + scope = scopes[paramscopenum] + + subrequest = SubRequest(self, scope, param, param_index, fixturedef) + + # check if a higher-level scoped fixture accesses a lower level one + subrequest._check_scope(argname, self.scope, scope) + + # clear sys.exc_info before invoking the fixture (python bug?) 
+ # if its not explicitly cleared it will leak into the call + exc_clear() + try: + # call the fixture function + fixturedef.execute(request=subrequest) + finally: + # if fixture function failed it might have registered finalizers + self.session._setupstate.addfinalizer( + functools.partial(fixturedef.finish, request=subrequest), + subrequest.node, + ) + + def _check_scope(self, argname, invoking_scope, requested_scope): + if argname == "request": + return + if scopemismatch(invoking_scope, requested_scope): + # try to report something helpful + lines = self._factorytraceback() + fail( + "ScopeMismatch: You tried to access the %r scoped " + "fixture %r with a %r scoped request object, " + "involved factories\n%s" + % ((requested_scope, argname, invoking_scope, "\n".join(lines))), + pytrace=False, + ) + + def _factorytraceback(self): + lines = [] + for fixturedef in self._get_fixturestack(): + factory = fixturedef.func + fs, lineno = getfslineno(factory) + p = self._pyfuncitem.session.fspath.bestrelpath(fs) + args = _format_args(factory) + lines.append("%s:%d: def %s%s" % (p, lineno, factory.__name__, args)) + return lines + + def _getscopeitem(self, scope): + if scope == "function": + # this might also be a non-function Item despite its attribute name + return self._pyfuncitem + if scope == "package": + node = get_scope_package(self._pyfuncitem, self._fixturedef) + else: + node = get_scope_node(self._pyfuncitem, scope) + if node is None and scope == "class": + # fallback to function item itself + node = self._pyfuncitem + assert node, 'Could not obtain a node for scope "{}" for function {!r}'.format( + scope, self._pyfuncitem + ) + return node + + def __repr__(self): + return "" % (self.node) + + +class SubRequest(FixtureRequest): + """ a sub request for handling getting a fixture from a + test function/fixture. """ + + def __init__(self, request, scope, param, param_index, fixturedef): + self._parent_request = request + self.fixturename = fixturedef.argname + if param is not NOTSET: + self.param = param + self.param_index = param_index + self.scope = scope + self._fixturedef = fixturedef + self._pyfuncitem = request._pyfuncitem + self._fixture_defs = request._fixture_defs + self._arg2fixturedefs = request._arg2fixturedefs + self._arg2index = request._arg2index + self._fixturemanager = request._fixturemanager + + def __repr__(self): + return "" % (self.fixturename, self._pyfuncitem) + + def addfinalizer(self, finalizer): + self._fixturedef.addfinalizer(finalizer) + + +class ScopeMismatchError(Exception): + """ A fixture function tries to use a different fixture function which + which has a lower scope (e.g. a Session one calls a function one) + """ + + +scopes = "session package module class function".split() +scopenum_function = scopes.index("function") + + +def scopemismatch(currentscope, newscope): + return scopes.index(newscope) > scopes.index(currentscope) + + +def scope2index(scope, descr, where=None): + """Look up the index of ``scope`` and raise a descriptive value error + if not defined. + """ + try: + return scopes.index(scope) + except ValueError: + fail( + "{} {}got an unexpected scope value '{}'".format( + descr, "from {} ".format(where) if where else "", scope + ), + pytrace=False, + ) + + +class FixtureLookupError(LookupError): + """ could not return a requested Fixture (missing or invalid). 
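The finalizer plumbing above (``SubRequest.addfinalizer`` and friends) is what backs ordinary teardown code in fixtures; a minimal sketch with invented names:

    import pytest

    @pytest.fixture
    def connection(request):
        conn = {"open": True}
        # registered finalizers run once the last test using this fixture is done
        request.addfinalizer(lambda: conn.update(open=False))
        return conn

    @pytest.fixture
    def session_log():
        records = []
        yield records        # everything after the yield is teardown
        del records[:]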
""" + + def __init__(self, argname, request, msg=None): + self.argname = argname + self.request = request + self.fixturestack = request._get_fixturestack() + self.msg = msg + + def formatrepr(self): + tblines = [] + addline = tblines.append + stack = [self.request._pyfuncitem.obj] + stack.extend(map(lambda x: x.func, self.fixturestack)) + msg = self.msg + if msg is not None: + # the last fixture raise an error, let's present + # it at the requesting side + stack = stack[:-1] + for function in stack: + fspath, lineno = getfslineno(function) + try: + lines, _ = inspect.getsourcelines(get_real_func(function)) + except (IOError, IndexError, TypeError): + error_msg = "file %s, line %s: source code not available" + addline(error_msg % (fspath, lineno + 1)) + else: + addline("file %s, line %s" % (fspath, lineno + 1)) + for i, line in enumerate(lines): + line = line.rstrip() + addline(" " + line) + if line.lstrip().startswith("def"): + break + + if msg is None: + fm = self.request._fixturemanager + available = set() + parentid = self.request._pyfuncitem.parent.nodeid + for name, fixturedefs in fm._arg2fixturedefs.items(): + faclist = list(fm._matchfactories(fixturedefs, parentid)) + if faclist: + available.add(name) + if self.argname in available: + msg = " recursive dependency involving fixture '{}' detected".format( + self.argname + ) + else: + msg = "fixture '{}' not found".format(self.argname) + msg += "\n available fixtures: {}".format(", ".join(sorted(available))) + msg += "\n use 'pytest --fixtures [testpath]' for help on them." + + return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname) + + +class FixtureLookupErrorRepr(TerminalRepr): + def __init__(self, filename, firstlineno, tblines, errorstring, argname): + self.tblines = tblines + self.errorstring = errorstring + self.filename = filename + self.firstlineno = firstlineno + self.argname = argname + + def toterminal(self, tw): + # tw.line("FixtureLookupError: %s" %(self.argname), red=True) + for tbline in self.tblines: + tw.line(tbline.rstrip()) + lines = self.errorstring.split("\n") + if lines: + tw.line( + "{} {}".format(FormattedExcinfo.fail_marker, lines[0].strip()), + red=True, + ) + for line in lines[1:]: + tw.line( + "{} {}".format(FormattedExcinfo.flow_marker, line.strip()), + red=True, + ) + tw.line() + tw.line("%s:%d" % (self.filename, self.firstlineno + 1)) + + +def fail_fixturefunc(fixturefunc, msg): + fs, lineno = getfslineno(fixturefunc) + location = "%s:%s" % (fs, lineno + 1) + source = _pytest._code.Source(fixturefunc) + fail(msg + ":\n\n" + str(source.indent()) + "\n" + location, pytrace=False) + + +def call_fixture_func(fixturefunc, request, kwargs): + yieldctx = is_generator(fixturefunc) + if yieldctx: + it = fixturefunc(**kwargs) + res = next(it) + finalizer = functools.partial(_teardown_yield_fixture, fixturefunc, it) + request.addfinalizer(finalizer) + else: + res = fixturefunc(**kwargs) + return res + + +def _teardown_yield_fixture(fixturefunc, it): + """Executes the teardown of a fixture function by advancing the iterator after the + yield and ensure the iteration ends (if not it means there is more than one yield in the function)""" + try: + next(it) + except StopIteration: + pass + else: + fail_fixturefunc( + fixturefunc, "yield_fixture function has more than one 'yield'" + ) + + +class FixtureDef(object): + """ A container for a factory definition. 
""" + + def __init__( + self, + fixturemanager, + baseid, + argname, + func, + scope, + params, + unittest=False, + ids=None, + ): + self._fixturemanager = fixturemanager + self.baseid = baseid or "" + self.has_location = baseid is not None + self.func = func + self.argname = argname + self.scope = scope + self.scopenum = scope2index( + scope or "function", + descr="Fixture '{}'".format(func.__name__), + where=baseid, + ) + self.params = params + self.argnames = getfuncargnames(func, is_method=unittest) + self.unittest = unittest + self.ids = ids + self._finalizers = [] + + def addfinalizer(self, finalizer): + self._finalizers.append(finalizer) + + def finish(self, request): + exceptions = [] + try: + while self._finalizers: + try: + func = self._finalizers.pop() + func() + except: # noqa + exceptions.append(sys.exc_info()) + if exceptions: + e = exceptions[0] + del exceptions # ensure we don't keep all frames alive because of the traceback + six.reraise(*e) + + finally: + hook = self._fixturemanager.session.gethookproxy(request.node.fspath) + hook.pytest_fixture_post_finalizer(fixturedef=self, request=request) + # even if finalization fails, we invalidate + # the cached fixture value and remove + # all finalizers because they may be bound methods which will + # keep instances alive + if hasattr(self, "cached_result"): + del self.cached_result + self._finalizers = [] + + def execute(self, request): + # get required arguments and register our own finish() + # with their finalization + for argname in self.argnames: + fixturedef = request._get_active_fixturedef(argname) + if argname != "request": + fixturedef.addfinalizer(functools.partial(self.finish, request=request)) + + my_cache_key = request.param_index + cached_result = getattr(self, "cached_result", None) + if cached_result is not None: + result, cache_key, err = cached_result + if my_cache_key == cache_key: + if err is not None: + six.reraise(*err) + else: + return result + # we have a previous but differently parametrized fixture instance + # so we need to tear it down before creating a new one + self.finish(request) + assert not hasattr(self, "cached_result") + + hook = self._fixturemanager.session.gethookproxy(request.node.fspath) + return hook.pytest_fixture_setup(fixturedef=self, request=request) + + def __repr__(self): + return "" % ( + self.argname, + self.scope, + self.baseid, + ) + + +def resolve_fixture_function(fixturedef, request): + """Gets the actual callable that can be called to obtain the fixture value, dealing with unittest-specific + instances and bound methods. + """ + fixturefunc = fixturedef.func + if fixturedef.unittest: + if request.instance is not None: + # bind the unbound method to the TestCase instance + fixturefunc = fixturedef.func.__get__(request.instance) + else: + # the fixture function needs to be bound to the actual + # request.instance so that code working with "fixturedef" behaves + # as expected. + if request.instance is not None: + fixturefunc = getimfunc(fixturedef.func) + if fixturefunc != fixturedef.func: + fixturefunc = fixturefunc.__get__(request.instance) + return fixturefunc + + +def pytest_fixture_setup(fixturedef, request): + """ Execution of fixture setup. 
""" + kwargs = {} + for argname in fixturedef.argnames: + fixdef = request._get_active_fixturedef(argname) + result, arg_cache_key, exc = fixdef.cached_result + request._check_scope(argname, request.scope, fixdef.scope) + kwargs[argname] = result + + fixturefunc = resolve_fixture_function(fixturedef, request) + my_cache_key = request.param_index + try: + result = call_fixture_func(fixturefunc, request, kwargs) + except TEST_OUTCOME: + fixturedef.cached_result = (None, my_cache_key, sys.exc_info()) + raise + fixturedef.cached_result = (result, my_cache_key, None) + return result + + +def _ensure_immutable_ids(ids): + if ids is None: + return + if callable(ids): + return ids + return tuple(ids) + + +def wrap_function_to_warning_if_called_directly(function, fixture_marker): + """Wrap the given fixture function so we can issue warnings about it being called directly, instead of + used as an argument in a test function. + """ + is_yield_function = is_generator(function) + warning = FIXTURE_FUNCTION_CALL.format( + name=fixture_marker.name or function.__name__ + ) + + if is_yield_function: + + @functools.wraps(function) + def result(*args, **kwargs): + __tracebackhide__ = True + warnings.warn(warning, stacklevel=3) + for x in function(*args, **kwargs): + yield x + + else: + + @functools.wraps(function) + def result(*args, **kwargs): + __tracebackhide__ = True + warnings.warn(warning, stacklevel=3) + return function(*args, **kwargs) + + if six.PY2: + result.__wrapped__ = function + + # keep reference to the original function in our own custom attribute so we don't unwrap + # further than this point and lose useful wrappings like @mock.patch (#3774) + result.__pytest_wrapped__ = _PytestWrapper(function) + + return result + + +@attr.s(frozen=True) +class FixtureFunctionMarker(object): + scope = attr.ib() + params = attr.ib(converter=attr.converters.optional(tuple)) + autouse = attr.ib(default=False) + ids = attr.ib(default=None, converter=_ensure_immutable_ids) + name = attr.ib(default=None) + + def __call__(self, function): + if isclass(function): + raise ValueError("class fixtures not supported (maybe in the future)") + + if getattr(function, "_pytestfixturefunction", False): + raise ValueError( + "fixture is being applied more than once to the same function" + ) + + function = wrap_function_to_warning_if_called_directly(function, self) + + function._pytestfixturefunction = self + return function + + +def fixture(scope="function", params=None, autouse=False, ids=None, name=None): + """Decorator to mark a fixture factory function. + + This decorator can be used, with or without parameters, to define a + fixture function. + + The name of the fixture function can later be referenced to cause its + invocation ahead of running tests: test + modules or classes can use the ``pytest.mark.usefixtures(fixturename)`` + marker. + + Test functions can directly use fixture names as input + arguments in which case the fixture instance returned from the fixture + function will be injected. + + Fixtures can provide their values to test functions using ``return`` or ``yield`` + statements. When using ``yield`` the code block after the ``yield`` statement is executed + as teardown code regardless of the test outcome, and must yield exactly once. + + :arg scope: the scope for which this fixture is shared, one of + ``"function"`` (default), ``"class"``, ``"module"``, + ``"package"`` or ``"session"``. + + ``"package"`` is considered **experimental** at this time. 
+ + :arg params: an optional list of parameters which will cause multiple + invocations of the fixture function and all of the tests + using it. + + :arg autouse: if True, the fixture func is activated for all tests that + can see it. If False (the default) then an explicit + reference is needed to activate the fixture. + + :arg ids: list of string ids each corresponding to the params + so that they are part of the test id. If no ids are provided + they will be generated automatically from the params. + + :arg name: the name of the fixture. This defaults to the name of the + decorated function. If a fixture is used in the same module in + which it is defined, the function name of the fixture will be + shadowed by the function arg that requests the fixture; one way + to resolve this is to name the decorated function + ``fixture_`` and then use + ``@pytest.fixture(name='')``. + """ + if callable(scope) and params is None and autouse is False: + # direct decoration + return FixtureFunctionMarker("function", params, autouse, name=name)(scope) + if params is not None and not isinstance(params, (list, tuple)): + params = list(params) + return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name) + + +def yield_fixture(scope="function", params=None, autouse=False, ids=None, name=None): + """ (return a) decorator to mark a yield-fixture factory function. + + .. deprecated:: 3.0 + Use :py:func:`pytest.fixture` directly instead. + """ + return fixture(scope=scope, params=params, autouse=autouse, ids=ids, name=name) + + +defaultfuncargprefixmarker = fixture() + + +@fixture(scope="session") +def pytestconfig(request): + """Session-scoped fixture that returns the :class:`_pytest.config.Config` object. + + Example:: + + def test_foo(pytestconfig): + if pytestconfig.getoption("verbose"): + ... + + """ + return request.config + + +class FixtureManager(object): + """ + pytest fixtures definitions and information is stored and managed + from this class. + + During collection fm.parsefactories() is called multiple times to parse + fixture function definitions into FixtureDef objects and internal + data structures. + + During collection of test functions, metafunc-mechanics instantiate + a FuncFixtureInfo object which is cached per node/func-name. + This FuncFixtureInfo object is later retrieved by Function nodes + which themselves offer a fixturenames attribute. + + The FuncFixtureInfo object holds information about fixtures and FixtureDefs + relevant for a particular function. An initial list of fixtures is + assembled like this: + + - ini-defined usefixtures + - autouse-marked fixtures along the collection chain up from the function + - usefixtures markers at module/class/function level + - test function funcargs + + Subsequently the funcfixtureinfo.fixturenames attribute is computed + as the closure of the fixtures needed to setup the initial fixtures, + i. e. fixtures needed by fixture functions themselves are appended + to the fixturenames list. + + Upon the test-setup phases all fixturenames are instantiated, retrieved + by a lookup of their FuncFixtureInfo. 
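Taken together, the decorator arguments documented above combine like this in user code (a hedged sketch; fixture names are made up):

    import pytest

    @pytest.fixture(scope="module", params=[1, 2, 3], ids=["one", "two", "three"])
    def number(request):
        return request.param          # one module-scoped instance per param

    @pytest.fixture(autouse=True, name="clock")
    def _fake_clock(monkeypatch):
        # runs for every test that can see it; tests request it by the name "clock"
        monkeypatch.setenv("TZ", "UTC")

    def test_number_is_small(number):
        assert number < 10            # collected three times: [one], [two], [three]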
+ """ + + _argprefix = "pytest_funcarg__" + FixtureLookupError = FixtureLookupError + FixtureLookupErrorRepr = FixtureLookupErrorRepr + + def __init__(self, session): + self.session = session + self.config = session.config + self._arg2fixturedefs = {} + self._holderobjseen = set() + self._arg2finish = {} + self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))] + session.config.pluginmanager.register(self, "funcmanage") + + def getfixtureinfo(self, node, func, cls, funcargs=True): + if funcargs and not getattr(node, "nofuncargs", False): + argnames = getfuncargnames(func, cls=cls) + else: + argnames = () + usefixtures = flatten( + mark.args for mark in node.iter_markers(name="usefixtures") + ) + initialnames = tuple(usefixtures) + argnames + fm = node.session._fixturemanager + initialnames, names_closure, arg2fixturedefs = fm.getfixtureclosure( + initialnames, node + ) + return FuncFixtureInfo(argnames, initialnames, names_closure, arg2fixturedefs) + + def pytest_plugin_registered(self, plugin): + nodeid = None + try: + p = py.path.local(plugin.__file__).realpath() + except AttributeError: + pass + else: + # construct the base nodeid which is later used to check + # what fixtures are visible for particular tests (as denoted + # by their test id) + if p.basename.startswith("conftest.py"): + nodeid = p.dirpath().relto(self.config.rootdir) + if p.sep != nodes.SEP: + nodeid = nodeid.replace(p.sep, nodes.SEP) + self.parsefactories(plugin, nodeid) + + def _getautousenames(self, nodeid): + """ return a tuple of fixture names to be used. """ + autousenames = [] + for baseid, basenames in self._nodeid_and_autousenames: + if nodeid.startswith(baseid): + if baseid: + i = len(baseid) + nextchar = nodeid[i : i + 1] + if nextchar and nextchar not in ":/": + continue + autousenames.extend(basenames) + return autousenames + + def getfixtureclosure(self, fixturenames, parentnode): + # collect the closure of all fixtures , starting with the given + # fixturenames as the initial set. As we have to visit all + # factory definitions anyway, we also return an arg2fixturedefs + # mapping so that the caller can reuse it and does not have + # to re-discover fixturedefs again for each fixturename + # (discovering matching fixtures for a given name/node is expensive) + + parentid = parentnode.nodeid + fixturenames_closure = self._getautousenames(parentid) + + def merge(otherlist): + for arg in otherlist: + if arg not in fixturenames_closure: + fixturenames_closure.append(arg) + + merge(fixturenames) + + # at this point, fixturenames_closure contains what we call "initialnames", + # which is a set of fixturenames the function immediately requests. We + # need to return it as well, so save this. 
+ initialnames = tuple(fixturenames_closure) + + arg2fixturedefs = {} + lastlen = -1 + while lastlen != len(fixturenames_closure): + lastlen = len(fixturenames_closure) + for argname in fixturenames_closure: + if argname in arg2fixturedefs: + continue + fixturedefs = self.getfixturedefs(argname, parentid) + if fixturedefs: + arg2fixturedefs[argname] = fixturedefs + merge(fixturedefs[-1].argnames) + + def sort_by_scope(arg_name): + try: + fixturedefs = arg2fixturedefs[arg_name] + except KeyError: + return scopes.index("function") + else: + return fixturedefs[-1].scopenum + + fixturenames_closure.sort(key=sort_by_scope) + return initialnames, fixturenames_closure, arg2fixturedefs + + def pytest_generate_tests(self, metafunc): + for argname in metafunc.fixturenames: + faclist = metafunc._arg2fixturedefs.get(argname) + if faclist: + fixturedef = faclist[-1] + if fixturedef.params is not None: + parametrize_func = getattr(metafunc.function, "parametrize", None) + if parametrize_func is not None: + parametrize_func = parametrize_func.combined + func_params = getattr(parametrize_func, "args", [[None]]) + func_kwargs = getattr(parametrize_func, "kwargs", {}) + # skip directly parametrized arguments + if "argnames" in func_kwargs: + argnames = parametrize_func.kwargs["argnames"] + else: + argnames = func_params[0] + if not isinstance(argnames, (tuple, list)): + argnames = [x.strip() for x in argnames.split(",") if x.strip()] + if argname not in func_params and argname not in argnames: + metafunc.parametrize( + argname, + fixturedef.params, + indirect=True, + scope=fixturedef.scope, + ids=fixturedef.ids, + ) + else: + continue # will raise FixtureLookupError at setup time + + def pytest_collection_modifyitems(self, items): + # separate parametrized setups + items[:] = reorder_items(items) + + def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False): + from _pytest import deprecated + + if nodeid is not NOTSET: + holderobj = node_or_obj + else: + holderobj = node_or_obj.obj + nodeid = node_or_obj.nodeid + if holderobj in self._holderobjseen: + return + self._holderobjseen.add(holderobj) + autousenames = [] + for name in dir(holderobj): + # The attribute can be an arbitrary descriptor, so the attribute + # access below can raise. safe_getatt() ignores such exceptions. 
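The autouse and ``usefixtures`` collection handled by the FixtureManager machinery above corresponds to user code along these lines (names invented for the sketch):

    import os

    import pytest

    @pytest.fixture(autouse=True)
    def _clear_proxy_env(monkeypatch):
        # autouse: applied to every test that can see it, without being requested
        monkeypatch.delenv("HTTP_PROXY", raising=False)

    @pytest.mark.usefixtures("tmpdir")
    class TestDownloads:
        def test_no_proxy_configured(self):
            assert "HTTP_PROXY" not in os.environ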
+ obj = safe_getattr(holderobj, name, None) + marker = getfixturemarker(obj) + # fixture functions have a pytest_funcarg__ prefix (pre-2.3 style) + # or are "@pytest.fixture" marked + if marker is None: + if not name.startswith(self._argprefix): + continue + if not callable(obj): + continue + marker = defaultfuncargprefixmarker + + filename, lineno = getfslineno(obj) + warnings.warn_explicit( + deprecated.FUNCARG_PREFIX.format(name=name), + category=None, + filename=str(filename), + lineno=lineno + 1, + ) + name = name[len(self._argprefix) :] + elif not isinstance(marker, FixtureFunctionMarker): + # magic globals with __getattr__ might have got us a wrong + # fixture attribute + continue + else: + if marker.name: + name = marker.name + assert not name.startswith(self._argprefix), FIXTURE_MSG.format(name) + + # during fixture definition we wrap the original fixture function + # to issue a warning if called directly, so here we unwrap it in order to not emit the warning + # when pytest itself calls the fixture function + if six.PY2 and unittest: + # hack on Python 2 because of the unbound methods + obj = get_real_func(obj) + else: + obj = get_real_method(obj, holderobj) + + fixture_def = FixtureDef( + self, + nodeid, + name, + obj, + marker.scope, + marker.params, + unittest=unittest, + ids=marker.ids, + ) + + faclist = self._arg2fixturedefs.setdefault(name, []) + if fixture_def.has_location: + faclist.append(fixture_def) + else: + # fixturedefs with no location are at the front + # so this inserts the current fixturedef after the + # existing fixturedefs from external plugins but + # before the fixturedefs provided in conftests. + i = len([f for f in faclist if not f.has_location]) + faclist.insert(i, fixture_def) + if marker.autouse: + autousenames.append(name) + + if autousenames: + self._nodeid_and_autousenames.append((nodeid or "", autousenames)) + + def getfixturedefs(self, argname, nodeid): + """ + Gets a list of fixtures which are applicable to the given node id. + + :param str argname: name of the fixture to search for + :param str nodeid: full node id of the requesting test. + :return: list[FixtureDef] + """ + try: + fixturedefs = self._arg2fixturedefs[argname] + except KeyError: + return None + return tuple(self._matchfactories(fixturedefs, nodeid)) + + def _matchfactories(self, fixturedefs, nodeid): + for fixturedef in fixturedefs: + if nodes.ischildnode(fixturedef.baseid, nodeid): + yield fixturedef diff --git a/venv/Lib/site-packages/_pytest/freeze_support.py b/venv/Lib/site-packages/_pytest/freeze_support.py new file mode 100644 index 00000000..002e0773 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/freeze_support.py @@ -0,0 +1,45 @@ +""" +Provides a function to report all internal modules for using freezing tools +pytest +""" +from __future__ import absolute_import, division, print_function + + +def freeze_includes(): + """ + Returns a list of module names used by pytest that should be + included by cx_freeze. + """ + import py + import _pytest + + result = list(_iter_all_modules(py)) + result += list(_iter_all_modules(_pytest)) + return result + + +def _iter_all_modules(package, prefix=""): + """ + Iterates over the names of all modules that can be found in the given + package, recursively. + Example: + _iter_all_modules(_pytest) -> + ['_pytest.assertion.newinterpret', + '_pytest.capture', + '_pytest.core', + ... + ] + """ + import os + import pkgutil + + if type(package) is not str: + path, prefix = package.__path__[0], package.__name__ + "." 
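The ``freeze_includes`` helper defined in freeze_support.py is intended for freezing tools; a typical cx_Freeze setup script might look like this (project and script names invented, cx_Freeze assumed installed):

    # setup.py
    from cx_Freeze import Executable, setup

    import pytest

    setup(
        name="myapp-tests",
        version="0.1",
        executables=[Executable("runtests.py")],
        options={"build_exe": {"includes": pytest.freeze_includes()}},
    )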
+ else: + path = package + for _, name, is_package in pkgutil.iter_modules([path]): + if is_package: + for m in _iter_all_modules(os.path.join(path, name), prefix=name + "."): + yield prefix + m + else: + yield prefix + name diff --git a/venv/Lib/site-packages/_pytest/helpconfig.py b/venv/Lib/site-packages/_pytest/helpconfig.py new file mode 100644 index 00000000..88bef95f --- /dev/null +++ b/venv/Lib/site-packages/_pytest/helpconfig.py @@ -0,0 +1,213 @@ +""" version info, help messages, tracing configuration. """ +from __future__ import absolute_import, division, print_function + +import py +import pytest +from _pytest.config import PrintHelp +import os +import sys +from argparse import Action + + +class HelpAction(Action): + """This is an argparse Action that will raise an exception in + order to skip the rest of the argument parsing when --help is passed. + This prevents argparse from quitting due to missing required arguments + when any are defined, for example by ``pytest_addoption``. + This is similar to the way that the builtin argparse --help option is + implemented by raising SystemExit. + """ + + def __init__(self, option_strings, dest=None, default=False, help=None): + super(HelpAction, self).__init__( + option_strings=option_strings, + dest=dest, + const=True, + default=default, + nargs=0, + help=help, + ) + + def __call__(self, parser, namespace, values, option_string=None): + setattr(namespace, self.dest, self.const) + + # We should only skip the rest of the parsing after preparse is done + if getattr(parser._parser, "after_preparse", False): + raise PrintHelp + + +def pytest_addoption(parser): + group = parser.getgroup("debugconfig") + group.addoption( + "--version", + action="store_true", + help="display pytest lib version and import information.", + ) + group._addoption( + "-h", + "--help", + action=HelpAction, + dest="help", + help="show help message and configuration info", + ) + group._addoption( + "-p", + action="append", + dest="plugins", + default=[], + metavar="name", + help="early-load given plugin (multi-allowed). " + "To avoid loading of plugins, use the `no:` prefix, e.g. " + "`no:doctest`.", + ) + group.addoption( + "--traceconfig", + "--trace-config", + action="store_true", + default=False, + help="trace considerations of conftest.py files.", + ), + group.addoption( + "--debug", + action="store_true", + dest="debug", + default=False, + help="store internal tracing debug information in 'pytestdebug.log'.", + ) + group._addoption( + "-o", + "--override-ini", + dest="override_ini", + action="append", + help='override ini option with "option=value" style, e.g. 
`-o xfail_strict=True -o cache_dir=cache`.', + ) + + +@pytest.hookimpl(hookwrapper=True) +def pytest_cmdline_parse(): + outcome = yield + config = outcome.get_result() + if config.option.debug: + path = os.path.abspath("pytestdebug.log") + debugfile = open(path, "w") + debugfile.write( + "versions pytest-%s, py-%s, " + "python-%s\ncwd=%s\nargs=%s\n\n" + % ( + pytest.__version__, + py.__version__, + ".".join(map(str, sys.version_info)), + os.getcwd(), + config._origargs, + ) + ) + config.trace.root.setwriter(debugfile.write) + undo_tracing = config.pluginmanager.enable_tracing() + sys.stderr.write("writing pytestdebug information to %s\n" % path) + + def unset_tracing(): + debugfile.close() + sys.stderr.write("wrote pytestdebug information to %s\n" % debugfile.name) + config.trace.root.setwriter(None) + undo_tracing() + + config.add_cleanup(unset_tracing) + + +def pytest_cmdline_main(config): + if config.option.version: + p = py.path.local(pytest.__file__) + sys.stderr.write( + "This is pytest version %s, imported from %s\n" % (pytest.__version__, p) + ) + plugininfo = getpluginversioninfo(config) + if plugininfo: + for line in plugininfo: + sys.stderr.write(line + "\n") + return 0 + elif config.option.help: + config._do_configure() + showhelp(config) + config._ensure_unconfigure() + return 0 + + +def showhelp(config): + reporter = config.pluginmanager.get_plugin("terminalreporter") + tw = reporter._tw + tw.write(config._parser.optparser.format_help()) + tw.line() + tw.line() + tw.line( + "[pytest] ini-options in the first pytest.ini|tox.ini|setup.cfg file found:" + ) + tw.line() + + for name in config._parser._ininames: + help, type, default = config._parser._inidict[name] + if type is None: + type = "string" + spec = "%s (%s)" % (name, type) + line = " %-24s %s" % (spec, help) + tw.line(line[: tw.fullwidth]) + + tw.line() + tw.line("environment variables:") + vars = [ + ("PYTEST_ADDOPTS", "extra command line options"), + ("PYTEST_PLUGINS", "comma-separated plugins to load during startup"), + ("PYTEST_DISABLE_PLUGIN_AUTOLOAD", "set to disable plugin auto-loading"), + ("PYTEST_DEBUG", "set to enable debug tracing of pytest's internals"), + ] + for name, help in vars: + tw.line(" %-24s %s" % (name, help)) + tw.line() + tw.line() + + tw.line("to see available markers type: pytest --markers") + tw.line("to see available fixtures type: pytest --fixtures") + tw.line( + "(shown according to specified file_or_dir or current dir " + "if not specified; fixtures with leading '_' are only shown " + "with the '-v' option" + ) + + for warningreport in reporter.stats.get("warnings", []): + tw.line("warning : " + warningreport.message, red=True) + return + + +conftest_options = [("pytest_plugins", "list of plugin names to load")] + + +def getpluginversioninfo(config): + lines = [] + plugininfo = config.pluginmanager.list_plugin_distinfo() + if plugininfo: + lines.append("setuptools registered plugins:") + for plugin, dist in plugininfo: + loc = getattr(plugin, "__file__", repr(plugin)) + content = "%s-%s at %s" % (dist.project_name, dist.version, loc) + lines.append(" " + content) + return lines + + +def pytest_report_header(config): + lines = [] + if config.option.debug or config.option.traceconfig: + lines.append("using: pytest-%s pylib-%s" % (pytest.__version__, py.__version__)) + + verinfo = getpluginversioninfo(config) + if verinfo: + lines.extend(verinfo) + + if config.option.traceconfig: + lines.append("active plugins:") + items = config.pluginmanager.list_name_plugin() + for name, plugin in items: 
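Outside of pytest itself, the same ``pytest_report_header`` hook is commonly implemented in a project conftest.py to add a line to the terminal header; a small sketch with an invented project name:

    # conftest.py
    def pytest_report_header(config):
        if config.getoption("verbose") > 0:
            return ["project: example-project", "config: development"]
        return "project: example-project"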
+ if hasattr(plugin, "__file__"): + r = plugin.__file__ + else: + r = repr(plugin) + lines.append(" %-20s: %s" % (name, r)) + return lines diff --git a/venv/Lib/site-packages/_pytest/hookspec.py b/venv/Lib/site-packages/_pytest/hookspec.py new file mode 100644 index 00000000..53380696 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/hookspec.py @@ -0,0 +1,611 @@ +""" hook specifications for pytest plugins, invoked from main.py and builtin plugins. """ + +from pluggy import HookspecMarker +from .deprecated import PYTEST_NAMESPACE + + +hookspec = HookspecMarker("pytest") + +# ------------------------------------------------------------------------- +# Initialization hooks called for every plugin +# ------------------------------------------------------------------------- + + +@hookspec(historic=True) +def pytest_addhooks(pluginmanager): + """called at plugin registration time to allow adding new hooks via a call to + ``pluginmanager.add_hookspecs(module_or_class, prefix)``. + + + :param _pytest.config.PytestPluginManager pluginmanager: pytest plugin manager + + .. note:: + This hook is incompatible with ``hookwrapper=True``. + """ + + +@hookspec(historic=True, warn_on_impl=PYTEST_NAMESPACE) +def pytest_namespace(): + """ + return dict of name->object to be made globally available in + the pytest namespace. + + This hook is called at plugin registration time. + + .. note:: + This hook is incompatible with ``hookwrapper=True``. + + .. warning:: + This hook has been **deprecated** and will be removed in pytest 4.0. + + Plugins whose users depend on the current namespace functionality should prepare to migrate to a + namespace they actually own. + + To support the migration its suggested to trigger ``DeprecationWarnings`` for objects they put into the + pytest namespace. + + An stopgap measure to avoid the warning is to monkeypatch the ``pytest`` module, but just as the + ``pytest_namespace`` hook this should be seen as a temporary measure to be removed in future versions after + an appropriate transition period. + """ + + +@hookspec(historic=True) +def pytest_plugin_registered(plugin, manager): + """ a new pytest plugin got registered. + + :param plugin: the plugin module or instance + :param _pytest.config.PytestPluginManager manager: pytest plugin manager + + .. note:: + This hook is incompatible with ``hookwrapper=True``. + """ + + +@hookspec(historic=True) +def pytest_addoption(parser): + """register argparse-style options and ini-style config values, + called once at the beginning of a test run. + + .. note:: + + This function should be implemented only in plugins or ``conftest.py`` + files situated at the tests root directory due to how pytest + :ref:`discovers plugins during startup `. + + :arg _pytest.config.Parser parser: To add command line options, call + :py:func:`parser.addoption(...) <_pytest.config.Parser.addoption>`. + To add ini-file values call :py:func:`parser.addini(...) + <_pytest.config.Parser.addini>`. + + Options can later be accessed through the + :py:class:`config <_pytest.config.Config>` object, respectively: + + - :py:func:`config.getoption(name) <_pytest.config.Config.getoption>` to + retrieve the value of a command line option. + + - :py:func:`config.getini(name) <_pytest.config.Config.getini>` to retrieve + a value read from an ini-style file. + + The config object is passed around on many internal objects via the ``.config`` + attribute or can be retrieved as the ``pytestconfig`` fixture. + + .. note:: + This hook is incompatible with ``hookwrapper=True``. 
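In a project's own conftest.py this hook usually pairs with ``config.getoption`` and a companion ``pytest_collection_modifyitems``; a classic sketch in which the option and marker names are only examples:

    # conftest.py
    import pytest

    def pytest_addoption(parser):
        parser.addoption(
            "--runslow", action="store_true", default=False,
            help="also run tests marked as slow",
        )

    def pytest_collection_modifyitems(config, items):
        if config.getoption("--runslow"):
            return
        skip_slow = pytest.mark.skip(reason="need --runslow option to run")
        for item in items:
            if "slow" in item.keywords:
                item.add_marker(skip_slow)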
+ """ + + +@hookspec(historic=True) +def pytest_configure(config): + """ + Allows plugins and conftest files to perform initial configuration. + + This hook is called for every plugin and initial conftest file + after command line options have been parsed. + + After that, the hook is called for other conftest files as they are + imported. + + .. note:: + This hook is incompatible with ``hookwrapper=True``. + + :arg _pytest.config.Config config: pytest config object + """ + + +# ------------------------------------------------------------------------- +# Bootstrapping hooks called for plugins registered early enough: +# internal and 3rd party plugins. +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_cmdline_parse(pluginmanager, args): + """return initialized config object, parsing the specified args. + + Stops at first non-None result, see :ref:`firstresult` + + .. note:: + This hook will not be called for ``conftest.py`` files, only for setuptools plugins. + + :param _pytest.config.PytestPluginManager pluginmanager: pytest plugin manager + :param list[str] args: list of arguments passed on the command line + """ + + +def pytest_cmdline_preparse(config, args): + """(**Deprecated**) modify command line arguments before option parsing. + + This hook is considered deprecated and will be removed in a future pytest version. Consider + using :func:`pytest_load_initial_conftests` instead. + + .. note:: + This hook will not be called for ``conftest.py`` files, only for setuptools plugins. + + :param _pytest.config.Config config: pytest config object + :param list[str] args: list of arguments passed on the command line + """ + + +@hookspec(firstresult=True) +def pytest_cmdline_main(config): + """ called for performing the main command line action. The default + implementation will invoke the configure hooks and runtest_mainloop. + + .. note:: + This hook will not be called for ``conftest.py`` files, only for setuptools plugins. + + Stops at first non-None result, see :ref:`firstresult` + + :param _pytest.config.Config config: pytest config object + """ + + +def pytest_load_initial_conftests(early_config, parser, args): + """ implements the loading of initial conftest files ahead + of command line option parsing. + + .. note:: + This hook will not be called for ``conftest.py`` files, only for setuptools plugins. + + :param _pytest.config.Config early_config: pytest config object + :param list[str] args: list of arguments passed on the command line + :param _pytest.config.Parser parser: to add command line options + """ + + +# ------------------------------------------------------------------------- +# collection hooks +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_collection(session): + """Perform the collection protocol for the given session. + + Stops at first non-None result, see :ref:`firstresult`. + + :param _pytest.main.Session session: the pytest session object + """ + + +def pytest_collection_modifyitems(session, config, items): + """ called after collection has been performed, may filter or re-order + the items in-place. + + :param _pytest.main.Session session: the pytest session object + :param _pytest.config.Config config: pytest config object + :param List[_pytest.nodes.Item] items: list of item objects + """ + + +def pytest_collection_finish(session): + """ called after collection has been performed and modified. 
+ + :param _pytest.main.Session session: the pytest session object + """ + + +@hookspec(firstresult=True) +def pytest_ignore_collect(path, config): + """ return True to prevent considering this path for collection. + This hook is consulted for all files and directories prior to calling + more specific hooks. + + Stops at first non-None result, see :ref:`firstresult` + + :param str path: the path to analyze + :param _pytest.config.Config config: pytest config object + """ + + +@hookspec(firstresult=True) +def pytest_collect_directory(path, parent): + """ called before traversing a directory for collection files. + + Stops at first non-None result, see :ref:`firstresult` + + :param str path: the path to analyze + """ + + +def pytest_collect_file(path, parent): + """ return collection Node or None for the given path. Any new node + needs to have the specified ``parent`` as a parent. + + :param str path: the path to collect + """ + + +# logging hooks for collection + + +def pytest_collectstart(collector): + """ collector starts collecting. """ + + +def pytest_itemcollected(item): + """ we just collected a test item. """ + + +def pytest_collectreport(report): + """ collector finished collecting. """ + + +def pytest_deselected(items): + """ called for test items deselected by keyword. """ + + +@hookspec(firstresult=True) +def pytest_make_collect_report(collector): + """ perform ``collector.collect()`` and return a CollectReport. + + Stops at first non-None result, see :ref:`firstresult` """ + + +# ------------------------------------------------------------------------- +# Python test function related hooks +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_pycollect_makemodule(path, parent): + """ return a Module collector or None for the given path. + This hook will be called for each matching test module path. + The pytest_collect_file hook needs to be used if you want to + create test modules for files that do not match as a test module. + + Stops at first non-None result, see :ref:`firstresult` """ + + +@hookspec(firstresult=True) +def pytest_pycollect_makeitem(collector, name, obj): + """ return custom item/collector for a python object in a module, or None. + + Stops at first non-None result, see :ref:`firstresult` """ + + +@hookspec(firstresult=True) +def pytest_pyfunc_call(pyfuncitem): + """ call underlying test function. + + Stops at first non-None result, see :ref:`firstresult` """ + + +def pytest_generate_tests(metafunc): + """ generate (multiple) parametrized calls to a test function.""" + + +@hookspec(firstresult=True) +def pytest_make_parametrize_id(config, val, argname): + """Return a user-friendly string representation of the given ``val`` that will be used + by @pytest.mark.parametrize calls. Return None if the hook doesn't know about ``val``. + The parameter name is available as ``argname``, if required. + + Stops at first non-None result, see :ref:`firstresult` + + :param _pytest.config.Config config: pytest config object + :param val: the parametrized value + :param str argname: the automatic parameter name produced by pytest + """ + + +# ------------------------------------------------------------------------- +# generic runtest related hooks +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_runtestloop(session): + """ called for performing the main runtest loop + (after collection finished). 
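A conftest.py or plugin typically implements ``pytest_generate_tests`` along these lines to drive parametrization (the argument name below is invented):

    def pytest_generate_tests(metafunc):
        if "text_input" in metafunc.fixturenames:
            metafunc.parametrize("text_input", ["", "short", "quite a bit longer"])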
+ + Stops at first non-None result, see :ref:`firstresult` + + :param _pytest.main.Session session: the pytest session object + """ + + +def pytest_itemstart(item, node): + """(**Deprecated**) use pytest_runtest_logstart. """ + + +@hookspec(firstresult=True) +def pytest_runtest_protocol(item, nextitem): + """ implements the runtest_setup/call/teardown protocol for + the given test item, including capturing exceptions and calling + reporting hooks. + + :arg item: test item for which the runtest protocol is performed. + + :arg nextitem: the scheduled-to-be-next test item (or None if this + is the end my friend). This argument is passed on to + :py:func:`pytest_runtest_teardown`. + + :return boolean: True if no further hook implementations should be invoked. + + + Stops at first non-None result, see :ref:`firstresult` """ + + +def pytest_runtest_logstart(nodeid, location): + """ signal the start of running a single test item. + + This hook will be called **before** :func:`pytest_runtest_setup`, :func:`pytest_runtest_call` and + :func:`pytest_runtest_teardown` hooks. + + :param str nodeid: full id of the item + :param location: a triple of ``(filename, linenum, testname)`` + """ + + +def pytest_runtest_logfinish(nodeid, location): + """ signal the complete finish of running a single test item. + + This hook will be called **after** :func:`pytest_runtest_setup`, :func:`pytest_runtest_call` and + :func:`pytest_runtest_teardown` hooks. + + :param str nodeid: full id of the item + :param location: a triple of ``(filename, linenum, testname)`` + """ + + +def pytest_runtest_setup(item): + """ called before ``pytest_runtest_call(item)``. """ + + +def pytest_runtest_call(item): + """ called to execute the test ``item``. """ + + +def pytest_runtest_teardown(item, nextitem): + """ called after ``pytest_runtest_call``. + + :arg nextitem: the scheduled-to-be-next test item (None if no further + test item is scheduled). This argument can be used to + perform exact teardowns, i.e. calling just enough finalizers + so that nextitem only needs to call setup-functions. + """ + + +@hookspec(firstresult=True) +def pytest_runtest_makereport(item, call): + """ return a :py:class:`_pytest.runner.TestReport` object + for the given :py:class:`pytest.Item <_pytest.main.Item>` and + :py:class:`_pytest.runner.CallInfo`. + + Stops at first non-None result, see :ref:`firstresult` """ + + +def pytest_runtest_logreport(report): + """ process a test setup/call/teardown report relating to + the respective phase of executing a test. """ + + +# ------------------------------------------------------------------------- +# Fixture related hooks +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_fixture_setup(fixturedef, request): + """ performs fixture setup execution. + + :return: The return value of the call to the fixture function + + Stops at first non-None result, see :ref:`firstresult` + + .. note:: + If the fixture function returns None, other implementations of + this hook function will continue to be called, according to the + behavior of the :ref:`firstresult` option. 
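A well-known use of ``pytest_runtest_makereport`` is a hook wrapper that stashes each phase's report on the item so fixtures can inspect the outcome during teardown (sketch; the ``rep_*`` attribute names are invented):

    import pytest

    @pytest.hookimpl(tryfirst=True, hookwrapper=True)
    def pytest_runtest_makereport(item, call):
        outcome = yield
        report = outcome.get_result()
        # creates item.rep_setup, item.rep_call and item.rep_teardown
        setattr(item, "rep_" + report.when, report)

    @pytest.fixture
    def check_outcome(request):
        yield
        if request.node.rep_call.failed:
            print("test %s failed" % request.node.nodeid)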
+ """ + + +def pytest_fixture_post_finalizer(fixturedef, request): + """ called after fixture teardown, but before the cache is cleared so + the fixture result cache ``fixturedef.cached_result`` can + still be accessed.""" + + +# ------------------------------------------------------------------------- +# test session related hooks +# ------------------------------------------------------------------------- + + +def pytest_sessionstart(session): + """ called after the ``Session`` object has been created and before performing collection + and entering the run test loop. + + :param _pytest.main.Session session: the pytest session object + """ + + +def pytest_sessionfinish(session, exitstatus): + """ called after whole test run finished, right before returning the exit status to the system. + + :param _pytest.main.Session session: the pytest session object + :param int exitstatus: the status which pytest will return to the system + """ + + +def pytest_unconfigure(config): + """ called before test process is exited. + + :param _pytest.config.Config config: pytest config object + """ + + +# ------------------------------------------------------------------------- +# hooks for customizing the assert methods +# ------------------------------------------------------------------------- + + +def pytest_assertrepr_compare(config, op, left, right): + """return explanation for comparisons in failing assert expressions. + + Return None for no custom explanation, otherwise return a list + of strings. The strings will be joined by newlines but any newlines + *in* a string will be escaped. Note that all but the first line will + be indented slightly, the intention is for the first line to be a summary. + + :param _pytest.config.Config config: pytest config object + """ + + +# ------------------------------------------------------------------------- +# hooks for influencing reporting (invoked from _pytest_terminal) +# ------------------------------------------------------------------------- + + +def pytest_report_header(config, startdir): + """ return a string or list of strings to be displayed as header info for terminal reporting. + + :param _pytest.config.Config config: pytest config object + :param startdir: py.path object with the starting dir + + .. note:: + + This function should be implemented only in plugins or ``conftest.py`` + files situated at the tests root directory due to how pytest + :ref:`discovers plugins during startup `. + """ + + +def pytest_report_collectionfinish(config, startdir, items): + """ + .. versionadded:: 3.2 + + return a string or list of strings to be displayed after collection has finished successfully. + + This strings will be displayed after the standard "collected X items" message. + + :param _pytest.config.Config config: pytest config object + :param startdir: py.path object with the starting dir + :param items: list of pytest items that are going to be executed; this list should not be modified. + """ + + +@hookspec(firstresult=True) +def pytest_report_teststatus(report): + """ return result-category, shortletter and verbose word for reporting. + + Stops at first non-None result, see :ref:`firstresult` """ + + +def pytest_terminal_summary(terminalreporter, exitstatus): + """Add a section to terminal summary reporting. + + :param _pytest.terminal.TerminalReporter terminalreporter: the internal terminal reporter object + :param int exitstatus: the exit status that will be reported back to the OS + + .. versionadded:: 3.5 + The ``config`` parameter. 
+ """ + + +@hookspec(historic=True) +def pytest_logwarning(message, code, nodeid, fslocation): + """ + .. deprecated:: 3.8 + + This hook is will stop working in a future release. + + pytest no longer triggers this hook, but the + terminal writer still implements it to display warnings issued by + :meth:`_pytest.config.Config.warn` and :meth:`_pytest.nodes.Node.warn`. Calling those functions will be + an error in future releases. + + process a warning specified by a message, a code string, + a nodeid and fslocation (both of which may be None + if the warning is not tied to a particular node/location). + + .. note:: + This hook is incompatible with ``hookwrapper=True``. + """ + + +@hookspec(historic=True) +def pytest_warning_captured(warning_message, when, item): + """ + Process a warning captured by the internal pytest warnings plugin. + + :param warnings.WarningMessage warning_message: + The captured warning. This is the same object produced by :py:func:`warnings.catch_warnings`, and contains + the same attributes as the parameters of :py:func:`warnings.showwarning`. + + :param str when: + Indicates when the warning was captured. Possible values: + + * ``"config"``: during pytest configuration/initialization stage. + * ``"collect"``: during test collection. + * ``"runtest"``: during test execution. + + :param pytest.Item|None item: + **DEPRECATED**: This parameter is incompatible with ``pytest-xdist``, and will always receive ``None`` + in a future release. + + The item being executed if ``when`` is ``"runtest"``, otherwise ``None``. + """ + + +# ------------------------------------------------------------------------- +# doctest hooks +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_doctest_prepare_content(content): + """ return processed content for a given doctest + + Stops at first non-None result, see :ref:`firstresult` """ + + +# ------------------------------------------------------------------------- +# error handling and internal debugging hooks +# ------------------------------------------------------------------------- + + +def pytest_internalerror(excrepr, excinfo): + """ called for internal errors. """ + + +def pytest_keyboard_interrupt(excinfo): + """ called for keyboard interrupt. """ + + +def pytest_exception_interact(node, call, report): + """called when an exception was raised which can potentially be + interactively handled. + + This hook is only called if an exception was raised + that is not an internal exception like ``skip.Exception``. + """ + + +def pytest_enter_pdb(config): + """ called upon pdb.set_trace(), can be used by plugins to take special + action just before the python debugger enters in interactive mode. + + :param _pytest.config.Config config: pytest config object + """ diff --git a/venv/Lib/site-packages/_pytest/junitxml.py b/venv/Lib/site-packages/_pytest/junitxml.py new file mode 100644 index 00000000..a39c94c1 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/junitxml.py @@ -0,0 +1,565 @@ +""" + report test results in JUnit-XML format, + for use with Jenkins and build integration servers. + + +Based on initial code from Ross Lawley. 
+ +Output conforms to https://github.com/jenkinsci/xunit-plugin/blob/master/ +src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd +""" +from __future__ import absolute_import, division, print_function + +import functools +import py +import os +import re +import sys +import time +import pytest +from _pytest import nodes +from _pytest.config import filename_arg + +# Python 2.X and 3.X compatibility +if sys.version_info[0] < 3: + from codecs import open +else: + unichr = chr + unicode = str + long = int + + +class Junit(py.xml.Namespace): + pass + + +# We need to get the subset of the invalid unicode ranges according to +# XML 1.0 which are valid in this python build. Hence we calculate +# this dynamically instead of hardcoding it. The spec range of valid +# chars is: Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] +# | [#x10000-#x10FFFF] +_legal_chars = (0x09, 0x0A, 0x0D) +_legal_ranges = ((0x20, 0x7E), (0x80, 0xD7FF), (0xE000, 0xFFFD), (0x10000, 0x10FFFF)) +_legal_xml_re = [ + unicode("%s-%s") % (unichr(low), unichr(high)) + for (low, high) in _legal_ranges + if low < sys.maxunicode +] +_legal_xml_re = [unichr(x) for x in _legal_chars] + _legal_xml_re +illegal_xml_re = re.compile(unicode("[^%s]") % unicode("").join(_legal_xml_re)) +del _legal_chars +del _legal_ranges +del _legal_xml_re + +_py_ext_re = re.compile(r"\.py$") + + +def bin_xml_escape(arg): + def repl(matchobj): + i = ord(matchobj.group()) + if i <= 0xFF: + return unicode("#x%02X") % i + else: + return unicode("#x%04X") % i + + return py.xml.raw(illegal_xml_re.sub(repl, py.xml.escape(arg))) + + +class _NodeReporter(object): + def __init__(self, nodeid, xml): + + self.id = nodeid + self.xml = xml + self.add_stats = self.xml.add_stats + self.duration = 0 + self.properties = [] + self.nodes = [] + self.testcase = None + self.attrs = {} + + def append(self, node): + self.xml.add_stats(type(node).__name__) + self.nodes.append(node) + + def add_property(self, name, value): + self.properties.append((str(name), bin_xml_escape(value))) + + def add_attribute(self, name, value): + self.attrs[str(name)] = bin_xml_escape(value) + + def make_properties_node(self): + """Return a Junit node containing custom properties, if any. 
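`bin_xml_escape` above replaces characters that are illegal in XML 1.0 with a `#xNN` placeholder so the generated JUnit file stays well-formed. A standalone sketch of the same idea, deliberately handling only a simplified subset of the illegal ranges:

```python
# Simplified sketch of the escaping performed by bin_xml_escape above.
# Only the low control-character range is handled here for brevity.
import re

_illegal_low_chars = re.compile(u"[\x00-\x08\x0b\x0c\x0e-\x1f]")


def escape_for_xml(text):
    return _illegal_low_chars.sub(lambda m: "#x%02X" % ord(m.group()), text)


print(escape_for_xml("captured \x1b[31mansi\x1b[0m output"))
# -> "captured #x1B[31mansi#x1B[0m output"
```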
+ """ + if self.properties: + return Junit.properties( + [ + Junit.property(name=name, value=value) + for name, value in self.properties + ] + ) + return "" + + def record_testreport(self, testreport): + assert not self.testcase + names = mangle_test_address(testreport.nodeid) + existing_attrs = self.attrs + classnames = names[:-1] + if self.xml.prefix: + classnames.insert(0, self.xml.prefix) + attrs = { + "classname": ".".join(classnames), + "name": bin_xml_escape(names[-1]), + "file": testreport.location[0], + } + if testreport.location[1] is not None: + attrs["line"] = testreport.location[1] + if hasattr(testreport, "url"): + attrs["url"] = testreport.url + self.attrs = attrs + self.attrs.update(existing_attrs) # restore any user-defined attributes + + def to_xml(self): + testcase = Junit.testcase(time=self.duration, **self.attrs) + testcase.append(self.make_properties_node()) + for node in self.nodes: + testcase.append(node) + return testcase + + def _add_simple(self, kind, message, data=None): + data = bin_xml_escape(data) + node = kind(data, message=message) + self.append(node) + + def write_captured_output(self, report): + content_out = report.capstdout + content_log = report.caplog + content_err = report.capstderr + + if content_log or content_out: + if content_log and self.xml.logging == "system-out": + if content_out: + # syncing stdout and the log-output is not done yet. It's + # probably not worth the effort. Therefore, first the captured + # stdout is shown and then the captured logs. + content = "\n".join( + [ + " Captured Stdout ".center(80, "-"), + content_out, + "", + " Captured Log ".center(80, "-"), + content_log, + ] + ) + else: + content = content_log + else: + content = content_out + + if content: + tag = getattr(Junit, "system-out") + self.append(tag(bin_xml_escape(content))) + + if content_log or content_err: + if content_log and self.xml.logging == "system-err": + if content_err: + content = "\n".join( + [ + " Captured Stderr ".center(80, "-"), + content_err, + "", + " Captured Log ".center(80, "-"), + content_log, + ] + ) + else: + content = content_log + else: + content = content_err + + if content: + tag = getattr(Junit, "system-err") + self.append(tag(bin_xml_escape(content))) + + def append_pass(self, report): + self.add_stats("passed") + + def append_failure(self, report): + # msg = str(report.longrepr.reprtraceback.extraline) + if hasattr(report, "wasxfail"): + self._add_simple(Junit.skipped, "xfail-marked test passes unexpectedly") + else: + if hasattr(report.longrepr, "reprcrash"): + message = report.longrepr.reprcrash.message + elif isinstance(report.longrepr, (unicode, str)): + message = report.longrepr + else: + message = str(report.longrepr) + message = bin_xml_escape(message) + fail = Junit.failure(message=message) + fail.append(bin_xml_escape(report.longrepr)) + self.append(fail) + + def append_collect_error(self, report): + # msg = str(report.longrepr.reprtraceback.extraline) + self.append( + Junit.error(bin_xml_escape(report.longrepr), message="collection failure") + ) + + def append_collect_skipped(self, report): + self._add_simple(Junit.skipped, "collection skipped", report.longrepr) + + def append_error(self, report): + if getattr(report, "when", None) == "teardown": + msg = "test teardown failure" + else: + msg = "test setup failure" + self._add_simple(Junit.error, msg, report.longrepr) + + def append_skipped(self, report): + if hasattr(report, "wasxfail"): + self._add_simple(Junit.skipped, "expected test failure", report.wasxfail) + else: + 
filename, lineno, skipreason = report.longrepr + if skipreason.startswith("Skipped: "): + skipreason = skipreason[9:] + details = "%s:%s: %s" % (filename, lineno, skipreason) + + self.append( + Junit.skipped( + bin_xml_escape(details), + type="pytest.skip", + message=bin_xml_escape(skipreason), + ) + ) + self.write_captured_output(report) + + def finalize(self): + data = self.to_xml().unicode(indent=0) + self.__dict__.clear() + self.to_xml = lambda: py.xml.raw(data) + + +@pytest.fixture +def record_property(request): + """Add an extra properties the calling test. + User properties become part of the test report and are available to the + configured reporters, like JUnit XML. + The fixture is callable with ``(name, value)``, with value being automatically + xml-encoded. + + Example:: + + def test_function(record_property): + record_property("example_key", 1) + """ + + def append_property(name, value): + request.node.user_properties.append((name, value)) + + return append_property + + +@pytest.fixture +def record_xml_property(record_property, request): + """(Deprecated) use record_property.""" + from _pytest import deprecated + + request.node.warn(deprecated.RECORD_XML_PROPERTY) + + return record_property + + +@pytest.fixture +def record_xml_attribute(request): + """Add extra xml attributes to the tag for the calling test. + The fixture is callable with ``(name, value)``, with value being + automatically xml-encoded + """ + from _pytest.warning_types import PytestWarning + + request.node.warn(PytestWarning("record_xml_attribute is an experimental feature")) + xml = getattr(request.config, "_xml", None) + if xml is not None: + node_reporter = xml.node_reporter(request.node.nodeid) + return node_reporter.add_attribute + else: + + def add_attr_noop(name, value): + pass + + return add_attr_noop + + +def pytest_addoption(parser): + group = parser.getgroup("terminal reporting") + group.addoption( + "--junitxml", + "--junit-xml", + action="store", + dest="xmlpath", + metavar="path", + type=functools.partial(filename_arg, optname="--junitxml"), + default=None, + help="create junit-xml style report file at given path.", + ) + group.addoption( + "--junitprefix", + "--junit-prefix", + action="store", + metavar="str", + default=None, + help="prepend prefix to classnames in junit-xml output", + ) + parser.addini( + "junit_suite_name", "Test suite name for JUnit report", default="pytest" + ) + parser.addini( + "junit_logging", + "Write captured log messages to JUnit report: " + "one of no|system-out|system-err", + default="no", + ) # choices=['no', 'stdout', 'stderr']) + + +def pytest_configure(config): + xmlpath = config.option.xmlpath + # prevent opening xmllog on slave nodes (xdist) + if xmlpath and not hasattr(config, "slaveinput"): + config._xml = LogXML( + xmlpath, + config.option.junitprefix, + config.getini("junit_suite_name"), + config.getini("junit_logging"), + ) + config.pluginmanager.register(config._xml) + + +def pytest_unconfigure(config): + xml = getattr(config, "_xml", None) + if xml: + del config._xml + config.pluginmanager.unregister(xml) + + +def mangle_test_address(address): + path, possible_open_bracket, params = address.partition("[") + names = path.split("::") + try: + names.remove("()") + except ValueError: + pass + # convert file path to dotted path + names[0] = names[0].replace(nodes.SEP, ".") + names[0] = _py_ext_re.sub("", names[0]) + # put any params back + names[-1] += possible_open_bracket + params + return names + + +class LogXML(object): + def __init__(self, logfile, 
prefix, suite_name="pytest", logging="no"): + logfile = os.path.expanduser(os.path.expandvars(logfile)) + self.logfile = os.path.normpath(os.path.abspath(logfile)) + self.prefix = prefix + self.suite_name = suite_name + self.logging = logging + self.stats = dict.fromkeys(["error", "passed", "failure", "skipped"], 0) + self.node_reporters = {} # nodeid -> _NodeReporter + self.node_reporters_ordered = [] + self.global_properties = [] + # List of reports that failed on call but teardown is pending. + self.open_reports = [] + self.cnt_double_fail_tests = 0 + + def finalize(self, report): + nodeid = getattr(report, "nodeid", report) + # local hack to handle xdist report order + slavenode = getattr(report, "node", None) + reporter = self.node_reporters.pop((nodeid, slavenode)) + if reporter is not None: + reporter.finalize() + + def node_reporter(self, report): + nodeid = getattr(report, "nodeid", report) + # local hack to handle xdist report order + slavenode = getattr(report, "node", None) + + key = nodeid, slavenode + + if key in self.node_reporters: + # TODO: breasks for --dist=each + return self.node_reporters[key] + + reporter = _NodeReporter(nodeid, self) + + self.node_reporters[key] = reporter + self.node_reporters_ordered.append(reporter) + + return reporter + + def add_stats(self, key): + if key in self.stats: + self.stats[key] += 1 + + def _opentestcase(self, report): + reporter = self.node_reporter(report) + reporter.record_testreport(report) + return reporter + + def pytest_runtest_logreport(self, report): + """handle a setup/call/teardown report, generating the appropriate + xml tags as necessary. + + note: due to plugins like xdist, this hook may be called in interlaced + order with reports from other nodes. for example: + + usual call order: + -> setup node1 + -> call node1 + -> teardown node1 + -> setup node2 + -> call node2 + -> teardown node2 + + possible call order in xdist: + -> setup node1 + -> call node1 + -> setup node2 + -> call node2 + -> teardown node2 + -> teardown node1 + """ + close_report = None + if report.passed: + if report.when == "call": # ignore setup/teardown + reporter = self._opentestcase(report) + reporter.append_pass(report) + elif report.failed: + if report.when == "teardown": + # The following vars are needed when xdist plugin is used + report_wid = getattr(report, "worker_id", None) + report_ii = getattr(report, "item_index", None) + close_report = next( + ( + rep + for rep in self.open_reports + if ( + rep.nodeid == report.nodeid + and getattr(rep, "item_index", None) == report_ii + and getattr(rep, "worker_id", None) == report_wid + ) + ), + None, + ) + if close_report: + # We need to open new testcase in case we have failure in + # call and error in teardown in order to follow junit + # schema + self.finalize(close_report) + self.cnt_double_fail_tests += 1 + reporter = self._opentestcase(report) + if report.when == "call": + reporter.append_failure(report) + self.open_reports.append(report) + else: + reporter.append_error(report) + elif report.skipped: + reporter = self._opentestcase(report) + reporter.append_skipped(report) + self.update_testcase_duration(report) + if report.when == "teardown": + reporter = self._opentestcase(report) + reporter.write_captured_output(report) + + for propname, propvalue in report.user_properties: + reporter.add_property(propname, propvalue) + + self.finalize(report) + report_wid = getattr(report, "worker_id", None) + report_ii = getattr(report, "item_index", None) + close_report = next( + ( + rep + for rep in 
self.open_reports + if ( + rep.nodeid == report.nodeid + and getattr(rep, "item_index", None) == report_ii + and getattr(rep, "worker_id", None) == report_wid + ) + ), + None, + ) + if close_report: + self.open_reports.remove(close_report) + + def update_testcase_duration(self, report): + """accumulates total duration for nodeid from given report and updates + the Junit.testcase with the new total if already created. + """ + reporter = self.node_reporter(report) + reporter.duration += getattr(report, "duration", 0.0) + + def pytest_collectreport(self, report): + if not report.passed: + reporter = self._opentestcase(report) + if report.failed: + reporter.append_collect_error(report) + else: + reporter.append_collect_skipped(report) + + def pytest_internalerror(self, excrepr): + reporter = self.node_reporter("internal") + reporter.attrs.update(classname="pytest", name="internal") + reporter._add_simple(Junit.error, "internal error", excrepr) + + def pytest_sessionstart(self): + self.suite_start_time = time.time() + + def pytest_sessionfinish(self): + dirname = os.path.dirname(os.path.abspath(self.logfile)) + if not os.path.isdir(dirname): + os.makedirs(dirname) + logfile = open(self.logfile, "w", encoding="utf-8") + suite_stop_time = time.time() + suite_time_delta = suite_stop_time - self.suite_start_time + + numtests = ( + self.stats["passed"] + + self.stats["failure"] + + self.stats["skipped"] + + self.stats["error"] + - self.cnt_double_fail_tests + ) + logfile.write('') + + logfile.write( + Junit.testsuite( + self._get_global_properties_node(), + [x.to_xml() for x in self.node_reporters_ordered], + name=self.suite_name, + errors=self.stats["error"], + failures=self.stats["failure"], + skips=self.stats["skipped"], + tests=numtests, + time="%.3f" % suite_time_delta, + ).unicode(indent=0) + ) + logfile.close() + + def pytest_terminal_summary(self, terminalreporter): + terminalreporter.write_sep("-", "generated xml file: %s" % (self.logfile)) + + def add_global_property(self, name, value): + self.global_properties.append((str(name), bin_xml_escape(value))) + + def _get_global_properties_node(self): + """Return a Junit node containing custom properties, if any. + """ + if self.global_properties: + return Junit.properties( + [ + Junit.property(name=name, value=value) + for name, value in self.global_properties + ] + ) + return "" diff --git a/venv/Lib/site-packages/_pytest/logging.py b/venv/Lib/site-packages/_pytest/logging.py new file mode 100644 index 00000000..8244c803 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/logging.py @@ -0,0 +1,622 @@ +""" Access and control log capturing. """ +from __future__ import absolute_import, division, print_function + +import logging +from contextlib import contextmanager +import re +import six + +from _pytest.compat import dummy_context_manager +from _pytest.config import create_terminal_writer +import pytest +import py + + +DEFAULT_LOG_FORMAT = "%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s" +DEFAULT_LOG_DATE_FORMAT = "%H:%M:%S" + + +class ColoredLevelFormatter(logging.Formatter): + """ + Colorize the %(levelname)..s part of the log format passed to __init__. 
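`LogXML.add_global_property` above lets plugins attach suite-level `<property>` entries to the generated report. A hedged sketch of using it from a `conftest.py` via the internal `config._xml` attribute created in `pytest_configure`; it only has an effect when `--junitxml` is passed, and the property names are invented:

```python
# conftest.py -- sketch: add suite-level <property> entries to the junit XML.
# Relies on the internal config._xml object set up by the plugin above.
import pytest


@pytest.fixture(scope="session", autouse=True)
def record_suite_properties(request):
    xml = getattr(request.config, "_xml", None)  # None when --junitxml not given
    if xml is not None:
        xml.add_global_property("ci_build", "example-123")
        xml.add_global_property("api_version", "v1")
```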
+ """ + + LOGLEVEL_COLOROPTS = { + logging.CRITICAL: {"red"}, + logging.ERROR: {"red", "bold"}, + logging.WARNING: {"yellow"}, + logging.WARN: {"yellow"}, + logging.INFO: {"green"}, + logging.DEBUG: {"purple"}, + logging.NOTSET: set(), + } + LEVELNAME_FMT_REGEX = re.compile(r"%\(levelname\)([+-]?\d*s)") + + def __init__(self, terminalwriter, *args, **kwargs): + super(ColoredLevelFormatter, self).__init__(*args, **kwargs) + if six.PY2: + self._original_fmt = self._fmt + else: + self._original_fmt = self._style._fmt + self._level_to_fmt_mapping = {} + + levelname_fmt_match = self.LEVELNAME_FMT_REGEX.search(self._fmt) + if not levelname_fmt_match: + return + levelname_fmt = levelname_fmt_match.group() + + for level, color_opts in self.LOGLEVEL_COLOROPTS.items(): + formatted_levelname = levelname_fmt % { + "levelname": logging.getLevelName(level) + } + + # add ANSI escape sequences around the formatted levelname + color_kwargs = {name: True for name in color_opts} + colorized_formatted_levelname = terminalwriter.markup( + formatted_levelname, **color_kwargs + ) + self._level_to_fmt_mapping[level] = self.LEVELNAME_FMT_REGEX.sub( + colorized_formatted_levelname, self._fmt + ) + + def format(self, record): + fmt = self._level_to_fmt_mapping.get(record.levelno, self._original_fmt) + if six.PY2: + self._fmt = fmt + else: + self._style._fmt = fmt + return super(ColoredLevelFormatter, self).format(record) + + +def get_option_ini(config, *names): + for name in names: + ret = config.getoption(name) # 'default' arg won't work as expected + if ret is None: + ret = config.getini(name) + if ret: + return ret + + +def pytest_addoption(parser): + """Add options to control log capturing.""" + group = parser.getgroup("logging") + + def add_option_ini(option, dest, default=None, type=None, **kwargs): + parser.addini( + dest, default=default, type=type, help="default value for " + option + ) + group.addoption(option, dest=dest, **kwargs) + + add_option_ini( + "--no-print-logs", + dest="log_print", + action="store_const", + const=False, + default=True, + type="bool", + help="disable printing caught logs on failed tests.", + ) + add_option_ini( + "--log-level", + dest="log_level", + default=None, + help="logging level used by the logging module", + ) + add_option_ini( + "--log-format", + dest="log_format", + default=DEFAULT_LOG_FORMAT, + help="log format as used by the logging module.", + ) + add_option_ini( + "--log-date-format", + dest="log_date_format", + default=DEFAULT_LOG_DATE_FORMAT, + help="log date format as used by the logging module.", + ) + parser.addini( + "log_cli", + default=False, + type="bool", + help='enable log display during test run (also known as "live logging").', + ) + add_option_ini( + "--log-cli-level", dest="log_cli_level", default=None, help="cli logging level." 
+ ) + add_option_ini( + "--log-cli-format", + dest="log_cli_format", + default=None, + help="log format as used by the logging module.", + ) + add_option_ini( + "--log-cli-date-format", + dest="log_cli_date_format", + default=None, + help="log date format as used by the logging module.", + ) + add_option_ini( + "--log-file", + dest="log_file", + default=None, + help="path to a file when logging will be written to.", + ) + add_option_ini( + "--log-file-level", + dest="log_file_level", + default=None, + help="log file logging level.", + ) + add_option_ini( + "--log-file-format", + dest="log_file_format", + default=DEFAULT_LOG_FORMAT, + help="log format as used by the logging module.", + ) + add_option_ini( + "--log-file-date-format", + dest="log_file_date_format", + default=DEFAULT_LOG_DATE_FORMAT, + help="log date format as used by the logging module.", + ) + + +@contextmanager +def catching_logs(handler, formatter=None, level=None): + """Context manager that prepares the whole logging machinery properly.""" + root_logger = logging.getLogger() + + if formatter is not None: + handler.setFormatter(formatter) + if level is not None: + handler.setLevel(level) + + # Adding the same handler twice would confuse logging system. + # Just don't do that. + add_new_handler = handler not in root_logger.handlers + + if add_new_handler: + root_logger.addHandler(handler) + if level is not None: + orig_level = root_logger.level + root_logger.setLevel(min(orig_level, level)) + try: + yield handler + finally: + if level is not None: + root_logger.setLevel(orig_level) + if add_new_handler: + root_logger.removeHandler(handler) + + +class LogCaptureHandler(logging.StreamHandler): + """A logging handler that stores log records and the log text.""" + + def __init__(self): + """Creates a new log handler.""" + logging.StreamHandler.__init__(self, py.io.TextIO()) + self.records = [] + + def emit(self, record): + """Keep the log records in a list in addition to the log text.""" + self.records.append(record) + logging.StreamHandler.emit(self, record) + + def reset(self): + self.records = [] + self.stream = py.io.TextIO() + + +class LogCaptureFixture(object): + """Provides access and control of log capturing.""" + + def __init__(self, item): + """Creates a new funcarg.""" + self._item = item + # dict of log name -> log level + self._initial_log_levels = {} # type: Dict[str, int] + + def _finalize(self): + """Finalizes the fixture. + + This restores the log levels changed by :meth:`set_level`. + """ + # restore log levels + for logger_name, level in self._initial_log_levels.items(): + logger = logging.getLogger(logger_name) + logger.setLevel(level) + + @property + def handler(self): + """ + :rtype: LogCaptureHandler + """ + return self._item.catch_log_handler + + def get_records(self, when): + """ + Get the logging records for one of the possible test phases. + + :param str when: + Which test phase to obtain the records from. Valid values are: "setup", "call" and "teardown". + + :rtype: List[logging.LogRecord] + :return: the list of captured records at the given stage + + .. 
versionadded:: 3.4 + """ + handler = self._item.catch_log_handlers.get(when) + if handler: + return handler.records + else: + return [] + + @property + def text(self): + """Returns the log text.""" + return self.handler.stream.getvalue() + + @property + def records(self): + """Returns the list of log records.""" + return self.handler.records + + @property + def record_tuples(self): + """Returns a list of a striped down version of log records intended + for use in assertion comparison. + + The format of the tuple is: + + (logger_name, log_level, message) + """ + return [(r.name, r.levelno, r.getMessage()) for r in self.records] + + @property + def messages(self): + """Returns a list of format-interpolated log messages. + + Unlike 'records', which contains the format string and parameters for interpolation, log messages in this list + are all interpolated. + Unlike 'text', which contains the output from the handler, log messages in this list are unadorned with + levels, timestamps, etc, making exact comparisons more reliable. + + Note that traceback or stack info (from :func:`logging.exception` or the `exc_info` or `stack_info` arguments + to the logging functions) is not included, as this is added by the formatter in the handler. + + .. versionadded:: 3.7 + """ + return [r.getMessage() for r in self.records] + + def clear(self): + """Reset the list of log records and the captured log text.""" + self.handler.reset() + + def set_level(self, level, logger=None): + """Sets the level for capturing of logs. The level will be restored to its previous value at the end of + the test. + + :param int level: the logger to level. + :param str logger: the logger to update the level. If not given, the root logger level is updated. + + .. versionchanged:: 3.4 + The levels of the loggers changed by this function will be restored to their initial values at the + end of the test. + """ + logger_name = logger + logger = logging.getLogger(logger_name) + # save the original log-level to restore it during teardown + self._initial_log_levels.setdefault(logger_name, logger.level) + logger.setLevel(level) + + @contextmanager + def at_level(self, level, logger=None): + """Context manager that sets the level for capturing of logs. After the end of the 'with' statement the + level is restored to its original value. + + :param int level: the logger to level. + :param str logger: the logger to update the level. If not given, the root logger level is updated. + """ + logger = logging.getLogger(logger) + orig_level = logger.level + logger.setLevel(level) + try: + yield + finally: + logger.setLevel(orig_level) + + +@pytest.fixture +def caplog(request): + """Access and control log capturing. 
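A short test exercising the `caplog` fixture above, matching the `text`, `record_tuples` and `at_level` APIs just documented; the logger name and message are illustrative:

```python
# test_logging_example.py -- sketch using the caplog fixture defined above.
import logging

logger = logging.getLogger(__name__)


def test_missing_product_is_logged(caplog):
    with caplog.at_level(logging.INFO):
        logger.warning("cannot find product %d", 42)
    assert "cannot find product 42" in caplog.text
    assert caplog.record_tuples == [
        (__name__, logging.WARNING, "cannot find product 42")
    ]
```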
+ + Captured logs are available through the following methods:: + + * caplog.text -> string containing formatted log output + * caplog.records -> list of logging.LogRecord instances + * caplog.record_tuples -> list of (logger_name, level, message) tuples + * caplog.clear() -> clear captured records and formatted log output string + """ + result = LogCaptureFixture(request.node) + yield result + result._finalize() + + +def get_actual_log_level(config, *setting_names): + """Return the actual logging level.""" + + for setting_name in setting_names: + log_level = config.getoption(setting_name) + if log_level is None: + log_level = config.getini(setting_name) + if log_level: + break + else: + return + + if isinstance(log_level, six.string_types): + log_level = log_level.upper() + try: + return int(getattr(logging, log_level, log_level)) + except ValueError: + # Python logging does not recognise this as a logging level + raise pytest.UsageError( + "'{}' is not recognized as a logging level name for " + "'{}'. Please consider passing the " + "logging level num instead.".format(log_level, setting_name) + ) + + +def pytest_configure(config): + config.pluginmanager.register(LoggingPlugin(config), "logging-plugin") + + +class LoggingPlugin(object): + """Attaches to the logging module and captures log messages for each test. + """ + + def __init__(self, config): + """Creates a new plugin to capture log messages. + + The formatter can be safely shared across all handlers so + create a single one for the entire test session here. + """ + self._config = config + + # enable verbose output automatically if live logging is enabled + if self._log_cli_enabled() and not config.getoption("verbose"): + # sanity check: terminal reporter should not have been loaded at this point + assert self._config.pluginmanager.get_plugin("terminalreporter") is None + config.option.verbose = 1 + + self.print_logs = get_option_ini(config, "log_print") + self.formatter = logging.Formatter( + get_option_ini(config, "log_format"), + get_option_ini(config, "log_date_format"), + ) + self.log_level = get_actual_log_level(config, "log_level") + + log_file = get_option_ini(config, "log_file") + if log_file: + self.log_file_level = get_actual_log_level(config, "log_file_level") + + log_file_format = get_option_ini(config, "log_file_format", "log_format") + log_file_date_format = get_option_ini( + config, "log_file_date_format", "log_date_format" + ) + # Each pytest runtests session will write to a clean logfile + self.log_file_handler = logging.FileHandler( + log_file, mode="w", encoding="UTF-8" + ) + log_file_formatter = logging.Formatter( + log_file_format, datefmt=log_file_date_format + ) + self.log_file_handler.setFormatter(log_file_formatter) + else: + self.log_file_handler = None + + self.log_cli_handler = None + + def _log_cli_enabled(self): + """Return True if log_cli should be considered enabled, either explicitly + or because --log-cli-level was given in the command-line. + """ + return self._config.getoption( + "--log-cli-level" + ) is not None or self._config.getini("log_cli") + + @pytest.hookimpl(hookwrapper=True, tryfirst=True) + def pytest_collection(self): + # This has to be called before the first log message is logged, + # so we can access the terminal reporter plugin. 
+ self._setup_cli_logging() + + with self.live_logs_context(): + if self.log_cli_handler: + self.log_cli_handler.set_when("collection") + + if self.log_file_handler is not None: + with catching_logs(self.log_file_handler, level=self.log_file_level): + yield + else: + yield + + @contextmanager + def _runtest_for(self, item, when): + """Implements the internals of pytest_runtest_xxx() hook.""" + with catching_logs( + LogCaptureHandler(), formatter=self.formatter, level=self.log_level + ) as log_handler: + if self.log_cli_handler: + self.log_cli_handler.set_when(when) + + if item is None: + yield # run the test + return + + if not hasattr(item, "catch_log_handlers"): + item.catch_log_handlers = {} + item.catch_log_handlers[when] = log_handler + item.catch_log_handler = log_handler + try: + yield # run test + finally: + if when == "teardown": + del item.catch_log_handler + del item.catch_log_handlers + + if self.print_logs: + # Add a captured log section to the report. + log = log_handler.stream.getvalue().strip() + item.add_report_section(when, "log", log) + + @pytest.hookimpl(hookwrapper=True) + def pytest_runtest_setup(self, item): + with self._runtest_for(item, "setup"): + yield + + @pytest.hookimpl(hookwrapper=True) + def pytest_runtest_call(self, item): + with self._runtest_for(item, "call"): + yield + + @pytest.hookimpl(hookwrapper=True) + def pytest_runtest_teardown(self, item): + with self._runtest_for(item, "teardown"): + yield + + @pytest.hookimpl(hookwrapper=True) + def pytest_runtest_logstart(self): + if self.log_cli_handler: + self.log_cli_handler.reset() + with self._runtest_for(None, "start"): + yield + + @pytest.hookimpl(hookwrapper=True) + def pytest_runtest_logfinish(self): + with self._runtest_for(None, "finish"): + yield + + @pytest.hookimpl(hookwrapper=True, tryfirst=True) + def pytest_sessionfinish(self): + with self.live_logs_context(): + if self.log_cli_handler: + self.log_cli_handler.set_when("sessionfinish") + if self.log_file_handler is not None: + with catching_logs(self.log_file_handler, level=self.log_file_level): + yield + else: + yield + + @pytest.hookimpl(hookwrapper=True, tryfirst=True) + def pytest_sessionstart(self): + self._setup_cli_logging() + with self.live_logs_context(): + if self.log_cli_handler: + self.log_cli_handler.set_when("sessionstart") + if self.log_file_handler is not None: + with catching_logs(self.log_file_handler, level=self.log_file_level): + yield + else: + yield + + @pytest.hookimpl(hookwrapper=True) + def pytest_runtestloop(self, session): + """Runs all collected test items.""" + with self.live_logs_context(): + if self.log_file_handler is not None: + with catching_logs(self.log_file_handler, level=self.log_file_level): + yield # run all the tests + else: + yield # run all the tests + + def _setup_cli_logging(self): + """Sets up the handler and logger for the Live Logs feature, if enabled.""" + terminal_reporter = self._config.pluginmanager.get_plugin("terminalreporter") + if self._log_cli_enabled() and terminal_reporter is not None: + capture_manager = self._config.pluginmanager.get_plugin("capturemanager") + log_cli_handler = _LiveLoggingStreamHandler( + terminal_reporter, capture_manager + ) + log_cli_format = get_option_ini( + self._config, "log_cli_format", "log_format" + ) + log_cli_date_format = get_option_ini( + self._config, "log_cli_date_format", "log_date_format" + ) + if ( + self._config.option.color != "no" + and ColoredLevelFormatter.LEVELNAME_FMT_REGEX.search(log_cli_format) + ): + log_cli_formatter = 
ColoredLevelFormatter( + create_terminal_writer(self._config), + log_cli_format, + datefmt=log_cli_date_format, + ) + else: + log_cli_formatter = logging.Formatter( + log_cli_format, datefmt=log_cli_date_format + ) + log_cli_level = get_actual_log_level( + self._config, "log_cli_level", "log_level" + ) + self.log_cli_handler = log_cli_handler + self.live_logs_context = lambda: catching_logs( + log_cli_handler, formatter=log_cli_formatter, level=log_cli_level + ) + else: + self.live_logs_context = lambda: dummy_context_manager() + # Note that the lambda for the live_logs_context is needed because + # live_logs_context can otherwise not be entered multiple times due + # to limitations of contextlib.contextmanager + + +class _LiveLoggingStreamHandler(logging.StreamHandler): + """ + Custom StreamHandler used by the live logging feature: it will write a newline before the first log message + in each test. + + During live logging we must also explicitly disable stdout/stderr capturing otherwise it will get captured + and won't appear in the terminal. + """ + + def __init__(self, terminal_reporter, capture_manager): + """ + :param _pytest.terminal.TerminalReporter terminal_reporter: + :param _pytest.capture.CaptureManager capture_manager: + """ + logging.StreamHandler.__init__(self, stream=terminal_reporter) + self.capture_manager = capture_manager + self.reset() + self.set_when(None) + self._test_outcome_written = False + + def reset(self): + """Reset the handler; should be called before the start of each test""" + self._first_record_emitted = False + + def set_when(self, when): + """Prepares for the given test phase (setup/call/teardown)""" + self._when = when + self._section_name_shown = False + if when == "start": + self._test_outcome_written = False + + def emit(self, record): + ctx_manager = ( + self.capture_manager.global_and_fixture_disabled() + if self.capture_manager + else dummy_context_manager() + ) + with ctx_manager: + if not self._first_record_emitted: + self.stream.write("\n") + self._first_record_emitted = True + elif self._when in ("teardown", "finish"): + if not self._test_outcome_written: + self._test_outcome_written = True + self.stream.write("\n") + if not self._section_name_shown and self._when: + self.stream.section("live log " + self._when, sep="-", bold=True) + self._section_name_shown = True + logging.StreamHandler.emit(self, record) diff --git a/venv/Lib/site-packages/_pytest/main.py b/venv/Lib/site-packages/_pytest/main.py new file mode 100644 index 00000000..8d4176ae --- /dev/null +++ b/venv/Lib/site-packages/_pytest/main.py @@ -0,0 +1,669 @@ +""" core implementation of testing process: init, session, runtest loop. 
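The `_LiveLoggingStreamHandler` above implements the "live logging" feature and is driven entirely by configuration. A sketch of enabling it from an ini file, using the `log_cli*` option names registered in `pytest_addoption` earlier (the format string is just an example):

```ini
# pytest.ini -- sketch: enable live logging handled by _LiveLoggingStreamHandler above
[pytest]
log_cli = true
log_cli_level = INFO
log_cli_format = %(asctime)s %(levelname)-8s %(message)s
log_cli_date_format = %H:%M:%S
```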
""" +from __future__ import absolute_import, division, print_function + +import contextlib +import functools +import os +import pkgutil +import six +import sys + +import _pytest +from _pytest import nodes +import _pytest._code +import py + +from _pytest.config import directory_arg, UsageError, hookimpl +from _pytest.outcomes import exit +from _pytest.runner import collect_one_node + + +# exitcodes for the command line +EXIT_OK = 0 +EXIT_TESTSFAILED = 1 +EXIT_INTERRUPTED = 2 +EXIT_INTERNALERROR = 3 +EXIT_USAGEERROR = 4 +EXIT_NOTESTSCOLLECTED = 5 + + +def pytest_addoption(parser): + parser.addini( + "norecursedirs", + "directory patterns to avoid for recursion", + type="args", + default=[".*", "build", "dist", "CVS", "_darcs", "{arch}", "*.egg", "venv"], + ) + parser.addini( + "testpaths", + "directories to search for tests when no files or directories are given in the " + "command line.", + type="args", + default=[], + ) + # parser.addini("dirpatterns", + # "patterns specifying possible locations of test files", + # type="linelist", default=["**/test_*.txt", + # "**/test_*.py", "**/*_test.py"] + # ) + group = parser.getgroup("general", "running and selection options") + group._addoption( + "-x", + "--exitfirst", + action="store_const", + dest="maxfail", + const=1, + help="exit instantly on first error or failed test.", + ), + group._addoption( + "--maxfail", + metavar="num", + action="store", + type=int, + dest="maxfail", + default=0, + help="exit after first num failures or errors.", + ) + group._addoption( + "--strict", + action="store_true", + help="marks not registered in configuration file raise errors.", + ) + group._addoption( + "-c", + metavar="file", + type=str, + dest="inifilename", + help="load configuration from `file` instead of trying to locate one of the implicit " + "configuration files.", + ) + group._addoption( + "--continue-on-collection-errors", + action="store_true", + default=False, + dest="continue_on_collection_errors", + help="Force test execution even if collection errors occur.", + ) + group._addoption( + "--rootdir", + action="store", + dest="rootdir", + help="Define root directory for tests. 
Can be relative path: 'root_dir', './root_dir', " + "'root_dir/another_dir/'; absolute path: '/home/user/root_dir'; path with variables: " + "'$HOME/root_dir'.", + ) + + group = parser.getgroup("collect", "collection") + group.addoption( + "--collectonly", + "--collect-only", + action="store_true", + help="only collect tests, don't execute them.", + ), + group.addoption( + "--pyargs", + action="store_true", + help="try to interpret all arguments as python packages.", + ) + group.addoption( + "--ignore", + action="append", + metavar="path", + help="ignore path during collection (multi-allowed).", + ) + group.addoption( + "--deselect", + action="append", + metavar="nodeid_prefix", + help="deselect item during collection (multi-allowed).", + ) + # when changing this to --conf-cut-dir, config.py Conftest.setinitial + # needs upgrading as well + group.addoption( + "--confcutdir", + dest="confcutdir", + default=None, + metavar="dir", + type=functools.partial(directory_arg, optname="--confcutdir"), + help="only load conftest.py's relative to specified dir.", + ) + group.addoption( + "--noconftest", + action="store_true", + dest="noconftest", + default=False, + help="Don't load any conftest.py files.", + ) + group.addoption( + "--keepduplicates", + "--keep-duplicates", + action="store_true", + dest="keepduplicates", + default=False, + help="Keep duplicate tests.", + ) + group.addoption( + "--collect-in-virtualenv", + action="store_true", + dest="collect_in_virtualenv", + default=False, + help="Don't ignore tests in a local virtualenv directory", + ) + + group = parser.getgroup("debugconfig", "test session debugging and configuration") + group.addoption( + "--basetemp", + dest="basetemp", + default=None, + metavar="dir", + help=( + "base temporary directory for this test run." + "(warning: this directory is removed if it exists)" + ), + ) + + +def pytest_configure(config): + __import__("pytest").config = config # compatibility + + +def wrap_session(config, doit): + """Skeleton command line program""" + session = Session(config) + session.exitstatus = EXIT_OK + initstate = 0 + try: + try: + config._do_configure() + initstate = 1 + config.hook.pytest_sessionstart(session=session) + initstate = 2 + session.exitstatus = doit(config, session) or 0 + except UsageError: + raise + except Failed: + session.exitstatus = EXIT_TESTSFAILED + except KeyboardInterrupt: + excinfo = _pytest._code.ExceptionInfo() + exitstatus = EXIT_INTERRUPTED + if initstate <= 2 and isinstance(excinfo.value, exit.Exception): + sys.stderr.write("{}: {}\n".format(excinfo.typename, excinfo.value.msg)) + if excinfo.value.returncode is not None: + exitstatus = excinfo.value.returncode + config.hook.pytest_keyboard_interrupt(excinfo=excinfo) + session.exitstatus = exitstatus + except: # noqa + excinfo = _pytest._code.ExceptionInfo() + config.notify_exception(excinfo, config.option) + session.exitstatus = EXIT_INTERNALERROR + if excinfo.errisinstance(SystemExit): + sys.stderr.write("mainloop: caught Spurious SystemExit!\n") + + finally: + excinfo = None # Explicitly break reference cycle. + session.startdir.chdir() + if initstate >= 2: + config.hook.pytest_sessionfinish( + session=session, exitstatus=session.exitstatus + ) + config._ensure_unconfigure() + return session.exitstatus + + +def pytest_cmdline_main(config): + return wrap_session(config, _main) + + +def _main(config, session): + """ default command line protocol for initialization, session, + running tests and reporting. 
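The `EXIT_*` constants defined at the top of this module are what `pytest.main()` ultimately returns through `wrap_session`. A small sketch of running the suite programmatically and branching on the exit status; the constant values mirror the module above and the test path is illustrative:

```python
# run_tests.py -- sketch: interpret the exit codes defined in _pytest/main.py above.
import pytest

EXIT_OK = 0
EXIT_TESTSFAILED = 1
EXIT_NOTESTSCOLLECTED = 5


def run():
    status = pytest.main(["-x", "app/tests"])  # path is illustrative
    if status == EXIT_OK:
        print("all tests passed")
    elif status == EXIT_TESTSFAILED:
        print("some tests failed")
    elif status == EXIT_NOTESTSCOLLECTED:
        print("no tests were collected")
    return status


if __name__ == "__main__":
    raise SystemExit(run())
```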
""" + config.hook.pytest_collection(session=session) + config.hook.pytest_runtestloop(session=session) + + if session.testsfailed: + return EXIT_TESTSFAILED + elif session.testscollected == 0: + return EXIT_NOTESTSCOLLECTED + + +def pytest_collection(session): + return session.perform_collect() + + +def pytest_runtestloop(session): + if session.testsfailed and not session.config.option.continue_on_collection_errors: + raise session.Interrupted("%d errors during collection" % session.testsfailed) + + if session.config.option.collectonly: + return True + + for i, item in enumerate(session.items): + nextitem = session.items[i + 1] if i + 1 < len(session.items) else None + item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem) + if session.shouldfail: + raise session.Failed(session.shouldfail) + if session.shouldstop: + raise session.Interrupted(session.shouldstop) + return True + + +def _in_venv(path): + """Attempts to detect if ``path`` is the root of a Virtual Environment by + checking for the existence of the appropriate activate script""" + bindir = path.join("Scripts" if sys.platform.startswith("win") else "bin") + if not bindir.isdir(): + return False + activates = ( + "activate", + "activate.csh", + "activate.fish", + "Activate", + "Activate.bat", + "Activate.ps1", + ) + return any([fname.basename in activates for fname in bindir.listdir()]) + + +def pytest_ignore_collect(path, config): + ignore_paths = config._getconftest_pathlist("collect_ignore", path=path.dirpath()) + ignore_paths = ignore_paths or [] + excludeopt = config.getoption("ignore") + if excludeopt: + ignore_paths.extend([py.path.local(x) for x in excludeopt]) + + if py.path.local(path) in ignore_paths: + return True + + allow_in_venv = config.getoption("collect_in_virtualenv") + if _in_venv(path) and not allow_in_venv: + return True + + # Skip duplicate paths. + keepduplicates = config.getoption("keepduplicates") + duplicate_paths = config.pluginmanager._duplicatepaths + if not keepduplicates: + if path in duplicate_paths: + return True + else: + duplicate_paths.add(path) + + return False + + +def pytest_collection_modifyitems(items, config): + deselect_prefixes = tuple(config.getoption("deselect") or []) + if not deselect_prefixes: + return + + remaining = [] + deselected = [] + for colitem in items: + if colitem.nodeid.startswith(deselect_prefixes): + deselected.append(colitem) + else: + remaining.append(colitem) + + if deselected: + config.hook.pytest_deselected(items=deselected) + items[:] = remaining + + +@contextlib.contextmanager +def _patched_find_module(): + """Patch bug in pkgutil.ImpImporter.find_module + + When using pkgutil.find_loader on python<3.4 it removes symlinks + from the path due to a call to os.path.realpath. This is not consistent + with actually doing the import (in these versions, pkgutil and __import__ + did not share the same underlying code). This can break conftest + discovery for pytest where symlinks are involved. + + The only supported python<3.4 by pytest is python 2.7. 
+ """ + if six.PY2: # python 3.4+ uses importlib instead + + def find_module_patched(self, fullname, path=None): + # Note: we ignore 'path' argument since it is only used via meta_path + subname = fullname.split(".")[-1] + if subname != fullname and self.path is None: + return None + if self.path is None: + path = None + else: + # original: path = [os.path.realpath(self.path)] + path = [self.path] + try: + file, filename, etc = pkgutil.imp.find_module(subname, path) + except ImportError: + return None + return pkgutil.ImpLoader(fullname, file, filename, etc) + + old_find_module = pkgutil.ImpImporter.find_module + pkgutil.ImpImporter.find_module = find_module_patched + try: + yield + finally: + pkgutil.ImpImporter.find_module = old_find_module + else: + yield + + +class FSHookProxy(object): + def __init__(self, fspath, pm, remove_mods): + self.fspath = fspath + self.pm = pm + self.remove_mods = remove_mods + + def __getattr__(self, name): + x = self.pm.subset_hook_caller(name, remove_plugins=self.remove_mods) + self.__dict__[name] = x + return x + + +class NoMatch(Exception): + """ raised if matching cannot locate a matching names. """ + + +class Interrupted(KeyboardInterrupt): + """ signals an interrupted test run. """ + + __module__ = "builtins" # for py3 + + +class Failed(Exception): + """ signals a stop as failed test run. """ + + +class Session(nodes.FSCollector): + Interrupted = Interrupted + Failed = Failed + + def __init__(self, config): + nodes.FSCollector.__init__( + self, config.rootdir, parent=None, config=config, session=self, nodeid="" + ) + self.testsfailed = 0 + self.testscollected = 0 + self.shouldstop = False + self.shouldfail = False + self.trace = config.trace.root.get("collection") + self._norecursepatterns = config.getini("norecursedirs") + self.startdir = py.path.local() + self._initialpaths = frozenset() + # Keep track of any collected nodes in here, so we don't duplicate fixtures + self._node_cache = {} + + self.config.pluginmanager.register(self, name="session") + + @hookimpl(tryfirst=True) + def pytest_collectstart(self): + if self.shouldfail: + raise self.Failed(self.shouldfail) + if self.shouldstop: + raise self.Interrupted(self.shouldstop) + + @hookimpl(tryfirst=True) + def pytest_runtest_logreport(self, report): + if report.failed and not hasattr(report, "wasxfail"): + self.testsfailed += 1 + maxfail = self.config.getvalue("maxfail") + if maxfail and self.testsfailed >= maxfail: + self.shouldfail = "stopping after %d failures" % (self.testsfailed) + + pytest_collectreport = pytest_runtest_logreport + + def isinitpath(self, path): + return path in self._initialpaths + + def gethookproxy(self, fspath): + # check if we have the common case of running + # hooks with all conftest.py files + pm = self.config.pluginmanager + my_conftestmodules = pm._getconftestmodules(fspath) + remove_mods = pm._conftest_plugins.difference(my_conftestmodules) + if remove_mods: + # one or more conftests are not in use at this fspath + proxy = FSHookProxy(fspath, pm, remove_mods) + else: + # all plugis are active for this fspath + proxy = self.config.hook + return proxy + + def perform_collect(self, args=None, genitems=True): + hook = self.config.hook + try: + items = self._perform_collect(args, genitems) + self.config.pluginmanager.check_pending() + hook.pytest_collection_modifyitems( + session=self, config=self.config, items=items + ) + finally: + hook.pytest_collection_finish(session=self) + self.testscollected = len(items) + return items + + def _perform_collect(self, args, 
genitems): + if args is None: + args = self.config.args + self.trace("perform_collect", self, args) + self.trace.root.indent += 1 + self._notfound = [] + initialpaths = [] + self._initialparts = [] + self.items = items = [] + for arg in args: + parts = self._parsearg(arg) + self._initialparts.append(parts) + initialpaths.append(parts[0]) + self._initialpaths = frozenset(initialpaths) + rep = collect_one_node(self) + self.ihook.pytest_collectreport(report=rep) + self.trace.root.indent -= 1 + if self._notfound: + errors = [] + for arg, exc in self._notfound: + line = "(no name %r in any of %r)" % (arg, exc.args[0]) + errors.append("not found: %s\n%s" % (arg, line)) + # XXX: test this + raise UsageError(*errors) + if not genitems: + return rep.result + else: + if rep.passed: + for node in rep.result: + self.items.extend(self.genitems(node)) + return items + + def collect(self): + for parts in self._initialparts: + arg = "::".join(map(str, parts)) + self.trace("processing argument", arg) + self.trace.root.indent += 1 + try: + for x in self._collect(arg): + yield x + except NoMatch: + # we are inside a make_report hook so + # we cannot directly pass through the exception + self._notfound.append((arg, sys.exc_info()[1])) + + self.trace.root.indent -= 1 + + def _collect(self, arg): + from _pytest.python import Package + + names = self._parsearg(arg) + argpath = names.pop(0).realpath() + paths = [] + + root = self + # Start with a Session root, and delve to argpath item (dir or file) + # and stack all Packages found on the way. + # No point in finding packages when collecting doctests + if not self.config.option.doctestmodules: + for parent in argpath.parts(): + pm = self.config.pluginmanager + if pm._confcutdir and pm._confcutdir.relto(parent): + continue + + if parent.isdir(): + pkginit = parent.join("__init__.py") + if pkginit.isfile(): + if pkginit in self._node_cache: + root = self._node_cache[pkginit][0] + else: + col = root._collectfile(pkginit) + if col: + if isinstance(col[0], Package): + root = col[0] + # always store a list in the cache, matchnodes expects it + self._node_cache[root.fspath] = [root] + + # If it's a directory argument, recurse and look for any Subpackages. + # Let the Package collector deal with subnodes, don't collect here. 
+ if argpath.check(dir=1): + assert not names, "invalid arg %r" % (arg,) + for path in argpath.visit( + fil=lambda x: x.check(file=1), rec=self._recurse, bf=True, sort=True + ): + pkginit = path.dirpath().join("__init__.py") + if pkginit.exists() and not any(x in pkginit.parts() for x in paths): + for x in root._collectfile(pkginit): + yield x + paths.append(x.fspath.dirpath()) + + if not any(x in path.parts() for x in paths): + for x in root._collectfile(path): + if (type(x), x.fspath) in self._node_cache: + yield self._node_cache[(type(x), x.fspath)] + else: + self._node_cache[(type(x), x.fspath)] = x + yield x + else: + assert argpath.check(file=1) + + if argpath in self._node_cache: + col = self._node_cache[argpath] + else: + col = root._collectfile(argpath) + if col: + self._node_cache[argpath] = col + for y in self.matchnodes(col, names): + yield y + + def _collectfile(self, path): + ihook = self.gethookproxy(path) + if not self.isinitpath(path): + if ihook.pytest_ignore_collect(path=path, config=self.config): + return () + return ihook.pytest_collect_file(path=path, parent=self) + + def _recurse(self, path): + ihook = self.gethookproxy(path.dirpath()) + if ihook.pytest_ignore_collect(path=path, config=self.config): + return + for pat in self._norecursepatterns: + if path.check(fnmatch=pat): + return False + ihook = self.gethookproxy(path) + ihook.pytest_collect_directory(path=path, parent=self) + return True + + def _tryconvertpyarg(self, x): + """Convert a dotted module name to path.""" + try: + with _patched_find_module(): + loader = pkgutil.find_loader(x) + except ImportError: + return x + if loader is None: + return x + # This method is sometimes invoked when AssertionRewritingHook, which + # does not define a get_filename method, is already in place: + try: + with _patched_find_module(): + path = loader.get_filename(x) + except AttributeError: + # Retrieve path from AssertionRewritingHook: + path = loader.modules[x][0].co_filename + if loader.is_package(x): + path = os.path.dirname(path) + return path + + def _parsearg(self, arg): + """ return (fspath, names) tuple after checking the file exists. 
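`_parsearg` above splits a command-line argument into a file-system path plus `::`-separated names, which is the node-id syntax used on the pytest command line (it additionally checks that the path exists and resolves `--pyargs` module names). A tiny standalone sketch of that split; the example node id is hypothetical:

```python
# Standalone sketch of the "path::name::name" argument syntax handled by
# _parsearg above; the example node id is hypothetical.
def split_arg(arg):
    parts = arg.split("::")
    return parts[0], parts[1:]


print(split_arg("tests/test_views.py::TestProducts::test_get_all_products"))
# -> ('tests/test_views.py', ['TestProducts', 'test_get_all_products'])
```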
""" + parts = str(arg).split("::") + if self.config.option.pyargs: + parts[0] = self._tryconvertpyarg(parts[0]) + relpath = parts[0].replace("/", os.sep) + path = self.config.invocation_dir.join(relpath, abs=True) + if not path.check(): + if self.config.option.pyargs: + raise UsageError( + "file or package not found: " + arg + " (missing __init__.py?)" + ) + raise UsageError("file not found: " + arg) + parts[0] = path + return parts + + def matchnodes(self, matching, names): + self.trace("matchnodes", matching, names) + self.trace.root.indent += 1 + nodes = self._matchnodes(matching, names) + num = len(nodes) + self.trace("matchnodes finished -> ", num, "nodes") + self.trace.root.indent -= 1 + if num == 0: + raise NoMatch(matching, names[:1]) + return nodes + + def _matchnodes(self, matching, names): + if not matching or not names: + return matching + name = names[0] + assert name + nextnames = names[1:] + resultnodes = [] + for node in matching: + if isinstance(node, nodes.Item): + if not names: + resultnodes.append(node) + continue + assert isinstance(node, nodes.Collector) + key = (type(node), node.nodeid) + if key in self._node_cache: + rep = self._node_cache[key] + else: + rep = collect_one_node(node) + self._node_cache[key] = rep + if rep.passed: + has_matched = False + for x in rep.result: + # TODO: remove parametrized workaround once collection structure contains parametrization + if x.name == name or x.name.split("[")[0] == name: + resultnodes.extend(self.matchnodes([x], nextnames)) + has_matched = True + # XXX accept IDs that don't have "()" for class instances + if not has_matched and len(rep.result) == 1 and x.name == "()": + nextnames.insert(0, name) + resultnodes.extend(self.matchnodes([x], nextnames)) + else: + # report collection failures here to avoid failing to run some test + # specified in the command line because the module could not be + # imported (#134) + node.ihook.pytest_collectreport(report=rep) + return resultnodes + + def genitems(self, node): + self.trace("genitems", node) + if isinstance(node, nodes.Item): + node.ihook.pytest_itemcollected(item=node) + yield node + else: + assert isinstance(node, nodes.Collector) + rep = collect_one_node(node) + if rep.passed: + for subnode in rep.result: + for x in self.genitems(subnode): + yield x + node.ihook.pytest_collectreport(report=rep) diff --git a/venv/Lib/site-packages/_pytest/mark/__init__.py b/venv/Lib/site-packages/_pytest/mark/__init__.py new file mode 100644 index 00000000..39005742 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/mark/__init__.py @@ -0,0 +1,169 @@ +""" generic mechanism for marking and selecting python functions. """ +from __future__ import absolute_import, division, print_function +from _pytest.config import UsageError +from .structures import ( + ParameterSet, + EMPTY_PARAMETERSET_OPTION, + MARK_GEN, + Mark, + MarkInfo, + MarkDecorator, + MarkGenerator, + transfer_markers, + get_empty_parameterset_mark, +) +from .legacy import matchkeyword, matchmark + +__all__ = [ + "Mark", + "MarkInfo", + "MarkDecorator", + "MarkGenerator", + "transfer_markers", + "get_empty_parameterset_mark", +] + + +def param(*values, **kw): + """Specify a parameter in `pytest.mark.parametrize`_ calls or + :ref:`parametrized fixtures `. + + .. 
code-block:: python + + @pytest.mark.parametrize("test_input,expected", [ + ("3+5", 8), + pytest.param("6*9", 42, marks=pytest.mark.xfail), + ]) + def test_eval(test_input, expected): + assert eval(test_input) == expected + + :param values: variable args of the values of the parameter set, in order. + :keyword marks: a single mark or a list of marks to be applied to this parameter set. + :keyword str id: the id to attribute to this parameter set. + """ + return ParameterSet.param(*values, **kw) + + +def pytest_addoption(parser): + group = parser.getgroup("general") + group._addoption( + "-k", + action="store", + dest="keyword", + default="", + metavar="EXPRESSION", + help="only run tests which match the given substring expression. " + "An expression is a python evaluatable expression " + "where all names are substring-matched against test names " + "and their parent classes. Example: -k 'test_method or test_" + "other' matches all test functions and classes whose name " + "contains 'test_method' or 'test_other', while -k 'not test_method' " + "matches those that don't contain 'test_method' in their names. " + "Additionally keywords are matched to classes and functions " + "containing extra names in their 'extra_keyword_matches' set, " + "as well as functions which have names assigned directly to them.", + ) + + group._addoption( + "-m", + action="store", + dest="markexpr", + default="", + metavar="MARKEXPR", + help="only run tests matching given mark expression. " + "example: -m 'mark1 and not mark2'.", + ) + + group.addoption( + "--markers", + action="store_true", + help="show markers (builtin, plugin and per-project ones).", + ) + + parser.addini("markers", "markers for test functions", "linelist") + parser.addini(EMPTY_PARAMETERSET_OPTION, "default marker for empty parametersets") + + +def pytest_cmdline_main(config): + import _pytest.config + + if config.option.markers: + config._do_configure() + tw = _pytest.config.create_terminal_writer(config) + for line in config.getini("markers"): + parts = line.split(":", 1) + name = parts[0] + rest = parts[1] if len(parts) == 2 else "" + tw.write("@pytest.mark.%s:" % name, bold=True) + tw.line(rest) + tw.line() + config._ensure_unconfigure() + return 0 + + +pytest_cmdline_main.tryfirst = True + + +def deselect_by_keyword(items, config): + keywordexpr = config.option.keyword.lstrip() + if keywordexpr.startswith("-"): + keywordexpr = "not " + keywordexpr[1:] + selectuntil = False + if keywordexpr[-1:] == ":": + selectuntil = True + keywordexpr = keywordexpr[:-1] + + remaining = [] + deselected = [] + for colitem in items: + if keywordexpr and not matchkeyword(colitem, keywordexpr): + deselected.append(colitem) + else: + if selectuntil: + keywordexpr = None + remaining.append(colitem) + + if deselected: + config.hook.pytest_deselected(items=deselected) + items[:] = remaining + + +def deselect_by_mark(items, config): + matchexpr = config.option.markexpr + if not matchexpr: + return + + remaining = [] + deselected = [] + for item in items: + if matchmark(item, matchexpr): + remaining.append(item) + else: + deselected.append(item) + + if deselected: + config.hook.pytest_deselected(items=deselected) + items[:] = remaining + + +def pytest_collection_modifyitems(items, config): + deselect_by_keyword(items, config) + deselect_by_mark(items, config) + + +def pytest_configure(config): + config._old_mark_config = MARK_GEN._config + if config.option.strict: + MARK_GEN._config = config + + empty_parameterset = config.getini(EMPTY_PARAMETERSET_OPTION) + + if 
empty_parameterset not in ("skip", "xfail", "fail_at_collect", None, ""): + raise UsageError( + "{!s} must be one of skip, xfail or fail_at_collect" + " but it is {!r}".format(EMPTY_PARAMETERSET_OPTION, empty_parameterset) + ) + + +def pytest_unconfigure(config): + MARK_GEN._config = getattr(config, "_old_mark_config", None) diff --git a/venv/Lib/site-packages/_pytest/mark/__pycache__/__init__.cpython-37.pyc b/venv/Lib/site-packages/_pytest/mark/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 00000000..520c72bf Binary files /dev/null and b/venv/Lib/site-packages/_pytest/mark/__pycache__/__init__.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/mark/__pycache__/evaluate.cpython-37.pyc b/venv/Lib/site-packages/_pytest/mark/__pycache__/evaluate.cpython-37.pyc new file mode 100644 index 00000000..a11adc26 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/mark/__pycache__/evaluate.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/mark/__pycache__/legacy.cpython-37.pyc b/venv/Lib/site-packages/_pytest/mark/__pycache__/legacy.cpython-37.pyc new file mode 100644 index 00000000..d602d601 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/mark/__pycache__/legacy.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/mark/__pycache__/structures.cpython-37.pyc b/venv/Lib/site-packages/_pytest/mark/__pycache__/structures.cpython-37.pyc new file mode 100644 index 00000000..ed8056b9 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/mark/__pycache__/structures.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/_pytest/mark/evaluate.py b/venv/Lib/site-packages/_pytest/mark/evaluate.py new file mode 100644 index 00000000..6736cb79 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/mark/evaluate.py @@ -0,0 +1,123 @@ +import os +import six +import sys +import platform +import traceback + +from ..outcomes import fail, TEST_OUTCOME + + +def cached_eval(config, expr, d): + if not hasattr(config, "_evalcache"): + config._evalcache = {} + try: + return config._evalcache[expr] + except KeyError: + import _pytest._code + + exprcode = _pytest._code.compile(expr, mode="eval") + config._evalcache[expr] = x = eval(exprcode, d) + return x + + +class MarkEvaluator(object): + def __init__(self, item, name): + self.item = item + self._marks = None + self._mark = None + self._mark_name = name + + def __bool__(self): + # dont cache here to prevent staleness + return bool(self._get_marks()) + + __nonzero__ = __bool__ + + def wasvalid(self): + return not hasattr(self, "exc") + + def _get_marks(self): + return list(self.item.iter_markers(name=self._mark_name)) + + def invalidraise(self, exc): + raises = self.get("raises") + if not raises: + return + return not isinstance(exc, raises) + + def istrue(self): + try: + return self._istrue() + except TEST_OUTCOME: + self.exc = sys.exc_info() + if isinstance(self.exc[1], SyntaxError): + msg = [" " * (self.exc[1].offset + 4) + "^"] + msg.append("SyntaxError: invalid syntax") + else: + msg = traceback.format_exception_only(*self.exc[:2]) + fail( + "Error evaluating %r expression\n" + " %s\n" + "%s" % (self._mark_name, self.expr, "\n".join(msg)), + pytrace=False, + ) + + def _getglobals(self): + d = {"os": os, "sys": sys, "platform": platform, "config": self.item.config} + if hasattr(self.item, "obj"): + d.update(self.item.obj.__globals__) + return d + + def _istrue(self): + if hasattr(self, "result"): + return self.result + self._marks = self._get_marks() + + if self._marks: + self.result = 
False + for mark in self._marks: + self._mark = mark + if "condition" in mark.kwargs: + args = (mark.kwargs["condition"],) + else: + args = mark.args + + for expr in args: + self.expr = expr + if isinstance(expr, six.string_types): + d = self._getglobals() + result = cached_eval(self.item.config, expr, d) + else: + if "reason" not in mark.kwargs: + # XXX better be checked at collection time + msg = ( + "you need to specify reason=STRING " + "when using booleans as conditions." + ) + fail(msg) + result = bool(expr) + if result: + self.result = True + self.reason = mark.kwargs.get("reason", None) + self.expr = expr + return self.result + + if not args: + self.result = True + self.reason = mark.kwargs.get("reason", None) + return self.result + return False + + def get(self, attr, default=None): + if self._mark is None: + return default + return self._mark.kwargs.get(attr, default) + + def getexplanation(self): + expl = getattr(self, "reason", None) or self.get("reason", None) + if not expl: + if not hasattr(self, "expr"): + return "" + else: + return "condition: " + str(self.expr) + return expl diff --git a/venv/Lib/site-packages/_pytest/mark/legacy.py b/venv/Lib/site-packages/_pytest/mark/legacy.py new file mode 100644 index 00000000..0d0d852c --- /dev/null +++ b/venv/Lib/site-packages/_pytest/mark/legacy.py @@ -0,0 +1,100 @@ +""" +this is a place where we put datastructures used by legacy apis +we hope ot remove +""" +import attr +import keyword + +from _pytest.config import UsageError + + +@attr.s +class MarkMapping(object): + """Provides a local mapping for markers where item access + resolves to True if the marker is present. """ + + own_mark_names = attr.ib() + + @classmethod + def from_item(cls, item): + mark_names = {mark.name for mark in item.iter_markers()} + return cls(mark_names) + + def __getitem__(self, name): + return name in self.own_mark_names + + +class KeywordMapping(object): + """Provides a local mapping for keywords. + Given a list of names, map any substring of one of these names to True. + """ + + def __init__(self, names): + self._names = names + + @classmethod + def from_item(cls, item): + mapped_names = set() + + # Add the names of the current item and any parent items + import pytest + + for item in item.listchain(): + if not isinstance(item, pytest.Instance): + mapped_names.add(item.name) + + # Add the names added as extra keywords to current or parent items + for name in item.listextrakeywords(): + mapped_names.add(name) + + # Add the names attached to the current function through direct assignment + if hasattr(item, "function"): + for name in item.function.__dict__: + mapped_names.add(name) + + return cls(mapped_names) + + def __getitem__(self, subname): + for name in self._names: + if subname in name: + return True + return False + + +python_keywords_allowed_list = ["or", "and", "not"] + + +def matchmark(colitem, markexpr): + """Tries to match on any marker names, attached to the given colitem.""" + try: + return eval(markexpr, {}, MarkMapping.from_item(colitem)) + except SyntaxError as e: + raise SyntaxError(str(e) + "\nMarker expression must be valid Python!") + + +def matchkeyword(colitem, keywordexpr): + """Tries to match given keyword expression to given collector item. + + Will match on the name of colitem, including the names of its parents. + Only matches names of items which are either a :class:`Class` or a + :class:`Function`. 
+ Additionally, matches on names in the 'extra_keyword_matches' set of + any item, as well as names directly assigned to test functions. + """ + mapping = KeywordMapping.from_item(colitem) + if " " not in keywordexpr: + # special case to allow for simple "-k pass" and "-k 1.3" + return mapping[keywordexpr] + elif keywordexpr.startswith("not ") and " " not in keywordexpr[4:]: + return not mapping[keywordexpr[4:]] + for kwd in keywordexpr.split(): + if keyword.iskeyword(kwd) and kwd not in python_keywords_allowed_list: + raise UsageError( + "Python keyword '{}' not accepted in expressions passed to '-k'".format( + kwd + ) + ) + try: + return eval(keywordexpr, {}, mapping) + except SyntaxError: + raise UsageError("Wrong expression passed to '-k': {}".format(keywordexpr)) diff --git a/venv/Lib/site-packages/_pytest/mark/structures.py b/venv/Lib/site-packages/_pytest/mark/structures.py new file mode 100644 index 00000000..32822c2b --- /dev/null +++ b/venv/Lib/site-packages/_pytest/mark/structures.py @@ -0,0 +1,468 @@ +import inspect +import warnings +from collections import namedtuple +from functools import reduce +from operator import attrgetter + +import attr + +from _pytest.outcomes import fail +from ..deprecated import MARK_PARAMETERSET_UNPACKING, MARK_INFO_ATTRIBUTE +from ..compat import NOTSET, getfslineno, MappingMixin +from six.moves import map + + +EMPTY_PARAMETERSET_OPTION = "empty_parameter_set_mark" + + +def alias(name, warning=None): + getter = attrgetter(name) + + def warned(self): + warnings.warn(warning, stacklevel=2) + return getter(self) + + return property(getter if warning is None else warned, doc="alias for " + name) + + +def istestfunc(func): + return ( + hasattr(func, "__call__") + and getattr(func, "__name__", "") != "" + ) + + +def get_empty_parameterset_mark(config, argnames, func): + from ..nodes import Collector + + requested_mark = config.getini(EMPTY_PARAMETERSET_OPTION) + if requested_mark in ("", None, "skip"): + mark = MARK_GEN.skip + elif requested_mark == "xfail": + mark = MARK_GEN.xfail(run=False) + elif requested_mark == "fail_at_collect": + f_name = func.__name__ + _, lineno = getfslineno(func) + raise Collector.CollectError( + "Empty parameter set in '%s' at line %d" % (f_name, lineno) + ) + else: + raise LookupError(requested_mark) + fs, lineno = getfslineno(func) + reason = "got empty parameter set %r, function %s at %s:%d" % ( + argnames, + func.__name__, + fs, + lineno, + ) + return mark(reason=reason) + + +class ParameterSet(namedtuple("ParameterSet", "values, marks, id")): + @classmethod + def param(cls, *values, **kw): + marks = kw.pop("marks", ()) + if isinstance(marks, MarkDecorator): + marks = (marks,) + else: + assert isinstance(marks, (tuple, list, set)) + + def param_extract_id(id=None): + return id + + id_ = param_extract_id(**kw) + return cls(values, marks, id_) + + @classmethod + def extract_from(cls, parameterset, belonging_definition, legacy_force_tuple=False): + """ + :param parameterset: + a legacy style parameterset that may or may not be a tuple, + and may or may not be wrapped into a mess of mark objects + + :param legacy_force_tuple: + enforce tuple wrapping so single argument tuple values + don't get decomposed and break tests + + :param belonging_definition: the item that we will be extracting the parameters from. 
+ """ + + if isinstance(parameterset, cls): + return parameterset + if not isinstance(parameterset, MarkDecorator) and legacy_force_tuple: + return cls.param(parameterset) + + newmarks = [] + argval = parameterset + while isinstance(argval, MarkDecorator): + newmarks.append( + MarkDecorator(Mark(argval.markname, argval.args[:-1], argval.kwargs)) + ) + argval = argval.args[-1] + assert not isinstance(argval, ParameterSet) + if legacy_force_tuple: + argval = (argval,) + + if newmarks and belonging_definition is not None: + belonging_definition.warn(MARK_PARAMETERSET_UNPACKING) + + return cls(argval, marks=newmarks, id=None) + + @classmethod + def _for_parametrize(cls, argnames, argvalues, func, config, function_definition): + if not isinstance(argnames, (tuple, list)): + argnames = [x.strip() for x in argnames.split(",") if x.strip()] + force_tuple = len(argnames) == 1 + else: + force_tuple = False + parameters = [ + ParameterSet.extract_from( + x, + legacy_force_tuple=force_tuple, + belonging_definition=function_definition, + ) + for x in argvalues + ] + del argvalues + + if parameters: + # check all parameter sets have the correct number of values + for param in parameters: + if len(param.values) != len(argnames): + raise ValueError( + 'In "parametrize" the number of values ({}) must be ' + "equal to the number of names ({})".format( + param.values, argnames + ) + ) + else: + # empty parameter set (likely computed at runtime): create a single + # parameter set with NOSET values, with the "empty parameter set" mark applied to it + mark = get_empty_parameterset_mark(config, argnames, func) + parameters.append( + ParameterSet(values=(NOTSET,) * len(argnames), marks=[mark], id=None) + ) + return argnames, parameters + + +@attr.s(frozen=True) +class Mark(object): + #: name of the mark + name = attr.ib(type=str) + #: positional arguments of the mark decorator + args = attr.ib() # type: List[object] + #: keyword arguments of the mark decorator + kwargs = attr.ib() # type: Dict[str, object] + + def combined_with(self, other): + """ + :param other: the mark to combine with + :type other: Mark + :rtype: Mark + + combines by appending aargs and merging the mappings + """ + assert self.name == other.name + return Mark( + self.name, self.args + other.args, dict(self.kwargs, **other.kwargs) + ) + + +@attr.s +class MarkDecorator(object): + """ A decorator for test functions and test classes. When applied + it will create :class:`MarkInfo` objects which may be + :ref:`retrieved by hooks as item keywords `. + MarkDecorator instances are often created like this:: + + mark1 = pytest.mark.NAME # simple MarkDecorator + mark2 = pytest.mark.NAME(name1=value) # parametrized MarkDecorator + + and can then be applied as decorators to test functions:: + + @mark2 + def test_function(): + pass + + When a MarkDecorator instance is called it does the following: + 1. If called with a single class as its only positional argument and no + additional keyword arguments, it attaches itself to the class so it + gets applied automatically to all test cases found in that class. + 2. If called with a single function as its only positional argument and + no additional keyword arguments, it attaches a MarkInfo object to the + function, containing all the arguments already stored internally in + the MarkDecorator. + 3. When called in any other case, it performs a 'fake construction' call, + i.e. it returns a new MarkDecorator instance with the original + MarkDecorator's content updated with the arguments passed to this + call. 
+ + Note: The rules above prevent MarkDecorator objects from storing only a + single function or class reference as their positional argument with no + additional keyword or positional arguments. + + """ + + mark = attr.ib(validator=attr.validators.instance_of(Mark)) + + name = alias("mark.name") + args = alias("mark.args") + kwargs = alias("mark.kwargs") + + @property + def markname(self): + return self.name # for backward-compat (2.4.1 had this attr) + + def __eq__(self, other): + return self.mark == other.mark if isinstance(other, MarkDecorator) else False + + def __repr__(self): + return "" % (self.mark,) + + def with_args(self, *args, **kwargs): + """ return a MarkDecorator with extra arguments added + + unlike call this can be used even if the sole argument is a callable/class + + :return: MarkDecorator + """ + + mark = Mark(self.name, args, kwargs) + return self.__class__(self.mark.combined_with(mark)) + + def __call__(self, *args, **kwargs): + """ if passed a single callable argument: decorate it with mark info. + otherwise add *args/**kwargs in-place to mark information. """ + if args and not kwargs: + func = args[0] + is_class = inspect.isclass(func) + if len(args) == 1 and (istestfunc(func) or is_class): + if is_class: + store_mark(func, self.mark) + else: + store_legacy_markinfo(func, self.mark) + store_mark(func, self.mark) + return func + return self.with_args(*args, **kwargs) + + +def get_unpacked_marks(obj): + """ + obtain the unpacked marks that are stored on an object + """ + mark_list = getattr(obj, "pytestmark", []) + if not isinstance(mark_list, list): + mark_list = [mark_list] + return normalize_mark_list(mark_list) + + +def normalize_mark_list(mark_list): + """ + normalizes marker decorating helpers to mark objects + + :type mark_list: List[Union[Mark, Markdecorator]] + :rtype: List[Mark] + """ + return [getattr(mark, "mark", mark) for mark in mark_list] # unpack MarkDecorator + + +def store_mark(obj, mark): + """store a Mark on an object + this is used to implement the Mark declarations/decorators correctly + """ + assert isinstance(mark, Mark), mark + # always reassign name to avoid updating pytestmark + # in a reference that was only borrowed + obj.pytestmark = get_unpacked_marks(obj) + [mark] + + +def store_legacy_markinfo(func, mark): + """create the legacy MarkInfo objects and put them onto the function + """ + if not isinstance(mark, Mark): + raise TypeError("got {mark!r} instead of a Mark".format(mark=mark)) + holder = getattr(func, mark.name, None) + if holder is None: + holder = MarkInfo.for_mark(mark) + setattr(func, mark.name, holder) + elif isinstance(holder, MarkInfo): + holder.add_mark(mark) + + +def transfer_markers(funcobj, cls, mod): + """ + this function transfers class level markers and module level markers + into function level markinfo objects + + this is the main reason why marks are so broken + the resolution will involve phasing out function level MarkInfo objects + + """ + for obj in (cls, mod): + for mark in get_unpacked_marks(obj): + if not _marked(funcobj, mark): + store_legacy_markinfo(funcobj, mark) + + +def _marked(func, mark): + """ Returns True if :func: is already marked with :mark:, False otherwise. + This can happen if marker is applied to class and the test file is + invoked more than once. 
+ """ + try: + func_mark = getattr(func, getattr(mark, "combined", mark).name) + except AttributeError: + return False + return any(mark == info.combined for info in func_mark) + + +@attr.s(repr=False) +class MarkInfo(object): + """ Marking object created by :class:`MarkDecorator` instances. """ + + _marks = attr.ib(converter=list) + + @_marks.validator + def validate_marks(self, attribute, value): + for item in value: + if not isinstance(item, Mark): + raise ValueError( + "MarkInfo expects Mark instances, got {!r} ({!r})".format( + item, type(item) + ) + ) + + combined = attr.ib( + repr=False, + default=attr.Factory( + lambda self: reduce(Mark.combined_with, self._marks), takes_self=True + ), + ) + + name = alias("combined.name", warning=MARK_INFO_ATTRIBUTE) + args = alias("combined.args", warning=MARK_INFO_ATTRIBUTE) + kwargs = alias("combined.kwargs", warning=MARK_INFO_ATTRIBUTE) + + @classmethod + def for_mark(cls, mark): + return cls([mark]) + + def __repr__(self): + return "".format(self.combined) + + def add_mark(self, mark): + """ add a MarkInfo with the given args and kwargs. """ + self._marks.append(mark) + self.combined = self.combined.combined_with(mark) + + def __iter__(self): + """ yield MarkInfo objects each relating to a marking-call. """ + return map(MarkInfo.for_mark, self._marks) + + +class MarkGenerator(object): + """ Factory for :class:`MarkDecorator` objects - exposed as + a ``pytest.mark`` singleton instance. Example:: + + import pytest + @pytest.mark.slowtest + def test_function(): + pass + + will set a 'slowtest' :class:`MarkInfo` object + on the ``test_function`` object. """ + + _config = None + + def __getattr__(self, name): + if name[0] == "_": + raise AttributeError("Marker name must NOT start with underscore") + if self._config is not None: + self._check(name) + return MarkDecorator(Mark(name, (), {})) + + def _check(self, name): + try: + if name in self._markers: + return + except AttributeError: + pass + self._markers = values = set() + for line in self._config.getini("markers"): + marker = line.split(":", 1)[0] + marker = marker.rstrip() + x = marker.split("(", 1)[0] + values.add(x) + if name not in self._markers: + fail("{!r} not a registered marker".format(name), pytrace=False) + + +MARK_GEN = MarkGenerator() + + +class NodeKeywords(MappingMixin): + def __init__(self, node): + self.node = node + self.parent = node.parent + self._markers = {node.name: True} + + def __getitem__(self, key): + try: + return self._markers[key] + except KeyError: + if self.parent is None: + raise + return self.parent.keywords[key] + + def __setitem__(self, key, value): + self._markers[key] = value + + def __delitem__(self, key): + raise ValueError("cannot delete key in keywords dict") + + def __iter__(self): + seen = self._seen() + return iter(seen) + + def _seen(self): + seen = set(self._markers) + if self.parent is not None: + seen.update(self.parent.keywords) + return seen + + def __len__(self): + return len(self._seen()) + + def __repr__(self): + return "" % (self.node,) + + +@attr.s(cmp=False, hash=False) +class NodeMarkers(object): + """ + internal strucutre for storing marks belongong to a node + + ..warning:: + + unstable api + + """ + + own_markers = attr.ib(default=attr.Factory(list)) + + def update(self, add_markers): + """update the own markers + """ + self.own_markers.extend(add_markers) + + def find(self, name): + """ + find markers in own nodes or parent nodes + needs a better place + """ + for mark in self.own_markers: + if mark.name == name: + yield mark + + 
def __iter__(self): + return iter(self.own_markers) diff --git a/venv/Lib/site-packages/_pytest/monkeypatch.py b/venv/Lib/site-packages/_pytest/monkeypatch.py new file mode 100644 index 00000000..4bd6b607 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/monkeypatch.py @@ -0,0 +1,304 @@ +""" monkeypatching and mocking functionality. """ +from __future__ import absolute_import, division, print_function + +import os +import sys +import re +import warnings +from contextlib import contextmanager + +import six + +import pytest +from _pytest.fixtures import fixture + +RE_IMPORT_ERROR_NAME = re.compile("^No module named (.*)$") + + +@fixture +def monkeypatch(): + """The returned ``monkeypatch`` fixture provides these + helper methods to modify objects, dictionaries or os.environ:: + + monkeypatch.setattr(obj, name, value, raising=True) + monkeypatch.delattr(obj, name, raising=True) + monkeypatch.setitem(mapping, name, value) + monkeypatch.delitem(obj, name, raising=True) + monkeypatch.setenv(name, value, prepend=False) + monkeypatch.delenv(name, raising=True) + monkeypatch.syspath_prepend(path) + monkeypatch.chdir(path) + + All modifications will be undone after the requesting + test function or fixture has finished. The ``raising`` + parameter determines if a KeyError or AttributeError + will be raised if the set/deletion operation has no target. + """ + mpatch = MonkeyPatch() + yield mpatch + mpatch.undo() + + +def resolve(name): + # simplified from zope.dottedname + parts = name.split(".") + + used = parts.pop(0) + found = __import__(used) + for part in parts: + used += "." + part + try: + found = getattr(found, part) + except AttributeError: + pass + else: + continue + # we use explicit un-nesting of the handling block in order + # to avoid nested exceptions on python 3 + try: + __import__(used) + except ImportError as ex: + # str is used for py2 vs py3 + expected = str(ex).split()[-1] + if expected == used: + raise + else: + raise ImportError("import error in %s: %s" % (used, ex)) + found = annotated_getattr(found, part, used) + return found + + +def annotated_getattr(obj, name, ann): + try: + obj = getattr(obj, name) + except AttributeError: + raise AttributeError( + "%r object at %s has no attribute %r" % (type(obj).__name__, ann, name) + ) + return obj + + +def derive_importpath(import_path, raising): + if not isinstance(import_path, six.string_types) or "." not in import_path: + raise TypeError("must be absolute import path string, not %r" % (import_path,)) + module, attr = import_path.rsplit(".", 1) + target = resolve(module) + if raising: + annotated_getattr(target, attr, ann=module) + return attr, target + + +class Notset(object): + def __repr__(self): + return "" + + +notset = Notset() + + +class MonkeyPatch(object): + """ Object returned by the ``monkeypatch`` fixture keeping a record of setattr/item/env/syspath changes. + """ + + def __init__(self): + self._setattr = [] + self._setitem = [] + self._cwd = None + self._savesyspath = None + + @contextmanager + def context(self): + """ + Context manager that returns a new :class:`MonkeyPatch` object which + undoes any patching done inside the ``with`` block upon exit: + + .. code-block:: python + + import functools + def test_partial(monkeypatch): + with monkeypatch.context() as m: + m.setattr(functools, "partial", 3) + + Useful in situations where it is desired to undo some patches before the test ends, + such as mocking ``stdlib`` functions that might break pytest itself if mocked (for examples + of this see `#3290 `_. 
+ """ + m = MonkeyPatch() + try: + yield m + finally: + m.undo() + + def setattr(self, target, name, value=notset, raising=True): + """ Set attribute value on target, memorizing the old value. + By default raise AttributeError if the attribute did not exist. + + For convenience you can specify a string as ``target`` which + will be interpreted as a dotted import path, with the last part + being the attribute name. Example: + ``monkeypatch.setattr("os.getcwd", lambda: "/")`` + would set the ``getcwd`` function of the ``os`` module. + + The ``raising`` value determines if the setattr should fail + if the attribute is not already present (defaults to True + which means it will raise). + """ + __tracebackhide__ = True + import inspect + + if value is notset: + if not isinstance(target, six.string_types): + raise TypeError( + "use setattr(target, name, value) or " + "setattr(target, value) with target being a dotted " + "import string" + ) + value = name + name, target = derive_importpath(target, raising) + + oldval = getattr(target, name, notset) + if raising and oldval is notset: + raise AttributeError("%r has no attribute %r" % (target, name)) + + # avoid class descriptors like staticmethod/classmethod + if inspect.isclass(target): + oldval = target.__dict__.get(name, notset) + self._setattr.append((target, name, oldval)) + setattr(target, name, value) + + def delattr(self, target, name=notset, raising=True): + """ Delete attribute ``name`` from ``target``, by default raise + AttributeError it the attribute did not previously exist. + + If no ``name`` is specified and ``target`` is a string + it will be interpreted as a dotted import path with the + last part being the attribute name. + + If ``raising`` is set to False, no exception will be raised if the + attribute is missing. + """ + __tracebackhide__ = True + if name is notset: + if not isinstance(target, six.string_types): + raise TypeError( + "use delattr(target, name) or " + "delattr(target) with target being a dotted " + "import string" + ) + name, target = derive_importpath(target, raising) + + if not hasattr(target, name): + if raising: + raise AttributeError(name) + else: + self._setattr.append((target, name, getattr(target, name, notset))) + delattr(target, name) + + def setitem(self, dic, name, value): + """ Set dictionary entry ``name`` to value. """ + self._setitem.append((dic, name, dic.get(name, notset))) + dic[name] = value + + def delitem(self, dic, name, raising=True): + """ Delete ``name`` from dict. Raise KeyError if it doesn't exist. + + If ``raising`` is set to False, no exception will be raised if the + key is missing. + """ + if name not in dic: + if raising: + raise KeyError(name) + else: + self._setitem.append((dic, name, dic.get(name, notset))) + del dic[name] + + def _warn_if_env_name_is_not_str(self, name): + """On Python 2, warn if the given environment variable name is not a native str (#4056)""" + if six.PY2 and not isinstance(name, str): + warnings.warn( + pytest.PytestWarning( + "Environment variable name {!r} should be str".format(name) + ) + ) + + def setenv(self, name, value, prepend=None): + """ Set environment variable ``name`` to ``value``. 
If ``prepend`` + is a character, read the current environment variable value + and prepend the ``value`` adjoined with the ``prepend`` character.""" + if not isinstance(value, str): + warnings.warn( + pytest.PytestWarning( + "Environment variable value {!r} should be str, converted to str implicitly".format( + value + ) + ) + ) + value = str(value) + if prepend and name in os.environ: + value = value + prepend + os.environ[name] + self._warn_if_env_name_is_not_str(name) + self.setitem(os.environ, name, value) + + def delenv(self, name, raising=True): + """ Delete ``name`` from the environment. Raise KeyError if it does + not exist. + + If ``raising`` is set to False, no exception will be raised if the + environment variable is missing. + """ + self._warn_if_env_name_is_not_str(name) + self.delitem(os.environ, name, raising=raising) + + def syspath_prepend(self, path): + """ Prepend ``path`` to ``sys.path`` list of import locations. """ + if self._savesyspath is None: + self._savesyspath = sys.path[:] + sys.path.insert(0, str(path)) + + def chdir(self, path): + """ Change the current working directory to the specified path. + Path can be a string or a py.path.local object. + """ + if self._cwd is None: + self._cwd = os.getcwd() + if hasattr(path, "chdir"): + path.chdir() + else: + os.chdir(path) + + def undo(self): + """ Undo previous changes. This call consumes the + undo stack. Calling it a second time has no effect unless + you do more monkeypatching after the undo call. + + There is generally no need to call `undo()`, since it is + called automatically during tear-down. + + Note that the same `monkeypatch` fixture is used across a + single test function invocation. If `monkeypatch` is used both by + the test function itself and one of the test fixtures, + calling `undo()` will undo all of the changes made in + both functions. + """ + for obj, name, value in reversed(self._setattr): + if value is not notset: + setattr(obj, name, value) + else: + delattr(obj, name) + self._setattr[:] = [] + for dictionary, name, value in reversed(self._setitem): + if value is notset: + try: + del dictionary[name] + except KeyError: + pass # was already deleted, so we have the desired state + else: + dictionary[name] = value + self._setitem[:] = [] + if self._savesyspath is not None: + sys.path[:] = self._savesyspath + self._savesyspath = None + + if self._cwd is not None: + os.chdir(self._cwd) + self._cwd = None diff --git a/venv/Lib/site-packages/_pytest/nodes.py b/venv/Lib/site-packages/_pytest/nodes.py new file mode 100644 index 00000000..d80895ab --- /dev/null +++ b/venv/Lib/site-packages/_pytest/nodes.py @@ -0,0 +1,533 @@ +from __future__ import absolute_import, division, print_function +import os +import warnings + +import six +import py +import attr + +import _pytest +import _pytest._code +from _pytest.compat import getfslineno +from _pytest.outcomes import fail + +from _pytest.mark.structures import NodeKeywords, MarkInfo + +SEP = "/" + +tracebackcutdir = py.path.local(_pytest.__file__).dirpath() + + +def _splitnode(nodeid): + """Split a nodeid into constituent 'parts'. + + Node IDs are strings, and can be things like: + '' + 'testing/code' + 'testing/code/test_excinfo.py' + 'testing/code/test_excinfo.py::TestFormattedExcinfo::()' + + Return values are lists e.g. 
+ [] + ['testing', 'code'] + ['testing', 'code', 'test_excinfo.py'] + ['testing', 'code', 'test_excinfo.py', 'TestFormattedExcinfo', '()'] + """ + if nodeid == "": + # If there is no root node at all, return an empty list so the caller's logic can remain sane + return [] + parts = nodeid.split(SEP) + # Replace single last element 'test_foo.py::Bar::()' with multiple elements 'test_foo.py', 'Bar', '()' + parts[-1:] = parts[-1].split("::") + return parts + + +def ischildnode(baseid, nodeid): + """Return True if the nodeid is a child node of the baseid. + + E.g. 'foo/bar::Baz::()' is a child of 'foo', 'foo/bar' and 'foo/bar::Baz', but not of 'foo/blorp' + """ + base_parts = _splitnode(baseid) + node_parts = _splitnode(nodeid) + if len(node_parts) < len(base_parts): + return False + return node_parts[: len(base_parts)] == base_parts + + +@attr.s +class _CompatProperty(object): + name = attr.ib() + + def __get__(self, obj, owner): + if obj is None: + return self + + from _pytest.deprecated import COMPAT_PROPERTY + + warnings.warn( + COMPAT_PROPERTY.format(name=self.name, owner=owner.__name__), stacklevel=2 + ) + return getattr(__import__("pytest"), self.name) + + +class Node(object): + """ base class for Collector and Item the test collection tree. + Collector subclasses have children, Items are terminal nodes.""" + + def __init__( + self, name, parent=None, config=None, session=None, fspath=None, nodeid=None + ): + #: a unique name within the scope of the parent node + self.name = name + + #: the parent collector node. + self.parent = parent + + #: the pytest config object + self.config = config or parent.config + + #: the session this node is part of + self.session = session or parent.session + + #: filesystem path where this node was collected from (can be None) + self.fspath = fspath or getattr(parent, "fspath", None) + + #: keywords/markers collected from all scopes + self.keywords = NodeKeywords(self) + + #: the marker objects belonging to this node + self.own_markers = [] + + #: allow adding of extra keywords to use for matching + self.extra_keyword_matches = set() + + # used for storing artificial fixturedefs for direct parametrization + self._name2pseudofixturedef = {} + + if nodeid is not None: + self._nodeid = nodeid + else: + assert parent is not None + self._nodeid = self.parent.nodeid + "::" + self.name + + @property + def ihook(self): + """ fspath sensitive hook proxy used to call pytest hooks""" + return self.session.gethookproxy(self.fspath) + + Module = _CompatProperty("Module") + Class = _CompatProperty("Class") + Instance = _CompatProperty("Instance") + Function = _CompatProperty("Function") + File = _CompatProperty("File") + Item = _CompatProperty("Item") + + def _getcustomclass(self, name): + maybe_compatprop = getattr(type(self), name) + if isinstance(maybe_compatprop, _CompatProperty): + return getattr(__import__("pytest"), name) + else: + from _pytest.deprecated import CUSTOM_CLASS + + cls = getattr(self, name) + self.warn(CUSTOM_CLASS.format(name=name, type_name=type(self).__name__)) + return cls + + def __repr__(self): + return "<%s %r>" % (self.__class__.__name__, getattr(self, "name", None)) + + def warn(self, _code_or_warning=None, message=None, code=None): + """Issue a warning for this item. + + Warnings will be displayed after the test session, unless explicitly suppressed. + + This can be called in two forms: + + **Warning instance** + + This was introduced in pytest 3.8 and uses the standard warning mechanism to issue warnings. + + .. 
code-block:: python + + node.warn(PytestWarning("some message")) + + The warning instance must be a subclass of :class:`pytest.PytestWarning`. + + **code/message (deprecated)** + + This form was used in pytest prior to 3.8 and is considered deprecated. Using this form will emit another + warning about the deprecation: + + .. code-block:: python + + node.warn("CI", "some message") + + :param Union[Warning,str] _code_or_warning: + warning instance or warning code (legacy). This parameter receives an underscore for backward + compatibility with the legacy code/message form, and will be replaced for something + more usual when the legacy form is removed. + + :param Union[str,None] message: message to display when called in the legacy form. + :param str code: code for the warning, in legacy form when using keyword arguments. + :return: + """ + if message is None: + if _code_or_warning is None: + raise ValueError("code_or_warning must be given") + self._std_warn(_code_or_warning) + else: + if _code_or_warning and code: + raise ValueError( + "code_or_warning and code cannot both be passed to this function" + ) + code = _code_or_warning or code + self._legacy_warn(code, message) + + def _legacy_warn(self, code, message): + """ + .. deprecated:: 3.8 + + Use :meth:`Node.std_warn <_pytest.nodes.Node.std_warn>` instead. + + Generate a warning with the given code and message for this item. + """ + from _pytest.deprecated import NODE_WARN + + self._std_warn(NODE_WARN) + + assert isinstance(code, str) + fslocation = get_fslocation_from_item(self) + self.ihook.pytest_logwarning.call_historic( + kwargs=dict( + code=code, message=message, nodeid=self.nodeid, fslocation=fslocation + ) + ) + + def _std_warn(self, warning): + """Issue a warning for this item. + + Warnings will be displayed after the test session, unless explicitly suppressed + + :param Warning warning: the warning instance to issue. Must be a subclass of PytestWarning. + + :raise ValueError: if ``warning`` instance is not a subclass of PytestWarning. + """ + from _pytest.warning_types import PytestWarning + + if not isinstance(warning, PytestWarning): + raise ValueError( + "warning must be an instance of PytestWarning or subclass, got {!r}".format( + warning + ) + ) + path, lineno = get_fslocation_from_item(self) + warnings.warn_explicit( + warning, + category=None, + filename=str(path), + lineno=lineno + 1 if lineno is not None else None, + ) + + # methods for ordering nodes + @property + def nodeid(self): + """ a ::-separated string denoting its collection tree address. """ + return self._nodeid + + def __hash__(self): + return hash(self.nodeid) + + def setup(self): + pass + + def teardown(self): + pass + + def listchain(self): + """ return list of all parent collectors up to self, + starting from root of collection tree. """ + chain = [] + item = self + while item is not None: + chain.append(item) + item = item.parent + chain.reverse() + return chain + + def add_marker(self, marker, append=True): + """dynamically add a marker object to the node. + + :type marker: ``str`` or ``pytest.mark.*`` object + :param marker: + ``append=True`` whether to append the marker, + if ``False`` insert at position ``0``. 
+ """ + from _pytest.mark import MarkDecorator, MARK_GEN + + if isinstance(marker, six.string_types): + marker = getattr(MARK_GEN, marker) + elif not isinstance(marker, MarkDecorator): + raise ValueError("is not a string or pytest.mark.* Marker") + self.keywords[marker.name] = marker + if append: + self.own_markers.append(marker.mark) + else: + self.own_markers.insert(0, marker.mark) + + def iter_markers(self, name=None): + """ + :param name: if given, filter the results by the name attribute + + iterate over all markers of the node + """ + return (x[1] for x in self.iter_markers_with_node(name=name)) + + def iter_markers_with_node(self, name=None): + """ + :param name: if given, filter the results by the name attribute + + iterate over all markers of the node + returns sequence of tuples (node, mark) + """ + for node in reversed(self.listchain()): + for mark in node.own_markers: + if name is None or getattr(mark, "name", None) == name: + yield node, mark + + def get_closest_marker(self, name, default=None): + """return the first marker matching the name, from closest (for example function) to farther level (for example + module level). + + :param default: fallback return value of no marker was found + :param name: name to filter by + """ + return next(self.iter_markers(name=name), default) + + def get_marker(self, name): + """ get a marker object from this node or None if + the node doesn't have a marker with that name. + + .. deprecated:: 3.6 + This function has been deprecated in favor of + :meth:`Node.get_closest_marker <_pytest.nodes.Node.get_closest_marker>` and + :meth:`Node.iter_markers <_pytest.nodes.Node.iter_markers>`, see :ref:`update marker code` + for more details. + """ + markers = list(self.iter_markers(name=name)) + if markers: + return MarkInfo(markers) + + def listextrakeywords(self): + """ Return a set of all extra keywords in self and any parents.""" + extra_keywords = set() + for item in self.listchain(): + extra_keywords.update(item.extra_keyword_matches) + return extra_keywords + + def listnames(self): + return [x.name for x in self.listchain()] + + def addfinalizer(self, fin): + """ register a function to be called when this node is finalized. + + This method can only be called when this node is active + in a setup chain, for example during self.setup(). + """ + self.session._setupstate.addfinalizer(fin, self) + + def getparent(self, cls): + """ get the next parent node (including ourself) + which is an instance of the given class""" + current = self + while current and not isinstance(current, cls): + current = current.parent + return current + + def _prunetraceback(self, excinfo): + pass + + def _repr_failure_py(self, excinfo, style=None): + if excinfo.errisinstance(fail.Exception): + if not excinfo.value.pytrace: + return six.text_type(excinfo.value) + fm = self.session._fixturemanager + if excinfo.errisinstance(fm.FixtureLookupError): + return excinfo.value.formatrepr() + tbfilter = True + if self.config.option.fulltrace: + style = "long" + else: + tb = _pytest._code.Traceback([excinfo.traceback[-1]]) + self._prunetraceback(excinfo) + if len(excinfo.traceback) == 0: + excinfo.traceback = tb + tbfilter = False # prunetraceback already does it + if style == "auto": + style = "long" + # XXX should excinfo.getrepr record all data and toterminal() process it? 
+ if style is None: + if self.config.option.tbstyle == "short": + style = "short" + else: + style = "long" + + if self.config.option.verbose > 1: + truncate_locals = False + else: + truncate_locals = True + + try: + os.getcwd() + abspath = False + except OSError: + abspath = True + + return excinfo.getrepr( + funcargs=True, + abspath=abspath, + showlocals=self.config.option.showlocals, + style=style, + tbfilter=tbfilter, + truncate_locals=truncate_locals, + ) + + repr_failure = _repr_failure_py + + +def get_fslocation_from_item(item): + """Tries to extract the actual location from an item, depending on available attributes: + + * "fslocation": a pair (path, lineno) + * "obj": a Python object that the item wraps. + * "fspath": just a path + + :rtype: a tuple of (str|LocalPath, int) with filename and line number. + """ + result = getattr(item, "location", None) + if result is not None: + return result[:2] + obj = getattr(item, "obj", None) + if obj is not None: + return getfslineno(obj) + return getattr(item, "fspath", "unknown location"), -1 + + +class Collector(Node): + """ Collector instances create children through collect() + and thus iteratively build a tree. + """ + + class CollectError(Exception): + """ an error during collection, contains a custom message. """ + + def collect(self): + """ returns a list of children (items and collectors) + for this collection node. + """ + raise NotImplementedError("abstract") + + def repr_failure(self, excinfo): + """ represent a collection failure. """ + if excinfo.errisinstance(self.CollectError): + exc = excinfo.value + return str(exc.args[0]) + return self._repr_failure_py(excinfo, style="short") + + def _prunetraceback(self, excinfo): + if hasattr(self, "fspath"): + traceback = excinfo.traceback + ntraceback = traceback.cut(path=self.fspath) + if ntraceback == traceback: + ntraceback = ntraceback.cut(excludepath=tracebackcutdir) + excinfo.traceback = ntraceback.filter() + + +def _check_initialpaths_for_relpath(session, fspath): + for initial_path in session._initialpaths: + if fspath.common(initial_path) == initial_path: + return fspath.relto(initial_path.dirname) + + +class FSCollector(Collector): + def __init__(self, fspath, parent=None, config=None, session=None, nodeid=None): + fspath = py.path.local(fspath) # xxx only for test_resultlog.py? + name = fspath.basename + if parent is not None: + rel = fspath.relto(parent.fspath) + if rel: + name = rel + name = name.replace(os.sep, SEP) + self.fspath = fspath + + session = session or parent.session + + if nodeid is None: + nodeid = self.fspath.relto(session.config.rootdir) + + if not nodeid: + nodeid = _check_initialpaths_for_relpath(session, fspath) + if nodeid and os.sep != SEP: + nodeid = nodeid.replace(os.sep, SEP) + + super(FSCollector, self).__init__( + name, parent, config, session, nodeid=nodeid, fspath=fspath + ) + + +class File(FSCollector): + """ base class for collecting tests from a file. """ + + +class Item(Node): + """ a basic test invocation item. Note that for a single function + there might be multiple test invocation items. + """ + + nextitem = None + + def __init__(self, name, parent=None, config=None, session=None, nodeid=None): + super(Item, self).__init__(name, parent, config, session, nodeid=nodeid) + self._report_sections = [] + + #: user properties is a list of tuples (name, value) that holds user + #: defined properties for this test. 
+ self.user_properties = [] + + def add_report_section(self, when, key, content): + """ + Adds a new report section, similar to what's done internally to add stdout and + stderr captured output:: + + item.add_report_section("call", "stdout", "report section contents") + + :param str when: + One of the possible capture states, ``"setup"``, ``"call"``, ``"teardown"``. + :param str key: + Name of the section, can be customized at will. Pytest uses ``"stdout"`` and + ``"stderr"`` internally. + + :param str content: + The full contents as a string. + """ + if content: + self._report_sections.append((when, key, content)) + + def reportinfo(self): + return self.fspath, None, "" + + @property + def location(self): + try: + return self._location + except AttributeError: + location = self.reportinfo() + # bestrelpath is a quite slow function + cache = self.config.__dict__.setdefault("_bestrelpathcache", {}) + try: + fspath = cache[location[0]] + except KeyError: + fspath = self.session.fspath.bestrelpath(location[0]) + cache[location[0]] = fspath + location = (fspath, location[1], str(location[2])) + self._location = location + return location diff --git a/venv/Lib/site-packages/_pytest/nose.py b/venv/Lib/site-packages/_pytest/nose.py new file mode 100644 index 00000000..bb2e4277 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/nose.py @@ -0,0 +1,72 @@ +""" run test suites written for nose. """ +from __future__ import absolute_import, division, print_function + +import sys + +from _pytest import unittest, runner, python +from _pytest.config import hookimpl + + +def get_skip_exceptions(): + skip_classes = set() + for module_name in ("unittest", "unittest2", "nose"): + mod = sys.modules.get(module_name) + if hasattr(mod, "SkipTest"): + skip_classes.add(mod.SkipTest) + return tuple(skip_classes) + + +def pytest_runtest_makereport(item, call): + if call.excinfo and call.excinfo.errisinstance(get_skip_exceptions()): + # let's substitute the excinfo with a pytest.skip one + call2 = call.__class__(lambda: runner.skip(str(call.excinfo.value)), call.when) + call.excinfo = call2.excinfo + + +@hookimpl(trylast=True) +def pytest_runtest_setup(item): + if is_potential_nosetest(item): + if isinstance(item.parent, python.Generator): + gen = item.parent + if not hasattr(gen, "_nosegensetup"): + call_optional(gen.obj, "setup") + if isinstance(gen.parent, python.Instance): + call_optional(gen.parent.obj, "setup") + gen._nosegensetup = True + if not call_optional(item.obj, "setup"): + # call module level setup if there is no object level one + call_optional(item.parent.obj, "setup") + # XXX this implies we only call teardown when setup worked + item.session._setupstate.addfinalizer((lambda: teardown_nose(item)), item) + + +def teardown_nose(item): + if is_potential_nosetest(item): + if not call_optional(item.obj, "teardown"): + call_optional(item.parent.obj, "teardown") + # if hasattr(item.parent, '_nosegensetup'): + # #call_optional(item._nosegensetup, 'teardown') + # del item.parent._nosegensetup + + +def pytest_make_collect_report(collector): + if isinstance(collector, python.Generator): + call_optional(collector.obj, "setup") + + +def is_potential_nosetest(item): + # extra check needed since we do not do nose style setup/teardown + # on direct unittest style classes + return isinstance(item, python.Function) and not isinstance( + item, unittest.TestCaseFunction + ) + + +def call_optional(obj, name): + method = getattr(obj, name, None) + isfixture = hasattr(method, "_pytestfixturefunction") + if method is not 
None and not isfixture and callable(method): + # If there's any problems allow the exception to raise rather than + # silently ignoring them + method() + return True diff --git a/venv/Lib/site-packages/_pytest/outcomes.py b/venv/Lib/site-packages/_pytest/outcomes.py new file mode 100644 index 00000000..4c795838 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/outcomes.py @@ -0,0 +1,179 @@ +""" +exception classes and constants handling test outcomes +as well as functions creating them +""" +from __future__ import absolute_import, division, print_function +import sys + + +class OutcomeException(BaseException): + """ OutcomeException and its subclass instances indicate and + contain info about test and collection outcomes. + """ + + def __init__(self, msg=None, pytrace=True): + BaseException.__init__(self, msg) + self.msg = msg + self.pytrace = pytrace + + def __repr__(self): + if self.msg: + val = self.msg + if isinstance(val, bytes): + val = val.decode("UTF-8", errors="replace") + return val + return "<%s instance>" % (self.__class__.__name__,) + + __str__ = __repr__ + + +TEST_OUTCOME = (OutcomeException, Exception) + + +class Skipped(OutcomeException): + # XXX hackish: on 3k we fake to live in the builtins + # in order to have Skipped exception printing shorter/nicer + __module__ = "builtins" + + def __init__(self, msg=None, pytrace=True, allow_module_level=False): + OutcomeException.__init__(self, msg=msg, pytrace=pytrace) + self.allow_module_level = allow_module_level + + +class Failed(OutcomeException): + """ raised from an explicit call to pytest.fail() """ + + __module__ = "builtins" + + +class Exit(KeyboardInterrupt): + """ raised for immediate program exits (no tracebacks/summaries)""" + + def __init__(self, msg="unknown reason", returncode=None): + self.msg = msg + self.returncode = returncode + KeyboardInterrupt.__init__(self, msg) + + +# exposed helper methods + + +def exit(msg, returncode=None): + """ + Exit testing process as if KeyboardInterrupt was triggered. + + :param str msg: message to display upon exit. + :param int returncode: return code to be used when exiting pytest. + """ + __tracebackhide__ = True + raise Exit(msg, returncode) + + +exit.Exception = Exit + + +def skip(msg="", **kwargs): + """ + Skip an executing test with the given message. + + This function should be called only during testing (setup, call or teardown) or + during collection by using the ``allow_module_level`` flag. + + :kwarg bool allow_module_level: allows this function to be called at + module level, skipping the rest of the module. Default to False. + + .. note:: + It is better to use the :ref:`pytest.mark.skipif ref` marker when possible to declare a test to be + skipped under certain conditions like mismatching platforms or + dependencies. + """ + __tracebackhide__ = True + allow_module_level = kwargs.pop("allow_module_level", False) + if kwargs: + keys = [k for k in kwargs.keys()] + raise TypeError("unexpected keyword arguments: {}".format(keys)) + raise Skipped(msg=msg, allow_module_level=allow_module_level) + + +skip.Exception = Skipped + + +def fail(msg="", pytrace=True): + """ + Explicitly fail an executing test with the given message. + + :param str msg: the message to show the user as reason for the failure. + :param bool pytrace: if false the msg represents the full failure information and no + python traceback will be reported. 
+ """ + __tracebackhide__ = True + raise Failed(msg=msg, pytrace=pytrace) + + +fail.Exception = Failed + + +class XFailed(fail.Exception): + """ raised from an explicit call to pytest.xfail() """ + + +def xfail(reason=""): + """ + Imperatively xfail an executing test or setup functions with the given reason. + + This function should be called only during testing (setup, call or teardown). + + .. note:: + It is better to use the :ref:`pytest.mark.xfail ref` marker when possible to declare a test to be + xfailed under certain conditions like known bugs or missing features. + """ + __tracebackhide__ = True + raise XFailed(reason) + + +xfail.Exception = XFailed + + +def importorskip(modname, minversion=None): + """ return imported module if it has at least "minversion" as its + __version__ attribute. If no minversion is specified the a skip + is only triggered if the module can not be imported. + """ + import warnings + + __tracebackhide__ = True + compile(modname, "", "eval") # to catch syntaxerrors + should_skip = False + + with warnings.catch_warnings(): + # make sure to ignore ImportWarnings that might happen because + # of existing directories with the same name we're trying to + # import but without a __init__.py file + warnings.simplefilter("ignore") + try: + __import__(modname) + except ImportError: + # Do not raise chained exception here(#1485) + should_skip = True + if should_skip: + raise Skipped("could not import %r" % (modname,), allow_module_level=True) + mod = sys.modules[modname] + if minversion is None: + return mod + verattr = getattr(mod, "__version__", None) + if minversion is not None: + try: + from pkg_resources import parse_version as pv + except ImportError: + raise Skipped( + "we have a required version for %r but can not import " + "pkg_resources to parse version strings." % (modname,), + allow_module_level=True, + ) + if verattr is None or pv(verattr) < pv(minversion): + raise Skipped( + "module %r has __version__ %r, required is: %r" + % (modname, verattr, minversion), + allow_module_level=True, + ) + return mod diff --git a/venv/Lib/site-packages/_pytest/pastebin.py b/venv/Lib/site-packages/_pytest/pastebin.py new file mode 100644 index 00000000..6af202d1 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/pastebin.py @@ -0,0 +1,109 @@ +""" submit failure or test session information to a pastebin service. 
""" +from __future__ import absolute_import, division, print_function + +import pytest +import six +import sys +import tempfile + + +def pytest_addoption(parser): + group = parser.getgroup("terminal reporting") + group._addoption( + "--pastebin", + metavar="mode", + action="store", + dest="pastebin", + default=None, + choices=["failed", "all"], + help="send failed|all info to bpaste.net pastebin service.", + ) + + +@pytest.hookimpl(trylast=True) +def pytest_configure(config): + if config.option.pastebin == "all": + tr = config.pluginmanager.getplugin("terminalreporter") + # if no terminal reporter plugin is present, nothing we can do here; + # this can happen when this function executes in a slave node + # when using pytest-xdist, for example + if tr is not None: + # pastebin file will be utf-8 encoded binary file + config._pastebinfile = tempfile.TemporaryFile("w+b") + oldwrite = tr._tw.write + + def tee_write(s, **kwargs): + oldwrite(s, **kwargs) + if isinstance(s, six.text_type): + s = s.encode("utf-8") + config._pastebinfile.write(s) + + tr._tw.write = tee_write + + +def pytest_unconfigure(config): + if hasattr(config, "_pastebinfile"): + # get terminal contents and delete file + config._pastebinfile.seek(0) + sessionlog = config._pastebinfile.read() + config._pastebinfile.close() + del config._pastebinfile + # undo our patching in the terminal reporter + tr = config.pluginmanager.getplugin("terminalreporter") + del tr._tw.__dict__["write"] + # write summary + tr.write_sep("=", "Sending information to Paste Service") + pastebinurl = create_new_paste(sessionlog) + tr.write_line("pastebin session-log: %s\n" % pastebinurl) + + +def create_new_paste(contents): + """ + Creates a new paste using bpaste.net service. + + :contents: paste contents as utf-8 encoded bytes + :returns: url to the pasted contents + """ + import re + + if sys.version_info < (3, 0): + from urllib import urlopen, urlencode + else: + from urllib.request import urlopen + from urllib.parse import urlencode + + params = { + "code": contents, + "lexer": "python3" if sys.version_info[0] == 3 else "python", + "expiry": "1week", + } + url = "https://bpaste.net" + response = urlopen(url, data=urlencode(params).encode("ascii")).read() + m = re.search(r'href="/raw/(\w+)"', response.decode("utf-8")) + if m: + return "%s/show/%s" % (url, m.group(1)) + else: + return "bad response: " + response + + +def pytest_terminal_summary(terminalreporter): + import _pytest.config + + if terminalreporter.config.option.pastebin != "failed": + return + tr = terminalreporter + if "failed" in tr.stats: + terminalreporter.write_sep("=", "Sending information to Paste Service") + for rep in terminalreporter.stats.get("failed"): + try: + msg = rep.longrepr.reprtraceback.reprentries[-1].reprfileloc + except AttributeError: + msg = tr._getfailureheadline(rep) + tw = _pytest.config.create_terminal_writer( + terminalreporter.config, stringio=True + ) + rep.toterminal(tw) + s = tw.stringio.getvalue() + assert len(s) + pastebinurl = create_new_paste(s) + tr.write_line("%s --> %s" % (msg, pastebinurl)) diff --git a/venv/Lib/site-packages/_pytest/pathlib.py b/venv/Lib/site-packages/_pytest/pathlib.py new file mode 100644 index 00000000..081fce90 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/pathlib.py @@ -0,0 +1,304 @@ + +import os +import errno +import atexit +import operator +import six +import sys +from functools import reduce +import uuid +from six.moves import map +import itertools +import shutil +from os.path import expanduser, expandvars, isabs, 
sep +from posixpath import sep as posix_sep +import fnmatch +import stat + +from .compat import PY36 + + +if PY36: + from pathlib import Path, PurePath +else: + from pathlib2 import Path, PurePath + +__all__ = ["Path", "PurePath"] + + +LOCK_TIMEOUT = 60 * 60 * 3 + +get_lock_path = operator.methodcaller("joinpath", ".lock") + + +def ensure_reset_dir(path): + """ + ensures the given path is a empty directory + """ + if path.exists(): + rmtree(path, force=True) + path.mkdir() + + +def _shutil_rmtree_remove_writable(func, fspath, _): + "Clear the readonly bit and reattempt the removal" + os.chmod(fspath, stat.S_IWRITE) + func(fspath) + + +def rmtree(path, force=False): + if force: + # ignore_errors leaves dead folders around + # python needs a rm -rf as a followup + # the trick with _shutil_rmtree_remove_writable is unreliable + shutil.rmtree(str(path), ignore_errors=True) + else: + shutil.rmtree(str(path)) + + +def find_prefixed(root, prefix): + """finds all elements in root that begin with the prefix, case insensitive""" + l_prefix = prefix.lower() + for x in root.iterdir(): + if x.name.lower().startswith(l_prefix): + yield x + + +def extract_suffixes(iter, prefix): + """ + :param iter: iterator over path names + :param prefix: expected prefix of the path names + :returns: the parts of the paths following the prefix + """ + p_len = len(prefix) + for p in iter: + yield p.name[p_len:] + + +def find_suffixes(root, prefix): + """combines find_prefixes and extract_suffixes + """ + return extract_suffixes(find_prefixed(root, prefix), prefix) + + +def parse_num(maybe_num): + """parses number path suffixes, returns -1 on error""" + try: + return int(maybe_num) + except ValueError: + return -1 + + +if six.PY2: + + def _max(iterable, default): + """needed due to python2.7 lacking the default argument for max""" + return reduce(max, iterable, default) + + +else: + _max = max + + +def _force_symlink(root, target, link_to): + """helper to create the current symlink + + its full of race conditions that are reasonably ok to ignore + for the contex of best effort linking to the latest testrun + + the presumption being thatin case of much parallelism + the inaccuracy is going to be acceptable + """ + current_symlink = root.joinpath(target) + try: + current_symlink.unlink() + except OSError: + pass + try: + current_symlink.symlink_to(link_to) + except Exception: + pass + + +def make_numbered_dir(root, prefix): + """create a directory with a increased number as suffix for the given prefix""" + for i in range(10): + # try up to 10 times to create the folder + max_existing = _max(map(parse_num, find_suffixes(root, prefix)), default=-1) + new_number = max_existing + 1 + new_path = root.joinpath("{}{}".format(prefix, new_number)) + try: + new_path.mkdir() + except Exception: + pass + else: + _force_symlink(root, prefix + "current", new_path) + return new_path + else: + raise EnvironmentError( + "could not create numbered dir with prefix " + "{prefix} in {root} after 10 tries".format(prefix=prefix, root=root) + ) + + +def create_cleanup_lock(p): + """crates a lock to prevent premature folder cleanup""" + lock_path = get_lock_path(p) + try: + fd = os.open(str(lock_path), os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644) + except OSError as e: + if e.errno == errno.EEXIST: + six.raise_from( + EnvironmentError("cannot create lockfile in {path}".format(path=p)), e + ) + else: + raise + else: + pid = os.getpid() + spid = str(pid) + if not isinstance(spid, six.binary_type): + spid = spid.encode("ascii") + os.write(fd, spid) + 
os.close(fd) + if not lock_path.is_file(): + raise EnvironmentError("lock path got renamed after sucessfull creation") + return lock_path + + +def register_cleanup_lock_removal(lock_path, register=atexit.register): + """registers a cleanup function for removing a lock, by default on atexit""" + pid = os.getpid() + + def cleanup_on_exit(lock_path=lock_path, original_pid=pid): + current_pid = os.getpid() + if current_pid != original_pid: + # fork + return + try: + lock_path.unlink() + except (OSError, IOError): + pass + + return register(cleanup_on_exit) + + +def delete_a_numbered_dir(path): + """removes a numbered directory""" + create_cleanup_lock(path) + parent = path.parent + + garbage = parent.joinpath("garbage-{}".format(uuid.uuid4())) + path.rename(garbage) + rmtree(garbage, force=True) + + +def ensure_deletable(path, consider_lock_dead_if_created_before): + """checks if a lock exists and breaks it if its considered dead""" + if path.is_symlink(): + return False + lock = get_lock_path(path) + if not lock.exists(): + return True + try: + lock_time = lock.stat().st_mtime + except Exception: + return False + else: + if lock_time < consider_lock_dead_if_created_before: + lock.unlink() + return True + else: + return False + + +def try_cleanup(path, consider_lock_dead_if_created_before): + """tries to cleanup a folder if we can ensure its deletable""" + if ensure_deletable(path, consider_lock_dead_if_created_before): + delete_a_numbered_dir(path) + + +def cleanup_candidates(root, prefix, keep): + """lists candidates for numbered directories to be removed - follows py.path""" + max_existing = _max(map(parse_num, find_suffixes(root, prefix)), default=-1) + max_delete = max_existing - keep + paths = find_prefixed(root, prefix) + paths, paths2 = itertools.tee(paths) + numbers = map(parse_num, extract_suffixes(paths2, prefix)) + for path, number in zip(paths, numbers): + if number <= max_delete: + yield path + + +def cleanup_numbered_dir(root, prefix, keep, consider_lock_dead_if_created_before): + """cleanup for lock driven numbered directories""" + for path in cleanup_candidates(root, prefix, keep): + try_cleanup(path, consider_lock_dead_if_created_before) + for path in root.glob("garbage-*"): + try_cleanup(path, consider_lock_dead_if_created_before) + + +def make_numbered_dir_with_cleanup(root, prefix, keep, lock_timeout): + """creates a numbered dir with a cleanup lock and removes old ones""" + e = None + for i in range(10): + try: + p = make_numbered_dir(root, prefix) + lock_path = create_cleanup_lock(p) + register_cleanup_lock_removal(lock_path) + except Exception as e: + pass + else: + consider_lock_dead_if_created_before = p.stat().st_mtime - lock_timeout + cleanup_numbered_dir( + root=root, + prefix=prefix, + keep=keep, + consider_lock_dead_if_created_before=consider_lock_dead_if_created_before, + ) + return p + assert e is not None + raise e + + +def resolve_from_str(input, root): + assert not isinstance(input, Path), "would break on py2" + root = Path(root) + input = expanduser(input) + input = expandvars(input) + if isabs(input): + return Path(input) + else: + return root.joinpath(input) + + +def fnmatch_ex(pattern, path): + """FNMatcher port from py.path.common which works with PurePath() instances. + + The difference between this algorithm and PurePath.match() is that the latter matches "**" glob expressions + for each part of the path, while this algorithm uses the whole path instead. 
+ + For example: + "tests/foo/bar/doc/test_foo.py" matches pattern "tests/**/doc/test*.py" with this algorithm, but not with + PurePath.match(). + + This algorithm was ported to keep backward-compatibility with existing settings which assume paths match according + this logic. + + References: + * https://bugs.python.org/issue29249 + * https://bugs.python.org/issue34731 + """ + path = PurePath(path) + iswin32 = sys.platform.startswith("win") + + if iswin32 and sep not in pattern and posix_sep in pattern: + # Running on Windows, the pattern has no Windows path separators, + # and the pattern has one or more Posix path separators. Replace + # the Posix path separators with the Windows path separator. + pattern = pattern.replace(posix_sep, sep) + + if sep not in pattern: + name = path.name + else: + name = six.text_type(path) + return fnmatch.fnmatch(name, pattern) diff --git a/venv/Lib/site-packages/_pytest/pytester.py b/venv/Lib/site-packages/_pytest/pytester.py new file mode 100644 index 00000000..8782a30b --- /dev/null +++ b/venv/Lib/site-packages/_pytest/pytester.py @@ -0,0 +1,1381 @@ +"""(disabled by default) support for testing pytest and pytest plugins.""" +from __future__ import absolute_import, division, print_function + +import codecs +import gc +import os +import platform +import re +import subprocess +import six +import sys +import time +import traceback +from fnmatch import fnmatch + +from weakref import WeakKeyDictionary + +from _pytest.capture import MultiCapture, SysCapture +from _pytest._code import Source +from _pytest.main import Session, EXIT_INTERRUPTED, EXIT_OK +from _pytest.assertion.rewrite import AssertionRewritingHook +from _pytest.pathlib import Path +from _pytest.compat import safe_str + +import py +import pytest + +IGNORE_PAM = [ # filenames added when obtaining details about the current user + u"/var/lib/sss/mc/passwd" +] + + +def pytest_addoption(parser): + parser.addoption( + "--lsof", + action="store_true", + dest="lsof", + default=False, + help="run FD checks if lsof is available", + ) + + parser.addoption( + "--runpytest", + default="inprocess", + dest="runpytest", + choices=("inprocess", "subprocess"), + help=( + "run pytest sub runs in tests using an 'inprocess' " + "or 'subprocess' (python -m main) method" + ), + ) + + parser.addini( + "pytester_example_dir", help="directory to take the pytester example files from" + ) + + +def pytest_configure(config): + if config.getvalue("lsof"): + checker = LsofFdLeakChecker() + if checker.matching_platform(): + config.pluginmanager.register(checker) + + +def raise_on_kwargs(kwargs): + if kwargs: + raise TypeError("Unexpected arguments: {}".format(", ".join(sorted(kwargs)))) + + +class LsofFdLeakChecker(object): + def get_open_files(self): + out = self._exec_lsof() + open_files = self._parse_lsof_output(out) + return open_files + + def _exec_lsof(self): + pid = os.getpid() + return py.process.cmdexec("lsof -Ffn0 -p %d" % pid) + + def _parse_lsof_output(self, out): + def isopen(line): + return line.startswith("f") and ( + "deleted" not in line + and "mem" not in line + and "txt" not in line + and "cwd" not in line + ) + + open_files = [] + + for line in out.split("\n"): + if isopen(line): + fields = line.split("\0") + fd = fields[0][1:] + filename = fields[1][1:] + if filename in IGNORE_PAM: + continue + if filename.startswith("/"): + open_files.append((fd, filename)) + + return open_files + + def matching_platform(self): + try: + py.process.cmdexec("lsof -v") + except (py.process.cmdexec.Error, UnicodeDecodeError): + 
# cmdexec may raise UnicodeDecodeError on Windows systems with
+            # locale other than English:
+            # https://bitbucket.org/pytest-dev/py/issues/66
+            return False
+        else:
+            return True
+
+    @pytest.hookimpl(hookwrapper=True, tryfirst=True)
+    def pytest_runtest_protocol(self, item):
+        lines1 = self.get_open_files()
+        yield
+        if hasattr(sys, "pypy_version_info"):
+            gc.collect()
+        lines2 = self.get_open_files()
+
+        new_fds = {t[0] for t in lines2} - {t[0] for t in lines1}
+        leaked_files = [t for t in lines2 if t[0] in new_fds]
+        if leaked_files:
+            error = []
+            error.append("***** %s FD leakage detected" % len(leaked_files))
+            error.extend([str(f) for f in leaked_files])
+            error.append("*** Before:")
+            error.extend([str(f) for f in lines1])
+            error.append("*** After:")
+            error.extend([str(f) for f in lines2])
+            error.append(error[0])
+            error.append("*** function %s:%s: %s " % item.location)
+            error.append("See issue #2366")
+            item.warn(pytest.PytestWarning("\n".join(error)))
+
+
+# XXX copied from execnet's conftest.py - needs to be merged
+winpymap = {
+    "python2.7": r"C:\Python27\python.exe",
+    "python3.4": r"C:\Python34\python.exe",
+    "python3.5": r"C:\Python35\python.exe",
+    "python3.6": r"C:\Python36\python.exe",
+}
+
+
+def getexecutable(name, cache={}):
+    try:
+        return cache[name]
+    except KeyError:
+        executable = py.path.local.sysfind(name)
+        if executable:
+            import subprocess
+
+            popen = subprocess.Popen(
+                [str(executable), "--version"],
+                universal_newlines=True,
+                stderr=subprocess.PIPE,
+            )
+            out, err = popen.communicate()
+            if name == "jython":
+                if not err or "2.5" not in err:
+                    executable = None
+                if "2.5.2" in err:
+                    executable = None  # http://bugs.jython.org/issue1790
+            elif popen.returncode != 0:
+                # handle pyenv's 127
+                executable = None
+        cache[name] = executable
+        return executable
+
+
+@pytest.fixture(params=["python2.7", "python3.4", "pypy", "pypy3"])
+def anypython(request):
+    name = request.param
+    executable = getexecutable(name)
+    if executable is None:
+        if sys.platform == "win32":
+            executable = winpymap.get(name, None)
+            if executable:
+                executable = py.path.local(executable)
+                if executable.check():
+                    return executable
+        pytest.skip("no suitable %s found" % (name,))
+    return executable
+
+
+# used at least by pytest-xdist plugin
+
+
+@pytest.fixture
+def _pytest(request):
+    """Return a helper which offers a gethookrecorder(hook) method which
+    returns a HookRecorder instance which helps to make assertions about called
+    hooks.
+
+    """
+    return PytestArg(request)
+
+
+class PytestArg(object):
+    def __init__(self, request):
+        self.request = request
+
+    def gethookrecorder(self, hook):
+        hookrecorder = HookRecorder(hook._pm)
+        self.request.addfinalizer(hookrecorder.finish_recording)
+        return hookrecorder
+
+
+def get_public_names(values):
+    """Only return names from iterator values without a leading underscore."""
+    return [x for x in values if x[0] != "_"]
+
+
+class ParsedCall(object):
+    def __init__(self, name, kwargs):
+        self.__dict__.update(kwargs)
+        self._name = name
+
+    def __repr__(self):
+        d = self.__dict__.copy()
+        del d["_name"]
+        return "<ParsedCall %r(**%r)>" % (self._name, d)
+
+
+class HookRecorder(object):
+    """Record all hooks called in a plugin manager.
+
+    This wraps all the hook calls in the plugin manager, recording each call
+    before propagating the normal calls.
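In practice a ``HookRecorder`` is usually obtained through the ``testdir`` fixture defined later in this module; a minimal sketch, assuming the pytester plugin is enabled (``pytest_plugins = "pytester"``):

    pytest_plugins = "pytester"

    def test_recorder(testdir):
        testdir.makepyfile("def test_ok(): assert True")
        reprec = testdir.inline_run()      # returns a HookRecorder
        reprec.assertoutcome(passed=1)     # query the recorded reports
        assert reprec.getcalls("pytest_collectstart")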
+ + """ + + def __init__(self, pluginmanager): + self._pluginmanager = pluginmanager + self.calls = [] + + def before(hook_name, hook_impls, kwargs): + self.calls.append(ParsedCall(hook_name, kwargs)) + + def after(outcome, hook_name, hook_impls, kwargs): + pass + + self._undo_wrapping = pluginmanager.add_hookcall_monitoring(before, after) + + def finish_recording(self): + self._undo_wrapping() + + def getcalls(self, names): + if isinstance(names, str): + names = names.split() + return [call for call in self.calls if call._name in names] + + def assert_contains(self, entries): + __tracebackhide__ = True + i = 0 + entries = list(entries) + backlocals = sys._getframe(1).f_locals + while entries: + name, check = entries.pop(0) + for ind, call in enumerate(self.calls[i:]): + if call._name == name: + print("NAMEMATCH", name, call) + if eval(check, backlocals, call.__dict__): + print("CHECKERMATCH", repr(check), "->", call) + else: + print("NOCHECKERMATCH", repr(check), "-", call) + continue + i += ind + 1 + break + print("NONAMEMATCH", name, "with", call) + else: + pytest.fail("could not find %r check %r" % (name, check)) + + def popcall(self, name): + __tracebackhide__ = True + for i, call in enumerate(self.calls): + if call._name == name: + del self.calls[i] + return call + lines = ["could not find call %r, in:" % (name,)] + lines.extend([" %s" % x for x in self.calls]) + pytest.fail("\n".join(lines)) + + def getcall(self, name): + values = self.getcalls(name) + assert len(values) == 1, (name, values) + return values[0] + + # functionality for test reports + + def getreports(self, names="pytest_runtest_logreport pytest_collectreport"): + return [x.report for x in self.getcalls(names)] + + def matchreport( + self, + inamepart="", + names="pytest_runtest_logreport pytest_collectreport", + when=None, + ): + """return a testreport whose dotted import path matches""" + values = [] + for rep in self.getreports(names=names): + try: + if not when and rep.when != "call" and rep.passed: + # setup/teardown passing reports - let's ignore those + continue + except AttributeError: + pass + if when and getattr(rep, "when", None) != when: + continue + if not inamepart or inamepart in rep.nodeid.split("::"): + values.append(rep) + if not values: + raise ValueError( + "could not find test report matching %r: " + "no test reports at all!" 
% (inamepart,) + ) + if len(values) > 1: + raise ValueError( + "found 2 or more testreports matching %r: %s" % (inamepart, values) + ) + return values[0] + + def getfailures(self, names="pytest_runtest_logreport pytest_collectreport"): + return [rep for rep in self.getreports(names) if rep.failed] + + def getfailedcollections(self): + return self.getfailures("pytest_collectreport") + + def listoutcomes(self): + passed = [] + skipped = [] + failed = [] + for rep in self.getreports("pytest_collectreport pytest_runtest_logreport"): + if rep.passed: + if getattr(rep, "when", None) == "call": + passed.append(rep) + elif rep.skipped: + skipped.append(rep) + elif rep.failed: + failed.append(rep) + return passed, skipped, failed + + def countoutcomes(self): + return [len(x) for x in self.listoutcomes()] + + def assertoutcome(self, passed=0, skipped=0, failed=0): + realpassed, realskipped, realfailed = self.listoutcomes() + assert passed == len(realpassed) + assert skipped == len(realskipped) + assert failed == len(realfailed) + + def clear(self): + self.calls[:] = [] + + +@pytest.fixture +def linecomp(request): + return LineComp() + + +@pytest.fixture(name="LineMatcher") +def LineMatcher_fixture(request): + return LineMatcher + + +@pytest.fixture +def testdir(request, tmpdir_factory): + return Testdir(request, tmpdir_factory) + + +rex_outcome = re.compile(r"(\d+) ([\w-]+)") + + +class RunResult(object): + """The result of running a command. + + Attributes: + + :ret: the return value + :outlines: list of lines captured from stdout + :errlines: list of lines captures from stderr + :stdout: :py:class:`LineMatcher` of stdout, use ``stdout.str()`` to + reconstruct stdout or the commonly used ``stdout.fnmatch_lines()`` + method + :stderr: :py:class:`LineMatcher` of stderr + :duration: duration in seconds + + """ + + def __init__(self, ret, outlines, errlines, duration): + self.ret = ret + self.outlines = outlines + self.errlines = errlines + self.stdout = LineMatcher(outlines) + self.stderr = LineMatcher(errlines) + self.duration = duration + + def parseoutcomes(self): + """Return a dictionary of outcomestring->num from parsing the terminal + output that the test process produced. + + """ + for line in reversed(self.outlines): + if "seconds" in line: + outcomes = rex_outcome.findall(line) + if outcomes: + d = {} + for num, cat in outcomes: + d[cat] = int(num) + return d + raise ValueError("Pytest terminal report not found") + + def assert_outcomes( + self, passed=0, skipped=0, failed=0, error=0, xpassed=0, xfailed=0 + ): + """Assert that the specified outcomes appear with the respective + numbers (0 means it didn't occur) in the text output from a test run. 
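A minimal sketch of asserting on a ``RunResult`` (assumes the pytester plugin is enabled):

    def test_outcome_counts(testdir):
        testdir.makepyfile(
            """
            def test_pass(): assert True
            def test_fail(): assert False
            """
        )
        result = testdir.runpytest()
        result.assert_outcomes(passed=1, failed=1)
        assert result.parseoutcomes()["failed"] == 1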
+
+        """
+        d = self.parseoutcomes()
+        obtained = {
+            "passed": d.get("passed", 0),
+            "skipped": d.get("skipped", 0),
+            "failed": d.get("failed", 0),
+            "error": d.get("error", 0),
+            "xpassed": d.get("xpassed", 0),
+            "xfailed": d.get("xfailed", 0),
+        }
+        expected = {
+            "passed": passed,
+            "skipped": skipped,
+            "failed": failed,
+            "error": error,
+            "xpassed": xpassed,
+            "xfailed": xfailed,
+        }
+        assert obtained == expected
+
+
+class CwdSnapshot(object):
+    def __init__(self):
+        self.__saved = os.getcwd()
+
+    def restore(self):
+        os.chdir(self.__saved)
+
+
+class SysModulesSnapshot(object):
+    def __init__(self, preserve=None):
+        self.__preserve = preserve
+        self.__saved = dict(sys.modules)
+
+    def restore(self):
+        if self.__preserve:
+            self.__saved.update(
+                (k, m) for k, m in sys.modules.items() if self.__preserve(k)
+            )
+        sys.modules.clear()
+        sys.modules.update(self.__saved)
+
+
+class SysPathsSnapshot(object):
+    def __init__(self):
+        self.__saved = list(sys.path), list(sys.meta_path)
+
+    def restore(self):
+        sys.path[:], sys.meta_path[:] = self.__saved
+
+
+class Testdir(object):
+    """Temporary test directory with tools to test/run pytest itself.
+
+    This is based on the ``tmpdir`` fixture but provides a number of methods
+    which aid with testing pytest itself. Unless :py:meth:`chdir` is used all
+    methods will use :py:attr:`tmpdir` as their current working directory.
+
+    Attributes:
+
+    :tmpdir: The :py:class:`py.path.local` instance of the temporary directory.
+
+    :plugins: A list of plugins to use with :py:meth:`parseconfig` and
+        :py:meth:`runpytest`. Initially this is an empty list but plugins can
+        be added to the list. The type of items to add to the list depends on
+        the method using them so refer to them for details.
+
+    """
+
+    class TimeoutExpired(Exception):
+        pass
+
+    def __init__(self, request, tmpdir_factory):
+        self.request = request
+        self._mod_collections = WeakKeyDictionary()
+        name = request.function.__name__
+        self.tmpdir = tmpdir_factory.mktemp(name, numbered=True)
+        self.test_tmproot = tmpdir_factory.mktemp("tmp-" + name, numbered=True)
+        os.environ["PYTEST_DEBUG_TEMPROOT"] = str(self.test_tmproot)
+        self.plugins = []
+        self._cwd_snapshot = CwdSnapshot()
+        self._sys_path_snapshot = SysPathsSnapshot()
+        self._sys_modules_snapshot = self.__take_sys_modules_snapshot()
+        self.chdir()
+        self.request.addfinalizer(self.finalize)
+        method = self.request.config.getoption("--runpytest")
+        if method == "inprocess":
+            self._runpytest_method = self.runpytest_inprocess
+        elif method == "subprocess":
+            self._runpytest_method = self.runpytest_subprocess
+
+    def __repr__(self):
+        return "<Testdir %r>" % (self.tmpdir,)
+
+    def finalize(self):
+        """Clean up global state artifacts.
+
+        Some methods modify the global interpreter state and this tries to
+        clean this up. It does not remove the temporary directory however so
+        it can be looked at after the test run has finished.
+ + """ + self._sys_modules_snapshot.restore() + self._sys_path_snapshot.restore() + self._cwd_snapshot.restore() + os.environ.pop("PYTEST_DEBUG_TEMPROOT", None) + + def __take_sys_modules_snapshot(self): + # some zope modules used by twisted-related tests keep internal state + # and can't be deleted; we had some trouble in the past with + # `zope.interface` for example + def preserve_module(name): + return name.startswith("zope") + + return SysModulesSnapshot(preserve=preserve_module) + + def make_hook_recorder(self, pluginmanager): + """Create a new :py:class:`HookRecorder` for a PluginManager.""" + pluginmanager.reprec = reprec = HookRecorder(pluginmanager) + self.request.addfinalizer(reprec.finish_recording) + return reprec + + def chdir(self): + """Cd into the temporary directory. + + This is done automatically upon instantiation. + + """ + self.tmpdir.chdir() + + def _makefile(self, ext, args, kwargs, encoding="utf-8"): + items = list(kwargs.items()) + + def to_text(s): + return s.decode(encoding) if isinstance(s, bytes) else six.text_type(s) + + if args: + source = u"\n".join(to_text(x) for x in args) + basename = self.request.function.__name__ + items.insert(0, (basename, source)) + + ret = None + for basename, value in items: + p = self.tmpdir.join(basename).new(ext=ext) + p.dirpath().ensure_dir() + source = Source(value) + source = u"\n".join(to_text(line) for line in source.lines) + p.write(source.strip().encode(encoding), "wb") + if ret is None: + ret = p + return ret + + def makefile(self, ext, *args, **kwargs): + r"""Create new file(s) in the testdir. + + :param str ext: The extension the file(s) should use, including the dot, e.g. `.py`. + :param list[str] args: All args will be treated as strings and joined using newlines. + The result will be written as contents to the file. The name of the + file will be based on the test function requesting this fixture. + :param kwargs: Each keyword is the name of a file, while the value of it will + be written as contents of the file. + + Examples: + + .. code-block:: python + + testdir.makefile(".txt", "line1", "line2") + + testdir.makefile(".ini", pytest="[pytest]\naddopts=-rs\n") + + """ + return self._makefile(ext, args, kwargs) + + def makeconftest(self, source): + """Write a contest.py file with 'source' as contents.""" + return self.makepyfile(conftest=source) + + def makeini(self, source): + """Write a tox.ini file with 'source' as contents.""" + return self.makefile(".ini", tox=source) + + def getinicfg(self, source): + """Return the pytest section from the tox.ini config file.""" + p = self.makeini(source) + return py.iniconfig.IniConfig(p)["pytest"] + + def makepyfile(self, *args, **kwargs): + """Shortcut for .makefile() with a .py extension.""" + return self._makefile(".py", args, kwargs) + + def maketxtfile(self, *args, **kwargs): + """Shortcut for .makefile() with a .txt extension.""" + return self._makefile(".txt", args, kwargs) + + def syspathinsert(self, path=None): + """Prepend a directory to sys.path, defaults to :py:attr:`tmpdir`. + + This is undone automatically when this object dies at the end of each + test. 
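A minimal sketch combining ``makepyfile`` and ``syspathinsert`` so a helper module written into the tmpdir can be imported by the generated test (assumes the pytester plugin is enabled):

    def test_helper_module(testdir):
        testdir.makepyfile(helper="ANSWER = 42")
        testdir.syspathinsert()  # make the tmpdir importable
        testdir.makepyfile(
            """
            import helper
            def test_answer():
                assert helper.ANSWER == 42
            """
        )
        testdir.runpytest().assert_outcomes(passed=1)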
+ + """ + if path is None: + path = self.tmpdir + sys.path.insert(0, str(path)) + # a call to syspathinsert() usually means that the caller wants to + # import some dynamically created files, thus with python3 we + # invalidate its import caches + self._possibly_invalidate_import_caches() + + def _possibly_invalidate_import_caches(self): + # invalidate caches if we can (py33 and above) + try: + import importlib + except ImportError: + pass + else: + if hasattr(importlib, "invalidate_caches"): + importlib.invalidate_caches() + + def mkdir(self, name): + """Create a new (sub)directory.""" + return self.tmpdir.mkdir(name) + + def mkpydir(self, name): + """Create a new python package. + + This creates a (sub)directory with an empty ``__init__.py`` file so it + gets recognised as a python package. + + """ + p = self.mkdir(name) + p.ensure("__init__.py") + return p + + def copy_example(self, name=None): + import warnings + from _pytest.warning_types import PYTESTER_COPY_EXAMPLE + + warnings.warn(PYTESTER_COPY_EXAMPLE, stacklevel=2) + example_dir = self.request.config.getini("pytester_example_dir") + if example_dir is None: + raise ValueError("pytester_example_dir is unset, can't copy examples") + example_dir = self.request.config.rootdir.join(example_dir) + + for extra_element in self.request.node.iter_markers("pytester_example_path"): + assert extra_element.args + example_dir = example_dir.join(*extra_element.args) + + if name is None: + func_name = self.request.function.__name__ + maybe_dir = example_dir / func_name + maybe_file = example_dir / (func_name + ".py") + + if maybe_dir.isdir(): + example_path = maybe_dir + elif maybe_file.isfile(): + example_path = maybe_file + else: + raise LookupError( + "{} cant be found as module or package in {}".format( + func_name, example_dir.bestrelpath(self.request.confg.rootdir) + ) + ) + else: + example_path = example_dir.join(name) + + if example_path.isdir() and not example_path.join("__init__.py").isfile(): + example_path.copy(self.tmpdir) + return self.tmpdir + elif example_path.isfile(): + result = self.tmpdir.join(example_path.basename) + example_path.copy(result) + return result + else: + raise LookupError( + 'example "{}" is not found as a file or directory'.format(example_path) + ) + + Session = Session + + def getnode(self, config, arg): + """Return the collection node of a file. + + :param config: :py:class:`_pytest.config.Config` instance, see + :py:meth:`parseconfig` and :py:meth:`parseconfigure` to create the + configuration + + :param arg: a :py:class:`py.path.local` instance of the file + + """ + session = Session(config) + assert "::" not in str(arg) + p = py.path.local(arg) + config.hook.pytest_sessionstart(session=session) + res = session.perform_collect([str(p)], genitems=False)[0] + config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK) + return res + + def getpathnode(self, path): + """Return the collection node of a file. + + This is like :py:meth:`getnode` but uses :py:meth:`parseconfigure` to + create the (configured) pytest Config instance. + + :param path: a :py:class:`py.path.local` instance of the file + + """ + config = self.parseconfigure(path) + session = Session(config) + x = session.fspath.bestrelpath(path) + config.hook.pytest_sessionstart(session=session) + res = session.perform_collect([x], genitems=False)[0] + config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK) + return res + + def genitems(self, colitems): + """Generate all test items from a collection node. 
+ + This recurses into the collection node and returns a list of all the + test items contained within. + + """ + session = colitems[0].session + result = [] + for colitem in colitems: + result.extend(session.genitems(colitem)) + return result + + def runitem(self, source): + """Run the "test_func" Item. + + The calling test instance (class containing the test method) must + provide a ``.getrunner()`` method which should return a runner which + can run the test protocol for a single item, e.g. + :py:func:`_pytest.runner.runtestprotocol`. + + """ + # used from runner functional tests + item = self.getitem(source) + # the test class where we are called from wants to provide the runner + testclassinstance = self.request.instance + runner = testclassinstance.getrunner() + return runner(item) + + def inline_runsource(self, source, *cmdlineargs): + """Run a test module in process using ``pytest.main()``. + + This run writes "source" into a temporary file and runs + ``pytest.main()`` on it, returning a :py:class:`HookRecorder` instance + for the result. + + :param source: the source code of the test module + + :param cmdlineargs: any extra command line arguments to use + + :return: :py:class:`HookRecorder` instance of the result + + """ + p = self.makepyfile(source) + values = list(cmdlineargs) + [p] + return self.inline_run(*values) + + def inline_genitems(self, *args): + """Run ``pytest.main(['--collectonly'])`` in-process. + + Runs the :py:func:`pytest.main` function to run all of pytest inside + the test process itself like :py:meth:`inline_run`, but returns a + tuple of the collected items and a :py:class:`HookRecorder` instance. + + """ + rec = self.inline_run("--collect-only", *args) + items = [x.item for x in rec.getcalls("pytest_itemcollected")] + return items, rec + + def inline_run(self, *args, **kwargs): + """Run ``pytest.main()`` in-process, returning a HookRecorder. + + Runs the :py:func:`pytest.main` function to run all of pytest inside + the test process itself. This means it can return a + :py:class:`HookRecorder` instance which gives more detailed results + from that run than can be done by matching stdout/stderr from + :py:meth:`runpytest`. + + :param args: command line arguments to pass to :py:func:`pytest.main` + + :param plugin: (keyword-only) extra plugin instances the + ``pytest.main()`` instance should use + + :return: a :py:class:`HookRecorder` instance + + """ + finalizers = [] + try: + # When running pytest inline any plugins active in the main test + # process are already imported. So this disables the warning which + # will trigger to say they can no longer be rewritten, which is + # fine as they have already been rewritten. + orig_warn = AssertionRewritingHook._warn_already_imported + + def revert_warn_already_imported(): + AssertionRewritingHook._warn_already_imported = orig_warn + + finalizers.append(revert_warn_already_imported) + AssertionRewritingHook._warn_already_imported = lambda *a: None + + # Any sys.module or sys.path changes done while running pytest + # inline should be reverted after the test run completes to avoid + # clashing with later inline tests run within the same pytest test, + # e.g. just because they use matching test module names. 
+ finalizers.append(self.__take_sys_modules_snapshot().restore) + finalizers.append(SysPathsSnapshot().restore) + + # Important note: + # - our tests should not leave any other references/registrations + # laying around other than possibly loaded test modules + # referenced from sys.modules, as nothing will clean those up + # automatically + + rec = [] + + class Collect(object): + def pytest_configure(x, config): + rec.append(self.make_hook_recorder(config.pluginmanager)) + + plugins = kwargs.get("plugins") or [] + plugins.append(Collect()) + ret = pytest.main(list(args), plugins=plugins) + if len(rec) == 1: + reprec = rec.pop() + else: + + class reprec(object): + pass + + reprec.ret = ret + + # typically we reraise keyboard interrupts from the child run + # because it's our user requesting interruption of the testing + if ret == EXIT_INTERRUPTED and not kwargs.get("no_reraise_ctrlc"): + calls = reprec.getcalls("pytest_keyboard_interrupt") + if calls and calls[-1].excinfo.type == KeyboardInterrupt: + raise KeyboardInterrupt() + return reprec + finally: + for finalizer in finalizers: + finalizer() + + def runpytest_inprocess(self, *args, **kwargs): + """Return result of running pytest in-process, providing a similar + interface to what self.runpytest() provides. + + """ + if kwargs.get("syspathinsert"): + self.syspathinsert() + now = time.time() + capture = MultiCapture(Capture=SysCapture) + capture.start_capturing() + try: + try: + reprec = self.inline_run(*args, **kwargs) + except SystemExit as e: + + class reprec(object): + ret = e.args[0] + + except Exception: + traceback.print_exc() + + class reprec(object): + ret = 3 + + finally: + out, err = capture.readouterr() + capture.stop_capturing() + sys.stdout.write(out) + sys.stderr.write(err) + + res = RunResult(reprec.ret, out.split("\n"), err.split("\n"), time.time() - now) + res.reprec = reprec + return res + + def runpytest(self, *args, **kwargs): + """Run pytest inline or in a subprocess, depending on the command line + option "--runpytest" and return a :py:class:`RunResult`. + + """ + args = self._ensure_basetemp(args) + return self._runpytest_method(*args, **kwargs) + + def _ensure_basetemp(self, args): + args = list(args) + for x in args: + if safe_str(x).startswith("--basetemp"): + break + else: + args.append("--basetemp=%s" % self.tmpdir.dirpath("basetemp")) + return args + + def parseconfig(self, *args): + """Return a new pytest Config instance from given commandline args. + + This invokes the pytest bootstrapping code in _pytest.config to create + a new :py:class:`_pytest.core.PluginManager` and call the + pytest_cmdline_parse hook to create a new + :py:class:`_pytest.config.Config` instance. + + If :py:attr:`plugins` has been populated they should be plugin modules + to be registered with the PluginManager. + + """ + args = self._ensure_basetemp(args) + + import _pytest.config + + config = _pytest.config._prepareconfig(args, self.plugins) + # we don't know what the test will do with this half-setup config + # object and thus we make sure it gets unconfigured properly in any + # case (otherwise capturing could still be active, for example) + self.request.addfinalizer(config._ensure_unconfigure) + return config + + def parseconfigure(self, *args): + """Return a new pytest configured Config instance. + + This returns a new :py:class:`_pytest.config.Config` instance like + :py:meth:`parseconfig`, but also calls the pytest_configure hook. 
+ + """ + config = self.parseconfig(*args) + config._do_configure() + self.request.addfinalizer(config._ensure_unconfigure) + return config + + def getitem(self, source, funcname="test_func"): + """Return the test item for a test function. + + This writes the source to a python file and runs pytest's collection on + the resulting module, returning the test item for the requested + function name. + + :param source: the module source + + :param funcname: the name of the test function for which to return a + test item + + """ + items = self.getitems(source) + for item in items: + if item.name == funcname: + return item + assert 0, "%r item not found in module:\n%s\nitems: %s" % ( + funcname, + source, + items, + ) + + def getitems(self, source): + """Return all test items collected from the module. + + This writes the source to a python file and runs pytest's collection on + the resulting module, returning all test items contained within. + + """ + modcol = self.getmodulecol(source) + return self.genitems([modcol]) + + def getmodulecol(self, source, configargs=(), withinit=False): + """Return the module collection node for ``source``. + + This writes ``source`` to a file using :py:meth:`makepyfile` and then + runs the pytest collection on it, returning the collection node for the + test module. + + :param source: the source code of the module to collect + + :param configargs: any extra arguments to pass to + :py:meth:`parseconfigure` + + :param withinit: whether to also write an ``__init__.py`` file to the + same directory to ensure it is a package + + """ + if isinstance(source, Path): + path = self.tmpdir.join(str(source)) + assert not withinit, "not supported for paths" + else: + kw = {self.request.function.__name__: Source(source).strip()} + path = self.makepyfile(**kw) + if withinit: + self.makepyfile(__init__="#") + self.config = config = self.parseconfigure(path, *configargs) + return self.getnode(config, path) + + def collect_by_name(self, modcol, name): + """Return the collection node for name from the module collection. + + This will search a module collection node for a collection node + matching the given name. + + :param modcol: a module collection node; see :py:meth:`getmodulecol` + + :param name: the name of the node to return + + """ + if modcol not in self._mod_collections: + self._mod_collections[modcol] = list(modcol.collect()) + for colitem in self._mod_collections[modcol]: + if colitem.name == name: + return colitem + + def popen(self, cmdargs, stdout, stderr, **kw): + """Invoke subprocess.Popen. + + This calls subprocess.Popen making sure the current working directory + is in the PYTHONPATH. + + You probably want to use :py:meth:`run` instead. + + """ + env = os.environ.copy() + env["PYTHONPATH"] = os.pathsep.join( + filter(None, [os.getcwd(), env.get("PYTHONPATH", "")]) + ) + kw["env"] = env + + popen = subprocess.Popen( + cmdargs, stdin=subprocess.PIPE, stdout=stdout, stderr=stderr, **kw + ) + popen.stdin.close() + + return popen + + def run(self, *cmdargs, **kwargs): + """Run a command with arguments. + + Run a process using subprocess.Popen saving the stdout and stderr. + + :param args: the sequence of arguments to pass to `subprocess.Popen()` + :param timeout: the period in seconds after which to timeout and raise + :py:class:`Testdir.TimeoutExpired` + + Returns a :py:class:`RunResult`. 
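A minimal sketch of ``Testdir.run`` driving an external process and checking the captured output via the ``RunResult`` (assumes the pytester plugin is enabled):

    import sys

    def test_run_interpreter(testdir):
        result = testdir.run(sys.executable, "-c", "print('hello from a subprocess')")
        assert result.ret == 0
        result.stdout.fnmatch_lines(["hello from a subprocess"])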
+ + """ + __tracebackhide__ = True + + timeout = kwargs.pop("timeout", None) + raise_on_kwargs(kwargs) + + cmdargs = [ + str(arg) if isinstance(arg, py.path.local) else arg for arg in cmdargs + ] + p1 = self.tmpdir.join("stdout") + p2 = self.tmpdir.join("stderr") + print("running:", *cmdargs) + print(" in:", py.path.local()) + f1 = codecs.open(str(p1), "w", encoding="utf8") + f2 = codecs.open(str(p2), "w", encoding="utf8") + try: + now = time.time() + popen = self.popen( + cmdargs, stdout=f1, stderr=f2, close_fds=(sys.platform != "win32") + ) + + def handle_timeout(): + __tracebackhide__ = True + + timeout_message = ( + "{seconds} second timeout expired running:" + " {command}".format(seconds=timeout, command=cmdargs) + ) + + popen.kill() + popen.wait() + raise self.TimeoutExpired(timeout_message) + + if timeout is None: + ret = popen.wait() + elif six.PY3: + try: + ret = popen.wait(timeout) + except subprocess.TimeoutExpired: + handle_timeout() + else: + end = time.time() + timeout + + resolution = min(0.1, timeout / 10) + + while True: + ret = popen.poll() + if ret is not None: + break + + if time.time() > end: + handle_timeout() + + time.sleep(resolution) + finally: + f1.close() + f2.close() + f1 = codecs.open(str(p1), "r", encoding="utf8") + f2 = codecs.open(str(p2), "r", encoding="utf8") + try: + out = f1.read().splitlines() + err = f2.read().splitlines() + finally: + f1.close() + f2.close() + self._dump_lines(out, sys.stdout) + self._dump_lines(err, sys.stderr) + return RunResult(ret, out, err, time.time() - now) + + def _dump_lines(self, lines, fp): + try: + for line in lines: + print(line, file=fp) + except UnicodeEncodeError: + print("couldn't print to %s because of encoding" % (fp,)) + + def _getpytestargs(self): + return sys.executable, "-mpytest" + + def runpython(self, script): + """Run a python script using sys.executable as interpreter. + + Returns a :py:class:`RunResult`. + + """ + return self.run(sys.executable, script) + + def runpython_c(self, command): + """Run python -c "command", return a :py:class:`RunResult`.""" + return self.run(sys.executable, "-c", command) + + def runpytest_subprocess(self, *args, **kwargs): + """Run pytest as a subprocess with given arguments. + + Any plugins added to the :py:attr:`plugins` list will added using the + ``-p`` command line option. Additionally ``--basetemp`` is used put + any temporary files and directories in a numbered directory prefixed + with "runpytest-" so they do not conflict with the normal numbered + pytest location for temporary files and directories. + + :param args: the sequence of arguments to pass to the pytest subprocess + :param timeout: the period in seconds after which to timeout and raise + :py:class:`Testdir.TimeoutExpired` + + Returns a :py:class:`RunResult`. + + """ + __tracebackhide__ = True + + p = py.path.local.make_numbered_dir( + prefix="runpytest-", keep=None, rootdir=self.tmpdir + ) + args = ("--basetemp=%s" % p,) + args + plugins = [x for x in self.plugins if isinstance(x, str)] + if plugins: + args = ("-p", plugins[0]) + args + args = self._getpytestargs() + args + return self.run(*args, timeout=kwargs.get("timeout")) + + def spawn_pytest(self, string, expect_timeout=10.0): + """Run pytest using pexpect. + + This makes sure to use the right pytest and sets up the temporary + directory locations. + + The pexpect child is returned. 
+ + """ + basetemp = self.tmpdir.mkdir("temp-pexpect") + invoke = " ".join(map(str, self._getpytestargs())) + cmd = "%s --basetemp=%s %s" % (invoke, basetemp, string) + return self.spawn(cmd, expect_timeout=expect_timeout) + + def spawn(self, cmd, expect_timeout=10.0): + """Run a command using pexpect. + + The pexpect child is returned. + + """ + pexpect = pytest.importorskip("pexpect", "3.0") + if hasattr(sys, "pypy_version_info") and "64" in platform.machine(): + pytest.skip("pypy-64 bit not supported") + if sys.platform.startswith("freebsd"): + pytest.xfail("pexpect does not work reliably on freebsd") + logfile = self.tmpdir.join("spawn.out").open("wb") + child = pexpect.spawn(cmd, logfile=logfile) + self.request.addfinalizer(logfile.close) + child.timeout = expect_timeout + return child + + +def getdecoded(out): + try: + return out.decode("utf-8") + except UnicodeDecodeError: + return "INTERNAL not-utf8-decodeable, truncated string:\n%s" % ( + py.io.saferepr(out), + ) + + +class LineComp(object): + def __init__(self): + self.stringio = py.io.TextIO() + + def assert_contains_lines(self, lines2): + """Assert that lines2 are contained (linearly) in lines1. + + Return a list of extralines found. + + """ + __tracebackhide__ = True + val = self.stringio.getvalue() + self.stringio.truncate(0) + self.stringio.seek(0) + lines1 = val.split("\n") + return LineMatcher(lines1).fnmatch_lines(lines2) + + +class LineMatcher(object): + """Flexible matching of text. + + This is a convenience class to test large texts like the output of + commands. + + The constructor takes a list of lines without their trailing newlines, i.e. + ``text.splitlines()``. + + """ + + def __init__(self, lines): + self.lines = lines + self._log_output = [] + + def str(self): + """Return the entire original text.""" + return "\n".join(self.lines) + + def _getlines(self, lines2): + if isinstance(lines2, str): + lines2 = Source(lines2) + if isinstance(lines2, Source): + lines2 = lines2.strip().lines + return lines2 + + def fnmatch_lines_random(self, lines2): + """Check lines exist in the output using in any order. + + Lines are checked using ``fnmatch.fnmatch``. The argument is a list of + lines which have to occur in the output, in any order. + + """ + self._match_lines_random(lines2, fnmatch) + + def re_match_lines_random(self, lines2): + """Check lines exist in the output using ``re.match``, in any order. + + The argument is a list of lines which have to occur in the output, in + any order. + + """ + self._match_lines_random(lines2, lambda name, pat: re.match(pat, name)) + + def _match_lines_random(self, lines2, match_func): + """Check lines exist in the output. + + The argument is a list of lines which have to occur in the output, in + any order. Each line can contain glob whildcards. + + """ + lines2 = self._getlines(lines2) + for line in lines2: + for x in self.lines: + if line == x or match_func(x, line): + self._log("matched: ", repr(line)) + break + else: + self._log("line %r not found in output" % line) + raise ValueError(self._log_text) + + def get_lines_after(self, fnline): + """Return all lines following the given line in the text. + + The given line can contain glob wildcards. 
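``LineMatcher`` can also be used directly on any list of captured lines; a minimal sketch (the import path is the module added by this diff):

    from _pytest.pytester import LineMatcher

    def test_line_matching():
        matcher = LineMatcher(["collected 2 items", "test_a.py ..", "2 passed"])
        matcher.fnmatch_lines(["collected * items", "* passed"])
        assert matcher.get_lines_after("collected * items") == ["test_a.py ..", "2 passed"]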
+ + """ + for i, line in enumerate(self.lines): + if fnline == line or fnmatch(line, fnline): + return self.lines[i + 1 :] + raise ValueError("line %r not found in output" % fnline) + + def _log(self, *args): + self._log_output.append(" ".join((str(x) for x in args))) + + @property + def _log_text(self): + return "\n".join(self._log_output) + + def fnmatch_lines(self, lines2): + """Search captured text for matching lines using ``fnmatch.fnmatch``. + + The argument is a list of lines which have to match and can use glob + wildcards. If they do not match a pytest.fail() is called. The + matches and non-matches are also printed on stdout. + + """ + __tracebackhide__ = True + self._match_lines(lines2, fnmatch, "fnmatch") + + def re_match_lines(self, lines2): + """Search captured text for matching lines using ``re.match``. + + The argument is a list of lines which have to match using ``re.match``. + If they do not match a pytest.fail() is called. + + The matches and non-matches are also printed on stdout. + + """ + __tracebackhide__ = True + self._match_lines(lines2, lambda name, pat: re.match(pat, name), "re.match") + + def _match_lines(self, lines2, match_func, match_nickname): + """Underlying implementation of ``fnmatch_lines`` and ``re_match_lines``. + + :param list[str] lines2: list of string patterns to match. The actual + format depends on ``match_func`` + :param match_func: a callable ``match_func(line, pattern)`` where line + is the captured line from stdout/stderr and pattern is the matching + pattern + :param str match_nickname: the nickname for the match function that + will be logged to stdout when a match occurs + + """ + lines2 = self._getlines(lines2) + lines1 = self.lines[:] + nextline = None + extralines = [] + __tracebackhide__ = True + for line in lines2: + nomatchprinted = False + while lines1: + nextline = lines1.pop(0) + if line == nextline: + self._log("exact match:", repr(line)) + break + elif match_func(nextline, line): + self._log("%s:" % match_nickname, repr(line)) + self._log(" with:", repr(nextline)) + break + else: + if not nomatchprinted: + self._log("nomatch:", repr(line)) + nomatchprinted = True + self._log(" and:", repr(nextline)) + extralines.append(nextline) + else: + self._log("remains unmatched: %r" % (line,)) + pytest.fail(self._log_text) diff --git a/venv/Lib/site-packages/_pytest/python.py b/venv/Lib/site-packages/_pytest/python.py new file mode 100644 index 00000000..ef3e3a73 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/python.py @@ -0,0 +1,1451 @@ +""" Python test discovery, setup and run of test functions. 
""" +from __future__ import absolute_import, division, print_function + +import fnmatch +import inspect +import sys +import os +import collections +import warnings +from textwrap import dedent + + +import py +import six +from _pytest.main import FSHookProxy +from _pytest.config import hookimpl + +import _pytest +from _pytest._code import filter_traceback +from _pytest import fixtures +from _pytest import nodes +from _pytest import deprecated +from _pytest.compat import ( + isclass, + isfunction, + is_generator, + ascii_escaped, + REGEX_TYPE, + STRING_TYPES, + NoneType, + NOTSET, + get_real_func, + getfslineno, + safe_getattr, + safe_str, + getlocation, + enum, + get_default_arg_names, + getimfunc, +) +from _pytest.outcomes import fail +from _pytest.mark.structures import ( + transfer_markers, + get_unpacked_marks, + normalize_mark_list, +) +from _pytest.warning_types import RemovedInPytest4Warning, PytestWarning + + +def pyobj_property(name): + def get(self): + node = self.getparent(getattr(__import__("pytest"), name)) + if node is not None: + return node.obj + + doc = "python %s object this node was collected from (can be None)." % ( + name.lower(), + ) + return property(get, None, None, doc) + + +def pytest_addoption(parser): + group = parser.getgroup("general") + group.addoption( + "--fixtures", + "--funcargs", + action="store_true", + dest="showfixtures", + default=False, + help="show available fixtures, sorted by plugin appearance " + "(fixtures with leading '_' are only shown with '-v')", + ) + group.addoption( + "--fixtures-per-test", + action="store_true", + dest="show_fixtures_per_test", + default=False, + help="show fixtures per test", + ) + parser.addini( + "usefixtures", + type="args", + default=[], + help="list of default fixtures to be used with this project", + ) + parser.addini( + "python_files", + type="args", + default=["test_*.py", "*_test.py"], + help="glob-style file patterns for Python test module discovery", + ) + parser.addini( + "python_classes", + type="args", + default=["Test"], + help="prefixes or glob names for Python test class discovery", + ) + parser.addini( + "python_functions", + type="args", + default=["test"], + help="prefixes or glob names for Python test function and method discovery", + ) + + group.addoption( + "--import-mode", + default="prepend", + choices=["prepend", "append"], + dest="importmode", + help="prepend/append to sys.path when importing test modules, " + "default is to prepend.", + ) + + +def pytest_cmdline_main(config): + if config.option.showfixtures: + showfixtures(config) + return 0 + if config.option.show_fixtures_per_test: + show_fixtures_per_test(config) + return 0 + + +def pytest_generate_tests(metafunc): + # those alternative spellings are common - raise a specific error to alert + # the user + alt_spellings = ["parameterize", "parametrise", "parameterise"] + for attr in alt_spellings: + if hasattr(metafunc.function, attr): + msg = "{0} has '{1}' mark, spelling should be 'parametrize'" + fail(msg.format(metafunc.function.__name__, attr), pytrace=False) + for marker in metafunc.definition.iter_markers(name="parametrize"): + metafunc.parametrize(*marker.args, **marker.kwargs) + + +def pytest_configure(config): + config.addinivalue_line( + "markers", + "parametrize(argnames, argvalues): call a test function multiple " + "times passing in different arguments in turn. argvalues generally " + "needs to be a list of values if argnames specifies only one name " + "or a list of tuples of values if argnames specifies multiple names. 
" + "Example: @parametrize('arg1', [1,2]) would lead to two calls of the " + "decorated test function, one with arg1=1 and another with arg1=2." + "see https://docs.pytest.org/en/latest/parametrize.html for more info " + "and examples.", + ) + config.addinivalue_line( + "markers", + "usefixtures(fixturename1, fixturename2, ...): mark tests as needing " + "all of the specified fixtures. see " + "https://docs.pytest.org/en/latest/fixture.html#usefixtures ", + ) + + +@hookimpl(trylast=True) +def pytest_pyfunc_call(pyfuncitem): + testfunction = pyfuncitem.obj + if pyfuncitem._isyieldedfunction(): + testfunction(*pyfuncitem._args) + else: + funcargs = pyfuncitem.funcargs + testargs = {} + for arg in pyfuncitem._fixtureinfo.argnames: + testargs[arg] = funcargs[arg] + testfunction(**testargs) + return True + + +def pytest_collect_file(path, parent): + ext = path.ext + if ext == ".py": + if not parent.session.isinitpath(path): + if not path_matches_patterns( + path, parent.config.getini("python_files") + ["__init__.py"] + ): + return + ihook = parent.session.gethookproxy(path) + return ihook.pytest_pycollect_makemodule(path=path, parent=parent) + + +def path_matches_patterns(path, patterns): + """Returns True if the given py.path.local matches one of the patterns in the list of globs given""" + return any(path.fnmatch(pattern) for pattern in patterns) + + +def pytest_pycollect_makemodule(path, parent): + if path.basename == "__init__.py": + return Package(path, parent) + return Module(path, parent) + + +@hookimpl(hookwrapper=True) +def pytest_pycollect_makeitem(collector, name, obj): + outcome = yield + res = outcome.get_result() + if res is not None: + return + # nothing was collected elsewhere, let's do it here + if isclass(obj): + if collector.istestclass(obj, name): + Class = collector._getcustomclass("Class") + outcome.force_result(Class(name, parent=collector)) + elif collector.istestfunction(obj, name): + # mock seems to store unbound methods (issue473), normalize it + obj = getattr(obj, "__func__", obj) + # We need to try and unwrap the function if it's a functools.partial + # or a funtools.wrapped. + # We musn't if it's been wrapped with mock.patch (python 2 only) + if not (isfunction(obj) or isfunction(get_real_func(obj))): + filename, lineno = getfslineno(obj) + warnings.warn_explicit( + message=PytestWarning( + "cannot collect %r because it is not a function." 
% name + ), + category=None, + filename=str(filename), + lineno=lineno + 1, + ) + elif getattr(obj, "__test__", True): + if is_generator(obj): + res = Generator(name, parent=collector) + else: + res = list(collector._genfunctions(name, obj)) + outcome.force_result(res) + + +def pytest_make_parametrize_id(config, val, argname=None): + return None + + +class PyobjContext(object): + module = pyobj_property("Module") + cls = pyobj_property("Class") + instance = pyobj_property("Instance") + + +class PyobjMixin(PyobjContext): + _ALLOW_MARKERS = True + + def __init__(self, *k, **kw): + super(PyobjMixin, self).__init__(*k, **kw) + + def obj(): + def fget(self): + obj = getattr(self, "_obj", None) + if obj is None: + self._obj = obj = self._getobj() + # XXX evil hack + # used to avoid Instance collector marker duplication + if self._ALLOW_MARKERS: + self.own_markers.extend(get_unpacked_marks(self.obj)) + return obj + + def fset(self, value): + self._obj = value + + return property(fget, fset, None, "underlying python object") + + obj = obj() + + def _getobj(self): + return getattr(self.parent.obj, self.name) + + def getmodpath(self, stopatmodule=True, includemodule=False): + """ return python path relative to the containing module. """ + chain = self.listchain() + chain.reverse() + parts = [] + for node in chain: + if isinstance(node, Instance): + continue + name = node.name + if isinstance(node, Module): + name = os.path.splitext(name)[0] + if stopatmodule: + if includemodule: + parts.append(name) + break + parts.append(name) + parts.reverse() + s = ".".join(parts) + return s.replace(".[", "[") + + def _getfslineno(self): + return getfslineno(self.obj) + + def reportinfo(self): + # XXX caching? + obj = self.obj + compat_co_firstlineno = getattr(obj, "compat_co_firstlineno", None) + if isinstance(compat_co_firstlineno, int): + # nose compatibility + fspath = sys.modules[obj.__module__].__file__ + if fspath.endswith(".pyc"): + fspath = fspath[:-1] + lineno = compat_co_firstlineno + else: + fspath, lineno = getfslineno(obj) + modpath = self.getmodpath() + assert isinstance(lineno, int) + return fspath, lineno, modpath + + +class PyCollector(PyobjMixin, nodes.Collector): + def funcnamefilter(self, name): + return self._matches_prefix_or_glob_option("python_functions", name) + + def isnosetest(self, obj): + """ Look for the __test__ attribute, which is applied by the + @nose.tools.istest decorator + """ + # We explicitly check for "is True" here to not mistakenly treat + # classes with a custom __getattr__ returning something truthy (like a + # function) as test classes. + return safe_getattr(obj, "__test__", False) is True + + def classnamefilter(self, name): + return self._matches_prefix_or_glob_option("python_classes", name) + + def istestfunction(self, obj, name): + if self.funcnamefilter(name) or self.isnosetest(obj): + if isinstance(obj, staticmethod): + # static methods need to be unwrapped + obj = safe_getattr(obj, "__func__", False) + return ( + safe_getattr(obj, "__call__", False) + and fixtures.getfixturemarker(obj) is None + ) + else: + return False + + def istestclass(self, obj, name): + return self.classnamefilter(name) or self.isnosetest(obj) + + def _matches_prefix_or_glob_option(self, option_name, name): + """ + checks if the given name matches the prefix or glob-pattern defined + in ini configuration. 
+ """ + for option in self.config.getini(option_name): + if name.startswith(option): + return True + # check that name looks like a glob-string before calling fnmatch + # because this is called for every name in each collected module, + # and fnmatch is somewhat expensive to call + elif ("*" in option or "?" in option or "[" in option) and fnmatch.fnmatch( + name, option + ): + return True + return False + + def collect(self): + if not getattr(self.obj, "__test__", True): + return [] + + # NB. we avoid random getattrs and peek in the __dict__ instead + # (XXX originally introduced from a PyPy need, still true?) + dicts = [getattr(self.obj, "__dict__", {})] + for basecls in inspect.getmro(self.obj.__class__): + dicts.append(basecls.__dict__) + seen = {} + values = [] + for dic in dicts: + for name, obj in list(dic.items()): + if name in seen: + continue + seen[name] = True + res = self._makeitem(name, obj) + if res is None: + continue + if not isinstance(res, list): + res = [res] + values.extend(res) + values.sort(key=lambda item: item.reportinfo()[:2]) + return values + + def makeitem(self, name, obj): + warnings.warn(deprecated.COLLECTOR_MAKEITEM, stacklevel=2) + self._makeitem(name, obj) + + def _makeitem(self, name, obj): + # assert self.ihook.fspath == self.fspath, self + return self.ihook.pytest_pycollect_makeitem(collector=self, name=name, obj=obj) + + def _genfunctions(self, name, funcobj): + module = self.getparent(Module).obj + clscol = self.getparent(Class) + cls = clscol and clscol.obj or None + transfer_markers(funcobj, cls, module) + fm = self.session._fixturemanager + + definition = FunctionDefinition(name=name, parent=self, callobj=funcobj) + fixtureinfo = fm.getfixtureinfo(definition, funcobj, cls) + + metafunc = Metafunc( + definition, fixtureinfo, self.config, cls=cls, module=module + ) + methods = [] + if hasattr(module, "pytest_generate_tests"): + methods.append(module.pytest_generate_tests) + if hasattr(cls, "pytest_generate_tests"): + methods.append(cls().pytest_generate_tests) + if methods: + self.ihook.pytest_generate_tests.call_extra( + methods, dict(metafunc=metafunc) + ) + else: + self.ihook.pytest_generate_tests(metafunc=metafunc) + + Function = self._getcustomclass("Function") + if not metafunc._calls: + yield Function(name, parent=self, fixtureinfo=fixtureinfo) + else: + # add funcargs() as fixturedefs to fixtureinfo.arg2fixturedefs + fixtures.add_funcarg_pseudo_fixture_def(self, metafunc, fm) + + # add_funcarg_pseudo_fixture_def may have shadowed some fixtures + # with direct parametrization, so make sure we update what the + # function really needs. + fixtureinfo.prune_dependency_tree() + + for callspec in metafunc._calls: + subname = "%s[%s]" % (name, callspec.id) + yield Function( + name=subname, + parent=self, + callspec=callspec, + callobj=funcobj, + fixtureinfo=fixtureinfo, + keywords={callspec.id: True}, + originalname=name, + ) + + +class Module(nodes.File, PyCollector): + """ Collector for test classes and functions. 
""" + + def _getobj(self): + return self._importtestmodule() + + def collect(self): + self.session._fixturemanager.parsefactories(self) + return super(Module, self).collect() + + def _importtestmodule(self): + # we assume we are only called once per module + importmode = self.config.getoption("--import-mode") + try: + mod = self.fspath.pyimport(ensuresyspath=importmode) + except SyntaxError: + raise self.CollectError( + _pytest._code.ExceptionInfo().getrepr(style="short") + ) + except self.fspath.ImportMismatchError: + e = sys.exc_info()[1] + raise self.CollectError( + "import file mismatch:\n" + "imported module %r has this __file__ attribute:\n" + " %s\n" + "which is not the same as the test file we want to collect:\n" + " %s\n" + "HINT: remove __pycache__ / .pyc files and/or use a " + "unique basename for your test file modules" % e.args + ) + except ImportError: + from _pytest._code.code import ExceptionInfo + + exc_info = ExceptionInfo() + if self.config.getoption("verbose") < 2: + exc_info.traceback = exc_info.traceback.filter(filter_traceback) + exc_repr = ( + exc_info.getrepr(style="short") + if exc_info.traceback + else exc_info.exconly() + ) + formatted_tb = safe_str(exc_repr) + raise self.CollectError( + "ImportError while importing test module '{fspath}'.\n" + "Hint: make sure your test modules/packages have valid Python names.\n" + "Traceback:\n" + "{traceback}".format(fspath=self.fspath, traceback=formatted_tb) + ) + except _pytest.runner.Skipped as e: + if e.allow_module_level: + raise + raise self.CollectError( + "Using pytest.skip outside of a test is not allowed. " + "To decorate a test function, use the @pytest.mark.skip " + "or @pytest.mark.skipif decorators instead, and to skip a " + "module use `pytestmark = pytest.mark.{skip,skipif}." 
+            )
+        self.config.pluginmanager.consider_module(mod)
+        return mod
+
+    def setup(self):
+        setup_module = _get_xunit_setup_teardown(self.obj, "setUpModule")
+        if setup_module is None:
+            setup_module = _get_xunit_setup_teardown(self.obj, "setup_module")
+        if setup_module is not None:
+            setup_module()
+
+        teardown_module = _get_xunit_setup_teardown(self.obj, "tearDownModule")
+        if teardown_module is None:
+            teardown_module = _get_xunit_setup_teardown(self.obj, "teardown_module")
+        if teardown_module is not None:
+            self.addfinalizer(teardown_module)
+
+
+class Package(Module):
+    def __init__(self, fspath, parent=None, config=None, session=None, nodeid=None):
+        session = parent.session
+        nodes.FSCollector.__init__(
+            self, fspath, parent=parent, config=config, session=session, nodeid=nodeid
+        )
+        self.name = fspath.dirname
+        self.trace = session.trace
+        self._norecursepatterns = session._norecursepatterns
+        self.fspath = fspath
+
+    def _recurse(self, path):
+        ihook = self.gethookproxy(path.dirpath())
+        if ihook.pytest_ignore_collect(path=path, config=self.config):
+            return False
+        for pat in self._norecursepatterns:
+            if path.check(fnmatch=pat):
+                return False
+        ihook = self.gethookproxy(path)
+        ihook.pytest_collect_directory(path=path, parent=self)
+        return True
+
+    def gethookproxy(self, fspath):
+        # check if we have the common case of running
+        # hooks with all conftest.py files
+        pm = self.config.pluginmanager
+        my_conftestmodules = pm._getconftestmodules(fspath)
+        remove_mods = pm._conftest_plugins.difference(my_conftestmodules)
+        if remove_mods:
+            # one or more conftests are not in use at this fspath
+            proxy = FSHookProxy(fspath, pm, remove_mods)
+        else:
+            # all plugins are active for this fspath
+            proxy = self.config.hook
+        return proxy
+
+    def _collectfile(self, path):
+        ihook = self.gethookproxy(path)
+        if not self.isinitpath(path):
+            if ihook.pytest_ignore_collect(path=path, config=self.config):
+                return ()
+        return ihook.pytest_collect_file(path=path, parent=self)
+
+    def isinitpath(self, path):
+        return path in self.session._initialpaths
+
+    def collect(self):
+        # XXX: HACK!
+        # Before starting to collect any files from this package we need
+        # to cleanup the duplicate paths added by the session's collect().
+        # Proper fix is to not track these as duplicates in the first place.
+ for path in list(self.session.config.pluginmanager._duplicatepaths): + # if path.parts()[:len(self.fspath.dirpath().parts())] == self.fspath.dirpath().parts(): + if path.dirname.startswith(self.name): + self.session.config.pluginmanager._duplicatepaths.remove(path) + + this_path = self.fspath.dirpath() + init_module = this_path.join("__init__.py") + if init_module.check(file=1) and path_matches_patterns( + init_module, self.config.getini("python_files") + ): + yield Module(init_module, self) + pkg_prefixes = set() + for path in this_path.visit(rec=self._recurse, bf=True, sort=True): + # we will visit our own __init__.py file, in which case we skip it + skip = False + if path.basename == "__init__.py" and path.dirpath() == this_path: + continue + + for pkg_prefix in pkg_prefixes: + if ( + pkg_prefix in path.parts() + and pkg_prefix.join("__init__.py") != path + ): + skip = True + + if skip: + continue + + if path.isdir() and path.join("__init__.py").check(file=1): + pkg_prefixes.add(path) + + for x in self._collectfile(path): + yield x + + +def _get_xunit_setup_teardown(holder, attr_name, param_obj=None): + """ + Return a callable to perform xunit-style setup or teardown if + the function exists in the ``holder`` object. + The ``param_obj`` parameter is the parameter which will be passed to the function + when the callable is called without arguments, defaults to the ``holder`` object. + Return ``None`` if a suitable callable is not found. + """ + param_obj = param_obj if param_obj is not None else holder + result = _get_xunit_func(holder, attr_name) + if result is not None: + arg_count = result.__code__.co_argcount + if inspect.ismethod(result): + arg_count -= 1 + if arg_count: + return lambda: result(param_obj) + else: + return result + + +def _get_xunit_func(obj, name): + """Return the attribute from the given object to be used as a setup/teardown + xunit-style function, but only if not marked as a fixture to + avoid calling it twice. + """ + meth = getattr(obj, name, None) + if fixtures.getfixturemarker(meth) is None: + return meth + + +class Class(PyCollector): + """ Collector for test methods. 
""" + + def collect(self): + if not safe_getattr(self.obj, "__test__", True): + return [] + if hasinit(self.obj): + self.warn( + PytestWarning( + "cannot collect test class %r because it has a " + "__init__ constructor" % self.obj.__name__ + ) + ) + return [] + elif hasnew(self.obj): + self.warn( + PytestWarning( + "cannot collect test class %r because it has a " + "__new__ constructor" % self.obj.__name__ + ) + ) + return [] + return [self._getcustomclass("Instance")(name="()", parent=self)] + + def setup(self): + setup_class = _get_xunit_func(self.obj, "setup_class") + if setup_class is not None: + setup_class = getimfunc(setup_class) + setup_class(self.obj) + + fin_class = getattr(self.obj, "teardown_class", None) + if fin_class is not None: + fin_class = getimfunc(fin_class) + self.addfinalizer(lambda: fin_class(self.obj)) + + +class Instance(PyCollector): + _ALLOW_MARKERS = False # hack, destroy later + # instances share the object with their parents in a way + # that duplicates markers instances if not taken out + # can be removed at node strucutre reorganization time + + def _getobj(self): + return self.parent.obj() + + def collect(self): + self.session._fixturemanager.parsefactories(self) + return super(Instance, self).collect() + + def newinstance(self): + self.obj = self._getobj() + return self.obj + + +class FunctionMixin(PyobjMixin): + """ mixin for the code common to Function and Generator. + """ + + def setup(self): + """ perform setup for this test function. """ + if hasattr(self, "_preservedparent"): + obj = self._preservedparent + elif isinstance(self.parent, Instance): + obj = self.parent.newinstance() + self.obj = self._getobj() + else: + obj = self.parent.obj + if inspect.ismethod(self.obj): + setup_name = "setup_method" + teardown_name = "teardown_method" + else: + setup_name = "setup_function" + teardown_name = "teardown_function" + setup_func_or_method = _get_xunit_setup_teardown( + obj, setup_name, param_obj=self.obj + ) + if setup_func_or_method is not None: + setup_func_or_method() + teardown_func_or_method = _get_xunit_setup_teardown( + obj, teardown_name, param_obj=self.obj + ) + if teardown_func_or_method is not None: + self.addfinalizer(teardown_func_or_method) + + def _prunetraceback(self, excinfo): + if hasattr(self, "_obj") and not self.config.option.fulltrace: + code = _pytest._code.Code(get_real_func(self.obj)) + path, firstlineno = code.path, code.firstlineno + traceback = excinfo.traceback + ntraceback = traceback.cut(path=path, firstlineno=firstlineno) + if ntraceback == traceback: + ntraceback = ntraceback.cut(path=path) + if ntraceback == traceback: + ntraceback = ntraceback.filter(filter_traceback) + if not ntraceback: + ntraceback = traceback + + excinfo.traceback = ntraceback.filter() + # issue364: mark all but first and last frames to + # only show a single-line message for each frame + if self.config.option.tbstyle == "auto": + if len(excinfo.traceback) > 2: + for entry in excinfo.traceback[1:-1]: + entry.set_repr_style("short") + + def repr_failure(self, excinfo, outerr=None): + assert outerr is None, "XXX outerr usage is deprecated" + style = self.config.option.tbstyle + if style == "auto": + style = "long" + return self._repr_failure_py(excinfo, style=style) + + +class Generator(FunctionMixin, PyCollector): + def collect(self): + # test generators are seen as collectors but they also + # invoke setup/teardown on popular request + # (induced by the common "test_*" naming shared with normal tests) + from _pytest import deprecated + + 
self.session._setupstate.prepare(self) + # see FunctionMixin.setup and test_setupstate_is_preserved_134 + self._preservedparent = self.parent.obj + values = [] + seen = {} + for i, x in enumerate(self.obj()): + name, call, args = self.getcallargs(x) + if not callable(call): + raise TypeError("%r yielded non callable test %r" % (self.obj, call)) + if name is None: + name = "[%d]" % i + else: + name = "['%s']" % name + if name in seen: + raise ValueError( + "%r generated tests with non-unique name %r" % (self, name) + ) + seen[name] = True + with warnings.catch_warnings(): + # ignore our own deprecation warning + function_class = self.Function + values.append(function_class(name, self, args=args, callobj=call)) + self.warn(deprecated.YIELD_TESTS) + return values + + def getcallargs(self, obj): + if not isinstance(obj, (tuple, list)): + obj = (obj,) + # explicit naming + if isinstance(obj[0], six.string_types): + name = obj[0] + obj = obj[1:] + else: + name = None + call, args = obj[0], obj[1:] + return name, call, args + + +def hasinit(obj): + init = getattr(obj, "__init__", None) + if init: + return init != object.__init__ + + +def hasnew(obj): + new = getattr(obj, "__new__", None) + if new: + return new != object.__new__ + + +class CallSpec2(object): + def __init__(self, metafunc): + self.metafunc = metafunc + self.funcargs = {} + self._idlist = [] + self.params = {} + self._globalid = NOTSET + self._globalparam = NOTSET + self._arg2scopenum = {} # used for sorting parametrized resources + self.marks = [] + self.indices = {} + + def copy(self): + cs = CallSpec2(self.metafunc) + cs.funcargs.update(self.funcargs) + cs.params.update(self.params) + cs.marks.extend(self.marks) + cs.indices.update(self.indices) + cs._arg2scopenum.update(self._arg2scopenum) + cs._idlist = list(self._idlist) + cs._globalid = self._globalid + cs._globalparam = self._globalparam + return cs + + def _checkargnotcontained(self, arg): + if arg in self.params or arg in self.funcargs: + raise ValueError("duplicate %r" % (arg,)) + + def getparam(self, name): + try: + return self.params[name] + except KeyError: + if self._globalparam is NOTSET: + raise ValueError(name) + return self._globalparam + + @property + def id(self): + return "-".join(map(str, filter(None, self._idlist))) + + def setmulti2(self, valtypes, argnames, valset, id, marks, scopenum, param_index): + for arg, val in zip(argnames, valset): + self._checkargnotcontained(arg) + valtype_for_arg = valtypes[arg] + getattr(self, valtype_for_arg)[arg] = val + self.indices[arg] = param_index + self._arg2scopenum[arg] = scopenum + self._idlist.append(id) + self.marks.extend(normalize_mark_list(marks)) + + def setall(self, funcargs, id, param): + for x in funcargs: + self._checkargnotcontained(x) + self.funcargs.update(funcargs) + if id is not NOTSET: + self._idlist.append(id) + if param is not NOTSET: + assert self._globalparam is NOTSET + self._globalparam = param + for arg in funcargs: + self._arg2scopenum[arg] = fixtures.scopenum_function + + +class Metafunc(fixtures.FuncargnamesCompatAttr): + """ + Metafunc objects are passed to the :func:`pytest_generate_tests <_pytest.hookspec.pytest_generate_tests>` hook. + They help to inspect a test function and to generate tests according to + test configuration or values specified in the class or module where a + test function is defined. 
+ """ + + def __init__(self, definition, fixtureinfo, config, cls=None, module=None): + assert ( + isinstance(definition, FunctionDefinition) + or type(definition).__name__ == "DefinitionMock" + ) + self.definition = definition + + #: access to the :class:`_pytest.config.Config` object for the test session + self.config = config + + #: the module object where the test function is defined in. + self.module = module + + #: underlying python test function + self.function = definition.obj + + #: set of fixture names required by the test function + self.fixturenames = fixtureinfo.names_closure + + #: class object where the test function is defined in or ``None``. + self.cls = cls + + self._calls = [] + self._ids = set() + self._arg2fixturedefs = fixtureinfo.name2fixturedefs + + def parametrize(self, argnames, argvalues, indirect=False, ids=None, scope=None): + """ Add new invocations to the underlying test function using the list + of argvalues for the given argnames. Parametrization is performed + during the collection phase. If you need to setup expensive resources + see about setting indirect to do it rather at test setup time. + + :arg argnames: a comma-separated string denoting one or more argument + names, or a list/tuple of argument strings. + + :arg argvalues: The list of argvalues determines how often a + test is invoked with different argument values. If only one + argname was specified argvalues is a list of values. If N + argnames were specified, argvalues must be a list of N-tuples, + where each tuple-element specifies a value for its respective + argname. + + :arg indirect: The list of argnames or boolean. A list of arguments' + names (subset of argnames). If True the list contains all names from + the argnames. Each argvalue corresponding to an argname in this list will + be passed as request.param to its respective argname fixture + function so that it can perform more expensive setups during the + setup phase of a test rather than at collection time. + + :arg ids: list of string ids, or a callable. + If strings, each is corresponding to the argvalues so that they are + part of the test id. If None is given as id of specific test, the + automatically generated id for that argument will be used. + If callable, it should take one argument (a single argvalue) and return + a string or return None. If None, the automatically generated id for that + argument will be used. + If no ids are provided they will be generated automatically from + the argvalues. + + :arg scope: if specified it denotes the scope of the parameters. + The scope is used for grouping tests by parameter instances. + It will also override any fixture-function defined scope, allowing + to set a dynamic scope using test context or configuration. 
+ """ + from _pytest.fixtures import scope2index + from _pytest.mark import ParameterSet + + argnames, parameters = ParameterSet._for_parametrize( + argnames, + argvalues, + self.function, + self.config, + function_definition=self.definition, + ) + del argvalues + + if scope is None: + scope = _find_parametrized_scope(argnames, self._arg2fixturedefs, indirect) + + self._validate_if_using_arg_names(argnames, indirect) + + arg_values_types = self._resolve_arg_value_types(argnames, indirect) + + ids = self._resolve_arg_ids(argnames, ids, parameters, item=self.definition) + + scopenum = scope2index( + scope, descr="parametrize() call in {}".format(self.function.__name__) + ) + + # create the new calls: if we are parametrize() multiple times (by applying the decorator + # more than once) then we accumulate those calls generating the cartesian product + # of all calls + newcalls = [] + for callspec in self._calls or [CallSpec2(self)]: + for param_index, (param_id, param_set) in enumerate(zip(ids, parameters)): + newcallspec = callspec.copy() + newcallspec.setmulti2( + arg_values_types, + argnames, + param_set.values, + param_id, + param_set.marks, + scopenum, + param_index, + ) + newcalls.append(newcallspec) + self._calls = newcalls + + def _resolve_arg_ids(self, argnames, ids, parameters, item): + """Resolves the actual ids for the given argnames, based on the ``ids`` parameter given + to ``parametrize``. + + :param List[str] argnames: list of argument names passed to ``parametrize()``. + :param ids: the ids parameter of the parametrized call (see docs). + :param List[ParameterSet] parameters: the list of parameter values, same size as ``argnames``. + :param Item item: the item that generated this parametrized call. + :rtype: List[str] + :return: the list of ids for each argname given + """ + from py.io import saferepr + + idfn = None + if callable(ids): + idfn = ids + ids = None + if ids: + func_name = self.function.__name__ + if len(ids) != len(parameters): + msg = "In {}: {} parameter sets specified, with different number of ids: {}" + fail(msg.format(func_name, len(parameters), len(ids)), pytrace=False) + for id_value in ids: + if id_value is not None and not isinstance(id_value, six.string_types): + msg = "In {}: ids must be list of strings, found: {} (type: {!r})" + fail( + msg.format(func_name, saferepr(id_value), type(id_value)), + pytrace=False, + ) + ids = idmaker(argnames, parameters, idfn, ids, self.config, item=item) + return ids + + def _resolve_arg_value_types(self, argnames, indirect): + """Resolves if each parametrized argument must be considered a parameter to a fixture or a "funcarg" + to the function, based on the ``indirect`` parameter of the parametrized() call. + + :param List[str] argnames: list of argument names passed to ``parametrize()``. + :param indirect: same ``indirect`` parameter of ``parametrize()``. + :rtype: Dict[str, str] + A dict mapping each arg name to either: + * "params" if the argname should be the parameter of a fixture of the same name. + * "funcargs" if the argname should be a parameter to the parametrized test function. 
+ """ + valtypes = {} + if indirect is True: + valtypes = dict.fromkeys(argnames, "params") + elif indirect is False: + valtypes = dict.fromkeys(argnames, "funcargs") + elif isinstance(indirect, (tuple, list)): + valtypes = dict.fromkeys(argnames, "funcargs") + for arg in indirect: + if arg not in argnames: + fail( + "In {}: indirect fixture '{}' doesn't exist".format( + self.function.__name__, arg + ), + pytrace=False, + ) + valtypes[arg] = "params" + return valtypes + + def _validate_if_using_arg_names(self, argnames, indirect): + """ + Check if all argnames are being used, by default values, or directly/indirectly. + + :param List[str] argnames: list of argument names passed to ``parametrize()``. + :param indirect: same ``indirect`` parameter of ``parametrize()``. + :raise ValueError: if validation fails. + """ + default_arg_names = set(get_default_arg_names(self.function)) + func_name = self.function.__name__ + for arg in argnames: + if arg not in self.fixturenames: + if arg in default_arg_names: + fail( + "In {}: function already takes an argument '{}' with a default value".format( + func_name, arg + ), + pytrace=False, + ) + else: + if isinstance(indirect, (tuple, list)): + name = "fixture" if arg in indirect else "argument" + else: + name = "fixture" if indirect else "argument" + fail( + "In {}: function uses no {} '{}'".format(func_name, name, arg), + pytrace=False, + ) + + def addcall(self, funcargs=None, id=NOTSET, param=NOTSET): + """ Add a new call to the underlying test function during the collection phase of a test run. + + .. deprecated:: 3.3 + + Use :meth:`parametrize` instead. + + Note that request.addcall() is called during the test collection phase prior and + independently to actual test execution. You should only use addcall() + if you need to specify multiple arguments of a test function. + + :arg funcargs: argument keyword dictionary used when invoking + the test function. + + :arg id: used for reporting and identification purposes. If you + don't supply an `id` an automatic unique id will be generated. + + :arg param: a parameter which will be exposed to a later fixture function + invocation through the ``request.param`` attribute. + """ + warnings.warn(deprecated.METAFUNC_ADD_CALL, stacklevel=2) + + assert funcargs is None or isinstance(funcargs, dict) + if funcargs is not None: + for name in funcargs: + if name not in self.fixturenames: + fail("funcarg %r not used in this function." % name) + else: + funcargs = {} + if id is None: + raise ValueError("id=None not allowed") + if id is NOTSET: + id = len(self._calls) + id = str(id) + if id in self._ids: + raise ValueError("duplicate id %r" % id) + self._ids.add(id) + + cs = CallSpec2(self) + cs.setall(funcargs, id, param) + self._calls.append(cs) + + +def _find_parametrized_scope(argnames, arg2fixturedefs, indirect): + """Find the most appropriate scope for a parametrized call based on its arguments. + + When there's at least one direct argument, always use "function" scope. + + When a test function is parametrized and all its arguments are indirect + (e.g. fixtures), return the most narrow scope based on the fixtures used. + + Related to issue #1832, based on code posted by @Kingdread. 
+ """ + from _pytest.fixtures import scopes + + if isinstance(indirect, (list, tuple)): + all_arguments_are_fixtures = len(indirect) == len(argnames) + else: + all_arguments_are_fixtures = bool(indirect) + + if all_arguments_are_fixtures: + fixturedefs = arg2fixturedefs or {} + used_scopes = [ + fixturedef[0].scope + for name, fixturedef in fixturedefs.items() + if name in argnames + ] + if used_scopes: + # Takes the most narrow scope from used fixtures + for scope in reversed(scopes): + if scope in used_scopes: + return scope + + return "function" + + +def _idval(val, argname, idx, idfn, item, config): + if idfn: + s = None + try: + s = idfn(val) + except Exception as e: + # See issue https://github.com/pytest-dev/pytest/issues/2169 + msg = ( + "While trying to determine id of parameter {} at position " + "{} the following exception was raised:\n".format(argname, idx) + ) + msg += " {}: {}\n".format(type(e).__name__, e) + msg += "This warning will be an error error in pytest-4.0." + item.warn(RemovedInPytest4Warning(msg)) + if s: + return ascii_escaped(s) + + if config: + hook_id = config.hook.pytest_make_parametrize_id( + config=config, val=val, argname=argname + ) + if hook_id: + return hook_id + + if isinstance(val, STRING_TYPES): + return ascii_escaped(val) + elif isinstance(val, (float, int, bool, NoneType)): + return str(val) + elif isinstance(val, REGEX_TYPE): + return ascii_escaped(val.pattern) + elif enum is not None and isinstance(val, enum.Enum): + return str(val) + elif (isclass(val) or isfunction(val)) and hasattr(val, "__name__"): + return val.__name__ + return str(argname) + str(idx) + + +def _idvalset(idx, parameterset, argnames, idfn, ids, item, config): + if parameterset.id is not None: + return parameterset.id + if ids is None or (idx >= len(ids) or ids[idx] is None): + this_id = [ + _idval(val, argname, idx, idfn, item=item, config=config) + for val, argname in zip(parameterset.values, argnames) + ] + return "-".join(this_id) + else: + return ascii_escaped(ids[idx]) + + +def idmaker(argnames, parametersets, idfn=None, ids=None, config=None, item=None): + ids = [ + _idvalset(valindex, parameterset, argnames, idfn, ids, config=config, item=item) + for valindex, parameterset in enumerate(parametersets) + ] + if len(set(ids)) != len(ids): + # The ids are not unique + duplicates = [testid for testid in ids if ids.count(testid) > 1] + counters = collections.defaultdict(lambda: 0) + for index, testid in enumerate(ids): + if testid in duplicates: + ids[index] = testid + str(counters[testid]) + counters[testid] += 1 + return ids + + +def show_fixtures_per_test(config): + from _pytest.main import wrap_session + + return wrap_session(config, _show_fixtures_per_test) + + +def _show_fixtures_per_test(config, session): + import _pytest.config + + session.perform_collect() + curdir = py.path.local() + tw = _pytest.config.create_terminal_writer(config) + verbose = config.getvalue("verbose") + + def get_best_relpath(func): + loc = getlocation(func, curdir) + return curdir.bestrelpath(loc) + + def write_fixture(fixture_def): + argname = fixture_def.argname + if verbose <= 0 and argname.startswith("_"): + return + if verbose > 0: + bestrel = get_best_relpath(fixture_def.func) + funcargspec = "{} -- {}".format(argname, bestrel) + else: + funcargspec = argname + tw.line(funcargspec, green=True) + fixture_doc = fixture_def.func.__doc__ + if fixture_doc: + write_docstring(tw, fixture_doc) + else: + tw.line(" no docstring available", red=True) + + def write_item(item): + try: + info = 
item._fixtureinfo + except AttributeError: + # doctests items have no _fixtureinfo attribute + return + if not info.name2fixturedefs: + # this test item does not use any fixtures + return + tw.line() + tw.sep("-", "fixtures used by {}".format(item.name)) + tw.sep("-", "({})".format(get_best_relpath(item.function))) + # dict key not used in loop but needed for sorting + for _, fixturedefs in sorted(info.name2fixturedefs.items()): + assert fixturedefs is not None + if not fixturedefs: + continue + # last item is expected to be the one used by the test item + write_fixture(fixturedefs[-1]) + + for session_item in session.items: + write_item(session_item) + + +def showfixtures(config): + from _pytest.main import wrap_session + + return wrap_session(config, _showfixtures_main) + + +def _showfixtures_main(config, session): + import _pytest.config + + session.perform_collect() + curdir = py.path.local() + tw = _pytest.config.create_terminal_writer(config) + verbose = config.getvalue("verbose") + + fm = session._fixturemanager + + available = [] + seen = set() + + for argname, fixturedefs in fm._arg2fixturedefs.items(): + assert fixturedefs is not None + if not fixturedefs: + continue + for fixturedef in fixturedefs: + loc = getlocation(fixturedef.func, curdir) + if (fixturedef.argname, loc) in seen: + continue + seen.add((fixturedef.argname, loc)) + available.append( + ( + len(fixturedef.baseid), + fixturedef.func.__module__, + curdir.bestrelpath(loc), + fixturedef.argname, + fixturedef, + ) + ) + + available.sort() + currentmodule = None + for baseid, module, bestrel, argname, fixturedef in available: + if currentmodule != module: + if not module.startswith("_pytest."): + tw.line() + tw.sep("-", "fixtures defined from %s" % (module,)) + currentmodule = module + if verbose <= 0 and argname[0] == "_": + continue + if verbose > 0: + funcargspec = "%s -- %s" % (argname, bestrel) + else: + funcargspec = argname + tw.line(funcargspec, green=True) + loc = getlocation(fixturedef.func, curdir) + doc = fixturedef.func.__doc__ or "" + if doc: + write_docstring(tw, doc) + else: + tw.line(" %s: no docstring available" % (loc,), red=True) + + +def write_docstring(tw, doc): + INDENT = " " + doc = doc.rstrip() + if "\n" in doc: + firstline, rest = doc.split("\n", 1) + else: + firstline, rest = doc, "" + + if firstline.strip(): + tw.line(INDENT + firstline.strip()) + + if rest: + for line in dedent(rest).split("\n"): + tw.write(INDENT + line + "\n") + + +class Function(FunctionMixin, nodes.Item, fixtures.FuncargnamesCompatAttr): + """ a Function Item is responsible for setting up and executing a + Python test function. 
+ """ + + _genid = None + # disable since functions handle it themselfes + _ALLOW_MARKERS = False + + def __init__( + self, + name, + parent, + args=None, + config=None, + callspec=None, + callobj=NOTSET, + keywords=None, + session=None, + fixtureinfo=None, + originalname=None, + ): + super(Function, self).__init__(name, parent, config=config, session=session) + self._args = args + if callobj is not NOTSET: + self.obj = callobj + + self.keywords.update(self.obj.__dict__) + self.own_markers.extend(get_unpacked_marks(self.obj)) + if callspec: + self.callspec = callspec + # this is total hostile and a mess + # keywords are broken by design by now + # this will be redeemed later + for mark in callspec.marks: + # feel free to cry, this was broken for years before + # and keywords cant fix it per design + self.keywords[mark.name] = mark + self.own_markers.extend(normalize_mark_list(callspec.marks)) + if keywords: + self.keywords.update(keywords) + + if fixtureinfo is None: + fixtureinfo = self.session._fixturemanager.getfixtureinfo( + self, self.obj, self.cls, funcargs=not self._isyieldedfunction() + ) + self._fixtureinfo = fixtureinfo + self.fixturenames = fixtureinfo.names_closure + self._initrequest() + + #: original function name, without any decorations (for example + #: parametrization adds a ``"[...]"`` suffix to function names). + #: + #: .. versionadded:: 3.0 + self.originalname = originalname + + def _initrequest(self): + self.funcargs = {} + if self._isyieldedfunction(): + assert not hasattr( + self, "callspec" + ), "yielded functions (deprecated) cannot have funcargs" + else: + if hasattr(self, "callspec"): + callspec = self.callspec + assert not callspec.funcargs + self._genid = callspec.id + if hasattr(callspec, "param"): + self.param = callspec.param + self._request = fixtures.FixtureRequest(self) + + @property + def function(self): + "underlying python 'function' object" + return getimfunc(self.obj) + + def _getobj(self): + name = self.name + i = name.find("[") # parametrization + if i != -1: + name = name[:i] + return getattr(self.parent.obj, name) + + @property + def _pyfuncitem(self): + "(compatonly) for code expecting pytest-2.2 style request objects" + return self + + def _isyieldedfunction(self): + return getattr(self, "_args", None) is not None + + def runtest(self): + """ execute the underlying test function. """ + self.ihook.pytest_pyfunc_call(pyfuncitem=self) + + def setup(self): + super(Function, self).setup() + fixtures.fillfixtures(self) + + +class FunctionDefinition(Function): + """ + internal hack until we get actual definition nodes instead of the + crappy metafunc hack + """ + + def runtest(self): + raise RuntimeError("function definitions are not supposed to be used") + + setup = runtest diff --git a/venv/Lib/site-packages/_pytest/python_api.py b/venv/Lib/site-packages/_pytest/python_api.py new file mode 100644 index 00000000..6cd17de2 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/python_api.py @@ -0,0 +1,721 @@ +import math +import pprint +import sys +from numbers import Number +from decimal import Decimal + +import six +from six.moves import zip, filterfalse +from more_itertools.more import always_iterable + +from _pytest.compat import isclass + +from _pytest.compat import Mapping, Sequence +from _pytest.compat import STRING_TYPES + +from _pytest.outcomes import fail +import _pytest._code + +BASE_TYPE = (type, STRING_TYPES) + + +def _cmp_raises_type_error(self, other): + """__cmp__ implementation which raises TypeError. 
Used + by Approx base classes to implement only == and != and raise a + TypeError for other comparisons. + + Needed in Python 2 only, Python 3 all it takes is not implementing the + other operators at all. + """ + __tracebackhide__ = True + raise TypeError( + "Comparison operators other than == and != not supported by approx objects" + ) + + +def _non_numeric_type_error(value, at): + at_str = " at {}".format(at) if at else "" + return TypeError( + "cannot make approximate comparisons to non-numeric values: {!r} {}".format( + value, at_str + ) + ) + + +# builtin pytest.approx helper + + +class ApproxBase(object): + """ + Provide shared utilities for making approximate comparisons between numbers + or sequences of numbers. + """ + + # Tell numpy to use our `__eq__` operator instead of its. + __array_ufunc__ = None + __array_priority__ = 100 + + def __init__(self, expected, rel=None, abs=None, nan_ok=False): + __tracebackhide__ = True + self.expected = expected + self.abs = abs + self.rel = rel + self.nan_ok = nan_ok + self._check_type() + + def __repr__(self): + raise NotImplementedError + + def __eq__(self, actual): + return all( + a == self._approx_scalar(x) for a, x in self._yield_comparisons(actual) + ) + + __hash__ = None + + def __ne__(self, actual): + return not (actual == self) + + if sys.version_info[0] == 2: + __cmp__ = _cmp_raises_type_error + + def _approx_scalar(self, x): + return ApproxScalar(x, rel=self.rel, abs=self.abs, nan_ok=self.nan_ok) + + def _yield_comparisons(self, actual): + """ + Yield all the pairs of numbers to be compared. This is used to + implement the `__eq__` method. + """ + raise NotImplementedError + + def _check_type(self): + """ + Raise a TypeError if the expected value is not a valid type. + """ + # This is only a concern if the expected value is a sequence. In every + # other case, the approx() function ensures that the expected value has + # a numeric type. For this reason, the default is to do nothing. The + # classes that deal with sequences should reimplement this method to + # raise if there are any non-numeric elements in the sequence. + pass + + +def _recursive_list_map(f, x): + if isinstance(x, list): + return list(_recursive_list_map(f, xi) for xi in x) + else: + return f(x) + + +class ApproxNumpy(ApproxBase): + """ + Perform approximate comparisons where the expected value is numpy array. + """ + + def __repr__(self): + list_scalars = _recursive_list_map(self._approx_scalar, self.expected.tolist()) + return "approx({!r})".format(list_scalars) + + if sys.version_info[0] == 2: + __cmp__ = _cmp_raises_type_error + + def __eq__(self, actual): + import numpy as np + + # self.expected is supposed to always be an array here + + if not np.isscalar(actual): + try: + actual = np.asarray(actual) + except: # noqa + raise TypeError("cannot compare '{}' to numpy.ndarray".format(actual)) + + if not np.isscalar(actual) and actual.shape != self.expected.shape: + return False + + return ApproxBase.__eq__(self, actual) + + def _yield_comparisons(self, actual): + import numpy as np + + # `actual` can either be a numpy array or a scalar, it is treated in + # `__eq__` before being passed to `ApproxBase.__eq__`, which is the + # only method that calls this one. 
+ + if np.isscalar(actual): + for i in np.ndindex(self.expected.shape): + yield actual, np.asscalar(self.expected[i]) + else: + for i in np.ndindex(self.expected.shape): + yield np.asscalar(actual[i]), np.asscalar(self.expected[i]) + + +class ApproxMapping(ApproxBase): + """ + Perform approximate comparisons where the expected value is a mapping with + numeric values (the keys can be anything). + """ + + def __repr__(self): + return "approx({!r})".format( + {k: self._approx_scalar(v) for k, v in self.expected.items()} + ) + + def __eq__(self, actual): + if set(actual.keys()) != set(self.expected.keys()): + return False + + return ApproxBase.__eq__(self, actual) + + def _yield_comparisons(self, actual): + for k in self.expected.keys(): + yield actual[k], self.expected[k] + + def _check_type(self): + __tracebackhide__ = True + for key, value in self.expected.items(): + if isinstance(value, type(self.expected)): + msg = "pytest.approx() does not support nested dictionaries: key={!r} value={!r}\n full mapping={}" + raise TypeError(msg.format(key, value, pprint.pformat(self.expected))) + elif not isinstance(value, Number): + raise _non_numeric_type_error(self.expected, at="key={!r}".format(key)) + + +class ApproxSequence(ApproxBase): + """ + Perform approximate comparisons where the expected value is a sequence of + numbers. + """ + + def __repr__(self): + seq_type = type(self.expected) + if seq_type not in (tuple, list, set): + seq_type = list + return "approx({!r})".format( + seq_type(self._approx_scalar(x) for x in self.expected) + ) + + def __eq__(self, actual): + if len(actual) != len(self.expected): + return False + return ApproxBase.__eq__(self, actual) + + def _yield_comparisons(self, actual): + return zip(actual, self.expected) + + def _check_type(self): + __tracebackhide__ = True + for index, x in enumerate(self.expected): + if isinstance(x, type(self.expected)): + msg = "pytest.approx() does not support nested data structures: {!r} at index {}\n full sequence: {}" + raise TypeError(msg.format(x, index, pprint.pformat(self.expected))) + elif not isinstance(x, Number): + raise _non_numeric_type_error( + self.expected, at="index {}".format(index) + ) + + +class ApproxScalar(ApproxBase): + """ + Perform approximate comparisons where the expected value is a single number. + """ + + DEFAULT_ABSOLUTE_TOLERANCE = 1e-12 + DEFAULT_RELATIVE_TOLERANCE = 1e-6 + + def __repr__(self): + """ + Return a string communicating both the expected value and the tolerance + for the comparison being made, e.g. '1.0 +- 1e-6'. Use the unicode + plus/minus symbol if this is python3 (it's too hard to get right for + python2). + """ + if isinstance(self.expected, complex): + return str(self.expected) + + # Infinities aren't compared using tolerances, so don't show a + # tolerance. + if math.isinf(self.expected): + return str(self.expected) + + # If a sensible tolerance can't be calculated, self.tolerance will + # raise a ValueError. In this case, display '???'. + try: + vetted_tolerance = "{:.1e}".format(self.tolerance) + except ValueError: + vetted_tolerance = "???" + + if sys.version_info[0] == 2: + return "{} +- {}".format(self.expected, vetted_tolerance) + else: + return u"{} \u00b1 {}".format(self.expected, vetted_tolerance) + + def __eq__(self, actual): + """ + Return true if the given value is equal to the expected value within + the pre-specified tolerance. + """ + if _is_numpy_array(actual): + # Call ``__eq__()`` manually to prevent infinite-recursion with + # numpy<1.13. See #3748. 
+ return all(self.__eq__(a) for a in actual.flat) + + # Short-circuit exact equality. + if actual == self.expected: + return True + + # Allow the user to control whether NaNs are considered equal to each + # other or not. The abs() calls are for compatibility with complex + # numbers. + if math.isnan(abs(self.expected)): + return self.nan_ok and math.isnan(abs(actual)) + + # Infinity shouldn't be approximately equal to anything but itself, but + # if there's a relative tolerance, it will be infinite and infinity + # will seem approximately equal to everything. The equal-to-itself + # case would have been short circuited above, so here we can just + # return false if the expected value is infinite. The abs() call is + # for compatibility with complex numbers. + if math.isinf(abs(self.expected)): + return False + + # Return true if the two numbers are within the tolerance. + return abs(self.expected - actual) <= self.tolerance + + __hash__ = None + + @property + def tolerance(self): + """ + Return the tolerance for the comparison. This could be either an + absolute tolerance or a relative tolerance, depending on what the user + specified or which would be larger. + """ + + def set_default(x, default): + return x if x is not None else default + + # Figure out what the absolute tolerance should be. ``self.abs`` is + # either None or a value specified by the user. + absolute_tolerance = set_default(self.abs, self.DEFAULT_ABSOLUTE_TOLERANCE) + + if absolute_tolerance < 0: + raise ValueError( + "absolute tolerance can't be negative: {}".format(absolute_tolerance) + ) + if math.isnan(absolute_tolerance): + raise ValueError("absolute tolerance can't be NaN.") + + # If the user specified an absolute tolerance but not a relative one, + # just return the absolute tolerance. + if self.rel is None: + if self.abs is not None: + return absolute_tolerance + + # Figure out what the relative tolerance should be. ``self.rel`` is + # either None or a value specified by the user. This is done after + # we've made sure the user didn't ask for an absolute tolerance only, + # because we don't want to raise errors about the relative tolerance if + # we aren't even going to use it. + relative_tolerance = set_default( + self.rel, self.DEFAULT_RELATIVE_TOLERANCE + ) * abs(self.expected) + + if relative_tolerance < 0: + raise ValueError( + "relative tolerance can't be negative: {}".format(absolute_tolerance) + ) + if math.isnan(relative_tolerance): + raise ValueError("relative tolerance can't be NaN.") + + # Return the larger of the relative and absolute tolerances. + return max(relative_tolerance, absolute_tolerance) + + +class ApproxDecimal(ApproxScalar): + """ + Perform approximate comparisons where the expected value is a decimal. + """ + + DEFAULT_ABSOLUTE_TOLERANCE = Decimal("1e-12") + DEFAULT_RELATIVE_TOLERANCE = Decimal("1e-6") + + +def approx(expected, rel=None, abs=None, nan_ok=False): + """ + Assert that two numbers (or two sets of numbers) are equal to each other + within some tolerance. + + Due to the `intricacies of floating-point arithmetic`__, numbers that we + would intuitively expect to be equal are not always so:: + + >>> 0.1 + 0.2 == 0.3 + False + + __ https://docs.python.org/3/tutorial/floatingpoint.html + + This problem is commonly encountered when writing tests, e.g. when making + sure that floating-point values are what you expect them to be. 
One way to + deal with this problem is to assert that two floating-point numbers are + equal to within some appropriate tolerance:: + + >>> abs((0.1 + 0.2) - 0.3) < 1e-6 + True + + However, comparisons like this are tedious to write and difficult to + understand. Furthermore, absolute comparisons like the one above are + usually discouraged because there's no tolerance that works well for all + situations. ``1e-6`` is good for numbers around ``1``, but too small for + very big numbers and too big for very small ones. It's better to express + the tolerance as a fraction of the expected value, but relative comparisons + like that are even more difficult to write correctly and concisely. + + The ``approx`` class performs floating-point comparisons using a syntax + that's as intuitive as possible:: + + >>> from pytest import approx + >>> 0.1 + 0.2 == approx(0.3) + True + + The same syntax also works for sequences of numbers:: + + >>> (0.1 + 0.2, 0.2 + 0.4) == approx((0.3, 0.6)) + True + + Dictionary *values*:: + + >>> {'a': 0.1 + 0.2, 'b': 0.2 + 0.4} == approx({'a': 0.3, 'b': 0.6}) + True + + ``numpy`` arrays:: + + >>> import numpy as np # doctest: +SKIP + >>> np.array([0.1, 0.2]) + np.array([0.2, 0.4]) == approx(np.array([0.3, 0.6])) # doctest: +SKIP + True + + And for a ``numpy`` array against a scalar:: + + >>> import numpy as np # doctest: +SKIP + >>> np.array([0.1, 0.2]) + np.array([0.2, 0.1]) == approx(0.3) # doctest: +SKIP + True + + By default, ``approx`` considers numbers within a relative tolerance of + ``1e-6`` (i.e. one part in a million) of its expected value to be equal. + This treatment would lead to surprising results if the expected value was + ``0.0``, because nothing but ``0.0`` itself is relatively close to ``0.0``. + To handle this case less surprisingly, ``approx`` also considers numbers + within an absolute tolerance of ``1e-12`` of its expected value to be + equal. Infinity and NaN are special cases. Infinity is only considered + equal to itself, regardless of the relative tolerance. NaN is not + considered equal to anything by default, but you can make it be equal to + itself by setting the ``nan_ok`` argument to True. (This is meant to + facilitate comparing arrays that use NaN to mean "no data".) + + Both the relative and absolute tolerances can be changed by passing + arguments to the ``approx`` constructor:: + + >>> 1.0001 == approx(1) + False + >>> 1.0001 == approx(1, rel=1e-3) + True + >>> 1.0001 == approx(1, abs=1e-3) + True + + If you specify ``abs`` but not ``rel``, the comparison will not consider + the relative tolerance at all. In other words, two numbers that are within + the default relative tolerance of ``1e-6`` will still be considered unequal + if they exceed the specified absolute tolerance. If you specify both + ``abs`` and ``rel``, the numbers will be considered equal if either + tolerance is met:: + + >>> 1 + 1e-8 == approx(1) + True + >>> 1 + 1e-8 == approx(1, abs=1e-12) + False + >>> 1 + 1e-8 == approx(1, rel=1e-6, abs=1e-12) + True + + If you're thinking about using ``approx``, then you might want to know how + it compares to other good ways of comparing floating-point numbers. All of + these algorithms are based on relative and absolute tolerances and should + agree for the most part, but they do have meaningful differences: + + - ``math.isclose(a, b, rel_tol=1e-9, abs_tol=0.0)``: True if the relative + tolerance is met w.r.t. either ``a`` or ``b`` or if the absolute + tolerance is met. Because the relative tolerance is calculated w.r.t. 
+      both ``a`` and ``b``, this test is symmetric (i.e. neither ``a`` nor
+      ``b`` is a "reference value"). You have to specify an absolute tolerance
+      if you want to compare to ``0.0`` because there is no tolerance by
+      default. Only available in python>=3.5. `More information...`__
+
+      __ https://docs.python.org/3/library/math.html#math.isclose
+
+    - ``numpy.isclose(a, b, rtol=1e-5, atol=1e-8)``: True if the difference
+      between ``a`` and ``b`` is less than the sum of the relative tolerance
+      w.r.t. ``b`` and the absolute tolerance. Because the relative tolerance
+      is only calculated w.r.t. ``b``, this test is asymmetric and you can
+      think of ``b`` as the reference value. Support for comparing sequences
+      is provided by ``numpy.allclose``. `More information...`__
+
+      __ http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.isclose.html
+
+    - ``unittest.TestCase.assertAlmostEqual(a, b)``: True if ``a`` and ``b``
+      are within an absolute tolerance of ``1e-7``. No relative tolerance is
+      considered and the absolute tolerance cannot be changed, so this function
+      is not appropriate for very large or very small numbers. Also, it's only
+      available in subclasses of ``unittest.TestCase`` and it's ugly because it
+      doesn't follow PEP8. `More information...`__
+
+      __ https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertAlmostEqual
+
+    - ``a == pytest.approx(b, rel=1e-6, abs=1e-12)``: True if the relative
+      tolerance is met w.r.t. ``b`` or if the absolute tolerance is met.
+      Because the relative tolerance is only calculated w.r.t. ``b``, this test
+      is asymmetric and you can think of ``b`` as the reference value. In the
+      special case that you explicitly specify an absolute tolerance but not a
+      relative tolerance, only the absolute tolerance is considered.
+
+    .. warning::
+
+       .. versionchanged:: 3.2
+
+       In order to avoid inconsistent behavior, ``TypeError`` is
+       raised for ``>``, ``>=``, ``<`` and ``<=`` comparisons.
+       The example below illustrates the problem::
+
+           assert approx(0.1) > 0.1 + 1e-10  # calls approx(0.1).__gt__(0.1 + 1e-10)
+           assert 0.1 + 1e-10 > approx(0.1)  # calls approx(0.1).__lt__(0.1 + 1e-10)
+
+       In the second example one expects ``approx(0.1).__le__(0.1 + 1e-10)``
+       to be called. But instead, ``approx(0.1).__lt__(0.1 + 1e-10)`` is used for
+       the comparison. This is because the call hierarchy of rich comparisons
+       follows a fixed behavior. `More information...`__
+
+       __ https://docs.python.org/3/reference/datamodel.html#object.__ge__
+    """
+
+    # Delegate the comparison to a class that knows how to deal with the type
+    # of the expected value (e.g. int, float, list, dict, numpy.array, etc).
+    #
+    # The primary responsibility of these classes is to implement ``__eq__()``
+    # and ``__repr__()``. The former is used to actually check if some
+    # "actual" value is equivalent to the given expected value within the
+    # allowed tolerance. The latter is used to show the user the expected
+    # value and tolerance, in the case that a test failed.
+    #
+    # The actual logic for making approximate comparisons can be found in
+    # ApproxScalar, which is used to compare individual numbers. All of the
+    # other Approx classes eventually delegate to this class. The ApproxBase
+    # class provides some convenient methods and overloads, but isn't really
+    # essential.
+ + __tracebackhide__ = True + + if isinstance(expected, Decimal): + cls = ApproxDecimal + elif isinstance(expected, Number): + cls = ApproxScalar + elif isinstance(expected, Mapping): + cls = ApproxMapping + elif isinstance(expected, Sequence) and not isinstance(expected, STRING_TYPES): + cls = ApproxSequence + elif _is_numpy_array(expected): + cls = ApproxNumpy + else: + raise _non_numeric_type_error(expected, at=None) + + return cls(expected, rel, abs, nan_ok) + + +def _is_numpy_array(obj): + """ + Return true if the given object is a numpy array. Make a special effort to + avoid importing numpy unless it's really necessary. + """ + import sys + + np = sys.modules.get("numpy") + if np is not None: + return isinstance(obj, np.ndarray) + return False + + +# builtin pytest.raises helper + + +def raises(expected_exception, *args, **kwargs): + r""" + Assert that a code block/function call raises ``expected_exception`` + and raise a failure exception otherwise. + + :arg message: if specified, provides a custom failure message if the + exception is not raised + :arg match: if specified, asserts that the exception matches a text or regex + + This helper produces a ``ExceptionInfo()`` object (see below). + + You may use this function as a context manager:: + + >>> with raises(ZeroDivisionError): + ... 1/0 + + .. versionchanged:: 2.10 + + In the context manager form you may use the keyword argument + ``message`` to specify a custom failure message:: + + >>> with raises(ZeroDivisionError, message="Expecting ZeroDivisionError"): + ... pass + Traceback (most recent call last): + ... + Failed: Expecting ZeroDivisionError + + .. note:: + + When using ``pytest.raises`` as a context manager, it's worthwhile to + note that normal context manager rules apply and that the exception + raised *must* be the final line in the scope of the context manager. + Lines of code after that, within the scope of the context manager will + not be executed. For example:: + + >>> value = 15 + >>> with raises(ValueError) as exc_info: + ... if value > 10: + ... raise ValueError("value must be <= 10") + ... assert exc_info.type == ValueError # this will not execute + + Instead, the following approach must be taken (note the difference in + scope):: + + >>> with raises(ValueError) as exc_info: + ... if value > 10: + ... raise ValueError("value must be <= 10") + ... + >>> assert exc_info.type == ValueError + + + Since version ``3.1`` you can use the keyword argument ``match`` to assert that the + exception matches a text or regex:: + + >>> with raises(ValueError, match='must be 0 or None'): + ... raise ValueError("value must be 0 or None") + + >>> with raises(ValueError, match=r'must be \d+$'): + ... raise ValueError("value must be 42") + + **Legacy forms** + + The forms below are fully supported but are discouraged for new code because the + context manager form is regarded as more readable and less error-prone. + + It is possible to specify a callable by passing a to-be-called lambda:: + + >>> raises(ZeroDivisionError, lambda: 1/0) + + + or you can specify an arbitrary callable with arguments:: + + >>> def f(x): return 1/x + ... + >>> raises(ZeroDivisionError, f, 0) + + >>> raises(ZeroDivisionError, f, x=0) + + + It is also possible to pass a string to be evaluated at runtime:: + + >>> raises(ZeroDivisionError, "f(0)") + + + The string will be evaluated using the same ``locals()`` and ``globals()`` + at the moment of the ``raises`` call. + + .. 
currentmodule:: _pytest._code + + Consult the API of ``excinfo`` objects: :class:`ExceptionInfo`. + + .. note:: + Similar to caught exception objects in Python, explicitly clearing + local references to returned ``ExceptionInfo`` objects can + help the Python interpreter speed up its garbage collection. + + Clearing those references breaks a reference cycle + (``ExceptionInfo`` --> caught exception --> frame stack raising + the exception --> current frame stack --> local variables --> + ``ExceptionInfo``) which makes Python keep all objects referenced + from that cycle (including all local variables in the current + frame) alive until the next cyclic garbage collection run. See the + official Python ``try`` statement documentation for more detailed + information. + + """ + __tracebackhide__ = True + for exc in filterfalse(isclass, always_iterable(expected_exception, BASE_TYPE)): + msg = ( + "exceptions must be old-style classes or" + " derived from BaseException, not %s" + ) + raise TypeError(msg % type(exc)) + + message = "DID NOT RAISE {}".format(expected_exception) + match_expr = None + + if not args: + if "message" in kwargs: + message = kwargs.pop("message") + if "match" in kwargs: + match_expr = kwargs.pop("match") + if kwargs: + msg = "Unexpected keyword arguments passed to pytest.raises: " + msg += ", ".join(kwargs.keys()) + raise TypeError(msg) + return RaisesContext(expected_exception, message, match_expr) + elif isinstance(args[0], str): + code, = args + assert isinstance(code, str) + frame = sys._getframe(1) + loc = frame.f_locals.copy() + loc.update(kwargs) + # print "raises frame scope: %r" % frame.f_locals + try: + code = _pytest._code.Source(code).compile() + six.exec_(code, frame.f_globals, loc) + # XXX didn't mean f_globals == f_locals something special? + # this is destroyed here ... + except expected_exception: + return _pytest._code.ExceptionInfo() + else: + func = args[0] + try: + func(*args[1:], **kwargs) + except expected_exception: + return _pytest._code.ExceptionInfo() + fail(message) + + +raises.Exception = fail.Exception + + +class RaisesContext(object): + def __init__(self, expected_exception, message, match_expr): + self.expected_exception = expected_exception + self.message = message + self.match_expr = match_expr + self.excinfo = None + + def __enter__(self): + self.excinfo = object.__new__(_pytest._code.ExceptionInfo) + return self.excinfo + + def __exit__(self, *tp): + __tracebackhide__ = True + if tp[0] is None: + fail(self.message) + self.excinfo.__init__(tp) + suppress_exception = issubclass(self.excinfo.type, self.expected_exception) + if sys.version_info[0] == 2 and suppress_exception: + sys.exc_clear() + if self.match_expr and suppress_exception: + self.excinfo.match(self.match_expr) + return suppress_exception diff --git a/venv/Lib/site-packages/_pytest/recwarn.py b/venv/Lib/site-packages/_pytest/recwarn.py new file mode 100644 index 00000000..8738fe0b --- /dev/null +++ b/venv/Lib/site-packages/_pytest/recwarn.py @@ -0,0 +1,234 @@ +""" recording warnings during test function execution. """ +from __future__ import absolute_import, division, print_function + +import inspect + +import _pytest._code +import re +import sys +import warnings + +import six + +from _pytest.fixtures import yield_fixture +from _pytest.outcomes import fail + + +@yield_fixture +def recwarn(): + """Return a :class:`WarningsRecorder` instance that records all warnings emitted by test functions. 
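+
+    A minimal usage sketch (illustrative only; assumes ``warnings`` is
+    imported in the test module)::
+
+        def test_emits_deprecation(recwarn):
+            warnings.warn("use the new API", DeprecationWarning)
+            assert len(recwarn) == 1
+            w = recwarn.pop(DeprecationWarning)
+            assert issubclass(w.category, DeprecationWarning)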
+ + See http://docs.python.org/library/warnings.html for information + on warning categories. + """ + wrec = WarningsRecorder() + with wrec: + warnings.simplefilter("default") + yield wrec + + +def deprecated_call(func=None, *args, **kwargs): + """context manager that can be used to ensure a block of code triggers a + ``DeprecationWarning`` or ``PendingDeprecationWarning``:: + + >>> import warnings + >>> def api_call_v2(): + ... warnings.warn('use v3 of this api', DeprecationWarning) + ... return 200 + + >>> with deprecated_call(): + ... assert api_call_v2() == 200 + + ``deprecated_call`` can also be used by passing a function and ``*args`` and ``*kwargs``, + in which case it will ensure calling ``func(*args, **kwargs)`` produces one of the warnings + types above. + """ + __tracebackhide__ = True + if func is not None: + args = (func,) + args + return warns((DeprecationWarning, PendingDeprecationWarning), *args, **kwargs) + + +def warns(expected_warning, *args, **kwargs): + r"""Assert that code raises a particular class of warning. + + Specifically, the parameter ``expected_warning`` can be a warning class or + sequence of warning classes, and the inside the ``with`` block must issue a warning of that class or + classes. + + This helper produces a list of :class:`warnings.WarningMessage` objects, + one for each warning raised. + + This function can be used as a context manager, or any of the other ways + ``pytest.raises`` can be used:: + + >>> with warns(RuntimeWarning): + ... warnings.warn("my warning", RuntimeWarning) + + In the context manager form you may use the keyword argument ``match`` to assert + that the exception matches a text or regex:: + + >>> with warns(UserWarning, match='must be 0 or None'): + ... warnings.warn("value must be 0 or None", UserWarning) + + >>> with warns(UserWarning, match=r'must be \d+$'): + ... warnings.warn("value must be 42", UserWarning) + + >>> with warns(UserWarning, match=r'must be \d+$'): + ... warnings.warn("this is not here", UserWarning) + Traceback (most recent call last): + ... + Failed: DID NOT WARN. No warnings of type ...UserWarning... was emitted... + + """ + __tracebackhide__ = True + match_expr = None + if not args: + if "match" in kwargs: + match_expr = kwargs.pop("match") + return WarningsChecker(expected_warning, match_expr=match_expr) + elif isinstance(args[0], str): + code, = args + assert isinstance(code, str) + frame = sys._getframe(1) + loc = frame.f_locals.copy() + loc.update(kwargs) + + with WarningsChecker(expected_warning, match_expr=match_expr): + code = _pytest._code.Source(code).compile() + six.exec_(code, frame.f_globals, loc) + else: + func = args[0] + with WarningsChecker(expected_warning, match_expr=match_expr): + return func(*args[1:], **kwargs) + + +class WarningsRecorder(warnings.catch_warnings): + """A context manager to record raised warnings. + + Adapted from `warnings.catch_warnings`. 
+ """ + + def __init__(self): + super(WarningsRecorder, self).__init__(record=True) + self._entered = False + self._list = [] + + @property + def list(self): + """The list of recorded warnings.""" + return self._list + + def __getitem__(self, i): + """Get a recorded warning by index.""" + return self._list[i] + + def __iter__(self): + """Iterate through the recorded warnings.""" + return iter(self._list) + + def __len__(self): + """The number of recorded warnings.""" + return len(self._list) + + def pop(self, cls=Warning): + """Pop the first recorded warning, raise exception if not exists.""" + for i, w in enumerate(self._list): + if issubclass(w.category, cls): + return self._list.pop(i) + __tracebackhide__ = True + raise AssertionError("%r not found in warning list" % cls) + + def clear(self): + """Clear the list of recorded warnings.""" + self._list[:] = [] + + def __enter__(self): + if self._entered: + __tracebackhide__ = True + raise RuntimeError("Cannot enter %r twice" % self) + self._list = super(WarningsRecorder, self).__enter__() + warnings.simplefilter("always") + # python3 keeps track of a "filter version", when the filters are + # updated previously seen warnings can be re-warned. python2 has no + # concept of this so we must reset the warnings registry manually. + # trivial patching of `warnings.warn` seems to be enough somehow? + if six.PY2: + + def warn(*args, **kwargs): + kwargs.setdefault("stacklevel", 1) + kwargs["stacklevel"] += 1 + + # emulate resetting the warn registry + f_globals = sys._getframe(kwargs["stacklevel"] - 1).f_globals + if "__warningregistry__" in f_globals: + orig = f_globals["__warningregistry__"] + f_globals["__warningregistry__"] = None + try: + return self._saved_warn(*args, **kwargs) + finally: + f_globals["__warningregistry__"] = orig + else: + return self._saved_warn(*args, **kwargs) + + warnings.warn, self._saved_warn = warn, warnings.warn + return self + + def __exit__(self, *exc_info): + if not self._entered: + __tracebackhide__ = True + raise RuntimeError("Cannot exit %r without entering first" % self) + # see above where `self._saved_warn` is assigned + if six.PY2: + warnings.warn = self._saved_warn + super(WarningsRecorder, self).__exit__(*exc_info) + + +class WarningsChecker(WarningsRecorder): + def __init__(self, expected_warning=None, match_expr=None): + super(WarningsChecker, self).__init__() + + msg = "exceptions must be old-style classes or derived from Warning, not %s" + if isinstance(expected_warning, tuple): + for exc in expected_warning: + if not inspect.isclass(exc): + raise TypeError(msg % type(exc)) + elif inspect.isclass(expected_warning): + expected_warning = (expected_warning,) + elif expected_warning is not None: + raise TypeError(msg % type(expected_warning)) + + self.expected_warning = expected_warning + self.match_expr = match_expr + + def __exit__(self, *exc_info): + super(WarningsChecker, self).__exit__(*exc_info) + + __tracebackhide__ = True + + # only check if we're not currently handling an exception + if all(a is None for a in exc_info): + if self.expected_warning is not None: + if not any(issubclass(r.category, self.expected_warning) for r in self): + __tracebackhide__ = True + fail( + "DID NOT WARN. No warnings of type {} was emitted. 
" + "The list of emitted warnings is: {}.".format( + self.expected_warning, [each.message for each in self] + ) + ) + elif self.match_expr is not None: + for r in self: + if issubclass(r.category, self.expected_warning): + if re.compile(self.match_expr).search(str(r.message)): + break + else: + fail( + "DID NOT WARN. No warnings of type {} matching" + " ('{}') was emitted. The list of emitted warnings" + " is: {}.".format( + self.expected_warning, + self.match_expr, + [each.message for each in self], + ) + ) diff --git a/venv/Lib/site-packages/_pytest/reports.py b/venv/Lib/site-packages/_pytest/reports.py new file mode 100644 index 00000000..a57c9fa8 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/reports.py @@ -0,0 +1,196 @@ +import py +from _pytest._code.code import TerminalRepr + + +def getslaveinfoline(node): + try: + return node._slaveinfocache + except AttributeError: + d = node.slaveinfo + ver = "%s.%s.%s" % d["version_info"][:3] + node._slaveinfocache = s = "[%s] %s -- Python %s %s" % ( + d["id"], + d["sysplatform"], + ver, + d["executable"], + ) + return s + + +class BaseReport(object): + def __init__(self, **kw): + self.__dict__.update(kw) + + def toterminal(self, out): + if hasattr(self, "node"): + out.line(getslaveinfoline(self.node)) + + longrepr = self.longrepr + if longrepr is None: + return + + if hasattr(longrepr, "toterminal"): + longrepr.toterminal(out) + else: + try: + out.line(longrepr) + except UnicodeEncodeError: + out.line("") + + def get_sections(self, prefix): + for name, content in self.sections: + if name.startswith(prefix): + yield prefix, content + + @property + def longreprtext(self): + """ + Read-only property that returns the full string representation + of ``longrepr``. + + .. versionadded:: 3.0 + """ + tw = py.io.TerminalWriter(stringio=True) + tw.hasmarkup = False + self.toterminal(tw) + exc = tw.stringio.getvalue() + return exc.strip() + + @property + def caplog(self): + """Return captured log lines, if log capturing is enabled + + .. versionadded:: 3.5 + """ + return "\n".join( + content for (prefix, content) in self.get_sections("Captured log") + ) + + @property + def capstdout(self): + """Return captured text from stdout, if capturing is enabled + + .. versionadded:: 3.0 + """ + return "".join( + content for (prefix, content) in self.get_sections("Captured stdout") + ) + + @property + def capstderr(self): + """Return captured text from stderr, if capturing is enabled + + .. versionadded:: 3.0 + """ + return "".join( + content for (prefix, content) in self.get_sections("Captured stderr") + ) + + passed = property(lambda x: x.outcome == "passed") + failed = property(lambda x: x.outcome == "failed") + skipped = property(lambda x: x.outcome == "skipped") + + @property + def fspath(self): + return self.nodeid.split("::")[0] + + +class TestReport(BaseReport): + """ Basic test report object (also used for setup and teardown calls if + they fail). + """ + + def __init__( + self, + nodeid, + location, + keywords, + outcome, + longrepr, + when, + sections=(), + duration=0, + user_properties=None, + **extra + ): + #: normalized collection node id + self.nodeid = nodeid + + #: a (filesystempath, lineno, domaininfo) tuple indicating the + #: actual location of a test item - it might be different from the + #: collected one e.g. if a method is inherited from a different module. + self.location = location + + #: a name -> value dictionary containing all keywords and + #: markers associated with a test invocation. 
+ self.keywords = keywords + + #: test outcome, always one of "passed", "failed", "skipped". + self.outcome = outcome + + #: None or a failure representation. + self.longrepr = longrepr + + #: one of 'setup', 'call', 'teardown' to indicate runtest phase. + self.when = when + + #: user properties is a list of tuples (name, value) that holds user + #: defined properties of the test + self.user_properties = list(user_properties or []) + + #: list of pairs ``(str, str)`` of extra information which needs to + #: marshallable. Used by pytest to add captured text + #: from ``stdout`` and ``stderr``, but may be used by other plugins + #: to add arbitrary information to reports. + self.sections = list(sections) + + #: time it took to run just the test + self.duration = duration + + self.__dict__.update(extra) + + def __repr__(self): + return "" % ( + self.nodeid, + self.when, + self.outcome, + ) + + +class TeardownErrorReport(BaseReport): + outcome = "failed" + when = "teardown" + + def __init__(self, longrepr, **extra): + self.longrepr = longrepr + self.sections = [] + self.__dict__.update(extra) + + +class CollectReport(BaseReport): + def __init__(self, nodeid, outcome, longrepr, result, sections=(), **extra): + self.nodeid = nodeid + self.outcome = outcome + self.longrepr = longrepr + self.result = result or [] + self.sections = list(sections) + self.__dict__.update(extra) + + @property + def location(self): + return (self.fspath, None, self.fspath) + + def __repr__(self): + return "" % ( + self.nodeid, + len(self.result), + self.outcome, + ) + + +class CollectErrorRepr(TerminalRepr): + def __init__(self, msg): + self.longrepr = msg + + def toterminal(self, out): + out.line(self.longrepr, red=True) diff --git a/venv/Lib/site-packages/_pytest/resultlog.py b/venv/Lib/site-packages/_pytest/resultlog.py new file mode 100644 index 00000000..9ae90e77 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/resultlog.py @@ -0,0 +1,120 @@ +""" log machine-parseable test session result information in a plain +text file. 
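+
+Each entry in the log is a status letter followed by the test's node id,
+with any failure or skip representation indented one space underneath
+(illustrative sketch, assuming the default status letters)::
+
+    . test_example.py::test_passes
+    F test_example.py::test_fails
+     <failure representation, one indented line per line of the traceback>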
+""" +from __future__ import absolute_import, division, print_function + +import py +import os + + +def pytest_addoption(parser): + group = parser.getgroup("terminal reporting", "resultlog plugin options") + group.addoption( + "--resultlog", + "--result-log", + action="store", + metavar="path", + default=None, + help="DEPRECATED path for machine-readable result log.", + ) + + +def pytest_configure(config): + resultlog = config.option.resultlog + # prevent opening resultlog on slave nodes (xdist) + if resultlog and not hasattr(config, "slaveinput"): + dirname = os.path.dirname(os.path.abspath(resultlog)) + if not os.path.isdir(dirname): + os.makedirs(dirname) + logfile = open(resultlog, "w", 1) # line buffered + config._resultlog = ResultLog(config, logfile) + config.pluginmanager.register(config._resultlog) + + from _pytest.deprecated import RESULT_LOG + from _pytest.warnings import _issue_config_warning + + _issue_config_warning(RESULT_LOG, config) + + +def pytest_unconfigure(config): + resultlog = getattr(config, "_resultlog", None) + if resultlog: + resultlog.logfile.close() + del config._resultlog + config.pluginmanager.unregister(resultlog) + + +def generic_path(item): + chain = item.listchain() + gpath = [chain[0].name] + fspath = chain[0].fspath + fspart = False + for node in chain[1:]: + newfspath = node.fspath + if newfspath == fspath: + if fspart: + gpath.append(":") + fspart = False + else: + gpath.append(".") + else: + gpath.append("/") + fspart = True + name = node.name + if name[0] in "([": + gpath.pop() + gpath.append(name) + fspath = newfspath + return "".join(gpath) + + +class ResultLog(object): + def __init__(self, config, logfile): + self.config = config + self.logfile = logfile # preferably line buffered + + def write_log_entry(self, testpath, lettercode, longrepr): + print("%s %s" % (lettercode, testpath), file=self.logfile) + for line in longrepr.splitlines(): + print(" %s" % line, file=self.logfile) + + def log_outcome(self, report, lettercode, longrepr): + testpath = getattr(report, "nodeid", None) + if testpath is None: + testpath = report.fspath + self.write_log_entry(testpath, lettercode, longrepr) + + def pytest_runtest_logreport(self, report): + if report.when != "call" and report.passed: + return + res = self.config.hook.pytest_report_teststatus(report=report) + code = res[1] + if code == "x": + longrepr = str(report.longrepr) + elif code == "X": + longrepr = "" + elif report.passed: + longrepr = "" + elif report.failed: + longrepr = str(report.longrepr) + elif report.skipped: + longrepr = str(report.longrepr[2]) + self.log_outcome(report, code, longrepr) + + def pytest_collectreport(self, report): + if not report.passed: + if report.failed: + code = "F" + longrepr = str(report.longrepr) + else: + assert report.skipped + code = "S" + longrepr = "%s:%d: %s" % report.longrepr + self.log_outcome(report, code, longrepr) + + def pytest_internalerror(self, excrepr): + reprcrash = getattr(excrepr, "reprcrash", None) + path = getattr(reprcrash, "path", None) + if path is None: + path = "cwd:%s" % py.path.local() + self.write_log_entry(path, "!", str(excrepr)) diff --git a/venv/Lib/site-packages/_pytest/runner.py b/venv/Lib/site-packages/_pytest/runner.py new file mode 100644 index 00000000..05731799 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/runner.py @@ -0,0 +1,388 @@ +""" basic collect and runtest protocol implementations """ +from __future__ import absolute_import, division, print_function + +import bdb +import os +import sys +from time import time + 
+import six +from _pytest._code.code import ExceptionInfo +from _pytest.outcomes import skip, Skipped, TEST_OUTCOME + +from .reports import TestReport, CollectReport, CollectErrorRepr + +# +# pytest plugin hooks + + +def pytest_addoption(parser): + group = parser.getgroup("terminal reporting", "reporting", after="general") + group.addoption( + "--durations", + action="store", + type=int, + default=None, + metavar="N", + help="show N slowest setup/test durations (N=0 for all).", + ), + + +def pytest_terminal_summary(terminalreporter): + durations = terminalreporter.config.option.durations + verbose = terminalreporter.config.getvalue("verbose") + if durations is None: + return + tr = terminalreporter + dlist = [] + for replist in tr.stats.values(): + for rep in replist: + if hasattr(rep, "duration"): + dlist.append(rep) + if not dlist: + return + dlist.sort(key=lambda x: x.duration) + dlist.reverse() + if not durations: + tr.write_sep("=", "slowest test durations") + else: + tr.write_sep("=", "slowest %s test durations" % durations) + dlist = dlist[:durations] + + for rep in dlist: + if verbose < 2 and rep.duration < 0.005: + tr.write_line("") + tr.write_line("(0.00 durations hidden. Use -vv to show these durations.)") + break + nodeid = rep.nodeid.replace("::()::", "::") + tr.write_line("%02.2fs %-8s %s" % (rep.duration, rep.when, nodeid)) + + +def pytest_sessionstart(session): + session._setupstate = SetupState() + + +def pytest_sessionfinish(session): + session._setupstate.teardown_all() + + +def pytest_runtest_protocol(item, nextitem): + item.ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location) + runtestprotocol(item, nextitem=nextitem) + item.ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location) + return True + + +def runtestprotocol(item, log=True, nextitem=None): + hasrequest = hasattr(item, "_request") + if hasrequest and not item._request: + item._initrequest() + rep = call_and_report(item, "setup", log) + reports = [rep] + if rep.passed: + if item.config.option.setupshow: + show_test_item(item) + if not item.config.option.setuponly: + reports.append(call_and_report(item, "call", log)) + reports.append(call_and_report(item, "teardown", log, nextitem=nextitem)) + # after all teardown hooks have been called + # want funcargs and request info to go away + if hasrequest: + item._request = False + item.funcargs = None + return reports + + +def show_test_item(item): + """Show test function, parameters and the fixtures of the test item.""" + tw = item.config.get_terminal_writer() + tw.line() + tw.write(" " * 8) + tw.write(item._nodeid) + used_fixtures = sorted(item._fixtureinfo.name2fixturedefs.keys()) + if used_fixtures: + tw.write(" (fixtures used: {})".format(", ".join(used_fixtures))) + + +def pytest_runtest_setup(item): + _update_current_test_var(item, "setup") + item.session._setupstate.prepare(item) + + +def pytest_runtest_call(item): + _update_current_test_var(item, "call") + sys.last_type, sys.last_value, sys.last_traceback = (None, None, None) + try: + item.runtest() + except Exception: + # Store trace info to allow postmortem debugging + type, value, tb = sys.exc_info() + tb = tb.tb_next # Skip *this* frame + sys.last_type = type + sys.last_value = value + sys.last_traceback = tb + del type, value, tb # Get rid of these in this frame + raise + + +def pytest_runtest_teardown(item, nextitem): + _update_current_test_var(item, "teardown") + item.session._setupstate.teardown_exact(item, nextitem) + _update_current_test_var(item, None) + + 
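+# Illustrative sketch (editorial addition): the setup/call/teardown reports
+# produced by runtestprotocol() above can be observed from a project's own
+# conftest.py. The hook name below is real; the body is hypothetical user
+# code, not part of pytest.
+#
+#     # conftest.py
+#     def pytest_runtest_logreport(report):
+#         # called once per phase; report.when is "setup", "call" or "teardown"
+#         if report.when == "call" and report.failed:
+#             print("FAILED %s in %.3fs" % (report.nodeid, report.duration))
+
+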
+def _update_current_test_var(item, when): + """ + Update PYTEST_CURRENT_TEST to reflect the current item and stage. + + If ``when`` is None, delete PYTEST_CURRENT_TEST from the environment. + """ + var_name = "PYTEST_CURRENT_TEST" + if when: + value = "{} ({})".format(item.nodeid, when) + # don't allow null bytes on environment variables (see #2644, #2957) + value = value.replace("\x00", "(null)") + os.environ[var_name] = value + else: + os.environ.pop(var_name) + + +def pytest_report_teststatus(report): + if report.when in ("setup", "teardown"): + if report.failed: + # category, shortletter, verbose-word + return "error", "E", "ERROR" + elif report.skipped: + return "skipped", "s", "SKIPPED" + else: + return "", "", "" + + +# +# Implementation + + +def call_and_report(item, when, log=True, **kwds): + call = call_runtest_hook(item, when, **kwds) + hook = item.ihook + report = hook.pytest_runtest_makereport(item=item, call=call) + if log: + hook.pytest_runtest_logreport(report=report) + if check_interactive_exception(call, report): + hook.pytest_exception_interact(node=item, call=call, report=report) + return report + + +def check_interactive_exception(call, report): + return call.excinfo and not ( + hasattr(report, "wasxfail") + or call.excinfo.errisinstance(skip.Exception) + or call.excinfo.errisinstance(bdb.BdbQuit) + ) + + +def call_runtest_hook(item, when, **kwds): + hookname = "pytest_runtest_" + when + ihook = getattr(item.ihook, hookname) + return CallInfo( + lambda: ihook(item=item, **kwds), + when=when, + treat_keyboard_interrupt_as_exception=item.config.getvalue("usepdb"), + ) + + +class CallInfo(object): + """ Result/Exception info a function invocation. """ + + #: None or ExceptionInfo object. + excinfo = None + + def __init__(self, func, when, treat_keyboard_interrupt_as_exception=False): + #: context of invocation: one of "setup", "call", + #: "teardown", "memocollect" + self.when = when + self.start = time() + try: + self.result = func() + except KeyboardInterrupt: + if treat_keyboard_interrupt_as_exception: + self.excinfo = ExceptionInfo() + else: + self.stop = time() + raise + except: # noqa + self.excinfo = ExceptionInfo() + self.stop = time() + + def __repr__(self): + if self.excinfo: + status = "exception: %s" % str(self.excinfo.value) + else: + status = "result: %r" % (self.result,) + return "" % (self.when, status) + + +def pytest_runtest_makereport(item, call): + when = call.when + duration = call.stop - call.start + keywords = {x: 1 for x in item.keywords} + excinfo = call.excinfo + sections = [] + if not call.excinfo: + outcome = "passed" + longrepr = None + else: + if not isinstance(excinfo, ExceptionInfo): + outcome = "failed" + longrepr = excinfo + elif excinfo.errisinstance(skip.Exception): + outcome = "skipped" + r = excinfo._getreprcrash() + longrepr = (str(r.path), r.lineno, r.message) + else: + outcome = "failed" + if call.when == "call": + longrepr = item.repr_failure(excinfo) + else: # exception in setup or teardown + longrepr = item._repr_failure_py( + excinfo, style=item.config.option.tbstyle + ) + for rwhen, key, content in item._report_sections: + sections.append(("Captured %s %s" % (key, rwhen), content)) + return TestReport( + item.nodeid, + item.location, + keywords, + outcome, + longrepr, + when, + sections, + duration, + user_properties=item.user_properties, + ) + + +def pytest_make_collect_report(collector): + call = CallInfo(lambda: list(collector.collect()), "collect") + longrepr = None + if not call.excinfo: + outcome = "passed" + else: + 
from _pytest import nose + + skip_exceptions = (Skipped,) + nose.get_skip_exceptions() + if call.excinfo.errisinstance(skip_exceptions): + outcome = "skipped" + r = collector._repr_failure_py(call.excinfo, "line").reprcrash + longrepr = (str(r.path), r.lineno, r.message) + else: + outcome = "failed" + errorinfo = collector.repr_failure(call.excinfo) + if not hasattr(errorinfo, "toterminal"): + errorinfo = CollectErrorRepr(errorinfo) + longrepr = errorinfo + rep = CollectReport( + collector.nodeid, outcome, longrepr, getattr(call, "result", None) + ) + rep.call = call # see collect_one_node + return rep + + +class SetupState(object): + """ shared state for setting up/tearing down test items or collectors. """ + + def __init__(self): + self.stack = [] + self._finalizers = {} + + def addfinalizer(self, finalizer, colitem): + """ attach a finalizer to the given colitem. + if colitem is None, this will add a finalizer that + is called at the end of teardown_all(). + """ + assert colitem and not isinstance(colitem, tuple) + assert callable(finalizer) + # assert colitem in self.stack # some unit tests don't setup stack :/ + self._finalizers.setdefault(colitem, []).append(finalizer) + + def _pop_and_teardown(self): + colitem = self.stack.pop() + self._teardown_with_finalization(colitem) + + def _callfinalizers(self, colitem): + finalizers = self._finalizers.pop(colitem, None) + exc = None + while finalizers: + fin = finalizers.pop() + try: + fin() + except TEST_OUTCOME: + # XXX Only first exception will be seen by user, + # ideally all should be reported. + if exc is None: + exc = sys.exc_info() + if exc: + six.reraise(*exc) + + def _teardown_with_finalization(self, colitem): + self._callfinalizers(colitem) + if hasattr(colitem, "teardown"): + colitem.teardown() + for colitem in self._finalizers: + assert ( + colitem is None or colitem in self.stack or isinstance(colitem, tuple) + ) + + def teardown_all(self): + while self.stack: + self._pop_and_teardown() + for key in list(self._finalizers): + self._teardown_with_finalization(key) + assert not self._finalizers + + def teardown_exact(self, item, nextitem): + needed_collectors = nextitem and nextitem.listchain() or [] + self._teardown_towards(needed_collectors) + + def _teardown_towards(self, needed_collectors): + exc = None + while self.stack: + if self.stack == needed_collectors[: len(self.stack)]: + break + try: + self._pop_and_teardown() + except TEST_OUTCOME: + # XXX Only first exception will be seen by user, + # ideally all should be reported. 
+ if exc is None: + exc = sys.exc_info() + if exc: + six.reraise(*exc) + + def prepare(self, colitem): + """ setup objects along the collector chain to the test-method + and teardown previously setup objects.""" + needed_collectors = colitem.listchain() + self._teardown_towards(needed_collectors) + + # check if the last collection node has raised an error + for col in self.stack: + if hasattr(col, "_prepare_exc"): + six.reraise(*col._prepare_exc) + for col in needed_collectors[len(self.stack) :]: + self.stack.append(col) + try: + col.setup() + except TEST_OUTCOME: + col._prepare_exc = sys.exc_info() + raise + + +def collect_one_node(collector): + ihook = collector.ihook + ihook.pytest_collectstart(collector=collector) + rep = ihook.pytest_make_collect_report(collector=collector) + call = rep.__dict__.pop("call", None) + if call and check_interactive_exception(call, rep): + ihook.pytest_exception_interact(node=collector, call=call, report=rep) + return rep diff --git a/venv/Lib/site-packages/_pytest/setuponly.py b/venv/Lib/site-packages/_pytest/setuponly.py new file mode 100644 index 00000000..c3edc5f8 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/setuponly.py @@ -0,0 +1,85 @@ +from __future__ import absolute_import, division, print_function + +import pytest +import sys + + +def pytest_addoption(parser): + group = parser.getgroup("debugconfig") + group.addoption( + "--setuponly", + "--setup-only", + action="store_true", + help="only setup fixtures, do not execute tests.", + ) + group.addoption( + "--setupshow", + "--setup-show", + action="store_true", + help="show setup of fixtures while executing tests.", + ) + + +@pytest.hookimpl(hookwrapper=True) +def pytest_fixture_setup(fixturedef, request): + yield + config = request.config + if config.option.setupshow: + if hasattr(request, "param"): + # Save the fixture parameter so ._show_fixture_action() can + # display it now and during the teardown (in .finish()). 
+ if fixturedef.ids: + if callable(fixturedef.ids): + fixturedef.cached_param = fixturedef.ids(request.param) + else: + fixturedef.cached_param = fixturedef.ids[request.param_index] + else: + fixturedef.cached_param = request.param + _show_fixture_action(fixturedef, "SETUP") + + +def pytest_fixture_post_finalizer(fixturedef): + if hasattr(fixturedef, "cached_result"): + config = fixturedef._fixturemanager.config + if config.option.setupshow: + _show_fixture_action(fixturedef, "TEARDOWN") + if hasattr(fixturedef, "cached_param"): + del fixturedef.cached_param + + +def _show_fixture_action(fixturedef, msg): + config = fixturedef._fixturemanager.config + capman = config.pluginmanager.getplugin("capturemanager") + if capman: + capman.suspend_global_capture() + out, err = capman.read_global_capture() + + tw = config.get_terminal_writer() + tw.line() + tw.write(" " * 2 * fixturedef.scopenum) + tw.write( + "{step} {scope} {fixture}".format( + step=msg.ljust(8), # align the output to TEARDOWN + scope=fixturedef.scope[0].upper(), + fixture=fixturedef.argname, + ) + ) + + if msg == "SETUP": + deps = sorted(arg for arg in fixturedef.argnames if arg != "request") + if deps: + tw.write(" (fixtures used: {})".format(", ".join(deps))) + + if hasattr(fixturedef, "cached_param"): + tw.write("[{}]".format(fixturedef.cached_param)) + + if capman: + capman.resume_global_capture() + sys.stdout.write(out) + sys.stderr.write(err) + + +@pytest.hookimpl(tryfirst=True) +def pytest_cmdline_main(config): + if config.option.setuponly: + config.option.setupshow = True diff --git a/venv/Lib/site-packages/_pytest/setupplan.py b/venv/Lib/site-packages/_pytest/setupplan.py new file mode 100644 index 00000000..23f4f97e --- /dev/null +++ b/venv/Lib/site-packages/_pytest/setupplan.py @@ -0,0 +1,29 @@ +from __future__ import absolute_import, division, print_function + +import pytest + + +def pytest_addoption(parser): + group = parser.getgroup("debugconfig") + group.addoption( + "--setupplan", + "--setup-plan", + action="store_true", + help="show what fixtures and tests would be executed but " + "don't execute anything.", + ) + + +@pytest.hookimpl(tryfirst=True) +def pytest_fixture_setup(fixturedef, request): + # Will return a dummy fixture if the setuponly option is provided. + if request.config.option.setupplan: + fixturedef.cached_result = (None, None, None) + return fixturedef.cached_result + + +@pytest.hookimpl(tryfirst=True) +def pytest_cmdline_main(config): + if config.option.setupplan: + config.option.setuponly = True + config.option.setupshow = True diff --git a/venv/Lib/site-packages/_pytest/skipping.py b/venv/Lib/site-packages/_pytest/skipping.py new file mode 100644 index 00000000..90afd6de --- /dev/null +++ b/venv/Lib/site-packages/_pytest/skipping.py @@ -0,0 +1,294 @@ +""" support for skip/xfail functions and markers. 
""" +from __future__ import absolute_import, division, print_function + +from _pytest.config import hookimpl +from _pytest.mark.evaluate import MarkEvaluator +from _pytest.outcomes import fail, skip, xfail + + +def pytest_addoption(parser): + group = parser.getgroup("general") + group.addoption( + "--runxfail", + action="store_true", + dest="runxfail", + default=False, + help="run tests even if they are marked xfail", + ) + + parser.addini( + "xfail_strict", + "default for the strict parameter of xfail " + "markers when not given explicitly (default: False)", + default=False, + type="bool", + ) + + +def pytest_configure(config): + if config.option.runxfail: + # yay a hack + import pytest + + old = pytest.xfail + config._cleanup.append(lambda: setattr(pytest, "xfail", old)) + + def nop(*args, **kwargs): + pass + + nop.Exception = xfail.Exception + setattr(pytest, "xfail", nop) + + config.addinivalue_line( + "markers", + "skip(reason=None): skip the given test function with an optional reason. " + 'Example: skip(reason="no way of currently testing this") skips the ' + "test.", + ) + config.addinivalue_line( + "markers", + "skipif(condition): skip the given test function if eval(condition) " + "results in a True value. Evaluation happens within the " + "module global context. Example: skipif('sys.platform == \"win32\"') " + "skips the test if we are on the win32 platform. see " + "https://docs.pytest.org/en/latest/skipping.html", + ) + config.addinivalue_line( + "markers", + "xfail(condition, reason=None, run=True, raises=None, strict=False): " + "mark the test function as an expected failure if eval(condition) " + "has a True value. Optionally specify a reason for better reporting " + "and run=False if you don't even want to execute the test function. " + "If only specific exception(s) are expected, you can list them in " + "raises, and if the test fails in other ways, it will be reported as " + "a true failure. 
See https://docs.pytest.org/en/latest/skipping.html", + ) + + +@hookimpl(tryfirst=True) +def pytest_runtest_setup(item): + # Check if skip or skipif are specified as pytest marks + item._skipped_by_mark = False + eval_skipif = MarkEvaluator(item, "skipif") + if eval_skipif.istrue(): + item._skipped_by_mark = True + skip(eval_skipif.getexplanation()) + + for skip_info in item.iter_markers(name="skip"): + item._skipped_by_mark = True + if "reason" in skip_info.kwargs: + skip(skip_info.kwargs["reason"]) + elif skip_info.args: + skip(skip_info.args[0]) + else: + skip("unconditional skip") + + item._evalxfail = MarkEvaluator(item, "xfail") + check_xfail_no_run(item) + + +@hookimpl(hookwrapper=True) +def pytest_pyfunc_call(pyfuncitem): + check_xfail_no_run(pyfuncitem) + outcome = yield + passed = outcome.excinfo is None + if passed: + check_strict_xfail(pyfuncitem) + + +def check_xfail_no_run(item): + """check xfail(run=False)""" + if not item.config.option.runxfail: + evalxfail = item._evalxfail + if evalxfail.istrue(): + if not evalxfail.get("run", True): + xfail("[NOTRUN] " + evalxfail.getexplanation()) + + +def check_strict_xfail(pyfuncitem): + """check xfail(strict=True) for the given PASSING test""" + evalxfail = pyfuncitem._evalxfail + if evalxfail.istrue(): + strict_default = pyfuncitem.config.getini("xfail_strict") + is_strict_xfail = evalxfail.get("strict", strict_default) + if is_strict_xfail: + del pyfuncitem._evalxfail + explanation = evalxfail.getexplanation() + fail("[XPASS(strict)] " + explanation, pytrace=False) + + +@hookimpl(hookwrapper=True) +def pytest_runtest_makereport(item, call): + outcome = yield + rep = outcome.get_result() + evalxfail = getattr(item, "_evalxfail", None) + # unitttest special case, see setting of _unexpectedsuccess + if hasattr(item, "_unexpectedsuccess") and rep.when == "call": + from _pytest.compat import _is_unittest_unexpected_success_a_failure + + if item._unexpectedsuccess: + rep.longrepr = "Unexpected success: {}".format(item._unexpectedsuccess) + else: + rep.longrepr = "Unexpected success" + if _is_unittest_unexpected_success_a_failure(): + rep.outcome = "failed" + else: + rep.outcome = "passed" + rep.wasxfail = rep.longrepr + elif item.config.option.runxfail: + pass # don't interefere + elif call.excinfo and call.excinfo.errisinstance(xfail.Exception): + rep.wasxfail = "reason: " + call.excinfo.value.msg + rep.outcome = "skipped" + elif evalxfail and not rep.skipped and evalxfail.wasvalid() and evalxfail.istrue(): + if call.excinfo: + if evalxfail.invalidraise(call.excinfo.value): + rep.outcome = "failed" + else: + rep.outcome = "skipped" + rep.wasxfail = evalxfail.getexplanation() + elif call.when == "call": + strict_default = item.config.getini("xfail_strict") + is_strict_xfail = evalxfail.get("strict", strict_default) + explanation = evalxfail.getexplanation() + if is_strict_xfail: + rep.outcome = "failed" + rep.longrepr = "[XPASS(strict)] {}".format(explanation) + else: + rep.outcome = "passed" + rep.wasxfail = explanation + elif ( + getattr(item, "_skipped_by_mark", False) + and rep.skipped + and type(rep.longrepr) is tuple + ): + # skipped by mark.skipif; change the location of the failure + # to point to the item definition, otherwise it will display + # the location of where the skip exception was raised within pytest + filename, line, reason = rep.longrepr + filename, line = item.location[:2] + rep.longrepr = filename, line, reason + + +# called by terminalreporter progress reporting + + +def pytest_report_teststatus(report): + if 
hasattr(report, "wasxfail"): + if report.skipped: + return "xfailed", "x", "xfail" + elif report.passed: + return "xpassed", "X", ("XPASS", {"yellow": True}) + + +# called by the terminalreporter instance/plugin + + +def pytest_terminal_summary(terminalreporter): + tr = terminalreporter + if not tr.reportchars: + # for name in "xfailed skipped failed xpassed": + # if not tr.stats.get(name, 0): + # tr.write_line("HINT: use '-r' option to see extra " + # "summary info about tests") + # break + return + + lines = [] + for char in tr.reportchars: + action = REPORTCHAR_ACTIONS.get(char, lambda tr, lines: None) + action(terminalreporter, lines) + + if lines: + tr._tw.sep("=", "short test summary info") + for line in lines: + tr._tw.line(line) + + +def show_simple(terminalreporter, lines, stat, format): + failed = terminalreporter.stats.get(stat) + if failed: + for rep in failed: + pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid) + lines.append(format % (pos,)) + + +def show_xfailed(terminalreporter, lines): + xfailed = terminalreporter.stats.get("xfailed") + if xfailed: + for rep in xfailed: + pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid) + reason = rep.wasxfail + lines.append("XFAIL %s" % (pos,)) + if reason: + lines.append(" " + str(reason)) + + +def show_xpassed(terminalreporter, lines): + xpassed = terminalreporter.stats.get("xpassed") + if xpassed: + for rep in xpassed: + pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid) + reason = rep.wasxfail + lines.append("XPASS %s %s" % (pos, reason)) + + +def folded_skips(skipped): + d = {} + for event in skipped: + key = event.longrepr + assert len(key) == 3, (event, key) + keywords = getattr(event, "keywords", {}) + # folding reports with global pytestmark variable + # this is workaround, because for now we cannot identify the scope of a skip marker + # TODO: revisit after marks scope would be fixed + when = getattr(event, "when", None) + if when == "setup" and "skip" in keywords and "pytestmark" not in keywords: + key = (key[0], None, key[2]) + d.setdefault(key, []).append(event) + values = [] + for key, events in d.items(): + values.append((len(events),) + key) + return values + + +def show_skipped(terminalreporter, lines): + tr = terminalreporter + skipped = tr.stats.get("skipped", []) + if skipped: + # if not tr.hasopt('skipped'): + # tr.write_line( + # "%d skipped tests, specify -rs for more info" % + # len(skipped)) + # return + fskips = folded_skips(skipped) + if fskips: + # tr.write_sep("_", "skipped test summary") + for num, fspath, lineno, reason in fskips: + if reason.startswith("Skipped: "): + reason = reason[9:] + if lineno is not None: + lines.append( + "SKIP [%d] %s:%d: %s" % (num, fspath, lineno + 1, reason) + ) + else: + lines.append("SKIP [%d] %s: %s" % (num, fspath, reason)) + + +def shower(stat, format): + def show_(terminalreporter, lines): + return show_simple(terminalreporter, lines, stat, format) + + return show_ + + +REPORTCHAR_ACTIONS = { + "x": show_xfailed, + "X": show_xpassed, + "f": shower("failed", "FAIL %s"), + "F": shower("failed", "FAIL %s"), + "s": show_skipped, + "S": show_skipped, + "p": shower("passed", "PASSED %s"), + "E": shower("error", "ERROR %s"), +} diff --git a/venv/Lib/site-packages/_pytest/terminal.py b/venv/Lib/site-packages/_pytest/terminal.py new file mode 100644 index 00000000..8deb330c --- /dev/null +++ b/venv/Lib/site-packages/_pytest/terminal.py @@ -0,0 +1,882 @@ +""" terminal reporting of the full testing process. 
+ +This is a good source for looking at the various reporting hooks. +""" +from __future__ import absolute_import, division, print_function + +import itertools +import platform +import sys +import time + +import attr +import pluggy +import py +import six +from more_itertools import collapse + +import pytest +from _pytest import nodes +from _pytest.main import ( + EXIT_OK, + EXIT_TESTSFAILED, + EXIT_INTERRUPTED, + EXIT_USAGEERROR, + EXIT_NOTESTSCOLLECTED, +) + + +import argparse + + +class MoreQuietAction(argparse.Action): + """ + a modified copy of the argparse count action which counts down and updates + the legacy quiet attribute at the same time + + used to unify verbosity handling + """ + + def __init__(self, option_strings, dest, default=None, required=False, help=None): + super(MoreQuietAction, self).__init__( + option_strings=option_strings, + dest=dest, + nargs=0, + default=default, + required=required, + help=help, + ) + + def __call__(self, parser, namespace, values, option_string=None): + new_count = getattr(namespace, self.dest, 0) - 1 + setattr(namespace, self.dest, new_count) + # todo Deprecate config.quiet + namespace.quiet = getattr(namespace, "quiet", 0) + 1 + + +def pytest_addoption(parser): + group = parser.getgroup("terminal reporting", "reporting", after="general") + group._addoption( + "-v", + "--verbose", + action="count", + default=0, + dest="verbose", + help="increase verbosity.", + ), + group._addoption( + "-q", + "--quiet", + action=MoreQuietAction, + default=0, + dest="verbose", + help="decrease verbosity.", + ), + group._addoption( + "--verbosity", dest="verbose", type=int, default=0, help="set verbosity" + ) + group._addoption( + "-r", + action="store", + dest="reportchars", + default="", + metavar="chars", + help="show extra test summary info as specified by chars (f)ailed, " + "(E)error, (s)skipped, (x)failed, (X)passed, " + "(p)passed, (P)passed with output, (a)all except pP. " + "Warnings are displayed at all times except when " + "--disable-warnings is set", + ) + group._addoption( + "--disable-warnings", + "--disable-pytest-warnings", + default=False, + dest="disable_warnings", + action="store_true", + help="disable warnings summary", + ) + group._addoption( + "-l", + "--showlocals", + action="store_true", + dest="showlocals", + default=False, + help="show locals in tracebacks (disabled by default).", + ) + group._addoption( + "--tb", + metavar="style", + action="store", + dest="tbstyle", + default="auto", + choices=["auto", "long", "short", "no", "line", "native"], + help="traceback print mode (auto/long/short/line/native/no).", + ) + group._addoption( + "--show-capture", + action="store", + dest="showcapture", + choices=["no", "stdout", "stderr", "log", "all"], + default="all", + help="Controls how captured stdout/stderr/log is shown on failed tests. 
" + "Default is 'all'.", + ) + group._addoption( + "--fulltrace", + "--full-trace", + action="store_true", + default=False, + help="don't cut any tracebacks (default is to cut).", + ) + group._addoption( + "--color", + metavar="color", + action="store", + dest="color", + default="auto", + choices=["yes", "no", "auto"], + help="color terminal output (yes/no/auto).", + ) + + parser.addini( + "console_output_style", + help="console output: classic or with additional progress information (classic|progress).", + default="progress", + ) + + +def pytest_configure(config): + reporter = TerminalReporter(config, sys.stdout) + config.pluginmanager.register(reporter, "terminalreporter") + if config.option.debug or config.option.traceconfig: + + def mywriter(tags, args): + msg = " ".join(map(str, args)) + reporter.write_line("[traceconfig] " + msg) + + config.trace.root.setprocessor("pytest:config", mywriter) + + +def getreportopt(config): + reportopts = "" + reportchars = config.option.reportchars + if not config.option.disable_warnings and "w" not in reportchars: + reportchars += "w" + elif config.option.disable_warnings and "w" in reportchars: + reportchars = reportchars.replace("w", "") + if reportchars: + for char in reportchars: + if char not in reportopts and char != "a": + reportopts += char + elif char == "a": + reportopts = "fEsxXw" + return reportopts + + +def pytest_report_teststatus(report): + if report.passed: + letter = "." + elif report.skipped: + letter = "s" + elif report.failed: + letter = "F" + if report.when != "call": + letter = "f" + return report.outcome, letter, report.outcome.upper() + + +@attr.s +class WarningReport(object): + """ + Simple structure to hold warnings information captured by ``pytest_logwarning`` and ``pytest_warning_captured``. + + :ivar str message: user friendly message about the warning + :ivar str|None nodeid: node id that generated the warning (see ``get_location``). + :ivar tuple|py.path.local fslocation: + file system location of the source of the warning (see ``get_location``). + + :ivar bool legacy: if this warning report was generated from the deprecated ``pytest_logwarning`` hook. + """ + + message = attr.ib() + nodeid = attr.ib(default=None) + fslocation = attr.ib(default=None) + legacy = attr.ib(default=False) + + def get_location(self, config): + """ + Returns the more user-friendly information about the location + of a warning, or None. 
+ """ + if self.nodeid: + return self.nodeid + if self.fslocation: + if isinstance(self.fslocation, tuple) and len(self.fslocation) >= 2: + filename, linenum = self.fslocation[:2] + relpath = py.path.local(filename).relto(config.invocation_dir) + if not relpath: + relpath = str(filename) + return "%s:%s" % (relpath, linenum) + else: + return str(self.fslocation) + return None + + +class TerminalReporter(object): + def __init__(self, config, file=None): + import _pytest.config + + self.config = config + self.verbosity = self.config.option.verbose + self.showheader = self.verbosity >= 0 + self.showfspath = self.verbosity >= 0 + self.showlongtestinfo = self.verbosity > 0 + self._numcollected = 0 + self._session = None + + self.stats = {} + self.startdir = py.path.local() + if file is None: + file = sys.stdout + self._tw = _pytest.config.create_terminal_writer(config, file) + # self.writer will be deprecated in pytest-3.4 + self.writer = self._tw + self._screen_width = self._tw.fullwidth + self.currentfspath = None + self.reportchars = getreportopt(config) + self.hasmarkup = self._tw.hasmarkup + self.isatty = file.isatty() + self._progress_nodeids_reported = set() + self._show_progress_info = self._determine_show_progress_info() + + def _determine_show_progress_info(self): + """Return True if we should display progress information based on the current config""" + # do not show progress if we are not capturing output (#3038) + if self.config.getoption("capture") == "no": + return False + # do not show progress if we are showing fixture setup/teardown + if self.config.getoption("setupshow"): + return False + return self.config.getini("console_output_style") in ("progress", "count") + + def hasopt(self, char): + char = {"xfailed": "x", "skipped": "s"}.get(char, char) + return char in self.reportchars + + def write_fspath_result(self, nodeid, res): + fspath = self.config.rootdir.join(nodeid.split("::")[0]) + if fspath != self.currentfspath: + if self.currentfspath is not None and self._show_progress_info: + self._write_progress_information_filling_space() + self.currentfspath = fspath + fspath = self.startdir.bestrelpath(fspath) + self._tw.line() + self._tw.write(fspath + " ") + self._tw.write(res) + + def write_ensure_prefix(self, prefix, extra="", **kwargs): + if self.currentfspath != prefix: + self._tw.line() + self.currentfspath = prefix + self._tw.write(prefix) + if extra: + self._tw.write(extra, **kwargs) + self.currentfspath = -2 + + def ensure_newline(self): + if self.currentfspath: + self._tw.line() + self.currentfspath = None + + def write(self, content, **markup): + self._tw.write(content, **markup) + + def write_line(self, line, **markup): + if not isinstance(line, six.text_type): + line = six.text_type(line, errors="replace") + self.ensure_newline() + self._tw.line(line, **markup) + + def rewrite(self, line, **markup): + """ + Rewinds the terminal cursor to the beginning and writes the given line. + + :kwarg erase: if True, will also add spaces until the full terminal width to ensure + previous lines are properly erased. + + The rest of the keyword arguments are markup instructions. 
+ """ + erase = markup.pop("erase", False) + if erase: + fill_count = self._tw.fullwidth - len(line) - 1 + fill = " " * fill_count + else: + fill = "" + line = str(line) + self._tw.write("\r" + line + fill, **markup) + + def write_sep(self, sep, title=None, **markup): + self.ensure_newline() + self._tw.sep(sep, title, **markup) + + def section(self, title, sep="=", **kw): + self._tw.sep(sep, title, **kw) + + def line(self, msg, **kw): + self._tw.line(msg, **kw) + + def pytest_internalerror(self, excrepr): + for line in six.text_type(excrepr).split("\n"): + self.write_line("INTERNALERROR> " + line) + return 1 + + def pytest_logwarning(self, fslocation, message, nodeid): + warnings = self.stats.setdefault("warnings", []) + warning = WarningReport( + fslocation=fslocation, message=message, nodeid=nodeid, legacy=True + ) + warnings.append(warning) + + def pytest_warning_captured(self, warning_message, item): + # from _pytest.nodes import get_fslocation_from_item + from _pytest.warnings import warning_record_to_str + + warnings = self.stats.setdefault("warnings", []) + fslocation = warning_message.filename, warning_message.lineno + message = warning_record_to_str(warning_message) + + nodeid = item.nodeid if item is not None else "" + warning_report = WarningReport( + fslocation=fslocation, message=message, nodeid=nodeid + ) + warnings.append(warning_report) + + def pytest_plugin_registered(self, plugin): + if self.config.option.traceconfig: + msg = "PLUGIN registered: %s" % (plugin,) + # XXX this event may happen during setup/teardown time + # which unfortunately captures our output here + # which garbles our output if we use self.write_line + self.write_line(msg) + + def pytest_deselected(self, items): + self.stats.setdefault("deselected", []).extend(items) + + def pytest_runtest_logstart(self, nodeid, location): + # ensure that the path is printed before the + # 1st test of a module starts running + if self.showlongtestinfo: + line = self._locationline(nodeid, *location) + self.write_ensure_prefix(line, "") + elif self.showfspath: + fsid = nodeid.split("::")[0] + self.write_fspath_result(fsid, "") + + def pytest_runtest_logreport(self, report): + rep = report + res = self.config.hook.pytest_report_teststatus(report=rep) + category, letter, word = res + if isinstance(word, tuple): + word, markup = word + else: + markup = None + self.stats.setdefault(category, []).append(rep) + self._tests_ran = True + if not letter and not word: + # probably passed setup/teardown + return + running_xdist = hasattr(rep, "node") + if self.verbosity <= 0: + if not running_xdist and self.showfspath: + self.write_fspath_result(rep.nodeid, letter) + else: + self._tw.write(letter) + else: + self._progress_nodeids_reported.add(rep.nodeid) + if markup is None: + if rep.passed: + markup = {"green": True} + elif rep.failed: + markup = {"red": True} + elif rep.skipped: + markup = {"yellow": True} + else: + markup = {} + line = self._locationline(rep.nodeid, *rep.location) + if not running_xdist: + self.write_ensure_prefix(line, word, **markup) + if self._show_progress_info: + self._write_progress_information_filling_space() + else: + self.ensure_newline() + self._tw.write("[%s]" % rep.node.gateway.id) + if self._show_progress_info: + self._tw.write( + self._get_progress_information_message() + " ", cyan=True + ) + else: + self._tw.write(" ") + self._tw.write(word, **markup) + self._tw.write(" " + line) + self.currentfspath = -2 + + def pytest_runtest_logfinish(self, nodeid): + if self.config.getini("console_output_style") 
== "count": + num_tests = self._session.testscollected + progress_length = len(" [{}/{}]".format(str(num_tests), str(num_tests))) + else: + progress_length = len(" [100%]") + + if self.verbosity <= 0 and self._show_progress_info: + self._progress_nodeids_reported.add(nodeid) + last_item = ( + len(self._progress_nodeids_reported) == self._session.testscollected + ) + if last_item: + self._write_progress_information_filling_space() + else: + w = self._width_of_current_line + past_edge = w + progress_length + 1 >= self._screen_width + if past_edge: + msg = self._get_progress_information_message() + self._tw.write(msg + "\n", cyan=True) + + def _get_progress_information_message(self): + if self.config.getoption("capture") == "no": + return "" + collected = self._session.testscollected + if self.config.getini("console_output_style") == "count": + if collected: + progress = self._progress_nodeids_reported + counter_format = "{{:{}d}}".format(len(str(collected))) + format_string = " [{}/{{}}]".format(counter_format) + return format_string.format(len(progress), collected) + return " [ {} / {} ]".format(collected, collected) + else: + if collected: + progress = len(self._progress_nodeids_reported) * 100 // collected + return " [{:3d}%]".format(progress) + return " [100%]" + + def _write_progress_information_filling_space(self): + msg = self._get_progress_information_message() + w = self._width_of_current_line + fill = self._tw.fullwidth - w - 1 + self.write(msg.rjust(fill), cyan=True) + + @property + def _width_of_current_line(self): + """Return the width of current line, using the superior implementation of py-1.6 when available""" + try: + return self._tw.width_of_current_line + except AttributeError: + # py < 1.6.0 + return self._tw.chars_on_current_line + + def pytest_collection(self): + if not self.isatty and self.config.option.verbose >= 1: + self.write("collecting ... 
", bold=True) + + def pytest_collectreport(self, report): + if report.failed: + self.stats.setdefault("error", []).append(report) + elif report.skipped: + self.stats.setdefault("skipped", []).append(report) + items = [x for x in report.result if isinstance(x, pytest.Item)] + self._numcollected += len(items) + if self.isatty: + # self.write_fspath_result(report.nodeid, 'E') + self.report_collect() + + def report_collect(self, final=False): + if self.config.option.verbose < 0: + return + + errors = len(self.stats.get("error", [])) + skipped = len(self.stats.get("skipped", [])) + deselected = len(self.stats.get("deselected", [])) + if final: + line = "collected " + else: + line = "collecting " + line += ( + str(self._numcollected) + " item" + ("" if self._numcollected == 1 else "s") + ) + if errors: + line += " / %d errors" % errors + if deselected: + line += " / %d deselected" % deselected + if skipped: + line += " / %d skipped" % skipped + if self.isatty: + self.rewrite(line, bold=True, erase=True) + if final: + self.write("\n") + else: + self.write_line(line) + + @pytest.hookimpl(trylast=True) + def pytest_collection_modifyitems(self): + self.report_collect(True) + + @pytest.hookimpl(trylast=True) + def pytest_sessionstart(self, session): + self._session = session + self._sessionstarttime = time.time() + if not self.showheader: + return + self.write_sep("=", "test session starts", bold=True) + verinfo = platform.python_version() + msg = "platform %s -- Python %s" % (sys.platform, verinfo) + if hasattr(sys, "pypy_version_info"): + verinfo = ".".join(map(str, sys.pypy_version_info[:3])) + msg += "[pypy-%s-%s]" % (verinfo, sys.pypy_version_info[3]) + msg += ", pytest-%s, py-%s, pluggy-%s" % ( + pytest.__version__, + py.__version__, + pluggy.__version__, + ) + if ( + self.verbosity > 0 + or self.config.option.debug + or getattr(self.config.option, "pastebin", None) + ): + msg += " -- " + str(sys.executable) + self.write_line(msg) + lines = self.config.hook.pytest_report_header( + config=self.config, startdir=self.startdir + ) + self._write_report_lines_from_hooks(lines) + + def _write_report_lines_from_hooks(self, lines): + lines.reverse() + for line in collapse(lines): + self.write_line(line) + + def pytest_report_header(self, config): + inifile = "" + if config.inifile: + inifile = " " + config.rootdir.bestrelpath(config.inifile) + lines = ["rootdir: %s, inifile:%s" % (config.rootdir, inifile)] + + plugininfo = config.pluginmanager.list_plugin_distinfo() + if plugininfo: + + lines.append("plugins: %s" % ", ".join(_plugin_nameversions(plugininfo))) + return lines + + def pytest_collection_finish(self, session): + if self.config.option.collectonly: + self._printcollecteditems(session.items) + if self.stats.get("failed"): + self._tw.sep("!", "collection failures") + for rep in self.stats.get("failed"): + rep.toterminal(self._tw) + return 1 + return 0 + lines = self.config.hook.pytest_report_collectionfinish( + config=self.config, startdir=self.startdir, items=session.items + ) + self._write_report_lines_from_hooks(lines) + + def _printcollecteditems(self, items): + # to print out items and their parent collectors + # we take care to leave out Instances aka () + # because later versions are going to get rid of them anyway + if self.config.option.verbose < 0: + if self.config.option.verbose < -1: + counts = {} + for item in items: + name = item.nodeid.split("::", 1)[0] + counts[name] = counts.get(name, 0) + 1 + for name, count in sorted(counts.items()): + self._tw.line("%s: %d" % (name, count)) + 
else: + for item in items: + nodeid = item.nodeid + nodeid = nodeid.replace("::()::", "::") + self._tw.line(nodeid) + return + stack = [] + indent = "" + for item in items: + needed_collectors = item.listchain()[1:] # strip root node + while stack: + if stack == needed_collectors[: len(stack)]: + break + stack.pop() + for col in needed_collectors[len(stack) :]: + stack.append(col) + # if col.name == "()": + # continue + indent = (len(stack) - 1) * " " + self._tw.line("%s%s" % (indent, col)) + + @pytest.hookimpl(hookwrapper=True) + def pytest_sessionfinish(self, exitstatus): + outcome = yield + outcome.get_result() + self._tw.line("") + summary_exit_codes = ( + EXIT_OK, + EXIT_TESTSFAILED, + EXIT_INTERRUPTED, + EXIT_USAGEERROR, + EXIT_NOTESTSCOLLECTED, + ) + if exitstatus in summary_exit_codes: + self.config.hook.pytest_terminal_summary( + terminalreporter=self, exitstatus=exitstatus + ) + if exitstatus == EXIT_INTERRUPTED: + self._report_keyboardinterrupt() + del self._keyboardinterrupt_memo + self.summary_stats() + + @pytest.hookimpl(hookwrapper=True) + def pytest_terminal_summary(self): + self.summary_errors() + self.summary_failures() + yield + self.summary_warnings() + self.summary_passes() + + def pytest_keyboard_interrupt(self, excinfo): + self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True) + + def pytest_unconfigure(self): + if hasattr(self, "_keyboardinterrupt_memo"): + self._report_keyboardinterrupt() + + def _report_keyboardinterrupt(self): + excrepr = self._keyboardinterrupt_memo + msg = excrepr.reprcrash.message + self.write_sep("!", msg) + if "KeyboardInterrupt" in msg: + if self.config.option.fulltrace: + excrepr.toterminal(self._tw) + else: + excrepr.reprcrash.toterminal(self._tw) + self._tw.line( + "(to show a full traceback on KeyboardInterrupt use --fulltrace)", + yellow=True, + ) + + def _locationline(self, nodeid, fspath, lineno, domain): + def mkrel(nodeid): + line = self.config.cwd_relative_nodeid(nodeid) + if domain and line.endswith(domain): + line = line[: -len(domain)] + values = domain.split("[") + values[0] = values[0].replace(".", "::") # don't replace '.' in params + line += "[".join(values) + return line + + # collect_fspath comes from testid which has a "/"-normalized path + + if fspath: + res = mkrel(nodeid).replace("::()", "") # parens-normalization + if self.verbosity >= 2 and nodeid.split("::")[0] != fspath.replace( + "\\", nodes.SEP + ): + res += " <- " + self.startdir.bestrelpath(fspath) + else: + res = "[location]" + return res + " " + + def _getfailureheadline(self, rep): + if hasattr(rep, "location"): + fspath, lineno, domain = rep.location + return domain + else: + return "test session" # XXX? 
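+
+    # _getcrashline() below builds the short one-line crash summary
+    # (str(report.longrepr.reprcrash)) that summary_failures() prints when
+    # --tb=line is selected; it falls back to a truncated longrepr, or an
+    # empty string, when no structured crash information is available.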
+ + def _getcrashline(self, rep): + try: + return str(rep.longrepr.reprcrash) + except AttributeError: + try: + return str(rep.longrepr)[:50] + except AttributeError: + return "" + + # + # summaries for sessionfinish + # + def getreports(self, name): + values = [] + for x in self.stats.get(name, []): + if not hasattr(x, "_pdbshown"): + values.append(x) + return values + + def summary_warnings(self): + if self.hasopt("w"): + all_warnings = self.stats.get("warnings") + if not all_warnings: + return + + grouped = itertools.groupby( + all_warnings, key=lambda wr: wr.get_location(self.config) + ) + + self.write_sep("=", "warnings summary", yellow=True, bold=False) + for location, warning_records in grouped: + # legacy warnings show their location explicitly, while standard warnings look better without + # it because the location is already formatted into the message + warning_records = list(warning_records) + is_legacy = warning_records[0].legacy + if location and is_legacy: + self._tw.line(str(location)) + for w in warning_records: + if is_legacy: + lines = w.message.splitlines() + indented = "\n".join(" " + x for x in lines) + message = indented.rstrip() + else: + message = w.message.rstrip() + self._tw.line(message) + self._tw.line() + self._tw.line("-- Docs: https://docs.pytest.org/en/latest/warnings.html") + + def summary_passes(self): + if self.config.option.tbstyle != "no": + if self.hasopt("P"): + reports = self.getreports("passed") + if not reports: + return + self.write_sep("=", "PASSES") + for rep in reports: + if rep.sections: + msg = self._getfailureheadline(rep) + self.write_sep("_", msg) + self._outrep_summary(rep) + + def print_teardown_sections(self, rep): + showcapture = self.config.option.showcapture + if showcapture == "no": + return + for secname, content in rep.sections: + if showcapture != "all" and showcapture not in secname: + continue + if "teardown" in secname: + self._tw.sep("-", secname) + if content[-1:] == "\n": + content = content[:-1] + self._tw.line(content) + + def summary_failures(self): + if self.config.option.tbstyle != "no": + reports = self.getreports("failed") + if not reports: + return + self.write_sep("=", "FAILURES") + for rep in reports: + if self.config.option.tbstyle == "line": + line = self._getcrashline(rep) + self.write_line(line) + else: + msg = self._getfailureheadline(rep) + markup = {"red": True, "bold": True} + self.write_sep("_", msg, **markup) + self._outrep_summary(rep) + for report in self.getreports(""): + if report.nodeid == rep.nodeid and report.when == "teardown": + self.print_teardown_sections(report) + + def summary_errors(self): + if self.config.option.tbstyle != "no": + reports = self.getreports("error") + if not reports: + return + self.write_sep("=", "ERRORS") + for rep in self.stats["error"]: + msg = self._getfailureheadline(rep) + if not hasattr(rep, "when"): + # collect + msg = "ERROR collecting " + msg + elif rep.when == "setup": + msg = "ERROR at setup of " + msg + elif rep.when == "teardown": + msg = "ERROR at teardown of " + msg + self.write_sep("_", msg) + self._outrep_summary(rep) + + def _outrep_summary(self, rep): + rep.toterminal(self._tw) + showcapture = self.config.option.showcapture + if showcapture == "no": + return + for secname, content in rep.sections: + if showcapture != "all" and showcapture not in secname: + continue + self._tw.sep("-", secname) + if content[-1:] == "\n": + content = content[:-1] + self._tw.line(content) + + def summary_stats(self): + session_duration = time.time() - 
self._sessionstarttime + (line, color) = build_summary_stats_line(self.stats) + msg = "%s in %.2f seconds" % (line, session_duration) + markup = {color: True, "bold": True} + + if self.verbosity >= 0: + self.write_sep("=", msg, **markup) + if self.verbosity == -1: + self.write_line(msg, **markup) + + +def repr_pythonversion(v=None): + if v is None: + v = sys.version_info + try: + return "%s.%s.%s-%s-%s" % v + except (TypeError, ValueError): + return str(v) + + +def build_summary_stats_line(stats): + keys = ("failed passed skipped deselected xfailed xpassed warnings error").split() + unknown_key_seen = False + for key in stats.keys(): + if key not in keys: + if key: # setup/teardown reports have an empty key, ignore them + keys.append(key) + unknown_key_seen = True + parts = [] + for key in keys: + val = stats.get(key, None) + if val: + parts.append("%d %s" % (len(val), key)) + + if parts: + line = ", ".join(parts) + else: + line = "no tests ran" + + if "failed" in stats or "error" in stats: + color = "red" + elif "warnings" in stats or unknown_key_seen: + color = "yellow" + elif "passed" in stats: + color = "green" + else: + color = "yellow" + + return (line, color) + + +def _plugin_nameversions(plugininfo): + values = [] + for plugin, dist in plugininfo: + # gets us name and version! + name = "{dist.project_name}-{dist.version}".format(dist=dist) + # questionable convenience, but it keeps things short + if name.startswith("pytest-"): + name = name[7:] + # we decided to print python package names + # they can have more than one plugin + if name not in values: + values.append(name) + return values diff --git a/venv/Lib/site-packages/_pytest/tmpdir.py b/venv/Lib/site-packages/_pytest/tmpdir.py new file mode 100644 index 00000000..1963f14c --- /dev/null +++ b/venv/Lib/site-packages/_pytest/tmpdir.py @@ -0,0 +1,186 @@ +""" support for providing temporary directories to test functions. """ +from __future__ import absolute_import, division, print_function + +import os +import re +import pytest +import py +from _pytest.monkeypatch import MonkeyPatch +import attr +import tempfile +import warnings + +from .pathlib import ( + Path, + make_numbered_dir, + make_numbered_dir_with_cleanup, + ensure_reset_dir, + LOCK_TIMEOUT, +) + + +@attr.s +class TempPathFactory(object): + """Factory for temporary directories under the common base temp directory. + + The base directory can be configured using the ``--basetemp`` option.""" + + _given_basetemp = attr.ib() + _trace = attr.ib() + _basetemp = attr.ib(default=None) + + @classmethod + def from_config(cls, config): + """ + :param config: a pytest configuration + """ + return cls( + given_basetemp=config.option.basetemp, trace=config.trace.get("tmpdir") + ) + + def mktemp(self, basename, numbered=True): + """makes a temporary directory managed by the factory""" + if not numbered: + p = self.getbasetemp().joinpath(basename) + p.mkdir() + else: + p = make_numbered_dir(root=self.getbasetemp(), prefix=basename) + self._trace("mktemp", p) + return p + + def getbasetemp(self): + """ return base temporary directory. 
""" + if self._basetemp is None: + if self._given_basetemp is not None: + basetemp = Path(self._given_basetemp) + ensure_reset_dir(basetemp) + else: + from_env = os.environ.get("PYTEST_DEBUG_TEMPROOT") + temproot = Path(from_env or tempfile.gettempdir()) + user = get_user() or "unknown" + # use a sub-directory in the temproot to speed-up + # make_numbered_dir() call + rootdir = temproot.joinpath("pytest-of-{}".format(user)) + rootdir.mkdir(exist_ok=True) + basetemp = make_numbered_dir_with_cleanup( + prefix="pytest-", root=rootdir, keep=3, lock_timeout=LOCK_TIMEOUT + ) + assert basetemp is not None + self._basetemp = t = basetemp + self._trace("new basetemp", t) + return t + else: + return self._basetemp + + +@attr.s +class TempdirFactory(object): + """ + backward comptibility wrapper that implements + :class:``py.path.local`` for :class:``TempPathFactory`` + """ + + _tmppath_factory = attr.ib() + + def ensuretemp(self, string, dir=1): + """ (deprecated) return temporary directory path with + the given string as the trailing part. It is usually + better to use the 'tmpdir' function argument which + provides an empty unique-per-test-invocation directory + and is guaranteed to be empty. + """ + # py.log._apiwarn(">1.1", "use tmpdir function argument") + from .deprecated import PYTEST_ENSURETEMP + + warnings.warn(PYTEST_ENSURETEMP, stacklevel=2) + return self.getbasetemp().ensure(string, dir=dir) + + def mktemp(self, basename, numbered=True): + """Create a subdirectory of the base temporary directory and return it. + If ``numbered``, ensure the directory is unique by adding a number + prefix greater than any existing one. + """ + return py.path.local(self._tmppath_factory.mktemp(basename, numbered).resolve()) + + def getbasetemp(self): + """backward compat wrapper for ``_tmppath_factory.getbasetemp``""" + return py.path.local(self._tmppath_factory.getbasetemp().resolve()) + + +def get_user(): + """Return the current user name, or None if getuser() does not work + in the current environment (see #1010). + """ + import getpass + + try: + return getpass.getuser() + except (ImportError, KeyError): + return None + + +def pytest_configure(config): + """Create a TempdirFactory and attach it to the config object. + + This is to comply with existing plugins which expect the handler to be + available at pytest_configure time, but ideally should be moved entirely + to the tmpdir_factory session fixture. + """ + mp = MonkeyPatch() + tmppath_handler = TempPathFactory.from_config(config) + t = TempdirFactory(tmppath_handler) + config._cleanup.append(mp.undo) + mp.setattr(config, "_tmp_path_factory", tmppath_handler, raising=False) + mp.setattr(config, "_tmpdirhandler", t, raising=False) + mp.setattr(pytest, "ensuretemp", t.ensuretemp, raising=False) + + +@pytest.fixture(scope="session") +def tmpdir_factory(request): + """Return a :class:`_pytest.tmpdir.TempdirFactory` instance for the test session. + """ + return request.config._tmpdirhandler + + +@pytest.fixture(scope="session") +def tmp_path_factory(request): + """Return a :class:`_pytest.tmpdir.TempPathFactory` instance for the test session. 
+ """ + return request.config._tmp_path_factory + + +def _mk_tmp(request, factory): + name = request.node.name + name = re.sub(r"[\W]", "_", name) + MAXVAL = 30 + name = name[:MAXVAL] + return factory.mktemp(name, numbered=True) + + +@pytest.fixture +def tmpdir(request, tmpdir_factory): + """Return a temporary directory path object + which is unique to each test function invocation, + created as a sub directory of the base temporary + directory. The returned object is a `py.path.local`_ + path object. + + .. _`py.path.local`: https://py.readthedocs.io/en/latest/path.html + """ + return _mk_tmp(request, tmpdir_factory) + + +@pytest.fixture +def tmp_path(request, tmp_path_factory): + """Return a temporary directory path object + which is unique to each test function invocation, + created as a sub directory of the base temporary + directory. The returned object is a :class:`pathlib.Path` + object. + + .. note:: + + in python < 3.6 this is a pathlib2.Path + """ + + return _mk_tmp(request, tmp_path_factory) diff --git a/venv/Lib/site-packages/_pytest/unittest.py b/venv/Lib/site-packages/_pytest/unittest.py new file mode 100644 index 00000000..a2fd6ad5 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/unittest.py @@ -0,0 +1,253 @@ +""" discovery and running of std-library "unittest" style tests. """ +from __future__ import absolute_import, division, print_function + +import sys +import traceback + +# for transferring markers +import _pytest._code +from _pytest.config import hookimpl +from _pytest.outcomes import fail, skip, xfail +from _pytest.python import transfer_markers, Class, Module, Function +from _pytest.compat import getimfunc + + +def pytest_pycollect_makeitem(collector, name, obj): + # has unittest been imported and is obj a subclass of its TestCase? 
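+    # The check is wrapped in try/except because ``obj`` may not be a class
+    # at all (``issubclass`` then raises TypeError) and unittest may not have
+    # been imported, so the ``sys.modules`` lookup can raise KeyError; in
+    # either case the hook returns None and leaves the object to the regular
+    # Python collection code.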
+ try: + if not issubclass(obj, sys.modules["unittest"].TestCase): + return + except Exception: + return + # yes, so let's collect it + return UnitTestCase(name, parent=collector) + + +class UnitTestCase(Class): + # marker for fixturemanger.getfixtureinfo() + # to declare that our children do not support funcargs + nofuncargs = True + + def setup(self): + cls = self.obj + if getattr(cls, "__unittest_skip__", False): + return # skipped + setup = getattr(cls, "setUpClass", None) + if setup is not None: + setup() + teardown = getattr(cls, "tearDownClass", None) + if teardown is not None: + self.addfinalizer(teardown) + super(UnitTestCase, self).setup() + + def collect(self): + from unittest import TestLoader + + cls = self.obj + if not getattr(cls, "__test__", True): + return + self.session._fixturemanager.parsefactories(self, unittest=True) + loader = TestLoader() + module = self.getparent(Module).obj + foundsomething = False + for name in loader.getTestCaseNames(self.obj): + x = getattr(self.obj, name) + if not getattr(x, "__test__", True): + continue + funcobj = getimfunc(x) + transfer_markers(funcobj, cls, module) + yield TestCaseFunction(name, parent=self, callobj=funcobj) + foundsomething = True + + if not foundsomething: + runtest = getattr(self.obj, "runTest", None) + if runtest is not None: + ut = sys.modules.get("twisted.trial.unittest", None) + if ut is None or runtest != ut.TestCase.runTest: + yield TestCaseFunction("runTest", parent=self) + + +class TestCaseFunction(Function): + nofuncargs = True + _excinfo = None + _testcase = None + + def setup(self): + self._testcase = self.parent.obj(self.name) + self._fix_unittest_skip_decorator() + self._obj = getattr(self._testcase, self.name) + if hasattr(self._testcase, "setup_method"): + self._testcase.setup_method(self._obj) + if hasattr(self, "_request"): + self._request._fillfixtures() + + def _fix_unittest_skip_decorator(self): + """ + The @unittest.skip decorator calls functools.wraps(self._testcase) + The call to functools.wraps() fails unless self._testcase + has a __name__ attribute. This is usually automatically supplied + if the test is a function or method, but we need to add manually + here. + + See issue #1169 + """ + if sys.version_info[0] == 2: + setattr(self._testcase, "__name__", self.name) + + def teardown(self): + if hasattr(self._testcase, "teardown_method"): + self._testcase.teardown_method(self._obj) + # Allow garbage collection on TestCase instance attributes. 
+ self._testcase = None + self._obj = None + + def startTest(self, testcase): + pass + + def _addexcinfo(self, rawexcinfo): + # unwrap potential exception info (see twisted trial support below) + rawexcinfo = getattr(rawexcinfo, "_rawexcinfo", rawexcinfo) + try: + excinfo = _pytest._code.ExceptionInfo(rawexcinfo) + except TypeError: + try: + try: + values = traceback.format_exception(*rawexcinfo) + values.insert( + 0, + "NOTE: Incompatible Exception Representation, " + "displaying natively:\n\n", + ) + fail("".join(values), pytrace=False) + except (fail.Exception, KeyboardInterrupt): + raise + except: # noqa + fail( + "ERROR: Unknown Incompatible Exception " + "representation:\n%r" % (rawexcinfo,), + pytrace=False, + ) + except KeyboardInterrupt: + raise + except fail.Exception: + excinfo = _pytest._code.ExceptionInfo() + self.__dict__.setdefault("_excinfo", []).append(excinfo) + + def addError(self, testcase, rawexcinfo): + self._addexcinfo(rawexcinfo) + + def addFailure(self, testcase, rawexcinfo): + self._addexcinfo(rawexcinfo) + + def addSkip(self, testcase, reason): + try: + skip(reason) + except skip.Exception: + self._skipped_by_mark = True + self._addexcinfo(sys.exc_info()) + + def addExpectedFailure(self, testcase, rawexcinfo, reason=""): + try: + xfail(str(reason)) + except xfail.Exception: + self._addexcinfo(sys.exc_info()) + + def addUnexpectedSuccess(self, testcase, reason=""): + self._unexpectedsuccess = reason + + def addSuccess(self, testcase): + pass + + def stopTest(self, testcase): + pass + + def _handle_skip(self): + # implements the skipping machinery (see #2137) + # analog to pythons Lib/unittest/case.py:run + testMethod = getattr(self._testcase, self._testcase._testMethodName) + if getattr(self._testcase.__class__, "__unittest_skip__", False) or getattr( + testMethod, "__unittest_skip__", False + ): + # If the class or method was skipped. 
+ skip_why = getattr( + self._testcase.__class__, "__unittest_skip_why__", "" + ) or getattr(testMethod, "__unittest_skip_why__", "") + try: # PY3, unittest2 on PY2 + self._testcase._addSkip(self, self._testcase, skip_why) + except TypeError: # PY2 + if sys.version_info[0] != 2: + raise + self._testcase._addSkip(self, skip_why) + return True + return False + + def runtest(self): + if self.config.pluginmanager.get_plugin("pdbinvoke") is None: + self._testcase(result=self) + else: + # disables tearDown and cleanups for post mortem debugging (see #1890) + if self._handle_skip(): + return + self._testcase.debug() + + def _prunetraceback(self, excinfo): + Function._prunetraceback(self, excinfo) + traceback = excinfo.traceback.filter( + lambda x: not x.frame.f_globals.get("__unittest") + ) + if traceback: + excinfo.traceback = traceback + + +@hookimpl(tryfirst=True) +def pytest_runtest_makereport(item, call): + if isinstance(item, TestCaseFunction): + if item._excinfo: + call.excinfo = item._excinfo.pop(0) + try: + del call.result + except AttributeError: + pass + + +# twisted trial support + + +@hookimpl(hookwrapper=True) +def pytest_runtest_protocol(item): + if isinstance(item, TestCaseFunction) and "twisted.trial.unittest" in sys.modules: + ut = sys.modules["twisted.python.failure"] + Failure__init__ = ut.Failure.__init__ + check_testcase_implements_trial_reporter() + + def excstore( + self, exc_value=None, exc_type=None, exc_tb=None, captureVars=None + ): + if exc_value is None: + self._rawexcinfo = sys.exc_info() + else: + if exc_type is None: + exc_type = type(exc_value) + self._rawexcinfo = (exc_type, exc_value, exc_tb) + try: + Failure__init__( + self, exc_value, exc_type, exc_tb, captureVars=captureVars + ) + except TypeError: + Failure__init__(self, exc_value, exc_type, exc_tb) + + ut.Failure.__init__ = excstore + yield + ut.Failure.__init__ = Failure__init__ + else: + yield + + +def check_testcase_implements_trial_reporter(done=[]): + if done: + return + from zope.interface import classImplements + from twisted.trial.itrial import IReporter + + classImplements(TestCaseFunction, IReporter) + done.append(1) diff --git a/venv/Lib/site-packages/_pytest/warning_types.py b/venv/Lib/site-packages/_pytest/warning_types.py new file mode 100644 index 00000000..55e1f037 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/warning_types.py @@ -0,0 +1,60 @@ +import attr + + +class PytestWarning(UserWarning): + """ + Bases: :class:`UserWarning`. + + Base class for all warnings emitted by pytest. + """ + + +class PytestDeprecationWarning(PytestWarning, DeprecationWarning): + """ + Bases: :class:`pytest.PytestWarning`, :class:`DeprecationWarning`. + + Warning class for features that will be removed in a future version. + """ + + +class RemovedInPytest4Warning(PytestDeprecationWarning): + """ + Bases: :class:`pytest.PytestDeprecationWarning`. + + Warning class for features scheduled to be removed in pytest 4.0. + """ + + +class PytestExperimentalApiWarning(PytestWarning, FutureWarning): + """ + Bases: :class:`pytest.PytestWarning`, :class:`FutureWarning`. + + Warning category used to denote experiments in pytest. 
Use sparingly as the API might change or even be + removed completely in future version + """ + + @classmethod + def simple(cls, apiname): + return cls( + "{apiname} is an experimental api that may change over time".format( + apiname=apiname + ) + ) + + +@attr.s +class UnformattedWarning(object): + """Used to hold warnings that need to format their message at runtime, as opposed to a direct message. + + Using this class avoids to keep all the warning types and messages in this module, avoiding misuse. + """ + + category = attr.ib() + template = attr.ib() + + def format(self, **kwargs): + """Returns an instance of the warning category, formatted with given kwargs""" + return self.category(self.template.format(**kwargs)) + + +PYTESTER_COPY_EXAMPLE = PytestExperimentalApiWarning.simple("testdir.copy_example") diff --git a/venv/Lib/site-packages/_pytest/warnings.py b/venv/Lib/site-packages/_pytest/warnings.py new file mode 100644 index 00000000..e900ff06 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/warnings.py @@ -0,0 +1,172 @@ +from __future__ import absolute_import, division, print_function + +import sys +import warnings +from contextlib import contextmanager + +import pytest + +from _pytest import compat + + +def _setoption(wmod, arg): + """ + Copy of the warning._setoption function but does not escape arguments. + """ + parts = arg.split(":") + if len(parts) > 5: + raise wmod._OptionError("too many fields (max 5): %r" % (arg,)) + while len(parts) < 5: + parts.append("") + action, message, category, module, lineno = [s.strip() for s in parts] + action = wmod._getaction(action) + category = wmod._getcategory(category) + if lineno: + try: + lineno = int(lineno) + if lineno < 0: + raise ValueError + except (ValueError, OverflowError): + raise wmod._OptionError("invalid lineno %r" % (lineno,)) + else: + lineno = 0 + wmod.filterwarnings(action, message, category, module, lineno) + + +def pytest_addoption(parser): + group = parser.getgroup("pytest-warnings") + group.addoption( + "-W", + "--pythonwarnings", + action="append", + help="set which warnings to report, see -W option of python itself.", + ) + parser.addini( + "filterwarnings", + type="linelist", + help="Each line specifies a pattern for " + "warnings.filterwarnings. " + "Processed after -W and --pythonwarnings.", + ) + + +def pytest_configure(config): + config.addinivalue_line( + "markers", + "filterwarnings(warning): add a warning filter to the given test. " + "see https://docs.pytest.org/en/latest/warnings.html#pytest-mark-filterwarnings ", + ) + + +@contextmanager +def catch_warnings_for_item(config, ihook, when, item): + """ + Context manager that catches warnings generated in the contained execution block. + + ``item`` can be None if we are not in the context of an item execution. + + Each warning captured triggers the ``pytest_warning_captured`` hook. 
+ """ + cmdline_filters = config.getoption("pythonwarnings") or [] + inifilters = config.getini("filterwarnings") + with warnings.catch_warnings(record=True) as log: + + if not sys.warnoptions: + # if user is not explicitly configuring warning filters, show deprecation warnings by default (#2908) + warnings.filterwarnings("always", category=DeprecationWarning) + warnings.filterwarnings("always", category=PendingDeprecationWarning) + + # filters should have this precedence: mark, cmdline options, ini + # filters should be applied in the inverse order of precedence + for arg in inifilters: + _setoption(warnings, arg) + + for arg in cmdline_filters: + warnings._setoption(arg) + + if item is not None: + for mark in item.iter_markers(name="filterwarnings"): + for arg in mark.args: + _setoption(warnings, arg) + + yield + + for warning_message in log: + ihook.pytest_warning_captured.call_historic( + kwargs=dict(warning_message=warning_message, when=when, item=item) + ) + + +def warning_record_to_str(warning_message): + """Convert a warnings.WarningMessage to a string, taking in account a lot of unicode shenaningans in Python 2. + + When Python 2 support is dropped this function can be greatly simplified. + """ + warn_msg = warning_message.message + unicode_warning = False + if compat._PY2 and any(isinstance(m, compat.UNICODE_TYPES) for m in warn_msg.args): + new_args = [] + for m in warn_msg.args: + new_args.append( + compat.ascii_escaped(m) if isinstance(m, compat.UNICODE_TYPES) else m + ) + unicode_warning = list(warn_msg.args) != new_args + warn_msg.args = new_args + + msg = warnings.formatwarning( + warn_msg, + warning_message.category, + warning_message.filename, + warning_message.lineno, + warning_message.line, + ) + if unicode_warning: + warnings.warn( + "Warning is using unicode non convertible to ascii, " + "converting to a safe representation:\n {!r}".format(compat.safe_str(msg)), + UnicodeWarning, + ) + return msg + + +@pytest.hookimpl(hookwrapper=True, tryfirst=True) +def pytest_runtest_protocol(item): + with catch_warnings_for_item( + config=item.config, ihook=item.ihook, when="runtest", item=item + ): + yield + + +@pytest.hookimpl(hookwrapper=True, tryfirst=True) +def pytest_collection(session): + config = session.config + with catch_warnings_for_item( + config=config, ihook=config.hook, when="collect", item=None + ): + yield + + +@pytest.hookimpl(hookwrapper=True) +def pytest_terminal_summary(terminalreporter): + config = terminalreporter.config + with catch_warnings_for_item( + config=config, ihook=config.hook, when="config", item=None + ): + yield + + +def _issue_config_warning(warning, config): + """ + This function should be used instead of calling ``warnings.warn`` directly when we are in the "configure" stage: + at this point the actual options might not have been set, so we manually trigger the pytest_warning_captured + hook so we can display this warnings in the terminal. This is a hack until we can sort out #2891. + + :param warning: the warning instance. 
+ :param config: + """ + with warnings.catch_warnings(record=True) as records: + warnings.simplefilter("always", type(warning)) + warnings.warn(warning, stacklevel=2) + config.hook.pytest_warning_captured.call_historic( + kwargs=dict(warning_message=records[0], when="config", item=None) + ) diff --git a/venv/Lib/site-packages/aniso8601-3.0.2.dist-info/DESCRIPTION.rst b/venv/Lib/site-packages/aniso8601-3.0.2.dist-info/DESCRIPTION.rst new file mode 100644 index 00000000..4f1a2384 --- /dev/null +++ b/venv/Lib/site-packages/aniso8601-3.0.2.dist-info/DESCRIPTION.rst @@ -0,0 +1,384 @@ +=========== + aniso8601 +=========== + +---------------------------------- +Another ISO 8601 parser for Python +---------------------------------- + +Features +======== +* Pure Python implementation +* Python 3 support +* Logical behavior + + - Parse a time, get a `datetime.time `_ + - Parse a date, get a `datetime.date `_ + - Parse a datetime, get a `datetime.datetime `_ + - Parse a duration, get a `datetime.timedelta `_ + - Parse an interval, get a tuple of dates or datetimes + - Parse a repeating interval, get a date or datetime `generator `_ + +* UTC offset represented as fixed-offset tzinfo +* Optional `dateutil.relativedelta `_ support for calendar accuracy +* No regular expressions + +Installation +============ + +The recommended installation method is to use pip:: + + $ pip install aniso8601 + +Alternatively, you can download the source (git repository hosted at `Bitbucket `_) and install directly:: + + $ python setup.py install + +Use +=== + +Parsing datetimes +----------------- + +To parse a typical ISO 8601 datetime string:: + + >>> import aniso8601 + >>> aniso8601.parse_datetime('1977-06-10T12:00:00Z') + datetime.datetime(1977, 6, 10, 12, 0, tzinfo=+0:00:00 UTC) + +Alternative delimiters can be specified, for example, a space:: + + >>> aniso8601.parse_datetime('1977-06-10 12:00:00Z', delimiter=' ') + datetime.datetime(1977, 6, 10, 12, 0, tzinfo=+0:00:00 UTC) + +UTC offsets are supported:: + + >>> aniso8601.parse_datetime('1979-06-05T08:00:00-08:00') + datetime.datetime(1979, 6, 5, 8, 0, tzinfo=-8:00:00 UTC) + +If a UTC offset is not specified, the returned datetime will be naive:: + + >>> aniso8601.parse_datetime('1983-01-22T08:00:00') + datetime.datetime(1983, 1, 22, 8, 0) + +Leap seconds are currently not supported and attempting to parse one raises a :code:`LeapSecondError`:: + + >>> aniso8601.parse_datetime('2018-03-06T23:59:60') + Traceback (most recent call last): + File "", line 1, in + File "aniso8601/time.py", line 127, in parse_datetime + timepart = parse_time(isotimestr) + File "aniso8601/time.py", line 110, in parse_time + return _parse_time_naive(timestr) + File "aniso8601/time.py", line 140, in _parse_time_naive + return _RESOLUTION_MAP[get_time_resolution(timestr)](timestr) + File "aniso8601/time.py", line 214, in _parse_second_time + raise LeapSecondError('Leap seconds are not supported.') + aniso8601.exceptions.LeapSecondError: Leap seconds are not supported. 
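+
+Code that may be handed such timestamps can catch the error explicitly; a
+minimal sketch (:code:`LeapSecondError` is importable from
+:code:`aniso8601.exceptions`, as the traceback above shows)::
+
+    >>> from aniso8601.exceptions import LeapSecondError
+    >>> try:
+    ...     aniso8601.parse_datetime('2018-03-06T23:59:60')
+    ... except LeapSecondError:
+    ...     print('leap second timestamps are not supported')
+    leap second timestamps are not supported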
+ +Parsing dates +------------- + +To parse a date represented in an ISO 8601 string:: + + >>> import aniso8601 + >>> aniso8601.parse_date('1984-04-23') + datetime.date(1984, 4, 23) + +Basic format is supported as well:: + + >>> aniso8601.parse_date('19840423') + datetime.date(1984, 4, 23) + +To parse a date using the ISO 8601 week date format:: + + >>> aniso8601.parse_date('1986-W38-1') + datetime.date(1986, 9, 15) + +To parse an ISO 8601 ordinal date:: + + >>> aniso8601.parse_date('1988-132') + datetime.date(1988, 5, 11) + +Parsing times +------------- + +To parse a time formatted as an ISO 8601 string:: + + >>> import aniso8601 + >>> aniso8601.parse_time('11:31:14') + datetime.time(11, 31, 14) + +As with all of the above, basic format is supported:: + + >>> aniso8601.parse_time('113114') + datetime.time(11, 31, 14) + +A UTC offset can be specified for times:: + + >>> aniso8601.parse_time('17:18:19-02:30') + datetime.time(17, 18, 19, tzinfo=-2:30:00 UTC) + >>> aniso8601.parse_time('171819Z') + datetime.time(17, 18, 19, tzinfo=+0:00:00 UTC) + +Reduced accuracy is supported:: + + >>> aniso8601.parse_time('21:42') + datetime.time(21, 42) + >>> aniso8601.parse_time('22') + datetime.time(22, 0) + +A decimal fraction is always allowed on the lowest order element of an ISO 8601 formatted time:: + + >>> aniso8601.parse_time('22:33.5') + datetime.time(22, 33, 30) + >>> aniso8601.parse_time('23.75') + datetime.time(23, 45) + +Leap seconds are currently not supported and attempting to parse one raises a :code:`LeapSecondError`:: + + >>> aniso8601.parse_time('23:59:60') + Traceback (most recent call last): + File "", line 1, in + File "aniso8601/time.py", line 110, in parse_time + return _parse_time_naive(timestr) + File "aniso8601/time.py", line 140, in _parse_time_naive + return _RESOLUTION_MAP[get_time_resolution(timestr)](timestr) + File "aniso8601/time.py", line 214, in _parse_second_time + raise LeapSecondError('Leap seconds are not supported.') + aniso8601.exceptions.LeapSecondError: Leap seconds are not supported. + +Parsing durations +----------------- + +To parse a duration formatted as an ISO 8601 string:: + + >>> import aniso8601 + >>> aniso8601.parse_duration('P1Y2M3DT4H54M6S') + datetime.timedelta(428, 17646) + +Reduced accuracy is supported:: + + >>> aniso8601.parse_duration('P1Y') + datetime.timedelta(365) + +A decimal fraction is allowed on the lowest order element:: + + >>> aniso8601.parse_duration('P1YT3.5M') + datetime.timedelta(365, 210) + +The decimal fraction can be specified with a comma instead of a full-stop:: + + >>> aniso8601.parse_duration('P1YT3,5M') + datetime.timedelta(365, 210) + +Parsing a duration from a combined date and time is supported as well:: + + >>> aniso8601.parse_duration('P0001-01-02T01:30:5') + datetime.timedelta(397, 5405) + +The above treat years as 365 days and months as 30 days. 
If calendar level accuracy is required, the relative keyword argument can be used if `python-dateutil `_ is installed:: + + >>> import aniso8601 + >>> from datetime import date + >>> one_month = aniso8601.parse_duration('P1M', relative=True) + >>> print one_month + relativedelta(months=+1) + >>> date(2003,1,27) + one_month + datetime.date(2003, 2, 27) + >>> date(2003,1,31) + one_month + datetime.date(2003, 2, 28) + >>> date(2003,1,31) + two_months + datetime.date(2003, 3, 31) + +Since a relative fractional month or year is not logical, a :code:`RelativeValueError` is raised when attempting to parse a duration with :code:`relative=True` and fractional month or year:: + + >>> aniso8601.parse_duration('P2.1Y', relative=True) + Traceback (most recent call last): + File "", line 1, in + File "aniso8601/duration.py", line 29, in parse_duration + return _parse_duration_prescribed(isodurationstr, relative) + File "aniso8601/duration.py", line 73, in _parse_duration_prescribed + raise RelativeValueError('Fractional months and years are not defined for relative intervals.') + aniso8601.exceptions.RelativeValueError: Fractional months and years are not defined for relative intervals. + +If :code:`relative=True` is set without python-dateutil available, a :code:`RuntimeError` is raised:: + + >>> aniso8601.parse_duration('P1M', relative=True) + Traceback (most recent call last): + File "", line 1, in + File "aniso8601/duration.py", line 29, in parse_duration + return _parse_duration_prescribed(isodurationstr, relative) + File "aniso8601/duration.py", line 77, in _parse_duration_prescribed + raise RuntimeError('dateutil must be installed for relative duration support.') + RuntimeError: dateutil must be installed for relative duration support + +Parsing intervals +----------------- + +To parse an interval specified by a start and end:: + + >>> import aniso8601 + >>> aniso8601.parse_interval('2007-03-01T13:00:00/2008-05-11T15:30:00') + (datetime.datetime(2007, 3, 1, 13, 0), datetime.datetime(2008, 5, 11, 15, 30)) + +Intervals specified by a start time and a duration are supported:: + + >>> aniso8601.parse_interval('2007-03-01T13:00:00Z/P1Y2M10DT2H30M') + (datetime.datetime(2007, 3, 1, 13, 0, tzinfo=+0:00:00 UTC), datetime.datetime(2008, 5, 9, 15, 30, tzinfo=+0:00:00 UTC)) + +A duration can also be specified by a duration and end time:: + + >>> aniso8601.parse_interval('P1M/1981-04-05') + (datetime.date(1981, 4, 5), datetime.date(1981, 3, 6)) + +Notice that the result of the above parse is not in order from earliest to latest. 
If sorted intervals are required, simply use the :code:`sorted` keyword as shown below:: + + >>> sorted(aniso8601.parse_interval('P1M/1981-04-05')) + [datetime.date(1981, 3, 6), datetime.date(1981, 4, 5)] + +The end of an interval is given as a datetime when required to maintain the resolution specified by a duration, even if the duration start is given as a date:: + + >>> aniso8601.parse_interval('2014-11-12/PT4H54M6.5S') + (datetime.date(2014, 11, 12), datetime.datetime(2014, 11, 12, 4, 54, 6, 500000)) + +Repeating intervals are supported as well, and return a generator:: + + >>> aniso8601.parse_repeating_interval('R3/1981-04-05/P1D') + + >>> list(aniso8601.parse_repeating_interval('R3/1981-04-05/P1D')) + [datetime.date(1981, 4, 5), datetime.date(1981, 4, 6), datetime.date(1981, 4, 7)] + +Repeating intervals are allowed to go in the reverse direction:: + + >>> list(aniso8601.parse_repeating_interval('R2/PT1H2M/1980-03-05T01:01:00')) + [datetime.datetime(1980, 3, 5, 1, 1), datetime.datetime(1980, 3, 4, 23, 59)] + +Unbounded intervals are also allowed (Python 2):: + + >>> result = aniso8601.parse_repeating_interval('R/PT1H2M/1980-03-05T01:01:00') + >>> result.next() + datetime.datetime(1980, 3, 5, 1, 1) + >>> result.next() + datetime.datetime(1980, 3, 4, 23, 59) + +or for Python 3:: + + >>> result = aniso8601.parse_repeating_interval('R/PT1H2M/1980-03-05T01:01:00') + >>> next(result) + datetime.datetime(1980, 3, 5, 1, 1) + >>> next(result) + datetime.datetime(1980, 3, 4, 23, 59) + +Note that you should never try to convert a generator produced by an unbounded interval to a list:: + + >>> list(aniso8601.parse_repeating_interval('R/PT1H2M/1980-03-05T01:01:00')) + Traceback (most recent call last): + File "", line 1, in + File "aniso8601/interval.py", line 161, in _date_generator_unbounded + currentdate += timedelta + OverflowError: date value out of range + +The above treat years as 365 days and months as 30 days. If calendar level accuracy is required, the relative keyword argument can be used if `python-dateutil `_ is installed:: + + >>> aniso8601.parse_interval('2003-01-27/P1M', relative=True) + (datetime.date(2003, 1, 27), datetime.date(2003, 2, 27)) + >>> aniso8601.parse_interval('2003-01-31/P1M', relative=True) + (datetime.date(2003, 1, 31), datetime.date(2003, 2, 28)) + >>> aniso8601.parse_interval('P1Y/2001-02-28', relative=True) + (datetime.date(2001, 2, 28), datetime.date(2000, 2, 28) + +Fractional years and months do not make sense for relative intervals. A :code:`RelativeValueError` is raised when attempting to parse an interval with :code:`relative=True` and a fractional month or year:: + + >>> aniso8601.parse_interval('P1.1Y/2001-02-28', relative=True) + Traceback (most recent call last): + File "", line 1, in + File "aniso8601/interval.py", line 37, in parse_interval + interval_parts = _parse_interval_parts(isointervalstr, intervaldelimiter, datetimedelimiter, relative) + File "aniso8601/interval.py", line 89, in _parse_interval_parts + duration = parse_duration(firstpart, relative=relative) + File "aniso8601/duration.py", line 29, in parse_duration + return _parse_duration_prescribed(isodurationstr, relative) + File "aniso8601/duration.py", line 73, in _parse_duration_prescribed + raise RelativeValueError('Fractional months and years are not defined for relative intervals.') + aniso8601.exceptions.RelativeValueError: Fractional months and years are not defined for relative intervals. 
+ +If :code:`relative=True` is set without python-dateutil available, a :code:`RuntimeError` is raised:: + + >>> aniso8601.parse_interval('2003-01-27/P1M', relative=True) + Traceback (most recent call last): + File "", line 1, in + File "aniso8601/interval.py", line 37, in parse_interval + interval_parts = _parse_interval_parts(isointervalstr, intervaldelimiter, datetimedelimiter, relative) + File "aniso8601/interval.py", line 108, in _parse_interval_parts + duration = parse_duration(secondpart, relative=relative) + File "aniso8601/duration.py", line 29, in parse_duration + return _parse_duration_prescribed(isodurationstr, relative) + File "aniso8601/duration.py", line 77, in _parse_duration_prescribed + raise RuntimeError('dateutil must be installed for relative duration support.') + RuntimeError: dateutil must be installed for relative duration support. + +Date and time resolution +------------------------ + +In some situations, it may be useful to figure out the resolution provided by an ISO 8601 date or time string. Two functions are provided for this purpose. + +To get the resolution of a ISO 8601 time string:: + + >>> aniso8601.get_time_resolution('11:31:14') == aniso8601.resolution.TimeResolution.Seconds + True + >>> aniso8601.get_time_resolution('11:31') == aniso8601.resolution.TimeResolution.Minutes + True + >>> aniso8601.get_time_resolution('11') == aniso8601.resolution.TimeResolution.Hours + True + +Similarly, for an ISO 8601 date string:: + + >>> aniso8601.get_date_resolution('1981-04-05') == aniso8601.resolution.DateResolution.Day + True + >>> aniso8601.get_date_resolution('1981-04') == aniso8601.resolution.DateResolution.Month + True + >>> aniso8601.get_date_resolution('1981') == aniso8601.resolution.DateResolution.Year + True + +Development +=========== + +Setup +----- + +It is recommended to develop using a `virtualenv `_. + +The tests require the :code:`relative` feature to be enabled, install the necessary dependencies using pip:: + + $ pip install .[relative] + +Tests +----- + +To run the unit tests, navigate to the source directory and run the tests for the python version being worked on (python2, python3):: + + $ python2 -m unittest discover aniso8601/tests/ + +or:: + + $ python3 -m unittest discover aniso8601/tests/ + +Contributing +============ + +aniso8601 is an open source project hosted on `Bitbucket `_. + +Any and all bugs are welcome on our `issue tracker `_. +Of particular interest are valid ISO 8601 strings that don't parse, or invalid ones that do. At a minimum, +bug reports should include an example of the misbehaving string, as well as the expected result. Of course +patches containing unit tests (or fixed bugs) are welcome! + +References +========== + +* `ISO 8601:2004(E) `_ (Caution, PDF link) +* `Wikipedia article on ISO 8601 `_ +* `Discussion on alternative ISO 8601 parsers for Python `_ + + diff --git a/venv/Lib/site-packages/aniso8601-3.0.2.dist-info/INSTALLER b/venv/Lib/site-packages/aniso8601-3.0.2.dist-info/INSTALLER new file mode 100644 index 00000000..a1b589e3 --- /dev/null +++ b/venv/Lib/site-packages/aniso8601-3.0.2.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/Lib/site-packages/aniso8601-3.0.2.dist-info/METADATA b/venv/Lib/site-packages/aniso8601-3.0.2.dist-info/METADATA new file mode 100644 index 00000000..f2434368 --- /dev/null +++ b/venv/Lib/site-packages/aniso8601-3.0.2.dist-info/METADATA @@ -0,0 +1,413 @@ +Metadata-Version: 2.0 +Name: aniso8601 +Version: 3.0.2 +Summary: A library for parsing ISO 8601 strings. 
+Home-page: https://bitbucket.org/nielsenb/aniso8601 +Author: Brandon Nielsen +Author-email: nielsenb@jetfuse.net +License: UNKNOWN +Project-URL: Source, https://bitbucket.org/nielsenb/aniso8601 +Project-URL: Documentation, http://aniso8601.readthedocs.io/en/latest/ +Project-URL: Tracker, https://bitbucket.org/nielsenb/aniso8601/issues +Keywords: iso8601 parser +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Provides-Extra: relative +Provides-Extra: relative +Requires-Dist: python-dateutil; extra == 'relative' + +=========== + aniso8601 +=========== + +---------------------------------- +Another ISO 8601 parser for Python +---------------------------------- + +Features +======== +* Pure Python implementation +* Python 3 support +* Logical behavior + + - Parse a time, get a `datetime.time `_ + - Parse a date, get a `datetime.date `_ + - Parse a datetime, get a `datetime.datetime `_ + - Parse a duration, get a `datetime.timedelta `_ + - Parse an interval, get a tuple of dates or datetimes + - Parse a repeating interval, get a date or datetime `generator `_ + +* UTC offset represented as fixed-offset tzinfo +* Optional `dateutil.relativedelta `_ support for calendar accuracy +* No regular expressions + +Installation +============ + +The recommended installation method is to use pip:: + + $ pip install aniso8601 + +Alternatively, you can download the source (git repository hosted at `Bitbucket `_) and install directly:: + + $ python setup.py install + +Use +=== + +Parsing datetimes +----------------- + +To parse a typical ISO 8601 datetime string:: + + >>> import aniso8601 + >>> aniso8601.parse_datetime('1977-06-10T12:00:00Z') + datetime.datetime(1977, 6, 10, 12, 0, tzinfo=+0:00:00 UTC) + +Alternative delimiters can be specified, for example, a space:: + + >>> aniso8601.parse_datetime('1977-06-10 12:00:00Z', delimiter=' ') + datetime.datetime(1977, 6, 10, 12, 0, tzinfo=+0:00:00 UTC) + +UTC offsets are supported:: + + >>> aniso8601.parse_datetime('1979-06-05T08:00:00-08:00') + datetime.datetime(1979, 6, 5, 8, 0, tzinfo=-8:00:00 UTC) + +If a UTC offset is not specified, the returned datetime will be naive:: + + >>> aniso8601.parse_datetime('1983-01-22T08:00:00') + datetime.datetime(1983, 1, 22, 8, 0) + +Leap seconds are currently not supported and attempting to parse one raises a :code:`LeapSecondError`:: + + >>> aniso8601.parse_datetime('2018-03-06T23:59:60') + Traceback (most recent call last): + File "", line 1, in + File "aniso8601/time.py", line 127, in parse_datetime + timepart = parse_time(isotimestr) + File "aniso8601/time.py", line 110, in parse_time + return _parse_time_naive(timestr) + File "aniso8601/time.py", line 140, in _parse_time_naive + return _RESOLUTION_MAP[get_time_resolution(timestr)](timestr) + File "aniso8601/time.py", line 214, in _parse_second_time + raise LeapSecondError('Leap seconds are not supported.') + aniso8601.exceptions.LeapSecondError: Leap seconds are not supported. 
+ +Parsing dates +------------- + +To parse a date represented in an ISO 8601 string:: + + >>> import aniso8601 + >>> aniso8601.parse_date('1984-04-23') + datetime.date(1984, 4, 23) + +Basic format is supported as well:: + + >>> aniso8601.parse_date('19840423') + datetime.date(1984, 4, 23) + +To parse a date using the ISO 8601 week date format:: + + >>> aniso8601.parse_date('1986-W38-1') + datetime.date(1986, 9, 15) + +To parse an ISO 8601 ordinal date:: + + >>> aniso8601.parse_date('1988-132') + datetime.date(1988, 5, 11) + +Parsing times +------------- + +To parse a time formatted as an ISO 8601 string:: + + >>> import aniso8601 + >>> aniso8601.parse_time('11:31:14') + datetime.time(11, 31, 14) + +As with all of the above, basic format is supported:: + + >>> aniso8601.parse_time('113114') + datetime.time(11, 31, 14) + +A UTC offset can be specified for times:: + + >>> aniso8601.parse_time('17:18:19-02:30') + datetime.time(17, 18, 19, tzinfo=-2:30:00 UTC) + >>> aniso8601.parse_time('171819Z') + datetime.time(17, 18, 19, tzinfo=+0:00:00 UTC) + +Reduced accuracy is supported:: + + >>> aniso8601.parse_time('21:42') + datetime.time(21, 42) + >>> aniso8601.parse_time('22') + datetime.time(22, 0) + +A decimal fraction is always allowed on the lowest order element of an ISO 8601 formatted time:: + + >>> aniso8601.parse_time('22:33.5') + datetime.time(22, 33, 30) + >>> aniso8601.parse_time('23.75') + datetime.time(23, 45) + +Leap seconds are currently not supported and attempting to parse one raises a :code:`LeapSecondError`:: + + >>> aniso8601.parse_time('23:59:60') + Traceback (most recent call last): + File "", line 1, in + File "aniso8601/time.py", line 110, in parse_time + return _parse_time_naive(timestr) + File "aniso8601/time.py", line 140, in _parse_time_naive + return _RESOLUTION_MAP[get_time_resolution(timestr)](timestr) + File "aniso8601/time.py", line 214, in _parse_second_time + raise LeapSecondError('Leap seconds are not supported.') + aniso8601.exceptions.LeapSecondError: Leap seconds are not supported. + +Parsing durations +----------------- + +To parse a duration formatted as an ISO 8601 string:: + + >>> import aniso8601 + >>> aniso8601.parse_duration('P1Y2M3DT4H54M6S') + datetime.timedelta(428, 17646) + +Reduced accuracy is supported:: + + >>> aniso8601.parse_duration('P1Y') + datetime.timedelta(365) + +A decimal fraction is allowed on the lowest order element:: + + >>> aniso8601.parse_duration('P1YT3.5M') + datetime.timedelta(365, 210) + +The decimal fraction can be specified with a comma instead of a full-stop:: + + >>> aniso8601.parse_duration('P1YT3,5M') + datetime.timedelta(365, 210) + +Parsing a duration from a combined date and time is supported as well:: + + >>> aniso8601.parse_duration('P0001-01-02T01:30:5') + datetime.timedelta(397, 5405) + +The above treat years as 365 days and months as 30 days. 
If calendar level accuracy is required, the relative keyword argument can be used if `python-dateutil `_ is installed:: + + >>> import aniso8601 + >>> from datetime import date + >>> one_month = aniso8601.parse_duration('P1M', relative=True) + >>> print one_month + relativedelta(months=+1) + >>> date(2003,1,27) + one_month + datetime.date(2003, 2, 27) + >>> date(2003,1,31) + one_month + datetime.date(2003, 2, 28) + >>> date(2003,1,31) + two_months + datetime.date(2003, 3, 31) + +Since a relative fractional month or year is not logical, a :code:`RelativeValueError` is raised when attempting to parse a duration with :code:`relative=True` and fractional month or year:: + + >>> aniso8601.parse_duration('P2.1Y', relative=True) + Traceback (most recent call last): + File "", line 1, in + File "aniso8601/duration.py", line 29, in parse_duration + return _parse_duration_prescribed(isodurationstr, relative) + File "aniso8601/duration.py", line 73, in _parse_duration_prescribed + raise RelativeValueError('Fractional months and years are not defined for relative intervals.') + aniso8601.exceptions.RelativeValueError: Fractional months and years are not defined for relative intervals. + +If :code:`relative=True` is set without python-dateutil available, a :code:`RuntimeError` is raised:: + + >>> aniso8601.parse_duration('P1M', relative=True) + Traceback (most recent call last): + File "", line 1, in + File "aniso8601/duration.py", line 29, in parse_duration + return _parse_duration_prescribed(isodurationstr, relative) + File "aniso8601/duration.py", line 77, in _parse_duration_prescribed + raise RuntimeError('dateutil must be installed for relative duration support.') + RuntimeError: dateutil must be installed for relative duration support + +Parsing intervals +----------------- + +To parse an interval specified by a start and end:: + + >>> import aniso8601 + >>> aniso8601.parse_interval('2007-03-01T13:00:00/2008-05-11T15:30:00') + (datetime.datetime(2007, 3, 1, 13, 0), datetime.datetime(2008, 5, 11, 15, 30)) + +Intervals specified by a start time and a duration are supported:: + + >>> aniso8601.parse_interval('2007-03-01T13:00:00Z/P1Y2M10DT2H30M') + (datetime.datetime(2007, 3, 1, 13, 0, tzinfo=+0:00:00 UTC), datetime.datetime(2008, 5, 9, 15, 30, tzinfo=+0:00:00 UTC)) + +A duration can also be specified by a duration and end time:: + + >>> aniso8601.parse_interval('P1M/1981-04-05') + (datetime.date(1981, 4, 5), datetime.date(1981, 3, 6)) + +Notice that the result of the above parse is not in order from earliest to latest. 
If sorted intervals are required, simply use the :code:`sorted` keyword as shown below:: + + >>> sorted(aniso8601.parse_interval('P1M/1981-04-05')) + [datetime.date(1981, 3, 6), datetime.date(1981, 4, 5)] + +The end of an interval is given as a datetime when required to maintain the resolution specified by a duration, even if the duration start is given as a date:: + + >>> aniso8601.parse_interval('2014-11-12/PT4H54M6.5S') + (datetime.date(2014, 11, 12), datetime.datetime(2014, 11, 12, 4, 54, 6, 500000)) + +Repeating intervals are supported as well, and return a generator:: + + >>> aniso8601.parse_repeating_interval('R3/1981-04-05/P1D') + + >>> list(aniso8601.parse_repeating_interval('R3/1981-04-05/P1D')) + [datetime.date(1981, 4, 5), datetime.date(1981, 4, 6), datetime.date(1981, 4, 7)] + +Repeating intervals are allowed to go in the reverse direction:: + + >>> list(aniso8601.parse_repeating_interval('R2/PT1H2M/1980-03-05T01:01:00')) + [datetime.datetime(1980, 3, 5, 1, 1), datetime.datetime(1980, 3, 4, 23, 59)] + +Unbounded intervals are also allowed (Python 2):: + + >>> result = aniso8601.parse_repeating_interval('R/PT1H2M/1980-03-05T01:01:00') + >>> result.next() + datetime.datetime(1980, 3, 5, 1, 1) + >>> result.next() + datetime.datetime(1980, 3, 4, 23, 59) + +or for Python 3:: + + >>> result = aniso8601.parse_repeating_interval('R/PT1H2M/1980-03-05T01:01:00') + >>> next(result) + datetime.datetime(1980, 3, 5, 1, 1) + >>> next(result) + datetime.datetime(1980, 3, 4, 23, 59) + +Note that you should never try to convert a generator produced by an unbounded interval to a list:: + + >>> list(aniso8601.parse_repeating_interval('R/PT1H2M/1980-03-05T01:01:00')) + Traceback (most recent call last): + File "", line 1, in + File "aniso8601/interval.py", line 161, in _date_generator_unbounded + currentdate += timedelta + OverflowError: date value out of range + +The above treat years as 365 days and months as 30 days. If calendar level accuracy is required, the relative keyword argument can be used if `python-dateutil `_ is installed:: + + >>> aniso8601.parse_interval('2003-01-27/P1M', relative=True) + (datetime.date(2003, 1, 27), datetime.date(2003, 2, 27)) + >>> aniso8601.parse_interval('2003-01-31/P1M', relative=True) + (datetime.date(2003, 1, 31), datetime.date(2003, 2, 28)) + >>> aniso8601.parse_interval('P1Y/2001-02-28', relative=True) + (datetime.date(2001, 2, 28), datetime.date(2000, 2, 28) + +Fractional years and months do not make sense for relative intervals. A :code:`RelativeValueError` is raised when attempting to parse an interval with :code:`relative=True` and a fractional month or year:: + + >>> aniso8601.parse_interval('P1.1Y/2001-02-28', relative=True) + Traceback (most recent call last): + File "", line 1, in + File "aniso8601/interval.py", line 37, in parse_interval + interval_parts = _parse_interval_parts(isointervalstr, intervaldelimiter, datetimedelimiter, relative) + File "aniso8601/interval.py", line 89, in _parse_interval_parts + duration = parse_duration(firstpart, relative=relative) + File "aniso8601/duration.py", line 29, in parse_duration + return _parse_duration_prescribed(isodurationstr, relative) + File "aniso8601/duration.py", line 73, in _parse_duration_prescribed + raise RelativeValueError('Fractional months and years are not defined for relative intervals.') + aniso8601.exceptions.RelativeValueError: Fractional months and years are not defined for relative intervals. 
+ +If :code:`relative=True` is set without python-dateutil available, a :code:`RuntimeError` is raised:: + + >>> aniso8601.parse_interval('2003-01-27/P1M', relative=True) + Traceback (most recent call last): + File "", line 1, in + File "aniso8601/interval.py", line 37, in parse_interval + interval_parts = _parse_interval_parts(isointervalstr, intervaldelimiter, datetimedelimiter, relative) + File "aniso8601/interval.py", line 108, in _parse_interval_parts + duration = parse_duration(secondpart, relative=relative) + File "aniso8601/duration.py", line 29, in parse_duration + return _parse_duration_prescribed(isodurationstr, relative) + File "aniso8601/duration.py", line 77, in _parse_duration_prescribed + raise RuntimeError('dateutil must be installed for relative duration support.') + RuntimeError: dateutil must be installed for relative duration support. + +Date and time resolution +------------------------ + +In some situations, it may be useful to figure out the resolution provided by an ISO 8601 date or time string. Two functions are provided for this purpose. + +To get the resolution of a ISO 8601 time string:: + + >>> aniso8601.get_time_resolution('11:31:14') == aniso8601.resolution.TimeResolution.Seconds + True + >>> aniso8601.get_time_resolution('11:31') == aniso8601.resolution.TimeResolution.Minutes + True + >>> aniso8601.get_time_resolution('11') == aniso8601.resolution.TimeResolution.Hours + True + +Similarly, for an ISO 8601 date string:: + + >>> aniso8601.get_date_resolution('1981-04-05') == aniso8601.resolution.DateResolution.Day + True + >>> aniso8601.get_date_resolution('1981-04') == aniso8601.resolution.DateResolution.Month + True + >>> aniso8601.get_date_resolution('1981') == aniso8601.resolution.DateResolution.Year + True + +Development +=========== + +Setup +----- + +It is recommended to develop using a `virtualenv `_. + +The tests require the :code:`relative` feature to be enabled, install the necessary dependencies using pip:: + + $ pip install .[relative] + +Tests +----- + +To run the unit tests, navigate to the source directory and run the tests for the python version being worked on (python2, python3):: + + $ python2 -m unittest discover aniso8601/tests/ + +or:: + + $ python3 -m unittest discover aniso8601/tests/ + +Contributing +============ + +aniso8601 is an open source project hosted on `Bitbucket `_. + +Any and all bugs are welcome on our `issue tracker `_. +Of particular interest are valid ISO 8601 strings that don't parse, or invalid ones that do. At a minimum, +bug reports should include an example of the misbehaving string, as well as the expected result. Of course +patches containing unit tests (or fixed bugs) are welcome! 
+ +References +========== + +* `ISO 8601:2004(E) `_ (Caution, PDF link) +* `Wikipedia article on ISO 8601 `_ +* `Discussion on alternative ISO 8601 parsers for Python `_ + + diff --git a/venv/Lib/site-packages/aniso8601-3.0.2.dist-info/RECORD b/venv/Lib/site-packages/aniso8601-3.0.2.dist-info/RECORD new file mode 100644 index 00000000..2045edf5 --- /dev/null +++ b/venv/Lib/site-packages/aniso8601-3.0.2.dist-info/RECORD @@ -0,0 +1,25 @@ +aniso8601-3.0.2.dist-info/DESCRIPTION.rst,sha256=IRO_7zo9wM-GXLQbkggGyZoIVr3ddwLG627qf5-Wl90,15070 +aniso8601-3.0.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +aniso8601-3.0.2.dist-info/METADATA,sha256=DaU0fNs-ghRC1q6nKwF1cXiQieIyHNh-oZhNv0vRGrc,16259 +aniso8601-3.0.2.dist-info/RECORD,, +aniso8601-3.0.2.dist-info/WHEEL,sha256=5wvfB7GvgZAbKBSE9uX9Zbi6LCL-_KgezgHblXhCRnM,113 +aniso8601-3.0.2.dist-info/metadata.json,sha256=loCVI9Gvc7WILza0eIH-iQITWlL8rDxjWSM2Z9K6ZLU,1135 +aniso8601-3.0.2.dist-info/top_level.txt,sha256=MVQomyeED8nGIH7PUQdMzxgLppIB48oYHtcmL17ETB0,10 +aniso8601/__init__.py,sha256=r15awjHgn8H6loHfYB4mJgGeiSTiJpIvIpVK4gXCzzk,527 +aniso8601/__pycache__/__init__.cpython-37.pyc,, +aniso8601/__pycache__/compat.cpython-37.pyc,, +aniso8601/__pycache__/date.cpython-37.pyc,, +aniso8601/__pycache__/duration.cpython-37.pyc,, +aniso8601/__pycache__/exceptions.cpython-37.pyc,, +aniso8601/__pycache__/interval.cpython-37.pyc,, +aniso8601/__pycache__/resolution.cpython-37.pyc,, +aniso8601/__pycache__/time.cpython-37.pyc,, +aniso8601/__pycache__/timezone.cpython-37.pyc,, +aniso8601/compat.py,sha256=TEGb7_3qpbKtAfu7FfSkf0tcc9lquiDZ-Yt4uQCw45o,305 +aniso8601/date.py,sha256=fmY0t6ESI6dooFOI2g57RFgIPrmxnyEI8DznuW7QrmE,8405 +aniso8601/duration.py,sha256=qwrK0nS-Zo9hVGZzmhL0aQXOuTv_mBNmO74u8PbC5RE,10726 +aniso8601/exceptions.py,sha256=N94MSQSnyctkjFXRlg4T91qQpYYSSrXuGCc1V1-knUI,1210 +aniso8601/interval.py,sha256=VPgu4po6j88CtRy-0RNPBX_BffyiN-mAFvpdyesCtT4,6447 +aniso8601/resolution.py,sha256=JklhrwxP807fpZZhLvQrDy40lzIz5OctsMK7m1yNSk8,422 +aniso8601/time.py,sha256=MG61Oc4cyCBYklNPIkO14PCod4F8FyMe9N0X8rme2Rs,8128 +aniso8601/timezone.py,sha256=9A-ah5xFapOkxOJFEPvmF-_WYnn2zfS5ZrHqQdC5K9M,3376 diff --git a/venv/Lib/site-packages/aniso8601-3.0.2.dist-info/WHEEL b/venv/Lib/site-packages/aniso8601-3.0.2.dist-info/WHEEL new file mode 100644 index 00000000..7bf9daa1 --- /dev/null +++ b/venv/Lib/site-packages/aniso8601-3.0.2.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.30.0.a0) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/venv/Lib/site-packages/aniso8601-3.0.2.dist-info/metadata.json b/venv/Lib/site-packages/aniso8601-3.0.2.dist-info/metadata.json new file mode 100644 index 00000000..902a9632 --- /dev/null +++ b/venv/Lib/site-packages/aniso8601-3.0.2.dist-info/metadata.json @@ -0,0 +1 @@ +{"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Topic :: Software Development :: Libraries :: Python Modules"], "extensions": {"python.details": {"contacts": [{"email": "nielsenb@jetfuse.net", "name": "Brandon Nielsen", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, 
"project_urls": {"Home": "https://bitbucket.org/nielsenb/aniso8601"}}}, "extras": ["relative"], "generator": "bdist_wheel (0.30.0.a0)", "keywords": ["iso8601", "parser"], "metadata_version": "2.0", "name": "aniso8601", "project_url": "Source, https://bitbucket.org/nielsenb/aniso8601", "run_requires": [{"extra": "relative", "requires": ["python-dateutil"]}], "summary": "A library for parsing ISO 8601 strings.", "version": "3.0.2"} \ No newline at end of file diff --git a/venv/Lib/site-packages/aniso8601-3.0.2.dist-info/top_level.txt b/venv/Lib/site-packages/aniso8601-3.0.2.dist-info/top_level.txt new file mode 100644 index 00000000..166ae78c --- /dev/null +++ b/venv/Lib/site-packages/aniso8601-3.0.2.dist-info/top_level.txt @@ -0,0 +1 @@ +aniso8601 diff --git a/venv/Lib/site-packages/aniso8601/__init__.py b/venv/Lib/site-packages/aniso8601/__init__.py new file mode 100644 index 00000000..a5286e90 --- /dev/null +++ b/venv/Lib/site-packages/aniso8601/__init__.py @@ -0,0 +1,13 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) 2018, Brandon Nielsen +# All rights reserved. +# +# This software may be modified and distributed under the terms +# of the BSD license. See the LICENSE file for details. + +#Import the main parsing functions so they are readily available +from aniso8601.time import parse_datetime, parse_time, get_time_resolution +from aniso8601.date import parse_date, get_date_resolution +from aniso8601.duration import parse_duration +from aniso8601.interval import parse_interval, parse_repeating_interval diff --git a/venv/Lib/site-packages/aniso8601/__pycache__/__init__.cpython-37.pyc b/venv/Lib/site-packages/aniso8601/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 00000000..9e21846e Binary files /dev/null and b/venv/Lib/site-packages/aniso8601/__pycache__/__init__.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/aniso8601/__pycache__/compat.cpython-37.pyc b/venv/Lib/site-packages/aniso8601/__pycache__/compat.cpython-37.pyc new file mode 100644 index 00000000..cacf7c3e Binary files /dev/null and b/venv/Lib/site-packages/aniso8601/__pycache__/compat.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/aniso8601/__pycache__/date.cpython-37.pyc b/venv/Lib/site-packages/aniso8601/__pycache__/date.cpython-37.pyc new file mode 100644 index 00000000..e1079e60 Binary files /dev/null and b/venv/Lib/site-packages/aniso8601/__pycache__/date.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/aniso8601/__pycache__/duration.cpython-37.pyc b/venv/Lib/site-packages/aniso8601/__pycache__/duration.cpython-37.pyc new file mode 100644 index 00000000..899ebf47 Binary files /dev/null and b/venv/Lib/site-packages/aniso8601/__pycache__/duration.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/aniso8601/__pycache__/exceptions.cpython-37.pyc b/venv/Lib/site-packages/aniso8601/__pycache__/exceptions.cpython-37.pyc new file mode 100644 index 00000000..b7d3c2f4 Binary files /dev/null and b/venv/Lib/site-packages/aniso8601/__pycache__/exceptions.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/aniso8601/__pycache__/interval.cpython-37.pyc b/venv/Lib/site-packages/aniso8601/__pycache__/interval.cpython-37.pyc new file mode 100644 index 00000000..b4569a5e Binary files /dev/null and b/venv/Lib/site-packages/aniso8601/__pycache__/interval.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/aniso8601/__pycache__/resolution.cpython-37.pyc b/venv/Lib/site-packages/aniso8601/__pycache__/resolution.cpython-37.pyc new file mode 100644 index 00000000..38c92db5 Binary 
files /dev/null and b/venv/Lib/site-packages/aniso8601/__pycache__/resolution.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/aniso8601/__pycache__/time.cpython-37.pyc b/venv/Lib/site-packages/aniso8601/__pycache__/time.cpython-37.pyc new file mode 100644 index 00000000..3ea37ac1 Binary files /dev/null and b/venv/Lib/site-packages/aniso8601/__pycache__/time.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/aniso8601/__pycache__/timezone.cpython-37.pyc b/venv/Lib/site-packages/aniso8601/__pycache__/timezone.cpython-37.pyc new file mode 100644 index 00000000..124664ee Binary files /dev/null and b/venv/Lib/site-packages/aniso8601/__pycache__/timezone.cpython-37.pyc differ diff --git a/venv/Lib/site-packages/aniso8601/compat.py b/venv/Lib/site-packages/aniso8601/compat.py new file mode 100644 index 00000000..9f82be37 --- /dev/null +++ b/venv/Lib/site-packages/aniso8601/compat.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) 2018, Brandon Nielsen +# All rights reserved. +# +# This software may be modified and distributed under the terms +# of the BSD license. See the LICENSE file for details. + +import sys + +PY2 = sys.version_info[0] == 2 + +if PY2: + range = xrange +else: + range = range diff --git a/venv/Lib/site-packages/aniso8601/date.py b/venv/Lib/site-packages/aniso8601/date.py new file mode 100644 index 00000000..df35d0ba --- /dev/null +++ b/venv/Lib/site-packages/aniso8601/date.py @@ -0,0 +1,250 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) 2018, Brandon Nielsen +# All rights reserved. +# +# This software may be modified and distributed under the terms +# of the BSD license. See the LICENSE file for details. + +import datetime + +from aniso8601.exceptions import DayOutOfBoundsError, ISOFormatError, \ + WeekOutOfBoundsError, YearOutOfBoundsError +from aniso8601.resolution import DateResolution + +def get_date_resolution(isodatestr): + #Valid string formats are: + # + #Y[YYY] + #YYYY-MM-DD + #YYYYMMDD + #YYYY-MM + #YYYY-Www + #YYYYWww + #YYYY-Www-D + #YYYYWwwD + #YYYY-DDD + #YYYYDDD + if isodatestr.startswith('+') or isodatestr.startswith('-'): + raise NotImplementedError('ISO 8601 extended year representation not supported.') + + if isodatestr.find('W') != -1: + #Handle ISO 8601 week date format + hyphens_present = 1 if isodatestr.find('-') != -1 else 0 + week_date_len = 7 + hyphens_present + weekday_date_len = 8 + 2 * hyphens_present + + if len(isodatestr) == week_date_len: + #YYYY-Www + #YYYYWww + return DateResolution.Week + elif len(isodatestr) == weekday_date_len: + #YYYY-Www-D + #YYYYWwwD + return DateResolution.Weekday + else: + raise ISOFormatError('"{0}" is not a valid ISO 8601 week date.'.format(isodatestr)) + + #If the size of the string of 4 or less, assume its a truncated year representation + if len(isodatestr) <= 4: + return DateResolution.Year + + #An ISO string may be a calendar represntation if: + # 1) When split on a hyphen, the sizes of the parts are 4, 2, 2 or 4, 2 + # 2) There are no hyphens, and the length is 8 + datestrsplit = isodatestr.split('-') + + #Check case 1 + if len(datestrsplit) == 2: + if len(datestrsplit[0]) == 4 and len(datestrsplit[1]) == 2: + return DateResolution.Month + + if len(datestrsplit) == 3: + if len(datestrsplit[0]) == 4 and len(datestrsplit[1]) == 2 and len(datestrsplit[2]) == 2: + return DateResolution.Day + + #Check case 2 + if len(isodatestr) == 8 and isodatestr.find('-') == -1: + return DateResolution.Day + + #An ISO string may be a ordinal date representation if: + # 1) When split on a hyphen, the 
sizes of the parts are 4, 3 + # 2) There are no hyphens, and the length is 7 + + #Check case 1 + if len(datestrsplit) == 2: + if len(datestrsplit[0]) == 4 and len(datestrsplit[1]) == 3: + return DateResolution.Ordinal + + #Check case 2 + if len(isodatestr) == 7 and isodatestr.find('-') == -1: + return DateResolution.Ordinal + + #None of the date representations match + raise ISOFormatError('"{0}" is not an ISO 8601 date, perhaps it represents a time or datetime.'.format(isodatestr)) + +def parse_date(isodatestr): + #Given a string in any ISO 8601 date format, return a datetime.date + #object that corresponds to the given date. Valid string formats are: + # + #Y[YYY] + #YYYY-MM-DD + #YYYYMMDD + #YYYY-MM + #YYYY-Www + #YYYYWww + #YYYY-Www-D + #YYYYWwwD + #YYYY-DDD + #YYYYDDD + # + #Note that the ISO 8601 date format of ±YYYYY is expressly not supported + return _RESOLUTION_MAP[get_date_resolution(isodatestr)](isodatestr) + +def _parse_year(yearstr): + #yearstr is of the format Y[YYY] + # + #0000 (1 BC) is not representible as a Python date so a ValueError is + #raised + # + #Truncated dates, like '19', refer to 1900-1999 inclusive, we simply parse + #to 1900-01-01 + # + #Since no additional resolution is provided, the month is set to 1, and + #day is set to 1 + + if len(yearstr) == 4: + isoyear = int(yearstr) + else: + #Shift 0s in from the left to form complete year + isoyear = int(yearstr.ljust(4, '0')) + + if isoyear == 0: + raise YearOutOfBoundsError('Year must be between 1..9999.') + + return datetime.date(isoyear, 1, 1) + +def _parse_calendar_day(datestr): + #datestr is of the format YYYY-MM-DD or YYYYMMDD + if len(datestr) == 10: + #YYYY-MM-DD + strformat = '%Y-%m-%d' + elif len(datestr) == 8: + #YYYYMMDD + strformat = '%Y%m%d' + else: + raise ISOFormatError('"{0}" is not a valid ISO 8601 calendar day.'.format(datestr)) + + parseddatetime = datetime.datetime.strptime(datestr, strformat) + + #Since no 'time' is given, cast to a date + return parseddatetime.date() + +def _parse_calendar_month(datestr): + #datestr is of the format YYYY-MM + if len(datestr) != 7: + raise ISOFormatError('"{0}" is not a valid ISO 8601 calendar month.'.format(datestr)) + + parseddatetime = datetime.datetime.strptime(datestr, '%Y-%m') + + #Since no 'time' is given, cast to a date + return parseddatetime.date() + +def _parse_week_day(datestr): + #datestr is of the format YYYY-Www-D, YYYYWwwD + # + #W is the week number prefix, ww is the week number, between 1 and 53 + #0 is not a valid week number, which differs from the Python implementation + # + #D is the weekday number, between 1 and 7, which differs from the Python + #implementation which is between 0 and 6 + + isoyear = int(datestr[0:4]) + gregorianyearstart = _iso_year_start(isoyear) + + #Week number will be the two characters after the W + windex = datestr.find('W') + isoweeknumber = int(datestr[windex + 1:windex + 3]) + + if isoweeknumber == 0 or isoweeknumber > 53: + raise WeekOutOfBoundsError('Week number must be between 1..53.') + + if datestr.find('-') != -1 and len(datestr) == 10: + #YYYY-Www-D + isoday = int(datestr[9:10]) + elif len(datestr) == 8: + #YYYYWwwD + isoday = int(datestr[7:8]) + else: + raise ISOFormatError('"{0}" is not a valid ISO 8601 week date.'.format(datestr)) + + if isoday == 0 or isoday > 7: + raise DayOutOfBoundsError('Weekday number must be between 1..7.') + + return gregorianyearstart + datetime.timedelta(weeks=isoweeknumber - 1, days=isoday - 1) + +def _parse_week(datestr): + #datestr is of the format YYYY-Www, YYYYWww + 
# + #W is the week number prefix, ww is the week number, between 1 and 53 + #0 is not a valid week number, which differs from the Python implementation + + isoyear = int(datestr[0:4]) + gregorianyearstart = _iso_year_start(isoyear) + + #Week number will be the two characters after the W + windex = datestr.find('W') + isoweeknumber = int(datestr[windex + 1:windex + 3]) + + if isoweeknumber == 0 or isoweeknumber > 53: + raise WeekOutOfBoundsError('Week number must be between 1..53.') + + return gregorianyearstart + datetime.timedelta(weeks=isoweeknumber - 1, days=0) + +def _parse_ordinal_date(datestr): + #datestr is of the format YYYY-DDD or YYYYDDD + #DDD can be from 1 - 36[5,6], this matches Python's definition + + isoyear = int(datestr[0:4]) + + if datestr.find('-') != -1: + #YYYY-DDD + isoday = int(datestr[(datestr.find('-') + 1):]) + else: + #YYYYDDD + isoday = int(datestr[4:]) + + parseddate = datetime.date(isoyear, 1, 1) + datetime.timedelta(days=isoday - 1) + + #Enforce ordinal day limitation + #https://bitbucket.org/nielsenb/aniso8601/issues/14/parsing-ordinal-dates-should-only-allow + if isoday == 0 or parseddate.year != isoyear: + raise DayOutOfBoundsError('Day of year must be from 1..365, 1..366 for leap year.') + + return parseddate + +def _iso_year_start(isoyear): + #Given an ISO year, returns the equivalent of the start of the year + #on the Gregorian calendar (which is used by Python) + #Stolen from: + #http://stackoverflow.com/questions/304256/whats-the-best-way-to-find-the-inverse-of-datetime-isocalendar + + #Determine the location of the 4th of January, the first week of + #the ISO year is the week containing the 4th of January + #http://en.wikipedia.org/wiki/ISO_week_date + fourth_jan = datetime.date(isoyear, 1, 4) + + #Note the conversion from ISO day (1 - 7) and Python day (0 - 6) + delta = datetime.timedelta(fourth_jan.isoweekday() - 1) + + #Return the start of the year + return fourth_jan - delta + +_RESOLUTION_MAP = { + DateResolution.Day: _parse_calendar_day, + DateResolution.Ordinal: _parse_ordinal_date, + DateResolution.Month: _parse_calendar_month, + DateResolution.Week: _parse_week, + DateResolution.Weekday: _parse_week_day, + DateResolution.Year: _parse_year +} diff --git a/venv/Lib/site-packages/aniso8601/duration.py b/venv/Lib/site-packages/aniso8601/duration.py new file mode 100644 index 00000000..98095a23 --- /dev/null +++ b/venv/Lib/site-packages/aniso8601/duration.py @@ -0,0 +1,292 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) 2018, Brandon Nielsen +# All rights reserved. +# +# This software may be modified and distributed under the terms +# of the BSD license. See the LICENSE file for details. + +import datetime + +from aniso8601.date import parse_date +from aniso8601.exceptions import ISOFormatError, RelativeValueError +from aniso8601.time import parse_time +from aniso8601 import compat + +def parse_duration(isodurationstr, relative=False): + #Given a string representing an ISO 8601 duration, return a + #datetime.timedelta (or dateutil.relativedelta.relativedelta + #if relative=True) that matches the given duration. Valid formats are: + # + #PnYnMnDTnHnMnS (or any reduced precision equivalent) + #PT

This is a doc

') + + + """ + tb = treebuilders.getTreeBuilder(treebuilder) + p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements) + return p.parse(doc, **kwargs) + + +def parseFragment(doc, container="div", treebuilder="etree", namespaceHTMLElements=True, **kwargs): + """Parse an HTML fragment as a string or file-like object into a tree + + :arg doc: the fragment to parse as a string or file-like object + + :arg container: the container context to parse the fragment in + + :arg treebuilder: the treebuilder to use when parsing + + :arg namespaceHTMLElements: whether or not to namespace HTML elements + + :returns: parsed tree + + Example: + + >>> from html5lib.html5libparser import parseFragment + >>> parseFragment('this is a fragment') + + + """ + tb = treebuilders.getTreeBuilder(treebuilder) + p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements) + return p.parseFragment(doc, container=container, **kwargs) + + +def method_decorator_metaclass(function): + class Decorated(type): + def __new__(meta, classname, bases, classDict): + for attributeName, attribute in classDict.items(): + if isinstance(attribute, types.FunctionType): + attribute = function(attribute) + + classDict[attributeName] = attribute + return type.__new__(meta, classname, bases, classDict) + return Decorated + + +class HTMLParser(object): + """HTML parser + + Generates a tree structure from a stream of (possibly malformed) HTML. + + """ + + def __init__(self, tree=None, strict=False, namespaceHTMLElements=True, debug=False): + """ + :arg tree: a treebuilder class controlling the type of tree that will be + returned. Built in treebuilders can be accessed through + html5lib.treebuilders.getTreeBuilder(treeType) + + :arg strict: raise an exception when a parse error is encountered + + :arg namespaceHTMLElements: whether or not to namespace HTML elements + + :arg debug: whether or not to enable debug mode which logs things + + Example: + + >>> from html5lib.html5parser import HTMLParser + >>> parser = HTMLParser() # generates parser with etree builder + >>> parser = HTMLParser('lxml', strict=True) # generates parser with lxml builder which is strict + + """ + + # Raise an exception on the first error encountered + self.strict = strict + + if tree is None: + tree = treebuilders.getTreeBuilder("etree") + self.tree = tree(namespaceHTMLElements) + self.errors = [] + + self.phases = dict([(name, cls(self, self.tree)) for name, cls in + getPhases(debug).items()]) + + def _parse(self, stream, innerHTML=False, container="div", scripting=False, **kwargs): + + self.innerHTMLMode = innerHTML + self.container = container + self.scripting = scripting + self.tokenizer = _tokenizer.HTMLTokenizer(stream, parser=self, **kwargs) + self.reset() + + try: + self.mainLoop() + except _ReparseException: + self.reset() + self.mainLoop() + + def reset(self): + self.tree.reset() + self.firstStartTag = False + self.errors = [] + self.log = [] # only used with debug mode + # "quirks" / "limited quirks" / "no quirks" + self.compatMode = "no quirks" + + if self.innerHTMLMode: + self.innerHTML = self.container.lower() + + if self.innerHTML in cdataElements: + self.tokenizer.state = self.tokenizer.rcdataState + elif self.innerHTML in rcdataElements: + self.tokenizer.state = self.tokenizer.rawtextState + elif self.innerHTML == 'plaintext': + self.tokenizer.state = self.tokenizer.plaintextState + else: + # state already is data state + # self.tokenizer.state = self.tokenizer.dataState + pass + self.phase = self.phases["beforeHtml"] + 
self.phase.insertHtmlElement() + self.resetInsertionMode() + else: + self.innerHTML = False # pylint:disable=redefined-variable-type + self.phase = self.phases["initial"] + + self.lastPhase = None + + self.beforeRCDataPhase = None + + self.framesetOK = True + + @property + def documentEncoding(self): + """Name of the character encoding that was used to decode the input stream, or + :obj:`None` if that is not determined yet + + """ + if not hasattr(self, 'tokenizer'): + return None + return self.tokenizer.stream.charEncoding[0].name + + def isHTMLIntegrationPoint(self, element): + if (element.name == "annotation-xml" and + element.namespace == namespaces["mathml"]): + return ("encoding" in element.attributes and + element.attributes["encoding"].translate( + asciiUpper2Lower) in + ("text/html", "application/xhtml+xml")) + else: + return (element.namespace, element.name) in htmlIntegrationPointElements + + def isMathMLTextIntegrationPoint(self, element): + return (element.namespace, element.name) in mathmlTextIntegrationPointElements + + def mainLoop(self): + CharactersToken = tokenTypes["Characters"] + SpaceCharactersToken = tokenTypes["SpaceCharacters"] + StartTagToken = tokenTypes["StartTag"] + EndTagToken = tokenTypes["EndTag"] + CommentToken = tokenTypes["Comment"] + DoctypeToken = tokenTypes["Doctype"] + ParseErrorToken = tokenTypes["ParseError"] + + for token in self.normalizedTokens(): + prev_token = None + new_token = token + while new_token is not None: + prev_token = new_token + currentNode = self.tree.openElements[-1] if self.tree.openElements else None + currentNodeNamespace = currentNode.namespace if currentNode else None + currentNodeName = currentNode.name if currentNode else None + + type = new_token["type"] + + if type == ParseErrorToken: + self.parseError(new_token["data"], new_token.get("datavars", {})) + new_token = None + else: + if (len(self.tree.openElements) == 0 or + currentNodeNamespace == self.tree.defaultNamespace or + (self.isMathMLTextIntegrationPoint(currentNode) and + ((type == StartTagToken and + token["name"] not in frozenset(["mglyph", "malignmark"])) or + type in (CharactersToken, SpaceCharactersToken))) or + (currentNodeNamespace == namespaces["mathml"] and + currentNodeName == "annotation-xml" and + type == StartTagToken and + token["name"] == "svg") or + (self.isHTMLIntegrationPoint(currentNode) and + type in (StartTagToken, CharactersToken, SpaceCharactersToken))): + phase = self.phase + else: + phase = self.phases["inForeignContent"] + + if type == CharactersToken: + new_token = phase.processCharacters(new_token) + elif type == SpaceCharactersToken: + new_token = phase.processSpaceCharacters(new_token) + elif type == StartTagToken: + new_token = phase.processStartTag(new_token) + elif type == EndTagToken: + new_token = phase.processEndTag(new_token) + elif type == CommentToken: + new_token = phase.processComment(new_token) + elif type == DoctypeToken: + new_token = phase.processDoctype(new_token) + + if (type == StartTagToken and prev_token["selfClosing"] and + not prev_token["selfClosingAcknowledged"]): + self.parseError("non-void-element-with-trailing-solidus", + {"name": prev_token["name"]}) + + # When the loop finishes it's EOF + reprocess = True + phases = [] + while reprocess: + phases.append(self.phase) + reprocess = self.phase.processEOF() + if reprocess: + assert self.phase not in phases + + def normalizedTokens(self): + for token in self.tokenizer: + yield self.normalizeToken(token) + + def parse(self, stream, *args, **kwargs): + """Parse a 
HTML document into a well-formed tree + + :arg stream: a file-like object or string containing the HTML to be parsed + + The optional encoding parameter must be a string that indicates + the encoding. If specified, that encoding will be used, + regardless of any BOM or later declaration (such as in a meta + element). + + :arg scripting: treat noscript elements as if JavaScript was turned on + + :returns: parsed tree + + Example: + + >>> from html5lib.html5parser import HTMLParser + >>> parser = HTMLParser() + >>> parser.parse('

This is a doc

') + + + """ + self._parse(stream, False, None, *args, **kwargs) + return self.tree.getDocument() + + def parseFragment(self, stream, *args, **kwargs): + """Parse a HTML fragment into a well-formed tree fragment + + :arg container: name of the element we're setting the innerHTML + property if set to None, default to 'div' + + :arg stream: a file-like object or string containing the HTML to be parsed + + The optional encoding parameter must be a string that indicates + the encoding. If specified, that encoding will be used, + regardless of any BOM or later declaration (such as in a meta + element) + + :arg scripting: treat noscript elements as if JavaScript was turned on + + :returns: parsed tree + + Example: + + >>> from html5lib.html5libparser import HTMLParser + >>> parser = HTMLParser() + >>> parser.parseFragment('this is a fragment') + + + """ + self._parse(stream, True, *args, **kwargs) + return self.tree.getFragment() + + def parseError(self, errorcode="XXX-undefined-error", datavars=None): + # XXX The idea is to make errorcode mandatory. + if datavars is None: + datavars = {} + self.errors.append((self.tokenizer.stream.position(), errorcode, datavars)) + if self.strict: + raise ParseError(E[errorcode] % datavars) + + def normalizeToken(self, token): + # HTML5 specific normalizations to the token stream + if token["type"] == tokenTypes["StartTag"]: + raw = token["data"] + token["data"] = OrderedDict(raw) + if len(raw) > len(token["data"]): + # we had some duplicated attribute, fix so first wins + token["data"].update(raw[::-1]) + + return token + + def adjustMathMLAttributes(self, token): + adjust_attributes(token, adjustMathMLAttributes) + + def adjustSVGAttributes(self, token): + adjust_attributes(token, adjustSVGAttributes) + + def adjustForeignAttributes(self, token): + adjust_attributes(token, adjustForeignAttributesMap) + + def reparseTokenNormal(self, token): + # pylint:disable=unused-argument + self.parser.phase() + + def resetInsertionMode(self): + # The name of this method is mostly historical. (It's also used in the + # specification.) 
+ last = False + newModes = { + "select": "inSelect", + "td": "inCell", + "th": "inCell", + "tr": "inRow", + "tbody": "inTableBody", + "thead": "inTableBody", + "tfoot": "inTableBody", + "caption": "inCaption", + "colgroup": "inColumnGroup", + "table": "inTable", + "head": "inBody", + "body": "inBody", + "frameset": "inFrameset", + "html": "beforeHead" + } + for node in self.tree.openElements[::-1]: + nodeName = node.name + new_phase = None + if node == self.tree.openElements[0]: + assert self.innerHTML + last = True + nodeName = self.innerHTML + # Check for conditions that should only happen in the innerHTML + # case + if nodeName in ("select", "colgroup", "head", "html"): + assert self.innerHTML + + if not last and node.namespace != self.tree.defaultNamespace: + continue + + if nodeName in newModes: + new_phase = self.phases[newModes[nodeName]] + break + elif last: + new_phase = self.phases["inBody"] + break + + self.phase = new_phase + + def parseRCDataRawtext(self, token, contentType): + # Generic RCDATA/RAWTEXT Parsing algorithm + assert contentType in ("RAWTEXT", "RCDATA") + + self.tree.insertElement(token) + + if contentType == "RAWTEXT": + self.tokenizer.state = self.tokenizer.rawtextState + else: + self.tokenizer.state = self.tokenizer.rcdataState + + self.originalPhase = self.phase + + self.phase = self.phases["text"] + + +@_utils.memoize +def getPhases(debug): + def log(function): + """Logger that records which phase processes each token""" + type_names = dict((value, key) for key, value in + tokenTypes.items()) + + def wrapped(self, *args, **kwargs): + if function.__name__.startswith("process") and len(args) > 0: + token = args[0] + try: + info = {"type": type_names[token['type']]} + except: + raise + if token['type'] in tagTokenTypes: + info["name"] = token['name'] + + self.parser.log.append((self.parser.tokenizer.state.__name__, + self.parser.phase.__class__.__name__, + self.__class__.__name__, + function.__name__, + info)) + return function(self, *args, **kwargs) + else: + return function(self, *args, **kwargs) + return wrapped + + def getMetaclass(use_metaclass, metaclass_func): + if use_metaclass: + return method_decorator_metaclass(metaclass_func) + else: + return type + + # pylint:disable=unused-argument + class Phase(with_metaclass(getMetaclass(debug, log))): + """Base class for helper object that implements each phase of processing + """ + + def __init__(self, parser, tree): + self.parser = parser + self.tree = tree + + def processEOF(self): + raise NotImplementedError + + def processComment(self, token): + # For most phases the following is correct. Where it's not it will be + # overridden. + self.tree.insertComment(token, self.tree.openElements[-1]) + + def processDoctype(self, token): + self.parser.parseError("unexpected-doctype") + + def processCharacters(self, token): + self.tree.insertText(token["data"]) + + def processSpaceCharacters(self, token): + self.tree.insertText(token["data"]) + + def processStartTag(self, token): + return self.startTagHandler[token["name"]](token) + + def startTagHtml(self, token): + if not self.parser.firstStartTag and token["name"] == "html": + self.parser.parseError("non-html-root") + # XXX Need a check here to see if the first start tag token emitted is + # this token... If it's not, invoke self.parser.parseError(). 
+ for attr, value in token["data"].items(): + if attr not in self.tree.openElements[0].attributes: + self.tree.openElements[0].attributes[attr] = value + self.parser.firstStartTag = False + + def processEndTag(self, token): + return self.endTagHandler[token["name"]](token) + + class InitialPhase(Phase): + def processSpaceCharacters(self, token): + pass + + def processComment(self, token): + self.tree.insertComment(token, self.tree.document) + + def processDoctype(self, token): + name = token["name"] + publicId = token["publicId"] + systemId = token["systemId"] + correct = token["correct"] + + if (name != "html" or publicId is not None or + systemId is not None and systemId != "about:legacy-compat"): + self.parser.parseError("unknown-doctype") + + if publicId is None: + publicId = "" + + self.tree.insertDoctype(token) + + if publicId != "": + publicId = publicId.translate(asciiUpper2Lower) + + if (not correct or token["name"] != "html" or + publicId.startswith( + ("+//silmaril//dtd html pro v0r11 19970101//", + "-//advasoft ltd//dtd html 3.0 aswedit + extensions//", + "-//as//dtd html 3.0 aswedit + extensions//", + "-//ietf//dtd html 2.0 level 1//", + "-//ietf//dtd html 2.0 level 2//", + "-//ietf//dtd html 2.0 strict level 1//", + "-//ietf//dtd html 2.0 strict level 2//", + "-//ietf//dtd html 2.0 strict//", + "-//ietf//dtd html 2.0//", + "-//ietf//dtd html 2.1e//", + "-//ietf//dtd html 3.0//", + "-//ietf//dtd html 3.2 final//", + "-//ietf//dtd html 3.2//", + "-//ietf//dtd html 3//", + "-//ietf//dtd html level 0//", + "-//ietf//dtd html level 1//", + "-//ietf//dtd html level 2//", + "-//ietf//dtd html level 3//", + "-//ietf//dtd html strict level 0//", + "-//ietf//dtd html strict level 1//", + "-//ietf//dtd html strict level 2//", + "-//ietf//dtd html strict level 3//", + "-//ietf//dtd html strict//", + "-//ietf//dtd html//", + "-//metrius//dtd metrius presentational//", + "-//microsoft//dtd internet explorer 2.0 html strict//", + "-//microsoft//dtd internet explorer 2.0 html//", + "-//microsoft//dtd internet explorer 2.0 tables//", + "-//microsoft//dtd internet explorer 3.0 html strict//", + "-//microsoft//dtd internet explorer 3.0 html//", + "-//microsoft//dtd internet explorer 3.0 tables//", + "-//netscape comm. corp.//dtd html//", + "-//netscape comm. 
corp.//dtd strict html//", + "-//o'reilly and associates//dtd html 2.0//", + "-//o'reilly and associates//dtd html extended 1.0//", + "-//o'reilly and associates//dtd html extended relaxed 1.0//", + "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//", + "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//", + "-//spyglass//dtd html 2.0 extended//", + "-//sq//dtd html 2.0 hotmetal + extensions//", + "-//sun microsystems corp.//dtd hotjava html//", + "-//sun microsystems corp.//dtd hotjava strict html//", + "-//w3c//dtd html 3 1995-03-24//", + "-//w3c//dtd html 3.2 draft//", + "-//w3c//dtd html 3.2 final//", + "-//w3c//dtd html 3.2//", + "-//w3c//dtd html 3.2s draft//", + "-//w3c//dtd html 4.0 frameset//", + "-//w3c//dtd html 4.0 transitional//", + "-//w3c//dtd html experimental 19960712//", + "-//w3c//dtd html experimental 970421//", + "-//w3c//dtd w3 html//", + "-//w3o//dtd w3 html 3.0//", + "-//webtechs//dtd mozilla html 2.0//", + "-//webtechs//dtd mozilla html//")) or + publicId in ("-//w3o//dtd w3 html strict 3.0//en//", + "-/w3c/dtd html 4.0 transitional/en", + "html") or + publicId.startswith( + ("-//w3c//dtd html 4.01 frameset//", + "-//w3c//dtd html 4.01 transitional//")) and + systemId is None or + systemId and systemId.lower() == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd"): + self.parser.compatMode = "quirks" + elif (publicId.startswith( + ("-//w3c//dtd xhtml 1.0 frameset//", + "-//w3c//dtd xhtml 1.0 transitional//")) or + publicId.startswith( + ("-//w3c//dtd html 4.01 frameset//", + "-//w3c//dtd html 4.01 transitional//")) and + systemId is not None): + self.parser.compatMode = "limited quirks" + + self.parser.phase = self.parser.phases["beforeHtml"] + + def anythingElse(self): + self.parser.compatMode = "quirks" + self.parser.phase = self.parser.phases["beforeHtml"] + + def processCharacters(self, token): + self.parser.parseError("expected-doctype-but-got-chars") + self.anythingElse() + return token + + def processStartTag(self, token): + self.parser.parseError("expected-doctype-but-got-start-tag", + {"name": token["name"]}) + self.anythingElse() + return token + + def processEndTag(self, token): + self.parser.parseError("expected-doctype-but-got-end-tag", + {"name": token["name"]}) + self.anythingElse() + return token + + def processEOF(self): + self.parser.parseError("expected-doctype-but-got-eof") + self.anythingElse() + return True + + class BeforeHtmlPhase(Phase): + # helper methods + def insertHtmlElement(self): + self.tree.insertRoot(impliedTagToken("html", "StartTag")) + self.parser.phase = self.parser.phases["beforeHead"] + + # other + def processEOF(self): + self.insertHtmlElement() + return True + + def processComment(self, token): + self.tree.insertComment(token, self.tree.document) + + def processSpaceCharacters(self, token): + pass + + def processCharacters(self, token): + self.insertHtmlElement() + return token + + def processStartTag(self, token): + if token["name"] == "html": + self.parser.firstStartTag = True + self.insertHtmlElement() + return token + + def processEndTag(self, token): + if token["name"] not in ("head", "body", "html", "br"): + self.parser.parseError("unexpected-end-tag-before-html", + {"name": token["name"]}) + else: + self.insertHtmlElement() + return token + + class BeforeHeadPhase(Phase): + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + + self.startTagHandler = _utils.MethodDispatcher([ + ("html", self.startTagHtml), + ("head", self.startTagHead) + ]) 
+ self.startTagHandler.default = self.startTagOther + + self.endTagHandler = _utils.MethodDispatcher([ + (("head", "body", "html", "br"), self.endTagImplyHead) + ]) + self.endTagHandler.default = self.endTagOther + + def processEOF(self): + self.startTagHead(impliedTagToken("head", "StartTag")) + return True + + def processSpaceCharacters(self, token): + pass + + def processCharacters(self, token): + self.startTagHead(impliedTagToken("head", "StartTag")) + return token + + def startTagHtml(self, token): + return self.parser.phases["inBody"].processStartTag(token) + + def startTagHead(self, token): + self.tree.insertElement(token) + self.tree.headPointer = self.tree.openElements[-1] + self.parser.phase = self.parser.phases["inHead"] + + def startTagOther(self, token): + self.startTagHead(impliedTagToken("head", "StartTag")) + return token + + def endTagImplyHead(self, token): + self.startTagHead(impliedTagToken("head", "StartTag")) + return token + + def endTagOther(self, token): + self.parser.parseError("end-tag-after-implied-root", + {"name": token["name"]}) + + class InHeadPhase(Phase): + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + + self.startTagHandler = _utils.MethodDispatcher([ + ("html", self.startTagHtml), + ("title", self.startTagTitle), + (("noframes", "style"), self.startTagNoFramesStyle), + ("noscript", self.startTagNoscript), + ("script", self.startTagScript), + (("base", "basefont", "bgsound", "command", "link"), + self.startTagBaseLinkCommand), + ("meta", self.startTagMeta), + ("head", self.startTagHead) + ]) + self.startTagHandler.default = self.startTagOther + + self.endTagHandler = _utils.MethodDispatcher([ + ("head", self.endTagHead), + (("br", "html", "body"), self.endTagHtmlBodyBr) + ]) + self.endTagHandler.default = self.endTagOther + + # the real thing + def processEOF(self): + self.anythingElse() + return True + + def processCharacters(self, token): + self.anythingElse() + return token + + def startTagHtml(self, token): + return self.parser.phases["inBody"].processStartTag(token) + + def startTagHead(self, token): + self.parser.parseError("two-heads-are-not-better-than-one") + + def startTagBaseLinkCommand(self, token): + self.tree.insertElement(token) + self.tree.openElements.pop() + token["selfClosingAcknowledged"] = True + + def startTagMeta(self, token): + self.tree.insertElement(token) + self.tree.openElements.pop() + token["selfClosingAcknowledged"] = True + + attributes = token["data"] + if self.parser.tokenizer.stream.charEncoding[1] == "tentative": + if "charset" in attributes: + self.parser.tokenizer.stream.changeEncoding(attributes["charset"]) + elif ("content" in attributes and + "http-equiv" in attributes and + attributes["http-equiv"].lower() == "content-type"): + # Encoding it as UTF-8 here is a hack, as really we should pass + # the abstract Unicode string, and just use the + # ContentAttrParser on that, but using UTF-8 allows all chars + # to be encoded and as a ASCII-superset works. 
+ data = _inputstream.EncodingBytes(attributes["content"].encode("utf-8")) + parser = _inputstream.ContentAttrParser(data) + codec = parser.parse() + self.parser.tokenizer.stream.changeEncoding(codec) + + def startTagTitle(self, token): + self.parser.parseRCDataRawtext(token, "RCDATA") + + def startTagNoFramesStyle(self, token): + # Need to decide whether to implement the scripting-disabled case + self.parser.parseRCDataRawtext(token, "RAWTEXT") + + def startTagNoscript(self, token): + if self.parser.scripting: + self.parser.parseRCDataRawtext(token, "RAWTEXT") + else: + self.tree.insertElement(token) + self.parser.phase = self.parser.phases["inHeadNoscript"] + + def startTagScript(self, token): + self.tree.insertElement(token) + self.parser.tokenizer.state = self.parser.tokenizer.scriptDataState + self.parser.originalPhase = self.parser.phase + self.parser.phase = self.parser.phases["text"] + + def startTagOther(self, token): + self.anythingElse() + return token + + def endTagHead(self, token): + node = self.parser.tree.openElements.pop() + assert node.name == "head", "Expected head got %s" % node.name + self.parser.phase = self.parser.phases["afterHead"] + + def endTagHtmlBodyBr(self, token): + self.anythingElse() + return token + + def endTagOther(self, token): + self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) + + def anythingElse(self): + self.endTagHead(impliedTagToken("head")) + + class InHeadNoscriptPhase(Phase): + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + + self.startTagHandler = _utils.MethodDispatcher([ + ("html", self.startTagHtml), + (("basefont", "bgsound", "link", "meta", "noframes", "style"), self.startTagBaseLinkCommand), + (("head", "noscript"), self.startTagHeadNoscript), + ]) + self.startTagHandler.default = self.startTagOther + + self.endTagHandler = _utils.MethodDispatcher([ + ("noscript", self.endTagNoscript), + ("br", self.endTagBr), + ]) + self.endTagHandler.default = self.endTagOther + + def processEOF(self): + self.parser.parseError("eof-in-head-noscript") + self.anythingElse() + return True + + def processComment(self, token): + return self.parser.phases["inHead"].processComment(token) + + def processCharacters(self, token): + self.parser.parseError("char-in-head-noscript") + self.anythingElse() + return token + + def processSpaceCharacters(self, token): + return self.parser.phases["inHead"].processSpaceCharacters(token) + + def startTagHtml(self, token): + return self.parser.phases["inBody"].processStartTag(token) + + def startTagBaseLinkCommand(self, token): + return self.parser.phases["inHead"].processStartTag(token) + + def startTagHeadNoscript(self, token): + self.parser.parseError("unexpected-start-tag", {"name": token["name"]}) + + def startTagOther(self, token): + self.parser.parseError("unexpected-inhead-noscript-tag", {"name": token["name"]}) + self.anythingElse() + return token + + def endTagNoscript(self, token): + node = self.parser.tree.openElements.pop() + assert node.name == "noscript", "Expected noscript got %s" % node.name + self.parser.phase = self.parser.phases["inHead"] + + def endTagBr(self, token): + self.parser.parseError("unexpected-inhead-noscript-tag", {"name": token["name"]}) + self.anythingElse() + return token + + def endTagOther(self, token): + self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) + + def anythingElse(self): + # Caller must raise parse error first! 
+ self.endTagNoscript(impliedTagToken("noscript")) + + class AfterHeadPhase(Phase): + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + + self.startTagHandler = _utils.MethodDispatcher([ + ("html", self.startTagHtml), + ("body", self.startTagBody), + ("frameset", self.startTagFrameset), + (("base", "basefont", "bgsound", "link", "meta", "noframes", "script", + "style", "title"), + self.startTagFromHead), + ("head", self.startTagHead) + ]) + self.startTagHandler.default = self.startTagOther + self.endTagHandler = _utils.MethodDispatcher([(("body", "html", "br"), + self.endTagHtmlBodyBr)]) + self.endTagHandler.default = self.endTagOther + + def processEOF(self): + self.anythingElse() + return True + + def processCharacters(self, token): + self.anythingElse() + return token + + def startTagHtml(self, token): + return self.parser.phases["inBody"].processStartTag(token) + + def startTagBody(self, token): + self.parser.framesetOK = False + self.tree.insertElement(token) + self.parser.phase = self.parser.phases["inBody"] + + def startTagFrameset(self, token): + self.tree.insertElement(token) + self.parser.phase = self.parser.phases["inFrameset"] + + def startTagFromHead(self, token): + self.parser.parseError("unexpected-start-tag-out-of-my-head", + {"name": token["name"]}) + self.tree.openElements.append(self.tree.headPointer) + self.parser.phases["inHead"].processStartTag(token) + for node in self.tree.openElements[::-1]: + if node.name == "head": + self.tree.openElements.remove(node) + break + + def startTagHead(self, token): + self.parser.parseError("unexpected-start-tag", {"name": token["name"]}) + + def startTagOther(self, token): + self.anythingElse() + return token + + def endTagHtmlBodyBr(self, token): + self.anythingElse() + return token + + def endTagOther(self, token): + self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) + + def anythingElse(self): + self.tree.insertElement(impliedTagToken("body", "StartTag")) + self.parser.phase = self.parser.phases["inBody"] + self.parser.framesetOK = True + + class InBodyPhase(Phase): + # http://www.whatwg.org/specs/web-apps/current-work/#parsing-main-inbody + # the really-really-really-very crazy mode + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + + # Set this to the default handler + self.processSpaceCharacters = self.processSpaceCharactersNonPre + + self.startTagHandler = _utils.MethodDispatcher([ + ("html", self.startTagHtml), + (("base", "basefont", "bgsound", "command", "link", "meta", + "script", "style", "title"), + self.startTagProcessInHead), + ("body", self.startTagBody), + ("frameset", self.startTagFrameset), + (("address", "article", "aside", "blockquote", "center", "details", + "dir", "div", "dl", "fieldset", "figcaption", "figure", + "footer", "header", "hgroup", "main", "menu", "nav", "ol", "p", + "section", "summary", "ul"), + self.startTagCloseP), + (headingElements, self.startTagHeading), + (("pre", "listing"), self.startTagPreListing), + ("form", self.startTagForm), + (("li", "dd", "dt"), self.startTagListItem), + ("plaintext", self.startTagPlaintext), + ("a", self.startTagA), + (("b", "big", "code", "em", "font", "i", "s", "small", "strike", + "strong", "tt", "u"), self.startTagFormatting), + ("nobr", self.startTagNobr), + ("button", self.startTagButton), + (("applet", "marquee", "object"), self.startTagAppletMarqueeObject), + ("xmp", self.startTagXmp), + ("table", self.startTagTable), + (("area", "br", "embed", "img", "keygen", "wbr"), + 
self.startTagVoidFormatting), + (("param", "source", "track"), self.startTagParamSource), + ("input", self.startTagInput), + ("hr", self.startTagHr), + ("image", self.startTagImage), + ("isindex", self.startTagIsIndex), + ("textarea", self.startTagTextarea), + ("iframe", self.startTagIFrame), + ("noscript", self.startTagNoscript), + (("noembed", "noframes"), self.startTagRawtext), + ("select", self.startTagSelect), + (("rp", "rt"), self.startTagRpRt), + (("option", "optgroup"), self.startTagOpt), + (("math"), self.startTagMath), + (("svg"), self.startTagSvg), + (("caption", "col", "colgroup", "frame", "head", + "tbody", "td", "tfoot", "th", "thead", + "tr"), self.startTagMisplaced) + ]) + self.startTagHandler.default = self.startTagOther + + self.endTagHandler = _utils.MethodDispatcher([ + ("body", self.endTagBody), + ("html", self.endTagHtml), + (("address", "article", "aside", "blockquote", "button", "center", + "details", "dialog", "dir", "div", "dl", "fieldset", "figcaption", "figure", + "footer", "header", "hgroup", "listing", "main", "menu", "nav", "ol", "pre", + "section", "summary", "ul"), self.endTagBlock), + ("form", self.endTagForm), + ("p", self.endTagP), + (("dd", "dt", "li"), self.endTagListItem), + (headingElements, self.endTagHeading), + (("a", "b", "big", "code", "em", "font", "i", "nobr", "s", "small", + "strike", "strong", "tt", "u"), self.endTagFormatting), + (("applet", "marquee", "object"), self.endTagAppletMarqueeObject), + ("br", self.endTagBr), + ]) + self.endTagHandler.default = self.endTagOther + + def isMatchingFormattingElement(self, node1, node2): + return (node1.name == node2.name and + node1.namespace == node2.namespace and + node1.attributes == node2.attributes) + + # helper + def addFormattingElement(self, token): + self.tree.insertElement(token) + element = self.tree.openElements[-1] + + matchingElements = [] + for node in self.tree.activeFormattingElements[::-1]: + if node is Marker: + break + elif self.isMatchingFormattingElement(node, element): + matchingElements.append(node) + + assert len(matchingElements) <= 3 + if len(matchingElements) == 3: + self.tree.activeFormattingElements.remove(matchingElements[-1]) + self.tree.activeFormattingElements.append(element) + + # the real deal + def processEOF(self): + allowed_elements = frozenset(("dd", "dt", "li", "p", "tbody", "td", + "tfoot", "th", "thead", "tr", "body", + "html")) + for node in self.tree.openElements[::-1]: + if node.name not in allowed_elements: + self.parser.parseError("expected-closing-tag-but-got-eof") + break + # Stop parsing + + def processSpaceCharactersDropNewline(self, token): + # Sometimes (start of
, , and ",k.noCloneChecked=!!b.cloneNode(!0).lastChild.defaultValue,c.appendChild(b),b.innerHTML="",k.checkClone=b.cloneNode(!0).cloneNode(!0).lastChild.checked,k.noCloneEvent=!0,b.attachEvent&&(b.attachEvent("onclick",function(){k.noCloneEvent=!1}),b.cloneNode(!0).click()),null==k.deleteExpando){k.deleteExpando=!0;try{delete b.test}catch(d){k.deleteExpando=!1}}}(),function(){var b,c,d=y.createElement("div");for(b in{submit:!0,change:!0,focusin:!0})c="on"+b,(k[b+"Bubbles"]=c in a)||(d.setAttribute(c,"t"),k[b+"Bubbles"]=d.attributes[c].expando===!1);d=null}();var X=/^(?:input|select|textarea)$/i,Y=/^key/,Z=/^(?:mouse|pointer|contextmenu)|click/,$=/^(?:focusinfocus|focusoutblur)$/,_=/^([^.]*)(?:\.(.+)|)$/;function aa(){return!0}function ba(){return!1}function ca(){try{return y.activeElement}catch(a){}}m.event={global:{},add:function(a,b,c,d,e){var f,g,h,i,j,k,l,n,o,p,q,r=m._data(a);if(r){c.handler&&(i=c,c=i.handler,e=i.selector),c.guid||(c.guid=m.guid++),(g=r.events)||(g=r.events={}),(k=r.handle)||(k=r.handle=function(a){return typeof m===K||a&&m.event.triggered===a.type?void 0:m.event.dispatch.apply(k.elem,arguments)},k.elem=a),b=(b||"").match(E)||[""],h=b.length;while(h--)f=_.exec(b[h])||[],o=q=f[1],p=(f[2]||"").split(".").sort(),o&&(j=m.event.special[o]||{},o=(e?j.delegateType:j.bindType)||o,j=m.event.special[o]||{},l=m.extend({type:o,origType:q,data:d,handler:c,guid:c.guid,selector:e,needsContext:e&&m.expr.match.needsContext.test(e),namespace:p.join(".")},i),(n=g[o])||(n=g[o]=[],n.delegateCount=0,j.setup&&j.setup.call(a,d,p,k)!==!1||(a.addEventListener?a.addEventListener(o,k,!1):a.attachEvent&&a.attachEvent("on"+o,k))),j.add&&(j.add.call(a,l),l.handler.guid||(l.handler.guid=c.guid)),e?n.splice(n.delegateCount++,0,l):n.push(l),m.event.global[o]=!0);a=null}},remove:function(a,b,c,d,e){var f,g,h,i,j,k,l,n,o,p,q,r=m.hasData(a)&&m._data(a);if(r&&(k=r.events)){b=(b||"").match(E)||[""],j=b.length;while(j--)if(h=_.exec(b[j])||[],o=q=h[1],p=(h[2]||"").split(".").sort(),o){l=m.event.special[o]||{},o=(d?l.delegateType:l.bindType)||o,n=k[o]||[],h=h[2]&&new RegExp("(^|\\.)"+p.join("\\.(?:.*\\.|)")+"(\\.|$)"),i=f=n.length;while(f--)g=n[f],!e&&q!==g.origType||c&&c.guid!==g.guid||h&&!h.test(g.namespace)||d&&d!==g.selector&&("**"!==d||!g.selector)||(n.splice(f,1),g.selector&&n.delegateCount--,l.remove&&l.remove.call(a,g));i&&!n.length&&(l.teardown&&l.teardown.call(a,p,r.handle)!==!1||m.removeEvent(a,o,r.handle),delete k[o])}else for(o in k)m.event.remove(a,o+b[j],c,d,!0);m.isEmptyObject(k)&&(delete r.handle,m._removeData(a,"events"))}},trigger:function(b,c,d,e){var f,g,h,i,k,l,n,o=[d||y],p=j.call(b,"type")?b.type:b,q=j.call(b,"namespace")?b.namespace.split("."):[];if(h=l=d=d||y,3!==d.nodeType&&8!==d.nodeType&&!$.test(p+m.event.triggered)&&(p.indexOf(".")>=0&&(q=p.split("."),p=q.shift(),q.sort()),g=p.indexOf(":")<0&&"on"+p,b=b[m.expando]?b:new m.Event(p,"object"==typeof b&&b),b.isTrigger=e?2:3,b.namespace=q.join("."),b.namespace_re=b.namespace?new RegExp("(^|\\.)"+q.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,b.result=void 
0,b.target||(b.target=d),c=null==c?[b]:m.makeArray(c,[b]),k=m.event.special[p]||{},e||!k.trigger||k.trigger.apply(d,c)!==!1)){if(!e&&!k.noBubble&&!m.isWindow(d)){for(i=k.delegateType||p,$.test(i+p)||(h=h.parentNode);h;h=h.parentNode)o.push(h),l=h;l===(d.ownerDocument||y)&&o.push(l.defaultView||l.parentWindow||a)}n=0;while((h=o[n++])&&!b.isPropagationStopped())b.type=n>1?i:k.bindType||p,f=(m._data(h,"events")||{})[b.type]&&m._data(h,"handle"),f&&f.apply(h,c),f=g&&h[g],f&&f.apply&&m.acceptData(h)&&(b.result=f.apply(h,c),b.result===!1&&b.preventDefault());if(b.type=p,!e&&!b.isDefaultPrevented()&&(!k._default||k._default.apply(o.pop(),c)===!1)&&m.acceptData(d)&&g&&d[p]&&!m.isWindow(d)){l=d[g],l&&(d[g]=null),m.event.triggered=p;try{d[p]()}catch(r){}m.event.triggered=void 0,l&&(d[g]=l)}return b.result}},dispatch:function(a){a=m.event.fix(a);var b,c,e,f,g,h=[],i=d.call(arguments),j=(m._data(this,"events")||{})[a.type]||[],k=m.event.special[a.type]||{};if(i[0]=a,a.delegateTarget=this,!k.preDispatch||k.preDispatch.call(this,a)!==!1){h=m.event.handlers.call(this,a,j),b=0;while((f=h[b++])&&!a.isPropagationStopped()){a.currentTarget=f.elem,g=0;while((e=f.handlers[g++])&&!a.isImmediatePropagationStopped())(!a.namespace_re||a.namespace_re.test(e.namespace))&&(a.handleObj=e,a.data=e.data,c=((m.event.special[e.origType]||{}).handle||e.handler).apply(f.elem,i),void 0!==c&&(a.result=c)===!1&&(a.preventDefault(),a.stopPropagation()))}return k.postDispatch&&k.postDispatch.call(this,a),a.result}},handlers:function(a,b){var c,d,e,f,g=[],h=b.delegateCount,i=a.target;if(h&&i.nodeType&&(!a.button||"click"!==a.type))for(;i!=this;i=i.parentNode||this)if(1===i.nodeType&&(i.disabled!==!0||"click"!==a.type)){for(e=[],f=0;h>f;f++)d=b[f],c=d.selector+" ",void 0===e[c]&&(e[c]=d.needsContext?m(c,this).index(i)>=0:m.find(c,this,null,[i]).length),e[c]&&e.push(d);e.length&&g.push({elem:i,handlers:e})}return h]","i"),ha=/^\s+/,ia=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/gi,ja=/<([\w:]+)/,ka=/\s*$/g,ra={option:[1,""],legend:[1,"
","
"],area:[1,"",""],param:[1,"",""],thead:[1,"","
"],tr:[2,"","
"],col:[2,"","
"],td:[3,"","
"],_default:k.htmlSerialize?[0,"",""]:[1,"X
","
"]},sa=da(y),ta=sa.appendChild(y.createElement("div"));ra.optgroup=ra.option,ra.tbody=ra.tfoot=ra.colgroup=ra.caption=ra.thead,ra.th=ra.td;function ua(a,b){var c,d,e=0,f=typeof a.getElementsByTagName!==K?a.getElementsByTagName(b||"*"):typeof a.querySelectorAll!==K?a.querySelectorAll(b||"*"):void 0;if(!f)for(f=[],c=a.childNodes||a;null!=(d=c[e]);e++)!b||m.nodeName(d,b)?f.push(d):m.merge(f,ua(d,b));return void 0===b||b&&m.nodeName(a,b)?m.merge([a],f):f}function va(a){W.test(a.type)&&(a.defaultChecked=a.checked)}function wa(a,b){return m.nodeName(a,"table")&&m.nodeName(11!==b.nodeType?b:b.firstChild,"tr")?a.getElementsByTagName("tbody")[0]||a.appendChild(a.ownerDocument.createElement("tbody")):a}function xa(a){return a.type=(null!==m.find.attr(a,"type"))+"/"+a.type,a}function ya(a){var b=pa.exec(a.type);return b?a.type=b[1]:a.removeAttribute("type"),a}function za(a,b){for(var c,d=0;null!=(c=a[d]);d++)m._data(c,"globalEval",!b||m._data(b[d],"globalEval"))}function Aa(a,b){if(1===b.nodeType&&m.hasData(a)){var c,d,e,f=m._data(a),g=m._data(b,f),h=f.events;if(h){delete g.handle,g.events={};for(c in h)for(d=0,e=h[c].length;e>d;d++)m.event.add(b,c,h[c][d])}g.data&&(g.data=m.extend({},g.data))}}function Ba(a,b){var c,d,e;if(1===b.nodeType){if(c=b.nodeName.toLowerCase(),!k.noCloneEvent&&b[m.expando]){e=m._data(b);for(d in e.events)m.removeEvent(b,d,e.handle);b.removeAttribute(m.expando)}"script"===c&&b.text!==a.text?(xa(b).text=a.text,ya(b)):"object"===c?(b.parentNode&&(b.outerHTML=a.outerHTML),k.html5Clone&&a.innerHTML&&!m.trim(b.innerHTML)&&(b.innerHTML=a.innerHTML)):"input"===c&&W.test(a.type)?(b.defaultChecked=b.checked=a.checked,b.value!==a.value&&(b.value=a.value)):"option"===c?b.defaultSelected=b.selected=a.defaultSelected:("input"===c||"textarea"===c)&&(b.defaultValue=a.defaultValue)}}m.extend({clone:function(a,b,c){var d,e,f,g,h,i=m.contains(a.ownerDocument,a);if(k.html5Clone||m.isXMLDoc(a)||!ga.test("<"+a.nodeName+">")?f=a.cloneNode(!0):(ta.innerHTML=a.outerHTML,ta.removeChild(f=ta.firstChild)),!(k.noCloneEvent&&k.noCloneChecked||1!==a.nodeType&&11!==a.nodeType||m.isXMLDoc(a)))for(d=ua(f),h=ua(a),g=0;null!=(e=h[g]);++g)d[g]&&Ba(e,d[g]);if(b)if(c)for(h=h||ua(a),d=d||ua(f),g=0;null!=(e=h[g]);g++)Aa(e,d[g]);else Aa(a,f);return d=ua(f,"script"),d.length>0&&za(d,!i&&ua(a,"script")),d=h=e=null,f},buildFragment:function(a,b,c,d){for(var e,f,g,h,i,j,l,n=a.length,o=da(b),p=[],q=0;n>q;q++)if(f=a[q],f||0===f)if("object"===m.type(f))m.merge(p,f.nodeType?[f]:f);else if(la.test(f)){h=h||o.appendChild(b.createElement("div")),i=(ja.exec(f)||["",""])[1].toLowerCase(),l=ra[i]||ra._default,h.innerHTML=l[1]+f.replace(ia,"<$1>")+l[2],e=l[0];while(e--)h=h.lastChild;if(!k.leadingWhitespace&&ha.test(f)&&p.push(b.createTextNode(ha.exec(f)[0])),!k.tbody){f="table"!==i||ka.test(f)?""!==l[1]||ka.test(f)?0:h:h.firstChild,e=f&&f.childNodes.length;while(e--)m.nodeName(j=f.childNodes[e],"tbody")&&!j.childNodes.length&&f.removeChild(j)}m.merge(p,h.childNodes),h.textContent="";while(h.firstChild)h.removeChild(h.firstChild);h=o.lastChild}else p.push(b.createTextNode(f));h&&o.removeChild(h),k.appendChecked||m.grep(ua(p,"input"),va),q=0;while(f=p[q++])if((!d||-1===m.inArray(f,d))&&(g=m.contains(f.ownerDocument,f),h=ua(o.appendChild(f),"script"),g&&za(h),c)){e=0;while(f=h[e++])oa.test(f.type||"")&&c.push(f)}return h=null,o},cleanData:function(a,b){for(var d,e,f,g,h=0,i=m.expando,j=m.cache,l=k.deleteExpando,n=m.event.special;null!=(d=a[h]);h++)if((b||m.acceptData(d))&&(f=d[i],g=f&&j[f])){if(g.events)for(e in 
g.events)n[e]?m.event.remove(d,e):m.removeEvent(d,e,g.handle);j[f]&&(delete j[f],l?delete d[i]:typeof d.removeAttribute!==K?d.removeAttribute(i):d[i]=null,c.push(f))}}}),m.fn.extend({text:function(a){return V(this,function(a){return void 0===a?m.text(this):this.empty().append((this[0]&&this[0].ownerDocument||y).createTextNode(a))},null,a,arguments.length)},append:function(){return this.domManip(arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=wa(this,a);b.appendChild(a)}})},prepend:function(){return this.domManip(arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=wa(this,a);b.insertBefore(a,b.firstChild)}})},before:function(){return this.domManip(arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this)})},after:function(){return this.domManip(arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this.nextSibling)})},remove:function(a,b){for(var c,d=a?m.filter(a,this):this,e=0;null!=(c=d[e]);e++)b||1!==c.nodeType||m.cleanData(ua(c)),c.parentNode&&(b&&m.contains(c.ownerDocument,c)&&za(ua(c,"script")),c.parentNode.removeChild(c));return this},empty:function(){for(var a,b=0;null!=(a=this[b]);b++){1===a.nodeType&&m.cleanData(ua(a,!1));while(a.firstChild)a.removeChild(a.firstChild);a.options&&m.nodeName(a,"select")&&(a.options.length=0)}return this},clone:function(a,b){return a=null==a?!1:a,b=null==b?a:b,this.map(function(){return m.clone(this,a,b)})},html:function(a){return V(this,function(a){var b=this[0]||{},c=0,d=this.length;if(void 0===a)return 1===b.nodeType?b.innerHTML.replace(fa,""):void 0;if(!("string"!=typeof a||ma.test(a)||!k.htmlSerialize&&ga.test(a)||!k.leadingWhitespace&&ha.test(a)||ra[(ja.exec(a)||["",""])[1].toLowerCase()])){a=a.replace(ia,"<$1>");try{for(;d>c;c++)b=this[c]||{},1===b.nodeType&&(m.cleanData(ua(b,!1)),b.innerHTML=a);b=0}catch(e){}}b&&this.empty().append(a)},null,a,arguments.length)},replaceWith:function(){var a=arguments[0];return this.domManip(arguments,function(b){a=this.parentNode,m.cleanData(ua(this)),a&&a.replaceChild(b,this)}),a&&(a.length||a.nodeType)?this:this.remove()},detach:function(a){return this.remove(a,!0)},domManip:function(a,b){a=e.apply([],a);var c,d,f,g,h,i,j=0,l=this.length,n=this,o=l-1,p=a[0],q=m.isFunction(p);if(q||l>1&&"string"==typeof p&&!k.checkClone&&na.test(p))return this.each(function(c){var d=n.eq(c);q&&(a[0]=p.call(this,c,d.html())),d.domManip(a,b)});if(l&&(i=m.buildFragment(a,this[0].ownerDocument,!1,this),c=i.firstChild,1===i.childNodes.length&&(i=c),c)){for(g=m.map(ua(i,"script"),xa),f=g.length;l>j;j++)d=i,j!==o&&(d=m.clone(d,!0,!0),f&&m.merge(g,ua(d,"script"))),b.call(this[j],d,j);if(f)for(h=g[g.length-1].ownerDocument,m.map(g,ya),j=0;f>j;j++)d=g[j],oa.test(d.type||"")&&!m._data(d,"globalEval")&&m.contains(h,d)&&(d.src?m._evalUrl&&m._evalUrl(d.src):m.globalEval((d.text||d.textContent||d.innerHTML||"").replace(qa,"")));i=c=null}return this}}),m.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(a,b){m.fn[a]=function(a){for(var c,d=0,e=[],g=m(a),h=g.length-1;h>=d;d++)c=d===h?this:this.clone(!0),m(g[d])[b](c),f.apply(e,c.get());return this.pushStack(e)}});var Ca,Da={};function Ea(b,c){var d,e=m(c.createElement(b)).appendTo(c.body),f=a.getDefaultComputedStyle&&(d=a.getDefaultComputedStyle(e[0]))?d.display:m.css(e[0],"display");return e.detach(),f}function Fa(a){var b=y,c=Da[a];return c||(c=Ea(a,b),"none"!==c&&c||(Ca=(Ca||m("