diff --git a/README.md b/README.md
index 2511df3..5bfd726 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,6 @@
Espeonage
+ 
“A Future Sight for your next matchup!”
diff --git a/assets/logo.png b/assets/logo.png
new file mode 100644
index 0000000..3591def
Binary files /dev/null and b/assets/logo.png differ
diff --git a/espeonage/battle_simulator.py b/espeonage/battle_simulator.py
index 9d7bbe9..545f891 100644
--- a/espeonage/battle_simulator.py
+++ b/espeonage/battle_simulator.py
@@ -10,6 +10,65 @@
class BattleSimulator:
"""Simulates battle progress from replay logs"""
+ # Comprehensive list of non-attacking moves to filter out
+ NON_ATTACK_MOVES = {
+ # Status moves
+ 'Toxic', 'Thunder Wave', 'Will-O-Wisp', 'Spore', 'Sleep Powder', 'Stun Spore',
+ 'Hypnosis', 'Yawn', 'Poison Powder', 'Glare', 'Confuse Ray',
+ 'Supersonic', 'Swagger', 'Sweet Kiss', 'Attract', 'Taunt', 'Torment',
+ 'Encore', 'Disable', 'Heal Block', 'Embargo', 'Leech Seed',
+
+ # Entry hazards
+ 'Spikes', 'Toxic Spikes', 'Stealth Rock', 'Sticky Web',
+
+ # Field effects
+ 'Trick Room', 'Magic Room', 'Wonder Room', 'Gravity', 'Grassy Terrain',
+ 'Misty Terrain', 'Electric Terrain', 'Psychic Terrain', 'Rain Dance',
+ 'Sunny Day', 'Sandstorm', 'Hail', 'Snowscape', 'Tailwind', 'Light Screen',
+ 'Reflect', 'Aurora Veil', 'Safeguard', 'Mist',
+
+ # Boosting/status moves
+ 'Swords Dance', 'Dragon Dance', 'Nasty Plot', 'Calm Mind', 'Bulk Up',
+ 'Agility', 'Rock Polish', 'Quiver Dance', 'Shift Gear', 'Coil',
+ 'Curse', 'Iron Defense', 'Amnesia', 'Acid Armor', 'Barrier',
+ 'Cosmic Power', 'Cotton Guard', 'Defend Order', 'Harden', 'Withdraw',
+ 'Defense Curl', 'Stockpile', 'Charge', 'Focus Energy', 'Meditate',
+ 'Sharpen', 'Acupressure', 'Howl', 'Work Up', 'Growth', 'Hone Claws',
+ 'Shell Smash', 'Tail Glow', 'Geomancy', 'No Retreat',
+
+ # Recovery moves
+ 'Recover', 'Roost', 'Slack Off', 'Soft-Boiled', 'Rest', 'Wish',
+ 'Healing Wish', 'Lunar Dance', 'Heal Order', 'Milk Drink', 'Moonlight',
+ 'Morning Sun', 'Synthesis', 'Heal Bell', 'Aromatherapy', 'Refresh',
+ 'Purify', 'Life Dew', 'Shore Up', 'Swallow', 'Strength Sap',
+
+ # Protection moves
+ 'Protect', 'Detect', 'Endure', 'King\'s Shield', 'Spiky Shield',
+ 'Baneful Bunker', 'Obstruct', 'Silk Trap', 'Burning Bulwark',
+
+ # Switching/pivot moves (these don't directly deal KO damage)
+ 'Teleport', 'Baton Pass', 'Parting Shot', 'Shed Tail',
+
+ # Support moves
+ 'Substitute', 'Helping Hand', 'Follow Me', 'Rage Powder', 'Spotlight',
+ 'Ally Switch', 'Trick', 'Switcheroo', 'Bestow', 'Instruct',
+ 'Skill Swap', 'Role Play', 'Entrainment', 'Guard Split', 'Power Split',
+ 'Speed Swap', 'Guard Swap', 'Power Swap', 'Heart Swap', 'Mimic',
+ 'Transform', 'Copycat', 'Me First', 'Snatch', 'Recycle', 'Metronome',
+
+ # Partial-trapping moves (KOs from their residual trap damage are treated as indirect)
+ 'Sand Tomb', 'Whirlpool', 'Bind', 'Wrap', 'Fire Spin', 'Magma Storm',
+ 'Infestation', 'Clamp',
+
+ # Other non-damaging moves
+ 'Splash', 'Celebrate', 'Hold Hands', 'Happy Hour', 'Conversion',
+ 'Conversion 2', 'Camouflage', 'Nightmare', 'Perish Song', 'Mean Look',
+ 'Block', 'Spider Web', 'Forest\'s Curse', 'Trick-or-Treat', 'Rototiller',
+ 'Magnetic Flux', 'Gear Up', 'Flower Shield', 'Ion Deluge', 'Powder',
+ 'Tearful Look', 'Baby-Doll Eyes', 'Play Nice', 'Venom Drench',
+ 'Belly Drum', 'Psych Up', 'Power Trick',
+ }
+
def __init__(self):
self.tracker = PokemonTracker()
self.calculator = DamageCalculator()
@@ -96,6 +155,18 @@ def _parse_hp(self, hp_str: str) -> tuple:
return None, None, status
+ def _is_attack_move(self, move: str) -> bool:
+ """
+ Determine if a move is a direct attacking move
+
+ Args:
+ move: Move name
+
+ Returns:
+ True if the move is a direct attack, False otherwise
+ """
+ return move not in self.NON_ATTACK_MOVES
+
def _handle_player(self, args: List[str]):
"""Handle player command"""
pass
@@ -184,7 +255,12 @@ def _handle_faint(self, args: List[str]):
opponent = 'p1' if player == 'p2' else 'p2'
if opponent in self.last_move:
attacker_id = self.last_move[opponent]['pokemon']
- self.tracker.track_knockout(attacker_id)
+ move = self.last_move[opponent]['move']
+
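+ # A faint line carries no cause, so the KO is credited to this side's most
+ # recently logged move; filtering below keeps indirect KOs (poison, burn,
+ # hazards) from being credited to a status move such as Recover or Toxic.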
+ # Only attribute kill to move if it's an attacking move
+ if self._is_attack_move(move):
+ self.tracker.track_knockout(attacker_id)
+ self.tracker.track_move_kill(attacker_id, move)
def _handle_ability(self, args: List[str]):
"""Handle ability reveal"""
diff --git a/espeonage/cli.py b/espeonage/cli.py
index bddf143..de64720 100644
--- a/espeonage/cli.py
+++ b/espeonage/cli.py
@@ -131,6 +131,14 @@ def format_text_output(results: dict) -> str:
if data['moves']:
lines.append(f" Moves: {', '.join(data['moves'])}")
+ # Display move kills if any
+ if data.get('move_kills') and any(kills > 0 for kills in data['move_kills'].values()):
+ lines.append(f" Move Kills:")
+ for move, kills in sorted(data['move_kills'].items()):
+ if kills > 0:
+ ko_text = "KO" if kills == 1 else "KOs"
+ lines.append(f" {move}: {kills} {ko_text}")
+
lines.append(f" Stats:")
lines.append(f" K/D Ratio: {data['kd_ratio']:.2f} ({data['knockouts']}/{data['deaths']})")
lines.append(f" Damage Dealt: {data['damage_dealt']}")
diff --git a/espeonage/pokemon_tracker.py b/espeonage/pokemon_tracker.py
index d29dad6..b915e01 100644
--- a/espeonage/pokemon_tracker.py
+++ b/espeonage/pokemon_tracker.py
@@ -28,6 +28,7 @@ class PokemonData:
deaths: int = 0
damage_dealt: int = 0
damage_taken: int = 0
+ move_kills: Dict[str, int] = field(default_factory=dict)
# EV/IV inference data
observed_stats: Dict[str, List[int]] = field(default_factory=dict)
@@ -187,6 +188,19 @@ def track_knockout(self, pokemon: str):
if pokemon in self.pokemon:
self.pokemon[pokemon].add_knockout()
+ def track_move_kill(self, pokemon: str, move: str):
+ """
+ Track when a Pokémon gets a knockout with a specific move
+
+ Args:
+ pokemon: Pokémon identifier
+ move: Move name that got the kill
+ """
+ if pokemon in self.pokemon:
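+ # Lazily create this move's counter the first time it scores a KO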
+ if move not in self.pokemon[pokemon].move_kills:
+ self.pokemon[pokemon].move_kills[move] = 0
+ self.pokemon[pokemon].move_kills[move] += 1
+
def track_damage(self, attacker: str, defender: str, damage: int):
"""
Track damage dealt and taken
@@ -229,5 +243,6 @@ def get_summary(self) -> Dict:
'kd_ratio': data.get_kd_ratio(),
'damage_dealt': data.damage_dealt,
'damage_taken': data.damage_taken,
+ 'move_kills': data.move_kills,
}
return summary
diff --git a/tests/test_move_kills.py b/tests/test_move_kills.py
new file mode 100644
index 0000000..da77c9e
--- /dev/null
+++ b/tests/test_move_kills.py
@@ -0,0 +1,205 @@
+"""Tests for move kill tracking functionality."""
+
+import unittest
+from espeonage.battle_simulator import BattleSimulator
+from espeonage.replay_parser import ReplayParser
+
+
+class TestMoveKills(unittest.TestCase):
+ """Test cases for move kill tracking."""
+
+ def test_attack_move_classification(self):
+ """Test that attack moves are correctly identified."""
+ simulator = BattleSimulator()
+
+ # Test attacking moves
+ self.assertTrue(simulator._is_attack_move('Earthquake'))
+ self.assertTrue(simulator._is_attack_move('Thunderbolt'))
+ self.assertTrue(simulator._is_attack_move('Hydro Pump'))
+ self.assertTrue(simulator._is_attack_move('Flamethrower'))
+ self.assertTrue(simulator._is_attack_move('Ice Beam'))
+ self.assertTrue(simulator._is_attack_move('Dragon Claw'))
+
+ # Test non-attacking moves
+ self.assertFalse(simulator._is_attack_move('Stealth Rock'))
+ self.assertFalse(simulator._is_attack_move('Toxic'))
+ self.assertFalse(simulator._is_attack_move('Recover'))
+ self.assertFalse(simulator._is_attack_move('Swords Dance'))
+ self.assertFalse(simulator._is_attack_move('Will-O-Wisp'))
+ self.assertFalse(simulator._is_attack_move('Thunder Wave'))
+ self.assertFalse(simulator._is_attack_move('Leech Seed'))
+ self.assertFalse(simulator._is_attack_move('Spikes'))
+
+ def test_move_kills_tracked_for_attacks(self):
+ """Test that kills are tracked for direct attacking moves."""
+ parser = ReplayParser()
+ log = (
+ "|player|p1|Player1|\n"
+ "|player|p2|Player2|\n"
+ "|switch|p1a: Pikachu|Pikachu, L50|150/150\n"
+ "|switch|p2a: Charizard|Charizard, L50|200/200\n"
+ "|turn|1\n"
+ "|move|p1a: Pikachu|Thunderbolt|p2a: Charizard\n"
+ "|-damage|p2a: Charizard|0 fnt\n"
+ "|faint|p2a: Charizard\n"
+ "|win|Player1\n"
+ )
+
+ result = parser.parse_raw_log(log)
+ simulator = BattleSimulator()
+ battle_result = simulator.process_battle_log(result['battle_log'])
+
+ pikachu_data = battle_result['pokemon']['p1:Pikachu']
+
+ # Pikachu should have 1 knockout
+ self.assertEqual(pikachu_data['knockouts'], 1)
+
+ # Pikachu should have 1 kill with Thunderbolt
+ self.assertIn('Thunderbolt', pikachu_data['move_kills'])
+ self.assertEqual(pikachu_data['move_kills']['Thunderbolt'], 1)
+
+ def test_move_kills_not_tracked_for_status(self):
+ """Test that kills from status effects are not tracked."""
+ parser = ReplayParser()
+ log = (
+ "|player|p1|Player1|\n"
+ "|player|p2|Player2|\n"
+ "|switch|p1a: Toxapex|Toxapex, L50|150/150\n"
+ "|switch|p2a: Charizard|Charizard, L50|200/200\n"
+ "|turn|1\n"
+ "|move|p1a: Toxapex|Toxic|p2a: Charizard\n"
+ "|-status|p2a: Charizard|tox\n"
+ "|move|p2a: Charizard|Flamethrower|p1a: Toxapex\n"
+ "|-damage|p1a: Toxapex|100/150\n"
+ "|turn|2\n"
+ "|move|p1a: Toxapex|Recover|p1a: Toxapex\n"
+ "|-heal|p1a: Toxapex|150/150\n"
+ "|move|p2a: Charizard|Flamethrower|p1a: Toxapex\n"
+ "|-damage|p1a: Toxapex|100/150\n"
+ "|-damage|p2a: Charizard|180/200 tox|[from] psn\n"
+ "|turn|3\n"
+ "|move|p1a: Toxapex|Recover|p1a: Toxapex\n"
+ "|-heal|p1a: Toxapex|150/150\n"
+ "|-damage|p2a: Charizard|0 fnt|[from] psn\n"
+ "|faint|p2a: Charizard\n"
+ "|win|Player1\n"
+ )
+
+ result = parser.parse_raw_log(log)
+ simulator = BattleSimulator()
+ battle_result = simulator.process_battle_log(result['battle_log'])
+
+ toxapex_data = battle_result['pokemon']['p1:Toxapex']
+
+ # Toxapex should have 0 knockouts because the kill was from status
+ # (Recover was the last move used, which is non-attacking)
+ self.assertEqual(toxapex_data['knockouts'], 0)
+
+ # Toxapex should have no move kills
+ self.assertEqual(len(toxapex_data['move_kills']), 0)
+
+ def test_multiple_kills_same_move(self):
+ """Test that multiple kills with the same move are tracked correctly."""
+ parser = ReplayParser()
+ log = (
+ "|player|p1|Player1|\n"
+ "|player|p2|Player2|\n"
+ "|switch|p1a: Pikachu|Pikachu, L50|150/150\n"
+ "|switch|p2a: Charizard|Charizard, L50|200/200\n"
+ "|turn|1\n"
+ "|move|p1a: Pikachu|Thunderbolt|p2a: Charizard\n"
+ "|-damage|p2a: Charizard|0 fnt\n"
+ "|faint|p2a: Charizard\n"
+ "|switch|p2a: Blastoise|Blastoise, L50|180/180\n"
+ "|turn|2\n"
+ "|move|p1a: Pikachu|Thunderbolt|p2a: Blastoise\n"
+ "|-damage|p2a: Blastoise|0 fnt\n"
+ "|faint|p2a: Blastoise\n"
+ "|win|Player1\n"
+ )
+
+ result = parser.parse_raw_log(log)
+ simulator = BattleSimulator()
+ battle_result = simulator.process_battle_log(result['battle_log'])
+
+ pikachu_data = battle_result['pokemon']['p1:Pikachu']
+
+ # Pikachu should have 2 knockouts
+ self.assertEqual(pikachu_data['knockouts'], 2)
+
+ # Pikachu should have 2 kills with Thunderbolt
+ self.assertIn('Thunderbolt', pikachu_data['move_kills'])
+ self.assertEqual(pikachu_data['move_kills']['Thunderbolt'], 2)
+
+ def test_multiple_kills_different_moves(self):
+ """Test that kills from different moves are tracked separately."""
+ parser = ReplayParser()
+ log = (
+ "|player|p1|Player1|\n"
+ "|player|p2|Player2|\n"
+ "|switch|p1a: Pikachu|Pikachu, L50|150/150\n"
+ "|switch|p2a: Charizard|Charizard, L50|200/200\n"
+ "|turn|1\n"
+ "|move|p1a: Pikachu|Thunderbolt|p2a: Charizard\n"
+ "|-damage|p2a: Charizard|0 fnt\n"
+ "|faint|p2a: Charizard\n"
+ "|switch|p2a: Pidgeot|Pidgeot, L50|160/160\n"
+ "|turn|2\n"
+ "|move|p1a: Pikachu|Iron Tail|p2a: Pidgeot\n"
+ "|-damage|p2a: Pidgeot|0 fnt\n"
+ "|faint|p2a: Pidgeot\n"
+ "|win|Player1\n"
+ )
+
+ result = parser.parse_raw_log(log)
+ simulator = BattleSimulator()
+ battle_result = simulator.process_battle_log(result['battle_log'])
+
+ pikachu_data = battle_result['pokemon']['p1:Pikachu']
+
+ # Pikachu should have 2 knockouts
+ self.assertEqual(pikachu_data['knockouts'], 2)
+
+ # Pikachu should have 1 kill with Thunderbolt
+ self.assertIn('Thunderbolt', pikachu_data['move_kills'])
+ self.assertEqual(pikachu_data['move_kills']['Thunderbolt'], 1)
+
+ # Pikachu should have 1 kill with Iron Tail
+ self.assertIn('Iron Tail', pikachu_data['move_kills'])
+ self.assertEqual(pikachu_data['move_kills']['Iron Tail'], 1)
+
+ def test_hazard_kill_not_attributed(self):
+ """Test that kills from hazards like Stealth Rock are not attributed."""
+ parser = ReplayParser()
+ log = (
+ "|player|p1|Player1|\n"
+ "|player|p2|Player2|\n"
+ "|switch|p1a: Ferrothorn|Ferrothorn, L50|150/150\n"
+ "|switch|p2a: Charizard|Charizard, L50|200/200\n"
+ "|turn|1\n"
+ "|move|p1a: Ferrothorn|Stealth Rock|p2a: Charizard\n"
+ "|-sidestart|p2: Player2|move: Stealth Rock\n"
+ "|move|p2a: Charizard|Fire Blast|p1a: Ferrothorn\n"
+ "|-damage|p1a: Ferrothorn|100/150\n"
+ "|turn|2\n"
+ "|switch|p2a: Moltres|Moltres, L50|180/180\n"
+ "|-damage|p2a: Moltres|0 fnt|[from] Stealth Rock\n"
+ "|faint|p2a: Moltres\n"
+ "|win|Player1\n"
+ )
+
+ result = parser.parse_raw_log(log)
+ simulator = BattleSimulator()
+ battle_result = simulator.process_battle_log(result['battle_log'])
+
+ ferrothorn_data = battle_result['pokemon']['p1:Ferrothorn']
+
+ # Ferrothorn should have 0 knockouts (hazard kills don't count)
+ self.assertEqual(ferrothorn_data['knockouts'], 0)
+
+ # Ferrothorn should have no move kills
+ self.assertEqual(len(ferrothorn_data['move_kills']), 0)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/venv/lib/python3.9/site-packages/_distutils_hack/__init__.py b/venv/lib/python3.9/site-packages/_distutils_hack/__init__.py
new file mode 100644
index 0000000..5f40996
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/_distutils_hack/__init__.py
@@ -0,0 +1,128 @@
+import sys
+import os
+import re
+import importlib
+import warnings
+
+
+is_pypy = '__pypy__' in sys.builtin_module_names
+
+
+warnings.filterwarnings('ignore',
+ r'.+ distutils\b.+ deprecated',
+ DeprecationWarning)
+
+
+def warn_distutils_present():
+ if 'distutils' not in sys.modules:
+ return
+ if is_pypy and sys.version_info < (3, 7):
+ # PyPy for 3.6 unconditionally imports distutils, so bypass the warning
+ # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250
+ return
+ warnings.warn(
+ "Distutils was imported before Setuptools, but importing Setuptools "
+ "also replaces the `distutils` module in `sys.modules`. This may lead "
+ "to undesirable behaviors or errors. To avoid these issues, avoid "
+ "using distutils directly, ensure that setuptools is installed in the "
+ "traditional way (e.g. not an editable install), and/or make sure "
+ "that setuptools is always imported before distutils.")
+
+
+def clear_distutils():
+ if 'distutils' not in sys.modules:
+ return
+ warnings.warn("Setuptools is replacing distutils.")
+ mods = [name for name in sys.modules if re.match(r'distutils\b', name)]
+ for name in mods:
+ del sys.modules[name]
+
+
+def enabled():
+ """
+ Allow selection of distutils by environment variable.
+ """
+ which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib')
+ return which == 'local'
+
+
+def ensure_local_distutils():
+ clear_distutils()
+ distutils = importlib.import_module('setuptools._distutils')
+ distutils.__name__ = 'distutils'
+ sys.modules['distutils'] = distutils
+
+ # sanity check that submodules load as expected
+ core = importlib.import_module('distutils.core')
+ assert '_distutils' in core.__file__, core.__file__
+
+
+def do_override():
+ """
+ Ensure that the local copy of distutils is preferred over stdlib.
+
+ See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
+ for more motivation.
+ """
+ if enabled():
+ warn_distutils_present()
+ ensure_local_distutils()
+
+
+class DistutilsMetaFinder:
+ def find_spec(self, fullname, path, target=None):
+ if path is not None:
+ return
+
+ method_name = 'spec_for_{fullname}'.format(**locals())
+ method = getattr(self, method_name, lambda: None)
+ return method()
+
+ def spec_for_distutils(self):
+ import importlib.abc
+ import importlib.util
+
+ class DistutilsLoader(importlib.abc.Loader):
+
+ def create_module(self, spec):
+ return importlib.import_module('setuptools._distutils')
+
+ def exec_module(self, module):
+ pass
+
+ return importlib.util.spec_from_loader('distutils', DistutilsLoader())
+
+ def spec_for_pip(self):
+ """
+ Ensure stdlib distutils when running under pip.
+ See pypa/pip#8761 for rationale.
+ """
+ if self.pip_imported_during_build():
+ return
+ clear_distutils()
+ self.spec_for_distutils = lambda: None
+
+ @staticmethod
+ def pip_imported_during_build():
+ """
+ Detect if pip is being imported in a build script. Ref #2355.
+ """
+ import traceback
+ return any(
+ frame.f_globals['__file__'].endswith('setup.py')
+ for frame, line in traceback.walk_stack(None)
+ )
+
+
+DISTUTILS_FINDER = DistutilsMetaFinder()
+
+
+def add_shim():
+ sys.meta_path.insert(0, DISTUTILS_FINDER)
+
+
+def remove_shim():
+ try:
+ sys.meta_path.remove(DISTUTILS_FINDER)
+ except ValueError:
+ pass
diff --git a/venv/lib/python3.9/site-packages/_distutils_hack/override.py b/venv/lib/python3.9/site-packages/_distutils_hack/override.py
new file mode 100644
index 0000000..2cc433a
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/_distutils_hack/override.py
@@ -0,0 +1 @@
+__import__('_distutils_hack').do_override()
diff --git a/venv/lib/python3.9/site-packages/beautifulsoup4-4.14.2.dist-info/INSTALLER b/venv/lib/python3.9/site-packages/beautifulsoup4-4.14.2.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/beautifulsoup4-4.14.2.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/venv/lib/python3.9/site-packages/beautifulsoup4-4.14.2.dist-info/METADATA b/venv/lib/python3.9/site-packages/beautifulsoup4-4.14.2.dist-info/METADATA
new file mode 100644
index 0000000..3b872db
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/beautifulsoup4-4.14.2.dist-info/METADATA
@@ -0,0 +1,123 @@
+Metadata-Version: 2.4
+Name: beautifulsoup4
+Version: 4.14.2
+Summary: Screen-scraping library
+Project-URL: Download, https://www.crummy.com/software/BeautifulSoup/bs4/download/
+Project-URL: Homepage, https://www.crummy.com/software/BeautifulSoup/bs4/
+Author-email: Leonard Richardson
+License: MIT License
+License-File: AUTHORS
+License-File: LICENSE
+Keywords: HTML,XML,parse,soup
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Text Processing :: Markup :: HTML
+Classifier: Topic :: Text Processing :: Markup :: SGML
+Classifier: Topic :: Text Processing :: Markup :: XML
+Requires-Python: >=3.7.0
+Requires-Dist: soupsieve>1.2
+Requires-Dist: typing-extensions>=4.0.0
+Provides-Extra: cchardet
+Requires-Dist: cchardet; extra == 'cchardet'
+Provides-Extra: chardet
+Requires-Dist: chardet; extra == 'chardet'
+Provides-Extra: charset-normalizer
+Requires-Dist: charset-normalizer; extra == 'charset-normalizer'
+Provides-Extra: html5lib
+Requires-Dist: html5lib; extra == 'html5lib'
+Provides-Extra: lxml
+Requires-Dist: lxml; extra == 'lxml'
+Description-Content-Type: text/markdown
+
+Beautiful Soup is a library that makes it easy to scrape information
+from web pages. It sits atop an HTML or XML parser, providing Pythonic
+idioms for iterating, searching, and modifying the parse tree.
+
+# Quick start
+
+```
+>>> from bs4 import BeautifulSoup
+>>> soup = BeautifulSoup("<p>Some<b>bad<i>HTML")
+>>> print(soup.prettify())
+<html>
+ <body>
+  <p>
+   Some
+   <b>
+    bad
+    <i>
+     HTML
+    </i>
+   </b>
+  </p>
+ </body>
+</html>
+>>> soup.find(string="bad")
+'bad'
+>>> soup.i
+<i>HTML</i>
+#
+>>> soup = BeautifulSoup("<tag1>Some<tag2/>bad<tag3>XML", "xml")
+#
+>>> print(soup.prettify())
+<?xml version="1.0" encoding="utf-8"?>
+<tag1>
+ Some
+ <tag2/>
+ bad
+ <tag3>
+  XML
+ </tag3>
+</tag1>
+```
+
+To go beyond the basics, [comprehensive documentation is available](https://www.crummy.com/software/BeautifulSoup/bs4/doc/).
+
+# Links
+
+* [Homepage](https://www.crummy.com/software/BeautifulSoup/bs4/)
+* [Documentation](https://www.crummy.com/software/BeautifulSoup/bs4/doc/)
+* [Discussion group](https://groups.google.com/group/beautifulsoup/)
+* [Development](https://code.launchpad.net/beautifulsoup/)
+* [Bug tracker](https://bugs.launchpad.net/beautifulsoup/)
+* [Complete changelog](https://git.launchpad.net/beautifulsoup/tree/CHANGELOG)
+
+# Note on Python 2 sunsetting
+
+Beautiful Soup's support for Python 2 was discontinued on December 31,
+2020: one year after the sunset date for Python 2 itself. From this
+point onward, new Beautiful Soup development will exclusively target
+Python 3. The final release of Beautiful Soup 4 to support Python 2
+was 4.9.3.
+
+# Supporting the project
+
+If you use Beautiful Soup as part of your professional work, please consider a
+[Tidelift subscription](https://tidelift.com/subscription/pkg/pypi-beautifulsoup4?utm_source=pypi-beautifulsoup4&utm_medium=referral&utm_campaign=readme).
+This will support many of the free software projects your organization
+depends on, not just Beautiful Soup.
+
+If you use Beautiful Soup for personal projects, the best way to say
+thank you is to read
+[Tool Safety](https://www.crummy.com/software/BeautifulSoup/zine/), a zine I
+wrote about what Beautiful Soup has taught me about software
+development.
+
+# Building the documentation
+
+The bs4/doc/ directory contains full documentation in Sphinx
+format. Run `make html` in that directory to create HTML
+documentation.
+
+# Running the unit tests
+
+Beautiful Soup supports unit test discovery using Pytest:
+
+```
+$ pytest
+```
+
diff --git a/venv/lib/python3.9/site-packages/beautifulsoup4-4.14.2.dist-info/RECORD b/venv/lib/python3.9/site-packages/beautifulsoup4-4.14.2.dist-info/RECORD
new file mode 100644
index 0000000..2f4fc08
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/beautifulsoup4-4.14.2.dist-info/RECORD
@@ -0,0 +1,38 @@
+beautifulsoup4-4.14.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+beautifulsoup4-4.14.2.dist-info/METADATA,sha256=9oTk4mYoQfIB4g10cxVg97zCIyjq6y98JGH03PsNlxc,3809
+beautifulsoup4-4.14.2.dist-info/RECORD,,
+beautifulsoup4-4.14.2.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+beautifulsoup4-4.14.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+beautifulsoup4-4.14.2.dist-info/licenses/AUTHORS,sha256=uYkjiRjh_aweRnF8tAW2PpJJeickE68NmJwd9siry28,2201
+beautifulsoup4-4.14.2.dist-info/licenses/LICENSE,sha256=VbTY1LHlvIbRDvrJG3TIe8t3UmsPW57a-LnNKtxzl7I,1441
+bs4/__init__.py,sha256=tbgzI5m3zvS_aaV1EuoBTNtKlJXvHGh_WgnYAHADZBo,44386
+bs4/__pycache__/__init__.cpython-39.pyc,,
+bs4/__pycache__/_deprecation.cpython-39.pyc,,
+bs4/__pycache__/_typing.cpython-39.pyc,,
+bs4/__pycache__/_warnings.cpython-39.pyc,,
+bs4/__pycache__/css.cpython-39.pyc,,
+bs4/__pycache__/dammit.cpython-39.pyc,,
+bs4/__pycache__/diagnose.cpython-39.pyc,,
+bs4/__pycache__/element.cpython-39.pyc,,
+bs4/__pycache__/exceptions.cpython-39.pyc,,
+bs4/__pycache__/filter.cpython-39.pyc,,
+bs4/__pycache__/formatter.cpython-39.pyc,,
+bs4/_deprecation.py,sha256=niHJCk37APg8KEuFOa57ZXaxLdBmc_2V6uuaJqu7r30,2408
+bs4/_typing.py,sha256=zNcx7R1yCTK8WwtumP28hc7CJ3pMyZXj_VAeYaNXMZA,7549
+bs4/_warnings.py,sha256=ZuOETgcnEbZgw2N0nnNXn6wvtrn2ut7AF0d98bvkMFc,4711
+bs4/builder/__init__.py,sha256=Rl4qjOXvdyyyjayOFqbkgoUoo81IgoyKD-RwWeVK59g,31194
+bs4/builder/__pycache__/__init__.cpython-39.pyc,,
+bs4/builder/__pycache__/_html5lib.cpython-39.pyc,,
+bs4/builder/__pycache__/_htmlparser.cpython-39.pyc,,
+bs4/builder/__pycache__/_lxml.cpython-39.pyc,,
+bs4/builder/_html5lib.py,sha256=hL6xUk4_I2i5CMguFoYFlrI26cY4Dut7fOEQrUctHIM,23607
+bs4/builder/_htmlparser.py,sha256=EiloGYOv4OSwRmBYv4QchcG4xmeOSevod0H3F3yw77o,17877
+bs4/builder/_lxml.py,sha256=ZGxR0UEHE4SAjoK4uspG6BPJBIu2BmLmmR5g5MsrjCo,18573
+bs4/css.py,sha256=_m_l_4SGWHnY620VJ21j_qQH1RX3p91sYVemgKxaLsM,12713
+bs4/dammit.py,sha256=YkIRAyZyKfyoqtVeI_LT7WvRY28izj_jSBGI58-sU84,51493
+bs4/diagnose.py,sha256=at98iuxyOrqec4V8iwkTIbNUqBCsq9Lr3fDAQx2129Y,7846
+bs4/element.py,sha256=oXmj7LG_2NpsDK90mq73q0PMK0FjFBIGSeTTJLVwwTc,120237
+bs4/exceptions.py,sha256=Q9FOadNe8QRvzDMaKSXe2Wtl8JK_oAZW7mbFZBVP_GE,951
+bs4/filter.py,sha256=rw8ZNhTDLEJVCEiSifou5tZR_3zBLeuvAyouY82qU_E,29201
+bs4/formatter.py,sha256=uBT0k6W8O5kJ9PCuJYjra97yoUqC-dlM9D_v-oRM0r8,10478
+bs4/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
diff --git a/venv/lib/python3.9/site-packages/beautifulsoup4-4.14.2.dist-info/REQUESTED b/venv/lib/python3.9/site-packages/beautifulsoup4-4.14.2.dist-info/REQUESTED
new file mode 100644
index 0000000..e69de29
diff --git a/venv/lib/python3.9/site-packages/beautifulsoup4-4.14.2.dist-info/WHEEL b/venv/lib/python3.9/site-packages/beautifulsoup4-4.14.2.dist-info/WHEEL
new file mode 100644
index 0000000..12228d4
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/beautifulsoup4-4.14.2.dist-info/WHEEL
@@ -0,0 +1,4 @@
+Wheel-Version: 1.0
+Generator: hatchling 1.27.0
+Root-Is-Purelib: true
+Tag: py3-none-any
diff --git a/venv/lib/python3.9/site-packages/beautifulsoup4-4.14.2.dist-info/licenses/AUTHORS b/venv/lib/python3.9/site-packages/beautifulsoup4-4.14.2.dist-info/licenses/AUTHORS
new file mode 100644
index 0000000..18926c2
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/beautifulsoup4-4.14.2.dist-info/licenses/AUTHORS
@@ -0,0 +1,49 @@
+Behold, mortal, the origins of Beautiful Soup...
+================================================
+
+Leonard Richardson is the primary maintainer.
+
+Aaron DeVore, Isaac Muse and Chris Papademetrious have made
+significant contributions to the code base.
+
+Mark Pilgrim provided the encoding detection code that forms the base
+of UnicodeDammit.
+
+Thomas Kluyver and Ezio Melotti finished the work of getting Beautiful
+Soup 4 working under Python 3.
+
+Simon Willison wrote soupselect, which was used to make Beautiful Soup
+support CSS selectors. Isaac Muse wrote SoupSieve, which made it
+possible to _remove_ the CSS selector code from Beautiful Soup.
+
+Sam Ruby helped with a lot of edge cases.
+
+Jonathan Ellis was awarded the prestigious Beau Potage D'Or for his
+work in solving the nestable tags conundrum.
+
+An incomplete list of people have contributed patches to Beautiful
+Soup:
+
+ Istvan Albert, Andrew Lin, Anthony Baxter, Oliver Beattie, Andrew
+Boyko, Tony Chang, Francisco Canas, "Delong", Zephyr Fang, Fuzzy,
+Roman Gaufman, Yoni Gilad, Richie Hindle, Toshihiro Kamiya, Peteris
+Krumins, Kent Johnson, Marek Kapolka, Andreas Kostyrka, Roel Kramer,
+Ben Last, Robert Leftwich, Stefaan Lippens, "liquider", Staffan
+Malmgren, Ksenia Marasanova, JP Moins, Adam Monsen, John Nagle, "Jon",
+Ed Oskiewicz, Martijn Peters, Greg Phillips, Giles Radford, Stefano
+Revera, Arthur Rudolph, Marko Samastur, James Salter, Jouni Seppänen,
+Alexander Schmolck, Tim Shirley, Geoffrey Sneddon, Ville Skyttä,
+"Vikas", Jens Svalgaard, Andy Theyers, Eric Weiser, Glyn Webster, John
+Wiseman, Paul Wright, Danny Yoo
+
+An incomplete list of people who made suggestions or found bugs or
+found ways to break Beautiful Soup:
+
+ Hanno Böck, Matteo Bertini, Chris Curvey, Simon Cusack, Bruce Eckel,
+ Matt Ernst, Michael Foord, Tom Harris, Bill de hOra, Donald Howes,
+ Matt Patterson, Scott Roberts, Steve Strassmann, Mike Williams,
+ warchild at redho dot com, Sami Kuisma, Carlos Rocha, Bob Hutchison,
+ Joren Mc, Michal Migurski, John Kleven, Tim Heaney, Tripp Lilley, Ed
+ Summers, Dennis Sutch, Chris Smith, Aaron Swartz, Stuart
+ Turner, Greg Edwards, Kevin J Kalupson, Nikos Kouremenos, Artur de
+ Sousa Rocha, Yichun Wei, Per Vognsen
diff --git a/venv/lib/python3.9/site-packages/beautifulsoup4-4.14.2.dist-info/licenses/LICENSE b/venv/lib/python3.9/site-packages/beautifulsoup4-4.14.2.dist-info/licenses/LICENSE
new file mode 100644
index 0000000..08e3a9c
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/beautifulsoup4-4.14.2.dist-info/licenses/LICENSE
@@ -0,0 +1,31 @@
+Beautiful Soup is made available under the MIT license:
+
+ Copyright (c) Leonard Richardson
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+
+Beautiful Soup incorporates code from the html5lib library, which is
+also made available under the MIT license. Copyright (c) James Graham
+and other contributors
+
+Beautiful Soup has an optional dependency on the soupsieve library,
+which is also made available under the MIT license. Copyright (c)
+Isaac Muse
diff --git a/venv/lib/python3.9/site-packages/bs4/__init__.py b/venv/lib/python3.9/site-packages/bs4/__init__.py
new file mode 100644
index 0000000..a171821
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/bs4/__init__.py
@@ -0,0 +1,1174 @@
+"""Beautiful Soup Elixir and Tonic - "The Screen-Scraper's Friend".
+
+http://www.crummy.com/software/BeautifulSoup/
+
+Beautiful Soup uses a pluggable XML or HTML parser to parse a
+(possibly invalid) document into a tree representation. Beautiful Soup
+provides methods and Pythonic idioms that make it easy to navigate,
+search, and modify the parse tree.
+
+Beautiful Soup works with Python 3.7 and up. It works better if lxml
+and/or html5lib is installed, but they are not required.
+
+For more than you ever wanted to know about Beautiful Soup, see the
+documentation: http://www.crummy.com/software/BeautifulSoup/bs4/doc/
+"""
+
+__author__ = "Leonard Richardson (leonardr@segfault.org)"
+__version__ = "4.14.2"
+__copyright__ = "Copyright (c) 2004-2025 Leonard Richardson"
+# Use of this source code is governed by the MIT license.
+__license__ = "MIT"
+
+__all__ = [
+ "AttributeResemblesVariableWarning",
+ "BeautifulSoup",
+ "Comment",
+ "Declaration",
+ "ProcessingInstruction",
+ "ResultSet",
+ "CSS",
+ "Script",
+ "Stylesheet",
+ "Tag",
+ "TemplateString",
+ "ElementFilter",
+ "UnicodeDammit",
+ "CData",
+ "Doctype",
+
+ # Exceptions
+ "FeatureNotFound",
+ "ParserRejectedMarkup",
+ "StopParsing",
+
+ # Warnings
+ "AttributeResemblesVariableWarning",
+ "GuessedAtParserWarning",
+ "MarkupResemblesLocatorWarning",
+ "UnusualUsageWarning",
+ "XMLParsedAsHTMLWarning",
+]
+
+from collections import Counter
+import io
+import sys
+import warnings
+
+# The very first thing we do is give a useful error if someone is
+# running this code under Python 2.
+if sys.version_info.major < 3:
+ raise ImportError(
+ "You are trying to use a Python 3-specific version of Beautiful Soup under Python 2. This will not work. The final version of Beautiful Soup to support Python 2 was 4.9.3."
+ )
+
+from .builder import (
+ builder_registry,
+ TreeBuilder,
+)
+from .builder._htmlparser import HTMLParserTreeBuilder
+from .dammit import UnicodeDammit
+from .css import CSS
+from ._deprecation import (
+ _deprecated,
+)
+from .element import (
+ CData,
+ Comment,
+ DEFAULT_OUTPUT_ENCODING,
+ Declaration,
+ Doctype,
+ NavigableString,
+ PageElement,
+ ProcessingInstruction,
+ PYTHON_SPECIFIC_ENCODINGS,
+ ResultSet,
+ Script,
+ Stylesheet,
+ Tag,
+ TemplateString,
+)
+from .formatter import Formatter
+from .filter import (
+ ElementFilter,
+ SoupStrainer,
+)
+from typing import (
+ Any,
+ cast,
+ Counter as CounterType,
+ Dict,
+ Iterator,
+ List,
+ Sequence,
+ Sized,
+ Optional,
+ Type,
+ Union,
+)
+
+from bs4._typing import (
+ _Encoding,
+ _Encodings,
+ _IncomingMarkup,
+ _InsertableElement,
+ _RawAttributeValue,
+ _RawAttributeValues,
+ _RawMarkup,
+)
+
+# Import all warnings and exceptions into the main package.
+from bs4.exceptions import (
+ FeatureNotFound,
+ ParserRejectedMarkup,
+ StopParsing,
+)
+from bs4._warnings import (
+ AttributeResemblesVariableWarning,
+ GuessedAtParserWarning,
+ MarkupResemblesLocatorWarning,
+ UnusualUsageWarning,
+ XMLParsedAsHTMLWarning,
+)
+
+
+class BeautifulSoup(Tag):
+ """A data structure representing a parsed HTML or XML document.
+
+ Most of the methods you'll call on a BeautifulSoup object are inherited from
+ PageElement or Tag.
+
+ Internally, this class defines the basic interface called by the
+ tree builders when converting an HTML/XML document into a data
+ structure. The interface abstracts away the differences between
+ parsers. To write a new tree builder, you'll need to understand
+ these methods as a whole.
+
+ These methods will be called by the BeautifulSoup constructor:
+ * reset()
+ * feed(markup)
+
+ The tree builder may call these methods from its feed() implementation:
+ * handle_starttag(name, attrs) # See note about return value
+ * handle_endtag(name)
+ * handle_data(data) # Appends to the current data node
+ * endData(containerClass) # Ends the current data node
+
+ No matter how complicated the underlying parser is, you should be
+ able to build a tree using 'start tag' events, 'end tag' events,
+ 'data' events, and "done with data" events.
+
+ If you encounter an empty-element tag (aka a self-closing tag,
+ like HTML's <br> tag), call handle_starttag and then
+ handle_endtag.
+ """
+
+ #: Since `BeautifulSoup` subclasses `Tag`, it's possible to treat it as
+ #: a `Tag` with a `Tag.name`. However, this name makes it clear the
+ #: `BeautifulSoup` object isn't a real markup tag.
+ ROOT_TAG_NAME: str = "[document]"
+
+ #: If the end-user gives no indication which tree builder they
+ #: want, look for one with these features.
+ DEFAULT_BUILDER_FEATURES: Sequence[str] = ["html", "fast"]
+
+ #: A string containing all ASCII whitespace characters, used
+ #: during parsing to detect data chunks that seem 'empty'.
+ ASCII_SPACES: str = "\x20\x0a\x09\x0c\x0d"
+
+ # FUTURE PYTHON:
+ element_classes: Dict[Type[PageElement], Type[PageElement]] #: :meta private:
+ builder: TreeBuilder #: :meta private:
+ is_xml: bool
+ known_xml: Optional[bool]
+ parse_only: Optional[SoupStrainer] #: :meta private:
+
+ # These members are only used while parsing markup.
+ markup: Optional[_RawMarkup] #: :meta private:
+ current_data: List[str] #: :meta private:
+ currentTag: Optional[Tag] #: :meta private:
+ tagStack: List[Tag] #: :meta private:
+ open_tag_counter: CounterType[str] #: :meta private:
+ preserve_whitespace_tag_stack: List[Tag] #: :meta private:
+ string_container_stack: List[Tag] #: :meta private:
+ _most_recent_element: Optional[PageElement] #: :meta private:
+
+ #: Beautiful Soup's best guess as to the character encoding of the
+ #: original document.
+ original_encoding: Optional[_Encoding]
+
+ #: The character encoding, if any, that was explicitly defined
+ #: in the original document. This may or may not match
+ #: `BeautifulSoup.original_encoding`.
+ declared_html_encoding: Optional[_Encoding]
+
+ #: This is True if the markup that was parsed contains
+ #: U+FFFD REPLACEMENT_CHARACTER characters which were not present
+ #: in the original markup. These mark character sequences that
+ #: could not be represented in Unicode.
+ contains_replacement_characters: bool
+
+ def __init__(
+ self,
+ markup: _IncomingMarkup = "",
+ features: Optional[Union[str, Sequence[str]]] = None,
+ builder: Optional[Union[TreeBuilder, Type[TreeBuilder]]] = None,
+ parse_only: Optional[SoupStrainer] = None,
+ from_encoding: Optional[_Encoding] = None,
+ exclude_encodings: Optional[_Encodings] = None,
+ element_classes: Optional[Dict[Type[PageElement], Type[PageElement]]] = None,
+ **kwargs: Any,
+ ):
+ """Constructor.
+
+ :param markup: A string or a file-like object representing
+ markup to be parsed.
+
+ :param features: Desirable features of the parser to be
+ used. This may be the name of a specific parser ("lxml",
+ "lxml-xml", "html.parser", or "html5lib") or it may be the
+ type of markup to be used ("html", "html5", "xml"). It's
+ recommended that you name a specific parser, so that
+ Beautiful Soup gives you the same results across platforms
+ and virtual environments.
+
+ :param builder: A TreeBuilder subclass to instantiate (or
+ instance to use) instead of looking one up based on
+ `features`. You only need to use this if you've implemented a
+ custom TreeBuilder.
+
+ :param parse_only: A SoupStrainer. Only parts of the document
+ matching the SoupStrainer will be considered. This is useful
+ when parsing part of a document that would otherwise be too
+ large to fit into memory.
+
+ :param from_encoding: A string indicating the encoding of the
+ document to be parsed. Pass this in if Beautiful Soup is
+ guessing wrongly about the document's encoding.
+
+ :param exclude_encodings: A list of strings indicating
+ encodings known to be wrong. Pass this in if you don't know
+ the document's encoding but you know Beautiful Soup's guess is
+ wrong.
+
+ :param element_classes: A dictionary mapping BeautifulSoup
+ classes like Tag and NavigableString, to other classes you'd
+ like to be instantiated instead as the parse tree is
+ built. This is useful for subclassing Tag or NavigableString
+ to modify default behavior.
+
+ :param kwargs: For backwards compatibility purposes, the
+ constructor accepts certain keyword arguments used in
+ Beautiful Soup 3. None of these arguments do anything in
+ Beautiful Soup 4; they will result in a warning and then be
+ ignored.
+
+ Apart from this, any keyword arguments passed into the
+ BeautifulSoup constructor are propagated to the TreeBuilder
+ constructor. This makes it possible to configure a
+ TreeBuilder by passing in arguments, not just by saying which
+ one to use.
+ """
+ if "convertEntities" in kwargs:
+ del kwargs["convertEntities"]
+ warnings.warn(
+ "BS4 does not respect the convertEntities argument to the "
+ "BeautifulSoup constructor. Entities are always converted "
+ "to Unicode characters."
+ )
+
+ if "markupMassage" in kwargs:
+ del kwargs["markupMassage"]
+ warnings.warn(
+ "BS4 does not respect the markupMassage argument to the "
+ "BeautifulSoup constructor. The tree builder is responsible "
+ "for any necessary markup massage."
+ )
+
+ if "smartQuotesTo" in kwargs:
+ del kwargs["smartQuotesTo"]
+ warnings.warn(
+ "BS4 does not respect the smartQuotesTo argument to the "
+ "BeautifulSoup constructor. Smart quotes are always converted "
+ "to Unicode characters."
+ )
+
+ if "selfClosingTags" in kwargs:
+ del kwargs["selfClosingTags"]
+ warnings.warn(
+ "Beautiful Soup 4 does not respect the selfClosingTags argument to the "
+ "BeautifulSoup constructor. The tree builder is responsible "
+ "for understanding self-closing tags."
+ )
+
+ if "isHTML" in kwargs:
+ del kwargs["isHTML"]
+ warnings.warn(
+ "Beautiful Soup 4 does not respect the isHTML argument to the "
+ "BeautifulSoup constructor. Suggest you use "
+ "features='lxml' for HTML and features='lxml-xml' for "
+ "XML."
+ )
+
+ def deprecated_argument(old_name: str, new_name: str) -> Optional[Any]:
+ if old_name in kwargs:
+ warnings.warn(
+ 'The "%s" argument to the BeautifulSoup constructor '
+ 'was renamed to "%s" in Beautiful Soup 4.0.0'
+ % (old_name, new_name),
+ DeprecationWarning,
+ stacklevel=3,
+ )
+ return kwargs.pop(old_name)
+ return None
+
+ parse_only = parse_only or deprecated_argument("parseOnlyThese", "parse_only")
+ if parse_only is not None:
+ # Issue a warning if we can tell in advance that
+ # parse_only will exclude the entire tree.
+ if parse_only.excludes_everything:
+ warnings.warn(
+ f"The given value for parse_only will exclude everything: {parse_only}",
+ UserWarning,
+ stacklevel=3,
+ )
+
+ from_encoding = from_encoding or deprecated_argument(
+ "fromEncoding", "from_encoding"
+ )
+
+ if from_encoding and isinstance(markup, str):
+ warnings.warn(
+ "You provided Unicode markup but also provided a value for from_encoding. Your from_encoding will be ignored."
+ )
+ from_encoding = None
+
+ self.element_classes = element_classes or dict()
+
+ # We need this information to track whether or not the builder
+ # was specified well enough that we can omit the 'you need to
+ # specify a parser' warning.
+ original_builder = builder
+ original_features = features
+
+ builder_class: Optional[Type[TreeBuilder]] = None
+ if isinstance(builder, type):
+ # A builder class was passed in; it needs to be instantiated.
+ builder_class = builder
+ builder = None
+ elif builder is None:
+ if isinstance(features, str):
+ features = [features]
+ if features is None or len(features) == 0:
+ features = self.DEFAULT_BUILDER_FEATURES
+ possible_builder_class = builder_registry.lookup(*features)
+ if possible_builder_class is None:
+ raise FeatureNotFound(
+ "Couldn't find a tree builder with the features you "
+ "requested: %s. Do you need to install a parser library?"
+ % ",".join(features)
+ )
+ builder_class = possible_builder_class
+
+ # At this point either we have a TreeBuilder instance in
+ # builder, or we have a builder_class that we can instantiate
+ # with the remaining **kwargs.
+ if builder is None:
+ assert builder_class is not None
+ builder = builder_class(**kwargs)
+ if (
+ not original_builder
+ and not (
+ original_features == builder.NAME
+ or (
+ isinstance(original_features, str)
+ and original_features in builder.ALTERNATE_NAMES
+ )
+ )
+ and markup
+ ):
+ # The user did not tell us which TreeBuilder to use,
+ # and we had to guess. Issue a warning.
+ if builder.is_xml:
+ markup_type = "XML"
+ else:
+ markup_type = "HTML"
+
+ # This code adapted from warnings.py so that we get the same line
+ # of code as our warnings.warn() call gets, even if the answer is wrong
+ # (as it may be in a multithreading situation).
+ caller = None
+ try:
+ caller = sys._getframe(1)
+ except ValueError:
+ pass
+ if caller:
+ globals = caller.f_globals
+ line_number = caller.f_lineno
+ else:
+ globals = sys.__dict__
+ line_number = 1
+ filename = globals.get("__file__")
+ if filename:
+ fnl = filename.lower()
+ if fnl.endswith((".pyc", ".pyo")):
+ filename = filename[:-1]
+ if filename:
+ # If there is no filename at all, the user is most likely in a REPL,
+ # and the warning is not necessary.
+ values = dict(
+ filename=filename,
+ line_number=line_number,
+ parser=builder.NAME,
+ markup_type=markup_type,
+ )
+ warnings.warn(
+ GuessedAtParserWarning.MESSAGE % values,
+ GuessedAtParserWarning,
+ stacklevel=2,
+ )
+ else:
+ if kwargs:
+ warnings.warn(
+ "Keyword arguments to the BeautifulSoup constructor will be ignored. These would normally be passed into the TreeBuilder constructor, but a TreeBuilder instance was passed in as `builder`."
+ )
+
+ self.builder = builder
+ self.is_xml = builder.is_xml
+ self.known_xml = self.is_xml
+ self._namespaces = dict()
+ self.parse_only = parse_only
+
+ if hasattr(markup, "read"): # It's a file-type object.
+ markup = cast(io.IOBase, markup).read()
+ elif not isinstance(markup, (bytes, str)) and not hasattr(markup, "__len__"):
+ raise TypeError(
+ f"Incoming markup is of an invalid type: {markup!r}. Markup must be a string, a bytestring, or an open filehandle."
+ )
+ elif isinstance(markup, Sized) and len(markup) <= 256 and (
+ (isinstance(markup, bytes) and b"<" not in markup and b"\n" not in markup)
+ or (isinstance(markup, str) and "<" not in markup and "\n" not in markup)
+ ):
+ # Issue warnings for a couple beginner problems
+ # involving passing non-markup to Beautiful Soup.
+ # Beautiful Soup will still parse the input as markup,
+ # since that is sometimes the intended behavior.
+ if not self._markup_is_url(markup):
+ self._markup_resembles_filename(markup)
+
+ # At this point we know markup is a string or bytestring. If
+ # it was a file-type object, we've read from it.
+ markup = cast(_RawMarkup, markup)
+
+ rejections = []
+ success = False
+ for (
+ self.markup,
+ self.original_encoding,
+ self.declared_html_encoding,
+ self.contains_replacement_characters,
+ ) in self.builder.prepare_markup(
+ markup, from_encoding, exclude_encodings=exclude_encodings
+ ):
+ self.reset()
+ self.builder.initialize_soup(self)
+ try:
+ self._feed()
+ success = True
+ break
+ except ParserRejectedMarkup as e:
+ rejections.append(e)
+ pass
+
+ if not success:
+ other_exceptions = [str(e) for e in rejections]
+ raise ParserRejectedMarkup(
+ "The markup you provided was rejected by the parser. Trying a different parser or a different encoding may help.\n\nOriginal exception(s) from parser:\n "
+ + "\n ".join(other_exceptions)
+ )
+
+ # Clear out the markup and remove the builder's circular
+ # reference to this object.
+ self.markup = None
+ self.builder.soup = None
+
+ def copy_self(self) -> "BeautifulSoup":
+ """Create a new BeautifulSoup object with the same TreeBuilder,
+ but not associated with any markup.
+
+ This is the first step of the deepcopy process.
+ """
+ clone = type(self)("", None, self.builder)
+
+ # Keep track of the encoding of the original document,
+ # since we won't be parsing it again.
+ clone.original_encoding = self.original_encoding
+ return clone
+
+ def __getstate__(self) -> Dict[str, Any]:
+ # Frequently a tree builder can't be pickled.
+ d = dict(self.__dict__)
+ if "builder" in d and d["builder"] is not None and not self.builder.picklable:
+ d["builder"] = type(self.builder)
+ # Store the contents as a Unicode string.
+ d["contents"] = []
+ d["markup"] = self.decode()
+
+ # If _most_recent_element is present, it's a Tag object left
+ # over from initial parse. It might not be picklable and we
+ # don't need it.
+ if "_most_recent_element" in d:
+ del d["_most_recent_element"]
+ return d
+
+ def __setstate__(self, state: Dict[str, Any]) -> None:
+ # If necessary, restore the TreeBuilder by looking it up.
+ self.__dict__ = state
+ if isinstance(self.builder, type):
+ self.builder = self.builder()
+ elif not self.builder:
+ # We don't know which builder was used to build this
+ # parse tree, so use a default we know is always available.
+ self.builder = HTMLParserTreeBuilder()
+ self.builder.soup = self
+ self.reset()
+ self._feed()
+
+ @classmethod
+ @_deprecated(
+ replaced_by="nothing (private method, will be removed)", version="4.13.0"
+ )
+ def _decode_markup(cls, markup: _RawMarkup) -> str:
+ """Ensure `markup` is Unicode so it's safe to send into warnings.warn.
+
+ warnings.warn had this problem back in 2010 but fortunately
+ not anymore. This has not been used for a long time; I just
+ noticed that fact while working on 4.13.0.
+ """
+ if isinstance(markup, bytes):
+ decoded = markup.decode("utf-8", "replace")
+ else:
+ decoded = markup
+ return decoded
+
+ @classmethod
+ def _markup_is_url(cls, markup: _RawMarkup) -> bool:
+ """Error-handling method to raise a warning if incoming markup looks
+ like a URL.
+
+ :param markup: A string of markup.
+ :return: Whether or not the markup resembled a URL
+ closely enough to justify issuing a warning.
+ """
+ problem: bool = False
+ if isinstance(markup, bytes):
+ problem = (
+ any(markup.startswith(prefix) for prefix in (b"http:", b"https:"))
+ and b" " not in markup
+ )
+ elif isinstance(markup, str):
+ problem = (
+ any(markup.startswith(prefix) for prefix in ("http:", "https:"))
+ and " " not in markup
+ )
+ else:
+ return False
+
+ if not problem:
+ return False
+ warnings.warn(
+ MarkupResemblesLocatorWarning.URL_MESSAGE % dict(what="URL"),
+ MarkupResemblesLocatorWarning,
+ stacklevel=3,
+ )
+ return True
+
+ @classmethod
+ def _markup_resembles_filename(cls, markup: _RawMarkup) -> bool:
+ """Error-handling method to issue a warning if incoming markup
+ resembles a filename.
+
+ :param markup: A string of markup.
+ :return: Whether or not the markup resembled a filename
+ closely enough to justify issuing a warning.
+ """
+ markup_b: bytes
+
+ # We're only checking ASCII characters, so rather than write
+ # the same tests twice, convert Unicode to a bytestring and
+ # operate on the bytestring.
+ if isinstance(markup, str):
+ markup_b = markup.encode("utf8")
+ else:
+ markup_b = markup
+
+ # Step 1: does it end with a common textual file extension?
+ filelike = False
+ lower = markup_b.lower()
+ extensions = [b".html", b".htm", b".xml", b".xhtml", b".txt"]
+ if any(lower.endswith(ext) for ext in extensions):
+ filelike = True
+ if not filelike:
+ return False
+
+ # Step 2: it _might_ be a file, but there are a few things
+ # we can look for that aren't very common in filenames.
+
+ # Characters that have special meaning to Unix shells. (< was
+ # excluded before this method was called.)
+ #
+ # Many of these are also reserved characters that cannot
+ # appear in Windows filenames.
+ for byte in markup_b:
+ if byte in b"?*#&;>$|":
+ return False
+
+ # Two consecutive forward slashes (as seen in a URL) or two
+ # consecutive spaces (as seen in fixed-width data).
+ #
+ # (Paths to Windows network shares contain consecutive
+ # backslashes, so checking that doesn't seem as helpful.)
+ if b"//" in markup_b:
+ return False
+ if b" " in markup_b:
+ return False
+
+ # A colon in any position other than position 1 (e.g. after a
+ # Windows drive letter).
+ if markup_b.startswith(b":"):
+ return False
+ colon_i = markup_b.rfind(b":")
+ if colon_i not in (-1, 1):
+ return False
+
+ # Step 3: If it survived all of those checks, it's similar
+ # enough to a file to justify issuing a warning.
+ warnings.warn(
+ MarkupResemblesLocatorWarning.FILENAME_MESSAGE % dict(what="filename"),
+ MarkupResemblesLocatorWarning,
+ stacklevel=3,
+ )
+ return True
+
+ def _feed(self) -> None:
+ """Internal method that parses previously set markup, creating a large
+ number of Tag and NavigableString objects.
+ """
+ # Convert the document to Unicode.
+ self.builder.reset()
+
+ if self.markup is not None:
+ self.builder.feed(self.markup)
+ # Close out any unfinished strings and close all the open tags.
+ self.endData()
+ while (
+ self.currentTag is not None and self.currentTag.name != self.ROOT_TAG_NAME
+ ):
+ self.popTag()
+
+ def reset(self) -> None:
+ """Reset this object to a state as though it had never parsed any
+ markup.
+ """
+ Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME)
+ self.hidden = True
+ self.builder.reset()
+ self.current_data = []
+ self.currentTag = None
+ self.tagStack = []
+ self.open_tag_counter = Counter()
+ self.preserve_whitespace_tag_stack = []
+ self.string_container_stack = []
+ self._most_recent_element = None
+ self.pushTag(self)
+
+ def new_tag(
+ self,
+ name: str,
+ namespace: Optional[str] = None,
+ nsprefix: Optional[str] = None,
+ attrs: Optional[_RawAttributeValues] = None,
+ sourceline: Optional[int] = None,
+ sourcepos: Optional[int] = None,
+ string: Optional[str] = None,
+ **kwattrs: _RawAttributeValue,
+ ) -> Tag:
+ """Create a new Tag associated with this BeautifulSoup object.
+
+ :param name: The name of the new Tag.
+ :param namespace: The URI of the new Tag's XML namespace, if any.
+ :param prefix: The prefix for the new Tag's XML namespace, if any.
+ :param attrs: A dictionary of this Tag's attribute values; can
+ be used instead of ``kwattrs`` for attributes like 'class'
+ that are reserved words in Python.
+ :param sourceline: The line number where this tag was
+ (purportedly) found in its source document.
+ :param sourcepos: The character position within ``sourceline`` where this
+ tag was (purportedly) found.
+ :param string: String content for the new Tag, if any.
+ :param kwattrs: Keyword arguments for the new Tag's attribute values.
+
+ """
+ attr_container = self.builder.attribute_dict_class(**kwattrs)
+ if attrs is not None:
+ attr_container.update(attrs)
+ tag_class = self.element_classes.get(Tag, Tag)
+
+ # Assume that this is either Tag or a subclass of Tag. If not,
+ # the user brought type-unsafety upon themselves.
+ tag_class = cast(Type[Tag], tag_class)
+ tag = tag_class(
+ None,
+ self.builder,
+ name,
+ namespace,
+ nsprefix,
+ attr_container,
+ sourceline=sourceline,
+ sourcepos=sourcepos,
+ )
+
+ if string is not None:
+ tag.string = string
+ return tag
+
+ def string_container(
+ self, base_class: Optional[Type[NavigableString]] = None
+ ) -> Type[NavigableString]:
+ """Find the class that should be instantiated to hold a given kind of
+ string.
+
+ This may be a built-in Beautiful Soup class or a custom class passed
+ in to the BeautifulSoup constructor.
+ """
+ container = base_class or NavigableString
+
+ # The user may want us to use some other class (hopefully a
+ # custom subclass) instead of the one we'd use normally.
+ container = cast(
+ Type[NavigableString], self.element_classes.get(container, container)
+ )
+
+ # On top of that, we may be inside a tag that needs a special
+ # container class.
+ if self.string_container_stack and container is NavigableString:
+ container = self.builder.string_containers.get(
+ self.string_container_stack[-1].name, container
+ )
+ return container
+
+ def new_string(
+ self, s: str, subclass: Optional[Type[NavigableString]] = None
+ ) -> NavigableString:
+ """Create a new `NavigableString` associated with this `BeautifulSoup`
+ object.
+
+ :param s: The string content of the `NavigableString`
+ :param subclass: The subclass of `NavigableString`, if any, to
+ use. If a document is being processed, an appropriate
+ subclass for the current location in the document will
+ be determined automatically.
+ """
+ container = self.string_container(subclass)
+ return container(s)
+
+ def insert_before(self, *args: _InsertableElement) -> List[PageElement]:
+ """This method is part of the PageElement API, but `BeautifulSoup` doesn't implement
+ it because there is nothing before or after it in the parse tree.
+ """
+ raise NotImplementedError(
+ "BeautifulSoup objects don't support insert_before()."
+ )
+
+ def insert_after(self, *args: _InsertableElement) -> List[PageElement]:
+ """This method is part of the PageElement API, but `BeautifulSoup` doesn't implement
+ it because there is nothing before or after it in the parse tree.
+ """
+ raise NotImplementedError("BeautifulSoup objects don't support insert_after().")
+
+ def popTag(self) -> Optional[Tag]:
+ """Internal method called by _popToTag when a tag is closed.
+
+ :meta private:
+ """
+ if not self.tagStack:
+ # Nothing to pop. This shouldn't happen.
+ return None
+ tag = self.tagStack.pop()
+ if tag.name in self.open_tag_counter:
+ self.open_tag_counter[tag.name] -= 1
+ if (
+ self.preserve_whitespace_tag_stack
+ and tag == self.preserve_whitespace_tag_stack[-1]
+ ):
+ self.preserve_whitespace_tag_stack.pop()
+ if self.string_container_stack and tag == self.string_container_stack[-1]:
+ self.string_container_stack.pop()
+ # print("Pop", tag.name)
+ if self.tagStack:
+ self.currentTag = self.tagStack[-1]
+ return self.currentTag
+
+ def pushTag(self, tag: Tag) -> None:
+ """Internal method called by handle_starttag when a tag is opened.
+
+ :meta private:
+ """
+ # print("Push", tag.name)
+ if self.currentTag is not None:
+ self.currentTag.contents.append(tag)
+ self.tagStack.append(tag)
+ self.currentTag = self.tagStack[-1]
+ if tag.name != self.ROOT_TAG_NAME:
+ self.open_tag_counter[tag.name] += 1
+ if tag.name in self.builder.preserve_whitespace_tags:
+ self.preserve_whitespace_tag_stack.append(tag)
+ if tag.name in self.builder.string_containers:
+ self.string_container_stack.append(tag)
+
+ def endData(self, containerClass: Optional[Type[NavigableString]] = None) -> None:
+ """Method called by the TreeBuilder when the end of a data segment
+ occurs.
+
+ :param containerClass: The class to use when incorporating the
+ data segment into the parse tree.
+
+ :meta private:
+ """
+ if self.current_data:
+ current_data = "".join(self.current_data)
+ # If whitespace is not preserved, and this string contains
+ # nothing but ASCII spaces, replace it with a single space
+ # or newline.
+ if not self.preserve_whitespace_tag_stack:
+ strippable = True
+ for i in current_data:
+ if i not in self.ASCII_SPACES:
+ strippable = False
+ break
+ if strippable:
+ if "\n" in current_data:
+ current_data = "\n"
+ else:
+ current_data = " "
+
+ # Reset the data collector.
+ self.current_data = []
+
+ # Should we add this string to the tree at all?
+ if (
+ self.parse_only
+ and len(self.tagStack) <= 1
+ and (not self.parse_only.allow_string_creation(current_data))
+ ):
+ return
+
+ containerClass = self.string_container(containerClass)
+ o = containerClass(current_data)
+ self.object_was_parsed(o)
+
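+ # Hedged illustration of the whitespace collapsing above (assumed
+ # behaviour, not a test shipped with this patch): outside tags such as
+ # <pre>, a data segment made only of ASCII spaces collapses to a single
+ # " " (or "\n" if it contained a newline), so for example
+ # BeautifulSoup("<p>   \n   </p>", "html.parser").p.string == "\n".
+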
+ def object_was_parsed(
+ self,
+ o: PageElement,
+ parent: Optional[Tag] = None,
+ most_recent_element: Optional[PageElement] = None,
+ ) -> None:
+ """Method called by the TreeBuilder to integrate an object into the
+ parse tree.
+
+ :meta private:
+ """
+ if parent is None:
+ parent = self.currentTag
+ assert parent is not None
+ previous_element: Optional[PageElement]
+ if most_recent_element is not None:
+ previous_element = most_recent_element
+ else:
+ previous_element = self._most_recent_element
+
+ next_element = previous_sibling = next_sibling = None
+ if isinstance(o, Tag):
+ next_element = o.next_element
+ next_sibling = o.next_sibling
+ previous_sibling = o.previous_sibling
+ if previous_element is None:
+ previous_element = o.previous_element
+
+ fix = parent.next_element is not None
+
+ o.setup(parent, previous_element, next_element, previous_sibling, next_sibling)
+
+ self._most_recent_element = o
+ parent.contents.append(o)
+
+ # Check if we are inserting into an already parsed node.
+ if fix:
+ self._linkage_fixer(parent)
+
+ def _linkage_fixer(self, el: Tag) -> None:
+ """Make sure linkage of this fragment is sound."""
+
+ first = el.contents[0]
+ child = el.contents[-1]
+ descendant: PageElement = child
+
+ if child is first and el.parent is not None:
+ # Parent should be linked to first child
+ el.next_element = child
+ # We are no longer linked to whatever this element is
+ prev_el = child.previous_element
+ if prev_el is not None and prev_el is not el:
+ prev_el.next_element = None
+ # First child should be linked to the parent, and no previous siblings.
+ child.previous_element = el
+ child.previous_sibling = None
+
+ # We have no sibling as we've been appended as the last.
+ child.next_sibling = None
+
+ # This index is a tag, dig deeper for a "last descendant"
+ if isinstance(child, Tag) and child.contents:
+ # _last_descendant is typed as returning Optional[PageElement],

+ # but the value can't be None here, because el is a Tag
+ # which we know has contents.
+ descendant = cast(PageElement, child._last_descendant(False))
+
+ # As the final step, link last descendant. It should be linked
+ # to the parent's next sibling (if found), else walk up the chain
+ # and find a parent with a sibling. It should have no next sibling.
+ descendant.next_element = None
+ descendant.next_sibling = None
+
+ target: Optional[Tag] = el
+ while True:
+ if target is None:
+ break
+ elif target.next_sibling is not None:
+ descendant.next_element = target.next_sibling
+ target.next_sibling.previous_element = child
+ break
+ target = target.parent
+
+ def _popToTag(
+ self, name: str, nsprefix: Optional[str] = None, inclusivePop: bool = True
+ ) -> Optional[Tag]:
+ """Pops the tag stack up to and including the most recent
+ instance of the given tag.
+
+ If there are no open tags with the given name, nothing will be
+ popped.
+
+ :param name: Pop up to the most recent tag with this name.
+ :param nsprefix: The namespace prefix that goes with `name`.
+ :param inclusivePop: If this is false, pops the tag stack up
+ to but *not* including the most recent instance of the
+ given tag.
+
+ :meta private:
+ """
+ # print("Popping to %s" % name)
+ if name == self.ROOT_TAG_NAME:
+ # The BeautifulSoup object itself can never be popped.
+ return None
+
+ most_recently_popped = None
+
+ stack_size = len(self.tagStack)
+ for i in range(stack_size - 1, 0, -1):
+ if not self.open_tag_counter.get(name):
+ break
+ t = self.tagStack[i]
+ if name == t.name and nsprefix == t.prefix:
+ if inclusivePop:
+ most_recently_popped = self.popTag()
+ break
+ most_recently_popped = self.popTag()
+
+ return most_recently_popped
+
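+ # Hedged illustration (assumed behaviour): with the bundled pure-Python
+ # builder, an unclosed inner tag is closed implicitly when an enclosing
+ # end tag arrives, because handle_endtag("b") pops both "i" and "b" off
+ # the stack:
+ #
+ # str(BeautifulSoup("<b><i>x</b>", "html.parser"))  # "<b><i>x</i></b>"
+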
+ def handle_starttag(
+ self,
+ name: str,
+ namespace: Optional[str],
+ nsprefix: Optional[str],
+ attrs: _RawAttributeValues,
+ sourceline: Optional[int] = None,
+ sourcepos: Optional[int] = None,
+ namespaces: Optional[Dict[str, str]] = None,
+ ) -> Optional[Tag]:
+ """Called by the tree builder when a new tag is encountered.
+
+ :param name: Name of the tag.
+ :param nsprefix: Namespace prefix for the tag.
+ :param attrs: A dictionary of attribute values. Note that
+ attribute values are expected to be simple strings; processing
+ of multi-valued attributes such as "class" comes later.
+ :param sourceline: The line number where this tag was found in its
+ source document.
+ :param sourcepos: The character position within `sourceline` where this
+ tag was found.
+ :param namespaces: A dictionary of all namespace prefix mappings
+ currently in scope in the document.
+
+ If this method returns None, the tag was rejected by an active
+ `ElementFilter`. You should proceed as if the tag had not occurred
+ in the document. For instance, if this was a self-closing tag,
+ don't call handle_endtag.
+
+ :meta private:
+ """
+ # print("Start tag %s: %s" % (name, attrs))
+ self.endData()
+
+ if (
+ self.parse_only
+ and len(self.tagStack) <= 1
+ and not self.parse_only.allow_tag_creation(nsprefix, name, attrs)
+ ):
+ return None
+
+ tag_class = self.element_classes.get(Tag, Tag)
+ # Assume that this is either Tag or a subclass of Tag. If not,
+ # the user brought type-unsafety upon themselves.
+ tag_class = cast(Type[Tag], tag_class)
+ tag = tag_class(
+ self,
+ self.builder,
+ name,
+ namespace,
+ nsprefix,
+ attrs,
+ self.currentTag,
+ self._most_recent_element,
+ sourceline=sourceline,
+ sourcepos=sourcepos,
+ namespaces=namespaces,
+ )
+ if tag is None:
+ return tag
+ if self._most_recent_element is not None:
+ self._most_recent_element.next_element = tag
+ self._most_recent_element = tag
+ self.pushTag(tag)
+ return tag
+
+ def handle_endtag(self, name: str, nsprefix: Optional[str] = None) -> None:
+ """Called by the tree builder when an ending tag is encountered.
+
+ :param name: Name of the tag.
+ :param nsprefix: Namespace prefix for the tag.
+
+ :meta private:
+ """
+ # print("End tag: " + name)
+ self.endData()
+ self._popToTag(name, nsprefix)
+
+ def handle_data(self, data: str) -> None:
+ """Called by the tree builder when a chunk of textual data is
+ encountered.
+
+ :meta private:
+ """
+ self.current_data.append(data)
+
+ def decode(
+ self,
+ indent_level: Optional[int] = None,
+ eventual_encoding: _Encoding = DEFAULT_OUTPUT_ENCODING,
+ formatter: Union[Formatter, str] = "minimal",
+ iterator: Optional[Iterator[PageElement]] = None,
+ **kwargs: Any,
+ ) -> str:
+ """Returns a string representation of the parse tree
+ as a full HTML or XML document.
+
+ :param indent_level: Each line of the rendering will be
+ indented this many levels. (The ``formatter`` decides what a
+ 'level' means, in terms of spaces or other characters
+ output.) This is used internally in recursive calls while
+ pretty-printing.
+ :param eventual_encoding: The encoding of the final document.
+ If this is None, the document will be a Unicode string.
+ :param formatter: Either a `Formatter` object, or a string naming one of
+ the standard formatters.
+ :param iterator: The iterator to use when navigating over the
+ parse tree. This is only used by `Tag.decode_contents` and
+ you probably won't need to use it.
+ """
+ if self.is_xml:
+ # Print the XML declaration
+ encoding_part = ""
+ declared_encoding: Optional[str] = eventual_encoding
+ if eventual_encoding in PYTHON_SPECIFIC_ENCODINGS:
+ # This is a special Python encoding; it can't actually
+ # go into an XML document because it means nothing
+ # outside of Python.
+ declared_encoding = None
+ if declared_encoding is not None:
+ encoding_part = ' encoding="%s"' % declared_encoding
+ prefix = '<?xml version="1.0"%s?>\n' % encoding_part
+ else:
+ prefix = ""
+
+ # Prior to 4.13.0, the first argument to this method was a
+ # bool called pretty_print, which gave the method a different
+ # signature from its superclass implementation, Tag.decode.
+ #
+ # The signatures of the two methods now match, but just in
+ # case someone is still passing a boolean in as the first
+ # argument to this method (or a keyword argument with the old
+ # name), we can handle it and put out a DeprecationWarning.
+ warning: Optional[str] = None
+ pretty_print: Optional[bool] = None
+ if isinstance(indent_level, bool):
+ if indent_level is True:
+ indent_level = 0
+ elif indent_level is False:
+ indent_level = None
+ warning = f"As of 4.13.0, the first argument to BeautifulSoup.decode has been changed from bool to int, to match Tag.decode. Pass in a value of {indent_level} instead."
+ else:
+ pretty_print = kwargs.pop("pretty_print", None)
+ assert not kwargs
+ if pretty_print is not None:
+ if pretty_print is True:
+ indent_level = 0
+ elif pretty_print is False:
+ indent_level = None
+ warning = f"As of 4.13.0, the pretty_print argument to BeautifulSoup.decode has been removed, to match Tag.decode. Pass in a value of indent_level={indent_level} instead."
+
+ if warning:
+ warnings.warn(warning, DeprecationWarning, stacklevel=2)
+ elif indent_level is False or pretty_print is False:
+ indent_level = None
+ return prefix + super(BeautifulSoup, self).decode(
+ indent_level, eventual_encoding, formatter, iterator
+ )
+
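+# A minimal sketch of the compatibility shim above (assumed behaviour,
+# not a test shipped with this patch):
+#
+# import warnings
+# from bs4 import BeautifulSoup
+# soup = BeautifulSoup("<a></a>", "html.parser")
+# with warnings.catch_warnings(record=True) as caught:
+#     warnings.simplefilter("always")
+#     html = soup.decode(True)  # old-style pretty_print flag
+# assert any(issubclass(w.category, DeprecationWarning) for w in caught)
+# assert html == soup.decode(indent_level=0)  # the supported spelling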
+
+# Aliases to make it easier to get started quickly, e.g. 'from bs4 import _soup'
+_s = BeautifulSoup
+_soup = BeautifulSoup
+
+
+class BeautifulStoneSoup(BeautifulSoup):
+ """Deprecated interface to an XML parser."""
+
+ def __init__(self, *args: Any, **kwargs: Any):
+ kwargs["features"] = "xml"
+ warnings.warn(
+ "The BeautifulStoneSoup class was deprecated in version 4.0.0. Instead of using "
+ 'it, pass features="xml" into the BeautifulSoup constructor.',
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ super(BeautifulStoneSoup, self).__init__(*args, **kwargs)
+
+
+# If this file is run as a script, act as an HTML pretty-printer.
+if __name__ == "__main__":
+ import sys
+
+ soup = BeautifulSoup(sys.stdin)
+ print((soup.prettify()))
diff --git a/venv/lib/python3.9/site-packages/bs4/_deprecation.py b/venv/lib/python3.9/site-packages/bs4/_deprecation.py
new file mode 100644
index 0000000..a7b5685
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/bs4/_deprecation.py
@@ -0,0 +1,80 @@
+"""Helper functions for deprecation.
+
+This interface is itself unstable and may change without warning. Do
+not use these functions yourself, even as a joke. The underscores are
+there for a reason. No support will be given.
+
+In particular, most of this will go away without warning once
+Beautiful Soup drops support for Python 3.11, since Python 3.12
+defines a `@typing.deprecated()
+decorator. <https://peps.python.org/pep-0702/>`_
+"""
+
+import functools
+import warnings
+
+from typing import (
+ Any,
+ Callable,
+)
+
+
+def _deprecated_alias(old_name: str, new_name: str, version: str):
+ """Alias one attribute name to another for backward compatibility
+
+ :meta private:
+ """
+
+ @property # type:ignore
+ def alias(self) -> Any:
+ ":meta private:"
+ warnings.warn(
+ f"Access to deprecated property {old_name}. (Replaced by {new_name}) -- Deprecated since version {version}.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return getattr(self, new_name)
+
+ @alias.setter
+ def alias(self, value: str) -> None:
+ ":meta private:"
+ warnings.warn(
+ f"Write to deprecated property {old_name}. (Replaced by {new_name}) -- Deprecated since version {version}.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return setattr(self, new_name, value)
+
+ return alias
+
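+# Hedged usage sketch (assumed; mirrors how the library applies this
+# helper elsewhere). A class can keep an old attribute name alive while
+# warning about it:
+#
+# class Widget:
+#     is_empty_element = True
+#     isSelfClosing = _deprecated_alias("isSelfClosing", "is_empty_element", "4.9.0")
+#
+# Widget().isSelfClosing  # returns True and emits a DeprecationWarning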
+
+def _deprecated_function_alias(
+ old_name: str, new_name: str, version: str
+) -> Callable[[Any], Any]:
+ def alias(self, *args: Any, **kwargs: Any) -> Any:
+ ":meta private:"
+ warnings.warn(
+ f"Call to deprecated method {old_name}. (Replaced by {new_name}) -- Deprecated since version {version}.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return getattr(self, new_name)(*args, **kwargs)
+
+ return alias
+
+
+def _deprecated(replaced_by: str, version: str) -> Callable:
+ def deprecate(func: Callable) -> Callable:
+ @functools.wraps(func)
+ def with_warning(*args: Any, **kwargs: Any) -> Any:
+ ":meta private:"
+ warnings.warn(
+ f"Call to deprecated method {func.__name__}. (Replaced by {replaced_by}) -- Deprecated since version {version}.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return func(*args, **kwargs)
+
+ return with_warning
+
+ return deprecate
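+
+
+# Hedged usage sketch of the decorator above (class and method names are
+# assumed, not part of the library):
+#
+# class Api:
+#     def find_all(self): ...
+#
+#     @_deprecated(replaced_by="find_all", version="4.13.0")
+#     def findAll(self):
+#         return self.find_all()
+#
+# Api().findAll() still works but emits a DeprecationWarning.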
diff --git a/venv/lib/python3.9/site-packages/bs4/_typing.py b/venv/lib/python3.9/site-packages/bs4/_typing.py
new file mode 100644
index 0000000..0ab69df
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/bs4/_typing.py
@@ -0,0 +1,205 @@
+# Custom type aliases used throughout Beautiful Soup to improve readability.
+
+# Notes on improvements to the type system in newer versions of Python
+# that can be used once Beautiful Soup drops support for older
+# versions:
+#
+# * ClassVar can be put on class variables now.
+# * In 3.10, x|y is an accepted shorthand for Union[x,y].
+# * In 3.10, TypeAlias gains capabilities that can be used to
+# improve the tree matching types (I don't remember what, exactly).
+# * In 3.9 it's possible to specialize the re.Match type,
+# e.g. re.Match[str]. In 3.8 there's a typing.re namespace for this,
+# but it's removed in 3.12, so to support the widest possible set of
+# versions I'm not using it.
+
+from typing_extensions import (
+ runtime_checkable,
+ Protocol,
+ TypeAlias,
+)
+from typing import (
+ Any,
+ Callable,
+ Dict,
+ IO,
+ Iterable,
+ Mapping,
+ Optional,
+ Pattern,
+ TYPE_CHECKING,
+ Union,
+)
+
+if TYPE_CHECKING:
+ from bs4.element import (
+ AttributeValueList,
+ NamespacedAttribute,
+ NavigableString,
+ PageElement,
+ ResultSet,
+ Tag,
+ )
+
+
+@runtime_checkable
+class _RegularExpressionProtocol(Protocol):
+ """A protocol object which can accept either Python's built-in
+ `re.Pattern` objects, or the similar ``Regex`` objects defined by the
+ third-party ``regex`` package.
+ """
+
+ def search(
+ self, string: str, pos: int = ..., endpos: int = ...
+ ) -> Optional[Any]: ...
+
+ @property
+ def pattern(self) -> str: ...
+
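+# Hedged illustration (assumed behaviour): because the protocol is
+# @runtime_checkable, a compiled pattern from the stdlib (or from the
+# third-party "regex" package) should satisfy an isinstance() check:
+#
+# import re
+# assert isinstance(re.compile("soup"), _RegularExpressionProtocol)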
+
+# Aliases for markup in various stages of processing.
+#
+#: The rawest form of markup: either a string, bytestring, or an open filehandle.
+_IncomingMarkup: TypeAlias = Union[str, bytes, IO[str], IO[bytes]]
+
+#: Markup that is in memory but has (potentially) yet to be converted
+#: to Unicode.
+_RawMarkup: TypeAlias = Union[str, bytes]
+
+# Aliases for character encodings
+#
+
+#: A data encoding.
+_Encoding: TypeAlias = str
+
+#: One or more data encodings.
+_Encodings: TypeAlias = Iterable[_Encoding]
+
+# Aliases for XML namespaces
+#
+
+#: The prefix for an XML namespace.
+_NamespacePrefix: TypeAlias = str
+
+#: The URL of an XML namespace
+_NamespaceURL: TypeAlias = str
+
+#: A mapping of prefixes to namespace URLs.
+_NamespaceMapping: TypeAlias = Dict[_NamespacePrefix, _NamespaceURL]
+
+#: A mapping of namespace URLs to prefixes
+_InvertedNamespaceMapping: TypeAlias = Dict[_NamespaceURL, _NamespacePrefix]
+
+# Aliases for the attribute values associated with HTML/XML tags.
+#
+
+#: The value associated with an HTML or XML attribute. This is the
+#: relatively unprocessed value Beautiful Soup expects to come from a
+#: `TreeBuilder`.
+_RawAttributeValue: TypeAlias = str
+
+#: A dictionary of names to `_RawAttributeValue` objects. This is how
+#: Beautiful Soup expects a `TreeBuilder` to represent a tag's
+#: attribute values.
+_RawAttributeValues: TypeAlias = (
+ "Mapping[Union[str, NamespacedAttribute], _RawAttributeValue]"
+)
+
+#: An attribute value in its final form, as stored in the
+# `Tag` class, after it has been processed and (in some cases)
+# split into a list of strings.
+_AttributeValue: TypeAlias = Union[str, "AttributeValueList"]
+
+#: A dictionary of names to :py:data:`_AttributeValue` objects. This is what
+#: a tag's attributes look like after processing.
+_AttributeValues: TypeAlias = Dict[str, _AttributeValue]
+
+#: The methods that deal with turning :py:data:`_RawAttributeValue` into
+#: :py:data:`_AttributeValue` may be called several times, even after the values
+#: are already processed (e.g. when cloning a tag), so they need to
+#: be able to accommodate both possibilities.
+_RawOrProcessedAttributeValues: TypeAlias = Union[_RawAttributeValues, _AttributeValues]
+
+#: A number of tree manipulation methods can take either a `PageElement` or a
+#: normal Python string (which will be converted to a `NavigableString`).
+_InsertableElement: TypeAlias = Union["PageElement", str]
+
+# Aliases to represent the many possibilities for matching bits of a
+# parse tree.
+#
+# This is very complicated because we're applying a formal type system
+# to some very DWIM code. The types we end up with will be the types
+# of the arguments to the SoupStrainer constructor and (more
+# familiarly to Beautiful Soup users) the find* methods.
+
+#: A function that takes a PageElement and returns a yes-or-no answer.
+_PageElementMatchFunction: TypeAlias = Callable[["PageElement"], bool]
+
+#: A function that takes the raw parsed ingredients of a markup tag
+#: and returns a yes-or-no answer.
+# Not necessary at the moment.
+# _AllowTagCreationFunction:TypeAlias = Callable[[Optional[str], str, Optional[_RawAttributeValues]], bool]
+
+#: A function that takes the raw parsed ingredients of a markup string node
+#: and returns a yes-or-no answer.
+# Not necessary at the moment.
+# _AllowStringCreationFunction:TypeAlias = Callable[[Optional[str]], bool]
+
+#: A function that takes a `Tag` and returns a yes-or-no answer.
+#: A `TagNameMatchRule` expects this kind of function, if you're
+#: going to pass it a function.
+_TagMatchFunction: TypeAlias = Callable[["Tag"], bool]
+
+#: A function that takes a string (or None) and returns a yes-or-no
+#: answer. An `AttributeValueMatchRule` expects this kind of function, if
+#: you're going to pass it a function.
+_NullableStringMatchFunction: TypeAlias = Callable[[Optional[str]], bool]
+
+#: A function that takes a string and returns a yes-or-no answer. A
+# `StringMatchRule` expects this kind of function, if you're going to
+# pass it a function.
+_StringMatchFunction: TypeAlias = Callable[[str], bool]
+
+#: Either a tag name, an attribute value or a string can be matched
+#: against a string, bytestring, regular expression, or a boolean.
+_BaseStrainable: TypeAlias = Union[str, bytes, Pattern[str], bool]
+
+#: A tag can be matched either with the `_BaseStrainable` options, or
+#: using a function that takes the `Tag` as its sole argument.
+_BaseStrainableElement: TypeAlias = Union[_BaseStrainable, _TagMatchFunction]
+
+#: A tag's attribute value can be matched either with the
+#: `_BaseStrainable` options, or using a function that takes that
+#: value as its sole argument.
+_BaseStrainableAttribute: TypeAlias = Union[_BaseStrainable, _NullableStringMatchFunction]
+
+#: A tag can be matched using either a single criterion or a list of
+#: criteria.
+_StrainableElement: TypeAlias = Union[
+ _BaseStrainableElement, Iterable[_BaseStrainableElement]
+]
+
+#: An attribute value can be matched using either a single criterion
+#: or a list of criteria.
+_StrainableAttribute: TypeAlias = Union[
+ _BaseStrainableAttribute, Iterable[_BaseStrainableAttribute]
+]
+
+#: A string can be matched using the same techniques as
+#: an attribute value.
+_StrainableString: TypeAlias = _StrainableAttribute
+
+#: A dictionary may be used to match against multiple attribute values at once.
+_StrainableAttributes: TypeAlias = Dict[str, _StrainableAttribute]
+
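+# Hedged illustration of how these unions show up in practice (assumed
+# behaviour of the find_* API, which is defined elsewhere):
+#
+# import re
+# from bs4 import BeautifulSoup
+# soup = BeautifulSoup('<a class="nav" href="/x">x</a>', "html.parser")
+# soup.find_all("a", attrs={"class": "nav", "href": re.compile("^/")})
+# soup.find_all("a", href=True)                # bool as a matcher
+# soup.find_all(lambda tag: tag.name == "a")   # a _TagMatchFunction
+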
+#: Many Beautiful Soup methods return a PageElement or a ResultSet of
+#: PageElements. A PageElement is either a Tag or a NavigableString.
+#: These convenience aliases make it easier for IDE users to see which methods
+#: are available on the objects they're dealing with.
+_OneElement: TypeAlias = Union["PageElement", "Tag", "NavigableString"]
+_AtMostOneElement: TypeAlias = Optional[_OneElement]
+_AtMostOneTag: TypeAlias = Optional["Tag"]
+_AtMostOneNavigableString: TypeAlias = Optional["NavigableString"]
+_QueryResults: TypeAlias = "ResultSet[_OneElement]"
+_SomeTags: TypeAlias = "ResultSet[Tag]"
+_SomeNavigableStrings: TypeAlias = "ResultSet[NavigableString]"
diff --git a/venv/lib/python3.9/site-packages/bs4/_warnings.py b/venv/lib/python3.9/site-packages/bs4/_warnings.py
new file mode 100644
index 0000000..4309473
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/bs4/_warnings.py
@@ -0,0 +1,98 @@
+"""Define some custom warnings."""
+
+
+class GuessedAtParserWarning(UserWarning):
+ """The warning issued when BeautifulSoup has to guess what parser to
+ use -- probably because no parser was specified in the constructor.
+ """
+
+ MESSAGE: str = """No parser was explicitly specified, so I'm using the best available %(markup_type)s parser for this system ("%(parser)s"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.
+
+The code that caused this warning is on line %(line_number)s of the file %(filename)s. To get rid of this warning, pass the additional argument 'features="%(parser)s"' to the BeautifulSoup constructor.
+"""
+
+
+class UnusualUsageWarning(UserWarning):
+ """A superclass for warnings issued when Beautiful Soup sees
+ something that is typically the result of a mistake in the calling
+ code, but might be intentional on the part of the user. If it is
+ in fact intentional, you can filter the individual warning class
+ to get rid of the warning. If you don't like Beautiful Soup
+ second-guessing what you are doing, you can filter the
+ UnusualUsageWarning class itself and get rid of these entirely.
+ """
+
+
+class MarkupResemblesLocatorWarning(UnusualUsageWarning):
+ """The warning issued when BeautifulSoup is given 'markup' that
+ actually looks like a resource locator -- a URL or a path to a file
+ on disk.
+ """
+
+ #: :meta private:
+ GENERIC_MESSAGE: str = """
+
+However, if you want to parse some data that happens to look like a %(what)s, then nothing has gone wrong: you are using Beautiful Soup correctly, and this warning is spurious and can be filtered. To make this warning go away, run this code before calling the BeautifulSoup constructor:
+
+ from bs4 import MarkupResemblesLocatorWarning
+ import warnings
+
+ warnings.filterwarnings("ignore", category=MarkupResemblesLocatorWarning)
+ """
+
+ URL_MESSAGE: str = (
+ """The input passed in on this line looks more like a URL than HTML or XML.
+
+If you meant to use Beautiful Soup to parse the web page found at a certain URL, then something has gone wrong. You should use a Python package like 'requests' to fetch the content behind the URL. Once you have the content as a string, you can feed that string into Beautiful Soup."""
+ + GENERIC_MESSAGE
+ )
+
+ FILENAME_MESSAGE: str = (
+ """The input passed in on this line looks more like a filename than HTML or XML.
+
+If you meant to use Beautiful Soup to parse the contents of a file on disk, then something has gone wrong. You should open the file first, using code like this:
+
+ filehandle = open(your filename)
+
+You can then feed the open filehandle into Beautiful Soup instead of using the filename."""
+ + GENERIC_MESSAGE
+ )
+
+
+class AttributeResemblesVariableWarning(UnusualUsageWarning, SyntaxWarning):
+ """The warning issued when Beautiful Soup suspects a provided
+ attribute name may actually be the misspelled name of a Beautiful
+ Soup variable. Generally speaking, this is only used in cases like
+ "_class" where it's very unlikely the user would be referencing an
+ XML attribute with that name.
+ """
+
+ MESSAGE: str = """%(original)r is an unusual attribute name and is a common misspelling for %(autocorrect)r.
+
+If you meant %(autocorrect)r, change your code to use it, and this warning will go away.
+
+If you really did mean to check the %(original)r attribute, this warning is spurious and can be filtered. To make it go away, run this code before creating your BeautifulSoup object:
+
+ from bs4 import AttributeResemblesVariableWarning
+ import warnings
+
+ warnings.filterwarnings("ignore", category=AttributeResemblesVariableWarning)
+"""
+
+
+class XMLParsedAsHTMLWarning(UnusualUsageWarning):
+ """The warning issued when an HTML parser is used to parse
+ XML that is not (as far as we can tell) XHTML.
+ """
+
+ MESSAGE: str = """It looks like you're using an HTML parser to parse an XML document.
+
+Assuming this really is an XML document, what you're doing might work, but you should know that using an XML parser will be more reliable. To parse this document as XML, make sure you have the Python package 'lxml' installed, and pass the keyword argument `features="xml"` into the BeautifulSoup constructor.
+
+If you want or need to use an HTML parser on this document, you can make this warning go away by filtering it. To do that, run this code before calling the BeautifulSoup constructor:
+
+ from bs4 import XMLParsedAsHTMLWarning
+ import warnings
+
+ warnings.filterwarnings("ignore", category=XMLParsedAsHTMLWarning)
+"""
diff --git a/venv/lib/python3.9/site-packages/bs4/builder/__init__.py b/venv/lib/python3.9/site-packages/bs4/builder/__init__.py
new file mode 100644
index 0000000..4aae4d3
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/bs4/builder/__init__.py
@@ -0,0 +1,848 @@
+from __future__ import annotations
+
+# Use of this source code is governed by the MIT license.
+__license__ = "MIT"
+
+from collections import defaultdict
+import re
+from types import ModuleType
+from typing import (
+ Any,
+ cast,
+ Dict,
+ Iterable,
+ List,
+ Optional,
+ Pattern,
+ Set,
+ Tuple,
+ Type,
+ TYPE_CHECKING,
+)
+import warnings
+import sys
+from bs4.element import (
+ AttributeDict,
+ AttributeValueList,
+ CharsetMetaAttributeValue,
+ ContentMetaAttributeValue,
+ RubyParenthesisString,
+ RubyTextString,
+ Stylesheet,
+ Script,
+ TemplateString,
+ nonwhitespace_re,
+)
+
+# Exceptions were moved to their own module in 4.13. Import here for
+# backwards compatibility.
+from bs4.exceptions import ParserRejectedMarkup
+
+from bs4._typing import (
+ _AttributeValues,
+ _RawAttributeValue,
+)
+
+from bs4._warnings import XMLParsedAsHTMLWarning
+
+if TYPE_CHECKING:
+ from bs4 import BeautifulSoup
+ from bs4.element import (
+ NavigableString,
+ Tag,
+ )
+ from bs4._typing import (
+ _AttributeValue,
+ _Encoding,
+ _Encodings,
+ _RawOrProcessedAttributeValues,
+ _RawMarkup,
+ )
+
+__all__ = [
+ "HTMLTreeBuilder",
+ "SAXTreeBuilder",
+ "TreeBuilder",
+ "TreeBuilderRegistry",
+]
+
+# Some useful features for a TreeBuilder to have.
+FAST = "fast"
+PERMISSIVE = "permissive"
+STRICT = "strict"
+XML = "xml"
+HTML = "html"
+HTML_5 = "html5"
+
+__all__ = [
+ "TreeBuilderRegistry",
+ "TreeBuilder",
+ "HTMLTreeBuilder",
+ "DetectsXMLParsedAsHTML",
+
+ "ParserRejectedMarkup", # backwards compatibility only as of 4.13.0
+]
+
+class TreeBuilderRegistry(object):
+ """A way of looking up TreeBuilder subclasses by their name or by desired
+ features.
+ """
+
+ builders_for_feature: Dict[str, List[Type[TreeBuilder]]]
+ builders: List[Type[TreeBuilder]]
+
+ def __init__(self) -> None:
+ self.builders_for_feature = defaultdict(list)
+ self.builders = []
+
+ def register(self, treebuilder_class: type[TreeBuilder]) -> None:
+ """Register a treebuilder based on its advertised features.
+
+ :param treebuilder_class: A subclass of `TreeBuilder`. Its
+ `TreeBuilder.features` attribute should list its features.
+ """
+ for feature in treebuilder_class.features:
+ self.builders_for_feature[feature].insert(0, treebuilder_class)
+ self.builders.insert(0, treebuilder_class)
+
+ def lookup(self, *features: str) -> Optional[Type[TreeBuilder]]:
+ """Look up a TreeBuilder subclass with the desired features.
+
+ :param features: A list of features to look for. If none are
+ provided, the most recently registered TreeBuilder subclass
+ will be used.
+ :return: A TreeBuilder subclass, or None if there's no
+ registered subclass with all the requested features.
+ """
+ if len(self.builders) == 0:
+ # There are no builders at all.
+ return None
+
+ if len(features) == 0:
+ # They didn't ask for any features. Give them the most
+ # recently registered builder.
+ return self.builders[0]
+
+ # Go down the list of features in order, and eliminate any builders
+ # that don't match every feature.
+ feature_list = list(features)
+ feature_list.reverse()
+ candidates = None
+ candidate_set = None
+ while len(feature_list) > 0:
+ feature = feature_list.pop()
+ we_have_the_feature = self.builders_for_feature.get(feature, [])
+ if len(we_have_the_feature) > 0:
+ if candidates is None:
+ candidates = we_have_the_feature
+ candidate_set = set(candidates)
+ elif candidate_set is not None:
+ # Eliminate any candidates that don't have this feature.
+ candidate_set = candidate_set.intersection(set(we_have_the_feature))
+
+ # The only valid candidates are the ones in candidate_set.
+ # Go through the original list of candidates and pick the first one
+ # that's in candidate_set.
+ if candidate_set is None or candidates is None:
+ return None
+ for candidate in candidates:
+ if candidate in candidate_set:
+ return candidate
+ return None
+
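+ # Hedged usage sketch (assumed): this registry is what resolves the
+ # 'features' argument of the BeautifulSoup constructor, e.g.
+ #
+ # from bs4.builder import builder_registry
+ # builder_registry.lookup("html.parser")   # the bundled pure-Python builder
+ # builder_registry.lookup("fast", "html")  # typically lxml, if installed
+ # builder_registry.lookup("no-such-feature")  # None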
+
+#: The `BeautifulSoup` constructor will take a list of features
+#: and use it to look up `TreeBuilder` classes in this registry.
+builder_registry: TreeBuilderRegistry = TreeBuilderRegistry()
+
+
+class TreeBuilder(object):
+ """Turn a textual document into a Beautiful Soup object tree.
+
+ This is an abstract superclass which smooths out the behavior of
+ different parser libraries into a single, unified interface.
+
+ :param multi_valued_attributes: If this is set to None, the
+ TreeBuilder will not turn any values for attributes like
+ 'class' into lists. Setting this to a dictionary will
+ customize this behavior; look at :py:attr:`bs4.builder.HTMLTreeBuilder.DEFAULT_CDATA_LIST_ATTRIBUTES`
+ for an example.
+
+ Internally, these are called "CDATA list attributes", but that
+ probably doesn't make sense to an end-user, so the argument name
+ is ``multi_valued_attributes``.
+
+ :param preserve_whitespace_tags: A set of tags to treat
+ the way <pre> tags are treated in HTML. Tags in this set
+ are immune from pretty-printing; their contents will always be
+ output as-is.
+
+ :param string_containers: A dictionary mapping tag names to
+ the classes that should be instantiated to contain the textual
+ contents of those tags. The default is to use NavigableString
+ for every tag, no matter what the name. You can override the
+ default by changing :py:attr:`DEFAULT_STRING_CONTAINERS`.
+
+ :param store_line_numbers: If the parser keeps track of the line
+ numbers and positions of the original markup, that information
+ will, by default, be stored in each corresponding
+ :py:class:`bs4.element.Tag` object. You can turn this off by
+ passing store_line_numbers=False; then Tag.sourcepos and
+ Tag.sourceline will always be None. If the parser you're using
+ doesn't keep track of this information, then store_line_numbers
+ is irrelevant.
+
+ :param attribute_dict_class: The value of a multi-valued attribute
+ (such as HTML's 'class') will be stored in an instance of this
+ class. The default is Beautiful Soup's built-in
+ `AttributeValueList`, which is a normal Python list, and you
+ will probably never need to change it.
+ """
+
+ USE_DEFAULT: Any = object() #: :meta private:
+
+ def __init__(
+ self,
+ multi_valued_attributes: Dict[str, Set[str]] = USE_DEFAULT,
+ preserve_whitespace_tags: Set[str] = USE_DEFAULT,
+ store_line_numbers: bool = USE_DEFAULT,
+ string_containers: Dict[str, Type[NavigableString]] = USE_DEFAULT,
+ empty_element_tags: Set[str] = USE_DEFAULT,
+ attribute_dict_class: Type[AttributeDict] = AttributeDict,
+ attribute_value_list_class: Type[AttributeValueList] = AttributeValueList,
+ ):
+ self.soup = None
+ if multi_valued_attributes is self.USE_DEFAULT:
+ multi_valued_attributes = self.DEFAULT_CDATA_LIST_ATTRIBUTES
+ self.cdata_list_attributes = multi_valued_attributes
+ if preserve_whitespace_tags is self.USE_DEFAULT:
+ preserve_whitespace_tags = self.DEFAULT_PRESERVE_WHITESPACE_TAGS
+ self.preserve_whitespace_tags = preserve_whitespace_tags
+ if empty_element_tags is self.USE_DEFAULT:
+ self.empty_element_tags = self.DEFAULT_EMPTY_ELEMENT_TAGS
+ else:
+ self.empty_element_tags = empty_element_tags
+ # TODO: store_line_numbers is probably irrelevant now that
+ # the behavior of sourceline and sourcepos has been made consistent
+ # everywhere.
+ if store_line_numbers == self.USE_DEFAULT:
+ store_line_numbers = self.TRACKS_LINE_NUMBERS
+ self.store_line_numbers = store_line_numbers
+ if string_containers == self.USE_DEFAULT:
+ string_containers = self.DEFAULT_STRING_CONTAINERS
+ self.string_containers = string_containers
+ self.attribute_dict_class = attribute_dict_class
+ self.attribute_value_list_class = attribute_value_list_class
+
+ NAME: str = "[Unknown tree builder]"
+ ALTERNATE_NAMES: Iterable[str] = []
+ features: Iterable[str] = []
+
+ is_xml: bool = False
+ picklable: bool = False
+
+ soup: Optional[BeautifulSoup] #: :meta private:
+
+ #: A tag will be considered an empty-element
+ #: tag when and only when it has no contents.
+ empty_element_tags: Optional[Set[str]] = None #: :meta private:
+ cdata_list_attributes: Dict[str, Set[str]] #: :meta private:
+ preserve_whitespace_tags: Set[str] #: :meta private:
+ string_containers: Dict[str, Type[NavigableString]] #: :meta private:
+ tracks_line_numbers: bool #: :meta private:
+
+ #: A value for these tag/attribute combinations is a space- or
+ #: comma-separated list of CDATA, rather than a single CDATA.
+ DEFAULT_CDATA_LIST_ATTRIBUTES: Dict[str, Set[str]] = defaultdict(set)
+
+ #: Whitespace should be preserved inside these tags.
+ DEFAULT_PRESERVE_WHITESPACE_TAGS: Set[str] = set()
+
+ #: The textual contents of tags with these names should be
+ #: instantiated with some class other than `bs4.element.NavigableString`.
+ DEFAULT_STRING_CONTAINERS: Dict[str, Type[bs4.element.NavigableString]] = {} # type:ignore
+
+ #: By default, tags are treated as empty-element tags if they have
+ #: no contents--that is, using XML rules. HTMLTreeBuilder
+ #: defines a different set of DEFAULT_EMPTY_ELEMENT_TAGS based on the
+ #: HTML 4 and HTML5 standards.
+ DEFAULT_EMPTY_ELEMENT_TAGS: Optional[Set[str]] = None
+
+ #: Most parsers don't keep track of line numbers.
+ TRACKS_LINE_NUMBERS: bool = False
+
+ def initialize_soup(self, soup: BeautifulSoup) -> None:
+ """The BeautifulSoup object has been initialized and is now
+ being associated with the TreeBuilder.
+
+ :param soup: A BeautifulSoup object.
+ """
+ self.soup = soup
+
+ def reset(self) -> None:
+ """Do any work necessary to reset the underlying parser
+ for a new document.
+
+ By default, this does nothing.
+ """
+ pass
+
+ def can_be_empty_element(self, tag_name: str) -> bool:
+ """Might a tag with this name be an empty-element tag?
+
+ The final markup may or may not actually present this tag as
+ self-closing.
+
+ For instance: an HTMLBuilder does not consider a <p> tag to be
+ an empty-element tag (it's not in
+ HTMLBuilder.empty_element_tags). This means an empty <p> tag
+ will be presented as "<p></p>", not "<p/>" or "<p>".
+
+ The default implementation has no opinion about which tags are
+ empty-element tags, so a tag will be presented as an
+ empty-element tag if and only if it has no children.
+ " " will become " ", and "bar " will
+ be left alone.
+
+ :param tag_name: The name of a markup tag.
+ """
+ if self.empty_element_tags is None:
+ return True
+ return tag_name in self.empty_element_tags
+
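+ # Hedged illustration (assumed behaviour; the "xml" feature requires
+ # lxml): under the default XML-style rules, emptiness alone decides the
+ # rendering, whereas HTMLTreeBuilder consults its explicit set of
+ # empty-element tags.
+ #
+ # from bs4 import BeautifulSoup
+ # soup = BeautifulSoup("<doc><foo></foo><bar>x</bar></doc>", "xml")
+ # str(soup.foo)  # "<foo/>"
+ # str(soup.bar)  # "<bar>x</bar>"
+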
+ def feed(self, markup: _RawMarkup) -> None:
+ """Run incoming markup through some parsing process."""
+ raise NotImplementedError()
+
+ def prepare_markup(
+ self,
+ markup: _RawMarkup,
+ user_specified_encoding: Optional[_Encoding] = None,
+ document_declared_encoding: Optional[_Encoding] = None,
+ exclude_encodings: Optional[_Encodings] = None,
+ ) -> Iterable[Tuple[_RawMarkup, Optional[_Encoding], Optional[_Encoding], bool]]:
+ """Run any preliminary steps necessary to make incoming markup
+ acceptable to the parser.
+
+ :param markup: The markup that's about to be parsed.
+ :param user_specified_encoding: The user asked to try this encoding
+ to convert the markup into a Unicode string.
+ :param document_declared_encoding: The markup itself claims to be
+ in this encoding. NOTE: This argument is not used by the
+ calling code and can probably be removed.
+ :param exclude_encodings: The user asked *not* to try any of
+ these encodings.
+
+ :yield: A series of 4-tuples: (markup, encoding, declared encoding,
+ has undergone character replacement)
+
+ Each 4-tuple represents a strategy that the parser can try
+ to convert the document to Unicode and parse it. Each
+ strategy will be tried in turn.
+
+ By default, the only strategy is to parse the markup
+ as-is. See `LXMLTreeBuilderForXML` and
+ `HTMLParserTreeBuilder` for implementations that take into
+ account the quirks of particular parsers.
+
+ :meta private:
+
+ """
+ yield markup, None, None, False
+
+ def test_fragment_to_document(self, fragment: str) -> str:
+ """Wrap an HTML fragment to make it look like a document.
+
+ Different parsers do this differently. For instance, lxml
+ introduces an empty <head> tag, and html5lib
+ doesn't. Abstracting this away lets us write simple tests
+ which run HTML fragments through the parser and compare the
+ results against other HTML fragments.
+
+ This method should not be used outside of unit tests.
+
+ :param fragment: A fragment of HTML.
+ :return: A full HTML document.
+ :meta private:
+ """
+ return fragment
+
+ def set_up_substitutions(self, tag: Tag) -> bool:
+ """Set up any substitutions that will need to be performed on
+ a `Tag` when it's output as a string.
+
+ By default, this does nothing. See `HTMLTreeBuilder` for a
+ case where this is used.
+
+ :return: Whether or not a substitution was performed.
+ :meta private:
+ """
+ return False
+
+ def _replace_cdata_list_attribute_values(
+ self, tag_name: str, attrs: _RawOrProcessedAttributeValues
+ ) -> _AttributeValues:
+ """When an attribute value is associated with a tag that can
+ have multiple values for that attribute, convert the string
+ value to a list of strings.
+
+ Basically, replaces class="foo bar" with class=["foo", "bar"]
+
+ NOTE: This method modifies its input in place.
+
+ :param tag_name: The name of a tag.
+ :param attrs: A dictionary containing the tag's attributes.
+ Any appropriate attribute values will be modified in place.
+ :return: The modified dictionary that was originally passed in.
+ """
+
+ # First, cast the attrs dict to _AttributeValues. This might
+ # not be accurate yet, but it will be by the time this method
+ # returns.
+ modified_attrs = cast(_AttributeValues, attrs)
+ if not modified_attrs or not self.cdata_list_attributes:
+ # Nothing to do.
+ return modified_attrs
+
+ # There is at least a possibility that we need to modify one of
+ # the attribute values.
+ universal: Set[str] = self.cdata_list_attributes.get("*", set())
+ tag_specific = self.cdata_list_attributes.get(tag_name.lower(), None)
+ for attr in list(modified_attrs.keys()):
+ modified_value: _AttributeValue
+ if attr in universal or (tag_specific and attr in tag_specific):
+ # We have a "class"-type attribute whose string
+ # value is a whitespace-separated list of
+ # values. Split it into a list.
+ original_value: _AttributeValue = modified_attrs[attr]
+ if isinstance(original_value, _RawAttributeValue):
+ # This is a _RawAttributeValue (a string) that
+ # needs to be split and converted to a
+ # AttributeValueList so it can be an
+ # _AttributeValue.
+ modified_value = self.attribute_value_list_class(
+ nonwhitespace_re.findall(original_value)
+ )
+ else:
+ # html5lib calls setAttributes twice for the
+ # same tag when rearranging the parse tree. On
+ # the second call the attribute value here is
+ # already a list. This can also happen when a
+ # Tag object is cloned. If this happens, leave
+ # the value alone rather than trying to split
+ # it again.
+ modified_value = original_value
+ modified_attrs[attr] = modified_value
+ return modified_attrs
+
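+# Rough sketch of the conversion above (assumed input and output, using
+# the HTMLTreeBuilder subclass defined later in this module):
+#
+# builder = HTMLTreeBuilder()
+# attrs = {"class": "foo bar", "id": "x"}
+# builder._replace_cdata_list_attribute_values("a", attrs)
+# # attrs is now {"class": ["foo", "bar"], "id": "x"}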
+
+class SAXTreeBuilder(TreeBuilder):
+ """A Beautiful Soup treebuilder that listens for SAX events.
+
+ This is not currently used for anything, and it will be removed
+ soon. It was a good idea, but it wasn't properly integrated into the
+ rest of Beautiful Soup, so there have been long stretches where it
+ hasn't worked properly.
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ warnings.warn(
+ "The SAXTreeBuilder class was deprecated in 4.13.0 and will be removed soon thereafter. It is completely untested and probably doesn't work; do not use it.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ super(SAXTreeBuilder, self).__init__(*args, **kwargs)
+
+ def feed(self, markup: _RawMarkup) -> None:
+ raise NotImplementedError()
+
+ def close(self) -> None:
+ pass
+
+ def startElement(self, name: str, attrs: Dict[str, str]) -> None:
+ attrs = AttributeDict((key[1], value) for key, value in list(attrs.items()))
+ # print("Start %s, %r" % (name, attrs))
+ assert self.soup is not None
+ self.soup.handle_starttag(name, None, None, attrs)
+
+ def endElement(self, name: str) -> None:
+ # print("End %s" % name)
+ assert self.soup is not None
+ self.soup.handle_endtag(name)
+
+ def startElementNS(
+ self, nsTuple: Tuple[str, str], nodeName: str, attrs: Dict[str, str]
+ ) -> None:
+ # Throw away (ns, nodeName) for now.
+ self.startElement(nodeName, attrs)
+
+ def endElementNS(self, nsTuple: Tuple[str, str], nodeName: str) -> None:
+ # Throw away (ns, nodeName) for now.
+ self.endElement(nodeName)
+ # handler.endElementNS((ns, node.nodeName), node.nodeName)
+
+ def startPrefixMapping(self, prefix: str, nodeValue: str) -> None:
+ # Ignore the prefix for now.
+ pass
+
+ def endPrefixMapping(self, prefix: str) -> None:
+ # Ignore the prefix for now.
+ # handler.endPrefixMapping(prefix)
+ pass
+
+ def characters(self, content: str) -> None:
+ assert self.soup is not None
+ self.soup.handle_data(content)
+
+ def startDocument(self) -> None:
+ pass
+
+ def endDocument(self) -> None:
+ pass
+
+
+class HTMLTreeBuilder(TreeBuilder):
+ """This TreeBuilder knows facts about HTML, such as which tags are treated
+ specially by the HTML standard.
+ """
+
+ #: Some HTML tags are defined as having no contents. Beautiful Soup
+ #: treats these specially.
+ DEFAULT_EMPTY_ELEMENT_TAGS: Optional[Set[str]] = set(
+ [
+ # These are from HTML5.
+ "area",
+ "base",
+ "br",
+ "col",
+ "embed",
+ "hr",
+ "img",
+ "input",
+ "keygen",
+ "link",
+ "menuitem",
+ "meta",
+ "param",
+ "source",
+ "track",
+ "wbr",
+ # These are from earlier versions of HTML and are removed in HTML5.
+ "basefont",
+ "bgsound",
+ "command",
+ "frame",
+ "image",
+ "isindex",
+ "nextid",
+ "spacer",
+ ]
+ )
+
+ #: The HTML standard defines these tags as block-level elements. Beautiful
+ #: Soup does not treat these elements differently from other elements,
+ #: but it may do so eventually, and this information is available if
+ #: you need to use it.
+ DEFAULT_BLOCK_ELEMENTS: Set[str] = set(
+ [
+ "address",
+ "article",
+ "aside",
+ "blockquote",
+ "canvas",
+ "dd",
+ "div",
+ "dl",
+ "dt",
+ "fieldset",
+ "figcaption",
+ "figure",
+ "footer",
+ "form",
+ "h1",
+ "h2",
+ "h3",
+ "h4",
+ "h5",
+ "h6",
+ "header",
+ "hr",
+ "li",
+ "main",
+ "nav",
+ "noscript",
+ "ol",
+ "output",
+ "p",
+ "pre",
+ "section",
+ "table",
+ "tfoot",
+ "ul",
+ "video",
+ ]
+ )
+
+ #: These HTML tags need special treatment so they can be
+ #: represented by a string class other than `bs4.element.NavigableString`.
+ #:
+ #: For some of these tags, it's because the HTML standard defines
+ #: an unusual content model for them. I made this list by going
+ #: through the HTML spec
+ #: (https://html.spec.whatwg.org/#metadata-content) and looking for
+ #: "metadata content" elements that can contain strings.
+ #:
+ #: The Ruby tags (<rt> and <rp>) are here despite being normal
+ #: "phrasing content" tags, because the content they contain is
+ #: qualitatively different from other text in the document, and it
+ #: can be useful to be able to distinguish it.
+ #:
+ #: TODO: Arguably <noscript> could go here but it seems
+ #: qualitatively different from the other tags.
+ DEFAULT_STRING_CONTAINERS: Dict[str, Type[bs4.element.NavigableString]] = { # type:ignore
+ "rt": RubyTextString,
+ "rp": RubyParenthesisString,
+ "style": Stylesheet,
+ "script": Script,
+ "template": TemplateString,
+ }
+
+ #: The HTML standard defines these attributes as containing a
+ #: space-separated list of values, not a single value. That is,
+ #: class="foo bar" means that the 'class' attribute has two values,
+ #: 'foo' and 'bar', not the single value 'foo bar'. When we
+ #: encounter one of these attributes, we will parse its value into
+ #: a list of values if possible. Upon output, the list will be
+ #: converted back into a string.
+ DEFAULT_CDATA_LIST_ATTRIBUTES: Dict[str, Set[str]] = {
+ "*": {"class", "accesskey", "dropzone"},
+ "a": {"rel", "rev"},
+ "link": {"rel", "rev"},
+ "td": {"headers"},
+ "th": {"headers"},
+ "form": {"accept-charset"},
+ "object": {"archive"},
+ # These are HTML5 specific, as are *.accesskey and *.dropzone above.
+ "area": {"rel"},
+ "icon": {"sizes"},
+ "iframe": {"sandbox"},
+ "output": {"for"},
+ }
+
+ #: By default, whitespace inside these HTML tags will be
+ #: preserved rather than being collapsed.
+ DEFAULT_PRESERVE_WHITESPACE_TAGS: set[str] = set(["pre", "textarea"])
+
+ def set_up_substitutions(self, tag: Tag) -> bool:
+ """Replace the declared encoding in a tag with a placeholder,
+ to be substituted when the tag is output to a string.
+
+ An HTML document may come in to Beautiful Soup as one
+ encoding, but exit in a different encoding, and the <meta> tag
+ needs to be changed to reflect this.
+
+ :return: Whether or not a substitution was performed.
+
+ :meta private:
+ """
+ # We are only interested in <meta> tags
+ if tag.name != "meta":
+ return False
+
+ # TODO: This cast will fail in the (very unlikely) scenario
+ # that the programmer who instantiates the TreeBuilder
+ # specifies meta['content'] or meta['charset'] as
+ # cdata_list_attributes.
+ content: Optional[str] = cast(Optional[str], tag.get("content"))
+ charset: Optional[str] = cast(Optional[str], tag.get("charset"))
+
+ # But we can accommodate meta['http-equiv'] being made a
+ # cdata_list_attribute (again, very unlikely) without much
+ # trouble.
+ http_equiv: List[str] = tag.get_attribute_list("http-equiv")
+
+ # We are interested in <meta> tags that say what encoding the
+ # document was originally in. This means HTML 5-style <meta>
+ # tags that provide the "charset" attribute. It also means
+ # HTML 4-style <meta> tags that provide the "content"
+ # attribute and have "http-equiv" set to "content-type".
+ #
+ # In both cases we will replace the value of the appropriate
+ # attribute with a standin object that can take on any
+ # encoding.
+ substituted = False
+ if charset is not None:
+ # HTML 5 style:
+ # <meta charset="utf8">
+ tag["charset"] = CharsetMetaAttributeValue(charset)
+ substituted = True
+
+ elif content is not None and any(
+ x.lower() == "content-type" for x in http_equiv
+ ):
+ # HTML 4 style:
+ # <meta http-equiv="content-type" content="text/html;charset=utf8">
+ tag["content"] = ContentMetaAttributeValue(content)
+ substituted = True
+
+ return substituted
+
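+# Hedged illustration of the substitution's effect (assumed behaviour):
+#
+# from bs4 import BeautifulSoup
+# soup = BeautifulSoup('<meta charset="utf-8"/>', "html.parser")
+# soup.meta.encode("iso-8859-1")  # roughly b'<meta charset="iso-8859-1"/>'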
+
+class DetectsXMLParsedAsHTML(object):
+ """A mixin class for any class (a TreeBuilder, or some class used by a
+ TreeBuilder) that's in a position to detect whether an XML
+ document is being incorrectly parsed as HTML, and issue an
+ appropriate warning.
+
+ This requires being able to observe an incoming processing
+ instruction that might be an XML declaration, and also able to
+ observe tags as they're opened. If you can't do that for a given
+ `TreeBuilder`, there's a less reliable implementation based on
+ examining the raw markup.
+ """
+
+ #: Regular expression for seeing if string markup has an <html> tag.
+ LOOKS_LIKE_HTML: Pattern[str] = re.compile("<[^ +]html", re.I)
+
+ #: Regular expression for seeing if byte markup has an <html> tag.
+ LOOKS_LIKE_HTML_B: Pattern[bytes] = re.compile(b"<[^ +]html", re.I)
+
+ #: The start of an XML document string.
+ XML_PREFIX: str = "<?xml"
+
+ #: The start of an XML document bytestring.
+ XML_PREFIX_B: bytes = b"<?xml"
+
+ # This is typed as str, not `ProcessingInstruction`, because this
+ # check may be run before any Beautiful Soup objects are created.
+ _first_processing_instruction: Optional[str] #: :meta private:
+ _root_tag_name: Optional[str] #: :meta private:
+
+ @classmethod
+ def warn_if_markup_looks_like_xml(
+ cls, markup: Optional[_RawMarkup], stacklevel: int = 3
+ ) -> bool:
+ """Perform a check on some markup to see if it looks like XML
+ that's not XHTML. If so, issue a warning.
+
+ This is much less reliable than doing the check while parsing,
+ but some of the tree builders can't do that.
+
+ :param stacklevel: The stacklevel of the code calling this\
+ function.
+
+ :return: True if the markup looks like non-XHTML XML, False
+ otherwise.
+ """
+ if markup is None:
+ return False
+ markup = markup[:500]
+ if isinstance(markup, bytes):
+ markup_b: bytes = markup
+ looks_like_xml = markup_b.startswith(
+ cls.XML_PREFIX_B
+ ) and not cls.LOOKS_LIKE_HTML_B.search(markup)
+ else:
+ markup_s: str = markup
+ looks_like_xml = markup_s.startswith(
+ cls.XML_PREFIX
+ ) and not cls.LOOKS_LIKE_HTML.search(markup)
+
+ if looks_like_xml:
+ cls._warn(stacklevel=stacklevel + 2)
+ return True
+ return False
+
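+ # Hedged illustration (assumed behaviour): the check fires for markup
+ # that begins with an XML declaration but never mentions <html>:
+ #
+ # import warnings
+ # with warnings.catch_warnings(record=True) as caught:
+ #     warnings.simplefilter("always")
+ #     DetectsXMLParsedAsHTML.warn_if_markup_looks_like_xml(
+ #         '<?xml version="1.0"?><data/>'
+ #     )  # returns True; caught now holds an XMLParsedAsHTMLWarning
+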
+ @classmethod
+ def _warn(cls, stacklevel: int = 5) -> None:
+ """Issue a warning about XML being parsed as HTML."""
+ warnings.warn(
+ XMLParsedAsHTMLWarning.MESSAGE,
+ XMLParsedAsHTMLWarning,
+ stacklevel=stacklevel,
+ )
+
+ def _initialize_xml_detector(self) -> None:
+ """Call this method before parsing a document."""
+ self._first_processing_instruction = None
+ self._root_tag_name = None
+
+ def _document_might_be_xml(self, processing_instruction: str) -> None:
+ """Call this method when encountering an XML declaration, or a
+ "processing instruction" that might be an XML declaration.
+
+ This helps Beautiful Soup detect potential issues later, if
+ the XML document turns out to be a non-XHTML document that's
+ being parsed as XML.
+ """
+ if (
+ self._first_processing_instruction is not None
+ or self._root_tag_name is not None
+ ):
+ # The document has already started. Don't bother checking
+ # anymore.
+ return
+
+ self._first_processing_instruction = processing_instruction
+
+ # We won't know until we encounter the first tag whether or
+ # not this is actually a problem.
+
+ def _root_tag_encountered(self, name: str) -> None:
+ """Call this when you encounter the document's root tag.
+
+ This is where we actually check whether an XML document is
+ being incorrectly parsed as HTML, and issue the warning.
+ """
+ if self._root_tag_name is not None:
+ # This method was incorrectly called multiple times. Do
+ # nothing.
+ return
+
+ self._root_tag_name = name
+
+ if (
+ name != "html"
+ and self._first_processing_instruction is not None
+ and self._first_processing_instruction.lower().startswith("xml ")
+ ):
+ # We encountered an XML declaration and then a tag other
+ # than 'html'. This is a reliable indicator that a
+ # non-XHTML document is being parsed as XML.
+ self._warn(stacklevel=10)
+
+
+def register_treebuilders_from(module: ModuleType) -> None:
+ """Copy TreeBuilders from the given module into this module."""
+ this_module = sys.modules[__name__]
+ for name in module.__all__:
+ obj = getattr(module, name)
+
+ if issubclass(obj, TreeBuilder):
+ setattr(this_module, name, obj)
+ this_module.__all__.append(name)
+ # Register the builder while we're at it.
+ this_module.builder_registry.register(obj)
+
+
+# Builders are registered in reverse order of priority, so that custom
+# builder registrations will take precedence. In general, we want lxml
+# to take precedence over html5lib, because it's faster. And we only
+# want to use HTMLParser as a last resort.
+from . import _htmlparser # noqa: E402
+
+register_treebuilders_from(_htmlparser)
+try:
+ from . import _html5lib
+
+ register_treebuilders_from(_html5lib)
+except ImportError:
+ # They don't have html5lib installed.
+ pass
+try:
+ from . import _lxml
+
+ register_treebuilders_from(_lxml)
+except ImportError:
+ # They don't have lxml installed.
+ pass
diff --git a/venv/lib/python3.9/site-packages/bs4/builder/_html5lib.py b/venv/lib/python3.9/site-packages/bs4/builder/_html5lib.py
new file mode 100644
index 0000000..c90231f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/bs4/builder/_html5lib.py
@@ -0,0 +1,611 @@
+# Use of this source code is governed by the MIT license.
+__license__ = "MIT"
+
+__all__ = [
+ "HTML5TreeBuilder",
+]
+
+from typing import (
+ Any,
+ cast,
+ Dict,
+ Iterable,
+ Optional,
+ Sequence,
+ TYPE_CHECKING,
+ Tuple,
+ Union,
+)
+from typing_extensions import TypeAlias
+from bs4._typing import (
+ _AttributeValue,
+ _AttributeValues,
+ _Encoding,
+ _Encodings,
+ _NamespaceURL,
+ _RawMarkup,
+)
+
+import warnings
+from bs4.builder import (
+ DetectsXMLParsedAsHTML,
+ PERMISSIVE,
+ HTML,
+ HTML_5,
+ HTMLTreeBuilder,
+)
+from bs4.element import (
+ NamespacedAttribute,
+ PageElement,
+ nonwhitespace_re,
+)
+import html5lib
+from html5lib.constants import (
+ namespaces,
+)
+from bs4.element import (
+ Comment,
+ Doctype,
+ NavigableString,
+ Tag,
+)
+
+if TYPE_CHECKING:
+ from bs4 import BeautifulSoup
+
+from html5lib.treebuilders import base as treebuilder_base
+
+
+class HTML5TreeBuilder(HTMLTreeBuilder):
+ """Use `html5lib `_ to
+ build a tree.
+
+ Note that `HTML5TreeBuilder` does not support some common HTML
+ `TreeBuilder` features. Some of these features could theoretically
+ be implemented, but at the very least it's quite difficult,
+ because html5lib moves the parse tree around as it's being built.
+
+ Specifically:
+
+ * This `TreeBuilder` doesn't use different subclasses of
+ `NavigableString` (e.g. `Script`) based on the name of the tag
+ in which the string was found.
+ * You can't use a `SoupStrainer` to parse only part of a document.
+ """
+
+ NAME: str = "html5lib"
+
+ features: Iterable[str] = [NAME, PERMISSIVE, HTML_5, HTML]
+
+ #: html5lib can tell us which line number and position in the
+ #: original file is the source of an element.
+ TRACKS_LINE_NUMBERS: bool = True
+
+ underlying_builder: "TreeBuilderForHtml5lib" #: :meta private:
+ user_specified_encoding: Optional[_Encoding]
+
+ def prepare_markup(
+ self,
+ markup: _RawMarkup,
+ user_specified_encoding: Optional[_Encoding] = None,
+ document_declared_encoding: Optional[_Encoding] = None,
+ exclude_encodings: Optional[_Encodings] = None,
+ ) -> Iterable[Tuple[_RawMarkup, Optional[_Encoding], Optional[_Encoding], bool]]:
+ # Store the user-specified encoding for use later on.
+ self.user_specified_encoding = user_specified_encoding
+
+ # document_declared_encoding and exclude_encodings aren't used
+ # ATM because the html5lib TreeBuilder doesn't use
+ # UnicodeDammit.
+ for variable, name in (
+ (document_declared_encoding, "document_declared_encoding"),
+ (exclude_encodings, "exclude_encodings"),
+ ):
+ if variable:
+ warnings.warn(
+ f"You provided a value for {name}, but the html5lib tree builder doesn't support {name}.",
+ stacklevel=3,
+ )
+
+ # html5lib only parses HTML, so if it's given XML that's worth
+ # noting.
+ DetectsXMLParsedAsHTML.warn_if_markup_looks_like_xml(markup, stacklevel=3)
+
+ yield (markup, None, None, False)
+
+ # These methods are defined by Beautiful Soup.
+ def feed(self, markup: _RawMarkup) -> None:
+ """Run some incoming markup through some parsing process,
+ populating the `BeautifulSoup` object in `HTML5TreeBuilder.soup`.
+ """
+ if self.soup is not None and self.soup.parse_only is not None:
+ warnings.warn(
+ "You provided a value for parse_only, but the html5lib tree builder doesn't support parse_only. The entire document will be parsed.",
+ stacklevel=4,
+ )
+
+ # self.underlying_builder is probably None now, but it'll be set
+ # when html5lib calls self.create_treebuilder().
+ parser = html5lib.HTMLParser(tree=self.create_treebuilder)
+ assert self.underlying_builder is not None
+ self.underlying_builder.parser = parser
+ extra_kwargs = dict()
+ if not isinstance(markup, str):
+ # kwargs, specifically override_encoding, will eventually
+ # be passed in to html5lib's
+ # HTMLBinaryInputStream.__init__.
+ extra_kwargs["override_encoding"] = self.user_specified_encoding
+
+ doc = parser.parse(markup, **extra_kwargs) # type:ignore
+
+ # Set the character encoding detected by the tokenizer.
+ if isinstance(markup, str):
+ # We need to special-case this because html5lib sets
+ # charEncoding to UTF-8 if it gets Unicode input.
+ doc.original_encoding = None
+ else:
+ original_encoding = parser.tokenizer.stream.charEncoding[0] # type:ignore
+ # The encoding is an html5lib Encoding object. We want to
+ # use a string for compatibility with other tree builders.
+ original_encoding = original_encoding.name
+ doc.original_encoding = original_encoding
+ self.underlying_builder.parser = None
+
+ def create_treebuilder(
+ self, namespaceHTMLElements: bool
+ ) -> "TreeBuilderForHtml5lib":
+ """Called by html5lib to instantiate the kind of class it
+ calls a 'TreeBuilder'.
+
+ :param namespaceHTMLElements: Whether or not to namespace HTML elements.
+
+ :meta private:
+ """
+ self.underlying_builder = TreeBuilderForHtml5lib(
+ namespaceHTMLElements, self.soup, store_line_numbers=self.store_line_numbers
+ )
+ return self.underlying_builder
+
+ def test_fragment_to_document(self, fragment: str) -> str:
+ """See `TreeBuilder`."""
+ return "%s" % fragment
+
+
+class TreeBuilderForHtml5lib(treebuilder_base.TreeBuilder):
+ soup: "BeautifulSoup" #: :meta private:
+ parser: Optional[html5lib.HTMLParser] #: :meta private:
+
+ def __init__(
+ self,
+ namespaceHTMLElements: bool,
+ soup: Optional["BeautifulSoup"] = None,
+ store_line_numbers: bool = True,
+ **kwargs: Any,
+ ):
+ if soup:
+ self.soup = soup
+ else:
+ warnings.warn(
+ "The optionality of the 'soup' argument to the TreeBuilderForHtml5lib constructor is deprecated as of Beautiful Soup 4.13.0: 'soup' is now required. If you can't pass in a BeautifulSoup object here, or you get this warning and it seems mysterious to you, please contact the Beautiful Soup developer team for possible un-deprecation.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ from bs4 import BeautifulSoup
+
+ # TODO: Why is the parser 'html.parser' here? Using
+ # html5lib doesn't cause an infinite loop and is more
+ # accurate. Best to get rid of this entire section, I think.
+ self.soup = BeautifulSoup(
+ "", "html.parser", store_line_numbers=store_line_numbers, **kwargs
+ )
+ # TODO: What are **kwargs exactly? Should they be passed in
+ # here in addition to/instead of being passed to the BeautifulSoup
+ # constructor?
+ super(TreeBuilderForHtml5lib, self).__init__(namespaceHTMLElements)
+
+ # This will be set later to a real html5lib HTMLParser object,
+ # which we can use to track the current line number.
+ self.parser = None
+ self.store_line_numbers = store_line_numbers
+
+ def documentClass(self) -> "Element":
+ self.soup.reset()
+ return Element(self.soup, self.soup, None)
+
+ def insertDoctype(self, token: Dict[str, Any]) -> None:
+ name: str = cast(str, token["name"])
+ publicId: Optional[str] = cast(Optional[str], token["publicId"])
+ systemId: Optional[str] = cast(Optional[str], token["systemId"])
+
+ doctype = Doctype.for_name_and_ids(name, publicId, systemId)
+ self.soup.object_was_parsed(doctype)
+
+ def elementClass(self, name: str, namespace: str) -> "Element":
+ sourceline: Optional[int] = None
+ sourcepos: Optional[int] = None
+ if self.parser is not None and self.store_line_numbers:
+ # This represents the point immediately after the end of the
+ # tag. We don't know when the tag started, but we do know
+ # where it ended -- the character just before this one.
+ sourceline, sourcepos = self.parser.tokenizer.stream.position() # type:ignore
+ assert sourcepos is not None
+ sourcepos = sourcepos - 1
+ tag = self.soup.new_tag(
+ name, namespace, sourceline=sourceline, sourcepos=sourcepos
+ )
+
+ return Element(tag, self.soup, namespace)
+
+ def commentClass(self, data: str) -> "TextNode":
+ return TextNode(Comment(data), self.soup)
+
+ def fragmentClass(self) -> "Element":
+ """This is only used by html5lib HTMLParser.parseFragment(),
+ which is never used by Beautiful Soup, only by the html5lib
+ unit tests. Since we don't currently hook into those tests,
+ the implementation is left blank.
+ """
+ raise NotImplementedError()
+
+ def getFragment(self) -> "Element":
+ """This is only used by the html5lib unit tests. Since we
+ don't currently hook into those tests, the implementation is
+ left blank.
+ """
+ raise NotImplementedError()
+
+ def appendChild(self, node: "Element") -> None:
+ # TODO: This code is not covered by the BS4 tests, and
+ # apparently not triggered by the html5lib test suite either.
+ # But it doesn't seem test-specific and there are calls to it
+ # (or a method with the same name) all over html5lib, so I'm
+ # leaving the implementation in place rather than replacing it
+ # with NotImplementedError()
+ self.soup.append(node.element)
+
+ def getDocument(self) -> "BeautifulSoup":
+ return self.soup
+
+ def testSerializer(self, node: "Element") -> None:
+ """This is only used by the html5lib unit tests. Since we
+ don't currently hook into those tests, the implementation is
+ left blank.
+ """
+ raise NotImplementedError()
+
+
+class AttrList(object):
+ """Represents a Tag's attributes in a way compatible with html5lib."""
+
+ element: Tag
+ attrs: _AttributeValues
+
+ def __init__(self, element: Tag):
+ self.element = element
+ self.attrs = dict(self.element.attrs)
+
+ def __iter__(self) -> Iterable[Tuple[str, _AttributeValue]]:
+ return list(self.attrs.items()).__iter__()
+
+ def __setitem__(self, name: str, value: _AttributeValue) -> None:
+ # If this attribute is a multi-valued attribute for this element,
+ # turn its value into a list.
+ list_attr = self.element.cdata_list_attributes or {}
+ if name in list_attr.get("*", []) or (
+ self.element.name in list_attr
+ and name in list_attr.get(self.element.name, [])
+ ):
+ # A node that is being cloned may have already undergone
+ # this procedure. Check for this and skip it.
+ if not isinstance(value, list):
+ assert isinstance(value, str)
+ value = self.element.attribute_value_list_class(
+ nonwhitespace_re.findall(value)
+ )
+ self.element[name] = value
+
+ def items(self) -> Iterable[Tuple[str, _AttributeValue]]:
+ return list(self.attrs.items())
+
+ def keys(self) -> Iterable[str]:
+ return list(self.attrs.keys())
+
+ def __len__(self) -> int:
+ return len(self.attrs)
+
+ def __getitem__(self, name: str) -> _AttributeValue:
+ return self.attrs[name]
+
+ def __contains__(self, name: str) -> bool:
+ return name in list(self.attrs.keys())
+
+
+class BeautifulSoupNode(treebuilder_base.Node):
+ # A node can correspond to _either_ a Tag _or_ a NavigableString.
+ tag: Optional[Tag]
+ string: Optional[NavigableString]
+ soup: "BeautifulSoup"
+ namespace: Optional[_NamespaceURL]
+
+ @property
+ def element(self) -> PageElement:
+ assert self.tag is not None or self.string is not None
+ if self.tag is not None:
+ return self.tag
+ else:
+ assert self.string is not None
+ return self.string
+
+ @property
+ def nodeType(self) -> int:
+ """Return the html5lib constant corresponding to the type of
+ the underlying DOM object.
+
+ NOTE: This property is only accessed by the html5lib test
+ suite, not by Beautiful Soup proper.
+ """
+ raise NotImplementedError()
+
+ # TODO-TYPING: typeshed stubs are incorrect about this;
+ # cloneNode returns a new Node, not None.
+ def cloneNode(self) -> treebuilder_base.Node: # type:ignore
+ raise NotImplementedError()
+
+
+class Element(BeautifulSoupNode):
+ namespace: Optional[_NamespaceURL]
+
+ def __init__(
+ self, element: Tag, soup: "BeautifulSoup", namespace: Optional[_NamespaceURL]
+ ):
+ self.tag = element
+ self.string = None
+ self.soup = soup
+ self.namespace = namespace
+ treebuilder_base.Node.__init__(self, element.name)
+
+ def appendChild(self, node: "BeautifulSoupNode") -> None:
+ string_child: Optional[NavigableString] = None
+ child: PageElement
+ if type(node.string) is NavigableString:
+ # We check for NavigableString *only* because we want to avoid
+ # joining PreformattedStrings, such as Comments, with nearby strings.
+ string_child = child = node.string
+ else:
+ child = node.element
+ node.parent = self
+
+ if (
+ child is not None
+ and child.parent is not None
+ and not isinstance(child, str)
+ ):
+ node.element.extract()
+
+ if (
+ string_child is not None
+ and self.tag is not None and self.tag.contents
+ and type(self.tag.contents[-1]) is NavigableString
+ ):
+ # We are appending a string onto another string.
+ # TODO This has O(n^2) performance, for input like
+ # "aaa..."
+ old_element = self.tag.contents[-1]
+ new_element = self.soup.new_string(old_element + string_child)
+ old_element.replace_with(new_element)
+ self.soup._most_recent_element = new_element
+ else:
+ if isinstance(node, str):
+ # Create a brand new NavigableString from this string.
+ child = self.soup.new_string(node)
+
+ # Tell Beautiful Soup to act as if it parsed this element
+ # immediately after the parent's last descendant. (Or
+ # immediately after the parent, if it has no children.)
+ if self.tag is not None and self.tag.contents:
+ most_recent_element = self.tag._last_descendant(False)
+ elif self.element.next_element is not None:
+ # Something from further ahead in the parse tree is
+ # being inserted into this earlier element. This is
+ # very annoying because it means an expensive search
+ # for the last element in the tree.
+ most_recent_element = self.soup._last_descendant()
+ else:
+ most_recent_element = self.element
+
+ self.soup.object_was_parsed(
+ child, parent=self.tag, most_recent_element=most_recent_element
+ )
+
+ def getAttributes(self) -> AttrList:
+ assert self.tag is not None
+ return AttrList(self.tag)
+
+ # An HTML5lib attribute name may either be a single string,
+ # or a tuple (namespace, name).
+ _Html5libAttributeName: TypeAlias = Union[str, Tuple[str, str]]
+ # Now we can define the type this method accepts as a dictionary
+ # mapping those attribute names to single string values.
+ _Html5libAttributes: TypeAlias = Dict[_Html5libAttributeName, str]
+
+ def setAttributes(self, attributes: Optional[_Html5libAttributes]) -> None:
+ assert self.tag is not None
+ if attributes is not None and len(attributes) > 0:
+ # Replace any namespaced attributes with
+ # NamespacedAttribute objects.
+ for name, value in list(attributes.items()):
+ if isinstance(name, tuple):
+ new_name = NamespacedAttribute(*name)
+ del attributes[name]
+ attributes[new_name] = value
+
+ # We can now cast attributes to the type of Dict
+ # used by Beautiful Soup.
+ normalized_attributes = cast(_AttributeValues, attributes)
+
+ # Values for tags like 'class' came in as single strings;
+ # replace them with lists of strings as appropriate.
+ self.soup.builder._replace_cdata_list_attribute_values(
+ self.name, normalized_attributes
+ )
+
+ # Then set the attributes on the Tag associated with this
+ # BeautifulSoupNode.
+ for name, value_or_values in list(normalized_attributes.items()):
+ self.tag[name] = value_or_values
+
+ # The attributes may contain variables that need substitution.
+ # Call set_up_substitutions manually.
+ #
+ # The Tag constructor called this method when the Tag was created,
+ # but we just set/changed the attributes, so call it again.
+ self.soup.builder.set_up_substitutions(self.tag)
+
+ attributes = property(getAttributes, setAttributes)
+
+ def insertText(
+ self, data: str, insertBefore: Optional["BeautifulSoupNode"] = None
+ ) -> None:
+ text = TextNode(self.soup.new_string(data), self.soup)
+ if insertBefore:
+ self.insertBefore(text, insertBefore)
+ else:
+ self.appendChild(text)
+
+ def insertBefore(
+ self, node: "BeautifulSoupNode", refNode: "BeautifulSoupNode"
+ ) -> None:
+ assert self.tag is not None
+ index = self.tag.index(refNode.element)
+ if (
+ type(node.element) is NavigableString
+ and self.tag.contents
+ and type(self.tag.contents[index - 1]) is NavigableString
+ ):
+ # (See comments in appendChild)
+ old_node = self.tag.contents[index - 1]
+ assert type(old_node) is NavigableString
+ new_str = self.soup.new_string(old_node + node.element)
+ old_node.replace_with(new_str)
+ else:
+ self.tag.insert(index, node.element)
+ node.parent = self
+
+ def removeChild(self, node: "Element") -> None:
+ node.element.extract()
+
+ def reparentChildren(self, newParent: "Element") -> None:
+ """Move all of this tag's children into another tag."""
+ # print("MOVE", self.element.contents)
+ # print("FROM", self.element)
+ # print("TO", new_parent.element)
+
+ element = self.tag
+ assert element is not None
+ new_parent_element = newParent.tag
+ assert new_parent_element is not None
+ # Determine what this tag's next_element will be once all the children
+ # are removed.
+ final_next_element = element.next_sibling
+
+ new_parents_last_descendant = new_parent_element._last_descendant(False, False)
+ if len(new_parent_element.contents) > 0:
+ # The new parent already contains children. We will be
+ # appending this tag's children to the end.
+
+ # We can make this assertion since we know new_parent has
+ # children.
+ assert new_parents_last_descendant is not None
+ new_parents_last_child = new_parent_element.contents[-1]
+ new_parents_last_descendant_next_element = (
+ new_parents_last_descendant.next_element
+ )
+ else:
+ # The new parent contains no children.
+ new_parents_last_child = None
+ new_parents_last_descendant_next_element = new_parent_element.next_element
+
+ to_append = element.contents
+ if len(to_append) > 0:
+ # Set the first child's previous_element and previous_sibling
+ # to elements within the new parent
+ first_child = to_append[0]
+ if new_parents_last_descendant is not None:
+ first_child.previous_element = new_parents_last_descendant
+ else:
+ first_child.previous_element = new_parent_element
+ first_child.previous_sibling = new_parents_last_child
+ if new_parents_last_descendant is not None:
+ new_parents_last_descendant.next_element = first_child
+ else:
+ new_parent_element.next_element = first_child
+ if new_parents_last_child is not None:
+ new_parents_last_child.next_sibling = first_child
+
+ # Find the very last element being moved. It is now the
+ # parent's last descendant. It has no .next_sibling and
+ # its .next_element is whatever the previous last
+ # descendant had.
+ last_childs_last_descendant = to_append[-1]._last_descendant(
+ is_initialized=False, accept_self=True
+ )
+
+ # Since we passed accept_self=True into _last_descendant,
+ # there's no possibility that the result is None.
+ assert last_childs_last_descendant is not None
+ last_childs_last_descendant.next_element = (
+ new_parents_last_descendant_next_element
+ )
+ if new_parents_last_descendant_next_element is not None:
+ # TODO-COVERAGE: This code has no test coverage and
+ # I'm not sure how to get html5lib to go through this
+ # path, but it's just the other side of the previous
+ # line.
+ new_parents_last_descendant_next_element.previous_element = (
+ last_childs_last_descendant
+ )
+ last_childs_last_descendant.next_sibling = None
+
+ for child in to_append:
+ child.parent = new_parent_element
+ new_parent_element.contents.append(child)
+
+ # Now that this element has no children, change its .next_element.
+ element.contents = []
+ element.next_element = final_next_element
+
+ # print("DONE WITH MOVE")
+ # print("FROM", self.element)
+ # print("TO", new_parent_element)
+
+ # TODO-TYPING: typeshed stubs are incorrect about this;
+ # hasContent returns a boolean, not None.
+ def hasContent(self) -> bool: # type:ignore
+ return self.tag is None or len(self.tag.contents) > 0
+
+ # TODO-TYPING: typeshed stubs are incorrect about this;
+ # cloneNode returns a new Node, not None.
+ def cloneNode(self) -> treebuilder_base.Node: # type:ignore
+ assert self.tag is not None
+ tag = self.soup.new_tag(self.tag.name, self.namespace)
+ node = Element(tag, self.soup, self.namespace)
+ for key, value in self.attributes:
+ node.attributes[key] = value
+ return node
+
+ def getNameTuple(self) -> Tuple[Optional[_NamespaceURL], str]:
+ if self.namespace is None:
+ return namespaces["html"], self.name
+ else:
+ return self.namespace, self.name
+
+ nameTuple = property(getNameTuple)
+
+
+class TextNode(BeautifulSoupNode):
+
+ def __init__(self, element: NavigableString, soup: "BeautifulSoup"):
+ treebuilder_base.Node.__init__(self, None)
+ self.tag = None
+ self.string = element
+ self.soup = soup
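
The builder above is selected by passing its NAME feature string ("html5lib") to the BeautifulSoup constructor. A minimal usage sketch, assuming the html5lib package is installed and using made-up markup, showing the parse_only limitation noted in HTML5TreeBuilder.feed():

    from bs4 import BeautifulSoup, SoupStrainer

    markup = "<p>one</p><p>two</p>"

    # Select the html5lib tree builder by its feature name.
    soup = BeautifulSoup(markup, "html5lib")
    print(soup.find_all("p")[0].text)  # "one"

    # parse_only is not supported by this builder: feed() emits a warning
    # and the whole document is parsed anyway.
    soup = BeautifulSoup(markup, "html5lib", parse_only=SoupStrainer("p"))
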
diff --git a/venv/lib/python3.9/site-packages/bs4/builder/_htmlparser.py b/venv/lib/python3.9/site-packages/bs4/builder/_htmlparser.py
new file mode 100644
index 0000000..5b4190d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/bs4/builder/_htmlparser.py
@@ -0,0 +1,474 @@
+# encoding: utf-8
+"""Use the HTMLParser library to parse HTML files that aren't too bad."""
+from __future__ import annotations
+
+# Use of this source code is governed by the MIT license.
+__license__ = "MIT"
+
+__all__ = [
+ "HTMLParserTreeBuilder",
+]
+
+from html.parser import HTMLParser
+
+from typing import (
+ Any,
+ Callable,
+ cast,
+ Dict,
+ Iterable,
+ List,
+ Optional,
+ TYPE_CHECKING,
+ Tuple,
+ Type,
+ Union,
+)
+
+from bs4.element import (
+ AttributeDict,
+ CData,
+ Comment,
+ Declaration,
+ Doctype,
+ ProcessingInstruction,
+)
+from bs4.dammit import EntitySubstitution, UnicodeDammit
+
+from bs4.builder import (
+ DetectsXMLParsedAsHTML,
+ HTML,
+ HTMLTreeBuilder,
+ STRICT,
+)
+
+from bs4.exceptions import ParserRejectedMarkup
+
+if TYPE_CHECKING:
+ from bs4 import BeautifulSoup
+ from bs4.element import NavigableString
+ from bs4._typing import (
+ _Encoding,
+ _Encodings,
+ _RawMarkup,
+ )
+
+HTMLPARSER = "html.parser"
+
+_DuplicateAttributeHandler = Callable[[Dict[str, str], str, str], None]
+
+
+class BeautifulSoupHTMLParser(HTMLParser, DetectsXMLParsedAsHTML):
+ #: Constant to handle duplicate attributes by replacing earlier values
+ #: with later ones.
+ REPLACE: str = "replace"
+
+ #: Constant to handle duplicate attributes by ignoring later values
+ #: and keeping the earlier ones.
+ IGNORE: str = "ignore"
+
+ """A subclass of the Python standard library's HTMLParser class, which
+ listens for HTMLParser events and translates them into calls
+ to Beautiful Soup's tree construction API.
+
+ :param on_duplicate_attribute: A strategy for what to do if a
+ tag includes the same attribute more than once. Accepted
+ values are: REPLACE (replace earlier values with later
+ ones, the default), IGNORE (keep the earliest value
+ encountered), or a callable. A callable must take three
+ arguments: the dictionary of attributes already processed,
+ the name of the duplicate attribute, and the most recent value
+ encountered.
+ """
+
+ def __init__(
+ self,
+ soup: BeautifulSoup,
+ *args: Any,
+ on_duplicate_attribute: Union[str, _DuplicateAttributeHandler] = REPLACE,
+ **kwargs: Any,
+ ):
+ self.soup = soup
+ self.on_duplicate_attribute = on_duplicate_attribute
+ self.attribute_dict_class = soup.builder.attribute_dict_class
+ HTMLParser.__init__(self, *args, **kwargs)
+
+ # Keep a list of empty-element tags that were encountered
+ # without an explicit closing tag. If we encounter a closing tag
+ # of this type, we'll associate it with one of those entries.
+ #
+ # This isn't a stack because we don't care about the
+ # order. It's a list of closing tags we've already handled and
+ # will ignore, assuming they ever show up.
+ self.already_closed_empty_element = []
+
+ self._initialize_xml_detector()
+
+ on_duplicate_attribute: Union[str, _DuplicateAttributeHandler]
+ already_closed_empty_element: List[str]
+ soup: BeautifulSoup
+
+ def error(self, message: str) -> None:
+ # NOTE: This method is required so long as Python 3.9 is
+ # supported. The corresponding code is removed from HTMLParser
+ # in 3.5, but not removed from ParserBase until 3.10.
+ # https://github.com/python/cpython/issues/76025
+ #
+ # The original implementation turned the error into a warning,
+ # but in every case I discovered, this made HTMLParser
+ # immediately crash with an error message that was less
+ # helpful than the warning. The new implementation makes it
+ # more clear that html.parser just can't parse this
+ # markup. The 3.10 implementation does the same, though it
+ # raises AssertionError rather than calling a method. (We
+ # catch this error and wrap it in a ParserRejectedMarkup.)
+ raise ParserRejectedMarkup(message)
+
+ def handle_startendtag(
+ self, tag: str, attrs: List[Tuple[str, Optional[str]]]
+ ) -> None:
+ """Handle an incoming empty-element tag.
+
+ html.parser only calls this method when the markup looks like
+ <tag/>.
+ """
+ # `handle_empty_element` tells handle_starttag not to close the tag
+ # just because its name matches a known empty-element tag. We
+ # know that this is an empty-element tag, and we want to call
+ # handle_endtag ourselves.
+ self.handle_starttag(tag, attrs, handle_empty_element=False)
+ self.handle_endtag(tag)
+
+ def handle_starttag(
+ self,
+ tag: str,
+ attrs: List[Tuple[str, Optional[str]]],
+ handle_empty_element: bool = True,
+ ) -> None:
+ """Handle an opening tag, e.g. ''
+
+ :param handle_empty_element: True if this tag is known to be
+ an empty-element tag (i.e. there is not expected to be any
+ closing tag).
+ """
+ # TODO: handle namespaces here?
+ attr_dict: AttributeDict = self.attribute_dict_class()
+ for key, value in attrs:
+ # Change None attribute values to the empty string
+ # for consistency with the other tree builders.
+ if value is None:
+ value = ""
+ if key in attr_dict:
+ # A single attribute shows up multiple times in this
+ # tag. How to handle it depends on the
+ # on_duplicate_attribute setting.
+ on_dupe = self.on_duplicate_attribute
+ if on_dupe == self.IGNORE:
+ pass
+ elif on_dupe in (None, self.REPLACE):
+ attr_dict[key] = value
+ else:
+ on_dupe = cast(_DuplicateAttributeHandler, on_dupe)
+ on_dupe(attr_dict, key, value)
+ else:
+ attr_dict[key] = value
+ # print("START", tag)
+ sourceline: Optional[int]
+ sourcepos: Optional[int]
+ if self.soup.builder.store_line_numbers:
+ sourceline, sourcepos = self.getpos()
+ else:
+ sourceline = sourcepos = None
+ tagObj = self.soup.handle_starttag(
+ tag, None, None, attr_dict, sourceline=sourceline, sourcepos=sourcepos
+ )
+ if tagObj is not None and tagObj.is_empty_element and handle_empty_element:
+ # Unlike other parsers, html.parser doesn't send separate end tag
+ # events for empty-element tags. (It's handled in
+ # handle_startendtag, but only if the original markup looked like
+ # <tag/>.)
+ #
+ # So we need to call handle_endtag() ourselves. Since we
+ # know the start event is identical to the end event, we
+ # don't want handle_endtag() to cross off any previous end
+ # events for tags of this name.
+ self.handle_endtag(tag, check_already_closed=False)
+
+ # But we might encounter an explicit closing tag for this tag
+ # later on. If so, we want to ignore it.
+ self.already_closed_empty_element.append(tag)
+
+ if self._root_tag_name is None:
+ self._root_tag_encountered(tag)
+
+ def handle_endtag(self, tag: str, check_already_closed: bool = True) -> None:
+ """Handle a closing tag, e.g. ' '
+
+ :param tag: A tag name.
+ :param check_already_closed: True if this tag is expected to
+ be the closing portion of an empty-element tag,
+ e.g. '<br></br>'.
+ """
+ # print("END", tag)
+ if check_already_closed and tag in self.already_closed_empty_element:
+ # This is a redundant end tag for an empty-element tag.
+ # We've already called handle_endtag() for it, so just
+ # check it off the list.
+ # print("ALREADY CLOSED", tag)
+ self.already_closed_empty_element.remove(tag)
+ else:
+ self.soup.handle_endtag(tag)
+
+ def handle_data(self, data: str) -> None:
+ """Handle some textual data that shows up between tags."""
+ self.soup.handle_data(data)
+
+ def handle_charref(self, name: str) -> None:
+ """Handle a numeric character reference by converting it to the
+ corresponding Unicode character and treating it as textual
+ data.
+
+ :param name: Character number, possibly in hexadecimal.
+ """
+ # TODO: This was originally a workaround for a bug in
+ # HTMLParser. (http://bugs.python.org/issue13633) The bug has
+ # been fixed, but removing this code still makes some
+ # Beautiful Soup tests fail. This needs investigation.
+ if name.startswith("x"):
+ real_name = int(name.lstrip("x"), 16)
+ elif name.startswith("X"):
+ real_name = int(name.lstrip("X"), 16)
+ else:
+ real_name = int(name)
+
+ data = None
+ if real_name < 256:
+ # HTML numeric entities are supposed to reference Unicode
+ # code points, but sometimes they reference code points in
+ # some other encoding (ahem, Windows-1252). E.g. &#147;
+ # instead of &#x201C; for LEFT DOUBLE QUOTATION MARK. This
+ # code tries to detect this situation and compensate.
+ for encoding in (self.soup.original_encoding, "windows-1252"):
+ if not encoding:
+ continue
+ try:
+ data = bytearray([real_name]).decode(encoding)
+ except UnicodeDecodeError:
+ pass
+ if not data:
+ try:
+ data = chr(real_name)
+ except (ValueError, OverflowError):
+ pass
+ data = data or "\N{REPLACEMENT CHARACTER}"
+ self.handle_data(data)
+
+ def handle_entityref(self, name: str) -> None:
+ """Handle a named entity reference by converting it to the
+ corresponding Unicode character(s) and treating it as textual
+ data.
+
+ :param name: Name of the entity reference.
+ """
+ character = EntitySubstitution.HTML_ENTITY_TO_CHARACTER.get(name)
+ if character is not None:
+ data = character
+ else:
+ # If this were XML, it would be ambiguous whether "&foo"
+ # was a character entity reference with a missing
+ # semicolon or the literal string "&foo". Since this is
+ # HTML, we have a complete list of all character entity references,
+ # and this one wasn't found, so assume it's the literal string "&foo".
+ data = "&%s" % name
+ self.handle_data(data)
+
+ def handle_comment(self, data: str) -> None:
+ """Handle an HTML comment.
+
+ :param data: The text of the comment.
+ """
+ self.soup.endData()
+ self.soup.handle_data(data)
+ self.soup.endData(Comment)
+
+ def handle_decl(self, decl: str) -> None:
+ """Handle a DOCTYPE declaration.
+
+ :param decl: The text of the declaration.
+ """
+ self.soup.endData()
+ decl = decl[len("DOCTYPE ") :]
+ self.soup.handle_data(decl)
+ self.soup.endData(Doctype)
+
+ def unknown_decl(self, data: str) -> None:
+ """Handle a declaration of unknown type -- probably a CDATA block.
+
+ :param data: The text of the declaration.
+ """
+ cls: Type[NavigableString]
+ if data.upper().startswith("CDATA["):
+ cls = CData
+ data = data[len("CDATA[") :]
+ else:
+ cls = Declaration
+ self.soup.endData()
+ self.soup.handle_data(data)
+ self.soup.endData(cls)
+
+ def handle_pi(self, data: str) -> None:
+ """Handle a processing instruction.
+
+ :param data: The text of the instruction.
+ """
+ self.soup.endData()
+ self.soup.handle_data(data)
+ self._document_might_be_xml(data)
+ self.soup.endData(ProcessingInstruction)
+
+
+class HTMLParserTreeBuilder(HTMLTreeBuilder):
+ """A Beautiful soup `bs4.builder.TreeBuilder` that uses the
+ :py:class:`html.parser.HTMLParser` parser, found in the Python
+ standard library.
+
+ """
+
+ is_xml: bool = False
+ picklable: bool = True
+ NAME: str = HTMLPARSER
+ features: Iterable[str] = [NAME, HTML, STRICT]
+ parser_args: Tuple[Iterable[Any], Dict[str, Any]]
+
+ #: The html.parser knows which line number and position in the
+ #: original file is the source of an element.
+ TRACKS_LINE_NUMBERS: bool = True
+
+ def __init__(
+ self,
+ parser_args: Optional[Iterable[Any]] = None,
+ parser_kwargs: Optional[Dict[str, Any]] = None,
+ **kwargs: Any,
+ ):
+ """Constructor.
+
+ :param parser_args: Positional arguments to pass into
+ the BeautifulSoupHTMLParser constructor, once it's
+ invoked.
+ :param parser_kwargs: Keyword arguments to pass into
+ the BeautifulSoupHTMLParser constructor, once it's
+ invoked.
+ :param kwargs: Keyword arguments for the superclass constructor.
+ """
+ # Some keyword arguments will be pulled out of kwargs and placed
+ # into parser_kwargs.
+ extra_parser_kwargs = dict()
+ for arg in ("on_duplicate_attribute",):
+ if arg in kwargs:
+ value = kwargs.pop(arg)
+ extra_parser_kwargs[arg] = value
+ super(HTMLParserTreeBuilder, self).__init__(**kwargs)
+ parser_args = parser_args or []
+ parser_kwargs = parser_kwargs or {}
+ parser_kwargs.update(extra_parser_kwargs)
+ parser_kwargs["convert_charrefs"] = False
+ self.parser_args = (parser_args, parser_kwargs)
+
+ def prepare_markup(
+ self,
+ markup: _RawMarkup,
+ user_specified_encoding: Optional[_Encoding] = None,
+ document_declared_encoding: Optional[_Encoding] = None,
+ exclude_encodings: Optional[_Encodings] = None,
+ ) -> Iterable[Tuple[str, Optional[_Encoding], Optional[_Encoding], bool]]:
+ """Run any preliminary steps necessary to make incoming markup
+ acceptable to the parser.
+
+ :param markup: Some markup -- probably a bytestring.
+ :param user_specified_encoding: The user asked to try this encoding.
+ :param document_declared_encoding: The markup itself claims to be
+ in this encoding.
+ :param exclude_encodings: The user asked _not_ to try any of
+ these encodings.
+
+ :yield: A series of 4-tuples: (markup, encoding, declared encoding,
+ has undergone character replacement)
+
+ Each 4-tuple represents a strategy for parsing the document.
+ This TreeBuilder uses Unicode, Dammit to convert the markup
+ into Unicode, so the ``markup`` element of the tuple will
+ always be a string.
+ """
+ if isinstance(markup, str):
+ # Parse Unicode as-is.
+ yield (markup, None, None, False)
+ return
+
+ # Ask UnicodeDammit to sniff the most likely encoding.
+
+ known_definite_encodings: List[_Encoding] = []
+ if user_specified_encoding:
+ # This was provided by the end-user; treat it as a known
+ # definite encoding per the algorithm laid out in the
+ # HTML5 spec. (See the EncodingDetector class for
+ # details.)
+ known_definite_encodings.append(user_specified_encoding)
+
+ user_encodings: List[_Encoding] = []
+ if document_declared_encoding:
+ # This was found in the document; treat it as a slightly
+ # lower-priority user encoding.
+ user_encodings.append(document_declared_encoding)
+
+ dammit = UnicodeDammit(
+ markup,
+ known_definite_encodings=known_definite_encodings,
+ user_encodings=user_encodings,
+ is_html=True,
+ exclude_encodings=exclude_encodings,
+ )
+
+ if dammit.unicode_markup is None:
+ # In every case I've seen, Unicode, Dammit is able to
+ # convert the markup into Unicode, even if it needs to use
+ # REPLACEMENT CHARACTER. But there is a code path that
+ # could result in unicode_markup being None, and
+ # HTMLParser can only parse Unicode, so here we handle
+ # that code path.
+ raise ParserRejectedMarkup(
+ "Could not convert input to Unicode, and html.parser will not accept bytestrings."
+ )
+ else:
+ yield (
+ dammit.unicode_markup,
+ dammit.original_encoding,
+ dammit.declared_html_encoding,
+ dammit.contains_replacement_characters,
+ )
+
+ def feed(self, markup: _RawMarkup) -> None:
+ args, kwargs = self.parser_args
+
+ # HTMLParser.feed will only handle str, but
+ # BeautifulSoup.markup is allowed to be _RawMarkup, because
+ # it's set by the yield value of
+ # TreeBuilder.prepare_markup. Fortunately,
+ # HTMLParserTreeBuilder.prepare_markup always yields a str
+ # (UnicodeDammit.unicode_markup).
+ assert isinstance(markup, str)
+
+ # We know BeautifulSoup calls TreeBuilder.initialize_soup
+ # before calling feed(), so we can assume self.soup
+ # is set.
+ assert self.soup is not None
+ parser = BeautifulSoupHTMLParser(self.soup, *args, **kwargs)
+
+ try:
+ parser.feed(markup)
+ parser.close()
+ except AssertionError as e:
+ # html.parser raises AssertionError in rare cases to
+ # indicate a fatal problem with the markup, especially
+ # when there's an error in the doctype declaration.
+ raise ParserRejectedMarkup(e)
+ parser.already_closed_empty_element = []
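
The on_duplicate_attribute strategy described above is exposed through the BeautifulSoup constructor whenever the html.parser builder is used. A rough sketch of the three accepted forms; the markup and the accumulate helper are illustrative only:

    from bs4 import BeautifulSoup

    markup = '<a href="http://url1/" href="http://url2/">link</a>'

    # Default strategy (REPLACE): the later value wins.
    print(BeautifulSoup(markup, "html.parser").a["href"])  # http://url2/

    # IGNORE: keep the value encountered first.
    print(BeautifulSoup(markup, "html.parser",
                        on_duplicate_attribute="ignore").a["href"])  # http://url1/

    # A callable receives the attributes seen so far, the duplicate key,
    # and the most recent value (hypothetical helper).
    def accumulate(attrs, key, value):
        attrs[key] = attrs[key] + " " + value

    print(BeautifulSoup(markup, "html.parser",
                        on_duplicate_attribute=accumulate).a["href"])  # http://url1/ http://url2/
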
diff --git a/venv/lib/python3.9/site-packages/bs4/builder/_lxml.py b/venv/lib/python3.9/site-packages/bs4/builder/_lxml.py
new file mode 100644
index 0000000..1d3c084
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/bs4/builder/_lxml.py
@@ -0,0 +1,492 @@
+# encoding: utf-8
+from __future__ import annotations
+
+# Use of this source code is governed by the MIT license.
+__license__ = "MIT"
+
+__all__ = [
+ "LXMLTreeBuilderForXML",
+ "LXMLTreeBuilder",
+]
+
+
+from typing import (
+ Any,
+ Dict,
+ Iterable,
+ List,
+ Optional,
+ Set,
+ Tuple,
+ Type,
+ TYPE_CHECKING,
+ Union,
+)
+
+from io import BytesIO
+from io import StringIO
+
+from typing_extensions import TypeAlias
+
+from lxml import etree # type:ignore
+from bs4.element import (
+ AttributeDict,
+ XMLAttributeDict,
+ Comment,
+ Doctype,
+ NamespacedAttribute,
+ ProcessingInstruction,
+ XMLProcessingInstruction,
+)
+from bs4.builder import (
+ DetectsXMLParsedAsHTML,
+ FAST,
+ HTML,
+ HTMLTreeBuilder,
+ PERMISSIVE,
+ TreeBuilder,
+ XML,
+)
+from bs4.dammit import EncodingDetector
+from bs4.exceptions import ParserRejectedMarkup
+
+if TYPE_CHECKING:
+ from bs4._typing import (
+ _Encoding,
+ _Encodings,
+ _NamespacePrefix,
+ _NamespaceURL,
+ _NamespaceMapping,
+ _InvertedNamespaceMapping,
+ _RawMarkup,
+ )
+ from bs4 import BeautifulSoup
+
+LXML: str = "lxml"
+
+
+def _invert(d: dict[Any, Any]) -> dict[Any, Any]:
+ "Invert a dictionary."
+ return dict((v, k) for k, v in list(d.items()))
+
+
+_LXMLParser: TypeAlias = Union[etree.XMLParser, etree.HTMLParser]
+_ParserOrParserClass: TypeAlias = Union[
+ _LXMLParser, Type[etree.XMLParser], Type[etree.HTMLParser]
+]
+
+
+class LXMLTreeBuilderForXML(TreeBuilder):
+ DEFAULT_PARSER_CLASS: Type[etree.XMLParser] = etree.XMLParser
+
+ is_xml: bool = True
+
+ processing_instruction_class: Type[ProcessingInstruction]
+
+ NAME: str = "lxml-xml"
+ ALTERNATE_NAMES: Iterable[str] = ["xml"]
+
+ # Well, it's permissive by XML parser standards.
+ features: Iterable[str] = [NAME, LXML, XML, FAST, PERMISSIVE]
+
+ CHUNK_SIZE: int = 512
+
+ # This namespace mapping is specified in the XML Namespace
+ # standard.
+ DEFAULT_NSMAPS: _NamespaceMapping = dict(xml="http://www.w3.org/XML/1998/namespace")
+
+ DEFAULT_NSMAPS_INVERTED: _InvertedNamespaceMapping = _invert(DEFAULT_NSMAPS)
+
+ nsmaps: List[Optional[_InvertedNamespaceMapping]]
+ empty_element_tags: Optional[Set[str]]
+ parser: Any
+ _default_parser: Optional[etree.XMLParser]
+
+ # NOTE: If we parsed Element objects and looked at .sourceline,
+ # we'd be able to see the line numbers from the original document.
+ # But instead we build an XMLParser or HTMLParser object to serve
+ # as the target of parse messages, and those messages don't include
+ # line numbers.
+ # See: https://bugs.launchpad.net/lxml/+bug/1846906
+
+ def initialize_soup(self, soup: BeautifulSoup) -> None:
+ """Let the BeautifulSoup object know about the standard namespace
+ mapping.
+
+ :param soup: A `BeautifulSoup`.
+ """
+ # Beyond this point, self.soup is set, so we can assume (and
+ # assert) it's not None whenever necessary.
+ super(LXMLTreeBuilderForXML, self).initialize_soup(soup)
+ self._register_namespaces(self.DEFAULT_NSMAPS)
+
+ def _register_namespaces(self, mapping: Dict[str, str]) -> None:
+ """Let the BeautifulSoup object know about namespaces encountered
+ while parsing the document.
+
+ This might be useful later on when creating CSS selectors.
+
+ This will track (almost) all namespaces, even ones that were
+ only in scope for part of the document. If two namespaces have
+ the same prefix, only the first one encountered will be
+ tracked. Un-prefixed namespaces are not tracked.
+
+ :param mapping: A dictionary mapping namespace prefixes to URIs.
+ """
+ assert self.soup is not None
+ for key, value in list(mapping.items()):
+ # This is 'if key' and not 'if key is not None' because we
+ # don't track un-prefixed namespaces. Soupselect will
+ # treat an un-prefixed namespace as the default, which
+ # causes confusion in some cases.
+ if key and key not in self.soup._namespaces:
+ # Let the BeautifulSoup object know about a new namespace.
+ # If there are multiple namespaces defined with the same
+ # prefix, the first one in the document takes precedence.
+ self.soup._namespaces[key] = value
+
+ def default_parser(self, encoding: Optional[_Encoding]) -> _ParserOrParserClass:
+ """Find the default parser for the given encoding.
+
+ :return: Either a parser object or a class, which
+ will be instantiated with default arguments.
+ """
+ if self._default_parser is not None:
+ return self._default_parser
+ return self.DEFAULT_PARSER_CLASS(target=self, recover=True, encoding=encoding)
+
+ def parser_for(self, encoding: Optional[_Encoding]) -> _LXMLParser:
+ """Instantiate an appropriate parser for the given encoding.
+
+ :param encoding: A string.
+ :return: A parser object such as an `etree.XMLParser`.
+ """
+ # Use the default parser.
+ parser = self.default_parser(encoding)
+
+ if callable(parser):
+ # Instantiate the parser with default arguments
+ parser = parser(target=self, recover=True, encoding=encoding)
+ return parser
+
+ def __init__(
+ self,
+ parser: Optional[etree.XMLParser] = None,
+ empty_element_tags: Optional[Set[str]] = None,
+ **kwargs: Any,
+ ):
+ # TODO: Issue a warning if parser is present but not a
+ # callable, since that means there's no way to create new
+ # parsers for different encodings.
+ self._default_parser = parser
+ self.soup = None
+ self.nsmaps = [self.DEFAULT_NSMAPS_INVERTED]
+ self.active_namespace_prefixes = [dict(self.DEFAULT_NSMAPS)]
+ if self.is_xml:
+ self.processing_instruction_class = XMLProcessingInstruction
+ else:
+ self.processing_instruction_class = ProcessingInstruction
+
+ if "attribute_dict_class" not in kwargs:
+ kwargs["attribute_dict_class"] = XMLAttributeDict
+ super(LXMLTreeBuilderForXML, self).__init__(**kwargs)
+
+ def _getNsTag(self, tag: str) -> Tuple[Optional[str], str]:
+ # Split the namespace URL out of a fully-qualified lxml tag
+ # name. Copied from lxml's src/lxml/sax.py.
+ if tag[0] == "{" and "}" in tag:
+ namespace, name = tag[1:].split("}", 1)
+ return (namespace, name)
+ return (None, tag)
+
+ def prepare_markup(
+ self,
+ markup: _RawMarkup,
+ user_specified_encoding: Optional[_Encoding] = None,
+ document_declared_encoding: Optional[_Encoding] = None,
+ exclude_encodings: Optional[_Encodings] = None,
+ ) -> Iterable[
+ Tuple[Union[str, bytes], Optional[_Encoding], Optional[_Encoding], bool]
+ ]:
+ """Run any preliminary steps necessary to make incoming markup
+ acceptable to the parser.
+
+ lxml really wants to get a bytestring and convert it to
+ Unicode itself. So instead of using UnicodeDammit to convert
+ the bytestring to Unicode using different encodings, this
+ implementation uses EncodingDetector to iterate over the
+ encodings, and tell lxml to try to parse the document as each
+ one in turn.
+
+ :param markup: Some markup -- hopefully a bytestring.
+ :param user_specified_encoding: The user asked to try this encoding.
+ :param document_declared_encoding: The markup itself claims to be
+ in this encoding.
+ :param exclude_encodings: The user asked _not_ to try any of
+ these encodings.
+
+ :yield: A series of 4-tuples: (markup, encoding, declared encoding,
+ has undergone character replacement)
+
+ Each 4-tuple represents a strategy for converting the
+ document to Unicode and parsing it. Each strategy will be tried
+ in turn.
+ """
+ if not self.is_xml:
+ # We're in HTML mode, so if we're given XML, that's worth
+ # noting.
+ DetectsXMLParsedAsHTML.warn_if_markup_looks_like_xml(markup, stacklevel=3)
+
+ if isinstance(markup, str):
+ # We were given Unicode. Maybe lxml can parse Unicode on
+ # this system?
+
+ # TODO: This is a workaround for
+ # https://bugs.launchpad.net/lxml/+bug/1948551.
+ # We can remove it once the upstream issue is fixed.
+ if len(markup) > 0 and markup[0] == "\N{BYTE ORDER MARK}":
+ markup = markup[1:]
+ yield markup, None, document_declared_encoding, False
+
+ if isinstance(markup, str):
+ # No, apparently not. Convert the Unicode to UTF-8 and
+ # tell lxml to parse it as UTF-8.
+ yield (markup.encode("utf8"), "utf8", document_declared_encoding, False)
+
+ # Since the document was Unicode in the first place, there
+ # is no need to try any more strategies; we know this will
+ # work.
+ return
+
+ known_definite_encodings: List[_Encoding] = []
+ if user_specified_encoding:
+ # This was provided by the end-user; treat it as a known
+ # definite encoding per the algorithm laid out in the
+ # HTML5 spec. (See the EncodingDetector class for
+ # details.)
+ known_definite_encodings.append(user_specified_encoding)
+
+ user_encodings: List[_Encoding] = []
+ if document_declared_encoding:
+ # This was found in the document; treat it as a slightly
+ # lower-priority user encoding.
+ user_encodings.append(document_declared_encoding)
+
+ detector = EncodingDetector(
+ markup,
+ known_definite_encodings=known_definite_encodings,
+ user_encodings=user_encodings,
+ is_html=not self.is_xml,
+ exclude_encodings=exclude_encodings,
+ )
+ for encoding in detector.encodings:
+ yield (detector.markup, encoding, document_declared_encoding, False)
+
+ def feed(self, markup: _RawMarkup) -> None:
+ io: Union[BytesIO, StringIO]
+ if isinstance(markup, bytes):
+ io = BytesIO(markup)
+ elif isinstance(markup, str):
+ io = StringIO(markup)
+
+ # initialize_soup is called before feed, so we know this
+ # is not None.
+ assert self.soup is not None
+
+ # Call feed() at least once, even if the markup is empty,
+ # or the parser won't be initialized.
+ data = io.read(self.CHUNK_SIZE)
+ try:
+ self.parser = self.parser_for(self.soup.original_encoding)
+ self.parser.feed(data)
+ while len(data) != 0:
+ # Now call feed() on the rest of the data, chunk by chunk.
+ data = io.read(self.CHUNK_SIZE)
+ if len(data) != 0:
+ self.parser.feed(data)
+ self.parser.close()
+ except (UnicodeDecodeError, LookupError, etree.ParserError) as e:
+ raise ParserRejectedMarkup(e)
+
+ def close(self) -> None:
+ self.nsmaps = [self.DEFAULT_NSMAPS_INVERTED]
+
+ def start(
+ self,
+ tag: str | bytes,
+ attrib: Dict[str | bytes, str | bytes],
+ nsmap: _NamespaceMapping = {},
+ ) -> None:
+ # This is called by lxml code as a result of calling
+ # BeautifulSoup.feed(), and we know self.soup is set by the time feed()
+ # is called.
+ assert self.soup is not None
+ assert isinstance(tag, str)
+
+ # We need to recreate the attribute dict for three
+ # reasons. First, for type checking, so we can assert there
+ # are no bytestrings in the keys or values. Second, because we
+ # need a mutable dict--lxml might send us an immutable
+ # dictproxy. Third, so we can handle namespaced attribute
+ # names by converting the keys to NamespacedAttributes.
+ new_attrib: Dict[Union[str, NamespacedAttribute], str] = (
+ self.attribute_dict_class()
+ )
+ for k, v in attrib.items():
+ assert isinstance(k, str)
+ assert isinstance(v, str)
+ new_attrib[k] = v
+
+ nsprefix: Optional[_NamespacePrefix] = None
+ namespace: Optional[_NamespaceURL] = None
+ # Invert each namespace map as it comes in.
+ if len(nsmap) == 0 and len(self.nsmaps) > 1:
+ # There are no new namespaces for this tag, but
+ # non-default namespaces are in play, so we need a
+ # separate tag stack to know when they end.
+ self.nsmaps.append(None)
+ elif len(nsmap) > 0:
+ # A new namespace mapping has come into play.
+
+ # First, let the BeautifulSoup object know about it.
+ self._register_namespaces(nsmap)
+
+ # Then, add it to our running list of inverted namespace
+ # mappings.
+ self.nsmaps.append(_invert(nsmap))
+
+ # The currently active namespace prefixes have
+ # changed. Calculate the new mapping so it can be stored
+ # with all Tag objects created while these prefixes are in
+ # scope.
+ current_mapping = dict(self.active_namespace_prefixes[-1])
+ current_mapping.update(nsmap)
+
+ # We should not track un-prefixed namespaces as we can only hold one
+ # and it will be recognized as the default namespace by soupsieve,
+ # which may be confusing in some situations.
+ if "" in current_mapping:
+ del current_mapping[""]
+ self.active_namespace_prefixes.append(current_mapping)
+
+ # Also treat the namespace mapping as a set of attributes on the
+ # tag, so we can recreate it later.
+ for prefix, namespace in list(nsmap.items()):
+ attribute = NamespacedAttribute(
+ "xmlns", prefix, "http://www.w3.org/2000/xmlns/"
+ )
+ new_attrib[attribute] = namespace
+
+ # Namespaces are in play. Find any attributes that came in
+ # from lxml with namespaces attached to their names, and
+ # turn them into NamespacedAttribute objects.
+ final_attrib: AttributeDict = self.attribute_dict_class()
+ for attr, value in list(new_attrib.items()):
+ namespace, attr = self._getNsTag(attr)
+ if namespace is None:
+ final_attrib[attr] = value
+ else:
+ nsprefix = self._prefix_for_namespace(namespace)
+ attr = NamespacedAttribute(nsprefix, attr, namespace)
+ final_attrib[attr] = value
+
+ namespace, tag = self._getNsTag(tag)
+ nsprefix = self._prefix_for_namespace(namespace)
+ self.soup.handle_starttag(
+ tag,
+ namespace,
+ nsprefix,
+ final_attrib,
+ namespaces=self.active_namespace_prefixes[-1],
+ )
+
+ def _prefix_for_namespace(
+ self, namespace: Optional[_NamespaceURL]
+ ) -> Optional[_NamespacePrefix]:
+ """Find the currently active prefix for the given namespace."""
+ if namespace is None:
+ return None
+ for inverted_nsmap in reversed(self.nsmaps):
+ if inverted_nsmap is not None and namespace in inverted_nsmap:
+ return inverted_nsmap[namespace]
+ return None
+
+ def end(self, tag: str | bytes) -> None:
+ assert self.soup is not None
+ assert isinstance(tag, str)
+ self.soup.endData()
+ namespace, tag = self._getNsTag(tag)
+ nsprefix = None
+ if namespace is not None:
+ for inverted_nsmap in reversed(self.nsmaps):
+ if inverted_nsmap is not None and namespace in inverted_nsmap:
+ nsprefix = inverted_nsmap[namespace]
+ break
+ self.soup.handle_endtag(tag, nsprefix)
+ if len(self.nsmaps) > 1:
+ # This tag, or one of its parents, introduced a namespace
+ # mapping, so pop it off the stack.
+ out_of_scope_nsmap = self.nsmaps.pop()
+
+ if out_of_scope_nsmap is not None:
+ # This tag introduced a namespace mapping which is no
+ # longer in scope. Recalculate the currently active
+ # namespace prefixes.
+ self.active_namespace_prefixes.pop()
+
+ def pi(self, target: str, data: str) -> None:
+ assert self.soup is not None
+ self.soup.endData()
+ data = target + " " + data
+ self.soup.handle_data(data)
+ self.soup.endData(self.processing_instruction_class)
+
+ def data(self, data: str | bytes) -> None:
+ assert self.soup is not None
+ assert isinstance(data, str)
+ self.soup.handle_data(data)
+
+ def doctype(self, name: str, pubid: str, system: str) -> None:
+ assert self.soup is not None
+ self.soup.endData()
+ doctype_string = Doctype._string_for_name_and_ids(name, pubid, system)
+ self.soup.handle_data(doctype_string)
+ self.soup.endData(containerClass=Doctype)
+
+ def comment(self, text: str | bytes) -> None:
+ "Handle comments as Comment objects."
+ assert self.soup is not None
+ assert isinstance(text, str)
+ self.soup.endData()
+ self.soup.handle_data(text)
+ self.soup.endData(Comment)
+
+ def test_fragment_to_document(self, fragment: str) -> str:
+ """See `TreeBuilder`."""
+ return '<?xml version="1.0" encoding="utf-8"?>\n%s' % fragment
+
+
+class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML):
+ NAME: str = LXML
+ ALTERNATE_NAMES: Iterable[str] = ["lxml-html"]
+
+ features: Iterable[str] = list(ALTERNATE_NAMES) + [NAME, HTML, FAST, PERMISSIVE]
+ is_xml: bool = False
+
+ def default_parser(self, encoding: Optional[_Encoding]) -> _ParserOrParserClass:
+ return etree.HTMLParser
+
+ def feed(self, markup: _RawMarkup) -> None:
+ # We know self.soup is set by the time feed() is called.
+ assert self.soup is not None
+ encoding = self.soup.original_encoding
+ try:
+ self.parser = self.parser_for(encoding)
+ self.parser.feed(markup)
+ self.parser.close()
+ except (UnicodeDecodeError, LookupError, etree.ParserError) as e:
+ raise ParserRejectedMarkup(e)
+
+ def test_fragment_to_document(self, fragment: str) -> str:
+ """See `TreeBuilder`."""
+ return "%s" % fragment
diff --git a/venv/lib/python3.9/site-packages/bs4/css.py b/venv/lib/python3.9/site-packages/bs4/css.py
new file mode 100644
index 0000000..75ad998
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/bs4/css.py
@@ -0,0 +1,339 @@
+"""Integration code for CSS selectors using `Soup Sieve `_ (pypi: ``soupsieve``).
+
+Acquire a `CSS` object through the `element.Tag.css` attribute of
+the starting point of your CSS selector, or (if you want to run a
+selector against the entire document) of the `BeautifulSoup` object
+itself.
+
+The main advantage of doing this instead of using ``soupsieve``
+functions is that you don't need to keep passing the `element.Tag` to be
+selected against, since the `CSS` object is permanently scoped to that
+`element.Tag`.
+
+"""
+
+from __future__ import annotations
+
+from types import ModuleType
+from typing import (
+ Any,
+ cast,
+ Iterable,
+ Iterator,
+ MutableSequence,
+ Optional,
+ TYPE_CHECKING,
+)
+import warnings
+from bs4._typing import _NamespaceMapping
+
+if TYPE_CHECKING:
+ from soupsieve import SoupSieve
+ from bs4 import element
+ from bs4.element import ResultSet, Tag
+
+soupsieve: Optional[ModuleType]
+try:
+ import soupsieve
+except ImportError:
+ soupsieve = None
+ warnings.warn(
+ "The soupsieve package is not installed. CSS selectors cannot be used."
+ )
+
+
+class CSS(object):
+ """A proxy object against the ``soupsieve`` library, to simplify its
+ CSS selector API.
+
+ You don't need to instantiate this class yourself; instead, use
+ `element.Tag.css`.
+
+ :param tag: All CSS selectors run by this object will use this as
+ their starting point.
+
+ :param api: An optional drop-in replacement for the ``soupsieve`` module,
+ intended for use in unit tests.
+ """
+
+ def __init__(self, tag: element.Tag, api: Optional[ModuleType] = None):
+ if api is None:
+ api = soupsieve
+ if api is None:
+ raise NotImplementedError(
+ "Cannot execute CSS selectors because the soupsieve package is not installed."
+ )
+ self.api = api
+ self.tag = tag
+
+ def escape(self, ident: str) -> str:
+ """Escape a CSS identifier.
+
+ This is a simple wrapper around ``soupsieve.escape()``. See the
+ documentation for that function for more information.
+ """
+ if soupsieve is None:
+ raise NotImplementedError(
+ "Cannot escape CSS identifiers because the soupsieve package is not installed."
+ )
+ return cast(str, self.api.escape(ident))
+
+ def _ns(
+ self, ns: Optional[_NamespaceMapping], select: str
+ ) -> Optional[_NamespaceMapping]:
+ """Normalize a dictionary of namespaces."""
+ if not isinstance(select, self.api.SoupSieve) and ns is None:
+ # If the selector is a precompiled pattern, it already has
+ # a namespace context compiled in, which cannot be
+ # replaced.
+ ns = self.tag._namespaces
+ return ns
+
+ def _rs(self, results: MutableSequence[Tag]) -> ResultSet[Tag]:
+ """Normalize a list of results to a py:class:`ResultSet`.
+
+ A py:class:`ResultSet` is more consistent with the rest of
+ Beautiful Soup's API, and :py:meth:`ResultSet.__getattr__` has
+ a helpful error message if you try to treat a list of results
+ as a single result (a common mistake).
+ """
+ # Import here to avoid circular import
+ from bs4 import ResultSet
+
+ return ResultSet(None, results)
+
+ def compile(
+ self,
+ select: str,
+ namespaces: Optional[_NamespaceMapping] = None,
+ flags: int = 0,
+ **kwargs: Any,
+ ) -> SoupSieve:
+ """Pre-compile a selector and return the compiled object.
+
+ :param selector: A CSS selector.
+
+ :param namespaces: A dictionary mapping namespace prefixes
+ used in the CSS selector to namespace URIs. By default,
+ Beautiful Soup will use the prefixes it encountered while
+ parsing the document.
+
+ :param flags: Flags to be passed into Soup Sieve's
+ ``soupsieve.compile()`` method.
+
+ :param kwargs: Keyword arguments to be passed into Soup Sieve's
+ ``soupsieve.compile()`` method.
+
+ :return: A precompiled selector object.
+ :rtype: soupsieve.SoupSieve
+ """
+ return self.api.compile(select, self._ns(namespaces, select), flags, **kwargs)
+
+ def select_one(
+ self,
+ select: str,
+ namespaces: Optional[_NamespaceMapping] = None,
+ flags: int = 0,
+ **kwargs: Any,
+ ) -> element.Tag | None:
+ """Perform a CSS selection operation on the current Tag and return the
+ first result, if any.
+
+ This uses the Soup Sieve library. For more information, see
+ that library's documentation for the ``soupsieve.select_one()`` method.
+
+ :param selector: A CSS selector.
+
+ :param namespaces: A dictionary mapping namespace prefixes
+ used in the CSS selector to namespace URIs. By default,
+ Beautiful Soup will use the prefixes it encountered while
+ parsing the document.
+
+ :param flags: Flags to be passed into Soup Sieve's
+ ``soupsieve.select_one()`` method.
+
+ :param kwargs: Keyword arguments to be passed into Soup Sieve's
+ ``soupsieve.select_one()`` method.
+ """
+ return self.api.select_one(
+ select, self.tag, self._ns(namespaces, select), flags, **kwargs
+ )
+
+ def select(
+ self,
+ select: str,
+ namespaces: Optional[_NamespaceMapping] = None,
+ limit: int = 0,
+ flags: int = 0,
+ **kwargs: Any,
+ ) -> ResultSet[element.Tag]:
+ """Perform a CSS selection operation on the current `element.Tag`.
+
+ This uses the Soup Sieve library. For more information, see
+ that library's documentation for the ``soupsieve.select()`` method.
+
+ :param selector: A CSS selector.
+
+ :param namespaces: A dictionary mapping namespace prefixes
+ used in the CSS selector to namespace URIs. By default,
+ Beautiful Soup will pass in the prefixes it encountered while
+ parsing the document.
+
+ :param limit: After finding this number of results, stop looking.
+
+ :param flags: Flags to be passed into Soup Sieve's
+ ``soupsieve.select()`` method.
+
+ :param kwargs: Keyword arguments to be passed into Soup Sieve's
+ ``soupsieve.select()`` method.
+ """
+ if limit is None:
+ limit = 0
+
+ return self._rs(
+ self.api.select(
+ select, self.tag, self._ns(namespaces, select), limit, flags, **kwargs
+ )
+ )
+
+ def iselect(
+ self,
+ select: str,
+ namespaces: Optional[_NamespaceMapping] = None,
+ limit: int = 0,
+ flags: int = 0,
+ **kwargs: Any,
+ ) -> Iterator[element.Tag]:
+ """Perform a CSS selection operation on the current `element.Tag`.
+
+ This uses the Soup Sieve library. For more information, see
+ that library's documentation for the ``soupsieve.iselect()``
+ method. It is the same as select(), but it returns a generator
+ instead of a list.
+
+ :param selector: A string containing a CSS selector.
+
+ :param namespaces: A dictionary mapping namespace prefixes
+ used in the CSS selector to namespace URIs. By default,
+ Beautiful Soup will pass in the prefixes it encountered while
+ parsing the document.
+
+ :param limit: After finding this number of results, stop looking.
+
+ :param flags: Flags to be passed into Soup Sieve's
+ ``soupsieve.iselect()`` method.
+
+ :param kwargs: Keyword arguments to be passed into Soup Sieve's
+ ``soupsieve.iselect()`` method.
+ """
+ return self.api.iselect(
+ select, self.tag, self._ns(namespaces, select), limit, flags, **kwargs
+ )
+
+ def closest(
+ self,
+ select: str,
+ namespaces: Optional[_NamespaceMapping] = None,
+ flags: int = 0,
+ **kwargs: Any,
+ ) -> Optional[element.Tag]:
+ """Find the `element.Tag` closest to this one that matches the given selector.
+
+ This uses the Soup Sieve library. For more information, see
+ that library's documentation for the ``soupsieve.closest()``
+ method.
+
+ :param selector: A string containing a CSS selector.
+
+ :param namespaces: A dictionary mapping namespace prefixes
+ used in the CSS selector to namespace URIs. By default,
+ Beautiful Soup will pass in the prefixes it encountered while
+ parsing the document.
+
+ :param flags: Flags to be passed into Soup Sieve's
+ ``soupsieve.closest()`` method.
+
+ :param kwargs: Keyword arguments to be passed into Soup Sieve's
+ ``soupsieve.closest()`` method.
+
+ """
+ return self.api.closest(
+ select, self.tag, self._ns(namespaces, select), flags, **kwargs
+ )
+
+ def match(
+ self,
+ select: str,
+ namespaces: Optional[_NamespaceMapping] = None,
+ flags: int = 0,
+ **kwargs: Any,
+ ) -> bool:
+ """Check whether or not this `element.Tag` matches the given CSS selector.
+
+ This uses the Soup Sieve library. For more information, see
+ that library's documentation for the ``soupsieve.match()``
+ method.
+
+ :param selector: A CSS selector.
+
+ :param namespaces: A dictionary mapping namespace prefixes
+ used in the CSS selector to namespace URIs. By default,
+ Beautiful Soup will pass in the prefixes it encountered while
+ parsing the document.
+
+ :param flags: Flags to be passed into Soup Sieve's
+ ``soupsieve.match()``
+ method.
+
+ :param kwargs: Keyword arguments to be passed into SoupSieve's
+ ``soupsieve.match()``
+ method.
+ """
+ return cast(
+ bool,
+ self.api.match(
+ select, self.tag, self._ns(namespaces, select), flags, **kwargs
+ ),
+ )
+
+ def filter(
+ self,
+ select: str,
+ namespaces: Optional[_NamespaceMapping] = None,
+ flags: int = 0,
+ **kwargs: Any,
+ ) -> ResultSet[element.Tag]:
+ """Filter this `element.Tag`'s direct children based on the given CSS selector.
+
+ This uses the Soup Sieve library. It works the same way as
+ passing a `element.Tag` into that library's `soupsieve.filter()
+ `_
+ method. For more information, see the documentation for
+ `soupsieve.filter()
+ `_.
+
+ :param namespaces: A dictionary mapping namespace prefixes
+ used in the CSS selector to namespace URIs. By default,
+ Beautiful Soup will pass in the prefixes it encountered while
+ parsing the document.
+
+ :param flags: Flags to be passed into Soup Sieve's
+ `soupsieve.filter()
+ `_
+ method.
+
+ :param kwargs: Keyword arguments to be passed into SoupSieve's
+ `soupsieve.filter()
+ `_
+ method.
+ """
+ return self._rs(
+ self.api.filter(
+ select, self.tag, self._ns(namespaces, select), flags, **kwargs
+ )
+ )
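+
+
+# Editor's note: the snippet below is an illustrative sketch added during this
+# review, not part of upstream Beautiful Soup. It assumes the ``Tag.css``
+# property from recent releases returns an instance of the CSS class defined
+# above, and uses arbitrary example markup.
+def _editor_example_css_usage() -> None:
+    from bs4 import BeautifulSoup  # imported locally to avoid a circular import
+
+    soup = BeautifulSoup("<div><p class='a'>one</p><p>two</p></div>", "html.parser")
+    print(soup.css.select("p.a"))         # ResultSet of matching Tags
+    print(soup.css.select("p", limit=1))  # stop after the first match
+    p = soup.css.select("p")[0]
+    print(p.css.closest("div"))           # nearest enclosing Tag matching the selector
+    print(p.css.match("p.a"))             # True: this Tag itself matches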
diff --git a/venv/lib/python3.9/site-packages/bs4/dammit.py b/venv/lib/python3.9/site-packages/bs4/dammit.py
new file mode 100644
index 0000000..3649951
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/bs4/dammit.py
@@ -0,0 +1,1408 @@
+# -*- coding: utf-8 -*-
+"""Beautiful Soup bonus library: Unicode, Dammit
+
+This library converts a bytestream to Unicode through any means
+necessary. It is heavily based on code from Mark Pilgrim's `Universal
+Feed Parser `_, now maintained
+by Kurt McKee. It does not rewrite the body of an XML or HTML document
+to reflect a new encoding; that's the job of `TreeBuilder`.
+
+"""
+
+# Use of this source code is governed by the MIT license.
+__license__ = "MIT"
+
+from html.entities import codepoint2name
+from collections import defaultdict
+import codecs
+from html.entities import html5
+import re
+from logging import Logger, getLogger
+from types import ModuleType
+from typing import (
+ Dict,
+ Iterator,
+ List,
+ Optional,
+ Pattern,
+ Set,
+ Tuple,
+ Type,
+ Union,
+ cast,
+)
+from typing_extensions import Literal
+from bs4._typing import (
+ _Encoding,
+ _Encodings,
+)
+import warnings
+
+# Import a library to autodetect character encodings. We'll support
+# any of a number of libraries that all support the same API:
+#
+# * cchardet
+# * chardet
+# * charset-normalizer
+chardet_module: Optional[ModuleType] = None
+try:
+ # PyPI package: cchardet
+ import cchardet # type:ignore
+
+ chardet_module = cchardet
+except ImportError:
+ try:
+ # Debian package: python-chardet
+ # PyPI package: chardet
+ import chardet
+
+ chardet_module = chardet
+ except ImportError:
+ try:
+ # PyPI package: charset-normalizer
+ import charset_normalizer # type:ignore
+
+ chardet_module = charset_normalizer
+ except ImportError:
+ # No chardet available.
+ pass
+
+
+def _chardet_dammit(s: bytes) -> Optional[str]:
+ """Try as hard as possible to detect the encoding of a bytestring."""
+ if chardet_module is None or isinstance(s, str):
+ return None
+ module = chardet_module
+ return module.detect(s)["encoding"]
+
+
+# Build bytestring and Unicode versions of regular expressions for finding
+# a declared encoding inside an XML or HTML document.
+xml_encoding: str = "^\\s*<\\?.*encoding=['\"](.*?)['\"].*\\?>" #: :meta private:
+html_meta: str = (
+ "<\\s*meta[^>]+charset\\s*=\\s*[\"']?([^>]*?)[ /;'\">]" #: :meta private:
+)
+
+# TODO-TYPING: The Pattern type here could use more refinement, but it's tricky.
+encoding_res: Dict[Type, Dict[str, Pattern]] = dict()
+encoding_res[bytes] = {
+ "html": re.compile(html_meta.encode("ascii"), re.I),
+ "xml": re.compile(xml_encoding.encode("ascii"), re.I),
+}
+encoding_res[str] = {
+ "html": re.compile(html_meta, re.I),
+ "xml": re.compile(xml_encoding, re.I),
+}
+
+
+class EntitySubstitution(object):
+ """The ability to substitute XML or HTML entities for certain characters."""
+
+ #: A map of named HTML entities to the corresponding Unicode string.
+ #:
+ #: :meta hide-value:
+ HTML_ENTITY_TO_CHARACTER: Dict[str, str]
+
+ #: A map of Unicode strings to the corresponding named HTML entities;
+ #: the inverse of HTML_ENTITY_TO_CHARACTER.
+ #:
+ #: :meta hide-value:
+ CHARACTER_TO_HTML_ENTITY: Dict[str, str]
+
+ #: A regular expression that matches any character (or, in rare
+ #: cases, pair of characters) that can be replaced with a named
+ #: HTML entity.
+ #:
+ #: :meta hide-value:
+ CHARACTER_TO_HTML_ENTITY_RE: Pattern[str]
+
+ #: A very similar regular expression to
+ #: CHARACTER_TO_HTML_ENTITY_RE, but which also matches unescaped
+ #: ampersands. This is used by the 'html' formatter to provide
+ #: backwards-compatibility, even though the HTML5 spec allows most
+ #: ampersands to go unescaped.
+ #:
+ #: :meta hide-value:
+ CHARACTER_TO_HTML_ENTITY_WITH_AMPERSAND_RE: Pattern[str]
+
+ @classmethod
+ def _populate_class_variables(cls) -> None:
+ """Initialize variables used by this class to manage the plethora of
+ HTML5 named entities.
+
+ This function sets the following class variables:
+
+ CHARACTER_TO_HTML_ENTITY - A mapping of Unicode strings like "⦨" to
+ entity names like "angmsdaa". When a single Unicode string has
+ multiple entity names, we try to choose the most commonly-used
+ name.
+
+ HTML_ENTITY_TO_CHARACTER: A mapping of entity names like "angmsdaa" to
+ Unicode strings like "⦨".
+
+ CHARACTER_TO_HTML_ENTITY_RE: A regular expression matching (almost) any
+ Unicode string that corresponds to an HTML5 named entity.
+
+ CHARACTER_TO_HTML_ENTITY_WITH_AMPERSAND_RE: A very similar
+ regular expression to CHARACTER_TO_HTML_ENTITY_RE, but which
+ also matches unescaped ampersands. This is used by the 'html'
+ formatter to provide backwards-compatibility, even though the HTML5
+ spec allows most ampersands to go unescaped.
+ """
+ unicode_to_name = {}
+ name_to_unicode = {}
+
+ short_entities = set()
+ long_entities_by_first_character = defaultdict(set)
+
+ for name_with_semicolon, character in sorted(html5.items()):
+ # "It is intentional, for legacy compatibility, that many
+ # code points have multiple character reference names. For
+ # example, some appear both with and without the trailing
+ # semicolon, or with different capitalizations."
+ # - https://html.spec.whatwg.org/multipage/named-characters.html#named-character-references
+ #
+ # The parsers are in charge of handling (or not) character
+ # references with no trailing semicolon, so we remove the
+ # semicolon whenever it appears.
+ if name_with_semicolon.endswith(";"):
+ name = name_with_semicolon[:-1]
+ else:
+ name = name_with_semicolon
+
+ # When parsing HTML, we want to recognize any known named
+ # entity and convert it to a sequence of Unicode
+ # characters.
+ if name not in name_to_unicode:
+ name_to_unicode[name] = character
+
+ # When _generating_ HTML, we want to recognize special
+ # character sequences that _could_ be converted to named
+ # entities.
+ unicode_to_name[character] = name
+
+ # We also need to build a regular expression that lets us
+ # _find_ those characters in output strings so we can
+ # replace them.
+ #
+ # This is tricky, for two reasons.
+
+ if len(character) == 1 and ord(character) < 128 and character not in "<>":
+ # First, it would be annoying to turn single ASCII
+ # characters like "|" into named entities like
+ # "&verbar;". The exceptions are <>, which we _must_
+ # turn into named entities to produce valid HTML.
+ continue
+
+ if len(character) > 1 and all(ord(x) < 128 for x in character):
+ # We also do not want to turn _combinations_ of ASCII
+ # characters like 'fj' into named entities like '&fjlig;',
+ # though that's more debatable.
+ continue
+
+ # Second, some named entities have a Unicode value that's
+ # a subset of the Unicode value for some _other_ named
+ # entity. As an example, '\u2267' is ≧,
+ # but '\u2267\u0338' is ≧̸. Our regular
+ # expression needs to match the first two characters of
+ # "\u2267\u0338foo", but only the first character of
+ # "\u2267foo".
+ #
+ # In this step, we build two sets of characters that
+ # _eventually_ need to go into the regular expression. But
+ # we won't know exactly what the regular expression needs
+ # to look like until we've gone through the entire list of
+ # named entities.
+ if len(character) == 1 and character != "&":
+ short_entities.add(character)
+ else:
+ long_entities_by_first_character[character[0]].add(character)
+
+ # Now that we've been through the entire list of entities, we
+ # can create a regular expression that matches any of them.
+ particles = set()
+ for short in short_entities:
+ long_versions = long_entities_by_first_character[short]
+ if not long_versions:
+ particles.add(short)
+ else:
+ ignore = "".join([x[1] for x in long_versions])
+ # This finds, e.g. \u2267 but only if it is _not_
+ # followed by \u0338.
+ particles.add("%s(?![%s])" % (short, ignore))
+
+ for long_entities in list(long_entities_by_first_character.values()):
+ for long_entity in long_entities:
+ particles.add(long_entity)
+
+ re_definition = "(%s)" % "|".join(particles)
+
+ particles.add("&")
+ re_definition_with_ampersand = "(%s)" % "|".join(particles)
+
+ # If an entity shows up in both html5 and codepoint2name, it's
+ # likely that HTML5 gives it several different names, such as
+ # 'rsquo' and 'rsquor'. When converting Unicode characters to
+ # named entities, the codepoint2name name should take
+ # precedence where possible, since that's the more easily
+ # recognizable one.
+ for codepoint, name in list(codepoint2name.items()):
+ character = chr(codepoint)
+ unicode_to_name[character] = name
+
+ cls.CHARACTER_TO_HTML_ENTITY = unicode_to_name
+ cls.HTML_ENTITY_TO_CHARACTER = name_to_unicode
+ cls.CHARACTER_TO_HTML_ENTITY_RE = re.compile(re_definition)
+ cls.CHARACTER_TO_HTML_ENTITY_WITH_AMPERSAND_RE = re.compile(
+ re_definition_with_ampersand
+ )
+
+ #: A map of Unicode strings to the corresponding named XML entities.
+ #:
+ #: :meta hide-value:
+ CHARACTER_TO_XML_ENTITY: Dict[str, str] = {
+ "'": "apos",
+ '"': "quot",
+ "&": "amp",
+ "<": "lt",
+ ">": "gt",
+ }
+
+ # Matches any named or numeric HTML entity.
+ ANY_ENTITY_RE = re.compile("&(#\\d+|#x[0-9a-fA-F]+|\\w+);", re.I)
+
+ #: A regular expression matching an angle bracket or an ampersand that
+ #: is not part of an XML or HTML entity.
+ #:
+ #: :meta hide-value:
+ BARE_AMPERSAND_OR_BRACKET: Pattern[str] = re.compile(
+ "([<>]|" "&(?!#\\d+;|#x[0-9a-fA-F]+;|\\w+;)" ")"
+ )
+
+ #: A regular expression matching an angle bracket or an ampersand.
+ #:
+ #: :meta hide-value:
+ AMPERSAND_OR_BRACKET: Pattern[str] = re.compile("([<>&])")
+
+ @classmethod
+ def _substitute_html_entity(cls, matchobj: re.Match) -> str:
+ """Used with a regular expression to substitute the
+ appropriate HTML entity for a special character string."""
+ original_entity = matchobj.group(0)
+ entity = cls.CHARACTER_TO_HTML_ENTITY.get(original_entity)
+ if entity is None:
+ return "&%s;" % original_entity
+ return "&%s;" % entity
+
+ @classmethod
+ def _substitute_xml_entity(cls, matchobj: re.Match) -> str:
+ """Used with a regular expression to substitute the
+ appropriate XML entity for a special character string."""
+ entity = cls.CHARACTER_TO_XML_ENTITY[matchobj.group(0)]
+ return "&%s;" % entity
+
+ @classmethod
+ def _escape_entity_name(cls, matchobj: re.Match) -> str:
+ return "&%s;" % matchobj.group(1)
+
+ @classmethod
+ def _escape_unrecognized_entity_name(cls, matchobj: re.Match) -> str:
+ possible_entity = matchobj.group(1)
+ if possible_entity in cls.HTML_ENTITY_TO_CHARACTER:
+ return "&%s;" % possible_entity
+ return "&%s;" % possible_entity
+
+ @classmethod
+ def quoted_attribute_value(cls, value: str) -> str:
+ """Make a value into a quoted XML attribute, possibly escaping it.
+
+ Most strings will be quoted using double quotes.
+
+ Bob's Bar -> "Bob's Bar"
+
+ If a string contains double quotes, it will be quoted using
+ single quotes.
+
+ Welcome to "my bar" -> 'Welcome to "my bar"'
+
+ If a string contains both single and double quotes, the
+ double quotes will be escaped, and the string will be quoted
+ using double quotes.
+
+ Welcome to "Bob's Bar" -> Welcome to "Bob's bar"
+
+ :param value: The XML attribute value to quote
+ :return: The quoted value
+ """
+ quote_with = '"'
+ if '"' in value:
+ if "'" in value:
+ # The string contains both single and double
+ # quotes. Turn the double quotes into
+ # entities. We quote the double quotes rather than
+ # the single quotes because the entity name is
+ # """ whether this is HTML or XML. If we
+ # quoted the single quotes, we'd have to decide
+ # between ' and &squot;.
+ replace_with = """
+ value = value.replace('"', replace_with)
+ else:
+ # There are double quotes but no single quotes.
+ # We can use single quotes to quote the attribute.
+ quote_with = "'"
+ return quote_with + value + quote_with
+
+ @classmethod
+ def substitute_xml(cls, value: str, make_quoted_attribute: bool = False) -> str:
+ """Replace special XML characters with named XML entities.
+
+ The less-than sign will become &lt;, the greater-than sign
+ will become &gt;, and any ampersands will become &amp;. If you
+ want ampersands that seem to be part of an entity definition
+ to be left alone, use `substitute_xml_containing_entities`
+ instead.
+
+ :param value: A string to be substituted.
+
+ :param make_quoted_attribute: If True, then the string will be
+ quoted, as befits an attribute value.
+
+ :return: A version of ``value`` with special characters replaced
+ with named entities.
+ """
+ # Escape angle brackets and ampersands.
+ value = cls.AMPERSAND_OR_BRACKET.sub(cls._substitute_xml_entity, value)
+
+ if make_quoted_attribute:
+ value = cls.quoted_attribute_value(value)
+ return value
+
+ @classmethod
+ def substitute_xml_containing_entities(
+ cls, value: str, make_quoted_attribute: bool = False
+ ) -> str:
+ """Substitute XML entities for special XML characters.
+
+ :param value: A string to be substituted. The less-than sign will
+ become &lt;, the greater-than sign will become &gt;, and any
+ ampersands that are not part of an entity definition will
+ become &amp;.
+
+ :param make_quoted_attribute: If True, then the string will be
+ quoted, as befits an attribute value.
+ """
+ # Escape angle brackets, and ampersands that aren't part of
+ # entities.
+ value = cls.BARE_AMPERSAND_OR_BRACKET.sub(cls._substitute_xml_entity, value)
+
+ if make_quoted_attribute:
+ value = cls.quoted_attribute_value(value)
+ return value
+
+ @classmethod
+ def substitute_html(cls, s: str) -> str:
+ """Replace certain Unicode characters with named HTML entities.
+
+ This differs from ``data.encode(encoding, 'xmlcharrefreplace')``
+ in that the goal is to make the result more readable (to those
+ with ASCII displays) rather than to recover from
+ errors. There's absolutely nothing wrong with a UTF-8 string
+ containing a LATIN SMALL LETTER E WITH ACUTE, but replacing that
+ character with "&eacute;" will make it more readable to some
+ people.
+
+ :param s: The string to be modified.
+ :return: The string with some Unicode characters replaced with
+ HTML entities.
+ """
+ # Convert any appropriate characters to HTML entities.
+ return cls.CHARACTER_TO_HTML_ENTITY_WITH_AMPERSAND_RE.sub(
+ cls._substitute_html_entity, s
+ )
+
+ @classmethod
+ def substitute_html5(cls, s: str) -> str:
+ """Replace certain Unicode characters with named HTML entities
+ using HTML5 rules.
+
+ Specifically, this method is much less aggressive about
+ escaping ampersands than substitute_html. Only ambiguous
+ ampersands are escaped, per the HTML5 standard:
+
+ "An ambiguous ampersand is a U+0026 AMPERSAND character (&)
+ that is followed by one or more ASCII alphanumerics, followed
+ by a U+003B SEMICOLON character (;), where these characters do
+ not match any of the names given in the named character
+ references section."
+
+ Unlike substitute_html5_raw, this method assumes HTML entities
+ were converted to Unicode characters on the way in, as
+ Beautiful Soup does. By the time Beautiful Soup does its work,
+ the only ambiguous ampersands that need to be escaped are the
+ ones that were escaped in the original markup when mentioning
+ HTML entities.
+
+ :param s: The string to be modified.
+ :return: The string with some Unicode characters replaced with
+ HTML entities.
+ """
+ # First, escape any HTML entities found in the markup.
+ s = cls.ANY_ENTITY_RE.sub(cls._escape_entity_name, s)
+
+ # Next, convert any appropriate characters to unescaped HTML entities.
+ s = cls.CHARACTER_TO_HTML_ENTITY_RE.sub(cls._substitute_html_entity, s)
+
+ return s
+
+ @classmethod
+ def substitute_html5_raw(cls, s: str) -> str:
+ """Replace certain Unicode characters with named HTML entities
+ using HTML5 rules.
+
+ substitute_html5_raw is similar to substitute_html5 but it is
+ designed for standalone use (whereas substitute_html5 is
+ designed for use with Beautiful Soup).
+
+ :param s: The string to be modified.
+ :return: The string with some Unicode characters replaced with
+ HTML entities.
+ """
+ # First, escape the ampersand for anything that looks like an
+ # entity but isn't in the list of recognized entities. All other
+ # ampersands can be left alone.
+ s = cls.ANY_ENTITY_RE.sub(cls._escape_unrecognized_entity_name, s)
+
+ # Then, convert a range of Unicode characters to unescaped
+ # HTML entities.
+ s = cls.CHARACTER_TO_HTML_ENTITY_RE.sub(cls._substitute_html_entity, s)
+
+ return s
+
+
+EntitySubstitution._populate_class_variables()
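+
+
+# Editor's note: a small illustrative sketch (added for this review, not part
+# of upstream Beautiful Soup) showing what the substitution helpers above do
+# with ordinary strings.
+def _editor_example_entity_substitution() -> None:
+    # Angle brackets and ampersands become named XML entities.
+    print(EntitySubstitution.substitute_xml("AT&T <rocks>"))
+    # -> AT&amp;T &lt;rocks&gt;
+
+    # Attribute quoting picks single or double quotes as needed.
+    print(EntitySubstitution.quoted_attribute_value('Welcome to "my bar"'))
+    # -> 'Welcome to "my bar"'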
+
+
+class EncodingDetector:
+ """This class is capable of guessing a number of possible encodings
+ for a bytestring.
+
+ Order of precedence:
+
+ 1. Encodings you specifically tell EncodingDetector to try first
+ (the ``known_definite_encodings`` argument to the constructor).
+
+ 2. An encoding determined by sniffing the document's byte-order mark.
+
+ 3. Encodings you specifically tell EncodingDetector to try if
+ byte-order mark sniffing fails (the ``user_encodings`` argument to the
+ constructor).
+
+ 4. An encoding declared within the bytestring itself, either in an
+ XML declaration (if the bytestring is to be interpreted as an XML
+ document), or in a tag (if the bytestring is to be
+ interpreted as an HTML document.)
+
+ 5. An encoding detected through textual analysis by chardet,
+ cchardet, or a similar external library.
+
+ 6. UTF-8.
+
+ 7. Windows-1252.
+
+ :param markup: Some markup in an unknown encoding.
+
+ :param known_definite_encodings: When determining the encoding
+ of ``markup``, these encodings will be tried first, in
+ order. In HTML terms, this corresponds to the "known
+ definite encoding" step defined in `section 13.2.3.1 of the HTML standard `_.
+
+ :param user_encodings: These encodings will be tried after the
+ ``known_definite_encodings`` have been tried and failed, and
+ after an attempt to sniff the encoding by looking at a
+ byte order mark has failed. In HTML terms, this
+ corresponds to the step "user has explicitly instructed
+ the user agent to override the document's character
+ encoding", defined in `section 13.2.3.2 of the HTML standard `_.
+
+ :param override_encodings: A **deprecated** alias for
+ ``known_definite_encodings``. Any encodings here will be tried
+ immediately after the encodings in
+ ``known_definite_encodings``.
+
+ :param is_html: If True, this markup is considered to be
+ HTML. Otherwise it's assumed to be XML.
+
+ :param exclude_encodings: These encodings will not be tried,
+ even if they otherwise would be.
+
+ """
+
+ def __init__(
+ self,
+ markup: bytes,
+ known_definite_encodings: Optional[_Encodings] = None,
+ is_html: Optional[bool] = False,
+ exclude_encodings: Optional[_Encodings] = None,
+ user_encodings: Optional[_Encodings] = None,
+ override_encodings: Optional[_Encodings] = None,
+ ):
+ self.known_definite_encodings = list(known_definite_encodings or [])
+ if override_encodings:
+ warnings.warn(
+ "The 'override_encodings' argument was deprecated in 4.10.0. Use 'known_definite_encodings' instead.",
+ DeprecationWarning,
+ stacklevel=3,
+ )
+ self.known_definite_encodings += override_encodings
+ self.user_encodings = user_encodings or []
+ exclude_encodings = exclude_encodings or []
+ self.exclude_encodings = set([x.lower() for x in exclude_encodings])
+ self.chardet_encoding = None
+ self.is_html = False if is_html is None else is_html
+ self.declared_encoding: Optional[str] = None
+
+ # First order of business: strip a byte-order mark.
+ self.markup, self.sniffed_encoding = self.strip_byte_order_mark(markup)
+
+ known_definite_encodings: _Encodings
+ user_encodings: _Encodings
+ exclude_encodings: _Encodings
+ chardet_encoding: Optional[_Encoding]
+ is_html: bool
+ declared_encoding: Optional[_Encoding]
+ markup: bytes
+ sniffed_encoding: Optional[_Encoding]
+
+ def _usable(self, encoding: Optional[_Encoding], tried: Set[_Encoding]) -> bool:
+ """Should we even bother to try this encoding?
+
+ :param encoding: Name of an encoding.
+ :param tried: Encodings that have already been tried. This
+ will be modified as a side effect.
+ """
+ if encoding is None:
+ return False
+ encoding = encoding.lower()
+ if encoding in self.exclude_encodings:
+ return False
+ if encoding not in tried:
+ tried.add(encoding)
+ return True
+ return False
+
+ @property
+ def encodings(self) -> Iterator[_Encoding]:
+ """Yield a number of encodings that might work for this markup.
+
+ :yield: A sequence of strings. Each is the name of an encoding
+ that *might* work to convert a bytestring into Unicode.
+ """
+ tried: Set[_Encoding] = set()
+
+ # First, try the known definite encodings
+ for e in self.known_definite_encodings:
+ if self._usable(e, tried):
+ yield e
+
+ # Did the document originally start with a byte-order mark
+ # that indicated its encoding?
+ if self.sniffed_encoding is not None and self._usable(
+ self.sniffed_encoding, tried
+ ):
+ yield self.sniffed_encoding
+
+ # Sniffing the byte-order mark did nothing; try the user
+ # encodings.
+ for e in self.user_encodings:
+ if self._usable(e, tried):
+ yield e
+
+ # Look within the document for an XML or HTML encoding
+ # declaration.
+ if self.declared_encoding is None:
+ self.declared_encoding = self.find_declared_encoding(
+ self.markup, self.is_html
+ )
+ if self.declared_encoding is not None and self._usable(
+ self.declared_encoding, tried
+ ):
+ yield self.declared_encoding
+
+ # Use third-party character set detection to guess at the
+ # encoding.
+ if self.chardet_encoding is None:
+ self.chardet_encoding = _chardet_dammit(self.markup)
+ if self.chardet_encoding is not None and self._usable(
+ self.chardet_encoding, tried
+ ):
+ yield self.chardet_encoding
+
+ # As a last-ditch effort, try utf-8 and windows-1252.
+ for e in ("utf-8", "windows-1252"):
+ if self._usable(e, tried):
+ yield e
+
+ @classmethod
+ def strip_byte_order_mark(cls, data: bytes) -> Tuple[bytes, Optional[_Encoding]]:
+ """If a byte-order mark is present, strip it and return the encoding it implies.
+
+ :param data: A bytestring that may or may not begin with a
+ byte-order mark.
+
+ :return: A 2-tuple (data stripped of byte-order mark, encoding implied by byte-order mark)
+ """
+ encoding = None
+ if isinstance(data, str):
+ # Unicode data cannot have a byte-order mark.
+ return data, encoding
+ if (
+ (len(data) >= 4)
+ and (data[:2] == b"\xfe\xff")
+ and (data[2:4] != b"\x00\x00")
+ ):
+ encoding = "utf-16be"
+ data = data[2:]
+ elif (
+ (len(data) >= 4)
+ and (data[:2] == b"\xff\xfe")
+ and (data[2:4] != b"\x00\x00")
+ ):
+ encoding = "utf-16le"
+ data = data[2:]
+ elif data[:3] == b"\xef\xbb\xbf":
+ encoding = "utf-8"
+ data = data[3:]
+ elif data[:4] == b"\x00\x00\xfe\xff":
+ encoding = "utf-32be"
+ data = data[4:]
+ elif data[:4] == b"\xff\xfe\x00\x00":
+ encoding = "utf-32le"
+ data = data[4:]
+ return data, encoding
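+
+ # Editor's note (illustrative, not upstream): for example,
+ # strip_byte_order_mark(b"\xef\xbb\xbfhello") returns (b"hello", "utf-8"),
+ # while data with no byte-order mark comes back unchanged with encoding None.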
+
+ @classmethod
+ def find_declared_encoding(
+ cls,
+ markup: Union[bytes, str],
+ is_html: bool = False,
+ search_entire_document: bool = False,
+ ) -> Optional[_Encoding]:
+ """Given a document, tries to find an encoding declared within the
+ text of the document itself.
+
+ An XML encoding is declared at the beginning of the document.
+
+ An HTML encoding is declared in a <meta> tag, hopefully near the
+ beginning of the document.
+
+ :param markup: Some markup.
+ :param is_html: If True, this markup is considered to be HTML. Otherwise
+ it's assumed to be XML.
+ :param search_entire_document: Since an encoding is supposed
+ to be declared near the beginning of the document, most of
+ the time it's only necessary to search a few kilobytes of
+ data. Set this to True to force this method to search the
+ entire document.
+ :return: The declared encoding, if one is found.
+ """
+ if search_entire_document:
+ xml_endpos = html_endpos = len(markup)
+ else:
+ xml_endpos = 1024
+ html_endpos = max(2048, int(len(markup) * 0.05))
+
+ if isinstance(markup, bytes):
+ res = encoding_res[bytes]
+ else:
+ res = encoding_res[str]
+
+ xml_re = res["xml"]
+ html_re = res["html"]
+ declared_encoding: Optional[_Encoding] = None
+ declared_encoding_match = xml_re.search(markup, endpos=xml_endpos)
+ if not declared_encoding_match and is_html:
+ declared_encoding_match = html_re.search(markup, endpos=html_endpos)
+ if declared_encoding_match is not None:
+ declared_encoding = declared_encoding_match.groups()[0]
+ if declared_encoding:
+ if isinstance(declared_encoding, bytes):
+ declared_encoding = declared_encoding.decode("ascii", "replace")
+ return declared_encoding.lower()
+ return None
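+
+
+# Editor's note: an illustrative sketch (added for this review, not part of
+# upstream Beautiful Soup) of the candidate order EncodingDetector yields for
+# a document with both a byte-order mark and an in-document declaration.
+def _editor_example_encoding_detector() -> None:
+    markup = b"\xef\xbb\xbf<html><meta charset='latin-1'>caf\xc3\xa9</html>"
+    detector = EncodingDetector(markup, is_html=True)
+    for candidate in detector.encodings:
+        # Typically: 'utf-8' (from the BOM), then 'latin-1' (from the <meta>
+        # tag), then a chardet-style guess if such a library is installed,
+        # then whichever of the utf-8/windows-1252 fallbacks is still untried.
+        print(candidate)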
+
+
+class UnicodeDammit:
+ """A class for detecting the encoding of a bytestring containing an
+ HTML or XML document, and decoding it to Unicode. If the source
+ encoding is windows-1252, `UnicodeDammit` can also replace
+ Microsoft smart quotes with their HTML or XML equivalents.
+
+ :param markup: HTML or XML markup in an unknown encoding.
+
+ :param known_definite_encodings: When determining the encoding
+ of ``markup``, these encodings will be tried first, in
+ order. In HTML terms, this corresponds to the "known
+ definite encoding" step defined in `section 13.2.3.1 of the HTML standard `_.
+
+ :param user_encodings: These encodings will be tried after the
+ ``known_definite_encodings`` have been tried and failed, and
+ after an attempt to sniff the encoding by looking at a
+ byte order mark has failed. In HTML terms, this
+ corresponds to the step "user has explicitly instructed
+ the user agent to override the document's character
+ encoding", defined in `section 13.2.3.2 of the HTML standard `_.
+
+ :param override_encodings: A **deprecated** alias for
+ ``known_definite_encodings``. Any encodings here will be tried
+ immediately after the encodings in
+ ``known_definite_encodings``.
+
+ :param smart_quotes_to: By default, Microsoft smart quotes will,
+ like all other characters, be converted to Unicode
+ characters. Setting this to ``ascii`` will convert them to ASCII
+ quotes instead. Setting it to ``xml`` will convert them to XML
+ entity references, and setting it to ``html`` will convert them
+ to HTML entity references.
+
+ :param is_html: If True, ``markup`` is treated as an HTML
+ document. Otherwise it's treated as an XML document.
+
+ :param exclude_encodings: These encodings will not be considered,
+ even if the sniffing code thinks they might make sense.
+
+ """
+
+ def __init__(
+ self,
+ markup: bytes,
+ known_definite_encodings: Optional[_Encodings] = [],
+ smart_quotes_to: Optional[Literal["ascii", "xml", "html"]] = None,
+ is_html: bool = False,
+ exclude_encodings: Optional[_Encodings] = [],
+ user_encodings: Optional[_Encodings] = None,
+ override_encodings: Optional[_Encodings] = None,
+ ):
+ self.smart_quotes_to = smart_quotes_to
+ self.tried_encodings = []
+ self.contains_replacement_characters = False
+ self.is_html = is_html
+ self.log = getLogger(__name__)
+ self.detector = EncodingDetector(
+ markup,
+ known_definite_encodings,
+ is_html,
+ exclude_encodings,
+ user_encodings,
+ override_encodings,
+ )
+
+ # Short-circuit if the data is in Unicode to begin with.
+ if isinstance(markup, str):
+ self.markup = markup.encode("utf8")
+ self.unicode_markup = markup
+ self.original_encoding = None
+ return
+
+ # The encoding detector may have stripped a byte-order mark.
+ # Use the stripped markup from this point on.
+ self.markup = self.detector.markup
+
+ u = None
+ for encoding in self.detector.encodings:
+ markup = self.detector.markup
+ u = self._convert_from(encoding)
+ if u is not None:
+ break
+
+ if not u:
+ # None of the encodings worked. As an absolute last resort,
+ # try them again with character replacement.
+
+ for encoding in self.detector.encodings:
+ if encoding != "ascii":
+ u = self._convert_from(encoding, "replace")
+ if u is not None:
+ self.log.warning(
+ "Some characters could not be decoded, and were "
+ "replaced with REPLACEMENT CHARACTER."
+ )
+
+ self.contains_replacement_characters = True
+ break
+
+ # If none of that worked, we could at this point force it to
+ # ASCII, but that would destroy so much data that I think
+ # giving up is better.
+ #
+ # Note that this is extremely unlikely, probably impossible,
+ # because the "replace" strategy is so powerful. Even running
+ # the Python binary through Unicode, Dammit gives you Unicode,
+ # albeit Unicode riddled with REPLACEMENT CHARACTER.
+ if u is None:
+ self.original_encoding = None
+ self.unicode_markup = None
+ else:
+ self.unicode_markup = u
+
+ #: The original markup, before it was converted to Unicode.
+ #: This is not necessarily the same as what was passed in to the
+ #: constructor, since any byte-order mark will be stripped.
+ markup: bytes
+
+ #: The Unicode version of the markup, following conversion. This
+ #: is set to None if there was simply no way to convert the
+ #: bytestring to Unicode (as with binary data).
+ unicode_markup: Optional[str]
+
+ #: This is True if `UnicodeDammit.unicode_markup` contains
+ #: U+FFFD REPLACEMENT_CHARACTER characters which were not present
+ #: in `UnicodeDammit.markup`. These mark character sequences that
+ #: could not be represented in Unicode.
+ contains_replacement_characters: bool
+
+ #: Unicode, Dammit's best guess as to the original character
+ #: encoding of `UnicodeDammit.markup`.
+ original_encoding: Optional[_Encoding]
+
+ #: The strategy used to handle Microsoft smart quotes.
+ smart_quotes_to: Optional[str]
+
+ #: The (encoding, error handling strategy) 2-tuples that were used to
+ #: try and convert the markup to Unicode.
+ tried_encodings: List[Tuple[_Encoding, str]]
+
+ log: Logger #: :meta private:
+
+ def _sub_ms_char(self, match: re.Match) -> bytes:
+ """Changes a MS smart quote character to an XML or HTML
+ entity, or an ASCII character.
+
+ TODO: Since this is only used to convert smart quotes, it
+ could be simplified, and MS_CHARS_TO_ASCII made much less
+ parochial.
+ """
+ orig: bytes = match.group(1)
+ sub: bytes
+ if self.smart_quotes_to == "ascii":
+ if orig in self.MS_CHARS_TO_ASCII:
+ sub = self.MS_CHARS_TO_ASCII[orig].encode()
+ else:
+ # Shouldn't happen; substitute the character
+ # with itself.
+ sub = orig
+ else:
+ if orig in self.MS_CHARS:
+ substitutions = self.MS_CHARS[orig]
+ if type(substitutions) is tuple:
+ if self.smart_quotes_to == "xml":
+ sub = b"" + substitutions[1].encode() + b";"
+ else:
+ sub = b"&" + substitutions[0].encode() + b";"
+ else:
+ substitutions = cast(str, substitutions)
+ sub = substitutions.encode()
+ else:
+ # Shouldn't happen; substitute the character
+ # for itself.
+ sub = orig
+ return sub
+
+ #: This dictionary maps commonly seen values for "charset" in HTML
+ #: meta tags to the corresponding Python codec names. It only covers
+ #: values that aren't in Python's aliases and can't be determined
+ #: by the heuristics in `find_codec`.
+ #:
+ #: :meta hide-value:
+ CHARSET_ALIASES: Dict[str, _Encoding] = {
+ "macintosh": "mac-roman",
+ "x-sjis": "shift-jis",
+ }
+
+ #: A list of encodings that tend to contain Microsoft smart quotes.
+ #:
+ #: :meta hide-value:
+ ENCODINGS_WITH_SMART_QUOTES: _Encodings = [
+ "windows-1252",
+ "iso-8859-1",
+ "iso-8859-2",
+ ]
+
+ def _convert_from(
+ self, proposed: _Encoding, errors: str = "strict"
+ ) -> Optional[str]:
+ """Attempt to convert the markup to the proposed encoding.
+
+ :param proposed: The name of a character encoding.
+ :param errors: An error handling strategy, used when calling `str`.
+ :return: The converted markup, or `None` if the proposed
+ encoding/error handling strategy didn't work.
+ """
+ lookup_result = self.find_codec(proposed)
+ if lookup_result is None or (lookup_result, errors) in self.tried_encodings:
+ return None
+ proposed = lookup_result
+ self.tried_encodings.append((proposed, errors))
+ markup = self.markup
+ # Convert smart quotes to HTML if coming from an encoding
+ # that might have them.
+ if (
+ self.smart_quotes_to is not None
+ and proposed in self.ENCODINGS_WITH_SMART_QUOTES
+ ):
+ smart_quotes_re = b"([\x80-\x9f])"
+ smart_quotes_compiled = re.compile(smart_quotes_re)
+ markup = smart_quotes_compiled.sub(self._sub_ms_char, markup)
+
+ try:
+ # print("Trying to convert document to %s (errors=%s)" % (
+ # proposed, errors))
+ u = self._to_unicode(markup, proposed, errors)
+ self.unicode_markup = u
+ self.original_encoding = proposed
+ except Exception:
+ # print("That didn't work!")
+ # print(e)
+ return None
+ # print("Correct encoding: %s" % proposed)
+ return self.unicode_markup
+
+ def _to_unicode(
+ self, data: bytes, encoding: _Encoding, errors: str = "strict"
+ ) -> str:
+ """Given a bytestring and its encoding, decodes the string into Unicode.
+
+ :param encoding: The name of an encoding.
+ :param errors: An error handling strategy, used when calling `str`.
+ """
+ return str(data, encoding, errors)
+
+ @property
+ def declared_html_encoding(self) -> Optional[_Encoding]:
+ """If the markup is an HTML document, returns the encoding, if any,
+ declared *inside* the document.
+ """
+ if not self.is_html:
+ return None
+ return self.detector.declared_encoding
+
+ def find_codec(self, charset: _Encoding) -> Optional[str]:
+ """Look up the Python codec corresponding to a given character set.
+
+ :param charset: The name of a character set.
+ :return: The name of a Python codec.
+ """
+ value = (
+ self._codec(self.CHARSET_ALIASES.get(charset, charset))
+ or (charset and self._codec(charset.replace("-", "")))
+ or (charset and self._codec(charset.replace("-", "_")))
+ or (charset and charset.lower())
+ or charset
+ )
+ if value:
+ return value.lower()
+ return None
+
+ def _codec(self, charset: _Encoding) -> Optional[str]:
+ if not charset:
+ return charset
+ codec = None
+ try:
+ codecs.lookup(charset)
+ codec = charset
+ except (LookupError, ValueError):
+ pass
+ return codec
+
+ #: A partial mapping of ISO-Latin-1 to HTML entities/XML numeric entities.
+ #:
+ #: :meta hide-value:
+ MS_CHARS: Dict[bytes, Union[str, Tuple[str, str]]] = {
+ b"\x80": ("euro", "20AC"),
+ b"\x81": " ",
+ b"\x82": ("sbquo", "201A"),
+ b"\x83": ("fnof", "192"),
+ b"\x84": ("bdquo", "201E"),
+ b"\x85": ("hellip", "2026"),
+ b"\x86": ("dagger", "2020"),
+ b"\x87": ("Dagger", "2021"),
+ b"\x88": ("circ", "2C6"),
+ b"\x89": ("permil", "2030"),
+ b"\x8a": ("Scaron", "160"),
+ b"\x8b": ("lsaquo", "2039"),
+ b"\x8c": ("OElig", "152"),
+ b"\x8d": "?",
+ b"\x8e": ("#x17D", "17D"),
+ b"\x8f": "?",
+ b"\x90": "?",
+ b"\x91": ("lsquo", "2018"),
+ b"\x92": ("rsquo", "2019"),
+ b"\x93": ("ldquo", "201C"),
+ b"\x94": ("rdquo", "201D"),
+ b"\x95": ("bull", "2022"),
+ b"\x96": ("ndash", "2013"),
+ b"\x97": ("mdash", "2014"),
+ b"\x98": ("tilde", "2DC"),
+ b"\x99": ("trade", "2122"),
+ b"\x9a": ("scaron", "161"),
+ b"\x9b": ("rsaquo", "203A"),
+ b"\x9c": ("oelig", "153"),
+ b"\x9d": "?",
+ b"\x9e": ("#x17E", "17E"),
+ b"\x9f": ("Yuml", ""),
+ }
+
+ #: A parochial partial mapping of ISO-Latin-1 to ASCII. Contains
+ #: horrors like stripping diacritical marks to turn á into a, but also
+ #: contains non-horrors like turning “ into ".
+ #:
+ #: Seriously, don't use this for anything other than removing smart
+ #: quotes.
+ #:
+ #: :meta private:
+ MS_CHARS_TO_ASCII: Dict[bytes, str] = {
+ b"\x80": "EUR",
+ b"\x81": " ",
+ b"\x82": ",",
+ b"\x83": "f",
+ b"\x84": ",,",
+ b"\x85": "...",
+ b"\x86": "+",
+ b"\x87": "++",
+ b"\x88": "^",
+ b"\x89": "%",
+ b"\x8a": "S",
+ b"\x8b": "<",
+ b"\x8c": "OE",
+ b"\x8d": "?",
+ b"\x8e": "Z",
+ b"\x8f": "?",
+ b"\x90": "?",
+ b"\x91": "'",
+ b"\x92": "'",
+ b"\x93": '"',
+ b"\x94": '"',
+ b"\x95": "*",
+ b"\x96": "-",
+ b"\x97": "--",
+ b"\x98": "~",
+ b"\x99": "(TM)",
+ b"\x9a": "s",
+ b"\x9b": ">",
+ b"\x9c": "oe",
+ b"\x9d": "?",
+ b"\x9e": "z",
+ b"\x9f": "Y",
+ b"\xa0": " ",
+ b"\xa1": "!",
+ b"\xa2": "c",
+ b"\xa3": "GBP",
+ b"\xa4": "$", # This approximation is especially parochial--this is the
+ # generic currency symbol.
+ b"\xa5": "YEN",
+ b"\xa6": "|",
+ b"\xa7": "S",
+ b"\xa8": "..",
+ b"\xa9": "",
+ b"\xaa": "(th)",
+ b"\xab": "<<",
+ b"\xac": "!",
+ b"\xad": " ",
+ b"\xae": "(R)",
+ b"\xaf": "-",
+ b"\xb0": "o",
+ b"\xb1": "+-",
+ b"\xb2": "2",
+ b"\xb3": "3",
+ b"\xb4": "'",
+ b"\xb5": "u",
+ b"\xb6": "P",
+ b"\xb7": "*",
+ b"\xb8": ",",
+ b"\xb9": "1",
+ b"\xba": "(th)",
+ b"\xbb": ">>",
+ b"\xbc": "1/4",
+ b"\xbd": "1/2",
+ b"\xbe": "3/4",
+ b"\xbf": "?",
+ b"\xc0": "A",
+ b"\xc1": "A",
+ b"\xc2": "A",
+ b"\xc3": "A",
+ b"\xc4": "A",
+ b"\xc5": "A",
+ b"\xc6": "AE",
+ b"\xc7": "C",
+ b"\xc8": "E",
+ b"\xc9": "E",
+ b"\xca": "E",
+ b"\xcb": "E",
+ b"\xcc": "I",
+ b"\xcd": "I",
+ b"\xce": "I",
+ b"\xcf": "I",
+ b"\xd0": "D",
+ b"\xd1": "N",
+ b"\xd2": "O",
+ b"\xd3": "O",
+ b"\xd4": "O",
+ b"\xd5": "O",
+ b"\xd6": "O",
+ b"\xd7": "*",
+ b"\xd8": "O",
+ b"\xd9": "U",
+ b"\xda": "U",
+ b"\xdb": "U",
+ b"\xdc": "U",
+ b"\xdd": "Y",
+ b"\xde": "b",
+ b"\xdf": "B",
+ b"\xe0": "a",
+ b"\xe1": "a",
+ b"\xe2": "a",
+ b"\xe3": "a",
+ b"\xe4": "a",
+ b"\xe5": "a",
+ b"\xe6": "ae",
+ b"\xe7": "c",
+ b"\xe8": "e",
+ b"\xe9": "e",
+ b"\xea": "e",
+ b"\xeb": "e",
+ b"\xec": "i",
+ b"\xed": "i",
+ b"\xee": "i",
+ b"\xef": "i",
+ b"\xf0": "o",
+ b"\xf1": "n",
+ b"\xf2": "o",
+ b"\xf3": "o",
+ b"\xf4": "o",
+ b"\xf5": "o",
+ b"\xf6": "o",
+ b"\xf7": "/",
+ b"\xf8": "o",
+ b"\xf9": "u",
+ b"\xfa": "u",
+ b"\xfb": "u",
+ b"\xfc": "u",
+ b"\xfd": "y",
+ b"\xfe": "b",
+ b"\xff": "y",
+ }
+
+ #: A map used when removing rogue Windows-1252/ISO-8859-1
+ #: characters in otherwise UTF-8 documents.
+ #:
+ #: Note that \\x81, \\x8d, \\x8f, \\x90, and \\x9d are undefined in
+ #: Windows-1252.
+ #:
+ #: :meta hide-value:
+ WINDOWS_1252_TO_UTF8: Dict[int, bytes] = {
+ 0x80: b"\xe2\x82\xac", # €
+ 0x82: b"\xe2\x80\x9a", # ‚
+ 0x83: b"\xc6\x92", # ƒ
+ 0x84: b"\xe2\x80\x9e", # „
+ 0x85: b"\xe2\x80\xa6", # …
+ 0x86: b"\xe2\x80\xa0", # †
+ 0x87: b"\xe2\x80\xa1", # ‡
+ 0x88: b"\xcb\x86", # ˆ
+ 0x89: b"\xe2\x80\xb0", # ‰
+ 0x8A: b"\xc5\xa0", # Š
+ 0x8B: b"\xe2\x80\xb9", # ‹
+ 0x8C: b"\xc5\x92", # Œ
+ 0x8E: b"\xc5\xbd", # Ž
+ 0x91: b"\xe2\x80\x98", # ‘
+ 0x92: b"\xe2\x80\x99", # ’
+ 0x93: b"\xe2\x80\x9c", # “
+ 0x94: b"\xe2\x80\x9d", # ”
+ 0x95: b"\xe2\x80\xa2", # •
+ 0x96: b"\xe2\x80\x93", # –
+ 0x97: b"\xe2\x80\x94", # —
+ 0x98: b"\xcb\x9c", # ˜
+ 0x99: b"\xe2\x84\xa2", # ™
+ 0x9A: b"\xc5\xa1", # š
+ 0x9B: b"\xe2\x80\xba", # ›
+ 0x9C: b"\xc5\x93", # œ
+ 0x9E: b"\xc5\xbe", # ž
+ 0x9F: b"\xc5\xb8", # Ÿ
+ 0xA0: b"\xc2\xa0", #
+ 0xA1: b"\xc2\xa1", # ¡
+ 0xA2: b"\xc2\xa2", # ¢
+ 0xA3: b"\xc2\xa3", # £
+ 0xA4: b"\xc2\xa4", # ¤
+ 0xA5: b"\xc2\xa5", # ¥
+ 0xA6: b"\xc2\xa6", # ¦
+ 0xA7: b"\xc2\xa7", # §
+ 0xA8: b"\xc2\xa8", # ¨
+ 0xA9: b"\xc2\xa9", # ©
+ 0xAA: b"\xc2\xaa", # ª
+ 0xAB: b"\xc2\xab", # «
+ 0xAC: b"\xc2\xac", # ¬
+ 0xAD: b"\xc2\xad", #
+ 0xAE: b"\xc2\xae", # ®
+ 0xAF: b"\xc2\xaf", # ¯
+ 0xB0: b"\xc2\xb0", # °
+ 0xB1: b"\xc2\xb1", # ±
+ 0xB2: b"\xc2\xb2", # ²
+ 0xB3: b"\xc2\xb3", # ³
+ 0xB4: b"\xc2\xb4", # ´
+ 0xB5: b"\xc2\xb5", # µ
+ 0xB6: b"\xc2\xb6", # ¶
+ 0xB7: b"\xc2\xb7", # ·
+ 0xB8: b"\xc2\xb8", # ¸
+ 0xB9: b"\xc2\xb9", # ¹
+ 0xBA: b"\xc2\xba", # º
+ 0xBB: b"\xc2\xbb", # »
+ 0xBC: b"\xc2\xbc", # ¼
+ 0xBD: b"\xc2\xbd", # ½
+ 0xBE: b"\xc2\xbe", # ¾
+ 0xBF: b"\xc2\xbf", # ¿
+ 0xC0: b"\xc3\x80", # À
+ 0xC1: b"\xc3\x81", # Á
+ 0xC2: b"\xc3\x82", # Â
+ 0xC3: b"\xc3\x83", # Ã
+ 0xC4: b"\xc3\x84", # Ä
+ 0xC5: b"\xc3\x85", # Å
+ 0xC6: b"\xc3\x86", # Æ
+ 0xC7: b"\xc3\x87", # Ç
+ 0xC8: b"\xc3\x88", # È
+ 0xC9: b"\xc3\x89", # É
+ 0xCA: b"\xc3\x8a", # Ê
+ 0xCB: b"\xc3\x8b", # Ë
+ 0xCC: b"\xc3\x8c", # Ì
+ 0xCD: b"\xc3\x8d", # Í
+ 0xCE: b"\xc3\x8e", # Î
+ 0xCF: b"\xc3\x8f", # Ï
+ 0xD0: b"\xc3\x90", # Ð
+ 0xD1: b"\xc3\x91", # Ñ
+ 0xD2: b"\xc3\x92", # Ò
+ 0xD3: b"\xc3\x93", # Ó
+ 0xD4: b"\xc3\x94", # Ô
+ 0xD5: b"\xc3\x95", # Õ
+ 0xD6: b"\xc3\x96", # Ö
+ 0xD7: b"\xc3\x97", # ×
+ 0xD8: b"\xc3\x98", # Ø
+ 0xD9: b"\xc3\x99", # Ù
+ 0xDA: b"\xc3\x9a", # Ú
+ 0xDB: b"\xc3\x9b", # Û
+ 0xDC: b"\xc3\x9c", # Ü
+ 0xDD: b"\xc3\x9d", # Ý
+ 0xDE: b"\xc3\x9e", # Þ
+ 0xDF: b"\xc3\x9f", # ß
+ 0xE0: b"\xc3\xa0", # à
+ 0xE1: b"\xa1", # á
+ 0xE2: b"\xc3\xa2", # â
+ 0xE3: b"\xc3\xa3", # ã
+ 0xE4: b"\xc3\xa4", # ä
+ 0xE5: b"\xc3\xa5", # å
+ 0xE6: b"\xc3\xa6", # æ
+ 0xE7: b"\xc3\xa7", # ç
+ 0xE8: b"\xc3\xa8", # è
+ 0xE9: b"\xc3\xa9", # é
+ 0xEA: b"\xc3\xaa", # ê
+ 0xEB: b"\xc3\xab", # ë
+ 0xEC: b"\xc3\xac", # ì
+ 0xED: b"\xc3\xad", # í
+ 0xEE: b"\xc3\xae", # î
+ 0xEF: b"\xc3\xaf", # ï
+ 0xF0: b"\xc3\xb0", # ð
+ 0xF1: b"\xc3\xb1", # ñ
+ 0xF2: b"\xc3\xb2", # ò
+ 0xF3: b"\xc3\xb3", # ó
+ 0xF4: b"\xc3\xb4", # ô
+ 0xF5: b"\xc3\xb5", # õ
+ 0xF6: b"\xc3\xb6", # ö
+ 0xF7: b"\xc3\xb7", # ÷
+ 0xF8: b"\xc3\xb8", # ø
+ 0xF9: b"\xc3\xb9", # ù
+ 0xFA: b"\xc3\xba", # ú
+ 0xFB: b"\xc3\xbb", # û
+ 0xFC: b"\xc3\xbc", # ü
+ 0xFD: b"\xc3\xbd", # ý
+ 0xFE: b"\xc3\xbe", # þ
+ }
+
+ #: :meta private:
+ MULTIBYTE_MARKERS_AND_SIZES: List[Tuple[int, int, int]] = [
+ (0xC2, 0xDF, 2), # 2-byte characters start with a byte C2-DF
+ (0xE0, 0xEF, 3), # 3-byte characters start with E0-EF
+ (0xF0, 0xF4, 4), # 4-byte characters start with F0-F4
+ ]
+
+ #: :meta private:
+ FIRST_MULTIBYTE_MARKER: int = MULTIBYTE_MARKERS_AND_SIZES[0][0]
+
+ #: :meta private:
+ LAST_MULTIBYTE_MARKER: int = MULTIBYTE_MARKERS_AND_SIZES[-1][1]
+
+ @classmethod
+ def detwingle(
+ cls,
+ in_bytes: bytes,
+ main_encoding: _Encoding = "utf8",
+ embedded_encoding: _Encoding = "windows-1252",
+ ) -> bytes:
+ """Fix characters from one encoding embedded in some other encoding.
+
+ Currently the only situation supported is Windows-1252 (or its
+ subset ISO-8859-1), embedded in UTF-8.
+
+ :param in_bytes: A bytestring that you suspect contains
+ characters from multiple encodings. Note that this *must*
+ be a bytestring. If you've already converted the document
+ to Unicode, you're too late.
+ :param main_encoding: The primary encoding of ``in_bytes``.
+ :param embedded_encoding: The encoding that was used to embed characters
+ in the main document.
+ :return: A bytestring similar to ``in_bytes``, in which
+ ``embedded_encoding`` characters have been converted to
+ their ``main_encoding`` equivalents.
+ """
+ if embedded_encoding.replace("_", "-").lower() not in (
+ "windows-1252",
+ "windows_1252",
+ ):
+ raise NotImplementedError(
+ "Windows-1252 and ISO-8859-1 are the only currently supported "
+ "embedded encodings."
+ )
+
+ if main_encoding.lower() not in ("utf8", "utf-8"):
+ raise NotImplementedError(
+ "UTF-8 is the only currently supported main encoding."
+ )
+
+ byte_chunks = []
+
+ chunk_start = 0
+ pos = 0
+ while pos < len(in_bytes):
+ byte = in_bytes[pos]
+ if byte >= cls.FIRST_MULTIBYTE_MARKER and byte <= cls.LAST_MULTIBYTE_MARKER:
+ # This is the start of a UTF-8 multibyte character. Skip
+ # to the end.
+ for start, end, size in cls.MULTIBYTE_MARKERS_AND_SIZES:
+ if byte >= start and byte <= end:
+ pos += size
+ break
+ elif byte >= 0x80 and byte in cls.WINDOWS_1252_TO_UTF8:
+ # We found a Windows-1252 character!
+ # Save the string up to this point as a chunk.
+ byte_chunks.append(in_bytes[chunk_start:pos])
+
+ # Now translate the Windows-1252 character into UTF-8
+ # and add it as another, one-byte chunk.
+ byte_chunks.append(cls.WINDOWS_1252_TO_UTF8[byte])
+ pos += 1
+ chunk_start = pos
+ else:
+ # Go on to the next character.
+ pos += 1
+ if chunk_start == 0:
+ # The string is unchanged.
+ return in_bytes
+ else:
+ # Store the final chunk.
+ byte_chunks.append(in_bytes[chunk_start:])
+ return b"".join(byte_chunks)
diff --git a/venv/lib/python3.9/site-packages/bs4/diagnose.py b/venv/lib/python3.9/site-packages/bs4/diagnose.py
new file mode 100644
index 0000000..4d11239
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/bs4/diagnose.py
@@ -0,0 +1,268 @@
+"""Diagnostic functions, mainly for use when doing tech support."""
+
+# Use of this source code is governed by the MIT license.
+__license__ = "MIT"
+
+import cProfile
+from io import BytesIO
+from html.parser import HTMLParser
+import bs4
+from bs4 import BeautifulSoup, __version__
+from bs4.builder import builder_registry
+from typing import (
+ Any,
+ IO,
+ List,
+ Optional,
+ Tuple,
+ TYPE_CHECKING,
+)
+
+if TYPE_CHECKING:
+ from bs4._typing import _IncomingMarkup
+
+import pstats
+import random
+import tempfile
+import time
+import traceback
+import sys
+
+
+def diagnose(data: "_IncomingMarkup") -> None:
+ """Diagnostic suite for isolating common problems.
+
+ :param data: Some markup that needs to be explained.
+ :return: None; diagnostics are printed to standard output.
+ """
+ print(("Diagnostic running on Beautiful Soup %s" % __version__))
+ print(("Python version %s" % sys.version))
+
+ basic_parsers = ["html.parser", "html5lib", "lxml"]
+ for name in basic_parsers:
+ for builder in builder_registry.builders:
+ if name in builder.features:
+ break
+ else:
+ basic_parsers.remove(name)
+ print(
+ ("I noticed that %s is not installed. Installing it may help." % name)
+ )
+
+ if "lxml" in basic_parsers:
+ basic_parsers.append("lxml-xml")
+ try:
+ from lxml import etree # type:ignore
+
+ print(("Found lxml version %s" % ".".join(map(str, etree.LXML_VERSION))))
+ except ImportError:
+ print("lxml is not installed or couldn't be imported.")
+
+ if "html5lib" in basic_parsers:
+ try:
+ import html5lib
+
+ print(("Found html5lib version %s" % html5lib.__version__))
+ except ImportError:
+ print("html5lib is not installed or couldn't be imported.")
+
+ if hasattr(data, "read"):
+ data = data.read()
+
+ for parser in basic_parsers:
+ print(("Trying to parse your markup with %s" % parser))
+ success = False
+ try:
+ soup = BeautifulSoup(data, features=parser)
+ success = True
+ except Exception:
+ print(("%s could not parse the markup." % parser))
+ traceback.print_exc()
+ if success:
+ print(("Here's what %s did with the markup:" % parser))
+ print((soup.prettify()))
+
+ print(("-" * 80))
+
+
+def lxml_trace(data: "_IncomingMarkup", html: bool = True, **kwargs: Any) -> None:
+ """Print out the lxml events that occur during parsing.
+
+ This lets you see how lxml parses a document when no Beautiful
+ Soup code is running. You can use this to determine whether
+ an lxml-specific problem is in Beautiful Soup's lxml tree builders
+ or in lxml itself.
+
+ :param data: Some markup.
+ :param html: If True, markup will be parsed with lxml's HTML parser.
+ If False, lxml's XML parser will be used.
+ """
+ from lxml import etree
+
+ recover = kwargs.pop("recover", True)
+ if isinstance(data, str):
+ data = data.encode("utf8")
+ if not isinstance(data, IO):
+ reader = BytesIO(data)
+ else:
+ # Assume data is already a binary file-like object and read from it directly.
+ reader = data
+ for event, element in etree.iterparse(reader, html=html, recover=recover, **kwargs):
+ print(("%s, %4s, %s" % (event, element.tag, element.text)))
+
+
+class AnnouncingParser(HTMLParser):
+ """Subclass of HTMLParser that announces parse events, without doing
+ anything else.
+
+ You can use this to get a picture of how html.parser sees a given
+ document. The easiest way to do this is to call `htmlparser_trace`.
+ """
+
+ def _p(self, s: str) -> None:
+ print(s)
+
+ def handle_starttag(
+ self,
+ name: str,
+ attrs: List[Tuple[str, Optional[str]]],
+ handle_empty_element: bool = True,
+ ) -> None:
+ self._p(f"{name} {attrs} START")
+
+ def handle_endtag(self, name: str, check_already_closed: bool = True) -> None:
+ self._p("%s END" % name)
+
+ def handle_data(self, data: str) -> None:
+ self._p("%s DATA" % data)
+
+ def handle_charref(self, name: str) -> None:
+ self._p("%s CHARREF" % name)
+
+ def handle_entityref(self, name: str) -> None:
+ self._p("%s ENTITYREF" % name)
+
+ def handle_comment(self, data: str) -> None:
+ self._p("%s COMMENT" % data)
+
+ def handle_decl(self, data: str) -> None:
+ self._p("%s DECL" % data)
+
+ def unknown_decl(self, data: str) -> None:
+ self._p("%s UNKNOWN-DECL" % data)
+
+ def handle_pi(self, data: str) -> None:
+ self._p("%s PI" % data)
+
+
+def htmlparser_trace(data: str) -> None:
+ """Print out the HTMLParser events that occur during parsing.
+
+ This lets you see how HTMLParser parses a document when no
+ Beautiful Soup code is running.
+
+ :param data: Some markup.
+ """
+ parser = AnnouncingParser()
+ parser.feed(data)
+
+
+_vowels: str = "aeiou"
+_consonants: str = "bcdfghjklmnpqrstvwxyz"
+
+
+def rword(length: int = 5) -> str:
+ """Generate a random word-like string.
+
+ :meta private:
+ """
+ s = ""
+ for i in range(length):
+ if i % 2 == 0:
+ t = _consonants
+ else:
+ t = _vowels
+ s += random.choice(t)
+ return s
+
+
+def rsentence(length: int = 4) -> str:
+ """Generate a random sentence-like string.
+
+ :meta private:
+ """
+ return " ".join(rword(random.randint(4, 9)) for i in range(length))
+
+
+def rdoc(num_elements: int = 1000) -> str:
+ """Randomly generate an invalid HTML document.
+
+ :meta private:
+ """
+ tag_names = ["p", "div", "span", "i", "b", "script", "table"]
+ elements = []
+ for i in range(num_elements):
+ choice = random.randint(0, 3)
+ if choice == 0:
+ # New tag.
+ tag_name = random.choice(tag_names)
+ elements.append("<%s>" % tag_name)
+ elif choice == 1:
+ elements.append(rsentence(random.randint(1, 4)))
+ elif choice == 2:
+ # Close a tag.
+ tag_name = random.choice(tag_names)
+ elements.append("%s>" % tag_name)
+ return "" + "\n".join(elements) + ""
+
+
+def benchmark_parsers(num_elements: int = 100000) -> None:
+ """Very basic head-to-head performance benchmark."""
+ print(("Comparative parser benchmark on Beautiful Soup %s" % __version__))
+ data = rdoc(num_elements)
+ print(("Generated a large invalid HTML document (%d bytes)." % len(data)))
+
+ for parser_name in ["lxml", ["lxml", "html"], "html5lib", "html.parser"]:
+ success = False
+ try:
+ a = time.time()
+ BeautifulSoup(data, parser_name)
+ b = time.time()
+ success = True
+ except Exception:
+ print(("%s could not parse the markup." % parser_name))
+ traceback.print_exc()
+ if success:
+ print(("BS4+%s parsed the markup in %.2fs." % (parser_name, b - a)))
+
+ from lxml import etree
+
+ a = time.time()
+ etree.HTML(data)
+ b = time.time()
+ print(("Raw lxml parsed the markup in %.2fs." % (b - a)))
+
+ import html5lib
+
+ parser = html5lib.HTMLParser()
+ a = time.time()
+ parser.parse(data)
+ b = time.time()
+ print(("Raw html5lib parsed the markup in %.2fs." % (b - a)))
+
+
+def profile(num_elements: int = 100000, parser: str = "lxml") -> None:
+ """Use Python's profiler on a randomly generated document."""
+ filehandle = tempfile.NamedTemporaryFile()
+ filename = filehandle.name
+
+ data = rdoc(num_elements)
+ vars = dict(bs4=bs4, data=data, parser=parser)
+ cProfile.runctx("bs4.BeautifulSoup(data, parser)", vars, vars, filename)
+
+ stats = pstats.Stats(filename)
+ # stats.strip_dirs()
+ stats.sort_stats("cumulative")
+ stats.print_stats("_html5lib|bs4", 50)
+
+
+# If this file is run as a script, standard input is diagnosed.
+if __name__ == "__main__":
+ diagnose(sys.stdin.read())
diff --git a/venv/lib/python3.9/site-packages/bs4/element.py b/venv/lib/python3.9/site-packages/bs4/element.py
new file mode 100644
index 0000000..dd07d8e
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/bs4/element.py
@@ -0,0 +1,3211 @@
+from __future__ import annotations
+
+# Use of this source code is governed by the MIT license.
+__license__ = "MIT"
+
+import re
+import warnings
+
+from bs4.css import CSS
+from bs4._deprecation import (
+ _deprecated,
+ _deprecated_alias,
+ _deprecated_function_alias,
+)
+from bs4.formatter import (
+ Formatter,
+ HTMLFormatter,
+ XMLFormatter,
+)
+from bs4._warnings import AttributeResemblesVariableWarning
+
+from typing import (
+ Any,
+ Callable,
+ Dict,
+ Generic,
+ Iterable,
+ Iterator,
+ List,
+ Mapping,
+ MutableSequence,
+ Optional,
+ Pattern,
+ Set,
+ TYPE_CHECKING,
+ Tuple,
+ Type,
+ TypeVar,
+ Union,
+ cast,
+ overload,
+)
+from typing_extensions import (
+ Self,
+ TypeAlias,
+)
+
+if TYPE_CHECKING:
+ from bs4 import BeautifulSoup
+ from bs4.builder import TreeBuilder
+ from bs4.filter import ElementFilter
+ from bs4.formatter import (
+ _EntitySubstitutionFunction,
+ _FormatterOrName,
+ )
+ from bs4._typing import (
+ _AtMostOneElement,
+ _AtMostOneTag,
+ _AtMostOneNavigableString,
+ _AttributeValue,
+ _AttributeValues,
+ _Encoding,
+ _InsertableElement,
+ _OneElement,
+ _QueryResults,
+ _RawOrProcessedAttributeValues,
+ _StrainableElement,
+ _StrainableAttribute,
+ _StrainableAttributes,
+ _StrainableString,
+ _SomeNavigableStrings,
+ _SomeTags,
+ )
+
+_OneOrMoreStringTypes: TypeAlias = Union[
+ Type["NavigableString"], Iterable[Type["NavigableString"]]
+]
+
+_FindMethodName: TypeAlias = Optional[Union["_StrainableElement", "ElementFilter"]]
+
+# Deprecated module-level attributes.
+# See https://peps.python.org/pep-0562/
+_deprecated_names = dict(
+ whitespace_re="The {name} attribute was deprecated in version 4.7.0. If you need it, make your own copy."
+)
+#: :meta private:
+_deprecated_whitespace_re: Pattern[str] = re.compile(r"\s+")
+
+
+def __getattr__(name: str) -> Any:
+ if name in _deprecated_names:
+ message = _deprecated_names[name]
+ warnings.warn(message.format(name=name), DeprecationWarning, stacklevel=2)
+
+ return globals()[f"_deprecated_{name}"]
+ raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
+
+
+#: Documents output by Beautiful Soup will be encoded with
+#: this encoding unless you specify otherwise.
+DEFAULT_OUTPUT_ENCODING: str = "utf-8"
+
+#: A regular expression that can be used to split on whitespace.
+nonwhitespace_re: Pattern[str] = re.compile(r"\S+")
+
+#: These encodings are recognized by Python (so `Tag.encode`
+#: could theoretically support them) but XML and HTML don't recognize
+#: them (so they should not show up in an XML or HTML document as that
+#: document's encoding).
+#:
+#: If an XML document is encoded in one of these encodings, no encoding
+#: will be mentioned in the XML declaration. If an HTML document is
+#: encoded in one of these encodings, and the HTML document has a
+ #: <meta> tag that mentions an encoding, the encoding will be given as
+#: the empty string.
+#:
+#: Source:
+#: Python documentation, `Python Specific Encodings `_
+PYTHON_SPECIFIC_ENCODINGS: Set[_Encoding] = set(
+ [
+ "idna",
+ "mbcs",
+ "oem",
+ "palmos",
+ "punycode",
+ "raw_unicode_escape",
+ "undefined",
+ "unicode_escape",
+ "raw-unicode-escape",
+ "unicode-escape",
+ "string-escape",
+ "string_escape",
+ ]
+)
+
+
+class NamespacedAttribute(str):
+ """A namespaced attribute (e.g. the 'xml:lang' in 'xml:lang="en"')
+ which remembers the namespace prefix ('xml') and the name ('lang')
+ that were used to create it.
+ """
+
+ prefix: Optional[str]
+ name: Optional[str]
+ namespace: Optional[str]
+
+ def __new__(
+ cls,
+ prefix: Optional[str],
+ name: Optional[str] = None,
+ namespace: Optional[str] = None,
+ ) -> Self:
+ if not name:
+ # This is the default namespace. Its name "has no value"
+ # per https://www.w3.org/TR/xml-names/#defaulting
+ name = None
+
+ if not name:
+ obj = str.__new__(cls, prefix)
+ elif not prefix:
+ # Not really namespaced.
+ obj = str.__new__(cls, name)
+ else:
+ obj = str.__new__(cls, prefix + ":" + name)
+ obj.prefix = prefix
+ obj.name = name
+ obj.namespace = namespace
+ return obj
+
+
+class AttributeValueWithCharsetSubstitution(str):
+ """An abstract class standing in for a character encoding specified
+ inside an HTML ``<meta>`` tag.
+
+ Subclasses exist for each place such a character encoding might be
+ found: either inside the ``charset`` attribute
+ (`CharsetMetaAttributeValue`) or inside the ``content`` attribute
+ (`ContentMetaAttributeValue`)
+
+ This allows Beautiful Soup to replace that part of the HTML file
+ with a different encoding when outputting a tree as a string.
+ """
+
+ # The original, un-encoded value of the ``content`` attribute.
+ #: :meta private:
+ original_value: str
+
+ def substitute_encoding(self, eventual_encoding: str) -> str:
+ """Do whatever's necessary in this implementation-specific
+ portion an HTML document to substitute in a specific encoding.
+ """
+ raise NotImplementedError()
+
+
+class CharsetMetaAttributeValue(AttributeValueWithCharsetSubstitution):
+ """A generic stand-in for the value of a `` `` tag's ``charset``
+ attribute.
+
+ When Beautiful Soup parses the markup ``<meta charset="utf8">``, the
+ value of the ``charset`` attribute will become one of these objects.
+
+ If the document is later encoded to an encoding other than UTF-8, its
+ ``<meta>`` tag will mention the new encoding instead of ``utf8``.
+ """
+
+ def __new__(cls, original_value: str) -> Self:
+ # We don't need to use the original value for anything, but
+ # it might be useful for the user to know.
+ obj = str.__new__(cls, original_value)
+ obj.original_value = original_value
+ return obj
+
+ def substitute_encoding(self, eventual_encoding: _Encoding = "utf-8") -> str:
+ """When an HTML document is being encoded to a given encoding, the
+ value of a ``<meta>`` tag's ``charset`` becomes the name of
+ the encoding.
+ """
+ if eventual_encoding in PYTHON_SPECIFIC_ENCODINGS:
+ return ""
+ return eventual_encoding
+
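+# Illustrative sketch (editor's example): when a document is re-encoded, the
+# stored charset value is simply replaced by the new encoding's name, while
+# the value originally seen in the markup remains available.
+#
+#   >>> value = CharsetMetaAttributeValue("utf8")
+#   >>> value.substitute_encoding("latin-1")
+#   'latin-1'
+#   >>> value.original_value
+#   'utf8'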
+
+class AttributeValueList(List[str]):
+ """Class for the list used to hold the values of attributes which
+ have multiple values (such as HTML's 'class'). It's just a regular
+ list, but you can subclass it and pass it in to the TreeBuilder
+ constructor as attribute_value_list_class, to have your subclass
+ instantiated instead.
+ """
+
+
+class AttributeDict(Dict[Any,Any]):
+ """Superclass for the dictionary used to hold a tag's
+ attributes. You can use this, but it's just a regular dict with no
+ special logic.
+ """
+
+
+class XMLAttributeDict(AttributeDict):
+ """A dictionary for holding a Tag's attributes, which processes
+ incoming values for consistency with the XML spec.
+ """
+
+ def __setitem__(self, key: str, value: Any) -> None:
+ """Set an attribute value, possibly modifying it to comply with
+ the XML spec.
+
+ This just means converting common non-string values to
+ strings: XML attributes may have "any literal string as a
+ value."
+ """
+ if value is None:
+ value = ""
+ if isinstance(value, bool):
+ # XML does not define any rules for boolean attributes.
+ # Preserve the old Beautiful Soup behavior (a bool that
+ # gets converted to a string on output) rather than
+ # guessing what the value should be.
+ pass
+ elif isinstance(value, (int, float)):
+ # It's dangerous to convert _every_ attribute value into a
+ # plain string, since an attribute value may be a more
+ # sophisticated string-like object
+ # (e.g. CharsetMetaAttributeValue). But we can definitely
+ # convert numeric values and booleans, which are the most common.
+ value = str(value)
+
+ super().__setitem__(key, value)
+
+
+class HTMLAttributeDict(AttributeDict):
+ """A dictionary for holding a Tag's attributes, which processes
+ incoming values for consistency with the HTML spec, which says
+ 'Attribute values are a mixture of text and character
+ references...'
+
+ Basically, this means converting common non-string values into
+ strings, like XMLAttributeDict, though HTML also has some rules
+ around boolean attributes that XML doesn't have.
+ """
+
+ def __setitem__(self, key: str, value: Any) -> None:
+ """Set an attribute value, possibly modifying it to comply
+ with the HTML spec,
+ """
+ if value in (False, None):
+ # 'The values "true" and "false" are not allowed on
+ # boolean attributes. To represent a false value, the
+ # attribute has to be omitted altogether.'
+ if key in self:
+ del self[key]
+ return
+ if isinstance(value, bool):
+ # 'If the [boolean] attribute is present, its value must
+ # either be the empty string or a value that is an ASCII
+ # case-insensitive match for the attribute's canonical
+ # name, with no leading or trailing whitespace.'
+ #
+ # [fixme] It's not clear to me whether "canonical name"
+ # means fully-qualified name, unqualified name, or
+ # (probably not) name with namespace prefix. For now I'm
+ # going with unqualified name.
+ if isinstance(key, NamespacedAttribute):
+ value = key.name
+ else:
+ value = key
+ elif isinstance(value, (int, float)):
+ # See note in XMLAttributeDict for the reasoning why we
+ # only do this to numbers.
+ value = str(value)
+ super().__setitem__(key, value)
+
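+# Illustrative sketch (editor's example) of the boolean handling above: True
+# stores the attribute's own name, while False or None removes the attribute.
+#
+#   >>> attrs = HTMLAttributeDict()
+#   >>> attrs["disabled"] = True
+#   >>> attrs
+#   {'disabled': 'disabled'}
+#   >>> attrs["disabled"] = False
+#   >>> attrs
+#   {}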
+
+class ContentMetaAttributeValue(AttributeValueWithCharsetSubstitution):
+ """A generic stand-in for the value of a `` `` tag's ``content``
+ attribute.
+
+ When Beautiful Soup parses the markup:
+ ``<meta http-equiv="content-type" content="text/html; charset=utf8">``
+
+ The value of the ``content`` attribute will become one of these objects.
+
+ If the document is later encoded to an encoding other than UTF-8, its
+ ``<meta>`` tag will mention the new encoding instead of ``utf8``.
+ """
+
+ #: Match the 'charset' argument inside the 'content' attribute
+ #: of a <meta> tag.
+ #: :meta private:
+ CHARSET_RE: Pattern[str] = re.compile(r"((^|;)\s*charset=)([^;]*)", re.M)
+
+ def __new__(cls, original_value: str) -> Self:
+ cls.CHARSET_RE.search(original_value)
+ obj = str.__new__(cls, original_value)
+ obj.original_value = original_value
+ return obj
+
+ def substitute_encoding(self, eventual_encoding: _Encoding = "utf-8") -> str:
+ """When an HTML document is being encoded to a given encoding, the
+ value of the ``charset=`` in a `` `` tag's ``content`` becomes
+ the name of the encoding.
+ """
+ if eventual_encoding in PYTHON_SPECIFIC_ENCODINGS:
+ return self.CHARSET_RE.sub("", self.original_value)
+
+ def rewrite(match: re.Match[str]) -> str:
+ return match.group(1) + eventual_encoding
+
+ return self.CHARSET_RE.sub(rewrite, self.original_value)
+
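+# Illustrative sketch (editor's example): only the charset= portion of the
+# content attribute is rewritten; the surrounding MIME type is preserved.
+#
+#   >>> value = ContentMetaAttributeValue("text/html; charset=utf8")
+#   >>> value.substitute_encoding("latin-1")
+#   'text/html; charset=latin-1'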
+
+class PageElement(object):
+ """An abstract class representing a single element in the parse tree.
+
+ `NavigableString`, `Tag`, etc. are all subclasses of
+ `PageElement`. For this reason you'll see a lot of methods that
+ return `PageElement`, but you'll never see an actual `PageElement`
+ object. For the most part you can think of `PageElement` as
+ meaning "a `Tag` or a `NavigableString`."
+ """
+
+ #: In general, we can't tell just by looking at an element whether
+ #: it's contained in an XML document or an HTML document. But for
+ #: `Tag` objects (q.v.) we can store this information at parse time.
+ #: :meta private:
+ known_xml: Optional[bool] = None
+
+ #: Whether or not this element has been decomposed from the tree
+ #: it was created in.
+ _decomposed: bool
+
+ parent: Optional[Tag]
+ next_element: _AtMostOneElement
+ previous_element: _AtMostOneElement
+ next_sibling: _AtMostOneElement
+ previous_sibling: _AtMostOneElement
+
+ #: Whether or not this element is hidden from generated output.
+ #: Only the `BeautifulSoup` object itself is hidden.
+ hidden: bool = False
+
+ def setup(
+ self,
+ parent: Optional[Tag] = None,
+ previous_element: _AtMostOneElement = None,
+ next_element: _AtMostOneElement = None,
+ previous_sibling: _AtMostOneElement = None,
+ next_sibling: _AtMostOneElement = None,
+ ) -> None:
+ """Sets up the initial relations between this element and
+ other elements.
+
+ :param parent: The parent of this element.
+
+ :param previous_element: The element parsed immediately before
+ this one.
+
+ :param next_element: The element parsed immediately after
+ this one.
+
+ :param previous_sibling: The most recently encountered element
+ on the same level of the parse tree as this one.
+
+ :param next_sibling: The next element to be encountered
+ on the same level of the parse tree as this one.
+ """
+ self.parent = parent
+
+ self.previous_element = previous_element
+ if self.previous_element is not None:
+ self.previous_element.next_element = self
+
+ self.next_element = next_element
+ if self.next_element is not None:
+ self.next_element.previous_element = self
+
+ self.next_sibling = next_sibling
+ if self.next_sibling is not None:
+ self.next_sibling.previous_sibling = self
+
+ if (
+ previous_sibling is None
+ and self.parent is not None
+ and self.parent.contents
+ ):
+ previous_sibling = self.parent.contents[-1]
+
+ self.previous_sibling = previous_sibling
+ if self.previous_sibling is not None:
+ self.previous_sibling.next_sibling = self
+
+ def format_string(self, s: str, formatter: Optional[_FormatterOrName]) -> str:
+ """Format the given string using the given formatter.
+
+ :param s: A string.
+ :param formatter: A Formatter object, or a string naming one of the standard formatters.
+ """
+ if formatter is None:
+ return s
+ if not isinstance(formatter, Formatter):
+ formatter = self.formatter_for_name(formatter)
+ output = formatter.substitute(s)
+ return output
+
+ def formatter_for_name(
+ self, formatter_name: Union[_FormatterOrName, _EntitySubstitutionFunction]
+ ) -> Formatter:
+ """Look up or create a Formatter for the given identifier,
+ if necessary.
+
+ :param formatter: Can be a `Formatter` object (used as-is), a
+ function (used as the entity substitution hook for an
+ `bs4.formatter.XMLFormatter` or
+ `bs4.formatter.HTMLFormatter`), or a string (used to look
+ up an `bs4.formatter.XMLFormatter` or
+ `bs4.formatter.HTMLFormatter` in the appropriate registry).
+
+ """
+ if isinstance(formatter_name, Formatter):
+ return formatter_name
+ c: type[Formatter]
+ registry: Mapping[Optional[str], Formatter]
+ if self._is_xml:
+ c = XMLFormatter
+ registry = XMLFormatter.REGISTRY
+ else:
+ c = HTMLFormatter
+ registry = HTMLFormatter.REGISTRY
+ if callable(formatter_name):
+ return c(entity_substitution=formatter_name)
+ return registry[formatter_name]
+
+ @property
+ def _is_xml(self) -> bool:
+ """Is this element part of an XML tree or an HTML tree?
+
+ This is used in formatter_for_name, when deciding whether an
+ XMLFormatter or HTMLFormatter is more appropriate. It can be
+ inefficient, but it should be called very rarely.
+ """
+ if self.known_xml is not None:
+ # Most of the time we will have determined this when the
+ # document is parsed.
+ return self.known_xml
+
+ # Otherwise, it's likely that this element was created by
+ # direct invocation of the constructor from within the user's
+ # Python code.
+ if self.parent is None:
+ # This is the top-level object. It should have .known_xml set
+ # from tree creation. If not, take a guess--BS is usually
+ # used on HTML markup.
+ return getattr(self, "is_xml", False)
+ return self.parent._is_xml
+
+ nextSibling = _deprecated_alias("nextSibling", "next_sibling", "4.0.0")
+ previousSibling = _deprecated_alias("previousSibling", "previous_sibling", "4.0.0")
+
+ def __deepcopy__(self, memo: Dict[Any, Any], recursive: bool = False) -> Self:
+ raise NotImplementedError()
+
+ def __copy__(self) -> Self:
+ """A copy of a PageElement can only be a deep copy, because
+ only one PageElement can occupy a given place in a parse tree.
+ """
+ return self.__deepcopy__({})
+
+ default: Iterable[type[NavigableString]] = tuple() #: :meta private:
+
+ def _all_strings(
+ self, strip: bool = False, types: Iterable[type[NavigableString]] = default
+ ) -> Iterator[str]:
+ """Yield all strings of certain classes, possibly stripping them.
+
+ This is implemented differently in `Tag` and `NavigableString`.
+ """
+ raise NotImplementedError()
+
+ @property
+ def stripped_strings(self) -> Iterator[str]:
+ """Yield all interesting strings in this PageElement, stripping them
+ first.
+
+ See `Tag` for information on which strings are considered
+ interesting in a given context.
+ """
+ for string in self._all_strings(True):
+ yield string
+
+ def get_text(
+ self,
+ separator: str = "",
+ strip: bool = False,
+ types: Iterable[Type[NavigableString]] = default,
+ ) -> str:
+ """Get all child strings of this PageElement, concatenated using the
+ given separator.
+
+ :param separator: Strings will be concatenated using this separator.
+
+ :param strip: If True, strings will be stripped before being
+ concatenated.
+
+ :param types: A tuple of NavigableString subclasses. Any
+ strings of a subclass not found in this list will be
+ ignored. Although there are exceptions, the default
+ behavior in most cases is to consider only NavigableString
+ and CData objects. That means no comments, processing
+ instructions, etc.
+
+ :return: A string.
+ """
+ return separator.join([s for s in self._all_strings(strip, types=types)])
+
+ getText = get_text
+ text = property(get_text)
+
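+ # Illustrative sketch (editor's example, assuming the usual bs4 import):
+ #
+ #   >>> from bs4 import BeautifulSoup
+ #   >>> soup = BeautifulSoup("<p>Hello, <b>world</b>!</p>", "html.parser")
+ #   >>> soup.get_text()
+ #   'Hello, world!'
+ #   >>> soup.get_text("|", strip=True)
+ #   'Hello,|world|!'
+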
+ def replace_with(self, *args: _InsertableElement) -> Self:
+ """Replace this `PageElement` with one or more other elements,
+ objects, keeping the rest of the tree the same.
+
+ :return: This `PageElement`, no longer part of the tree.
+ """
+ if self.parent is None:
+ raise ValueError(
+ "Cannot replace one element with another when the "
+ "element to be replaced is not part of a tree."
+ )
+ if len(args) == 1 and args[0] is self:
+ # Replacing an element with itself is a no-op.
+ return self
+ if any(x is self.parent for x in args):
+ raise ValueError("Cannot replace a Tag with its parent.")
+ old_parent = self.parent
+ my_index = self.parent.index(self)
+ self.extract(_self_index=my_index)
+ for idx, replace_with in enumerate(args, start=my_index):
+ old_parent.insert(idx, replace_with)
+ return self
+
+ replaceWith = _deprecated_function_alias("replaceWith", "replace_with", "4.0.0")
+
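+ # Illustrative sketch (editor's example): replace_with() returns the element
+ # that was removed, so it can be reused elsewhere.
+ #
+ #   >>> from bs4 import BeautifulSoup
+ #   >>> soup = BeautifulSoup("<p><b>bold</b></p>", "html.parser")
+ #   >>> old = soup.b.replace_with(soup.new_tag("i"))
+ #   >>> soup
+ #   <p><i></i></p>
+ #   >>> old
+ #   <b>bold</b>
+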
+ def wrap(self, wrap_inside: Tag) -> Tag:
+ """Wrap this `PageElement` inside a `Tag`.
+
+ :return: ``wrap_inside``, occupying the position in the tree that used
+ to be occupied by this object, and with this object now inside it.
+ """
+ me = self.replace_with(wrap_inside)
+ wrap_inside.append(me)
+ return wrap_inside
+
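+ # Illustrative sketch (editor's example):
+ #
+ #   >>> from bs4 import BeautifulSoup
+ #   >>> soup = BeautifulSoup("<p>text</p>", "html.parser")
+ #   >>> soup.p.string.wrap(soup.new_tag("b"))
+ #   <b>text</b>
+ #   >>> soup
+ #   <p><b>text</b></p>
+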
+ def extract(self, _self_index: Optional[int] = None) -> Self:
+ """Destructively rips this element out of the tree.
+
+ :param _self_index: The location of this element in its parent's
+ .contents, if known. Passing this in allows for a performance
+ optimization.
+
+ :return: this `PageElement`, no longer part of the tree.
+ """
+ if self.parent is not None:
+ if _self_index is None:
+ _self_index = self.parent.index(self)
+ del self.parent.contents[_self_index]
+
+ # Find the two elements that would be next to each other if
+ # this element (and any children) hadn't been parsed. Connect
+ # the two.
+ last_child = self._last_descendant()
+
+ # last_child can't be None because we passed accept_self=True
+ # into _last_descendant. Worst case, last_child will be
+ # self. Making this cast removes several mypy complaints later
+ # on as we manipulate last_child.
+ last_child = cast(PageElement, last_child)
+ next_element = last_child.next_element
+
+ if self.previous_element is not None:
+ if self.previous_element is not next_element:
+ self.previous_element.next_element = next_element
+ if next_element is not None and next_element is not self.previous_element:
+ next_element.previous_element = self.previous_element
+ self.previous_element = None
+ last_child.next_element = None
+
+ self.parent = None
+ if (
+ self.previous_sibling is not None
+ and self.previous_sibling is not self.next_sibling
+ ):
+ self.previous_sibling.next_sibling = self.next_sibling
+ if (
+ self.next_sibling is not None
+ and self.next_sibling is not self.previous_sibling
+ ):
+ self.next_sibling.previous_sibling = self.previous_sibling
+ self.previous_sibling = self.next_sibling = None
+ return self
+
+ def decompose(self) -> None:
+ """Recursively destroys this `PageElement` and its children.
+
+ The element will be removed from the tree and wiped out; so
+ will everything beneath it.
+
+ The behavior of a decomposed `PageElement` is undefined and you
+ should never use one for anything, but if you need to *check*
+ whether an element has been decomposed, you can use the
+ `PageElement.decomposed` property.
+ """
+ self.extract()
+ e: _AtMostOneElement = self
+ next_up: _AtMostOneElement = None
+ while e is not None:
+ next_up = e.next_element
+ e.__dict__.clear()
+ if isinstance(e, Tag):
+ e.name = ""
+ e.contents = []
+ e._decomposed = True
+ e = next_up
+
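+ # Illustrative sketch (editor's example): unlike extract(), decompose()
+ # destroys the element rather than returning it.
+ #
+ #   >>> from bs4 import BeautifulSoup
+ #   >>> soup = BeautifulSoup("<p><b>bold</b> text</p>", "html.parser")
+ #   >>> soup.b.decompose()
+ #   >>> soup
+ #   <p> text</p>
+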
+ def _last_descendant(
+ self, is_initialized: bool = True, accept_self: bool = True
+ ) -> _AtMostOneElement:
+ """Finds the last element beneath this object to be parsed.
+
+ Special note to help you figure things out if your type
+ checking is tripped up by the fact that this method returns
+ _AtMostOneElement instead of PageElement: the only time
+ this method returns None is if `accept_self` is False and the
+ `PageElement` has no children--either it's a NavigableString
+ or an empty Tag.
+
+ :param is_initialized: Has `PageElement.setup` been called on
+ this `PageElement` yet?
+
+ :param accept_self: Is ``self`` an acceptable answer to the
+ question?
+ """
+ if is_initialized and self.next_sibling is not None:
+ last_child = self.next_sibling.previous_element
+ else:
+ last_child = self
+ while isinstance(last_child, Tag) and last_child.contents:
+ last_child = last_child.contents[-1]
+ if not accept_self and last_child is self:
+ last_child = None
+ return last_child
+
+ _lastRecursiveChild = _deprecated_alias(
+ "_lastRecursiveChild", "_last_descendant", "4.0.0"
+ )
+
+ def insert_before(self, *args: _InsertableElement) -> List[PageElement]:
+ """Makes the given element(s) the immediate predecessor of this one.
+
+ All the elements will have the same `PageElement.parent` as
+ this one, and the given elements will occur immediately before
+ this one.
+
+ :param args: One or more PageElements.
+
+ :return: The list of PageElements that were inserted.
+ """
+ parent = self.parent
+ if parent is None:
+ raise ValueError("Element has no parent, so 'before' has no meaning.")
+ if any(x is self for x in args):
+ raise ValueError("Can't insert an element before itself.")
+ results: List[PageElement] = []
+ for predecessor in args:
+ # Extract first so that the index won't be screwed up if they
+ # are siblings.
+ if isinstance(predecessor, PageElement):
+ predecessor.extract()
+ index = parent.index(self)
+ results.extend(parent.insert(index, predecessor))
+
+ return results
+
+ def insert_after(self, *args: _InsertableElement) -> List[PageElement]:
+ """Makes the given element(s) the immediate successor of this one.
+
+ The elements will have the same `PageElement.parent` as this
+ one, and the given elements will occur immediately after this
+ one.
+
+ :param args: One or more PageElements.
+
+ :return: The list of PageElements that were inserted.
+ """
+ # Do all error checking before modifying the tree.
+ parent = self.parent
+ if parent is None:
+ raise ValueError("Element has no parent, so 'after' has no meaning.")
+ if any(x is self for x in args):
+ raise ValueError("Can't insert an element after itself.")
+
+ offset = 0
+ results: List[PageElement] = []
+ for successor in args:
+ # Extract first so that the index won't be screwed up if they
+ # are siblings.
+ if isinstance(successor, PageElement):
+ successor.extract()
+ index = parent.index(self)
+ results.extend(parent.insert(index + 1 + offset, successor))
+ offset += 1
+
+ return results
+
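+ # Illustrative sketch (editor's example): strings passed to insert_after()
+ # are converted to NavigableStrings and the inserted elements are returned.
+ #
+ #   >>> from bs4 import BeautifulSoup
+ #   >>> soup = BeautifulSoup("<p><b>one</b></p>", "html.parser")
+ #   >>> soup.b.insert_after(" and two")
+ #   [' and two']
+ #   >>> soup.p
+ #   <p><b>one</b> and two</p>
+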
+ # For the suppression of this pyright warning, see discussion here:
+ # https://github.com/microsoft/pyright/issues/10929
+ @overload
+ def find_next( # pyright: ignore [reportOverlappingOverload]
+ self,
+ name: _FindMethodName = None,
+ attrs: Optional[_StrainableAttributes] = None,
+ string: None=None,
+ **kwargs: _StrainableAttribute,
+ ) -> _AtMostOneTag:
+ ...
+
+ @overload
+ def find_next(
+ self,
+ name: None=None,
+ attrs: None=None,
+ string: _StrainableString="",
+ **kwargs: _StrainableAttribute,
+ ) -> _AtMostOneNavigableString:
+ ...
+
+ def find_next(
+ self,
+ name: _FindMethodName = None,
+ attrs: Optional[_StrainableAttributes] = None,
+ string: Optional[_StrainableString] = None,
+ **kwargs: _StrainableAttribute,
+ ) -> Union[_AtMostOneTag,_AtMostOneNavigableString,_AtMostOneElement]:
+ """Find the first PageElement that matches the given criteria and
+ appears later in the document than this PageElement.
+
+ All find_* methods take a common set of arguments. See the online
+ documentation for detailed explanations.
+
+ :param name: A filter on tag name.
+ :param attrs: Additional filters on attribute values.
+ :param string: A filter for a NavigableString with specific text.
+ :kwargs: Additional filters on attribute values.
+ """
+ return self._find_one(self.find_all_next, name, attrs, string, **kwargs)
+
+ findNext = _deprecated_function_alias("findNext", "find_next", "4.0.0")
+
+ @overload
+ def find_all_next( # pyright: ignore [reportOverlappingOverload]
+ self,
+ name: _FindMethodName = None,
+ attrs: Optional[_StrainableAttributes] = None,
+ string: None = None,
+ limit: Optional[int] = None,
+ _stacklevel: int = 2,
+ **kwargs: _StrainableAttribute,
+ ) -> _SomeTags:
+ ...
+
+ @overload
+ def find_all_next(
+ self,
+ name: None = None,
+ attrs: None = None,
+ string: _StrainableString = "",
+ limit: Optional[int] = None,
+ _stacklevel: int = 2,
+ **kwargs: _StrainableAttribute,
+ ) -> _SomeNavigableStrings:
+ ...
+
+ def find_all_next(
+ self,
+ name: _FindMethodName = None,
+ attrs: Optional[_StrainableAttributes] = None,
+ string: Optional[_StrainableString] = None,
+ limit: Optional[int] = None,
+ _stacklevel: int = 2,
+ **kwargs: _StrainableAttribute,
+ ) -> Union[_SomeTags,_SomeNavigableStrings,_QueryResults]:
+ """Find all `PageElement` objects that match the given criteria and
+ appear later in the document than this `PageElement`.
+
+ All find_* methods take a common set of arguments. See the online
+ documentation for detailed explanations.
+
+ :param name: A filter on tag name.
+ :param attrs: Additional filters on attribute values.
+ :param string: A filter for a NavigableString with specific text.
+ :param limit: Stop looking after finding this many results.
+ :param _stacklevel: Used internally to improve warning messages.
+ :kwargs: Additional filters on attribute values.
+ """
+ return self._find_all(
+ name,
+ attrs,
+ string,
+ limit,
+ self.next_elements,
+ _stacklevel=_stacklevel + 1,
+ **kwargs,
+ )
+
+ findAllNext = _deprecated_function_alias("findAllNext", "find_all_next", "4.0.0")
+
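+ # Illustrative sketch (editor's example) of the find_next / find_all_next
+ # family:
+ #
+ #   >>> from bs4 import BeautifulSoup
+ #   >>> soup = BeautifulSoup("<a>1</a><b>2</b><a>3</a>", "html.parser")
+ #   >>> first = soup.find("a")
+ #   >>> first.find_next("a")
+ #   <a>3</a>
+ #   >>> [tag.name for tag in first.find_all_next()]
+ #   ['b', 'a']
+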
+ @overload
+ def find_next_sibling( # pyright: ignore [reportOverlappingOverload]
+ self,
+ name: _FindMethodName = None,
+ attrs: Optional[_StrainableAttributes] = None,
+ string: None=None,
+ **kwargs: _StrainableAttribute,
+ ) -> _AtMostOneTag:
+ ...
+
+ @overload
+ def find_next_sibling(
+ self,
+ name: None=None,
+ attrs: None=None,
+ string: _StrainableString="",
+ **kwargs: _StrainableAttribute,
+ ) -> _AtMostOneNavigableString:
+ ...
+
+ def find_next_sibling(
+ self,
+ name: _FindMethodName = None,
+ attrs: Optional[_StrainableAttributes] = None,
+ string: Optional[_StrainableString] = None,
+ **kwargs: _StrainableAttribute,
+ ) -> Union[_AtMostOneTag,_AtMostOneNavigableString,_AtMostOneElement]:
+ """Find the closest sibling to this PageElement that matches the
+ given criteria and appears later in the document.
+
+ All find_* methods take a common set of arguments. See the
+ online documentation for detailed explanations.
+
+ :param name: A filter on tag name.
+ :param attrs: Additional filters on attribute values.
+ :param string: A filter for a `NavigableString` with specific text.
+ :kwargs: Additional filters on attribute values.
+ """
+ return self._find_one(self.find_next_siblings, name, attrs, string, **kwargs)
+
+ findNextSibling = _deprecated_function_alias(
+ "findNextSibling", "find_next_sibling", "4.0.0"
+ )
+
+ @overload
+ def find_next_siblings( # pyright: ignore [reportOverlappingOverload]
+ self,
+ name: _FindMethodName = None,
+ attrs: Optional[_StrainableAttributes] = None,
+ string: None = None,
+ limit: Optional[int] = None,
+ _stacklevel: int = 2,
+ **kwargs: _StrainableAttribute,
+ ) -> _SomeTags:
+ ...
+
+ @overload
+ def find_next_siblings(
+ self,
+ name: None = None,
+ attrs: None = None,
+ string: _StrainableString = "",
+ limit: Optional[int] = None,
+ _stacklevel: int = 2,
+ **kwargs: _StrainableAttribute,
+ ) -> _SomeNavigableStrings:
+ ...
+
+ def find_next_siblings(
+ self,
+ name: _FindMethodName = None,
+ attrs: Optional[_StrainableAttributes] = None,
+ string: Optional[_StrainableString] = None,
+ limit: Optional[int] = None,
+ _stacklevel: int = 2,
+ **kwargs: _StrainableAttribute,
+ ) -> Union[_SomeTags,_SomeNavigableStrings,_QueryResults]:
+ """Find all siblings of this `PageElement` that match the given criteria
+ and appear later in the document.
+
+ All find_* methods take a common set of arguments. See the online
+ documentation for detailed explanations.
+
+ :param name: A filter on tag name.
+ :param attrs: Additional filters on attribute values.
+ :param string: A filter for a `NavigableString` with specific text.
+ :param limit: Stop looking after finding this many results.
+ :param _stacklevel: Used internally to improve warning messages.
+ :kwargs: Additional filters on attribute values.
+ """
+ return self._find_all(
+ name,
+ attrs,
+ string,
+ limit,
+ self.next_siblings,
+ _stacklevel=_stacklevel + 1,
+ **kwargs,
+ )
+
+ findNextSiblings = _deprecated_function_alias(
+ "findNextSiblings", "find_next_siblings", "4.0.0"
+ )
+ fetchNextSiblings = _deprecated_function_alias(
+ "fetchNextSiblings", "find_next_siblings", "3.0.0"
+ )
+
+ @overload
+ def find_previous( # pyright: ignore [reportOverlappingOverload]
+ self,
+ name: _FindMethodName = None,
+ attrs: Optional[_StrainableAttributes] = None,
+ string: None=None,
+ **kwargs: _StrainableAttribute,
+ ) -> _AtMostOneTag:
+ ...
+
+ @overload
+ def find_previous(
+ self,
+ name: None=None,
+ attrs: None=None,
+ string: _StrainableString="",
+ **kwargs: _StrainableAttribute,
+ ) -> _AtMostOneNavigableString:
+ ...
+
+ def find_previous(
+ self,
+ name: _FindMethodName = None,
+ attrs: Optional[_StrainableAttributes] = None,
+ string: Optional[_StrainableString] = None,
+ **kwargs: _StrainableAttribute,
+ ) -> Union[_AtMostOneTag,_AtMostOneNavigableString,_AtMostOneElement]:
+ """Look backwards in the document from this `PageElement` and find the
+ first `PageElement` that matches the given criteria.
+
+ All find_* methods take a common set of arguments. See the online
+ documentation for detailed explanations.
+
+ :param name: A filter on tag name.
+ :param attrs: Additional filters on attribute values.
+ :param string: A filter for a `NavigableString` with specific text.
+ :kwargs: Additional filters on attribute values.
+ """
+ return self._find_one(self.find_all_previous, name, attrs, string, **kwargs)
+
+ findPrevious = _deprecated_function_alias("findPrevious", "find_previous", "3.0.0")
+
+ @overload
+ def find_all_previous( # pyright: ignore [reportOverlappingOverload]
+ self,
+ name: _FindMethodName = None,
+ attrs: Optional[_StrainableAttributes] = None,
+ string: None = None,
+ limit: Optional[int] = None,
+ _stacklevel: int = 2,
+ **kwargs: _StrainableAttribute,
+ ) -> _SomeTags:
+ ...
+
+ @overload
+ def find_all_previous(
+ self,
+ name: None = None,
+ attrs: None = None,
+ string: _StrainableString = "",
+ limit: Optional[int] = None,
+ _stacklevel: int = 2,
+ **kwargs: _StrainableAttribute,
+ ) -> _SomeNavigableStrings:
+ ...
+
+ def find_all_previous(
+ self,
+ name: _FindMethodName = None,
+ attrs: Optional[_StrainableAttributes] = None,
+ string: Optional[_StrainableString] = None,
+ limit: Optional[int] = None,
+ _stacklevel: int = 2,
+ **kwargs: _StrainableAttribute,
+ ) -> Union[_SomeTags,_SomeNavigableStrings,_QueryResults]:
+ """Look backwards in the document from this `PageElement` and find all
+ `PageElement` that match the given criteria.
+
+ All find_* methods take a common set of arguments. See the online
+ documentation for detailed explanations.
+
+ :param name: A filter on tag name.
+ :param attrs: Additional filters on attribute values.
+ :param string: A filter for a `NavigableString` with specific text.
+ :param limit: Stop looking after finding this many results.
+ :param _stacklevel: Used internally to improve warning messages.
+ :kwargs: Additional filters on attribute values.
+ """
+ return self._find_all(
+ name,
+ attrs,
+ string,
+ limit,
+ self.previous_elements,
+ _stacklevel=_stacklevel + 1,
+ **kwargs,
+ )
+
+ findAllPrevious = _deprecated_function_alias(
+ "findAllPrevious", "find_all_previous", "4.0.0"
+ )
+ fetchAllPrevious = _deprecated_function_alias(
+ "fetchAllPrevious", "find_all_previous", "3.0.0"
+ )
+
+ @overload
+ def find_previous_sibling( # pyright: ignore [reportOverlappingOverload]
+ self,
+ name: _FindMethodName = None,
+ attrs: Optional[_StrainableAttributes] = None,
+ string: None=None,
+ **kwargs: _StrainableAttribute,
+ ) -> _AtMostOneTag:
+ ...
+
+ @overload
+ def find_previous_sibling(
+ self,
+ name: None=None,
+ attrs: None=None,
+ string: _StrainableString="",
+ **kwargs: _StrainableAttribute,
+ ) -> _AtMostOneNavigableString:
+ ...
+
+ def find_previous_sibling(
+ self,
+ name: _FindMethodName = None,
+ attrs: Optional[_StrainableAttributes] = None,
+ string: Optional[_StrainableString] = None,
+ **kwargs: _StrainableAttribute,
+ ) -> Union[_AtMostOneTag,_AtMostOneNavigableString,_AtMostOneElement]:
+ """Returns the closest sibling to this `PageElement` that matches the
+ given criteria and appears earlier in the document.
+
+ All find_* methods take a common set of arguments. See the online
+ documentation for detailed explanations.
+
+ :param name: A filter on tag name.
+ :param attrs: Additional filters on attribute values.
+ :param string: A filter for a `NavigableString` with specific text.
+ :kwargs: Additional filters on attribute values.
+ """
+ return self._find_one(
+ self.find_previous_siblings, name, attrs, string, **kwargs
+ )
+
+ findPreviousSibling = _deprecated_function_alias(
+ "findPreviousSibling", "find_previous_sibling", "4.0.0"
+ )
+
+ @overload
+ def find_previous_siblings( # pyright: ignore [reportOverlappingOverload]
+ self,
+ name: _FindMethodName = None,
+ attrs: Optional[_StrainableAttributes] = None,
+ string: None = None,
+ limit: Optional[int] = None,
+ _stacklevel: int = 2,
+ **kwargs: _StrainableAttribute,
+ ) -> _SomeTags:
+ ...
+
+ @overload
+ def find_previous_siblings(
+ self,
+ name: None = None,
+ attrs: None = None,
+ string: _StrainableString = "",
+ limit: Optional[int] = None,
+ _stacklevel: int = 2,
+ **kwargs: _StrainableAttribute,
+ ) -> _SomeNavigableStrings:
+ ...
+
+ def find_previous_siblings(
+ self,
+ name: _FindMethodName = None,
+ attrs: Optional[_StrainableAttributes] = None,
+ string: Optional[_StrainableString] = None,
+ limit: Optional[int] = None,
+ _stacklevel: int = 2,
+ **kwargs: _StrainableAttribute,
+ ) -> Union[_SomeTags,_SomeNavigableStrings,_QueryResults]:
+ """Returns all siblings to this PageElement that match the
+ given criteria and appear earlier in the document.
+
+ All find_* methods take a common set of arguments. See the online
+ documentation for detailed explanations.
+
+ :param name: A filter on tag name.
+ :param attrs: Additional filters on attribute values.
+ :param string: A filter for a NavigableString with specific text.
+ :param limit: Stop looking after finding this many results.
+ :param _stacklevel: Used internally to improve warning messages.
+ :kwargs: Additional filters on attribute values.
+ """
+ return self._find_all(
+ name,
+ attrs,
+ string,
+ limit,
+ self.previous_siblings,
+ _stacklevel=_stacklevel + 1,
+ **kwargs,
+ )
+
+ findPreviousSiblings = _deprecated_function_alias(
+ "findPreviousSiblings", "find_previous_siblings", "4.0.0"
+ )
+ fetchPreviousSiblings = _deprecated_function_alias(
+ "fetchPreviousSiblings", "find_previous_siblings", "3.0.0"
+ )
+
+ def find_parent(
+ self,
+ name: _FindMethodName = None,
+ attrs: Optional[_StrainableAttributes] = None,
+ **kwargs: _StrainableAttribute,
+ ) -> _AtMostOneTag:
+ """Find the closest parent of this PageElement that matches the given
+ criteria.
+
+ All find_* methods take a common set of arguments. See the online
+ documentation for detailed explanations.
+
+ :param name: A filter on tag name.
+ :param attrs: Additional filters on attribute values.
+ :param self: Whether the PageElement itself should be considered
+ as one of its 'parents'.
+ :kwargs: Additional filters on attribute values.
+ """
+ # NOTE: We can't use _find_one because findParents takes a different
+ # set of arguments.
+ r = None
+ results = self.find_parents(
+ name, attrs, 1, _stacklevel=3, **kwargs
+ )
+ if results:
+ r = results[0]
+ return r
+
+ findParent = _deprecated_function_alias("findParent", "find_parent", "4.0.0")
+
+ def find_parents(
+ self,
+ name: _FindMethodName = None,
+ attrs: Optional[_StrainableAttributes] = None,
+ limit: Optional[int] = None,
+ _stacklevel: int = 2,
+ **kwargs: _StrainableAttribute,
+ ) -> _SomeTags:
+ """Find all parents of this `PageElement` that match the given criteria.
+
+ All find_* methods take a common set of arguments. See the online
+ documentation for detailed explanations.
+
+ :param name: A filter on tag name.
+ :param attrs: Additional filters on attribute values.
+ :param limit: Stop looking after finding this many results.
+ :param _stacklevel: Used internally to improve warning messages.
+ :kwargs: Additional filters on attribute values.
+ """
+ iterator = self.parents
+ # Only Tags can have children, so this ResultSet will contain
+ # nothing but Tags.
+ return cast(ResultSet[Tag], self._find_all(
+ name, attrs, None, limit, iterator, _stacklevel=_stacklevel + 1, **kwargs
+ ))
+
+ findParents = _deprecated_function_alias("findParents", "find_parents", "4.0.0")
+ fetchParents = _deprecated_function_alias("fetchParents", "find_parents", "3.0.0")
+
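+ # Illustrative sketch (editor's example):
+ #
+ #   >>> from bs4 import BeautifulSoup
+ #   >>> soup = BeautifulSoup("<div><p><b>x</b></p></div>", "html.parser")
+ #   >>> soup.b.find_parent("div")
+ #   <div><p><b>x</b></p></div>
+ #   >>> [parent.name for parent in soup.b.find_parents()]
+ #   ['p', 'div', '[document]']
+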
+ @property
+ def next(self) -> _AtMostOneElement:
+ """The `PageElement`, if any, that was parsed just after this one."""
+ return self.next_element
+
+ @property
+ def previous(self) -> _AtMostOneElement:
+ """The `PageElement`, if any, that was parsed just before this one."""
+ return self.previous_element
+
+ # These methods do the real heavy lifting.
+
+ def _find_one(
+ self,
+ # TODO-TYPING: "There is no syntax to indicate optional or
+ # keyword arguments; such function types are rarely used
+ # as callback types." - So, not sure how to get more
+ # specific here.
+ method: Callable,
+ name: _FindMethodName,
+ attrs: Optional[_StrainableAttributes],
+ string: Optional[_StrainableString],
+ **kwargs: _StrainableAttribute,
+ ) -> _AtMostOneElement:
+ r: _AtMostOneElement = None
+ results: _QueryResults = method(name, attrs, string, 1, _stacklevel=4, **kwargs)
+ if results:
+ r = results[0]
+ return r
+
+ def _find_all(
+ self,
+ name: _FindMethodName,
+ attrs: Optional[_StrainableAttributes],
+ string: Optional[_StrainableString],
+ limit: Optional[int],
+ generator: Iterator[PageElement],
+ _stacklevel: int = 3,
+ **kwargs: _StrainableAttribute,
+ ) -> _QueryResults:
+ """Iterates over a generator looking for things that match."""
+
+ if string is None and "text" in kwargs:
+ string = kwargs.pop("text")
+ warnings.warn(
+ "The 'text' argument to find()-type methods is deprecated. Use 'string' instead.",
+ DeprecationWarning,
+ stacklevel=_stacklevel,
+ )
+
+ if "_class" in kwargs:
+ warnings.warn(
+ AttributeResemblesVariableWarning.MESSAGE
+ % dict(
+ original="_class",
+ autocorrect="class_",
+ ),
+ AttributeResemblesVariableWarning,
+ stacklevel=_stacklevel,
+ )
+
+ from bs4.filter import ElementFilter
+
+ if isinstance(name, ElementFilter):
+ matcher = name
+ else:
+ matcher = SoupStrainer(name, attrs, string, **kwargs)
+
+ result: MutableSequence[_OneElement]
+ if string is None and not limit and not attrs and not kwargs:
+ if name is True or name is None:
+ # Optimization to find all tags.
+ result = [element for element in generator if isinstance(element, Tag)]
+ return ResultSet(matcher, result)
+ elif isinstance(name, str):
+ # Optimization to find all tags with a given name.
+ if name.count(":") == 1:
+ # This is a name with a prefix. If this is a namespace-aware document,
+ # we need to match the local name against tag.name. If not,
+ # we need to match the fully-qualified name against tag.name.
+ prefix, local_name = name.split(":", 1)
+ else:
+ prefix = None
+ local_name = name
+ result = []
+ for element in generator:
+ if not isinstance(element, Tag):
+ continue
+ if element.name == name or (
+ element.name == local_name
+ and (prefix is None or element.prefix == prefix)
+ ):
+ result.append(element)
+ return ResultSet(matcher, result)
+ return matcher.find_all(generator, limit)
+
+ # These generators can be used to navigate starting from both
+ # NavigableStrings and Tags.
+ @property
+ def next_elements(self) -> Iterator[PageElement]:
+ """All PageElements that were parsed after this one."""
+ i = self.next_element
+ while i is not None:
+ successor = i.next_element
+ yield i
+ i = successor
+
+ @property
+ def self_and_next_elements(self) -> Iterator[PageElement]:
+ """This PageElement, then all PageElements that were parsed after it."""
+ return self._self_and(self.next_elements)
+
+ @property
+ def next_siblings(self) -> Iterator[PageElement]:
+ """All PageElements that are siblings of this one but were parsed
+ later.
+ """
+ i = self.next_sibling
+ while i is not None:
+ successor = i.next_sibling
+ yield i
+ i = successor
+
+ @property
+ def self_and_next_siblings(self) -> Iterator[PageElement]:
+ """This PageElement, then all of its siblings."""
+ return self._self_and(self.next_siblings)
+
+ @property
+ def previous_elements(self) -> Iterator[PageElement]:
+ """All PageElements that were parsed before this one.
+
+ :yield: A sequence of PageElements.
+ """
+ i = self.previous_element
+ while i is not None:
+ successor = i.previous_element
+ yield i
+ i = successor
+
+ @property
+ def self_and_previous_elements(self) -> Iterator[PageElement]:
+ """This PageElement, then all elements that were parsed
+ earlier."""
+ return self._self_and(self.previous_elements)
+
+ @property
+ def previous_siblings(self) -> Iterator[PageElement]:
+ """All PageElements that are siblings of this one but were parsed
+ earlier.
+
+ :yield: A sequence of PageElements.
+ """
+ i = self.previous_sibling
+ while i is not None:
+ successor = i.previous_sibling
+ yield i
+ i = successor
+
+ @property
+ def self_and_previous_siblings(self) -> Iterator[PageElement]:
+ """This PageElement, then all of its siblings that were parsed
+ earlier."""
+ return self._self_and(self.previous_siblings)
+
+ @property
+ def parents(self) -> Iterator[Tag]:
+ """All elements that are parents of this PageElement.
+
+ :yield: A sequence of Tags, ending with a BeautifulSoup object.
+ """
+ i = self.parent
+ while i is not None:
+ successor = i.parent
+ yield i
+ i = successor
+
+ @property
+ def self_and_parents(self) -> Iterator[PageElement]:
+ """This element, then all of its parents.
+
+ :yield: A sequence of PageElements, ending with a BeautifulSoup object.
+ """
+ return self._self_and(self.parents)
+
+ def _self_and(self, other_generator:Iterator[PageElement]) -> Iterator[PageElement]:
+ """Modify a generator by yielding this element, then everything
+ yielded by the other generator.
+ """
+ if not self.hidden:
+ yield self
+ for i in other_generator:
+ yield i
+
+ @property
+ def decomposed(self) -> bool:
+ """Check whether a PageElement has been decomposed."""
+ return getattr(self, "_decomposed", False) or False
+
+ @_deprecated("next_elements", "4.0.0")
+ def nextGenerator(self) -> Iterator[PageElement]:
+ ":meta private:"
+ return self.next_elements
+
+ @_deprecated("next_siblings", "4.0.0")
+ def nextSiblingGenerator(self) -> Iterator[PageElement]:
+ ":meta private:"
+ return self.next_siblings
+
+ @_deprecated("previous_elements", "4.0.0")
+ def previousGenerator(self) -> Iterator[PageElement]:
+ ":meta private:"
+ return self.previous_elements
+
+ @_deprecated("previous_siblings", "4.0.0")
+ def previousSiblingGenerator(self) -> Iterator[PageElement]:
+ ":meta private:"
+ return self.previous_siblings
+
+ @_deprecated("parents", "4.0.0")
+ def parentGenerator(self) -> Iterator[PageElement]:
+ ":meta private:"
+ return self.parents
+
+
+class NavigableString(str, PageElement):
+ """A Python string that is part of a parse tree.
+
+ When Beautiful Soup parses the markup ``<b>penguin</b>``, it will
+ create a `NavigableString` for the string "penguin".
+ """
+
+ #: A string prepended to the body of the 'real' string when formatting
+ #: it as part of a document, such as the '<!--' in an HTML comment.
+ PREFIX: str = ""
+
+ #: A string appended to the body of the 'real' string when formatting
+ #: it as part of a document, such as the '-->' in an HTML comment.
+ SUFFIX: str = ""
+
+ def __new__(cls, value: Union[str, bytes]) -> Self:
+ """Create a new NavigableString.
+
+ When unpickling a NavigableString, this method is called with
+ the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
+ passed in to the superclass's __new__ or the superclass won't know
+ how to handle non-ASCII characters.
+ """
+ if isinstance(value, str):
+ u = str.__new__(cls, value)
+ else:
+ u = str.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)
+ u.hidden = False
+ u.setup()
+ return u
+
+ def __deepcopy__(self, memo: Dict[Any, Any], recursive: bool = False) -> Self:
+ """A copy of a NavigableString has the same contents and class
+ as the original, but it is not connected to the parse tree.
+
+ :param recursive: This parameter is ignored; it's only defined
+ so that NavigableString.__deepcopy__ implements the same
+ signature as Tag.__deepcopy__.
+ """
+ return type(self)(self)
+
+ def __getnewargs__(self) -> Tuple[str]:
+ return (str(self),)
+
+ # TODO-TYPING This should be SupportsIndex|slice but SupportsIndex
+ # is introduced in 3.8. This can be changed once 3.7 support is dropped.
+ def __getitem__(self, key: Union[int, slice]) -> str: # type:ignore
+ """Raise a helpful exception when a NavigableString is indexed with a
+ string key, as if it were a Tag."""
+ if isinstance(key, str):
+ raise TypeError("string indices must be integers, not '{0}'. Are you treating a NavigableString like a Tag?".format(key.__class__.__name__))
+ return super(NavigableString, self).__getitem__(key)
+
+ @property
+ def string(self) -> str:
+ """Convenience property defined to match `Tag.string`.
+
+ :return: This property always returns the `NavigableString` it was
+ called on.
+
+ :meta private:
+ """
+ return self
+
+ def output_ready(self, formatter: _FormatterOrName = "minimal") -> str:
+ """Run the string through the provided formatter, making it
+ ready for output as part of an HTML or XML document.
+
+ :param formatter: A `Formatter` object, or a string naming one
+ of the standard formatters.
+ """
+ output = self.format_string(self, formatter)
+ return self.PREFIX + output + self.SUFFIX
+
+ @property
+ def name(self) -> None:
+ """Since a NavigableString is not a Tag, it has no .name.
+
+ This property is implemented so that code like this doesn't crash
+ when run on a mixture of Tag and NavigableString objects:
+ [x.name for x in tag.children]
+
+ :meta private:
+ """
+ return None
+
+ @name.setter
+ def name(self, name: str) -> None:
+ """Prevent NavigableString.name from ever being set.
+
+ :meta private:
+ """
+ raise AttributeError("A NavigableString cannot be given a name.")
+
+ def _all_strings(
+ self, strip: bool = False, types: _OneOrMoreStringTypes = PageElement.default
+ ) -> Iterator[str]:
+ """Yield all strings of certain classes, possibly stripping them.
+
+ This makes it easy for NavigableString to implement methods
+ like get_text() as conveniences, creating a consistent
+ text-extraction API across all PageElements.
+
+ :param strip: If True, all strings will be stripped before being
+ yielded.
+
+ :param types: A tuple of NavigableString subclasses. If this
+ NavigableString isn't one of those subclasses, the
+ sequence will be empty. By default, the subclasses
+ considered are NavigableString and CData objects. That
+ means no comments, processing instructions, etc.
+
+ :yield: A sequence that either contains this string, or is empty.
+ """
+ if types is self.default:
+ # This is kept in Tag because it's full of subclasses of
+ # this class, which aren't defined until later in the file.
+ types = Tag.MAIN_CONTENT_STRING_TYPES
+
+ # Do nothing if the caller is looking for specific types of
+ # string, and we're of a different type.
+ #
+ # We check specific types instead of using isinstance(self,
+ # types) because all of these classes subclass
+ # NavigableString. Anyone who's using this feature probably
+ # wants generic NavigableStrings but not other stuff.
+ my_type = type(self)
+ if types is not None:
+ if isinstance(types, type):
+ # Looking for a single type.
+ if my_type is not types:
+ return
+ elif my_type not in types:
+ # Looking for one of a list of types.
+ return
+
+ value = self
+ if strip:
+ final_value = value.strip()
+ else:
+ final_value = self
+ if len(final_value) > 0:
+ yield final_value
+
+ @property
+ def strings(self) -> Iterator[str]:
+ """Yield this string, but only if it is interesting.
+
+ This is defined the way it is for compatibility with
+ `Tag.strings`. See `Tag` for information on which strings are
+ interesting in a given context.
+
+ :yield: A sequence that either contains this string, or is empty.
+ """
+ return self._all_strings()
+
+
+class PreformattedString(NavigableString):
+ """A `NavigableString` not subject to the normal formatting rules.
+
+ This is an abstract class used for special kinds of strings such
+ as comments (`Comment`) and CDATA blocks (`CData`).
+ """
+
+ PREFIX: str = ""
+ SUFFIX: str = ""
+
+ def output_ready(self, formatter: Optional[_FormatterOrName] = None) -> str:
+ """Make this string ready for output by adding any subclass-specific
+ prefix or suffix.
+
+ :param formatter: A `Formatter` object, or a string naming one
+ of the standard formatters. The string will be passed into the
+ `Formatter`, but only to trigger any side effects: the return
+ value is ignored.
+
+ :return: The string, with any subclass-specific prefix and
+ suffix added on.
+ """
+ if formatter is not None:
+ self.format_string(self, formatter)
+ return self.PREFIX + self + self.SUFFIX
+
+
+class CData(PreformattedString):
+ """A `CDATA section `_."""
+
+ PREFIX: str = ""
+
+
+class ProcessingInstruction(PreformattedString):
+ """A SGML processing instruction."""
+
+ PREFIX: str = ""
+ SUFFIX: str = ">"
+
+
+class XMLProcessingInstruction(ProcessingInstruction):
+ """An `XML processing instruction `_."""
+
+ PREFIX: str = ""
+ SUFFIX: str = "?>"
+
+
+class Comment(PreformattedString):
+ """An `HTML comment `_ or `XML comment `_."""
+
+ PREFIX: str = ""
+
+
+class Declaration(PreformattedString):
+ """An `XML declaration `_."""
+
+ PREFIX: str = ""
+ SUFFIX: str = "?>"
+
+
+class Doctype(PreformattedString):
+ """A `document type declaration `_."""
+
+ @classmethod
+ def for_name_and_ids(
+ cls, name: str, pub_id: Optional[str], system_id: Optional[str]
+ ) -> Doctype:
+ """Generate an appropriate document type declaration for a given
+ public ID and system ID.
+
+ :param name: The name of the document's root element, e.g. 'html'.
+ :param pub_id: The Formal Public Identifier for this document type,
+ e.g. '-//W3C//DTD XHTML 1.1//EN'
+ :param system_id: The system identifier for this document type,
+ e.g. 'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'
+ """
+ return Doctype(cls._string_for_name_and_ids(name, pub_id, system_id))
+
+ @classmethod
+ def _string_for_name_and_ids(
+ cls, name: str, pub_id: Optional[str], system_id: Optional[str]
+ ) -> str:
+ """Generate a string to be used as the basis of a Doctype object.
+
+ This is a separate method from for_name_and_ids() because the lxml
+ TreeBuilder needs to call it.
+ """
+ value = name or ""
+ if pub_id is not None:
+ value += ' PUBLIC "%s"' % pub_id
+ if system_id is not None:
+ value += ' "%s"' % system_id
+ elif system_id is not None:
+ value += ' SYSTEM "%s"' % system_id
+ return value
+
+ PREFIX: str = "\n"
+
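+ # Illustrative sketch (editor's example): the PREFIX and SUFFIX are only
+ # added when the Doctype is output as part of a document.
+ #
+ #   >>> Doctype.for_name_and_ids("html", "-//W3C//DTD XHTML 1.1//EN", None)
+ #   'html PUBLIC "-//W3C//DTD XHTML 1.1//EN"'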
+
+class Stylesheet(NavigableString):
+ """A `NavigableString` representing the contents of a `
+
+
+%(title)s
+
+'''
+
+DOC_HEADER_EXTERNALCSS = '''\
+
+
+
+
+ %(title)s
+
+
+
+
+%(title)s
+
+'''
+
+DOC_FOOTER = '''\
+
+
+'''
+
+
+class HtmlFormatter(Formatter):
+ r"""
+ Format tokens as HTML 4 ``<span>`` tags within a ``<pre>`` tag, wrapped
+ in a ``<div>`` tag. The ``<div>``'s CSS class can be set by the `cssclass`
+ option.
+
+ If the `linenos` option is set to ``"table"``, the ``<pre>`` is
+ additionally wrapped inside a ``<table>`` which has one row and two
+ cells: one containing the line numbers and one containing the code.
+ Example:
+
+ [garbled HTML example: a two-cell table, the first cell holding the line
+ numbers 1 and 2, the second holding the highlighted code
+ ``def foo(bar): pass``]
+
+ Wrapping can be disabled using the `nowrap` option.
+
+ A list of lines can be specified using the `hl_lines` option to make these
+ lines highlighted (as of Pygments 0.11).
+
+ With the `full` option, a complete HTML 4 document is output, including
+ the style definitions inside a ``<style>`` tag, or in a separate file if
+ the `cssfile` option is given.
+ """
+
+
+# [Extraction placeholder: the rest of the HtmlFormatter docstring and class,
+# the remainder of pygments' formatters/html.py, and the start of rich's
+# console.py (including its CONSOLE_HTML_FORMAT template, of which only the
+# ``{code}`` placeholder survived) were garbled by markup stripping.]
+
+_TERM_COLORS = {"256color": ColorSystem.EIGHT_BIT, "16color": ColorSystem.STANDARD}
+
+
+class ConsoleDimensions(NamedTuple):
+ """Size of the terminal."""
+
+ width: int
+ """The width of the console in 'cells'."""
+ height: int
+ """The height of the console in lines."""
+
+
+@dataclass
+class ConsoleOptions:
+ """Options for __rich_console__ method."""
+
+ size: ConsoleDimensions
+ """Size of console."""
+ legacy_windows: bool
+ """legacy_windows: flag for legacy windows."""
+ min_width: int
+ """Minimum width of renderable."""
+ max_width: int
+ """Maximum width of renderable."""
+ is_terminal: bool
+ """True if the target is a terminal, otherwise False."""
+ encoding: str
+ """Encoding of terminal."""
+ max_height: int
+ """Height of container (starts as terminal)"""
+ justify: Optional[JustifyMethod] = None
+ """Justify value override for renderable."""
+ overflow: Optional[OverflowMethod] = None
+ """Overflow value override for renderable."""
+ no_wrap: Optional[bool] = False
+ """Disable wrapping for text."""
+ highlight: Optional[bool] = None
+ """Highlight override for render_str."""
+ markup: Optional[bool] = None
+ """Enable markup when rendering strings."""
+ height: Optional[int] = None
+
+ @property
+ def ascii_only(self) -> bool:
+ """Check if renderables should use ascii only."""
+ return not self.encoding.startswith("utf")
+
+ def copy(self) -> "ConsoleOptions":
+ """Return a copy of the options.
+
+ Returns:
+ ConsoleOptions: a copy of self.
+ """
+ options: ConsoleOptions = ConsoleOptions.__new__(ConsoleOptions)
+ options.__dict__ = self.__dict__.copy()
+ return options
+
+ def update(
+ self,
+ *,
+ width: Union[int, NoChange] = NO_CHANGE,
+ min_width: Union[int, NoChange] = NO_CHANGE,
+ max_width: Union[int, NoChange] = NO_CHANGE,
+ justify: Union[Optional[JustifyMethod], NoChange] = NO_CHANGE,
+ overflow: Union[Optional[OverflowMethod], NoChange] = NO_CHANGE,
+ no_wrap: Union[Optional[bool], NoChange] = NO_CHANGE,
+ highlight: Union[Optional[bool], NoChange] = NO_CHANGE,
+ markup: Union[Optional[bool], NoChange] = NO_CHANGE,
+ height: Union[Optional[int], NoChange] = NO_CHANGE,
+ ) -> "ConsoleOptions":
+ """Update values, return a copy."""
+ options = self.copy()
+ if not isinstance(width, NoChange):
+ options.min_width = options.max_width = max(0, width)
+ if not isinstance(min_width, NoChange):
+ options.min_width = min_width
+ if not isinstance(max_width, NoChange):
+ options.max_width = max_width
+ if not isinstance(justify, NoChange):
+ options.justify = justify
+ if not isinstance(overflow, NoChange):
+ options.overflow = overflow
+ if not isinstance(no_wrap, NoChange):
+ options.no_wrap = no_wrap
+ if not isinstance(highlight, NoChange):
+ options.highlight = highlight
+ if not isinstance(markup, NoChange):
+ options.markup = markup
+ if not isinstance(height, NoChange):
+ if height is not None:
+ options.max_height = height
+ options.height = None if height is None else max(0, height)
+ return options
+
+ def update_width(self, width: int) -> "ConsoleOptions":
+ """Update just the width, return a copy.
+
+ Args:
+ width (int): New width (sets both min_width and max_width)
+
+ Returns:
+ ~ConsoleOptions: New console options instance.
+ """
+ options = self.copy()
+ options.min_width = options.max_width = max(0, width)
+ return options
+
+ def update_height(self, height: int) -> "ConsoleOptions":
+ """Update the height, and return a copy.
+
+ Args:
+ height (int): New height
+
+ Returns:
+ ~ConsoleOptions: New Console options instance.
+ """
+ options = self.copy()
+ options.max_height = options.height = height
+ return options
+
+ def update_dimensions(self, width: int, height: int) -> "ConsoleOptions":
+ """Update the width and height, and return a copy.
+
+ Args:
+ width (int): New width (sets both min_width and max_width).
+ height (int): New height.
+
+ Returns:
+ ~ConsoleOptions: New console options instance.
+ """
+ options = self.copy()
+ options.min_width = options.max_width = max(0, width)
+ options.height = options.max_height = height
+ return options
+
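+# Illustrative sketch (editor's example): update() and friends never mutate in
+# place; they return a modified copy, so callers can safely narrow the options
+# they pass down to child renderables.
+#
+#   >>> from rich.console import Console
+#   >>> options = Console(width=80).options
+#   >>> narrower = options.update(width=40)
+#   >>> options.max_width, narrower.min_width, narrower.max_width
+#   (80, 40, 40)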
+
+@runtime_checkable
+class RichCast(Protocol):
+ """An object that may be 'cast' to a console renderable."""
+
+ def __rich__(self) -> Union["ConsoleRenderable", str]: # pragma: no cover
+ ...
+
+
+@runtime_checkable
+class ConsoleRenderable(Protocol):
+ """An object that supports the console protocol."""
+
+ def __rich_console__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "RenderResult": # pragma: no cover
+ ...
+
+
+# A type that may be rendered by Console.
+RenderableType = Union[ConsoleRenderable, RichCast, str]
+
+
+# The result of calling a __rich_console__ method.
+RenderResult = Iterable[Union[RenderableType, Segment]]
+
+
+_null_highlighter = NullHighlighter()
+
+
+class CaptureError(Exception):
+ """An error in the Capture context manager."""
+
+
+class NewLine:
+ """A renderable to generate new line(s)"""
+
+ def __init__(self, count: int = 1) -> None:
+ self.count = count
+
+ def __rich_console__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> Iterable[Segment]:
+ yield Segment("\n" * self.count)
+
+
+class ScreenUpdate:
+ """Render a list of lines at a given offset."""
+
+ def __init__(self, lines: List[List[Segment]], x: int, y: int) -> None:
+ self._lines = lines
+ self.x = x
+ self.y = y
+
+ def __rich_console__(
+ self, console: "Console", options: ConsoleOptions
+ ) -> RenderResult:
+ x = self.x
+ move_to = Control.move_to
+ for offset, line in enumerate(self._lines, self.y):
+ yield move_to(x, offset)
+ yield from line
+
+
+class Capture:
+ """Context manager to capture the result of printing to the console.
+ See :meth:`~rich.console.Console.capture` for how to use.
+
+ Args:
+ console (Console): A console instance to capture output.
+ """
+
+ def __init__(self, console: "Console") -> None:
+ self._console = console
+ self._result: Optional[str] = None
+
+ def __enter__(self) -> "Capture":
+ self._console.begin_capture()
+ return self
+
+ def __exit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc_val: Optional[BaseException],
+ exc_tb: Optional[TracebackType],
+ ) -> None:
+ self._result = self._console.end_capture()
+
+ def get(self) -> str:
+ """Get the result of the capture."""
+ if self._result is None:
+ raise CaptureError(
+ "Capture result is not available until context manager exits."
+ )
+ return self._result
+
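+# Illustrative sketch (editor's example; Console.capture() is the usual entry
+# point rather than instantiating Capture directly):
+#
+#   >>> from rich.console import Console
+#   >>> console = Console()
+#   >>> with console.capture() as capture:
+#   ...     console.print("hello")
+#   >>> capture.get()
+#   'hello\n'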
+
+class ThemeContext:
+ """A context manager to use a temporary theme. See :meth:`~rich.console.Console.use_theme` for usage."""
+
+ def __init__(self, console: "Console", theme: Theme, inherit: bool = True) -> None:
+ self.console = console
+ self.theme = theme
+ self.inherit = inherit
+
+ def __enter__(self) -> "ThemeContext":
+ self.console.push_theme(self.theme)
+ return self
+
+ def __exit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc_val: Optional[BaseException],
+ exc_tb: Optional[TracebackType],
+ ) -> None:
+ self.console.pop_theme()
+
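+# Illustrative sketch (editor's example; Console.use_theme() returns a
+# ThemeContext):
+#
+#   >>> from rich.console import Console
+#   >>> from rich.theme import Theme
+#   >>> console = Console()
+#   >>> with console.use_theme(Theme({"warning": "bold red"})):
+#   ...     console.print("careful!", style="warning")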
+
+class PagerContext:
+ """A context manager that 'pages' content. See :meth:`~rich.console.Console.pager` for usage."""
+
+ def __init__(
+ self,
+ console: "Console",
+ pager: Optional[Pager] = None,
+ styles: bool = False,
+ links: bool = False,
+ ) -> None:
+ self._console = console
+ self.pager = SystemPager() if pager is None else pager
+ self.styles = styles
+ self.links = links
+
+ def __enter__(self) -> "PagerContext":
+ self._console._enter_buffer()
+ return self
+
+ def __exit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc_val: Optional[BaseException],
+ exc_tb: Optional[TracebackType],
+ ) -> None:
+ if exc_type is None:
+ with self._console._lock:
+ buffer: List[Segment] = self._console._buffer[:]
+ del self._console._buffer[:]
+ segments: Iterable[Segment] = buffer
+ if not self.styles:
+ segments = Segment.strip_styles(segments)
+ elif not self.links:
+ segments = Segment.strip_links(segments)
+ content = self._console._render_buffer(segments)
+ self.pager.show(content)
+ self._console._exit_buffer()
+
+
+class ScreenContext:
+ """A context manager that enables an alternative screen. See :meth:`~rich.console.Console.screen` for usage."""
+
+ def __init__(
+ self, console: "Console", hide_cursor: bool, style: StyleType = ""
+ ) -> None:
+ self.console = console
+ self.hide_cursor = hide_cursor
+ self.screen = Screen(style=style)
+ self._changed = False
+
+ def update(
+ self, *renderables: RenderableType, style: Optional[StyleType] = None
+ ) -> None:
+ """Update the screen.
+
+ Args:
+ renderable (RenderableType, optional): Optional renderable to replace current renderable,
+ or None for no change. Defaults to None.
+ style: (Style, optional): Replacement style, or None for no change. Defaults to None.
+ """
+ if renderables:
+ self.screen.renderable = (
+ Group(*renderables) if len(renderables) > 1 else renderables[0]
+ )
+ if style is not None:
+ self.screen.style = style
+ self.console.print(self.screen, end="")
+
+ def __enter__(self) -> "ScreenContext":
+ self._changed = self.console.set_alt_screen(True)
+ if self._changed and self.hide_cursor:
+ self.console.show_cursor(False)
+ return self
+
+ def __exit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc_val: Optional[BaseException],
+ exc_tb: Optional[TracebackType],
+ ) -> None:
+ if self._changed:
+ self.console.set_alt_screen(False)
+ if self.hide_cursor:
+ self.console.show_cursor(True)
+
+
+class Group:
+ """Takes a group of renderables and returns a renderable object that renders the group.
+
+ Args:
+ renderables (Iterable[RenderableType]): An iterable of renderable objects.
+ fit (bool, optional): Fit dimension of group to contents, or fill available space. Defaults to True.
+ """
+
+ def __init__(self, *renderables: "RenderableType", fit: bool = True) -> None:
+ self._renderables = renderables
+ self.fit = fit
+ self._render: Optional[List[RenderableType]] = None
+
+ @property
+ def renderables(self) -> List["RenderableType"]:
+ if self._render is None:
+ self._render = list(self._renderables)
+ return self._render
+
+ def __rich_measure__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "Measurement":
+ if self.fit:
+ return measure_renderables(console, options, self.renderables)
+ else:
+ return Measurement(options.max_width, options.max_width)
+
+ def __rich_console__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> RenderResult:
+ yield from self.renderables
+
+
+def group(fit: bool = True) -> Callable[..., Callable[..., Group]]:
+ """A decorator that turns an iterable of renderables in to a group.
+
+ Args:
+ fit (bool, optional): Fit dimension of group to contents, or fill available space. Defaults to True.
+ """
+
+ def decorator(
+ method: Callable[..., Iterable[RenderableType]]
+ ) -> Callable[..., Group]:
+ """Convert a method that returns an iterable of renderables in to a Group."""
+
+ @wraps(method)
+ def _replace(*args: Any, **kwargs: Any) -> Group:
+ renderables = method(*args, **kwargs)
+ return Group(*renderables, fit=fit)
+
+ return _replace
+
+ return decorator
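+ # Illustrative usage of the ``group`` decorator (sketch only; the decorated
+ # function and the ``console`` instance are assumptions for the example):
+ #
+ #     @group()
+ #     def get_renderables():
+ #         yield "First line"
+ #         yield "Second line"
+ #
+ #     console.print(get_renderables())  # both lines render as a single Group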
+
+
+def _is_jupyter() -> bool: # pragma: no cover
+ """Check if we're running in a Jupyter notebook."""
+ try:
+ get_ipython # type: ignore
+ except NameError:
+ return False
+ ipython = get_ipython() # type: ignore
+ shell = ipython.__class__.__name__
+ if "google.colab" in str(ipython.__class__) or shell == "ZMQInteractiveShell":
+ return True # Jupyter notebook or qtconsole
+ elif shell == "TerminalInteractiveShell":
+ return False # Terminal running IPython
+ else:
+ return False # Other type (?)
+
+
+COLOR_SYSTEMS = {
+ "standard": ColorSystem.STANDARD,
+ "256": ColorSystem.EIGHT_BIT,
+ "truecolor": ColorSystem.TRUECOLOR,
+ "windows": ColorSystem.WINDOWS,
+}
+
+
+_COLOR_SYSTEMS_NAMES = {system: name for name, system in COLOR_SYSTEMS.items()}
+
+
+@dataclass
+class ConsoleThreadLocals(threading.local):
+ """Thread local values for Console context."""
+
+ theme_stack: ThemeStack
+ buffer: List[Segment] = field(default_factory=list)
+ buffer_index: int = 0
+
+
+class RenderHook(ABC):
+ """Provides hooks in to the render process."""
+
+ @abstractmethod
+ def process_renderables(
+ self, renderables: List[ConsoleRenderable]
+ ) -> List[ConsoleRenderable]:
+ """Called with a list of objects to render.
+
+ This method can return a new list of renderables, or modify and return the same list.
+
+ Args:
+ renderables (List[ConsoleRenderable]): A number of renderable objects.
+
+ Returns:
+ List[ConsoleRenderable]: A replacement list of renderables.
+ """
+
+
+_windows_console_features: Optional["WindowsConsoleFeatures"] = None
+
+
+def get_windows_console_features() -> "WindowsConsoleFeatures": # pragma: no cover
+ global _windows_console_features
+ if _windows_console_features is not None:
+ return _windows_console_features
+ from ._windows import get_windows_console_features
+
+ _windows_console_features = get_windows_console_features()
+ return _windows_console_features
+
+
+def detect_legacy_windows() -> bool:
+ """Detect legacy Windows."""
+ return WINDOWS and not get_windows_console_features().vt
+
+
+if detect_legacy_windows(): # pragma: no cover
+ from pip._vendor.colorama import init
+
+ init(strip=False)
+
+
+class Console:
+ """A high level console interface.
+
+ Args:
+ color_system (str, optional): The color system supported by your terminal,
+ either ``"standard"``, ``"256"`` or ``"truecolor"``. Leave as ``"auto"`` to autodetect.
+ force_terminal (Optional[bool], optional): Enable/disable terminal control codes, or None to auto-detect terminal. Defaults to None.
+ force_jupyter (Optional[bool], optional): Enable/disable Jupyter rendering, or None to auto-detect Jupyter. Defaults to None.
+ force_interactive (Optional[bool], optional): Enable/disable interactive mode, or None to auto detect. Defaults to None.
+ soft_wrap (Optional[bool], optional): Set soft wrap default on print method. Defaults to False.
+ theme (Theme, optional): An optional style theme object, or ``None`` for default theme.
+ stderr (bool, optional): Use stderr rather than stdout if ``file`` is not specified. Defaults to False.
+ file (IO, optional): A file object where the console should write to. Defaults to stdout.
+ quiet (bool, optional): Suppress all output. Defaults to False.
+ width (int, optional): The width of the terminal. Leave as default to auto-detect width.
+ height (int, optional): The height of the terminal. Leave as default to auto-detect height.
+ style (StyleType, optional): Style to apply to all output, or None for no style. Defaults to None.
+ no_color (Optional[bool], optional): Enable no color mode, or None to auto detect. Defaults to None.
+ tab_size (int, optional): Number of spaces used to replace a tab character. Defaults to 8.
+ record (bool, optional): Boolean to enable recording of terminal output,
+ required to call :meth:`export_html` and :meth:`export_text`. Defaults to False.
+ markup (bool, optional): Boolean to enable :ref:`console_markup`. Defaults to True.
+ emoji (bool, optional): Enable emoji code. Defaults to True.
+ emoji_variant (str, optional): Optional emoji variant, either "text" or "emoji". Defaults to None.
+ highlight (bool, optional): Enable automatic highlighting. Defaults to True.
+ log_time (bool, optional): Boolean to enable logging of time by :meth:`log` methods. Defaults to True.
+ log_path (bool, optional): Boolean to enable the logging of the caller by :meth:`log`. Defaults to True.
+ log_time_format (Union[str, TimeFormatterCallable], optional): If ``log_time`` is enabled, either string for strftime or callable that formats the time. Defaults to "[%X] ".
+ highlighter (HighlighterType, optional): Default highlighter.
+ legacy_windows (bool, optional): Enable legacy Windows mode, or ``None`` to auto detect. Defaults to ``None``.
+ safe_box (bool, optional): Restrict box options that don't render on legacy Windows.
+ get_datetime (Callable[[], datetime], optional): Callable that gets the current time as a datetime.datetime object (used by Console.log),
+ or None for datetime.now.
+ get_time (Callable[[], time], optional): Callable that gets the current time in seconds, default uses time.monotonic.
+ """
+
+ _environ: Mapping[str, str] = os.environ
+
+ def __init__(
+ self,
+ *,
+ color_system: Optional[
+ Literal["auto", "standard", "256", "truecolor", "windows"]
+ ] = "auto",
+ force_terminal: Optional[bool] = None,
+ force_jupyter: Optional[bool] = None,
+ force_interactive: Optional[bool] = None,
+ soft_wrap: bool = False,
+ theme: Optional[Theme] = None,
+ stderr: bool = False,
+ file: Optional[IO[str]] = None,
+ quiet: bool = False,
+ width: Optional[int] = None,
+ height: Optional[int] = None,
+ style: Optional[StyleType] = None,
+ no_color: Optional[bool] = None,
+ tab_size: int = 8,
+ record: bool = False,
+ markup: bool = True,
+ emoji: bool = True,
+ emoji_variant: Optional[EmojiVariant] = None,
+ highlight: bool = True,
+ log_time: bool = True,
+ log_path: bool = True,
+ log_time_format: Union[str, FormatTimeCallable] = "[%X]",
+ highlighter: Optional["HighlighterType"] = ReprHighlighter(),
+ legacy_windows: Optional[bool] = None,
+ safe_box: bool = True,
+ get_datetime: Optional[Callable[[], datetime]] = None,
+ get_time: Optional[Callable[[], float]] = None,
+ _environ: Optional[Mapping[str, str]] = None,
+ ):
+ # Copy of os.environ allows us to replace it for testing
+ if _environ is not None:
+ self._environ = _environ
+
+ self.is_jupyter = _is_jupyter() if force_jupyter is None else force_jupyter
+ if self.is_jupyter:
+ width = width or 93
+ height = height or 100
+
+ self.soft_wrap = soft_wrap
+ self._width = width
+ self._height = height
+ self.tab_size = tab_size
+ self.record = record
+ self._markup = markup
+ self._emoji = emoji
+ self._emoji_variant: Optional[EmojiVariant] = emoji_variant
+ self._highlight = highlight
+ self.legacy_windows: bool = (
+ (detect_legacy_windows() and not self.is_jupyter)
+ if legacy_windows is None
+ else legacy_windows
+ )
+ if width is None:
+ columns = self._environ.get("COLUMNS")
+ if columns is not None and columns.isdigit():
+ width = int(columns) - self.legacy_windows
+ if height is None:
+ lines = self._environ.get("LINES")
+ if lines is not None and lines.isdigit():
+ height = int(lines)
+
+ self.soft_wrap = soft_wrap
+ self._width = width
+ self._height = height
+
+ self._color_system: Optional[ColorSystem]
+ self._force_terminal = force_terminal
+ self._file = file
+ self.quiet = quiet
+ self.stderr = stderr
+
+ if color_system is None:
+ self._color_system = None
+ elif color_system == "auto":
+ self._color_system = self._detect_color_system()
+ else:
+ self._color_system = COLOR_SYSTEMS[color_system]
+
+ self._lock = threading.RLock()
+ self._log_render = LogRender(
+ show_time=log_time,
+ show_path=log_path,
+ time_format=log_time_format,
+ )
+ self.highlighter: HighlighterType = highlighter or _null_highlighter
+ self.safe_box = safe_box
+ self.get_datetime = get_datetime or datetime.now
+ self.get_time = get_time or monotonic
+ self.style = style
+ self.no_color = (
+ no_color if no_color is not None else "NO_COLOR" in self._environ
+ )
+ self.is_interactive = (
+ (self.is_terminal and not self.is_dumb_terminal)
+ if force_interactive is None
+ else force_interactive
+ )
+
+ self._record_buffer_lock = threading.RLock()
+ self._thread_locals = ConsoleThreadLocals(
+ theme_stack=ThemeStack(themes.DEFAULT if theme is None else theme)
+ )
+ self._record_buffer: List[Segment] = []
+ self._render_hooks: List[RenderHook] = []
+ self._live: Optional["Live"] = None
+ self._is_alt_screen = False
+
+ def __repr__(self) -> str:
+ return f""
+
+ @property
+ def file(self) -> IO[str]:
+ """Get the file object to write to."""
+ file = self._file or (sys.stderr if self.stderr else sys.stdout)
+ file = getattr(file, "rich_proxied_file", file)
+ return file
+
+ @file.setter
+ def file(self, new_file: IO[str]) -> None:
+ """Set a new file object."""
+ self._file = new_file
+
+ @property
+ def _buffer(self) -> List[Segment]:
+ """Get a thread local buffer."""
+ return self._thread_locals.buffer
+
+ @property
+ def _buffer_index(self) -> int:
+ """Get a thread local buffer."""
+ return self._thread_locals.buffer_index
+
+ @_buffer_index.setter
+ def _buffer_index(self, value: int) -> None:
+ self._thread_locals.buffer_index = value
+
+ @property
+ def _theme_stack(self) -> ThemeStack:
+ """Get the thread local theme stack."""
+ return self._thread_locals.theme_stack
+
+ def _detect_color_system(self) -> Optional[ColorSystem]:
+ """Detect color system from env vars."""
+ if self.is_jupyter:
+ return ColorSystem.TRUECOLOR
+ if not self.is_terminal or self.is_dumb_terminal:
+ return None
+ if WINDOWS: # pragma: no cover
+ if self.legacy_windows: # pragma: no cover
+ return ColorSystem.WINDOWS
+ windows_console_features = get_windows_console_features()
+ return (
+ ColorSystem.TRUECOLOR
+ if windows_console_features.truecolor
+ else ColorSystem.EIGHT_BIT
+ )
+ else:
+ color_term = self._environ.get("COLORTERM", "").strip().lower()
+ if color_term in ("truecolor", "24bit"):
+ return ColorSystem.TRUECOLOR
+ term = self._environ.get("TERM", "").strip().lower()
+ _term_name, _hyphen, colors = term.rpartition("-")
+ color_system = _TERM_COLORS.get(colors, ColorSystem.STANDARD)
+ return color_system
+
+ def _enter_buffer(self) -> None:
+ """Enter in to a buffer context, and buffer all output."""
+ self._buffer_index += 1
+
+ def _exit_buffer(self) -> None:
+ """Leave buffer context, and render content if required."""
+ self._buffer_index -= 1
+ self._check_buffer()
+
+ def set_live(self, live: "Live") -> None:
+ """Set Live instance. Used by Live context manager.
+
+ Args:
+ live (Live): Live instance using this Console.
+
+ Raises:
+ errors.LiveError: If this Console has a Live context currently active.
+ """
+ with self._lock:
+ if self._live is not None:
+ raise errors.LiveError("Only one live display may be active at once")
+ self._live = live
+
+ def clear_live(self) -> None:
+ """Clear the Live instance."""
+ with self._lock:
+ self._live = None
+
+ def push_render_hook(self, hook: RenderHook) -> None:
+ """Add a new render hook to the stack.
+
+ Args:
+ hook (RenderHook): Render hook instance.
+ """
+ with self._lock:
+ self._render_hooks.append(hook)
+
+ def pop_render_hook(self) -> None:
+ """Pop the last renderhook from the stack."""
+ with self._lock:
+ self._render_hooks.pop()
+
+ def __enter__(self) -> "Console":
+ """Own context manager to enter buffer context."""
+ self._enter_buffer()
+ return self
+
+ def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
+ """Exit buffer context."""
+ self._exit_buffer()
+
+ def begin_capture(self) -> None:
+ """Begin capturing console output. Call :meth:`end_capture` to exit capture mode and return output."""
+ self._enter_buffer()
+
+ def end_capture(self) -> str:
+ """End capture mode and return captured string.
+
+ Returns:
+ str: Console output.
+ """
+ render_result = self._render_buffer(self._buffer)
+ del self._buffer[:]
+ self._exit_buffer()
+ return render_result
+
+ def push_theme(self, theme: Theme, *, inherit: bool = True) -> None:
+ """Push a new theme on to the top of the stack, replacing the styles from the previous theme.
+ Generally speaking, you should call :meth:`~rich.console.Console.use_theme` to get a context manager, rather
+ than calling this method directly.
+
+ Args:
+ theme (Theme): A theme instance.
+ inherit (bool, optional): Inherit existing styles. Defaults to True.
+ """
+ self._theme_stack.push_theme(theme, inherit=inherit)
+
+ def pop_theme(self) -> None:
+ """Remove theme from top of stack, restoring previous theme."""
+ self._theme_stack.pop_theme()
+
+ def use_theme(self, theme: Theme, *, inherit: bool = True) -> ThemeContext:
+ """Use a different theme for the duration of the context manager.
+
+ Args:
+ theme (Theme): Theme instance to use.
+ inherit (bool, optional): Inherit existing console styles. Defaults to True.
+
+ Returns:
+ ThemeContext: A context manager that restores the previous theme on exit.
+ """
+ return ThemeContext(self, theme, inherit)
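+ # Illustrative usage of ``use_theme`` (sketch; the "warning" style name is an
+ # assumption and ``Theme`` comes from rich.theme):
+ #
+ #     with console.use_theme(Theme({"warning": "bold red"})):
+ #         console.print("Unstable build", style="warning")
+ #     # the previous theme is restored when the context manager exits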
+
+ @property
+ def color_system(self) -> Optional[str]:
+ """Get color system string.
+
+ Returns:
+ Optional[str]: "standard", "256" or "truecolor".
+ """
+
+ if self._color_system is not None:
+ return _COLOR_SYSTEMS_NAMES[self._color_system]
+ else:
+ return None
+
+ @property
+ def encoding(self) -> str:
+ """Get the encoding of the console file, e.g. ``"utf-8"``.
+
+ Returns:
+ str: A standard encoding string.
+ """
+ return (getattr(self.file, "encoding", "utf-8") or "utf-8").lower()
+
+ @property
+ def is_terminal(self) -> bool:
+ """Check if the console is writing to a terminal.
+
+ Returns:
+ bool: True if the console is writing to a device capable of
+ understanding terminal codes, otherwise False.
+ """
+ if self._force_terminal is not None:
+ return self._force_terminal
+ isatty: Optional[Callable[[], bool]] = getattr(self.file, "isatty", None)
+ try:
+ return False if isatty is None else isatty()
+ except ValueError:
+ # in some situations (at the end of a pytest run, for example) isatty() can raise
+ # ValueError: I/O operation on closed file
+ # return False because we aren't in a terminal anymore
+ return False
+
+ @property
+ def is_dumb_terminal(self) -> bool:
+ """Detect dumb terminal.
+
+ Returns:
+ bool: True if writing to a dumb terminal, otherwise False.
+
+ """
+ _term = self._environ.get("TERM", "")
+ is_dumb = _term.lower() in ("dumb", "unknown")
+ return self.is_terminal and is_dumb
+
+ @property
+ def options(self) -> ConsoleOptions:
+ """Get default console options."""
+ return ConsoleOptions(
+ max_height=self.size.height,
+ size=self.size,
+ legacy_windows=self.legacy_windows,
+ min_width=1,
+ max_width=self.width,
+ encoding=self.encoding,
+ is_terminal=self.is_terminal,
+ )
+
+ @property
+ def size(self) -> ConsoleDimensions:
+ """Get the size of the console.
+
+ Returns:
+ ConsoleDimensions: A named tuple containing the dimensions.
+ """
+
+ if self._width is not None and self._height is not None:
+ return ConsoleDimensions(self._width - self.legacy_windows, self._height)
+
+ if self.is_dumb_terminal:
+ return ConsoleDimensions(80, 25)
+
+ width: Optional[int] = None
+ height: Optional[int] = None
+
+ if WINDOWS: # pragma: no cover
+ try:
+ width, height = os.get_terminal_size()
+ except OSError: # Probably not a terminal
+ pass
+ else:
+ try:
+ width, height = os.get_terminal_size(sys.__stdin__.fileno())
+ except (AttributeError, ValueError, OSError):
+ try:
+ width, height = os.get_terminal_size(sys.__stdout__.fileno())
+ except (AttributeError, ValueError, OSError):
+ pass
+
+ columns = self._environ.get("COLUMNS")
+ if columns is not None and columns.isdigit():
+ width = int(columns)
+ lines = self._environ.get("LINES")
+ if lines is not None and lines.isdigit():
+ height = int(lines)
+
+ # get_terminal_size can report 0, 0 if run from pseudo-terminal
+ width = width or 80
+ height = height or 25
+ return ConsoleDimensions(
+ width - self.legacy_windows if self._width is None else self._width,
+ height if self._height is None else self._height,
+ )
+
+ @size.setter
+ def size(self, new_size: Tuple[int, int]) -> None:
+ """Set a new size for the terminal.
+
+ Args:
+ new_size (Tuple[int, int]): New width and height.
+ """
+ width, height = new_size
+ self._width = width
+ self._height = height
+
+ @property
+ def width(self) -> int:
+ """Get the width of the console.
+
+ Returns:
+ int: The width (in characters) of the console.
+ """
+ return self.size.width
+
+ @width.setter
+ def width(self, width: int) -> None:
+ """Set width.
+
+ Args:
+ width (int): New width.
+ """
+ self._width = width
+
+ @property
+ def height(self) -> int:
+ """Get the height of the console.
+
+ Returns:
+ int: The height (in lines) of the console.
+ """
+ return self.size.height
+
+ @height.setter
+ def height(self, height: int) -> None:
+ """Set height.
+
+ Args:
+ height (int): new height.
+ """
+ self._height = height
+
+ def bell(self) -> None:
+ """Play a 'bell' sound (if supported by the terminal)."""
+ self.control(Control.bell())
+
+ def capture(self) -> Capture:
+ """A context manager to *capture* the result of print() or log() in a string,
+ rather than writing it to the console.
+
+ Example:
+ >>> from rich.console import Console
+ >>> console = Console()
+ >>> with console.capture() as capture:
+ ... console.print("[bold magenta]Hello World[/]")
+ >>> print(capture.get())
+
+ Returns:
+ Capture: Context manager which disables writing to the terminal.
+ """
+ capture = Capture(self)
+ return capture
+
+ def pager(
+ self, pager: Optional[Pager] = None, styles: bool = False, links: bool = False
+ ) -> PagerContext:
+ """A context manager to display anything printed within a "pager". The pager application
+ is defined by the system and will typically support at least pressing a key to scroll.
+
+ Args:
+ pager (Pager, optional): A pager object, or None to use :class:`~rich.pager.SystemPager`. Defaults to None.
+ styles (bool, optional): Show styles in pager. Defaults to False.
+ links (bool, optional): Show links in pager. Defaults to False.
+
+ Example:
+ >>> from rich.console import Console
+ >>> from rich.__main__ import make_test_card
+ >>> console = Console()
+ >>> with console.pager():
+ console.print(make_test_card())
+
+ Returns:
+ PagerContext: A context manager.
+ """
+ return PagerContext(self, pager=pager, styles=styles, links=links)
+
+ def line(self, count: int = 1) -> None:
+ """Write new line(s).
+
+ Args:
+ count (int, optional): Number of new lines. Defaults to 1.
+ """
+
+ assert count >= 0, "count must be >= 0"
+ self.print(NewLine(count))
+
+ def clear(self, home: bool = True) -> None:
+ """Clear the screen.
+
+ Args:
+ home (bool, optional): Also move the cursor to 'home' position. Defaults to True.
+ """
+ if home:
+ self.control(Control.clear(), Control.home())
+ else:
+ self.control(Control.clear())
+
+ def status(
+ self,
+ status: RenderableType,
+ *,
+ spinner: str = "dots",
+ spinner_style: str = "status.spinner",
+ speed: float = 1.0,
+ refresh_per_second: float = 12.5,
+ ) -> "Status":
+ """Display a status and spinner.
+
+ Args:
+ status (RenderableType): A status renderable (str or Text typically).
+ spinner (str, optional): Name of spinner animation (see python -m rich.spinner). Defaults to "dots".
+ spinner_style (StyleType, optional): Style of spinner. Defaults to "status.spinner".
+ speed (float, optional): Speed factor for spinner animation. Defaults to 1.0.
+ refresh_per_second (float, optional): Number of refreshes per second. Defaults to 12.5.
+
+ Returns:
+ Status: A Status object that may be used as a context manager.
+ """
+ from .status import Status
+
+ status_renderable = Status(
+ status,
+ console=self,
+ spinner=spinner,
+ spinner_style=spinner_style,
+ speed=speed,
+ refresh_per_second=refresh_per_second,
+ )
+ return status_renderable
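+ # Illustrative usage of ``status`` (sketch; ``do_work()`` is a hypothetical
+ # callable):
+ #
+ #     with console.status("Fetching replays...", spinner="dots"):
+ #         do_work()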
+
+ def show_cursor(self, show: bool = True) -> bool:
+ """Show or hide the cursor.
+
+ Args:
+ show (bool, optional): Set visibility of the cursor.
+ """
+ if self.is_terminal and not self.legacy_windows:
+ self.control(Control.show_cursor(show))
+ return True
+ return False
+
+ def set_alt_screen(self, enable: bool = True) -> bool:
+ """Enables alternative screen mode.
+
+ Note, if you enable this mode, you should ensure that it is disabled before
+ the application exits. See :meth:`~rich.Console.screen` for a context manager
+ that handles this for you.
+
+ Args:
+ enable (bool, optional): Enable (True) or disable (False) alternate screen. Defaults to True.
+
+ Returns:
+ bool: True if the control codes were written.
+
+ """
+ changed = False
+ if self.is_terminal and not self.legacy_windows:
+ self.control(Control.alt_screen(enable))
+ changed = True
+ self._is_alt_screen = enable
+ return changed
+
+ @property
+ def is_alt_screen(self) -> bool:
+ """Check if the alt screen was enabled.
+
+ Returns:
+ bool: True if the alt screen was enabled, otherwise False.
+ """
+ return self._is_alt_screen
+
+ def screen(
+ self, hide_cursor: bool = True, style: Optional[StyleType] = None
+ ) -> "ScreenContext":
+ """Context manager to enable and disable 'alternative screen' mode.
+
+ Args:
+ hide_cursor (bool, optional): Also hide the cursor. Defaults to True.
+ style (Style, optional): Optional style for screen. Defaults to None.
+
+ Returns:
+ ~ScreenContext: Context which enables alternate screen on enter, and disables it on exit.
+ """
+ return ScreenContext(self, hide_cursor=hide_cursor, style=style or "")
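+ # Illustrative usage of ``screen`` (sketch; only effective on a real terminal,
+ # since ``set_alt_screen`` is a no-op otherwise):
+ #
+ #     with console.screen(style="on blue") as screen:
+ #         screen.update("Full-screen content")
+ #         input()  # keep the alternate screen visible until Enter is pressed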
+
+ def measure(
+ self, renderable: RenderableType, *, options: Optional[ConsoleOptions] = None
+ ) -> Measurement:
+ """Measure a renderable. Returns a :class:`~rich.measure.Measurement` object which contains
+ information regarding the number of characters required to print the renderable.
+
+ Args:
+ renderable (RenderableType): Any renderable or string.
+ options (Optional[ConsoleOptions], optional): Options to use when measuring, or None
+ to use default options. Defaults to None.
+
+ Returns:
+ Measurement: A measurement of the renderable.
+ """
+ measurement = Measurement.get(self, options or self.options, renderable)
+ return measurement
+
+ def render(
+ self, renderable: RenderableType, options: Optional[ConsoleOptions] = None
+ ) -> Iterable[Segment]:
+ """Render an object in to an iterable of `Segment` instances.
+
+ This method contains the logic for rendering objects with the console protocol.
+ You are unlikely to need to use it directly, unless you are extending the library.
+
+ Args:
+ renderable (RenderableType): An object supporting the console protocol, or
+ an object that may be converted to a string.
+ options (ConsoleOptions, optional): An options object, or None to use self.options. Defaults to None.
+
+ Returns:
+ Iterable[Segment]: An iterable of segments that may be rendered.
+ """
+
+ _options = options or self.options
+ if _options.max_width < 1:
+ # No space to render anything. This prevents potential recursion errors.
+ return
+ render_iterable: RenderResult
+
+ renderable = rich_cast(renderable)
+ if hasattr(renderable, "__rich_console__") and not isclass(renderable):
+ render_iterable = renderable.__rich_console__(self, _options) # type: ignore
+ elif isinstance(renderable, str):
+ text_renderable = self.render_str(
+ renderable, highlight=_options.highlight, markup=_options.markup
+ )
+ render_iterable = text_renderable.__rich_console__(self, _options)
+ else:
+ raise errors.NotRenderableError(
+ f"Unable to render {renderable!r}; "
+ "A str, Segment or object with __rich_console__ method is required"
+ )
+
+ try:
+ iter_render = iter(render_iterable)
+ except TypeError:
+ raise errors.NotRenderableError(
+ f"object {render_iterable!r} is not renderable"
+ )
+ _Segment = Segment
+ for render_output in iter_render:
+ if isinstance(render_output, _Segment):
+ yield render_output
+ else:
+ yield from self.render(render_output, _options)
+
+ def render_lines(
+ self,
+ renderable: RenderableType,
+ options: Optional[ConsoleOptions] = None,
+ *,
+ style: Optional[Style] = None,
+ pad: bool = True,
+ new_lines: bool = False,
+ ) -> List[List[Segment]]:
+ """Render objects in to a list of lines.
+
+ The output of render_lines is useful when further formatting of rendered console text
+ is required, such as the Panel class which draws a border around any renderable object.
+
+ Args:
+ renderable (RenderableType): Any object renderable in the console.
+ options (Optional[ConsoleOptions], optional): Console options, or None to use self.options. Defaults to ``None``.
+ style (Style, optional): Optional style to apply to renderables. Defaults to ``None``.
+ pad (bool, optional): Pad lines shorter than render width. Defaults to ``True``.
+ new_lines (bool, optional): Include "\n" characters at end of lines.
+
+ Returns:
+ List[List[Segment]]: A list of lines, where a line is a list of Segment objects.
+ """
+ with self._lock:
+ render_options = options or self.options
+ _rendered = self.render(renderable, render_options)
+ if style:
+ _rendered = Segment.apply_style(_rendered, style)
+ lines = list(
+ islice(
+ Segment.split_and_crop_lines(
+ _rendered,
+ render_options.max_width,
+ include_new_lines=new_lines,
+ pad=pad,
+ ),
+ None,
+ render_options.height,
+ )
+ )
+ if render_options.height is not None:
+ extra_lines = render_options.height - len(lines)
+ if extra_lines > 0:
+ pad_line = [
+ [Segment(" " * render_options.max_width, style), Segment("\n")]
+ if new_lines
+ else [Segment(" " * render_options.max_width, style)]
+ ]
+ lines.extend(pad_line * extra_lines)
+
+ return lines
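+ # Illustrative usage of ``render_lines`` (sketch; the width of 20 cells is
+ # arbitrary):
+ #
+ #     options = console.options.update_width(20)
+ #     lines = console.render_lines("Some text to wrap", options, pad=True)
+ #     # ``lines`` is a List[List[Segment]], one inner list per rendered row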
+
+ def render_str(
+ self,
+ text: str,
+ *,
+ style: Union[str, Style] = "",
+ justify: Optional[JustifyMethod] = None,
+ overflow: Optional[OverflowMethod] = None,
+ emoji: Optional[bool] = None,
+ markup: Optional[bool] = None,
+ highlight: Optional[bool] = None,
+ highlighter: Optional[HighlighterType] = None,
+ ) -> "Text":
+ """Convert a string to a Text instance. This is is called automatically if
+ you print or log a string.
+
+ Args:
+ text (str): Text to render.
+ style (Union[str, Style], optional): Style to apply to rendered text.
+ justify (str, optional): Justify method: "default", "left", "center", "full", or "right". Defaults to ``None``.
+ overflow (str, optional): Overflow method: "crop", "fold", or "ellipsis". Defaults to ``None``.
+ emoji (Optional[bool], optional): Enable emoji, or ``None`` to use Console default.
+ markup (Optional[bool], optional): Enable markup, or ``None`` to use Console default.
+ highlight (Optional[bool], optional): Enable highlighting, or ``None`` to use Console default.
+ highlighter (HighlighterType, optional): Optional highlighter to apply.
+ Returns:
+ Text: A Text instance.
+
+ """
+ emoji_enabled = emoji or (emoji is None and self._emoji)
+ markup_enabled = markup or (markup is None and self._markup)
+ highlight_enabled = highlight or (highlight is None and self._highlight)
+
+ if markup_enabled:
+ rich_text = render_markup(
+ text,
+ style=style,
+ emoji=emoji_enabled,
+ emoji_variant=self._emoji_variant,
+ )
+ rich_text.justify = justify
+ rich_text.overflow = overflow
+ else:
+ rich_text = Text(
+ _emoji_replace(text, default_variant=self._emoji_variant)
+ if emoji_enabled
+ else text,
+ justify=justify,
+ overflow=overflow,
+ style=style,
+ )
+
+ _highlighter = (highlighter or self.highlighter) if highlight_enabled else None
+ if _highlighter is not None:
+ highlight_text = _highlighter(str(rich_text))
+ highlight_text.copy_styles(rich_text)
+ return highlight_text
+
+ return rich_text
+
+ def get_style(
+ self, name: Union[str, Style], *, default: Optional[Union[Style, str]] = None
+ ) -> Style:
+ """Get a Style instance by it's theme name or parse a definition.
+
+ Args:
+ name (str): The name of a style or a style definition.
+
+ Returns:
+ Style: A Style object.
+
+ Raises:
+ MissingStyle: If no style could be parsed from name.
+
+ """
+ if isinstance(name, Style):
+ return name
+
+ try:
+ style = self._theme_stack.get(name)
+ if style is None:
+ style = Style.parse(name)
+ return style.copy() if style.link else style
+ except errors.StyleSyntaxError as error:
+ if default is not None:
+ return self.get_style(default)
+ raise errors.MissingStyle(
+ f"Failed to get style {name!r}; {error}"
+ ) from None
+
+ def _collect_renderables(
+ self,
+ objects: Iterable[Any],
+ sep: str,
+ end: str,
+ *,
+ justify: Optional[JustifyMethod] = None,
+ emoji: Optional[bool] = None,
+ markup: Optional[bool] = None,
+ highlight: Optional[bool] = None,
+ ) -> List[ConsoleRenderable]:
+ """Combine a number of renderables and text into one renderable.
+
+ Args:
+ objects (Iterable[Any]): Anything that Rich can render.
+ sep (str): String to write between print data.
+ end (str): String to write at end of print data.
+ justify (str, optional): One of "left", "right", "center", or "full". Defaults to ``None``.
+ emoji (Optional[bool], optional): Enable emoji code, or ``None`` to use console default.
+ markup (Optional[bool], optional): Enable markup, or ``None`` to use console default.
+ highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use console default.
+
+ Returns:
+ List[ConsoleRenderable]: A list of things to render.
+ """
+ renderables: List[ConsoleRenderable] = []
+ _append = renderables.append
+ text: List[Text] = []
+ append_text = text.append
+
+ append = _append
+ if justify in ("left", "center", "right"):
+
+ def align_append(renderable: RenderableType) -> None:
+ _append(Align(renderable, cast(AlignMethod, justify)))
+
+ append = align_append
+
+ _highlighter: HighlighterType = _null_highlighter
+ if highlight or (highlight is None and self._highlight):
+ _highlighter = self.highlighter
+
+ def check_text() -> None:
+ if text:
+ sep_text = Text(sep, justify=justify, end=end)
+ append(sep_text.join(text))
+ del text[:]
+
+ for renderable in objects:
+ renderable = rich_cast(renderable)
+ if isinstance(renderable, str):
+ append_text(
+ self.render_str(
+ renderable, emoji=emoji, markup=markup, highlighter=_highlighter
+ )
+ )
+ elif isinstance(renderable, Text):
+ append_text(renderable)
+ elif isinstance(renderable, ConsoleRenderable):
+ check_text()
+ append(renderable)
+ elif is_expandable(renderable):
+ check_text()
+ append(Pretty(renderable, highlighter=_highlighter))
+ else:
+ append_text(_highlighter(str(renderable)))
+
+ check_text()
+
+ if self.style is not None:
+ style = self.get_style(self.style)
+ renderables = [Styled(renderable, style) for renderable in renderables]
+
+ return renderables
+
+ def rule(
+ self,
+ title: TextType = "",
+ *,
+ characters: str = "─",
+ style: Union[str, Style] = "rule.line",
+ align: AlignMethod = "center",
+ ) -> None:
+ """Draw a line with optional centered title.
+
+ Args:
+ title (str, optional): Text to render over the rule. Defaults to "".
+ characters (str, optional): Character(s) to form the line. Defaults to "─".
+ style (str, optional): Style of line. Defaults to "rule.line".
+ align (str, optional): How to align the title, one of "left", "center", or "right". Defaults to "center".
+ """
+ from .rule import Rule
+
+ rule = Rule(title=title, characters=characters, style=style, align=align)
+ self.print(rule)
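+ # Illustrative usage of ``rule`` (sketch; title text and alignment are
+ # arbitrary):
+ #
+ #     console.rule("[bold blue]Team Preview", align="left")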
+
+ def control(self, *control: Control) -> None:
+ """Insert non-printing control codes.
+
+ Args:
+ control (Control): Control codes, such as those that may move the cursor.
+ """
+ if not self.is_dumb_terminal:
+ with self:
+ self._buffer.extend(_control.segment for _control in control)
+
+ def out(
+ self,
+ *objects: Any,
+ sep: str = " ",
+ end: str = "\n",
+ style: Optional[Union[str, Style]] = None,
+ highlight: Optional[bool] = None,
+ ) -> None:
+ """Output to the terminal. This is a low-level way of writing to the terminal which unlike
+ :meth:`~rich.console.Console.print` won't pretty print, wrap text, or apply markup, but will
+ optionally apply highlighting and a basic style.
+
+ Args:
+ sep (str, optional): String to write between print data. Defaults to " ".
+ end (str, optional): String to write at end of print data. Defaults to "\\\\n".
+ style (Union[str, Style], optional): A style to apply to output. Defaults to None.
+ highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use
+ console default. Defaults to ``None``.
+ """
+ raw_output: str = sep.join(str(_object) for _object in objects)
+ self.print(
+ raw_output,
+ style=style,
+ highlight=highlight,
+ emoji=False,
+ markup=False,
+ no_wrap=True,
+ overflow="ignore",
+ crop=False,
+ end=end,
+ )
+
+ def print(
+ self,
+ *objects: Any,
+ sep: str = " ",
+ end: str = "\n",
+ style: Optional[Union[str, Style]] = None,
+ justify: Optional[JustifyMethod] = None,
+ overflow: Optional[OverflowMethod] = None,
+ no_wrap: Optional[bool] = None,
+ emoji: Optional[bool] = None,
+ markup: Optional[bool] = None,
+ highlight: Optional[bool] = None,
+ width: Optional[int] = None,
+ height: Optional[int] = None,
+ crop: bool = True,
+ soft_wrap: Optional[bool] = None,
+ new_line_start: bool = False,
+ ) -> None:
+ """Print to the console.
+
+ Args:
+ objects (positional args): Objects to print to the terminal.
+ sep (str, optional): String to write between print data. Defaults to " ".
+ end (str, optional): String to write at end of print data. Defaults to "\\\\n".
+ style (Union[str, Style], optional): A style to apply to output. Defaults to None.
+ justify (str, optional): Justify method: "default", "left", "right", "center", or "full". Defaults to ``None``.
+ overflow (str, optional): Overflow method: "ignore", "crop", "fold", or "ellipsis". Defaults to None.
+ no_wrap (Optional[bool], optional): Disable word wrapping. Defaults to None.
+ emoji (Optional[bool], optional): Enable emoji code, or ``None`` to use console default. Defaults to ``None``.
+ markup (Optional[bool], optional): Enable markup, or ``None`` to use console default. Defaults to ``None``.
+ highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use console default. Defaults to ``None``.
+ width (Optional[int], optional): Width of output, or ``None`` to auto-detect. Defaults to ``None``.
+ height (Optional[int], optional): Height of output, or ``None`` for no limit. Defaults to ``None``.
+ crop (bool, optional): Crop output to width of terminal. Defaults to True.
+ soft_wrap (Optional[bool], optional): Enable soft wrap mode, which disables word wrapping and cropping of text, or ``None`` for
+ Console default. Defaults to ``None``.
+ new_line_start (bool, optional): Insert a new line at the start if the output contains more than one line. Defaults to ``False``.
+ """
+ if not objects:
+ objects = (NewLine(),)
+
+ if soft_wrap is None:
+ soft_wrap = self.soft_wrap
+ if soft_wrap:
+ if no_wrap is None:
+ no_wrap = True
+ if overflow is None:
+ overflow = "ignore"
+ crop = False
+ render_hooks = self._render_hooks[:]
+ with self:
+ renderables = self._collect_renderables(
+ objects,
+ sep,
+ end,
+ justify=justify,
+ emoji=emoji,
+ markup=markup,
+ highlight=highlight,
+ )
+ for hook in render_hooks:
+ renderables = hook.process_renderables(renderables)
+ render_options = self.options.update(
+ justify=justify,
+ overflow=overflow,
+ width=min(width, self.width) if width is not None else NO_CHANGE,
+ height=height,
+ no_wrap=no_wrap,
+ markup=markup,
+ highlight=highlight,
+ )
+
+ new_segments: List[Segment] = []
+ extend = new_segments.extend
+ render = self.render
+ if style is None:
+ for renderable in renderables:
+ extend(render(renderable, render_options))
+ else:
+ for renderable in renderables:
+ extend(
+ Segment.apply_style(
+ render(renderable, render_options), self.get_style(style)
+ )
+ )
+ if new_line_start:
+ if (
+ len("".join(segment.text for segment in new_segments).splitlines())
+ > 1
+ ):
+ new_segments.insert(0, Segment.line())
+ if crop:
+ buffer_extend = self._buffer.extend
+ for line in Segment.split_and_crop_lines(
+ new_segments, self.width, pad=False
+ ):
+ buffer_extend(line)
+ else:
+ self._buffer.extend(new_segments)
+
+ def print_json(
+ self,
+ json: Optional[str] = None,
+ *,
+ data: Any = None,
+ indent: Union[None, int, str] = 2,
+ highlight: bool = True,
+ skip_keys: bool = False,
+ ensure_ascii: bool = True,
+ check_circular: bool = True,
+ allow_nan: bool = True,
+ default: Optional[Callable[[Any], Any]] = None,
+ sort_keys: bool = False,
+ ) -> None:
+ """Pretty prints JSON. Output will be valid JSON.
+
+ Args:
+ json (Optional[str]): A string containing JSON.
+ data (Any): If json is not supplied, then encode this data.
+ indent (Union[None, int, str], optional): Number of spaces to indent. Defaults to 2.
+ highlight (bool, optional): Enable highlighting of output. Defaults to True.
+ skip_keys (bool, optional): Skip keys not of a basic type. Defaults to False.
+ ensure_ascii (bool, optional): Escape all non-ascii characters. Defaults to True.
+ check_circular (bool, optional): Check for circular references. Defaults to True.
+ allow_nan (bool, optional): Allow NaN and Infinity values. Defaults to True.
+ default (Callable, optional): A callable that converts values that cannot be encoded
+ into something that can be JSON encoded. Defaults to None.
+ sort_keys (bool, optional): Sort dictionary keys. Defaults to False.
+ """
+ from pip._vendor.rich.json import JSON
+
+ if json is None:
+ json_renderable = JSON.from_data(
+ data,
+ indent=indent,
+ highlight=highlight,
+ skip_keys=skip_keys,
+ ensure_ascii=ensure_ascii,
+ check_circular=check_circular,
+ allow_nan=allow_nan,
+ default=default,
+ sort_keys=sort_keys,
+ )
+ else:
+ if not isinstance(json, str):
+ raise TypeError(
+ f"json must be str. Did you mean print_json(data={json!r}) ?"
+ )
+ json_renderable = JSON(
+ json,
+ indent=indent,
+ highlight=highlight,
+ skip_keys=skip_keys,
+ ensure_ascii=ensure_ascii,
+ check_circular=check_circular,
+ allow_nan=allow_nan,
+ default=default,
+ sort_keys=sort_keys,
+ )
+ self.print(json_renderable, soft_wrap=True)
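+ # Illustrative usage of ``print_json`` (sketch; the payload is arbitrary):
+ #
+ #     console.print_json('{"name": "Espeon", "level": 42}')
+ #     console.print_json(data={"name": "Espeon", "level": 42}, indent=4)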
+
+ def update_screen(
+ self,
+ renderable: RenderableType,
+ *,
+ region: Optional[Region] = None,
+ options: Optional[ConsoleOptions] = None,
+ ) -> None:
+ """Update the screen at a given offset.
+
+ Args:
+ renderable (RenderableType): A Rich renderable.
+ region (Region, optional): Region of screen to update, or None for entire screen. Defaults to None.
+ options (Optional[ConsoleOptions], optional): Console options, or None to use self.options. Defaults to None.
+
+ Raises:
+ errors.NoAltScreen: If the Console isn't in alt screen mode.
+
+ """
+ if not self.is_alt_screen:
+ raise errors.NoAltScreen("Alt screen must be enabled to call update_screen")
+ render_options = options or self.options
+ if region is None:
+ x = y = 0
+ render_options = render_options.update_dimensions(
+ render_options.max_width, render_options.height or self.height
+ )
+ else:
+ x, y, width, height = region
+ render_options = render_options.update_dimensions(width, height)
+
+ lines = self.render_lines(renderable, options=render_options)
+ self.update_screen_lines(lines, x, y)
+
+ def update_screen_lines(
+ self, lines: List[List[Segment]], x: int = 0, y: int = 0
+ ) -> None:
+ """Update lines of the screen at a given offset.
+
+ Args:
+ lines (List[List[Segment]]): Rendered lines (as produced by :meth:`~rich.Console.render_lines`).
+ x (int, optional): x offset (column no). Defaults to 0.
+ y (int, optional): y offset (row no). Defaults to 0.
+
+ Raises:
+ errors.NoAltScreen: If the Console isn't in alt screen mode.
+ """
+ if not self.is_alt_screen:
+ raise errors.NoAltScreen("Alt screen must be enabled to call update_screen")
+ screen_update = ScreenUpdate(lines, x, y)
+ segments = self.render(screen_update)
+ self._buffer.extend(segments)
+ self._check_buffer()
+
+ def print_exception(
+ self,
+ *,
+ width: Optional[int] = 100,
+ extra_lines: int = 3,
+ theme: Optional[str] = None,
+ word_wrap: bool = False,
+ show_locals: bool = False,
+ suppress: Iterable[Union[str, ModuleType]] = (),
+ max_frames: int = 100,
+ ) -> None:
+ """Prints a rich render of the last exception and traceback.
+
+ Args:
+ width (Optional[int], optional): Number of characters used to render code. Defaults to 100.
+ extra_lines (int, optional): Additional lines of code to render. Defaults to 3.
+ theme (str, optional): Override pygments theme used in traceback.
+ word_wrap (bool, optional): Enable word wrapping of long lines. Defaults to False.
+ show_locals (bool, optional): Enable display of local variables. Defaults to False.
+ suppress (Iterable[Union[str, ModuleType]]): Optional sequence of modules or paths to exclude from traceback.
+ max_frames (int): Maximum number of frames to show in a traceback, 0 for no maximum. Defaults to 100.
+ """
+ from .traceback import Traceback
+
+ traceback = Traceback(
+ width=width,
+ extra_lines=extra_lines,
+ theme=theme,
+ word_wrap=word_wrap,
+ show_locals=show_locals,
+ suppress=suppress,
+ max_frames=max_frames,
+ )
+ self.print(traceback)
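+ # Illustrative usage of ``print_exception`` (sketch; must be called while an
+ # exception is being handled):
+ #
+ #     try:
+ #         1 / 0
+ #     except ZeroDivisionError:
+ #         console.print_exception(show_locals=True)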
+
+ @staticmethod
+ def _caller_frame_info(
+ offset: int,
+ currentframe: Callable[[], Optional[FrameType]] = inspect.currentframe,
+ ) -> Tuple[str, int, Dict[str, Any]]:
+ """Get caller frame information.
+
+ Args:
+ offset (int): the caller offset within the current frame stack.
+ currentframe (Callable[[], Optional[FrameType]], optional): the callable to use to
+ retrieve the current frame. Defaults to ``inspect.currentframe``.
+
+ Returns:
+ Tuple[str, int, Dict[str, Any]]: A tuple containing the filename, the line number and
+ the dictionary of local variables associated with the caller frame.
+
+ Raises:
+ RuntimeError: If the stack offset is invalid.
+ """
+ # Ignore the frame of this local helper
+ offset += 1
+
+ frame = currentframe()
+ if frame is not None:
+ # Use the faster currentframe where implemented
+ while offset and frame:
+ frame = frame.f_back
+ offset -= 1
+ assert frame is not None
+ return frame.f_code.co_filename, frame.f_lineno, frame.f_locals
+ else:
+ # Fallback to the slower stack
+ frame_info = inspect.stack()[offset]
+ return frame_info.filename, frame_info.lineno, frame_info.frame.f_locals
+
+ def log(
+ self,
+ *objects: Any,
+ sep: str = " ",
+ end: str = "\n",
+ style: Optional[Union[str, Style]] = None,
+ justify: Optional[JustifyMethod] = None,
+ emoji: Optional[bool] = None,
+ markup: Optional[bool] = None,
+ highlight: Optional[bool] = None,
+ log_locals: bool = False,
+ _stack_offset: int = 1,
+ ) -> None:
+ """Log rich content to the terminal.
+
+ Args:
+ objects (positional args): Objects to log to the terminal.
+ sep (str, optional): String to write between print data. Defaults to " ".
+ end (str, optional): String to write at end of print data. Defaults to "\\\\n".
+ style (Union[str, Style], optional): A style to apply to output. Defaults to None.
+ justify (str, optional): One of "left", "right", "center", or "full". Defaults to ``None``.
+ overflow (str, optional): Overflow method: "crop", "fold", or "ellipsis". Defaults to None.
+ emoji (Optional[bool], optional): Enable emoji code, or ``None`` to use console default. Defaults to None.
+ markup (Optional[bool], optional): Enable markup, or ``None`` to use console default. Defaults to None.
+ highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use console default. Defaults to None.
+ log_locals (bool, optional): Boolean to enable logging of locals where ``log()``
+ was called. Defaults to False.
+ _stack_offset (int, optional): Offset of caller from end of call stack. Defaults to 1.
+ """
+ if not objects:
+ objects = (NewLine(),)
+
+ render_hooks = self._render_hooks[:]
+
+ with self:
+ renderables = self._collect_renderables(
+ objects,
+ sep,
+ end,
+ justify=justify,
+ emoji=emoji,
+ markup=markup,
+ highlight=highlight,
+ )
+ if style is not None:
+ renderables = [Styled(renderable, style) for renderable in renderables]
+
+ filename, line_no, locals = self._caller_frame_info(_stack_offset)
+ link_path = None if filename.startswith("<") else os.path.abspath(filename)
+ path = filename.rpartition(os.sep)[-1]
+ if log_locals:
+ locals_map = {
+ key: value
+ for key, value in locals.items()
+ if not key.startswith("__")
+ }
+ renderables.append(render_scope(locals_map, title="[i]locals"))
+
+ renderables = [
+ self._log_render(
+ self,
+ renderables,
+ log_time=self.get_datetime(),
+ path=path,
+ line_no=line_no,
+ link_path=link_path,
+ )
+ ]
+ for hook in render_hooks:
+ renderables = hook.process_renderables(renderables)
+ new_segments: List[Segment] = []
+ extend = new_segments.extend
+ render = self.render
+ render_options = self.options
+ for renderable in renderables:
+ extend(render(renderable, render_options))
+ buffer_extend = self._buffer.extend
+ for line in Segment.split_and_crop_lines(
+ new_segments, self.width, pad=False
+ ):
+ buffer_extend(line)
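+ # Illustrative usage of ``log`` (sketch; the logged values are arbitrary):
+ #
+ #     replay_id = "gen9ou-12345"
+ #     console.log("Parsed replay", replay_id, log_locals=True)
+ #     # output includes a timestamp, the caller's file:line, and local variables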
+
+ def _check_buffer(self) -> None:
+ """Check if the buffer may be rendered."""
+ if self.quiet:
+ del self._buffer[:]
+ return
+ with self._lock:
+ if self._buffer_index == 0:
+ if self.is_jupyter: # pragma: no cover
+ from .jupyter import display
+
+ display(self._buffer, self._render_buffer(self._buffer[:]))
+ del self._buffer[:]
+ else:
+ text = self._render_buffer(self._buffer[:])
+ del self._buffer[:]
+ if text:
+ try:
+ if WINDOWS: # pragma: no cover
+ # https://bugs.python.org/issue37871
+ write = self.file.write
+ for line in text.splitlines(True):
+ write(line)
+ else:
+ self.file.write(text)
+ self.file.flush()
+ except UnicodeEncodeError as error:
+ error.reason = f"{error.reason}\n*** You may need to add PYTHONIOENCODING=utf-8 to your environment ***"
+ raise
+
+ def _render_buffer(self, buffer: Iterable[Segment]) -> str:
+ """Render buffered output, and clear buffer."""
+ output: List[str] = []
+ append = output.append
+ color_system = self._color_system
+ legacy_windows = self.legacy_windows
+ if self.record:
+ with self._record_buffer_lock:
+ self._record_buffer.extend(buffer)
+ not_terminal = not self.is_terminal
+ if self.no_color and color_system:
+ buffer = Segment.remove_color(buffer)
+ for text, style, control in buffer:
+ if style:
+ append(
+ style.render(
+ text,
+ color_system=color_system,
+ legacy_windows=legacy_windows,
+ )
+ )
+ elif not (not_terminal and control):
+ append(text)
+
+ rendered = "".join(output)
+ return rendered
+
+ def input(
+ self,
+ prompt: TextType = "",
+ *,
+ markup: bool = True,
+ emoji: bool = True,
+ password: bool = False,
+ stream: Optional[TextIO] = None,
+ ) -> str:
+ """Displays a prompt and waits for input from the user. The prompt may contain color / style.
+
+ It works in the same way as Python's builtin :func:`input` function and provides elaborate line editing and history features if Python's builtin :mod:`readline` module is previously loaded.
+
+ Args:
+ prompt (Union[str, Text]): Text to render in the prompt.
+ markup (bool, optional): Enable console markup (requires a str prompt). Defaults to True.
+ emoji (bool, optional): Enable emoji (requires a str prompt). Defaults to True.
+ password: (bool, optional): Hide typed text. Defaults to False.
+ stream: (TextIO, optional): Optional file to read input from (rather than stdin). Defaults to None.
+
+ Returns:
+ str: Text read from stdin.
+ """
+ prompt_str = ""
+ if prompt:
+ with self.capture() as capture:
+ self.print(prompt, markup=markup, emoji=emoji, end="")
+ prompt_str = capture.get()
+ if self.legacy_windows:
+ # Legacy windows doesn't like ANSI codes in getpass or input (colorama bug)?
+ self.file.write(prompt_str)
+ prompt_str = ""
+ if password:
+ result = getpass(prompt_str, stream=stream)
+ else:
+ if stream:
+ self.file.write(prompt_str)
+ result = stream.readline()
+ else:
+ result = input(prompt_str)
+ return result
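+ # Illustrative usage of ``input`` (sketch; prompt text is arbitrary):
+ #
+ #     name = console.input("[bold cyan]Username[/]: ")
+ #     token = console.input("API token: ", password=True)  # typed text hidden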
+
+ def export_text(self, *, clear: bool = True, styles: bool = False) -> str:
+ """Generate text from console contents (requires record=True argument in constructor).
+
+ Args:
+ clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``.
+ styles (bool, optional): If ``True``, ansi escape codes will be included. ``False`` for plain text.
+ Defaults to ``False``.
+
+ Returns:
+ str: String containing console contents.
+
+ """
+ assert (
+ self.record
+ ), "To export console contents set record=True in the constructor or instance"
+
+ with self._record_buffer_lock:
+ if styles:
+ text = "".join(
+ (style.render(text) if style else text)
+ for text, style, _ in self._record_buffer
+ )
+ else:
+ text = "".join(
+ segment.text
+ for segment in self._record_buffer
+ if not segment.control
+ )
+ if clear:
+ del self._record_buffer[:]
+ return text
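+ # Illustrative usage of ``export_text`` (sketch; requires ``record=True``):
+ #
+ #     console = Console(record=True)
+ #     console.print("[green]hello[/]")
+ #     plain = console.export_text(clear=False)   # plain text, buffer kept
+ #     ansi = console.export_text(styles=True)    # with ANSI codes, then cleared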
+
+ def save_text(self, path: str, *, clear: bool = True, styles: bool = False) -> None:
+ """Generate text from console and save to a given location (requires record=True argument in constructor).
+
+ Args:
+ path (str): Path to write text file.
+ clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``.
+ styles (bool, optional): If ``True``, ansi style codes will be included. ``False`` for plain text.
+ Defaults to ``False``.
+
+ """
+ text = self.export_text(clear=clear, styles=styles)
+ with open(path, "wt", encoding="utf-8") as write_file:
+ write_file.write(text)
+
+ def export_html(
+ self,
+ *,
+ theme: Optional[TerminalTheme] = None,
+ clear: bool = True,
+ code_format: Optional[str] = None,
+ inline_styles: bool = False,
+ ) -> str:
+ """Generate HTML from console contents (requires record=True argument in constructor).
+
+ Args:
+ theme (TerminalTheme, optional): TerminalTheme object containing console colors.
+ clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``.
+ code_format (str, optional): Format string to render HTML, should contain {foreground}
+ {background} and {code}.
+ inline_styles (bool, optional): If ``True`` styles will be inlined into spans, which makes files
+ larger but easier to cut and paste markup. If ``False``, styles will be embedded in a style tag.
+ Defaults to False.
+
+ Returns:
+ str: String containing console contents as HTML.
+ """
+ assert (
+ self.record
+ ), "To export console contents set record=True in the constructor or instance"
+ fragments: List[str] = []
+ append = fragments.append
+ _theme = theme or DEFAULT_TERMINAL_THEME
+ stylesheet = ""
+
+ render_code_format = CONSOLE_HTML_FORMAT if code_format is None else code_format
+
+ with self._record_buffer_lock:
+ if inline_styles:
+ for text, style, _ in Segment.filter_control(
+ Segment.simplify(self._record_buffer)
+ ):
+ text = escape(text)
+ if style:
+ rule = style.get_html_style(_theme)
+ if style.link:
+ text = f'<a href="{style.link}">{text}</a>'
+ text = f'<span style="{rule}">{text}</span>' if rule else text
+ append(text)
+ else:
+ styles: Dict[str, int] = {}
+ for text, style, _ in Segment.filter_control(
+ Segment.simplify(self._record_buffer)
+ ):
+ text = escape(text)
+ if style:
+ rule = style.get_html_style(_theme)
+ style_number = styles.setdefault(rule, len(styles) + 1)
+ if style.link:
+ text = f'<a class="r{style_number}" href="{style.link}">{text}</a>'
+ else:
+ text = f'<span class="r{style_number}">{text}</span>'
+ append(text)
+ stylesheet_rules: List[str] = []
+ stylesheet_append = stylesheet_rules.append
+ for style_rule, style_number in styles.items():
+ if style_rule:
+ stylesheet_append(f".r{style_number} {{{style_rule}}}")
+ stylesheet = "\n".join(stylesheet_rules)
+
+ rendered_code = render_code_format.format(
+ code="".join(fragments),
+ stylesheet=stylesheet,
+ foreground=_theme.foreground_color.hex,
+ background=_theme.background_color.hex,
+ )
+ if clear:
+ del self._record_buffer[:]
+ return rendered_code
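+ # Illustrative usage of ``export_html``/``save_html`` (sketch; requires
+ # ``record=True``; "report.html" is an arbitrary path):
+ #
+ #     console = Console(record=True)
+ #     console.print("[bold magenta]Hello[/]")
+ #     console.save_html("report.html", inline_styles=True)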
+
+ def save_html(
+ self,
+ path: str,
+ *,
+ theme: Optional[TerminalTheme] = None,
+ clear: bool = True,
+ code_format: str = CONSOLE_HTML_FORMAT,
+ inline_styles: bool = False,
+ ) -> None:
+ """Generate HTML from console contents and write to a file (requires record=True argument in constructor).
+
+ Args:
+ path (str): Path to write html file.
+ theme (TerminalTheme, optional): TerminalTheme object containing console colors.
+ clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``.
+ code_format (str, optional): Format string to render HTML, should contain {foreground}
+ {background} and {code}.
+ inline_styles (bool, optional): If ``True`` styles will be inlined into spans, which makes files
+ larger but easier to cut and paste markup. If ``False``, styles will be embedded in a style tag.
+ Defaults to False.
+
+ """
+ html = self.export_html(
+ theme=theme,
+ clear=clear,
+ code_format=code_format,
+ inline_styles=inline_styles,
+ )
+ with open(path, "wt", encoding="utf-8") as write_file:
+ write_file.write(html)
+
+
+if __name__ == "__main__": # pragma: no cover
+ console = Console()
+
+ console.log(
+ "JSONRPC [i]request[/i]",
+ 5,
+ 1.3,
+ True,
+ False,
+ None,
+ {
+ "jsonrpc": "2.0",
+ "method": "subtract",
+ "params": {"minuend": 42, "subtrahend": 23},
+ "id": 3,
+ },
+ )
+
+ console.log("Hello, World!", "{'a': 1}", repr(console))
+
+ console.print(
+ {
+ "name": None,
+ "empty": [],
+ "quiz": {
+ "sport": {
+ "answered": True,
+ "q1": {
+ "question": "Which one is correct team name in NBA?",
+ "options": [
+ "New York Bulls",
+ "Los Angeles Kings",
+ "Golden State Warriors",
+ "Huston Rocket",
+ ],
+ "answer": "Huston Rocket",
+ },
+ },
+ "maths": {
+ "answered": False,
+ "q1": {
+ "question": "5 + 7 = ?",
+ "options": [10, 11, 12, 13],
+ "answer": 12,
+ },
+ "q2": {
+ "question": "12 - 8 = ?",
+ "options": [1, 2, 3, 4],
+ "answer": 4,
+ },
+ },
+ },
+ }
+ )
+ console.log("foo")
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/constrain.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/constrain.py
new file mode 100644
index 0000000..65fdf56
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/constrain.py
@@ -0,0 +1,37 @@
+from typing import Optional, TYPE_CHECKING
+
+from .jupyter import JupyterMixin
+from .measure import Measurement
+
+if TYPE_CHECKING:
+ from .console import Console, ConsoleOptions, RenderableType, RenderResult
+
+
+class Constrain(JupyterMixin):
+ """Constrain the width of a renderable to a given number of characters.
+
+ Args:
+ renderable (RenderableType): A renderable object.
+ width (int, optional): The maximum width (in characters) to render. Defaults to 80.
+ """
+
+ def __init__(self, renderable: "RenderableType", width: Optional[int] = 80) -> None:
+ self.renderable = renderable
+ self.width = width
+
+ def __rich_console__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "RenderResult":
+ if self.width is None:
+ yield self.renderable
+ else:
+ child_options = options.update_width(min(self.width, options.max_width))
+ yield from console.render(self.renderable, child_options)
+
+ def __rich_measure__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "Measurement":
+ if self.width is not None:
+ options = options.update_width(self.width)
+ measurement = Measurement.get(console, options, self.renderable)
+ return measurement
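
A minimal usage sketch for the Constrain renderable added above, assuming the library is imported as a standalone `rich` install rather than through the `pip._vendor` namespace used in this vendored copy:

```python
from rich.console import Console
from rich.panel import Panel
from rich.constrain import Constrain

console = Console()
# A Panel would normally expand to the full terminal width;
# Constrain caps the render at 40 cells.
console.print(Constrain(Panel("Hello, World!"), width=40))
```
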
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/containers.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/containers.py
new file mode 100644
index 0000000..e29cf36
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/containers.py
@@ -0,0 +1,167 @@
+from itertools import zip_longest
+from typing import (
+ Iterator,
+ Iterable,
+ List,
+ Optional,
+ Union,
+ overload,
+ TypeVar,
+ TYPE_CHECKING,
+)
+
+if TYPE_CHECKING:
+ from .console import (
+ Console,
+ ConsoleOptions,
+ JustifyMethod,
+ OverflowMethod,
+ RenderResult,
+ RenderableType,
+ )
+ from .text import Text
+
+from .cells import cell_len
+from .measure import Measurement
+
+T = TypeVar("T")
+
+
+class Renderables:
+ """A list subclass which renders its contents to the console."""
+
+ def __init__(
+ self, renderables: Optional[Iterable["RenderableType"]] = None
+ ) -> None:
+ self._renderables: List["RenderableType"] = (
+ list(renderables) if renderables is not None else []
+ )
+
+ def __rich_console__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "RenderResult":
+ """Console render method to insert line-breaks."""
+ yield from self._renderables
+
+ def __rich_measure__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "Measurement":
+ dimensions = [
+ Measurement.get(console, options, renderable)
+ for renderable in self._renderables
+ ]
+ if not dimensions:
+ return Measurement(1, 1)
+ _min = max(dimension.minimum for dimension in dimensions)
+ _max = max(dimension.maximum for dimension in dimensions)
+ return Measurement(_min, _max)
+
+ def append(self, renderable: "RenderableType") -> None:
+ self._renderables.append(renderable)
+
+ def __iter__(self) -> Iterable["RenderableType"]:
+ return iter(self._renderables)
+
+
+class Lines:
+ """A list subclass which can render to the console."""
+
+ def __init__(self, lines: Iterable["Text"] = ()) -> None:
+ self._lines: List["Text"] = list(lines)
+
+ def __repr__(self) -> str:
+ return f"Lines({self._lines!r})"
+
+ def __iter__(self) -> Iterator["Text"]:
+ return iter(self._lines)
+
+ @overload
+ def __getitem__(self, index: int) -> "Text":
+ ...
+
+ @overload
+ def __getitem__(self, index: slice) -> List["Text"]:
+ ...
+
+ def __getitem__(self, index: Union[slice, int]) -> Union["Text", List["Text"]]:
+ return self._lines[index]
+
+ def __setitem__(self, index: int, value: "Text") -> "Lines":
+ self._lines[index] = value
+ return self
+
+ def __len__(self) -> int:
+ return self._lines.__len__()
+
+ def __rich_console__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "RenderResult":
+ """Console render method to insert line-breaks."""
+ yield from self._lines
+
+ def append(self, line: "Text") -> None:
+ self._lines.append(line)
+
+ def extend(self, lines: Iterable["Text"]) -> None:
+ self._lines.extend(lines)
+
+ def pop(self, index: int = -1) -> "Text":
+ return self._lines.pop(index)
+
+ def justify(
+ self,
+ console: "Console",
+ width: int,
+ justify: "JustifyMethod" = "left",
+ overflow: "OverflowMethod" = "fold",
+ ) -> None:
+ """Justify and overflow text to a given width.
+
+ Args:
+ console (Console): Console instance.
+ width (int): Number of characters per line.
+ justify (str, optional): Default justify method for text: "left", "center", "full" or "right". Defaults to "left".
+ overflow (str, optional): Default overflow for text: "crop", "fold", or "ellipsis". Defaults to "fold".
+
+ """
+ from .text import Text
+
+ if justify == "left":
+ for line in self._lines:
+ line.truncate(width, overflow=overflow, pad=True)
+ elif justify == "center":
+ for line in self._lines:
+ line.rstrip()
+ line.truncate(width, overflow=overflow)
+ line.pad_left((width - cell_len(line.plain)) // 2)
+ line.pad_right(width - cell_len(line.plain))
+ elif justify == "right":
+ for line in self._lines:
+ line.rstrip()
+ line.truncate(width, overflow=overflow)
+ line.pad_left(width - cell_len(line.plain))
+ elif justify == "full":
+ for line_index, line in enumerate(self._lines):
+ if line_index == len(self._lines) - 1:
+ break
+ words = line.split(" ")
+ words_size = sum(cell_len(word.plain) for word in words)
+ num_spaces = len(words) - 1
+ spaces = [1 for _ in range(num_spaces)]
+ index = 0
+ if spaces:
+ while words_size + num_spaces < width:
+ spaces[len(spaces) - index - 1] += 1
+ num_spaces += 1
+ index = (index + 1) % len(spaces)
+ tokens: List[Text] = []
+ for index, (word, next_word) in enumerate(
+ zip_longest(words, words[1:])
+ ):
+ tokens.append(word)
+ if index < len(spaces):
+ style = word.get_style_at_offset(console, -1)
+ next_style = next_word.get_style_at_offset(console, 0)
+ space_style = style if style == next_style else line.style
+ tokens.append(Text(" " * spaces[index], style=space_style))
+ self[line_index] = Text("").join(tokens)
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/control.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/control.py
new file mode 100644
index 0000000..c98d0d7
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/control.py
@@ -0,0 +1,175 @@
+from typing import Any, Callable, Dict, Iterable, List, TYPE_CHECKING, Union
+
+from .segment import ControlCode, ControlType, Segment
+
+if TYPE_CHECKING:
+ from .console import Console, ConsoleOptions, RenderResult
+
+STRIP_CONTROL_CODES = [
+ 8, # Backspace
+ 11, # Vertical tab
+ 12, # Form feed
+ 13, # Carriage return
+]
+_CONTROL_TRANSLATE = {_codepoint: None for _codepoint in STRIP_CONTROL_CODES}
+
+
+CONTROL_CODES_FORMAT: Dict[int, Callable[..., str]] = {
+ ControlType.BELL: lambda: "\x07",
+ ControlType.CARRIAGE_RETURN: lambda: "\r",
+ ControlType.HOME: lambda: "\x1b[H",
+ ControlType.CLEAR: lambda: "\x1b[2J",
+ ControlType.ENABLE_ALT_SCREEN: lambda: "\x1b[?1049h",
+ ControlType.DISABLE_ALT_SCREEN: lambda: "\x1b[?1049l",
+ ControlType.SHOW_CURSOR: lambda: "\x1b[?25h",
+ ControlType.HIDE_CURSOR: lambda: "\x1b[?25l",
+ ControlType.CURSOR_UP: lambda param: f"\x1b[{param}A",
+ ControlType.CURSOR_DOWN: lambda param: f"\x1b[{param}B",
+ ControlType.CURSOR_FORWARD: lambda param: f"\x1b[{param}C",
+ ControlType.CURSOR_BACKWARD: lambda param: f"\x1b[{param}D",
+ ControlType.CURSOR_MOVE_TO_COLUMN: lambda param: f"\x1b[{param+1}G",
+ ControlType.ERASE_IN_LINE: lambda param: f"\x1b[{param}K",
+ ControlType.CURSOR_MOVE_TO: lambda x, y: f"\x1b[{y+1};{x+1}H",
+}
+
+
+class Control:
+ """A renderable that inserts a control code (non printable but may move cursor).
+
+ Args:
+ *codes (str): Positional arguments are either a :class:`~rich.segment.ControlType` enum or a
+ tuple of ControlType and an integer parameter
+ """
+
+ __slots__ = ["segment"]
+
+ def __init__(self, *codes: Union[ControlType, ControlCode]) -> None:
+ control_codes: List[ControlCode] = [
+ (code,) if isinstance(code, ControlType) else code for code in codes
+ ]
+ _format_map = CONTROL_CODES_FORMAT
+ rendered_codes = "".join(
+ _format_map[code](*parameters) for code, *parameters in control_codes
+ )
+ self.segment = Segment(rendered_codes, None, control_codes)
+
+ @classmethod
+ def bell(cls) -> "Control":
+ """Ring the 'bell'."""
+ return cls(ControlType.BELL)
+
+ @classmethod
+ def home(cls) -> "Control":
+ """Move cursor to 'home' position."""
+ return cls(ControlType.HOME)
+
+ @classmethod
+ def move(cls, x: int = 0, y: int = 0) -> "Control":
+ """Move cursor relative to current position.
+
+ Args:
+ x (int): X offset.
+ y (int): Y offset.
+
+ Returns:
+ ~Control: Control object.
+
+ """
+
+ def get_codes() -> Iterable[ControlCode]:
+ control = ControlType
+ if x:
+ yield (
+ control.CURSOR_FORWARD if x > 0 else control.CURSOR_BACKWARD,
+ abs(x),
+ )
+ if y:
+ yield (
+ control.CURSOR_DOWN if y > 0 else control.CURSOR_UP,
+ abs(y),
+ )
+
+ control = cls(*get_codes())
+ return control
+
+ @classmethod
+ def move_to_column(cls, x: int, y: int = 0) -> "Control":
+ """Move to the given column, optionally add offset to row.
+
+ Args:
+ x (int): absolute x (column)
+ y (int): optional y offset (row)
+
+ Returns:
+ ~Control: Control object.
+ """
+
+ return (
+ cls(
+ (ControlType.CURSOR_MOVE_TO_COLUMN, x),
+ (
+ ControlType.CURSOR_DOWN if y > 0 else ControlType.CURSOR_UP,
+ abs(y),
+ ),
+ )
+ if y
+ else cls((ControlType.CURSOR_MOVE_TO_COLUMN, x))
+ )
+
+ @classmethod
+ def move_to(cls, x: int, y: int) -> "Control":
+ """Move cursor to absolute position.
+
+ Args:
+ x (int): x offset (column)
+ y (int): y offset (row)
+
+ Returns:
+ ~Control: Control object.
+ """
+ return cls((ControlType.CURSOR_MOVE_TO, x, y))
+
+ @classmethod
+ def clear(cls) -> "Control":
+ """Clear the screen."""
+ return cls(ControlType.CLEAR)
+
+ @classmethod
+ def show_cursor(cls, show: bool) -> "Control":
+ """Show or hide the cursor."""
+ return cls(ControlType.SHOW_CURSOR if show else ControlType.HIDE_CURSOR)
+
+ @classmethod
+ def alt_screen(cls, enable: bool) -> "Control":
+ """Enable or disable alt screen."""
+ if enable:
+ return cls(ControlType.ENABLE_ALT_SCREEN, ControlType.HOME)
+ else:
+ return cls(ControlType.DISABLE_ALT_SCREEN)
+
+ def __str__(self) -> str:
+ return self.segment.text
+
+ def __rich_console__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "RenderResult":
+ if self.segment.text:
+ yield self.segment
+
+
+def strip_control_codes(
+ text: str, _translate_table: Dict[int, None] = _CONTROL_TRANSLATE
+) -> str:
+ """Remove control codes from text.
+
+ Args:
+ text (str): A string possibly containing control codes.
+
+ Returns:
+ str: String with control codes removed.
+ """
+ return text.translate(_translate_table)
+
+
+if __name__ == "__main__": # pragma: no cover
+ print(strip_control_codes("hello\rWorld"))
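
A short, hedged sketch of driving the Control helpers above through a Console (standalone `rich` import assumed, and assuming `Console.control` accepts Control instances as in upstream rich):

```python
from rich.console import Console
from rich.control import Control

console = Console()
# Relative cursor move: two cells right, one line down. When stdout is not a
# terminal the control segment is simply dropped.
console.control(Control.move(x=2, y=1))
console.control(Control.move_to_column(0))
```
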
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/default_styles.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/default_styles.py
new file mode 100644
index 0000000..91ab232
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/default_styles.py
@@ -0,0 +1,183 @@
+from typing import Dict
+
+from .style import Style
+
+
+DEFAULT_STYLES: Dict[str, Style] = {
+ "none": Style.null(),
+ "reset": Style(
+ color="default",
+ bgcolor="default",
+ dim=False,
+ bold=False,
+ italic=False,
+ underline=False,
+ blink=False,
+ blink2=False,
+ reverse=False,
+ conceal=False,
+ strike=False,
+ ),
+ "dim": Style(dim=True),
+ "bright": Style(dim=False),
+ "bold": Style(bold=True),
+ "strong": Style(bold=True),
+ "code": Style(reverse=True, bold=True),
+ "italic": Style(italic=True),
+ "emphasize": Style(italic=True),
+ "underline": Style(underline=True),
+ "blink": Style(blink=True),
+ "blink2": Style(blink2=True),
+ "reverse": Style(reverse=True),
+ "strike": Style(strike=True),
+ "black": Style(color="black"),
+ "red": Style(color="red"),
+ "green": Style(color="green"),
+ "yellow": Style(color="yellow"),
+ "magenta": Style(color="magenta"),
+ "cyan": Style(color="cyan"),
+ "white": Style(color="white"),
+ "inspect.attr": Style(color="yellow", italic=True),
+ "inspect.attr.dunder": Style(color="yellow", italic=True, dim=True),
+ "inspect.callable": Style(bold=True, color="red"),
+ "inspect.def": Style(italic=True, color="bright_cyan"),
+ "inspect.error": Style(bold=True, color="red"),
+ "inspect.equals": Style(),
+ "inspect.help": Style(color="cyan"),
+ "inspect.doc": Style(dim=True),
+ "inspect.value.border": Style(color="green"),
+ "live.ellipsis": Style(bold=True, color="red"),
+ "layout.tree.row": Style(dim=False, color="red"),
+ "layout.tree.column": Style(dim=False, color="blue"),
+ "logging.keyword": Style(bold=True, color="yellow"),
+ "logging.level.notset": Style(dim=True),
+ "logging.level.debug": Style(color="green"),
+ "logging.level.info": Style(color="blue"),
+ "logging.level.warning": Style(color="red"),
+ "logging.level.error": Style(color="red", bold=True),
+ "logging.level.critical": Style(color="red", bold=True, reverse=True),
+ "log.level": Style.null(),
+ "log.time": Style(color="cyan", dim=True),
+ "log.message": Style.null(),
+ "log.path": Style(dim=True),
+ "repr.ellipsis": Style(color="yellow"),
+ "repr.indent": Style(color="green", dim=True),
+ "repr.error": Style(color="red", bold=True),
+ "repr.str": Style(color="green", italic=False, bold=False),
+ "repr.brace": Style(bold=True),
+ "repr.comma": Style(bold=True),
+ "repr.ipv4": Style(bold=True, color="bright_green"),
+ "repr.ipv6": Style(bold=True, color="bright_green"),
+ "repr.eui48": Style(bold=True, color="bright_green"),
+ "repr.eui64": Style(bold=True, color="bright_green"),
+ "repr.tag_start": Style(bold=True),
+ "repr.tag_name": Style(color="bright_magenta", bold=True),
+ "repr.tag_contents": Style(color="default"),
+ "repr.tag_end": Style(bold=True),
+ "repr.attrib_name": Style(color="yellow", italic=False),
+ "repr.attrib_equal": Style(bold=True),
+ "repr.attrib_value": Style(color="magenta", italic=False),
+ "repr.number": Style(color="cyan", bold=True, italic=False),
+ "repr.bool_true": Style(color="bright_green", italic=True),
+ "repr.bool_false": Style(color="bright_red", italic=True),
+ "repr.none": Style(color="magenta", italic=True),
+ "repr.url": Style(underline=True, color="bright_blue", italic=False, bold=False),
+ "repr.uuid": Style(color="bright_yellow", bold=False),
+ "repr.call": Style(color="magenta", bold=True),
+ "repr.path": Style(color="magenta"),
+ "repr.filename": Style(color="bright_magenta"),
+ "rule.line": Style(color="bright_green"),
+ "rule.text": Style.null(),
+ "json.brace": Style(bold=True),
+ "json.bool_true": Style(color="bright_green", italic=True),
+ "json.bool_false": Style(color="bright_red", italic=True),
+ "json.null": Style(color="magenta", italic=True),
+ "json.number": Style(color="cyan", bold=True, italic=False),
+ "json.str": Style(color="green", italic=False, bold=False),
+ "json.key": Style(color="blue", bold=True),
+ "prompt": Style.null(),
+ "prompt.choices": Style(color="magenta", bold=True),
+ "prompt.default": Style(color="cyan", bold=True),
+ "prompt.invalid": Style(color="red"),
+ "prompt.invalid.choice": Style(color="red"),
+ "pretty": Style.null(),
+ "scope.border": Style(color="blue"),
+ "scope.key": Style(color="yellow", italic=True),
+ "scope.key.special": Style(color="yellow", italic=True, dim=True),
+ "scope.equals": Style(color="red"),
+ "table.header": Style(bold=True),
+ "table.footer": Style(bold=True),
+ "table.cell": Style.null(),
+ "table.title": Style(italic=True),
+ "table.caption": Style(italic=True, dim=True),
+ "traceback.error": Style(color="red", italic=True),
+ "traceback.border.syntax_error": Style(color="bright_red"),
+ "traceback.border": Style(color="red"),
+ "traceback.text": Style.null(),
+ "traceback.title": Style(color="red", bold=True),
+ "traceback.exc_type": Style(color="bright_red", bold=True),
+ "traceback.exc_value": Style.null(),
+ "traceback.offset": Style(color="bright_red", bold=True),
+ "bar.back": Style(color="grey23"),
+ "bar.complete": Style(color="rgb(249,38,114)"),
+ "bar.finished": Style(color="rgb(114,156,31)"),
+ "bar.pulse": Style(color="rgb(249,38,114)"),
+ "progress.description": Style.null(),
+ "progress.filesize": Style(color="green"),
+ "progress.filesize.total": Style(color="green"),
+ "progress.download": Style(color="green"),
+ "progress.elapsed": Style(color="yellow"),
+ "progress.percentage": Style(color="magenta"),
+ "progress.remaining": Style(color="cyan"),
+ "progress.data.speed": Style(color="red"),
+ "progress.spinner": Style(color="green"),
+ "status.spinner": Style(color="green"),
+ "tree": Style(),
+ "tree.line": Style(),
+ "markdown.paragraph": Style(),
+ "markdown.text": Style(),
+ "markdown.emph": Style(italic=True),
+ "markdown.strong": Style(bold=True),
+ "markdown.code": Style(bgcolor="black", color="bright_white"),
+ "markdown.code_block": Style(dim=True, color="cyan", bgcolor="black"),
+ "markdown.block_quote": Style(color="magenta"),
+ "markdown.list": Style(color="cyan"),
+ "markdown.item": Style(),
+ "markdown.item.bullet": Style(color="yellow", bold=True),
+ "markdown.item.number": Style(color="yellow", bold=True),
+ "markdown.hr": Style(color="yellow"),
+ "markdown.h1.border": Style(),
+ "markdown.h1": Style(bold=True),
+ "markdown.h2": Style(bold=True, underline=True),
+ "markdown.h3": Style(bold=True),
+ "markdown.h4": Style(bold=True, dim=True),
+ "markdown.h5": Style(underline=True),
+ "markdown.h6": Style(italic=True),
+ "markdown.h7": Style(italic=True, dim=True),
+ "markdown.link": Style(color="bright_blue"),
+ "markdown.link_url": Style(color="blue"),
+}
+
+
+if __name__ == "__main__": # pragma: no cover
+ import argparse
+ import io
+
+ from pip._vendor.rich.console import Console
+ from pip._vendor.rich.table import Table
+ from pip._vendor.rich.text import Text
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--html", action="store_true", help="Export as HTML table")
+ args = parser.parse_args()
+ html: bool = args.html
+ console = Console(record=True, width=70, file=io.StringIO()) if html else Console()
+
+ table = Table("Name", "Styling")
+
+ for style_name, style in DEFAULT_STYLES.items():
+ table.add_row(Text(style_name, style=style), str(style))
+
+ console.print(table)
+ if html:
+ print(console.export_html(inline_styles=True))
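
These defaults can be overridden per console without editing this table; a minimal sketch using rich's Theme (standalone `rich` import assumed):

```python
from rich.console import Console
from rich.theme import Theme

# Any key from DEFAULT_STYLES can be redefined in a Theme passed to Console.
console = Console(theme=Theme({"repr.number": "bold red", "logging.level.warning": "yellow"}))
console.print([1, 2, 3.5])
```
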
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/diagnose.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/diagnose.py
new file mode 100644
index 0000000..38728da
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/diagnose.py
@@ -0,0 +1,6 @@
+if __name__ == "__main__": # pragma: no cover
+ from pip._vendor.rich.console import Console
+ from pip._vendor.rich import inspect
+
+ console = Console()
+ inspect(console)
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/emoji.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/emoji.py
new file mode 100644
index 0000000..791f046
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/emoji.py
@@ -0,0 +1,96 @@
+import sys
+from typing import TYPE_CHECKING, Optional, Union
+
+from .jupyter import JupyterMixin
+from .segment import Segment
+from .style import Style
+from ._emoji_codes import EMOJI
+from ._emoji_replace import _emoji_replace
+
+if sys.version_info >= (3, 8):
+ from typing import Literal
+else:
+ from pip._vendor.typing_extensions import Literal # pragma: no cover
+
+
+if TYPE_CHECKING:
+ from .console import Console, ConsoleOptions, RenderResult
+
+
+EmojiVariant = Literal["emoji", "text"]
+
+
+class NoEmoji(Exception):
+ """No emoji by that name."""
+
+
+class Emoji(JupyterMixin):
+ __slots__ = ["name", "style", "_char", "variant"]
+
+ VARIANTS = {"text": "\uFE0E", "emoji": "\uFE0F"}
+
+ def __init__(
+ self,
+ name: str,
+ style: Union[str, Style] = "none",
+ variant: Optional[EmojiVariant] = None,
+ ) -> None:
+ """A single emoji character.
+
+ Args:
+ name (str): Name of emoji.
+ style (Union[str, Style], optional): Optional style. Defaults to None.
+
+ Raises:
+ NoEmoji: If the emoji doesn't exist.
+ """
+ self.name = name
+ self.style = style
+ self.variant = variant
+ try:
+ self._char = EMOJI[name]
+ except KeyError:
+ raise NoEmoji(f"No emoji called {name!r}")
+ if variant is not None:
+ self._char += self.VARIANTS.get(variant, "")
+
+ @classmethod
+ def replace(cls, text: str) -> str:
+ """Replace emoji markup with corresponding unicode characters.
+
+ Args:
+ text (str): A string with emoji codes, e.g. "Hello :smiley:!"
+
+ Returns:
+ str: A string with emoji codes replaced with actual emoji.
+ """
+ return _emoji_replace(text)
+
+ def __repr__(self) -> str:
+ return f"<emoji {self.name!r}>"
+
+ def __str__(self) -> str:
+ return self._char
+
+ def __rich_console__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "RenderResult":
+ yield Segment(self._char, console.get_style(self.style))
+
+
+if __name__ == "__main__": # pragma: no cover
+ import sys
+
+ from pip._vendor.rich.columns import Columns
+ from pip._vendor.rich.console import Console
+
+ console = Console(record=True)
+
+ columns = Columns(
+ (f":{name}: {name}" for name in sorted(EMOJI.keys()) if "\u200D" not in name),
+ column_first=True,
+ )
+
+ console.print(columns)
+ if len(sys.argv) > 1:
+ console.save_html(sys.argv[1])
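
A hedged usage sketch for the Emoji class above (standalone `rich` import assumed):

```python
from rich.console import Console
from rich.emoji import Emoji

console = Console()
# Either construct an Emoji directly by name, or let console markup
# replace ":name:" codes inline.
console.print(Emoji("rocket"), "Deployed :smiley:")
```
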
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/errors.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/errors.py
new file mode 100644
index 0000000..0bcbe53
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/errors.py
@@ -0,0 +1,34 @@
+class ConsoleError(Exception):
+ """An error in console operation."""
+
+
+class StyleError(Exception):
+ """An error in styles."""
+
+
+class StyleSyntaxError(ConsoleError):
+ """Style was badly formatted."""
+
+
+class MissingStyle(StyleError):
+ """No such style."""
+
+
+class StyleStackError(ConsoleError):
+ """Style stack is invalid."""
+
+
+class NotRenderableError(ConsoleError):
+ """Object is not renderable."""
+
+
+class MarkupError(ConsoleError):
+ """Markup was badly formatted."""
+
+
+class LiveError(ConsoleError):
+ """Error related to Live display."""
+
+
+class NoAltScreen(ConsoleError):
+ """Alt screen mode was required."""
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/file_proxy.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/file_proxy.py
new file mode 100644
index 0000000..3ec593a
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/file_proxy.py
@@ -0,0 +1,54 @@
+import io
+from typing import List, Any, IO, TYPE_CHECKING
+
+from .ansi import AnsiDecoder
+from .text import Text
+
+if TYPE_CHECKING:
+ from .console import Console
+
+
+class FileProxy(io.TextIOBase):
+ """Wraps a file (e.g. sys.stdout) and redirects writes to a console."""
+
+ def __init__(self, console: "Console", file: IO[str]) -> None:
+ self.__console = console
+ self.__file = file
+ self.__buffer: List[str] = []
+ self.__ansi_decoder = AnsiDecoder()
+
+ @property
+ def rich_proxied_file(self) -> IO[str]:
+ """Get proxied file."""
+ return self.__file
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self.__file, name)
+
+ def write(self, text: str) -> int:
+ if not isinstance(text, str):
+ raise TypeError(f"write() argument must be str, not {type(text).__name__}")
+ buffer = self.__buffer
+ lines: List[str] = []
+ while text:
+ line, new_line, text = text.partition("\n")
+ if new_line:
+ lines.append("".join(buffer) + line)
+ del buffer[:]
+ else:
+ buffer.append(line)
+ break
+ if lines:
+ console = self.__console
+ with console:
+ output = Text("\n").join(
+ self.__ansi_decoder.decode_line(line) for line in lines
+ )
+ console.print(output)
+ return len(text)
+
+ def flush(self) -> None:
+ buffer = self.__buffer
+ if buffer:
+ self.__console.print("".join(buffer))
+ del buffer[:]
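
A sketch of how FileProxy is typically used to capture bare print() output into a Console, which is the same mechanism Live uses for redirect_stdout (standalone `rich` import assumed):

```python
import sys
from rich.console import Console
from rich.file_proxy import FileProxy

console = Console()
proxy = FileProxy(console, sys.stdout)
sys.stdout = proxy                      # route print() through the console
print("captured line by line")          # flushed once a complete line is seen
sys.stdout = proxy.rich_proxied_file    # restore the original stream
```
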
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/filesize.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/filesize.py
new file mode 100644
index 0000000..b3a0996
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/filesize.py
@@ -0,0 +1,89 @@
+# coding: utf-8
+"""Functions for reporting filesizes. Borrowed from https://github.com/PyFilesystem/pyfilesystem2
+
+The functions declared in this module should cover the different
+usecases needed to generate a string representation of a file size
+using several different units. Since there are many standards regarding
+file size units, three different functions have been implemented.
+
+See Also:
+ * `Wikipedia: Binary prefix <https://en.wikipedia.org/wiki/Binary_prefix>`_
+
+"""
+
+__all__ = ["decimal"]
+
+from typing import Iterable, List, Tuple, Optional
+
+
+def _to_str(
+ size: int,
+ suffixes: Iterable[str],
+ base: int,
+ *,
+ precision: Optional[int] = 1,
+ separator: Optional[str] = " ",
+) -> str:
+ if size == 1:
+ return "1 byte"
+ elif size < base:
+ return "{:,} bytes".format(size)
+
+ for i, suffix in enumerate(suffixes, 2): # noqa: B007
+ unit = base ** i
+ if size < unit:
+ break
+ return "{:,.{precision}f}{separator}{}".format(
+ (base * size / unit),
+ suffix,
+ precision=precision,
+ separator=separator,
+ )
+
+
+def pick_unit_and_suffix(size: int, suffixes: List[str], base: int) -> Tuple[int, str]:
+ """Pick a suffix and base for the given size."""
+ for i, suffix in enumerate(suffixes):
+ unit = base ** i
+ if size < unit * base:
+ break
+ return unit, suffix
+
+
+def decimal(
+ size: int,
+ *,
+ precision: Optional[int] = 1,
+ separator: Optional[str] = " ",
+) -> str:
+ """Convert a filesize in to a string (powers of 1000, SI prefixes).
+
+ In this convention, ``1000 B = 1 kB``.
+
+ This is typically the format used to advertise the storage
+ capacity of USB flash drives and the like (*256 MB* meaning
+ actually a storage capacity of more than *256 000 000 B*),
+ or used by **Mac OS X** since v10.6 to report file sizes.
+
+ Arguments:
+ size (int): A file size.
+ precision (int): The number of decimal places to include (default = 1).
+ separator (str): The string to separate the value from the units (default = " ").
+
+ Returns:
+ `str`: A string containing an abbreviated file size and units.
+
+ Example:
+ >>> filesize.decimal(30000)
+ '30.0 kB'
+ >>> filesize.decimal(30000, precision=2, separator="")
+ '30.00kB'
+
+ """
+ return _to_str(
+ size,
+ ("kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"),
+ 1000,
+ precision=precision,
+ separator=separator,
+ )
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/highlighter.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/highlighter.py
new file mode 100644
index 0000000..8afdd01
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/highlighter.py
@@ -0,0 +1,147 @@
+from abc import ABC, abstractmethod
+from typing import List, Union
+
+from .text import Text
+
+
+def _combine_regex(*regexes: str) -> str:
+ """Combine a number of regexes in to a single regex.
+
+ Returns:
+ str: New regex with all regexes ORed together.
+ """
+ return "|".join(regexes)
+
+
+class Highlighter(ABC):
+ """Abstract base class for highlighters."""
+
+ def __call__(self, text: Union[str, Text]) -> Text:
+ """Highlight a str or Text instance.
+
+ Args:
+ text (Union[str, ~Text]): Text to highlight.
+
+ Raises:
+ TypeError: If not called with text or str.
+
+ Returns:
+ Text: A text instance with highlighting applied.
+ """
+ if isinstance(text, str):
+ highlight_text = Text(text)
+ elif isinstance(text, Text):
+ highlight_text = text.copy()
+ else:
+ raise TypeError(f"str or Text instance required, not {text!r}")
+ self.highlight(highlight_text)
+ return highlight_text
+
+ @abstractmethod
+ def highlight(self, text: Text) -> None:
+ """Apply highlighting in place to text.
+
+ Args:
+ text (~Text): A text object to highlight.
+ """
+
+
+class NullHighlighter(Highlighter):
+ """A highlighter object that doesn't highlight.
+
+ May be used to disable highlighting entirely.
+
+ """
+
+ def highlight(self, text: Text) -> None:
+ """Nothing to do"""
+
+
+class RegexHighlighter(Highlighter):
+ """Applies highlighting from a list of regular expressions."""
+
+ highlights: List[str] = []
+ base_style: str = ""
+
+ def highlight(self, text: Text) -> None:
+ """Highlight :class:`rich.text.Text` using regular expressions.
+
+ Args:
+ text (~Text): Text to highlight.
+
+ """
+
+ highlight_regex = text.highlight_regex
+ for re_highlight in self.highlights:
+ highlight_regex(re_highlight, style_prefix=self.base_style)
+
+
+class ReprHighlighter(RegexHighlighter):
+ """Highlights the text typically produced from ``__repr__`` methods."""
+
+ base_style = "repr."
+ highlights = [
+ r"(?P<tag_start>\<)(?P<tag_name>[\w\-\.\:]*)(?P<tag_contents>[\w\W]*?)(?P<tag_end>\>)",
+ r"(?P<attrib_name>[\w_]{1,50})=(?P<attrib_value>\"?[\w_]+\"?)?",
+ r"(?P<brace>[\{\[\(\)\]\}])",
+ _combine_regex(
+ r"(?P<ipv4>[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})",
+ r"(?P<ipv6>([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4})",
+ r"(?P<eui64>(?:[0-9A-Fa-f]{1,2}-){7}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{1,2}:){7}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{4}\.){3}[0-9A-Fa-f]{4})",
+ r"(?P<eui48>(?:[0-9A-Fa-f]{1,2}-){5}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{1,2}:){5}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{4}\.){2}[0-9A-Fa-f]{4})",
+ r"(?P<call>[\w\.]*?)\(",
+ r"\b(?P<bool_true>True)\b|\b(?P<bool_false>False)\b|\b(?P<none>None)\b",
+ r"(?P<ellipsis>\.\.\.)",
+ r"(?P<number>(?<!\w)\-?[0-9]+\.?[0-9]*(e[\-\+]?\d+?)?\b|0x[0-9a-fA-F]*)",
+ r"(?P<path>\B(\/[\w\.\-\_\+]+)*\/)(?P<filename>[\w\.\-\_\+]*)?",
+ r"(?<![\\\w])(?P<str>b?\'\'\'.*?(?<!\\)\'\'\'|b?\'.*?(?<!\\)\'|b?\"\"\".*?(?<!\\)\"\"\"|b?\".*?(?<!\\)\")",
+ r"(?P<uuid>[a-fA-F0-9]{8}\-[a-fA-F0-9]{4}\-[a-fA-F0-9]{4}\-[a-fA-F0-9]{4}\-[a-fA-F0-9]{12})",
+ r"(?P<url>(file|https|http|ws|wss):\/\/[0-9a-zA-Z\$\-\_\+\!`\(\)\,\.\?\/\;\:\&\=\%\#]*)",
+ ),
+ ]
+
+
+class JSONHighlighter(RegexHighlighter):
+ """Highlights JSON"""
+
+ base_style = "json."
+ highlights = [
+ _combine_regex(
+ r"(?P<brace>[\{\[\(\)\]\}])",
+ r"\b(?P<bool_true>true)\b|\b(?P<bool_false>false)\b|\b(?P<null>null)\b",
+ r"(?P(?b?\".*?(?b?\".*?(? None:
+ data = loads(json)
+ json = dumps(
+ data,
+ indent=indent,
+ skipkeys=skip_keys,
+ ensure_ascii=ensure_ascii,
+ check_circular=check_circular,
+ allow_nan=allow_nan,
+ default=default,
+ sort_keys=sort_keys,
+ )
+ highlighter = JSONHighlighter() if highlight else NullHighlighter()
+ self.text = highlighter(json)
+ self.text.no_wrap = True
+ self.text.overflow = None
+
+ @classmethod
+ def from_data(
+ cls,
+ data: Any,
+ indent: Union[None, int, str] = 2,
+ highlight: bool = True,
+ skip_keys: bool = False,
+ ensure_ascii: bool = True,
+ check_circular: bool = True,
+ allow_nan: bool = True,
+ default: Optional[Callable[[Any], Any]] = None,
+ sort_keys: bool = False,
+ ) -> "JSON":
+ """Encodes a JSON object from arbitrary data.
+
+ Args:
+ data (Any): An object that may be encoded in to JSON
+ indent (Union[None, int, str], optional): Number of characters to indent by. Defaults to 2.
+ highlight (bool, optional): Enable highlighting. Defaults to True.
+ skip_keys (bool, optional): Skip keys not of a basic type. Defaults to False.
+ ensure_ascii (bool, optional): Escape all non-ascii characters. Defaults to True.
+ check_circular (bool, optional): Check for circular references. Defaults to True.
+ allow_nan (bool, optional): Allow NaN and Infinity values. Defaults to True.
+ default (Callable, optional): A callable that converts values that cannot be encoded
+ in to something that can be JSON encoded. Defaults to None.
+ sort_keys (bool, optional): Sort dictionary keys. Defaults to False.
+
+ Returns:
+ JSON: New JSON object from the given data.
+ """
+ json_instance: "JSON" = cls.__new__(cls)
+ json = dumps(
+ data,
+ indent=indent,
+ skipkeys=skip_keys,
+ ensure_ascii=ensure_ascii,
+ check_circular=check_circular,
+ allow_nan=allow_nan,
+ default=default,
+ sort_keys=sort_keys,
+ )
+ highlighter = JSONHighlighter() if highlight else NullHighlighter()
+ json_instance.text = highlighter(json)
+ json_instance.text.no_wrap = True
+ json_instance.text.overflow = None
+ return json_instance
+
+ def __rich__(self) -> Text:
+ return self.text
+
+
+if __name__ == "__main__":
+
+ import argparse
+ import sys
+
+ parser = argparse.ArgumentParser(description="Pretty print json")
+ parser.add_argument(
+ "path",
+ metavar="PATH",
+ help="path to file, or - for stdin",
+ )
+ parser.add_argument(
+ "-i",
+ "--indent",
+ metavar="SPACES",
+ type=int,
+ help="Number of spaces in an indent",
+ default=2,
+ )
+ args = parser.parse_args()
+
+ from pip._vendor.rich.console import Console
+
+ console = Console()
+ error_console = Console(stderr=True)
+
+ try:
+ if args.path == "-":
+ json_data = sys.stdin.read()
+ else:
+ with open(args.path, "rt") as json_file:
+ json_data = json_file.read()
+ except Exception as error:
+ error_console.print(f"Unable to read {args.path!r}; {error}")
+ sys.exit(-1)
+
+ console.print(JSON(json_data, indent=args.indent), soft_wrap=True)
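
A minimal sketch of the JSON renderable's two entry points shown above (standalone `rich` import assumed):

```python
from rich.console import Console
from rich.json import JSON

console = Console()
# From a JSON string...
console.print(JSON('{"jsonrpc": "2.0", "id": 3}'))
# ...or from an already-parsed Python object.
console.print(JSON.from_data({"answer": 42}, indent=4))
```
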
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/jupyter.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/jupyter.py
new file mode 100644
index 0000000..bedf5cb
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/jupyter.py
@@ -0,0 +1,92 @@
+from typing import Any, Dict, Iterable, List
+
+from . import get_console
+from .segment import Segment
+from .terminal_theme import DEFAULT_TERMINAL_THEME
+
+JUPYTER_HTML_FORMAT = """\
+<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">{code}</pre>
+"""
+
+
+class JupyterRenderable:
+ """A shim to write html to Jupyter notebook."""
+
+ def __init__(self, html: str, text: str) -> None:
+ self.html = html
+ self.text = text
+
+ def _repr_mimebundle_(
+ self, include: Iterable[str], exclude: Iterable[str], **kwargs: Any
+ ) -> Dict[str, str]:
+ data = {"text/plain": self.text, "text/html": self.html}
+ if include:
+ data = {k: v for (k, v) in data.items() if k in include}
+ if exclude:
+ data = {k: v for (k, v) in data.items() if k not in exclude}
+ return data
+
+
+class JupyterMixin:
+ """Add to an Rich renderable to make it render in Jupyter notebook."""
+ """Add to a Rich renderable to make it render in a Jupyter notebook."""
+ __slots__ = ()
+
+ def _repr_mimebundle_(
+ self, include: Iterable[str], exclude: Iterable[str], **kwargs: Any
+ ) -> Dict[str, str]:
+ console = get_console()
+ segments = list(console.render(self, console.options)) # type: ignore
+ html = _render_segments(segments)
+ text = console._render_buffer(segments)
+ data = {"text/plain": text, "text/html": html}
+ if include:
+ data = {k: v for (k, v) in data.items() if k in include}
+ if exclude:
+ data = {k: v for (k, v) in data.items() if k not in exclude}
+ return data
+
+
+def _render_segments(segments: Iterable[Segment]) -> str:
+ def escape(text: str) -> str:
+ """Escape html."""
+ return text.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
+
+ fragments: List[str] = []
+ append_fragment = fragments.append
+ theme = DEFAULT_TERMINAL_THEME
+ for text, style, control in Segment.simplify(segments):
+ if control:
+ continue
+ text = escape(text)
+ if style:
+ rule = style.get_html_style(theme)
+ text = f'<span style="{rule}">{text}</span>' if rule else text
+ if style.link:
+ text = f'<a href="{style.link}" target="_blank">{text}</a>'
+ append_fragment(text)
+
+ code = "".join(fragments)
+ html = JUPYTER_HTML_FORMAT.format(code=code)
+
+ return html
+
+
+def display(segments: Iterable[Segment], text: str) -> None:
+ """Render segments to Jupyter."""
+ html = _render_segments(segments)
+ jupyter_renderable = JupyterRenderable(html, text)
+ try:
+ from IPython.display import display as ipython_display
+
+ ipython_display(jupyter_renderable)
+ except ModuleNotFoundError:
+ # Handle the case where the Console has force_jupyter=True,
+ # but IPython is not installed.
+ pass
+
+
+def print(*args: Any, **kwargs: Any) -> None:
+ """Proxy for Console print."""
+ console = get_console()
+ return console.print(*args, **kwargs)
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/layout.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/layout.py
new file mode 100644
index 0000000..22a4c54
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/layout.py
@@ -0,0 +1,444 @@
+from abc import ABC, abstractmethod
+from itertools import islice
+from operator import itemgetter
+from threading import RLock
+from typing import (
+ TYPE_CHECKING,
+ Dict,
+ Iterable,
+ List,
+ NamedTuple,
+ Optional,
+ Sequence,
+ Tuple,
+ Union,
+)
+
+from ._ratio import ratio_resolve
+from .align import Align
+from .console import Console, ConsoleOptions, RenderableType, RenderResult
+from .highlighter import ReprHighlighter
+from .panel import Panel
+from .pretty import Pretty
+from .repr import rich_repr, Result
+from .region import Region
+from .segment import Segment
+from .style import StyleType
+
+if TYPE_CHECKING:
+ from pip._vendor.rich.tree import Tree
+
+
+class LayoutRender(NamedTuple):
+ """An individual layout render."""
+
+ region: Region
+ render: List[List[Segment]]
+
+
+RegionMap = Dict["Layout", Region]
+RenderMap = Dict["Layout", LayoutRender]
+
+
+class LayoutError(Exception):
+ """Layout related error."""
+
+
+class NoSplitter(LayoutError):
+ """Requested splitter does not exist."""
+
+
+class _Placeholder:
+ """An internal renderable used as a Layout placeholder."""
+
+ highlighter = ReprHighlighter()
+
+ def __init__(self, layout: "Layout", style: StyleType = "") -> None:
+ self.layout = layout
+ self.style = style
+
+ def __rich_console__(
+ self, console: Console, options: ConsoleOptions
+ ) -> RenderResult:
+ width = options.max_width
+ height = options.height or options.size.height
+ layout = self.layout
+ title = (
+ f"{layout.name!r} ({width} x {height})"
+ if layout.name
+ else f"({width} x {height})"
+ )
+ yield Panel(
+ Align.center(Pretty(layout), vertical="middle"),
+ style=self.style,
+ title=self.highlighter(title),
+ border_style="blue",
+ )
+
+
+class Splitter(ABC):
+ """Base class for a splitter."""
+
+ name: str = ""
+
+ @abstractmethod
+ def get_tree_icon(self) -> str:
+ """Get the icon (emoji) used in layout.tree"""
+
+ @abstractmethod
+ def divide(
+ self, children: Sequence["Layout"], region: Region
+ ) -> Iterable[Tuple["Layout", Region]]:
+ """Divide a region amongst several child layouts.
+
+ Args:
+ children (Sequence(Layout)): A number of child layouts.
+ region (Region): A rectangular region to divide.
+ """
+
+
+class RowSplitter(Splitter):
+ """Split a layout region in to rows."""
+
+ name = "row"
+
+ def get_tree_icon(self) -> str:
+ return "[layout.tree.row]⬌"
+
+ def divide(
+ self, children: Sequence["Layout"], region: Region
+ ) -> Iterable[Tuple["Layout", Region]]:
+ x, y, width, height = region
+ render_widths = ratio_resolve(width, children)
+ offset = 0
+ _Region = Region
+ for child, child_width in zip(children, render_widths):
+ yield child, _Region(x + offset, y, child_width, height)
+ offset += child_width
+
+
+class ColumnSplitter(Splitter):
+ """Split a layout region in to columns."""
+
+ name = "column"
+
+ def get_tree_icon(self) -> str:
+ return "[layout.tree.column]⬍"
+
+ def divide(
+ self, children: Sequence["Layout"], region: Region
+ ) -> Iterable[Tuple["Layout", Region]]:
+ x, y, width, height = region
+ render_heights = ratio_resolve(height, children)
+ offset = 0
+ _Region = Region
+ for child, child_height in zip(children, render_heights):
+ yield child, _Region(x, y + offset, width, child_height)
+ offset += child_height
+
+
+@rich_repr
+class Layout:
+ """A renderable to divide a fixed height in to rows or columns.
+
+ Args:
+ renderable (RenderableType, optional): Renderable content, or None for placeholder. Defaults to None.
+ name (str, optional): Optional identifier for Layout. Defaults to None.
+ size (int, optional): Optional fixed size of layout. Defaults to None.
+ minimum_size (int, optional): Minimum size of layout. Defaults to 1.
+ ratio (int, optional): Optional ratio for flexible layout. Defaults to 1.
+ visible (bool, optional): Visibility of layout. Defaults to True.
+ """
+
+ splitters = {"row": RowSplitter, "column": ColumnSplitter}
+
+ def __init__(
+ self,
+ renderable: Optional[RenderableType] = None,
+ *,
+ name: Optional[str] = None,
+ size: Optional[int] = None,
+ minimum_size: int = 1,
+ ratio: int = 1,
+ visible: bool = True,
+ height: Optional[int] = None,
+ ) -> None:
+ self._renderable = renderable or _Placeholder(self)
+ self.size = size
+ self.minimum_size = minimum_size
+ self.ratio = ratio
+ self.name = name
+ self.visible = visible
+ self.height = height
+ self.splitter: Splitter = self.splitters["column"]()
+ self._children: List[Layout] = []
+ self._render_map: RenderMap = {}
+ self._lock = RLock()
+
+ def __rich_repr__(self) -> Result:
+ yield "name", self.name, None
+ yield "size", self.size, None
+ yield "minimum_size", self.minimum_size, 1
+ yield "ratio", self.ratio, 1
+
+ @property
+ def renderable(self) -> RenderableType:
+ """Layout renderable."""
+ return self if self._children else self._renderable
+
+ @property
+ def children(self) -> List["Layout"]:
+ """Gets (visible) layout children."""
+ return [child for child in self._children if child.visible]
+
+ @property
+ def map(self) -> RenderMap:
+ """Get a map of the last render."""
+ return self._render_map
+
+ def get(self, name: str) -> Optional["Layout"]:
+ """Get a named layout, or None if it doesn't exist.
+
+ Args:
+ name (str): Name of layout.
+
+ Returns:
+ Optional[Layout]: Layout instance or None if no layout was found.
+ """
+ if self.name == name:
+ return self
+ else:
+ for child in self._children:
+ named_layout = child.get(name)
+ if named_layout is not None:
+ return named_layout
+ return None
+
+ def __getitem__(self, name: str) -> "Layout":
+ layout = self.get(name)
+ if layout is None:
+ raise KeyError(f"No layout with name {name!r}")
+ return layout
+
+ @property
+ def tree(self) -> "Tree":
+ """Get a tree renderable to show layout structure."""
+ from pip._vendor.rich.styled import Styled
+ from pip._vendor.rich.table import Table
+ from pip._vendor.rich.tree import Tree
+
+ def summary(layout: "Layout") -> Table:
+
+ icon = layout.splitter.get_tree_icon()
+
+ table = Table.grid(padding=(0, 1, 0, 0))
+
+ text: RenderableType = (
+ Pretty(layout) if layout.visible else Styled(Pretty(layout), "dim")
+ )
+ table.add_row(icon, text)
+ _summary = table
+ return _summary
+
+ layout = self
+ tree = Tree(
+ summary(layout),
+ guide_style=f"layout.tree.{layout.splitter.name}",
+ highlight=True,
+ )
+
+ def recurse(tree: "Tree", layout: "Layout") -> None:
+ for child in layout._children:
+ recurse(
+ tree.add(
+ summary(child),
+ guide_style=f"layout.tree.{child.splitter.name}",
+ ),
+ child,
+ )
+
+ recurse(tree, self)
+ return tree
+
+ def split(
+ self,
+ *layouts: Union["Layout", RenderableType],
+ splitter: Union[Splitter, str] = "column",
+ ) -> None:
+ """Split the layout in to multiple sub-layouts.
+
+ Args:
+ *layouts (Layout): Positional arguments should be (sub) Layout instances.
+ splitter (Union[Splitter, str]): Splitter instance or name of splitter.
+ """
+ _layouts = [
+ layout if isinstance(layout, Layout) else Layout(layout)
+ for layout in layouts
+ ]
+ try:
+ self.splitter = (
+ splitter
+ if isinstance(splitter, Splitter)
+ else self.splitters[splitter]()
+ )
+ except KeyError:
+ raise NoSplitter(f"No splitter called {splitter!r}")
+ self._children[:] = _layouts
+
+ def add_split(self, *layouts: Union["Layout", RenderableType]) -> None:
+ """Add a new layout(s) to existing split.
+
+ Args:
+ *layouts (Union[Layout, RenderableType]): Positional arguments should be renderables or (sub) Layout instances.
+
+ """
+ _layouts = (
+ layout if isinstance(layout, Layout) else Layout(layout)
+ for layout in layouts
+ )
+ self._children.extend(_layouts)
+
+ def split_row(self, *layouts: Union["Layout", RenderableType]) -> None:
+ """Split the layout into a row (layouts side by side).
+
+ Args:
+ *layouts (Layout): Positional arguments should be (sub) Layout instances.
+ """
+ self.split(*layouts, splitter="row")
+
+ def split_column(self, *layouts: Union["Layout", RenderableType]) -> None:
+ """Split the layout in to a column (layouts stacked on top of each other).
+
+ Args:
+ *layouts (Layout): Positional arguments should be (sub) Layout instances.
+ """
+ self.split(*layouts, splitter="column")
+
+ def unsplit(self) -> None:
+ """Reset splits to initial state."""
+ del self._children[:]
+
+ def update(self, renderable: RenderableType) -> None:
+ """Update renderable.
+
+ Args:
+ renderable (RenderableType): New renderable object.
+ """
+ with self._lock:
+ self._renderable = renderable
+
+ def refresh_screen(self, console: "Console", layout_name: str) -> None:
+ """Refresh a sub-layout.
+
+ Args:
+ console (Console): Console instance where Layout is to be rendered.
+ layout_name (str): Name of layout.
+ """
+ with self._lock:
+ layout = self[layout_name]
+ region, _lines = self._render_map[layout]
+ (x, y, width, height) = region
+ lines = console.render_lines(
+ layout, console.options.update_dimensions(width, height)
+ )
+ self._render_map[layout] = LayoutRender(region, lines)
+ console.update_screen_lines(lines, x, y)
+
+ def _make_region_map(self, width: int, height: int) -> RegionMap:
+ """Create a dict that maps layout on to Region."""
+ stack: List[Tuple[Layout, Region]] = [(self, Region(0, 0, width, height))]
+ push = stack.append
+ pop = stack.pop
+ layout_regions: List[Tuple[Layout, Region]] = []
+ append_layout_region = layout_regions.append
+ while stack:
+ append_layout_region(pop())
+ layout, region = layout_regions[-1]
+ children = layout.children
+ if children:
+ for child_and_region in layout.splitter.divide(children, region):
+ push(child_and_region)
+
+ region_map = {
+ layout: region
+ for layout, region in sorted(layout_regions, key=itemgetter(1))
+ }
+ return region_map
+
+ def render(self, console: Console, options: ConsoleOptions) -> RenderMap:
+ """Render the sub_layouts.
+
+ Args:
+ console (Console): Console instance.
+ options (ConsoleOptions): Console options.
+
+ Returns:
+ RenderMap: A dict that maps Layout on to a tuple of Region, lines
+ """
+ render_width = options.max_width
+ render_height = options.height or console.height
+ region_map = self._make_region_map(render_width, render_height)
+ layout_regions = [
+ (layout, region)
+ for layout, region in region_map.items()
+ if not layout.children
+ ]
+ render_map: Dict["Layout", "LayoutRender"] = {}
+ render_lines = console.render_lines
+ update_dimensions = options.update_dimensions
+
+ for layout, region in layout_regions:
+ lines = render_lines(
+ layout.renderable, update_dimensions(region.width, region.height)
+ )
+ render_map[layout] = LayoutRender(region, lines)
+ return render_map
+
+ def __rich_console__(
+ self, console: Console, options: ConsoleOptions
+ ) -> RenderResult:
+ with self._lock:
+ width = options.max_width or console.width
+ height = options.height or console.height
+ render_map = self.render(console, options.update_dimensions(width, height))
+ self._render_map = render_map
+ layout_lines: List[List[Segment]] = [[] for _ in range(height)]
+ _islice = islice
+ for (region, lines) in render_map.values():
+ _x, y, _layout_width, layout_height = region
+ for row, line in zip(
+ _islice(layout_lines, y, y + layout_height), lines
+ ):
+ row.extend(line)
+
+ new_line = Segment.line()
+ for layout_row in layout_lines:
+ yield from layout_row
+ yield new_line
+
+
+if __name__ == "__main__":
+ from pip._vendor.rich.console import Console
+
+ console = Console()
+ layout = Layout()
+
+ layout.split_column(
+ Layout(name="header", size=3),
+ Layout(ratio=1, name="main"),
+ Layout(size=10, name="footer"),
+ )
+
+ layout["main"].split_row(Layout(name="side"), Layout(name="body", ratio=2))
+
+ layout["body"].split_row(Layout(name="content", ratio=2), Layout(name="s2"))
+
+ layout["s2"].split_column(
+ Layout(name="top"), Layout(name="middle"), Layout(name="bottom")
+ )
+
+ layout["side"].split_column(Layout(layout.tree, name="left1"), Layout(name="left2"))
+
+ layout["content"].update("foo")
+
+ console.print(layout)
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/live.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/live.py
new file mode 100644
index 0000000..6db5b60
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/live.py
@@ -0,0 +1,365 @@
+import sys
+from threading import Event, RLock, Thread
+from types import TracebackType
+from typing import IO, Any, Callable, List, Optional, TextIO, Type, cast
+
+from . import get_console
+from .console import Console, ConsoleRenderable, RenderableType, RenderHook
+from .control import Control
+from .file_proxy import FileProxy
+from .jupyter import JupyterMixin
+from .live_render import LiveRender, VerticalOverflowMethod
+from .screen import Screen
+from .text import Text
+
+
+class _RefreshThread(Thread):
+ """A thread that calls refresh() at regular intervals."""
+
+ def __init__(self, live: "Live", refresh_per_second: float) -> None:
+ self.live = live
+ self.refresh_per_second = refresh_per_second
+ self.done = Event()
+ super().__init__(daemon=True)
+
+ def stop(self) -> None:
+ self.done.set()
+
+ def run(self) -> None:
+ while not self.done.wait(1 / self.refresh_per_second):
+ with self.live._lock:
+ if not self.done.is_set():
+ self.live.refresh()
+
+
+class Live(JupyterMixin, RenderHook):
+ """Renders an auto-updating live display of any given renderable.
+
+ Args:
+ renderable (RenderableType, optional): The renderable to live display. Defaults to displaying nothing.
+ console (Console, optional): Optional Console instance. Defaults to an internal Console instance writing to stdout.
+ screen (bool, optional): Enable alternate screen mode. Defaults to False.
+ auto_refresh (bool, optional): Enable auto refresh. If disabled, you will need to call `refresh()` or `update()` with refresh flag. Defaults to True
+ refresh_per_second (float, optional): Number of times per second to refresh the live display. Defaults to 4.
+ transient (bool, optional): Clear the renderable on exit (has no effect when screen=True). Defaults to False.
+ redirect_stdout (bool, optional): Enable redirection of stdout, so ``print`` may be used. Defaults to True.
+ redirect_stderr (bool, optional): Enable redirection of stderr. Defaults to True.
+ vertical_overflow (VerticalOverflowMethod, optional): How to handle renderable when it is too tall for the console. Defaults to "ellipsis".
+ get_renderable (Callable[[], RenderableType], optional): Optional callable to get renderable. Defaults to None.
+ """
+
+ def __init__(
+ self,
+ renderable: Optional[RenderableType] = None,
+ *,
+ console: Optional[Console] = None,
+ screen: bool = False,
+ auto_refresh: bool = True,
+ refresh_per_second: float = 4,
+ transient: bool = False,
+ redirect_stdout: bool = True,
+ redirect_stderr: bool = True,
+ vertical_overflow: VerticalOverflowMethod = "ellipsis",
+ get_renderable: Optional[Callable[[], RenderableType]] = None,
+ ) -> None:
+ assert refresh_per_second > 0, "refresh_per_second must be > 0"
+ self._renderable = renderable
+ self.console = console if console is not None else get_console()
+ self._screen = screen
+ self._alt_screen = False
+
+ self._redirect_stdout = redirect_stdout
+ self._redirect_stderr = redirect_stderr
+ self._restore_stdout: Optional[IO[str]] = None
+ self._restore_stderr: Optional[IO[str]] = None
+
+ self._lock = RLock()
+ self.ipy_widget: Optional[Any] = None
+ self.auto_refresh = auto_refresh
+ self._started: bool = False
+ self.transient = True if screen else transient
+
+ self._refresh_thread: Optional[_RefreshThread] = None
+ self.refresh_per_second = refresh_per_second
+
+ self.vertical_overflow = vertical_overflow
+ self._get_renderable = get_renderable
+ self._live_render = LiveRender(
+ self.get_renderable(), vertical_overflow=vertical_overflow
+ )
+
+ @property
+ def is_started(self) -> bool:
+ """Check if live display has been started."""
+ return self._started
+
+ def get_renderable(self) -> RenderableType:
+ renderable = (
+ self._get_renderable()
+ if self._get_renderable is not None
+ else self._renderable
+ )
+ return renderable or ""
+
+ def start(self, refresh: bool = False) -> None:
+ """Start live rendering display.
+
+ Args:
+ refresh (bool, optional): Also refresh. Defaults to False.
+ """
+ with self._lock:
+ if self._started:
+ return
+ self.console.set_live(self)
+ self._started = True
+ if self._screen:
+ self._alt_screen = self.console.set_alt_screen(True)
+ self.console.show_cursor(False)
+ self._enable_redirect_io()
+ self.console.push_render_hook(self)
+ if refresh:
+ self.refresh()
+ if self.auto_refresh:
+ self._refresh_thread = _RefreshThread(self, self.refresh_per_second)
+ self._refresh_thread.start()
+
+ def stop(self) -> None:
+ """Stop live rendering display."""
+ with self._lock:
+ if not self._started:
+ return
+ self.console.clear_live()
+ self._started = False
+
+ if self.auto_refresh and self._refresh_thread is not None:
+ self._refresh_thread.stop()
+ self._refresh_thread = None
+ # allow the final frame to fully render, even if it overflows
+ self.vertical_overflow = "visible"
+ with self.console:
+ try:
+ if not self._alt_screen and not self.console.is_jupyter:
+ self.refresh()
+ finally:
+ self._disable_redirect_io()
+ self.console.pop_render_hook()
+ if not self._alt_screen and self.console.is_terminal:
+ self.console.line()
+ self.console.show_cursor(True)
+ if self._alt_screen:
+ self.console.set_alt_screen(False)
+
+ if self.transient and not self._alt_screen:
+ self.console.control(self._live_render.restore_cursor())
+ if self.ipy_widget is not None and self.transient:
+ self.ipy_widget.close() # pragma: no cover
+
+ def __enter__(self) -> "Live":
+ self.start(refresh=self._renderable is not None)
+ return self
+
+ def __exit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc_val: Optional[BaseException],
+ exc_tb: Optional[TracebackType],
+ ) -> None:
+ self.stop()
+
+ def _enable_redirect_io(self) -> None:
+ """Enable redirecting of stdout / stderr."""
+ if self.console.is_terminal or self.console.is_jupyter:
+ if self._redirect_stdout and not isinstance(sys.stdout, FileProxy):
+ self._restore_stdout = sys.stdout
+ sys.stdout = cast("TextIO", FileProxy(self.console, sys.stdout))
+ if self._redirect_stderr and not isinstance(sys.stderr, FileProxy):
+ self._restore_stderr = sys.stderr
+ sys.stderr = cast("TextIO", FileProxy(self.console, sys.stderr))
+
+ def _disable_redirect_io(self) -> None:
+ """Disable redirecting of stdout / stderr."""
+ if self._restore_stdout:
+ sys.stdout = cast("TextIO", self._restore_stdout)
+ self._restore_stdout = None
+ if self._restore_stderr:
+ sys.stderr = cast("TextIO", self._restore_stderr)
+ self._restore_stderr = None
+
+ @property
+ def renderable(self) -> RenderableType:
+ """Get the renderable that is being displayed
+
+ Returns:
+ RenderableType: Displayed renderable.
+ """
+ renderable = self.get_renderable()
+ return Screen(renderable) if self._alt_screen else renderable
+
+ def update(self, renderable: RenderableType, *, refresh: bool = False) -> None:
+ """Update the renderable that is being displayed
+
+ Args:
+ renderable (RenderableType): New renderable to use.
+ refresh (bool, optional): Refresh the display. Defaults to False.
+ """
+ with self._lock:
+ self._renderable = renderable
+ if refresh:
+ self.refresh()
+
+ def refresh(self) -> None:
+ """Update the display of the Live Render."""
+ with self._lock:
+ self._live_render.set_renderable(self.renderable)
+ if self.console.is_jupyter: # pragma: no cover
+ try:
+ from IPython.display import display
+ from ipywidgets import Output
+ except ImportError:
+ import warnings
+
+ warnings.warn('install "ipywidgets" for Jupyter support')
+ else:
+ if self.ipy_widget is None:
+ self.ipy_widget = Output()
+ display(self.ipy_widget)
+
+ with self.ipy_widget:
+ self.ipy_widget.clear_output(wait=True)
+ self.console.print(self._live_render.renderable)
+ elif self.console.is_terminal and not self.console.is_dumb_terminal:
+ with self.console:
+ self.console.print(Control())
+ elif (
+ not self._started and not self.transient
+ ): # if it is finished allow files or dumb-terminals to see final result
+ with self.console:
+ self.console.print(Control())
+
+ def process_renderables(
+ self, renderables: List[ConsoleRenderable]
+ ) -> List[ConsoleRenderable]:
+ """Process renderables to restore cursor and display progress."""
+ self._live_render.vertical_overflow = self.vertical_overflow
+ if self.console.is_interactive:
+ # lock needs acquiring as user can modify live_render renderable at any time unlike in Progress.
+ with self._lock:
+ reset = (
+ Control.home()
+ if self._alt_screen
+ else self._live_render.position_cursor()
+ )
+ renderables = [reset, *renderables, self._live_render]
+ elif (
+ not self._started and not self.transient
+ ): # if it is finished render the final output for files or dumb_terminals
+ renderables = [*renderables, self._live_render]
+
+ return renderables
+
+
+if __name__ == "__main__": # pragma: no cover
+ import random
+ import time
+ from itertools import cycle
+ from typing import Dict, List, Tuple
+
+ from .align import Align
+ from .console import Console
+ from .live import Live as Live
+ from .panel import Panel
+ from .rule import Rule
+ from .syntax import Syntax
+ from .table import Table
+
+ console = Console()
+
+ syntax = Syntax(
+ '''def loop_last(values: Iterable[T]) -> Iterable[Tuple[bool, T]]:
+ """Iterate and generate a tuple with a flag for last value."""
+ iter_values = iter(values)
+ try:
+ previous_value = next(iter_values)
+ except StopIteration:
+ return
+ for value in iter_values:
+ yield False, previous_value
+ previous_value = value
+ yield True, previous_value''',
+ "python",
+ line_numbers=True,
+ )
+
+ table = Table("foo", "bar", "baz")
+ table.add_row("1", "2", "3")
+
+ progress_renderables = [
+ "You can make the terminal shorter and taller to see the live table hide.",
+ "Text may be printed while the progress bars are rendering.",
+ Panel("In fact, [i]any[/i] renderable will work"),
+ "Such as [magenta]tables[/]...",
+ table,
+ "Pretty printed structures...",
+ {"type": "example", "text": "Pretty printed"},
+ "Syntax...",
+ syntax,
+ Rule("Give it a try!"),
+ ]
+
+ examples = cycle(progress_renderables)
+
+ exchanges = [
+ "SGD",
+ "MYR",
+ "EUR",
+ "USD",
+ "AUD",
+ "JPY",
+ "CNH",
+ "HKD",
+ "CAD",
+ "INR",
+ "DKK",
+ "GBP",
+ "RUB",
+ "NZD",
+ "MXN",
+ "IDR",
+ "TWD",
+ "THB",
+ "VND",
+ ]
+ with Live(console=console) as live_table:
+ exchange_rate_dict: Dict[Tuple[str, str], float] = {}
+
+ for index in range(100):
+ select_exchange = exchanges[index % len(exchanges)]
+
+ for exchange in exchanges:
+ if exchange == select_exchange:
+ continue
+ time.sleep(0.4)
+ if random.randint(0, 10) < 1:
+ console.log(next(examples))
+ exchange_rate_dict[(select_exchange, exchange)] = 200 / (
+ (random.random() * 320) + 1
+ )
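+                # Cap the dict at len(exchanges) - 1 entries so the live table
+                # keeps a fixed number of rows instead of growing unbounded.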
+ if len(exchange_rate_dict) > len(exchanges) - 1:
+ exchange_rate_dict.pop(list(exchange_rate_dict.keys())[0])
+ table = Table(title="Exchange Rates")
+
+ table.add_column("Source Currency")
+ table.add_column("Destination Currency")
+ table.add_column("Exchange Rate")
+
+ for ((source, dest), exchange_rate) in exchange_rate_dict.items():
+ table.add_row(
+ source,
+ dest,
+ Text(
+ f"{exchange_rate:.4f}",
+ style="red" if exchange_rate < 1.0 else "green",
+ ),
+ )
+
+ live_table.update(Align.center(table))
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/live_render.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/live_render.py
new file mode 100644
index 0000000..b90fbf7
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/live_render.py
@@ -0,0 +1,113 @@
+import sys
+from typing import Optional, Tuple
+
+if sys.version_info >= (3, 8):
+ from typing import Literal
+else:
+ from pip._vendor.typing_extensions import Literal # pragma: no cover
+
+
+from ._loop import loop_last
+from .console import Console, ConsoleOptions, RenderableType, RenderResult
+from .control import Control
+from .segment import ControlType, Segment
+from .style import StyleType
+from .text import Text
+
+VerticalOverflowMethod = Literal["crop", "ellipsis", "visible"]
+
+
+class LiveRender:
+ """Creates a renderable that may be updated.
+
+ Args:
+ renderable (RenderableType): Any renderable object.
+ style (StyleType, optional): An optional style to apply to the renderable. Defaults to "".
+ """
+
+ def __init__(
+ self,
+ renderable: RenderableType,
+ style: StyleType = "",
+ vertical_overflow: VerticalOverflowMethod = "ellipsis",
+ ) -> None:
+ self.renderable = renderable
+ self.style = style
+ self.vertical_overflow = vertical_overflow
+ self._shape: Optional[Tuple[int, int]] = None
+
+ def set_renderable(self, renderable: RenderableType) -> None:
+ """Set a new renderable.
+
+ Args:
+ renderable (RenderableType): Any renderable object, including str.
+ """
+ self.renderable = renderable
+
+ def position_cursor(self) -> Control:
+ """Get control codes to move cursor to beginning of live render.
+
+ Returns:
+ Control: A control instance that may be printed.
+ """
+ if self._shape is not None:
+ _, height = self._shape
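+            # Return to column 0 and erase the current line, then move up and
+            # erase each remaining line of the previous render (height - 1 times).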
+ return Control(
+ ControlType.CARRIAGE_RETURN,
+ (ControlType.ERASE_IN_LINE, 2),
+ *(
+ (
+ (ControlType.CURSOR_UP, 1),
+ (ControlType.ERASE_IN_LINE, 2),
+ )
+ * (height - 1)
+ )
+ )
+ return Control()
+
+ def restore_cursor(self) -> Control:
+ """Get control codes to clear the render and restore the cursor to its previous position.
+
+ Returns:
+ Control: A Control instance that may be printed.
+ """
+ if self._shape is not None:
+ _, height = self._shape
+ return Control(
+ ControlType.CARRIAGE_RETURN,
+ *((ControlType.CURSOR_UP, 1), (ControlType.ERASE_IN_LINE, 2)) * height
+ )
+ return Control()
+
+ def __rich_console__(
+ self, console: Console, options: ConsoleOptions
+ ) -> RenderResult:
+
+ renderable = self.renderable
+ style = console.get_style(self.style)
+ lines = console.render_lines(renderable, options, style=style, pad=False)
+ shape = Segment.get_shape(lines)
+
+ _, height = shape
+ if height > options.size.height:
+ if self.vertical_overflow == "crop":
+ lines = lines[: options.size.height]
+ shape = Segment.get_shape(lines)
+ elif self.vertical_overflow == "ellipsis":
+ lines = lines[: (options.size.height - 1)]
+ overflow_text = Text(
+ "...",
+ overflow="crop",
+ justify="center",
+ end="",
+ style="live.ellipsis",
+ )
+ lines.append(list(console.render(overflow_text)))
+ shape = Segment.get_shape(lines)
+ self._shape = shape
+
+ new_line = Segment.line()
+ for last, line in loop_last(lines):
+ yield from line
+ if not last:
+ yield new_line
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/logging.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/logging.py
new file mode 100644
index 0000000..002f1f7
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/logging.py
@@ -0,0 +1,268 @@
+import logging
+from datetime import datetime
+from logging import Handler, LogRecord
+from pathlib import Path
+from typing import ClassVar, List, Optional, Type, Union
+
+from . import get_console
+from ._log_render import LogRender, FormatTimeCallable
+from .console import Console, ConsoleRenderable
+from .highlighter import Highlighter, ReprHighlighter
+from .text import Text
+from .traceback import Traceback
+
+
+class RichHandler(Handler):
+ """A logging handler that renders output with Rich. The time / level / message and file are displayed in columns.
+ The level is color coded, and the message is syntax highlighted.
+
+ Note:
+ Be careful when enabling console markup in log messages if you have configured logging for libraries not
+ under your control. If a dependency writes messages containing square brackets, it may not produce the intended output.
+
+ Args:
+ level (Union[int, str], optional): Log level. Defaults to logging.NOTSET.
+ console (:class:`~rich.console.Console`, optional): Optional console instance to write logs.
+ Default will use a global console instance writing to stdout.
+ show_time (bool, optional): Show a column for the time. Defaults to True.
+ omit_repeated_times (bool, optional): Omit repetition of the same time. Defaults to True.
+ show_level (bool, optional): Show a column for the level. Defaults to True.
+ show_path (bool, optional): Show the path to the original log call. Defaults to True.
+ enable_link_path (bool, optional): Enable terminal link of path column to file. Defaults to True.
+ highlighter (Highlighter, optional): Highlighter to style log messages, or None to use ReprHighlighter. Defaults to None.
+ markup (bool, optional): Enable console markup in log messages. Defaults to False.
+ rich_tracebacks (bool, optional): Enable rich tracebacks with syntax highlighting and formatting. Defaults to False.
+ tracebacks_width (Optional[int], optional): Number of characters used to render tracebacks, or None for full width. Defaults to None.
+        tracebacks_extra_lines (int, optional): Additional lines of code to render in tracebacks. Defaults to 3.
+ tracebacks_theme (str, optional): Override pygments theme used in traceback.
+        tracebacks_word_wrap (bool, optional): Enable word wrapping of long traceback lines. Defaults to True.
+ tracebacks_show_locals (bool, optional): Enable display of locals in tracebacks. Defaults to False.
+ locals_max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
+ Defaults to 10.
+ locals_max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to 80.
+        log_time_format (Union[str, FormatTimeCallable], optional): If ``show_time`` is enabled, either string for strftime or callable that formats the time. Defaults to "[%x %X]".
+ """
+
+ KEYWORDS: ClassVar[Optional[List[str]]] = [
+ "GET",
+ "POST",
+ "HEAD",
+ "PUT",
+ "DELETE",
+ "OPTIONS",
+ "TRACE",
+ "PATCH",
+ ]
+ HIGHLIGHTER_CLASS: ClassVar[Type[Highlighter]] = ReprHighlighter
+
+ def __init__(
+ self,
+ level: Union[int, str] = logging.NOTSET,
+ console: Optional[Console] = None,
+ *,
+ show_time: bool = True,
+ omit_repeated_times: bool = True,
+ show_level: bool = True,
+ show_path: bool = True,
+ enable_link_path: bool = True,
+ highlighter: Optional[Highlighter] = None,
+ markup: bool = False,
+ rich_tracebacks: bool = False,
+ tracebacks_width: Optional[int] = None,
+ tracebacks_extra_lines: int = 3,
+ tracebacks_theme: Optional[str] = None,
+ tracebacks_word_wrap: bool = True,
+ tracebacks_show_locals: bool = False,
+ locals_max_length: int = 10,
+ locals_max_string: int = 80,
+ log_time_format: Union[str, FormatTimeCallable] = "[%x %X]",
+ ) -> None:
+ super().__init__(level=level)
+ self.console = console or get_console()
+ self.highlighter = highlighter or self.HIGHLIGHTER_CLASS()
+ self._log_render = LogRender(
+ show_time=show_time,
+ show_level=show_level,
+ show_path=show_path,
+ time_format=log_time_format,
+ omit_repeated_times=omit_repeated_times,
+ level_width=None,
+ )
+ self.enable_link_path = enable_link_path
+ self.markup = markup
+ self.rich_tracebacks = rich_tracebacks
+ self.tracebacks_width = tracebacks_width
+ self.tracebacks_extra_lines = tracebacks_extra_lines
+ self.tracebacks_theme = tracebacks_theme
+ self.tracebacks_word_wrap = tracebacks_word_wrap
+ self.tracebacks_show_locals = tracebacks_show_locals
+ self.locals_max_length = locals_max_length
+ self.locals_max_string = locals_max_string
+
+ def get_level_text(self, record: LogRecord) -> Text:
+ """Get the level name from the record.
+
+ Args:
+ record (LogRecord): LogRecord instance.
+
+ Returns:
+            Text: A Text instance with the styled level name.
+ """
+ level_name = record.levelname
+ level_text = Text.styled(
+ level_name.ljust(8), f"logging.level.{level_name.lower()}"
+ )
+ return level_text
+
+ def emit(self, record: LogRecord) -> None:
+ """Invoked by logging."""
+ message = self.format(record)
+ traceback = None
+ if (
+ self.rich_tracebacks
+ and record.exc_info
+ and record.exc_info != (None, None, None)
+ ):
+ exc_type, exc_value, exc_traceback = record.exc_info
+ assert exc_type is not None
+ assert exc_value is not None
+ traceback = Traceback.from_exception(
+ exc_type,
+ exc_value,
+ exc_traceback,
+ width=self.tracebacks_width,
+ extra_lines=self.tracebacks_extra_lines,
+ theme=self.tracebacks_theme,
+ word_wrap=self.tracebacks_word_wrap,
+ show_locals=self.tracebacks_show_locals,
+ locals_max_length=self.locals_max_length,
+ locals_max_string=self.locals_max_string,
+ )
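+            # The attached Formatter may have appended the traceback text to the
+            # formatted message; re-read the bare message since Rich renders the
+            # traceback itself via the Traceback renderable above.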
+ message = record.getMessage()
+ if self.formatter:
+ record.message = record.getMessage()
+ formatter = self.formatter
+ if hasattr(formatter, "usesTime") and formatter.usesTime():
+ record.asctime = formatter.formatTime(record, formatter.datefmt)
+ message = formatter.formatMessage(record)
+
+ message_renderable = self.render_message(record, message)
+ log_renderable = self.render(
+ record=record, traceback=traceback, message_renderable=message_renderable
+ )
+ try:
+ self.console.print(log_renderable)
+ except Exception:
+ self.handleError(record)
+
+ def render_message(self, record: LogRecord, message: str) -> "ConsoleRenderable":
+ """Render message text in to Text.
+
+ record (LogRecord): logging Record.
+ message (str): String containing log message.
+
+ Returns:
+ ConsoleRenderable: Renderable to display log message.
+ """
+ use_markup = getattr(record, "markup", self.markup)
+ message_text = Text.from_markup(message) if use_markup else Text(message)
+
+ highlighter = getattr(record, "highlighter", self.highlighter)
+ if highlighter:
+ message_text = highlighter(message_text)
+
+ if self.KEYWORDS:
+ message_text.highlight_words(self.KEYWORDS, "logging.keyword")
+ return message_text
+
+ def render(
+ self,
+ *,
+ record: LogRecord,
+ traceback: Optional[Traceback],
+ message_renderable: "ConsoleRenderable",
+ ) -> "ConsoleRenderable":
+ """Render log for display.
+
+ Args:
+ record (LogRecord): logging Record.
+ traceback (Optional[Traceback]): Traceback instance or None for no Traceback.
+ message_renderable (ConsoleRenderable): Renderable (typically Text) containing log message contents.
+
+ Returns:
+ ConsoleRenderable: Renderable to display log.
+ """
+ path = Path(record.pathname).name
+ level = self.get_level_text(record)
+ time_format = None if self.formatter is None else self.formatter.datefmt
+ log_time = datetime.fromtimestamp(record.created)
+
+ log_renderable = self._log_render(
+ self.console,
+ [message_renderable] if not traceback else [message_renderable, traceback],
+ log_time=log_time,
+ time_format=time_format,
+ level=level,
+ path=path,
+ line_no=record.lineno,
+ link_path=record.pathname if self.enable_link_path else None,
+ )
+ return log_renderable
+
+
+if __name__ == "__main__": # pragma: no cover
+ from time import sleep
+
+ FORMAT = "%(message)s"
+ # FORMAT = "%(asctime)-15s - %(levelname)s - %(message)s"
+ logging.basicConfig(
+ level="NOTSET",
+ format=FORMAT,
+ datefmt="[%X]",
+ handlers=[RichHandler(rich_tracebacks=True, tracebacks_show_locals=True)],
+ )
+ log = logging.getLogger("rich")
+
+ log.info("Server starting...")
+ log.info("Listening on http://127.0.0.1:8080")
+ sleep(1)
+
+ log.info("GET /index.html 200 1298")
+ log.info("GET /imgs/backgrounds/back1.jpg 200 54386")
+ log.info("GET /css/styles.css 200 54386")
+ log.warning("GET /favicon.ico 404 242")
+ sleep(1)
+
+ log.debug(
+ "JSONRPC request\n--> %r\n<-- %r",
+ {
+ "version": "1.1",
+ "method": "confirmFruitPurchase",
+ "params": [["apple", "orange", "mangoes", "pomelo"], 1.123],
+ "id": "194521489",
+ },
+ {"version": "1.1", "result": True, "error": None, "id": "194521489"},
+ )
+ log.debug(
+ "Loading configuration file /adasd/asdasd/qeqwe/qwrqwrqwr/sdgsdgsdg/werwerwer/dfgerert/ertertert/ertetert/werwerwer"
+ )
+ log.error("Unable to find 'pomelo' in database!")
+ log.info("POST /jsonrpc/ 200 65532")
+ log.info("POST /admin/ 401 42234")
+ log.warning("password was rejected for admin site.")
+
+ def divide() -> None:
+ number = 1
+ divisor = 0
+ foos = ["foo"] * 100
+ log.debug("in divide")
+ try:
+ number / divisor
+        except Exception:
+ log.exception("An error of some kind occurred!")
+
+ divide()
+ sleep(1)
+ log.critical("Out of memory!")
+ log.info("Server exited with code=-1")
+ log.info("[bold]EXITING...[/bold]", extra=dict(markup=True))
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/markup.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/markup.py
new file mode 100644
index 0000000..6195402
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/markup.py
@@ -0,0 +1,244 @@
+from ast import literal_eval
+from operator import attrgetter
+import re
+from typing import Callable, Iterable, List, Match, NamedTuple, Optional, Tuple, Union
+
+from .errors import MarkupError
+from .style import Style
+from .text import Span, Text
+from .emoji import EmojiVariant
+from ._emoji_replace import _emoji_replace
+
+
+RE_TAGS = re.compile(
+ r"""((\\*)\[([a-z#\/@].*?)\])""",
+ re.VERBOSE,
+)
+
+RE_HANDLER = re.compile(r"^([\w\.]*?)(\(.*?\))?$")
+
+
+class Tag(NamedTuple):
+ """A tag in console markup."""
+
+ name: str
+ """The tag name. e.g. 'bold'."""
+ parameters: Optional[str]
+ """Any additional parameters after the name."""
+
+ def __str__(self) -> str:
+ return (
+ self.name if self.parameters is None else f"{self.name} {self.parameters}"
+ )
+
+ @property
+ def markup(self) -> str:
+ """Get the string representation of this tag."""
+ return (
+ f"[{self.name}]"
+ if self.parameters is None
+ else f"[{self.name}={self.parameters}]"
+ )
+
+
+_ReStringMatch = Match[str] # regex match object
+_ReSubCallable = Callable[[_ReStringMatch], str] # Callable invoked by re.sub
+_EscapeSubMethod = Callable[[_ReSubCallable, str], str] # Sub method of a compiled re
+
+
+def escape(
+ markup: str, _escape: _EscapeSubMethod = re.compile(r"(\\*)(\[[a-z#\/@].*?\])").sub
+) -> str:
+ """Escapes text so that it won't be interpreted as markup.
+
+ Args:
+        markup (str): Content to be inserted into markup.
+
+ Returns:
+ str: Markup with square brackets escaped.
+ """
+
+ def escape_backslashes(match: Match[str]) -> str:
+ """Called by re.sub replace matches."""
+ backslashes, text = match.groups()
+ return f"{backslashes}{backslashes}\\{text}"
+
+ markup = _escape(escape_backslashes, markup)
+ return markup
+
+
+def _parse(markup: str) -> Iterable[Tuple[int, Optional[str], Optional[Tag]]]:
+ """Parse markup in to an iterable of tuples of (position, text, tag).
+
+ Args:
+ markup (str): A string containing console markup
+
+ """
+ position = 0
+ _divmod = divmod
+ _Tag = Tag
+ for match in RE_TAGS.finditer(markup):
+ full_text, escapes, tag_text = match.groups()
+ start, end = match.span()
+ if start > position:
+ yield start, markup[position:start], None
+ if escapes:
+ backslashes, escaped = _divmod(len(escapes), 2)
+ if backslashes:
+ # Literal backslashes
+ yield start, "\\" * backslashes, None
+ start += backslashes * 2
+ if escaped:
+ # Escape of tag
+ yield start, full_text[len(escapes) :], None
+ position = end
+ continue
+ text, equals, parameters = tag_text.partition("=")
+ yield start, None, _Tag(text, parameters if equals else None)
+ position = end
+ if position < len(markup):
+ yield position, markup[position:], None
+
+
+def render(
+ markup: str,
+ style: Union[str, Style] = "",
+ emoji: bool = True,
+ emoji_variant: Optional[EmojiVariant] = None,
+) -> Text:
+ """Render console markup in to a Text instance.
+
+ Args:
+ markup (str): A string containing console markup.
+ emoji (bool, optional): Also render emoji code. Defaults to True.
+
+ Raises:
+ MarkupError: If there is a syntax error in the markup.
+
+ Returns:
+        Text: A Text instance.
+ """
+ emoji_replace = _emoji_replace
+ if "[" not in markup:
+ return Text(
+ emoji_replace(markup, default_variant=emoji_variant) if emoji else markup,
+ style=style,
+ )
+ text = Text(style=style)
+ append = text.append
+ normalize = Style.normalize
+
+ style_stack: List[Tuple[int, Tag]] = []
+ pop = style_stack.pop
+
+ spans: List[Span] = []
+ append_span = spans.append
+
+ _Span = Span
+ _Tag = Tag
+
+ def pop_style(style_name: str) -> Tuple[int, Tag]:
+ """Pop tag matching given style name."""
+ for index, (_, tag) in enumerate(reversed(style_stack), 1):
+ if tag.name == style_name:
+ return pop(-index)
+ raise KeyError(style_name)
+
+ for position, plain_text, tag in _parse(markup):
+ if plain_text is not None:
+ append(emoji_replace(plain_text) if emoji else plain_text)
+ elif tag is not None:
+ if tag.name.startswith("/"): # Closing tag
+ style_name = tag.name[1:].strip()
+
+ if style_name: # explicit close
+ style_name = normalize(style_name)
+ try:
+ start, open_tag = pop_style(style_name)
+ except KeyError:
+ raise MarkupError(
+ f"closing tag '{tag.markup}' at position {position} doesn't match any open tag"
+ ) from None
+ else: # implicit close
+ try:
+ start, open_tag = pop()
+ except IndexError:
+ raise MarkupError(
+ f"closing tag '[/]' at position {position} has nothing to close"
+ ) from None
+
+ if open_tag.name.startswith("@"):
+ if open_tag.parameters:
+ handler_name = ""
+ parameters = open_tag.parameters.strip()
+ handler_match = RE_HANDLER.match(parameters)
+ if handler_match is not None:
+ handler_name, match_parameters = handler_match.groups()
+ parameters = (
+ "()" if match_parameters is None else match_parameters
+ )
+
+ try:
+ meta_params = literal_eval(parameters)
+ except SyntaxError as error:
+ raise MarkupError(
+ f"error parsing {parameters!r} in {open_tag.parameters!r}; {error.msg}"
+ )
+ except Exception as error:
+ raise MarkupError(
+ f"error parsing {open_tag.parameters!r}; {error}"
+ ) from None
+
+ if handler_name:
+ meta_params = (
+ handler_name,
+ meta_params
+ if isinstance(meta_params, tuple)
+ else (meta_params,),
+ )
+
+ else:
+ meta_params = ()
+
+ append_span(
+ _Span(
+ start, len(text), Style(meta={open_tag.name: meta_params})
+ )
+ )
+ else:
+ append_span(_Span(start, len(text), str(open_tag)))
+
+ else: # Opening tag
+ normalized_tag = _Tag(normalize(tag.name), tag.parameters)
+ style_stack.append((len(text), normalized_tag))
+
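+    # Any tags still open at the end of the markup are closed implicitly,
+    # spanning to the end of the text.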
+ text_length = len(text)
+ while style_stack:
+ start, tag = style_stack.pop()
+ style = str(tag)
+ if style:
+ append_span(_Span(start, text_length, style))
+
+ text.spans = sorted(spans[::-1], key=attrgetter("start"))
+ return text
+
+
+if __name__ == "__main__": # pragma: no cover
+
+ MARKUP = [
+ "[red]Hello World[/red]",
+ "[magenta]Hello [b]World[/b]",
+ "[bold]Bold[italic] bold and italic [/bold]italic[/italic]",
+ "Click [link=https://www.willmcgugan.com]here[/link] to visit my Blog",
+ ":warning-emoji: [bold red blink] DANGER![/]",
+ ]
+
+ from pip._vendor.rich.table import Table
+ from pip._vendor.rich import print
+
+ grid = Table("Markup", "Result", padding=(0, 1))
+
+ for markup in MARKUP:
+ grid.add_row(Text(markup), markup)
+
+ print(grid)
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/measure.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/measure.py
new file mode 100644
index 0000000..aea238d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/measure.py
@@ -0,0 +1,149 @@
+from operator import itemgetter
+from typing import Callable, Iterable, NamedTuple, Optional, TYPE_CHECKING
+
+from . import errors
+from .protocol import is_renderable, rich_cast
+
+if TYPE_CHECKING:
+ from .console import Console, ConsoleOptions, RenderableType
+
+
+class Measurement(NamedTuple):
+ """Stores the minimum and maximum widths (in characters) required to render an object."""
+
+ minimum: int
+ """Minimum number of cells required to render."""
+ maximum: int
+ """Maximum number of cells required to render."""
+
+ @property
+ def span(self) -> int:
+ """Get difference between maximum and minimum."""
+ return self.maximum - self.minimum
+
+ def normalize(self) -> "Measurement":
+ """Get measurement that ensures that minimum <= maximum and minimum >= 0
+
+ Returns:
+ Measurement: A normalized measurement.
+ """
+ minimum, maximum = self
+ minimum = min(max(0, minimum), maximum)
+ return Measurement(max(0, minimum), max(0, max(minimum, maximum)))
+
+ def with_maximum(self, width: int) -> "Measurement":
+ """Get a RenderableWith where the widths are <= width.
+
+ Args:
+ width (int): Maximum desired width.
+
+ Returns:
+ Measurement: New Measurement object.
+ """
+ minimum, maximum = self
+ return Measurement(min(minimum, width), min(maximum, width))
+
+ def with_minimum(self, width: int) -> "Measurement":
+ """Get a RenderableWith where the widths are >= width.
+
+ Args:
+ width (int): Minimum desired width.
+
+ Returns:
+ Measurement: New Measurement object.
+ """
+ minimum, maximum = self
+ width = max(0, width)
+ return Measurement(max(minimum, width), max(maximum, width))
+
+ def clamp(
+ self, min_width: Optional[int] = None, max_width: Optional[int] = None
+ ) -> "Measurement":
+ """Clamp a measurement within the specified range.
+
+ Args:
+ min_width (int): Minimum desired width, or ``None`` for no minimum. Defaults to None.
+ max_width (int): Maximum desired width, or ``None`` for no maximum. Defaults to None.
+
+ Returns:
+ Measurement: New Measurement object.
+ """
+ measurement = self
+ if min_width is not None:
+ measurement = measurement.with_minimum(min_width)
+ if max_width is not None:
+ measurement = measurement.with_maximum(max_width)
+ return measurement
+
+ @classmethod
+ def get(
+ cls, console: "Console", options: "ConsoleOptions", renderable: "RenderableType"
+ ) -> "Measurement":
+ """Get a measurement for a renderable.
+
+ Args:
+ console (~rich.console.Console): Console instance.
+ options (~rich.console.ConsoleOptions): Console options.
+ renderable (RenderableType): An object that may be rendered with Rich.
+
+ Raises:
+ errors.NotRenderableError: If the object is not renderable.
+
+ Returns:
+ Measurement: Measurement object containing range of character widths required to render the object.
+ """
+ _max_width = options.max_width
+ if _max_width < 1:
+ return Measurement(0, 0)
+ if isinstance(renderable, str):
+ renderable = console.render_str(renderable, markup=options.markup)
+ renderable = rich_cast(renderable)
+ if is_renderable(renderable):
+ get_console_width: Optional[
+ Callable[["Console", "ConsoleOptions"], "Measurement"]
+ ] = getattr(renderable, "__rich_measure__", None)
+ if get_console_width is not None:
+ render_width = (
+ get_console_width(console, options)
+ .normalize()
+ .with_maximum(_max_width)
+ )
+ if render_width.maximum < 1:
+ return Measurement(0, 0)
+ return render_width.normalize()
+ else:
+ return Measurement(0, _max_width)
+ else:
+ raise errors.NotRenderableError(
+ f"Unable to get render width for {renderable!r}; "
+ "a str, Segment, or object with __rich_console__ method is required"
+ )
+
+
+def measure_renderables(
+ console: "Console",
+ options: "ConsoleOptions",
+ renderables: Iterable["RenderableType"],
+) -> "Measurement":
+ """Get a measurement that would fit a number of renderables.
+
+ Args:
+ console (~rich.console.Console): Console instance.
+ options (~rich.console.ConsoleOptions): Console options.
+ renderables (Iterable[RenderableType]): One or more renderable objects.
+
+ Returns:
+ Measurement: Measurement object containing range of character widths required to
+ contain all given renderables.
+ """
+ if not renderables:
+ return Measurement(0, 0)
+ get_measurement = Measurement.get
+ measurements = [
+ get_measurement(console, options, renderable) for renderable in renderables
+ ]
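+    # The combined minimum must fit the widest individual minimum, and the
+    # combined maximum is the widest individual maximum.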
+ measured_width = Measurement(
+ max(measurements, key=itemgetter(0)).minimum,
+ max(measurements, key=itemgetter(1)).maximum,
+ )
+ return measured_width
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/padding.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/padding.py
new file mode 100644
index 0000000..1b2204f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/padding.py
@@ -0,0 +1,141 @@
+from typing import cast, List, Optional, Tuple, TYPE_CHECKING, Union
+
+if TYPE_CHECKING:
+ from .console import (
+ Console,
+ ConsoleOptions,
+ RenderableType,
+ RenderResult,
+ )
+from .jupyter import JupyterMixin
+from .measure import Measurement
+from .style import Style
+from .segment import Segment
+
+
+PaddingDimensions = Union[int, Tuple[int], Tuple[int, int], Tuple[int, int, int, int]]
+
+
+class Padding(JupyterMixin):
+ """Draw space around content.
+
+ Example:
+ >>> print(Padding("Hello", (2, 4), style="on blue"))
+
+ Args:
+ renderable (RenderableType): String or other renderable.
+ pad (Union[int, Tuple[int]]): Padding for top, right, bottom, and left borders.
+ May be specified with 1, 2, or 4 integers (CSS style).
+ style (Union[str, Style], optional): Style for padding characters. Defaults to "none".
+ expand (bool, optional): Expand padding to fit available width. Defaults to True.
+ """
+
+ def __init__(
+ self,
+ renderable: "RenderableType",
+ pad: "PaddingDimensions" = (0, 0, 0, 0),
+ *,
+ style: Union[str, Style] = "none",
+ expand: bool = True,
+ ):
+ self.renderable = renderable
+ self.top, self.right, self.bottom, self.left = self.unpack(pad)
+ self.style = style
+ self.expand = expand
+
+ @classmethod
+ def indent(cls, renderable: "RenderableType", level: int) -> "Padding":
+ """Make padding instance to render an indent.
+
+ Args:
+ renderable (RenderableType): String or other renderable.
+ level (int): Number of characters to indent.
+
+ Returns:
+ Padding: A Padding instance.
+ """
+
+ return Padding(renderable, pad=(0, 0, 0, level), expand=False)
+
+ @staticmethod
+ def unpack(pad: "PaddingDimensions") -> Tuple[int, int, int, int]:
+ """Unpack padding specified in CSS style."""
+ if isinstance(pad, int):
+ return (pad, pad, pad, pad)
+ if len(pad) == 1:
+ _pad = pad[0]
+ return (_pad, _pad, _pad, _pad)
+ if len(pad) == 2:
+ pad_top, pad_right = cast(Tuple[int, int], pad)
+ return (pad_top, pad_right, pad_top, pad_right)
+ if len(pad) == 4:
+ top, right, bottom, left = cast(Tuple[int, int, int, int], pad)
+ return (top, right, bottom, left)
+ raise ValueError(f"1, 2 or 4 integers required for padding; {len(pad)} given")
+
+ def __repr__(self) -> str:
+ return f"Padding({self.renderable!r}, ({self.top},{self.right},{self.bottom},{self.left}))"
+
+ def __rich_console__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "RenderResult":
+ style = console.get_style(self.style)
+ if self.expand:
+ width = options.max_width
+ else:
+ width = min(
+ Measurement.get(console, options, self.renderable).maximum
+ + self.left
+ + self.right,
+ options.max_width,
+ )
+ render_options = options.update_width(width - self.left - self.right)
+ if render_options.height is not None:
+ render_options = render_options.update_height(
+ height=render_options.height - self.top - self.bottom
+ )
+ lines = console.render_lines(
+ self.renderable, render_options, style=style, pad=True
+ )
+ _Segment = Segment
+
+ left = _Segment(" " * self.left, style) if self.left else None
+ right = (
+ [_Segment(f'{" " * self.right}', style), _Segment.line()]
+ if self.right
+ else [_Segment.line()]
+ )
+ blank_line: Optional[List[Segment]] = None
+ if self.top:
+ blank_line = [_Segment(f'{" " * width}\n', style)]
+ yield from blank_line * self.top
+ if left:
+ for line in lines:
+ yield left
+ yield from line
+ yield from right
+ else:
+ for line in lines:
+ yield from line
+ yield from right
+ if self.bottom:
+ blank_line = blank_line or [_Segment(f'{" " * width}\n', style)]
+ yield from blank_line * self.bottom
+
+ def __rich_measure__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "Measurement":
+ max_width = options.max_width
+ extra_width = self.left + self.right
+ if max_width - extra_width < 1:
+ return Measurement(max_width, max_width)
+ measure_min, measure_max = Measurement.get(console, options, self.renderable)
+ measurement = Measurement(measure_min + extra_width, measure_max + extra_width)
+ measurement = measurement.with_maximum(max_width)
+ return measurement
+
+
+if __name__ == "__main__": # pragma: no cover
+ from pip._vendor.rich import print
+
+ print(Padding("Hello, World", (2, 4), style="on blue"))
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/pager.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/pager.py
new file mode 100644
index 0000000..dbfb973
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/pager.py
@@ -0,0 +1,34 @@
+from abc import ABC, abstractmethod
+from typing import Any, Callable
+
+
+class Pager(ABC):
+ """Base class for a pager."""
+
+ @abstractmethod
+ def show(self, content: str) -> None:
+ """Show content in pager.
+
+ Args:
+ content (str): Content to be displayed.
+ """
+
+
+class SystemPager(Pager):
+ """Uses the pager installed on the system."""
+
+ def _pager(self, content: str) -> Any: # pragma: no cover
+ return __import__("pydoc").pager(content)
+
+ def show(self, content: str) -> None:
+ """Use the same pager used by pydoc."""
+ self._pager(content)
+
+
+if __name__ == "__main__": # pragma: no cover
+ from .__main__ import make_test_card
+ from .console import Console
+
+ console = Console()
+ with console.pager(styles=True):
+ console.print(make_test_card())
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/palette.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/palette.py
new file mode 100644
index 0000000..fa0c4dd
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/palette.py
@@ -0,0 +1,100 @@
+from math import sqrt
+from functools import lru_cache
+from typing import Sequence, Tuple, TYPE_CHECKING
+
+from .color_triplet import ColorTriplet
+
+if TYPE_CHECKING:
+ from pip._vendor.rich.table import Table
+
+
+class Palette:
+ """A palette of available colors."""
+
+ def __init__(self, colors: Sequence[Tuple[int, int, int]]):
+ self._colors = colors
+
+ def __getitem__(self, number: int) -> ColorTriplet:
+ return ColorTriplet(*self._colors[number])
+
+ def __rich__(self) -> "Table":
+ from pip._vendor.rich.color import Color
+ from pip._vendor.rich.style import Style
+ from pip._vendor.rich.text import Text
+ from pip._vendor.rich.table import Table
+
+ table = Table(
+ "index",
+ "RGB",
+ "Color",
+ title="Palette",
+ caption=f"{len(self._colors)} colors",
+ highlight=True,
+ caption_justify="right",
+ )
+ for index, color in enumerate(self._colors):
+ table.add_row(
+ str(index),
+ repr(color),
+ Text(" " * 16, style=Style(bgcolor=Color.from_rgb(*color))),
+ )
+ return table
+
+ # This is somewhat inefficient and needs caching
+ @lru_cache(maxsize=1024)
+ def match(self, color: Tuple[int, int, int]) -> int:
+ """Find a color from a palette that most closely matches a given color.
+
+ Args:
+            color (Tuple[int, int, int]): RGB components in range 0 to 255.
+
+ Returns:
+            int: Index of the closest matching color.
+ """
+ red1, green1, blue1 = color
+ _sqrt = sqrt
+ get_color = self._colors.__getitem__
+
+ def get_color_distance(index: int) -> float:
+ """Get the distance to a color."""
+ red2, green2, blue2 = get_color(index)
+ red_mean = (red1 + red2) // 2
+ red = red1 - red2
+ green = green1 - green2
+ blue = blue1 - blue2
+ return _sqrt(
+ (((512 + red_mean) * red * red) >> 8)
+ + 4 * green * green
+ + (((767 - red_mean) * blue * blue) >> 8)
+ )
+
+ min_index = min(range(len(self._colors)), key=get_color_distance)
+ return min_index
+
+
+if __name__ == "__main__": # pragma: no cover
+ import colorsys
+ from typing import Iterable
+ from pip._vendor.rich.color import Color
+ from pip._vendor.rich.console import Console, ConsoleOptions
+ from pip._vendor.rich.segment import Segment
+ from pip._vendor.rich.style import Style
+
+ class ColorBox:
+ def __rich_console__(
+ self, console: Console, options: ConsoleOptions
+ ) -> Iterable[Segment]:
+ height = console.size.height - 3
+ for y in range(0, height):
+ for x in range(options.max_width):
+ h = x / options.max_width
+ l = y / (height + 1)
+ r1, g1, b1 = colorsys.hls_to_rgb(h, l, 1.0)
+ r2, g2, b2 = colorsys.hls_to_rgb(h, l + (1 / height / 2), 1.0)
+ bgcolor = Color.from_rgb(r1 * 255, g1 * 255, b1 * 255)
+ color = Color.from_rgb(r2 * 255, g2 * 255, b2 * 255)
+ yield Segment("▄", Style(color=color, bgcolor=bgcolor))
+ yield Segment.line()
+
+ console = Console()
+ console.print(ColorBox())
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/panel.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/panel.py
new file mode 100644
index 0000000..151fe5f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/panel.py
@@ -0,0 +1,250 @@
+from typing import Optional, TYPE_CHECKING
+
+from .box import Box, ROUNDED
+
+from .align import AlignMethod
+from .jupyter import JupyterMixin
+from .measure import Measurement, measure_renderables
+from .padding import Padding, PaddingDimensions
+from .style import StyleType
+from .text import Text, TextType
+from .segment import Segment
+
+if TYPE_CHECKING:
+ from .console import Console, ConsoleOptions, RenderableType, RenderResult
+
+
+class Panel(JupyterMixin):
+ """A console renderable that draws a border around its contents.
+
+ Example:
+ >>> console.print(Panel("Hello, World!"))
+
+ Args:
+ renderable (RenderableType): A console renderable object.
+        box (Box, optional): A Box instance that defines the look of the border (see :ref:`appendix_box`).
+            Defaults to box.ROUNDED.
+        safe_box (bool, optional): Disable box characters that don't display on Windows legacy terminal with *raster* fonts. Defaults to True.
+ expand (bool, optional): If True the panel will stretch to fill the console
+ width, otherwise it will be sized to fit the contents. Defaults to True.
+ style (str, optional): The style of the panel (border and contents). Defaults to "none".
+ border_style (str, optional): The style of the border. Defaults to "none".
+ width (Optional[int], optional): Optional width of panel. Defaults to None to auto-detect.
+ height (Optional[int], optional): Optional height of panel. Defaults to None to auto-detect.
+        padding (Optional[PaddingDimensions]): Optional padding around renderable. Defaults to (0, 1).
+ highlight (bool, optional): Enable automatic highlighting of panel title (if str). Defaults to False.
+ """
+
+ def __init__(
+ self,
+ renderable: "RenderableType",
+ box: Box = ROUNDED,
+ *,
+ title: Optional[TextType] = None,
+ title_align: AlignMethod = "center",
+ subtitle: Optional[TextType] = None,
+ subtitle_align: AlignMethod = "center",
+ safe_box: Optional[bool] = None,
+ expand: bool = True,
+ style: StyleType = "none",
+ border_style: StyleType = "none",
+ width: Optional[int] = None,
+ height: Optional[int] = None,
+ padding: PaddingDimensions = (0, 1),
+ highlight: bool = False,
+ ) -> None:
+ self.renderable = renderable
+ self.box = box
+ self.title = title
+ self.title_align: AlignMethod = title_align
+ self.subtitle = subtitle
+ self.subtitle_align = subtitle_align
+ self.safe_box = safe_box
+ self.expand = expand
+ self.style = style
+ self.border_style = border_style
+ self.width = width
+ self.height = height
+ self.padding = padding
+ self.highlight = highlight
+
+ @classmethod
+ def fit(
+ cls,
+ renderable: "RenderableType",
+ box: Box = ROUNDED,
+ *,
+ title: Optional[TextType] = None,
+ title_align: AlignMethod = "center",
+ subtitle: Optional[TextType] = None,
+ subtitle_align: AlignMethod = "center",
+ safe_box: Optional[bool] = None,
+ style: StyleType = "none",
+ border_style: StyleType = "none",
+ width: Optional[int] = None,
+ padding: PaddingDimensions = (0, 1),
+ ) -> "Panel":
+ """An alternative constructor that sets expand=False."""
+ return cls(
+ renderable,
+ box,
+ title=title,
+ title_align=title_align,
+ subtitle=subtitle,
+ subtitle_align=subtitle_align,
+ safe_box=safe_box,
+ style=style,
+ border_style=border_style,
+ width=width,
+ padding=padding,
+ expand=False,
+ )
+
+ @property
+ def _title(self) -> Optional[Text]:
+ if self.title:
+ title_text = (
+ Text.from_markup(self.title)
+ if isinstance(self.title, str)
+ else self.title.copy()
+ )
+ title_text.end = ""
+ title_text.plain = title_text.plain.replace("\n", " ")
+ title_text.no_wrap = True
+ title_text.expand_tabs()
+ title_text.pad(1)
+ return title_text
+ return None
+
+ @property
+ def _subtitle(self) -> Optional[Text]:
+ if self.subtitle:
+ subtitle_text = (
+ Text.from_markup(self.subtitle)
+ if isinstance(self.subtitle, str)
+ else self.subtitle.copy()
+ )
+ subtitle_text.end = ""
+ subtitle_text.plain = subtitle_text.plain.replace("\n", " ")
+ subtitle_text.no_wrap = True
+ subtitle_text.expand_tabs()
+ subtitle_text.pad(1)
+ return subtitle_text
+ return None
+
+ def __rich_console__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "RenderResult":
+ _padding = Padding.unpack(self.padding)
+ renderable = (
+ Padding(self.renderable, _padding) if any(_padding) else self.renderable
+ )
+ style = console.get_style(self.style)
+ border_style = style + console.get_style(self.border_style)
+ width = (
+ options.max_width
+ if self.width is None
+ else min(options.max_width, self.width)
+ )
+
+ safe_box: bool = console.safe_box if self.safe_box is None else self.safe_box
+ box = self.box.substitute(options, safe=safe_box)
+
+ title_text = self._title
+ if title_text is not None:
+ title_text.style = border_style
+
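+        # Interior width is the panel width minus the two border cells; when not
+        # expanding, size to the measured content instead (widened below if the
+        # title needs more room).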
+ child_width = (
+ width - 2
+ if self.expand
+ else console.measure(
+ renderable, options=options.update_width(width - 2)
+ ).maximum
+ )
+ child_height = self.height or options.height or None
+ if child_height:
+ child_height -= 2
+ if title_text is not None:
+ child_width = min(
+ options.max_width - 2, max(child_width, title_text.cell_len + 2)
+ )
+
+ width = child_width + 2
+ child_options = options.update(
+ width=child_width, height=child_height, highlight=self.highlight
+ )
+ lines = console.render_lines(renderable, child_options, style=style)
+
+ line_start = Segment(box.mid_left, border_style)
+ line_end = Segment(f"{box.mid_right}", border_style)
+ new_line = Segment.line()
+ if title_text is None or width <= 4:
+ yield Segment(box.get_top([width - 2]), border_style)
+ else:
+ title_text.align(self.title_align, width - 4, character=box.top)
+ yield Segment(box.top_left + box.top, border_style)
+ yield from console.render(title_text)
+ yield Segment(box.top + box.top_right, border_style)
+
+ yield new_line
+ for line in lines:
+ yield line_start
+ yield from line
+ yield line_end
+ yield new_line
+
+ subtitle_text = self._subtitle
+ if subtitle_text is not None:
+ subtitle_text.style = border_style
+
+ if subtitle_text is None or width <= 4:
+ yield Segment(box.get_bottom([width - 2]), border_style)
+ else:
+ subtitle_text.align(self.subtitle_align, width - 4, character=box.bottom)
+ yield Segment(box.bottom_left + box.bottom, border_style)
+ yield from console.render(subtitle_text)
+ yield Segment(box.bottom + box.bottom_right, border_style)
+
+ yield new_line
+
+ def __rich_measure__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "Measurement":
+ _title = self._title
+ _, right, _, left = Padding.unpack(self.padding)
+ padding = left + right
+ renderables = [self.renderable, _title] if _title else [self.renderable]
+
+ if self.width is None:
+ width = (
+ measure_renderables(
+ console,
+ options.update_width(options.max_width - padding - 2),
+ renderables,
+ ).maximum
+ + padding
+ + 2
+ )
+ else:
+ width = self.width
+ return Measurement(width, width)
+
+
+if __name__ == "__main__": # pragma: no cover
+ from .console import Console
+
+ c = Console()
+
+ from .padding import Padding
+ from .box import ROUNDED, DOUBLE
+
+ p = Panel(
+ "Hello, World!",
+ title="rich.Panel",
+ style="white on blue",
+ box=DOUBLE,
+ padding=1,
+ )
+
+ c.print()
+ c.print(p)
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/pretty.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/pretty.py
new file mode 100644
index 0000000..606ee33
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/pretty.py
@@ -0,0 +1,903 @@
+import builtins
+import dataclasses
+import inspect
+import os
+import re
+import sys
+from array import array
+from collections import Counter, UserDict, UserList, defaultdict, deque
+from dataclasses import dataclass, fields, is_dataclass
+from inspect import isclass
+from itertools import islice
+from types import MappingProxyType
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Callable,
+ DefaultDict,
+ Dict,
+ Iterable,
+ List,
+ Optional,
+ Set,
+ Tuple,
+ Union,
+)
+
+from pip._vendor.rich.repr import RichReprResult
+
+try:
+ import attr as _attr_module
+except ImportError: # pragma: no cover
+ _attr_module = None # type: ignore
+
+
+from . import get_console
+from ._loop import loop_last
+from ._pick import pick_bool
+from .abc import RichRenderable
+from .cells import cell_len
+from .highlighter import ReprHighlighter
+from .jupyter import JupyterMixin, JupyterRenderable
+from .measure import Measurement
+from .text import Text
+
+if TYPE_CHECKING:
+ from .console import (
+ Console,
+ ConsoleOptions,
+ HighlighterType,
+ JustifyMethod,
+ OverflowMethod,
+ RenderResult,
+ )
+
+
+def _is_attr_object(obj: Any) -> bool:
+ """Check if an object was created with attrs module."""
+ return _attr_module is not None and _attr_module.has(type(obj))
+
+
+def _get_attr_fields(obj: Any) -> Iterable["_attr_module.Attribute[Any]"]:
+ """Get fields for an attrs object."""
+ return _attr_module.fields(type(obj)) if _attr_module is not None else []
+
+
+def _is_dataclass_repr(obj: object) -> bool:
+ """Check if an instance of a dataclass contains the default repr.
+
+ Args:
+ obj (object): A dataclass instance.
+
+ Returns:
+ bool: True if the default repr is used, False if there is a custom repr.
+ """
+ # Digging in to a lot of internals here
+ # Catching all exceptions in case something is missing on a non CPython implementation
+ try:
+ return obj.__repr__.__code__.co_filename == dataclasses.__file__
+ except Exception: # pragma: no coverage
+ return False
+
+
+def _ipy_display_hook(
+ value: Any,
+ console: Optional["Console"] = None,
+ overflow: "OverflowMethod" = "ignore",
+ crop: bool = False,
+ indent_guides: bool = False,
+ max_length: Optional[int] = None,
+ max_string: Optional[int] = None,
+ expand_all: bool = False,
+) -> None:
+ from .console import ConsoleRenderable # needed here to prevent circular import
+
+ # always skip rich generated jupyter renderables or None values
+ if isinstance(value, JupyterRenderable) or value is None:
+ return
+
+ console = console or get_console()
+ if console.is_jupyter:
+ # Delegate rendering to IPython if the object (and IPython) supports it
+ # https://ipython.readthedocs.io/en/stable/config/integrating.html#rich-display
+ ipython_repr_methods = [
+ "_repr_html_",
+ "_repr_markdown_",
+ "_repr_json_",
+ "_repr_latex_",
+ "_repr_jpeg_",
+ "_repr_png_",
+ "_repr_svg_",
+ "_repr_mimebundle_",
+ ]
+ for repr_method in ipython_repr_methods:
+ method = getattr(value, repr_method, None)
+ if inspect.ismethod(method):
+ # Calling the method ourselves isn't ideal. The interface for the `_repr_*_` methods
+ # specifies that if they return None, then they should not be rendered
+ # by the notebook.
+ try:
+ repr_result = method()
+ except Exception:
+ continue # If the method raises, treat it as if it doesn't exist, try any others
+ if repr_result is not None:
+ return # Delegate rendering to IPython
+
+ # certain renderables should start on a new line
+ if isinstance(value, ConsoleRenderable):
+ console.line()
+
+ console.print(
+ value
+ if isinstance(value, RichRenderable)
+ else Pretty(
+ value,
+ overflow=overflow,
+ indent_guides=indent_guides,
+ max_length=max_length,
+ max_string=max_string,
+ expand_all=expand_all,
+ margin=12,
+ ),
+ crop=crop,
+ new_line_start=True,
+ )
+
+
+def install(
+ console: Optional["Console"] = None,
+ overflow: "OverflowMethod" = "ignore",
+ crop: bool = False,
+ indent_guides: bool = False,
+ max_length: Optional[int] = None,
+ max_string: Optional[int] = None,
+ expand_all: bool = False,
+) -> None:
+ """Install automatic pretty printing in the Python REPL.
+
+ Args:
+ console (Console, optional): Console instance or ``None`` to use global console. Defaults to None.
+ overflow (Optional[OverflowMethod], optional): Overflow method. Defaults to "ignore".
+ crop (Optional[bool], optional): Enable cropping of long lines. Defaults to False.
+ indent_guides (bool, optional): Enable indentation guides. Defaults to False.
+ max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
+ Defaults to None.
+ max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
+ expand_all (bool, optional): Expand all containers. Defaults to False.
+ """
+ from pip._vendor.rich import get_console
+
+ console = console or get_console()
+ assert console is not None
+
+ def display_hook(value: Any) -> None:
+ """Replacement sys.displayhook which prettifies objects with Rich."""
+ if value is not None:
+ assert console is not None
+ builtins._ = None # type: ignore
+ console.print(
+ value
+ if isinstance(value, RichRenderable)
+ else Pretty(
+ value,
+ overflow=overflow,
+ indent_guides=indent_guides,
+ max_length=max_length,
+ max_string=max_string,
+ expand_all=expand_all,
+ ),
+ crop=crop,
+ )
+ builtins._ = value # type: ignore
+
+ try: # pragma: no cover
+ ip = get_ipython() # type: ignore
+ from IPython.core.formatters import BaseFormatter
+
+ class RichFormatter(BaseFormatter): # type: ignore
+ pprint: bool = True
+
+ def __call__(self, value: Any) -> Any:
+ if self.pprint:
+ return _ipy_display_hook(
+ value,
+ console=get_console(),
+ overflow=overflow,
+ indent_guides=indent_guides,
+ max_length=max_length,
+ max_string=max_string,
+ expand_all=expand_all,
+ )
+ else:
+ return repr(value)
+
+ # replace plain text formatter with rich formatter
+ rich_formatter = RichFormatter()
+ ip.display_formatter.formatters["text/plain"] = rich_formatter
+ except Exception:
+ sys.displayhook = display_hook
+
+
+class Pretty(JupyterMixin):
+ """A rich renderable that pretty prints an object.
+
+ Args:
+ _object (Any): An object to pretty print.
+ highlighter (HighlighterType, optional): Highlighter object to apply to result, or None for ReprHighlighter. Defaults to None.
+ indent_size (int, optional): Number of spaces in indent. Defaults to 4.
+ justify (JustifyMethod, optional): Justify method, or None for default. Defaults to None.
+ overflow (OverflowMethod, optional): Overflow method, or None for default. Defaults to None.
+ no_wrap (Optional[bool], optional): Disable word wrapping. Defaults to False.
+ indent_guides (bool, optional): Enable indentation guides. Defaults to False.
+ max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
+ Defaults to None.
+ max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
+ max_depth (int, optional): Maximum depth of nested data structures, or None for no maximum. Defaults to None.
+ expand_all (bool, optional): Expand all containers. Defaults to False.
+        margin (int, optional): Subtract a margin from width to force containers to expand earlier. Defaults to 0.
+ insert_line (bool, optional): Insert a new line if the output has multiple new lines. Defaults to False.
+ """
+
+ def __init__(
+ self,
+ _object: Any,
+ highlighter: Optional["HighlighterType"] = None,
+ *,
+ indent_size: int = 4,
+ justify: Optional["JustifyMethod"] = None,
+ overflow: Optional["OverflowMethod"] = None,
+ no_wrap: Optional[bool] = False,
+ indent_guides: bool = False,
+ max_length: Optional[int] = None,
+ max_string: Optional[int] = None,
+ max_depth: Optional[int] = None,
+ expand_all: bool = False,
+ margin: int = 0,
+ insert_line: bool = False,
+ ) -> None:
+ self._object = _object
+ self.highlighter = highlighter or ReprHighlighter()
+ self.indent_size = indent_size
+ self.justify: Optional["JustifyMethod"] = justify
+ self.overflow: Optional["OverflowMethod"] = overflow
+ self.no_wrap = no_wrap
+ self.indent_guides = indent_guides
+ self.max_length = max_length
+ self.max_string = max_string
+ self.max_depth = max_depth
+ self.expand_all = expand_all
+ self.margin = margin
+ self.insert_line = insert_line
+
+ def __rich_console__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "RenderResult":
+ pretty_str = pretty_repr(
+ self._object,
+ max_width=options.max_width - self.margin,
+ indent_size=self.indent_size,
+ max_length=self.max_length,
+ max_string=self.max_string,
+ max_depth=self.max_depth,
+ expand_all=self.expand_all,
+ )
+ pretty_text = Text(
+ pretty_str,
+ justify=self.justify or options.justify,
+ overflow=self.overflow or options.overflow,
+ no_wrap=pick_bool(self.no_wrap, options.no_wrap),
+ style="pretty",
+ )
+ pretty_text = (
+ self.highlighter(pretty_text)
+ if pretty_text
+ else Text(
+ f"{type(self._object)}.__repr__ returned empty string",
+ style="dim italic",
+ )
+ )
+ if self.indent_guides and not options.ascii_only:
+ pretty_text = pretty_text.with_indent_guides(
+ self.indent_size, style="repr.indent"
+ )
+ if self.insert_line and "\n" in pretty_text:
+ yield ""
+ yield pretty_text
+
+ def __rich_measure__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "Measurement":
+ pretty_str = pretty_repr(
+ self._object,
+ max_width=options.max_width,
+ indent_size=self.indent_size,
+ max_length=self.max_length,
+ max_string=self.max_string,
+ )
+ text_width = (
+ max(cell_len(line) for line in pretty_str.splitlines()) if pretty_str else 0
+ )
+ return Measurement(text_width, text_width)
+
+
+def _get_braces_for_defaultdict(_object: DefaultDict[Any, Any]) -> Tuple[str, str, str]:
+ return (
+ f"defaultdict({_object.default_factory!r}, {{",
+ "})",
+ f"defaultdict({_object.default_factory!r}, {{}})",
+ )
+
+
+def _get_braces_for_array(_object: "array[Any]") -> Tuple[str, str, str]:
+ return (f"array({_object.typecode!r}, [", "])", "array({_object.typecode!r})")
+
+
+_BRACES: Dict[type, Callable[[Any], Tuple[str, str, str]]] = {
+ os._Environ: lambda _object: ("environ({", "})", "environ({})"),
+ array: _get_braces_for_array,
+ defaultdict: _get_braces_for_defaultdict,
+ Counter: lambda _object: ("Counter({", "})", "Counter()"),
+ deque: lambda _object: ("deque([", "])", "deque()"),
+ dict: lambda _object: ("{", "}", "{}"),
+ UserDict: lambda _object: ("{", "}", "{}"),
+ frozenset: lambda _object: ("frozenset({", "})", "frozenset()"),
+ list: lambda _object: ("[", "]", "[]"),
+ UserList: lambda _object: ("[", "]", "[]"),
+ set: lambda _object: ("{", "}", "set()"),
+ tuple: lambda _object: ("(", ")", "()"),
+ MappingProxyType: lambda _object: ("mappingproxy({", "})", "mappingproxy({})"),
+}
+_CONTAINERS = tuple(_BRACES.keys())
+_MAPPING_CONTAINERS = (dict, os._Environ, MappingProxyType, UserDict)
+
+
+def is_expandable(obj: Any) -> bool:
+ """Check if an object may be expanded by pretty print."""
+ return (
+ isinstance(obj, _CONTAINERS)
+ or (is_dataclass(obj))
+ or (hasattr(obj, "__rich_repr__"))
+ or _is_attr_object(obj)
+ ) and not isclass(obj)
+
+
+@dataclass
+class Node:
+ """A node in a repr tree. May be atomic or a container."""
+
+ key_repr: str = ""
+ value_repr: str = ""
+ open_brace: str = ""
+ close_brace: str = ""
+ empty: str = ""
+ last: bool = False
+ is_tuple: bool = False
+ children: Optional[List["Node"]] = None
+ key_separator = ": "
+ separator: str = ", "
+
+ def iter_tokens(self) -> Iterable[str]:
+ """Generate tokens for this node."""
+ if self.key_repr:
+ yield self.key_repr
+ yield self.key_separator
+ if self.value_repr:
+ yield self.value_repr
+ elif self.children is not None:
+ if self.children:
+ yield self.open_brace
+ if self.is_tuple and len(self.children) == 1:
+ yield from self.children[0].iter_tokens()
+ yield ","
+ else:
+ for child in self.children:
+ yield from child.iter_tokens()
+ if not child.last:
+ yield self.separator
+ yield self.close_brace
+ else:
+ yield self.empty
+
+ def check_length(self, start_length: int, max_length: int) -> bool:
+ """Check the length fits within a limit.
+
+ Args:
+ start_length (int): Starting length of the line (indent, prefix, suffix).
+ max_length (int): Maximum length.
+
+ Returns:
+ bool: True if the node can be rendered within max length, otherwise False.
+ """
+ total_length = start_length
+ for token in self.iter_tokens():
+ total_length += cell_len(token)
+ if total_length > max_length:
+ return False
+ return True
+
+ def __str__(self) -> str:
+ repr_text = "".join(self.iter_tokens())
+ return repr_text
+
+ def render(
+ self, max_width: int = 80, indent_size: int = 4, expand_all: bool = False
+ ) -> str:
+ """Render the node to a pretty repr.
+
+ Args:
+ max_width (int, optional): Maximum width of the repr. Defaults to 80.
+ indent_size (int, optional): Size of indents. Defaults to 4.
+ expand_all (bool, optional): Expand all levels. Defaults to False.
+
+ Returns:
+ str: A repr string of the original object.
+ """
+ lines = [_Line(node=self, is_root=True)]
+ line_no = 0
+ while line_no < len(lines):
+ line = lines[line_no]
+ if line.expandable and not line.expanded:
+ if expand_all or not line.check_length(max_width):
+ lines[line_no : line_no + 1] = line.expand(indent_size)
+ line_no += 1
+
+ repr_str = "\n".join(str(line) for line in lines)
+ return repr_str
+
+
+@dataclass
+class _Line:
+ """A line in repr output."""
+
+ parent: Optional["_Line"] = None
+ is_root: bool = False
+ node: Optional[Node] = None
+ text: str = ""
+ suffix: str = ""
+ whitespace: str = ""
+ expanded: bool = False
+ last: bool = False
+
+ @property
+ def expandable(self) -> bool:
+ """Check if the line may be expanded."""
+ return bool(self.node is not None and self.node.children)
+
+ def check_length(self, max_length: int) -> bool:
+ """Check this line fits within a given number of cells."""
+ start_length = (
+ len(self.whitespace) + cell_len(self.text) + cell_len(self.suffix)
+ )
+ assert self.node is not None
+ return self.node.check_length(start_length, max_length)
+
+ def expand(self, indent_size: int) -> Iterable["_Line"]:
+ """Expand this line by adding children on their own line."""
+ node = self.node
+ assert node is not None
+ whitespace = self.whitespace
+ assert node.children
+ if node.key_repr:
+ new_line = yield _Line(
+ text=f"{node.key_repr}{node.key_separator}{node.open_brace}",
+ whitespace=whitespace,
+ )
+ else:
+ new_line = yield _Line(text=node.open_brace, whitespace=whitespace)
+ child_whitespace = self.whitespace + " " * indent_size
+ tuple_of_one = node.is_tuple and len(node.children) == 1
+ for last, child in loop_last(node.children):
+ separator = "," if tuple_of_one else node.separator
+ line = _Line(
+ parent=new_line,
+ node=child,
+ whitespace=child_whitespace,
+ suffix=separator,
+ last=last and not tuple_of_one,
+ )
+ yield line
+
+ yield _Line(
+ text=node.close_brace,
+ whitespace=whitespace,
+ suffix=self.suffix,
+ last=self.last,
+ )
+
+ def __str__(self) -> str:
+ if self.last:
+ return f"{self.whitespace}{self.text}{self.node or ''}"
+ else:
+ return (
+ f"{self.whitespace}{self.text}{self.node or ''}{self.suffix.rstrip()}"
+ )
+
+
+def traverse(
+ _object: Any,
+ max_length: Optional[int] = None,
+ max_string: Optional[int] = None,
+ max_depth: Optional[int] = None,
+) -> Node:
+ """Traverse object and generate a tree.
+
+ Args:
+ _object (Any): Object to be traversed.
+ max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
+ Defaults to None.
+ max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.
+ Defaults to None.
+ max_depth (int, optional): Maximum depth of data structures, or None for no maximum.
+ Defaults to None.
+
+ Returns:
+ Node: The root of a tree structure which can be used to render a pretty repr.
+ """
+
+ def to_repr(obj: Any) -> str:
+ """Get repr string for an object, but catch errors."""
+ if (
+ max_string is not None
+ and isinstance(obj, (bytes, str))
+ and len(obj) > max_string
+ ):
+ truncated = len(obj) - max_string
+ obj_repr = f"{obj[:max_string]!r}+{truncated}"
+ else:
+ try:
+ obj_repr = repr(obj)
+ except Exception as error:
+ obj_repr = f""
+ return obj_repr
+
+ visited_ids: Set[int] = set()
+ push_visited = visited_ids.add
+ pop_visited = visited_ids.remove
+
+ def _traverse(obj: Any, root: bool = False, depth: int = 0) -> Node:
+ """Walk the object depth first."""
+
+ obj_type = type(obj)
+ py_version = (sys.version_info.major, sys.version_info.minor)
+ children: List[Node]
+ reached_max_depth = max_depth is not None and depth >= max_depth
+
+ def iter_rich_args(rich_args: Any) -> Iterable[Union[Any, Tuple[str, Any]]]:
+ for arg in rich_args:
+ if isinstance(arg, tuple):
+ if len(arg) == 3:
+ key, child, default = arg
+ if default == child:
+ continue
+ yield key, child
+ elif len(arg) == 2:
+ key, child = arg
+ yield key, child
+ elif len(arg) == 1:
+ yield arg[0]
+ else:
+ yield arg
+
+ try:
+ fake_attributes = hasattr(
+ obj, "awehoi234_wdfjwljet234_234wdfoijsdfmmnxpi492"
+ )
+ except Exception:
+ fake_attributes = False
+
+ rich_repr_result: Optional[RichReprResult] = None
+ if not fake_attributes:
+ try:
+ if hasattr(obj, "__rich_repr__") and not isclass(obj):
+ rich_repr_result = obj.__rich_repr__()
+ except Exception:
+ pass
+
+ if rich_repr_result is not None:
+ angular = getattr(obj.__rich_repr__, "angular", False)
+ args = list(iter_rich_args(rich_repr_result))
+ class_name = obj.__class__.__name__
+
+ if args:
+ children = []
+ append = children.append
+
+ if reached_max_depth:
+ node = Node(value_repr=f"...")
+ else:
+ if angular:
+ node = Node(
+ open_brace=f"<{class_name} ",
+ close_brace=">",
+ children=children,
+ last=root,
+ separator=" ",
+ )
+ else:
+ node = Node(
+ open_brace=f"{class_name}(",
+ close_brace=")",
+ children=children,
+ last=root,
+ )
+ for last, arg in loop_last(args):
+ if isinstance(arg, tuple):
+ key, child = arg
+ child_node = _traverse(child, depth=depth + 1)
+ child_node.last = last
+ child_node.key_repr = key
+ child_node.key_separator = "="
+ append(child_node)
+ else:
+ child_node = _traverse(arg, depth=depth + 1)
+ child_node.last = last
+ append(child_node)
+ else:
+ node = Node(
+ value_repr=f"<{class_name}>" if angular else f"{class_name}()",
+ children=[],
+ last=root,
+ )
+ elif _is_attr_object(obj) and not fake_attributes:
+ children = []
+ append = children.append
+
+ attr_fields = _get_attr_fields(obj)
+ if attr_fields:
+ if reached_max_depth:
+ node = Node(value_repr=f"...")
+ else:
+ node = Node(
+ open_brace=f"{obj.__class__.__name__}(",
+ close_brace=")",
+ children=children,
+ last=root,
+ )
+
+ def iter_attrs() -> Iterable[
+ Tuple[str, Any, Optional[Callable[[Any], str]]]
+ ]:
+ """Iterate over attr fields and values."""
+ for attr in attr_fields:
+ if attr.repr:
+ try:
+ value = getattr(obj, attr.name)
+ except Exception as error:
+ # Can happen, albeit rarely
+ yield (attr.name, error, None)
+ else:
+ yield (
+ attr.name,
+ value,
+ attr.repr if callable(attr.repr) else None,
+ )
+
+ for last, (name, value, repr_callable) in loop_last(iter_attrs()):
+ if repr_callable:
+ child_node = Node(value_repr=str(repr_callable(value)))
+ else:
+ child_node = _traverse(value, depth=depth + 1)
+ child_node.last = last
+ child_node.key_repr = name
+ child_node.key_separator = "="
+ append(child_node)
+ else:
+ node = Node(
+ value_repr=f"{obj.__class__.__name__}()", children=[], last=root
+ )
+
+ elif (
+ is_dataclass(obj)
+ and not isinstance(obj, type)
+ and not fake_attributes
+ and (_is_dataclass_repr(obj) or py_version == (3, 6))
+ ):
+ obj_id = id(obj)
+ if obj_id in visited_ids:
+ # Recursion detected
+ return Node(value_repr="...")
+ push_visited(obj_id)
+
+ children = []
+ append = children.append
+ if reached_max_depth:
+ node = Node(value_repr=f"...")
+ else:
+ node = Node(
+ open_brace=f"{obj.__class__.__name__}(",
+ close_brace=")",
+ children=children,
+ last=root,
+ )
+
+ for last, field in loop_last(
+ field for field in fields(obj) if field.repr
+ ):
+ child_node = _traverse(getattr(obj, field.name), depth=depth + 1)
+ child_node.key_repr = field.name
+ child_node.last = last
+ child_node.key_separator = "="
+ append(child_node)
+
+ pop_visited(obj_id)
+
+ elif isinstance(obj, _CONTAINERS):
+ for container_type in _CONTAINERS:
+ if isinstance(obj, container_type):
+ obj_type = container_type
+ break
+
+ obj_id = id(obj)
+ if obj_id in visited_ids:
+ # Recursion detected
+ return Node(value_repr="...")
+ push_visited(obj_id)
+
+ open_brace, close_brace, empty = _BRACES[obj_type](obj)
+
+ if reached_max_depth:
+ node = Node(value_repr=f"...", last=root)
+ elif obj_type.__repr__ != type(obj).__repr__:
+ node = Node(value_repr=to_repr(obj), last=root)
+ elif obj:
+ children = []
+ node = Node(
+ open_brace=open_brace,
+ close_brace=close_brace,
+ children=children,
+ last=root,
+ )
+ append = children.append
+ num_items = len(obj)
+ last_item_index = num_items - 1
+
+ if isinstance(obj, _MAPPING_CONTAINERS):
+ iter_items = iter(obj.items())
+ if max_length is not None:
+ iter_items = islice(iter_items, max_length)
+ for index, (key, child) in enumerate(iter_items):
+ child_node = _traverse(child, depth=depth + 1)
+ child_node.key_repr = to_repr(key)
+ child_node.last = index == last_item_index
+ append(child_node)
+ else:
+ iter_values = iter(obj)
+ if max_length is not None:
+ iter_values = islice(iter_values, max_length)
+ for index, child in enumerate(iter_values):
+ child_node = _traverse(child, depth=depth + 1)
+ child_node.last = index == last_item_index
+ append(child_node)
+ if max_length is not None and num_items > max_length:
+ append(Node(value_repr=f"... +{num_items-max_length}", last=True))
+ else:
+ node = Node(empty=empty, children=[], last=root)
+
+ pop_visited(obj_id)
+ else:
+ node = Node(value_repr=to_repr(obj), last=root)
+ node.is_tuple = isinstance(obj, tuple)
+ return node
+
+ node = _traverse(_object, root=True)
+ return node
+
+
+def pretty_repr(
+ _object: Any,
+ *,
+ max_width: int = 80,
+ indent_size: int = 4,
+ max_length: Optional[int] = None,
+ max_string: Optional[int] = None,
+ max_depth: Optional[int] = None,
+ expand_all: bool = False,
+) -> str:
+ """Prettify repr string by expanding on to new lines to fit within a given width.
+
+ Args:
+ _object (Any): Object to repr.
+ max_width (int, optional): Desired maximum width of repr string. Defaults to 80.
+ indent_size (int, optional): Number of spaces to indent. Defaults to 4.
+ max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
+ Defaults to None.
+ max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.
+ Defaults to None.
+ max_depth (int, optional): Maximum depth of nested data structures, or None for no maximum.
+ Defaults to None.
+ expand_all (bool, optional): Expand all containers regardless of available width. Defaults to False.
+
+ Returns:
+ str: A possibly multi-line representation of the object.
+ """
+
+ if isinstance(_object, Node):
+ node = _object
+ else:
+ node = traverse(
+ _object, max_length=max_length, max_string=max_string, max_depth=max_depth
+ )
+ repr_str = node.render(
+ max_width=max_width, indent_size=indent_size, expand_all=expand_all
+ )
+ return repr_str
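For orientation (outside the patch), `pretty_repr` keeps a container on one line unless that line would exceed `max_width`, in which case `render` expands it level by level. A rough usage sketch, assuming the vendored import path:

```python
# Usage sketch for pretty_repr (illustrative values).
from pip._vendor.rich.pretty import pretty_repr

data = {"moves": ["Psychic", "Calm Mind"], "evs": {"spa": 252, "spe": 252, "hp": 4}}
print(pretty_repr(data, max_width=30))   # expands onto indented lines
print(pretty_repr(data, max_width=200))  # fits on a single line
```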
+
+
+def pprint(
+ _object: Any,
+ *,
+ console: Optional["Console"] = None,
+ indent_guides: bool = True,
+ max_length: Optional[int] = None,
+ max_string: Optional[int] = None,
+ max_depth: Optional[int] = None,
+ expand_all: bool = False,
+) -> None:
+ """A convenience function for pretty printing.
+
+ Args:
+ _object (Any): Object to pretty print.
+ console (Console, optional): Console instance, or None to use default. Defaults to None.
+ max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
+ Defaults to None.
+ max_string (int, optional): Maximum length of strings before truncating, or None to disable. Defaults to None.
+ max_depth (int, optional): Maximum depth for nested data structures, or None for unlimited depth. Defaults to None.
+ indent_guides (bool, optional): Enable indentation guides. Defaults to True.
+ expand_all (bool, optional): Expand all containers. Defaults to False.
+ """
+ _console = get_console() if console is None else console
+ _console.print(
+ Pretty(
+ _object,
+ max_length=max_length,
+ max_string=max_string,
+ max_depth=max_depth,
+ indent_guides=indent_guides,
+ expand_all=expand_all,
+ overflow="ignore",
+ ),
+ soft_wrap=True,
+ )
+
+
+if __name__ == "__main__": # pragma: no cover
+
+ class BrokenRepr:
+ def __repr__(self) -> str:
+ 1 / 0
+ return "this will fail"
+
+ d = defaultdict(int)
+ d["foo"] = 5
+ data = {
+ "foo": [
+ 1,
+ "Hello World!",
+ 100.123,
+ 323.232,
+ 432324.0,
+ {5, 6, 7, (1, 2, 3, 4), 8},
+ ],
+ "bar": frozenset({1, 2, 3}),
+ "defaultdict": defaultdict(
+ list, {"crumble": ["apple", "rhubarb", "butter", "sugar", "flour"]}
+ ),
+ "counter": Counter(
+ [
+ "apple",
+ "orange",
+ "pear",
+ "kumquat",
+ "kumquat",
+ "durian" * 100,
+ ]
+ ),
+ "atomic": (False, True, None),
+ "Broken": BrokenRepr(),
+ }
+ data["foo"].append(data) # type: ignore
+
+ from pip._vendor.rich import print
+
+ print(Pretty(data, indent_guides=True, max_string=20))
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/progress.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/progress.py
new file mode 100644
index 0000000..1f670db
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/progress.py
@@ -0,0 +1,1036 @@
+from abc import ABC, abstractmethod
+from collections import deque
+from collections.abc import Sized
+from dataclasses import dataclass, field
+from datetime import timedelta
+from math import ceil
+from threading import Event, RLock, Thread
+from types import TracebackType
+from typing import (
+ Any,
+ Callable,
+ Deque,
+ Dict,
+ Iterable,
+ List,
+ NamedTuple,
+ NewType,
+ Optional,
+ Sequence,
+ Tuple,
+ Type,
+ TypeVar,
+ Union,
+)
+
+from . import filesize, get_console
+from .console import Console, JustifyMethod, RenderableType, Group
+from .highlighter import Highlighter
+from .jupyter import JupyterMixin
+from .live import Live
+from .progress_bar import ProgressBar
+from .spinner import Spinner
+from .style import StyleType
+from .table import Column, Table
+from .text import Text, TextType
+
+TaskID = NewType("TaskID", int)
+
+ProgressType = TypeVar("ProgressType")
+
+GetTimeCallable = Callable[[], float]
+
+
+class _TrackThread(Thread):
+ """A thread to periodically update progress."""
+
+ def __init__(self, progress: "Progress", task_id: "TaskID", update_period: float):
+ self.progress = progress
+ self.task_id = task_id
+ self.update_period = update_period
+ self.done = Event()
+
+ self.completed = 0
+ super().__init__()
+
+ def run(self) -> None:
+ task_id = self.task_id
+ advance = self.progress.advance
+ update_period = self.update_period
+ last_completed = 0
+ wait = self.done.wait
+ while not wait(update_period):
+ completed = self.completed
+ if last_completed != completed:
+ advance(task_id, completed - last_completed)
+ last_completed = completed
+
+ self.progress.update(self.task_id, completed=self.completed, refresh=True)
+
+ def __enter__(self) -> "_TrackThread":
+ self.start()
+ return self
+
+ def __exit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc_val: Optional[BaseException],
+ exc_tb: Optional[TracebackType],
+ ) -> None:
+ self.done.set()
+ self.join()
+
+
+def track(
+ sequence: Union[Sequence[ProgressType], Iterable[ProgressType]],
+ description: str = "Working...",
+ total: Optional[float] = None,
+ auto_refresh: bool = True,
+ console: Optional[Console] = None,
+ transient: bool = False,
+ get_time: Optional[Callable[[], float]] = None,
+ refresh_per_second: float = 10,
+ style: StyleType = "bar.back",
+ complete_style: StyleType = "bar.complete",
+ finished_style: StyleType = "bar.finished",
+ pulse_style: StyleType = "bar.pulse",
+ update_period: float = 0.1,
+ disable: bool = False,
+) -> Iterable[ProgressType]:
+ """Track progress by iterating over a sequence.
+
+ Args:
+ sequence (Iterable[ProgressType]): A sequence (must support "len") you wish to iterate over.
+ description (str, optional): Description of the task shown next to the progress bar. Defaults to "Working...".
+ total: (float, optional): Total number of steps. Default is len(sequence).
+ auto_refresh (bool, optional): Automatic refresh, disable to force a refresh after each iteration. Default is True.
+ transient: (bool, optional): Clear the progress on exit. Defaults to False.
+ console (Console, optional): Console to write to. Default creates internal Console instance.
+ refresh_per_second (float): Number of times per second to refresh the progress information. Defaults to 10.
+ style (StyleType, optional): Style for the bar background. Defaults to "bar.back".
+ complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete".
+ finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.finished".
+ pulse_style (StyleType, optional): Style for pulsing bars. Defaults to "bar.pulse".
+ update_period (float, optional): Minimum time (in seconds) between calls to update(). Defaults to 0.1.
+ disable (bool, optional): Disable display of progress.
+ Returns:
+ Iterable[ProgressType]: An iterable of the values in the sequence.
+
+ """
+
+ columns: List["ProgressColumn"] = (
+ [TextColumn("[progress.description]{task.description}")] if description else []
+ )
+ columns.extend(
+ (
+ BarColumn(
+ style=style,
+ complete_style=complete_style,
+ finished_style=finished_style,
+ pulse_style=pulse_style,
+ ),
+ TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
+ TimeRemainingColumn(),
+ )
+ )
+ progress = Progress(
+ *columns,
+ auto_refresh=auto_refresh,
+ console=console,
+ transient=transient,
+ get_time=get_time,
+ refresh_per_second=refresh_per_second or 10,
+ disable=disable,
+ )
+
+ with progress:
+ yield from progress.track(
+ sequence, total=total, description=description, update_period=update_period
+ )
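A quick usage sketch for the `track` helper above (not part of the patch); the iterable only needs a length, or an explicit `total`:

```python
# Illustrative use of track(); the description string is made up.
import time

from pip._vendor.rich.progress import track

for _ in track(range(50), description="Parsing replays..."):
    time.sleep(0.02)  # simulated work; a background thread advances the bar
```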
+
+
+class ProgressColumn(ABC):
+ """Base class for a widget to use in progress display."""
+
+ max_refresh: Optional[float] = None
+
+ def __init__(self, table_column: Optional[Column] = None) -> None:
+ self._table_column = table_column
+ self._renderable_cache: Dict[TaskID, Tuple[float, RenderableType]] = {}
+ self._update_time: Optional[float] = None
+
+ def get_table_column(self) -> Column:
+ """Get a table column, used to build tasks table."""
+ return self._table_column or Column()
+
+ def __call__(self, task: "Task") -> RenderableType:
+ """Called by the Progress object to return a renderable for the given task.
+
+ Args:
+ task (Task): An object containing information regarding the task.
+
+ Returns:
+ RenderableType: Anything renderable (including str).
+ """
+ current_time = task.get_time()
+ if self.max_refresh is not None and not task.completed:
+ try:
+ timestamp, renderable = self._renderable_cache[task.id]
+ except KeyError:
+ pass
+ else:
+ if timestamp + self.max_refresh > current_time:
+ return renderable
+
+ renderable = self.render(task)
+ self._renderable_cache[task.id] = (current_time, renderable)
+ return renderable
+
+ @abstractmethod
+ def render(self, task: "Task") -> RenderableType:
+ """Should return a renderable object."""
+
+
+class RenderableColumn(ProgressColumn):
+ """A column to insert an arbitrary column.
+
+ Args:
+ renderable (RenderableType, optional): Any renderable. Defaults to empty string.
+ """
+
+ def __init__(
+ self, renderable: RenderableType = "", *, table_column: Optional[Column] = None
+ ):
+ self.renderable = renderable
+ super().__init__(table_column=table_column)
+
+ def render(self, task: "Task") -> RenderableType:
+ return self.renderable
+
+
+class SpinnerColumn(ProgressColumn):
+ """A column with a 'spinner' animation.
+
+ Args:
+ spinner_name (str, optional): Name of spinner animation. Defaults to "dots".
+ style (StyleType, optional): Style of spinner. Defaults to "progress.spinner".
+ speed (float, optional): Speed factor of spinner. Defaults to 1.0.
+ finished_text (TextType, optional): Text used when task is finished. Defaults to " ".
+ """
+
+ def __init__(
+ self,
+ spinner_name: str = "dots",
+ style: Optional[StyleType] = "progress.spinner",
+ speed: float = 1.0,
+ finished_text: TextType = " ",
+ table_column: Optional[Column] = None,
+ ):
+ self.spinner = Spinner(spinner_name, style=style, speed=speed)
+ self.finished_text = (
+ Text.from_markup(finished_text)
+ if isinstance(finished_text, str)
+ else finished_text
+ )
+ super().__init__(table_column=table_column)
+
+ def set_spinner(
+ self,
+ spinner_name: str,
+ spinner_style: Optional[StyleType] = "progress.spinner",
+ speed: float = 1.0,
+ ) -> None:
+ """Set a new spinner.
+
+ Args:
+ spinner_name (str): Spinner name, see python -m rich.spinner.
+ spinner_style (Optional[StyleType], optional): Spinner style. Defaults to "progress.spinner".
+ speed (float, optional): Speed factor of spinner. Defaults to 1.0.
+ """
+ self.spinner = Spinner(spinner_name, style=spinner_style, speed=speed)
+
+ def render(self, task: "Task") -> RenderableType:
+ text = (
+ self.finished_text
+ if task.finished
+ else self.spinner.render(task.get_time())
+ )
+ return text
+
+
+class TextColumn(ProgressColumn):
+ """A column containing text."""
+
+ def __init__(
+ self,
+ text_format: str,
+ style: StyleType = "none",
+ justify: JustifyMethod = "left",
+ markup: bool = True,
+ highlighter: Optional[Highlighter] = None,
+ table_column: Optional[Column] = None,
+ ) -> None:
+ self.text_format = text_format
+ self.justify: JustifyMethod = justify
+ self.style = style
+ self.markup = markup
+ self.highlighter = highlighter
+ super().__init__(table_column=table_column or Column(no_wrap=True))
+
+ def render(self, task: "Task") -> Text:
+ _text = self.text_format.format(task=task)
+ if self.markup:
+ text = Text.from_markup(_text, style=self.style, justify=self.justify)
+ else:
+ text = Text(_text, style=self.style, justify=self.justify)
+ if self.highlighter:
+ self.highlighter.highlight(text)
+ return text
+
+
+class BarColumn(ProgressColumn):
+ """Renders a visual progress bar.
+
+ Args:
+ bar_width (Optional[int], optional): Width of bar or None for full width. Defaults to 40.
+ style (StyleType, optional): Style for the bar background. Defaults to "bar.back".
+ complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete".
+ finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.finished".
+ pulse_style (StyleType, optional): Style for pulsing bars. Defaults to "bar.pulse".
+ """
+
+ def __init__(
+ self,
+ bar_width: Optional[int] = 40,
+ style: StyleType = "bar.back",
+ complete_style: StyleType = "bar.complete",
+ finished_style: StyleType = "bar.finished",
+ pulse_style: StyleType = "bar.pulse",
+ table_column: Optional[Column] = None,
+ ) -> None:
+ self.bar_width = bar_width
+ self.style = style
+ self.complete_style = complete_style
+ self.finished_style = finished_style
+ self.pulse_style = pulse_style
+ super().__init__(table_column=table_column)
+
+ def render(self, task: "Task") -> ProgressBar:
+ """Gets a progress bar widget for a task."""
+ return ProgressBar(
+ total=max(0, task.total),
+ completed=max(0, task.completed),
+ width=None if self.bar_width is None else max(1, self.bar_width),
+ pulse=not task.started,
+ animation_time=task.get_time(),
+ style=self.style,
+ complete_style=self.complete_style,
+ finished_style=self.finished_style,
+ pulse_style=self.pulse_style,
+ )
+
+
+class TimeElapsedColumn(ProgressColumn):
+ """Renders time elapsed."""
+
+ def render(self, task: "Task") -> Text:
+ """Show time remaining."""
+ elapsed = task.finished_time if task.finished else task.elapsed
+ if elapsed is None:
+ return Text("-:--:--", style="progress.elapsed")
+ delta = timedelta(seconds=int(elapsed))
+ return Text(str(delta), style="progress.elapsed")
+
+
+class TimeRemainingColumn(ProgressColumn):
+ """Renders estimated time remaining."""
+
+ # Only refresh twice a second to prevent jitter
+ max_refresh = 0.5
+
+ def render(self, task: "Task") -> Text:
+ """Show time remaining."""
+ remaining = task.time_remaining
+ if remaining is None:
+ return Text("-:--:--", style="progress.remaining")
+ remaining_delta = timedelta(seconds=int(remaining))
+ return Text(str(remaining_delta), style="progress.remaining")
+
+
+class FileSizeColumn(ProgressColumn):
+ """Renders completed filesize."""
+
+ def render(self, task: "Task") -> Text:
+ """Show data completed."""
+ data_size = filesize.decimal(int(task.completed))
+ return Text(data_size, style="progress.filesize")
+
+
+class TotalFileSizeColumn(ProgressColumn):
+ """Renders total filesize."""
+
+ def render(self, task: "Task") -> Text:
+ """Show data completed."""
+ data_size = filesize.decimal(int(task.total))
+ return Text(data_size, style="progress.filesize.total")
+
+
+class DownloadColumn(ProgressColumn):
+ """Renders file size downloaded and total, e.g. '0.5/2.3 GB'.
+
+ Args:
+ binary_units (bool, optional): Use binary units, KiB, MiB etc. Defaults to False.
+ """
+
+ def __init__(
+ self, binary_units: bool = False, table_column: Optional[Column] = None
+ ) -> None:
+ self.binary_units = binary_units
+ super().__init__(table_column=table_column)
+
+ def render(self, task: "Task") -> Text:
+ """Calculate common unit for completed and total."""
+ completed = int(task.completed)
+ total = int(task.total)
+ if self.binary_units:
+ unit, suffix = filesize.pick_unit_and_suffix(
+ total,
+ ["bytes", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"],
+ 1024,
+ )
+ else:
+ unit, suffix = filesize.pick_unit_and_suffix(
+ total, ["bytes", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"], 1000
+ )
+ completed_ratio = completed / unit
+ total_ratio = total / unit
+ precision = 0 if unit == 1 else 1
+ completed_str = f"{completed_ratio:,.{precision}f}"
+ total_str = f"{total_ratio:,.{precision}f}"
+ download_status = f"{completed_str}/{total_str} {suffix}"
+ download_text = Text(download_status, style="progress.download")
+ return download_text
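Worked example of the unit selection above (illustrative numbers, outside the patch): for 0.5 GB downloaded out of 2.3 GB with decimal units, `pick_unit_and_suffix` yields `unit=1_000_000_000` and `suffix="GB"`, so the column renders "0.5/2.3 GB":

```python
# Reproducing the DownloadColumn formatting by hand (assumed values).
completed, total, unit = 500_000_000, 2_300_000_000, 1_000_000_000
precision = 0 if unit == 1 else 1
print(f"{completed / unit:,.{precision}f}/{total / unit:,.{precision}f} GB")  # 0.5/2.3 GB
```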
+
+
+class TransferSpeedColumn(ProgressColumn):
+ """Renders human readable transfer speed."""
+
+ def render(self, task: "Task") -> Text:
+ """Show data transfer speed."""
+ speed = task.finished_speed or task.speed
+ if speed is None:
+ return Text("?", style="progress.data.speed")
+ data_speed = filesize.decimal(int(speed))
+ return Text(f"{data_speed}/s", style="progress.data.speed")
+
+
+class ProgressSample(NamedTuple):
+ """Sample of progress for a given time."""
+
+ timestamp: float
+ """Timestamp of sample."""
+ completed: float
+ """Number of steps completed."""
+
+
+@dataclass
+class Task:
+ """Information regarding a progress task.
+
+ This object should be considered read-only outside of the :class:`~Progress` class.
+
+ """
+
+ id: TaskID
+ """Task ID associated with this task (used in Progress methods)."""
+
+ description: str
+ """str: Description of the task."""
+
+ total: float
+ """str: Total number of steps in this task."""
+
+ completed: float
+ """float: Number of steps completed"""
+
+ _get_time: GetTimeCallable
+ """Callable to get the current time."""
+
+ finished_time: Optional[float] = None
+ """float: Time task was finished."""
+
+ visible: bool = True
+ """bool: Indicates if this task is visible in the progress display."""
+
+ fields: Dict[str, Any] = field(default_factory=dict)
+ """dict: Arbitrary fields passed in via Progress.update."""
+
+ start_time: Optional[float] = field(default=None, init=False, repr=False)
+ """Optional[float]: Time this task was started, or None if not started."""
+
+ stop_time: Optional[float] = field(default=None, init=False, repr=False)
+ """Optional[float]: Time this task was stopped, or None if not stopped."""
+
+ finished_speed: Optional[float] = None
+ """Optional[float]: The last speed for a finished task."""
+
+ _progress: Deque[ProgressSample] = field(
+ default_factory=deque, init=False, repr=False
+ )
+
+ _lock: RLock = field(repr=False, default_factory=RLock)
+ """Thread lock."""
+
+ def get_time(self) -> float:
+ """float: Get the current time, in seconds."""
+ return self._get_time()
+
+ @property
+ def started(self) -> bool:
+ """bool: Check if the task as started."""
+ return self.start_time is not None
+
+ @property
+ def remaining(self) -> float:
+ """float: Get the number of steps remaining."""
+ return self.total - self.completed
+
+ @property
+ def elapsed(self) -> Optional[float]:
+ """Optional[float]: Time elapsed since task was started, or ``None`` if the task hasn't started."""
+ if self.start_time is None:
+ return None
+ if self.stop_time is not None:
+ return self.stop_time - self.start_time
+ return self.get_time() - self.start_time
+
+ @property
+ def finished(self) -> bool:
+ """Check if the task has finished."""
+ return self.finished_time is not None
+
+ @property
+ def percentage(self) -> float:
+ """float: Get progress of task as a percentage."""
+ if not self.total:
+ return 0.0
+ completed = (self.completed / self.total) * 100.0
+ completed = min(100.0, max(0.0, completed))
+ return completed
+
+ @property
+ def speed(self) -> Optional[float]:
+ """Optional[float]: Get the estimated speed in steps per second."""
+ if self.start_time is None:
+ return None
+ with self._lock:
+ progress = self._progress
+ if not progress:
+ return None
+ total_time = progress[-1].timestamp - progress[0].timestamp
+ if total_time == 0:
+ return None
+ iter_progress = iter(progress)
+ next(iter_progress)
+ total_completed = sum(sample.completed for sample in iter_progress)
+ speed = total_completed / total_time
+ return speed
+
+ @property
+ def time_remaining(self) -> Optional[float]:
+ """Optional[float]: Get estimated time to completion, or ``None`` if no data."""
+ if self.finished:
+ return 0.0
+ speed = self.speed
+ if not speed:
+ return None
+ estimate = ceil(self.remaining / speed)
+ return estimate
+
+ def _reset(self) -> None:
+ """Reset progress."""
+ self._progress.clear()
+ self.finished_time = None
+ self.finished_speed = None
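For context (outside the patch), `Task.speed` averages the recent `ProgressSample` deltas over the time they span, skipping the first sample's step count because it predates the window. A hedged sketch with assumed values:

```python
# Rough model of the Task.speed calculation (illustrative numbers).
from collections import deque

samples = deque([(0.0, 0.0), (1.0, 10.0), (2.0, 10.0)])  # (timestamp, steps added)
window = samples[-1][0] - samples[0][0]                   # 2.0 seconds covered
steps = sum(done for _, done in list(samples)[1:])        # skip the first delta
print(steps / window)                                     # 10.0 steps per second
```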
+
+
+class Progress(JupyterMixin):
+ """Renders an auto-updating progress bar(s).
+
+ Args:
+ console (Console, optional): Optional Console instance. Defaults to an internal Console instance writing to stdout.
+ auto_refresh (bool, optional): Enable auto refresh. If disabled, you will need to call `refresh()`.
+ refresh_per_second (float, optional): Number of times per second to refresh the progress information. Defaults to 10.
+ speed_estimate_period: (float, optional): Period (in seconds) used to calculate the speed estimate. Defaults to 30.
+ transient: (bool, optional): Clear the progress on exit. Defaults to False.
+ redirect_stdout: (bool, optional): Enable redirection of stdout, so ``print`` may be used. Defaults to True.
+ redirect_stderr: (bool, optional): Enable redirection of stderr. Defaults to True.
+ get_time: (Callable, optional): A callable that gets the current time, or None to use Console.get_time. Defaults to None.
+ disable (bool, optional): Disable progress display. Defaults to False
+ expand (bool, optional): Expand tasks table to fit width. Defaults to False.
+ """
+
+ def __init__(
+ self,
+ *columns: Union[str, ProgressColumn],
+ console: Optional[Console] = None,
+ auto_refresh: bool = True,
+ refresh_per_second: float = 10,
+ speed_estimate_period: float = 30.0,
+ transient: bool = False,
+ redirect_stdout: bool = True,
+ redirect_stderr: bool = True,
+ get_time: Optional[GetTimeCallable] = None,
+ disable: bool = False,
+ expand: bool = False,
+ ) -> None:
+ assert (
+ refresh_per_second is None or refresh_per_second > 0
+ ), "refresh_per_second must be > 0"
+ self._lock = RLock()
+ self.columns = columns or (
+ TextColumn("[progress.description]{task.description}"),
+ BarColumn(),
+ TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
+ TimeRemainingColumn(),
+ )
+ self.speed_estimate_period = speed_estimate_period
+
+ self.disable = disable
+ self.expand = expand
+ self._tasks: Dict[TaskID, Task] = {}
+ self._task_index: TaskID = TaskID(0)
+ self.live = Live(
+ console=console or get_console(),
+ auto_refresh=auto_refresh,
+ refresh_per_second=refresh_per_second,
+ transient=transient,
+ redirect_stdout=redirect_stdout,
+ redirect_stderr=redirect_stderr,
+ get_renderable=self.get_renderable,
+ )
+ self.get_time = get_time or self.console.get_time
+ self.print = self.console.print
+ self.log = self.console.log
+
+ @property
+ def console(self) -> Console:
+ return self.live.console
+
+ @property
+ def tasks(self) -> List[Task]:
+ """Get a list of Task instances."""
+ with self._lock:
+ return list(self._tasks.values())
+
+ @property
+ def task_ids(self) -> List[TaskID]:
+ """A list of task IDs."""
+ with self._lock:
+ return list(self._tasks.keys())
+
+ @property
+ def finished(self) -> bool:
+ """Check if all tasks have been completed."""
+ with self._lock:
+ if not self._tasks:
+ return True
+ return all(task.finished for task in self._tasks.values())
+
+ def start(self) -> None:
+ """Start the progress display."""
+ if not self.disable:
+ self.live.start(refresh=True)
+
+ def stop(self) -> None:
+ """Stop the progress display."""
+ self.live.stop()
+ if not self.console.is_interactive:
+ self.console.print()
+
+ def __enter__(self) -> "Progress":
+ self.start()
+ return self
+
+ def __exit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc_val: Optional[BaseException],
+ exc_tb: Optional[TracebackType],
+ ) -> None:
+ self.stop()
+
+ def track(
+ self,
+ sequence: Union[Iterable[ProgressType], Sequence[ProgressType]],
+ total: Optional[float] = None,
+ task_id: Optional[TaskID] = None,
+ description: str = "Working...",
+ update_period: float = 0.1,
+ ) -> Iterable[ProgressType]:
+ """Track progress by iterating over a sequence.
+
+ Args:
+ sequence (Sequence[ProgressType]): A sequence of values you want to iterate over and track progress.
+ total: (float, optional): Total number of steps. Default is len(sequence).
+ task_id: (TaskID): Task to track. Default is new task.
+ description: (str, optional): Description of task, if new task is created.
+ update_period (float, optional): Minimum time (in seconds) between calls to update(). Defaults to 0.1.
+
+ Returns:
+ Iterable[ProgressType]: An iterable of values taken from the provided sequence.
+ """
+
+ if total is None:
+ if isinstance(sequence, Sized):
+ task_total = float(len(sequence))
+ else:
+ raise ValueError(
+ f"unable to get size of {sequence!r}, please specify 'total'"
+ )
+ else:
+ task_total = total
+
+ if task_id is None:
+ task_id = self.add_task(description, total=task_total)
+ else:
+ self.update(task_id, total=task_total)
+
+ if self.live.auto_refresh:
+ with _TrackThread(self, task_id, update_period) as track_thread:
+ for value in sequence:
+ yield value
+ track_thread.completed += 1
+ else:
+ advance = self.advance
+ refresh = self.refresh
+ for value in sequence:
+ yield value
+ advance(task_id, 1)
+ refresh()
+
+ def start_task(self, task_id: TaskID) -> None:
+ """Start a task.
+
+ Starts a task (used when calculating elapsed time). You may need to call this manually
+ if you called ``add_task`` with ``start=False``.
+
+ Args:
+ task_id (TaskID): ID of task.
+ """
+ with self._lock:
+ task = self._tasks[task_id]
+ if task.start_time is None:
+ task.start_time = self.get_time()
+
+ def stop_task(self, task_id: TaskID) -> None:
+ """Stop a task.
+
+ This will freeze the elapsed time on the task.
+
+ Args:
+ task_id (TaskID): ID of task.
+ """
+ with self._lock:
+ task = self._tasks[task_id]
+ current_time = self.get_time()
+ if task.start_time is None:
+ task.start_time = current_time
+ task.stop_time = current_time
+
+ def update(
+ self,
+ task_id: TaskID,
+ *,
+ total: Optional[float] = None,
+ completed: Optional[float] = None,
+ advance: Optional[float] = None,
+ description: Optional[str] = None,
+ visible: Optional[bool] = None,
+ refresh: bool = False,
+ **fields: Any,
+ ) -> None:
+ """Update information associated with a task.
+
+ Args:
+ task_id (TaskID): Task id (returned by add_task).
+ total (float, optional): Updates task.total if not None.
+ completed (float, optional): Updates task.completed if not None.
+ advance (float, optional): Add a value to task.completed if not None.
+ description (str, optional): Change task description if not None.
+ visible (bool, optional): Set visible flag if not None.
+ refresh (bool): Force a refresh of progress information. Default is False.
+ **fields (Any): Additional data fields required for rendering.
+ """
+ with self._lock:
+ task = self._tasks[task_id]
+ completed_start = task.completed
+
+ if total is not None and total != task.total:
+ task.total = total
+ task._reset()
+ if advance is not None:
+ task.completed += advance
+ if completed is not None:
+ task.completed = completed
+ if description is not None:
+ task.description = description
+ if visible is not None:
+ task.visible = visible
+ task.fields.update(fields)
+ update_completed = task.completed - completed_start
+
+ current_time = self.get_time()
+ old_sample_time = current_time - self.speed_estimate_period
+ _progress = task._progress
+
+ popleft = _progress.popleft
+ while _progress and _progress[0].timestamp < old_sample_time:
+ popleft()
+ while len(_progress) > 1000:
+ popleft()
+ if update_completed > 0:
+ _progress.append(ProgressSample(current_time, update_completed))
+ if task.completed >= task.total and task.finished_time is None:
+ task.finished_time = task.elapsed
+
+ if refresh:
+ self.refresh()
+
+ def reset(
+ self,
+ task_id: TaskID,
+ *,
+ start: bool = True,
+ total: Optional[float] = None,
+ completed: int = 0,
+ visible: Optional[bool] = None,
+ description: Optional[str] = None,
+ **fields: Any,
+ ) -> None:
+ """Reset a task so completed is 0 and the clock is reset.
+
+ Args:
+ task_id (TaskID): ID of task.
+ start (bool, optional): Start the task after reset. Defaults to True.
+ total (float, optional): New total steps in task, or None to use current total. Defaults to None.
+ completed (int, optional): Number of steps completed. Defaults to 0.
+ **fields (str): Additional data fields required for rendering.
+ """
+ current_time = self.get_time()
+ with self._lock:
+ task = self._tasks[task_id]
+ task._reset()
+ task.start_time = current_time if start else None
+ if total is not None:
+ task.total = total
+ task.completed = completed
+ if visible is not None:
+ task.visible = visible
+ if fields:
+ task.fields = fields
+ if description is not None:
+ task.description = description
+ task.finished_time = None
+ self.refresh()
+
+ def advance(self, task_id: TaskID, advance: float = 1) -> None:
+ """Advance task by a number of steps.
+
+ Args:
+ task_id (TaskID): ID of task.
+ advance (float): Number of steps to advance. Default is 1.
+ """
+ current_time = self.get_time()
+ with self._lock:
+ task = self._tasks[task_id]
+ completed_start = task.completed
+ task.completed += advance
+ update_completed = task.completed - completed_start
+ old_sample_time = current_time - self.speed_estimate_period
+ _progress = task._progress
+
+ popleft = _progress.popleft
+ while _progress and _progress[0].timestamp < old_sample_time:
+ popleft()
+ while len(_progress) > 1000:
+ popleft()
+ _progress.append(ProgressSample(current_time, update_completed))
+ if task.completed >= task.total and task.finished_time is None:
+ task.finished_time = task.elapsed
+ task.finished_speed = task.speed
+
+ def refresh(self) -> None:
+ """Refresh (render) the progress information."""
+ if not self.disable and self.live.is_started:
+ self.live.refresh()
+
+ def get_renderable(self) -> RenderableType:
+ """Get a renderable for the progress display."""
+ renderable = Group(*self.get_renderables())
+ return renderable
+
+ def get_renderables(self) -> Iterable[RenderableType]:
+ """Get a number of renderables for the progress display."""
+ table = self.make_tasks_table(self.tasks)
+ yield table
+
+ def make_tasks_table(self, tasks: Iterable[Task]) -> Table:
+ """Get a table to render the Progress display.
+
+ Args:
+ tasks (Iterable[Task]): An iterable of Task instances, one per row of the table.
+
+ Returns:
+ Table: A table instance.
+ """
+ table_columns = (
+ (
+ Column(no_wrap=True)
+ if isinstance(_column, str)
+ else _column.get_table_column().copy()
+ )
+ for _column in self.columns
+ )
+ table = Table.grid(*table_columns, padding=(0, 1), expand=self.expand)
+
+ for task in tasks:
+ if task.visible:
+ table.add_row(
+ *(
+ (
+ column.format(task=task)
+ if isinstance(column, str)
+ else column(task)
+ )
+ for column in self.columns
+ )
+ )
+ return table
+
+ def __rich__(self) -> RenderableType:
+ """Makes the Progress class itself renderable."""
+ with self._lock:
+ return self.get_renderable()
+
+ def add_task(
+ self,
+ description: str,
+ start: bool = True,
+ total: float = 100.0,
+ completed: int = 0,
+ visible: bool = True,
+ **fields: Any,
+ ) -> TaskID:
+ """Add a new 'task' to the Progress display.
+
+ Args:
+ description (str): A description of the task.
+ start (bool, optional): Start the task immediately (to calculate elapsed time). If set to False,
+ you will need to call `start` manually. Defaults to True.
+ total (float, optional): Total number of steps in the progress, if known. Defaults to 100.
+ completed (int, optional): Number of steps completed so far. Defaults to 0.
+ visible (bool, optional): Enable display of the task. Defaults to True.
+ **fields (str): Additional data fields required for rendering.
+
+ Returns:
+ TaskID: An ID you can use when calling `update`.
+ """
+ with self._lock:
+ task = Task(
+ self._task_index,
+ description,
+ total,
+ completed,
+ visible=visible,
+ fields=fields,
+ _get_time=self.get_time,
+ _lock=self._lock,
+ )
+ self._tasks[self._task_index] = task
+ if start:
+ self.start_task(self._task_index)
+ new_task_index = self._task_index
+ self._task_index = TaskID(int(self._task_index) + 1)
+ self.refresh()
+ return new_task_index
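A minimal `add_task`/`update` workflow for the class above (illustrative, not part of the patch):

```python
# Sketch of driving Progress directly; the task description is made up.
from pip._vendor.rich.progress import Progress

with Progress() as progress:
    task = progress.add_task("[cyan]Simulating battles...", total=200)
    for _ in range(200):
        progress.update(task, advance=1)
```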
+
+ def remove_task(self, task_id: TaskID) -> None:
+ """Delete a task if it exists.
+
+ Args:
+ task_id (TaskID): A task ID.
+
+ """
+ with self._lock:
+ del self._tasks[task_id]
+
+
+if __name__ == "__main__": # pragma: no cover
+
+ import random
+ import time
+
+ from .panel import Panel
+ from .rule import Rule
+ from .syntax import Syntax
+ from .table import Table
+
+ syntax = Syntax(
+ '''def loop_last(values: Iterable[T]) -> Iterable[Tuple[bool, T]]:
+ """Iterate and generate a tuple with a flag for last value."""
+ iter_values = iter(values)
+ try:
+ previous_value = next(iter_values)
+ except StopIteration:
+ return
+ for value in iter_values:
+ yield False, previous_value
+ previous_value = value
+ yield True, previous_value''',
+ "python",
+ line_numbers=True,
+ )
+
+ table = Table("foo", "bar", "baz")
+ table.add_row("1", "2", "3")
+
+ progress_renderables = [
+ "Text may be printed while the progress bars are rendering.",
+ Panel("In fact, [i]any[/i] renderable will work"),
+ "Such as [magenta]tables[/]...",
+ table,
+ "Pretty printed structures...",
+ {"type": "example", "text": "Pretty printed"},
+ "Syntax...",
+ syntax,
+ Rule("Give it a try!"),
+ ]
+
+ from itertools import cycle
+
+ examples = cycle(progress_renderables)
+
+ console = Console(record=True)
+
+ with Progress(
+ SpinnerColumn(),
+ TextColumn("[progress.description]{task.description}"),
+ BarColumn(),
+ TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
+ TimeRemainingColumn(),
+ TimeElapsedColumn(),
+ console=console,
+ transient=True,
+ ) as progress:
+
+ task1 = progress.add_task("[red]Downloading", total=1000)
+ task2 = progress.add_task("[green]Processing", total=1000)
+ task3 = progress.add_task("[yellow]Thinking", total=1000, start=False)
+
+ while not progress.finished:
+ progress.update(task1, advance=0.5)
+ progress.update(task2, advance=0.3)
+ time.sleep(0.01)
+ if random.randint(0, 100) < 1:
+ progress.log(next(examples))
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/progress_bar.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/progress_bar.py
new file mode 100644
index 0000000..1797b5f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/progress_bar.py
@@ -0,0 +1,216 @@
+import math
+from functools import lru_cache
+from time import monotonic
+from typing import Iterable, List, Optional
+
+from .color import Color, blend_rgb
+from .color_triplet import ColorTriplet
+from .console import Console, ConsoleOptions, RenderResult
+from .jupyter import JupyterMixin
+from .measure import Measurement
+from .segment import Segment
+from .style import Style, StyleType
+
+# Number of characters before 'pulse' animation repeats
+PULSE_SIZE = 20
+
+
+class ProgressBar(JupyterMixin):
+ """Renders a (progress) bar. Used by rich.progress.
+
+ Args:
+ total (float, optional): Number of steps in the bar. Defaults to 100.
+ completed (float, optional): Number of steps completed. Defaults to 0.
+ width (int, optional): Width of the bar, or ``None`` for maximum width. Defaults to None.
+ pulse (bool, optional): Enable pulse effect. Defaults to False.
+ style (StyleType, optional): Style for the bar background. Defaults to "bar.back".
+ complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete".
+ finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.finished".
+ pulse_style (StyleType, optional): Style for pulsing bars. Defaults to "bar.pulse".
+ animation_time (Optional[float], optional): Time in seconds to use for animation, or None to use system time.
+ """
+
+ def __init__(
+ self,
+ total: float = 100.0,
+ completed: float = 0,
+ width: Optional[int] = None,
+ pulse: bool = False,
+ style: StyleType = "bar.back",
+ complete_style: StyleType = "bar.complete",
+ finished_style: StyleType = "bar.finished",
+ pulse_style: StyleType = "bar.pulse",
+ animation_time: Optional[float] = None,
+ ):
+ self.total = total
+ self.completed = completed
+ self.width = width
+ self.pulse = pulse
+ self.style = style
+ self.complete_style = complete_style
+ self.finished_style = finished_style
+ self.pulse_style = pulse_style
+ self.animation_time = animation_time
+
+ self._pulse_segments: Optional[List[Segment]] = None
+
+ def __repr__(self) -> str:
+ return f""
+
+ @property
+ def percentage_completed(self) -> float:
+ """Calculate percentage complete."""
+ completed = (self.completed / self.total) * 100.0
+ completed = min(100, max(0.0, completed))
+ return completed
+
+ @lru_cache(maxsize=16)
+ def _get_pulse_segments(
+ self,
+ fore_style: Style,
+ back_style: Style,
+ color_system: str,
+ no_color: bool,
+ ascii: bool = False,
+ ) -> List[Segment]:
+ """Get a list of segments to render a pulse animation.
+
+ Returns:
+ List[Segment]: A list of segments, one segment per character.
+ """
+ bar = "-" if ascii else "━"
+ segments: List[Segment] = []
+ if color_system not in ("standard", "eight_bit", "truecolor") or no_color:
+ segments += [Segment(bar, fore_style)] * (PULSE_SIZE // 2)
+ segments += [Segment(" " if no_color else bar, back_style)] * (
+ PULSE_SIZE - (PULSE_SIZE // 2)
+ )
+ return segments
+
+ append = segments.append
+ fore_color = (
+ fore_style.color.get_truecolor()
+ if fore_style.color
+ else ColorTriplet(255, 0, 255)
+ )
+ back_color = (
+ back_style.color.get_truecolor()
+ if back_style.color
+ else ColorTriplet(0, 0, 0)
+ )
+ cos = math.cos
+ pi = math.pi
+ _Segment = Segment
+ _Style = Style
+ from_triplet = Color.from_triplet
+
+ for index in range(PULSE_SIZE):
+ position = index / PULSE_SIZE
+ fade = 0.5 + cos((position * pi * 2)) / 2.0
+ color = blend_rgb(fore_color, back_color, cross_fade=fade)
+ append(_Segment(bar, _Style(color=from_triplet(color))))
+ return segments
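For reference (outside the patch), the pulse colours come from a cosine cross-fade between the pulse style and the background across `PULSE_SIZE` cells:

```python
# Fade factor per pulse cell (illustrative print-out of the formula above).
import math

PULSE_SIZE = 20
fades = [0.5 + math.cos(i / PULSE_SIZE * math.pi * 2) / 2.0 for i in range(PULSE_SIZE)]
print([round(f, 2) for f in fades[:5]])  # 1.0 at the crest, easing toward 0.0 mid-cycle
```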
+
+ def update(self, completed: float, total: Optional[float] = None) -> None:
+ """Update progress with new values.
+
+ Args:
+ completed (float): Number of steps completed.
+ total (float, optional): Total number of steps, or ``None`` to not change. Defaults to None.
+ """
+ self.completed = completed
+ self.total = total if total is not None else self.total
+
+ def _render_pulse(
+ self, console: Console, width: int, ascii: bool = False
+ ) -> Iterable[Segment]:
+ """Renders the pulse animation.
+
+ Args:
+ console (Console): Console instance.
+ width (int): Width in characters of pulse animation.
+
+ Yields:
+ Iterator[Segment]: Segments to render the pulse animation.
+ """
+ fore_style = console.get_style(self.pulse_style, default="white")
+ back_style = console.get_style(self.style, default="black")
+
+ pulse_segments = self._get_pulse_segments(
+ fore_style, back_style, console.color_system, console.no_color, ascii=ascii
+ )
+ segment_count = len(pulse_segments)
+ current_time = (
+ monotonic() if self.animation_time is None else self.animation_time
+ )
+ segments = pulse_segments * (int(width / segment_count) + 2)
+ offset = int(-current_time * 15) % segment_count
+ segments = segments[offset : offset + width]
+ yield from segments
+
+ def __rich_console__(
+ self, console: Console, options: ConsoleOptions
+ ) -> RenderResult:
+
+ width = min(self.width or options.max_width, options.max_width)
+ ascii = options.legacy_windows or options.ascii_only
+ if self.pulse:
+ yield from self._render_pulse(console, width, ascii=ascii)
+ return
+
+ completed = min(self.total, max(0, self.completed))
+
+ bar = "-" if ascii else "━"
+ half_bar_right = " " if ascii else "╸"
+ half_bar_left = " " if ascii else "╺"
+ complete_halves = (
+ int(width * 2 * completed / self.total) if self.total else width * 2
+ )
+ bar_count = complete_halves // 2
+ half_bar_count = complete_halves % 2
+ style = console.get_style(self.style)
+ complete_style = console.get_style(
+ self.complete_style if self.completed < self.total else self.finished_style
+ )
+ _Segment = Segment
+ if bar_count:
+ yield _Segment(bar * bar_count, complete_style)
+ if half_bar_count:
+ yield _Segment(half_bar_right * half_bar_count, complete_style)
+
+ if not console.no_color:
+ remaining_bars = width - bar_count - half_bar_count
+ if remaining_bars and console.color_system is not None:
+ if not half_bar_count and bar_count:
+ yield _Segment(half_bar_left, style)
+ remaining_bars -= 1
+ if remaining_bars:
+ yield _Segment(bar * remaining_bars, style)
+
+ def __rich_measure__(
+ self, console: Console, options: ConsoleOptions
+ ) -> Measurement:
+ return (
+ Measurement(self.width, self.width)
+ if self.width is not None
+ else Measurement(4, options.max_width)
+ )
+
+
+if __name__ == "__main__": # pragma: no cover
+ console = Console()
+ bar = ProgressBar(width=50, total=100)
+
+ import time
+
+ console.show_cursor(False)
+ for n in range(0, 101, 1):
+ bar.update(n)
+ console.print(bar)
+ console.file.write("\r")
+ time.sleep(0.05)
+ console.show_cursor(True)
+ console.print()
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/prompt.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/prompt.py
new file mode 100644
index 0000000..b2cea2b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/prompt.py
@@ -0,0 +1,376 @@
+from typing import Any, Generic, List, Optional, TextIO, TypeVar, Union, overload
+
+from . import get_console
+from .console import Console
+from .text import Text, TextType
+
+PromptType = TypeVar("PromptType")
+DefaultType = TypeVar("DefaultType")
+
+
+class PromptError(Exception):
+ """Exception base class for prompt related errors."""
+
+
+class InvalidResponse(PromptError):
+ """Exception to indicate a response was invalid. Raise this within process_response() to indicate an error
+ and provide an error message.
+
+ Args:
+ message (Union[str, Text]): Error message.
+ """
+
+ def __init__(self, message: TextType) -> None:
+ self.message = message
+
+ def __rich__(self) -> TextType:
+ return self.message
+
+
+class PromptBase(Generic[PromptType]):
+ """Ask the user for input until a valid response is received. This is the base class, see one of
+ the concrete classes for examples.
+
+ Args:
+ prompt (TextType, optional): Prompt text. Defaults to "".
+ console (Console, optional): A Console instance or None to use global console. Defaults to None.
+ password (bool, optional): Enable password input. Defaults to False.
+ choices (List[str], optional): A list of valid choices. Defaults to None.
+ show_default (bool, optional): Show default in prompt. Defaults to True.
+ show_choices (bool, optional): Show choices in prompt. Defaults to True.
+ """
+
+ response_type: type = str
+
+ validate_error_message = "[prompt.invalid]Please enter a valid value"
+ illegal_choice_message = (
+ "[prompt.invalid.choice]Please select one of the available options"
+ )
+ prompt_suffix = ": "
+
+ choices: Optional[List[str]] = None
+
+ def __init__(
+ self,
+ prompt: TextType = "",
+ *,
+ console: Optional[Console] = None,
+ password: bool = False,
+ choices: Optional[List[str]] = None,
+ show_default: bool = True,
+ show_choices: bool = True,
+ ) -> None:
+ self.console = console or get_console()
+ self.prompt = (
+ Text.from_markup(prompt, style="prompt")
+ if isinstance(prompt, str)
+ else prompt
+ )
+ self.password = password
+ if choices is not None:
+ self.choices = choices
+ self.show_default = show_default
+ self.show_choices = show_choices
+
+ @classmethod
+ @overload
+ def ask(
+ cls,
+ prompt: TextType = "",
+ *,
+ console: Optional[Console] = None,
+ password: bool = False,
+ choices: Optional[List[str]] = None,
+ show_default: bool = True,
+ show_choices: bool = True,
+ default: DefaultType,
+ stream: Optional[TextIO] = None,
+ ) -> Union[DefaultType, PromptType]:
+ ...
+
+ @classmethod
+ @overload
+ def ask(
+ cls,
+ prompt: TextType = "",
+ *,
+ console: Optional[Console] = None,
+ password: bool = False,
+ choices: Optional[List[str]] = None,
+ show_default: bool = True,
+ show_choices: bool = True,
+ stream: Optional[TextIO] = None,
+ ) -> PromptType:
+ ...
+
+ @classmethod
+ def ask(
+ cls,
+ prompt: TextType = "",
+ *,
+ console: Optional[Console] = None,
+ password: bool = False,
+ choices: Optional[List[str]] = None,
+ show_default: bool = True,
+ show_choices: bool = True,
+ default: Any = ...,
+ stream: Optional[TextIO] = None,
+ ) -> Any:
+ """Shortcut to construct and run a prompt loop and return the result.
+
+ Example:
+ >>> filename = Prompt.ask("Enter a filename")
+
+ Args:
+ prompt (TextType, optional): Prompt text. Defaults to "".
+ console (Console, optional): A Console instance or None to use global console. Defaults to None.
+ password (bool, optional): Enable password input. Defaults to False.
+ choices (List[str], optional): A list of valid choices. Defaults to None.
+ show_default (bool, optional): Show default in prompt. Defaults to True.
+ show_choices (bool, optional): Show choices in prompt. Defaults to True.
+ stream (TextIO, optional): Optional text file open for reading to get input. Defaults to None.
+ """
+ _prompt = cls(
+ prompt,
+ console=console,
+ password=password,
+ choices=choices,
+ show_default=show_default,
+ show_choices=show_choices,
+ )
+ return _prompt(default=default, stream=stream)
+
+ def render_default(self, default: DefaultType) -> Text:
+ """Turn the supplied default in to a Text instance.
+
+ Args:
+ default (DefaultType): Default value.
+
+ Returns:
+ Text: Text containing rendering of default value.
+ """
+ return Text(f"({default})", "prompt.default")
+
+ def make_prompt(self, default: DefaultType) -> Text:
+ """Make prompt text.
+
+ Args:
+ default (DefaultType): Default value.
+
+ Returns:
+ Text: Text to display in prompt.
+ """
+ prompt = self.prompt.copy()
+ prompt.end = ""
+
+ if self.show_choices and self.choices:
+ _choices = "/".join(self.choices)
+ choices = f"[{_choices}]"
+ prompt.append(" ")
+ prompt.append(choices, "prompt.choices")
+
+ if (
+ default != ...
+ and self.show_default
+ and isinstance(default, (str, self.response_type))
+ ):
+ prompt.append(" ")
+ _default = self.render_default(default)
+ prompt.append(_default)
+
+ prompt.append(self.prompt_suffix)
+
+ return prompt
+
+ @classmethod
+ def get_input(
+ cls,
+ console: Console,
+ prompt: TextType,
+ password: bool,
+ stream: Optional[TextIO] = None,
+ ) -> str:
+ """Get input from user.
+
+ Args:
+ console (Console): Console instance.
+ prompt (TextType): Prompt text.
+ password (bool): Enable password entry.
+
+ Returns:
+ str: String from user.
+ """
+ return console.input(prompt, password=password, stream=stream)
+
+ def check_choice(self, value: str) -> bool:
+ """Check value is in the list of valid choices.
+
+ Args:
+ value (str): Value entered by user.
+
+ Returns:
+ bool: True if choice was valid, otherwise False.
+ """
+ assert self.choices is not None
+ return value.strip() in self.choices
+
+ def process_response(self, value: str) -> PromptType:
+ """Process response from user, convert to prompt type.
+
+ Args:
+ value (str): String typed by user.
+
+ Raises:
+ InvalidResponse: If ``value`` is invalid.
+
+ Returns:
+ PromptType: The value to be returned from ask method.
+ """
+ value = value.strip()
+ try:
+ return_value = self.response_type(value)
+ except ValueError:
+ raise InvalidResponse(self.validate_error_message)
+
+ if self.choices is not None and not self.check_choice(value):
+ raise InvalidResponse(self.illegal_choice_message)
+
+ return return_value # type: ignore
+
+ def on_validate_error(self, value: str, error: InvalidResponse) -> None:
+ """Called to handle validation error.
+
+ Args:
+ value (str): String entered by user.
+ error (InvalidResponse): Exception instance that initiated the error.
+ """
+ self.console.print(error)
+
+ def pre_prompt(self) -> None:
+ """Hook to display something before the prompt."""
+
+ @overload
+ def __call__(self, *, stream: Optional[TextIO] = None) -> PromptType:
+ ...
+
+ @overload
+ def __call__(
+ self, *, default: DefaultType, stream: Optional[TextIO] = None
+ ) -> Union[PromptType, DefaultType]:
+ ...
+
+ def __call__(self, *, default: Any = ..., stream: Optional[TextIO] = None) -> Any:
+ """Run the prompt loop.
+
+ Args:
+ default (Any, optional): Optional default value.
+
+ Returns:
+ PromptType: Processed value.
+ """
+ while True:
+ self.pre_prompt()
+ prompt = self.make_prompt(default)
+ value = self.get_input(self.console, prompt, self.password, stream=stream)
+ if value == "" and default != ...:
+ return default
+ try:
+ return_value = self.process_response(value)
+ except InvalidResponse as error:
+ self.on_validate_error(value, error)
+ continue
+ else:
+ return return_value
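The base class above is designed to be subclassed for custom validation; a hedged sketch (the class and its messages are made up for illustration):

```python
# Hypothetical PromptBase subclass adding a range check (not part of the patch).
from pip._vendor.rich.prompt import InvalidResponse, PromptBase

class PortPrompt(PromptBase[int]):
    response_type = int
    validate_error_message = "[prompt.invalid]Please enter a port between 1 and 65535"

    def process_response(self, value: str) -> int:
        port = super().process_response(value)
        if not 1 <= port <= 65535:
            raise InvalidResponse(self.validate_error_message)
        return port

# port = PortPrompt.ask("Showdown server port", default=8000)
```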
+
+
+class Prompt(PromptBase[str]):
+ """A prompt that returns a str.
+
+ Example:
+ >>> name = Prompt.ask("Enter your name")
+
+
+ """
+
+ response_type = str
+
+
+class IntPrompt(PromptBase[int]):
+ """A prompt that returns an integer.
+
+ Example:
+ >>> burrito_count = IntPrompt.ask("How many burritos do you want to order")
+
+ """
+
+ response_type = int
+ validate_error_message = "[prompt.invalid]Please enter a valid integer number"
+
+
+class FloatPrompt(PromptBase[float]):
+ """A prompt that returns a float.
+
+ Example:
+ >>> temperature = FloatPrompt.ask("Enter desired temperature")
+
+ """
+
+ response_type = float
+ validate_error_message = "[prompt.invalid]Please enter a number"
+
+
+class Confirm(PromptBase[bool]):
+ """A yes / no confirmation prompt.
+
+ Example:
+ >>> if Confirm.ask("Continue"):
+ run_job()
+
+ """
+
+ response_type = bool
+ validate_error_message = "[prompt.invalid]Please enter Y or N"
+ choices: List[str] = ["y", "n"]
+
+ def render_default(self, default: DefaultType) -> Text:
+ """Render the default as (y) or (n) rather than True/False."""
+ yes, no = self.choices
+ return Text(f"({yes})" if default else f"({no})", style="prompt.default")
+
+ def process_response(self, value: str) -> bool:
+ """Convert choices to a bool."""
+ value = value.strip().lower()
+ if value not in self.choices:
+ raise InvalidResponse(self.validate_error_message)
+ return value == self.choices[0]
+
+
+if __name__ == "__main__": # pragma: no cover
+
+ from pip._vendor.rich import print
+
+ if Confirm.ask("Run [i]prompt[/i] tests?", default=True):
+ while True:
+ result = IntPrompt.ask(
+ ":rocket: Enter a number between [b]1[/b] and [b]10[/b]", default=5
+ )
+ if result >= 1 and result <= 10:
+ break
+ print(":pile_of_poo: [prompt.invalid]Number must be between 1 and 10")
+ print(f"number={result}")
+
+ while True:
+ password = Prompt.ask(
+ "Please enter a password [cyan](must be at least 5 characters)",
+ password=True,
+ )
+ if len(password) >= 5:
+ break
+ print("[prompt.invalid]password too short")
+ print(f"password={password!r}")
+
+ fruit = Prompt.ask("Enter a fruit", choices=["apple", "orange", "pear"])
+ print(f"fruit={fruit!r}")
+
+ else:
+ print("[b]OK :loudly_crying_face:")
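A minimal sketch of how the prompt classes above are typically used (illustrative only; the import path assumes the vendored copy under `pip._vendor`, and the values are made up):

```python
from pip._vendor.rich.prompt import Confirm, IntPrompt, Prompt

# Prompt.ask returns a str; IntPrompt.ask re-prompts until a valid integer is entered.
name = Prompt.ask("Enter your name", default="Ash")
team_size = IntPrompt.ask("How many Pokemon are on your team", default=6)

# Confirm.ask maps "y"/"n" answers to a bool via process_response.
if Confirm.ask("Save this team?", default=True):
    print(f"Saved a team of {team_size} for {name}")
```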
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/protocol.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/protocol.py
new file mode 100644
index 0000000..6248052
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/protocol.py
@@ -0,0 +1,42 @@
+from typing import Any, Callable, cast, Set, TYPE_CHECKING
+from inspect import isclass
+
+if TYPE_CHECKING:
+ from pip._vendor.rich.console import RenderableType
+
+_GIBBERISH = """aihwerij235234ljsdnp34ksodfipwoe234234jlskjdf"""
+
+
+def is_renderable(check_object: Any) -> bool:
+ """Check if an object may be rendered by Rich."""
+ return (
+ isinstance(check_object, str)
+ or hasattr(check_object, "__rich__")
+ or hasattr(check_object, "__rich_console__")
+ )
+
+
+def rich_cast(renderable: object) -> "RenderableType":
+ """Cast an object to a renderable by calling __rich__ if present.
+
+ Args:
+ renderable (object): A potentially renderable object
+
+ Returns:
+ object: The result of recursively calling __rich__.
+ """
+ from pip._vendor.rich.console import RenderableType
+
+ rich_visited_set: Set[type] = set() # Prevent potential infinite loop
+ while hasattr(renderable, "__rich__") and not isclass(renderable):
+ # Detect objects which claim to have all the attributes
+ if hasattr(renderable, _GIBBERISH):
+ return repr(renderable)
+ cast_method = getattr(renderable, "__rich__")
+ renderable = cast_method()
+ renderable_type = type(renderable)
+ if renderable_type in rich_visited_set:
+ break
+ rich_visited_set.add(renderable_type)
+
+ return cast(RenderableType, renderable)
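A short sketch of the protocol helpers above: `is_renderable` checks for the Rich protocol methods, and `rich_cast` resolves `__rich__` to a concrete renderable (illustrative only; the `Greeting` class is hypothetical):

```python
from pip._vendor.rich.protocol import is_renderable, rich_cast
from pip._vendor.rich.text import Text

class Greeting:
    def __rich__(self) -> Text:
        # Rich calls __rich__ to get something it knows how to render.
        return Text("Hello", style="bold magenta")

print(is_renderable(Greeting()))   # True: the object defines __rich__
print(is_renderable(42))           # False: not a str and no Rich protocol methods
print(rich_cast(Greeting()))       # the Text instance returned by __rich__
```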
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/region.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/region.py
new file mode 100644
index 0000000..75b3631
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/region.py
@@ -0,0 +1,10 @@
+from typing import NamedTuple
+
+
+class Region(NamedTuple):
+ """Defines a rectangular region of the screen."""
+
+ x: int
+ y: int
+ width: int
+ height: int
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/repr.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/repr.py
new file mode 100644
index 0000000..17147fd
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/repr.py
@@ -0,0 +1,151 @@
+from functools import partial
+import inspect
+
+from typing import (
+ Any,
+ Callable,
+ Iterable,
+ List,
+ Optional,
+ overload,
+ Union,
+ Tuple,
+ Type,
+ TypeVar,
+)
+
+
+T = TypeVar("T")
+
+
+Result = Iterable[Union[Any, Tuple[Any], Tuple[str, Any], Tuple[str, Any, Any]]]
+RichReprResult = Result
+
+
+class ReprError(Exception):
+ """An error occurred when attempting to build a repr."""
+
+
+@overload
+def auto(cls: Optional[T]) -> T:
+ ...
+
+
+@overload
+def auto(*, angular: bool = False) -> Callable[[T], T]:
+ ...
+
+
+def auto(
+ cls: Optional[T] = None, *, angular: Optional[bool] = None
+) -> Union[T, Callable[[T], T]]:
+ """Class decorator to create __repr__ from __rich_repr__"""
+
+ def do_replace(cls: Type[T], angular: Optional[bool] = None) -> Type[T]:
+ def auto_repr(self: Type[T]) -> str:
+ """Create repr string from __rich_repr__"""
+ repr_str: List[str] = []
+ append = repr_str.append
+
+ angular = getattr(self.__rich_repr__, "angular", False) # type: ignore
+ for arg in self.__rich_repr__(): # type: ignore
+ if isinstance(arg, tuple):
+ if len(arg) == 1:
+ append(repr(arg[0]))
+ else:
+ key, value, *default = arg
+ if key is None:
+ append(repr(value))
+ else:
+ if len(default) and default[0] == value:
+ continue
+ append(f"{key}={value!r}")
+ else:
+ append(repr(arg))
+ if angular:
+ return f"<{self.__class__.__name__} {' '.join(repr_str)}>"
+ else:
+ return f"{self.__class__.__name__}({', '.join(repr_str)})"
+
+ def auto_rich_repr(self: Type[T]) -> Result:
+ """Auto generate __rich_rep__ from signature of __init__"""
+ try:
+ signature = inspect.signature(self.__init__)  # type: ignore
+ for name, param in signature.parameters.items():
+ if param.kind == param.POSITIONAL_ONLY:
+ yield getattr(self, name)
+ elif param.kind in (
+ param.POSITIONAL_OR_KEYWORD,
+ param.KEYWORD_ONLY,
+ ):
+ if param.default == param.empty:
+ yield getattr(self, param.name)
+ else:
+ yield param.name, getattr(self, param.name), param.default
+ except Exception as error:
+ raise ReprError(
+ f"Failed to auto generate __rich_repr__; {error}"
+ ) from None
+
+ if not hasattr(cls, "__rich_repr__"):
+ auto_rich_repr.__doc__ = "Build a rich repr"
+ cls.__rich_repr__ = auto_rich_repr # type: ignore
+
+ auto_repr.__doc__ = "Return repr(self)"
+ cls.__repr__ = auto_repr # type: ignore
+ if angular is not None:
+ cls.__rich_repr__.angular = angular # type: ignore
+ return cls
+
+ if cls is None:
+ return partial(do_replace, angular=angular) # type: ignore
+ else:
+ return do_replace(cls, angular=angular) # type: ignore
+
+
+@overload
+def rich_repr(cls: Optional[T]) -> T:
+ ...
+
+
+@overload
+def rich_repr(*, angular: bool = False) -> Callable[[T], T]:
+ ...
+
+
+def rich_repr(
+ cls: Optional[T] = None, *, angular: bool = False
+) -> Union[T, Callable[[T], T]]:
+ if cls is None:
+ return auto(angular=angular)
+ else:
+ return auto(cls)
+
+
+if __name__ == "__main__":
+
+ @auto
+ class Foo:
+ def __rich_repr__(self) -> Result:
+ yield "foo"
+ yield "bar", {"shopping": ["eggs", "ham", "pineapple"]}
+ yield "buy", "hand sanitizer"
+
+ foo = Foo()
+ from pip._vendor.rich.console import Console
+
+ console = Console()
+
+ console.rule("Standard repr")
+ console.print(foo)
+
+ console.print(foo, width=60)
+ console.print(foo, width=30)
+
+ console.rule("Angular repr")
+ Foo.__rich_repr__.angular = True # type: ignore
+
+ console.print(foo)
+
+ console.print(foo, width=60)
+ console.print(foo, width=30)
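A hedged sketch of the `auto` decorator above: with no `__rich_repr__` defined, it derives one from the `__init__` signature, and arguments equal to their defaults are omitted from the repr (the `Point` class is hypothetical):

```python
from pip._vendor.rich.repr import auto

@auto
class Point:
    def __init__(self, x: int, y: int, label: str = "origin") -> None:
        self.x = x
        self.y = y
        self.label = label

print(Point(3, 4))            # Point(3, 4) -- label equals its default, so it is skipped
print(Point(3, 4, "corner"))  # Point(3, 4, label='corner')
```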
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/rule.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/rule.py
new file mode 100644
index 0000000..ce4754f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/rule.py
@@ -0,0 +1,115 @@
+from typing import Union
+
+from .align import AlignMethod
+from .cells import cell_len, set_cell_size
+from .console import Console, ConsoleOptions, RenderResult
+from .jupyter import JupyterMixin
+from .style import Style
+from .text import Text
+
+
+class Rule(JupyterMixin):
+ """A console renderable to draw a horizontal rule (line).
+
+ Args:
+ title (Union[str, Text], optional): Text to render in the rule. Defaults to "".
+ characters (str, optional): Character(s) used to draw the line. Defaults to "─".
+ style (StyleType, optional): Style of Rule. Defaults to "rule.line".
+ end (str, optional): Character at end of Rule. Defaults to "\\\\n".
+ align (str, optional): How to align the title, one of "left", "center", or "right". Defaults to "center".
+ """
+
+ def __init__(
+ self,
+ title: Union[str, Text] = "",
+ *,
+ characters: str = "─",
+ style: Union[str, Style] = "rule.line",
+ end: str = "\n",
+ align: AlignMethod = "center",
+ ) -> None:
+ if cell_len(characters) < 1:
+ raise ValueError(
+ "'characters' argument must have a cell width of at least 1"
+ )
+ if align not in ("left", "center", "right"):
+ raise ValueError(
+ f'invalid value for align, expected "left", "center", "right" (not {align!r})'
+ )
+ self.title = title
+ self.characters = characters
+ self.style = style
+ self.end = end
+ self.align = align
+
+ def __repr__(self) -> str:
+ return f"Rule({self.title!r}, {self.characters!r})"
+
+ def __rich_console__(
+ self, console: Console, options: ConsoleOptions
+ ) -> RenderResult:
+ width = options.max_width
+
+ # Python3.6 doesn't have an isascii method on str
+ isascii = getattr(str, "isascii", None) or (
+ lambda s: all(ord(c) < 128 for c in s)
+ )
+ characters = (
+ "-"
+ if (options.ascii_only and not isascii(self.characters))
+ else self.characters
+ )
+
+ chars_len = cell_len(characters)
+ if not self.title:
+ rule_text = Text(characters * ((width // chars_len) + 1), self.style)
+ rule_text.truncate(width)
+ rule_text.plain = set_cell_size(rule_text.plain, width)
+ yield rule_text
+ return
+
+ if isinstance(self.title, Text):
+ title_text = self.title
+ else:
+ title_text = console.render_str(self.title, style="rule.text")
+
+ title_text.plain = title_text.plain.replace("\n", " ")
+ title_text.expand_tabs()
+ rule_text = Text(end=self.end)
+
+ if self.align == "center":
+ title_text.truncate(width - 4, overflow="ellipsis")
+ side_width = (width - cell_len(title_text.plain)) // 2
+ left = Text(characters * (side_width // chars_len + 1))
+ left.truncate(side_width - 1)
+ right_length = width - cell_len(left.plain) - cell_len(title_text.plain)
+ right = Text(characters * (side_width // chars_len + 1))
+ right.truncate(right_length)
+ rule_text.append(left.plain + " ", self.style)
+ rule_text.append(title_text)
+ rule_text.append(" " + right.plain, self.style)
+ elif self.align == "left":
+ title_text.truncate(width - 2, overflow="ellipsis")
+ rule_text.append(title_text)
+ rule_text.append(" ")
+ rule_text.append(characters * (width - rule_text.cell_len), self.style)
+ elif self.align == "right":
+ title_text.truncate(width - 2, overflow="ellipsis")
+ rule_text.append(characters * (width - title_text.cell_len - 1), self.style)
+ rule_text.append(" ")
+ rule_text.append(title_text)
+
+ rule_text.plain = set_cell_size(rule_text.plain, width)
+ yield rule_text
+
+
+if __name__ == "__main__": # pragma: no cover
+ from pip._vendor.rich.console import Console
+ import sys
+
+ try:
+ text = sys.argv[1]
+ except IndexError:
+ text = "Hello, World"
+ console = Console()
+ console.print(Rule(title=text))
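A small sketch of the `Rule` renderable above, drawing a titled horizontal line across the terminal (illustrative only):

```python
from pip._vendor.rich.console import Console
from pip._vendor.rich.rule import Rule

console = Console()
console.print(Rule("Battle Log"))                         # centred title, default "─" characters
console.print(Rule("Turn 1", align="left", style="red"))  # left-aligned title, red line
console.print(Rule(characters="=*"))                      # no title, custom characters
```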
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/scope.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/scope.py
new file mode 100644
index 0000000..6822b8c
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/scope.py
@@ -0,0 +1,86 @@
+from collections.abc import Mapping
+from typing import TYPE_CHECKING, Any, Optional, Tuple
+
+from .highlighter import ReprHighlighter
+from .panel import Panel
+from .pretty import Pretty
+from .table import Table
+from .text import Text, TextType
+
+if TYPE_CHECKING:
+ from .console import ConsoleRenderable
+
+
+def render_scope(
+ scope: "Mapping[str, Any]",
+ *,
+ title: Optional[TextType] = None,
+ sort_keys: bool = True,
+ indent_guides: bool = False,
+ max_length: Optional[int] = None,
+ max_string: Optional[int] = None,
+) -> "ConsoleRenderable":
+ """Render python variables in a given scope.
+
+ Args:
+ scope (Mapping): A mapping containing variable names and values.
+ title (str, optional): Optional title. Defaults to None.
+ sort_keys (bool, optional): Enable sorting of items. Defaults to True.
+ indent_guides (bool, optional): Enable indentation guides. Defaults to False.
+ max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
+ Defaults to None.
+ max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
+
+ Returns:
+ ConsoleRenderable: A renderable object.
+ """
+ highlighter = ReprHighlighter()
+ items_table = Table.grid(padding=(0, 1), expand=False)
+ items_table.add_column(justify="right")
+
+ def sort_items(item: Tuple[str, Any]) -> Tuple[bool, str]:
+ """Sort special variables first, then alphabetically."""
+ key, _ = item
+ return (not key.startswith("__"), key.lower())
+
+ items = sorted(scope.items(), key=sort_items) if sort_keys else scope.items()
+ for key, value in items:
+ key_text = Text.assemble(
+ (key, "scope.key.special" if key.startswith("__") else "scope.key"),
+ (" =", "scope.equals"),
+ )
+ items_table.add_row(
+ key_text,
+ Pretty(
+ value,
+ highlighter=highlighter,
+ indent_guides=indent_guides,
+ max_length=max_length,
+ max_string=max_string,
+ ),
+ )
+ return Panel.fit(
+ items_table,
+ title=title,
+ border_style="scope.border",
+ padding=(0, 1),
+ )
+
+
+if __name__ == "__main__": # pragma: no cover
+ from pip._vendor.rich import print
+
+ print()
+
+ def test(foo: float, bar: float) -> None:
+ list_of_things = [1, 2, 3, None, 4, True, False, "Hello World"]
+ dict_of_things = {
+ "version": "1.1",
+ "method": "confirmFruitPurchase",
+ "params": [["apple", "orange", "mangoes", "pomelo"], 1.123],
+ "id": "194521489",
+ }
+ print(render_scope(locals(), title="[i]locals", sort_keys=False))
+
+ test(20.3423, 3.1427)
+ print()
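A quick sketch of `render_scope` above, dumping a function's local variables as a panel (the `damage_roll` function is hypothetical):

```python
from pip._vendor.rich import print
from pip._vendor.rich.scope import render_scope

def damage_roll(base_power: int, stab: bool) -> None:
    modifier = 1.5 if stab else 1.0
    total = base_power * modifier
    # Render this frame's locals as a two-column table inside a panel.
    print(render_scope(locals(), title="[i]damage_roll locals"))

damage_roll(90, stab=True)
```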
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/screen.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/screen.py
new file mode 100644
index 0000000..7f416e1
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/screen.py
@@ -0,0 +1,54 @@
+from typing import Optional, TYPE_CHECKING
+
+from .segment import Segment
+from .style import StyleType
+from ._loop import loop_last
+
+
+if TYPE_CHECKING:
+ from .console import (
+ Console,
+ ConsoleOptions,
+ RenderResult,
+ RenderableType,
+ Group,
+ )
+
+
+class Screen:
+ """A renderable that fills the terminal screen and crops excess.
+
+ Args:
+ renderable (RenderableType): Child renderable.
+ style (StyleType, optional): Optional background style. Defaults to None.
+ """
+
+ renderable: "RenderableType"
+
+ def __init__(
+ self,
+ *renderables: "RenderableType",
+ style: Optional[StyleType] = None,
+ application_mode: bool = False,
+ ) -> None:
+ from pip._vendor.rich.console import Group
+
+ self.renderable = Group(*renderables)
+ self.style = style
+ self.application_mode = application_mode
+
+ def __rich_console__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "RenderResult":
+ width, height = options.size
+ style = console.get_style(self.style) if self.style else None
+ render_options = options.update(width=width, height=height)
+ lines = console.render_lines(
+ self.renderable or "", render_options, style=style, pad=True
+ )
+ lines = Segment.set_shape(lines, width, height, style=style)
+ new_line = Segment("\n\r") if self.application_mode else Segment.line()
+ for last, line in loop_last(lines):
+ yield from line
+ if not last:
+ yield new_line
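A minimal sketch of the `Screen` renderable above; it pads and crops its children to the full terminal size (illustrative only):

```python
from pip._vendor.rich.console import Console
from pip._vendor.rich.panel import Panel
from pip._vendor.rich.screen import Screen

console = Console()
# The Panel is rendered into a region the size of the terminal; excess lines are
# cropped and missing lines are padded with the background style.
console.print(Screen(Panel("Simulating..."), style="on blue"))
```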
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/segment.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/segment.py
new file mode 100644
index 0000000..94ca730
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/segment.py
@@ -0,0 +1,720 @@
+from enum import IntEnum
+from functools import lru_cache
+from itertools import filterfalse
+from logging import getLogger
+from operator import attrgetter
+from typing import (
+ TYPE_CHECKING,
+ Dict,
+ Iterable,
+ List,
+ NamedTuple,
+ Optional,
+ Sequence,
+ Tuple,
+ Type,
+ Union,
+)
+
+from .cells import (
+ _is_single_cell_widths,
+ cell_len,
+ get_character_cell_size,
+ set_cell_size,
+)
+from .repr import Result, rich_repr
+from .style import Style
+
+if TYPE_CHECKING:
+ from .console import Console, ConsoleOptions, RenderResult
+
+log = getLogger("rich")
+
+
+class ControlType(IntEnum):
+ """Non-printable control codes which typically translate to ANSI codes."""
+
+ BELL = 1
+ CARRIAGE_RETURN = 2
+ HOME = 3
+ CLEAR = 4
+ SHOW_CURSOR = 5
+ HIDE_CURSOR = 6
+ ENABLE_ALT_SCREEN = 7
+ DISABLE_ALT_SCREEN = 8
+ CURSOR_UP = 9
+ CURSOR_DOWN = 10
+ CURSOR_FORWARD = 11
+ CURSOR_BACKWARD = 12
+ CURSOR_MOVE_TO_COLUMN = 13
+ CURSOR_MOVE_TO = 14
+ ERASE_IN_LINE = 15
+
+
+ControlCode = Union[
+ Tuple[ControlType], Tuple[ControlType, int], Tuple[ControlType, int, int]
+]
+
+
+@rich_repr()
+class Segment(NamedTuple):
+ """A piece of text with associated style. Segments are produced by the Console render process and
+ are ultimately converted in to strings to be written to the terminal.
+
+ Args:
+ text (str): A piece of text.
+ style (:class:`~rich.style.Style`, optional): An optional style to apply to the text.
+ control (Sequence[ControlCode], optional): Optional sequence of control codes.
+ """
+
+ text: str = ""
+ """Raw text."""
+ style: Optional[Style] = None
+ """An optional style."""
+ control: Optional[Sequence[ControlCode]] = None
+ """Optional sequence of control codes."""
+
+ def __rich_repr__(self) -> Result:
+ yield self.text
+ if self.control is None:
+ if self.style is not None:
+ yield self.style
+ else:
+ yield self.style
+ yield self.control
+
+ def __bool__(self) -> bool:
+ """Check if the segment contains text."""
+ return bool(self.text)
+
+ @property
+ def cell_length(self) -> int:
+ """Get cell length of segment."""
+ return 0 if self.control else cell_len(self.text)
+
+ @property
+ def is_control(self) -> bool:
+ """Check if the segment contains control codes."""
+ return self.control is not None
+
+ @classmethod
+ @lru_cache(1024 * 16)
+ def _split_cells(cls, segment: "Segment", cut: int) -> Tuple["Segment", "Segment"]: # type: ignore
+
+ text, style, control = segment
+ _Segment = Segment
+
+ cell_length = segment.cell_length
+ if cut >= cell_length:
+ return segment, _Segment("", style, control)
+
+ cell_size = get_character_cell_size
+
+ pos = int((cut / cell_length) * len(text))
+
+ before = text[:pos]
+ cell_pos = cell_len(before)
+ if cell_pos == cut:
+ return (
+ _Segment(before, style, control),
+ _Segment(text[pos:], style, control),
+ )
+ while pos < len(text):
+ char = text[pos]
+ pos += 1
+ cell_pos += cell_size(char)
+ before = text[:pos]
+ if cell_pos == cut:
+ return (
+ _Segment(before, style, control),
+ _Segment(text[pos:], style, control),
+ )
+ if cell_pos > cut:
+ return (
+ _Segment(before[: pos - 1] + " ", style, control),
+ _Segment(" " + text[pos:], style, control),
+ )
+
+ def split_cells(self, cut: int) -> Tuple["Segment", "Segment"]:
+ """Split segment in to two segments at the specified column.
+
+ If the cut point falls in the middle of a 2-cell wide character then it is replaced
+ by two spaces, to preserve the display width of the parent segment.
+
+ Returns:
+ Tuple[Segment, Segment]: Two segments.
+ """
+ text, style, control = self
+
+ if _is_single_cell_widths(text):
+ # Fast path with all 1 cell characters
+ if cut >= len(text):
+ return self, Segment("", style, control)
+ return (
+ Segment(text[:cut], style, control),
+ Segment(text[cut:], style, control),
+ )
+
+ return self._split_cells(self, cut)
+
+ @classmethod
+ def line(cls) -> "Segment":
+ """Make a new line segment."""
+ return cls("\n")
+
+ @classmethod
+ def apply_style(
+ cls,
+ segments: Iterable["Segment"],
+ style: Optional[Style] = None,
+ post_style: Optional[Style] = None,
+ ) -> Iterable["Segment"]:
+ """Apply style(s) to an iterable of segments.
+
+ Returns an iterable of segments where the style is replaced by ``style + segment.style + post_style``.
+
+ Args:
+ segments (Iterable[Segment]): Segments to process.
+ style (Style, optional): Base style. Defaults to None.
+ post_style (Style, optional): Style to apply on top of segment style. Defaults to None.
+
+ Returns:
+ Iterable[Segment]: A new iterable of segments (possibly the same iterable).
+ """
+ result_segments = segments
+ if style:
+ apply = style.__add__
+ result_segments = (
+ cls(text, None if control else apply(_style), control)
+ for text, _style, control in result_segments
+ )
+ if post_style:
+ result_segments = (
+ cls(
+ text,
+ (
+ None
+ if control
+ else (_style + post_style if _style else post_style)
+ ),
+ control,
+ )
+ for text, _style, control in result_segments
+ )
+ return result_segments
+
+ @classmethod
+ def filter_control(
+ cls, segments: Iterable["Segment"], is_control: bool = False
+ ) -> Iterable["Segment"]:
+ """Filter segments by ``is_control`` attribute.
+
+ Args:
+ segments (Iterable[Segment]): An iterable of Segment instances.
+ is_control (bool, optional): is_control flag to match in search.
+
+ Returns:
+ Iterable[Segment]: An iterable of Segment instances.
+
+ """
+ if is_control:
+ return filter(attrgetter("control"), segments)
+ else:
+ return filterfalse(attrgetter("control"), segments)
+
+ @classmethod
+ def split_lines(cls, segments: Iterable["Segment"]) -> Iterable[List["Segment"]]:
+ """Split a sequence of segments in to a list of lines.
+
+ Args:
+ segments (Iterable[Segment]): Segments potentially containing line feeds.
+
+ Yields:
+ Iterable[List[Segment]]: Iterable of segment lists, one per line.
+ """
+ line: List[Segment] = []
+ append = line.append
+
+ for segment in segments:
+ if "\n" in segment.text and not segment.control:
+ text, style, _ = segment
+ while text:
+ _text, new_line, text = text.partition("\n")
+ if _text:
+ append(cls(_text, style))
+ if new_line:
+ yield line
+ line = []
+ append = line.append
+ else:
+ append(segment)
+ if line:
+ yield line
+
+ @classmethod
+ def split_and_crop_lines(
+ cls,
+ segments: Iterable["Segment"],
+ length: int,
+ style: Optional[Style] = None,
+ pad: bool = True,
+ include_new_lines: bool = True,
+ ) -> Iterable[List["Segment"]]:
+ """Split segments in to lines, and crop lines greater than a given length.
+
+ Args:
+ segments (Iterable[Segment]): An iterable of segments, probably
+ generated from console.render.
+ length (int): Desired line length.
+ style (Style, optional): Style to use for any padding.
+ pad (bool): Enable padding of lines that are less than `length`.
+
+ Returns:
+ Iterable[List[Segment]]: An iterable of lines of segments.
+ """
+ line: List[Segment] = []
+ append = line.append
+
+ adjust_line_length = cls.adjust_line_length
+ new_line_segment = cls("\n")
+
+ for segment in segments:
+ if "\n" in segment.text and not segment.control:
+ text, style, _ = segment
+ while text:
+ _text, new_line, text = text.partition("\n")
+ if _text:
+ append(cls(_text, style))
+ if new_line:
+ cropped_line = adjust_line_length(
+ line, length, style=style, pad=pad
+ )
+ if include_new_lines:
+ cropped_line.append(new_line_segment)
+ yield cropped_line
+ del line[:]
+ else:
+ append(segment)
+ if line:
+ yield adjust_line_length(line, length, style=style, pad=pad)
+
+ @classmethod
+ def adjust_line_length(
+ cls,
+ line: List["Segment"],
+ length: int,
+ style: Optional[Style] = None,
+ pad: bool = True,
+ ) -> List["Segment"]:
+ """Adjust a line to a given width (cropping or padding as required).
+
+ Args:
+ segments (Iterable[Segment]): A list of segments in a single line.
+ length (int): The desired width of the line.
+ style (Style, optional): The style of padding if used (space on the end). Defaults to None.
+ pad (bool, optional): Pad lines with spaces if they are shorter than `length`. Defaults to True.
+
+ Returns:
+ List[Segment]: A line of segments with the desired length.
+ """
+ line_length = sum(segment.cell_length for segment in line)
+ new_line: List[Segment]
+
+ if line_length < length:
+ if pad:
+ new_line = line + [cls(" " * (length - line_length), style)]
+ else:
+ new_line = line[:]
+ elif line_length > length:
+ new_line = []
+ append = new_line.append
+ line_length = 0
+ for segment in line:
+ segment_length = segment.cell_length
+ if line_length + segment_length < length or segment.control:
+ append(segment)
+ line_length += segment_length
+ else:
+ text, segment_style, _ = segment
+ text = set_cell_size(text, length - line_length)
+ append(cls(text, segment_style))
+ break
+ else:
+ new_line = line[:]
+ return new_line
+
+ @classmethod
+ def get_line_length(cls, line: List["Segment"]) -> int:
+ """Get the length of list of segments.
+
+ Args:
+ line (List[Segment]): A line encoded as a list of Segments (assumes no '\\\\n' characters).
+
+ Returns:
+ int: The length of the line.
+ """
+ _cell_len = cell_len
+ return sum(_cell_len(segment.text) for segment in line)
+
+ @classmethod
+ def get_shape(cls, lines: List[List["Segment"]]) -> Tuple[int, int]:
+ """Get the shape (enclosing rectangle) of a list of lines.
+
+ Args:
+ lines (List[List[Segment]]): A list of lines (no '\\\\n' characters).
+
+ Returns:
+ Tuple[int, int]: Width and height in characters.
+ """
+ get_line_length = cls.get_line_length
+ max_width = max(get_line_length(line) for line in lines) if lines else 0
+ return (max_width, len(lines))
+
+ @classmethod
+ def set_shape(
+ cls,
+ lines: List[List["Segment"]],
+ width: int,
+ height: Optional[int] = None,
+ style: Optional[Style] = None,
+ new_lines: bool = False,
+ ) -> List[List["Segment"]]:
+ """Set the shape of a list of lines (enclosing rectangle).
+
+ Args:
+ lines (List[List[Segment]]): A list of lines.
+ width (int): Desired width.
+ height (int, optional): Desired height or None for no change.
+ style (Style, optional): Style of any padding added.
+ new_lines (bool, optional): Padded lines should include "\n". Defaults to False.
+
+ Returns:
+ List[List[Segment]]: New list of lines.
+ """
+ _height = height or len(lines)
+
+ blank = (
+ [cls(" " * width + "\n", style)] if new_lines else [cls(" " * width, style)]
+ )
+
+ adjust_line_length = cls.adjust_line_length
+ shaped_lines = lines[:_height]
+ shaped_lines[:] = [
+ adjust_line_length(line, width, style=style) for line in lines
+ ]
+ if len(shaped_lines) < _height:
+ shaped_lines.extend([blank] * (_height - len(shaped_lines)))
+ return shaped_lines
+
+ @classmethod
+ def align_top(
+ cls: Type["Segment"],
+ lines: List[List["Segment"]],
+ width: int,
+ height: int,
+ style: Style,
+ new_lines: bool = False,
+ ) -> List[List["Segment"]]:
+ """Aligns lines to top (adds extra lines to bottom as required).
+
+ Args:
+ lines (List[List[Segment]]): A list of lines.
+ width (int): Desired width.
+ height (int, optional): Desired height or None for no change.
+ style (Style): Style of any padding added.
+ new_lines (bool, optional): Padded lines should include "\n". Defaults to False.
+
+ Returns:
+ List[List[Segment]]: New list of lines.
+ """
+ extra_lines = height - len(lines)
+ if not extra_lines:
+ return lines[:]
+ lines = lines[:height]
+ blank = cls(" " * width + "\n", style) if new_lines else cls(" " * width, style)
+ lines = lines + [[blank]] * extra_lines
+ return lines
+
+ @classmethod
+ def align_bottom(
+ cls: Type["Segment"],
+ lines: List[List["Segment"]],
+ width: int,
+ height: int,
+ style: Style,
+ new_lines: bool = False,
+ ) -> List[List["Segment"]]:
+ """Aligns render to bottom (adds extra lines above as required).
+
+ Args:
+ lines (List[List[Segment]]): A list of lines.
+ width (int): Desired width.
+ height (int, optional): Desired height or None for no change.
+ style (Style): Style of any padding added. Defaults to None.
+ new_lines (bool, optional): Padded lines should include "\n". Defaults to False.
+
+ Returns:
+ List[List[Segment]]: New list of lines.
+ """
+ extra_lines = height - len(lines)
+ if not extra_lines:
+ return lines[:]
+ lines = lines[:height]
+ blank = cls(" " * width + "\n", style) if new_lines else cls(" " * width, style)
+ lines = [[blank]] * extra_lines + lines
+ return lines
+
+ @classmethod
+ def align_middle(
+ cls: Type["Segment"],
+ lines: List[List["Segment"]],
+ width: int,
+ height: int,
+ style: Style,
+ new_lines: bool = False,
+ ) -> List[List["Segment"]]:
+ """Aligns lines to middle (adds extra lines to above and below as required).
+
+ Args:
+ lines (List[List[Segment]]): A list of lines.
+ width (int): Desired width.
+ height (int, optional): Desired height or None for no change.
+ style (Style): Style of any padding added.
+ new_lines (bool, optional): Padded lines should include "\n". Defaults to False.
+
+ Returns:
+ List[List[Segment]]: New list of lines.
+ """
+ extra_lines = height - len(lines)
+ if not extra_lines:
+ return lines[:]
+ lines = lines[:height]
+ blank = cls(" " * width + "\n", style) if new_lines else cls(" " * width, style)
+ top_lines = extra_lines // 2
+ bottom_lines = extra_lines - top_lines
+ lines = [[blank]] * top_lines + lines + [[blank]] * bottom_lines
+ return lines
+
+ @classmethod
+ def simplify(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]:
+ """Simplify an iterable of segments by combining contiguous segments with the same style.
+
+ Args:
+ segments (Iterable[Segment]): An iterable of segments.
+
+ Returns:
+ Iterable[Segment]: A possibly smaller iterable of segments that will render the same way.
+ """
+ iter_segments = iter(segments)
+ try:
+ last_segment = next(iter_segments)
+ except StopIteration:
+ return
+
+ _Segment = Segment
+ for segment in iter_segments:
+ if last_segment.style == segment.style and not segment.control:
+ last_segment = _Segment(
+ last_segment.text + segment.text, last_segment.style
+ )
+ else:
+ yield last_segment
+ last_segment = segment
+ yield last_segment
+
+ @classmethod
+ def strip_links(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]:
+ """Remove all links from an iterable of styles.
+
+ Args:
+ segments (Iterable[Segment]): An iterable segments.
+
+ Yields:
+ Segment: Segments with link removed.
+ """
+ for segment in segments:
+ if segment.control or segment.style is None:
+ yield segment
+ else:
+ text, style, _control = segment
+ yield cls(text, style.update_link(None) if style else None)
+
+ @classmethod
+ def strip_styles(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]:
+ """Remove all styles from an iterable of segments.
+
+ Args:
+ segments (Iterable[Segment]): An iterable segments.
+
+ Yields:
+ Segment: Segments with styles replaced with None.
+ """
+ for text, _style, control in segments:
+ yield cls(text, None, control)
+
+ @classmethod
+ def remove_color(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]:
+ """Remove all color from an iterable of segments.
+
+ Args:
+ segments (Iterable[Segment]): An iterable segments.
+
+ Yields:
+ Segment: Segments with colorless style.
+ """
+
+ cache: Dict[Style, Style] = {}
+ for text, style, control in segments:
+ if style:
+ colorless_style = cache.get(style)
+ if colorless_style is None:
+ colorless_style = style.without_color
+ cache[style] = colorless_style
+ yield cls(text, colorless_style, control)
+ else:
+ yield cls(text, None, control)
+
+ @classmethod
+ def divide(
+ cls, segments: Iterable["Segment"], cuts: Iterable[int]
+ ) -> Iterable[List["Segment"]]:
+ """Divides an iterable of segments in to portions.
+
+ Args:
+ cuts (Iterable[int]): Cell positions where to divide.
+
+ Yields:
+ [Iterable[List[Segment]]]: An iterable of Segments in List.
+ """
+ split_segments: List["Segment"] = []
+ add_segment = split_segments.append
+
+ iter_cuts = iter(cuts)
+
+ while True:
+ try:
+ cut = next(iter_cuts)
+ except StopIteration:
+ return []
+ if cut != 0:
+ break
+ yield []
+ pos = 0
+
+ for segment in segments:
+ while segment.text:
+ end_pos = pos + segment.cell_length
+ if end_pos < cut:
+ add_segment(segment)
+ pos = end_pos
+ break
+
+ try:
+ if end_pos == cut:
+ add_segment(segment)
+ yield split_segments[:]
+ del split_segments[:]
+ pos = end_pos
+ break
+ else:
+ before, segment = segment.split_cells(cut - pos)
+ add_segment(before)
+ yield split_segments[:]
+ del split_segments[:]
+ pos = cut
+ finally:
+ try:
+ cut = next(iter_cuts)
+ except StopIteration:
+ if split_segments:
+ yield split_segments[:]
+ return
+ yield split_segments[:]
+
+
+class Segments:
+ """A simple renderable to render an iterable of segments. This class may be useful if
+ you want to print segments outside of a __rich_console__ method.
+
+ Args:
+ segments (Iterable[Segment]): An iterable of segments.
+ new_lines (bool, optional): Add new lines between segments. Defaults to False.
+ """
+
+ def __init__(self, segments: Iterable[Segment], new_lines: bool = False) -> None:
+ self.segments = list(segments)
+ self.new_lines = new_lines
+
+ def __rich_console__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "RenderResult":
+ if self.new_lines:
+ line = Segment.line()
+ for segment in self.segments:
+ yield segment
+ yield line
+ else:
+ yield from self.segments
+
+
+class SegmentLines:
+ def __init__(self, lines: Iterable[List[Segment]], new_lines: bool = False) -> None:
+ """A simple renderable containing a number of lines of segments. May be used as an intermediate
+ in the rendering process.
+
+ Args:
+ lines (Iterable[List[Segment]]): Lists of segments forming lines.
+ new_lines (bool, optional): Insert new lines after each line. Defaults to False.
+ """
+ self.lines = list(lines)
+ self.new_lines = new_lines
+
+ def __rich_console__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "RenderResult":
+ if self.new_lines:
+ new_line = Segment.line()
+ for line in self.lines:
+ yield from line
+ yield new_line
+ else:
+ for line in self.lines:
+ yield from line
+
+
+if __name__ == "__main__":
+
+ if __name__ == "__main__": # pragma: no cover
+ from pip._vendor.rich.console import Console
+ from pip._vendor.rich.syntax import Syntax
+ from pip._vendor.rich.text import Text
+
+ code = """from rich.console import Console
+ console = Console()
+ text = Text.from_markup("Hello, [bold magenta]World[/]!")
+ console.print(text)"""
+
+ text = Text.from_markup("Hello, [bold magenta]World[/]!")
+
+ console = Console()
+
+ console.rule("rich.Segment")
+ console.print(
+ "A Segment is the last step in the Rich render process before generating text with ANSI codes."
+ )
+ console.print("\nConsider the following code:\n")
+ console.print(Syntax(code, "python", line_numbers=True))
+ console.print()
+ console.print(
+ "When you call [b]print()[/b], Rich [i]renders[/i] the object in to the the following:\n"
+ )
+ fragments = list(console.render(text))
+ console.print(fragments)
+ console.print()
+ console.print(
+ "The Segments are then processed to produce the following output:\n"
+ )
+ console.print(text)
+ console.print(
+ "\nYou will only need to know this if you are implementing your own Rich renderables."
+ )
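A short sketch of the `Segment` helpers above: `simplify` merges adjacent segments with the same style, and `split_cells` cuts at a display-cell boundary (illustrative only):

```python
from pip._vendor.rich.segment import Segment
from pip._vendor.rich.style import Style

bold = Style(bold=True)
segments = [Segment("Hello ", bold), Segment("World", bold), Segment("!")]

# Adjacent segments sharing a style are combined into one.
print(list(Segment.simplify(segments)))

# Split a segment after 4 display cells: "Pika" / "chu".
left, right = Segment("Pikachu", bold).split_cells(4)
print(left.text, "|", right.text)
```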
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/spinner.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/spinner.py
new file mode 100644
index 0000000..5b13b1e
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/spinner.py
@@ -0,0 +1,134 @@
+from typing import cast, List, Optional, TYPE_CHECKING
+
+from ._spinners import SPINNERS
+from .measure import Measurement
+from .table import Table
+from .text import Text
+
+if TYPE_CHECKING:
+ from .console import Console, ConsoleOptions, RenderResult, RenderableType
+ from .style import StyleType
+
+
+class Spinner:
+ def __init__(
+ self,
+ name: str,
+ text: "RenderableType" = "",
+ *,
+ style: Optional["StyleType"] = None,
+ speed: float = 1.0,
+ ) -> None:
+ """A spinner animation.
+
+ Args:
+ name (str): Name of spinner (run python -m rich.spinner).
+ text (RenderableType, optional): A renderable to display at the right of the spinner (str or Text typically). Defaults to "".
+ style (StyleType, optional): Style for spinner animation. Defaults to None.
+ speed (float, optional): Speed factor for animation. Defaults to 1.0.
+
+ Raises:
+ KeyError: If name isn't one of the supported spinner animations.
+ """
+ try:
+ spinner = SPINNERS[name]
+ except KeyError:
+ raise KeyError(f"no spinner called {name!r}")
+ self.text = Text.from_markup(text) if isinstance(text, str) else text
+ self.frames = cast(List[str], spinner["frames"])[:]
+ self.interval = cast(float, spinner["interval"])
+ self.start_time: Optional[float] = None
+ self.style = style
+ self.speed = speed
+ self.frame_no_offset: float = 0.0
+ self._update_speed = 0.0
+
+ def __rich_console__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "RenderResult":
+ yield self.render(console.get_time())
+
+ def __rich_measure__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> Measurement:
+ text = self.render(0)
+ return Measurement.get(console, options, text)
+
+ def render(self, time: float) -> "RenderableType":
+ """Render the spinner for a given time.
+
+ Args:
+ time (float): Time in seconds.
+
+ Returns:
+ RenderableType: A renderable containing animation frame.
+ """
+ if self.start_time is None:
+ self.start_time = time
+
+ frame_no = ((time - self.start_time) * self.speed) / (
+ self.interval / 1000.0
+ ) + self.frame_no_offset
+ frame = Text(
+ self.frames[int(frame_no) % len(self.frames)], style=self.style or ""
+ )
+
+ if self._update_speed:
+ self.frame_no_offset = frame_no
+ self.start_time = time
+ self.speed = self._update_speed
+ self._update_speed = 0.0
+
+ if not self.text:
+ return frame
+ elif isinstance(self.text, (str, Text)):
+ return Text.assemble(frame, " ", self.text)
+ else:
+ table = Table.grid(padding=1)
+ table.add_row(frame, self.text)
+ return table
+
+ def update(
+ self,
+ *,
+ text: "RenderableType" = "",
+ style: Optional["StyleType"] = None,
+ speed: Optional[float] = None,
+ ) -> None:
+ """Updates attributes of a spinner after it has been started.
+
+ Args:
+ text (RenderableType, optional): A renderable to display at the right of the spinner (str or Text typically). Defaults to "".
+ style (StyleType, optional): Style for spinner animation. Defaults to None.
+ speed (float, optional): Speed factor for animation. Defaults to None.
+ """
+ if text:
+ self.text = Text.from_markup(text) if isinstance(text, str) else text
+ if style:
+ self.style = style
+ if speed:
+ self._update_speed = speed
+
+
+if __name__ == "__main__": # pragma: no cover
+ from time import sleep
+
+ from .columns import Columns
+ from .panel import Panel
+ from .live import Live
+
+ all_spinners = Columns(
+ [
+ Spinner(spinner_name, text=Text(repr(spinner_name), style="green"))
+ for spinner_name in sorted(SPINNERS.keys())
+ ],
+ column_first=True,
+ expand=True,
+ )
+
+ with Live(
+ Panel(all_spinners, title="Spinners", border_style="blue"),
+ refresh_per_second=20,
+ ) as live:
+ while True:
+ sleep(0.1)
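A brief sketch of driving the `Spinner` above with `Live`, updating its text while it animates (illustrative only; the status messages are made up):

```python
from time import sleep

from pip._vendor.rich.live import Live
from pip._vendor.rich.spinner import Spinner

spinner = Spinner("dots", text="Downloading replay...")
with Live(spinner, refresh_per_second=20):
    sleep(2)
    spinner.update(text="Parsing turns...")  # change the text after the animation has started
    sleep(2)
```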
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/status.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/status.py
new file mode 100644
index 0000000..09eff40
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/status.py
@@ -0,0 +1,132 @@
+from types import TracebackType
+from typing import Optional, Type
+
+from .console import Console, RenderableType
+from .jupyter import JupyterMixin
+from .live import Live
+from .spinner import Spinner
+from .style import StyleType
+
+
+class Status(JupyterMixin):
+ """Displays a status indicator with a 'spinner' animation.
+
+ Args:
+ status (RenderableType): A status renderable (str or Text typically).
+ console (Console, optional): Console instance to use, or None for global console. Defaults to None.
+ spinner (str, optional): Name of spinner animation (see python -m rich.spinner). Defaults to "dots".
+ spinner_style (StyleType, optional): Style of spinner. Defaults to "status.spinner".
+ speed (float, optional): Speed factor for spinner animation. Defaults to 1.0.
+ refresh_per_second (float, optional): Number of refreshes per second. Defaults to 12.5.
+ """
+
+ def __init__(
+ self,
+ status: RenderableType,
+ *,
+ console: Optional[Console] = None,
+ spinner: str = "dots",
+ spinner_style: StyleType = "status.spinner",
+ speed: float = 1.0,
+ refresh_per_second: float = 12.5,
+ ):
+ self.status = status
+ self.spinner_style = spinner_style
+ self.speed = speed
+ self._spinner = Spinner(spinner, text=status, style=spinner_style, speed=speed)
+ self._live = Live(
+ self.renderable,
+ console=console,
+ refresh_per_second=refresh_per_second,
+ transient=True,
+ )
+
+ @property
+ def renderable(self) -> Spinner:
+ return self._spinner
+
+ @property
+ def console(self) -> "Console":
+ """Get the Console used by the Status objects."""
+ return self._live.console
+
+ def update(
+ self,
+ status: Optional[RenderableType] = None,
+ *,
+ spinner: Optional[str] = None,
+ spinner_style: Optional[StyleType] = None,
+ speed: Optional[float] = None,
+ ) -> None:
+ """Update status.
+
+ Args:
+ status (Optional[RenderableType], optional): New status renderable or None for no change. Defaults to None.
+ spinner (Optional[str], optional): New spinner or None for no change. Defaults to None.
+ spinner_style (Optional[StyleType], optional): New spinner style or None for no change. Defaults to None.
+ speed (Optional[float], optional): Speed factor for spinner animation or None for no change. Defaults to None.
+ """
+ if status is not None:
+ self.status = status
+ if spinner_style is not None:
+ self.spinner_style = spinner_style
+ if speed is not None:
+ self.speed = speed
+ if spinner is not None:
+ self._spinner = Spinner(
+ spinner, text=self.status, style=self.spinner_style, speed=self.speed
+ )
+ self._live.update(self.renderable, refresh=True)
+ else:
+ self._spinner.update(
+ text=self.status, style=self.spinner_style, speed=self.speed
+ )
+
+ def start(self) -> None:
+ """Start the status animation."""
+ self._live.start()
+
+ def stop(self) -> None:
+ """Stop the spinner animation."""
+ self._live.stop()
+
+ def __rich__(self) -> RenderableType:
+ return self.renderable
+
+ def __enter__(self) -> "Status":
+ self.start()
+ return self
+
+ def __exit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc_val: Optional[BaseException],
+ exc_tb: Optional[TracebackType],
+ ) -> None:
+ self.stop()
+
+
+if __name__ == "__main__": # pragma: no cover
+
+ from time import sleep
+
+ from .console import Console
+
+ console = Console()
+ with console.status("[magenta]Covid detector booting up") as status:
+ sleep(3)
+ console.log("Importing advanced AI")
+ sleep(3)
+ console.log("Advanced Covid AI Ready")
+ sleep(3)
+ status.update(status="[bold blue] Scanning for Covid", spinner="earth")
+ sleep(3)
+ console.log("Found 10,000,000,000 copies of Covid32.exe")
+ sleep(3)
+ status.update(
+ status="[bold red]Moving Covid32.exe to Trash",
+ spinner="bouncingBall",
+ spinner_style="yellow",
+ )
+ sleep(5)
+ console.print("[bold green]Covid deleted successfully")
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/style.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/style.py
new file mode 100644
index 0000000..0787c33
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/style.py
@@ -0,0 +1,785 @@
+import sys
+from functools import lru_cache
+from marshal import loads, dumps
+from random import randint
+from typing import Any, cast, Dict, Iterable, List, Optional, Type, Union
+
+from . import errors
+from .color import Color, ColorParseError, ColorSystem, blend_rgb
+from .repr import rich_repr, Result
+from .terminal_theme import DEFAULT_TERMINAL_THEME, TerminalTheme
+
+
+# Style instances and style definitions are often interchangeable
+StyleType = Union[str, "Style"]
+
+
+class _Bit:
+ """A descriptor to get/set a style attribute bit."""
+
+ __slots__ = ["bit"]
+
+ def __init__(self, bit_no: int) -> None:
+ self.bit = 1 << bit_no
+
+ def __get__(self, obj: "Style", objtype: Type["Style"]) -> Optional[bool]:
+ if obj._set_attributes & self.bit:
+ return obj._attributes & self.bit != 0
+ return None
+
+
+@rich_repr
+class Style:
+ """A terminal style.
+
+ A terminal style consists of a color (`color`), a background color (`bgcolor`), and a number of attributes, such
+ as bold, italic etc. The attributes have 3 states: they can either be on
+ (``True``), off (``False``), or not set (``None``).
+
+ Args:
+ color (Union[Color, str], optional): Color of terminal text. Defaults to None.
+ bgcolor (Union[Color, str], optional): Color of terminal background. Defaults to None.
+ bold (bool, optional): Enable bold text. Defaults to None.
+ dim (bool, optional): Enable dim text. Defaults to None.
+ italic (bool, optional): Enable italic text. Defaults to None.
+ underline (bool, optional): Enable underlined text. Defaults to None.
+ blink (bool, optional): Enable blinking text. Defaults to None.
+ blink2 (bool, optional): Enable fast blinking text. Defaults to None.
+ reverse (bool, optional): Enable reverse text. Defaults to None.
+ conceal (bool, optional): Enable concealed text. Defaults to None.
+ strike (bool, optional): Enable strikethrough text. Defaults to None.
+ underline2 (bool, optional): Enable doubly underlined text. Defaults to None.
+ frame (bool, optional): Enable framed text. Defaults to None.
+ encircle (bool, optional): Enable encircled text. Defaults to None.
+ overline (bool, optional): Enable overlined text. Defaults to None.
+ link (str, optional): Link URL. Defaults to None.
+
+ """
+
+ _color: Optional[Color]
+ _bgcolor: Optional[Color]
+ _attributes: int
+ _set_attributes: int
+ _hash: int
+ _null: bool
+ _meta: Optional[bytes]
+
+ __slots__ = [
+ "_color",
+ "_bgcolor",
+ "_attributes",
+ "_set_attributes",
+ "_link",
+ "_link_id",
+ "_ansi",
+ "_style_definition",
+ "_hash",
+ "_null",
+ "_meta",
+ ]
+
+ # maps bits on to SGR parameter
+ _style_map = {
+ 0: "1",
+ 1: "2",
+ 2: "3",
+ 3: "4",
+ 4: "5",
+ 5: "6",
+ 6: "7",
+ 7: "8",
+ 8: "9",
+ 9: "21",
+ 10: "51",
+ 11: "52",
+ 12: "53",
+ }
+
+ STYLE_ATTRIBUTES = {
+ "dim": "dim",
+ "d": "dim",
+ "bold": "bold",
+ "b": "bold",
+ "italic": "italic",
+ "i": "italic",
+ "underline": "underline",
+ "u": "underline",
+ "blink": "blink",
+ "blink2": "blink2",
+ "reverse": "reverse",
+ "r": "reverse",
+ "conceal": "conceal",
+ "c": "conceal",
+ "strike": "strike",
+ "s": "strike",
+ "underline2": "underline2",
+ "uu": "underline2",
+ "frame": "frame",
+ "encircle": "encircle",
+ "overline": "overline",
+ "o": "overline",
+ }
+
+ def __init__(
+ self,
+ *,
+ color: Optional[Union[Color, str]] = None,
+ bgcolor: Optional[Union[Color, str]] = None,
+ bold: Optional[bool] = None,
+ dim: Optional[bool] = None,
+ italic: Optional[bool] = None,
+ underline: Optional[bool] = None,
+ blink: Optional[bool] = None,
+ blink2: Optional[bool] = None,
+ reverse: Optional[bool] = None,
+ conceal: Optional[bool] = None,
+ strike: Optional[bool] = None,
+ underline2: Optional[bool] = None,
+ frame: Optional[bool] = None,
+ encircle: Optional[bool] = None,
+ overline: Optional[bool] = None,
+ link: Optional[str] = None,
+ meta: Optional[Dict[str, Any]] = None,
+ ):
+ self._ansi: Optional[str] = None
+ self._style_definition: Optional[str] = None
+
+ def _make_color(color: Union[Color, str]) -> Color:
+ return color if isinstance(color, Color) else Color.parse(color)
+
+ self._color = None if color is None else _make_color(color)
+ self._bgcolor = None if bgcolor is None else _make_color(bgcolor)
+ self._set_attributes = sum(
+ (
+ bold is not None,
+ dim is not None and 2,
+ italic is not None and 4,
+ underline is not None and 8,
+ blink is not None and 16,
+ blink2 is not None and 32,
+ reverse is not None and 64,
+ conceal is not None and 128,
+ strike is not None and 256,
+ underline2 is not None and 512,
+ frame is not None and 1024,
+ encircle is not None and 2048,
+ overline is not None and 4096,
+ )
+ )
+ self._attributes = (
+ sum(
+ (
+ bold and 1 or 0,
+ dim and 2 or 0,
+ italic and 4 or 0,
+ underline and 8 or 0,
+ blink and 16 or 0,
+ blink2 and 32 or 0,
+ reverse and 64 or 0,
+ conceal and 128 or 0,
+ strike and 256 or 0,
+ underline2 and 512 or 0,
+ frame and 1024 or 0,
+ encircle and 2048 or 0,
+ overline and 4096 or 0,
+ )
+ )
+ if self._set_attributes
+ else 0
+ )
+
+ self._link = link
+ self._link_id = f"{randint(0, 999999)}" if link else ""
+ self._meta = None if meta is None else dumps(meta)
+ self._hash = hash(
+ (
+ self._color,
+ self._bgcolor,
+ self._attributes,
+ self._set_attributes,
+ link,
+ self._meta,
+ )
+ )
+ self._null = not (self._set_attributes or color or bgcolor or link or meta)
+
+ @classmethod
+ def null(cls) -> "Style":
+ """Create an 'null' style, equivalent to Style(), but more performant."""
+ return NULL_STYLE
+
+ @classmethod
+ def from_color(
+ cls, color: Optional[Color] = None, bgcolor: Optional[Color] = None
+ ) -> "Style":
+ """Create a new style with colors and no attributes.
+
+ Args:
+ color (Optional[Color]): A (foreground) color, or None for no color. Defaults to None.
+ bgcolor (Optional[Color]): A (background) color, or None for no color. Defaults to None.
+
+ Returns:
+ Style: A new Style with the given colors and no attributes.
+ """
+ style: Style = cls.__new__(Style)
+ style._ansi = None
+ style._style_definition = None
+ style._color = color
+ style._bgcolor = bgcolor
+ style._set_attributes = 0
+ style._attributes = 0
+ style._link = None
+ style._link_id = ""
+ style._meta = None
+ style._hash = hash(
+ (
+ color,
+ bgcolor,
+ None,
+ None,
+ None,
+ None,
+ )
+ )
+ style._null = not (color or bgcolor)
+ return style
+
+ @classmethod
+ def from_meta(cls, meta: Optional[Dict[str, Any]]) -> "Style":
+ """Create a new style with meta data.
+
+ Args:
+ meta (Optional[Dict[str, Any]]): A dictionary of meta data. Defaults to None.
+
+ Returns:
+ Style: A new Style with the given meta data.
+ """
+ style: Style = cls.__new__(Style)
+ style._ansi = None
+ style._style_definition = None
+ style._color = None
+ style._bgcolor = None
+ style._set_attributes = 0
+ style._attributes = 0
+ style._link = None
+ style._link_id = ""
+ style._meta = dumps(meta)
+ style._hash = hash(
+ (
+ None,
+ None,
+ None,
+ None,
+ None,
+ style._meta,
+ )
+ )
+ style._null = not (meta)
+ return style
+
+ @classmethod
+ def on(cls, meta: Optional[Dict[str, Any]] = None, **handlers: Any) -> "Style":
+ """Create a blank style with meta information.
+
+ Example:
+ style = Style.on(click=self.on_click)
+
+ Args:
+ meta (Optional[Dict[str, Any]], optional): An optional dict of meta information.
+ **handlers (Any): Keyword arguments are translated in to handlers.
+
+ Returns:
+ Style: A Style with meta information attached.
+ """
+ meta = {} if meta is None else meta
+ meta.update({f"@{key}": value for key, value in handlers.items()})
+ return cls.from_meta(meta)
+
+ bold = _Bit(0)
+ dim = _Bit(1)
+ italic = _Bit(2)
+ underline = _Bit(3)
+ blink = _Bit(4)
+ blink2 = _Bit(5)
+ reverse = _Bit(6)
+ conceal = _Bit(7)
+ strike = _Bit(8)
+ underline2 = _Bit(9)
+ frame = _Bit(10)
+ encircle = _Bit(11)
+ overline = _Bit(12)
+
+ @property
+ def link_id(self) -> str:
+ """Get a link id, used in ansi code for links."""
+ return self._link_id
+
+ def __str__(self) -> str:
+ """Re-generate style definition from attributes."""
+ if self._style_definition is None:
+ attributes: List[str] = []
+ append = attributes.append
+ bits = self._set_attributes
+ if bits & 0b0000000001111:
+ if bits & 1:
+ append("bold" if self.bold else "not bold")
+ if bits & (1 << 1):
+ append("dim" if self.dim else "not dim")
+ if bits & (1 << 2):
+ append("italic" if self.italic else "not italic")
+ if bits & (1 << 3):
+ append("underline" if self.underline else "not underline")
+ if bits & 0b0000111110000:
+ if bits & (1 << 4):
+ append("blink" if self.blink else "not blink")
+ if bits & (1 << 5):
+ append("blink2" if self.blink2 else "not blink2")
+ if bits & (1 << 6):
+ append("reverse" if self.reverse else "not reverse")
+ if bits & (1 << 7):
+ append("conceal" if self.conceal else "not conceal")
+ if bits & (1 << 8):
+ append("strike" if self.strike else "not strike")
+ if bits & 0b1111000000000:
+ if bits & (1 << 9):
+ append("underline2" if self.underline2 else "not underline2")
+ if bits & (1 << 10):
+ append("frame" if self.frame else "not frame")
+ if bits & (1 << 11):
+ append("encircle" if self.encircle else "not encircle")
+ if bits & (1 << 12):
+ append("overline" if self.overline else "not overline")
+ if self._color is not None:
+ append(self._color.name)
+ if self._bgcolor is not None:
+ append("on")
+ append(self._bgcolor.name)
+ if self._link:
+ append("link")
+ append(self._link)
+ self._style_definition = " ".join(attributes) or "none"
+ return self._style_definition
+
+ def __bool__(self) -> bool:
+ """A Style is false if it has no attributes, colors, or links."""
+ return not self._null
+
+ def _make_ansi_codes(self, color_system: ColorSystem) -> str:
+ """Generate ANSI codes for this style.
+
+ Args:
+ color_system (ColorSystem): Color system.
+
+ Returns:
+ str: String containing codes.
+ """
+ if self._ansi is None:
+ sgr: List[str] = []
+ append = sgr.append
+ _style_map = self._style_map
+ attributes = self._attributes & self._set_attributes
+ if attributes:
+ if attributes & 1:
+ append(_style_map[0])
+ if attributes & 2:
+ append(_style_map[1])
+ if attributes & 4:
+ append(_style_map[2])
+ if attributes & 8:
+ append(_style_map[3])
+ if attributes & 0b0000111110000:
+ for bit in range(4, 9):
+ if attributes & (1 << bit):
+ append(_style_map[bit])
+ if attributes & 0b1111000000000:
+ for bit in range(9, 13):
+ if attributes & (1 << bit):
+ append(_style_map[bit])
+ if self._color is not None:
+ sgr.extend(self._color.downgrade(color_system).get_ansi_codes())
+ if self._bgcolor is not None:
+ sgr.extend(
+ self._bgcolor.downgrade(color_system).get_ansi_codes(
+ foreground=False
+ )
+ )
+ self._ansi = ";".join(sgr)
+ return self._ansi
+
+ @classmethod
+ @lru_cache(maxsize=1024)
+ def normalize(cls, style: str) -> str:
+ """Normalize a style definition so that styles with the same effect have the same string
+ representation.
+
+ Args:
+ style (str): A style definition.
+
+ Returns:
+ str: Normal form of style definition.
+ """
+ try:
+ return str(cls.parse(style))
+ except errors.StyleSyntaxError:
+ return style.strip().lower()
+
+ @classmethod
+ def pick_first(cls, *values: Optional[StyleType]) -> StyleType:
+ """Pick first non-None style."""
+ for value in values:
+ if value is not None:
+ return value
+ raise ValueError("expected at least one non-None style")
+
+ def __rich_repr__(self) -> Result:
+ yield "color", self.color, None
+ yield "bgcolor", self.bgcolor, None
+ yield "bold", self.bold, None,
+ yield "dim", self.dim, None,
+ yield "italic", self.italic, None
+ yield "underline", self.underline, None,
+ yield "blink", self.blink, None
+ yield "blink2", self.blink2, None
+ yield "reverse", self.reverse, None
+ yield "conceal", self.conceal, None
+ yield "strike", self.strike, None
+ yield "underline2", self.underline2, None
+ yield "frame", self.frame, None
+ yield "encircle", self.encircle, None
+ yield "link", self.link, None
+ if self._meta:
+ yield "meta", self.meta
+
+ def __eq__(self, other: Any) -> bool:
+ if not isinstance(other, Style):
+ return NotImplemented
+ return (
+ self._color == other._color
+ and self._bgcolor == other._bgcolor
+ and self._set_attributes == other._set_attributes
+ and self._attributes == other._attributes
+ and self._link == other._link
+ and self._meta == other._meta
+ )
+
+ def __hash__(self) -> int:
+ return self._hash
+
+ @property
+ def color(self) -> Optional[Color]:
+ """The foreground color or None if it is not set."""
+ return self._color
+
+ @property
+ def bgcolor(self) -> Optional[Color]:
+ """The background color or None if it is not set."""
+ return self._bgcolor
+
+ @property
+ def link(self) -> Optional[str]:
+ """Link text, if set."""
+ return self._link
+
+ @property
+ def transparent_background(self) -> bool:
+ """Check if the style specified a transparent background."""
+ return self.bgcolor is None or self.bgcolor.is_default
+
+ @property
+ def background_style(self) -> "Style":
+ """A Style with background only."""
+ return Style(bgcolor=self.bgcolor)
+
+ @property
+ def meta(self) -> Dict[str, Any]:
+ """Get meta information (can not be changed after construction)."""
+ return {} if self._meta is None else cast(Dict[str, Any], loads(self._meta))
+
+ @property
+ def without_color(self) -> "Style":
+ """Get a copy of the style with color removed."""
+ if self._null:
+ return NULL_STYLE
+ style: Style = self.__new__(Style)
+ style._ansi = None
+ style._style_definition = None
+ style._color = None
+ style._bgcolor = None
+ style._attributes = self._attributes
+ style._set_attributes = self._set_attributes
+ style._link = self._link
+ style._link_id = f"{randint(0, 999999)}" if self._link else ""
+ style._hash = self._hash
+ style._null = False
+ style._meta = None
+ return style
+
+ @classmethod
+ @lru_cache(maxsize=4096)
+ def parse(cls, style_definition: str) -> "Style":
+ """Parse a style definition.
+
+ Args:
+ style_definition (str): A string containing a style.
+
+ Raises:
+ errors.StyleSyntaxError: If the style definition syntax is invalid.
+
+ Returns:
+ `Style`: A Style instance.
+ """
+ if style_definition.strip() == "none" or not style_definition:
+ return cls.null()
+
+ STYLE_ATTRIBUTES = cls.STYLE_ATTRIBUTES
+ color: Optional[str] = None
+ bgcolor: Optional[str] = None
+ attributes: Dict[str, Optional[Any]] = {}
+ link: Optional[str] = None
+
+ words = iter(style_definition.split())
+ for original_word in words:
+ word = original_word.lower()
+ if word == "on":
+ word = next(words, "")
+ if not word:
+ raise errors.StyleSyntaxError("color expected after 'on'")
+ try:
+ Color.parse(word)
+ except ColorParseError as error:
+ raise errors.StyleSyntaxError(
+ f"unable to parse {word!r} as background color; {error}"
+ ) from None
+ bgcolor = word
+
+ elif word == "not":
+ word = next(words, "")
+ attribute = STYLE_ATTRIBUTES.get(word)
+ if attribute is None:
+ raise errors.StyleSyntaxError(
+ f"expected style attribute after 'not', found {word!r}"
+ )
+ attributes[attribute] = False
+
+ elif word == "link":
+ word = next(words, "")
+ if not word:
+ raise errors.StyleSyntaxError("URL expected after 'link'")
+ link = word
+
+ elif word in STYLE_ATTRIBUTES:
+ attributes[STYLE_ATTRIBUTES[word]] = True
+
+ else:
+ try:
+ Color.parse(word)
+ except ColorParseError as error:
+ raise errors.StyleSyntaxError(
+ f"unable to parse {word!r} as color; {error}"
+ ) from None
+ color = word
+ style = Style(color=color, bgcolor=bgcolor, link=link, **attributes)
+ return style
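+
+ # Illustrative usage of the style mini-language parsed above (assumption):
+ #   Style.parse("bold red on white")                 # attributes, color, background
+ #   Style.parse("not dim link https://example.org")  # negated attribute plus a link
+ # Unknown words that are neither attributes nor colors raise StyleSyntaxError.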
+
+ @lru_cache(maxsize=1024)
+ def get_html_style(self, theme: Optional[TerminalTheme] = None) -> str:
+ """Get a CSS style rule."""
+ theme = theme or DEFAULT_TERMINAL_THEME
+ css: List[str] = []
+ append = css.append
+
+ color = self.color
+ bgcolor = self.bgcolor
+ if self.reverse:
+ color, bgcolor = bgcolor, color
+ if self.dim:
+ foreground_color = (
+ theme.foreground_color if color is None else color.get_truecolor(theme)
+ )
+ color = Color.from_triplet(
+ blend_rgb(foreground_color, theme.background_color, 0.5)
+ )
+ if color is not None:
+ theme_color = color.get_truecolor(theme)
+ append(f"color: {theme_color.hex}")
+ append(f"text-decoration-color: {theme_color.hex}")
+ if bgcolor is not None:
+ theme_color = bgcolor.get_truecolor(theme, foreground=False)
+ append(f"background-color: {theme_color.hex}")
+ if self.bold:
+ append("font-weight: bold")
+ if self.italic:
+ append("font-style: italic")
+ if self.underline:
+ append("text-decoration: underline")
+ if self.strike:
+ append("text-decoration: line-through")
+ if self.overline:
+ append("text-decoration: overline")
+ return "; ".join(css)
+
+ @classmethod
+ def combine(cls, styles: Iterable["Style"]) -> "Style":
+ """Combine styles and get result.
+
+ Args:
+ styles (Iterable[Style]): Styles to combine.
+
+ Returns:
+ Style: A new style instance.
+ """
+ iter_styles = iter(styles)
+ return sum(iter_styles, next(iter_styles))
+
+ @classmethod
+ def chain(cls, *styles: "Style") -> "Style":
+ """Combine styles from positional argument in to a single style.
+
+ Args:
+ *styles (Iterable[Style]): Styles to combine.
+
+ Returns:
+ Style: A new style instance.
+ """
+ iter_styles = iter(styles)
+ return sum(iter_styles, next(iter_styles))
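+
+ # Illustrative note (assumption): combine() and chain() both fold styles with
+ # Style.__add__, so later styles win for any attribute they explicitly set, e.g.
+ #   Style.chain(Style(color="red"), Style(bold=True)) ~ Style.parse("bold red")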
+
+ def copy(self) -> "Style":
+ """Get a copy of this style.
+
+ Returns:
+ Style: A new Style instance with identical attributes.
+ """
+ if self._null:
+ return NULL_STYLE
+ style: Style = self.__new__(Style)
+ style._ansi = self._ansi
+ style._style_definition = self._style_definition
+ style._color = self._color
+ style._bgcolor = self._bgcolor
+ style._attributes = self._attributes
+ style._set_attributes = self._set_attributes
+ style._link = self._link
+ style._link_id = f"{randint(0, 999999)}" if self._link else ""
+ style._hash = self._hash
+ style._null = False
+ style._meta = self._meta
+ return style
+
+ def update_link(self, link: Optional[str] = None) -> "Style":
+ """Get a copy with a different value for link.
+
+ Args:
+ link (str, optional): New value for link. Defaults to None.
+
+ Returns:
+ Style: A new Style instance.
+ """
+ style: Style = self.__new__(Style)
+ style._ansi = self._ansi
+ style._style_definition = self._style_definition
+ style._color = self._color
+ style._bgcolor = self._bgcolor
+ style._attributes = self._attributes
+ style._set_attributes = self._set_attributes
+ style._link = link
+ style._link_id = f"{randint(0, 999999)}" if link else ""
+ style._hash = self._hash
+ style._null = False
+ style._meta = self._meta
+ return style
+
+ def render(
+ self,
+ text: str = "",
+ *,
+ color_system: Optional[ColorSystem] = ColorSystem.TRUECOLOR,
+ legacy_windows: bool = False,
+ ) -> str:
+ """Render the ANSI codes for the style.
+
+ Args:
+ text (str, optional): A string to style. Defaults to "".
+ color_system (Optional[ColorSystem], optional): Color system to render to. Defaults to ColorSystem.TRUECOLOR.
+
+ Returns:
+ str: A string containing ANSI style codes.
+ """
+ if not text or color_system is None:
+ return text
+ attrs = self._make_ansi_codes(color_system)
+ rendered = f"\x1b[{attrs}m{text}\x1b[0m" if attrs else text
+ if self._link and not legacy_windows:
+ rendered = (
+ f"\x1b]8;id={self._link_id};{self._link}\x1b\\{rendered}\x1b]8;;\x1b\\"
+ )
+ return rendered
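+
+ # Illustrative example (assumption, not part of the vendored source):
+ #   Style(bold=True, color="red").render("hi")
+ # is expected to yield something like "\x1b[1;31mhi\x1b[0m" -- SGR codes before
+ # the text and a reset (\x1b[0m) after it; a link adds OSC 8 escape sequences.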
+
+ def test(self, text: Optional[str] = None) -> None:
+ """Write text with style directly to terminal.
+
+ This method is for testing purposes only.
+
+ Args:
+ text (Optional[str], optional): Text to style or None for style name.
+
+ """
+ text = text or str(self)
+ sys.stdout.write(f"{self.render(text)}\n")
+
+ def __add__(self, style: Optional["Style"]) -> "Style":
+ if not (isinstance(style, Style) or style is None):
+ return NotImplemented
+ if style is None or style._null:
+ return self
+ if self._null:
+ return style
+ new_style: Style = self.__new__(Style)
+ new_style._ansi = None
+ new_style._style_definition = None
+ new_style._color = style._color or self._color
+ new_style._bgcolor = style._bgcolor or self._bgcolor
+ new_style._attributes = (self._attributes & ~style._set_attributes) | (
+ style._attributes & style._set_attributes
+ )
+ new_style._set_attributes = self._set_attributes | style._set_attributes
+ new_style._link = style._link or self._link
+ new_style._link_id = style._link_id or self._link_id
+ new_style._hash = style._hash
+ new_style._null = self._null or style._null
+ if self._meta and style._meta:
+ new_style._meta = dumps({**self.meta, **style.meta})
+ else:
+ new_style._meta = self._meta or style._meta
+ return new_style
+
+
+NULL_STYLE = Style()
+
+
+class StyleStack:
+ """A stack of styles."""
+
+ __slots__ = ["_stack"]
+
+ def __init__(self, default_style: "Style") -> None:
+ self._stack: List[Style] = [default_style]
+
+ def __repr__(self) -> str:
+ return f""
+
+ @property
+ def current(self) -> Style:
+ """Get the Style at the top of the stack."""
+ return self._stack[-1]
+
+ def push(self, style: Style) -> None:
+ """Push a new style on to the stack.
+
+ Args:
+ style (Style): New style to combine with current style.
+ """
+ self._stack.append(self._stack[-1] + style)
+
+ def pop(self) -> Style:
+ """Pop last style and discard.
+
+ Returns:
+ Style: New current style (also available as stack.current)
+ """
+ self._stack.pop()
+ return self._stack[-1]
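+
+ # Illustrative usage of StyleStack (assumption): a renderer pushes a style when a
+ # markup tag opens and pops when it closes, so `current` is always the composed style:
+ #   stack = StyleStack(Style())
+ #   stack.push(Style(bold=True))   # current is now bold
+ #   stack.pop()                    # back to the default style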
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/styled.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/styled.py
new file mode 100644
index 0000000..91cd0db
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/styled.py
@@ -0,0 +1,42 @@
+from typing import TYPE_CHECKING
+
+from .measure import Measurement
+from .segment import Segment
+from .style import StyleType
+
+if TYPE_CHECKING:
+ from .console import Console, ConsoleOptions, RenderResult, RenderableType
+
+
+class Styled:
+ """Apply a style to a renderable.
+
+ Args:
+ renderable (RenderableType): Any renderable.
+ style (StyleType): A style to apply across the entire renderable.
+ """
+
+ def __init__(self, renderable: "RenderableType", style: "StyleType") -> None:
+ self.renderable = renderable
+ self.style = style
+
+ def __rich_console__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "RenderResult":
+ style = console.get_style(self.style)
+ rendered_segments = console.render(self.renderable, options)
+ segments = Segment.apply_style(rendered_segments, style)
+ return segments
+
+ def __rich_measure__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> Measurement:
+ return Measurement.get(console, options, self.renderable)
+
+
+if __name__ == "__main__": # pragma: no cover
+ from pip._vendor.rich import print
+ from pip._vendor.rich.panel import Panel
+
+ panel = Styled(Panel("hello"), "on blue")
+ print(panel)
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/syntax.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/syntax.py
new file mode 100644
index 0000000..58cc103
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/syntax.py
@@ -0,0 +1,735 @@
+import os.path
+import platform
+from pip._vendor.rich.containers import Lines
+import textwrap
+from abc import ABC, abstractmethod
+from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Type, Union
+
+from pip._vendor.pygments.lexer import Lexer
+from pip._vendor.pygments.lexers import get_lexer_by_name, guess_lexer_for_filename
+from pip._vendor.pygments.style import Style as PygmentsStyle
+from pip._vendor.pygments.styles import get_style_by_name
+from pip._vendor.pygments.token import (
+ Comment,
+ Error,
+ Generic,
+ Keyword,
+ Name,
+ Number,
+ Operator,
+ String,
+ Token,
+ Whitespace,
+)
+from pip._vendor.pygments.util import ClassNotFound
+
+from ._loop import loop_first
+from .color import Color, blend_rgb
+from .console import Console, ConsoleOptions, JustifyMethod, RenderResult
+from .jupyter import JupyterMixin
+from .measure import Measurement
+from .segment import Segment
+from .style import Style
+from .text import Text
+
+TokenType = Tuple[str, ...]
+
+WINDOWS = platform.system() == "Windows"
+DEFAULT_THEME = "monokai"
+
+# The following styles are based on https://github.com/pygments/pygments/blob/master/pygments/formatters/terminal.py
+# A few modifications were made
+
+ANSI_LIGHT: Dict[TokenType, Style] = {
+ Token: Style(),
+ Whitespace: Style(color="white"),
+ Comment: Style(dim=True),
+ Comment.Preproc: Style(color="cyan"),
+ Keyword: Style(color="blue"),
+ Keyword.Type: Style(color="cyan"),
+ Operator.Word: Style(color="magenta"),
+ Name.Builtin: Style(color="cyan"),
+ Name.Function: Style(color="green"),
+ Name.Namespace: Style(color="cyan", underline=True),
+ Name.Class: Style(color="green", underline=True),
+ Name.Exception: Style(color="cyan"),
+ Name.Decorator: Style(color="magenta", bold=True),
+ Name.Variable: Style(color="red"),
+ Name.Constant: Style(color="red"),
+ Name.Attribute: Style(color="cyan"),
+ Name.Tag: Style(color="bright_blue"),
+ String: Style(color="yellow"),
+ Number: Style(color="blue"),
+ Generic.Deleted: Style(color="bright_red"),
+ Generic.Inserted: Style(color="green"),
+ Generic.Heading: Style(bold=True),
+ Generic.Subheading: Style(color="magenta", bold=True),
+ Generic.Prompt: Style(bold=True),
+ Generic.Error: Style(color="bright_red"),
+ Error: Style(color="red", underline=True),
+}
+
+ANSI_DARK: Dict[TokenType, Style] = {
+ Token: Style(),
+ Whitespace: Style(color="bright_black"),
+ Comment: Style(dim=True),
+ Comment.Preproc: Style(color="bright_cyan"),
+ Keyword: Style(color="bright_blue"),
+ Keyword.Type: Style(color="bright_cyan"),
+ Operator.Word: Style(color="bright_magenta"),
+ Name.Builtin: Style(color="bright_cyan"),
+ Name.Function: Style(color="bright_green"),
+ Name.Namespace: Style(color="bright_cyan", underline=True),
+ Name.Class: Style(color="bright_green", underline=True),
+ Name.Exception: Style(color="bright_cyan"),
+ Name.Decorator: Style(color="bright_magenta", bold=True),
+ Name.Variable: Style(color="bright_red"),
+ Name.Constant: Style(color="bright_red"),
+ Name.Attribute: Style(color="bright_cyan"),
+ Name.Tag: Style(color="bright_blue"),
+ String: Style(color="yellow"),
+ Number: Style(color="bright_blue"),
+ Generic.Deleted: Style(color="bright_red"),
+ Generic.Inserted: Style(color="bright_green"),
+ Generic.Heading: Style(bold=True),
+ Generic.Subheading: Style(color="bright_magenta", bold=True),
+ Generic.Prompt: Style(bold=True),
+ Generic.Error: Style(color="bright_red"),
+ Error: Style(color="red", underline=True),
+}
+
+RICH_SYNTAX_THEMES = {"ansi_light": ANSI_LIGHT, "ansi_dark": ANSI_DARK}
+
+
+class SyntaxTheme(ABC):
+ """Base class for a syntax theme."""
+
+ @abstractmethod
+ def get_style_for_token(self, token_type: TokenType) -> Style:
+ """Get a style for a given Pygments token."""
+ raise NotImplementedError # pragma: no cover
+
+ @abstractmethod
+ def get_background_style(self) -> Style:
+ """Get the background color."""
+ raise NotImplementedError # pragma: no cover
+
+
+class PygmentsSyntaxTheme(SyntaxTheme):
+ """Syntax theme that delegates to Pygments theme."""
+
+ def __init__(self, theme: Union[str, Type[PygmentsStyle]]) -> None:
+ self._style_cache: Dict[TokenType, Style] = {}
+ if isinstance(theme, str):
+ try:
+ self._pygments_style_class = get_style_by_name(theme)
+ except ClassNotFound:
+ self._pygments_style_class = get_style_by_name("default")
+ else:
+ self._pygments_style_class = theme
+
+ self._background_color = self._pygments_style_class.background_color
+ self._background_style = Style(bgcolor=self._background_color)
+
+ def get_style_for_token(self, token_type: TokenType) -> Style:
+ """Get a style from a Pygments class."""
+ try:
+ return self._style_cache[token_type]
+ except KeyError:
+ try:
+ pygments_style = self._pygments_style_class.style_for_token(token_type)
+ except KeyError:
+ style = Style.null()
+ else:
+ color = pygments_style["color"]
+ bgcolor = pygments_style["bgcolor"]
+ style = Style(
+ color="#" + color if color else "#000000",
+ bgcolor="#" + bgcolor if bgcolor else self._background_color,
+ bold=pygments_style["bold"],
+ italic=pygments_style["italic"],
+ underline=pygments_style["underline"],
+ )
+ self._style_cache[token_type] = style
+ return style
+
+ def get_background_style(self) -> Style:
+ return self._background_style
+
+
+class ANSISyntaxTheme(SyntaxTheme):
+ """Syntax theme to use standard colors."""
+
+ def __init__(self, style_map: Dict[TokenType, Style]) -> None:
+ self.style_map = style_map
+ self._missing_style = Style.null()
+ self._background_style = Style.null()
+ self._style_cache: Dict[TokenType, Style] = {}
+
+ def get_style_for_token(self, token_type: TokenType) -> Style:
+ """Look up style in the style map."""
+ try:
+ return self._style_cache[token_type]
+ except KeyError:
+ # Styles form a hierarchy
+ # We need to go from most to least specific
+ # e.g. ("foo", "bar", "baz") to ("foo", "bar") to ("foo",)
+ get_style = self.style_map.get
+ token = tuple(token_type)
+ style = self._missing_style
+ while token:
+ _style = get_style(token)
+ if _style is not None:
+ style = _style
+ break
+ token = token[:-1]
+ self._style_cache[token_type] = style
+ return style
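+
+ # Illustrative example (assumption): with ANSI_DARK, a specific token such as
+ # Name.Function.Magic has no exact entry, so the fallback loop above trims it to
+ # Name.Function and returns that bright_green style; a token with no ancestor in
+ # the map gets the null "missing" style.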
+
+ def get_background_style(self) -> Style:
+ return self._background_style
+
+
+class Syntax(JupyterMixin):
+ """Construct a Syntax object to render syntax highlighted code.
+
+ Args:
+ code (str): Code to highlight.
+ lexer (Lexer | str): Lexer to use (see https://pygments.org/docs/lexers/)
+ theme (str, optional): Color theme, aka Pygments style (see https://pygments.org/docs/styles/#getting-a-list-of-available-styles). Defaults to "monokai".
+ dedent (bool, optional): Enable stripping of initial whitespace. Defaults to False.
+ line_numbers (bool, optional): Enable rendering of line numbers. Defaults to False.
+ start_line (int, optional): Starting number for line numbers. Defaults to 1.
+ line_range (Tuple[int, int], optional): If given should be a tuple of the start and end line to render.
+ highlight_lines (Set[int]): A set of line numbers to highlight.
+ code_width: Width of code to render (not including line numbers), or ``None`` to use all available width.
+ tab_size (int, optional): Size of tabs. Defaults to 4.
+ word_wrap (bool, optional): Enable word wrapping.
+ background_color (str, optional): Optional background color, or None to use theme color. Defaults to None.
+ indent_guides (bool, optional): Show indent guides. Defaults to False.
+ """
+
+ _pygments_style_class: Type[PygmentsStyle]
+ _theme: SyntaxTheme
+
+ @classmethod
+ def get_theme(cls, name: Union[str, SyntaxTheme]) -> SyntaxTheme:
+ """Get a syntax theme instance."""
+ if isinstance(name, SyntaxTheme):
+ return name
+ theme: SyntaxTheme
+ if name in RICH_SYNTAX_THEMES:
+ theme = ANSISyntaxTheme(RICH_SYNTAX_THEMES[name])
+ else:
+ theme = PygmentsSyntaxTheme(name)
+ return theme
+
+ def __init__(
+ self,
+ code: str,
+ lexer: Union[Lexer, str],
+ *,
+ theme: Union[str, SyntaxTheme] = DEFAULT_THEME,
+ dedent: bool = False,
+ line_numbers: bool = False,
+ start_line: int = 1,
+ line_range: Optional[Tuple[int, int]] = None,
+ highlight_lines: Optional[Set[int]] = None,
+ code_width: Optional[int] = None,
+ tab_size: int = 4,
+ word_wrap: bool = False,
+ background_color: Optional[str] = None,
+ indent_guides: bool = False,
+ ) -> None:
+ self.code = code
+ self._lexer = lexer
+ self.dedent = dedent
+ self.line_numbers = line_numbers
+ self.start_line = start_line
+ self.line_range = line_range
+ self.highlight_lines = highlight_lines or set()
+ self.code_width = code_width
+ self.tab_size = tab_size
+ self.word_wrap = word_wrap
+ self.background_color = background_color
+ self.background_style = (
+ Style(bgcolor=background_color) if background_color else Style()
+ )
+ self.indent_guides = indent_guides
+
+ self._theme = self.get_theme(theme)
+
+ @classmethod
+ def from_path(
+ cls,
+ path: str,
+ encoding: str = "utf-8",
+ theme: Union[str, SyntaxTheme] = DEFAULT_THEME,
+ dedent: bool = False,
+ line_numbers: bool = False,
+ line_range: Optional[Tuple[int, int]] = None,
+ start_line: int = 1,
+ highlight_lines: Optional[Set[int]] = None,
+ code_width: Optional[int] = None,
+ tab_size: int = 4,
+ word_wrap: bool = False,
+ background_color: Optional[str] = None,
+ indent_guides: bool = False,
+ ) -> "Syntax":
+ """Construct a Syntax object from a file.
+
+ Args:
+ path (str): Path to file to highlight.
+ encoding (str): Encoding of file.
+ theme (str, optional): Color theme, aka Pygments style (see https://pygments.org/docs/styles/#getting-a-list-of-available-styles). Defaults to "monokai".
+ dedent (bool, optional): Enable stripping of initial whitespace. Defaults to False.
+ line_numbers (bool, optional): Enable rendering of line numbers. Defaults to False.
+ start_line (int, optional): Starting number for line numbers. Defaults to 1.
+ line_range (Tuple[int, int], optional): If given should be a tuple of the start and end line to render.
+ highlight_lines (Set[int]): A set of line numbers to highlight.
+ code_width: Width of code to render (not including line numbers), or ``None`` to use all available width.
+ tab_size (int, optional): Size of tabs. Defaults to 4.
+ word_wrap (bool, optional): Enable word wrapping of code.
+ background_color (str, optional): Optional background color, or None to use theme color. Defaults to None.
+ indent_guides (bool, optional): Show indent guides. Defaults to False.
+
+ Returns:
+ Syntax: A Syntax object that may be printed to the console.
+ """
+ with open(path, "rt", encoding=encoding) as code_file:
+ code = code_file.read()
+
+ lexer = None
+ lexer_name = "default"
+ try:
+ _, ext = os.path.splitext(path)
+ if ext:
+ extension = ext.lstrip(".").lower()
+ lexer = get_lexer_by_name(extension)
+ lexer_name = lexer.name
+ except ClassNotFound:
+ pass
+
+ if lexer is None:
+ try:
+ lexer_name = guess_lexer_for_filename(path, code).name
+ except ClassNotFound:
+ pass
+
+ return cls(
+ code,
+ lexer_name,
+ theme=theme,
+ dedent=dedent,
+ line_numbers=line_numbers,
+ line_range=line_range,
+ start_line=start_line,
+ highlight_lines=highlight_lines,
+ code_width=code_width,
+ tab_size=tab_size,
+ word_wrap=word_wrap,
+ background_color=background_color,
+ indent_guides=indent_guides,
+ )
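+
+ # Illustrative usage (assumption, not part of the vendored file): from_path()
+ # picks a lexer from the file extension, falling back to guess_lexer_for_filename,
+ # so Syntax.from_path("setup.py", line_numbers=True) would highlight the file as
+ # Python with a line-number gutter.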
+
+ def _get_base_style(self) -> Style:
+ """Get the base style."""
+ default_style = self._theme.get_background_style() + self.background_style
+ return default_style
+
+ def _get_token_color(self, token_type: TokenType) -> Optional[Color]:
+ """Get a color (if any) for the given token.
+
+ Args:
+ token_type (TokenType): A token type tuple from Pygments.
+
+ Returns:
+ Optional[Color]: Color from theme, or None for no color.
+ """
+ style = self._theme.get_style_for_token(token_type)
+ return style.color
+
+ @property
+ def lexer(self) -> Optional[Lexer]:
+ """The lexer for this syntax, or None if no lexer was found.
+
+ Tries to find the lexer by name if a string was passed to the constructor.
+ """
+
+ if isinstance(self._lexer, Lexer):
+ return self._lexer
+ try:
+ return get_lexer_by_name(
+ self._lexer,
+ stripnl=False,
+ ensurenl=True,
+ tabsize=self.tab_size,
+ )
+ except ClassNotFound:
+ return None
+
+ def highlight(
+ self, code: str, line_range: Optional[Tuple[int, int]] = None
+ ) -> Text:
+ """Highlight code and return a Text instance.
+
+ Args:
+ code (str): Code to highlight.
+ line_range(Tuple[int, int], optional): Optional line range to highlight.
+
+ Returns:
+ Text: A text instance containing highlighted syntax.
+ """
+
+ base_style = self._get_base_style()
+ justify: JustifyMethod = (
+ "default" if base_style.transparent_background else "left"
+ )
+
+ text = Text(
+ justify=justify,
+ style=base_style,
+ tab_size=self.tab_size,
+ no_wrap=not self.word_wrap,
+ )
+ _get_theme_style = self._theme.get_style_for_token
+
+ lexer = self.lexer
+
+ if lexer is None:
+ text.append(code)
+ else:
+ if line_range:
+ # More complicated path to only stylize a portion of the code
+ # This speeds up further operations as there are fewer spans to process
+ line_start, line_end = line_range
+
+ def line_tokenize() -> Iterable[Tuple[Any, str]]:
+ """Split tokens to one per line."""
+ assert lexer
+
+ for token_type, token in lexer.get_tokens(code):
+ while token:
+ line_token, new_line, token = token.partition("\n")
+ yield token_type, line_token + new_line
+
+ def tokens_to_spans() -> Iterable[Tuple[str, Optional[Style]]]:
+ """Convert tokens to spans."""
+ tokens = iter(line_tokenize())
+ line_no = 0
+ _line_start = line_start - 1
+
+ # Skip over tokens until line start
+ while line_no < _line_start:
+ _token_type, token = next(tokens)
+ yield (token, None)
+ if token.endswith("\n"):
+ line_no += 1
+ # Generate spans until line end
+ for token_type, token in tokens:
+ yield (token, _get_theme_style(token_type))
+ if token.endswith("\n"):
+ line_no += 1
+ if line_no >= line_end:
+ break
+
+ text.append_tokens(tokens_to_spans())
+
+ else:
+ text.append_tokens(
+ (token, _get_theme_style(token_type))
+ for token_type, token in lexer.get_tokens(code)
+ )
+ if self.background_color is not None:
+ text.stylize(f"on {self.background_color}")
+ return text
+
+ def _get_line_numbers_color(self, blend: float = 0.3) -> Color:
+ background_style = self._theme.get_background_style() + self.background_style
+ background_color = background_style.bgcolor
+ if background_color is None or background_color.is_system_defined:
+ return Color.default()
+ foreground_color = self._get_token_color(Token.Text)
+ if foreground_color is None or foreground_color.is_system_defined:
+ return foreground_color or Color.default()
+ new_color = blend_rgb(
+ background_color.get_truecolor(),
+ foreground_color.get_truecolor(),
+ cross_fade=blend,
+ )
+ return Color.from_triplet(new_color)
+
+ @property
+ def _numbers_column_width(self) -> int:
+ """Get the number of characters used to render the numbers column."""
+ column_width = 0
+ if self.line_numbers:
+ column_width = len(str(self.start_line + self.code.count("\n"))) + 2
+ return column_width
+
+ def _get_number_styles(self, console: Console) -> Tuple[Style, Style, Style]:
+ """Get background, number, and highlight styles for line numbers."""
+ background_style = self._get_base_style()
+ if background_style.transparent_background:
+ return Style.null(), Style(dim=True), Style.null()
+ if console.color_system in ("256", "truecolor"):
+ number_style = Style.chain(
+ background_style,
+ self._theme.get_style_for_token(Token.Text),
+ Style(color=self._get_line_numbers_color()),
+ self.background_style,
+ )
+ highlight_number_style = Style.chain(
+ background_style,
+ self._theme.get_style_for_token(Token.Text),
+ Style(bold=True, color=self._get_line_numbers_color(0.9)),
+ self.background_style,
+ )
+ else:
+ number_style = background_style + Style(dim=True)
+ highlight_number_style = background_style + Style(dim=False)
+ return background_style, number_style, highlight_number_style
+
+ def __rich_measure__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "Measurement":
+ if self.code_width is not None:
+ width = self.code_width + self._numbers_column_width
+ return Measurement(self._numbers_column_width, width)
+ return Measurement(self._numbers_column_width, options.max_width)
+
+ def __rich_console__(
+ self, console: Console, options: ConsoleOptions
+ ) -> RenderResult:
+
+ transparent_background = self._get_base_style().transparent_background
+ code_width = (
+ (
+ (options.max_width - self._numbers_column_width - 1)
+ if self.line_numbers
+ else options.max_width
+ )
+ if self.code_width is None
+ else self.code_width
+ )
+
+ line_offset = 0
+ if self.line_range:
+ start_line, end_line = self.line_range
+ line_offset = max(0, start_line - 1)
+
+ ends_on_nl = self.code.endswith("\n")
+ code = self.code if ends_on_nl else self.code + "\n"
+ code = textwrap.dedent(code) if self.dedent else code
+ code = code.expandtabs(self.tab_size)
+ text = self.highlight(code, self.line_range)
+
+ (
+ background_style,
+ number_style,
+ highlight_number_style,
+ ) = self._get_number_styles(console)
+
+ if not self.line_numbers and not self.word_wrap and not self.line_range:
+ if not ends_on_nl:
+ text.remove_suffix("\n")
+ # Simple case of just rendering text
+ style = (
+ self._get_base_style()
+ + self._theme.get_style_for_token(Comment)
+ + Style(dim=True)
+ + self.background_style
+ )
+ if self.indent_guides and not options.ascii_only:
+ text = text.with_indent_guides(self.tab_size, style=style)
+ text.overflow = "crop"
+ if style.transparent_background:
+ yield from console.render(
+ text, options=options.update(width=code_width)
+ )
+ else:
+ syntax_lines = console.render_lines(
+ text,
+ options.update(width=code_width, height=None),
+ style=self.background_style,
+ pad=True,
+ new_lines=True,
+ )
+ for syntax_line in syntax_lines:
+ yield from syntax_line
+ return
+
+ lines: Union[List[Text], Lines] = text.split("\n", allow_blank=ends_on_nl)
+ if self.line_range:
+ lines = lines[line_offset:end_line]
+
+ if self.indent_guides and not options.ascii_only:
+ style = (
+ self._get_base_style()
+ + self._theme.get_style_for_token(Comment)
+ + Style(dim=True)
+ + self.background_style
+ )
+ lines = (
+ Text("\n")
+ .join(lines)
+ .with_indent_guides(self.tab_size, style=style)
+ .split("\n", allow_blank=True)
+ )
+
+ numbers_column_width = self._numbers_column_width
+ render_options = options.update(width=code_width)
+
+ highlight_line = self.highlight_lines.__contains__
+ _Segment = Segment
+ padding = _Segment(" " * numbers_column_width + " ", background_style)
+ new_line = _Segment("\n")
+
+ line_pointer = "> " if options.legacy_windows else "❱ "
+
+ for line_no, line in enumerate(lines, self.start_line + line_offset):
+ if self.word_wrap:
+ wrapped_lines = console.render_lines(
+ line,
+ render_options.update(height=None),
+ style=background_style,
+ pad=not transparent_background,
+ )
+
+ else:
+ segments = list(line.render(console, end=""))
+ if options.no_wrap:
+ wrapped_lines = [segments]
+ else:
+ wrapped_lines = [
+ _Segment.adjust_line_length(
+ segments,
+ render_options.max_width,
+ style=background_style,
+ pad=not transparent_background,
+ )
+ ]
+ if self.line_numbers:
+ for first, wrapped_line in loop_first(wrapped_lines):
+ if first:
+ line_column = str(line_no).rjust(numbers_column_width - 2) + " "
+ if highlight_line(line_no):
+ yield _Segment(line_pointer, Style(color="red"))
+ yield _Segment(line_column, highlight_number_style)
+ else:
+ yield _Segment(" ", highlight_number_style)
+ yield _Segment(line_column, number_style)
+ else:
+ yield padding
+ yield from wrapped_line
+ yield new_line
+ else:
+ for wrapped_line in wrapped_lines:
+ yield from wrapped_line
+ yield new_line
+
+
+if __name__ == "__main__": # pragma: no cover
+
+ import argparse
+ import sys
+
+ parser = argparse.ArgumentParser(
+ description="Render syntax to the console with Rich"
+ )
+ parser.add_argument(
+ "path",
+ metavar="PATH",
+ help="path to file, or - for stdin",
+ )
+ parser.add_argument(
+ "-c",
+ "--force-color",
+ dest="force_color",
+ action="store_true",
+ default=None,
+ help="force color for non-terminals",
+ )
+ parser.add_argument(
+ "-i",
+ "--indent-guides",
+ dest="indent_guides",
+ action="store_true",
+ default=False,
+ help="display indent guides",
+ )
+ parser.add_argument(
+ "-l",
+ "--line-numbers",
+ dest="line_numbers",
+ action="store_true",
+ help="render line numbers",
+ )
+ parser.add_argument(
+ "-w",
+ "--width",
+ type=int,
+ dest="width",
+ default=None,
+ help="width of output (default will auto-detect)",
+ )
+ parser.add_argument(
+ "-r",
+ "--wrap",
+ dest="word_wrap",
+ action="store_true",
+ default=False,
+ help="word wrap long lines",
+ )
+ parser.add_argument(
+ "-s",
+ "--soft-wrap",
+ action="store_true",
+ dest="soft_wrap",
+ default=False,
+ help="enable soft wrapping mode",
+ )
+ parser.add_argument(
+ "-t", "--theme", dest="theme", default="monokai", help="pygments theme"
+ )
+ parser.add_argument(
+ "-b",
+ "--background-color",
+ dest="background_color",
+ default=None,
+ help="Override background color",
+ )
+ parser.add_argument(
+ "-x",
+ "--lexer",
+ default="default",
+ dest="lexer_name",
+ help="Lexer name",
+ )
+ args = parser.parse_args()
+
+ from pip._vendor.rich.console import Console
+
+ console = Console(force_terminal=args.force_color, width=args.width)
+
+ if args.path == "-":
+ code = sys.stdin.read()
+ syntax = Syntax(
+ code=code,
+ lexer=args.lexer_name,
+ line_numbers=args.line_numbers,
+ word_wrap=args.word_wrap,
+ theme=args.theme,
+ background_color=args.background_color,
+ indent_guides=args.indent_guides,
+ )
+ else:
+ syntax = Syntax.from_path(
+ args.path,
+ line_numbers=args.line_numbers,
+ word_wrap=args.word_wrap,
+ theme=args.theme,
+ background_color=args.background_color,
+ indent_guides=args.indent_guides,
+ )
+ console.print(syntax, soft_wrap=args.soft_wrap)
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/table.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/table.py
new file mode 100644
index 0000000..da43860
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/table.py
@@ -0,0 +1,968 @@
+from dataclasses import dataclass, field, replace
+from typing import (
+ TYPE_CHECKING,
+ Dict,
+ Iterable,
+ List,
+ NamedTuple,
+ Optional,
+ Sequence,
+ Tuple,
+ Union,
+)
+
+from . import box, errors
+from ._loop import loop_first_last, loop_last
+from ._pick import pick_bool
+from ._ratio import ratio_distribute, ratio_reduce
+from .align import VerticalAlignMethod
+from .jupyter import JupyterMixin
+from .measure import Measurement
+from .padding import Padding, PaddingDimensions
+from .protocol import is_renderable
+from .segment import Segment
+from .style import Style, StyleType
+from .text import Text, TextType
+
+if TYPE_CHECKING:
+ from .console import (
+ Console,
+ ConsoleOptions,
+ JustifyMethod,
+ OverflowMethod,
+ RenderableType,
+ RenderResult,
+ )
+
+
+@dataclass
+class Column:
+ """Defines a column in a table."""
+
+ header: "RenderableType" = ""
+ """RenderableType: Renderable for the header (typically a string)"""
+
+ footer: "RenderableType" = ""
+ """RenderableType: Renderable for the footer (typically a string)"""
+
+ header_style: StyleType = ""
+ """StyleType: The style of the header."""
+
+ footer_style: StyleType = ""
+ """StyleType: The style of the footer."""
+
+ style: StyleType = ""
+ """StyleType: The style of the column."""
+
+ justify: "JustifyMethod" = "left"
+ """str: How to justify text within the column ("left", "center", "right", or "full")"""
+
+ vertical: "VerticalAlignMethod" = "top"
+ """str: How to vertically align content ("top", "middle", or "bottom")"""
+
+ overflow: "OverflowMethod" = "ellipsis"
+ """str: Overflow method."""
+
+ width: Optional[int] = None
+ """Optional[int]: Width of the column, or ``None`` (default) to auto calculate width."""
+
+ min_width: Optional[int] = None
+ """Optional[int]: Minimum width of column, or ``None`` for no minimum. Defaults to None."""
+
+ max_width: Optional[int] = None
+ """Optional[int]: Maximum width of column, or ``None`` for no maximum. Defaults to None."""
+
+ ratio: Optional[int] = None
+ """Optional[int]: Ratio to use when calculating column width, or ``None`` (default) to adapt to column contents."""
+
+ no_wrap: bool = False
+ """bool: Prevent wrapping of text within the column. Defaults to ``False``."""
+
+ _index: int = 0
+ """Index of column."""
+
+ _cells: List["RenderableType"] = field(default_factory=list)
+
+ def copy(self) -> "Column":
+ """Return a copy of this Column."""
+ return replace(self, _cells=[])
+
+ @property
+ def cells(self) -> Iterable["RenderableType"]:
+ """Get all cells in the column, not including header."""
+ yield from self._cells
+
+ @property
+ def flexible(self) -> bool:
+ """Check if this column is flexible."""
+ return self.ratio is not None
+
+
+@dataclass
+class Row:
+ """Information regarding a row."""
+
+ style: Optional[StyleType] = None
+ """Style to apply to row."""
+
+ end_section: bool = False
+ """Indicated end of section, which will force a line beneath the row."""
+
+
+class _Cell(NamedTuple):
+ """A single cell in a table."""
+
+ style: StyleType
+ """Style to apply to cell."""
+ renderable: "RenderableType"
+ """Cell renderable."""
+ vertical: VerticalAlignMethod
+ """Cell vertical alignment."""
+
+
+class Table(JupyterMixin):
+ """A console renderable to draw a table.
+
+ Args:
+ *headers (Union[Column, str]): Column headers, either as a string, or :class:`~rich.table.Column` instance.
+ title (Union[str, Text], optional): The title of the table rendered at the top. Defaults to None.
+ caption (Union[str, Text], optional): The table caption rendered below. Defaults to None.
+ width (int, optional): The width in characters of the table, or ``None`` to automatically fit. Defaults to None.
+ min_width (Optional[int], optional): The minimum width of the table, or ``None`` for no minimum. Defaults to None.
+ box (box.Box, optional): One of the constants in box.py used to draw the edges (see :ref:`appendix_box`), or ``None`` for no box lines. Defaults to box.HEAVY_HEAD.
+ safe_box (Optional[bool], optional): Disable box characters that don't display on windows legacy terminal with *raster* fonts. Defaults to True.
+ padding (PaddingDimensions, optional): Padding for cells (top, right, bottom, left). Defaults to (0, 1).
+ collapse_padding (bool, optional): Enable collapsing of padding around cells. Defaults to False.
+ pad_edge (bool, optional): Enable padding of edge cells. Defaults to True.
+ expand (bool, optional): Expand the table to fit the available space if ``True``, otherwise the table width will be auto-calculated. Defaults to False.
+ show_header (bool, optional): Show a header row. Defaults to True.
+ show_footer (bool, optional): Show a footer row. Defaults to False.
+ show_edge (bool, optional): Draw a box around the outside of the table. Defaults to True.
+ show_lines (bool, optional): Draw lines between every row. Defaults to False.
+ leading (int, optional): Number of blank lines between rows (precludes ``show_lines``). Defaults to 0.
+ style (Union[str, Style], optional): Default style for the table. Defaults to "none".
+ row_styles (List[Union[str, Style]], optional): Optional list of row styles; if more than one style is given, the styles will alternate. Defaults to None.
+ header_style (Union[str, Style], optional): Style of the header. Defaults to "table.header".
+ footer_style (Union[str, Style], optional): Style of the footer. Defaults to "table.footer".
+ border_style (Union[str, Style], optional): Style of the border. Defaults to None.
+ title_style (Union[str, Style], optional): Style of the title. Defaults to None.
+ caption_style (Union[str, Style], optional): Style of the caption. Defaults to None.
+ title_justify (str, optional): Justify method for title. Defaults to "center".
+ caption_justify (str, optional): Justify method for caption. Defaults to "center".
+ highlight (bool, optional): Highlight cell contents (if str). Defaults to False.
+ """
+
+ columns: List[Column]
+ rows: List[Row]
+
+ def __init__(
+ self,
+ *headers: Union[Column, str],
+ title: Optional[TextType] = None,
+ caption: Optional[TextType] = None,
+ width: Optional[int] = None,
+ min_width: Optional[int] = None,
+ box: Optional[box.Box] = box.HEAVY_HEAD,
+ safe_box: Optional[bool] = None,
+ padding: PaddingDimensions = (0, 1),
+ collapse_padding: bool = False,
+ pad_edge: bool = True,
+ expand: bool = False,
+ show_header: bool = True,
+ show_footer: bool = False,
+ show_edge: bool = True,
+ show_lines: bool = False,
+ leading: int = 0,
+ style: StyleType = "none",
+ row_styles: Optional[Iterable[StyleType]] = None,
+ header_style: Optional[StyleType] = "table.header",
+ footer_style: Optional[StyleType] = "table.footer",
+ border_style: Optional[StyleType] = None,
+ title_style: Optional[StyleType] = None,
+ caption_style: Optional[StyleType] = None,
+ title_justify: "JustifyMethod" = "center",
+ caption_justify: "JustifyMethod" = "center",
+ highlight: bool = False,
+ ) -> None:
+
+ self.columns: List[Column] = []
+ self.rows: List[Row] = []
+ self.title = title
+ self.caption = caption
+ self.width = width
+ self.min_width = min_width
+ self.box = box
+ self.safe_box = safe_box
+ self._padding = Padding.unpack(padding)
+ self.pad_edge = pad_edge
+ self._expand = expand
+ self.show_header = show_header
+ self.show_footer = show_footer
+ self.show_edge = show_edge
+ self.show_lines = show_lines
+ self.leading = leading
+ self.collapse_padding = collapse_padding
+ self.style = style
+ self.header_style = header_style or ""
+ self.footer_style = footer_style or ""
+ self.border_style = border_style
+ self.title_style = title_style
+ self.caption_style = caption_style
+ self.title_justify: "JustifyMethod" = title_justify
+ self.caption_justify: "JustifyMethod" = caption_justify
+ self.highlight = highlight
+ self.row_styles: Sequence[StyleType] = list(row_styles or [])
+ append_column = self.columns.append
+ for header in headers:
+ if isinstance(header, str):
+ self.add_column(header=header)
+ else:
+ header._index = len(self.columns)
+ append_column(header)
+
+ @classmethod
+ def grid(
+ cls,
+ *headers: Union[Column, str],
+ padding: PaddingDimensions = 0,
+ collapse_padding: bool = True,
+ pad_edge: bool = False,
+ expand: bool = False,
+ ) -> "Table":
+ """Get a table with no lines, headers, or footer.
+
+ Args:
+ *headers (Union[Column, str]): Column headers, either as a string, or :class:`~rich.table.Column` instance.
+ padding (PaddingDimensions, optional): Padding around cells. Defaults to 0.
+ collapse_padding (bool, optional): Enable collapsing of padding around cells. Defaults to True.
+ pad_edge (bool, optional): Enable padding around edges of table. Defaults to False.
+ expand (bool, optional): Expand the table to fit the available space if ``True``, otherwise the table width will be auto-calculated. Defaults to False.
+
+ Returns:
+ Table: A table instance.
+ """
+ return cls(
+ *headers,
+ box=None,
+ padding=padding,
+ collapse_padding=collapse_padding,
+ show_header=False,
+ show_footer=False,
+ show_edge=False,
+ pad_edge=pad_edge,
+ expand=expand,
+ )
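+
+ # Illustrative usage (assumption, not part of the vendored file): grid() is a
+ # borderless layout helper, e.g.
+ #   layout = Table.grid(padding=1)
+ #   layout.add_column(justify="right")
+ #   layout.add_column()
+ #   layout.add_row("Pokemon:", "Espeon")
+ # renders two aligned columns with no box characters or headers.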
+
+ @property
+ def expand(self) -> bool:
+ """Setting a non-None self.width implies expand."""
+ return self._expand or self.width is not None
+
+ @expand.setter
+ def expand(self, expand: bool) -> None:
+ """Set expand."""
+ self._expand = expand
+
+ @property
+ def _extra_width(self) -> int:
+ """Get extra width to add to cell content."""
+ width = 0
+ if self.box and self.show_edge:
+ width += 2
+ if self.box:
+ width += len(self.columns) - 1
+ return width
+
+ @property
+ def row_count(self) -> int:
+ """Get the current number of rows."""
+ return len(self.rows)
+
+ def get_row_style(self, console: "Console", index: int) -> StyleType:
+ """Get the current row style."""
+ style = Style.null()
+ if self.row_styles:
+ style += console.get_style(self.row_styles[index % len(self.row_styles)])
+ row_style = self.rows[index].style
+ if row_style is not None:
+ style += console.get_style(row_style)
+ return style
+
+ def __rich_measure__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> Measurement:
+ max_width = options.max_width
+ if self.width is not None:
+ max_width = self.width
+ if max_width < 0:
+ return Measurement(0, 0)
+
+ extra_width = self._extra_width
+ max_width = sum(
+ self._calculate_column_widths(
+ console, options.update_width(max_width - extra_width)
+ )
+ )
+ _measure_column = self._measure_column
+
+ measurements = [
+ _measure_column(console, options.update_width(max_width), column)
+ for column in self.columns
+ ]
+ minimum_width = (
+ sum(measurement.minimum for measurement in measurements) + extra_width
+ )
+ maximum_width = (
+ sum(measurement.maximum for measurement in measurements) + extra_width
+ if (self.width is None)
+ else self.width
+ )
+ measurement = Measurement(minimum_width, maximum_width)
+ measurement = measurement.clamp(self.min_width)
+ return measurement
+
+ @property
+ def padding(self) -> Tuple[int, int, int, int]:
+ """Get cell padding."""
+ return self._padding
+
+ @padding.setter
+ def padding(self, padding: PaddingDimensions) -> "Table":
+ """Set cell padding."""
+ self._padding = Padding.unpack(padding)
+ return self
+
+ def add_column(
+ self,
+ header: "RenderableType" = "",
+ footer: "RenderableType" = "",
+ *,
+ header_style: Optional[StyleType] = None,
+ footer_style: Optional[StyleType] = None,
+ style: Optional[StyleType] = None,
+ justify: "JustifyMethod" = "left",
+ vertical: "VerticalAlignMethod" = "top",
+ overflow: "OverflowMethod" = "ellipsis",
+ width: Optional[int] = None,
+ min_width: Optional[int] = None,
+ max_width: Optional[int] = None,
+ ratio: Optional[int] = None,
+ no_wrap: bool = False,
+ ) -> None:
+ """Add a column to the table.
+
+ Args:
+ header (RenderableType, optional): Text or renderable for the header.
+ Defaults to "".
+ footer (RenderableType, optional): Text or renderable for the footer.
+ Defaults to "".
+ header_style (Union[str, Style], optional): Style for the header, or None for default. Defaults to None.
+ footer_style (Union[str, Style], optional): Style for the footer, or None for default. Defaults to None.
+ style (Union[str, Style], optional): Style for the column cells, or None for default. Defaults to None.
+ justify (JustifyMethod, optional): Alignment for cells. Defaults to "left".
+ vertical (VerticalAlignMethod, optional): Vertical alignment, one of "top", "middle", or "bottom". Defaults to "top".
+ overflow (OverflowMethod): Overflow method: "crop", "fold", "ellipsis". Defaults to "ellipsis".
+ width (int, optional): Desired width of column in characters, or None to fit to contents. Defaults to None.
+ min_width (Optional[int], optional): Minimum width of column, or ``None`` for no minimum. Defaults to None.
+ max_width (Optional[int], optional): Maximum width of column, or ``None`` for no maximum. Defaults to None.
+ ratio (int, optional): Flexible ratio for the column (requires ``Table.expand`` or ``Table.width``). Defaults to None.
+ no_wrap (bool, optional): Set to ``True`` to disable wrapping of this column.
+ """
+
+ column = Column(
+ _index=len(self.columns),
+ header=header,
+ footer=footer,
+ header_style=header_style or "",
+ footer_style=footer_style or "",
+ style=style or "",
+ justify=justify,
+ vertical=vertical,
+ overflow=overflow,
+ width=width,
+ min_width=min_width,
+ max_width=max_width,
+ ratio=ratio,
+ no_wrap=no_wrap,
+ )
+ self.columns.append(column)
+
+ def add_row(
+ self,
+ *renderables: Optional["RenderableType"],
+ style: Optional[StyleType] = None,
+ end_section: bool = False,
+ ) -> None:
+ """Add a row of renderables.
+
+ Args:
+ *renderables (None or renderable): Each cell in a row must be a renderable object (including str),
+ or ``None`` for a blank cell.
+ style (StyleType, optional): An optional style to apply to the entire row. Defaults to None.
+ end_section (bool, optional): End a section and draw a line. Defaults to False.
+
+ Raises:
+ errors.NotRenderableError: If you add something that can't be rendered.
+ """
+
+ def add_cell(column: Column, renderable: "RenderableType") -> None:
+ column._cells.append(renderable)
+
+ cell_renderables: List[Optional["RenderableType"]] = list(renderables)
+
+ columns = self.columns
+ if len(cell_renderables) < len(columns):
+ cell_renderables = [
+ *cell_renderables,
+ *[None] * (len(columns) - len(cell_renderables)),
+ ]
+ for index, renderable in enumerate(cell_renderables):
+ if index == len(columns):
+ column = Column(_index=index)
+ for _ in self.rows:
+ add_cell(column, Text(""))
+ self.columns.append(column)
+ else:
+ column = columns[index]
+ if renderable is None:
+ add_cell(column, "")
+ elif is_renderable(renderable):
+ add_cell(column, renderable)
+ else:
+ raise errors.NotRenderableError(
+ f"unable to render {type(renderable).__name__}; a string or other renderable object is required"
+ )
+ self.rows.append(Row(style=style, end_section=end_section))
+
+ def __rich_console__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "RenderResult":
+
+ if not self.columns:
+ yield Segment("\n")
+ return
+
+ max_width = options.max_width
+ if self.width is not None:
+ max_width = self.width
+
+ extra_width = self._extra_width
+ widths = self._calculate_column_widths(
+ console, options.update_width(max_width - extra_width)
+ )
+ table_width = sum(widths) + extra_width
+
+ render_options = options.update(
+ width=table_width, highlight=self.highlight, height=None
+ )
+
+ def render_annotation(
+ text: TextType, style: StyleType, justify: "JustifyMethod" = "center"
+ ) -> "RenderResult":
+ render_text = (
+ console.render_str(text, style=style, highlight=False)
+ if isinstance(text, str)
+ else text
+ )
+ return console.render(
+ render_text, options=render_options.update(justify=justify)
+ )
+
+ if self.title:
+ yield from render_annotation(
+ self.title,
+ style=Style.pick_first(self.title_style, "table.title"),
+ justify=self.title_justify,
+ )
+ yield from self._render(console, render_options, widths)
+ if self.caption:
+ yield from render_annotation(
+ self.caption,
+ style=Style.pick_first(self.caption_style, "table.caption"),
+ justify=self.caption_justify,
+ )
+
+ def _calculate_column_widths(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> List[int]:
+ """Calculate the widths of each column, including padding, not including borders."""
+ max_width = options.max_width
+ columns = self.columns
+ width_ranges = [
+ self._measure_column(console, options, column) for column in columns
+ ]
+ widths = [_range.maximum or 1 for _range in width_ranges]
+ get_padding_width = self._get_padding_width
+ extra_width = self._extra_width
+ if self.expand:
+ ratios = [col.ratio or 0 for col in columns if col.flexible]
+ if any(ratios):
+ fixed_widths = [
+ 0 if column.flexible else _range.maximum
+ for _range, column in zip(width_ranges, columns)
+ ]
+ flex_minimum = [
+ (column.width or 1) + get_padding_width(column._index)
+ for column in columns
+ if column.flexible
+ ]
+ flexible_width = max_width - sum(fixed_widths)
+ flex_widths = ratio_distribute(flexible_width, ratios, flex_minimum)
+ iter_flex_widths = iter(flex_widths)
+ for index, column in enumerate(columns):
+ if column.flexible:
+ widths[index] = fixed_widths[index] + next(iter_flex_widths)
+ table_width = sum(widths)
+
+ if table_width > max_width:
+ widths = self._collapse_widths(
+ widths,
+ [(column.width is None and not column.no_wrap) for column in columns],
+ max_width,
+ )
+ table_width = sum(widths)
+ # last resort, reduce columns evenly
+ if table_width > max_width:
+ excess_width = table_width - max_width
+ widths = ratio_reduce(excess_width, [1] * len(widths), widths, widths)
+ table_width = sum(widths)
+
+ width_ranges = [
+ self._measure_column(console, options.update_width(width), column)
+ for width, column in zip(widths, columns)
+ ]
+ widths = [_range.maximum or 0 for _range in width_ranges]
+
+ if (table_width < max_width and self.expand) or (
+ self.min_width is not None and table_width < (self.min_width - extra_width)
+ ):
+ _max_width = (
+ max_width
+ if self.min_width is None
+ else min(self.min_width - extra_width, max_width)
+ )
+ pad_widths = ratio_distribute(_max_width - table_width, widths)
+ widths = [_width + pad for _width, pad in zip(widths, pad_widths)]
+
+ return widths
+
+ @classmethod
+ def _collapse_widths(
+ cls, widths: List[int], wrapable: List[bool], max_width: int
+ ) -> List[int]:
+ """Reduce widths so that the total is under max_width.
+
+ Args:
+ widths (List[int]): List of widths.
+ wrapable (List[bool]): List of booleans that indicate if a column may shrink.
+ max_width (int): Maximum width to reduce to.
+
+ Returns:
+ List[int]: A new list of widths.
+ """
+ total_width = sum(widths)
+ excess_width = total_width - max_width
+ if any(wrapable):
+ while total_width and excess_width > 0:
+ max_column = max(
+ width for width, allow_wrap in zip(widths, wrapable) if allow_wrap
+ )
+ second_max_column = max(
+ width if allow_wrap and width != max_column else 0
+ for width, allow_wrap in zip(widths, wrapable)
+ )
+ column_difference = max_column - second_max_column
+ ratios = [
+ (1 if (width == max_column and allow_wrap) else 0)
+ for width, allow_wrap in zip(widths, wrapable)
+ ]
+ if not any(ratios) or not column_difference:
+ break
+ max_reduce = [min(excess_width, column_difference)] * len(widths)
+ widths = ratio_reduce(excess_width, ratios, max_reduce, widths)
+
+ total_width = sum(widths)
+ excess_width = total_width - max_width
+ return widths
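+
+ # Illustrative note (assumption): the loop above repeatedly shrinks whichever
+ # wrapable column is currently the widest toward the next-widest width, e.g.
+ # widths [20, 10, 5] with 8 characters of excess would take the reduction out of
+ # the 20-wide column first, stopping once the total fits or nothing can shrink.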
+
+ def _get_cells(
+ self, console: "Console", column_index: int, column: Column
+ ) -> Iterable[_Cell]:
+ """Get all the cells with padding and optional header."""
+
+ collapse_padding = self.collapse_padding
+ pad_edge = self.pad_edge
+ padding = self.padding
+ any_padding = any(padding)
+
+ first_column = column_index == 0
+ last_column = column_index == len(self.columns) - 1
+
+ _padding_cache: Dict[Tuple[bool, bool], Tuple[int, int, int, int]] = {}
+
+ def get_padding(first_row: bool, last_row: bool) -> Tuple[int, int, int, int]:
+ cached = _padding_cache.get((first_row, last_row))
+ if cached:
+ return cached
+ top, right, bottom, left = padding
+
+ if collapse_padding:
+ if not first_column:
+ left = max(0, left - right)
+ if not last_row:
+ bottom = max(0, top - bottom)
+
+ if not pad_edge:
+ if first_column:
+ left = 0
+ if last_column:
+ right = 0
+ if first_row:
+ top = 0
+ if last_row:
+ bottom = 0
+ _padding = (top, right, bottom, left)
+ _padding_cache[(first_row, last_row)] = _padding
+ return _padding
+
+ raw_cells: List[Tuple[StyleType, "RenderableType"]] = []
+ _append = raw_cells.append
+ get_style = console.get_style
+ if self.show_header:
+ header_style = get_style(self.header_style or "") + get_style(
+ column.header_style
+ )
+ _append((header_style, column.header))
+ cell_style = get_style(column.style or "")
+ for cell in column.cells:
+ _append((cell_style, cell))
+ if self.show_footer:
+ footer_style = get_style(self.footer_style or "") + get_style(
+ column.footer_style
+ )
+ _append((footer_style, column.footer))
+
+ if any_padding:
+ _Padding = Padding
+ for first, last, (style, renderable) in loop_first_last(raw_cells):
+ yield _Cell(
+ style,
+ _Padding(renderable, get_padding(first, last)),
+ getattr(renderable, "vertical", None) or column.vertical,
+ )
+ else:
+ for (style, renderable) in raw_cells:
+ yield _Cell(
+ style,
+ renderable,
+ getattr(renderable, "vertical", None) or column.vertical,
+ )
+
+ def _get_padding_width(self, column_index: int) -> int:
+ """Get extra width from padding."""
+ _, pad_right, _, pad_left = self.padding
+ if self.collapse_padding:
+ if column_index > 0:
+ pad_left = max(0, pad_left - pad_right)
+ return pad_left + pad_right
+
+ def _measure_column(
+ self,
+ console: "Console",
+ options: "ConsoleOptions",
+ column: Column,
+ ) -> Measurement:
+ """Get the minimum and maximum width of the column."""
+
+ max_width = options.max_width
+ if max_width < 1:
+ return Measurement(0, 0)
+
+ padding_width = self._get_padding_width(column._index)
+
+ if column.width is not None:
+ # Fixed width column
+ return Measurement(
+ column.width + padding_width, column.width + padding_width
+ ).with_maximum(max_width)
+ # Flexible column, we need to measure contents
+ min_widths: List[int] = []
+ max_widths: List[int] = []
+ append_min = min_widths.append
+ append_max = max_widths.append
+ get_render_width = Measurement.get
+ for cell in self._get_cells(console, column._index, column):
+ _min, _max = get_render_width(console, options, cell.renderable)
+ append_min(_min)
+ append_max(_max)
+
+ measurement = Measurement(
+ max(min_widths) if min_widths else 1,
+ max(max_widths) if max_widths else max_width,
+ ).with_maximum(max_width)
+ measurement = measurement.clamp(
+ None if column.min_width is None else column.min_width + padding_width,
+ None if column.max_width is None else column.max_width + padding_width,
+ )
+ return measurement
+
+ def _render(
+ self, console: "Console", options: "ConsoleOptions", widths: List[int]
+ ) -> "RenderResult":
+ table_style = console.get_style(self.style or "")
+
+ border_style = table_style + console.get_style(self.border_style or "")
+ _column_cells = (
+ self._get_cells(console, column_index, column)
+ for column_index, column in enumerate(self.columns)
+ )
+ row_cells: List[Tuple[_Cell, ...]] = list(zip(*_column_cells))
+ _box = (
+ self.box.substitute(
+ options, safe=pick_bool(self.safe_box, console.safe_box)
+ )
+ if self.box
+ else None
+ )
+
+ # _box = self.box
+ new_line = Segment.line()
+
+ columns = self.columns
+ show_header = self.show_header
+ show_footer = self.show_footer
+ show_edge = self.show_edge
+ show_lines = self.show_lines
+ leading = self.leading
+
+ _Segment = Segment
+ if _box:
+ box_segments = [
+ (
+ _Segment(_box.head_left, border_style),
+ _Segment(_box.head_right, border_style),
+ _Segment(_box.head_vertical, border_style),
+ ),
+ (
+ _Segment(_box.foot_left, border_style),
+ _Segment(_box.foot_right, border_style),
+ _Segment(_box.foot_vertical, border_style),
+ ),
+ (
+ _Segment(_box.mid_left, border_style),
+ _Segment(_box.mid_right, border_style),
+ _Segment(_box.mid_vertical, border_style),
+ ),
+ ]
+ if show_edge:
+ yield _Segment(_box.get_top(widths), border_style)
+ yield new_line
+ else:
+ box_segments = []
+
+ get_row_style = self.get_row_style
+ get_style = console.get_style
+
+ for index, (first, last, row_cell) in enumerate(loop_first_last(row_cells)):
+ header_row = first and show_header
+ footer_row = last and show_footer
+ row = (
+ self.rows[index - show_header]
+ if (not header_row and not footer_row)
+ else None
+ )
+ max_height = 1
+ cells: List[List[List[Segment]]] = []
+ if header_row or footer_row:
+ row_style = Style.null()
+ else:
+ row_style = get_style(
+ get_row_style(console, index - 1 if show_header else index)
+ )
+ for width, cell, column in zip(widths, row_cell, columns):
+ render_options = options.update(
+ width=width,
+ justify=column.justify,
+ no_wrap=column.no_wrap,
+ overflow=column.overflow,
+ height=None,
+ )
+ lines = console.render_lines(
+ cell.renderable,
+ render_options,
+ style=get_style(cell.style) + row_style,
+ )
+ max_height = max(max_height, len(lines))
+ cells.append(lines)
+
+ row_height = max(len(cell) for cell in cells)
+
+ def align_cell(
+ cell: List[List[Segment]],
+ vertical: "VerticalAlignMethod",
+ width: int,
+ style: Style,
+ ) -> List[List[Segment]]:
+ if header_row:
+ vertical = "bottom"
+ elif footer_row:
+ vertical = "top"
+
+ if vertical == "top":
+ return _Segment.align_top(cell, width, row_height, style)
+ elif vertical == "middle":
+ return _Segment.align_middle(cell, width, row_height, style)
+ return _Segment.align_bottom(cell, width, row_height, style)
+
+ cells[:] = [
+ _Segment.set_shape(
+ align_cell(
+ cell,
+ _cell.vertical,
+ width,
+ get_style(_cell.style) + row_style,
+ ),
+ width,
+ max_height,
+ )
+ for width, _cell, cell, column in zip(widths, row_cell, cells, columns)
+ ]
+
+ if _box:
+ if last and show_footer:
+ yield _Segment(
+ _box.get_row(widths, "foot", edge=show_edge), border_style
+ )
+ yield new_line
+ left, right, _divider = box_segments[0 if first else (2 if last else 1)]
+
+ # If the column divider is whitespace also style it with the row background
+ divider = (
+ _divider
+ if _divider.text.strip()
+ else _Segment(
+ _divider.text, row_style.background_style + _divider.style
+ )
+ )
+ for line_no in range(max_height):
+ if show_edge:
+ yield left
+ for last_cell, rendered_cell in loop_last(cells):
+ yield from rendered_cell[line_no]
+ if not last_cell:
+ yield divider
+ if show_edge:
+ yield right
+ yield new_line
+ else:
+ for line_no in range(max_height):
+ for rendered_cell in cells:
+ yield from rendered_cell[line_no]
+ yield new_line
+ if _box and first and show_header:
+ yield _Segment(
+ _box.get_row(widths, "head", edge=show_edge), border_style
+ )
+ yield new_line
+ end_section = row and row.end_section
+ if _box and (show_lines or leading or end_section):
+ if (
+ not last
+ and not (show_footer and index >= len(row_cells) - 2)
+ and not (show_header and header_row)
+ ):
+ if leading:
+ yield _Segment(
+ _box.get_row(widths, "mid", edge=show_edge) * leading,
+ border_style,
+ )
+ else:
+ yield _Segment(
+ _box.get_row(widths, "row", edge=show_edge), border_style
+ )
+ yield new_line
+
+ if _box and show_edge:
+ yield _Segment(_box.get_bottom(widths), border_style)
+ yield new_line
+
+
+if __name__ == "__main__": # pragma: no cover
+ from pip._vendor.rich.console import Console
+ from pip._vendor.rich.highlighter import ReprHighlighter
+ from pip._vendor.rich.table import Table as Table
+
+ from ._timer import timer
+
+ with timer("Table render"):
+ table = Table(
+ title="Star Wars Movies",
+ caption="Rich example table",
+ caption_justify="right",
+ )
+
+ table.add_column(
+ "Released", header_style="bright_cyan", style="cyan", no_wrap=True
+ )
+ table.add_column("Title", style="magenta")
+ table.add_column("Box Office", justify="right", style="green")
+
+ table.add_row(
+ "Dec 20, 2019",
+ "Star Wars: The Rise of Skywalker",
+ "$952,110,690",
+ )
+ table.add_row("May 25, 2018", "Solo: A Star Wars Story", "$393,151,347")
+ table.add_row(
+ "Dec 15, 2017",
+ "Star Wars Ep. V111: The Last Jedi",
+ "$1,332,539,889",
+ style="on black",
+ end_section=True,
+ )
+ table.add_row(
+ "Dec 16, 2016",
+ "Rogue One: A Star Wars Story",
+ "$1,332,439,889",
+ )
+
+ def header(text: str) -> None:
+ console.print()
+ console.rule(highlight(text))
+ console.print()
+
+ console = Console()
+ highlight = ReprHighlighter()
+ header("Example Table")
+ console.print(table, justify="center")
+
+ table.expand = True
+ header("expand=True")
+ console.print(table)
+
+ table.width = 50
+ header("width=50")
+
+ console.print(table, justify="center")
+
+ table.width = None
+ table.expand = False
+ table.row_styles = ["dim", "none"]
+ header("row_styles=['dim', 'none']")
+
+ console.print(table, justify="center")
+
+ table.width = None
+ table.expand = False
+ table.row_styles = ["dim", "none"]
+ table.leading = 1
+ header("leading=1, row_styles=['dim', 'none']")
+ console.print(table, justify="center")
+
+ table.width = None
+ table.expand = False
+ table.row_styles = ["dim", "none"]
+ table.show_lines = True
+ table.leading = 0
+ header("show_lines=True, row_styles=['dim', 'none']")
+ console.print(table, justify="center")
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/tabulate.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/tabulate.py
new file mode 100644
index 0000000..6889f2d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/tabulate.py
@@ -0,0 +1,51 @@
+from collections.abc import Mapping
+from typing import Any, Optional
+import warnings
+
+from pip._vendor.rich.console import JustifyMethod
+
+from . import box
+from .highlighter import ReprHighlighter
+from .pretty import Pretty
+from .table import Table
+
+
+def tabulate_mapping(
+ mapping: "Mapping[Any, Any]",
+ title: Optional[str] = None,
+ caption: Optional[str] = None,
+ title_justify: Optional[JustifyMethod] = None,
+ caption_justify: Optional[JustifyMethod] = None,
+) -> Table:
+ """Generate a simple table from a mapping.
+
+ Args:
+ mapping (Mapping): A mapping object (e.g. a dict).
+ title (str, optional): Optional title to be displayed over the table.
+ caption (str, optional): Optional caption to be displayed below the table.
+ title_justify (str, optional): Justify method for title. Defaults to None.
+ caption_justify (str, optional): Justify method for caption. Defaults to None.
+
+ Returns:
+ Table: A table instance which may be rendered by the Console.
+ """
+ warnings.warn("tabulate_mapping will be deprecated in Rich v11", DeprecationWarning)
+ table = Table(
+ show_header=False,
+ title=title,
+ caption=caption,
+ box=box.ROUNDED,
+ border_style="blue",
+ )
+ table.title = title
+ table.caption = caption
+ if title_justify is not None:
+ table.title_justify = title_justify
+ if caption_justify is not None:
+ table.caption_justify = caption_justify
+ highlighter = ReprHighlighter()
+ for key, value in mapping.items():
+ table.add_row(
+ Pretty(key, highlighter=highlighter), Pretty(value, highlighter=highlighter)
+ )
+ return table
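
A quick usage sketch for `tabulate_mapping` (illustrative only, not part of the diff; the dict contents are made up, and the call emits the `DeprecationWarning` shown above):

```python
# Minimal sketch of tabulate_mapping, assuming the vendored import path.
from pip._vendor.rich.console import Console
from pip._vendor.rich.tabulate import tabulate_mapping

console = Console()
table = tabulate_mapping(
    {"name": "Espeon", "type": "Psychic", "base_speed": 110},  # illustrative data
    title="Example mapping",
)
console.print(table)  # keys and values are rendered with Pretty + ReprHighlighter
```
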
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/terminal_theme.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/terminal_theme.py
new file mode 100644
index 0000000..801ac0b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/terminal_theme.py
@@ -0,0 +1,55 @@
+from typing import List, Optional, Tuple
+
+from .color_triplet import ColorTriplet
+from .palette import Palette
+
+_ColorTuple = Tuple[int, int, int]
+
+
+class TerminalTheme:
+ """A color theme used when exporting console content.
+
+ Args:
+ background (Tuple[int, int, int]): The background color.
+ foreground (Tuple[int, int, int]): The foreground (text) color.
+ normal (List[Tuple[int, int, int]]): A list of 8 normal intensity colors.
+ bright (List[Tuple[int, int, int]], optional): A list of 8 bright colors, or None
+ to repeat normal intensity. Defaults to None.
+ """
+
+ def __init__(
+ self,
+ background: _ColorTuple,
+ foreground: _ColorTuple,
+ normal: List[_ColorTuple],
+ bright: Optional[List[_ColorTuple]] = None,
+ ) -> None:
+ self.background_color = ColorTriplet(*background)
+ self.foreground_color = ColorTriplet(*foreground)
+ self.ansi_colors = Palette(normal + (bright or normal))
+
+
+DEFAULT_TERMINAL_THEME = TerminalTheme(
+ (255, 255, 255),
+ (0, 0, 0),
+ [
+ (0, 0, 0),
+ (128, 0, 0),
+ (0, 128, 0),
+ (128, 128, 0),
+ (0, 0, 128),
+ (128, 0, 128),
+ (0, 128, 128),
+ (192, 192, 192),
+ ],
+ [
+ (128, 128, 128),
+ (255, 0, 0),
+ (0, 255, 0),
+ (255, 255, 0),
+ (0, 0, 255),
+ (255, 0, 255),
+ (0, 255, 255),
+ (255, 255, 255),
+ ],
+)
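
`TerminalTheme` is consumed when exporting recorded console output. A minimal sketch (not part of the diff; `MY_THEME` and the color values are hypothetical, and it assumes `Console(record=True)` / `export_html` from the vendored console module):

```python
# Minimal sketch: a custom TerminalTheme used when exporting recorded output.
from pip._vendor.rich.console import Console
from pip._vendor.rich.terminal_theme import TerminalTheme

# Dark background, light foreground; bright colors fall back to the normal set
# when the optional fourth argument is omitted (see TerminalTheme.__init__ above).
MY_THEME = TerminalTheme(
    (30, 30, 30),
    (220, 220, 220),
    [
        (0, 0, 0), (170, 0, 0), (0, 170, 0), (170, 85, 0),
        (0, 0, 170), (170, 0, 170), (0, 170, 170), (170, 170, 170),
    ],
)

console = Console(record=True)
console.print("[bold red]hello[/] world")
html = console.export_html(theme=MY_THEME)  # theme controls the exported colors
```
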
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/text.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/text.py
new file mode 100644
index 0000000..ea12c09
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/text.py
@@ -0,0 +1,1282 @@
+import re
+from functools import partial, reduce
+from math import gcd
+from operator import itemgetter
+from pip._vendor.rich.emoji import EmojiVariant
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Callable,
+ Dict,
+ Iterable,
+ List,
+ NamedTuple,
+ Optional,
+ Tuple,
+ Union,
+)
+
+from ._loop import loop_last
+from ._pick import pick_bool
+from ._wrap import divide_line
+from .align import AlignMethod
+from .cells import cell_len, set_cell_size
+from .containers import Lines
+from .control import strip_control_codes
+from .emoji import EmojiVariant
+from .jupyter import JupyterMixin
+from .measure import Measurement
+from .segment import Segment
+from .style import Style, StyleType
+
+if TYPE_CHECKING: # pragma: no cover
+ from .console import Console, ConsoleOptions, JustifyMethod, OverflowMethod
+
+DEFAULT_JUSTIFY: "JustifyMethod" = "default"
+DEFAULT_OVERFLOW: "OverflowMethod" = "fold"
+
+
+_re_whitespace = re.compile(r"\s+$")
+
+TextType = Union[str, "Text"]
+
+GetStyleCallable = Callable[[str], Optional[StyleType]]
+
+
+class Span(NamedTuple):
+ """A marked up region in some text."""
+
+ start: int
+ """Span start index."""
+ end: int
+ """Span end index."""
+ style: Union[str, Style]
+ """Style associated with the span."""
+
+ def __repr__(self) -> str:
+ return (
+ f"Span({self.start}, {self.end}, {self.style!r})"
+ if (isinstance(self.style, Style) and self.style._meta)
+ else f"Span({self.start}, {self.end}, {repr(self.style)})"
+ )
+
+ def __bool__(self) -> bool:
+ return self.end > self.start
+
+ def split(self, offset: int) -> Tuple["Span", Optional["Span"]]:
+ """Split a span in to 2 from a given offset."""
+
+ if offset < self.start:
+ return self, None
+ if offset >= self.end:
+ return self, None
+
+ start, end, style = self
+ span1 = Span(start, min(end, offset), style)
+ span2 = Span(span1.end, end, style)
+ return span1, span2
+
+ def move(self, offset: int) -> "Span":
+ """Move start and end by a given offset.
+
+ Args:
+ offset (int): Number of characters to add to start and end.
+
+ Returns:
+ TextSpan: A new TextSpan with adjusted position.
+ """
+ start, end, style = self
+ return Span(start + offset, end + offset, style)
+
+ def right_crop(self, offset: int) -> "Span":
+ """Crop the span at the given offset.
+
+ Args:
+ offset (int): A value between start and end.
+
+ Returns:
+ Span: A new (possibly smaller) span.
+ """
+ start, end, style = self
+ if offset >= end:
+ return self
+ return Span(start, min(offset, end), style)
+
+
+class Text(JupyterMixin):
+ """Text with color / style.
+
+ Args:
+ text (str, optional): Default unstyled text. Defaults to "".
+ style (Union[str, Style], optional): Base style for text. Defaults to "".
+ justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None.
+ overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None.
+ no_wrap (bool, optional): Disable text wrapping, or None for default. Defaults to None.
+ end (str, optional): Character to end text with. Defaults to "\\\\n".
+ tab_size (int): Number of spaces per tab, or ``None`` to use ``console.tab_size``. Defaults to 8.
+ spans (List[Span], optional): A list of predefined style spans. Defaults to None.
+ """
+
+ __slots__ = [
+ "_text",
+ "style",
+ "justify",
+ "overflow",
+ "no_wrap",
+ "end",
+ "tab_size",
+ "_spans",
+ "_length",
+ ]
+
+ def __init__(
+ self,
+ text: str = "",
+ style: Union[str, Style] = "",
+ *,
+ justify: Optional["JustifyMethod"] = None,
+ overflow: Optional["OverflowMethod"] = None,
+ no_wrap: Optional[bool] = None,
+ end: str = "\n",
+ tab_size: Optional[int] = 8,
+ spans: Optional[List[Span]] = None,
+ ) -> None:
+ self._text = [strip_control_codes(text)]
+ self.style = style
+ self.justify: Optional["JustifyMethod"] = justify
+ self.overflow: Optional["OverflowMethod"] = overflow
+ self.no_wrap = no_wrap
+ self.end = end
+ self.tab_size = tab_size
+ self._spans: List[Span] = spans or []
+ self._length: int = len(text)
+
+ def __len__(self) -> int:
+ return self._length
+
+ def __bool__(self) -> bool:
+ return bool(self._length)
+
+ def __str__(self) -> str:
+ return self.plain
+
+ def __repr__(self) -> str:
+ return f""
+
+ def __add__(self, other: Any) -> "Text":
+ if isinstance(other, (str, Text)):
+ result = self.copy()
+ result.append(other)
+ return result
+ return NotImplemented
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, Text):
+ return NotImplemented
+ return self.plain == other.plain and self._spans == other._spans
+
+ def __contains__(self, other: object) -> bool:
+ if isinstance(other, str):
+ return other in self.plain
+ elif isinstance(other, Text):
+ return other.plain in self.plain
+ return False
+
+ def __getitem__(self, slice: Union[int, slice]) -> "Text":
+ def get_text_at(offset: int) -> "Text":
+ _Span = Span
+ text = Text(
+ self.plain[offset],
+ spans=[
+ _Span(0, 1, style)
+ for start, end, style in self._spans
+ if end > offset >= start
+ ],
+ end="",
+ )
+ return text
+
+ if isinstance(slice, int):
+ return get_text_at(slice)
+ else:
+ start, stop, step = slice.indices(len(self.plain))
+ if step == 1:
+ lines = self.divide([start, stop])
+ return lines[1]
+ else:
+ # This would be a bit of work to implement efficiently
+ # For now, it's not required
+ raise TypeError("slices with step!=1 are not supported")
+
+ @property
+ def cell_len(self) -> int:
+ """Get the number of cells required to render this text."""
+ return cell_len(self.plain)
+
+ @property
+ def markup(self) -> str:
+ """Get console markup to render this Text.
+
+ Returns:
+ str: A string potentially creating markup tags.
+ """
+ from .markup import escape
+
+ output: List[str] = []
+
+ plain = self.plain
+ markup_spans = [
+ (0, False, self.style),
+ *((span.start, False, span.style) for span in self._spans),
+ *((span.end, True, span.style) for span in self._spans),
+ (len(plain), True, self.style),
+ ]
+ markup_spans.sort(key=itemgetter(0, 1))
+ position = 0
+ append = output.append
+ for offset, closing, style in markup_spans:
+ if offset > position:
+ append(escape(plain[position:offset]))
+ position = offset
+ if style:
+ append(f"[/{style}]" if closing else f"[{style}]")
+ markup = "".join(output)
+ return markup
+
+ @classmethod
+ def from_markup(
+ cls,
+ text: str,
+ *,
+ style: Union[str, Style] = "",
+ emoji: bool = True,
+ emoji_variant: Optional[EmojiVariant] = None,
+ justify: Optional["JustifyMethod"] = None,
+ overflow: Optional["OverflowMethod"] = None,
+ ) -> "Text":
+ """Create Text instance from markup.
+
+ Args:
+ text (str): A string containing console markup.
+ emoji (bool, optional): Also render emoji code. Defaults to True.
+ justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None.
+ overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None.
+
+ Returns:
+ Text: A Text instance with markup rendered.
+ """
+ from .markup import render
+
+ rendered_text = render(text, style, emoji=emoji, emoji_variant=emoji_variant)
+ rendered_text.justify = justify
+ rendered_text.overflow = overflow
+ return rendered_text
+
+ @classmethod
+ def from_ansi(
+ cls,
+ text: str,
+ *,
+ style: Union[str, Style] = "",
+ justify: Optional["JustifyMethod"] = None,
+ overflow: Optional["OverflowMethod"] = None,
+ no_wrap: Optional[bool] = None,
+ end: str = "\n",
+ tab_size: Optional[int] = 8,
+ ) -> "Text":
+ """Create a Text object from a string containing ANSI escape codes.
+
+ Args:
+ text (str): A string containing escape codes.
+ style (Union[str, Style], optional): Base style for text. Defaults to "".
+ justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None.
+ overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None.
+ no_wrap (bool, optional): Disable text wrapping, or None for default. Defaults to None.
+ end (str, optional): Character to end text with. Defaults to "\\\\n".
+ tab_size (int): Number of spaces per tab, or ``None`` to use ``console.tab_size``. Defaults to 8.
+ """
+ from .ansi import AnsiDecoder
+
+ joiner = Text(
+ "\n",
+ justify=justify,
+ overflow=overflow,
+ no_wrap=no_wrap,
+ end=end,
+ tab_size=tab_size,
+ style=style,
+ )
+ decoder = AnsiDecoder()
+ result = joiner.join(line for line in decoder.decode(text))
+ return result
+
+ @classmethod
+ def styled(
+ cls,
+ text: str,
+ style: StyleType = "",
+ *,
+ justify: Optional["JustifyMethod"] = None,
+ overflow: Optional["OverflowMethod"] = None,
+ ) -> "Text":
+ """Construct a Text instance with a pre-applied styled. A style applied in this way won't be used
+ to pad the text when it is justified.
+
+ Args:
+ text (str): A string containing console markup.
+ style (Union[str, Style]): Style to apply to the text. Defaults to "".
+ justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None.
+ overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None.
+
+ Returns:
+ Text: A text instance with a style applied to the entire string.
+ """
+ styled_text = cls(text, justify=justify, overflow=overflow)
+ styled_text.stylize(style)
+ return styled_text
+
+ @classmethod
+ def assemble(
+ cls,
+ *parts: Union[str, "Text", Tuple[str, StyleType]],
+ style: Union[str, Style] = "",
+ justify: Optional["JustifyMethod"] = None,
+ overflow: Optional["OverflowMethod"] = None,
+ no_wrap: Optional[bool] = None,
+ end: str = "\n",
+ tab_size: int = 8,
+ meta: Optional[Dict[str, Any]] = None,
+ ) -> "Text":
+ """Construct a text instance by combining a sequence of strings with optional styles.
+ The positional arguments should be either strings, or a tuple of string + style.
+
+ Args:
+ style (Union[str, Style], optional): Base style for text. Defaults to "".
+ justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None.
+ overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None.
+ end (str, optional): Character to end text with. Defaults to "\\\\n".
+ tab_size (int): Number of spaces per tab, or ``None`` to use ``console.tab_size``. Defaults to 8.
+ meta (Dict[str, Any], optional): Meta data to apply to text, or None for no meta data. Defaults to None.
+
+ Returns:
+ Text: A new text instance.
+ """
+ text = cls(
+ style=style,
+ justify=justify,
+ overflow=overflow,
+ no_wrap=no_wrap,
+ end=end,
+ tab_size=tab_size,
+ )
+ append = text.append
+ _Text = Text
+ for part in parts:
+ if isinstance(part, (_Text, str)):
+ append(part)
+ else:
+ append(*part)
+ if meta:
+ text.apply_meta(meta)
+ return text
+
+ @property
+ def plain(self) -> str:
+ """Get the text as a single string."""
+ if len(self._text) != 1:
+ self._text[:] = ["".join(self._text)]
+ return self._text[0]
+
+ @plain.setter
+ def plain(self, new_text: str) -> None:
+ """Set the text to a new value."""
+ if new_text != self.plain:
+ self._text[:] = [new_text]
+ old_length = self._length
+ self._length = len(new_text)
+ if old_length > self._length:
+ self._trim_spans()
+
+ @property
+ def spans(self) -> List[Span]:
+ """Get a reference to the internal list of spans."""
+ return self._spans
+
+ @spans.setter
+ def spans(self, spans: List[Span]) -> None:
+ """Set spans."""
+ self._spans = spans[:]
+
+ def blank_copy(self, plain: str = "") -> "Text":
+ """Return a new Text instance with copied meta data (but not the string or spans)."""
+ copy_self = Text(
+ plain,
+ style=self.style,
+ justify=self.justify,
+ overflow=self.overflow,
+ no_wrap=self.no_wrap,
+ end=self.end,
+ tab_size=self.tab_size,
+ )
+ return copy_self
+
+ def copy(self) -> "Text":
+ """Return a copy of this instance."""
+ copy_self = Text(
+ self.plain,
+ style=self.style,
+ justify=self.justify,
+ overflow=self.overflow,
+ no_wrap=self.no_wrap,
+ end=self.end,
+ tab_size=self.tab_size,
+ )
+ copy_self._spans[:] = self._spans
+ return copy_self
+
+ def stylize(
+ self,
+ style: Union[str, Style],
+ start: int = 0,
+ end: Optional[int] = None,
+ ) -> None:
+ """Apply a style to the text, or a portion of the text.
+
+ Args:
+ style (Union[str, Style]): Style instance or style definition to apply.
+ start (int): Start offset (negative indexing is supported). Defaults to 0.
+ end (Optional[int], optional): End offset (negative indexing is supported), or None for end of text. Defaults to None.
+
+ """
+ if style:
+ length = len(self)
+ if start < 0:
+ start = length + start
+ if end is None:
+ end = length
+ if end < 0:
+ end = length + end
+ if start >= length or end <= start:
+ # Span not in text or not valid
+ return
+ self._spans.append(Span(start, min(length, end), style))
+
+ def apply_meta(
+ self, meta: Dict[str, Any], start: int = 0, end: Optional[int] = None
+ ) -> None:
+ """Apply meta data to the text, or a portion of the text.
+
+ Args:
+ meta (Dict[str, Any]): A dict of meta information.
+ start (int): Start offset (negative indexing is supported). Defaults to 0.
+ end (Optional[int], optional): End offset (negative indexing is supported), or None for end of text. Defaults to None.
+
+ """
+ style = Style.from_meta(meta)
+ self.stylize(style, start=start, end=end)
+
+ def on(self, meta: Optional[Dict[str, Any]] = None, **handlers: Any) -> "Text":
+ """Apply event handlers (used by Textual project).
+
+ Example:
+ >>> from rich.text import Text
+ >>> text = Text("hello world")
+ >>> text.on(click="view.toggle('world')")
+
+ Args:
+ meta (Dict[str, Any]): Mapping of meta information.
+ **handlers: Keyword args are prefixed with "@" to defined handlers.
+
+ Returns:
+ Text: Self is returned so that the method may be chained.
+ """
+ meta = {} if meta is None else meta
+ meta.update({f"@{key}": value for key, value in handlers.items()})
+ self.stylize(Style.from_meta(meta))
+ return self
+
+ def remove_suffix(self, suffix: str) -> None:
+ """Remove a suffix if it exists.
+
+ Args:
+ suffix (str): Suffix to remove.
+ """
+ if self.plain.endswith(suffix):
+ self.right_crop(len(suffix))
+
+ def get_style_at_offset(self, console: "Console", offset: int) -> Style:
+ """Get the style of a character at give offset.
+
+ Args:
+ console (~Console): Console where text will be rendered.
+ offset (int): Offset in to text (negative indexing supported)
+
+ Returns:
+ Style: A Style instance.
+ """
+ # TODO: This is a little inefficient, it is only used by full justify
+ if offset < 0:
+ offset = len(self) + offset
+ get_style = console.get_style
+ style = get_style(self.style).copy()
+ for start, end, span_style in self._spans:
+ if end > offset >= start:
+ style += get_style(span_style, default="")
+ return style
+
+ def highlight_regex(
+ self,
+ re_highlight: str,
+ style: Optional[Union[GetStyleCallable, StyleType]] = None,
+ *,
+ style_prefix: str = "",
+ ) -> int:
+ """Highlight text with a regular expression, where group names are
+ translated to styles.
+
+ Args:
+ re_highlight (str): A regular expression.
+ style (Union[GetStyleCallable, StyleType]): Optional style to apply to whole match, or a callable
+ which accepts the matched text and returns a style. Defaults to None.
+ style_prefix (str, optional): Optional prefix to add to style group names.
+
+ Returns:
+ int: Number of regex matches
+ """
+ count = 0
+ append_span = self._spans.append
+ _Span = Span
+ plain = self.plain
+ for match in re.finditer(re_highlight, plain):
+ get_span = match.span
+ if style:
+ start, end = get_span()
+ match_style = style(plain[start:end]) if callable(style) else style
+ if match_style is not None and end > start:
+ append_span(_Span(start, end, match_style))
+
+ count += 1
+ for name in match.groupdict().keys():
+ start, end = get_span(name)
+ if start != -1 and end > start:
+ append_span(_Span(start, end, f"{style_prefix}{name}"))
+ return count
+
+ def highlight_words(
+ self,
+ words: Iterable[str],
+ style: Union[str, Style],
+ *,
+ case_sensitive: bool = True,
+ ) -> int:
+ """Highlight words with a style.
+
+ Args:
+ words (Iterable[str]): Words to highlight.
+ style (Union[str, Style]): Style to apply.
+ case_sensitive (bool, optional): Enable case-sensitive matching. Defaults to True.
+
+ Returns:
+ int: Number of words highlighted.
+ """
+ re_words = "|".join(re.escape(word) for word in words)
+ add_span = self._spans.append
+ count = 0
+ _Span = Span
+ for match in re.finditer(
+ re_words, self.plain, flags=0 if case_sensitive else re.IGNORECASE
+ ):
+ start, end = match.span(0)
+ add_span(_Span(start, end, style))
+ count += 1
+ return count
+
+ def rstrip(self) -> None:
+ """Strip whitespace from end of text."""
+ self.plain = self.plain.rstrip()
+
+ def rstrip_end(self, size: int) -> None:
+ """Remove whitespace beyond a certain width at the end of the text.
+
+ Args:
+ size (int): The desired size of the text.
+ """
+ text_length = len(self)
+ if text_length > size:
+ excess = text_length - size
+ whitespace_match = _re_whitespace.search(self.plain)
+ if whitespace_match is not None:
+ whitespace_count = len(whitespace_match.group(0))
+ self.right_crop(min(whitespace_count, excess))
+
+ def set_length(self, new_length: int) -> None:
+ """Set new length of the text, clipping or padding is required."""
+ length = len(self)
+ if length != new_length:
+ if length < new_length:
+ self.pad_right(new_length - length)
+ else:
+ self.right_crop(length - new_length)
+
+ def __rich_console__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> Iterable[Segment]:
+ tab_size: int = console.tab_size or self.tab_size or 8
+ justify = self.justify or options.justify or DEFAULT_JUSTIFY
+
+ overflow = self.overflow or options.overflow or DEFAULT_OVERFLOW
+
+ lines = self.wrap(
+ console,
+ options.max_width,
+ justify=justify,
+ overflow=overflow,
+ tab_size=tab_size or 8,
+ no_wrap=pick_bool(self.no_wrap, options.no_wrap, False),
+ )
+ all_lines = Text("\n").join(lines)
+ yield from all_lines.render(console, end=self.end)
+
+ def __rich_measure__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> Measurement:
+ text = self.plain
+ lines = text.splitlines()
+ max_text_width = max(cell_len(line) for line in lines) if lines else 0
+ words = text.split()
+ min_text_width = (
+ max(cell_len(word) for word in words) if words else max_text_width
+ )
+ return Measurement(min_text_width, max_text_width)
+
+ def render(self, console: "Console", end: str = "") -> Iterable["Segment"]:
+ """Render the text as Segments.
+
+ Args:
+ console (Console): Console instance.
+ end (Optional[str], optional): Optional end character.
+
+ Returns:
+ Iterable[Segment]: Result of render that may be written to the console.
+ """
+ _Segment = Segment
+ text = self.plain
+ if not self._spans:
+ yield Segment(text)
+ if end:
+ yield _Segment(end)
+ return
+ get_style = partial(console.get_style, default=Style.null())
+
+ enumerated_spans = list(enumerate(self._spans, 1))
+ style_map = {index: get_style(span.style) for index, span in enumerated_spans}
+ style_map[0] = get_style(self.style)
+
+ spans = [
+ (0, False, 0),
+ *((span.start, False, index) for index, span in enumerated_spans),
+ *((span.end, True, index) for index, span in enumerated_spans),
+ (len(text), True, 0),
+ ]
+ spans.sort(key=itemgetter(0, 1))
+
+ stack: List[int] = []
+ stack_append = stack.append
+ stack_pop = stack.remove
+
+ style_cache: Dict[Tuple[Style, ...], Style] = {}
+ style_cache_get = style_cache.get
+ combine = Style.combine
+
+ def get_current_style() -> Style:
+ """Construct current style from stack."""
+ styles = tuple(style_map[_style_id] for _style_id in sorted(stack))
+ cached_style = style_cache_get(styles)
+ if cached_style is not None:
+ return cached_style
+ current_style = combine(styles)
+ style_cache[styles] = current_style
+ return current_style
+
+ for (offset, leaving, style_id), (next_offset, _, _) in zip(spans, spans[1:]):
+ if leaving:
+ stack_pop(style_id)
+ else:
+ stack_append(style_id)
+ if next_offset > offset:
+ yield _Segment(text[offset:next_offset], get_current_style())
+ if end:
+ yield _Segment(end)
+
+ def join(self, lines: Iterable["Text"]) -> "Text":
+ """Join text together with this instance as the separator.
+
+ Args:
+ lines (Iterable[Text]): An iterable of Text instances to join.
+
+ Returns:
+ Text: A new text instance containing join text.
+ """
+
+ new_text = self.blank_copy()
+
+ def iter_text() -> Iterable["Text"]:
+ if self.plain:
+ for last, line in loop_last(lines):
+ yield line
+ if not last:
+ yield self
+ else:
+ yield from lines
+
+ extend_text = new_text._text.extend
+ append_span = new_text._spans.append
+ extend_spans = new_text._spans.extend
+ offset = 0
+ _Span = Span
+
+ for text in iter_text():
+ extend_text(text._text)
+ if text.style:
+ append_span(_Span(offset, offset + len(text), text.style))
+ extend_spans(
+ _Span(offset + start, offset + end, style)
+ for start, end, style in text._spans
+ )
+ offset += len(text)
+ new_text._length = offset
+ return new_text
+
+ def expand_tabs(self, tab_size: Optional[int] = None) -> None:
+ """Converts tabs to spaces.
+
+ Args:
+ tab_size (int, optional): Size of tabs. Defaults to 8.
+
+ """
+ if "\t" not in self.plain:
+ return
+ pos = 0
+ if tab_size is None:
+ tab_size = self.tab_size
+ assert tab_size is not None
+ result = self.blank_copy()
+ append = result.append
+
+ _style = self.style
+ for line in self.split("\n", include_separator=True):
+ parts = line.split("\t", include_separator=True)
+ for part in parts:
+ if part.plain.endswith("\t"):
+ part._text = [part.plain[:-1] + " "]
+ append(part)
+ pos += len(part)
+ spaces = tab_size - ((pos - 1) % tab_size) - 1
+ if spaces:
+ append(" " * spaces, _style)
+ pos += spaces
+ else:
+ append(part)
+ self._text = [result.plain]
+ self._length = len(self.plain)
+ self._spans[:] = result._spans
+
+ def truncate(
+ self,
+ max_width: int,
+ *,
+ overflow: Optional["OverflowMethod"] = None,
+ pad: bool = False,
+ ) -> None:
+ """Truncate text if it is longer that a given width.
+
+ Args:
+ max_width (int): Maximum number of characters in text.
+ overflow (str, optional): Overflow method: "crop", "fold", or "ellipsis". Defaults to None, to use self.overflow.
+ pad (bool, optional): Pad with spaces if the length is less than max_width. Defaults to False.
+ """
+ _overflow = overflow or self.overflow or DEFAULT_OVERFLOW
+ if _overflow != "ignore":
+ length = cell_len(self.plain)
+ if length > max_width:
+ if _overflow == "ellipsis":
+ self.plain = set_cell_size(self.plain, max_width - 1) + "…"
+ else:
+ self.plain = set_cell_size(self.plain, max_width)
+ if pad and length < max_width:
+ spaces = max_width - length
+ self._text = [f"{self.plain}{' ' * spaces}"]
+ self._length = len(self.plain)
+
+ def _trim_spans(self) -> None:
+ """Remove or modify any spans that are over the end of the text."""
+ max_offset = len(self.plain)
+ _Span = Span
+ self._spans[:] = [
+ (
+ span
+ if span.end < max_offset
+ else _Span(span.start, min(max_offset, span.end), span.style)
+ )
+ for span in self._spans
+ if span.start < max_offset
+ ]
+
+ def pad(self, count: int, character: str = " ") -> None:
+ """Pad left and right with a given number of characters.
+
+ Args:
+ count (int): Width of padding.
+ """
+ assert len(character) == 1, "Character must be a string of length 1"
+ if count:
+ pad_characters = character * count
+ self.plain = f"{pad_characters}{self.plain}{pad_characters}"
+ _Span = Span
+ self._spans[:] = [
+ _Span(start + count, end + count, style)
+ for start, end, style in self._spans
+ ]
+
+ def pad_left(self, count: int, character: str = " ") -> None:
+ """Pad the left with a given character.
+
+ Args:
+ count (int): Number of characters to pad.
+ character (str, optional): Character to pad with. Defaults to " ".
+ """
+ assert len(character) == 1, "Character must be a string of length 1"
+ if count:
+ self.plain = f"{character * count}{self.plain}"
+ _Span = Span
+ self._spans[:] = [
+ _Span(start + count, end + count, style)
+ for start, end, style in self._spans
+ ]
+
+ def pad_right(self, count: int, character: str = " ") -> None:
+ """Pad the right with a given character.
+
+ Args:
+ count (int): Number of characters to pad.
+ character (str, optional): Character to pad with. Defaults to " ".
+ """
+ assert len(character) == 1, "Character must be a string of length 1"
+ if count:
+ self.plain = f"{self.plain}{character * count}"
+
+ def align(self, align: AlignMethod, width: int, character: str = " ") -> None:
+ """Align text to a given width.
+
+ Args:
+ align (AlignMethod): One of "left", "center", or "right".
+ width (int): Desired width.
+ character (str, optional): Character to pad with. Defaults to " ".
+ """
+ self.truncate(width)
+ excess_space = width - cell_len(self.plain)
+ if excess_space:
+ if align == "left":
+ self.pad_right(excess_space, character)
+ elif align == "center":
+ left = excess_space // 2
+ self.pad_left(left, character)
+ self.pad_right(excess_space - left, character)
+ else:
+ self.pad_left(excess_space, character)
+
+ def append(
+ self, text: Union["Text", str], style: Optional[Union[str, "Style"]] = None
+ ) -> "Text":
+ """Add text with an optional style.
+
+ Args:
+ text (Union[Text, str]): A str or Text to append.
+ style (str, optional): A style name. Defaults to None.
+
+ Returns:
+ Text: Returns self for chaining.
+ """
+
+ if not isinstance(text, (str, Text)):
+ raise TypeError("Only str or Text can be appended to Text")
+
+ if len(text):
+ if isinstance(text, str):
+ text = strip_control_codes(text)
+ self._text.append(text)
+ offset = len(self)
+ text_length = len(text)
+ if style is not None:
+ self._spans.append(Span(offset, offset + text_length, style))
+ self._length += text_length
+ elif isinstance(text, Text):
+ _Span = Span
+ if style is not None:
+ raise ValueError(
+ "style must not be set when appending Text instance"
+ )
+ text_length = self._length
+ if text.style is not None:
+ self._spans.append(
+ _Span(text_length, text_length + len(text), text.style)
+ )
+ self._text.append(text.plain)
+ self._spans.extend(
+ _Span(start + text_length, end + text_length, style)
+ for start, end, style in text._spans
+ )
+ self._length += len(text)
+ return self
+
+ def append_text(self, text: "Text") -> "Text":
+ """Append another Text instance. This method is more performant that Text.append, but
+ only works for Text.
+
+ Returns:
+ Text: Returns self for chaining.
+ """
+ _Span = Span
+ text_length = self._length
+ if text.style is not None:
+ self._spans.append(_Span(text_length, text_length + len(text), text.style))
+ self._text.append(text.plain)
+ self._spans.extend(
+ _Span(start + text_length, end + text_length, style)
+ for start, end, style in text._spans
+ )
+ self._length += len(text)
+ return self
+
+ def append_tokens(
+ self, tokens: Iterable[Tuple[str, Optional[StyleType]]]
+ ) -> "Text":
+ """Append iterable of str and style. Style may be a Style instance or a str style definition.
+
+ Args:
+ tokens (Iterable[Tuple[str, Optional[StyleType]]]): An iterable of tuples containing str content and style.
+
+ Returns:
+ Text: Returns self for chaining.
+ """
+ append_text = self._text.append
+ append_span = self._spans.append
+ _Span = Span
+ offset = len(self)
+ for content, style in tokens:
+ append_text(content)
+ if style is not None:
+ append_span(_Span(offset, offset + len(content), style))
+ offset += len(content)
+ self._length = offset
+ return self
+
+ def copy_styles(self, text: "Text") -> None:
+ """Copy styles from another Text instance.
+
+ Args:
+ text (Text): A Text instance to copy styles from, must be the same length.
+ """
+ self._spans.extend(text._spans)
+
+ def split(
+ self,
+ separator: str = "\n",
+ *,
+ include_separator: bool = False,
+ allow_blank: bool = False,
+ ) -> Lines:
+ """Split rich text in to lines, preserving styles.
+
+ Args:
+ separator (str, optional): String to split on. Defaults to "\\\\n".
+ include_separator (bool, optional): Include the separator in the lines. Defaults to False.
+ allow_blank (bool, optional): Return a blank line if the text ends with a separator. Defaults to False.
+
+ Returns:
+ Lines: A list of rich text, one per line of the original.
+ """
+ assert separator, "separator must not be empty"
+
+ text = self.plain
+ if separator not in text:
+ return Lines([self.copy()])
+
+ if include_separator:
+ lines = self.divide(
+ match.end() for match in re.finditer(re.escape(separator), text)
+ )
+ else:
+
+ def flatten_spans() -> Iterable[int]:
+ for match in re.finditer(re.escape(separator), text):
+ start, end = match.span()
+ yield start
+ yield end
+
+ lines = Lines(
+ line for line in self.divide(flatten_spans()) if line.plain != separator
+ )
+
+ if not allow_blank and text.endswith(separator):
+ lines.pop()
+
+ return lines
+
+ def divide(self, offsets: Iterable[int]) -> Lines:
+ """Divide text in to a number of lines at given offsets.
+
+ Args:
+ offsets (Iterable[int]): Offsets used to divide text.
+
+ Returns:
+ Lines: New RichText instances between offsets.
+ """
+ _offsets = list(offsets)
+
+ if not _offsets:
+ return Lines([self.copy()])
+
+ text = self.plain
+ text_length = len(text)
+ divide_offsets = [0, *_offsets, text_length]
+ line_ranges = list(zip(divide_offsets, divide_offsets[1:]))
+
+ style = self.style
+ justify = self.justify
+ overflow = self.overflow
+ _Text = Text
+ new_lines = Lines(
+ _Text(
+ text[start:end],
+ style=style,
+ justify=justify,
+ overflow=overflow,
+ )
+ for start, end in line_ranges
+ )
+ if not self._spans:
+ return new_lines
+
+ _line_appends = [line._spans.append for line in new_lines._lines]
+ line_count = len(line_ranges)
+ _Span = Span
+
+ for span_start, span_end, style in self._spans:
+
+ lower_bound = 0
+ upper_bound = line_count
+ start_line_no = (lower_bound + upper_bound) // 2
+
+ while True:
+ line_start, line_end = line_ranges[start_line_no]
+ if span_start < line_start:
+ upper_bound = start_line_no - 1
+ elif span_start > line_end:
+ lower_bound = start_line_no + 1
+ else:
+ break
+ start_line_no = (lower_bound + upper_bound) // 2
+
+ if span_end < line_end:
+ end_line_no = start_line_no
+ else:
+ end_line_no = lower_bound = start_line_no
+ upper_bound = line_count
+
+ while True:
+ line_start, line_end = line_ranges[end_line_no]
+ if span_end < line_start:
+ upper_bound = end_line_no - 1
+ elif span_end > line_end:
+ lower_bound = end_line_no + 1
+ else:
+ break
+ end_line_no = (lower_bound + upper_bound) // 2
+
+ for line_no in range(start_line_no, end_line_no + 1):
+ line_start, line_end = line_ranges[line_no]
+ new_start = max(0, span_start - line_start)
+ new_end = min(span_end - line_start, line_end - line_start)
+ if new_end > new_start:
+ _line_appends[line_no](_Span(new_start, new_end, style))
+
+ return new_lines
+
+ def right_crop(self, amount: int = 1) -> None:
+ """Remove a number of characters from the end of the text."""
+ max_offset = len(self.plain) - amount
+ _Span = Span
+ self._spans[:] = [
+ (
+ span
+ if span.end < max_offset
+ else _Span(span.start, min(max_offset, span.end), span.style)
+ )
+ for span in self._spans
+ if span.start < max_offset
+ ]
+ self._text = [self.plain[:-amount]]
+ self._length -= amount
+
+ def wrap(
+ self,
+ console: "Console",
+ width: int,
+ *,
+ justify: Optional["JustifyMethod"] = None,
+ overflow: Optional["OverflowMethod"] = None,
+ tab_size: int = 8,
+ no_wrap: Optional[bool] = None,
+ ) -> Lines:
+ """Word wrap the text.
+
+ Args:
+ console (Console): Console instance.
+ width (int): Number of characters per line.
+ justify (str, optional): Justify method: "default", "left", "center", "full", "right". Defaults to "default".
+ overflow (str, optional): Overflow method: "crop", "fold", or "ellipsis". Defaults to None.
+ tab_size (int, optional): Default tab size. Defaults to 8.
+ no_wrap (bool, optional): Disable wrapping, Defaults to False.
+
+ Returns:
+ Lines: The wrapped lines.
+ """
+ wrap_justify = justify or self.justify or DEFAULT_JUSTIFY
+ wrap_overflow = overflow or self.overflow or DEFAULT_OVERFLOW
+
+ no_wrap = pick_bool(no_wrap, self.no_wrap, False) or overflow == "ignore"
+
+ lines = Lines()
+ for line in self.split(allow_blank=True):
+ if "\t" in line:
+ line.expand_tabs(tab_size)
+ if no_wrap:
+ new_lines = Lines([line])
+ else:
+ offsets = divide_line(str(line), width, fold=wrap_overflow == "fold")
+ new_lines = line.divide(offsets)
+ for line in new_lines:
+ line.rstrip_end(width)
+ if wrap_justify:
+ new_lines.justify(
+ console, width, justify=wrap_justify, overflow=wrap_overflow
+ )
+ for line in new_lines:
+ line.truncate(width, overflow=wrap_overflow)
+ lines.extend(new_lines)
+ return lines
+
+ def fit(self, width: int) -> Lines:
+ """Fit the text in to given width by chopping in to lines.
+
+ Args:
+ width (int): Maximum characters in a line.
+
+ Returns:
+ Lines: List of lines.
+ """
+ lines: Lines = Lines()
+ append = lines.append
+ for line in self.split():
+ line.set_length(width)
+ append(line)
+ return lines
+
+ def detect_indentation(self) -> int:
+ """Auto-detect indentation of code.
+
+ Returns:
+ int: Number of spaces used to indent code.
+ """
+
+ _indentations = {
+ len(match.group(1))
+ for match in re.finditer(r"^( *)(.*)$", self.plain, flags=re.MULTILINE)
+ }
+
+ try:
+ indentation = (
+ reduce(gcd, [indent for indent in _indentations if not indent % 2]) or 1
+ )
+ except TypeError:
+ indentation = 1
+
+ return indentation
+
+ def with_indent_guides(
+ self,
+ indent_size: Optional[int] = None,
+ *,
+ character: str = "│",
+ style: StyleType = "dim green",
+ ) -> "Text":
+ """Adds indent guide lines to text.
+
+ Args:
+ indent_size (Optional[int]): Size of indentation, or None to auto detect. Defaults to None.
+ character (str, optional): Character to use for indentation. Defaults to "│".
+ style (Union[Style, str], optional): Style of indent guides.
+
+ Returns:
+ Text: New text with indentation guides.
+ """
+
+ _indent_size = self.detect_indentation() if indent_size is None else indent_size
+
+ text = self.copy()
+ text.expand_tabs()
+ indent_line = f"{character}{' ' * (_indent_size - 1)}"
+
+ re_indent = re.compile(r"^( *)(.*)$")
+ new_lines: List[Text] = []
+ add_line = new_lines.append
+ blank_lines = 0
+ for line in text.split(allow_blank=True):
+ match = re_indent.match(line.plain)
+ if not match or not match.group(2):
+ blank_lines += 1
+ continue
+ indent = match.group(1)
+ full_indents, remaining_space = divmod(len(indent), _indent_size)
+ new_indent = f"{indent_line * full_indents}{' ' * remaining_space}"
+ line.plain = new_indent + line.plain[len(new_indent) :]
+ line.stylize(style, 0, len(new_indent))
+ if blank_lines:
+ new_lines.extend([Text(new_indent, style=style)] * blank_lines)
+ blank_lines = 0
+ add_line(line)
+ if blank_lines:
+ new_lines.extend([Text("", style=style)] * blank_lines)
+
+ new_text = text.blank_copy("\n").join(new_lines)
+ return new_text
+
+
+if __name__ == "__main__": # pragma: no cover
+ from pip._vendor.rich.console import Console
+
+ text = Text(
+ """\nLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\n"""
+ )
+ text.highlight_words(["Lorem"], "bold")
+ text.highlight_words(["ipsum"], "italic")
+
+ console = Console()
+
+ console.rule("justify='left'")
+ console.print(text, style="red")
+ console.print()
+
+ console.rule("justify='center'")
+ console.print(text, style="green", justify="center")
+ console.print()
+
+ console.rule("justify='right'")
+ console.print(text, style="blue", justify="right")
+ console.print()
+
+ console.rule("justify='full'")
+ console.print(text, style="magenta", justify="full")
+ console.print()
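
The core `Text` API defined above can be exercised in a few lines. An illustrative sketch (not part of the diff; variable names are made up, imports assume the vendored paths):

```python
# Minimal sketch of the Text API: markup, styled append, word highlighting, slicing.
from pip._vendor.rich.console import Console
from pip._vendor.rich.text import Text

console = Console()

# Build styled text from console markup, then add more styled content.
greeting = Text.from_markup("[bold]Hello[/bold], ")
greeting.append("world", style="italic magenta")
greeting.highlight_words(["world"], "underline")

# Style spans survive slicing and splitting, because divide() re-maps spans per line.
first_word = greeting[0:5]
console.print(greeting)
console.print(first_word)
```
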
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/theme.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/theme.py
new file mode 100644
index 0000000..bfb3c7f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/theme.py
@@ -0,0 +1,112 @@
+import configparser
+from typing import Dict, List, IO, Mapping, Optional
+
+from .default_styles import DEFAULT_STYLES
+from .style import Style, StyleType
+
+
+class Theme:
+ """A container for style information, used by :class:`~rich.console.Console`.
+
+ Args:
+ styles (Dict[str, Style], optional): A mapping of style names on to styles. Defaults to None for a theme with no styles.
+ inherit (bool, optional): Inherit default styles. Defaults to True.
+ """
+
+ styles: Dict[str, Style]
+
+ def __init__(
+ self, styles: Optional[Mapping[str, StyleType]] = None, inherit: bool = True
+ ):
+ self.styles = DEFAULT_STYLES.copy() if inherit else {}
+ if styles is not None:
+ self.styles.update(
+ {
+ name: style if isinstance(style, Style) else Style.parse(style)
+ for name, style in styles.items()
+ }
+ )
+
+ @property
+ def config(self) -> str:
+ """Get contents of a config file for this theme."""
+ config = "[styles]\n" + "\n".join(
+ f"{name} = {style}" for name, style in sorted(self.styles.items())
+ )
+ return config
+
+ @classmethod
+ def from_file(
+ cls, config_file: IO[str], source: Optional[str] = None, inherit: bool = True
+ ) -> "Theme":
+ """Load a theme from a text mode file.
+
+ Args:
+ config_file (IO[str]): An open conf file.
+ source (str, optional): The filename of the open file. Defaults to None.
+ inherit (bool, optional): Inherit default styles. Defaults to True.
+
+ Returns:
+ Theme: A New theme instance.
+ """
+ config = configparser.ConfigParser()
+ config.read_file(config_file, source=source)
+ styles = {name: Style.parse(value) for name, value in config.items("styles")}
+ theme = Theme(styles, inherit=inherit)
+ return theme
+
+ @classmethod
+ def read(cls, path: str, inherit: bool = True) -> "Theme":
+ """Read a theme from a path.
+
+ Args:
+ path (str): Path to a config file readable by Python configparser module.
+ inherit (bool, optional): Inherit default styles. Defaults to True.
+
+ Returns:
+ Theme: A new theme instance.
+ """
+ with open(path, "rt") as config_file:
+ return cls.from_file(config_file, source=path, inherit=inherit)
+
+
+class ThemeStackError(Exception):
+ """Base exception for errors related to the theme stack."""
+
+
+class ThemeStack:
+ """A stack of themes.
+
+ Args:
+ theme (Theme): A theme instance
+ """
+
+ def __init__(self, theme: Theme) -> None:
+ self._entries: List[Dict[str, Style]] = [theme.styles]
+ self.get = self._entries[-1].get
+
+ def push_theme(self, theme: Theme, inherit: bool = True) -> None:
+ """Push a theme on the top of the stack.
+
+ Args:
+ theme (Theme): A Theme instance.
+ inherit (boolean, optional): Inherit styles from current top of stack.
+ """
+ styles: Dict[str, Style]
+ styles = (
+ {**self._entries[-1], **theme.styles} if inherit else theme.styles.copy()
+ )
+ self._entries.append(styles)
+ self.get = self._entries[-1].get
+
+ def pop_theme(self) -> None:
+ """Pop (and discard) the top-most theme."""
+ if len(self._entries) == 1:
+ raise ThemeStackError("Unable to pop base theme")
+ self._entries.pop()
+ self.get = self._entries[-1].get
+
+
+if __name__ == "__main__": # pragma: no cover
+ theme = Theme()
+ print(theme.config)
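
A short sketch of how `Theme` and the console theme stack are typically used (illustrative only, not part of the diff; the style names `info`/`danger` are made up):

```python
# Minimal sketch of Theme plus Console.push_theme/pop_theme.
from pip._vendor.rich.console import Console
from pip._vendor.rich.theme import Theme

custom = Theme({"info": "dim cyan", "danger": "bold red"})  # hypothetical styles
console = Console(theme=custom)
console.print("status ok", style="info")

# Temporarily layer another theme on top, then restore the previous styles.
console.push_theme(Theme({"info": "bold green"}))
console.print("status ok", style="info")
console.pop_theme()
```
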
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/themes.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/themes.py
new file mode 100644
index 0000000..bf6db10
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/themes.py
@@ -0,0 +1,5 @@
+from .default_styles import DEFAULT_STYLES
+from .theme import Theme
+
+
+DEFAULT = Theme(DEFAULT_STYLES)
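
For completeness, `themes.DEFAULT` is just a prebuilt `Theme` over `DEFAULT_STYLES`; passing it to a console is equivalent to the default inherit behaviour. A tiny sketch (not part of the diff; `repr.number` is assumed to be one of the built-in style names):

```python
# Minimal sketch: themes.DEFAULT is a Theme constructed from DEFAULT_STYLES.
from pip._vendor.rich import themes
from pip._vendor.rich.console import Console

console = Console(theme=themes.DEFAULT)
console.print("12345", style="repr.number")  # a style name from DEFAULT_STYLES
```
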
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/traceback.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/traceback.py
new file mode 100644
index 0000000..66a39eb
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/traceback.py
@@ -0,0 +1,678 @@
+from __future__ import absolute_import
+
+import os
+import platform
+import sys
+from dataclasses import dataclass, field
+from traceback import walk_tb
+from types import ModuleType, TracebackType
+from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Type, Union
+
+from pip._vendor.pygments.lexers import guess_lexer_for_filename
+from pip._vendor.pygments.token import Comment, Keyword, Name, Number, Operator, String
+from pip._vendor.pygments.token import Text as TextToken
+from pip._vendor.pygments.token import Token
+
+from . import pretty
+from ._loop import loop_first, loop_last
+from .columns import Columns
+from .console import Console, ConsoleOptions, ConsoleRenderable, RenderResult, group
+from .constrain import Constrain
+from .highlighter import RegexHighlighter, ReprHighlighter
+from .panel import Panel
+from .scope import render_scope
+from .style import Style
+from .syntax import Syntax
+from .text import Text
+from .theme import Theme
+
+WINDOWS = platform.system() == "Windows"
+
+LOCALS_MAX_LENGTH = 10
+LOCALS_MAX_STRING = 80
+
+
+def install(
+ *,
+ console: Optional[Console] = None,
+ width: Optional[int] = 100,
+ extra_lines: int = 3,
+ theme: Optional[str] = None,
+ word_wrap: bool = False,
+ show_locals: bool = False,
+ indent_guides: bool = True,
+ suppress: Iterable[Union[str, ModuleType]] = (),
+ max_frames: int = 100,
+) -> Callable[[Type[BaseException], BaseException, Optional[TracebackType]], Any]:
+ """Install a rich traceback handler.
+
+ Once installed, any tracebacks will be printed with syntax highlighting and rich formatting.
+
+
+ Args:
+ console (Optional[Console], optional): Console to write exception to. Default uses internal Console instance.
+ width (Optional[int], optional): Width (in characters) of traceback. Defaults to 100.
+ extra_lines (int, optional): Extra lines of code. Defaults to 3.
+ theme (Optional[str], optional): Pygments theme to use in traceback. Defaults to ``None`` which will pick
+ a theme appropriate for the platform.
+ word_wrap (bool, optional): Enable word wrapping of long lines. Defaults to False.
+ show_locals (bool, optional): Enable display of local variables. Defaults to False.
+ indent_guides (bool, optional): Enable indent guides in code and locals. Defaults to True.
+ suppress (Sequence[Union[str, ModuleType]]): Optional sequence of modules or paths to exclude from traceback.
+
+ Returns:
+ Callable: The previous exception handler that was replaced.
+
+ """
+ traceback_console = Console(file=sys.stderr) if console is None else console
+
+ def excepthook(
+ type_: Type[BaseException],
+ value: BaseException,
+ traceback: Optional[TracebackType],
+ ) -> None:
+ traceback_console.print(
+ Traceback.from_exception(
+ type_,
+ value,
+ traceback,
+ width=width,
+ extra_lines=extra_lines,
+ theme=theme,
+ word_wrap=word_wrap,
+ show_locals=show_locals,
+ indent_guides=indent_guides,
+ suppress=suppress,
+ max_frames=max_frames,
+ )
+ )
+
+ def ipy_excepthook_closure(ip: Any) -> None: # pragma: no cover
+ tb_data = {} # store information about showtraceback call
+ default_showtraceback = ip.showtraceback # keep reference of default traceback
+
+ def ipy_show_traceback(*args: Any, **kwargs: Any) -> None:
+ """wrap the default ip.showtraceback to store info for ip._showtraceback"""
+ nonlocal tb_data
+ tb_data = kwargs
+ default_showtraceback(*args, **kwargs)
+
+ def ipy_display_traceback(
+ *args: Any, is_syntax: bool = False, **kwargs: Any
+ ) -> None:
+ """Internally called traceback from ip._showtraceback"""
+ nonlocal tb_data
+ exc_tuple = ip._get_exc_info()
+
+ # do not display trace on syntax error
+ tb: Optional[TracebackType] = None if is_syntax else exc_tuple[2]
+
+ # determine correct tb_offset
+ compiled = tb_data.get("running_compiled_code", False)
+ tb_offset = tb_data.get("tb_offset", 1 if compiled else 0)
+ # remove ipython internal frames from trace with tb_offset
+ for _ in range(tb_offset):
+ if tb is None:
+ break
+ tb = tb.tb_next
+
+ excepthook(exc_tuple[0], exc_tuple[1], tb)
+ tb_data = {} # clear data upon usage
+
+ # replace _showtraceback instead of showtraceback to allow ipython features such as debugging to work
+ # this is also what the ipython docs recommends to modify when subclassing InteractiveShell
+ ip._showtraceback = ipy_display_traceback
+ # add wrapper to capture tb_data
+ ip.showtraceback = ipy_show_traceback
+ ip.showsyntaxerror = lambda *args, **kwargs: ipy_display_traceback(
+ *args, is_syntax=True, **kwargs
+ )
+
+ try: # pragma: no cover
+ # if within ipython, use customized traceback
+ ip = get_ipython() # type: ignore
+ ipy_excepthook_closure(ip)
+ return sys.excepthook
+ except Exception:
+ # otherwise use default system hook
+ old_excepthook = sys.excepthook
+ sys.excepthook = excepthook
+ return old_excepthook
+
+
+@dataclass
+class Frame:
+ filename: str
+ lineno: int
+ name: str
+ line: str = ""
+ locals: Optional[Dict[str, pretty.Node]] = None
+
+
+@dataclass
+class _SyntaxError:
+ offset: int
+ filename: str
+ line: str
+ lineno: int
+ msg: str
+
+
+@dataclass
+class Stack:
+ exc_type: str
+ exc_value: str
+ syntax_error: Optional[_SyntaxError] = None
+ is_cause: bool = False
+ frames: List[Frame] = field(default_factory=list)
+
+
+@dataclass
+class Trace:
+ stacks: List[Stack]
+
+
+class PathHighlighter(RegexHighlighter):
+ highlights = [r"(?P.*/)(?P.+)"]
+
+
+class Traceback:
+ """A Console renderable that renders a traceback.
+
+ Args:
+ trace (Trace, optional): A `Trace` object produced from `extract`. Defaults to None, which uses
+ the last exception.
+ width (Optional[int], optional): Number of characters used to render the traceback. Defaults to 100.
+ extra_lines (int, optional): Additional lines of code to render. Defaults to 3.
+ theme (str, optional): Override pygments theme used in traceback.
+ word_wrap (bool, optional): Enable word wrapping of long lines. Defaults to False.
+ show_locals (bool, optional): Enable display of local variables. Defaults to False.
+ indent_guides (bool, optional): Enable indent guides in code and locals. Defaults to True.
+ locals_max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
+ Defaults to 10.
+ locals_max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to 80.
+ suppress (Sequence[Union[str, ModuleType]]): Optional sequence of modules or paths to exclude from traceback.
+ max_frames (int): Maximum number of frames to show in a traceback, 0 for no maximum. Defaults to 100.
+
+ """
+
+ LEXERS = {
+ "": "text",
+ ".py": "python",
+ ".pxd": "cython",
+ ".pyx": "cython",
+ ".pxi": "pyrex",
+ }
+
+ def __init__(
+ self,
+ trace: Optional[Trace] = None,
+ width: Optional[int] = 100,
+ extra_lines: int = 3,
+ theme: Optional[str] = None,
+ word_wrap: bool = False,
+ show_locals: bool = False,
+ indent_guides: bool = True,
+ locals_max_length: int = LOCALS_MAX_LENGTH,
+ locals_max_string: int = LOCALS_MAX_STRING,
+ suppress: Iterable[Union[str, ModuleType]] = (),
+ max_frames: int = 100,
+ ):
+ if trace is None:
+ exc_type, exc_value, traceback = sys.exc_info()
+ if exc_type is None or exc_value is None or traceback is None:
+ raise ValueError(
+ "Value for 'trace' required if not called in except: block"
+ )
+ trace = self.extract(
+ exc_type, exc_value, traceback, show_locals=show_locals
+ )
+ self.trace = trace
+ self.width = width
+ self.extra_lines = extra_lines
+ self.theme = Syntax.get_theme(theme or "ansi_dark")
+ self.word_wrap = word_wrap
+ self.show_locals = show_locals
+ self.indent_guides = indent_guides
+ self.locals_max_length = locals_max_length
+ self.locals_max_string = locals_max_string
+
+ self.suppress: Sequence[str] = []
+ for suppress_entity in suppress:
+ if not isinstance(suppress_entity, str):
+ assert (
+ suppress_entity.__file__ is not None
+ ), f"{suppress_entity!r} must be a module with '__file__' attribute"
+ path = os.path.dirname(suppress_entity.__file__)
+ else:
+ path = suppress_entity
+ path = os.path.normpath(os.path.abspath(path))
+ self.suppress.append(path)
+ self.max_frames = max(4, max_frames) if max_frames > 0 else 0
+
+ @classmethod
+ def from_exception(
+ cls,
+ exc_type: Type[Any],
+ exc_value: BaseException,
+ traceback: Optional[TracebackType],
+ width: Optional[int] = 100,
+ extra_lines: int = 3,
+ theme: Optional[str] = None,
+ word_wrap: bool = False,
+ show_locals: bool = False,
+ indent_guides: bool = True,
+ locals_max_length: int = LOCALS_MAX_LENGTH,
+ locals_max_string: int = LOCALS_MAX_STRING,
+ suppress: Iterable[Union[str, ModuleType]] = (),
+ max_frames: int = 100,
+ ) -> "Traceback":
+ """Create a traceback from exception info
+
+ Args:
+ exc_type (Type[BaseException]): Exception type.
+ exc_value (BaseException): Exception value.
+ traceback (TracebackType): Python Traceback object.
+ width (Optional[int], optional): Number of characters used to render the traceback. Defaults to 100.
+ extra_lines (int, optional): Additional lines of code to render. Defaults to 3.
+ theme (str, optional): Override pygments theme used in traceback.
+ word_wrap (bool, optional): Enable word wrapping of long lines. Defaults to False.
+ show_locals (bool, optional): Enable display of local variables. Defaults to False.
+ indent_guides (bool, optional): Enable indent guides in code and locals. Defaults to True.
+ locals_max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
+ Defaults to 10.
+ locals_max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to 80.
+ suppress (Iterable[Union[str, ModuleType]]): Optional sequence of modules or paths to exclude from traceback.
+ max_frames (int): Maximum number of frames to show in a traceback, 0 for no maximum. Defaults to 100.
+
+ Returns:
+ Traceback: A Traceback instance that may be printed.
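+
+ Example (an illustrative sketch; assumes it runs inside an ``except`` block with a ``Console`` named ``console``)::
+
+ import sys
+ exc_type, exc_value, tb = sys.exc_info()
+ console.print(Traceback.from_exception(exc_type, exc_value, tb))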
+ """
+ rich_traceback = cls.extract(
+ exc_type, exc_value, traceback, show_locals=show_locals
+ )
+ return cls(
+ rich_traceback,
+ width=width,
+ extra_lines=extra_lines,
+ theme=theme,
+ word_wrap=word_wrap,
+ show_locals=show_locals,
+ indent_guides=indent_guides,
+ locals_max_length=locals_max_length,
+ locals_max_string=locals_max_string,
+ suppress=suppress,
+ max_frames=max_frames,
+ )
+
+ @classmethod
+ def extract(
+ cls,
+ exc_type: Type[BaseException],
+ exc_value: BaseException,
+ traceback: Optional[TracebackType],
+ show_locals: bool = False,
+ locals_max_length: int = LOCALS_MAX_LENGTH,
+ locals_max_string: int = LOCALS_MAX_STRING,
+ ) -> Trace:
+ """Extract traceback information.
+
+ Args:
+ exc_type (Type[BaseException]): Exception type.
+ exc_value (BaseException): Exception value.
+ traceback (TracebackType): Python Traceback object.
+ show_locals (bool, optional): Enable display of local variables. Defaults to False.
+ locals_max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
+ Defaults to 10.
+ locals_max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to 80.
+
+ Returns:
+ Trace: A Trace instance which you can use to construct a `Traceback`.
+ """
+
+ stacks: List[Stack] = []
+ is_cause = False
+
+ from pip._vendor.rich import _IMPORT_CWD
+
+ def safe_str(_object: Any) -> str:
+ """Don't allow exceptions from __str__ to propegate."""
+ try:
+ return str(_object)
+ except Exception:
+ return ""
+
+ while True:
+ stack = Stack(
+ exc_type=safe_str(exc_type.__name__),
+ exc_value=safe_str(exc_value),
+ is_cause=is_cause,
+ )
+
+ if isinstance(exc_value, SyntaxError):
+ stack.syntax_error = _SyntaxError(
+ offset=exc_value.offset or 0,
+ filename=exc_value.filename or "?",
+ lineno=exc_value.lineno or 0,
+ line=exc_value.text or "",
+ msg=exc_value.msg,
+ )
+
+ stacks.append(stack)
+ append = stack.frames.append
+
+ for frame_summary, line_no in walk_tb(traceback):
+ filename = frame_summary.f_code.co_filename
+ if filename and not filename.startswith("<"):
+ if not os.path.isabs(filename):
+ filename = os.path.join(_IMPORT_CWD, filename)
+ frame = Frame(
+ filename=filename or "?",
+ lineno=line_no,
+ name=frame_summary.f_code.co_name,
+ locals={
+ key: pretty.traverse(
+ value,
+ max_length=locals_max_length,
+ max_string=locals_max_string,
+ )
+ for key, value in frame_summary.f_locals.items()
+ }
+ if show_locals
+ else None,
+ )
+ append(frame)
+ if "_rich_traceback_guard" in frame_summary.f_locals:
+ del stack.frames[:]
+
+ cause = getattr(exc_value, "__cause__", None)
+ if cause and cause.__traceback__:
+ exc_type = cause.__class__
+ exc_value = cause
+ traceback = cause.__traceback__
+ if traceback:
+ is_cause = True
+ continue
+
+ cause = exc_value.__context__
+ if (
+ cause
+ and cause.__traceback__
+ and not getattr(exc_value, "__suppress_context__", False)
+ ):
+ exc_type = cause.__class__
+ exc_value = cause
+ traceback = cause.__traceback__
+ if traceback:
+ is_cause = False
+ continue
+ # No cover, code is reached but coverage doesn't recognize it.
+ break # pragma: no cover
+
+ trace = Trace(stacks=stacks)
+ return trace
+
+ def __rich_console__(
+ self, console: Console, options: ConsoleOptions
+ ) -> RenderResult:
+ theme = self.theme
+ background_style = theme.get_background_style()
+ token_style = theme.get_style_for_token
+
+ traceback_theme = Theme(
+ {
+ "pretty": token_style(TextToken),
+ "pygments.text": token_style(Token),
+ "pygments.string": token_style(String),
+ "pygments.function": token_style(Name.Function),
+ "pygments.number": token_style(Number),
+ "repr.indent": token_style(Comment) + Style(dim=True),
+ "repr.str": token_style(String),
+ "repr.brace": token_style(TextToken) + Style(bold=True),
+ "repr.number": token_style(Number),
+ "repr.bool_true": token_style(Keyword.Constant),
+ "repr.bool_false": token_style(Keyword.Constant),
+ "repr.none": token_style(Keyword.Constant),
+ "scope.border": token_style(String.Delimiter),
+ "scope.equals": token_style(Operator),
+ "scope.key": token_style(Name),
+ "scope.key.special": token_style(Name.Constant) + Style(dim=True),
+ },
+ inherit=False,
+ )
+
+ highlighter = ReprHighlighter()
+ for last, stack in loop_last(reversed(self.trace.stacks)):
+ if stack.frames:
+ stack_renderable: ConsoleRenderable = Panel(
+ self._render_stack(stack),
+ title="[traceback.title]Traceback [dim](most recent call last)",
+ style=background_style,
+ border_style="traceback.border",
+ expand=True,
+ padding=(0, 1),
+ )
+ stack_renderable = Constrain(stack_renderable, self.width)
+ with console.use_theme(traceback_theme):
+ yield stack_renderable
+ if stack.syntax_error is not None:
+ with console.use_theme(traceback_theme):
+ yield Constrain(
+ Panel(
+ self._render_syntax_error(stack.syntax_error),
+ style=background_style,
+ border_style="traceback.border.syntax_error",
+ expand=True,
+ padding=(0, 1),
+ width=self.width,
+ ),
+ self.width,
+ )
+ yield Text.assemble(
+ (f"{stack.exc_type}: ", "traceback.exc_type"),
+ highlighter(stack.syntax_error.msg),
+ )
+ elif stack.exc_value:
+ yield Text.assemble(
+ (f"{stack.exc_type}: ", "traceback.exc_type"),
+ highlighter(stack.exc_value),
+ )
+ else:
+ yield Text.assemble((f"{stack.exc_type}", "traceback.exc_type"))
+
+ if not last:
+ if stack.is_cause:
+ yield Text.from_markup(
+ "\n[i]The above exception was the direct cause of the following exception:\n",
+ )
+ else:
+ yield Text.from_markup(
+ "\n[i]During handling of the above exception, another exception occurred:\n",
+ )
+
+ @group()
+ def _render_syntax_error(self, syntax_error: _SyntaxError) -> RenderResult:
+ highlighter = ReprHighlighter()
+ path_highlighter = PathHighlighter()
+ if syntax_error.filename != "":
+ text = Text.assemble(
+ (f" {syntax_error.filename}", "pygments.string"),
+ (":", "pygments.text"),
+ (str(syntax_error.lineno), "pygments.number"),
+ style="pygments.text",
+ )
+ yield path_highlighter(text)
+ syntax_error_text = highlighter(syntax_error.line.rstrip())
+ syntax_error_text.no_wrap = True
+ offset = min(syntax_error.offset - 1, len(syntax_error_text))
+ syntax_error_text.stylize("bold underline", offset, offset)
+ syntax_error_text += Text.from_markup(
+ "\n" + " " * offset + "[traceback.offset]▲[/]",
+ style="pygments.text",
+ )
+ yield syntax_error_text
+
+ @classmethod
+ def _guess_lexer(cls, filename: str, code: str) -> str:
+ ext = os.path.splitext(filename)[-1]
+ if not ext:
+ # No extension, look at first line to see if it is a hashbang
+ # Note, this is an educated guess and not a guarantee
+ # If it fails, the only downside is that the code is highlighted strangely
+ new_line_index = code.find("\n") # find() returns -1 when there is no newline, matching the check below
+ first_line = code[:new_line_index] if new_line_index != -1 else code
+ if first_line.startswith("#!") and "python" in first_line.lower():
+ return "python"
+ lexer_name = (
+ cls.LEXERS.get(ext) or guess_lexer_for_filename(filename, code).name
+ )
+ return lexer_name
+
+ @group()
+ def _render_stack(self, stack: Stack) -> RenderResult:
+ path_highlighter = PathHighlighter()
+ theme = self.theme
+ code_cache: Dict[str, str] = {}
+
+ def read_code(filename: str) -> str:
+ """Read files, and cache results on filename.
+
+ Args:
+ filename (str): Filename to read
+
+ Returns:
+ str: Contents of file
+ """
+ code = code_cache.get(filename)
+ if code is None:
+ with open(
+ filename, "rt", encoding="utf-8", errors="replace"
+ ) as code_file:
+ code = code_file.read()
+ code_cache[filename] = code
+ return code
+
+ def render_locals(frame: Frame) -> Iterable[ConsoleRenderable]:
+ if frame.locals:
+ yield render_scope(
+ frame.locals,
+ title="locals",
+ indent_guides=self.indent_guides,
+ max_length=self.locals_max_length,
+ max_string=self.locals_max_string,
+ )
+
+ exclude_frames: Optional[range] = None
+ if self.max_frames != 0:
+ exclude_frames = range(
+ self.max_frames // 2,
+ len(stack.frames) - self.max_frames // 2,
+ )
+
+ excluded = False
+ for frame_index, frame in enumerate(stack.frames):
+
+ if exclude_frames and frame_index in exclude_frames:
+ excluded = True
+ continue
+
+ if excluded:
+ assert exclude_frames is not None
+ yield Text(
+ f"\n... {len(exclude_frames)} frames hidden ...",
+ justify="center",
+ style="traceback.error",
+ )
+ excluded = False
+
+ first = frame_index == 0
+ frame_filename = frame.filename
+ suppressed = any(frame_filename.startswith(path) for path in self.suppress)
+
+ text = Text.assemble(
+ path_highlighter(Text(frame.filename, style="pygments.string")),
+ (":", "pygments.text"),
+ (str(frame.lineno), "pygments.number"),
+ " in ",
+ (frame.name, "pygments.function"),
+ style="pygments.text",
+ )
+ if not frame.filename.startswith("<") and not first:
+ yield ""
+ yield text
+ if frame.filename.startswith("<"):
+ yield from render_locals(frame)
+ continue
+ if not suppressed:
+ try:
+ code = read_code(frame.filename)
+ lexer_name = self._guess_lexer(frame.filename, code)
+ syntax = Syntax(
+ code,
+ lexer_name,
+ theme=theme,
+ line_numbers=True,
+ line_range=(
+ frame.lineno - self.extra_lines,
+ frame.lineno + self.extra_lines,
+ ),
+ highlight_lines={frame.lineno},
+ word_wrap=self.word_wrap,
+ code_width=88,
+ indent_guides=self.indent_guides,
+ dedent=False,
+ )
+ yield ""
+ except Exception as error:
+ yield Text.assemble(
+ (f"\n{error}", "traceback.error"),
+ )
+ else:
+ yield (
+ Columns(
+ [
+ syntax,
+ *render_locals(frame),
+ ],
+ padding=1,
+ )
+ if frame.locals
+ else syntax
+ )
+
+
+if __name__ == "__main__": # pragma: no cover
+
+ from .console import Console
+
+ console = Console()
+ import sys
+
+ def bar(a: Any) -> None: # 这是对亚洲语言支持的测试。面对模棱两可的想法,拒绝猜测的诱惑 (a test of Asian-language support: "In the face of ambiguity, refuse the temptation to guess.")
+ one = 1
+ print(one / a)
+
+ def foo(a: Any) -> None:
+ _rich_traceback_guard = True
+ zed = {
+ "characters": {
+ "Paul Atreides",
+ "Vladimir Harkonnen",
+ "Thufir Hawat",
+ "Duncan Idaho",
+ },
+ "atomic_types": (None, False, True),
+ }
+ bar(a)
+
+ def error() -> None:
+
+ try:
+ try:
+ foo(0)
+ except:
+ slfkjsldkfj # type: ignore
+ except:
+ console.print_exception(show_locals=True)
+
+ error()
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/rich/tree.py b/venv/lib/python3.9/site-packages/pip/_vendor/rich/tree.py
new file mode 100644
index 0000000..c5ec27d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/rich/tree.py
@@ -0,0 +1,249 @@
+from typing import Iterator, List, Optional, Tuple
+
+from ._loop import loop_first, loop_last
+from .console import Console, ConsoleOptions, RenderableType, RenderResult
+from .jupyter import JupyterMixin
+from .measure import Measurement
+from .segment import Segment
+from .style import Style, StyleStack, StyleType
+from .styled import Styled
+
+
+class Tree(JupyterMixin):
+ """A renderable for a tree structure.
+
+ Args:
+ label (RenderableType): The renderable or str for the tree label.
+ style (StyleType, optional): Style of this tree. Defaults to "tree".
+ guide_style (StyleType, optional): Style of the guide lines. Defaults to "tree.line".
+ expanded (bool, optional): Also display children. Defaults to True.
+ highlight (bool, optional): Highlight renderable (if str). Defaults to False.
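+
+ Example (an illustrative sketch; assumes a ``Console`` instance named ``console``)::
+
+ tree = Tree("root")
+ child = tree.add("child")
+ child.add("grandchild")
+ console.print(tree)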
+ """
+
+ def __init__(
+ self,
+ label: RenderableType,
+ *,
+ style: StyleType = "tree",
+ guide_style: StyleType = "tree.line",
+ expanded: bool = True,
+ highlight: bool = False,
+ hide_root: bool = False,
+ ) -> None:
+ self.label = label
+ self.style = style
+ self.guide_style = guide_style
+ self.children: List[Tree] = []
+ self.expanded = expanded
+ self.highlight = highlight
+ self.hide_root = hide_root
+
+ def add(
+ self,
+ label: RenderableType,
+ *,
+ style: Optional[StyleType] = None,
+ guide_style: Optional[StyleType] = None,
+ expanded: bool = True,
+ highlight: Optional[bool] = None,
+ ) -> "Tree":
+ """Add a child tree.
+
+ Args:
+ label (RenderableType): The renderable or str for the tree label.
+ style (StyleType, optional): Style of this tree. Defaults to "tree".
+ guide_style (StyleType, optional): Style of the guide lines. Defaults to "tree.line".
+ expanded (bool, optional): Also display children. Defaults to True.
+ highlight (Optional[bool], optional): Highlight renderable (if str), or None to inherit this tree's setting. Defaults to None.
+
+ Returns:
+ Tree: A new child Tree, which may be further modified.
+ """
+ node = Tree(
+ label,
+ style=self.style if style is None else style,
+ guide_style=self.guide_style if guide_style is None else guide_style,
+ expanded=expanded,
+ highlight=self.highlight if highlight is None else highlight,
+ )
+ self.children.append(node)
+ return node
+
+ def __rich_console__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "RenderResult":
+
+ stack: List[Iterator[Tuple[bool, Tree]]] = []
+ pop = stack.pop
+ push = stack.append
+ new_line = Segment.line()
+
+ get_style = console.get_style
+ null_style = Style.null()
+ guide_style = get_style(self.guide_style, default="") or null_style
+ SPACE, CONTINUE, FORK, END = range(4)
+
+ ASCII_GUIDES = (" ", "| ", "+-- ", "`-- ")
+ TREE_GUIDES = [
+ (" ", "│ ", "├── ", "└── "),
+ (" ", "┃ ", "┣━━ ", "┗━━ "),
+ (" ", "║ ", "╠══ ", "╚══ "),
+ ]
+ _Segment = Segment
+
+ def make_guide(index: int, style: Style) -> Segment:
+ """Make a Segment for a level of the guide lines."""
+ if options.ascii_only:
+ line = ASCII_GUIDES[index]
+ else:
+ guide = 1 if style.bold else (2 if style.underline2 else 0)
+ line = TREE_GUIDES[0 if options.legacy_windows else guide][index]
+ return _Segment(line, style)
+
+ levels: List[Segment] = [make_guide(CONTINUE, guide_style)]
+ push(iter(loop_last([self])))
+
+ guide_style_stack = StyleStack(get_style(self.guide_style))
+ style_stack = StyleStack(get_style(self.style))
+ remove_guide_styles = Style(bold=False, underline2=False)
+
+ depth = 0
+
+ while stack:
+ stack_node = pop()
+ try:
+ last, node = next(stack_node)
+ except StopIteration:
+ levels.pop()
+ if levels:
+ guide_style = levels[-1].style or null_style
+ levels[-1] = make_guide(FORK, guide_style)
+ guide_style_stack.pop()
+ style_stack.pop()
+ continue
+ push(stack_node)
+ if last:
+ levels[-1] = make_guide(END, levels[-1].style or null_style)
+
+ guide_style = guide_style_stack.current + get_style(node.guide_style)
+ style = style_stack.current + get_style(node.style)
+ prefix = levels[(2 if self.hide_root else 1) :]
+ renderable_lines = console.render_lines(
+ Styled(node.label, style),
+ options.update(
+ width=options.max_width
+ - sum(level.cell_length for level in prefix),
+ highlight=self.highlight,
+ height=None,
+ ),
+ )
+
+ if not (depth == 0 and self.hide_root):
+ for first, line in loop_first(renderable_lines):
+ if prefix:
+ yield from _Segment.apply_style(
+ prefix,
+ style.background_style,
+ post_style=remove_guide_styles,
+ )
+ yield from line
+ yield new_line
+ if first and prefix:
+ prefix[-1] = make_guide(
+ SPACE if last else CONTINUE, prefix[-1].style or null_style
+ )
+
+ if node.expanded and node.children:
+ levels[-1] = make_guide(
+ SPACE if last else CONTINUE, levels[-1].style or null_style
+ )
+ levels.append(
+ make_guide(END if len(node.children) == 1 else FORK, guide_style)
+ )
+ style_stack.push(get_style(node.style))
+ guide_style_stack.push(get_style(node.guide_style))
+ push(iter(loop_last(node.children)))
+ depth += 1
+
+ def __rich_measure__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "Measurement":
+ stack: List[Iterator[Tree]] = [iter([self])]
+ pop = stack.pop
+ push = stack.append
+ minimum = 0
+ maximum = 0
+ measure = Measurement.get
+ level = 0
+ while stack:
+ iter_tree = pop()
+ try:
+ tree = next(iter_tree)
+ except StopIteration:
+ level -= 1
+ continue
+ push(iter_tree)
+ min_measure, max_measure = measure(console, options, tree.label)
+ indent = level * 4
+ minimum = max(min_measure + indent, minimum)
+ maximum = max(max_measure + indent, maximum)
+ if tree.expanded and tree.children:
+ push(iter(tree.children))
+ level += 1
+ return Measurement(minimum, maximum)
+
+
+if __name__ == "__main__": # pragma: no cover
+
+ from pip._vendor.rich.console import Group
+ from pip._vendor.rich.markdown import Markdown
+ from pip._vendor.rich.panel import Panel
+ from pip._vendor.rich.syntax import Syntax
+ from pip._vendor.rich.table import Table
+
+ table = Table(row_styles=["", "dim"])
+
+ table.add_column("Released", style="cyan", no_wrap=True)
+ table.add_column("Title", style="magenta")
+ table.add_column("Box Office", justify="right", style="green")
+
+ table.add_row("Dec 20, 2019", "Star Wars: The Rise of Skywalker", "$952,110,690")
+ table.add_row("May 25, 2018", "Solo: A Star Wars Story", "$393,151,347")
+ table.add_row("Dec 15, 2017", "Star Wars Ep. V111: The Last Jedi", "$1,332,539,889")
+ table.add_row("Dec 16, 2016", "Rogue One: A Star Wars Story", "$1,332,439,889")
+
+ code = """\
+class Segment(NamedTuple):
+ text: str = ""
+ style: Optional[Style] = None
+ is_control: bool = False
+"""
+ syntax = Syntax(code, "python", theme="monokai", line_numbers=True)
+
+ markdown = Markdown(
+ """\
+### example.md
+> Hello, World!
+>
+> Markdown _all_ the things
+"""
+ )
+
+ root = Tree("🌲 [b green]Rich Tree", highlight=True, hide_root=True)
+
+ node = root.add(":file_folder: Renderables", guide_style="red")
+ simple_node = node.add(":file_folder: [bold yellow]Atomic", guide_style="uu green")
+ simple_node.add(Group("📄 Syntax", syntax))
+ simple_node.add(Group("📄 Markdown", Panel(markdown, border_style="green")))
+
+ containers_node = node.add(
+ ":file_folder: [bold magenta]Containers", guide_style="bold magenta"
+ )
+ containers_node.expanded = True
+ panel = Panel.fit("Just a panel", border_style="red")
+ containers_node.add(Group("📄 Panels", panel))
+
+ containers_node.add(Group("📄 [b magenta]Table", table))
+
+ console = Console()
+ console.print(root)
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/six.py b/venv/lib/python3.9/site-packages/pip/_vendor/six.py
new file mode 100644
index 0000000..4e15675
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/six.py
@@ -0,0 +1,998 @@
+# Copyright (c) 2010-2020 Benjamin Peterson
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+"""Utilities for writing code that runs on Python 2 and 3"""
+
+from __future__ import absolute_import
+
+import functools
+import itertools
+import operator
+import sys
+import types
+
+__author__ = "Benjamin Peterson "
+__version__ = "1.16.0"
+
+
+# Useful for very coarse version differentiation.
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+PY34 = sys.version_info[0:2] >= (3, 4)
+
+if PY3:
+ string_types = str,
+ integer_types = int,
+ class_types = type,
+ text_type = str
+ binary_type = bytes
+
+ MAXSIZE = sys.maxsize
+else:
+ string_types = basestring,
+ integer_types = (int, long)
+ class_types = (type, types.ClassType)
+ text_type = unicode
+ binary_type = str
+
+ if sys.platform.startswith("java"):
+ # Jython always uses 32 bits.
+ MAXSIZE = int((1 << 31) - 1)
+ else:
+ # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
+ class X(object):
+
+ def __len__(self):
+ return 1 << 31
+ try:
+ len(X())
+ except OverflowError:
+ # 32-bit
+ MAXSIZE = int((1 << 31) - 1)
+ else:
+ # 64-bit
+ MAXSIZE = int((1 << 63) - 1)
+ del X
+
+if PY34:
+ from importlib.util import spec_from_loader
+else:
+ spec_from_loader = None
+
+
+def _add_doc(func, doc):
+ """Add documentation to a function."""
+ func.__doc__ = doc
+
+
+def _import_module(name):
+ """Import module, returning the module after the last dot."""
+ __import__(name)
+ return sys.modules[name]
+
+
+class _LazyDescr(object):
+
+ def __init__(self, name):
+ self.name = name
+
+ def __get__(self, obj, tp):
+ result = self._resolve()
+ setattr(obj, self.name, result) # Invokes __set__.
+ try:
+ # This is a bit ugly, but it avoids running this again by
+ # removing this descriptor.
+ delattr(obj.__class__, self.name)
+ except AttributeError:
+ pass
+ return result
+
+
+class MovedModule(_LazyDescr):
+
+ def __init__(self, name, old, new=None):
+ super(MovedModule, self).__init__(name)
+ if PY3:
+ if new is None:
+ new = name
+ self.mod = new
+ else:
+ self.mod = old
+
+ def _resolve(self):
+ return _import_module(self.mod)
+
+ def __getattr__(self, attr):
+ _module = self._resolve()
+ value = getattr(_module, attr)
+ setattr(self, attr, value)
+ return value
+
+
+class _LazyModule(types.ModuleType):
+
+ def __init__(self, name):
+ super(_LazyModule, self).__init__(name)
+ self.__doc__ = self.__class__.__doc__
+
+ def __dir__(self):
+ attrs = ["__doc__", "__name__"]
+ attrs += [attr.name for attr in self._moved_attributes]
+ return attrs
+
+ # Subclasses should override this
+ _moved_attributes = []
+
+
+class MovedAttribute(_LazyDescr):
+
+ def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
+ super(MovedAttribute, self).__init__(name)
+ if PY3:
+ if new_mod is None:
+ new_mod = name
+ self.mod = new_mod
+ if new_attr is None:
+ if old_attr is None:
+ new_attr = name
+ else:
+ new_attr = old_attr
+ self.attr = new_attr
+ else:
+ self.mod = old_mod
+ if old_attr is None:
+ old_attr = name
+ self.attr = old_attr
+
+ def _resolve(self):
+ module = _import_module(self.mod)
+ return getattr(module, self.attr)
+
+
+class _SixMetaPathImporter(object):
+
+ """
+ A meta path importer to import six.moves and its submodules.
+
+ This class implements a PEP302 finder and loader. It should be compatible
+ with Python 2.5 and all existing versions of Python 3.
+ """
+
+ def __init__(self, six_module_name):
+ self.name = six_module_name
+ self.known_modules = {}
+
+ def _add_module(self, mod, *fullnames):
+ for fullname in fullnames:
+ self.known_modules[self.name + "." + fullname] = mod
+
+ def _get_module(self, fullname):
+ return self.known_modules[self.name + "." + fullname]
+
+ def find_module(self, fullname, path=None):
+ if fullname in self.known_modules:
+ return self
+ return None
+
+ def find_spec(self, fullname, path, target=None):
+ if fullname in self.known_modules:
+ return spec_from_loader(fullname, self)
+ return None
+
+ def __get_module(self, fullname):
+ try:
+ return self.known_modules[fullname]
+ except KeyError:
+ raise ImportError("This loader does not know module " + fullname)
+
+ def load_module(self, fullname):
+ try:
+ # in case of a reload
+ return sys.modules[fullname]
+ except KeyError:
+ pass
+ mod = self.__get_module(fullname)
+ if isinstance(mod, MovedModule):
+ mod = mod._resolve()
+ else:
+ mod.__loader__ = self
+ sys.modules[fullname] = mod
+ return mod
+
+ def is_package(self, fullname):
+ """
+ Return true if the named module is a package.
+
+ We need this method to get correct spec objects with
+ Python 3.4 (see PEP451)
+ """
+ return hasattr(self.__get_module(fullname), "__path__")
+
+ def get_code(self, fullname):
+ """Return None
+
+ Required if is_package is implemented."""
+ self.__get_module(fullname) # eventually raises ImportError
+ return None
+ get_source = get_code # same as get_code
+
+ def create_module(self, spec):
+ return self.load_module(spec.name)
+
+ def exec_module(self, module):
+ pass
+
+_importer = _SixMetaPathImporter(__name__)
+
+
+class _MovedItems(_LazyModule):
+
+ """Lazy loading of moved objects"""
+ __path__ = [] # mark as package
+
+
+_moved_attributes = [
+ MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
+ MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
+ MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
+ MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
+ MovedAttribute("intern", "__builtin__", "sys"),
+ MovedAttribute("map", "itertools", "builtins", "imap", "map"),
+ MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
+ MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
+ MovedAttribute("getoutput", "commands", "subprocess"),
+ MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
+ MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
+ MovedAttribute("reduce", "__builtin__", "functools"),
+ MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
+ MovedAttribute("StringIO", "StringIO", "io"),
+ MovedAttribute("UserDict", "UserDict", "collections"),
+ MovedAttribute("UserList", "UserList", "collections"),
+ MovedAttribute("UserString", "UserString", "collections"),
+ MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
+ MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
+ MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
+ MovedModule("builtins", "__builtin__"),
+ MovedModule("configparser", "ConfigParser"),
+ MovedModule("collections_abc", "collections", "collections.abc" if sys.version_info >= (3, 3) else "collections"),
+ MovedModule("copyreg", "copy_reg"),
+ MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
+ MovedModule("dbm_ndbm", "dbm", "dbm.ndbm"),
+ MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread" if sys.version_info < (3, 9) else "_thread"),
+ MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
+ MovedModule("http_cookies", "Cookie", "http.cookies"),
+ MovedModule("html_entities", "htmlentitydefs", "html.entities"),
+ MovedModule("html_parser", "HTMLParser", "html.parser"),
+ MovedModule("http_client", "httplib", "http.client"),
+ MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
+ MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"),
+ MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
+ MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
+ MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
+ MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
+ MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
+ MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
+ MovedModule("cPickle", "cPickle", "pickle"),
+ MovedModule("queue", "Queue"),
+ MovedModule("reprlib", "repr"),
+ MovedModule("socketserver", "SocketServer"),
+ MovedModule("_thread", "thread", "_thread"),
+ MovedModule("tkinter", "Tkinter"),
+ MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
+ MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
+ MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
+ MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
+ MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
+ MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
+ MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
+ MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
+ MovedModule("tkinter_colorchooser", "tkColorChooser",
+ "tkinter.colorchooser"),
+ MovedModule("tkinter_commondialog", "tkCommonDialog",
+ "tkinter.commondialog"),
+ MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
+ MovedModule("tkinter_font", "tkFont", "tkinter.font"),
+ MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
+ MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
+ "tkinter.simpledialog"),
+ MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
+ MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
+ MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
+ MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
+ MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
+ MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
+]
+# Add windows specific modules.
+if sys.platform == "win32":
+ _moved_attributes += [
+ MovedModule("winreg", "_winreg"),
+ ]
+
+for attr in _moved_attributes:
+ setattr(_MovedItems, attr.name, attr)
+ if isinstance(attr, MovedModule):
+ _importer._add_module(attr, "moves." + attr.name)
+del attr
+
+_MovedItems._moved_attributes = _moved_attributes
+
+moves = _MovedItems(__name__ + ".moves")
+_importer._add_module(moves, "moves")
+
+
+class Module_six_moves_urllib_parse(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_parse"""
+
+
+_urllib_parse_moved_attributes = [
+ MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
+ MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
+ MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
+ MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
+ MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
+ MovedAttribute("urljoin", "urlparse", "urllib.parse"),
+ MovedAttribute("urlparse", "urlparse", "urllib.parse"),
+ MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
+ MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
+ MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
+ MovedAttribute("quote", "urllib", "urllib.parse"),
+ MovedAttribute("quote_plus", "urllib", "urllib.parse"),
+ MovedAttribute("unquote", "urllib", "urllib.parse"),
+ MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
+ MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"),
+ MovedAttribute("urlencode", "urllib", "urllib.parse"),
+ MovedAttribute("splitquery", "urllib", "urllib.parse"),
+ MovedAttribute("splittag", "urllib", "urllib.parse"),
+ MovedAttribute("splituser", "urllib", "urllib.parse"),
+ MovedAttribute("splitvalue", "urllib", "urllib.parse"),
+ MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_params", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_query", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
+]
+for attr in _urllib_parse_moved_attributes:
+ setattr(Module_six_moves_urllib_parse, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
+ "moves.urllib_parse", "moves.urllib.parse")
+
+
+class Module_six_moves_urllib_error(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_error"""
+
+
+_urllib_error_moved_attributes = [
+ MovedAttribute("URLError", "urllib2", "urllib.error"),
+ MovedAttribute("HTTPError", "urllib2", "urllib.error"),
+ MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
+]
+for attr in _urllib_error_moved_attributes:
+ setattr(Module_six_moves_urllib_error, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
+ "moves.urllib_error", "moves.urllib.error")
+
+
+class Module_six_moves_urllib_request(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_request"""
+
+
+_urllib_request_moved_attributes = [
+ MovedAttribute("urlopen", "urllib2", "urllib.request"),
+ MovedAttribute("install_opener", "urllib2", "urllib.request"),
+ MovedAttribute("build_opener", "urllib2", "urllib.request"),
+ MovedAttribute("pathname2url", "urllib", "urllib.request"),
+ MovedAttribute("url2pathname", "urllib", "urllib.request"),
+ MovedAttribute("getproxies", "urllib", "urllib.request"),
+ MovedAttribute("Request", "urllib2", "urllib.request"),
+ MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
+ MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
+ MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
+ MovedAttribute("FileHandler", "urllib2", "urllib.request"),
+ MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
+ MovedAttribute("urlretrieve", "urllib", "urllib.request"),
+ MovedAttribute("urlcleanup", "urllib", "urllib.request"),
+ MovedAttribute("URLopener", "urllib", "urllib.request"),
+ MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
+ MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
+ MovedAttribute("parse_http_list", "urllib2", "urllib.request"),
+ MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"),
+]
+for attr in _urllib_request_moved_attributes:
+ setattr(Module_six_moves_urllib_request, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
+ "moves.urllib_request", "moves.urllib.request")
+
+
+class Module_six_moves_urllib_response(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_response"""
+
+
+_urllib_response_moved_attributes = [
+ MovedAttribute("addbase", "urllib", "urllib.response"),
+ MovedAttribute("addclosehook", "urllib", "urllib.response"),
+ MovedAttribute("addinfo", "urllib", "urllib.response"),
+ MovedAttribute("addinfourl", "urllib", "urllib.response"),
+]
+for attr in _urllib_response_moved_attributes:
+ setattr(Module_six_moves_urllib_response, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
+ "moves.urllib_response", "moves.urllib.response")
+
+
+class Module_six_moves_urllib_robotparser(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_robotparser"""
+
+
+_urllib_robotparser_moved_attributes = [
+ MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
+]
+for attr in _urllib_robotparser_moved_attributes:
+ setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
+ "moves.urllib_robotparser", "moves.urllib.robotparser")
+
+
+class Module_six_moves_urllib(types.ModuleType):
+
+ """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
+ __path__ = [] # mark as package
+ parse = _importer._get_module("moves.urllib_parse")
+ error = _importer._get_module("moves.urllib_error")
+ request = _importer._get_module("moves.urllib_request")
+ response = _importer._get_module("moves.urllib_response")
+ robotparser = _importer._get_module("moves.urllib_robotparser")
+
+ def __dir__(self):
+ return ['parse', 'error', 'request', 'response', 'robotparser']
+
+_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
+ "moves.urllib")
+
+
+def add_move(move):
+ """Add an item to six.moves."""
+ setattr(_MovedItems, move.name, move)
+
+
+def remove_move(name):
+ """Remove item from six.moves."""
+ try:
+ delattr(_MovedItems, name)
+ except AttributeError:
+ try:
+ del moves.__dict__[name]
+ except KeyError:
+ raise AttributeError("no such move, %r" % (name,))
+
+
+if PY3:
+ _meth_func = "__func__"
+ _meth_self = "__self__"
+
+ _func_closure = "__closure__"
+ _func_code = "__code__"
+ _func_defaults = "__defaults__"
+ _func_globals = "__globals__"
+else:
+ _meth_func = "im_func"
+ _meth_self = "im_self"
+
+ _func_closure = "func_closure"
+ _func_code = "func_code"
+ _func_defaults = "func_defaults"
+ _func_globals = "func_globals"
+
+
+try:
+ advance_iterator = next
+except NameError:
+ def advance_iterator(it):
+ return it.next()
+next = advance_iterator
+
+
+try:
+ callable = callable
+except NameError:
+ def callable(obj):
+ return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
+
+
+if PY3:
+ def get_unbound_function(unbound):
+ return unbound
+
+ create_bound_method = types.MethodType
+
+ def create_unbound_method(func, cls):
+ return func
+
+ Iterator = object
+else:
+ def get_unbound_function(unbound):
+ return unbound.im_func
+
+ def create_bound_method(func, obj):
+ return types.MethodType(func, obj, obj.__class__)
+
+ def create_unbound_method(func, cls):
+ return types.MethodType(func, None, cls)
+
+ class Iterator(object):
+
+ def next(self):
+ return type(self).__next__(self)
+
+ callable = callable
+_add_doc(get_unbound_function,
+ """Get the function out of a possibly unbound function""")
+
+
+get_method_function = operator.attrgetter(_meth_func)
+get_method_self = operator.attrgetter(_meth_self)
+get_function_closure = operator.attrgetter(_func_closure)
+get_function_code = operator.attrgetter(_func_code)
+get_function_defaults = operator.attrgetter(_func_defaults)
+get_function_globals = operator.attrgetter(_func_globals)
+
+
+if PY3:
+ def iterkeys(d, **kw):
+ return iter(d.keys(**kw))
+
+ def itervalues(d, **kw):
+ return iter(d.values(**kw))
+
+ def iteritems(d, **kw):
+ return iter(d.items(**kw))
+
+ def iterlists(d, **kw):
+ return iter(d.lists(**kw))
+
+ viewkeys = operator.methodcaller("keys")
+
+ viewvalues = operator.methodcaller("values")
+
+ viewitems = operator.methodcaller("items")
+else:
+ def iterkeys(d, **kw):
+ return d.iterkeys(**kw)
+
+ def itervalues(d, **kw):
+ return d.itervalues(**kw)
+
+ def iteritems(d, **kw):
+ return d.iteritems(**kw)
+
+ def iterlists(d, **kw):
+ return d.iterlists(**kw)
+
+ viewkeys = operator.methodcaller("viewkeys")
+
+ viewvalues = operator.methodcaller("viewvalues")
+
+ viewitems = operator.methodcaller("viewitems")
+
+_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
+_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
+_add_doc(iteritems,
+ "Return an iterator over the (key, value) pairs of a dictionary.")
+_add_doc(iterlists,
+ "Return an iterator over the (key, [values]) pairs of a dictionary.")
+
+
+if PY3:
+ def b(s):
+ return s.encode("latin-1")
+
+ def u(s):
+ return s
+ unichr = chr
+ import struct
+ int2byte = struct.Struct(">B").pack
+ del struct
+ byte2int = operator.itemgetter(0)
+ indexbytes = operator.getitem
+ iterbytes = iter
+ import io
+ StringIO = io.StringIO
+ BytesIO = io.BytesIO
+ del io
+ _assertCountEqual = "assertCountEqual"
+ if sys.version_info[1] <= 1:
+ _assertRaisesRegex = "assertRaisesRegexp"
+ _assertRegex = "assertRegexpMatches"
+ _assertNotRegex = "assertNotRegexpMatches"
+ else:
+ _assertRaisesRegex = "assertRaisesRegex"
+ _assertRegex = "assertRegex"
+ _assertNotRegex = "assertNotRegex"
+else:
+ def b(s):
+ return s
+ # Workaround for standalone backslash
+
+ def u(s):
+ return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
+ unichr = unichr
+ int2byte = chr
+
+ def byte2int(bs):
+ return ord(bs[0])
+
+ def indexbytes(buf, i):
+ return ord(buf[i])
+ iterbytes = functools.partial(itertools.imap, ord)
+ import StringIO
+ StringIO = BytesIO = StringIO.StringIO
+ _assertCountEqual = "assertItemsEqual"
+ _assertRaisesRegex = "assertRaisesRegexp"
+ _assertRegex = "assertRegexpMatches"
+ _assertNotRegex = "assertNotRegexpMatches"
+_add_doc(b, """Byte literal""")
+_add_doc(u, """Text literal""")
+
+
+def assertCountEqual(self, *args, **kwargs):
+ return getattr(self, _assertCountEqual)(*args, **kwargs)
+
+
+def assertRaisesRegex(self, *args, **kwargs):
+ return getattr(self, _assertRaisesRegex)(*args, **kwargs)
+
+
+def assertRegex(self, *args, **kwargs):
+ return getattr(self, _assertRegex)(*args, **kwargs)
+
+
+def assertNotRegex(self, *args, **kwargs):
+ return getattr(self, _assertNotRegex)(*args, **kwargs)
+
+
+if PY3:
+ exec_ = getattr(moves.builtins, "exec")
+
+ def reraise(tp, value, tb=None):
+ try:
+ if value is None:
+ value = tp()
+ if value.__traceback__ is not tb:
+ raise value.with_traceback(tb)
+ raise value
+ finally:
+ value = None
+ tb = None
+
+else:
+ def exec_(_code_, _globs_=None, _locs_=None):
+ """Execute code in a namespace."""
+ if _globs_ is None:
+ frame = sys._getframe(1)
+ _globs_ = frame.f_globals
+ if _locs_ is None:
+ _locs_ = frame.f_locals
+ del frame
+ elif _locs_ is None:
+ _locs_ = _globs_
+ exec("""exec _code_ in _globs_, _locs_""")
+
+ exec_("""def reraise(tp, value, tb=None):
+ try:
+ raise tp, value, tb
+ finally:
+ tb = None
+""")
+
+
+if sys.version_info[:2] > (3,):
+ exec_("""def raise_from(value, from_value):
+ try:
+ raise value from from_value
+ finally:
+ value = None
+""")
+else:
+ def raise_from(value, from_value):
+ raise value
+
+
+print_ = getattr(moves.builtins, "print", None)
+if print_ is None:
+ def print_(*args, **kwargs):
+ """The new-style print function for Python 2.4 and 2.5."""
+ fp = kwargs.pop("file", sys.stdout)
+ if fp is None:
+ return
+
+ def write(data):
+ if not isinstance(data, basestring):
+ data = str(data)
+ # If the file has an encoding, encode unicode with it.
+ if (isinstance(fp, file) and
+ isinstance(data, unicode) and
+ fp.encoding is not None):
+ errors = getattr(fp, "errors", None)
+ if errors is None:
+ errors = "strict"
+ data = data.encode(fp.encoding, errors)
+ fp.write(data)
+ want_unicode = False
+ sep = kwargs.pop("sep", None)
+ if sep is not None:
+ if isinstance(sep, unicode):
+ want_unicode = True
+ elif not isinstance(sep, str):
+ raise TypeError("sep must be None or a string")
+ end = kwargs.pop("end", None)
+ if end is not None:
+ if isinstance(end, unicode):
+ want_unicode = True
+ elif not isinstance(end, str):
+ raise TypeError("end must be None or a string")
+ if kwargs:
+ raise TypeError("invalid keyword arguments to print()")
+ if not want_unicode:
+ for arg in args:
+ if isinstance(arg, unicode):
+ want_unicode = True
+ break
+ if want_unicode:
+ newline = unicode("\n")
+ space = unicode(" ")
+ else:
+ newline = "\n"
+ space = " "
+ if sep is None:
+ sep = space
+ if end is None:
+ end = newline
+ for i, arg in enumerate(args):
+ if i:
+ write(sep)
+ write(arg)
+ write(end)
+if sys.version_info[:2] < (3, 3):
+ _print = print_
+
+ def print_(*args, **kwargs):
+ fp = kwargs.get("file", sys.stdout)
+ flush = kwargs.pop("flush", False)
+ _print(*args, **kwargs)
+ if flush and fp is not None:
+ fp.flush()
+
+_add_doc(reraise, """Reraise an exception.""")
+
+if sys.version_info[0:2] < (3, 4):
+ # This does exactly what the :func:`py3:functools.update_wrapper`
+ # function does on Python versions after 3.2. It sets the ``__wrapped__``
+ # attribute on the ``wrapper`` object and it doesn't raise an error if any of
+ # the attributes mentioned in ``assigned`` and ``updated`` are missing on
+ # the ``wrapped`` object.
+ def _update_wrapper(wrapper, wrapped,
+ assigned=functools.WRAPPER_ASSIGNMENTS,
+ updated=functools.WRAPPER_UPDATES):
+ for attr in assigned:
+ try:
+ value = getattr(wrapped, attr)
+ except AttributeError:
+ continue
+ else:
+ setattr(wrapper, attr, value)
+ for attr in updated:
+ getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
+ wrapper.__wrapped__ = wrapped
+ return wrapper
+ _update_wrapper.__doc__ = functools.update_wrapper.__doc__
+
+ def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
+ updated=functools.WRAPPER_UPDATES):
+ return functools.partial(_update_wrapper, wrapped=wrapped,
+ assigned=assigned, updated=updated)
+ wraps.__doc__ = functools.wraps.__doc__
+
+else:
+ wraps = functools.wraps
+
+
+def with_metaclass(meta, *bases):
+ """Create a base class with a metaclass."""
+ # This requires a bit of explanation: the basic idea is to make a dummy
+ # metaclass for one level of class instantiation that replaces itself with
+ # the actual metaclass.
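+ # Illustrative usage (hypothetical names ``Meta`` and ``Base``): a class written as
+ # class MyClass(with_metaclass(Meta, Base)): ...
+ # is created by ``Meta`` on both Python 2 and Python 3, with MyClass.__bases__ == (Base,).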
+ class metaclass(type):
+
+ def __new__(cls, name, this_bases, d):
+ if sys.version_info[:2] >= (3, 7):
+ # This version introduced PEP 560 that requires a bit
+ # of extra care (we mimic what is done by __build_class__).
+ resolved_bases = types.resolve_bases(bases)
+ if resolved_bases is not bases:
+ d['__orig_bases__'] = bases
+ else:
+ resolved_bases = bases
+ return meta(name, resolved_bases, d)
+
+ @classmethod
+ def __prepare__(cls, name, this_bases):
+ return meta.__prepare__(name, bases)
+ return type.__new__(metaclass, 'temporary_class', (), {})
+
+
+def add_metaclass(metaclass):
+ """Class decorator for creating a class with a metaclass."""
+ def wrapper(cls):
+ orig_vars = cls.__dict__.copy()
+ slots = orig_vars.get('__slots__')
+ if slots is not None:
+ if isinstance(slots, str):
+ slots = [slots]
+ for slots_var in slots:
+ orig_vars.pop(slots_var)
+ orig_vars.pop('__dict__', None)
+ orig_vars.pop('__weakref__', None)
+ if hasattr(cls, '__qualname__'):
+ orig_vars['__qualname__'] = cls.__qualname__
+ return metaclass(cls.__name__, cls.__bases__, orig_vars)
+ return wrapper
+
+
+def ensure_binary(s, encoding='utf-8', errors='strict'):
+ """Coerce **s** to six.binary_type.
+
+ For Python 2:
+ - `unicode` -> encoded to `str`
+ - `str` -> `str`
+
+ For Python 3:
+ - `str` -> encoded to `bytes`
+ - `bytes` -> `bytes`
+ """
+ if isinstance(s, binary_type):
+ return s
+ if isinstance(s, text_type):
+ return s.encode(encoding, errors)
+ raise TypeError("not expecting type '%s'" % type(s))
+
+
+def ensure_str(s, encoding='utf-8', errors='strict'):
+ """Coerce *s* to `str`.
+
+ For Python 2:
+ - `unicode` -> encoded to `str`
+ - `str` -> `str`
+
+ For Python 3:
+ - `str` -> `str`
+ - `bytes` -> decoded to `str`
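+
+ Illustrative example (doctest-style sketch; same result on both major versions)::
+
+ >>> ensure_str(b"abc")
+ 'abc'
+ >>> ensure_str(u"abc")
+ 'abc'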
+ """
+ # Optimization: Fast return for the common case.
+ if type(s) is str:
+ return s
+ if PY2 and isinstance(s, text_type):
+ return s.encode(encoding, errors)
+ elif PY3 and isinstance(s, binary_type):
+ return s.decode(encoding, errors)
+ elif not isinstance(s, (text_type, binary_type)):
+ raise TypeError("not expecting type '%s'" % type(s))
+ return s
+
+
+def ensure_text(s, encoding='utf-8', errors='strict'):
+ """Coerce *s* to six.text_type.
+
+ For Python 2:
+ - `unicode` -> `unicode`
+ - `str` -> `unicode`
+
+ For Python 3:
+ - `str` -> `str`
+ - `bytes` -> decoded to `str`
+ """
+ if isinstance(s, binary_type):
+ return s.decode(encoding, errors)
+ elif isinstance(s, text_type):
+ return s
+ else:
+ raise TypeError("not expecting type '%s'" % type(s))
+
+
+def python_2_unicode_compatible(klass):
+ """
+ A class decorator that defines __unicode__ and __str__ methods under Python 2.
+ Under Python 3 it does nothing.
+
+ To support Python 2 and 3 with a single code base, define a __str__ method
+ returning text and apply this decorator to the class.
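+
+ Illustrative usage (hypothetical class name)::
+
+ @python_2_unicode_compatible
+ class Widget(object):
+ def __str__(self):
+ return u'a text representation'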
+ """
+ if PY2:
+ if '__str__' not in klass.__dict__:
+ raise ValueError("@python_2_unicode_compatible cannot be applied "
+ "to %s because it doesn't define __str__()." %
+ klass.__name__)
+ klass.__unicode__ = klass.__str__
+ klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
+ return klass
+
+
+# Complete the moves implementation.
+# This code is at the end of this module to speed up module loading.
+# Turn this module into a package.
+__path__ = [] # required for PEP 302 and PEP 451
+__package__ = __name__ # see PEP 366 @ReservedAssignment
+if globals().get("__spec__") is not None:
+ __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
+# Remove other six meta path importers, since they cause problems. This can
+# happen if six is removed from sys.modules and then reloaded. (Setuptools does
+# this for some reason.)
+if sys.meta_path:
+ for i, importer in enumerate(sys.meta_path):
+ # Here's some real nastiness: Another "instance" of the six module might
+ # be floating around. Therefore, we can't use isinstance() to check for
+ # the six meta path importer, since the other six instance will have
+ # inserted an importer with a different class.
+ if (type(importer).__name__ == "_SixMetaPathImporter" and
+ importer.name == __name__):
+ del sys.meta_path[i]
+ break
+ del i, importer
+# Finally, add the importer to the meta path import hook.
+sys.meta_path.append(_importer)
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/tenacity/__init__.py b/venv/lib/python3.9/site-packages/pip/_vendor/tenacity/__init__.py
new file mode 100644
index 0000000..086ad46
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/tenacity/__init__.py
@@ -0,0 +1,517 @@
+# Copyright 2016-2018 Julien Danjou
+# Copyright 2017 Elisey Zanko
+# Copyright 2016 Étienne Bersac
+# Copyright 2016 Joshua Harlow
+# Copyright 2013-2014 Ray Holder
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import functools
+import sys
+import threading
+import time
+import typing as t
+import warnings
+from abc import ABC, abstractmethod
+from concurrent import futures
+from inspect import iscoroutinefunction
+
+# Import all built-in retry strategies for easier usage.
+from .retry import retry_base # noqa
+from .retry import retry_all # noqa
+from .retry import retry_always # noqa
+from .retry import retry_any # noqa
+from .retry import retry_if_exception # noqa
+from .retry import retry_if_exception_type # noqa
+from .retry import retry_if_not_exception_type # noqa
+from .retry import retry_if_not_result # noqa
+from .retry import retry_if_result # noqa
+from .retry import retry_never # noqa
+from .retry import retry_unless_exception_type # noqa
+from .retry import retry_if_exception_message # noqa
+from .retry import retry_if_not_exception_message # noqa
+
+# Import all nap strategies for easier usage.
+from .nap import sleep # noqa
+from .nap import sleep_using_event # noqa
+
+# Import all built-in stop strategies for easier usage.
+from .stop import stop_after_attempt # noqa
+from .stop import stop_after_delay # noqa
+from .stop import stop_all # noqa
+from .stop import stop_any # noqa
+from .stop import stop_never # noqa
+from .stop import stop_when_event_set # noqa
+
+# Import all built-in wait strategies for easier usage.
+from .wait import wait_chain # noqa
+from .wait import wait_combine # noqa
+from .wait import wait_exponential # noqa
+from .wait import wait_fixed # noqa
+from .wait import wait_incrementing # noqa
+from .wait import wait_none # noqa
+from .wait import wait_random # noqa
+from .wait import wait_random_exponential # noqa
+from .wait import wait_random_exponential as wait_full_jitter # noqa
+
+# Import all built-in before strategies for easier usage.
+from .before import before_log # noqa
+from .before import before_nothing # noqa
+
+# Import all built-in after strategies for easier usage.
+from .after import after_log # noqa
+from .after import after_nothing # noqa
+
+# Import all built-in after strategies for easier usage.
+from .before_sleep import before_sleep_log # noqa
+from .before_sleep import before_sleep_nothing # noqa
+
+# Replace a conditional import with a hard-coded None so that pip does
+# not attempt to use tornado even if it is present in the environment.
+# If tornado is non-None, tenacity will attempt to execute some code
+# that is sensitive to the version of tornado, which could break pip
+# if an old version is found.
+tornado = None # type: ignore
+
+if t.TYPE_CHECKING:
+ import types
+
+ from .wait import wait_base
+ from .stop import stop_base
+
+
+WrappedFn = t.TypeVar("WrappedFn", bound=t.Callable)
+_RetValT = t.TypeVar("_RetValT")
+
+
+@t.overload
+def retry(fn: WrappedFn) -> WrappedFn:
+ pass
+
+
+@t.overload
+def retry(*dargs: t.Any, **dkw: t.Any) -> t.Callable[[WrappedFn], WrappedFn]: # noqa
+ pass
+
+
+def retry(*dargs: t.Any, **dkw: t.Any) -> t.Union[WrappedFn, t.Callable[[WrappedFn], WrappedFn]]: # noqa
+ """Wrap a function with a new `Retrying` object.
+
+ :param dargs: positional arguments passed to Retrying object
+ :param dkw: keyword arguments passed to the Retrying object
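+
+ Illustrative example (a minimal sketch using the stop/wait strategies imported above;
+ ``fetch`` is a hypothetical function)::
+
+ @retry(stop=stop_after_attempt(3), wait=wait_fixed(2))
+ def fetch():
+ ...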
+ """
+ # support both @retry and @retry() as valid syntax
+ if len(dargs) == 1 and callable(dargs[0]):
+ return retry()(dargs[0])
+ else:
+
+ def wrap(f: WrappedFn) -> WrappedFn:
+ if isinstance(f, retry_base):
+ warnings.warn(
+ f"Got retry_base instance ({f.__class__.__name__}) as callable argument, "
+ f"this will probably hang indefinitely (did you mean retry={f.__class__.__name__}(...)?)"
+ )
+ if iscoroutinefunction(f):
+ r: "BaseRetrying" = AsyncRetrying(*dargs, **dkw)
+ elif tornado and hasattr(tornado.gen, "is_coroutine_function") and tornado.gen.is_coroutine_function(f):
+ r = TornadoRetrying(*dargs, **dkw)
+ else:
+ r = Retrying(*dargs, **dkw)
+
+ return r.wraps(f)
+
+ return wrap
+
+
+class TryAgain(Exception):
+ """Always retry the executed function when raised."""
+
+
+NO_RESULT = object()
+
+
+class DoAttempt:
+ pass
+
+
+class DoSleep(float):
+ pass
+
+
+class BaseAction:
+ """Base class for representing actions to take by retry object.
+
+ Concrete implementations must define:
+ - __init__: to initialize all necessary fields
+ - REPR_FIELDS: class variable specifying attributes to include in repr(self)
+ - NAME: for identification in retry object methods and callbacks
+ """
+
+ REPR_FIELDS: t.Sequence[str] = ()
+ NAME: t.Optional[str] = None
+
+ def __repr__(self) -> str:
+ state_str = ", ".join(f"{field}={getattr(self, field)!r}" for field in self.REPR_FIELDS)
+ return f"{self.__class__.__name__}({state_str})"
+
+ def __str__(self) -> str:
+ return repr(self)
+
+
+class RetryAction(BaseAction):
+ REPR_FIELDS = ("sleep",)
+ NAME = "retry"
+
+ def __init__(self, sleep: t.SupportsFloat) -> None:
+ self.sleep = float(sleep)
+
+
+_unset = object()
+
+
+def _first_set(first: t.Union[t.Any, object], second: t.Any) -> t.Any:
+ return second if first is _unset else first
+
+
+class RetryError(Exception):
+ """Encapsulates the last attempt instance right before giving up."""
+
+ def __init__(self, last_attempt: "Future") -> None:
+ self.last_attempt = last_attempt
+ super().__init__(last_attempt)
+
+ def reraise(self) -> "t.NoReturn":
+ if self.last_attempt.failed:
+ raise self.last_attempt.result()
+ raise self
+
+ def __str__(self) -> str:
+ return f"{self.__class__.__name__}[{self.last_attempt}]"
+
+
+class AttemptManager:
+ """Manage attempt context."""
+
+ def __init__(self, retry_state: "RetryCallState"):
+ self.retry_state = retry_state
+
+ def __enter__(self) -> None:
+ pass
+
+ def __exit__(
+ self,
+ exc_type: t.Optional[t.Type[BaseException]],
+ exc_value: t.Optional[BaseException],
+ traceback: t.Optional["types.TracebackType"],
+ ) -> t.Optional[bool]:
+ if isinstance(exc_value, BaseException):
+ self.retry_state.set_exception((exc_type, exc_value, traceback))
+ return True # Swallow exception.
+ else:
+ # We don't have the result, actually.
+ self.retry_state.set_result(None)
+ return None
+
+
+class BaseRetrying(ABC):
+ def __init__(
+ self,
+ sleep: t.Callable[[t.Union[int, float]], None] = sleep,
+ stop: "stop_base" = stop_never,
+ wait: "wait_base" = wait_none(),
+ retry: retry_base = retry_if_exception_type(),
+ before: t.Callable[["RetryCallState"], None] = before_nothing,
+ after: t.Callable[["RetryCallState"], None] = after_nothing,
+ before_sleep: t.Optional[t.Callable[["RetryCallState"], None]] = None,
+ reraise: bool = False,
+ retry_error_cls: t.Type[RetryError] = RetryError,
+ retry_error_callback: t.Optional[t.Callable[["RetryCallState"], t.Any]] = None,
+ ):
+ self.sleep = sleep
+ self.stop = stop
+ self.wait = wait
+ self.retry = retry
+ self.before = before
+ self.after = after
+ self.before_sleep = before_sleep
+ self.reraise = reraise
+ self._local = threading.local()
+ self.retry_error_cls = retry_error_cls
+ self.retry_error_callback = retry_error_callback
+
+ def copy(
+ self,
+ sleep: t.Union[t.Callable[[t.Union[int, float]], None], object] = _unset,
+ stop: t.Union["stop_base", object] = _unset,
+ wait: t.Union["wait_base", object] = _unset,
+ retry: t.Union[retry_base, object] = _unset,
+ before: t.Union[t.Callable[["RetryCallState"], None], object] = _unset,
+ after: t.Union[t.Callable[["RetryCallState"], None], object] = _unset,
+ before_sleep: t.Union[t.Optional[t.Callable[["RetryCallState"], None]], object] = _unset,
+ reraise: t.Union[bool, object] = _unset,
+ retry_error_cls: t.Union[t.Type[RetryError], object] = _unset,
+ retry_error_callback: t.Union[t.Optional[t.Callable[["RetryCallState"], t.Any]], object] = _unset,
+ ) -> "BaseRetrying":
+ """Copy this object with some parameters changed if needed."""
+ return self.__class__(
+ sleep=_first_set(sleep, self.sleep),
+ stop=_first_set(stop, self.stop),
+ wait=_first_set(wait, self.wait),
+ retry=_first_set(retry, self.retry),
+ before=_first_set(before, self.before),
+ after=_first_set(after, self.after),
+ before_sleep=_first_set(before_sleep, self.before_sleep),
+ reraise=_first_set(reraise, self.reraise),
+ retry_error_cls=_first_set(retry_error_cls, self.retry_error_cls),
+ retry_error_callback=_first_set(retry_error_callback, self.retry_error_callback),
+ )
+
+ def __repr__(self) -> str:
+ return (
+ f"<{self.__class__.__name__} object at 0x{id(self):x} ("
+ f"stop={self.stop}, "
+ f"wait={self.wait}, "
+ f"sleep={self.sleep}, "
+ f"retry={self.retry}, "
+ f"before={self.before}, "
+ f"after={self.after})>"
+ )
+
+ @property
+ def statistics(self) -> t.Dict[str, t.Any]:
+ """Return a dictionary of runtime statistics.
+
+        This dictionary will be empty when the controller has never been
+        run. While it is running, or after it has run, it should (but may
+        not) contain useful and/or informational keys and values.
+
+        .. warning:: The keys in this dictionary **should** be somewhat
+                     stable (not changing), but their existence **may**
+                     change between major releases as new statistics are
+                     gathered or removed, so before accessing keys ensure
+                     that they actually exist and handle the case when
+                     they do not.
+
+        .. note:: The values in this dictionary are local to the thread
+                  running the call (so if multiple threads share the same
+                  retrying object - either directly or indirectly - they
+                  will each have their own view of the statistics they
+                  have collected; in the future we may provide a way to
+                  aggregate the various statistics from each thread).
+ """
+ try:
+ return self._local.statistics
+ except AttributeError:
+ self._local.statistics = {}
+ return self._local.statistics
+
+ def wraps(self, f: WrappedFn) -> WrappedFn:
+ """Wrap a function for retrying.
+
+        :param f: A function to wrap for retrying.
+ """
+
+ @functools.wraps(f)
+ def wrapped_f(*args: t.Any, **kw: t.Any) -> t.Any:
+ return self(f, *args, **kw)
+
+ def retry_with(*args: t.Any, **kwargs: t.Any) -> WrappedFn:
+ return self.copy(*args, **kwargs).wraps(f)
+
+ wrapped_f.retry = self
+ wrapped_f.retry_with = retry_with
+
+ return wrapped_f
+
+ def begin(self) -> None:
+ self.statistics.clear()
+ self.statistics["start_time"] = time.monotonic()
+ self.statistics["attempt_number"] = 1
+ self.statistics["idle_for"] = 0
+
+ def iter(self, retry_state: "RetryCallState") -> t.Union[DoAttempt, DoSleep, t.Any]: # noqa
+ fut = retry_state.outcome
+ if fut is None:
+ if self.before is not None:
+ self.before(retry_state)
+ return DoAttempt()
+
+ is_explicit_retry = retry_state.outcome.failed and isinstance(retry_state.outcome.exception(), TryAgain)
+ if not (is_explicit_retry or self.retry(retry_state=retry_state)):
+ return fut.result()
+
+ if self.after is not None:
+ self.after(retry_state)
+
+ self.statistics["delay_since_first_attempt"] = retry_state.seconds_since_start
+ if self.stop(retry_state=retry_state):
+ if self.retry_error_callback:
+ return self.retry_error_callback(retry_state)
+ retry_exc = self.retry_error_cls(fut)
+ if self.reraise:
+ raise retry_exc.reraise()
+ raise retry_exc from fut.exception()
+
+ if self.wait:
+ sleep = self.wait(retry_state=retry_state)
+ else:
+ sleep = 0.0
+ retry_state.next_action = RetryAction(sleep)
+ retry_state.idle_for += sleep
+ self.statistics["idle_for"] += sleep
+ self.statistics["attempt_number"] += 1
+
+ if self.before_sleep is not None:
+ self.before_sleep(retry_state)
+
+ return DoSleep(sleep)
+
+ def __iter__(self) -> t.Generator[AttemptManager, None, None]:
+ self.begin()
+
+ retry_state = RetryCallState(self, fn=None, args=(), kwargs={})
+ while True:
+ do = self.iter(retry_state=retry_state)
+ if isinstance(do, DoAttempt):
+ yield AttemptManager(retry_state=retry_state)
+ elif isinstance(do, DoSleep):
+ retry_state.prepare_for_next_attempt()
+ self.sleep(do)
+ else:
+ break
+
+ @abstractmethod
+ def __call__(self, fn: t.Callable[..., _RetValT], *args: t.Any, **kwargs: t.Any) -> _RetValT:
+ pass
+
+
+class Retrying(BaseRetrying):
+ """Retrying controller."""
+
+ def __call__(self, fn: t.Callable[..., _RetValT], *args: t.Any, **kwargs: t.Any) -> _RetValT:
+ self.begin()
+
+ retry_state = RetryCallState(retry_object=self, fn=fn, args=args, kwargs=kwargs)
+ while True:
+ do = self.iter(retry_state=retry_state)
+ if isinstance(do, DoAttempt):
+ try:
+ result = fn(*args, **kwargs)
+ except BaseException: # noqa: B902
+ retry_state.set_exception(sys.exc_info())
+ else:
+ retry_state.set_result(result)
+ elif isinstance(do, DoSleep):
+ retry_state.prepare_for_next_attempt()
+ self.sleep(do)
+ else:
+ return do
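+
+# Illustrative sketch (comment only, not part of the vendored module): a
+# Retrying controller can be called directly or driven via iteration, e.g.
+# (some_flaky_call is a hypothetical placeholder):
+#
+#     retryer = Retrying(stop=stop_after_attempt(3), reraise=True)
+#     result = retryer(some_flaky_call, "arg")
+#
+#     for attempt in Retrying(stop=stop_after_attempt(3)):
+#         with attempt:
+#             result = some_flaky_call("arg")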
+
+
+class Future(futures.Future):
+ """Encapsulates a (future or past) attempted call to a target function."""
+
+ def __init__(self, attempt_number: int) -> None:
+ super().__init__()
+ self.attempt_number = attempt_number
+
+ @property
+ def failed(self) -> bool:
+ """Return whether a exception is being held in this future."""
+ return self.exception() is not None
+
+ @classmethod
+ def construct(cls, attempt_number: int, value: t.Any, has_exception: bool) -> "Future":
+ """Construct a new Future object."""
+ fut = cls(attempt_number)
+ if has_exception:
+ fut.set_exception(value)
+ else:
+ fut.set_result(value)
+ return fut
+
+
+class RetryCallState:
+ """State related to a single call wrapped with Retrying."""
+
+ def __init__(
+ self,
+ retry_object: BaseRetrying,
+ fn: t.Optional[WrappedFn],
+ args: t.Any,
+ kwargs: t.Any,
+ ) -> None:
+ #: Retry call start timestamp
+ self.start_time = time.monotonic()
+ #: Retry manager object
+ self.retry_object = retry_object
+ #: Function wrapped by this retry call
+ self.fn = fn
+ #: Arguments of the function wrapped by this retry call
+ self.args = args
+ #: Keyword arguments of the function wrapped by this retry call
+ self.kwargs = kwargs
+
+ #: The number of the current attempt
+ self.attempt_number: int = 1
+ #: Last outcome (result or exception) produced by the function
+ self.outcome: t.Optional[Future] = None
+ #: Timestamp of the last outcome
+ self.outcome_timestamp: t.Optional[float] = None
+ #: Time spent sleeping in retries
+ self.idle_for: float = 0.0
+ #: Next action as decided by the retry manager
+ self.next_action: t.Optional[RetryAction] = None
+
+ @property
+ def seconds_since_start(self) -> t.Optional[float]:
+ if self.outcome_timestamp is None:
+ return None
+ return self.outcome_timestamp - self.start_time
+
+ def prepare_for_next_attempt(self) -> None:
+ self.outcome = None
+ self.outcome_timestamp = None
+ self.attempt_number += 1
+ self.next_action = None
+
+ def set_result(self, val: t.Any) -> None:
+ ts = time.monotonic()
+ fut = Future(self.attempt_number)
+ fut.set_result(val)
+ self.outcome, self.outcome_timestamp = fut, ts
+
+ def set_exception(self, exc_info: t.Tuple[t.Type[BaseException], BaseException, "types.TracebackType"]) -> None:
+ ts = time.monotonic()
+ fut = Future(self.attempt_number)
+ fut.set_exception(exc_info[1])
+ self.outcome, self.outcome_timestamp = fut, ts
+
+ def __repr__(self):
+ if self.outcome is None:
+ result = "none yet"
+ elif self.outcome.failed:
+ exception = self.outcome.exception()
+ result = f"failed ({exception.__class__.__name__} {exception})"
+ else:
+ result = f"returned {self.outcome.result()}"
+
+ slept = float(round(self.idle_for, 2))
+ clsname = self.__class__.__name__
+ return f"<{clsname} {id(self)}: attempt #{self.attempt_number}; slept for {slept}; last result: {result}>"
+
+
+from pip._vendor.tenacity._asyncio import AsyncRetrying # noqa:E402,I100
+
+if tornado:
+ from pip._vendor.tenacity.tornadoweb import TornadoRetrying
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/tenacity/_asyncio.py b/venv/lib/python3.9/site-packages/pip/_vendor/tenacity/_asyncio.py
new file mode 100644
index 0000000..0f32b5f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/tenacity/_asyncio.py
@@ -0,0 +1,92 @@
+# Copyright 2016 Étienne Bersac
+# Copyright 2016 Julien Danjou
+# Copyright 2016 Joshua Harlow
+# Copyright 2013-2014 Ray Holder
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import functools
+import sys
+import typing
+from asyncio import sleep
+
+from pip._vendor.tenacity import AttemptManager
+from pip._vendor.tenacity import BaseRetrying
+from pip._vendor.tenacity import DoAttempt
+from pip._vendor.tenacity import DoSleep
+from pip._vendor.tenacity import RetryCallState
+
+WrappedFn = typing.TypeVar("WrappedFn", bound=typing.Callable)
+_RetValT = typing.TypeVar("_RetValT")
+
+
+class AsyncRetrying(BaseRetrying):
+ def __init__(self, sleep: typing.Callable[[float], typing.Awaitable] = sleep, **kwargs: typing.Any) -> None:
+ super().__init__(**kwargs)
+ self.sleep = sleep
+
+ async def __call__( # type: ignore # Change signature from supertype
+ self,
+ fn: typing.Callable[..., typing.Awaitable[_RetValT]],
+ *args: typing.Any,
+ **kwargs: typing.Any,
+ ) -> _RetValT:
+ self.begin()
+
+ retry_state = RetryCallState(retry_object=self, fn=fn, args=args, kwargs=kwargs)
+ while True:
+ do = self.iter(retry_state=retry_state)
+ if isinstance(do, DoAttempt):
+ try:
+ result = await fn(*args, **kwargs)
+ except BaseException: # noqa: B902
+ retry_state.set_exception(sys.exc_info())
+ else:
+ retry_state.set_result(result)
+ elif isinstance(do, DoSleep):
+ retry_state.prepare_for_next_attempt()
+ await self.sleep(do)
+ else:
+ return do
+
+ def __aiter__(self) -> "AsyncRetrying":
+ self.begin()
+ self._retry_state = RetryCallState(self, fn=None, args=(), kwargs={})
+ return self
+
+ async def __anext__(self) -> typing.Union[AttemptManager, typing.Any]:
+ while True:
+ do = self.iter(retry_state=self._retry_state)
+ if do is None:
+ raise StopAsyncIteration
+ elif isinstance(do, DoAttempt):
+ return AttemptManager(retry_state=self._retry_state)
+ elif isinstance(do, DoSleep):
+ self._retry_state.prepare_for_next_attempt()
+ await self.sleep(do)
+ else:
+ return do
+
+ def wraps(self, fn: WrappedFn) -> WrappedFn:
+ fn = super().wraps(fn)
+ # Ensure wrapper is recognized as a coroutine function.
+
+        @functools.wraps(fn)
+        async def async_wrapped(*args: typing.Any, **kwargs: typing.Any) -> typing.Any:
+            return await fn(*args, **kwargs)
+
+        # Preserve attributes
+        async_wrapped.retry = fn.retry
+        async_wrapped.retry_with = fn.retry_with
+
+        return async_wrapped
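+
+# Illustrative sketch (comment only, not part of the vendored module):
+# AsyncRetrying can wrap coroutine functions via the retry decorator, or be
+# driven with "async for" (flaky_request is a hypothetical placeholder):
+#
+#     async def fetch():
+#         async for attempt in AsyncRetrying(stop=stop_after_attempt(3)):
+#             with attempt:
+#                 return await flaky_request()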
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/tenacity/_utils.py b/venv/lib/python3.9/site-packages/pip/_vendor/tenacity/_utils.py
new file mode 100644
index 0000000..d5c4c9d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/tenacity/_utils.py
@@ -0,0 +1,68 @@
+# Copyright 2016 Julien Danjou
+# Copyright 2016 Joshua Harlow
+# Copyright 2013-2014 Ray Holder
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+import typing
+
+
+# sys.maxsize:
+# An integer giving the maximum value a variable of type Py_ssize_t can take.
+MAX_WAIT = sys.maxsize / 2
+
+
+def find_ordinal(pos_num: int) -> str:
+ # See: https://en.wikipedia.org/wiki/English_numerals#Ordinal_numbers
+ if pos_num == 0:
+ return "th"
+ elif pos_num == 1:
+ return "st"
+ elif pos_num == 2:
+ return "nd"
+ elif pos_num == 3:
+ return "rd"
+ elif 4 <= pos_num <= 20:
+ return "th"
+ else:
+ return find_ordinal(pos_num % 10)
+
+
+def to_ordinal(pos_num: int) -> str:
+ return f"{pos_num}{find_ordinal(pos_num)}"
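+
+# For example (illustrative): to_ordinal(1) == "1st", to_ordinal(2) == "2nd",
+# to_ordinal(11) == "11th" and to_ordinal(23) == "23rd".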
+
+
+def get_callback_name(cb: typing.Callable[..., typing.Any]) -> str:
+ """Get a callback fully-qualified name.
+
+ If no name can be produced ``repr(cb)`` is called and returned.
+ """
+ segments = []
+ try:
+ segments.append(cb.__qualname__)
+ except AttributeError:
+ try:
+ segments.append(cb.__name__)
+ except AttributeError:
+ pass
+ if not segments:
+ return repr(cb)
+ else:
+ try:
+ # When running under sphinx it appears this can be none?
+ if cb.__module__:
+ segments.insert(0, cb.__module__)
+ except AttributeError:
+ pass
+ return ".".join(segments)
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/tenacity/after.py b/venv/lib/python3.9/site-packages/pip/_vendor/tenacity/after.py
new file mode 100644
index 0000000..c056700
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/tenacity/after.py
@@ -0,0 +1,46 @@
+# Copyright 2016 Julien Danjou
+# Copyright 2016 Joshua Harlow
+# Copyright 2013-2014 Ray Holder
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from pip._vendor.tenacity import _utils
+
+if typing.TYPE_CHECKING:
+ import logging
+
+ from pip._vendor.tenacity import RetryCallState
+
+
+def after_nothing(retry_state: "RetryCallState") -> None:
+ """After call strategy that does nothing."""
+
+
+def after_log(
+ logger: "logging.Logger",
+ log_level: int,
+ sec_format: str = "%0.3f",
+) -> typing.Callable[["RetryCallState"], None]:
+ """After call strategy that logs to some logger the finished attempt."""
+
+ def log_it(retry_state: "RetryCallState") -> None:
+ logger.log(
+ log_level,
+ f"Finished call to '{_utils.get_callback_name(retry_state.fn)}' "
+ f"after {sec_format % retry_state.seconds_since_start}(s), "
+ f"this was the {_utils.to_ordinal(retry_state.attempt_number)} time calling it.",
+ )
+
+ return log_it
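+
+# Illustrative sketch (comment only, not part of the vendored module): the
+# returned callable is passed as the "after" hook of a retry controller, e.g.
+#
+#     import logging
+#
+#     @retry(after=after_log(logging.getLogger(__name__), logging.WARNING))
+#     def flaky():  # hypothetical function
+#         ...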
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/tenacity/before.py b/venv/lib/python3.9/site-packages/pip/_vendor/tenacity/before.py
new file mode 100644
index 0000000..a72c2c5
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/tenacity/before.py
@@ -0,0 +1,41 @@
+# Copyright 2016 Julien Danjou
+# Copyright 2016 Joshua Harlow
+# Copyright 2013-2014 Ray Holder
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from pip._vendor.tenacity import _utils
+
+if typing.TYPE_CHECKING:
+ import logging
+
+ from pip._vendor.tenacity import RetryCallState
+
+
+def before_nothing(retry_state: "RetryCallState") -> None:
+ """Before call strategy that does nothing."""
+
+
+def before_log(logger: "logging.Logger", log_level: int) -> typing.Callable[["RetryCallState"], None]:
+ """Before call strategy that logs to some logger the attempt."""
+
+ def log_it(retry_state: "RetryCallState") -> None:
+ logger.log(
+ log_level,
+ f"Starting call to '{_utils.get_callback_name(retry_state.fn)}', "
+ f"this is the {_utils.to_ordinal(retry_state.attempt_number)} time calling it.",
+ )
+
+ return log_it
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/tenacity/before_sleep.py b/venv/lib/python3.9/site-packages/pip/_vendor/tenacity/before_sleep.py
new file mode 100644
index 0000000..b35564f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/tenacity/before_sleep.py
@@ -0,0 +1,58 @@
+# Copyright 2016 Julien Danjou
+# Copyright 2016 Joshua Harlow
+# Copyright 2013-2014 Ray Holder
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from pip._vendor.tenacity import _utils
+
+if typing.TYPE_CHECKING:
+ import logging
+
+ from pip._vendor.tenacity import RetryCallState
+
+
+def before_sleep_nothing(retry_state: "RetryCallState") -> None:
+ """Before call strategy that does nothing."""
+
+
+def before_sleep_log(
+ logger: "logging.Logger",
+ log_level: int,
+ exc_info: bool = False,
+) -> typing.Callable[["RetryCallState"], None]:
+ """Before call strategy that logs to some logger the attempt."""
+
+ def log_it(retry_state: "RetryCallState") -> None:
+ if retry_state.outcome.failed:
+ ex = retry_state.outcome.exception()
+ verb, value = "raised", f"{ex.__class__.__name__}: {ex}"
+
+ if exc_info:
+ local_exc_info = retry_state.outcome.exception()
+ else:
+ local_exc_info = False
+ else:
+ verb, value = "returned", retry_state.outcome.result()
+ local_exc_info = False # exc_info does not apply when no exception
+
+ logger.log(
+ log_level,
+ f"Retrying {_utils.get_callback_name(retry_state.fn)} "
+ f"in {retry_state.next_action.sleep} seconds as it {verb} {value}.",
+ exc_info=local_exc_info,
+ )
+
+ return log_it
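+
+# Illustrative sketch (comment only, not part of the vendored module):
+# typically wired up as the "before_sleep" hook so each upcoming retry and
+# the outcome that caused it get logged, e.g.
+#
+#     import logging
+#
+#     @retry(wait=wait_fixed(2),
+#            before_sleep=before_sleep_log(logging.getLogger(__name__),
+#                                          logging.DEBUG))
+#     def flaky():  # hypothetical function
+#         ...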
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/tenacity/nap.py b/venv/lib/python3.9/site-packages/pip/_vendor/tenacity/nap.py
new file mode 100644
index 0000000..72aa5bf
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/tenacity/nap.py
@@ -0,0 +1,43 @@
+# Copyright 2016 Étienne Bersac
+# Copyright 2016 Julien Danjou
+# Copyright 2016 Joshua Harlow
+# Copyright 2013-2014 Ray Holder
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import time
+import typing
+
+if typing.TYPE_CHECKING:
+ import threading
+
+
+def sleep(seconds: float) -> None:
+ """
+ Sleep strategy that delays execution for a given number of seconds.
+
+ This is the default strategy, and may be mocked out for unit testing.
+ """
+ time.sleep(seconds)
+
+
+class sleep_using_event:
+ """Sleep strategy that waits on an event to be set."""
+
+ def __init__(self, event: "threading.Event") -> None:
+ self.event = event
+
+ def __call__(self, timeout: typing.Optional[float]) -> None:
+ # NOTE(harlowja): this may *not* actually wait for timeout
+        # seconds if the event is set (i.e. it may return early).
+ self.event.wait(timeout=timeout)
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/tenacity/retry.py b/venv/lib/python3.9/site-packages/pip/_vendor/tenacity/retry.py
new file mode 100644
index 0000000..1d727e9
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/tenacity/retry.py
@@ -0,0 +1,213 @@
+# Copyright 2016–2021 Julien Danjou
+# Copyright 2016 Joshua Harlow
+# Copyright 2013-2014 Ray Holder
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import abc
+import re
+import typing
+
+if typing.TYPE_CHECKING:
+ from pip._vendor.tenacity import RetryCallState
+
+
+class retry_base(abc.ABC):
+ """Abstract base class for retry strategies."""
+
+ @abc.abstractmethod
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ pass
+
+ def __and__(self, other: "retry_base") -> "retry_all":
+ return retry_all(self, other)
+
+ def __or__(self, other: "retry_base") -> "retry_any":
+ return retry_any(self, other)
+
+
+class _retry_never(retry_base):
+ """Retry strategy that never rejects any result."""
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ return False
+
+
+retry_never = _retry_never()
+
+
+class _retry_always(retry_base):
+ """Retry strategy that always rejects any result."""
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ return True
+
+
+retry_always = _retry_always()
+
+
+class retry_if_exception(retry_base):
+ """Retry strategy that retries if an exception verifies a predicate."""
+
+ def __init__(self, predicate: typing.Callable[[BaseException], bool]) -> None:
+ self.predicate = predicate
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ if retry_state.outcome.failed:
+ return self.predicate(retry_state.outcome.exception())
+ else:
+ return False
+
+
+class retry_if_exception_type(retry_if_exception):
+ """Retries if an exception has been raised of one or more types."""
+
+ def __init__(
+ self,
+ exception_types: typing.Union[
+ typing.Type[BaseException],
+ typing.Tuple[typing.Type[BaseException], ...],
+ ] = Exception,
+ ) -> None:
+ self.exception_types = exception_types
+ super().__init__(lambda e: isinstance(e, exception_types))
+
+
+class retry_if_not_exception_type(retry_if_exception):
+ """Retries except an exception has been raised of one or more types."""
+
+ def __init__(
+ self,
+ exception_types: typing.Union[
+ typing.Type[BaseException],
+ typing.Tuple[typing.Type[BaseException], ...],
+ ] = Exception,
+ ) -> None:
+ self.exception_types = exception_types
+ super().__init__(lambda e: not isinstance(e, exception_types))
+
+
+class retry_unless_exception_type(retry_if_exception):
+ """Retries until an exception is raised of one or more types."""
+
+ def __init__(
+ self,
+ exception_types: typing.Union[
+ typing.Type[BaseException],
+ typing.Tuple[typing.Type[BaseException], ...],
+ ] = Exception,
+ ) -> None:
+ self.exception_types = exception_types
+ super().__init__(lambda e: not isinstance(e, exception_types))
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ # always retry if no exception was raised
+ if not retry_state.outcome.failed:
+ return True
+ return self.predicate(retry_state.outcome.exception())
+
+
+class retry_if_result(retry_base):
+ """Retries if the result verifies a predicate."""
+
+ def __init__(self, predicate: typing.Callable[[typing.Any], bool]) -> None:
+ self.predicate = predicate
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ if not retry_state.outcome.failed:
+ return self.predicate(retry_state.outcome.result())
+ else:
+ return False
+
+
+class retry_if_not_result(retry_base):
+ """Retries if the result refutes a predicate."""
+
+ def __init__(self, predicate: typing.Callable[[typing.Any], bool]) -> None:
+ self.predicate = predicate
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ if not retry_state.outcome.failed:
+ return not self.predicate(retry_state.outcome.result())
+ else:
+ return False
+
+
+class retry_if_exception_message(retry_if_exception):
+ """Retries if an exception message equals or matches."""
+
+ def __init__(
+ self,
+ message: typing.Optional[str] = None,
+ match: typing.Optional[str] = None,
+ ) -> None:
+ if message and match:
+ raise TypeError(f"{self.__class__.__name__}() takes either 'message' or 'match', not both")
+
+ # set predicate
+ if message:
+
+ def message_fnc(exception: BaseException) -> bool:
+ return message == str(exception)
+
+ predicate = message_fnc
+ elif match:
+ prog = re.compile(match)
+
+ def match_fnc(exception: BaseException) -> bool:
+ return bool(prog.match(str(exception)))
+
+ predicate = match_fnc
+ else:
+ raise TypeError(f"{self.__class__.__name__}() missing 1 required argument 'message' or 'match'")
+
+ super().__init__(predicate)
+
+
+class retry_if_not_exception_message(retry_if_exception_message):
+ """Retries until an exception message equals or matches."""
+
+ def __init__(
+ self,
+ message: typing.Optional[str] = None,
+ match: typing.Optional[str] = None,
+ ) -> None:
+ super().__init__(message, match)
+ # invert predicate
+ if_predicate = self.predicate
+ self.predicate = lambda *args_, **kwargs_: not if_predicate(*args_, **kwargs_)
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ if not retry_state.outcome.failed:
+ return True
+ return self.predicate(retry_state.outcome.exception())
+
+
+class retry_any(retry_base):
+ """Retries if any of the retries condition is valid."""
+
+ def __init__(self, *retries: retry_base) -> None:
+ self.retries = retries
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ return any(r(retry_state) for r in self.retries)
+
+
+class retry_all(retry_base):
+ """Retries if all the retries condition are valid."""
+
+ def __init__(self, *retries: retry_base) -> None:
+ self.retries = retries
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ return all(r(retry_state) for r in self.retries)
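+
+# Illustrative sketch (comment only, not part of the vendored module):
+# retry conditions compose with | (any) and & (all), e.g.
+#
+#     policy = retry_if_exception_type(IOError) | retry_if_exception_message(
+#         match=r"timed out.*"
+#     )
+#     # then use it as Retrying(retry=policy) or @retry(retry=policy)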
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/tenacity/stop.py b/venv/lib/python3.9/site-packages/pip/_vendor/tenacity/stop.py
new file mode 100644
index 0000000..faaae9a
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/tenacity/stop.py
@@ -0,0 +1,96 @@
+# Copyright 2016–2021 Julien Danjou
+# Copyright 2016 Joshua Harlow
+# Copyright 2013-2014 Ray Holder
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import abc
+import typing
+
+if typing.TYPE_CHECKING:
+ import threading
+
+ from pip._vendor.tenacity import RetryCallState
+
+
+class stop_base(abc.ABC):
+ """Abstract base class for stop strategies."""
+
+ @abc.abstractmethod
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ pass
+
+ def __and__(self, other: "stop_base") -> "stop_all":
+ return stop_all(self, other)
+
+ def __or__(self, other: "stop_base") -> "stop_any":
+ return stop_any(self, other)
+
+
+class stop_any(stop_base):
+ """Stop if any of the stop condition is valid."""
+
+ def __init__(self, *stops: stop_base) -> None:
+ self.stops = stops
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ return any(x(retry_state) for x in self.stops)
+
+
+class stop_all(stop_base):
+ """Stop if all the stop conditions are valid."""
+
+ def __init__(self, *stops: stop_base) -> None:
+ self.stops = stops
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ return all(x(retry_state) for x in self.stops)
+
+
+class _stop_never(stop_base):
+ """Never stop."""
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ return False
+
+
+stop_never = _stop_never()
+
+
+class stop_when_event_set(stop_base):
+ """Stop when the given event is set."""
+
+ def __init__(self, event: "threading.Event") -> None:
+ self.event = event
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ return self.event.is_set()
+
+
+class stop_after_attempt(stop_base):
+ """Stop when the previous attempt >= max_attempt."""
+
+ def __init__(self, max_attempt_number: int) -> None:
+ self.max_attempt_number = max_attempt_number
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ return retry_state.attempt_number >= self.max_attempt_number
+
+
+class stop_after_delay(stop_base):
+ """Stop when the time from the first attempt >= limit."""
+
+ def __init__(self, max_delay: float) -> None:
+ self.max_delay = max_delay
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ return retry_state.seconds_since_start >= self.max_delay
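+
+# Illustrative sketch (comment only, not part of the vendored module):
+# stop conditions compose the same way, e.g. give up after 5 attempts or
+# 10 seconds since the first attempt, whichever comes first:
+#
+#     stop_policy = stop_after_attempt(5) | stop_after_delay(10)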
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/tenacity/tornadoweb.py b/venv/lib/python3.9/site-packages/pip/_vendor/tenacity/tornadoweb.py
new file mode 100644
index 0000000..8f7731a
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/tenacity/tornadoweb.py
@@ -0,0 +1,59 @@
+# Copyright 2017 Elisey Zanko
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+import typing
+
+from pip._vendor.tenacity import BaseRetrying
+from pip._vendor.tenacity import DoAttempt
+from pip._vendor.tenacity import DoSleep
+from pip._vendor.tenacity import RetryCallState
+
+from tornado import gen
+
+if typing.TYPE_CHECKING:
+ from tornado.concurrent import Future
+
+_RetValT = typing.TypeVar("_RetValT")
+
+
+class TornadoRetrying(BaseRetrying):
+ def __init__(self, sleep: "typing.Callable[[float], Future[None]]" = gen.sleep, **kwargs: typing.Any) -> None:
+ super().__init__(**kwargs)
+ self.sleep = sleep
+
+ @gen.coroutine
+ def __call__( # type: ignore # Change signature from supertype
+ self,
+ fn: "typing.Callable[..., typing.Union[typing.Generator[typing.Any, typing.Any, _RetValT], Future[_RetValT]]]",
+ *args: typing.Any,
+ **kwargs: typing.Any,
+ ) -> "typing.Generator[typing.Any, typing.Any, _RetValT]":
+ self.begin()
+
+ retry_state = RetryCallState(retry_object=self, fn=fn, args=args, kwargs=kwargs)
+ while True:
+ do = self.iter(retry_state=retry_state)
+ if isinstance(do, DoAttempt):
+ try:
+ result = yield fn(*args, **kwargs)
+ except BaseException: # noqa: B902
+ retry_state.set_exception(sys.exc_info())
+ else:
+ retry_state.set_result(result)
+ elif isinstance(do, DoSleep):
+ retry_state.prepare_for_next_attempt()
+ yield self.sleep(do)
+ else:
+ raise gen.Return(do)
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/tenacity/wait.py b/venv/lib/python3.9/site-packages/pip/_vendor/tenacity/wait.py
new file mode 100644
index 0000000..6ed97a7
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/tenacity/wait.py
@@ -0,0 +1,191 @@
+# Copyright 2016–2021 Julien Danjou
+# Copyright 2016 Joshua Harlow
+# Copyright 2013-2014 Ray Holder
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import abc
+import random
+import typing
+
+from pip._vendor.tenacity import _utils
+
+if typing.TYPE_CHECKING:
+ from pip._vendor.tenacity import RetryCallState
+
+
+class wait_base(abc.ABC):
+ """Abstract base class for wait strategies."""
+
+ @abc.abstractmethod
+ def __call__(self, retry_state: "RetryCallState") -> float:
+ pass
+
+ def __add__(self, other: "wait_base") -> "wait_combine":
+ return wait_combine(self, other)
+
+ def __radd__(self, other: "wait_base") -> typing.Union["wait_combine", "wait_base"]:
+ # make it possible to use multiple waits with the built-in sum function
+ if other == 0:
+ return self
+ return self.__add__(other)
+
+
+class wait_fixed(wait_base):
+ """Wait strategy that waits a fixed amount of time between each retry."""
+
+ def __init__(self, wait: float) -> None:
+ self.wait_fixed = wait
+
+ def __call__(self, retry_state: "RetryCallState") -> float:
+ return self.wait_fixed
+
+
+class wait_none(wait_fixed):
+ """Wait strategy that doesn't wait at all before retrying."""
+
+ def __init__(self) -> None:
+ super().__init__(0)
+
+
+class wait_random(wait_base):
+ """Wait strategy that waits a random amount of time between min/max."""
+
+ def __init__(self, min: typing.Union[int, float] = 0, max: typing.Union[int, float] = 1) -> None: # noqa
+ self.wait_random_min = min
+ self.wait_random_max = max
+
+ def __call__(self, retry_state: "RetryCallState") -> float:
+ return self.wait_random_min + (random.random() * (self.wait_random_max - self.wait_random_min))
+
+
+class wait_combine(wait_base):
+ """Combine several waiting strategies."""
+
+ def __init__(self, *strategies: wait_base) -> None:
+ self.wait_funcs = strategies
+
+ def __call__(self, retry_state: "RetryCallState") -> float:
+ return sum(x(retry_state=retry_state) for x in self.wait_funcs)
+
+
+class wait_chain(wait_base):
+ """Chain two or more waiting strategies.
+
+ If all strategies are exhausted, the very last strategy is used
+ thereafter.
+
+ For example::
+
+ @retry(wait=wait_chain(*[wait_fixed(1) for i in range(3)] +
+ [wait_fixed(2) for j in range(5)] +
+                               [wait_fixed(5) for k in range(4)]))
+        def wait_chained():
+            print("Wait 1s for 3 attempts, 2s for 5 attempts "
+                  "and 5s thereafter.")
+ """
+
+ def __init__(self, *strategies: wait_base) -> None:
+ self.strategies = strategies
+
+ def __call__(self, retry_state: "RetryCallState") -> float:
+ wait_func_no = min(max(retry_state.attempt_number, 1), len(self.strategies))
+ wait_func = self.strategies[wait_func_no - 1]
+ return wait_func(retry_state=retry_state)
+
+
+class wait_incrementing(wait_base):
+ """Wait an incremental amount of time after each attempt.
+
+    Start at a given value and increase the wait by a fixed increment on each
+    attempt, restricting the result to some maximum value.
+ """
+
+ def __init__(
+ self,
+ start: typing.Union[int, float] = 0,
+ increment: typing.Union[int, float] = 100,
+ max: typing.Union[int, float] = _utils.MAX_WAIT, # noqa
+ ) -> None:
+ self.start = start
+ self.increment = increment
+ self.max = max
+
+ def __call__(self, retry_state: "RetryCallState") -> float:
+ result = self.start + (self.increment * (retry_state.attempt_number - 1))
+ return max(0, min(result, self.max))
+
+
+class wait_exponential(wait_base):
+ """Wait strategy that applies exponential backoff.
+
+ It allows for a customized multiplier and an ability to restrict the
+ upper and lower limits to some maximum and minimum value.
+
+ The intervals are fixed (i.e. there is no jitter), so this strategy is
+ suitable for balancing retries against latency when a required resource is
+ unavailable for an unknown duration, but *not* suitable for resolving
+ contention between multiple processes for a shared resource. Use
+ wait_random_exponential for the latter case.
+ """
+
+ def __init__(
+ self,
+ multiplier: typing.Union[int, float] = 1,
+ max: typing.Union[int, float] = _utils.MAX_WAIT, # noqa
+ exp_base: typing.Union[int, float] = 2,
+ min: typing.Union[int, float] = 0, # noqa
+ ) -> None:
+ self.multiplier = multiplier
+ self.min = min
+ self.max = max
+ self.exp_base = exp_base
+
+ def __call__(self, retry_state: "RetryCallState") -> float:
+ try:
+ exp = self.exp_base ** (retry_state.attempt_number - 1)
+ result = self.multiplier * exp
+ except OverflowError:
+ return self.max
+ return max(max(0, self.min), min(result, self.max))
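+
+# Worked example (illustrative): wait_exponential(multiplier=1, min=1, max=10)
+# produces waits of 1, 2, 4, 8, 10, 10, ... seconds for attempts 1, 2, 3, ...
+# i.e. multiplier * exp_base ** (attempt_number - 1), clamped to [min, max].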
+
+
+class wait_random_exponential(wait_exponential):
+ """Random wait with exponentially widening window.
+
+ An exponential backoff strategy used to mediate contention between multiple
+ uncoordinated processes for a shared resource in distributed systems. This
+ is the sense in which "exponential backoff" is meant in e.g. Ethernet
+ networking, and corresponds to the "Full Jitter" algorithm described in
+ this blog post:
+
+ https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
+
+ Each retry occurs at a random time in a geometrically expanding interval.
+ It allows for a custom multiplier and an ability to restrict the upper
+ limit of the random interval to some maximum value.
+
+ Example::
+
+ wait_random_exponential(multiplier=0.5, # initial window 0.5s
+ max=60) # max 60s timeout
+
+ When waiting for an unavailable resource to become available again, as
+ opposed to trying to resolve contention for a shared resource, the
+ wait_exponential strategy (which uses a fixed interval) may be preferable.
+
+ """
+
+ def __call__(self, retry_state: "RetryCallState") -> float:
+ high = super().__call__(retry_state=retry_state)
+ return random.uniform(0, high)
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/tomli/__init__.py b/venv/lib/python3.9/site-packages/pip/_vendor/tomli/__init__.py
new file mode 100644
index 0000000..1cd8e07
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/tomli/__init__.py
@@ -0,0 +1,6 @@
+"""A lil' TOML parser."""
+
+__all__ = ("loads", "load", "TOMLDecodeError")
+__version__ = "1.0.3" # DO NOT EDIT THIS LINE MANUALLY. LET bump2version UTILITY DO IT
+
+from pip._vendor.tomli._parser import TOMLDecodeError, load, loads
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/tomli/_parser.py b/venv/lib/python3.9/site-packages/pip/_vendor/tomli/_parser.py
new file mode 100644
index 0000000..730a746
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/tomli/_parser.py
@@ -0,0 +1,703 @@
+import string
+from types import MappingProxyType
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Callable,
+ Dict,
+ FrozenSet,
+ Iterable,
+ Optional,
+ TextIO,
+ Tuple,
+)
+
+from pip._vendor.tomli._re import (
+ RE_BIN,
+ RE_DATETIME,
+ RE_HEX,
+ RE_LOCALTIME,
+ RE_NUMBER,
+ RE_OCT,
+ match_to_datetime,
+ match_to_localtime,
+ match_to_number,
+)
+
+if TYPE_CHECKING:
+ from re import Pattern
+
+
+ASCII_CTRL = frozenset(chr(i) for i in range(32)) | frozenset(chr(127))
+
+# Neither of these sets includes the quotation mark or the backslash. They are
+# currently handled as separate cases in the parser functions.
+ILLEGAL_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t")
+ILLEGAL_MULTILINE_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t\n\r")
+
+ILLEGAL_LITERAL_STR_CHARS = ILLEGAL_BASIC_STR_CHARS
+ILLEGAL_MULTILINE_LITERAL_STR_CHARS = ASCII_CTRL - frozenset("\t\n")
+
+ILLEGAL_COMMENT_CHARS = ILLEGAL_BASIC_STR_CHARS
+
+TOML_WS = frozenset(" \t")
+TOML_WS_AND_NEWLINE = TOML_WS | frozenset("\n")
+BARE_KEY_CHARS = frozenset(string.ascii_letters + string.digits + "-_")
+KEY_INITIAL_CHARS = BARE_KEY_CHARS | frozenset("\"'")
+
+BASIC_STR_ESCAPE_REPLACEMENTS = MappingProxyType(
+ {
+ "\\b": "\u0008", # backspace
+ "\\t": "\u0009", # tab
+ "\\n": "\u000A", # linefeed
+ "\\f": "\u000C", # form feed
+ "\\r": "\u000D", # carriage return
+ '\\"': "\u0022", # quote
+ "\\\\": "\u005C", # backslash
+ }
+)
+
+# Type annotations
+ParseFloat = Callable[[str], Any]
+Key = Tuple[str, ...]
+Pos = int
+
+
+class TOMLDecodeError(ValueError):
+ """An error raised if a document is not valid TOML."""
+
+
+def load(fp: TextIO, *, parse_float: ParseFloat = float) -> Dict[str, Any]:
+ """Parse TOML from a file object."""
+ s = fp.read()
+ return loads(s, parse_float=parse_float)
+
+
+def loads(s: str, *, parse_float: ParseFloat = float) -> Dict[str, Any]: # noqa: C901
+ """Parse TOML from a string."""
+
+ # The spec allows converting "\r\n" to "\n", even in string
+ # literals. Let's do so to simplify parsing.
+ src = s.replace("\r\n", "\n")
+ pos = 0
+ state = State()
+
+ # Parse one statement at a time
+ # (typically means one line in TOML source)
+ while True:
+ # 1. Skip line leading whitespace
+ pos = skip_chars(src, pos, TOML_WS)
+
+ # 2. Parse rules. Expect one of the following:
+ # - end of file
+ # - end of line
+ # - comment
+ # - key/value pair
+ # - append dict to list (and move to its namespace)
+ # - create dict (and move to its namespace)
+ # Skip trailing whitespace when applicable.
+ try:
+ char = src[pos]
+ except IndexError:
+ break
+ if char == "\n":
+ pos += 1
+ continue
+ if char in KEY_INITIAL_CHARS:
+ pos = key_value_rule(src, pos, state, parse_float)
+ pos = skip_chars(src, pos, TOML_WS)
+ elif char == "[":
+ try:
+ second_char: Optional[str] = src[pos + 1]
+ except IndexError:
+ second_char = None
+ if second_char == "[":
+ pos = create_list_rule(src, pos, state)
+ else:
+ pos = create_dict_rule(src, pos, state)
+ pos = skip_chars(src, pos, TOML_WS)
+ elif char != "#":
+ raise suffixed_err(src, pos, "Invalid statement")
+
+ # 3. Skip comment
+ pos = skip_comment(src, pos)
+
+ # 4. Expect end of line or end of file
+ try:
+ char = src[pos]
+ except IndexError:
+ break
+ if char != "\n":
+ raise suffixed_err(
+ src, pos, "Expected newline or end of document after a statement"
+ )
+ pos += 1
+
+ return state.out.dict
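+
+# Illustrative usage sketch (comment only, not part of the vendored module):
+#
+#     from pip._vendor import tomli
+#     doc = tomli.loads('[project]\nname = "espeonage"\nversion = "0.1.0"')
+#     # doc == {"project": {"name": "espeonage", "version": "0.1.0"}}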
+
+
+class State:
+ def __init__(self) -> None:
+ # Mutable, read-only
+ self.out = NestedDict()
+ self.flags = Flags()
+
+ # Immutable, read and write
+ self.header_namespace: Key = ()
+
+
+class Flags:
+ """Flags that map to parsed keys/namespaces."""
+
+ # Marks an immutable namespace (inline array or inline table).
+ FROZEN = 0
+ # Marks a nest that has been explicitly created and can no longer
+ # be opened using the "[table]" syntax.
+ EXPLICIT_NEST = 1
+
+ def __init__(self) -> None:
+ self._flags: Dict[str, dict] = {}
+
+ def unset_all(self, key: Key) -> None:
+ cont = self._flags
+ for k in key[:-1]:
+ if k not in cont:
+ return
+ cont = cont[k]["nested"]
+ cont.pop(key[-1], None)
+
+ def set_for_relative_key(self, head_key: Key, rel_key: Key, flag: int) -> None:
+ cont = self._flags
+ for k in head_key:
+ if k not in cont:
+ cont[k] = {"flags": set(), "recursive_flags": set(), "nested": {}}
+ cont = cont[k]["nested"]
+ for k in rel_key:
+ if k in cont:
+ cont[k]["flags"].add(flag)
+ else:
+ cont[k] = {"flags": {flag}, "recursive_flags": set(), "nested": {}}
+ cont = cont[k]["nested"]
+
+ def set(self, key: Key, flag: int, *, recursive: bool) -> None: # noqa: A003
+ cont = self._flags
+ key_parent, key_stem = key[:-1], key[-1]
+ for k in key_parent:
+ if k not in cont:
+ cont[k] = {"flags": set(), "recursive_flags": set(), "nested": {}}
+ cont = cont[k]["nested"]
+ if key_stem not in cont:
+ cont[key_stem] = {"flags": set(), "recursive_flags": set(), "nested": {}}
+ cont[key_stem]["recursive_flags" if recursive else "flags"].add(flag)
+
+ def is_(self, key: Key, flag: int) -> bool:
+ if not key:
+ return False # document root has no flags
+ cont = self._flags
+ for k in key[:-1]:
+ if k not in cont:
+ return False
+ inner_cont = cont[k]
+ if flag in inner_cont["recursive_flags"]:
+ return True
+ cont = inner_cont["nested"]
+ key_stem = key[-1]
+ if key_stem in cont:
+ cont = cont[key_stem]
+ return flag in cont["flags"] or flag in cont["recursive_flags"]
+ return False
+
+
+class NestedDict:
+ def __init__(self) -> None:
+ # The parsed content of the TOML document
+ self.dict: Dict[str, Any] = {}
+
+ def get_or_create_nest(
+ self,
+ key: Key,
+ *,
+ access_lists: bool = True,
+ ) -> dict:
+ cont: Any = self.dict
+ for k in key:
+ if k not in cont:
+ cont[k] = {}
+ cont = cont[k]
+ if access_lists and isinstance(cont, list):
+ cont = cont[-1]
+ if not isinstance(cont, dict):
+ raise KeyError("There is no nest behind this key")
+ return cont
+
+ def append_nest_to_list(self, key: Key) -> None:
+ cont = self.get_or_create_nest(key[:-1])
+ last_key = key[-1]
+ if last_key in cont:
+ list_ = cont[last_key]
+ if not isinstance(list_, list):
+ raise KeyError("An object other than list found behind this key")
+ list_.append({})
+ else:
+ cont[last_key] = [{}]
+
+
+def skip_chars(src: str, pos: Pos, chars: Iterable[str]) -> Pos:
+ try:
+ while src[pos] in chars:
+ pos += 1
+ except IndexError:
+ pass
+ return pos
+
+
+def skip_until(
+ src: str,
+ pos: Pos,
+ expect: str,
+ *,
+ error_on: FrozenSet[str],
+ error_on_eof: bool,
+) -> Pos:
+ try:
+ new_pos = src.index(expect, pos)
+ except ValueError:
+ new_pos = len(src)
+ if error_on_eof:
+ raise suffixed_err(src, new_pos, f'Expected "{expect!r}"')
+
+ bad_chars = error_on.intersection(src[pos:new_pos])
+ if bad_chars:
+ bad_char = next(iter(bad_chars))
+ bad_pos = src.index(bad_char, pos)
+ raise suffixed_err(src, bad_pos, f'Found invalid character "{bad_char!r}"')
+ return new_pos
+
+
+def skip_comment(src: str, pos: Pos) -> Pos:
+ try:
+ char: Optional[str] = src[pos]
+ except IndexError:
+ char = None
+ if char == "#":
+ return skip_until(
+ src, pos + 1, "\n", error_on=ILLEGAL_COMMENT_CHARS, error_on_eof=False
+ )
+ return pos
+
+
+def skip_comments_and_array_ws(src: str, pos: Pos) -> Pos:
+ while True:
+ pos_before_skip = pos
+ pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE)
+ pos = skip_comment(src, pos)
+ if pos == pos_before_skip:
+ return pos
+
+
+def create_dict_rule(src: str, pos: Pos, state: State) -> Pos:
+ pos += 1 # Skip "["
+ pos = skip_chars(src, pos, TOML_WS)
+ pos, key = parse_key(src, pos)
+
+ if state.flags.is_(key, Flags.EXPLICIT_NEST) or state.flags.is_(key, Flags.FROZEN):
+ raise suffixed_err(src, pos, f"Can not declare {key} twice")
+ state.flags.set(key, Flags.EXPLICIT_NEST, recursive=False)
+ try:
+ state.out.get_or_create_nest(key)
+ except KeyError:
+ raise suffixed_err(src, pos, "Can not overwrite a value")
+ state.header_namespace = key
+
+ if src[pos : pos + 1] != "]":
+ raise suffixed_err(src, pos, 'Expected "]" at the end of a table declaration')
+ return pos + 1
+
+
+def create_list_rule(src: str, pos: Pos, state: State) -> Pos:
+ pos += 2 # Skip "[["
+ pos = skip_chars(src, pos, TOML_WS)
+ pos, key = parse_key(src, pos)
+
+ if state.flags.is_(key, Flags.FROZEN):
+ raise suffixed_err(src, pos, f"Can not mutate immutable namespace {key}")
+ # Free the namespace now that it points to another empty list item...
+ state.flags.unset_all(key)
+ # ...but this key precisely is still prohibited from table declaration
+ state.flags.set(key, Flags.EXPLICIT_NEST, recursive=False)
+ try:
+ state.out.append_nest_to_list(key)
+ except KeyError:
+ raise suffixed_err(src, pos, "Can not overwrite a value")
+ state.header_namespace = key
+
+ end_marker = src[pos : pos + 2]
+ if end_marker != "]]":
+ raise suffixed_err(
+ src,
+ pos,
+ f'Found "{end_marker!r}" at the end of an array declaration.'
+ ' Expected "]]"',
+ )
+ return pos + 2
+
+
+def key_value_rule(src: str, pos: Pos, state: State, parse_float: ParseFloat) -> Pos:
+ pos, key, value = parse_key_value_pair(src, pos, parse_float)
+ key_parent, key_stem = key[:-1], key[-1]
+ abs_key_parent = state.header_namespace + key_parent
+
+ if state.flags.is_(abs_key_parent, Flags.FROZEN):
+ raise suffixed_err(
+ src, pos, f"Can not mutate immutable namespace {abs_key_parent}"
+ )
+ # Containers in the relative path can't be opened with the table syntax after this
+ state.flags.set_for_relative_key(state.header_namespace, key, Flags.EXPLICIT_NEST)
+ try:
+ nest = state.out.get_or_create_nest(abs_key_parent)
+ except KeyError:
+ raise suffixed_err(src, pos, "Can not overwrite a value")
+ if key_stem in nest:
+ raise suffixed_err(src, pos, "Can not overwrite a value")
+ # Mark inline table and array namespaces recursively immutable
+ if isinstance(value, (dict, list)):
+ abs_key = state.header_namespace + key
+ state.flags.set(abs_key, Flags.FROZEN, recursive=True)
+ nest[key_stem] = value
+ return pos
+
+
+def parse_key_value_pair(
+ src: str, pos: Pos, parse_float: ParseFloat
+) -> Tuple[Pos, Key, Any]:
+ pos, key = parse_key(src, pos)
+ try:
+ char: Optional[str] = src[pos]
+ except IndexError:
+ char = None
+ if char != "=":
+ raise suffixed_err(src, pos, 'Expected "=" after a key in a key/value pair')
+ pos += 1
+ pos = skip_chars(src, pos, TOML_WS)
+ pos, value = parse_value(src, pos, parse_float)
+ return pos, key, value
+
+
+def parse_key(src: str, pos: Pos) -> Tuple[Pos, Key]:
+ pos, key_part = parse_key_part(src, pos)
+ key = [key_part]
+ pos = skip_chars(src, pos, TOML_WS)
+ while True:
+ try:
+ char: Optional[str] = src[pos]
+ except IndexError:
+ char = None
+ if char != ".":
+ return pos, tuple(key)
+ pos += 1
+ pos = skip_chars(src, pos, TOML_WS)
+ pos, key_part = parse_key_part(src, pos)
+ key.append(key_part)
+ pos = skip_chars(src, pos, TOML_WS)
+
+
+def parse_key_part(src: str, pos: Pos) -> Tuple[Pos, str]:
+ try:
+ char: Optional[str] = src[pos]
+ except IndexError:
+ char = None
+ if char in BARE_KEY_CHARS:
+ start_pos = pos
+ pos = skip_chars(src, pos, BARE_KEY_CHARS)
+ return pos, src[start_pos:pos]
+ if char == "'":
+ return parse_literal_str(src, pos)
+ if char == '"':
+ return parse_one_line_basic_str(src, pos)
+ raise suffixed_err(src, pos, "Invalid initial character for a key part")
+
+
+def parse_one_line_basic_str(src: str, pos: Pos) -> Tuple[Pos, str]:
+ pos += 1
+ return parse_basic_str(src, pos, multiline=False)
+
+
+def parse_array(src: str, pos: Pos, parse_float: ParseFloat) -> Tuple[Pos, list]:
+ pos += 1
+ array: list = []
+
+ pos = skip_comments_and_array_ws(src, pos)
+ if src[pos : pos + 1] == "]":
+ return pos + 1, array
+ while True:
+ pos, val = parse_value(src, pos, parse_float)
+ array.append(val)
+ pos = skip_comments_and_array_ws(src, pos)
+
+ c = src[pos : pos + 1]
+ if c == "]":
+ return pos + 1, array
+ if c != ",":
+ raise suffixed_err(src, pos, "Unclosed array")
+ pos += 1
+
+ pos = skip_comments_and_array_ws(src, pos)
+ if src[pos : pos + 1] == "]":
+ return pos + 1, array
+
+
+def parse_inline_table(src: str, pos: Pos, parse_float: ParseFloat) -> Tuple[Pos, dict]:
+ pos += 1
+ nested_dict = NestedDict()
+ flags = Flags()
+
+ pos = skip_chars(src, pos, TOML_WS)
+ if src[pos : pos + 1] == "}":
+ return pos + 1, nested_dict.dict
+ while True:
+ pos, key, value = parse_key_value_pair(src, pos, parse_float)
+ key_parent, key_stem = key[:-1], key[-1]
+ if flags.is_(key, Flags.FROZEN):
+ raise suffixed_err(src, pos, f"Can not mutate immutable namespace {key}")
+ try:
+ nest = nested_dict.get_or_create_nest(key_parent, access_lists=False)
+ except KeyError:
+ raise suffixed_err(src, pos, "Can not overwrite a value")
+ if key_stem in nest:
+ raise suffixed_err(src, pos, f'Duplicate inline table key "{key_stem}"')
+ nest[key_stem] = value
+ pos = skip_chars(src, pos, TOML_WS)
+ c = src[pos : pos + 1]
+ if c == "}":
+ return pos + 1, nested_dict.dict
+ if c != ",":
+ raise suffixed_err(src, pos, "Unclosed inline table")
+ if isinstance(value, (dict, list)):
+ flags.set(key, Flags.FROZEN, recursive=True)
+ pos += 1
+ pos = skip_chars(src, pos, TOML_WS)
+
+
+def parse_basic_str_escape(
+ src: str, pos: Pos, *, multiline: bool = False
+) -> Tuple[Pos, str]:
+ escape_id = src[pos : pos + 2]
+ pos += 2
+ if multiline and escape_id in {"\\ ", "\\\t", "\\\n"}:
+ # Skip whitespace until next non-whitespace character or end of
+ # the doc. Error if non-whitespace is found before newline.
+ if escape_id != "\\\n":
+ pos = skip_chars(src, pos, TOML_WS)
+ char = src[pos : pos + 1]
+ if not char:
+ return pos, ""
+ if char != "\n":
+ raise suffixed_err(src, pos, 'Unescaped "\\" in a string')
+ pos += 1
+ pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE)
+ return pos, ""
+ if escape_id == "\\u":
+ return parse_hex_char(src, pos, 4)
+ if escape_id == "\\U":
+ return parse_hex_char(src, pos, 8)
+ try:
+ return pos, BASIC_STR_ESCAPE_REPLACEMENTS[escape_id]
+ except KeyError:
+ if len(escape_id) != 2:
+ raise suffixed_err(src, pos, "Unterminated string")
+ raise suffixed_err(src, pos, 'Unescaped "\\" in a string')
+
+
+def parse_basic_str_escape_multiline(src: str, pos: Pos) -> Tuple[Pos, str]:
+ return parse_basic_str_escape(src, pos, multiline=True)
+
+
+def parse_hex_char(src: str, pos: Pos, hex_len: int) -> Tuple[Pos, str]:
+ hex_str = src[pos : pos + hex_len]
+ if len(hex_str) != hex_len or any(c not in string.hexdigits for c in hex_str):
+ raise suffixed_err(src, pos, "Invalid hex value")
+ pos += hex_len
+ hex_int = int(hex_str, 16)
+ if not is_unicode_scalar_value(hex_int):
+ raise suffixed_err(src, pos, "Escaped character is not a Unicode scalar value")
+ return pos, chr(hex_int)
+
+
+def parse_literal_str(src: str, pos: Pos) -> Tuple[Pos, str]:
+ pos += 1 # Skip starting apostrophe
+ start_pos = pos
+ pos = skip_until(
+ src, pos, "'", error_on=ILLEGAL_LITERAL_STR_CHARS, error_on_eof=True
+ )
+ return pos + 1, src[start_pos:pos] # Skip ending apostrophe
+
+
+def parse_multiline_str(src: str, pos: Pos, *, literal: bool) -> Tuple[Pos, str]:
+ pos += 3
+ if src[pos : pos + 1] == "\n":
+ pos += 1
+
+ if literal:
+ delim = "'"
+ end_pos = skip_until(
+ src,
+ pos,
+ "'''",
+ error_on=ILLEGAL_MULTILINE_LITERAL_STR_CHARS,
+ error_on_eof=True,
+ )
+ result = src[pos:end_pos]
+ pos = end_pos + 3
+ else:
+ delim = '"'
+ pos, result = parse_basic_str(src, pos, multiline=True)
+
+ # Add at maximum two extra apostrophes/quotes if the end sequence
+ # is 4 or 5 chars long instead of just 3.
+ if src[pos : pos + 1] != delim:
+ return pos, result
+ pos += 1
+ if src[pos : pos + 1] != delim:
+ return pos, result + delim
+ pos += 1
+ return pos, result + (delim * 2)
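+
+ # Editor's sketch (illustrative, not part of the vendored tomli source):
+ # in the literal multiline string '''a''''' the closing run is five
+ # apostrophes, so the two extra apostrophes belong to the value and the
+ # parsed result is "a''".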
+
+
+def parse_basic_str(src: str, pos: Pos, *, multiline: bool) -> Tuple[Pos, str]:
+ if multiline:
+ error_on = ILLEGAL_MULTILINE_BASIC_STR_CHARS
+ parse_escapes = parse_basic_str_escape_multiline
+ else:
+ error_on = ILLEGAL_BASIC_STR_CHARS
+ parse_escapes = parse_basic_str_escape
+ result = ""
+ start_pos = pos
+ while True:
+ try:
+ char = src[pos]
+ except IndexError:
+ raise suffixed_err(src, pos, "Unterminated string")
+ if char == '"':
+ if not multiline:
+ return pos + 1, result + src[start_pos:pos]
+ if src[pos + 1 : pos + 3] == '""':
+ return pos + 3, result + src[start_pos:pos]
+ pos += 1
+ continue
+ if char == "\\":
+ result += src[start_pos:pos]
+ pos, parsed_escape = parse_escapes(src, pos)
+ result += parsed_escape
+ start_pos = pos
+ continue
+ if char in error_on:
+ raise suffixed_err(src, pos, f'Illegal character "{char!r}"')
+ pos += 1
+
+
+def parse_regex(src: str, pos: Pos, regex: "Pattern") -> Tuple[Pos, str]:
+ match = regex.match(src, pos)
+ if not match:
+ raise suffixed_err(src, pos, "Unexpected sequence")
+ return match.end(), match.group()
+
+
+def parse_value( # noqa: C901
+ src: str, pos: Pos, parse_float: ParseFloat
+) -> Tuple[Pos, Any]:
+ try:
+ char: Optional[str] = src[pos]
+ except IndexError:
+ char = None
+
+ # Basic strings
+ if char == '"':
+ if src[pos + 1 : pos + 3] == '""':
+ return parse_multiline_str(src, pos, literal=False)
+ return parse_one_line_basic_str(src, pos)
+
+ # Literal strings
+ if char == "'":
+ if src[pos + 1 : pos + 3] == "''":
+ return parse_multiline_str(src, pos, literal=True)
+ return parse_literal_str(src, pos)
+
+ # Booleans
+ if char == "t":
+ if src[pos + 1 : pos + 4] == "rue":
+ return pos + 4, True
+ if char == "f":
+ if src[pos + 1 : pos + 5] == "alse":
+ return pos + 5, False
+
+ # Dates and times
+ datetime_match = RE_DATETIME.match(src, pos)
+ if datetime_match:
+ try:
+ datetime_obj = match_to_datetime(datetime_match)
+ except ValueError:
+ raise suffixed_err(src, pos, "Invalid date or datetime")
+ return datetime_match.end(), datetime_obj
+ localtime_match = RE_LOCALTIME.match(src, pos)
+ if localtime_match:
+ return localtime_match.end(), match_to_localtime(localtime_match)
+
+ # Non-decimal integers
+ if char == "0":
+ second_char = src[pos + 1 : pos + 2]
+ if second_char == "x":
+ pos, hex_str = parse_regex(src, pos + 2, RE_HEX)
+ return pos, int(hex_str, 16)
+ if second_char == "o":
+ pos, oct_str = parse_regex(src, pos + 2, RE_OCT)
+ return pos, int(oct_str, 8)
+ if second_char == "b":
+ pos, bin_str = parse_regex(src, pos + 2, RE_BIN)
+ return pos, int(bin_str, 2)
+
+ # Decimal integers and "normal" floats.
+ # The regex will greedily match any type starting with a decimal
+ # char, so needs to be located after handling of non-decimal ints,
+ # and dates and times.
+ number_match = RE_NUMBER.match(src, pos)
+ if number_match:
+ return number_match.end(), match_to_number(number_match, parse_float)
+
+ # Arrays
+ if char == "[":
+ return parse_array(src, pos, parse_float)
+
+ # Inline tables
+ if char == "{":
+ return parse_inline_table(src, pos, parse_float)
+
+ # Special floats
+ first_three = src[pos : pos + 3]
+ if first_three in {"inf", "nan"}:
+ return pos + 3, parse_float(first_three)
+ first_four = src[pos : pos + 4]
+ if first_four in {"-inf", "+inf", "-nan", "+nan"}:
+ return pos + 4, parse_float(first_four)
+
+ raise suffixed_err(src, pos, "Invalid value")
+
+
+def suffixed_err(src: str, pos: Pos, msg: str) -> TOMLDecodeError:
+ """Return a `TOMLDecodeError` where error message is suffixed with
+ coordinates in source."""
+
+ def coord_repr(src: str, pos: Pos) -> str:
+ if pos >= len(src):
+ return "end of document"
+ line = src.count("\n", 0, pos) + 1
+ if line == 1:
+ column = pos + 1
+ else:
+ column = pos - src.rindex("\n", 0, pos)
+ return f"line {line}, column {column}"
+
+ return TOMLDecodeError(f"{msg} (at {coord_repr(src, pos)})")
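+
+ # Editor's sketch (illustrative, not part of the vendored tomli source):
+ # with src = "x = 1\nbad", a failure at pos 6 (the "b") is reported as
+ # "... (at line 2, column 1)", and a pos past the end of src is reported
+ # as "... (at end of document)".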
+
+
+def is_unicode_scalar_value(codepoint: int) -> bool:
+ return (0 <= codepoint <= 55295) or (57344 <= codepoint <= 1114111)
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/tomli/_re.py b/venv/lib/python3.9/site-packages/pip/_vendor/tomli/_re.py
new file mode 100644
index 0000000..3883fdd
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/tomli/_re.py
@@ -0,0 +1,83 @@
+from datetime import date, datetime, time, timedelta, timezone, tzinfo
+import re
+from typing import TYPE_CHECKING, Any, Optional, Union
+
+if TYPE_CHECKING:
+ from re import Match
+
+ from pip._vendor.tomli._parser import ParseFloat
+
+# E.g.
+# - 00:32:00.999999
+# - 00:32:00
+_TIME_RE_STR = r"([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])(\.[0-9]+)?"
+
+RE_HEX = re.compile(r"[0-9A-Fa-f](?:_?[0-9A-Fa-f])*")
+RE_BIN = re.compile(r"[01](?:_?[01])*")
+RE_OCT = re.compile(r"[0-7](?:_?[0-7])*")
+RE_NUMBER = re.compile(
+ r"[+-]?(?:0|[1-9](?:_?[0-9])*)" # integer
+ + r"(?:\.[0-9](?:_?[0-9])*)?" # optional fractional part
+ + r"(?:[eE][+-]?[0-9](?:_?[0-9])*)?" # optional exponent part
+)
+RE_LOCALTIME = re.compile(_TIME_RE_STR)
+RE_DATETIME = re.compile(
+ r"([0-9]{4})-(0[1-9]|1[0-2])-(0[1-9]|1[0-9]|2[0-9]|3[01])" # date, e.g. 1988-10-27
+ + r"(?:"
+ + r"[T ]"
+ + _TIME_RE_STR
+ + r"(?:(Z)|([+-])([01][0-9]|2[0-3]):([0-5][0-9]))?" # time offset
+ + r")?"
+)
+
+
+def match_to_datetime(match: "Match") -> Union[datetime, date]:
+ """Convert a `RE_DATETIME` match to `datetime.datetime` or `datetime.date`.
+
+ Raises ValueError if the match does not correspond to a valid date
+ or datetime.
+ """
+ (
+ year_str,
+ month_str,
+ day_str,
+ hour_str,
+ minute_str,
+ sec_str,
+ micros_str,
+ zulu_time,
+ offset_dir_str,
+ offset_hour_str,
+ offset_minute_str,
+ ) = match.groups()
+ year, month, day = int(year_str), int(month_str), int(day_str)
+ if hour_str is None:
+ return date(year, month, day)
+ hour, minute, sec = int(hour_str), int(minute_str), int(sec_str)
+ micros = int(micros_str[1:].ljust(6, "0")[:6]) if micros_str else 0
+ if offset_dir_str:
+ offset_dir = 1 if offset_dir_str == "+" else -1
+ tz: Optional[tzinfo] = timezone(
+ timedelta(
+ hours=offset_dir * int(offset_hour_str),
+ minutes=offset_dir * int(offset_minute_str),
+ )
+ )
+ elif zulu_time:
+ tz = timezone.utc
+ else: # local date-time
+ tz = None
+ return datetime(year, month, day, hour, minute, sec, micros, tzinfo=tz)
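+
+ # Editor's sketch (illustrative, not part of the vendored tomli source):
+ #   m = RE_DATETIME.match("1988-10-27T01:02:03.5+02:00")
+ #   match_to_datetime(m)
+ # gives datetime(1988, 10, 27, 1, 2, 3, 500000,
+ #                tzinfo=timezone(timedelta(hours=2))),
+ # while a date-only match such as "1988-10-27" gives date(1988, 10, 27).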
+
+
+def match_to_localtime(match: "Match") -> time:
+ hour_str, minute_str, sec_str, micros_str = match.groups()
+ micros = int(micros_str[1:].ljust(6, "0")[:6]) if micros_str else 0
+ return time(int(hour_str), int(minute_str), int(sec_str), micros)
+
+
+def match_to_number(match: "Match", parse_float: "ParseFloat") -> Any:
+ match_str = match.group()
+ if "." in match_str or "e" in match_str or "E" in match_str:
+ return parse_float(match_str)
+ return int(match_str)
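+
+ # Editor's sketch (illustrative, not part of the vendored tomli source):
+ # match_to_number keeps ints and floats apart by checking the matched text
+ # for ".", "e" or "E":
+ #   match_to_number(RE_NUMBER.match("1_000"), float) -> 1000
+ #   match_to_number(RE_NUMBER.match("1e3"), float)   -> 1000.0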
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/typing_extensions.py b/venv/lib/python3.9/site-packages/pip/_vendor/typing_extensions.py
new file mode 100644
index 0000000..9f1c7aa
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/typing_extensions.py
@@ -0,0 +1,2296 @@
+import abc
+import collections
+import collections.abc
+import operator
+import sys
+import typing
+
+# After PEP 560, internal typing API was substantially reworked.
+# This is especially important for Protocol class which uses internal APIs
+# quite extensively.
+PEP_560 = sys.version_info[:3] >= (3, 7, 0)
+
+if PEP_560:
+ GenericMeta = type
+else:
+ # 3.6
+ from typing import GenericMeta, _type_vars # noqa
+
+# The two functions below are copies of typing internal helpers.
+# They are needed by _ProtocolMeta
+
+
+def _no_slots_copy(dct):
+ dict_copy = dict(dct)
+ if '__slots__' in dict_copy:
+ for slot in dict_copy['__slots__']:
+ dict_copy.pop(slot, None)
+ return dict_copy
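+
+ # Editor's sketch (illustrative, not part of the vendored source): copying
+ # a namespace such as {"__slots__": ("x",), "x": <slot descriptor>, "f": f}
+ # drops the "x" slot entry while keeping "__slots__" and "f" themselves.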
+
+
+def _check_generic(cls, parameters):
+ if not cls.__parameters__:
+ raise TypeError(f"{cls} is not a generic class")
+ alen = len(parameters)
+ elen = len(cls.__parameters__)
+ if alen != elen:
+ raise TypeError(f"Too {'many' if alen > elen else 'few'} arguments for {cls};"
+ f" actual {alen}, expected {elen}")
+
+
+# Please keep __all__ alphabetized within each category.
+__all__ = [
+ # Super-special typing primitives.
+ 'ClassVar',
+ 'Concatenate',
+ 'Final',
+ 'ParamSpec',
+ 'Self',
+ 'Type',
+
+ # ABCs (from collections.abc).
+ 'Awaitable',
+ 'AsyncIterator',
+ 'AsyncIterable',
+ 'Coroutine',
+ 'AsyncGenerator',
+ 'AsyncContextManager',
+ 'ChainMap',
+
+ # Concrete collection types.
+ 'ContextManager',
+ 'Counter',
+ 'Deque',
+ 'DefaultDict',
+ 'OrderedDict',
+ 'TypedDict',
+
+ # Structural checks, a.k.a. protocols.
+ 'SupportsIndex',
+
+ # One-off things.
+ 'Annotated',
+ 'final',
+ 'IntVar',
+ 'Literal',
+ 'NewType',
+ 'overload',
+ 'Protocol',
+ 'runtime',
+ 'runtime_checkable',
+ 'Text',
+ 'TypeAlias',
+ 'TypeGuard',
+ 'TYPE_CHECKING',
+]
+
+if PEP_560:
+ __all__.extend(["get_args", "get_origin", "get_type_hints"])
+
+# 3.6.2+
+if hasattr(typing, 'NoReturn'):
+ NoReturn = typing.NoReturn
+# 3.6.0-3.6.1
+else:
+ class _NoReturn(typing._FinalTypingBase, _root=True):
+ """Special type indicating functions that never return.
+ Example::
+
+ from typing import NoReturn
+
+ def stop() -> NoReturn:
+ raise Exception('no way')
+
+ This type is invalid in other positions, e.g., ``List[NoReturn]``
+ will fail in static type checkers.
+ """
+ __slots__ = ()
+
+ def __instancecheck__(self, obj):
+ raise TypeError("NoReturn cannot be used with isinstance().")
+
+ def __subclasscheck__(self, cls):
+ raise TypeError("NoReturn cannot be used with issubclass().")
+
+ NoReturn = _NoReturn(_root=True)
+
+# Some unconstrained type variables. These are used by the container types.
+# (These are not for export.)
+T = typing.TypeVar('T') # Any type.
+KT = typing.TypeVar('KT') # Key type.
+VT = typing.TypeVar('VT') # Value type.
+T_co = typing.TypeVar('T_co', covariant=True) # Any type covariant containers.
+T_contra = typing.TypeVar('T_contra', contravariant=True) # Ditto contravariant.
+
+ClassVar = typing.ClassVar
+
+# On older versions of typing there is an internal class named "Final".
+# 3.8+
+if hasattr(typing, 'Final') and sys.version_info[:2] >= (3, 7):
+ Final = typing.Final
+# 3.7
+elif sys.version_info[:2] >= (3, 7):
+ class _FinalForm(typing._SpecialForm, _root=True):
+
+ def __repr__(self):
+ return 'typing_extensions.' + self._name
+
+ def __getitem__(self, parameters):
+ item = typing._type_check(parameters,
+ f'{self._name} accepts only single type')
+ return typing._GenericAlias(self, (item,))
+
+ Final = _FinalForm('Final',
+ doc="""A special typing construct to indicate that a name
+ cannot be re-assigned or overridden in a subclass.
+ For example:
+
+ MAX_SIZE: Final = 9000
+ MAX_SIZE += 1 # Error reported by type checker
+
+ class Connection:
+ TIMEOUT: Final[int] = 10
+ class FastConnector(Connection):
+ TIMEOUT = 1 # Error reported by type checker
+
+ There is no runtime checking of these properties.""")
+# 3.6
+else:
+ class _Final(typing._FinalTypingBase, _root=True):
+ """A special typing construct to indicate that a name
+ cannot be re-assigned or overridden in a subclass.
+ For example:
+
+ MAX_SIZE: Final = 9000
+ MAX_SIZE += 1 # Error reported by type checker
+
+ class Connection:
+ TIMEOUT: Final[int] = 10
+ class FastConnector(Connection):
+ TIMEOUT = 1 # Error reported by type checker
+
+ There is no runtime checking of these properties.
+ """
+
+ __slots__ = ('__type__',)
+
+ def __init__(self, tp=None, **kwds):
+ self.__type__ = tp
+
+ def __getitem__(self, item):
+ cls = type(self)
+ if self.__type__ is None:
+ return cls(typing._type_check(item,
+ f'{cls.__name__[1:]} accepts only single type.'),
+ _root=True)
+ raise TypeError(f'{cls.__name__[1:]} cannot be further subscripted')
+
+ def _eval_type(self, globalns, localns):
+ new_tp = typing._eval_type(self.__type__, globalns, localns)
+ if new_tp == self.__type__:
+ return self
+ return type(self)(new_tp, _root=True)
+
+ def __repr__(self):
+ r = super().__repr__()
+ if self.__type__ is not None:
+ r += f'[{typing._type_repr(self.__type__)}]'
+ return r
+
+ def __hash__(self):
+ return hash((type(self).__name__, self.__type__))
+
+ def __eq__(self, other):
+ if not isinstance(other, _Final):
+ return NotImplemented
+ if self.__type__ is not None:
+ return self.__type__ == other.__type__
+ return self is other
+
+ Final = _Final(_root=True)
+
+
+# 3.8+
+if hasattr(typing, 'final'):
+ final = typing.final
+# 3.6-3.7
+else:
+ def final(f):
+ """This decorator can be used to indicate to type checkers that
+ the decorated method cannot be overridden, and decorated class
+ cannot be subclassed. For example:
+
+ class Base:
+ @final
+ def done(self) -> None:
+ ...
+ class Sub(Base):
+ def done(self) -> None: # Error reported by type checker
+ ...
+ @final
+ class Leaf:
+ ...
+ class Other(Leaf): # Error reported by type checker
+ ...
+
+ There is no runtime checking of these properties.
+ """
+ return f
+
+
+def IntVar(name):
+ return typing.TypeVar(name)
+
+
+# 3.8+:
+if hasattr(typing, 'Literal'):
+ Literal = typing.Literal
+# 3.7:
+elif sys.version_info[:2] >= (3, 7):
+ class _LiteralForm(typing._SpecialForm, _root=True):
+
+ def __repr__(self):
+ return 'typing_extensions.' + self._name
+
+ def __getitem__(self, parameters):
+ return typing._GenericAlias(self, parameters)
+
+ Literal = _LiteralForm('Literal',
+ doc="""A type that can be used to indicate to type checkers
+ that the corresponding value has a value literally equivalent
+ to the provided parameter. For example:
+
+ var: Literal[4] = 4
+
+ The type checker understands that 'var' is literally equal to
+ the value 4 and no other value.
+
+ Literal[...] cannot be subclassed. There is no runtime
+ checking verifying that the parameter is actually a value
+ instead of a type.""")
+# 3.6:
+else:
+ class _Literal(typing._FinalTypingBase, _root=True):
+ """A type that can be used to indicate to type checkers that the
+ corresponding value has a value literally equivalent to the
+ provided parameter. For example:
+
+ var: Literal[4] = 4
+
+ The type checker understands that 'var' is literally equal to the
+ value 4 and no other value.
+
+ Literal[...] cannot be subclassed. There is no runtime checking
+ verifying that the parameter is actually a value instead of a type.
+ """
+
+ __slots__ = ('__values__',)
+
+ def __init__(self, values=None, **kwds):
+ self.__values__ = values
+
+ def __getitem__(self, values):
+ cls = type(self)
+ if self.__values__ is None:
+ if not isinstance(values, tuple):
+ values = (values,)
+ return cls(values, _root=True)
+ raise TypeError(f'{cls.__name__[1:]} cannot be further subscripted')
+
+ def _eval_type(self, globalns, localns):
+ return self
+
+ def __repr__(self):
+ r = super().__repr__()
+ if self.__values__ is not None:
+ r += f'[{", ".join(map(typing._type_repr, self.__values__))}]'
+ return r
+
+ def __hash__(self):
+ return hash((type(self).__name__, self.__values__))
+
+ def __eq__(self, other):
+ if not isinstance(other, _Literal):
+ return NotImplemented
+ if self.__values__ is not None:
+ return self.__values__ == other.__values__
+ return self is other
+
+ Literal = _Literal(_root=True)
+
+
+_overload_dummy = typing._overload_dummy # noqa
+overload = typing.overload
+
+
+# This is not a real generic class. Don't use outside annotations.
+Type = typing.Type
+
+# Various ABCs mimicking those in collections.abc.
+# A few are simply re-exported for completeness.
+
+
+class _ExtensionsGenericMeta(GenericMeta):
+ def __subclasscheck__(self, subclass):
+ """This mimics a more modern GenericMeta.__subclasscheck__() logic
+ (that does not have problems with recursion) to work around interactions
+ between collections, typing, and typing_extensions on older
+ versions of Python, see https://github.com/python/typing/issues/501.
+ """
+ if self.__origin__ is not None:
+ if sys._getframe(1).f_globals['__name__'] not in ['abc', 'functools']:
+ raise TypeError("Parameterized generics cannot be used with class "
+ "or instance checks")
+ return False
+ if not self.__extra__:
+ return super().__subclasscheck__(subclass)
+ res = self.__extra__.__subclasshook__(subclass)
+ if res is not NotImplemented:
+ return res
+ if self.__extra__ in subclass.__mro__:
+ return True
+ for scls in self.__extra__.__subclasses__():
+ if isinstance(scls, GenericMeta):
+ continue
+ if issubclass(subclass, scls):
+ return True
+ return False
+
+
+Awaitable = typing.Awaitable
+Coroutine = typing.Coroutine
+AsyncIterable = typing.AsyncIterable
+AsyncIterator = typing.AsyncIterator
+
+# 3.6.1+
+if hasattr(typing, 'Deque'):
+ Deque = typing.Deque
+# 3.6.0
+else:
+ class Deque(collections.deque, typing.MutableSequence[T],
+ metaclass=_ExtensionsGenericMeta,
+ extra=collections.deque):
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwds):
+ if cls._gorg is Deque:
+ return collections.deque(*args, **kwds)
+ return typing._generic_new(collections.deque, cls, *args, **kwds)
+
+ContextManager = typing.ContextManager
+# 3.6.2+
+if hasattr(typing, 'AsyncContextManager'):
+ AsyncContextManager = typing.AsyncContextManager
+# 3.6.0-3.6.1
+else:
+ from _collections_abc import _check_methods as _check_methods_in_mro # noqa
+
+ class AsyncContextManager(typing.Generic[T_co]):
+ __slots__ = ()
+
+ async def __aenter__(self):
+ return self
+
+ @abc.abstractmethod
+ async def __aexit__(self, exc_type, exc_value, traceback):
+ return None
+
+ @classmethod
+ def __subclasshook__(cls, C):
+ if cls is AsyncContextManager:
+ return _check_methods_in_mro(C, "__aenter__", "__aexit__")
+ return NotImplemented
+
+DefaultDict = typing.DefaultDict
+
+# 3.7.2+
+if hasattr(typing, 'OrderedDict'):
+ OrderedDict = typing.OrderedDict
+# 3.7.0-3.7.2
+elif (3, 7, 0) <= sys.version_info[:3] < (3, 7, 2):
+ OrderedDict = typing._alias(collections.OrderedDict, (KT, VT))
+# 3.6
+else:
+ class OrderedDict(collections.OrderedDict, typing.MutableMapping[KT, VT],
+ metaclass=_ExtensionsGenericMeta,
+ extra=collections.OrderedDict):
+
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwds):
+ if cls._gorg is OrderedDict:
+ return collections.OrderedDict(*args, **kwds)
+ return typing._generic_new(collections.OrderedDict, cls, *args, **kwds)
+
+# 3.6.2+
+if hasattr(typing, 'Counter'):
+ Counter = typing.Counter
+# 3.6.0-3.6.1
+else:
+ class Counter(collections.Counter,
+ typing.Dict[T, int],
+ metaclass=_ExtensionsGenericMeta, extra=collections.Counter):
+
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwds):
+ if cls._gorg is Counter:
+ return collections.Counter(*args, **kwds)
+ return typing._generic_new(collections.Counter, cls, *args, **kwds)
+
+# 3.6.1+
+if hasattr(typing, 'ChainMap'):
+ ChainMap = typing.ChainMap
+elif hasattr(collections, 'ChainMap'):
+ class ChainMap(collections.ChainMap, typing.MutableMapping[KT, VT],
+ metaclass=_ExtensionsGenericMeta,
+ extra=collections.ChainMap):
+
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwds):
+ if cls._gorg is ChainMap:
+ return collections.ChainMap(*args, **kwds)
+ return typing._generic_new(collections.ChainMap, cls, *args, **kwds)
+
+# 3.6.1+
+if hasattr(typing, 'AsyncGenerator'):
+ AsyncGenerator = typing.AsyncGenerator
+# 3.6.0
+else:
+ class AsyncGenerator(AsyncIterator[T_co], typing.Generic[T_co, T_contra],
+ metaclass=_ExtensionsGenericMeta,
+ extra=collections.abc.AsyncGenerator):
+ __slots__ = ()
+
+NewType = typing.NewType
+Text = typing.Text
+TYPE_CHECKING = typing.TYPE_CHECKING
+
+
+def _gorg(cls):
+ """This function exists for compatibility with old typing versions."""
+ assert isinstance(cls, GenericMeta)
+ if hasattr(cls, '_gorg'):
+ return cls._gorg
+ while cls.__origin__ is not None:
+ cls = cls.__origin__
+ return cls
+
+
+_PROTO_WHITELIST = ['Callable', 'Awaitable',
+ 'Iterable', 'Iterator', 'AsyncIterable', 'AsyncIterator',
+ 'Hashable', 'Sized', 'Container', 'Collection', 'Reversible',
+ 'ContextManager', 'AsyncContextManager']
+
+
+def _get_protocol_attrs(cls):
+ attrs = set()
+ for base in cls.__mro__[:-1]: # without object
+ if base.__name__ in ('Protocol', 'Generic'):
+ continue
+ annotations = getattr(base, '__annotations__', {})
+ for attr in list(base.__dict__.keys()) + list(annotations.keys()):
+ if (not attr.startswith('_abc_') and attr not in (
+ '__abstractmethods__', '__annotations__', '__weakref__',
+ '_is_protocol', '_is_runtime_protocol', '__dict__',
+ '__args__', '__slots__',
+ '__next_in_mro__', '__parameters__', '__origin__',
+ '__orig_bases__', '__extra__', '__tree_hash__',
+ '__doc__', '__subclasshook__', '__init__', '__new__',
+ '__module__', '_MutableMapping__marker', '_gorg')):
+ attrs.add(attr)
+ return attrs
+
+
+def _is_callable_members_only(cls):
+ return all(callable(getattr(cls, attr, None)) for attr in _get_protocol_attrs(cls))
+
+
+# 3.8+
+if hasattr(typing, 'Protocol'):
+ Protocol = typing.Protocol
+# 3.7
+elif PEP_560:
+ from typing import _collect_type_vars # noqa
+
+ def _no_init(self, *args, **kwargs):
+ if type(self)._is_protocol:
+ raise TypeError('Protocols cannot be instantiated')
+
+ class _ProtocolMeta(abc.ABCMeta):
+ # This metaclass is a bit unfortunate and exists only because of the lack
+ # of __instancehook__.
+ def __instancecheck__(cls, instance):
+ # We need this method for situations where attributes are
+ # assigned in __init__.
+ if ((not getattr(cls, '_is_protocol', False) or
+ _is_callable_members_only(cls)) and
+ issubclass(instance.__class__, cls)):
+ return True
+ if cls._is_protocol:
+ if all(hasattr(instance, attr) and
+ (not callable(getattr(cls, attr, None)) or
+ getattr(instance, attr) is not None)
+ for attr in _get_protocol_attrs(cls)):
+ return True
+ return super().__instancecheck__(instance)
+
+ class Protocol(metaclass=_ProtocolMeta):
+ # There is quite a lot of overlapping code with typing.Generic.
+ # Unfortunately it is hard to avoid this while these live in two different
+ # modules. The duplicated code will be removed when Protocol is moved to typing.
+ """Base class for protocol classes. Protocol classes are defined as::
+
+ class Proto(Protocol):
+ def meth(self) -> int:
+ ...
+
+ Such classes are primarily used with static type checkers that recognize
+ structural subtyping (static duck-typing), for example::
+
+ class C:
+ def meth(self) -> int:
+ return 0
+
+ def func(x: Proto) -> int:
+ return x.meth()
+
+ func(C()) # Passes static type check
+
+ See PEP 544 for details. Protocol classes decorated with
+ @typing_extensions.runtime act as simple-minded runtime protocols that check
+ only the presence of given attributes, ignoring their type signatures.
+
+ Protocol classes can be generic, they are defined as::
+
+ class GenProto(Protocol[T]):
+ def meth(self) -> T:
+ ...
+ """
+ __slots__ = ()
+ _is_protocol = True
+
+ def __new__(cls, *args, **kwds):
+ if cls is Protocol:
+ raise TypeError("Type Protocol cannot be instantiated; "
+ "it can only be used as a base class")
+ return super().__new__(cls)
+
+ @typing._tp_cache
+ def __class_getitem__(cls, params):
+ if not isinstance(params, tuple):
+ params = (params,)
+ if not params and cls is not typing.Tuple:
+ raise TypeError(
+ f"Parameter list to {cls.__qualname__}[...] cannot be empty")
+ msg = "Parameters to generic types must be types."
+ params = tuple(typing._type_check(p, msg) for p in params) # noqa
+ if cls is Protocol:
+ # Generic can only be subscripted with unique type variables.
+ if not all(isinstance(p, typing.TypeVar) for p in params):
+ i = 0
+ while isinstance(params[i], typing.TypeVar):
+ i += 1
+ raise TypeError(
+ "Parameters to Protocol[...] must all be type variables."
+ f" Parameter {i + 1} is {params[i]}")
+ if len(set(params)) != len(params):
+ raise TypeError(
+ "Parameters to Protocol[...] must all be unique")
+ else:
+ # Subscripting a regular Generic subclass.
+ _check_generic(cls, params)
+ return typing._GenericAlias(cls, params)
+
+ def __init_subclass__(cls, *args, **kwargs):
+ tvars = []
+ if '__orig_bases__' in cls.__dict__:
+ error = typing.Generic in cls.__orig_bases__
+ else:
+ error = typing.Generic in cls.__bases__
+ if error:
+ raise TypeError("Cannot inherit from plain Generic")
+ if '__orig_bases__' in cls.__dict__:
+ tvars = _collect_type_vars(cls.__orig_bases__)
+ # Look for Generic[T1, ..., Tn] or Protocol[T1, ..., Tn].
+ # If found, tvars must be a subset of it.
+ # If not found, tvars is it.
+ # Also check for and reject plain Generic,
+ # and reject multiple Generic[...] and/or Protocol[...].
+ gvars = None
+ for base in cls.__orig_bases__:
+ if (isinstance(base, typing._GenericAlias) and
+ base.__origin__ in (typing.Generic, Protocol)):
+ # for error messages
+ the_base = base.__origin__.__name__
+ if gvars is not None:
+ raise TypeError(
+ "Cannot inherit from Generic[...]"
+ " and/or Protocol[...] multiple types.")
+ gvars = base.__parameters__
+ if gvars is None:
+ gvars = tvars
+ else:
+ tvarset = set(tvars)
+ gvarset = set(gvars)
+ if not tvarset <= gvarset:
+ s_vars = ', '.join(str(t) for t in tvars if t not in gvarset)
+ s_args = ', '.join(str(g) for g in gvars)
+ raise TypeError(f"Some type variables ({s_vars}) are"
+ f" not listed in {the_base}[{s_args}]")
+ tvars = gvars
+ cls.__parameters__ = tuple(tvars)
+
+ # Determine if this is a protocol or a concrete subclass.
+ if not cls.__dict__.get('_is_protocol', None):
+ cls._is_protocol = any(b is Protocol for b in cls.__bases__)
+
+ # Set (or override) the protocol subclass hook.
+ def _proto_hook(other):
+ if not cls.__dict__.get('_is_protocol', None):
+ return NotImplemented
+ if not getattr(cls, '_is_runtime_protocol', False):
+ if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']:
+ return NotImplemented
+ raise TypeError("Instance and class checks can only be used with"
+ " @runtime protocols")
+ if not _is_callable_members_only(cls):
+ if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']:
+ return NotImplemented
+ raise TypeError("Protocols with non-method members"
+ " don't support issubclass()")
+ if not isinstance(other, type):
+ # Same error as for issubclass(1, int)
+ raise TypeError('issubclass() arg 1 must be a class')
+ for attr in _get_protocol_attrs(cls):
+ for base in other.__mro__:
+ if attr in base.__dict__:
+ if base.__dict__[attr] is None:
+ return NotImplemented
+ break
+ annotations = getattr(base, '__annotations__', {})
+ if (isinstance(annotations, typing.Mapping) and
+ attr in annotations and
+ isinstance(other, _ProtocolMeta) and
+ other._is_protocol):
+ break
+ else:
+ return NotImplemented
+ return True
+ if '__subclasshook__' not in cls.__dict__:
+ cls.__subclasshook__ = _proto_hook
+
+ # We have nothing more to do for non-protocols.
+ if not cls._is_protocol:
+ return
+
+ # Check consistency of bases.
+ for base in cls.__bases__:
+ if not (base in (object, typing.Generic) or
+ base.__module__ == 'collections.abc' and
+ base.__name__ in _PROTO_WHITELIST or
+ isinstance(base, _ProtocolMeta) and base._is_protocol):
+ raise TypeError('Protocols can only inherit from other'
+ f' protocols, got {repr(base)}')
+ cls.__init__ = _no_init
+# 3.6
+else:
+ from typing import _next_in_mro, _type_check # noqa
+
+ def _no_init(self, *args, **kwargs):
+ if type(self)._is_protocol:
+ raise TypeError('Protocols cannot be instantiated')
+
+ class _ProtocolMeta(GenericMeta):
+ """Internal metaclass for Protocol.
+
+ This exists so Protocol classes can be generic without deriving
+ from Generic.
+ """
+ def __new__(cls, name, bases, namespace,
+ tvars=None, args=None, origin=None, extra=None, orig_bases=None):
+ # This is just a version copied from GenericMeta.__new__ that
+ # includes "Protocol" special treatment. (Comments removed for brevity.)
+ assert extra is None # Protocols should not have extra
+ if tvars is not None:
+ assert origin is not None
+ assert all(isinstance(t, typing.TypeVar) for t in tvars), tvars
+ else:
+ tvars = _type_vars(bases)
+ gvars = None
+ for base in bases:
+ if base is typing.Generic:
+ raise TypeError("Cannot inherit from plain Generic")
+ if (isinstance(base, GenericMeta) and
+ base.__origin__ in (typing.Generic, Protocol)):
+ if gvars is not None:
+ raise TypeError(
+ "Cannot inherit from Generic[...] or"
+ " Protocol[...] multiple times.")
+ gvars = base.__parameters__
+ if gvars is None:
+ gvars = tvars
+ else:
+ tvarset = set(tvars)
+ gvarset = set(gvars)
+ if not tvarset <= gvarset:
+ s_vars = ", ".join(str(t) for t in tvars if t not in gvarset)
+ s_args = ", ".join(str(g) for g in gvars)
+ cls_name = "Generic" if any(b.__origin__ is typing.Generic
+ for b in bases) else "Protocol"
+ raise TypeError(f"Some type variables ({s_vars}) are"
+ f" not listed in {cls_name}[{s_args}]")
+ tvars = gvars
+
+ initial_bases = bases
+ if (extra is not None and type(extra) is abc.ABCMeta and
+ extra not in bases):
+ bases = (extra,) + bases
+ bases = tuple(_gorg(b) if isinstance(b, GenericMeta) else b
+ for b in bases)
+ if any(isinstance(b, GenericMeta) and b is not typing.Generic for b in bases):
+ bases = tuple(b for b in bases if b is not typing.Generic)
+ namespace.update({'__origin__': origin, '__extra__': extra})
+ self = super(GenericMeta, cls).__new__(cls, name, bases, namespace,
+ _root=True)
+ super(GenericMeta, self).__setattr__('_gorg',
+ self if not origin else
+ _gorg(origin))
+ self.__parameters__ = tvars
+ self.__args__ = tuple(... if a is typing._TypingEllipsis else
+ () if a is typing._TypingEmpty else
+ a for a in args) if args else None
+ self.__next_in_mro__ = _next_in_mro(self)
+ if orig_bases is None:
+ self.__orig_bases__ = initial_bases
+ elif origin is not None:
+ self._abc_registry = origin._abc_registry
+ self._abc_cache = origin._abc_cache
+ if hasattr(self, '_subs_tree'):
+ self.__tree_hash__ = (hash(self._subs_tree()) if origin else
+ super(GenericMeta, self).__hash__())
+ return self
+
+ def __init__(cls, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ if not cls.__dict__.get('_is_protocol', None):
+ cls._is_protocol = any(b is Protocol or
+ isinstance(b, _ProtocolMeta) and
+ b.__origin__ is Protocol
+ for b in cls.__bases__)
+ if cls._is_protocol:
+ for base in cls.__mro__[1:]:
+ if not (base in (object, typing.Generic) or
+ base.__module__ == 'collections.abc' and
+ base.__name__ in _PROTO_WHITELIST or
+ isinstance(base, typing.TypingMeta) and base._is_protocol or
+ isinstance(base, GenericMeta) and
+ base.__origin__ is typing.Generic):
+ raise TypeError(f'Protocols can only inherit from other'
+ f' protocols, got {repr(base)}')
+
+ cls.__init__ = _no_init
+
+ def _proto_hook(other):
+ if not cls.__dict__.get('_is_protocol', None):
+ return NotImplemented
+ if not isinstance(other, type):
+ # Same error as for issubclass(1, int)
+ raise TypeError('issubclass() arg 1 must be a class')
+ for attr in _get_protocol_attrs(cls):
+ for base in other.__mro__:
+ if attr in base.__dict__:
+ if base.__dict__[attr] is None:
+ return NotImplemented
+ break
+ annotations = getattr(base, '__annotations__', {})
+ if (isinstance(annotations, typing.Mapping) and
+ attr in annotations and
+ isinstance(other, _ProtocolMeta) and
+ other._is_protocol):
+ break
+ else:
+ return NotImplemented
+ return True
+ if '__subclasshook__' not in cls.__dict__:
+ cls.__subclasshook__ = _proto_hook
+
+ def __instancecheck__(self, instance):
+ # We need this method for situations where attributes are
+ # assigned in __init__.
+ if ((not getattr(self, '_is_protocol', False) or
+ _is_callable_members_only(self)) and
+ issubclass(instance.__class__, self)):
+ return True
+ if self._is_protocol:
+ if all(hasattr(instance, attr) and
+ (not callable(getattr(self, attr, None)) or
+ getattr(instance, attr) is not None)
+ for attr in _get_protocol_attrs(self)):
+ return True
+ return super(GenericMeta, self).__instancecheck__(instance)
+
+ def __subclasscheck__(self, cls):
+ if self.__origin__ is not None:
+ if sys._getframe(1).f_globals['__name__'] not in ['abc', 'functools']:
+ raise TypeError("Parameterized generics cannot be used with class "
+ "or instance checks")
+ return False
+ if (self.__dict__.get('_is_protocol', None) and
+ not self.__dict__.get('_is_runtime_protocol', None)):
+ if sys._getframe(1).f_globals['__name__'] in ['abc',
+ 'functools',
+ 'typing']:
+ return False
+ raise TypeError("Instance and class checks can only be used with"
+ " @runtime protocols")
+ if (self.__dict__.get('_is_runtime_protocol', None) and
+ not _is_callable_members_only(self)):
+ if sys._getframe(1).f_globals['__name__'] in ['abc',
+ 'functools',
+ 'typing']:
+ return super(GenericMeta, self).__subclasscheck__(cls)
+ raise TypeError("Protocols with non-method members"
+ " don't support issubclass()")
+ return super(GenericMeta, self).__subclasscheck__(cls)
+
+ @typing._tp_cache
+ def __getitem__(self, params):
+ # We also need to copy this from GenericMeta.__getitem__ to get
+ # special treatment of "Protocol". (Comments removed for brevity.)
+ if not isinstance(params, tuple):
+ params = (params,)
+ if not params and _gorg(self) is not typing.Tuple:
+ raise TypeError(
+ f"Parameter list to {self.__qualname__}[...] cannot be empty")
+ msg = "Parameters to generic types must be types."
+ params = tuple(_type_check(p, msg) for p in params)
+ if self in (typing.Generic, Protocol):
+ if not all(isinstance(p, typing.TypeVar) for p in params):
+ raise TypeError(
+ f"Parameters to {repr(self)}[...] must all be type variables")
+ if len(set(params)) != len(params):
+ raise TypeError(
+ f"Parameters to {repr(self)}[...] must all be unique")
+ tvars = params
+ args = params
+ elif self in (typing.Tuple, typing.Callable):
+ tvars = _type_vars(params)
+ args = params
+ elif self.__origin__ in (typing.Generic, Protocol):
+ raise TypeError(f"Cannot subscript already-subscripted {repr(self)}")
+ else:
+ _check_generic(self, params)
+ tvars = _type_vars(params)
+ args = params
+
+ prepend = (self,) if self.__origin__ is None else ()
+ return self.__class__(self.__name__,
+ prepend + self.__bases__,
+ _no_slots_copy(self.__dict__),
+ tvars=tvars,
+ args=args,
+ origin=self,
+ extra=self.__extra__,
+ orig_bases=self.__orig_bases__)
+
+ class Protocol(metaclass=_ProtocolMeta):
+ """Base class for protocol classes. Protocol classes are defined as::
+
+ class Proto(Protocol):
+ def meth(self) -> int:
+ ...
+
+ Such classes are primarily used with static type checkers that recognize
+ structural subtyping (static duck-typing), for example::
+
+ class C:
+ def meth(self) -> int:
+ return 0
+
+ def func(x: Proto) -> int:
+ return x.meth()
+
+ func(C()) # Passes static type check
+
+ See PEP 544 for details. Protocol classes decorated with
+ @typing_extensions.runtime act as simple-minded runtime protocols that check
+ only the presence of given attributes, ignoring their type signatures.
+
+ Protocol classes can be generic, they are defined as::
+
+ class GenProto(Protocol[T]):
+ def meth(self) -> T:
+ ...
+ """
+ __slots__ = ()
+ _is_protocol = True
+
+ def __new__(cls, *args, **kwds):
+ if _gorg(cls) is Protocol:
+ raise TypeError("Type Protocol cannot be instantiated; "
+ "it can be used only as a base class")
+ return typing._generic_new(cls.__next_in_mro__, cls, *args, **kwds)
+
+
+# 3.8+
+if hasattr(typing, 'runtime_checkable'):
+ runtime_checkable = typing.runtime_checkable
+# 3.6-3.7
+else:
+ def runtime_checkable(cls):
+ """Mark a protocol class as a runtime protocol, so that it
+ can be used with isinstance() and issubclass(). Raise TypeError
+ if applied to a non-protocol class.
+
+ This allows a simple-minded structural check very similar to the
+ one-offs in collections.abc such as Hashable.
+ """
+ if not isinstance(cls, _ProtocolMeta) or not cls._is_protocol:
+ raise TypeError('@runtime_checkable can be only applied to protocol classes,'
+ f' got {cls!r}')
+ cls._is_runtime_protocol = True
+ return cls
+
+
+# Exists for backwards compatibility.
+runtime = runtime_checkable
+
+
+# 3.8+
+if hasattr(typing, 'SupportsIndex'):
+ SupportsIndex = typing.SupportsIndex
+# 3.6-3.7
+else:
+ @runtime_checkable
+ class SupportsIndex(Protocol):
+ __slots__ = ()
+
+ @abc.abstractmethod
+ def __index__(self) -> int:
+ pass
+
+
+if sys.version_info >= (3, 9, 2):
+ # The standard library TypedDict in Python 3.8 does not store runtime information
+ # about which (if any) keys are optional. See https://bugs.python.org/issue38834
+ # The standard library TypedDict in Python 3.9.0/1 does not honour the "total"
+ # keyword with old-style TypedDict(). See https://bugs.python.org/issue42059
+ TypedDict = typing.TypedDict
+else:
+ def _check_fails(cls, other):
+ try:
+ if sys._getframe(1).f_globals['__name__'] not in ['abc',
+ 'functools',
+ 'typing']:
+ # Typed dicts are only for static structural subtyping.
+ raise TypeError('TypedDict does not support instance and class checks')
+ except (AttributeError, ValueError):
+ pass
+ return False
+
+ def _dict_new(*args, **kwargs):
+ if not args:
+ raise TypeError('TypedDict.__new__(): not enough arguments')
+ _, args = args[0], args[1:] # allow the "cls" keyword be passed
+ return dict(*args, **kwargs)
+
+ _dict_new.__text_signature__ = '($cls, _typename, _fields=None, /, **kwargs)'
+
+ def _typeddict_new(*args, total=True, **kwargs):
+ if not args:
+ raise TypeError('TypedDict.__new__(): not enough arguments')
+ _, args = args[0], args[1:] # allow the "cls" keyword be passed
+ if args:
+ typename, args = args[0], args[1:] # allow the "_typename" keyword be passed
+ elif '_typename' in kwargs:
+ typename = kwargs.pop('_typename')
+ import warnings
+ warnings.warn("Passing '_typename' as keyword argument is deprecated",
+ DeprecationWarning, stacklevel=2)
+ else:
+ raise TypeError("TypedDict.__new__() missing 1 required positional "
+ "argument: '_typename'")
+ if args:
+ try:
+ fields, = args # allow the "_fields" keyword be passed
+ except ValueError:
+ raise TypeError('TypedDict.__new__() takes from 2 to 3 '
+ f'positional arguments but {len(args) + 2} '
+ 'were given')
+ elif '_fields' in kwargs and len(kwargs) == 1:
+ fields = kwargs.pop('_fields')
+ import warnings
+ warnings.warn("Passing '_fields' as keyword argument is deprecated",
+ DeprecationWarning, stacklevel=2)
+ else:
+ fields = None
+
+ if fields is None:
+ fields = kwargs
+ elif kwargs:
+ raise TypeError("TypedDict takes either a dict or keyword arguments,"
+ " but not both")
+
+ ns = {'__annotations__': dict(fields)}
+ try:
+ # Setting correct module is necessary to make typed dict classes pickleable.
+ ns['__module__'] = sys._getframe(1).f_globals.get('__name__', '__main__')
+ except (AttributeError, ValueError):
+ pass
+
+ return _TypedDictMeta(typename, (), ns, total=total)
+
+ _typeddict_new.__text_signature__ = ('($cls, _typename, _fields=None,'
+ ' /, *, total=True, **kwargs)')
+
+ class _TypedDictMeta(type):
+ def __init__(cls, name, bases, ns, total=True):
+ super().__init__(name, bases, ns)
+
+ def __new__(cls, name, bases, ns, total=True):
+ # Create new typed dict class object.
+ # This method is called directly when TypedDict is subclassed,
+ # or via _typeddict_new when TypedDict is instantiated. This way
+ # TypedDict supports all three syntaxes described in its docstring.
+ # Subclasses and instances of TypedDict return actual dictionaries
+ # via _dict_new.
+ ns['__new__'] = _typeddict_new if name == 'TypedDict' else _dict_new
+ tp_dict = super().__new__(cls, name, (dict,), ns)
+
+ annotations = {}
+ own_annotations = ns.get('__annotations__', {})
+ own_annotation_keys = set(own_annotations.keys())
+ msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type"
+ own_annotations = {
+ n: typing._type_check(tp, msg) for n, tp in own_annotations.items()
+ }
+ required_keys = set()
+ optional_keys = set()
+
+ for base in bases:
+ annotations.update(base.__dict__.get('__annotations__', {}))
+ required_keys.update(base.__dict__.get('__required_keys__', ()))
+ optional_keys.update(base.__dict__.get('__optional_keys__', ()))
+
+ annotations.update(own_annotations)
+ if total:
+ required_keys.update(own_annotation_keys)
+ else:
+ optional_keys.update(own_annotation_keys)
+
+ tp_dict.__annotations__ = annotations
+ tp_dict.__required_keys__ = frozenset(required_keys)
+ tp_dict.__optional_keys__ = frozenset(optional_keys)
+ if not hasattr(tp_dict, '__total__'):
+ tp_dict.__total__ = total
+ return tp_dict
+
+ __instancecheck__ = __subclasscheck__ = _check_fails
+
+ TypedDict = _TypedDictMeta('TypedDict', (dict,), {})
+ TypedDict.__module__ = __name__
+ TypedDict.__doc__ = \
+ """A simple typed name space. At runtime it is equivalent to a plain dict.
+
+ TypedDict creates a dictionary type that expects all of its
+ instances to have a certain set of keys, with each key
+ associated with a value of a consistent type. This expectation
+ is not checked at runtime but is only enforced by type checkers.
+ Usage::
+
+ class Point2D(TypedDict):
+ x: int
+ y: int
+ label: str
+
+ a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK
+ b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check
+
+ assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first')
+
+ The type info can be accessed via the Point2D.__annotations__ dict, and
+ the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets.
+ TypedDict supports two additional equivalent forms::
+
+ Point2D = TypedDict('Point2D', x=int, y=int, label=str)
+ Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str})
+
+ The class syntax is only supported in Python 3.6+, while two other
+ syntax forms work for Python 2.7 and 3.2+
+ """
+
+
+# Python 3.9+ has PEP 593 (Annotated and modified get_type_hints)
+if hasattr(typing, 'Annotated'):
+ Annotated = typing.Annotated
+ get_type_hints = typing.get_type_hints
+ # Not exported and not a public API, but needed for get_origin() and get_args()
+ # to work.
+ _AnnotatedAlias = typing._AnnotatedAlias
+# 3.7-3.8
+elif PEP_560:
+ class _AnnotatedAlias(typing._GenericAlias, _root=True):
+ """Runtime representation of an annotated type.
+
+ At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't'
+ with extra annotations. The alias behaves like a normal typing alias,
+ instantiating is the same as instantiating the underlying type, binding
+ it to types is also the same.
+ """
+ def __init__(self, origin, metadata):
+ if isinstance(origin, _AnnotatedAlias):
+ metadata = origin.__metadata__ + metadata
+ origin = origin.__origin__
+ super().__init__(origin, origin)
+ self.__metadata__ = metadata
+
+ def copy_with(self, params):
+ assert len(params) == 1
+ new_type = params[0]
+ return _AnnotatedAlias(new_type, self.__metadata__)
+
+ def __repr__(self):
+ return (f"typing_extensions.Annotated[{typing._type_repr(self.__origin__)}, "
+ f"{', '.join(repr(a) for a in self.__metadata__)}]")
+
+ def __reduce__(self):
+ return operator.getitem, (
+ Annotated, (self.__origin__,) + self.__metadata__
+ )
+
+ def __eq__(self, other):
+ if not isinstance(other, _AnnotatedAlias):
+ return NotImplemented
+ if self.__origin__ != other.__origin__:
+ return False
+ return self.__metadata__ == other.__metadata__
+
+ def __hash__(self):
+ return hash((self.__origin__, self.__metadata__))
+
+ class Annotated:
+ """Add context specific metadata to a type.
+
+ Example: Annotated[int, runtime_check.Unsigned] indicates to the
+ hypothetical runtime_check module that this type is an unsigned int.
+ Every other consumer of this type can ignore this metadata and treat
+ this type as int.
+
+ The first argument to Annotated must be a valid type (and will be in
+ the __origin__ field), the remaining arguments are kept as a tuple in
+ the __metadata__ field.
+
+ Details:
+
+ - It's an error to call `Annotated` with less than two arguments.
+ - Nested Annotated are flattened::
+
+ Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]
+
+ - Instantiating an annotated type is equivalent to instantiating the
+ underlying type::
+
+ Annotated[C, Ann1](5) == C(5)
+
+ - Annotated can be used as a generic type alias::
+
+ Optimized = Annotated[T, runtime.Optimize()]
+ Optimized[int] == Annotated[int, runtime.Optimize()]
+
+ OptimizedList = Annotated[List[T], runtime.Optimize()]
+ OptimizedList[int] == Annotated[List[int], runtime.Optimize()]
+ """
+
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwargs):
+ raise TypeError("Type Annotated cannot be instantiated.")
+
+ @typing._tp_cache
+ def __class_getitem__(cls, params):
+ if not isinstance(params, tuple) or len(params) < 2:
+ raise TypeError("Annotated[...] should be used "
+ "with at least two arguments (a type and an "
+ "annotation).")
+ msg = "Annotated[t, ...]: t must be a type."
+ origin = typing._type_check(params[0], msg)
+ metadata = tuple(params[1:])
+ return _AnnotatedAlias(origin, metadata)
+
+ def __init_subclass__(cls, *args, **kwargs):
+ raise TypeError(
+ f"Cannot subclass {cls.__module__}.Annotated"
+ )
+
+ def _strip_annotations(t):
+ """Strips the annotations from a given type.
+ """
+ if isinstance(t, _AnnotatedAlias):
+ return _strip_annotations(t.__origin__)
+ if isinstance(t, typing._GenericAlias):
+ stripped_args = tuple(_strip_annotations(a) for a in t.__args__)
+ if stripped_args == t.__args__:
+ return t
+ res = t.copy_with(stripped_args)
+ res._special = t._special
+ return res
+ return t
+
+ def get_type_hints(obj, globalns=None, localns=None, include_extras=False):
+ """Return type hints for an object.
+
+ This is often the same as obj.__annotations__, but it handles
+ forward references encoded as string literals, adds Optional[t] if a
+ default value equal to None is set and recursively replaces all
+ 'Annotated[T, ...]' with 'T' (unless 'include_extras=True').
+
+ The argument may be a module, class, method, or function. The annotations
+ are returned as a dictionary. For classes, annotations include also
+ inherited members.
+
+ TypeError is raised if the argument is not of a type that can contain
+ annotations, and an empty dictionary is returned if no annotations are
+ present.
+
+ BEWARE -- the behavior of globalns and localns is counterintuitive
+ (unless you are familiar with how eval() and exec() work). The
+ search order is locals first, then globals.
+
+ - If no dict arguments are passed, an attempt is made to use the
+ globals from obj (or the respective module's globals for classes),
+ and these are also used as the locals. If the object does not appear
+ to have globals, an empty dictionary is used.
+
+ - If one dict argument is passed, it is used for both globals and
+ locals.
+
+ - If two dict arguments are passed, they specify globals and
+ locals, respectively.
+ """
+ hint = typing.get_type_hints(obj, globalns=globalns, localns=localns)
+ if include_extras:
+ return hint
+ return {k: _strip_annotations(t) for k, t in hint.items()}
+# 3.6
+else:
+
+ def _is_dunder(name):
+ """Returns True if name is a __dunder_variable_name__."""
+ return len(name) > 4 and name.startswith('__') and name.endswith('__')
+
+ # Prior to Python 3.7 types did not have `copy_with`. A lot of the equality
+ # checks, argument expansion etc. are done on the _subs_tree. As a result we
+ # can't provide a get_type_hints function that strips out annotations.
+
+ class AnnotatedMeta(typing.GenericMeta):
+ """Metaclass for Annotated"""
+
+ def __new__(cls, name, bases, namespace, **kwargs):
+ if any(b is not object for b in bases):
+ raise TypeError("Cannot subclass " + str(Annotated))
+ return super().__new__(cls, name, bases, namespace, **kwargs)
+
+ @property
+ def __metadata__(self):
+ return self._subs_tree()[2]
+
+ def _tree_repr(self, tree):
+ cls, origin, metadata = tree
+ if not isinstance(origin, tuple):
+ tp_repr = typing._type_repr(origin)
+ else:
+ tp_repr = origin[0]._tree_repr(origin)
+ metadata_reprs = ", ".join(repr(arg) for arg in metadata)
+ return f'{cls}[{tp_repr}, {metadata_reprs}]'
+
+ def _subs_tree(self, tvars=None, args=None): # noqa
+ if self is Annotated:
+ return Annotated
+ res = super()._subs_tree(tvars=tvars, args=args)
+ # Flatten nested Annotated
+ if isinstance(res[1], tuple) and res[1][0] is Annotated:
+ sub_tp = res[1][1]
+ sub_annot = res[1][2]
+ return (Annotated, sub_tp, sub_annot + res[2])
+ return res
+
+ def _get_cons(self):
+ """Return the class used to create instance of this type."""
+ if self.__origin__ is None:
+ raise TypeError("Cannot get the underlying type of a "
+ "non-specialized Annotated type.")
+ tree = self._subs_tree()
+ while isinstance(tree, tuple) and tree[0] is Annotated:
+ tree = tree[1]
+ if isinstance(tree, tuple):
+ return tree[0]
+ else:
+ return tree
+
+ @typing._tp_cache
+ def __getitem__(self, params):
+ if not isinstance(params, tuple):
+ params = (params,)
+ if self.__origin__ is not None: # specializing an instantiated type
+ return super().__getitem__(params)
+ elif not isinstance(params, tuple) or len(params) < 2:
+ raise TypeError("Annotated[...] should be instantiated "
+ "with at least two arguments (a type and an "
+ "annotation).")
+ else:
+ msg = "Annotated[t, ...]: t must be a type."
+ tp = typing._type_check(params[0], msg)
+ metadata = tuple(params[1:])
+ return self.__class__(
+ self.__name__,
+ self.__bases__,
+ _no_slots_copy(self.__dict__),
+ tvars=_type_vars((tp,)),
+ # Metadata is a tuple so it won't be touched by _replace_args et al.
+ args=(tp, metadata),
+ origin=self,
+ )
+
+ def __call__(self, *args, **kwargs):
+ cons = self._get_cons()
+ result = cons(*args, **kwargs)
+ try:
+ result.__orig_class__ = self
+ except AttributeError:
+ pass
+ return result
+
+ def __getattr__(self, attr):
+ # For simplicity we just don't relay all dunder names
+ if self.__origin__ is not None and not _is_dunder(attr):
+ return getattr(self._get_cons(), attr)
+ raise AttributeError(attr)
+
+ def __setattr__(self, attr, value):
+ if _is_dunder(attr) or attr.startswith('_abc_'):
+ super().__setattr__(attr, value)
+ elif self.__origin__ is None:
+ raise AttributeError(attr)
+ else:
+ setattr(self._get_cons(), attr, value)
+
+ def __instancecheck__(self, obj):
+ raise TypeError("Annotated cannot be used with isinstance().")
+
+ def __subclasscheck__(self, cls):
+ raise TypeError("Annotated cannot be used with issubclass().")
+
+ class Annotated(metaclass=AnnotatedMeta):
+ """Add context specific metadata to a type.
+
+ Example: Annotated[int, runtime_check.Unsigned] indicates to the
+ hypothetical runtime_check module that this type is an unsigned int.
+ Every other consumer of this type can ignore this metadata and treat
+ this type as int.
+
+ The first argument to Annotated must be a valid type, the remaining
+ arguments are kept as a tuple in the __metadata__ field.
+
+ Details:
+
+ - It's an error to call `Annotated` with less than two arguments.
+ - Nested Annotated are flattened::
+
+ Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]
+
+ - Instantiating an annotated type is equivalent to instantiating the
+ underlying type::
+
+ Annotated[C, Ann1](5) == C(5)
+
+ - Annotated can be used as a generic type alias::
+
+ Optimized = Annotated[T, runtime.Optimize()]
+ Optimized[int] == Annotated[int, runtime.Optimize()]
+
+ OptimizedList = Annotated[List[T], runtime.Optimize()]
+ OptimizedList[int] == Annotated[List[int], runtime.Optimize()]
+ """
+
+# Python 3.8 has get_origin() and get_args() but those implementations aren't
+# Annotated-aware, so we can't use those. Python 3.9's versions don't support
+# ParamSpecArgs and ParamSpecKwargs, so only Python 3.10's versions will do.
+if sys.version_info[:2] >= (3, 10):
+ get_origin = typing.get_origin
+ get_args = typing.get_args
+# 3.7-3.9
+elif PEP_560:
+ try:
+ # 3.9+
+ from typing import _BaseGenericAlias
+ except ImportError:
+ _BaseGenericAlias = typing._GenericAlias
+ try:
+ # 3.9+
+ from typing import GenericAlias
+ except ImportError:
+ GenericAlias = typing._GenericAlias
+
+ def get_origin(tp):
+ """Get the unsubscripted version of a type.
+
+ This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar
+ and Annotated. Return None for unsupported types. Examples::
+
+ get_origin(Literal[42]) is Literal
+ get_origin(int) is None
+ get_origin(ClassVar[int]) is ClassVar
+ get_origin(Generic) is Generic
+ get_origin(Generic[T]) is Generic
+ get_origin(Union[T, int]) is Union
+ get_origin(List[Tuple[T, T]][int]) == list
+ get_origin(P.args) is P
+ """
+ if isinstance(tp, _AnnotatedAlias):
+ return Annotated
+ if isinstance(tp, (typing._GenericAlias, GenericAlias, _BaseGenericAlias,
+ ParamSpecArgs, ParamSpecKwargs)):
+ return tp.__origin__
+ if tp is typing.Generic:
+ return typing.Generic
+ return None
+
+ def get_args(tp):
+ """Get type arguments with all substitutions performed.
+
+ For unions, basic simplifications used by Union constructor are performed.
+ Examples::
+ get_args(Dict[str, int]) == (str, int)
+ get_args(int) == ()
+ get_args(Union[int, Union[T, int], str][int]) == (int, str)
+ get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])
+ get_args(Callable[[], T][int]) == ([], int)
+ """
+ if isinstance(tp, _AnnotatedAlias):
+ return (tp.__origin__,) + tp.__metadata__
+ if isinstance(tp, (typing._GenericAlias, GenericAlias)):
+ if getattr(tp, "_special", False):
+ return ()
+ res = tp.__args__
+ if get_origin(tp) is collections.abc.Callable and res[0] is not Ellipsis:
+ res = (list(res[:-1]), res[-1])
+ return res
+ return ()
+
+
+# 3.10+
+if hasattr(typing, 'TypeAlias'):
+ TypeAlias = typing.TypeAlias
+# 3.9
+elif sys.version_info[:2] >= (3, 9):
+ class _TypeAliasForm(typing._SpecialForm, _root=True):
+ def __repr__(self):
+ return 'typing_extensions.' + self._name
+
+ @_TypeAliasForm
+ def TypeAlias(self, parameters):
+ """Special marker indicating that an assignment should
+ be recognized as a proper type alias definition by type
+ checkers.
+
+ For example::
+
+ Predicate: TypeAlias = Callable[..., bool]
+
+ It's invalid when used anywhere except as in the example above.
+ """
+ raise TypeError(f"{self} is not subscriptable")
+# 3.7-3.8
+elif sys.version_info[:2] >= (3, 7):
+ class _TypeAliasForm(typing._SpecialForm, _root=True):
+ def __repr__(self):
+ return 'typing_extensions.' + self._name
+
+ TypeAlias = _TypeAliasForm('TypeAlias',
+ doc="""Special marker indicating that an assignment should
+ be recognized as a proper type alias definition by type
+ checkers.
+
+ For example::
+
+ Predicate: TypeAlias = Callable[..., bool]
+
+ It's invalid when used anywhere except as in the example
+ above.""")
+# 3.6
+else:
+ class _TypeAliasMeta(typing.TypingMeta):
+ """Metaclass for TypeAlias"""
+
+ def __repr__(self):
+ return 'typing_extensions.TypeAlias'
+
+ class _TypeAliasBase(typing._FinalTypingBase, metaclass=_TypeAliasMeta, _root=True):
+ """Special marker indicating that an assignment should
+ be recognized as a proper type alias definition by type
+ checkers.
+
+ For example::
+
+ Predicate: TypeAlias = Callable[..., bool]
+
+ It's invalid when used anywhere except as in the example above.
+ """
+ __slots__ = ()
+
+ def __instancecheck__(self, obj):
+ raise TypeError("TypeAlias cannot be used with isinstance().")
+
+ def __subclasscheck__(self, cls):
+ raise TypeError("TypeAlias cannot be used with issubclass().")
+
+ def __repr__(self):
+ return 'typing_extensions.TypeAlias'
+
+ TypeAlias = _TypeAliasBase(_root=True)
+
+
+# Python 3.10+ has PEP 612
+if hasattr(typing, 'ParamSpecArgs'):
+ ParamSpecArgs = typing.ParamSpecArgs
+ ParamSpecKwargs = typing.ParamSpecKwargs
+# 3.6-3.9
+else:
+ class _Immutable:
+ """Mixin to indicate that object should not be copied."""
+ __slots__ = ()
+
+ def __copy__(self):
+ return self
+
+ def __deepcopy__(self, memo):
+ return self
+
+ class ParamSpecArgs(_Immutable):
+ """The args for a ParamSpec object.
+
+ Given a ParamSpec object P, P.args is an instance of ParamSpecArgs.
+
+ ParamSpecArgs objects have a reference back to their ParamSpec:
+
+ P.args.__origin__ is P
+
+ This type is meant for runtime introspection and has no special meaning to
+ static type checkers.
+ """
+ def __init__(self, origin):
+ self.__origin__ = origin
+
+ def __repr__(self):
+ return f"{self.__origin__.__name__}.args"
+
+ class ParamSpecKwargs(_Immutable):
+ """The kwargs for a ParamSpec object.
+
+ Given a ParamSpec object P, P.kwargs is an instance of ParamSpecKwargs.
+
+ ParamSpecKwargs objects have a reference back to their ParamSpec:
+
+ P.kwargs.__origin__ is P
+
+ This type is meant for runtime introspection and has no special meaning to
+ static type checkers.
+ """
+ def __init__(self, origin):
+ self.__origin__ = origin
+
+ def __repr__(self):
+ return f"{self.__origin__.__name__}.kwargs"
+
+# 3.10+
+if hasattr(typing, 'ParamSpec'):
+ ParamSpec = typing.ParamSpec
+# 3.6-3.9
+else:
+
+ # Inherits from list as a workaround for Callable checks in Python < 3.9.2.
+ class ParamSpec(list):
+ """Parameter specification variable.
+
+ Usage::
+
+ P = ParamSpec('P')
+
+ Parameter specification variables exist primarily for the benefit of static
+ type checkers. They are used to forward the parameter types of one
+ callable to another callable, a pattern commonly found in higher order
+ functions and decorators. They are only valid when used in ``Concatenate``,
+    or as the first argument to ``Callable``. In Python 3.10 and higher,
+ they are also supported in user-defined Generics at runtime.
+ See class Generic for more information on generic types. An
+ example for annotating a decorator::
+
+ T = TypeVar('T')
+ P = ParamSpec('P')
+
+ def add_logging(f: Callable[P, T]) -> Callable[P, T]:
+ '''A type-safe decorator to add logging to a function.'''
+ def inner(*args: P.args, **kwargs: P.kwargs) -> T:
+ logging.info(f'{f.__name__} was called')
+ return f(*args, **kwargs)
+ return inner
+
+ @add_logging
+ def add_two(x: float, y: float) -> float:
+ '''Add two numbers together.'''
+ return x + y
+
+ Parameter specification variables defined with covariant=True or
+ contravariant=True can be used to declare covariant or contravariant
+ generic types. These keyword arguments are valid, but their actual semantics
+ are yet to be decided. See PEP 612 for details.
+
+ Parameter specification variables can be introspected. e.g.:
+
+       P.__name__ == 'P'
+ P.__bound__ == None
+ P.__covariant__ == False
+ P.__contravariant__ == False
+
+ Note that only parameter specification variables defined in global scope can
+ be pickled.
+ """
+
+ # Trick Generic __parameters__.
+ __class__ = typing.TypeVar
+
+ @property
+ def args(self):
+ return ParamSpecArgs(self)
+
+ @property
+ def kwargs(self):
+ return ParamSpecKwargs(self)
+
+ def __init__(self, name, *, bound=None, covariant=False, contravariant=False):
+ super().__init__([self])
+ self.__name__ = name
+ self.__covariant__ = bool(covariant)
+ self.__contravariant__ = bool(contravariant)
+ if bound:
+ self.__bound__ = typing._type_check(bound, 'Bound must be a type.')
+ else:
+ self.__bound__ = None
+
+ # for pickling:
+ try:
+ def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
+ except (AttributeError, ValueError):
+ def_mod = None
+ if def_mod != 'typing_extensions':
+ self.__module__ = def_mod
+
+ def __repr__(self):
+ if self.__covariant__:
+ prefix = '+'
+ elif self.__contravariant__:
+ prefix = '-'
+ else:
+ prefix = '~'
+ return prefix + self.__name__
+
+ def __hash__(self):
+ return object.__hash__(self)
+
+ def __eq__(self, other):
+ return self is other
+
+ def __reduce__(self):
+ return self.__name__
+
+ # Hack to get typing._type_check to pass.
+ def __call__(self, *args, **kwargs):
+ pass
+
+ if not PEP_560:
+ # Only needed in 3.6.
+ def _get_type_vars(self, tvars):
+ if self not in tvars:
+ tvars.append(self)
+
+
+# 3.6-3.9
+if not hasattr(typing, 'Concatenate'):
+ # Inherits from list as a workaround for Callable checks in Python < 3.9.2.
+ class _ConcatenateGenericAlias(list):
+
+ # Trick Generic into looking into this for __parameters__.
+ if PEP_560:
+ __class__ = typing._GenericAlias
+ else:
+ __class__ = typing._TypingBase
+
+ # Flag in 3.8.
+ _special = False
+ # Attribute in 3.6 and earlier.
+ _gorg = typing.Generic
+
+ def __init__(self, origin, args):
+ super().__init__(args)
+ self.__origin__ = origin
+ self.__args__ = args
+
+ def __repr__(self):
+ _type_repr = typing._type_repr
+ return (f'{_type_repr(self.__origin__)}'
+ f'[{", ".join(_type_repr(arg) for arg in self.__args__)}]')
+
+ def __hash__(self):
+ return hash((self.__origin__, self.__args__))
+
+ # Hack to get typing._type_check to pass in Generic.
+ def __call__(self, *args, **kwargs):
+ pass
+
+ @property
+ def __parameters__(self):
+ return tuple(
+ tp for tp in self.__args__ if isinstance(tp, (typing.TypeVar, ParamSpec))
+ )
+
+ if not PEP_560:
+ # Only required in 3.6.
+ def _get_type_vars(self, tvars):
+ if self.__origin__ and self.__parameters__:
+ typing._get_type_vars(self.__parameters__, tvars)
+
+
+# 3.6-3.9
+@typing._tp_cache
+def _concatenate_getitem(self, parameters):
+ if parameters == ():
+ raise TypeError("Cannot take a Concatenate of no types.")
+ if not isinstance(parameters, tuple):
+ parameters = (parameters,)
+ if not isinstance(parameters[-1], ParamSpec):
+ raise TypeError("The last parameter to Concatenate should be a "
+ "ParamSpec variable.")
+ msg = "Concatenate[arg, ...]: each arg must be a type."
+ parameters = tuple(typing._type_check(p, msg) for p in parameters)
+ return _ConcatenateGenericAlias(self, parameters)
+
+
+# 3.10+
+if hasattr(typing, 'Concatenate'):
+ Concatenate = typing.Concatenate
+ _ConcatenateGenericAlias = typing._ConcatenateGenericAlias # noqa
+# 3.9
+elif sys.version_info[:2] >= (3, 9):
+ @_TypeAliasForm
+ def Concatenate(self, parameters):
+ """Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
+ higher order function which adds, removes or transforms parameters of a
+ callable.
+
+ For example::
+
+ Callable[Concatenate[int, P], int]
+
+ See PEP 612 for detailed information.
+ """
+ return _concatenate_getitem(self, parameters)
+# 3.7-8
+elif sys.version_info[:2] >= (3, 7):
+ class _ConcatenateForm(typing._SpecialForm, _root=True):
+ def __repr__(self):
+ return 'typing_extensions.' + self._name
+
+ def __getitem__(self, parameters):
+ return _concatenate_getitem(self, parameters)
+
+ Concatenate = _ConcatenateForm(
+ 'Concatenate',
+ doc="""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
+ higher order function which adds, removes or transforms parameters of a
+ callable.
+
+ For example::
+
+ Callable[Concatenate[int, P], int]
+
+ See PEP 612 for detailed information.
+ """)
+# 3.6
+else:
+ class _ConcatenateAliasMeta(typing.TypingMeta):
+ """Metaclass for Concatenate."""
+
+ def __repr__(self):
+ return 'typing_extensions.Concatenate'
+
+ class _ConcatenateAliasBase(typing._FinalTypingBase,
+ metaclass=_ConcatenateAliasMeta,
+ _root=True):
+ """Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
+ higher order function which adds, removes or transforms parameters of a
+ callable.
+
+ For example::
+
+ Callable[Concatenate[int, P], int]
+
+ See PEP 612 for detailed information.
+ """
+ __slots__ = ()
+
+ def __instancecheck__(self, obj):
+ raise TypeError("Concatenate cannot be used with isinstance().")
+
+ def __subclasscheck__(self, cls):
+ raise TypeError("Concatenate cannot be used with issubclass().")
+
+ def __repr__(self):
+ return 'typing_extensions.Concatenate'
+
+ def __getitem__(self, parameters):
+ return _concatenate_getitem(self, parameters)
+
+ Concatenate = _ConcatenateAliasBase(_root=True)
+
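For context, a hedged sketch combining the `ParamSpec` and `Concatenate` backports above in the decorator pattern PEP 612 describes; `with_request_id` and `handle` are illustrative names only:

```python
from typing import Callable, TypeVar
from typing_extensions import Concatenate, ParamSpec

P = ParamSpec("P")
T = TypeVar("T")

# A decorator whose wrapper supplies the leading `int` argument itself,
# so callers of the decorated function no longer pass it.
def with_request_id(f: Callable[Concatenate[int, P], T]) -> Callable[P, T]:
    def inner(*args: P.args, **kwargs: P.kwargs) -> T:
        return f(0, *args, **kwargs)  # request id 0, purely for illustration
    return inner

@with_request_id
def handle(request_id: int, payload: str) -> str:
    return f"{request_id}:{payload}"

print(handle("ping"))  # 0:ping
```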
+# 3.10+
+if hasattr(typing, 'TypeGuard'):
+ TypeGuard = typing.TypeGuard
+# 3.9
+elif sys.version_info[:2] >= (3, 9):
+ class _TypeGuardForm(typing._SpecialForm, _root=True):
+ def __repr__(self):
+ return 'typing_extensions.' + self._name
+
+ @_TypeGuardForm
+ def TypeGuard(self, parameters):
+ """Special typing form used to annotate the return type of a user-defined
+ type guard function. ``TypeGuard`` only accepts a single type argument.
+ At runtime, functions marked this way should return a boolean.
+
+ ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
+ type checkers to determine a more precise type of an expression within a
+ program's code flow. Usually type narrowing is done by analyzing
+ conditional code flow and applying the narrowing to a block of code. The
+ conditional expression here is sometimes referred to as a "type guard".
+
+ Sometimes it would be convenient to use a user-defined boolean function
+ as a type guard. Such a function should use ``TypeGuard[...]`` as its
+ return type to alert static type checkers to this intention.
+
+ Using ``-> TypeGuard`` tells the static type checker that for a given
+ function:
+
+ 1. The return value is a boolean.
+ 2. If the return value is ``True``, the type of its argument
+ is the type inside ``TypeGuard``.
+
+ For example::
+
+ def is_str(val: Union[str, float]):
+ # "isinstance" type guard
+ if isinstance(val, str):
+ # Type of ``val`` is narrowed to ``str``
+ ...
+ else:
+ # Else, type of ``val`` is narrowed to ``float``.
+ ...
+
+ Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
+ form of ``TypeA`` (it can even be a wider form) and this may lead to
+ type-unsafe results. The main reason is to allow for things like
+ narrowing ``List[object]`` to ``List[str]`` even though the latter is not
+ a subtype of the former, since ``List`` is invariant. The responsibility of
+ writing type-safe type guards is left to the user.
+
+ ``TypeGuard`` also works with type variables. For more information, see
+ PEP 647 (User-Defined Type Guards).
+ """
+ item = typing._type_check(parameters, f'{self} accepts only single type.')
+ return typing._GenericAlias(self, (item,))
+# 3.7-3.8
+elif sys.version_info[:2] >= (3, 7):
+ class _TypeGuardForm(typing._SpecialForm, _root=True):
+
+ def __repr__(self):
+ return 'typing_extensions.' + self._name
+
+ def __getitem__(self, parameters):
+ item = typing._type_check(parameters,
+ f'{self._name} accepts only a single type')
+ return typing._GenericAlias(self, (item,))
+
+ TypeGuard = _TypeGuardForm(
+ 'TypeGuard',
+ doc="""Special typing form used to annotate the return type of a user-defined
+ type guard function. ``TypeGuard`` only accepts a single type argument.
+ At runtime, functions marked this way should return a boolean.
+
+ ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
+ type checkers to determine a more precise type of an expression within a
+ program's code flow. Usually type narrowing is done by analyzing
+ conditional code flow and applying the narrowing to a block of code. The
+ conditional expression here is sometimes referred to as a "type guard".
+
+ Sometimes it would be convenient to use a user-defined boolean function
+ as a type guard. Such a function should use ``TypeGuard[...]`` as its
+ return type to alert static type checkers to this intention.
+
+ Using ``-> TypeGuard`` tells the static type checker that for a given
+ function:
+
+ 1. The return value is a boolean.
+ 2. If the return value is ``True``, the type of its argument
+ is the type inside ``TypeGuard``.
+
+ For example::
+
+ def is_str(val: Union[str, float]):
+ # "isinstance" type guard
+ if isinstance(val, str):
+ # Type of ``val`` is narrowed to ``str``
+ ...
+ else:
+ # Else, type of ``val`` is narrowed to ``float``.
+ ...
+
+ Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
+ form of ``TypeA`` (it can even be a wider form) and this may lead to
+ type-unsafe results. The main reason is to allow for things like
+ narrowing ``List[object]`` to ``List[str]`` even though the latter is not
+ a subtype of the former, since ``List`` is invariant. The responsibility of
+ writing type-safe type guards is left to the user.
+
+ ``TypeGuard`` also works with type variables. For more information, see
+ PEP 647 (User-Defined Type Guards).
+ """)
+# 3.6
+else:
+ class _TypeGuard(typing._FinalTypingBase, _root=True):
+ """Special typing form used to annotate the return type of a user-defined
+ type guard function. ``TypeGuard`` only accepts a single type argument.
+ At runtime, functions marked this way should return a boolean.
+
+ ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
+ type checkers to determine a more precise type of an expression within a
+ program's code flow. Usually type narrowing is done by analyzing
+ conditional code flow and applying the narrowing to a block of code. The
+ conditional expression here is sometimes referred to as a "type guard".
+
+ Sometimes it would be convenient to use a user-defined boolean function
+ as a type guard. Such a function should use ``TypeGuard[...]`` as its
+ return type to alert static type checkers to this intention.
+
+ Using ``-> TypeGuard`` tells the static type checker that for a given
+ function:
+
+ 1. The return value is a boolean.
+ 2. If the return value is ``True``, the type of its argument
+ is the type inside ``TypeGuard``.
+
+ For example::
+
+ def is_str(val: Union[str, float]):
+ # "isinstance" type guard
+ if isinstance(val, str):
+ # Type of ``val`` is narrowed to ``str``
+ ...
+ else:
+ # Else, type of ``val`` is narrowed to ``float``.
+ ...
+
+ Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
+ form of ``TypeA`` (it can even be a wider form) and this may lead to
+ type-unsafe results. The main reason is to allow for things like
+ narrowing ``List[object]`` to ``List[str]`` even though the latter is not
+ a subtype of the former, since ``List`` is invariant. The responsibility of
+ writing type-safe type guards is left to the user.
+
+ ``TypeGuard`` also works with type variables. For more information, see
+ PEP 647 (User-Defined Type Guards).
+ """
+
+ __slots__ = ('__type__',)
+
+ def __init__(self, tp=None, **kwds):
+ self.__type__ = tp
+
+ def __getitem__(self, item):
+ cls = type(self)
+ if self.__type__ is None:
+ return cls(typing._type_check(item,
+ f'{cls.__name__[1:]} accepts only a single type.'),
+ _root=True)
+ raise TypeError(f'{cls.__name__[1:]} cannot be further subscripted')
+
+ def _eval_type(self, globalns, localns):
+ new_tp = typing._eval_type(self.__type__, globalns, localns)
+ if new_tp == self.__type__:
+ return self
+ return type(self)(new_tp, _root=True)
+
+ def __repr__(self):
+ r = super().__repr__()
+ if self.__type__ is not None:
+ r += f'[{typing._type_repr(self.__type__)}]'
+ return r
+
+ def __hash__(self):
+ return hash((type(self).__name__, self.__type__))
+
+ def __eq__(self, other):
+ if not isinstance(other, _TypeGuard):
+ return NotImplemented
+ if self.__type__ is not None:
+ return self.__type__ == other.__type__
+ return self is other
+
+ TypeGuard = _TypeGuard(_root=True)
+
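A small, self-contained sketch of the `TypeGuard` usage the docstrings above describe, assuming `typing_extensions` is importable; `is_str_list` is an illustrative helper, not part of the vendored module:

```python
from typing import List, Union
from typing_extensions import TypeGuard

def is_str_list(val: List[Union[str, int]]) -> TypeGuard[List[str]]:
    """Return True only when every element is a str, narrowing the list type."""
    return all(isinstance(x, str) for x in val)

values: List[Union[str, int]] = ["a", "b"]
if is_str_list(values):
    # A type checker now treats `values` as List[str] inside this branch.
    print(", ".join(values))
```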
+if hasattr(typing, "Self"):
+ Self = typing.Self
+elif sys.version_info[:2] >= (3, 7):
+ # Vendored from cpython typing._SpecialFrom
+ class _SpecialForm(typing._Final, _root=True):
+ __slots__ = ('_name', '__doc__', '_getitem')
+
+ def __init__(self, getitem):
+ self._getitem = getitem
+ self._name = getitem.__name__
+ self.__doc__ = getitem.__doc__
+
+ def __getattr__(self, item):
+ if item in {'__name__', '__qualname__'}:
+ return self._name
+
+ raise AttributeError(item)
+
+ def __mro_entries__(self, bases):
+ raise TypeError(f"Cannot subclass {self!r}")
+
+ def __repr__(self):
+ return f'typing_extensions.{self._name}'
+
+ def __reduce__(self):
+ return self._name
+
+ def __call__(self, *args, **kwds):
+ raise TypeError(f"Cannot instantiate {self!r}")
+
+ def __or__(self, other):
+ return typing.Union[self, other]
+
+ def __ror__(self, other):
+ return typing.Union[other, self]
+
+ def __instancecheck__(self, obj):
+ raise TypeError(f"{self} cannot be used with isinstance()")
+
+ def __subclasscheck__(self, cls):
+ raise TypeError(f"{self} cannot be used with issubclass()")
+
+ @typing._tp_cache
+ def __getitem__(self, parameters):
+ return self._getitem(self, parameters)
+
+ @_SpecialForm
+ def Self(self, params):
+ """Used to spell the type of "self" in classes.
+
+ Example::
+
+ from typing import Self
+
+ class ReturnsSelf:
+ def parse(self, data: bytes) -> Self:
+ ...
+ return self
+
+ """
+
+ raise TypeError(f"{self} is not subscriptable")
+else:
+ class _Self(typing._FinalTypingBase, _root=True):
+ """Used to spell the type of "self" in classes.
+
+ Example::
+
+ from typing import Self
+
+ class ReturnsSelf:
+ def parse(self, data: bytes) -> Self:
+ ...
+ return self
+
+ """
+
+ __slots__ = ()
+
+ def __instancecheck__(self, obj):
+ raise TypeError(f"{self} cannot be used with isinstance().")
+
+ def __subclasscheck__(self, cls):
+ raise TypeError(f"{self} cannot be used with issubclass().")
+
+ Self = _Self(_root=True)
+
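As an aside, a runnable sketch of the `Self` pattern from the docstring above, showing why returning `Self` (rather than the base class) keeps chained calls typed as the subclass; `Builder` and `QueryBuilder` are illustrative names:

```python
from typing_extensions import Self

class Builder:
    def __init__(self) -> None:
        self.parts: list = []

    def add(self, part: str) -> Self:
        # Annotating the return type as Self means QueryBuilder().add(...)
        # is typed as QueryBuilder, not Builder.
        self.parts.append(part)
        return self

class QueryBuilder(Builder):
    def build(self) -> str:
        return " ".join(self.parts)

print(QueryBuilder().add("SELECT").add("*").build())  # SELECT *
```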
+
+if hasattr(typing, 'Required'):
+ Required = typing.Required
+ NotRequired = typing.NotRequired
+elif sys.version_info[:2] >= (3, 9):
+ class _ExtensionsSpecialForm(typing._SpecialForm, _root=True):
+ def __repr__(self):
+ return 'typing_extensions.' + self._name
+
+ @_ExtensionsSpecialForm
+ def Required(self, parameters):
+ """A special typing construct to mark a key of a total=False TypedDict
+ as required. For example:
+
+ class Movie(TypedDict, total=False):
+ title: Required[str]
+ year: int
+
+ m = Movie(
+ title='The Matrix', # typechecker error if key is omitted
+ year=1999,
+ )
+
+ There is no runtime checking that a required key is actually provided
+ when instantiating a related TypedDict.
+ """
+ item = typing._type_check(parameters, f'{self._name} accepts only single type')
+ return typing._GenericAlias(self, (item,))
+
+ @_ExtensionsSpecialForm
+ def NotRequired(self, parameters):
+ """A special typing construct to mark a key of a TypedDict as
+ potentially missing. For example:
+
+ class Movie(TypedDict):
+ title: str
+ year: NotRequired[int]
+
+ m = Movie(
+ title='The Matrix', # typechecker error if key is omitted
+ year=1999,
+ )
+ """
+ item = typing._type_check(parameters, f'{self._name} accepts only single type')
+ return typing._GenericAlias(self, (item,))
+
+elif sys.version_info[:2] >= (3, 7):
+ class _RequiredForm(typing._SpecialForm, _root=True):
+ def __repr__(self):
+ return 'typing_extensions.' + self._name
+
+ def __getitem__(self, parameters):
+ item = typing._type_check(parameters,
+ '{} accepts only single type'.format(self._name))
+ return typing._GenericAlias(self, (item,))
+
+ Required = _RequiredForm(
+ 'Required',
+ doc="""A special typing construct to mark a key of a total=False TypedDict
+ as required. For example:
+
+ class Movie(TypedDict, total=False):
+ title: Required[str]
+ year: int
+
+ m = Movie(
+ title='The Matrix', # typechecker error if key is omitted
+ year=1999,
+ )
+
+ There is no runtime checking that a required key is actually provided
+ when instantiating a related TypedDict.
+ """)
+ NotRequired = _RequiredForm(
+ 'NotRequired',
+ doc="""A special typing construct to mark a key of a TypedDict as
+ potentially missing. For example:
+
+ class Movie(TypedDict):
+ title: str
+ year: NotRequired[int]
+
+ m = Movie(
+ title='The Matrix', # typechecker error if key is omitted
+ year=1999,
+ )
+ """)
+else:
+ # NOTE: Modeled after _Final's implementation when _FinalTypingBase available
+ class _MaybeRequired(typing._FinalTypingBase, _root=True):
+ __slots__ = ('__type__',)
+
+ def __init__(self, tp=None, **kwds):
+ self.__type__ = tp
+
+ def __getitem__(self, item):
+ cls = type(self)
+ if self.__type__ is None:
+ return cls(typing._type_check(item,
+ '{} accepts only single type.'.format(cls.__name__[1:])),
+ _root=True)
+ raise TypeError('{} cannot be further subscripted'
+ .format(cls.__name__[1:]))
+
+ def _eval_type(self, globalns, localns):
+ new_tp = typing._eval_type(self.__type__, globalns, localns)
+ if new_tp == self.__type__:
+ return self
+ return type(self)(new_tp, _root=True)
+
+ def __repr__(self):
+ r = super().__repr__()
+ if self.__type__ is not None:
+ r += '[{}]'.format(typing._type_repr(self.__type__))
+ return r
+
+ def __hash__(self):
+ return hash((type(self).__name__, self.__type__))
+
+ def __eq__(self, other):
+ if not isinstance(other, type(self)):
+ return NotImplemented
+ if self.__type__ is not None:
+ return self.__type__ == other.__type__
+ return self is other
+
+ class _Required(_MaybeRequired, _root=True):
+ """A special typing construct to mark a key of a total=False TypedDict
+ as required. For example:
+
+ class Movie(TypedDict, total=False):
+ title: Required[str]
+ year: int
+
+ m = Movie(
+ title='The Matrix', # typechecker error if key is omitted
+ year=1999,
+ )
+
+ There is no runtime checking that a required key is actually provided
+ when instantiating a related TypedDict.
+ """
+
+ class _NotRequired(_MaybeRequired, _root=True):
+ """A special typing construct to mark a key of a TypedDict as
+ potentially missing. For example:
+
+ class Movie(TypedDict):
+ title: str
+ year: NotRequired[int]
+
+ m = Movie(
+ title='The Matrix', # typechecker error if key is omitted
+ year=1999,
+ )
+ """
+
+ Required = _Required(_root=True)
+ NotRequired = _NotRequired(_root=True)
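A brief sketch of how the `Required`/`NotRequired` markers defined above are meant to be used with `TypedDict`; the `Movie` and `Release` classes are illustrative and assume a `typing_extensions` version whose `TypedDict` accepts these markers:

```python
from typing_extensions import NotRequired, Required, TypedDict

class Movie(TypedDict, total=False):
    title: Required[str]   # must be present even though total=False
    year: int              # may be omitted

class Release(TypedDict):
    name: str
    tagline: NotRequired[str]  # may be omitted even though total=True

m: Movie = {"title": "The Matrix", "year": 1999}
r: Release = {"name": "v1"}
print(m["title"], r["name"])
```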
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/__init__.py b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/__init__.py
new file mode 100644
index 0000000..fe86b59
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/__init__.py
@@ -0,0 +1,85 @@
+"""
+Python HTTP library with thread-safe connection pooling, file post support, user-friendly features, and more
+"""
+from __future__ import absolute_import
+
+# Set default logging handler to avoid "No handler found" warnings.
+import logging
+import warnings
+from logging import NullHandler
+
+from . import exceptions
+from ._version import __version__
+from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, connection_from_url
+from .filepost import encode_multipart_formdata
+from .poolmanager import PoolManager, ProxyManager, proxy_from_url
+from .response import HTTPResponse
+from .util.request import make_headers
+from .util.retry import Retry
+from .util.timeout import Timeout
+from .util.url import get_host
+
+__author__ = "Andrey Petrov (andrey.petrov@shazow.net)"
+__license__ = "MIT"
+__version__ = __version__
+
+__all__ = (
+ "HTTPConnectionPool",
+ "HTTPSConnectionPool",
+ "PoolManager",
+ "ProxyManager",
+ "HTTPResponse",
+ "Retry",
+ "Timeout",
+ "add_stderr_logger",
+ "connection_from_url",
+ "disable_warnings",
+ "encode_multipart_formdata",
+ "get_host",
+ "make_headers",
+ "proxy_from_url",
+)
+
+logging.getLogger(__name__).addHandler(NullHandler())
+
+
+def add_stderr_logger(level=logging.DEBUG):
+ """
+ Helper for quickly adding a StreamHandler to the logger. Useful for
+ debugging.
+
+ Returns the handler after adding it.
+ """
+ # This method needs to be in this __init__.py to get the __name__ correct
+ # even if urllib3 is vendored within another package.
+ logger = logging.getLogger(__name__)
+ handler = logging.StreamHandler()
+ handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
+ logger.addHandler(handler)
+ logger.setLevel(level)
+ logger.debug("Added a stderr logging handler to logger: %s", __name__)
+ return handler
+
+
+# ... Clean up.
+del NullHandler
+
+
+# All warning filters *must* be appended unless you're really certain that they
+# shouldn't be: otherwise, it's very hard for users to use most Python
+# mechanisms to silence them.
+# SecurityWarning's always go off by default.
+warnings.simplefilter("always", exceptions.SecurityWarning, append=True)
+# SubjectAltNameWarning's should go off once per host
+warnings.simplefilter("default", exceptions.SubjectAltNameWarning, append=True)
+# InsecurePlatformWarning's don't vary between requests, so we keep it default.
+warnings.simplefilter("default", exceptions.InsecurePlatformWarning, append=True)
+# SNIMissingWarnings should go off only once.
+warnings.simplefilter("default", exceptions.SNIMissingWarning, append=True)
+
+
+def disable_warnings(category=exceptions.HTTPWarning):
+ """
+ Helper for quickly disabling all urllib3 warnings.
+ """
+ warnings.simplefilter("ignore", category)
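A hedged usage sketch for the helpers in this `__init__.py`; it imports plain `urllib3` for readability (the vendored copy would be imported as `pip._vendor.urllib3`), and the request itself requires network access:

```python
import logging
import urllib3  # for illustration; the vendored copy lives at pip._vendor.urllib3

# Attach a stderr handler so pool/connection activity is visible while debugging.
handler = urllib3.add_stderr_logger(level=logging.DEBUG)

# Silence all urllib3 warnings (e.g. InsecureRequestWarning) once debugging is done.
urllib3.disable_warnings()

http = urllib3.PoolManager()
resp = http.request("GET", "http://example.com/")
print(resp.status, len(resp.data))
```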
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/_collections.py b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/_collections.py
new file mode 100644
index 0000000..da9857e
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/_collections.py
@@ -0,0 +1,337 @@
+from __future__ import absolute_import
+
+try:
+ from collections.abc import Mapping, MutableMapping
+except ImportError:
+ from collections import Mapping, MutableMapping
+try:
+ from threading import RLock
+except ImportError: # Platform-specific: No threads available
+
+ class RLock:
+ def __enter__(self):
+ pass
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ pass
+
+
+from collections import OrderedDict
+
+from .exceptions import InvalidHeader
+from .packages import six
+from .packages.six import iterkeys, itervalues
+
+__all__ = ["RecentlyUsedContainer", "HTTPHeaderDict"]
+
+
+_Null = object()
+
+
+class RecentlyUsedContainer(MutableMapping):
+ """
+ Provides a thread-safe dict-like container which maintains up to
+ ``maxsize`` keys while throwing away the least-recently-used keys beyond
+ ``maxsize``.
+
+ :param maxsize:
+ Maximum number of recent elements to retain.
+
+ :param dispose_func:
+ Every time an item is evicted from the container,
+        Every time an item is evicted from the container,
+        ``dispose_func(value)`` is called with the evicted value.
+
+ ContainerCls = OrderedDict
+
+ def __init__(self, maxsize=10, dispose_func=None):
+ self._maxsize = maxsize
+ self.dispose_func = dispose_func
+
+ self._container = self.ContainerCls()
+ self.lock = RLock()
+
+ def __getitem__(self, key):
+ # Re-insert the item, moving it to the end of the eviction line.
+ with self.lock:
+ item = self._container.pop(key)
+ self._container[key] = item
+ return item
+
+ def __setitem__(self, key, value):
+ evicted_value = _Null
+ with self.lock:
+ # Possibly evict the existing value of 'key'
+ evicted_value = self._container.get(key, _Null)
+ self._container[key] = value
+
+ # If we didn't evict an existing value, we might have to evict the
+ # least recently used item from the beginning of the container.
+ if len(self._container) > self._maxsize:
+ _key, evicted_value = self._container.popitem(last=False)
+
+ if self.dispose_func and evicted_value is not _Null:
+ self.dispose_func(evicted_value)
+
+ def __delitem__(self, key):
+ with self.lock:
+ value = self._container.pop(key)
+
+ if self.dispose_func:
+ self.dispose_func(value)
+
+ def __len__(self):
+ with self.lock:
+ return len(self._container)
+
+ def __iter__(self):
+ raise NotImplementedError(
+ "Iteration over this class is unlikely to be threadsafe."
+ )
+
+ def clear(self):
+ with self.lock:
+ # Copy pointers to all values, then wipe the mapping
+ values = list(itervalues(self._container))
+ self._container.clear()
+
+ if self.dispose_func:
+ for value in values:
+ self.dispose_func(value)
+
+ def keys(self):
+ with self.lock:
+ return list(iterkeys(self._container))
+
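A short illustration of the eviction behaviour documented above; `RecentlyUsedContainer` is an internal class, so the import path is shown only for demonstration:

```python
from urllib3._collections import RecentlyUsedContainer  # internal API, illustration only

closed = []

# Keep at most two entries; evicted values are handed to dispose_func.
cache = RecentlyUsedContainer(maxsize=2, dispose_func=closed.append)
cache["a"] = 1
cache["b"] = 2
cache["c"] = 3               # evicts the least-recently-used key ("a")

print(sorted(cache.keys()))  # ['b', 'c']
print(closed)                # [1]
```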
+
+class HTTPHeaderDict(MutableMapping):
+ """
+ :param headers:
+ An iterable of field-value pairs. Must not contain multiple field names
+ when compared case-insensitively.
+
+ :param kwargs:
+ Additional field-value pairs to pass in to ``dict.update``.
+
+ A ``dict`` like container for storing HTTP Headers.
+
+ Field names are stored and compared case-insensitively in compliance with
+ RFC 7230. Iteration provides the first case-sensitive key seen for each
+ case-insensitive pair.
+
+ Using ``__setitem__`` syntax overwrites fields that compare equal
+ case-insensitively in order to maintain ``dict``'s api. For fields that
+ compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``
+ in a loop.
+
+ If multiple fields that are equal case-insensitively are passed to the
+ constructor or ``.update``, the behavior is undefined and some will be
+ lost.
+
+ >>> headers = HTTPHeaderDict()
+ >>> headers.add('Set-Cookie', 'foo=bar')
+ >>> headers.add('set-cookie', 'baz=quxx')
+ >>> headers['content-length'] = '7'
+ >>> headers['SET-cookie']
+ 'foo=bar, baz=quxx'
+ >>> headers['Content-Length']
+ '7'
+ """
+
+ def __init__(self, headers=None, **kwargs):
+ super(HTTPHeaderDict, self).__init__()
+ self._container = OrderedDict()
+ if headers is not None:
+ if isinstance(headers, HTTPHeaderDict):
+ self._copy_from(headers)
+ else:
+ self.extend(headers)
+ if kwargs:
+ self.extend(kwargs)
+
+ def __setitem__(self, key, val):
+ self._container[key.lower()] = [key, val]
+ return self._container[key.lower()]
+
+ def __getitem__(self, key):
+ val = self._container[key.lower()]
+ return ", ".join(val[1:])
+
+ def __delitem__(self, key):
+ del self._container[key.lower()]
+
+ def __contains__(self, key):
+ return key.lower() in self._container
+
+ def __eq__(self, other):
+ if not isinstance(other, Mapping) and not hasattr(other, "keys"):
+ return False
+ if not isinstance(other, type(self)):
+ other = type(self)(other)
+ return dict((k.lower(), v) for k, v in self.itermerged()) == dict(
+ (k.lower(), v) for k, v in other.itermerged()
+ )
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ if six.PY2: # Python 2
+ iterkeys = MutableMapping.iterkeys
+ itervalues = MutableMapping.itervalues
+
+ __marker = object()
+
+ def __len__(self):
+ return len(self._container)
+
+ def __iter__(self):
+ # Only provide the originally cased names
+ for vals in self._container.values():
+ yield vals[0]
+
+ def pop(self, key, default=__marker):
+ """D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+ If key is not found, d is returned if given, otherwise KeyError is raised.
+ """
+ # Using the MutableMapping function directly fails due to the private marker.
+ # Using ordinary dict.pop would expose the internal structures.
+ # So let's reinvent the wheel.
+ try:
+ value = self[key]
+ except KeyError:
+ if default is self.__marker:
+ raise
+ return default
+ else:
+ del self[key]
+ return value
+
+ def discard(self, key):
+ try:
+ del self[key]
+ except KeyError:
+ pass
+
+ def add(self, key, val):
+ """Adds a (name, value) pair, doesn't overwrite the value if it already
+ exists.
+
+ >>> headers = HTTPHeaderDict(foo='bar')
+ >>> headers.add('Foo', 'baz')
+ >>> headers['foo']
+ 'bar, baz'
+ """
+ key_lower = key.lower()
+ new_vals = [key, val]
+ # Keep the common case aka no item present as fast as possible
+ vals = self._container.setdefault(key_lower, new_vals)
+ if new_vals is not vals:
+ vals.append(val)
+
+ def extend(self, *args, **kwargs):
+ """Generic import function for any type of header-like object.
+ Adapted version of MutableMapping.update in order to insert items
+ with self.add instead of self.__setitem__
+ """
+ if len(args) > 1:
+ raise TypeError(
+ "extend() takes at most 1 positional "
+                "argument ({0} given)".format(len(args))
+ )
+ other = args[0] if len(args) >= 1 else ()
+
+ if isinstance(other, HTTPHeaderDict):
+ for key, val in other.iteritems():
+ self.add(key, val)
+ elif isinstance(other, Mapping):
+ for key in other:
+ self.add(key, other[key])
+ elif hasattr(other, "keys"):
+ for key in other.keys():
+ self.add(key, other[key])
+ else:
+ for key, value in other:
+ self.add(key, value)
+
+ for key, value in kwargs.items():
+ self.add(key, value)
+
+ def getlist(self, key, default=__marker):
+ """Returns a list of all the values for the named field. Returns an
+ empty list if the key doesn't exist."""
+ try:
+ vals = self._container[key.lower()]
+ except KeyError:
+ if default is self.__marker:
+ return []
+ return default
+ else:
+ return vals[1:]
+
+ # Backwards compatibility for httplib
+ getheaders = getlist
+ getallmatchingheaders = getlist
+ iget = getlist
+
+ # Backwards compatibility for http.cookiejar
+ get_all = getlist
+
+ def __repr__(self):
+ return "%s(%s)" % (type(self).__name__, dict(self.itermerged()))
+
+ def _copy_from(self, other):
+ for key in other:
+ val = other.getlist(key)
+ if isinstance(val, list):
+ # Don't need to convert tuples
+ val = list(val)
+ self._container[key.lower()] = [key] + val
+
+ def copy(self):
+ clone = type(self)()
+ clone._copy_from(self)
+ return clone
+
+ def iteritems(self):
+ """Iterate over all header lines, including duplicate ones."""
+ for key in self:
+ vals = self._container[key.lower()]
+ for val in vals[1:]:
+ yield vals[0], val
+
+ def itermerged(self):
+ """Iterate over all headers, merging duplicate ones together."""
+ for key in self:
+ val = self._container[key.lower()]
+ yield val[0], ", ".join(val[1:])
+
+ def items(self):
+ return list(self.iteritems())
+
+ @classmethod
+ def from_httplib(cls, message): # Python 2
+ """Read headers from a Python 2 httplib message object."""
+ # python2.7 does not expose a proper API for exporting multiheaders
+ # efficiently. This function re-reads raw lines from the message
+ # object and extracts the multiheaders properly.
+ obs_fold_continued_leaders = (" ", "\t")
+ headers = []
+
+ for line in message.headers:
+ if line.startswith(obs_fold_continued_leaders):
+ if not headers:
+ # We received a header line that starts with OWS as described
+ # in RFC-7230 S3.2.4. This indicates a multiline header, but
+ # there exists no previous header to which we can attach it.
+ raise InvalidHeader(
+ "Header continuation with no previous header: %s" % line
+ )
+ else:
+ key, value = headers[-1]
+ headers[-1] = (key, value + " " + line.strip())
+ continue
+
+ key, value = line.split(":", 1)
+ headers.append((key, value.strip()))
+
+ return cls(headers)
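A quick sketch of the case-insensitive, multi-value behaviour the `HTTPHeaderDict` docstring describes; again the import is from an internal module and is shown for illustration only:

```python
from urllib3._collections import HTTPHeaderDict  # internal API, illustration only

headers = HTTPHeaderDict()
headers.add("Set-Cookie", "foo=bar")
headers.add("set-cookie", "baz=qux")   # same field, compared case-insensitively
headers["Content-Length"] = "7"        # __setitem__ overwrites; add() appends

print(headers["SET-COOKIE"])           # foo=bar, baz=qux
print(headers.getlist("set-cookie"))   # ['foo=bar', 'baz=qux']
print(list(headers.items()))           # duplicate header lines are preserved
```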
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/_version.py b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/_version.py
new file mode 100644
index 0000000..fa8979d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/_version.py
@@ -0,0 +1,2 @@
+# This file is protected via CODEOWNERS
+__version__ = "1.26.8"
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/connection.py b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/connection.py
new file mode 100644
index 0000000..4d92ac6
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/connection.py
@@ -0,0 +1,569 @@
+from __future__ import absolute_import
+
+import datetime
+import logging
+import os
+import re
+import socket
+import warnings
+from socket import error as SocketError
+from socket import timeout as SocketTimeout
+
+from .packages import six
+from .packages.six.moves.http_client import HTTPConnection as _HTTPConnection
+from .packages.six.moves.http_client import HTTPException # noqa: F401
+from .util.proxy import create_proxy_ssl_context
+
+try: # Compiled with SSL?
+ import ssl
+
+ BaseSSLError = ssl.SSLError
+except (ImportError, AttributeError): # Platform-specific: No SSL.
+ ssl = None
+
+ class BaseSSLError(BaseException):
+ pass
+
+
+try:
+ # Python 3: not a no-op, we're adding this to the namespace so it can be imported.
+ ConnectionError = ConnectionError
+except NameError:
+ # Python 2
+ class ConnectionError(Exception):
+ pass
+
+
+try: # Python 3:
+ # Not a no-op, we're adding this to the namespace so it can be imported.
+ BrokenPipeError = BrokenPipeError
+except NameError: # Python 2:
+
+ class BrokenPipeError(Exception):
+ pass
+
+
+from ._collections import HTTPHeaderDict # noqa (historical, removed in v2)
+from ._version import __version__
+from .exceptions import (
+ ConnectTimeoutError,
+ NewConnectionError,
+ SubjectAltNameWarning,
+ SystemTimeWarning,
+)
+from .util import SKIP_HEADER, SKIPPABLE_HEADERS, connection
+from .util.ssl_ import (
+ assert_fingerprint,
+ create_urllib3_context,
+ is_ipaddress,
+ resolve_cert_reqs,
+ resolve_ssl_version,
+ ssl_wrap_socket,
+)
+from .util.ssl_match_hostname import CertificateError, match_hostname
+
+log = logging.getLogger(__name__)
+
+port_by_scheme = {"http": 80, "https": 443}
+
+# When it comes time to update this value as a part of regular maintenance
+# (ie test_recent_date is failing) update it to ~6 months before the current date.
+RECENT_DATE = datetime.date(2020, 7, 1)
+
+_CONTAINS_CONTROL_CHAR_RE = re.compile(r"[^-!#$%&'*+.^_`|~0-9a-zA-Z]")
+
+
+class HTTPConnection(_HTTPConnection, object):
+ """
+ Based on :class:`http.client.HTTPConnection` but provides an extra constructor
+ backwards-compatibility layer between older and newer Pythons.
+
+ Additional keyword parameters are used to configure attributes of the connection.
+ Accepted parameters include:
+
+ - ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool`
+ - ``source_address``: Set the source address for the current connection.
+ - ``socket_options``: Set specific options on the underlying socket. If not specified, then
+ defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling
+ Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.
+
+ For example, if you wish to enable TCP Keep Alive in addition to the defaults,
+ you might pass:
+
+ .. code-block:: python
+
+ HTTPConnection.default_socket_options + [
+ (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
+ ]
+
+ Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).
+ """
+
+ default_port = port_by_scheme["http"]
+
+ #: Disable Nagle's algorithm by default.
+ #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
+ default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
+
+ #: Whether this connection verifies the host's certificate.
+ is_verified = False
+
+ #: Whether this proxy connection (if used) verifies the proxy host's
+ #: certificate.
+ proxy_is_verified = None
+
+ def __init__(self, *args, **kw):
+ if not six.PY2:
+ kw.pop("strict", None)
+
+ # Pre-set source_address.
+ self.source_address = kw.get("source_address")
+
+ #: The socket options provided by the user. If no options are
+ #: provided, we use the default options.
+ self.socket_options = kw.pop("socket_options", self.default_socket_options)
+
+ # Proxy options provided by the user.
+ self.proxy = kw.pop("proxy", None)
+ self.proxy_config = kw.pop("proxy_config", None)
+
+ _HTTPConnection.__init__(self, *args, **kw)
+
+ @property
+ def host(self):
+ """
+ Getter method to remove any trailing dots that indicate the hostname is an FQDN.
+
+ In general, SSL certificates don't include the trailing dot indicating a
+ fully-qualified domain name, and thus, they don't validate properly when
+ checked against a domain name that includes the dot. In addition, some
+ servers may not expect to receive the trailing dot when provided.
+
+ However, the hostname with trailing dot is critical to DNS resolution; doing a
+ lookup with the trailing dot will properly only resolve the appropriate FQDN,
+ whereas a lookup without a trailing dot will search the system's search domain
+ list. Thus, it's important to keep the original host around for use only in
+ those cases where it's appropriate (i.e., when doing DNS lookup to establish the
+ actual TCP connection across which we're going to send HTTP requests).
+ """
+ return self._dns_host.rstrip(".")
+
+ @host.setter
+ def host(self, value):
+ """
+ Setter for the `host` property.
+
+ We assume that only urllib3 uses the _dns_host attribute; httplib itself
+ only uses `host`, and it seems reasonable that other libraries follow suit.
+ """
+ self._dns_host = value
+
+ def _new_conn(self):
+ """Establish a socket connection and set nodelay settings on it.
+
+ :return: New socket connection.
+ """
+ extra_kw = {}
+ if self.source_address:
+ extra_kw["source_address"] = self.source_address
+
+ if self.socket_options:
+ extra_kw["socket_options"] = self.socket_options
+
+ try:
+ conn = connection.create_connection(
+ (self._dns_host, self.port), self.timeout, **extra_kw
+ )
+
+ except SocketTimeout:
+ raise ConnectTimeoutError(
+ self,
+ "Connection to %s timed out. (connect timeout=%s)"
+ % (self.host, self.timeout),
+ )
+
+ except SocketError as e:
+ raise NewConnectionError(
+ self, "Failed to establish a new connection: %s" % e
+ )
+
+ return conn
+
+ def _is_using_tunnel(self):
+ # Google App Engine's httplib does not define _tunnel_host
+ return getattr(self, "_tunnel_host", None)
+
+ def _prepare_conn(self, conn):
+ self.sock = conn
+ if self._is_using_tunnel():
+ # TODO: Fix tunnel so it doesn't depend on self.sock state.
+ self._tunnel()
+ # Mark this connection as not reusable
+ self.auto_open = 0
+
+ def connect(self):
+ conn = self._new_conn()
+ self._prepare_conn(conn)
+
+ def putrequest(self, method, url, *args, **kwargs):
+ """ """
+ # Empty docstring because the indentation of CPython's implementation
+ # is broken but we don't want this method in our documentation.
+ match = _CONTAINS_CONTROL_CHAR_RE.search(method)
+ if match:
+ raise ValueError(
+ "Method cannot contain non-token characters %r (found at least %r)"
+ % (method, match.group())
+ )
+
+ return _HTTPConnection.putrequest(self, method, url, *args, **kwargs)
+
+ def putheader(self, header, *values):
+ """ """
+ if not any(isinstance(v, str) and v == SKIP_HEADER for v in values):
+ _HTTPConnection.putheader(self, header, *values)
+ elif six.ensure_str(header.lower()) not in SKIPPABLE_HEADERS:
+ raise ValueError(
+ "urllib3.util.SKIP_HEADER only supports '%s'"
+ % ("', '".join(map(str.title, sorted(SKIPPABLE_HEADERS))),)
+ )
+
+ def request(self, method, url, body=None, headers=None):
+ if headers is None:
+ headers = {}
+ else:
+ # Avoid modifying the headers passed into .request()
+ headers = headers.copy()
+ if "user-agent" not in (six.ensure_str(k.lower()) for k in headers):
+ headers["User-Agent"] = _get_default_user_agent()
+ super(HTTPConnection, self).request(method, url, body=body, headers=headers)
+
+ def request_chunked(self, method, url, body=None, headers=None):
+ """
+ Alternative to the common request method, which sends the
+ body with chunked encoding and not as one block
+ """
+ headers = headers or {}
+ header_keys = set([six.ensure_str(k.lower()) for k in headers])
+ skip_accept_encoding = "accept-encoding" in header_keys
+ skip_host = "host" in header_keys
+ self.putrequest(
+ method, url, skip_accept_encoding=skip_accept_encoding, skip_host=skip_host
+ )
+ if "user-agent" not in header_keys:
+ self.putheader("User-Agent", _get_default_user_agent())
+ for header, value in headers.items():
+ self.putheader(header, value)
+ if "transfer-encoding" not in header_keys:
+ self.putheader("Transfer-Encoding", "chunked")
+ self.endheaders()
+
+ if body is not None:
+ stringish_types = six.string_types + (bytes,)
+ if isinstance(body, stringish_types):
+ body = (body,)
+ for chunk in body:
+ if not chunk:
+ continue
+ if not isinstance(chunk, bytes):
+ chunk = chunk.encode("utf8")
+ len_str = hex(len(chunk))[2:]
+ to_send = bytearray(len_str.encode())
+ to_send += b"\r\n"
+ to_send += chunk
+ to_send += b"\r\n"
+ self.send(to_send)
+
+ # After the if clause, to always have a closed body
+ self.send(b"0\r\n\r\n")
+
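For reference, the wire framing that `request_chunked` emits per chunk is the hex length, CRLF, the payload, CRLF, and finally a terminating `0\r\n\r\n`; below is a tiny standalone sketch of that arithmetic (the `frame_chunk` helper is illustrative, not urllib3 API):

```python
def frame_chunk(chunk: bytes) -> bytes:
    """Frame one chunk the way request_chunked does: hex length, CRLF, data, CRLF."""
    return hex(len(chunk))[2:].encode() + b"\r\n" + chunk + b"\r\n"

body = [b"hello ", b"world"]
wire = b"".join(frame_chunk(c) for c in body if c) + b"0\r\n\r\n"
print(wire)  # b'6\r\nhello \r\n5\r\nworld\r\n0\r\n\r\n'
```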
+
+class HTTPSConnection(HTTPConnection):
+ """
+ Many of the parameters to this constructor are passed to the underlying SSL
+ socket by means of :py:func:`urllib3.util.ssl_wrap_socket`.
+ """
+
+ default_port = port_by_scheme["https"]
+
+ cert_reqs = None
+ ca_certs = None
+ ca_cert_dir = None
+ ca_cert_data = None
+ ssl_version = None
+ assert_fingerprint = None
+ tls_in_tls_required = False
+
+ def __init__(
+ self,
+ host,
+ port=None,
+ key_file=None,
+ cert_file=None,
+ key_password=None,
+ strict=None,
+ timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
+ ssl_context=None,
+ server_hostname=None,
+ **kw
+ ):
+
+ HTTPConnection.__init__(self, host, port, strict=strict, timeout=timeout, **kw)
+
+ self.key_file = key_file
+ self.cert_file = cert_file
+ self.key_password = key_password
+ self.ssl_context = ssl_context
+ self.server_hostname = server_hostname
+
+ # Required property for Google AppEngine 1.9.0 which otherwise causes
+ # HTTPS requests to go out as HTTP. (See Issue #356)
+ self._protocol = "https"
+
+ def set_cert(
+ self,
+ key_file=None,
+ cert_file=None,
+ cert_reqs=None,
+ key_password=None,
+ ca_certs=None,
+ assert_hostname=None,
+ assert_fingerprint=None,
+ ca_cert_dir=None,
+ ca_cert_data=None,
+ ):
+ """
+ This method should only be called once, before the connection is used.
+ """
+ # If cert_reqs is not provided we'll assume CERT_REQUIRED unless we also
+ # have an SSLContext object in which case we'll use its verify_mode.
+ if cert_reqs is None:
+ if self.ssl_context is not None:
+ cert_reqs = self.ssl_context.verify_mode
+ else:
+ cert_reqs = resolve_cert_reqs(None)
+
+ self.key_file = key_file
+ self.cert_file = cert_file
+ self.cert_reqs = cert_reqs
+ self.key_password = key_password
+ self.assert_hostname = assert_hostname
+ self.assert_fingerprint = assert_fingerprint
+ self.ca_certs = ca_certs and os.path.expanduser(ca_certs)
+ self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir)
+ self.ca_cert_data = ca_cert_data
+
+ def connect(self):
+ # Add certificate verification
+ conn = self._new_conn()
+ hostname = self.host
+ tls_in_tls = False
+
+ if self._is_using_tunnel():
+ if self.tls_in_tls_required:
+ conn = self._connect_tls_proxy(hostname, conn)
+ tls_in_tls = True
+
+ self.sock = conn
+
+ # Calls self._set_hostport(), so self.host is
+ # self._tunnel_host below.
+ self._tunnel()
+ # Mark this connection as not reusable
+ self.auto_open = 0
+
+ # Override the host with the one we're requesting data from.
+ hostname = self._tunnel_host
+
+ server_hostname = hostname
+ if self.server_hostname is not None:
+ server_hostname = self.server_hostname
+
+ is_time_off = datetime.date.today() < RECENT_DATE
+ if is_time_off:
+ warnings.warn(
+ (
+ "System time is way off (before {0}). This will probably "
+ "lead to SSL verification errors"
+ ).format(RECENT_DATE),
+ SystemTimeWarning,
+ )
+
+ # Wrap socket using verification with the root certs in
+ # trusted_root_certs
+ default_ssl_context = False
+ if self.ssl_context is None:
+ default_ssl_context = True
+ self.ssl_context = create_urllib3_context(
+ ssl_version=resolve_ssl_version(self.ssl_version),
+ cert_reqs=resolve_cert_reqs(self.cert_reqs),
+ )
+
+ context = self.ssl_context
+ context.verify_mode = resolve_cert_reqs(self.cert_reqs)
+
+ # Try to load OS default certs if none are given.
+ # Works well on Windows (requires Python3.4+)
+ if (
+ not self.ca_certs
+ and not self.ca_cert_dir
+ and not self.ca_cert_data
+ and default_ssl_context
+ and hasattr(context, "load_default_certs")
+ ):
+ context.load_default_certs()
+
+ self.sock = ssl_wrap_socket(
+ sock=conn,
+ keyfile=self.key_file,
+ certfile=self.cert_file,
+ key_password=self.key_password,
+ ca_certs=self.ca_certs,
+ ca_cert_dir=self.ca_cert_dir,
+ ca_cert_data=self.ca_cert_data,
+ server_hostname=server_hostname,
+ ssl_context=context,
+ tls_in_tls=tls_in_tls,
+ )
+
+ # If we're using all defaults and the connection
+ # is TLSv1 or TLSv1.1 we throw a DeprecationWarning
+ # for the host.
+ if (
+ default_ssl_context
+ and self.ssl_version is None
+ and hasattr(self.sock, "version")
+ and self.sock.version() in {"TLSv1", "TLSv1.1"}
+ ):
+ warnings.warn(
+ "Negotiating TLSv1/TLSv1.1 by default is deprecated "
+ "and will be disabled in urllib3 v2.0.0. Connecting to "
+ "'%s' with '%s' can be enabled by explicitly opting-in "
+ "with 'ssl_version'" % (self.host, self.sock.version()),
+ DeprecationWarning,
+ )
+
+ if self.assert_fingerprint:
+ assert_fingerprint(
+ self.sock.getpeercert(binary_form=True), self.assert_fingerprint
+ )
+ elif (
+ context.verify_mode != ssl.CERT_NONE
+ and not getattr(context, "check_hostname", False)
+ and self.assert_hostname is not False
+ ):
+ # While urllib3 attempts to always turn off hostname matching from
+ # the TLS library, this cannot always be done. So we check whether
+ # the TLS Library still thinks it's matching hostnames.
+ cert = self.sock.getpeercert()
+ if not cert.get("subjectAltName", ()):
+ warnings.warn(
+ (
+ "Certificate for {0} has no `subjectAltName`, falling back to check for a "
+ "`commonName` for now. This feature is being removed by major browsers and "
+ "deprecated by RFC 2818. (See https://github.com/urllib3/urllib3/issues/497 "
+ "for details.)".format(hostname)
+ ),
+ SubjectAltNameWarning,
+ )
+ _match_hostname(cert, self.assert_hostname or server_hostname)
+
+ self.is_verified = (
+ context.verify_mode == ssl.CERT_REQUIRED
+ or self.assert_fingerprint is not None
+ )
+
+ def _connect_tls_proxy(self, hostname, conn):
+ """
+ Establish a TLS connection to the proxy using the provided SSL context.
+ """
+ proxy_config = self.proxy_config
+ ssl_context = proxy_config.ssl_context
+ if ssl_context:
+ # If the user provided a proxy context, we assume CA and client
+ # certificates have already been set
+ return ssl_wrap_socket(
+ sock=conn,
+ server_hostname=hostname,
+ ssl_context=ssl_context,
+ )
+
+ ssl_context = create_proxy_ssl_context(
+ self.ssl_version,
+ self.cert_reqs,
+ self.ca_certs,
+ self.ca_cert_dir,
+ self.ca_cert_data,
+ )
+
+ # If no cert was provided, use only the default options for server
+ # certificate validation
+ socket = ssl_wrap_socket(
+ sock=conn,
+ ca_certs=self.ca_certs,
+ ca_cert_dir=self.ca_cert_dir,
+ ca_cert_data=self.ca_cert_data,
+ server_hostname=hostname,
+ ssl_context=ssl_context,
+ )
+
+ if ssl_context.verify_mode != ssl.CERT_NONE and not getattr(
+ ssl_context, "check_hostname", False
+ ):
+ # While urllib3 attempts to always turn off hostname matching from
+ # the TLS library, this cannot always be done. So we check whether
+ # the TLS Library still thinks it's matching hostnames.
+ cert = socket.getpeercert()
+ if not cert.get("subjectAltName", ()):
+ warnings.warn(
+ (
+ "Certificate for {0} has no `subjectAltName`, falling back to check for a "
+ "`commonName` for now. This feature is being removed by major browsers and "
+ "deprecated by RFC 2818. (See https://github.com/urllib3/urllib3/issues/497 "
+ "for details.)".format(hostname)
+ ),
+ SubjectAltNameWarning,
+ )
+ _match_hostname(cert, hostname)
+
+ self.proxy_is_verified = ssl_context.verify_mode == ssl.CERT_REQUIRED
+ return socket
+
+
+def _match_hostname(cert, asserted_hostname):
+ # Our upstream implementation of ssl.match_hostname()
+ # only applies this normalization to IP addresses so it doesn't
+ # match DNS SANs so we do the same thing!
+ stripped_hostname = asserted_hostname.strip("u[]")
+ if is_ipaddress(stripped_hostname):
+ asserted_hostname = stripped_hostname
+
+ try:
+ match_hostname(cert, asserted_hostname)
+ except CertificateError as e:
+ log.warning(
+ "Certificate did not match expected hostname: %s. Certificate: %s",
+ asserted_hostname,
+ cert,
+ )
+ # Add cert to exception and reraise so client code can inspect
+ # the cert when catching the exception, if they want to
+ e._peer_cert = cert
+ raise
+
+
+def _get_default_user_agent():
+ return "python-urllib3/%s" % __version__
+
+
+class DummyConnection(object):
+ """Used to detect a failed ConnectionCls import."""
+
+ pass
+
+
+if not ssl:
+ HTTPSConnection = DummyConnection # noqa: F811
+
+
+VerifiedHTTPSConnection = HTTPSConnection
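A minimal sketch of driving the `HTTPConnection` class above directly, assuming network access; most callers go through a pool or `PoolManager` rather than this low-level API:

```python
from urllib3.connection import HTTPConnection  # low-level API, illustration only

# One plain-HTTP request over a single connection (requires network access).
conn = HTTPConnection("example.com", 80, timeout=10)
conn.request("GET", "/", headers={"Accept": "text/html"})
resp = conn.getresponse()
print(resp.status)
conn.close()
```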
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/connectionpool.py b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/connectionpool.py
new file mode 100644
index 0000000..15bffcb
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/connectionpool.py
@@ -0,0 +1,1108 @@
+from __future__ import absolute_import
+
+import errno
+import logging
+import re
+import socket
+import sys
+import warnings
+from socket import error as SocketError
+from socket import timeout as SocketTimeout
+
+from .connection import (
+ BaseSSLError,
+ BrokenPipeError,
+ DummyConnection,
+ HTTPConnection,
+ HTTPException,
+ HTTPSConnection,
+ VerifiedHTTPSConnection,
+ port_by_scheme,
+)
+from .exceptions import (
+ ClosedPoolError,
+ EmptyPoolError,
+ HeaderParsingError,
+ HostChangedError,
+ InsecureRequestWarning,
+ LocationValueError,
+ MaxRetryError,
+ NewConnectionError,
+ ProtocolError,
+ ProxyError,
+ ReadTimeoutError,
+ SSLError,
+ TimeoutError,
+)
+from .packages import six
+from .packages.six.moves import queue
+from .request import RequestMethods
+from .response import HTTPResponse
+from .util.connection import is_connection_dropped
+from .util.proxy import connection_requires_http_tunnel
+from .util.queue import LifoQueue
+from .util.request import set_file_position
+from .util.response import assert_header_parsing
+from .util.retry import Retry
+from .util.ssl_match_hostname import CertificateError
+from .util.timeout import Timeout
+from .util.url import Url, _encode_target
+from .util.url import _normalize_host as normalize_host
+from .util.url import get_host, parse_url
+
+xrange = six.moves.xrange
+
+log = logging.getLogger(__name__)
+
+_Default = object()
+
+
+# Pool objects
+class ConnectionPool(object):
+ """
+ Base class for all connection pools, such as
+ :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
+
+ .. note::
+ ConnectionPool.urlopen() does not normalize or percent-encode target URIs
+ which is useful if your target server doesn't support percent-encoded
+ target URIs.
+ """
+
+ scheme = None
+ QueueCls = LifoQueue
+
+ def __init__(self, host, port=None):
+ if not host:
+ raise LocationValueError("No host specified.")
+
+ self.host = _normalize_host(host, scheme=self.scheme)
+ self._proxy_host = host.lower()
+ self.port = port
+
+ def __str__(self):
+ return "%s(host=%r, port=%r)" % (type(self).__name__, self.host, self.port)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.close()
+ # Return False to re-raise any potential exceptions
+ return False
+
+ def close(self):
+ """
+ Close all pooled connections and disable the pool.
+ """
+ pass
+
+
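A hedged sketch of the pool-per-host usage that `HTTPConnectionPool` (defined below) provides, assuming network access; `example.com` is a placeholder host:

```python
from urllib3 import HTTPConnectionPool  # illustration only; requires network access

# One pool per host; up to `maxsize` connections are kept alive and reused.
pool = HTTPConnectionPool("example.com", port=80, maxsize=2, block=True)
resp = pool.request("GET", "/")
print(resp.status)
pool.close()
```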
+# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
+_blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK}
+
+
+class HTTPConnectionPool(ConnectionPool, RequestMethods):
+ """
+ Thread-safe connection pool for one host.
+
+ :param host:
+ Host used for this HTTP Connection (e.g. "localhost"), passed into
+ :class:`http.client.HTTPConnection`.
+
+ :param port:
+ Port used for this HTTP Connection (None is equivalent to 80), passed
+ into :class:`http.client.HTTPConnection`.
+
+ :param strict:
+ Causes BadStatusLine to be raised if the status line can't be parsed
+ as a valid HTTP/1.0 or 1.1 status line, passed into
+ :class:`http.client.HTTPConnection`.
+
+ .. note::
+ Only works in Python 2. This parameter is ignored in Python 3.
+
+ :param timeout:
+ Socket timeout in seconds for each individual connection. This can
+ be a float or integer, which sets the timeout for the HTTP request,
+ or an instance of :class:`urllib3.util.Timeout` which gives you more
+ fine-grained control over request timeouts. After the constructor has
+ been parsed, this is always a `urllib3.util.Timeout` object.
+
+ :param maxsize:
+ Number of connections to save that can be reused. More than 1 is useful
+ in multithreaded situations. If ``block`` is set to False, more
+ connections will be created but they will not be saved once they've
+ been used.
+
+ :param block:
+ If set to True, no more than ``maxsize`` connections will be used at
+ a time. When no free connections are available, the call will block
+ until a connection has been released. This is a useful side effect for
+ particular multithreaded situations where one does not want to use more
+ than maxsize connections per host to prevent flooding.
+
+ :param headers:
+ Headers to include with all requests, unless other headers are given
+ explicitly.
+
+ :param retries:
+ Retry configuration to use by default with requests in this pool.
+
+ :param _proxy:
+ Parsed proxy URL, should not be used directly, instead, see
+ :class:`urllib3.ProxyManager`
+
+ :param _proxy_headers:
+ A dictionary with proxy headers, should not be used directly,
+ instead, see :class:`urllib3.ProxyManager`
+
+ :param \\**conn_kw:
+ Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
+ :class:`urllib3.connection.HTTPSConnection` instances.
+ """
+
+ scheme = "http"
+ ConnectionCls = HTTPConnection
+ ResponseCls = HTTPResponse
+
+ def __init__(
+ self,
+ host,
+ port=None,
+ strict=False,
+ timeout=Timeout.DEFAULT_TIMEOUT,
+ maxsize=1,
+ block=False,
+ headers=None,
+ retries=None,
+ _proxy=None,
+ _proxy_headers=None,
+ _proxy_config=None,
+ **conn_kw
+ ):
+ ConnectionPool.__init__(self, host, port)
+ RequestMethods.__init__(self, headers)
+
+ self.strict = strict
+
+ if not isinstance(timeout, Timeout):
+ timeout = Timeout.from_float(timeout)
+
+ if retries is None:
+ retries = Retry.DEFAULT
+
+ self.timeout = timeout
+ self.retries = retries
+
+ self.pool = self.QueueCls(maxsize)
+ self.block = block
+
+ self.proxy = _proxy
+ self.proxy_headers = _proxy_headers or {}
+ self.proxy_config = _proxy_config
+
+ # Fill the queue up so that doing get() on it will block properly
+ for _ in xrange(maxsize):
+ self.pool.put(None)
+
+ # These are mostly for testing and debugging purposes.
+ self.num_connections = 0
+ self.num_requests = 0
+ self.conn_kw = conn_kw
+
+ if self.proxy:
+ # Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
+ # We cannot know if the user has added default socket options, so we cannot replace the
+ # list.
+ self.conn_kw.setdefault("socket_options", [])
+
+ self.conn_kw["proxy"] = self.proxy
+ self.conn_kw["proxy_config"] = self.proxy_config
+
+ def _new_conn(self):
+ """
+ Return a fresh :class:`HTTPConnection`.
+ """
+ self.num_connections += 1
+ log.debug(
+ "Starting new HTTP connection (%d): %s:%s",
+ self.num_connections,
+ self.host,
+ self.port or "80",
+ )
+
+ conn = self.ConnectionCls(
+ host=self.host,
+ port=self.port,
+ timeout=self.timeout.connect_timeout,
+ strict=self.strict,
+ **self.conn_kw
+ )
+ return conn
+
+ def _get_conn(self, timeout=None):
+ """
+ Get a connection. Will return a pooled connection if one is available.
+
+ If no connections are available and :prop:`.block` is ``False``, then a
+ fresh connection is returned.
+
+ :param timeout:
+ Seconds to wait before giving up and raising
+ :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
+ :prop:`.block` is ``True``.
+ """
+ conn = None
+ try:
+ conn = self.pool.get(block=self.block, timeout=timeout)
+
+ except AttributeError: # self.pool is None
+ raise ClosedPoolError(self, "Pool is closed.")
+
+ except queue.Empty:
+ if self.block:
+ raise EmptyPoolError(
+ self,
+ "Pool reached maximum size and no more connections are allowed.",
+ )
+ pass # Oh well, we'll create a new connection then
+
+ # If this is a persistent connection, check if it got disconnected
+ if conn and is_connection_dropped(conn):
+ log.debug("Resetting dropped connection: %s", self.host)
+ conn.close()
+ if getattr(conn, "auto_open", 1) == 0:
+ # This is a proxied connection that has been mutated by
+ # http.client._tunnel() and cannot be reused (since it would
+ # attempt to bypass the proxy)
+ conn = None
+
+ return conn or self._new_conn()
+
+ def _put_conn(self, conn):
+ """
+ Put a connection back into the pool.
+
+ :param conn:
+ Connection object for the current host and port as returned by
+ :meth:`._new_conn` or :meth:`._get_conn`.
+
+ If the pool is already full, the connection is closed and discarded
+ because we exceeded maxsize. If connections are discarded frequently,
+ then maxsize should be increased.
+
+ If the pool is closed, then the connection will be closed and discarded.
+ """
+ try:
+ self.pool.put(conn, block=False)
+ return # Everything is dandy, done.
+ except AttributeError:
+ # self.pool is None.
+ pass
+ except queue.Full:
+ # This should never happen if self.block == True
+ log.warning(
+ "Connection pool is full, discarding connection: %s. Connection pool size: %s",
+ self.host,
+ self.pool.qsize(),
+ )
+ # Connection never got put back into the pool, close it.
+ if conn:
+ conn.close()
+
+ def _validate_conn(self, conn):
+ """
+ Called right before a request is made, after the socket is created.
+ """
+ pass
+
+ def _prepare_proxy(self, conn):
+ # Nothing to do for HTTP connections.
+ pass
+
+ def _get_timeout(self, timeout):
+ """Helper that always returns a :class:`urllib3.util.Timeout`"""
+ if timeout is _Default:
+ return self.timeout.clone()
+
+ if isinstance(timeout, Timeout):
+ return timeout.clone()
+ else:
+ # User passed us an int/float. This is for backwards compatibility,
+ # can be removed later
+ return Timeout.from_float(timeout)
+
+ def _raise_timeout(self, err, url, timeout_value):
+ """Is the error actually a timeout? Will raise a ReadTimeout or pass"""
+
+ if isinstance(err, SocketTimeout):
+ raise ReadTimeoutError(
+ self, url, "Read timed out. (read timeout=%s)" % timeout_value
+ )
+
+ # See the above comment about EAGAIN in Python 3. In Python 2 we have
+ # to specifically catch it and throw the timeout error
+ if hasattr(err, "errno") and err.errno in _blocking_errnos:
+ raise ReadTimeoutError(
+ self, url, "Read timed out. (read timeout=%s)" % timeout_value
+ )
+
+ # Catch possible read timeouts thrown as SSL errors. If not the
+ # case, rethrow the original. We need to do this because of:
+ # http://bugs.python.org/issue10272
+ if "timed out" in str(err) or "did not complete (read)" in str(
+ err
+ ): # Python < 2.7.4
+ raise ReadTimeoutError(
+ self, url, "Read timed out. (read timeout=%s)" % timeout_value
+ )
+
+ def _make_request(
+ self, conn, method, url, timeout=_Default, chunked=False, **httplib_request_kw
+ ):
+ """
+ Perform a request on a given urllib connection object taken from our
+ pool.
+
+ :param conn:
+ a connection from one of our connection pools
+
+ :param timeout:
+ Socket timeout in seconds for the request. This can be a
+ float or integer, which will set the same timeout value for
+ the socket connect and the socket read, or an instance of
+ :class:`urllib3.util.Timeout`, which gives you more fine-grained
+ control over your timeouts.
+ """
+ self.num_requests += 1
+
+ timeout_obj = self._get_timeout(timeout)
+ timeout_obj.start_connect()
+ conn.timeout = timeout_obj.connect_timeout
+
+ # Trigger any extra validation we need to do.
+ try:
+ self._validate_conn(conn)
+ except (SocketTimeout, BaseSSLError) as e:
+ # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
+ self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
+ raise
+
+ # conn.request() calls http.client.*.request, not the method in
+ # urllib3.request. It also calls makefile (recv) on the socket.
+ try:
+ if chunked:
+ conn.request_chunked(method, url, **httplib_request_kw)
+ else:
+ conn.request(method, url, **httplib_request_kw)
+
+ # We are swallowing BrokenPipeError (errno.EPIPE) since the server is
+ # legitimately able to close the connection after sending a valid response.
+ # With this behaviour, the received response is still readable.
+ except BrokenPipeError:
+ # Python 3
+ pass
+ except IOError as e:
+ # Python 2 and macOS/Linux
+ # EPIPE and ESHUTDOWN are BrokenPipeError on Python 2, and EPROTOTYPE is needed on macOS
+ # https://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/
+ if e.errno not in {
+ errno.EPIPE,
+ errno.ESHUTDOWN,
+ errno.EPROTOTYPE,
+ }:
+ raise
+
+ # Reset the timeout for the recv() on the socket
+ read_timeout = timeout_obj.read_timeout
+
+ # App Engine doesn't have a sock attr
+ if getattr(conn, "sock", None):
+ # In Python 3 socket.py will catch EAGAIN and return None when you
+ # try and read into the file pointer created by http.client, which
+ # instead raises a BadStatusLine exception. Instead of catching
+ # the exception and assuming all BadStatusLine exceptions are read
+ # timeouts, check for a zero timeout before making the request.
+ if read_timeout == 0:
+ raise ReadTimeoutError(
+ self, url, "Read timed out. (read timeout=%s)" % read_timeout
+ )
+ if read_timeout is Timeout.DEFAULT_TIMEOUT:
+ conn.sock.settimeout(socket.getdefaulttimeout())
+ else: # None or a value
+ conn.sock.settimeout(read_timeout)
+
+ # Receive the response from the server
+ try:
+ try:
+ # Python 2.7, use buffering of HTTP responses
+ httplib_response = conn.getresponse(buffering=True)
+ except TypeError:
+ # Python 3
+ try:
+ httplib_response = conn.getresponse()
+ except BaseException as e:
+ # Remove the TypeError from the exception chain in
+ # Python 3 (including for exceptions like SystemExit).
+ # Otherwise it looks like a bug in the code.
+ six.raise_from(e, None)
+ except (SocketTimeout, BaseSSLError, SocketError) as e:
+ self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
+ raise
+
+ # AppEngine doesn't have a version attr.
+ http_version = getattr(conn, "_http_vsn_str", "HTTP/?")
+ log.debug(
+ '%s://%s:%s "%s %s %s" %s %s',
+ self.scheme,
+ self.host,
+ self.port,
+ method,
+ url,
+ http_version,
+ httplib_response.status,
+ httplib_response.length,
+ )
+
+ try:
+ assert_header_parsing(httplib_response.msg)
+ except (HeaderParsingError, TypeError) as hpe: # Platform-specific: Python 3
+ log.warning(
+ "Failed to parse headers (url=%s): %s",
+ self._absolute_url(url),
+ hpe,
+ exc_info=True,
+ )
+
+ return httplib_response
+
+ def _absolute_url(self, path):
+ return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url
+
+ def close(self):
+ """
+ Close all pooled connections and disable the pool.
+ """
+ if self.pool is None:
+ return
+ # Disable access to the pool
+ old_pool, self.pool = self.pool, None
+
+ try:
+ while True:
+ conn = old_pool.get(block=False)
+ if conn:
+ conn.close()
+
+ except queue.Empty:
+ pass # Done.
+
+ def is_same_host(self, url):
+ """
+ Check if the given ``url`` is a member of the same host as this
+ connection pool.
+ """
+ if url.startswith("/"):
+ return True
+
+ # TODO: Add optional support for socket.gethostbyname checking.
+ scheme, host, port = get_host(url)
+ if host is not None:
+ host = _normalize_host(host, scheme=scheme)
+
+ # Use explicit default port for comparison when none is given
+ if self.port and not port:
+ port = port_by_scheme.get(scheme)
+ elif not self.port and port == port_by_scheme.get(scheme):
+ port = None
+
+ return (scheme, host, port) == (self.scheme, self.host, self.port)
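+
+ # Hedged illustration (comments only, not part of upstream urllib3): for a
+ # pool built as HTTPConnectionPool("example.com"), both
+ # is_same_host("http://example.com/a") and is_same_host("/relative/path")
+ # return True, while is_same_host("http://example.com:8080/") returns False
+ # because the explicit port differs from the scheme default of 80.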
+
+ def urlopen(
+ self,
+ method,
+ url,
+ body=None,
+ headers=None,
+ retries=None,
+ redirect=True,
+ assert_same_host=True,
+ timeout=_Default,
+ pool_timeout=None,
+ release_conn=None,
+ chunked=False,
+ body_pos=None,
+ **response_kw
+ ):
+ """
+ Get a connection from the pool and perform an HTTP request. This is the
+ lowest level call for making a request, so you'll need to specify all
+ the raw details.
+
+ .. note::
+
+ More commonly, it's appropriate to use a convenience method provided
+ by :class:`.RequestMethods`, such as :meth:`request`.
+
+ .. note::
+
+ `release_conn` will only behave as expected if
+ `preload_content=False` because we want to make
+ `preload_content=False` the default behaviour someday soon without
+ breaking backwards compatibility.
+
+ :param method:
+ HTTP request method (such as GET, POST, PUT, etc.)
+
+ :param url:
+ The URL to perform the request on.
+
+ :param body:
+ Data to send in the request body, either :class:`str`, :class:`bytes`,
+ an iterable of :class:`str`/:class:`bytes`, or a file-like object.
+
+ :param headers:
+ Dictionary of custom headers to send, such as User-Agent,
+ If-None-Match, etc. If None, pool headers are used. If provided,
+ these headers completely replace any pool-specific headers.
+
+ :param retries:
+ Configure the number of retries to allow before raising a
+ :class:`~urllib3.exceptions.MaxRetryError` exception.
+
+ Pass ``None`` to retry until you receive a response. Pass a
+ :class:`~urllib3.util.retry.Retry` object for fine-grained control
+ over different types of retries.
+ Pass an integer number to retry connection errors that many times,
+ but no other types of errors. Pass zero to never retry.
+
+ If ``False``, then retries are disabled and any exception is raised
+ immediately. Also, instead of raising a MaxRetryError on redirects,
+ the redirect response will be returned.
+
+ :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
+
+ :param redirect:
+ If True, automatically handle redirects (status codes 301, 302,
+ 303, 307, 308). Each redirect counts as a retry. Disabling retries
+ will disable redirect, too.
+
+ :param assert_same_host:
+ If ``True``, will make sure that the host of the pool requests is
+ consistent else will raise HostChangedError. When ``False``, you can
+ use the pool on an HTTP proxy and request foreign hosts.
+
+ :param timeout:
+ If specified, overrides the default timeout for this one
+ request. It may be a float (in seconds) or an instance of
+ :class:`urllib3.util.Timeout`.
+
+ :param pool_timeout:
+ If set and the pool is set to block=True, then this method will
+ block for ``pool_timeout`` seconds and raise EmptyPoolError if no
+ connection is available within the time period.
+
+ :param release_conn:
+ If False, then the urlopen call will not release the connection
+ back into the pool once a response is received (but will release if
+ you read the entire contents of the response such as when
+ `preload_content=True`). This is useful if you're not preloading
+ the response's content immediately. You will need to call
+ ``r.release_conn()`` on the response ``r`` to return the connection
+ back into the pool. If None, it takes the value of
+ ``response_kw.get('preload_content', True)``.
+
+ :param chunked:
+ If True, urllib3 will send the body using chunked transfer
+ encoding. Otherwise, urllib3 will send the body using the standard
+ content-length form. Defaults to False.
+
+ :param int body_pos:
+ Position to seek to in file-like body in the event of a retry or
+ redirect. Typically this won't need to be set because urllib3 will
+ auto-populate the value when needed.
+
+ :param \\**response_kw:
+ Additional parameters are passed to
+ :meth:`urllib3.response.HTTPResponse.from_httplib`
+ """
+
+ parsed_url = parse_url(url)
+ destination_scheme = parsed_url.scheme
+
+ if headers is None:
+ headers = self.headers
+
+ if not isinstance(retries, Retry):
+ retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
+
+ if release_conn is None:
+ release_conn = response_kw.get("preload_content", True)
+
+ # Check host
+ if assert_same_host and not self.is_same_host(url):
+ raise HostChangedError(self, url, retries)
+
+ # Ensure that the URL we're connecting to is properly encoded
+ if url.startswith("/"):
+ url = six.ensure_str(_encode_target(url))
+ else:
+ url = six.ensure_str(parsed_url.url)
+
+ conn = None
+
+ # Track whether `conn` needs to be released before
+ # returning/raising/recursing. Update this variable if necessary, and
+ # leave `release_conn` constant throughout the function. That way, if
+ # the function recurses, the original value of `release_conn` will be
+ # passed down into the recursive call, and its value will be respected.
+ #
+ # See issue #651 [1] for details.
+ #
+ # [1]
+ release_this_conn = release_conn
+
+ http_tunnel_required = connection_requires_http_tunnel(
+ self.proxy, self.proxy_config, destination_scheme
+ )
+
+ # Merge the proxy headers. Only done when not using HTTP CONNECT. We
+ # have to copy the headers dict so we can safely change it without those
+ # changes being reflected in anyone else's copy.
+ if not http_tunnel_required:
+ headers = headers.copy()
+ headers.update(self.proxy_headers)
+
+ # Must keep the exception bound to a separate variable or else Python 3
+ # complains about UnboundLocalError.
+ err = None
+
+ # Keep track of whether we cleanly exited the except block. This
+ # ensures we do proper cleanup in finally.
+ clean_exit = False
+
+ # Rewind body position, if needed. Record current position
+ # for future rewinds in the event of a redirect/retry.
+ body_pos = set_file_position(body, body_pos)
+
+ try:
+ # Request a connection from the queue.
+ timeout_obj = self._get_timeout(timeout)
+ conn = self._get_conn(timeout=pool_timeout)
+
+ conn.timeout = timeout_obj.connect_timeout
+
+ is_new_proxy_conn = self.proxy is not None and not getattr(
+ conn, "sock", None
+ )
+ if is_new_proxy_conn and http_tunnel_required:
+ self._prepare_proxy(conn)
+
+ # Make the request on the httplib connection object.
+ httplib_response = self._make_request(
+ conn,
+ method,
+ url,
+ timeout=timeout_obj,
+ body=body,
+ headers=headers,
+ chunked=chunked,
+ )
+
+ # If we're going to release the connection in ``finally:``, then
+ # the response doesn't need to know about the connection. Otherwise
+ # it will also try to release it and we'll have a double-release
+ # mess.
+ response_conn = conn if not release_conn else None
+
+ # Pass method to Response for length checking
+ response_kw["request_method"] = method
+
+ # Import httplib's response into our own wrapper object
+ response = self.ResponseCls.from_httplib(
+ httplib_response,
+ pool=self,
+ connection=response_conn,
+ retries=retries,
+ **response_kw
+ )
+
+ # Everything went great!
+ clean_exit = True
+
+ except EmptyPoolError:
+ # Didn't get a connection from the pool, no need to clean up
+ clean_exit = True
+ release_this_conn = False
+ raise
+
+ except (
+ TimeoutError,
+ HTTPException,
+ SocketError,
+ ProtocolError,
+ BaseSSLError,
+ SSLError,
+ CertificateError,
+ ) as e:
+ # Discard the connection for these exceptions. It will be
+ # replaced during the next _get_conn() call.
+ clean_exit = False
+
+ def _is_ssl_error_message_from_http_proxy(ssl_error):
+ # We're trying to detect the message 'WRONG_VERSION_NUMBER' but
+ # SSLErrors are kinda all over the place when it comes to the message,
+ # so we try to cover our bases here!
+ message = " ".join(re.split("[^a-z]", str(ssl_error).lower()))
+ return (
+ "wrong version number" in message or "unknown protocol" in message
+ )
+
+ # Try to detect a common user error with proxies which is to
+ # set an HTTP proxy to be HTTPS when it should be 'http://'
+ # (ie {'http': 'http://proxy', 'https': 'https://proxy'})
+ # Instead we add a nice error message and point to a URL.
+ if (
+ isinstance(e, BaseSSLError)
+ and self.proxy
+ and _is_ssl_error_message_from_http_proxy(e)
+ ):
+ e = ProxyError(
+ "Your proxy appears to only use HTTP and not HTTPS, "
+ "try changing your proxy URL to be HTTP. See: "
+ "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html"
+ "#https-proxy-error-http-proxy",
+ SSLError(e),
+ )
+ elif isinstance(e, (BaseSSLError, CertificateError)):
+ e = SSLError(e)
+ elif isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
+ e = ProxyError("Cannot connect to proxy.", e)
+ elif isinstance(e, (SocketError, HTTPException)):
+ e = ProtocolError("Connection aborted.", e)
+
+ retries = retries.increment(
+ method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2]
+ )
+ retries.sleep()
+
+ # Keep track of the error for the retry warning.
+ err = e
+
+ finally:
+ if not clean_exit:
+ # We hit some kind of exception, handled or otherwise. We need
+ # to throw the connection away unless explicitly told not to.
+ # Close the connection, set the variable to None, and make sure
+ # we put the None back in the pool to avoid leaking it.
+ conn = conn and conn.close()
+ release_this_conn = True
+
+ if release_this_conn:
+ # Put the connection back to be reused. If the connection is
+ # expired then it will be None, which will get replaced with a
+ # fresh connection during _get_conn.
+ self._put_conn(conn)
+
+ if not conn:
+ # Try again
+ log.warning(
+ "Retrying (%r) after connection broken by '%r': %s", retries, err, url
+ )
+ return self.urlopen(
+ method,
+ url,
+ body,
+ headers,
+ retries,
+ redirect,
+ assert_same_host,
+ timeout=timeout,
+ pool_timeout=pool_timeout,
+ release_conn=release_conn,
+ chunked=chunked,
+ body_pos=body_pos,
+ **response_kw
+ )
+
+ # Handle redirect?
+ redirect_location = redirect and response.get_redirect_location()
+ if redirect_location:
+ if response.status == 303:
+ method = "GET"
+
+ try:
+ retries = retries.increment(method, url, response=response, _pool=self)
+ except MaxRetryError:
+ if retries.raise_on_redirect:
+ response.drain_conn()
+ raise
+ return response
+
+ response.drain_conn()
+ retries.sleep_for_retry(response)
+ log.debug("Redirecting %s -> %s", url, redirect_location)
+ return self.urlopen(
+ method,
+ redirect_location,
+ body,
+ headers,
+ retries=retries,
+ redirect=redirect,
+ assert_same_host=assert_same_host,
+ timeout=timeout,
+ pool_timeout=pool_timeout,
+ release_conn=release_conn,
+ chunked=chunked,
+ body_pos=body_pos,
+ **response_kw
+ )
+
+ # Check if we should retry the HTTP response.
+ has_retry_after = bool(response.getheader("Retry-After"))
+ if retries.is_retry(method, response.status, has_retry_after):
+ try:
+ retries = retries.increment(method, url, response=response, _pool=self)
+ except MaxRetryError:
+ if retries.raise_on_status:
+ response.drain_conn()
+ raise
+ return response
+
+ response.drain_conn()
+ retries.sleep(response)
+ log.debug("Retry: %s", url)
+ return self.urlopen(
+ method,
+ url,
+ body,
+ headers,
+ retries=retries,
+ redirect=redirect,
+ assert_same_host=assert_same_host,
+ timeout=timeout,
+ pool_timeout=pool_timeout,
+ release_conn=release_conn,
+ chunked=chunked,
+ body_pos=body_pos,
+ **response_kw
+ )
+
+ return response
+
+
+class HTTPSConnectionPool(HTTPConnectionPool):
+ """
+ Same as :class:`.HTTPConnectionPool`, but HTTPS.
+
+ :class:`.HTTPSConnection` uses one of ``assert_fingerprint``,
+ ``assert_hostname`` and ``host`` in this order to verify connections.
+ If ``assert_hostname`` is False, no verification is done.
+
+ The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,
+ ``ca_cert_dir``, ``ssl_version``, ``key_password`` are only used if :mod:`ssl`
+ is available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade
+ the connection socket into an SSL socket.
+ """
+
+ scheme = "https"
+ ConnectionCls = HTTPSConnection
+
+ def __init__(
+ self,
+ host,
+ port=None,
+ strict=False,
+ timeout=Timeout.DEFAULT_TIMEOUT,
+ maxsize=1,
+ block=False,
+ headers=None,
+ retries=None,
+ _proxy=None,
+ _proxy_headers=None,
+ key_file=None,
+ cert_file=None,
+ cert_reqs=None,
+ key_password=None,
+ ca_certs=None,
+ ssl_version=None,
+ assert_hostname=None,
+ assert_fingerprint=None,
+ ca_cert_dir=None,
+ **conn_kw
+ ):
+
+ HTTPConnectionPool.__init__(
+ self,
+ host,
+ port,
+ strict,
+ timeout,
+ maxsize,
+ block,
+ headers,
+ retries,
+ _proxy,
+ _proxy_headers,
+ **conn_kw
+ )
+
+ self.key_file = key_file
+ self.cert_file = cert_file
+ self.cert_reqs = cert_reqs
+ self.key_password = key_password
+ self.ca_certs = ca_certs
+ self.ca_cert_dir = ca_cert_dir
+ self.ssl_version = ssl_version
+ self.assert_hostname = assert_hostname
+ self.assert_fingerprint = assert_fingerprint
+
+ def _prepare_conn(self, conn):
+ """
+ Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
+ and establish the tunnel if proxy is used.
+ """
+
+ if isinstance(conn, VerifiedHTTPSConnection):
+ conn.set_cert(
+ key_file=self.key_file,
+ key_password=self.key_password,
+ cert_file=self.cert_file,
+ cert_reqs=self.cert_reqs,
+ ca_certs=self.ca_certs,
+ ca_cert_dir=self.ca_cert_dir,
+ assert_hostname=self.assert_hostname,
+ assert_fingerprint=self.assert_fingerprint,
+ )
+ conn.ssl_version = self.ssl_version
+ return conn
+
+ def _prepare_proxy(self, conn):
+ """
+ Establishes a tunnel connection through HTTP CONNECT.
+
+ Tunnel connection is established early because otherwise httplib would
+ improperly set Host: header to proxy's IP:port.
+ """
+
+ conn.set_tunnel(self._proxy_host, self.port, self.proxy_headers)
+
+ if self.proxy.scheme == "https":
+ conn.tls_in_tls_required = True
+
+ conn.connect()
+
+ def _new_conn(self):
+ """
+ Return a fresh :class:`http.client.HTTPSConnection`.
+ """
+ self.num_connections += 1
+ log.debug(
+ "Starting new HTTPS connection (%d): %s:%s",
+ self.num_connections,
+ self.host,
+ self.port or "443",
+ )
+
+ if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
+ raise SSLError(
+ "Can't connect to HTTPS URL because the SSL module is not available."
+ )
+
+ actual_host = self.host
+ actual_port = self.port
+ if self.proxy is not None:
+ actual_host = self.proxy.host
+ actual_port = self.proxy.port
+
+ conn = self.ConnectionCls(
+ host=actual_host,
+ port=actual_port,
+ timeout=self.timeout.connect_timeout,
+ strict=self.strict,
+ cert_file=self.cert_file,
+ key_file=self.key_file,
+ key_password=self.key_password,
+ **self.conn_kw
+ )
+
+ return self._prepare_conn(conn)
+
+ def _validate_conn(self, conn):
+ """
+ Called right before a request is made, after the socket is created.
+ """
+ super(HTTPSConnectionPool, self)._validate_conn(conn)
+
+ # Force connect early to allow us to validate the connection.
+ if not getattr(conn, "sock", None): # AppEngine might not have `.sock`
+ conn.connect()
+
+ if not conn.is_verified:
+ warnings.warn(
+ (
+ "Unverified HTTPS request is being made to host '%s'. "
+ "Adding certificate verification is strongly advised. See: "
+ "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html"
+ "#ssl-warnings" % conn.host
+ ),
+ InsecureRequestWarning,
+ )
+
+ if getattr(conn, "proxy_is_verified", None) is False:
+ warnings.warn(
+ (
+ "Unverified HTTPS connection done to an HTTPS proxy. "
+ "Adding certificate verification is strongly advised. See: "
+ "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html"
+ "#ssl-warnings"
+ ),
+ InsecureRequestWarning,
+ )
+
+
+def connection_from_url(url, **kw):
+ """
+ Given a url, return an :class:`.ConnectionPool` instance of its host.
+
+ This is a shortcut for not having to parse out the scheme, host, and port
+ of the url before creating an :class:`.ConnectionPool` instance.
+
+ :param url:
+ Absolute URL string that must include the scheme. Port is optional.
+
+ :param \\**kw:
+ Passes additional parameters to the constructor of the appropriate
+ :class:`.ConnectionPool`. Useful for specifying things like
+ timeout, maxsize, headers, etc.
+
+ Example::
+
+ >>> conn = connection_from_url('http://google.com/')
+ >>> r = conn.request('GET', '/')
+ """
+ scheme, host, port = get_host(url)
+ port = port or port_by_scheme.get(scheme, 80)
+ if scheme == "https":
+ return HTTPSConnectionPool(host, port=port, **kw)
+ else:
+ return HTTPConnectionPool(host, port=port, **kw)
+
+
+def _normalize_host(host, scheme):
+ """
+ Normalize hosts for comparisons and use with sockets.
+ """
+
+ host = normalize_host(host, scheme)
+
+ # httplib doesn't like it when we include brackets in IPv6 addresses
+ # Specifically, if we include brackets but also pass the port then
+ # httplib crazily doubles up the square brackets on the Host header.
+ # Instead, we need to make sure we never pass ``None`` as the port.
+ # However, for backward compatibility reasons we can't actually
+ # *assert* that. See http://bugs.python.org/issue28539
+ if host.startswith("[") and host.endswith("]"):
+ host = host[1:-1]
+ return host
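+
+
+# --- Hedged usage sketch (not part of upstream urllib3) ---------------------
+# The docstrings above describe how ``maxsize``, ``block``, ``timeout`` and
+# ``retries`` interact. The helper below only illustrates wiring those
+# parameters together; the host names, paths and numeric values are
+# placeholders, not anything the library requires.
+def _example_pool_usage():
+    # Keep up to four reusable sockets and never open more than that at once.
+    pool = HTTPConnectionPool(
+        "example.com",
+        maxsize=4,
+        block=True,
+        timeout=Timeout(connect=2.0, read=5.0),
+        retries=Retry(total=3, backoff_factor=0.5),
+    )
+    # urlopen() is the low-level entry point; the request() helpers inherited
+    # from RequestMethods are the more common convenience wrappers.
+    response = pool.urlopen("GET", "/")
+
+    # The HTTPS pool accepts the same arguments plus TLS-specific ones; the
+    # CA bundle path below is a placeholder.
+    https_pool = HTTPSConnectionPool(
+        "example.com", ca_certs="/path/to/ca_bundle.pem", cert_reqs="CERT_REQUIRED"
+    )
+    return response.status, https_pool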
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/contrib/__init__.py b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/contrib/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/contrib/_appengine_environ.py b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/contrib/_appengine_environ.py
new file mode 100644
index 0000000..8765b90
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/contrib/_appengine_environ.py
@@ -0,0 +1,36 @@
+"""
+This module provides means to detect the App Engine environment.
+"""
+
+import os
+
+
+def is_appengine():
+ return is_local_appengine() or is_prod_appengine()
+
+
+def is_appengine_sandbox():
+ """Reports if the app is running in the first generation sandbox.
+
+ The second generation runtimes are technically still in a sandbox, but it
+ is much less restrictive, so generally you shouldn't need to check for it.
+ See https://cloud.google.com/appengine/docs/standard/runtimes
+ """
+ return is_appengine() and os.environ["APPENGINE_RUNTIME"] == "python27"
+
+
+def is_local_appengine():
+ return "APPENGINE_RUNTIME" in os.environ and os.environ.get(
+ "SERVER_SOFTWARE", ""
+ ).startswith("Development/")
+
+
+def is_prod_appengine():
+ return "APPENGINE_RUNTIME" in os.environ and os.environ.get(
+ "SERVER_SOFTWARE", ""
+ ).startswith("Google App Engine/")
+
+
+def is_prod_appengine_mvms():
+ """Deprecated."""
+ return False
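+
+
+# Hedged sketch (not part of upstream urllib3): how the detection helpers
+# above are typically combined by calling code.
+def _example_environment_summary():
+    """Return a short, human-readable label for the detected environment."""
+    if is_prod_appengine():
+        return "App Engine standard (first generation, production)"
+    if is_local_appengine():
+        return "App Engine local development server"
+    return "not running on first-generation App Engine"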
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__init__.py b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/contrib/_securetransport/bindings.py b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/contrib/_securetransport/bindings.py
new file mode 100644
index 0000000..264d564
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/contrib/_securetransport/bindings.py
@@ -0,0 +1,519 @@
+"""
+This module uses ctypes to bind a whole bunch of functions and constants from
+SecureTransport. The goal here is to provide the low-level API to
+SecureTransport. These are essentially the C-level functions and constants, and
+they're pretty gross to work with.
+
+This code is a bastardised version of the code found in Will Bond's oscrypto
+library. An enormous debt is owed to him for blazing this trail for us. For
+that reason, this code should be considered to be covered both by urllib3's
+license and by oscrypto's:
+
+ Copyright (c) 2015-2016 Will Bond
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the "Software"),
+ to deal in the Software without restriction, including without limitation
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+"""
+from __future__ import absolute_import
+
+import platform
+from ctypes import (
+ CDLL,
+ CFUNCTYPE,
+ POINTER,
+ c_bool,
+ c_byte,
+ c_char_p,
+ c_int32,
+ c_long,
+ c_size_t,
+ c_uint32,
+ c_ulong,
+ c_void_p,
+)
+from ctypes.util import find_library
+
+from ...packages.six import raise_from
+
+if platform.system() != "Darwin":
+ raise ImportError("Only macOS is supported")
+
+version = platform.mac_ver()[0]
+version_info = tuple(map(int, version.split(".")))
+if version_info < (10, 8):
+ raise OSError(
+ "Only OS X 10.8 and newer are supported, not %s.%s"
+ % (version_info[0], version_info[1])
+ )
+
+
+def load_cdll(name, macos10_16_path):
+ """Loads a CDLL by name, falling back to known path on 10.16+"""
+ try:
+ # Big Sur is technically 11 but we use 10.16 due to the Big Sur
+ # beta being labeled as 10.16.
+ if version_info >= (10, 16):
+ path = macos10_16_path
+ else:
+ path = find_library(name)
+ if not path:
+ raise OSError # Caught and reraised as 'ImportError'
+ return CDLL(path, use_errno=True)
+ except OSError:
+ raise_from(ImportError("The library %s failed to load" % name), None)
+
+
+Security = load_cdll(
+ "Security", "/System/Library/Frameworks/Security.framework/Security"
+)
+CoreFoundation = load_cdll(
+ "CoreFoundation",
+ "/System/Library/Frameworks/CoreFoundation.framework/CoreFoundation",
+)
+
+
+Boolean = c_bool
+CFIndex = c_long
+CFStringEncoding = c_uint32
+CFData = c_void_p
+CFString = c_void_p
+CFArray = c_void_p
+CFMutableArray = c_void_p
+CFDictionary = c_void_p
+CFError = c_void_p
+CFType = c_void_p
+CFTypeID = c_ulong
+
+CFTypeRef = POINTER(CFType)
+CFAllocatorRef = c_void_p
+
+OSStatus = c_int32
+
+CFDataRef = POINTER(CFData)
+CFStringRef = POINTER(CFString)
+CFArrayRef = POINTER(CFArray)
+CFMutableArrayRef = POINTER(CFMutableArray)
+CFDictionaryRef = POINTER(CFDictionary)
+CFArrayCallBacks = c_void_p
+CFDictionaryKeyCallBacks = c_void_p
+CFDictionaryValueCallBacks = c_void_p
+
+SecCertificateRef = POINTER(c_void_p)
+SecExternalFormat = c_uint32
+SecExternalItemType = c_uint32
+SecIdentityRef = POINTER(c_void_p)
+SecItemImportExportFlags = c_uint32
+SecItemImportExportKeyParameters = c_void_p
+SecKeychainRef = POINTER(c_void_p)
+SSLProtocol = c_uint32
+SSLCipherSuite = c_uint32
+SSLContextRef = POINTER(c_void_p)
+SecTrustRef = POINTER(c_void_p)
+SSLConnectionRef = c_uint32
+SecTrustResultType = c_uint32
+SecTrustOptionFlags = c_uint32
+SSLProtocolSide = c_uint32
+SSLConnectionType = c_uint32
+SSLSessionOption = c_uint32
+
+
+try:
+ Security.SecItemImport.argtypes = [
+ CFDataRef,
+ CFStringRef,
+ POINTER(SecExternalFormat),
+ POINTER(SecExternalItemType),
+ SecItemImportExportFlags,
+ POINTER(SecItemImportExportKeyParameters),
+ SecKeychainRef,
+ POINTER(CFArrayRef),
+ ]
+ Security.SecItemImport.restype = OSStatus
+
+ Security.SecCertificateGetTypeID.argtypes = []
+ Security.SecCertificateGetTypeID.restype = CFTypeID
+
+ Security.SecIdentityGetTypeID.argtypes = []
+ Security.SecIdentityGetTypeID.restype = CFTypeID
+
+ Security.SecKeyGetTypeID.argtypes = []
+ Security.SecKeyGetTypeID.restype = CFTypeID
+
+ Security.SecCertificateCreateWithData.argtypes = [CFAllocatorRef, CFDataRef]
+ Security.SecCertificateCreateWithData.restype = SecCertificateRef
+
+ Security.SecCertificateCopyData.argtypes = [SecCertificateRef]
+ Security.SecCertificateCopyData.restype = CFDataRef
+
+ Security.SecCopyErrorMessageString.argtypes = [OSStatus, c_void_p]
+ Security.SecCopyErrorMessageString.restype = CFStringRef
+
+ Security.SecIdentityCreateWithCertificate.argtypes = [
+ CFTypeRef,
+ SecCertificateRef,
+ POINTER(SecIdentityRef),
+ ]
+ Security.SecIdentityCreateWithCertificate.restype = OSStatus
+
+ Security.SecKeychainCreate.argtypes = [
+ c_char_p,
+ c_uint32,
+ c_void_p,
+ Boolean,
+ c_void_p,
+ POINTER(SecKeychainRef),
+ ]
+ Security.SecKeychainCreate.restype = OSStatus
+
+ Security.SecKeychainDelete.argtypes = [SecKeychainRef]
+ Security.SecKeychainDelete.restype = OSStatus
+
+ Security.SecPKCS12Import.argtypes = [
+ CFDataRef,
+ CFDictionaryRef,
+ POINTER(CFArrayRef),
+ ]
+ Security.SecPKCS12Import.restype = OSStatus
+
+ SSLReadFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, c_void_p, POINTER(c_size_t))
+ SSLWriteFunc = CFUNCTYPE(
+ OSStatus, SSLConnectionRef, POINTER(c_byte), POINTER(c_size_t)
+ )
+
+ Security.SSLSetIOFuncs.argtypes = [SSLContextRef, SSLReadFunc, SSLWriteFunc]
+ Security.SSLSetIOFuncs.restype = OSStatus
+
+ Security.SSLSetPeerID.argtypes = [SSLContextRef, c_char_p, c_size_t]
+ Security.SSLSetPeerID.restype = OSStatus
+
+ Security.SSLSetCertificate.argtypes = [SSLContextRef, CFArrayRef]
+ Security.SSLSetCertificate.restype = OSStatus
+
+ Security.SSLSetCertificateAuthorities.argtypes = [SSLContextRef, CFTypeRef, Boolean]
+ Security.SSLSetCertificateAuthorities.restype = OSStatus
+
+ Security.SSLSetConnection.argtypes = [SSLContextRef, SSLConnectionRef]
+ Security.SSLSetConnection.restype = OSStatus
+
+ Security.SSLSetPeerDomainName.argtypes = [SSLContextRef, c_char_p, c_size_t]
+ Security.SSLSetPeerDomainName.restype = OSStatus
+
+ Security.SSLHandshake.argtypes = [SSLContextRef]
+ Security.SSLHandshake.restype = OSStatus
+
+ Security.SSLRead.argtypes = [SSLContextRef, c_char_p, c_size_t, POINTER(c_size_t)]
+ Security.SSLRead.restype = OSStatus
+
+ Security.SSLWrite.argtypes = [SSLContextRef, c_char_p, c_size_t, POINTER(c_size_t)]
+ Security.SSLWrite.restype = OSStatus
+
+ Security.SSLClose.argtypes = [SSLContextRef]
+ Security.SSLClose.restype = OSStatus
+
+ Security.SSLGetNumberSupportedCiphers.argtypes = [SSLContextRef, POINTER(c_size_t)]
+ Security.SSLGetNumberSupportedCiphers.restype = OSStatus
+
+ Security.SSLGetSupportedCiphers.argtypes = [
+ SSLContextRef,
+ POINTER(SSLCipherSuite),
+ POINTER(c_size_t),
+ ]
+ Security.SSLGetSupportedCiphers.restype = OSStatus
+
+ Security.SSLSetEnabledCiphers.argtypes = [
+ SSLContextRef,
+ POINTER(SSLCipherSuite),
+ c_size_t,
+ ]
+ Security.SSLSetEnabledCiphers.restype = OSStatus
+
+ Security.SSLGetNumberEnabledCiphers.argtypes = [SSLContextRef, POINTER(c_size_t)]
+ Security.SSLGetNumberEnabledCiphers.restype = OSStatus
+
+ Security.SSLGetEnabledCiphers.argtypes = [
+ SSLContextRef,
+ POINTER(SSLCipherSuite),
+ POINTER(c_size_t),
+ ]
+ Security.SSLGetEnabledCiphers.restype = OSStatus
+
+ Security.SSLGetNegotiatedCipher.argtypes = [SSLContextRef, POINTER(SSLCipherSuite)]
+ Security.SSLGetNegotiatedCipher.restype = OSStatus
+
+ Security.SSLGetNegotiatedProtocolVersion.argtypes = [
+ SSLContextRef,
+ POINTER(SSLProtocol),
+ ]
+ Security.SSLGetNegotiatedProtocolVersion.restype = OSStatus
+
+ Security.SSLCopyPeerTrust.argtypes = [SSLContextRef, POINTER(SecTrustRef)]
+ Security.SSLCopyPeerTrust.restype = OSStatus
+
+ Security.SecTrustSetAnchorCertificates.argtypes = [SecTrustRef, CFArrayRef]
+ Security.SecTrustSetAnchorCertificates.restype = OSStatus
+
+ Security.SecTrustSetAnchorCertificatesOnly.argtypes = [SecTrustRef, Boolean]
+ Security.SecTrustSetAnchorCertificatesOnly.restype = OSStatus
+
+ Security.SecTrustEvaluate.argtypes = [SecTrustRef, POINTER(SecTrustResultType)]
+ Security.SecTrustEvaluate.restype = OSStatus
+
+ Security.SecTrustGetCertificateCount.argtypes = [SecTrustRef]
+ Security.SecTrustGetCertificateCount.restype = CFIndex
+
+ Security.SecTrustGetCertificateAtIndex.argtypes = [SecTrustRef, CFIndex]
+ Security.SecTrustGetCertificateAtIndex.restype = SecCertificateRef
+
+ Security.SSLCreateContext.argtypes = [
+ CFAllocatorRef,
+ SSLProtocolSide,
+ SSLConnectionType,
+ ]
+ Security.SSLCreateContext.restype = SSLContextRef
+
+ Security.SSLSetSessionOption.argtypes = [SSLContextRef, SSLSessionOption, Boolean]
+ Security.SSLSetSessionOption.restype = OSStatus
+
+ Security.SSLSetProtocolVersionMin.argtypes = [SSLContextRef, SSLProtocol]
+ Security.SSLSetProtocolVersionMin.restype = OSStatus
+
+ Security.SSLSetProtocolVersionMax.argtypes = [SSLContextRef, SSLProtocol]
+ Security.SSLSetProtocolVersionMax.restype = OSStatus
+
+ try:
+ Security.SSLSetALPNProtocols.argtypes = [SSLContextRef, CFArrayRef]
+ Security.SSLSetALPNProtocols.restype = OSStatus
+ except AttributeError:
+ # Supported only in 10.12+
+ pass
+
+ Security.SecCopyErrorMessageString.argtypes = [OSStatus, c_void_p]
+ Security.SecCopyErrorMessageString.restype = CFStringRef
+
+ Security.SSLReadFunc = SSLReadFunc
+ Security.SSLWriteFunc = SSLWriteFunc
+ Security.SSLContextRef = SSLContextRef
+ Security.SSLProtocol = SSLProtocol
+ Security.SSLCipherSuite = SSLCipherSuite
+ Security.SecIdentityRef = SecIdentityRef
+ Security.SecKeychainRef = SecKeychainRef
+ Security.SecTrustRef = SecTrustRef
+ Security.SecTrustResultType = SecTrustResultType
+ Security.SecExternalFormat = SecExternalFormat
+ Security.OSStatus = OSStatus
+
+ Security.kSecImportExportPassphrase = CFStringRef.in_dll(
+ Security, "kSecImportExportPassphrase"
+ )
+ Security.kSecImportItemIdentity = CFStringRef.in_dll(
+ Security, "kSecImportItemIdentity"
+ )
+
+ # CoreFoundation time!
+ CoreFoundation.CFRetain.argtypes = [CFTypeRef]
+ CoreFoundation.CFRetain.restype = CFTypeRef
+
+ CoreFoundation.CFRelease.argtypes = [CFTypeRef]
+ CoreFoundation.CFRelease.restype = None
+
+ CoreFoundation.CFGetTypeID.argtypes = [CFTypeRef]
+ CoreFoundation.CFGetTypeID.restype = CFTypeID
+
+ CoreFoundation.CFStringCreateWithCString.argtypes = [
+ CFAllocatorRef,
+ c_char_p,
+ CFStringEncoding,
+ ]
+ CoreFoundation.CFStringCreateWithCString.restype = CFStringRef
+
+ CoreFoundation.CFStringGetCStringPtr.argtypes = [CFStringRef, CFStringEncoding]
+ CoreFoundation.CFStringGetCStringPtr.restype = c_char_p
+
+ CoreFoundation.CFStringGetCString.argtypes = [
+ CFStringRef,
+ c_char_p,
+ CFIndex,
+ CFStringEncoding,
+ ]
+ CoreFoundation.CFStringGetCString.restype = c_bool
+
+ CoreFoundation.CFDataCreate.argtypes = [CFAllocatorRef, c_char_p, CFIndex]
+ CoreFoundation.CFDataCreate.restype = CFDataRef
+
+ CoreFoundation.CFDataGetLength.argtypes = [CFDataRef]
+ CoreFoundation.CFDataGetLength.restype = CFIndex
+
+ CoreFoundation.CFDataGetBytePtr.argtypes = [CFDataRef]
+ CoreFoundation.CFDataGetBytePtr.restype = c_void_p
+
+ CoreFoundation.CFDictionaryCreate.argtypes = [
+ CFAllocatorRef,
+ POINTER(CFTypeRef),
+ POINTER(CFTypeRef),
+ CFIndex,
+ CFDictionaryKeyCallBacks,
+ CFDictionaryValueCallBacks,
+ ]
+ CoreFoundation.CFDictionaryCreate.restype = CFDictionaryRef
+
+ CoreFoundation.CFDictionaryGetValue.argtypes = [CFDictionaryRef, CFTypeRef]
+ CoreFoundation.CFDictionaryGetValue.restype = CFTypeRef
+
+ CoreFoundation.CFArrayCreate.argtypes = [
+ CFAllocatorRef,
+ POINTER(CFTypeRef),
+ CFIndex,
+ CFArrayCallBacks,
+ ]
+ CoreFoundation.CFArrayCreate.restype = CFArrayRef
+
+ CoreFoundation.CFArrayCreateMutable.argtypes = [
+ CFAllocatorRef,
+ CFIndex,
+ CFArrayCallBacks,
+ ]
+ CoreFoundation.CFArrayCreateMutable.restype = CFMutableArrayRef
+
+ CoreFoundation.CFArrayAppendValue.argtypes = [CFMutableArrayRef, c_void_p]
+ CoreFoundation.CFArrayAppendValue.restype = None
+
+ CoreFoundation.CFArrayGetCount.argtypes = [CFArrayRef]
+ CoreFoundation.CFArrayGetCount.restype = CFIndex
+
+ CoreFoundation.CFArrayGetValueAtIndex.argtypes = [CFArrayRef, CFIndex]
+ CoreFoundation.CFArrayGetValueAtIndex.restype = c_void_p
+
+ CoreFoundation.kCFAllocatorDefault = CFAllocatorRef.in_dll(
+ CoreFoundation, "kCFAllocatorDefault"
+ )
+ CoreFoundation.kCFTypeArrayCallBacks = c_void_p.in_dll(
+ CoreFoundation, "kCFTypeArrayCallBacks"
+ )
+ CoreFoundation.kCFTypeDictionaryKeyCallBacks = c_void_p.in_dll(
+ CoreFoundation, "kCFTypeDictionaryKeyCallBacks"
+ )
+ CoreFoundation.kCFTypeDictionaryValueCallBacks = c_void_p.in_dll(
+ CoreFoundation, "kCFTypeDictionaryValueCallBacks"
+ )
+
+ CoreFoundation.CFTypeRef = CFTypeRef
+ CoreFoundation.CFArrayRef = CFArrayRef
+ CoreFoundation.CFStringRef = CFStringRef
+ CoreFoundation.CFDictionaryRef = CFDictionaryRef
+
+except AttributeError:
+ raise ImportError("Error initializing ctypes")
+
+
+class CFConst(object):
+ """
+ A class object that acts as essentially a namespace for CoreFoundation
+ constants.
+ """
+
+ kCFStringEncodingUTF8 = CFStringEncoding(0x08000100)
+
+
+class SecurityConst(object):
+ """
+ A class object that acts as essentially a namespace for Security constants.
+ """
+
+ kSSLSessionOptionBreakOnServerAuth = 0
+
+ kSSLProtocol2 = 1
+ kSSLProtocol3 = 2
+ kTLSProtocol1 = 4
+ kTLSProtocol11 = 7
+ kTLSProtocol12 = 8
+ # SecureTransport does not support TLS 1.3 even if there's a constant for it
+ kTLSProtocol13 = 10
+ kTLSProtocolMaxSupported = 999
+
+ kSSLClientSide = 1
+ kSSLStreamType = 0
+
+ kSecFormatPEMSequence = 10
+
+ kSecTrustResultInvalid = 0
+ kSecTrustResultProceed = 1
+ # This gap is present on purpose: this was kSecTrustResultConfirm, which
+ # is deprecated.
+ kSecTrustResultDeny = 3
+ kSecTrustResultUnspecified = 4
+ kSecTrustResultRecoverableTrustFailure = 5
+ kSecTrustResultFatalTrustFailure = 6
+ kSecTrustResultOtherError = 7
+
+ errSSLProtocol = -9800
+ errSSLWouldBlock = -9803
+ errSSLClosedGraceful = -9805
+ errSSLClosedNoNotify = -9816
+ errSSLClosedAbort = -9806
+
+ errSSLXCertChainInvalid = -9807
+ errSSLCrypto = -9809
+ errSSLInternal = -9810
+ errSSLCertExpired = -9814
+ errSSLCertNotYetValid = -9815
+ errSSLUnknownRootCert = -9812
+ errSSLNoRootCert = -9813
+ errSSLHostNameMismatch = -9843
+ errSSLPeerHandshakeFail = -9824
+ errSSLPeerUserCancelled = -9839
+ errSSLWeakPeerEphemeralDHKey = -9850
+ errSSLServerAuthCompleted = -9841
+ errSSLRecordOverflow = -9847
+
+ errSecVerifyFailed = -67808
+ errSecNoTrustSettings = -25263
+ errSecItemNotFound = -25300
+ errSecInvalidTrustSettings = -25262
+
+ # Cipher suites. We only pick the ones our default cipher string allows.
+ # Source: https://developer.apple.com/documentation/security/1550981-ssl_cipher_suite_values
+ TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = 0xC02C
+ TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = 0xC030
+ TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = 0xC02B
+ TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = 0xC02F
+ TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 = 0xCCA9
+ TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = 0xCCA8
+ TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = 0x009F
+ TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = 0x009E
+ TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = 0xC024
+ TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = 0xC028
+ TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = 0xC00A
+ TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = 0xC014
+ TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = 0x006B
+ TLS_DHE_RSA_WITH_AES_256_CBC_SHA = 0x0039
+ TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = 0xC023
+ TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = 0xC027
+ TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = 0xC009
+ TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = 0xC013
+ TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = 0x0067
+ TLS_DHE_RSA_WITH_AES_128_CBC_SHA = 0x0033
+ TLS_RSA_WITH_AES_256_GCM_SHA384 = 0x009D
+ TLS_RSA_WITH_AES_128_GCM_SHA256 = 0x009C
+ TLS_RSA_WITH_AES_256_CBC_SHA256 = 0x003D
+ TLS_RSA_WITH_AES_128_CBC_SHA256 = 0x003C
+ TLS_RSA_WITH_AES_256_CBC_SHA = 0x0035
+ TLS_RSA_WITH_AES_128_CBC_SHA = 0x002F
+ TLS_AES_128_GCM_SHA256 = 0x1301
+ TLS_AES_256_GCM_SHA384 = 0x1302
+ TLS_AES_128_CCM_8_SHA256 = 0x1305
+ TLS_AES_128_CCM_SHA256 = 0x1304
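+
+
+# Hedged sketch (not part of upstream urllib3): the constants above are meant
+# to be passed to the bound Security functions. For example, pinning the
+# minimum protocol version on a client-side stream context might look roughly
+# like this; the context is released again immediately because this is only
+# illustrative.
+def _example_min_protocol_version():
+    ctx = Security.SSLCreateContext(
+        None, SecurityConst.kSSLClientSide, SecurityConst.kSSLStreamType
+    )
+    status = Security.SSLSetProtocolVersionMin(ctx, SecurityConst.kTLSProtocol12)
+    CoreFoundation.CFRelease(ctx)
+    return status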
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/contrib/_securetransport/low_level.py b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/contrib/_securetransport/low_level.py
new file mode 100644
index 0000000..fa0b245
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/contrib/_securetransport/low_level.py
@@ -0,0 +1,397 @@
+"""
+Low-level helpers for the SecureTransport bindings.
+
+These are Python functions that are not directly related to the high-level APIs
+but are necessary to get them to work. They include a whole bunch of low-level
+CoreFoundation messing about and memory management. The concerns in this module
+are almost entirely about trying to avoid memory leaks and providing
+appropriate and useful assistance to the higher-level code.
+"""
+import base64
+import ctypes
+import itertools
+import os
+import re
+import ssl
+import struct
+import tempfile
+
+from .bindings import CFConst, CoreFoundation, Security
+
+# This regular expression is used to grab PEM data out of a PEM bundle.
+_PEM_CERTS_RE = re.compile(
+ b"-----BEGIN CERTIFICATE-----\n(.*?)\n-----END CERTIFICATE-----", re.DOTALL
+)
+
+
+def _cf_data_from_bytes(bytestring):
+ """
+ Given a bytestring, create a CFData object from it. This CFData object must
+ be CFReleased by the caller.
+ """
+ return CoreFoundation.CFDataCreate(
+ CoreFoundation.kCFAllocatorDefault, bytestring, len(bytestring)
+ )
+
+
+def _cf_dictionary_from_tuples(tuples):
+ """
+ Given a list of Python tuples, create an associated CFDictionary.
+ """
+ dictionary_size = len(tuples)
+
+ # We need to get the dictionary keys and values out in the same order.
+ keys = (t[0] for t in tuples)
+ values = (t[1] for t in tuples)
+ cf_keys = (CoreFoundation.CFTypeRef * dictionary_size)(*keys)
+ cf_values = (CoreFoundation.CFTypeRef * dictionary_size)(*values)
+
+ return CoreFoundation.CFDictionaryCreate(
+ CoreFoundation.kCFAllocatorDefault,
+ cf_keys,
+ cf_values,
+ dictionary_size,
+ CoreFoundation.kCFTypeDictionaryKeyCallBacks,
+ CoreFoundation.kCFTypeDictionaryValueCallBacks,
+ )
+
+
+def _cfstr(py_bstr):
+ """
+ Given a Python binary data, create a CFString.
+ The string must be CFReleased by the caller.
+ """
+ c_str = ctypes.c_char_p(py_bstr)
+ cf_str = CoreFoundation.CFStringCreateWithCString(
+ CoreFoundation.kCFAllocatorDefault,
+ c_str,
+ CFConst.kCFStringEncodingUTF8,
+ )
+ return cf_str
+
+
+def _create_cfstring_array(lst):
+ """
+ Given a list of Python binary data, create an associated CFMutableArray.
+ The array must be CFReleased by the caller.
+
+ Raises an ssl.SSLError on failure.
+ """
+ cf_arr = None
+ try:
+ cf_arr = CoreFoundation.CFArrayCreateMutable(
+ CoreFoundation.kCFAllocatorDefault,
+ 0,
+ ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks),
+ )
+ if not cf_arr:
+ raise MemoryError("Unable to allocate memory!")
+ for item in lst:
+ cf_str = _cfstr(item)
+ if not cf_str:
+ raise MemoryError("Unable to allocate memory!")
+ try:
+ CoreFoundation.CFArrayAppendValue(cf_arr, cf_str)
+ finally:
+ CoreFoundation.CFRelease(cf_str)
+ except BaseException as e:
+ if cf_arr:
+ CoreFoundation.CFRelease(cf_arr)
+ raise ssl.SSLError("Unable to allocate array: %s" % (e,))
+ return cf_arr
+
+
+def _cf_string_to_unicode(value):
+ """
+ Creates a Unicode string from a CFString object. Used entirely for error
+ reporting.
+
+ Yes, it annoys me quite a lot that this function is this complex.
+ """
+ value_as_void_p = ctypes.cast(value, ctypes.POINTER(ctypes.c_void_p))
+
+ string = CoreFoundation.CFStringGetCStringPtr(
+ value_as_void_p, CFConst.kCFStringEncodingUTF8
+ )
+ if string is None:
+ buffer = ctypes.create_string_buffer(1024)
+ result = CoreFoundation.CFStringGetCString(
+ value_as_void_p, buffer, 1024, CFConst.kCFStringEncodingUTF8
+ )
+ if not result:
+ raise OSError("Error copying C string from CFStringRef")
+ string = buffer.value
+ if string is not None:
+ string = string.decode("utf-8")
+ return string
+
+
+def _assert_no_error(error, exception_class=None):
+ """
+ Checks the return code and throws an exception if there is an error to
+ report
+ """
+ if error == 0:
+ return
+
+ cf_error_string = Security.SecCopyErrorMessageString(error, None)
+ output = _cf_string_to_unicode(cf_error_string)
+ CoreFoundation.CFRelease(cf_error_string)
+
+ if output is None or output == u"":
+ output = u"OSStatus %s" % error
+
+ if exception_class is None:
+ exception_class = ssl.SSLError
+
+ raise exception_class(output)
+
+
+def _cert_array_from_pem(pem_bundle):
+ """
+ Given a bundle of certs in PEM format, turns them into a CFArray of certs
+ that can be used to validate a cert chain.
+ """
+ # Normalize the PEM bundle's line endings.
+ pem_bundle = pem_bundle.replace(b"\r\n", b"\n")
+
+ der_certs = [
+ base64.b64decode(match.group(1)) for match in _PEM_CERTS_RE.finditer(pem_bundle)
+ ]
+ if not der_certs:
+ raise ssl.SSLError("No root certificates specified")
+
+ cert_array = CoreFoundation.CFArrayCreateMutable(
+ CoreFoundation.kCFAllocatorDefault,
+ 0,
+ ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks),
+ )
+ if not cert_array:
+ raise ssl.SSLError("Unable to allocate memory!")
+
+ try:
+ for der_bytes in der_certs:
+ certdata = _cf_data_from_bytes(der_bytes)
+ if not certdata:
+ raise ssl.SSLError("Unable to allocate memory!")
+ cert = Security.SecCertificateCreateWithData(
+ CoreFoundation.kCFAllocatorDefault, certdata
+ )
+ CoreFoundation.CFRelease(certdata)
+ if not cert:
+ raise ssl.SSLError("Unable to build cert object!")
+
+ CoreFoundation.CFArrayAppendValue(cert_array, cert)
+ CoreFoundation.CFRelease(cert)
+ except Exception:
+ # We need to free the array before the exception bubbles further.
+ # We only want to do that if an error occurs: otherwise, the caller
+ # should free.
+ CoreFoundation.CFRelease(cert_array)
+ raise
+
+ return cert_array
+
+
+def _is_cert(item):
+ """
+ Returns True if a given CFTypeRef is a certificate.
+ """
+ expected = Security.SecCertificateGetTypeID()
+ return CoreFoundation.CFGetTypeID(item) == expected
+
+
+def _is_identity(item):
+ """
+ Returns True if a given CFTypeRef is an identity.
+ """
+ expected = Security.SecIdentityGetTypeID()
+ return CoreFoundation.CFGetTypeID(item) == expected
+
+
+def _temporary_keychain():
+ """
+ This function creates a temporary Mac keychain that we can use to work with
+ credentials. This keychain uses a one-time password and a temporary file to
+ store the data. We expect to have one keychain per socket. The returned
+ SecKeychainRef must be freed by the caller, including calling
+ SecKeychainDelete.
+
+ Returns a tuple of the SecKeychainRef and the path to the temporary
+ directory that contains it.
+ """
+ # Unfortunately, SecKeychainCreate requires a path to a keychain. This
+ # means we cannot use mkstemp to use a generic temporary file. Instead,
+ # we're going to create a temporary directory and a filename to use there.
+ # This filename will be 8 random bytes expanded into base64. We also need
+ # some random bytes to password-protect the keychain we're creating, so we
+ # ask for 40 random bytes.
+ random_bytes = os.urandom(40)
+ filename = base64.b16encode(random_bytes[:8]).decode("utf-8")
+ password = base64.b16encode(random_bytes[8:]) # Must be valid UTF-8
+ tempdirectory = tempfile.mkdtemp()
+
+ keychain_path = os.path.join(tempdirectory, filename).encode("utf-8")
+
+ # We now want to create the keychain itself.
+ keychain = Security.SecKeychainRef()
+ status = Security.SecKeychainCreate(
+ keychain_path, len(password), password, False, None, ctypes.byref(keychain)
+ )
+ _assert_no_error(status)
+
+ # Having created the keychain, we want to pass it off to the caller.
+ return keychain, tempdirectory
+
+
+def _load_items_from_file(keychain, path):
+ """
+ Given a single file, loads all the trust objects from it into arrays and
+ the keychain.
+ Returns a tuple of lists: the first list is a list of identities, the
+ second a list of certs.
+ """
+ certificates = []
+ identities = []
+ result_array = None
+
+ with open(path, "rb") as f:
+ raw_filedata = f.read()
+
+ try:
+ filedata = CoreFoundation.CFDataCreate(
+ CoreFoundation.kCFAllocatorDefault, raw_filedata, len(raw_filedata)
+ )
+ result_array = CoreFoundation.CFArrayRef()
+ result = Security.SecItemImport(
+ filedata, # cert data
+ None, # Filename, leaving it out for now
+ None, # What the type of the file is, we don't care
+ None, # what's in the file, we don't care
+ 0, # import flags
+ None, # key params, can include passphrase in the future
+ keychain, # The keychain to insert into
+ ctypes.byref(result_array), # Results
+ )
+ _assert_no_error(result)
+
+ # A CFArray is not very useful to us as an intermediary
+ # representation, so we are going to extract the objects we want
+ # and then free the array. We don't need to keep hold of keys: the
+ # keychain already has them!
+ result_count = CoreFoundation.CFArrayGetCount(result_array)
+ for index in range(result_count):
+ item = CoreFoundation.CFArrayGetValueAtIndex(result_array, index)
+ item = ctypes.cast(item, CoreFoundation.CFTypeRef)
+
+ if _is_cert(item):
+ CoreFoundation.CFRetain(item)
+ certificates.append(item)
+ elif _is_identity(item):
+ CoreFoundation.CFRetain(item)
+ identities.append(item)
+ finally:
+ if result_array:
+ CoreFoundation.CFRelease(result_array)
+
+ CoreFoundation.CFRelease(filedata)
+
+ return (identities, certificates)
+
+
+def _load_client_cert_chain(keychain, *paths):
+ """
+ Load certificates and maybe keys from a number of files. Has the end goal
+ of returning a CFArray containing one SecIdentityRef, and then zero or more
+ SecCertificateRef objects, suitable for use as a client certificate trust
+ chain.
+ """
+ # Ok, the strategy.
+ #
+ # This relies on knowing that macOS will not give you a SecIdentityRef
+ # unless you have imported a key into a keychain. This is a somewhat
+ # artificial limitation of macOS (for example, it doesn't necessarily
+ # affect iOS), but there is nothing inside Security.framework that lets you
+ # get a SecIdentityRef without having a key in a keychain.
+ #
+ # So the policy here is we take all the files and iterate them in order.
+ # Each one will use SecItemImport to have one or more objects loaded from
+ # it. We will also point at a keychain that macOS can use to work with the
+ # private key.
+ #
+ # Once we have all the objects, we'll check what we actually have. If we
+ # already have a SecIdentityRef in hand, fab: we'll use that. Otherwise,
+ # we'll take the first certificate (which we assume to be our leaf) and
+ # ask the keychain to give us a SecIdentityRef with that cert's associated
+ # key.
+ #
+ # We'll then return a CFArray containing the trust chain: one
+ # SecIdentityRef and then zero-or-more SecCertificateRef objects. The
+ # responsibility for freeing this CFArray will be with the caller. This
+ # CFArray must remain alive for the entire connection, so in practice it
+ # will be stored with a single SSLSocket, along with the reference to the
+ # keychain.
+ certificates = []
+ identities = []
+
+ # Filter out bad paths.
+ paths = (path for path in paths if path)
+
+ try:
+ for file_path in paths:
+ new_identities, new_certs = _load_items_from_file(keychain, file_path)
+ identities.extend(new_identities)
+ certificates.extend(new_certs)
+
+ # Ok, we have everything. The question is: do we have an identity? If
+ # not, we want to grab one from the first cert we have.
+ if not identities:
+ new_identity = Security.SecIdentityRef()
+ status = Security.SecIdentityCreateWithCertificate(
+ keychain, certificates[0], ctypes.byref(new_identity)
+ )
+ _assert_no_error(status)
+ identities.append(new_identity)
+
+ # We now want to release the original certificate, as we no longer
+ # need it.
+ CoreFoundation.CFRelease(certificates.pop(0))
+
+ # We now need to build a new CFArray that holds the trust chain.
+ trust_chain = CoreFoundation.CFArrayCreateMutable(
+ CoreFoundation.kCFAllocatorDefault,
+ 0,
+ ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks),
+ )
+ for item in itertools.chain(identities, certificates):
+ # ArrayAppendValue does a CFRetain on the item. That's fine,
+ # because the finally block will release our other refs to them.
+ CoreFoundation.CFArrayAppendValue(trust_chain, item)
+
+ return trust_chain
+ finally:
+ for obj in itertools.chain(identities, certificates):
+ CoreFoundation.CFRelease(obj)
+
+
+TLS_PROTOCOL_VERSIONS = {
+ "SSLv2": (0, 2),
+ "SSLv3": (3, 0),
+ "TLSv1": (3, 1),
+ "TLSv1.1": (3, 2),
+ "TLSv1.2": (3, 3),
+}
+
+
+def _build_tls_unknown_ca_alert(version):
+ """
+ Builds a TLS alert record for an unknown CA.
+ """
+ ver_maj, ver_min = TLS_PROTOCOL_VERSIONS[version]
+ severity_fatal = 0x02
+ description_unknown_ca = 0x30
+ msg = struct.pack(">BB", severity_fatal, description_unknown_ca)
+ msg_len = len(msg)
+ record_type_alert = 0x15
+ record = struct.pack(">BBBH", record_type_alert, ver_maj, ver_min, msg_len) + msg
+ return record
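+
+
+# Worked example: _build_tls_unknown_ca_alert("TLSv1.2") produces a 7-byte
+# record -- content type 0x15 (alert), record version 3.3, a 2-byte length of
+# 2, then the payload 0x02 0x30 (fatal severity, unknown_ca description):
+#
+#     >>> _build_tls_unknown_ca_alert("TLSv1.2").hex()
+#     '15030300020230'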
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/contrib/appengine.py b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/contrib/appengine.py
new file mode 100644
index 0000000..6685386
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/contrib/appengine.py
@@ -0,0 +1,314 @@
+"""
+This module provides a pool manager that uses Google App Engine's
+`URLFetch Service `_.
+
+Example usage::
+
+ from pip._vendor.urllib3 import PoolManager
+ from pip._vendor.urllib3.contrib.appengine import AppEngineManager, is_appengine_sandbox
+
+ if is_appengine_sandbox():
+ # AppEngineManager uses AppEngine's URLFetch API behind the scenes
+ http = AppEngineManager()
+ else:
+ # PoolManager uses a socket-level API behind the scenes
+ http = PoolManager()
+
+ r = http.request('GET', 'https://google.com/')
+
+There are `limitations `_ to the URLFetch service and it may not be
+the best choice for your application. There are three options for using
+urllib3 on Google App Engine:
+
+1. You can use :class:`AppEngineManager` with URLFetch. URLFetch is
+ cost-effective in many circumstances as long as your usage is within the
+ limitations.
+2. You can use a normal :class:`~urllib3.PoolManager` by enabling sockets.
+ Sockets also have `limitations and restrictions
+ `_ and have a lower free quota than URLFetch.
+ To use sockets, be sure to specify the following in your ``app.yaml``::
+
+ env_variables:
+ GAE_USE_SOCKETS_HTTPLIB : 'true'
+
+3. If you are using `App Engine Flexible
+ `_, you can use the standard
+:class:`PoolManager` without any configuration or special environment variables.
+"""
+
+from __future__ import absolute_import
+
+import io
+import logging
+import warnings
+
+from ..exceptions import (
+ HTTPError,
+ HTTPWarning,
+ MaxRetryError,
+ ProtocolError,
+ SSLError,
+ TimeoutError,
+)
+from ..packages.six.moves.urllib.parse import urljoin
+from ..request import RequestMethods
+from ..response import HTTPResponse
+from ..util.retry import Retry
+from ..util.timeout import Timeout
+from . import _appengine_environ
+
+try:
+ from google.appengine.api import urlfetch
+except ImportError:
+ urlfetch = None
+
+
+log = logging.getLogger(__name__)
+
+
+class AppEnginePlatformWarning(HTTPWarning):
+ pass
+
+
+class AppEnginePlatformError(HTTPError):
+ pass
+
+
+class AppEngineManager(RequestMethods):
+ """
+ Connection manager for Google App Engine sandbox applications.
+
+ This manager uses the URLFetch service directly instead of using the
+ emulated httplib, and is subject to URLFetch limitations as described in
+ the App Engine documentation `here
+ `_.
+
+ Notably it will raise an :class:`AppEnginePlatformError` if:
+ * URLFetch is not available.
+ * You attempt to use this on App Engine Flexible, since full socket
+ support is available there.
+ * A request is larger than 10 megabytes.
+ * A response is larger than 32 megabytes.
+ * You use an unsupported request method such as OPTIONS.
+
+ Beyond those cases, it will raise normal urllib3 errors.
+ """
+
+ def __init__(
+ self,
+ headers=None,
+ retries=None,
+ validate_certificate=True,
+ urlfetch_retries=True,
+ ):
+ if not urlfetch:
+ raise AppEnginePlatformError(
+ "URLFetch is not available in this environment."
+ )
+
+ warnings.warn(
+ "urllib3 is using URLFetch on Google App Engine sandbox instead "
+ "of sockets. To use sockets directly instead of URLFetch see "
+ "https://urllib3.readthedocs.io/en/1.26.x/reference/urllib3.contrib.html.",
+ AppEnginePlatformWarning,
+ )
+
+ RequestMethods.__init__(self, headers)
+ self.validate_certificate = validate_certificate
+ self.urlfetch_retries = urlfetch_retries
+
+ self.retries = retries or Retry.DEFAULT
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ # Return False to re-raise any potential exceptions
+ return False
+
+ def urlopen(
+ self,
+ method,
+ url,
+ body=None,
+ headers=None,
+ retries=None,
+ redirect=True,
+ timeout=Timeout.DEFAULT_TIMEOUT,
+ **response_kw
+ ):
+
+ retries = self._get_retries(retries, redirect)
+
+ try:
+ follow_redirects = redirect and retries.redirect != 0 and retries.total
+ response = urlfetch.fetch(
+ url,
+ payload=body,
+ method=method,
+ headers=headers or {},
+ allow_truncated=False,
+ follow_redirects=self.urlfetch_retries and follow_redirects,
+ deadline=self._get_absolute_timeout(timeout),
+ validate_certificate=self.validate_certificate,
+ )
+ except urlfetch.DeadlineExceededError as e:
+ raise TimeoutError(self, e)
+
+ except urlfetch.InvalidURLError as e:
+ if "too large" in str(e):
+ raise AppEnginePlatformError(
+ "URLFetch request too large, URLFetch only "
+ "supports requests up to 10mb in size.",
+ e,
+ )
+ raise ProtocolError(e)
+
+ except urlfetch.DownloadError as e:
+ if "Too many redirects" in str(e):
+ raise MaxRetryError(self, url, reason=e)
+ raise ProtocolError(e)
+
+ except urlfetch.ResponseTooLargeError as e:
+ raise AppEnginePlatformError(
+ "URLFetch response too large, URLFetch only supports"
+ "responses up to 32mb in size.",
+ e,
+ )
+
+ except urlfetch.SSLCertificateError as e:
+ raise SSLError(e)
+
+ except urlfetch.InvalidMethodError as e:
+ raise AppEnginePlatformError(
+ "URLFetch does not support method: %s" % method, e
+ )
+
+ http_response = self._urlfetch_response_to_http_response(
+ response, retries=retries, **response_kw
+ )
+
+ # Handle redirect?
+ redirect_location = redirect and http_response.get_redirect_location()
+ if redirect_location:
+ # Check for redirect response
+ if self.urlfetch_retries and retries.raise_on_redirect:
+ raise MaxRetryError(self, url, "too many redirects")
+ else:
+ if http_response.status == 303:
+ method = "GET"
+
+ try:
+ retries = retries.increment(
+ method, url, response=http_response, _pool=self
+ )
+ except MaxRetryError:
+ if retries.raise_on_redirect:
+ raise MaxRetryError(self, url, "too many redirects")
+ return http_response
+
+ retries.sleep_for_retry(http_response)
+ log.debug("Redirecting %s -> %s", url, redirect_location)
+ redirect_url = urljoin(url, redirect_location)
+ return self.urlopen(
+ method,
+ redirect_url,
+ body,
+ headers,
+ retries=retries,
+ redirect=redirect,
+ timeout=timeout,
+ **response_kw
+ )
+
+ # Check if we should retry the HTTP response.
+ has_retry_after = bool(http_response.getheader("Retry-After"))
+ if retries.is_retry(method, http_response.status, has_retry_after):
+ retries = retries.increment(method, url, response=http_response, _pool=self)
+ log.debug("Retry: %s", url)
+ retries.sleep(http_response)
+ return self.urlopen(
+ method,
+ url,
+ body=body,
+ headers=headers,
+ retries=retries,
+ redirect=redirect,
+ timeout=timeout,
+ **response_kw
+ )
+
+ return http_response
+
+ def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw):
+
+ if is_prod_appengine():
+ # Production GAE handles deflate encoding automatically, but does
+ # not remove the encoding header.
+ content_encoding = urlfetch_resp.headers.get("content-encoding")
+
+ if content_encoding == "deflate":
+ del urlfetch_resp.headers["content-encoding"]
+
+ transfer_encoding = urlfetch_resp.headers.get("transfer-encoding")
+ # We have a full response's content,
+ # so let's make sure we don't report ourselves as chunked data.
+ if transfer_encoding == "chunked":
+ encodings = transfer_encoding.split(",")
+ encodings.remove("chunked")
+ urlfetch_resp.headers["transfer-encoding"] = ",".join(encodings)
+
+ original_response = HTTPResponse(
+ # In order for decoding to work, we must present the content as
+ # a file-like object.
+ body=io.BytesIO(urlfetch_resp.content),
+ msg=urlfetch_resp.header_msg,
+ headers=urlfetch_resp.headers,
+ status=urlfetch_resp.status_code,
+ **response_kw
+ )
+
+ return HTTPResponse(
+ body=io.BytesIO(urlfetch_resp.content),
+ headers=urlfetch_resp.headers,
+ status=urlfetch_resp.status_code,
+ original_response=original_response,
+ **response_kw
+ )
+
+ def _get_absolute_timeout(self, timeout):
+ if timeout is Timeout.DEFAULT_TIMEOUT:
+ return None # Defer to URLFetch's default.
+ if isinstance(timeout, Timeout):
+ if timeout._read is not None or timeout._connect is not None:
+ warnings.warn(
+ "URLFetch does not support granular timeout settings, "
+ "reverting to total or default URLFetch timeout.",
+ AppEnginePlatformWarning,
+ )
+ return timeout.total
+ return timeout
+
+ def _get_retries(self, retries, redirect):
+ if not isinstance(retries, Retry):
+ retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
+
+ if retries.connect or retries.read or retries.redirect:
+ warnings.warn(
+ "URLFetch only supports total retries and does not "
+ "recognize connect, read, or redirect retry parameters.",
+ AppEnginePlatformWarning,
+ )
+
+ return retries
+
+
+# Alias methods from _appengine_environ to maintain public API interface.
+
+is_appengine = _appengine_environ.is_appengine
+is_appengine_sandbox = _appengine_environ.is_appengine_sandbox
+is_local_appengine = _appengine_environ.is_local_appengine
+is_prod_appengine = _appengine_environ.is_prod_appengine
+is_prod_appengine_mvms = _appengine_environ.is_prod_appengine_mvms
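+
+
+# A minimal usage sketch, assuming this runs inside the App Engine sandbox
+# where the urlfetch API can be imported. Only *total* retries are honoured
+# (see _get_retries above), so pass an integer or Retry(total=...):
+#
+#     from pip._vendor.urllib3.util.retry import Retry
+#
+#     http = AppEngineManager(retries=Retry(total=3), validate_certificate=True)
+#     r = http.request("GET", "https://www.google.com/")
+#     print(r.status)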
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/contrib/ntlmpool.py b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/contrib/ntlmpool.py
new file mode 100644
index 0000000..41a8fd1
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/contrib/ntlmpool.py
@@ -0,0 +1,130 @@
+"""
+NTLM authenticating pool, contributed by erikcederstran
+
+Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
+"""
+from __future__ import absolute_import
+
+import warnings
+from logging import getLogger
+
+from ntlm import ntlm
+
+from .. import HTTPSConnectionPool
+from ..packages.six.moves.http_client import HTTPSConnection
+
+warnings.warn(
+ "The 'urllib3.contrib.ntlmpool' module is deprecated and will be removed "
+ "in urllib3 v2.0 release, urllib3 is not able to support it properly due "
+ "to reasons listed in issue: https://github.com/urllib3/urllib3/issues/2282. "
+ "If you are a user of this module please comment in the mentioned issue.",
+ DeprecationWarning,
+)
+
+log = getLogger(__name__)
+
+
+class NTLMConnectionPool(HTTPSConnectionPool):
+ """
+ Implements an NTLM authentication version of an urllib3 connection pool
+ """
+
+ scheme = "https"
+
+ def __init__(self, user, pw, authurl, *args, **kwargs):
+ """
+ authurl is a random URL on the server that is protected by NTLM.
+ user is the Windows user, probably in the DOMAIN\\username format.
+ pw is the password for the user.
+ """
+ super(NTLMConnectionPool, self).__init__(*args, **kwargs)
+ self.authurl = authurl
+ self.rawuser = user
+ user_parts = user.split("\\", 1)
+ self.domain = user_parts[0].upper()
+ self.user = user_parts[1]
+ self.pw = pw
+
+ def _new_conn(self):
+ # Performs the NTLM handshake that secures the connection. The socket
+ # must be kept open while requests are performed.
+ self.num_connections += 1
+ log.debug(
+ "Starting NTLM HTTPS connection no. %d: https://%s%s",
+ self.num_connections,
+ self.host,
+ self.authurl,
+ )
+
+ headers = {"Connection": "Keep-Alive"}
+ req_header = "Authorization"
+ resp_header = "www-authenticate"
+
+ conn = HTTPSConnection(host=self.host, port=self.port)
+
+ # Send negotiation message
+ headers[req_header] = "NTLM %s" % ntlm.create_NTLM_NEGOTIATE_MESSAGE(
+ self.rawuser
+ )
+ log.debug("Request headers: %s", headers)
+ conn.request("GET", self.authurl, None, headers)
+ res = conn.getresponse()
+ reshdr = dict(res.getheaders())
+ log.debug("Response status: %s %s", res.status, res.reason)
+ log.debug("Response headers: %s", reshdr)
+ log.debug("Response data: %s [...]", res.read(100))
+
+ # Remove the reference to the socket, so that it can not be closed by
+ # the response object (we want to keep the socket open)
+ res.fp = None
+
+ # Server should respond with a challenge message
+ auth_header_values = reshdr[resp_header].split(", ")
+ auth_header_value = None
+ for s in auth_header_values:
+ if s[:5] == "NTLM ":
+ auth_header_value = s[5:]
+ if auth_header_value is None:
+ raise Exception(
+ "Unexpected %s response header: %s" % (resp_header, reshdr[resp_header])
+ )
+
+ # Send authentication message
+ ServerChallenge, NegotiateFlags = ntlm.parse_NTLM_CHALLENGE_MESSAGE(
+ auth_header_value
+ )
+ auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(
+ ServerChallenge, self.user, self.domain, self.pw, NegotiateFlags
+ )
+ headers[req_header] = "NTLM %s" % auth_msg
+ log.debug("Request headers: %s", headers)
+ conn.request("GET", self.authurl, None, headers)
+ res = conn.getresponse()
+ log.debug("Response status: %s %s", res.status, res.reason)
+ log.debug("Response headers: %s", dict(res.getheaders()))
+ log.debug("Response data: %s [...]", res.read()[:100])
+ if res.status != 200:
+ if res.status == 401:
+ raise Exception("Server rejected request: wrong username or password")
+ raise Exception("Wrong server response: %s %s" % (res.status, res.reason))
+
+ res.fp = None
+ log.debug("Connection established")
+ return conn
+
+ def urlopen(
+ self,
+ method,
+ url,
+ body=None,
+ headers=None,
+ retries=3,
+ redirect=True,
+ assert_same_host=True,
+ ):
+ if headers is None:
+ headers = {}
+ headers["Connection"] = "Keep-Alive"
+ return super(NTLMConnectionPool, self).urlopen(
+ method, url, body, headers, retries, redirect, assert_same_host
+ )
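+
+
+# A minimal usage sketch with hypothetical credentials and host; the user must
+# be supplied as "DOMAIN\\username" so the split in __init__ can extract the
+# domain:
+#
+#     pool = NTLMConnectionPool(
+#         "EXAMPLE\\alice", "s3cret", authurl="/",
+#         host="intranet.example.com", port=443,
+#     )
+#     r = pool.urlopen("GET", "/")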
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/contrib/pyopenssl.py b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/contrib/pyopenssl.py
new file mode 100644
index 0000000..3130f51
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/contrib/pyopenssl.py
@@ -0,0 +1,511 @@
+"""
+TLS with SNI_-support for Python 2. Follow these instructions if you would
+like to verify TLS certificates in Python 2. Note, the default libraries do
+*not* do certificate checking; you need to do additional work to validate
+certificates yourself.
+
+This needs the following packages installed:
+
+* `pyOpenSSL`_ (tested with 16.0.0)
+* `cryptography`_ (minimum 1.3.4, from pyopenssl)
+* `idna`_ (minimum 2.0, from cryptography)
+
+However, pyopenssl depends on cryptography, which depends on idna, so while we
+use all three directly here we end up having relatively few packages required.
+
+You can install them with the following command:
+
+.. code-block:: bash
+
+ $ python -m pip install pyopenssl cryptography idna
+
+To activate certificate checking, call
+:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code
+before you begin making HTTP requests. This can be done in a ``sitecustomize``
+module, or at any other time before your application begins using ``urllib3``,
+like this:
+
+.. code-block:: python
+
+ try:
+ import pip._vendor.urllib3.contrib.pyopenssl as pyopenssl
+ pyopenssl.inject_into_urllib3()
+ except ImportError:
+ pass
+
+Now you can use :mod:`urllib3` as you normally would, and it will support SNI
+when the required modules are installed.
+
+Activating this module also has the positive side effect of disabling SSL/TLS
+compression in Python 2 (see `CRIME attack`_).
+
+.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication
+.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)
+.. _pyopenssl: https://www.pyopenssl.org
+.. _cryptography: https://cryptography.io
+.. _idna: https://github.com/kjd/idna
+"""
+from __future__ import absolute_import
+
+import OpenSSL.SSL
+from cryptography import x509
+from cryptography.hazmat.backends.openssl import backend as openssl_backend
+from cryptography.hazmat.backends.openssl.x509 import _Certificate
+
+try:
+ from cryptography.x509 import UnsupportedExtension
+except ImportError:
+ # UnsupportedExtension is gone in cryptography >= 2.1.0
+ class UnsupportedExtension(Exception):
+ pass
+
+
+from io import BytesIO
+from socket import error as SocketError
+from socket import timeout
+
+try: # Platform-specific: Python 2
+ from socket import _fileobject
+except ImportError: # Platform-specific: Python 3
+ _fileobject = None
+ from ..packages.backports.makefile import backport_makefile
+
+import logging
+import ssl
+import sys
+
+from .. import util
+from ..packages import six
+from ..util.ssl_ import PROTOCOL_TLS_CLIENT
+
+__all__ = ["inject_into_urllib3", "extract_from_urllib3"]
+
+# SNI always works.
+HAS_SNI = True
+
+# Map from urllib3 to PyOpenSSL compatible parameter-values.
+_openssl_versions = {
+ util.PROTOCOL_TLS: OpenSSL.SSL.SSLv23_METHOD,
+ PROTOCOL_TLS_CLIENT: OpenSSL.SSL.SSLv23_METHOD,
+ ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
+}
+
+if hasattr(ssl, "PROTOCOL_SSLv3") and hasattr(OpenSSL.SSL, "SSLv3_METHOD"):
+ _openssl_versions[ssl.PROTOCOL_SSLv3] = OpenSSL.SSL.SSLv3_METHOD
+
+if hasattr(ssl, "PROTOCOL_TLSv1_1") and hasattr(OpenSSL.SSL, "TLSv1_1_METHOD"):
+ _openssl_versions[ssl.PROTOCOL_TLSv1_1] = OpenSSL.SSL.TLSv1_1_METHOD
+
+if hasattr(ssl, "PROTOCOL_TLSv1_2") and hasattr(OpenSSL.SSL, "TLSv1_2_METHOD"):
+ _openssl_versions[ssl.PROTOCOL_TLSv1_2] = OpenSSL.SSL.TLSv1_2_METHOD
+
+
+_stdlib_to_openssl_verify = {
+ ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
+ ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
+ ssl.CERT_REQUIRED: OpenSSL.SSL.VERIFY_PEER
+ + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
+}
+_openssl_to_stdlib_verify = dict((v, k) for k, v in _stdlib_to_openssl_verify.items())
+
+# OpenSSL will only write 16K at a time
+SSL_WRITE_BLOCKSIZE = 16384
+
+orig_util_HAS_SNI = util.HAS_SNI
+orig_util_SSLContext = util.ssl_.SSLContext
+
+
+log = logging.getLogger(__name__)
+
+
+def inject_into_urllib3():
+ "Monkey-patch urllib3 with PyOpenSSL-backed SSL-support."
+
+ _validate_dependencies_met()
+
+ util.SSLContext = PyOpenSSLContext
+ util.ssl_.SSLContext = PyOpenSSLContext
+ util.HAS_SNI = HAS_SNI
+ util.ssl_.HAS_SNI = HAS_SNI
+ util.IS_PYOPENSSL = True
+ util.ssl_.IS_PYOPENSSL = True
+
+
+def extract_from_urllib3():
+ "Undo monkey-patching by :func:`inject_into_urllib3`."
+
+ util.SSLContext = orig_util_SSLContext
+ util.ssl_.SSLContext = orig_util_SSLContext
+ util.HAS_SNI = orig_util_HAS_SNI
+ util.ssl_.HAS_SNI = orig_util_HAS_SNI
+ util.IS_PYOPENSSL = False
+ util.ssl_.IS_PYOPENSSL = False
+
+
+def _validate_dependencies_met():
+ """
+ Verifies that PyOpenSSL's package-level dependencies have been met.
+ Throws `ImportError` if they are not met.
+ """
+ # Method added in `cryptography==1.1`; not available in older versions
+ from cryptography.x509.extensions import Extensions
+
+ if getattr(Extensions, "get_extension_for_class", None) is None:
+ raise ImportError(
+ "'cryptography' module missing required functionality. "
+ "Try upgrading to v1.3.4 or newer."
+ )
+
+ # pyOpenSSL 0.14 and above use cryptography for OpenSSL bindings. The _x509
+ # attribute is only present on those versions.
+ from OpenSSL.crypto import X509
+
+ x509 = X509()
+ if getattr(x509, "_x509", None) is None:
+ raise ImportError(
+ "'pyOpenSSL' module missing required functionality. "
+ "Try upgrading to v0.14 or newer."
+ )
+
+
+def _dnsname_to_stdlib(name):
+ """
+ Converts a dNSName SubjectAlternativeName field to the form used by the
+ standard library on the given Python version.
+
+ Cryptography produces a dNSName as a unicode string that was idna-decoded
+ from ASCII bytes. We need to idna-encode that string to get it back, and
+ then on Python 3 we also need to convert to unicode via UTF-8 (the stdlib
+ uses PyUnicode_FromStringAndSize on it, which decodes via UTF-8).
+
+ If the name cannot be idna-encoded then we return None signalling that
+ the name given should be skipped.
+ """
+
+ def idna_encode(name):
+ """
+ Borrowed wholesale from the Python Cryptography Project. It turns out
+ that we can't just safely call `idna.encode`: it can explode for
+ wildcard names. This avoids that problem.
+ """
+ from pip._vendor import idna
+
+ try:
+ for prefix in [u"*.", u"."]:
+ if name.startswith(prefix):
+ name = name[len(prefix) :]
+ return prefix.encode("ascii") + idna.encode(name)
+ return idna.encode(name)
+ except idna.core.IDNAError:
+ return None
+
+ # Don't send IPv6 addresses through the IDNA encoder.
+ if ":" in name:
+ return name
+
+ name = idna_encode(name)
+ if name is None:
+ return None
+ elif sys.version_info >= (3, 0):
+ name = name.decode("utf-8")
+ return name
+
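+# For example, a wildcard SAN round-trips because idna_encode() strips the
+# "*." prefix before encoding, while anything containing ":" (an IPv6
+# literal) bypasses the IDNA encoder entirely:
+#
+#     _dnsname_to_stdlib(u"*.example.com")  # -> "*.example.com" (bytes on Python 2)
+#     _dnsname_to_stdlib(u"::1")            # -> u"::1"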
+
+def get_subj_alt_name(peer_cert):
+ """
+ Given a PyOpenSSL certificate, provides all the subject alternative names.
+ """
+ # Pass the cert to cryptography, which has much better APIs for this.
+ if hasattr(peer_cert, "to_cryptography"):
+ cert = peer_cert.to_cryptography()
+ else:
+ # This is technically using private APIs, but should work across all
+ # relevant versions before PyOpenSSL got a proper API for this.
+ cert = _Certificate(openssl_backend, peer_cert._x509)
+
+ # We want to find the SAN extension. Ask Cryptography to locate it (it's
+ # faster than looping in Python)
+ try:
+ ext = cert.extensions.get_extension_for_class(x509.SubjectAlternativeName).value
+ except x509.ExtensionNotFound:
+ # No such extension, return the empty list.
+ return []
+ except (
+ x509.DuplicateExtension,
+ UnsupportedExtension,
+ x509.UnsupportedGeneralNameType,
+ UnicodeError,
+ ) as e:
+ # A problem has been found with the quality of the certificate. Assume
+ # no SAN field is present.
+ log.warning(
+ "A problem was encountered with the certificate that prevented "
+ "urllib3 from finding the SubjectAlternativeName field. This can "
+ "affect certificate validation. The error was %s",
+ e,
+ )
+ return []
+
+ # We want to return dNSName and iPAddress fields. We need to cast the IPs
+ # back to strings because the match_hostname function wants them as
+ # strings.
+ # Sadly the DNS names need to be idna encoded and then, on Python 3, UTF-8
+ # decoded. This is pretty frustrating, but that's what the standard library
+ # does with certificates, and so we need to attempt to do the same.
+ # We also want to skip over names which cannot be idna encoded.
+ names = [
+ ("DNS", name)
+ for name in map(_dnsname_to_stdlib, ext.get_values_for_type(x509.DNSName))
+ if name is not None
+ ]
+ names.extend(
+ ("IP Address", str(name)) for name in ext.get_values_for_type(x509.IPAddress)
+ )
+
+ return names
+
+
+class WrappedSocket(object):
+ """API-compatibility wrapper for Python OpenSSL's Connection-class.
+
+ Note: _makefile_refs, _drop() and _reuse() are needed for the garbage
+ collector of pypy.
+ """
+
+ def __init__(self, connection, socket, suppress_ragged_eofs=True):
+ self.connection = connection
+ self.socket = socket
+ self.suppress_ragged_eofs = suppress_ragged_eofs
+ self._makefile_refs = 0
+ self._closed = False
+
+ def fileno(self):
+ return self.socket.fileno()
+
+ # Copy-pasted from Python 3.5 source code
+ def _decref_socketios(self):
+ if self._makefile_refs > 0:
+ self._makefile_refs -= 1
+ if self._closed:
+ self.close()
+
+ def recv(self, *args, **kwargs):
+ try:
+ data = self.connection.recv(*args, **kwargs)
+ except OpenSSL.SSL.SysCallError as e:
+ if self.suppress_ragged_eofs and e.args == (-1, "Unexpected EOF"):
+ return b""
+ else:
+ raise SocketError(str(e))
+ except OpenSSL.SSL.ZeroReturnError:
+ if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
+ return b""
+ else:
+ raise
+ except OpenSSL.SSL.WantReadError:
+ if not util.wait_for_read(self.socket, self.socket.gettimeout()):
+ raise timeout("The read operation timed out")
+ else:
+ return self.recv(*args, **kwargs)
+
+ # TLS 1.3 post-handshake authentication
+ except OpenSSL.SSL.Error as e:
+ raise ssl.SSLError("read error: %r" % e)
+ else:
+ return data
+
+ def recv_into(self, *args, **kwargs):
+ try:
+ return self.connection.recv_into(*args, **kwargs)
+ except OpenSSL.SSL.SysCallError as e:
+ if self.suppress_ragged_eofs and e.args == (-1, "Unexpected EOF"):
+ return 0
+ else:
+ raise SocketError(str(e))
+ except OpenSSL.SSL.ZeroReturnError:
+ if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
+ return 0
+ else:
+ raise
+ except OpenSSL.SSL.WantReadError:
+ if not util.wait_for_read(self.socket, self.socket.gettimeout()):
+ raise timeout("The read operation timed out")
+ else:
+ return self.recv_into(*args, **kwargs)
+
+ # TLS 1.3 post-handshake authentication
+ except OpenSSL.SSL.Error as e:
+ raise ssl.SSLError("read error: %r" % e)
+
+ def settimeout(self, timeout):
+ return self.socket.settimeout(timeout)
+
+ def _send_until_done(self, data):
+ while True:
+ try:
+ return self.connection.send(data)
+ except OpenSSL.SSL.WantWriteError:
+ if not util.wait_for_write(self.socket, self.socket.gettimeout()):
+ raise timeout()
+ continue
+ except OpenSSL.SSL.SysCallError as e:
+ raise SocketError(str(e))
+
+ def sendall(self, data):
+ total_sent = 0
+ while total_sent < len(data):
+ sent = self._send_until_done(
+ data[total_sent : total_sent + SSL_WRITE_BLOCKSIZE]
+ )
+ total_sent += sent
+
+ def shutdown(self):
+ # FIXME rethrow compatible exceptions should we ever use this
+ self.connection.shutdown()
+
+ def close(self):
+ if self._makefile_refs < 1:
+ try:
+ self._closed = True
+ return self.connection.close()
+ except OpenSSL.SSL.Error:
+ return
+ else:
+ self._makefile_refs -= 1
+
+ def getpeercert(self, binary_form=False):
+ x509 = self.connection.get_peer_certificate()
+
+ if not x509:
+ return x509
+
+ if binary_form:
+ return OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_ASN1, x509)
+
+ return {
+ "subject": ((("commonName", x509.get_subject().CN),),),
+ "subjectAltName": get_subj_alt_name(x509),
+ }
+
+ def version(self):
+ return self.connection.get_protocol_version_name()
+
+ def _reuse(self):
+ self._makefile_refs += 1
+
+ def _drop(self):
+ if self._makefile_refs < 1:
+ self.close()
+ else:
+ self._makefile_refs -= 1
+
+
+if _fileobject: # Platform-specific: Python 2
+
+ def makefile(self, mode, bufsize=-1):
+ self._makefile_refs += 1
+ return _fileobject(self, mode, bufsize, close=True)
+
+
+else: # Platform-specific: Python 3
+ makefile = backport_makefile
+
+WrappedSocket.makefile = makefile
+
+
+class PyOpenSSLContext(object):
+ """
+ I am a wrapper class for the PyOpenSSL ``Context`` object. I am responsible
+ for translating the interface of the standard library ``SSLContext`` object
+ to calls into PyOpenSSL.
+ """
+
+ def __init__(self, protocol):
+ self.protocol = _openssl_versions[protocol]
+ self._ctx = OpenSSL.SSL.Context(self.protocol)
+ self._options = 0
+ self.check_hostname = False
+
+ @property
+ def options(self):
+ return self._options
+
+ @options.setter
+ def options(self, value):
+ self._options = value
+ self._ctx.set_options(value)
+
+ @property
+ def verify_mode(self):
+ return _openssl_to_stdlib_verify[self._ctx.get_verify_mode()]
+
+ @verify_mode.setter
+ def verify_mode(self, value):
+ self._ctx.set_verify(_stdlib_to_openssl_verify[value], _verify_callback)
+
+ def set_default_verify_paths(self):
+ self._ctx.set_default_verify_paths()
+
+ def set_ciphers(self, ciphers):
+ if isinstance(ciphers, six.text_type):
+ ciphers = ciphers.encode("utf-8")
+ self._ctx.set_cipher_list(ciphers)
+
+ def load_verify_locations(self, cafile=None, capath=None, cadata=None):
+ if cafile is not None:
+ cafile = cafile.encode("utf-8")
+ if capath is not None:
+ capath = capath.encode("utf-8")
+ try:
+ self._ctx.load_verify_locations(cafile, capath)
+ if cadata is not None:
+ self._ctx.load_verify_locations(BytesIO(cadata))
+ except OpenSSL.SSL.Error as e:
+ raise ssl.SSLError("unable to load trusted certificates: %r" % e)
+
+ def load_cert_chain(self, certfile, keyfile=None, password=None):
+ self._ctx.use_certificate_chain_file(certfile)
+ if password is not None:
+ if not isinstance(password, six.binary_type):
+ password = password.encode("utf-8")
+ self._ctx.set_passwd_cb(lambda *_: password)
+ self._ctx.use_privatekey_file(keyfile or certfile)
+
+ def set_alpn_protocols(self, protocols):
+ protocols = [six.ensure_binary(p) for p in protocols]
+ return self._ctx.set_alpn_protos(protocols)
+
+ def wrap_socket(
+ self,
+ sock,
+ server_side=False,
+ do_handshake_on_connect=True,
+ suppress_ragged_eofs=True,
+ server_hostname=None,
+ ):
+ cnx = OpenSSL.SSL.Connection(self._ctx, sock)
+
+ if isinstance(server_hostname, six.text_type): # Platform-specific: Python 3
+ server_hostname = server_hostname.encode("utf-8")
+
+ if server_hostname is not None:
+ cnx.set_tlsext_host_name(server_hostname)
+
+ cnx.set_connect_state()
+
+ while True:
+ try:
+ cnx.do_handshake()
+ except OpenSSL.SSL.WantReadError:
+ if not util.wait_for_read(sock, sock.gettimeout()):
+ raise timeout("select timed out")
+ continue
+ except OpenSSL.SSL.Error as e:
+ raise ssl.SSLError("bad handshake: %r" % e)
+ break
+
+ return WrappedSocket(cnx, sock)
+
+
+def _verify_callback(cnx, x509, err_no, err_depth, return_code):
+ return err_no == 0
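+
+
+# A minimal usage sketch, assuming `plain_sock` is an already-connected TCP
+# socket; PyOpenSSLContext exposes the ssl.SSLContext surface urllib3 needs,
+# so it can also be driven by hand:
+#
+#     ctx = PyOpenSSLContext(util.PROTOCOL_TLS)
+#     ctx.verify_mode = ssl.CERT_REQUIRED
+#     ctx.set_default_verify_paths()
+#     tls_sock = ctx.wrap_socket(plain_sock, server_hostname="example.com")
+#     print(tls_sock.getpeercert()["subjectAltName"])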
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/contrib/securetransport.py b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/contrib/securetransport.py
new file mode 100644
index 0000000..b4ca80b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/contrib/securetransport.py
@@ -0,0 +1,922 @@
+"""
+SecureTransport support for urllib3 via ctypes.
+
+This makes platform-native TLS available to urllib3 users on macOS without the
+use of a compiler. This is an important feature because the Python Package
+Index is moving to become a TLSv1.2-or-higher server, and the default OpenSSL
+that ships with macOS is not capable of doing TLSv1.2. The only way to resolve
+this is to give macOS users an alternative solution to the problem, and that
+solution is to use SecureTransport.
+
+We use ctypes here because this solution must not require a compiler. That's
+because pip is not allowed to require a compiler either.
+
+This is not intended to be a seriously long-term solution to this problem.
+The hope is that PEP 543 will eventually solve this issue for us, at which
+point we can retire this contrib module. But in the short term, we need to
+solve the impending tire fire that is Python on Mac without this kind of
+contrib module. So...here we are.
+
+To use this module, simply import and inject it::
+
+ import pip._vendor.urllib3.contrib.securetransport as securetransport
+ securetransport.inject_into_urllib3()
+
+Happy TLSing!
+
+This code is a bastardised version of the code found in Will Bond's oscrypto
+library. An enormous debt is owed to him for blazing this trail for us. For
+that reason, this code should be considered to be covered both by urllib3's
+license and by oscrypto's:
+
+.. code-block::
+
+ Copyright (c) 2015-2016 Will Bond
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the "Software"),
+ to deal in the Software without restriction, including without limitation
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+"""
+from __future__ import absolute_import
+
+import contextlib
+import ctypes
+import errno
+import os.path
+import shutil
+import socket
+import ssl
+import struct
+import threading
+import weakref
+
+from pip._vendor import six
+
+from .. import util
+from ..util.ssl_ import PROTOCOL_TLS_CLIENT
+from ._securetransport.bindings import CoreFoundation, Security, SecurityConst
+from ._securetransport.low_level import (
+ _assert_no_error,
+ _build_tls_unknown_ca_alert,
+ _cert_array_from_pem,
+ _create_cfstring_array,
+ _load_client_cert_chain,
+ _temporary_keychain,
+)
+
+try: # Platform-specific: Python 2
+ from socket import _fileobject
+except ImportError: # Platform-specific: Python 3
+ _fileobject = None
+ from ..packages.backports.makefile import backport_makefile
+
+__all__ = ["inject_into_urllib3", "extract_from_urllib3"]
+
+# SNI always works
+HAS_SNI = True
+
+orig_util_HAS_SNI = util.HAS_SNI
+orig_util_SSLContext = util.ssl_.SSLContext
+
+# This dictionary is used by the read callback to obtain a handle to the
+# calling wrapped socket. This is a pretty silly approach, but for now it'll
+# do. I feel like I should be able to smuggle a handle to the wrapped socket
+# directly in the SSLConnectionRef, but for now this approach will work I
+# guess.
+#
+# We need to lock around this structure for inserts, but we don't do it for
+# reads/writes in the callbacks. The reasoning here goes as follows:
+#
+# 1. It is not possible to call into the callbacks before the dictionary is
+# populated, so once in the callback the id must be in the dictionary.
+# 2. The callbacks don't mutate the dictionary, they only read from it, and
+# so cannot conflict with any of the insertions.
+#
+# This is good: if we had to lock in the callbacks we'd drastically slow down
+# the performance of this code.
+_connection_refs = weakref.WeakValueDictionary()
+_connection_ref_lock = threading.Lock()
+
+# Limit writes to 16kB. This is OpenSSL's limit, but we'll cargo-cult it over
+# for no better reason than we need *a* limit, and this one is right there.
+SSL_WRITE_BLOCKSIZE = 16384
+
+# This is our equivalent of util.ssl_.DEFAULT_CIPHERS, but expanded out to
+# individual cipher suites. We need to do this because this is how
+# SecureTransport wants them.
+CIPHER_SUITES = [
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
+ SecurityConst.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_256_GCM_SHA384,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_128_GCM_SHA256,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
+ SecurityConst.TLS_AES_256_GCM_SHA384,
+ SecurityConst.TLS_AES_128_GCM_SHA256,
+ SecurityConst.TLS_RSA_WITH_AES_256_GCM_SHA384,
+ SecurityConst.TLS_RSA_WITH_AES_128_GCM_SHA256,
+ SecurityConst.TLS_AES_128_CCM_8_SHA256,
+ SecurityConst.TLS_AES_128_CCM_SHA256,
+ SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA256,
+ SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA256,
+ SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA,
+ SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA,
+]
+
+# Basically this is simple: for PROTOCOL_SSLv23 we turn it into a low of
+# TLSv1 and a high of TLSv1.2. For everything else, we pin to that version.
+# TLSv1 to 1.2 are supported on macOS 10.8+
+_protocol_to_min_max = {
+ util.PROTOCOL_TLS: (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol12),
+ PROTOCOL_TLS_CLIENT: (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol12),
+}
+
+if hasattr(ssl, "PROTOCOL_SSLv2"):
+ _protocol_to_min_max[ssl.PROTOCOL_SSLv2] = (
+ SecurityConst.kSSLProtocol2,
+ SecurityConst.kSSLProtocol2,
+ )
+if hasattr(ssl, "PROTOCOL_SSLv3"):
+ _protocol_to_min_max[ssl.PROTOCOL_SSLv3] = (
+ SecurityConst.kSSLProtocol3,
+ SecurityConst.kSSLProtocol3,
+ )
+if hasattr(ssl, "PROTOCOL_TLSv1"):
+ _protocol_to_min_max[ssl.PROTOCOL_TLSv1] = (
+ SecurityConst.kTLSProtocol1,
+ SecurityConst.kTLSProtocol1,
+ )
+if hasattr(ssl, "PROTOCOL_TLSv1_1"):
+ _protocol_to_min_max[ssl.PROTOCOL_TLSv1_1] = (
+ SecurityConst.kTLSProtocol11,
+ SecurityConst.kTLSProtocol11,
+ )
+if hasattr(ssl, "PROTOCOL_TLSv1_2"):
+ _protocol_to_min_max[ssl.PROTOCOL_TLSv1_2] = (
+ SecurityConst.kTLSProtocol12,
+ SecurityConst.kTLSProtocol12,
+ )
+
+
+def inject_into_urllib3():
+ """
+ Monkey-patch urllib3 with SecureTransport-backed SSL-support.
+ """
+ util.SSLContext = SecureTransportContext
+ util.ssl_.SSLContext = SecureTransportContext
+ util.HAS_SNI = HAS_SNI
+ util.ssl_.HAS_SNI = HAS_SNI
+ util.IS_SECURETRANSPORT = True
+ util.ssl_.IS_SECURETRANSPORT = True
+
+
+def extract_from_urllib3():
+ """
+ Undo monkey-patching by :func:`inject_into_urllib3`.
+ """
+ util.SSLContext = orig_util_SSLContext
+ util.ssl_.SSLContext = orig_util_SSLContext
+ util.HAS_SNI = orig_util_HAS_SNI
+ util.ssl_.HAS_SNI = orig_util_HAS_SNI
+ util.IS_SECURETRANSPORT = False
+ util.ssl_.IS_SECURETRANSPORT = False
+
+
+def _read_callback(connection_id, data_buffer, data_length_pointer):
+ """
+ SecureTransport read callback. This is called by ST to request that data
+ be returned from the socket.
+ """
+ wrapped_socket = None
+ try:
+ wrapped_socket = _connection_refs.get(connection_id)
+ if wrapped_socket is None:
+ return SecurityConst.errSSLInternal
+ base_socket = wrapped_socket.socket
+
+ requested_length = data_length_pointer[0]
+
+ timeout = wrapped_socket.gettimeout()
+ error = None
+ read_count = 0
+
+ try:
+ while read_count < requested_length:
+ if timeout is None or timeout >= 0:
+ if not util.wait_for_read(base_socket, timeout):
+ raise socket.error(errno.EAGAIN, "timed out")
+
+ remaining = requested_length - read_count
+ buffer = (ctypes.c_char * remaining).from_address(
+ data_buffer + read_count
+ )
+ chunk_size = base_socket.recv_into(buffer, remaining)
+ read_count += chunk_size
+ if not chunk_size:
+ if not read_count:
+ return SecurityConst.errSSLClosedGraceful
+ break
+ except (socket.error) as e:
+ error = e.errno
+
+ if error is not None and error != errno.EAGAIN:
+ data_length_pointer[0] = read_count
+ if error == errno.ECONNRESET or error == errno.EPIPE:
+ return SecurityConst.errSSLClosedAbort
+ raise
+
+ data_length_pointer[0] = read_count
+
+ if read_count != requested_length:
+ return SecurityConst.errSSLWouldBlock
+
+ return 0
+ except Exception as e:
+ if wrapped_socket is not None:
+ wrapped_socket._exception = e
+ return SecurityConst.errSSLInternal
+
+
+def _write_callback(connection_id, data_buffer, data_length_pointer):
+ """
+ SecureTransport write callback. This is called by ST to request that data
+ actually be sent on the network.
+ """
+ wrapped_socket = None
+ try:
+ wrapped_socket = _connection_refs.get(connection_id)
+ if wrapped_socket is None:
+ return SecurityConst.errSSLInternal
+ base_socket = wrapped_socket.socket
+
+ bytes_to_write = data_length_pointer[0]
+ data = ctypes.string_at(data_buffer, bytes_to_write)
+
+ timeout = wrapped_socket.gettimeout()
+ error = None
+ sent = 0
+
+ try:
+ while sent < bytes_to_write:
+ if timeout is None or timeout >= 0:
+ if not util.wait_for_write(base_socket, timeout):
+ raise socket.error(errno.EAGAIN, "timed out")
+ chunk_sent = base_socket.send(data)
+ sent += chunk_sent
+
+ # This has some needless copying here, but I'm not sure there's
+ # much value in optimising this data path.
+ data = data[chunk_sent:]
+ except (socket.error) as e:
+ error = e.errno
+
+ if error is not None and error != errno.EAGAIN:
+ data_length_pointer[0] = sent
+ if error == errno.ECONNRESET or error == errno.EPIPE:
+ return SecurityConst.errSSLClosedAbort
+ raise
+
+ data_length_pointer[0] = sent
+
+ if sent != bytes_to_write:
+ return SecurityConst.errSSLWouldBlock
+
+ return 0
+ except Exception as e:
+ if wrapped_socket is not None:
+ wrapped_socket._exception = e
+ return SecurityConst.errSSLInternal
+
+
+# We need to keep these two objects references alive: if they get GC'd while
+# in use then SecureTransport could attempt to call a function that is in freed
+# memory. That would be...uh...bad. Yeah, that's the word. Bad.
+_read_callback_pointer = Security.SSLReadFunc(_read_callback)
+_write_callback_pointer = Security.SSLWriteFunc(_write_callback)
+
+
+class WrappedSocket(object):
+ """
+ API-compatibility wrapper for Python's OpenSSL wrapped socket object.
+
+ Note: _makefile_refs, _drop(), and _reuse() are needed for the garbage
+ collector of PyPy.
+ """
+
+ def __init__(self, socket):
+ self.socket = socket
+ self.context = None
+ self._makefile_refs = 0
+ self._closed = False
+ self._exception = None
+ self._keychain = None
+ self._keychain_dir = None
+ self._client_cert_chain = None
+
+ # We save off the previously-configured timeout and then set it to
+ # zero. This is done because we use select and friends to handle the
+ # timeouts, but if we leave the timeout set on the lower socket then
+ # Python will "kindly" call select on that socket again for us. Avoid
+ # that by forcing the timeout to zero.
+ self._timeout = self.socket.gettimeout()
+ self.socket.settimeout(0)
+
+ @contextlib.contextmanager
+ def _raise_on_error(self):
+ """
+ A context manager that can be used to wrap calls that do I/O from
+ SecureTransport. If any of the I/O callbacks hit an exception, this
+ context manager will correctly propagate the exception after the fact.
+ This avoids silently swallowing those exceptions.
+
+ It also correctly forces the socket closed.
+ """
+ self._exception = None
+
+ # We explicitly don't catch around this yield because in the unlikely
+ # event that an exception was hit in the block we don't want to swallow
+ # it.
+ yield
+ if self._exception is not None:
+ exception, self._exception = self._exception, None
+ self.close()
+ raise exception
+
+ def _set_ciphers(self):
+ """
+ Sets up the allowed ciphers. By default this matches the set in
+ util.ssl_.DEFAULT_CIPHERS, at least as supported by macOS. This is done
+ custom and doesn't allow changing at this time, mostly because parsing
+ OpenSSL cipher strings is going to be a freaking nightmare.
+ """
+ ciphers = (Security.SSLCipherSuite * len(CIPHER_SUITES))(*CIPHER_SUITES)
+ result = Security.SSLSetEnabledCiphers(
+ self.context, ciphers, len(CIPHER_SUITES)
+ )
+ _assert_no_error(result)
+
+ def _set_alpn_protocols(self, protocols):
+ """
+ Sets up the ALPN protocols on the context.
+ """
+ if not protocols:
+ return
+ protocols_arr = _create_cfstring_array(protocols)
+ try:
+ result = Security.SSLSetALPNProtocols(self.context, protocols_arr)
+ _assert_no_error(result)
+ finally:
+ CoreFoundation.CFRelease(protocols_arr)
+
+ def _custom_validate(self, verify, trust_bundle):
+ """
+ Called when we have set custom validation. We do this in two cases:
+ first, when cert validation is entirely disabled; and second, when
+ using a custom trust DB.
+ Raises an SSLError if the connection is not trusted.
+ """
+ # If we disabled cert validation, just say: cool.
+ if not verify:
+ return
+
+ successes = (
+ SecurityConst.kSecTrustResultUnspecified,
+ SecurityConst.kSecTrustResultProceed,
+ )
+ try:
+ trust_result = self._evaluate_trust(trust_bundle)
+ if trust_result in successes:
+ return
+ reason = "error code: %d" % (trust_result,)
+ except Exception as e:
+ # Do not trust on error
+ reason = "exception: %r" % (e,)
+
+ # SecureTransport does not send an alert, nor does it shut down the connection.
+ rec = _build_tls_unknown_ca_alert(self.version())
+ self.socket.sendall(rec)
+ # close the connection immediately
+ # l_onoff = 1, activate linger
+ # l_linger = 0, linger for 0 seconds
+ opts = struct.pack("ii", 1, 0)
+ self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, opts)
+ self.close()
+ raise ssl.SSLError("certificate verify failed, %s" % reason)
+
+ def _evaluate_trust(self, trust_bundle):
+ # We want data in memory, so load it up.
+ if os.path.isfile(trust_bundle):
+ with open(trust_bundle, "rb") as f:
+ trust_bundle = f.read()
+
+ cert_array = None
+ trust = Security.SecTrustRef()
+
+ try:
+ # Get a CFArray that contains the certs we want.
+ cert_array = _cert_array_from_pem(trust_bundle)
+
+ # Ok, now the hard part. We want to get the SecTrustRef that ST has
+ # created for this connection, shove our CAs into it, tell ST to
+ # ignore everything else it knows, and then ask if it can build a
+ # chain. This is a buuuunch of code.
+ result = Security.SSLCopyPeerTrust(self.context, ctypes.byref(trust))
+ _assert_no_error(result)
+ if not trust:
+ raise ssl.SSLError("Failed to copy trust reference")
+
+ result = Security.SecTrustSetAnchorCertificates(trust, cert_array)
+ _assert_no_error(result)
+
+ result = Security.SecTrustSetAnchorCertificatesOnly(trust, True)
+ _assert_no_error(result)
+
+ trust_result = Security.SecTrustResultType()
+ result = Security.SecTrustEvaluate(trust, ctypes.byref(trust_result))
+ _assert_no_error(result)
+ finally:
+ if trust:
+ CoreFoundation.CFRelease(trust)
+
+ if cert_array is not None:
+ CoreFoundation.CFRelease(cert_array)
+
+ return trust_result.value
+
+ def handshake(
+ self,
+ server_hostname,
+ verify,
+ trust_bundle,
+ min_version,
+ max_version,
+ client_cert,
+ client_key,
+ client_key_passphrase,
+ alpn_protocols,
+ ):
+ """
+ Actually performs the TLS handshake. This is run automatically by
+ wrapped socket, and shouldn't be needed in user code.
+ """
+ # First, we do the initial bits of connection setup. We need to create
+ # a context, set its I/O funcs, and set the connection reference.
+ self.context = Security.SSLCreateContext(
+ None, SecurityConst.kSSLClientSide, SecurityConst.kSSLStreamType
+ )
+ result = Security.SSLSetIOFuncs(
+ self.context, _read_callback_pointer, _write_callback_pointer
+ )
+ _assert_no_error(result)
+
+ # Here we need to compute the handle to use. We do this by taking the
+ # id of self modulo 2**31 - 1. If this is already in the dictionary, we
+ # just keep incrementing by one until we find a free space.
+ with _connection_ref_lock:
+ handle = id(self) % 2147483647
+ while handle in _connection_refs:
+ handle = (handle + 1) % 2147483647
+ _connection_refs[handle] = self
+
+ result = Security.SSLSetConnection(self.context, handle)
+ _assert_no_error(result)
+
+ # If we have a server hostname, we should set that too.
+ if server_hostname:
+ if not isinstance(server_hostname, bytes):
+ server_hostname = server_hostname.encode("utf-8")
+
+ result = Security.SSLSetPeerDomainName(
+ self.context, server_hostname, len(server_hostname)
+ )
+ _assert_no_error(result)
+
+ # Setup the ciphers.
+ self._set_ciphers()
+
+ # Setup the ALPN protocols.
+ self._set_alpn_protocols(alpn_protocols)
+
+ # Set the minimum and maximum TLS versions.
+ result = Security.SSLSetProtocolVersionMin(self.context, min_version)
+ _assert_no_error(result)
+
+ result = Security.SSLSetProtocolVersionMax(self.context, max_version)
+ _assert_no_error(result)
+
+ # If there's a trust DB, we need to use it. We do that by telling
+ # SecureTransport to break on server auth. We also do that if we don't
+ # want to validate the certs at all: we just won't actually do any
+ # authing in that case.
+ if not verify or trust_bundle is not None:
+ result = Security.SSLSetSessionOption(
+ self.context, SecurityConst.kSSLSessionOptionBreakOnServerAuth, True
+ )
+ _assert_no_error(result)
+
+ # If there's a client cert, we need to use it.
+ if client_cert:
+ self._keychain, self._keychain_dir = _temporary_keychain()
+ self._client_cert_chain = _load_client_cert_chain(
+ self._keychain, client_cert, client_key
+ )
+ result = Security.SSLSetCertificate(self.context, self._client_cert_chain)
+ _assert_no_error(result)
+
+ while True:
+ with self._raise_on_error():
+ result = Security.SSLHandshake(self.context)
+
+ if result == SecurityConst.errSSLWouldBlock:
+ raise socket.timeout("handshake timed out")
+ elif result == SecurityConst.errSSLServerAuthCompleted:
+ self._custom_validate(verify, trust_bundle)
+ continue
+ else:
+ _assert_no_error(result)
+ break
+
+ def fileno(self):
+ return self.socket.fileno()
+
+ # Copy-pasted from Python 3.5 source code
+ def _decref_socketios(self):
+ if self._makefile_refs > 0:
+ self._makefile_refs -= 1
+ if self._closed:
+ self.close()
+
+ def recv(self, bufsiz):
+ buffer = ctypes.create_string_buffer(bufsiz)
+ bytes_read = self.recv_into(buffer, bufsiz)
+ data = buffer[:bytes_read]
+ return data
+
+ def recv_into(self, buffer, nbytes=None):
+ # Read short on EOF.
+ if self._closed:
+ return 0
+
+ if nbytes is None:
+ nbytes = len(buffer)
+
+ buffer = (ctypes.c_char * nbytes).from_buffer(buffer)
+ processed_bytes = ctypes.c_size_t(0)
+
+ with self._raise_on_error():
+ result = Security.SSLRead(
+ self.context, buffer, nbytes, ctypes.byref(processed_bytes)
+ )
+
+ # There are some result codes that we want to treat as "not always
+ # errors". Specifically, those are errSSLWouldBlock,
+ # errSSLClosedGraceful, and errSSLClosedNoNotify.
+ if result == SecurityConst.errSSLWouldBlock:
+ # If we didn't process any bytes, then this was just a time out.
+ # However, we can get errSSLWouldBlock in situations when we *did*
+ # read some data, and in those cases we should just read "short"
+ # and return.
+ if processed_bytes.value == 0:
+ # Timed out, no data read.
+ raise socket.timeout("recv timed out")
+ elif result in (
+ SecurityConst.errSSLClosedGraceful,
+ SecurityConst.errSSLClosedNoNotify,
+ ):
+ # The remote peer has closed this connection. We should do so as
+ # well. Note that we don't actually return here because in
+ # principle this could actually be fired along with return data.
+ # It's unlikely though.
+ self.close()
+ else:
+ _assert_no_error(result)
+
+ # Ok, we read and probably succeeded. We should return whatever data
+ # was actually read.
+ return processed_bytes.value
+
+ def settimeout(self, timeout):
+ self._timeout = timeout
+
+ def gettimeout(self):
+ return self._timeout
+
+ def send(self, data):
+ processed_bytes = ctypes.c_size_t(0)
+
+ with self._raise_on_error():
+ result = Security.SSLWrite(
+ self.context, data, len(data), ctypes.byref(processed_bytes)
+ )
+
+ if result == SecurityConst.errSSLWouldBlock and processed_bytes.value == 0:
+ # Timed out
+ raise socket.timeout("send timed out")
+ else:
+ _assert_no_error(result)
+
+ # We sent, and probably succeeded. Tell them how much we sent.
+ return processed_bytes.value
+
+ def sendall(self, data):
+ total_sent = 0
+ while total_sent < len(data):
+ sent = self.send(data[total_sent : total_sent + SSL_WRITE_BLOCKSIZE])
+ total_sent += sent
+
+ def shutdown(self):
+ with self._raise_on_error():
+ Security.SSLClose(self.context)
+
+ def close(self):
+ # TODO: should I do clean shutdown here? Do I have to?
+ if self._makefile_refs < 1:
+ self._closed = True
+ if self.context:
+ CoreFoundation.CFRelease(self.context)
+ self.context = None
+ if self._client_cert_chain:
+ CoreFoundation.CFRelease(self._client_cert_chain)
+ self._client_cert_chain = None
+ if self._keychain:
+ Security.SecKeychainDelete(self._keychain)
+ CoreFoundation.CFRelease(self._keychain)
+ shutil.rmtree(self._keychain_dir)
+ self._keychain = self._keychain_dir = None
+ return self.socket.close()
+ else:
+ self._makefile_refs -= 1
+
+ def getpeercert(self, binary_form=False):
+ # Urgh, annoying.
+ #
+ # Here's how we do this:
+ #
+ # 1. Call SSLCopyPeerTrust to get hold of the trust object for this
+ # connection.
+ # 2. Call SecTrustGetCertificateAtIndex for index 0 to get the leaf.
+ # 3. To get the CN, call SecCertificateCopyCommonName and process that
+ # string so that it's of the appropriate type.
+ # 4. To get the SAN, we need to do something a bit more complex:
+ # a. Call SecCertificateCopyValues to get the data, requesting
+ # kSecOIDSubjectAltName.
+ # b. Mess about with this dictionary to try to get the SANs out.
+ #
+ # This is gross. Really gross. It's going to be a few hundred LoC extra
+ # just to repeat something that SecureTransport can *already do*. So my
+ # operating assumption at this time is that what we want to do is
+ # instead to just flag to urllib3 that it shouldn't do its own hostname
+ # validation when using SecureTransport.
+ if not binary_form:
+ raise ValueError("SecureTransport only supports dumping binary certs")
+ trust = Security.SecTrustRef()
+ certdata = None
+ der_bytes = None
+
+ try:
+ # Grab the trust store.
+ result = Security.SSLCopyPeerTrust(self.context, ctypes.byref(trust))
+ _assert_no_error(result)
+ if not trust:
+ # Probably we haven't done the handshake yet. No biggie.
+ return None
+
+ cert_count = Security.SecTrustGetCertificateCount(trust)
+ if not cert_count:
+ # Also a case that might happen if we haven't handshaked.
+ # Handshook? Handshaken?
+ return None
+
+ leaf = Security.SecTrustGetCertificateAtIndex(trust, 0)
+ assert leaf
+
+ # Ok, now we want the DER bytes.
+ certdata = Security.SecCertificateCopyData(leaf)
+ assert certdata
+
+ data_length = CoreFoundation.CFDataGetLength(certdata)
+ data_buffer = CoreFoundation.CFDataGetBytePtr(certdata)
+ der_bytes = ctypes.string_at(data_buffer, data_length)
+ finally:
+ if certdata:
+ CoreFoundation.CFRelease(certdata)
+ if trust:
+ CoreFoundation.CFRelease(trust)
+
+ return der_bytes
+
+ def version(self):
+ protocol = Security.SSLProtocol()
+ result = Security.SSLGetNegotiatedProtocolVersion(
+ self.context, ctypes.byref(protocol)
+ )
+ _assert_no_error(result)
+ if protocol.value == SecurityConst.kTLSProtocol13:
+ raise ssl.SSLError("SecureTransport does not support TLS 1.3")
+ elif protocol.value == SecurityConst.kTLSProtocol12:
+ return "TLSv1.2"
+ elif protocol.value == SecurityConst.kTLSProtocol11:
+ return "TLSv1.1"
+ elif protocol.value == SecurityConst.kTLSProtocol1:
+ return "TLSv1"
+ elif protocol.value == SecurityConst.kSSLProtocol3:
+ return "SSLv3"
+ elif protocol.value == SecurityConst.kSSLProtocol2:
+ return "SSLv2"
+ else:
+ raise ssl.SSLError("Unknown TLS version: %r" % protocol)
+
+ def _reuse(self):
+ self._makefile_refs += 1
+
+ def _drop(self):
+ if self._makefile_refs < 1:
+ self.close()
+ else:
+ self._makefile_refs -= 1
+
+
+if _fileobject: # Platform-specific: Python 2
+
+ def makefile(self, mode, bufsize=-1):
+ self._makefile_refs += 1
+ return _fileobject(self, mode, bufsize, close=True)
+
+
+else: # Platform-specific: Python 3
+
+ def makefile(self, mode="r", buffering=None, *args, **kwargs):
+ # We disable buffering with SecureTransport because it conflicts with
+ # the buffering that ST does internally (see issue #1153 for more).
+ buffering = 0
+ return backport_makefile(self, mode, buffering, *args, **kwargs)
+
+
+WrappedSocket.makefile = makefile
+
+
+class SecureTransportContext(object):
+ """
+ I am a wrapper class for the SecureTransport library, to translate the
+ interface of the standard library ``SSLContext`` object to calls into
+ SecureTransport.
+ """
+
+ def __init__(self, protocol):
+ self._min_version, self._max_version = _protocol_to_min_max[protocol]
+ self._options = 0
+ self._verify = False
+ self._trust_bundle = None
+ self._client_cert = None
+ self._client_key = None
+ self._client_key_passphrase = None
+ self._alpn_protocols = None
+
+ @property
+ def check_hostname(self):
+ """
+ SecureTransport cannot have its hostname checking disabled. For more,
+ see the comment on getpeercert() in this file.
+ """
+ return True
+
+ @check_hostname.setter
+ def check_hostname(self, value):
+ """
+ SecureTransport cannot have its hostname checking disabled. For more,
+ see the comment on getpeercert() in this file.
+ """
+ pass
+
+ @property
+ def options(self):
+ # TODO: Well, crap.
+ #
+ # So this is the bit of the code that is the most likely to cause us
+ # trouble. Essentially we need to enumerate all of the SSL options that
+ # users might want to use and try to see if we can sensibly translate
+ # them, or whether we should just ignore them.
+ return self._options
+
+ @options.setter
+ def options(self, value):
+ # TODO: Update in line with above.
+ self._options = value
+
+ @property
+ def verify_mode(self):
+ return ssl.CERT_REQUIRED if self._verify else ssl.CERT_NONE
+
+ @verify_mode.setter
+ def verify_mode(self, value):
+ self._verify = True if value == ssl.CERT_REQUIRED else False
+
+ def set_default_verify_paths(self):
+ # So, this has to do something a bit weird. Specifically, what it does
+ # is nothing.
+ #
+ # This means that, if we had previously had load_verify_locations
+ # called, this does not undo that. We need to do that because it turns
+ # out that the rest of the urllib3 code will attempt to load the
+ # default verify paths if it hasn't been told about any paths, even if
+ the context itself was configured sometime earlier. We resolve that by just
+ # ignoring it.
+ pass
+
+ def load_default_certs(self):
+ return self.set_default_verify_paths()
+
+ def set_ciphers(self, ciphers):
+ # For now, we just require the default cipher string.
+ if ciphers != util.ssl_.DEFAULT_CIPHERS:
+ raise ValueError("SecureTransport doesn't support custom cipher strings")
+
+ def load_verify_locations(self, cafile=None, capath=None, cadata=None):
+ # OK, we only really support cadata and cafile.
+ if capath is not None:
+ raise ValueError("SecureTransport does not support cert directories")
+
+ # Raise if cafile does not exist.
+ if cafile is not None:
+ with open(cafile):
+ pass
+
+ self._trust_bundle = cafile or cadata
+
+ def load_cert_chain(self, certfile, keyfile=None, password=None):
+ self._client_cert = certfile
+ self._client_key = keyfile
+ self._client_key_passphrase = password  # consumed by handshake() via wrap_socket()
+
+ def set_alpn_protocols(self, protocols):
+ """
+ Sets the ALPN protocols that will later be set on the context.
+
+ Raises a NotImplementedError if ALPN is not supported.
+ """
+ if not hasattr(Security, "SSLSetALPNProtocols"):
+ raise NotImplementedError(
+ "SecureTransport supports ALPN only in macOS 10.12+"
+ )
+ self._alpn_protocols = [six.ensure_binary(p) for p in protocols]
+
+ def wrap_socket(
+ self,
+ sock,
+ server_side=False,
+ do_handshake_on_connect=True,
+ suppress_ragged_eofs=True,
+ server_hostname=None,
+ ):
+ # So, what do we do here? Firstly, we assert some properties. This is a
+ # stripped down shim, so there is some functionality we don't support.
+ # See PEP 543 for the real deal.
+ assert not server_side
+ assert do_handshake_on_connect
+ assert suppress_ragged_eofs
+
+ # Ok, we're good to go. Now we want to create the wrapped socket object
+ # and store it in the appropriate place.
+ wrapped_socket = WrappedSocket(sock)
+
+ # Now we can handshake
+ wrapped_socket.handshake(
+ server_hostname,
+ self._verify,
+ self._trust_bundle,
+ self._min_version,
+ self._max_version,
+ self._client_cert,
+ self._client_key,
+ self._client_key_passphrase,
+ self._alpn_protocols,
+ )
+ return wrapped_socket
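For reference, a minimal sketch of driving the wrapper above by hand (normally the module's `inject_into_urllib3()` entry point wires this into urllib3 for you). It assumes macOS with the Security framework, that `ssl.PROTOCOL_TLS` is a key of the module's `_protocol_to_min_max` map, and a hypothetical CA-bundle path:

```python
import socket
import ssl

from pip._vendor.urllib3.contrib.securetransport import SecureTransportContext

ctx = SecureTransportContext(ssl.PROTOCOL_TLS)       # mapped through _protocol_to_min_max
ctx.verify_mode = ssl.CERT_REQUIRED                  # stored internally as self._verify = True
ctx.load_verify_locations(cafile="/path/to/ca-bundle.pem")  # hypothetical path; must exist

raw = socket.create_connection(("example.com", 443))
tls = ctx.wrap_socket(raw, server_hostname="example.com")   # performs the SecureTransport handshake
print(tls.version())                                 # e.g. "TLSv1.2"
tls.close()
```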
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/contrib/socks.py b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/contrib/socks.py
new file mode 100644
index 0000000..c326e80
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/contrib/socks.py
@@ -0,0 +1,216 @@
+# -*- coding: utf-8 -*-
+"""
+This module contains provisional support for SOCKS proxies from within
+urllib3. This module supports SOCKS4, SOCKS4A (an extension of SOCKS4), and
+SOCKS5. To enable its functionality, either install PySocks or install this
+module with the ``socks`` extra.
+
+The SOCKS implementation supports the full range of urllib3 features. It also
+supports the following SOCKS features:
+
+- SOCKS4A (``proxy_url='socks4a://...``)
+- SOCKS4 (``proxy_url='socks4://...``)
+- SOCKS5 with remote DNS (``proxy_url='socks5h://...``)
+- SOCKS5 with local DNS (``proxy_url='socks5://...``)
+- Usernames and passwords for the SOCKS proxy
+
+.. note::
+ It is recommended to use ``socks5h://`` or ``socks4a://`` schemes in
+ your ``proxy_url`` to ensure that DNS resolution is done from the remote
+ server instead of client-side when connecting to a domain name.
+
+SOCKS4 supports IPv4 and domain names with the SOCKS4A extension. SOCKS5
+supports IPv4, IPv6, and domain names.
+
+When connecting to a SOCKS4 proxy the ``username`` portion of the ``proxy_url``
+will be sent as the ``userid`` section of the SOCKS request:
+
+.. code-block:: python
+
+ proxy_url="socks4a://@proxy-host"
+
+When connecting to a SOCKS5 proxy the ``username`` and ``password`` portion
+of the ``proxy_url`` will be sent as the username/password to authenticate
+with the proxy:
+
+.. code-block:: python
+
+ proxy_url="socks5h://:@proxy-host"
+
+"""
+from __future__ import absolute_import
+
+try:
+ import socks
+except ImportError:
+ import warnings
+
+ from ..exceptions import DependencyWarning
+
+ warnings.warn(
+ (
+ "SOCKS support in urllib3 requires the installation of optional "
+ "dependencies: specifically, PySocks. For more information, see "
+ "https://urllib3.readthedocs.io/en/1.26.x/contrib.html#socks-proxies"
+ ),
+ DependencyWarning,
+ )
+ raise
+
+from socket import error as SocketError
+from socket import timeout as SocketTimeout
+
+from ..connection import HTTPConnection, HTTPSConnection
+from ..connectionpool import HTTPConnectionPool, HTTPSConnectionPool
+from ..exceptions import ConnectTimeoutError, NewConnectionError
+from ..poolmanager import PoolManager
+from ..util.url import parse_url
+
+try:
+ import ssl
+except ImportError:
+ ssl = None
+
+
+class SOCKSConnection(HTTPConnection):
+ """
+ A plain-text HTTP connection that connects via a SOCKS proxy.
+ """
+
+ def __init__(self, *args, **kwargs):
+ self._socks_options = kwargs.pop("_socks_options")
+ super(SOCKSConnection, self).__init__(*args, **kwargs)
+
+ def _new_conn(self):
+ """
+ Establish a new connection via the SOCKS proxy.
+ """
+ extra_kw = {}
+ if self.source_address:
+ extra_kw["source_address"] = self.source_address
+
+ if self.socket_options:
+ extra_kw["socket_options"] = self.socket_options
+
+ try:
+ conn = socks.create_connection(
+ (self.host, self.port),
+ proxy_type=self._socks_options["socks_version"],
+ proxy_addr=self._socks_options["proxy_host"],
+ proxy_port=self._socks_options["proxy_port"],
+ proxy_username=self._socks_options["username"],
+ proxy_password=self._socks_options["password"],
+ proxy_rdns=self._socks_options["rdns"],
+ timeout=self.timeout,
+ **extra_kw
+ )
+
+ except SocketTimeout:
+ raise ConnectTimeoutError(
+ self,
+ "Connection to %s timed out. (connect timeout=%s)"
+ % (self.host, self.timeout),
+ )
+
+ except socks.ProxyError as e:
+ # This is fragile as hell, but it seems to be the only way to raise
+ # useful errors here.
+ if e.socket_err:
+ error = e.socket_err
+ if isinstance(error, SocketTimeout):
+ raise ConnectTimeoutError(
+ self,
+ "Connection to %s timed out. (connect timeout=%s)"
+ % (self.host, self.timeout),
+ )
+ else:
+ raise NewConnectionError(
+ self, "Failed to establish a new connection: %s" % error
+ )
+ else:
+ raise NewConnectionError(
+ self, "Failed to establish a new connection: %s" % e
+ )
+
+ except SocketError as e: # Defensive: PySocks should catch all these.
+ raise NewConnectionError(
+ self, "Failed to establish a new connection: %s" % e
+ )
+
+ return conn
+
+
+# We don't need to duplicate the Verified/Unverified distinction from
+# urllib3/connection.py here because the HTTPSConnection will already have been
+# correctly set to either the Verified or Unverified form by that module. This
+# means the SOCKSHTTPSConnection will automatically be the correct type.
+class SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection):
+ pass
+
+
+class SOCKSHTTPConnectionPool(HTTPConnectionPool):
+ ConnectionCls = SOCKSConnection
+
+
+class SOCKSHTTPSConnectionPool(HTTPSConnectionPool):
+ ConnectionCls = SOCKSHTTPSConnection
+
+
+class SOCKSProxyManager(PoolManager):
+ """
+ A version of the urllib3 ProxyManager that routes connections via the
+ defined SOCKS proxy.
+ """
+
+ pool_classes_by_scheme = {
+ "http": SOCKSHTTPConnectionPool,
+ "https": SOCKSHTTPSConnectionPool,
+ }
+
+ def __init__(
+ self,
+ proxy_url,
+ username=None,
+ password=None,
+ num_pools=10,
+ headers=None,
+ **connection_pool_kw
+ ):
+ parsed = parse_url(proxy_url)
+
+ if username is None and password is None and parsed.auth is not None:
+ split = parsed.auth.split(":")
+ if len(split) == 2:
+ username, password = split
+ if parsed.scheme == "socks5":
+ socks_version = socks.PROXY_TYPE_SOCKS5
+ rdns = False
+ elif parsed.scheme == "socks5h":
+ socks_version = socks.PROXY_TYPE_SOCKS5
+ rdns = True
+ elif parsed.scheme == "socks4":
+ socks_version = socks.PROXY_TYPE_SOCKS4
+ rdns = False
+ elif parsed.scheme == "socks4a":
+ socks_version = socks.PROXY_TYPE_SOCKS4
+ rdns = True
+ else:
+ raise ValueError("Unable to determine SOCKS version from %s" % proxy_url)
+
+ self.proxy_url = proxy_url
+
+ socks_options = {
+ "socks_version": socks_version,
+ "proxy_host": parsed.host,
+ "proxy_port": parsed.port,
+ "username": username,
+ "password": password,
+ "rdns": rdns,
+ }
+ connection_pool_kw["_socks_options"] = socks_options
+
+ super(SOCKSProxyManager, self).__init__(
+ num_pools, headers, **connection_pool_kw
+ )
+
+ self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme
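A short usage sketch of the classes above. It assumes the optional PySocks dependency is installed (the import at the top of the module raises otherwise) and uses a placeholder proxy address and credentials:

```python
from pip._vendor.urllib3.contrib.socks import SOCKSProxyManager

# socks5h:// resolves DNS on the proxy (rdns=True above), which is the
# recommended scheme when the target is a domain name.
proxy = SOCKSProxyManager("socks5h://user:secret@127.0.0.1:1080/")
resp = proxy.request("GET", "http://example.com/")
print(resp.status)
```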
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/exceptions.py b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/exceptions.py
new file mode 100644
index 0000000..cba6f3f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/exceptions.py
@@ -0,0 +1,323 @@
+from __future__ import absolute_import
+
+from .packages.six.moves.http_client import IncompleteRead as httplib_IncompleteRead
+
+# Base Exceptions
+
+
+class HTTPError(Exception):
+ """Base exception used by this module."""
+
+ pass
+
+
+class HTTPWarning(Warning):
+ """Base warning used by this module."""
+
+ pass
+
+
+class PoolError(HTTPError):
+ """Base exception for errors caused within a pool."""
+
+ def __init__(self, pool, message):
+ self.pool = pool
+ HTTPError.__init__(self, "%s: %s" % (pool, message))
+
+ def __reduce__(self):
+ # For pickling purposes.
+ return self.__class__, (None, None)
+
+
+class RequestError(PoolError):
+ """Base exception for PoolErrors that have associated URLs."""
+
+ def __init__(self, pool, url, message):
+ self.url = url
+ PoolError.__init__(self, pool, message)
+
+ def __reduce__(self):
+ # For pickling purposes.
+ return self.__class__, (None, self.url, None)
+
+
+class SSLError(HTTPError):
+ """Raised when SSL certificate fails in an HTTPS connection."""
+
+ pass
+
+
+class ProxyError(HTTPError):
+ """Raised when the connection to a proxy fails."""
+
+ def __init__(self, message, error, *args):
+ super(ProxyError, self).__init__(message, error, *args)
+ self.original_error = error
+
+
+class DecodeError(HTTPError):
+ """Raised when automatic decoding based on Content-Type fails."""
+
+ pass
+
+
+class ProtocolError(HTTPError):
+ """Raised when something unexpected happens mid-request/response."""
+
+ pass
+
+
+#: Renamed to ProtocolError but aliased for backwards compatibility.
+ConnectionError = ProtocolError
+
+
+# Leaf Exceptions
+
+
+class MaxRetryError(RequestError):
+ """Raised when the maximum number of retries is exceeded.
+
+ :param pool: The connection pool
+ :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`
+ :param string url: The requested URL
+ :param exceptions.Exception reason: The underlying error
+
+ """
+
+ def __init__(self, pool, url, reason=None):
+ self.reason = reason
+
+ message = "Max retries exceeded with url: %s (Caused by %r)" % (url, reason)
+
+ RequestError.__init__(self, pool, url, message)
+
+
+class HostChangedError(RequestError):
+ """Raised when an existing pool gets a request for a foreign host."""
+
+ def __init__(self, pool, url, retries=3):
+ message = "Tried to open a foreign host with url: %s" % url
+ RequestError.__init__(self, pool, url, message)
+ self.retries = retries
+
+
+class TimeoutStateError(HTTPError):
+ """Raised when passing an invalid state to a timeout"""
+
+ pass
+
+
+class TimeoutError(HTTPError):
+ """Raised when a socket timeout error occurs.
+
+ Catching this error will catch both :exc:`ReadTimeoutErrors
+ <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
+ """
+
+ pass
+
+
+class ReadTimeoutError(TimeoutError, RequestError):
+ """Raised when a socket timeout occurs while receiving data from a server"""
+
+ pass
+
+
+# This timeout error does not have a URL attached and needs to inherit from the
+# base HTTPError
+class ConnectTimeoutError(TimeoutError):
+ """Raised when a socket timeout occurs while connecting to a server"""
+
+ pass
+
+
+class NewConnectionError(ConnectTimeoutError, PoolError):
+ """Raised when we fail to establish a new connection. Usually ECONNREFUSED."""
+
+ pass
+
+
+class EmptyPoolError(PoolError):
+ """Raised when a pool runs out of connections and no more are allowed."""
+
+ pass
+
+
+class ClosedPoolError(PoolError):
+ """Raised when a request enters a pool after the pool has been closed."""
+
+ pass
+
+
+class LocationValueError(ValueError, HTTPError):
+ """Raised when there is something wrong with a given URL input."""
+
+ pass
+
+
+class LocationParseError(LocationValueError):
+ """Raised when get_host or similar fails to parse the URL input."""
+
+ def __init__(self, location):
+ message = "Failed to parse: %s" % location
+ HTTPError.__init__(self, message)
+
+ self.location = location
+
+
+class URLSchemeUnknown(LocationValueError):
+ """Raised when a URL input has an unsupported scheme."""
+
+ def __init__(self, scheme):
+ message = "Not supported URL scheme %s" % scheme
+ super(URLSchemeUnknown, self).__init__(message)
+
+ self.scheme = scheme
+
+
+class ResponseError(HTTPError):
+ """Used as a container for an error reason supplied in a MaxRetryError."""
+
+ GENERIC_ERROR = "too many error responses"
+ SPECIFIC_ERROR = "too many {status_code} error responses"
+
+
+class SecurityWarning(HTTPWarning):
+ """Warned when performing security reducing actions"""
+
+ pass
+
+
+class SubjectAltNameWarning(SecurityWarning):
+ """Warned when connecting to a host with a certificate missing a SAN."""
+
+ pass
+
+
+class InsecureRequestWarning(SecurityWarning):
+ """Warned when making an unverified HTTPS request."""
+
+ pass
+
+
+class SystemTimeWarning(SecurityWarning):
+ """Warned when system time is suspected to be wrong"""
+
+ pass
+
+
+class InsecurePlatformWarning(SecurityWarning):
+ """Warned when certain TLS/SSL configuration is not available on a platform."""
+
+ pass
+
+
+class SNIMissingWarning(HTTPWarning):
+ """Warned when making a HTTPS request without SNI available."""
+
+ pass
+
+
+class DependencyWarning(HTTPWarning):
+ """
+ Warned when an attempt is made to import a module with missing optional
+ dependencies.
+ """
+
+ pass
+
+
+class ResponseNotChunked(ProtocolError, ValueError):
+ """Response needs to be chunked in order to read it as chunks."""
+
+ pass
+
+
+class BodyNotHttplibCompatible(HTTPError):
+ """
+ Body should be :class:`http.client.HTTPResponse` like
+ (have an fp attribute which returns raw chunks) for read_chunked().
+ """
+
+ pass
+
+
+class IncompleteRead(HTTPError, httplib_IncompleteRead):
+ """
+ Response length doesn't match expected Content-Length
+
+ Subclass of :class:`http.client.IncompleteRead` to allow int value
+ for ``partial`` to avoid creating large objects on streamed reads.
+ """
+
+ def __init__(self, partial, expected):
+ super(IncompleteRead, self).__init__(partial, expected)
+
+ def __repr__(self):
+ return "IncompleteRead(%i bytes read, %i more expected)" % (
+ self.partial,
+ self.expected,
+ )
+
+
+class InvalidChunkLength(HTTPError, httplib_IncompleteRead):
+ """Invalid chunk length in a chunked response."""
+
+ def __init__(self, response, length):
+ super(InvalidChunkLength, self).__init__(
+ response.tell(), response.length_remaining
+ )
+ self.response = response
+ self.length = length
+
+ def __repr__(self):
+ return "InvalidChunkLength(got length %r, %i bytes read)" % (
+ self.length,
+ self.partial,
+ )
+
+
+class InvalidHeader(HTTPError):
+ """The header provided was somehow invalid."""
+
+ pass
+
+
+class ProxySchemeUnknown(AssertionError, URLSchemeUnknown):
+ """ProxyManager does not support the supplied scheme"""
+
+ # TODO(t-8ch): Stop inheriting from AssertionError in v2.0.
+
+ def __init__(self, scheme):
+ # 'localhost' is here because our URL parser parses
+ # localhost:8080 -> scheme=localhost, remove if we fix this.
+ if scheme == "localhost":
+ scheme = None
+ if scheme is None:
+ message = "Proxy URL had no scheme, should start with http:// or https://"
+ else:
+ message = (
+ "Proxy URL had unsupported scheme %s, should use http:// or https://"
+ % scheme
+ )
+ super(ProxySchemeUnknown, self).__init__(message)
+
+
+class ProxySchemeUnsupported(ValueError):
+ """Fetching HTTPS resources through HTTPS proxies is unsupported"""
+
+ pass
+
+
+class HeaderParsingError(HTTPError):
+ """Raised by assert_header_parsing, but we convert it to a log.warning statement."""
+
+ def __init__(self, defects, unparsed_data):
+ message = "%s, unparsed data: %r" % (defects or "Unknown", unparsed_data)
+ super(HeaderParsingError, self).__init__(message)
+
+
+class UnrewindableBodyError(HTTPError):
+ """urllib3 encountered an error when trying to rewind a body"""
+
+ pass
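A hedged sketch of how this hierarchy is typically consumed by calling code; the URL is a deliberately unreachable placeholder:

```python
from pip._vendor import urllib3
from pip._vendor.urllib3.exceptions import (
    ConnectTimeoutError,
    MaxRetryError,
    NewConnectionError,
)

http = urllib3.PoolManager()
try:
    http.request("GET", "http://localhost:9/", retries=2, timeout=0.5)
except MaxRetryError as e:
    # e.reason carries the underlying leaf exception, e.g.
    # NewConnectionError (connection refused) or ConnectTimeoutError.
    print("gave up:", e.reason)
    print(isinstance(e.reason, (NewConnectionError, ConnectTimeoutError)))
```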
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/fields.py b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/fields.py
new file mode 100644
index 0000000..9d630f4
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/fields.py
@@ -0,0 +1,274 @@
+from __future__ import absolute_import
+
+import email.utils
+import mimetypes
+import re
+
+from .packages import six
+
+
+def guess_content_type(filename, default="application/octet-stream"):
+ """
+ Guess the "Content-Type" of a file.
+
+ :param filename:
+ The filename to guess the "Content-Type" of using :mod:`mimetypes`.
+ :param default:
+ If no "Content-Type" can be guessed, default to `default`.
+ """
+ if filename:
+ return mimetypes.guess_type(filename)[0] or default
+ return default
+
+
+def format_header_param_rfc2231(name, value):
+ """
+ Helper function to format and quote a single header parameter using the
+ strategy defined in RFC 2231.
+
+ Particularly useful for header parameters which might contain
+ non-ASCII values, like file names. This follows
+ `RFC 2388 Section 4.4 <https://tools.ietf.org/html/rfc2388#section-4.4>`_.
+
+ :param name:
+ The name of the parameter, a string expected to be ASCII only.
+ :param value:
+ The value of the parameter, provided as ``bytes`` or ``str``.
+ :ret:
+ An RFC-2231-formatted unicode string.
+ """
+ if isinstance(value, six.binary_type):
+ value = value.decode("utf-8")
+
+ if not any(ch in value for ch in '"\\\r\n'):
+ result = u'%s="%s"' % (name, value)
+ try:
+ result.encode("ascii")
+ except (UnicodeEncodeError, UnicodeDecodeError):
+ pass
+ else:
+ return result
+
+ if six.PY2: # Python 2:
+ value = value.encode("utf-8")
+
+ # encode_rfc2231 accepts an encoded string and returns an ascii-encoded
+ # string in Python 2 but accepts and returns unicode strings in Python 3
+ value = email.utils.encode_rfc2231(value, "utf-8")
+ value = "%s*=%s" % (name, value)
+
+ if six.PY2: # Python 2:
+ value = value.decode("utf-8")
+
+ return value
+
+
+_HTML5_REPLACEMENTS = {
+ u"\u0022": u"%22",
+ # Replace "\" with "\\".
+ u"\u005C": u"\u005C\u005C",
+}
+
+# All control characters from 0x00 to 0x1F *except* 0x1B.
+_HTML5_REPLACEMENTS.update(
+ {
+ six.unichr(cc): u"%{:02X}".format(cc)
+ for cc in range(0x00, 0x1F + 1)
+ if cc not in (0x1B,)
+ }
+)
+
+
+def _replace_multiple(value, needles_and_replacements):
+ def replacer(match):
+ return needles_and_replacements[match.group(0)]
+
+ pattern = re.compile(
+ r"|".join([re.escape(needle) for needle in needles_and_replacements.keys()])
+ )
+
+ result = pattern.sub(replacer, value)
+
+ return result
+
+
+def format_header_param_html5(name, value):
+ """
+ Helper function to format and quote a single header parameter using the
+ HTML5 strategy.
+
+ Particularly useful for header parameters which might contain
+ non-ASCII values, like file names. This follows the `HTML5 Working Draft
+ Section 4.10.22.7`_ and matches the behavior of curl and modern browsers.
+
+ .. _HTML5 Working Draft Section 4.10.22.7:
+ https://w3c.github.io/html/sec-forms.html#multipart-form-data
+
+ :param name:
+ The name of the parameter, a string expected to be ASCII only.
+ :param value:
+ The value of the parameter, provided as ``bytes`` or ``str``.
+ :ret:
+ A unicode string, stripped of troublesome characters.
+ """
+ if isinstance(value, six.binary_type):
+ value = value.decode("utf-8")
+
+ value = _replace_multiple(value, _HTML5_REPLACEMENTS)
+
+ return u'%s="%s"' % (name, value)
+
+
+# For backwards-compatibility.
+format_header_param = format_header_param_html5
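To illustrate the two quoting strategies above, a small sketch (outputs shown as comments, assuming Python 3):

```python
from pip._vendor.urllib3.fields import (
    format_header_param_html5,
    format_header_param_rfc2231,
)

# HTML5 strategy: double quotes and control characters are percent-escaped.
print(format_header_param_html5("filename", u'my "report".txt'))
# filename="my %22report%22.txt"

# RFC 2231 strategy: non-ASCII values are charset-encoded instead.
print(format_header_param_rfc2231("filename", u"r\u00e9sum\u00e9.pdf"))
# filename*=utf-8''r%C3%A9sum%C3%A9.pdf
```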
+
+
+class RequestField(object):
+ """
+ A data container for request body parameters.
+
+ :param name:
+ The name of this request field. Must be unicode.
+ :param data:
+ The data/value body.
+ :param filename:
+ An optional filename of the request field. Must be unicode.
+ :param headers:
+ An optional dict-like object of headers to initially use for the field.
+ :param header_formatter:
+ An optional callable that is used to encode and format the headers. By
+ default, this is :func:`format_header_param_html5`.
+ """
+
+ def __init__(
+ self,
+ name,
+ data,
+ filename=None,
+ headers=None,
+ header_formatter=format_header_param_html5,
+ ):
+ self._name = name
+ self._filename = filename
+ self.data = data
+ self.headers = {}
+ if headers:
+ self.headers = dict(headers)
+ self.header_formatter = header_formatter
+
+ @classmethod
+ def from_tuples(cls, fieldname, value, header_formatter=format_header_param_html5):
+ """
+ A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
+
+ Supports constructing :class:`~urllib3.fields.RequestField` from
+ parameter of key/value strings AND key/filetuple. A filetuple is a
+ (filename, data, MIME type) tuple where the MIME type is optional.
+ For example::
+
+ 'foo': 'bar',
+ 'fakefile': ('foofile.txt', 'contents of foofile'),
+ 'realfile': ('barfile.txt', open('realfile').read()),
+ 'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
+ 'nonamefile': 'contents of nonamefile field',
+
+ Field names and filenames must be unicode.
+ """
+ if isinstance(value, tuple):
+ if len(value) == 3:
+ filename, data, content_type = value
+ else:
+ filename, data = value
+ content_type = guess_content_type(filename)
+ else:
+ filename = None
+ content_type = None
+ data = value
+
+ request_param = cls(
+ fieldname, data, filename=filename, header_formatter=header_formatter
+ )
+ request_param.make_multipart(content_type=content_type)
+
+ return request_param
+
+ def _render_part(self, name, value):
+ """
+ Overridable helper function to format a single header parameter. By
+ default, this calls ``self.header_formatter``.
+
+ :param name:
+ The name of the parameter, a string expected to be ASCII only.
+ :param value:
+ The value of the parameter, provided as a unicode string.
+ """
+
+ return self.header_formatter(name, value)
+
+ def _render_parts(self, header_parts):
+ """
+ Helper function to format and quote a single header.
+
+ Useful for single headers that are composed of multiple items. E.g.,
+ 'Content-Disposition' fields.
+
+ :param header_parts:
+ A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
+ as `k1="v1"; k2="v2"; ...`.
+ """
+ parts = []
+ iterable = header_parts
+ if isinstance(header_parts, dict):
+ iterable = header_parts.items()
+
+ for name, value in iterable:
+ if value is not None:
+ parts.append(self._render_part(name, value))
+
+ return u"; ".join(parts)
+
+ def render_headers(self):
+ """
+ Renders the headers for this request field.
+ """
+ lines = []
+
+ sort_keys = ["Content-Disposition", "Content-Type", "Content-Location"]
+ for sort_key in sort_keys:
+ if self.headers.get(sort_key, False):
+ lines.append(u"%s: %s" % (sort_key, self.headers[sort_key]))
+
+ for header_name, header_value in self.headers.items():
+ if header_name not in sort_keys:
+ if header_value:
+ lines.append(u"%s: %s" % (header_name, header_value))
+
+ lines.append(u"\r\n")
+ return u"\r\n".join(lines)
+
+ def make_multipart(
+ self, content_disposition=None, content_type=None, content_location=None
+ ):
+ """
+ Makes this request field into a multipart request field.
+
+ This method sets the "Content-Disposition", "Content-Type" and
+ "Content-Location" headers on the request field.
+
+ :param content_type:
+ The 'Content-Type' of the request body.
+ :param content_location:
+ The 'Content-Location' of the request body.
+
+ """
+ self.headers["Content-Disposition"] = content_disposition or u"form-data"
+ self.headers["Content-Disposition"] += u"; ".join(
+ [
+ u"",
+ self._render_parts(
+ ((u"name", self._name), (u"filename", self._filename))
+ ),
+ ]
+ )
+ self.headers["Content-Type"] = content_type
+ self.headers["Content-Location"] = content_location
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/filepost.py b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/filepost.py
new file mode 100644
index 0000000..36c9252
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/filepost.py
@@ -0,0 +1,98 @@
+from __future__ import absolute_import
+
+import binascii
+import codecs
+import os
+from io import BytesIO
+
+from .fields import RequestField
+from .packages import six
+from .packages.six import b
+
+writer = codecs.lookup("utf-8")[3]
+
+
+def choose_boundary():
+ """
+ Our embarrassingly-simple replacement for mimetools.choose_boundary.
+ """
+ boundary = binascii.hexlify(os.urandom(16))
+ if not six.PY2:
+ boundary = boundary.decode("ascii")
+ return boundary
+
+
+def iter_field_objects(fields):
+ """
+ Iterate over fields.
+
+ Supports list of (k, v) tuples and dicts, and lists of
+ :class:`~urllib3.fields.RequestField`.
+
+ """
+ if isinstance(fields, dict):
+ i = six.iteritems(fields)
+ else:
+ i = iter(fields)
+
+ for field in i:
+ if isinstance(field, RequestField):
+ yield field
+ else:
+ yield RequestField.from_tuples(*field)
+
+
+def iter_fields(fields):
+ """
+ .. deprecated:: 1.6
+
+ Iterate over fields.
+
+ The addition of :class:`~urllib3.fields.RequestField` makes this function
+ obsolete. Instead, use :func:`iter_field_objects`, which returns
+ :class:`~urllib3.fields.RequestField` objects.
+
+ Supports list of (k, v) tuples and dicts.
+ """
+ if isinstance(fields, dict):
+ return ((k, v) for k, v in six.iteritems(fields))
+
+ return ((k, v) for k, v in fields)
+
+
+def encode_multipart_formdata(fields, boundary=None):
+ """
+ Encode a dictionary of ``fields`` using the multipart/form-data MIME format.
+
+ :param fields:
+ Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`).
+
+ :param boundary:
+ If not specified, then a random boundary will be generated using
+ :func:`urllib3.filepost.choose_boundary`.
+ """
+ body = BytesIO()
+ if boundary is None:
+ boundary = choose_boundary()
+
+ for field in iter_field_objects(fields):
+ body.write(b("--%s\r\n" % (boundary)))
+
+ writer(body).write(field.render_headers())
+ data = field.data
+
+ if isinstance(data, int):
+ data = str(data) # Backwards compatibility
+
+ if isinstance(data, six.text_type):
+ writer(body).write(data)
+ else:
+ body.write(data)
+
+ body.write(b"\r\n")
+
+ body.write(b("--%s--\r\n" % (boundary)))
+
+ content_type = str("multipart/form-data; boundary=%s" % boundary)
+
+ return body.getvalue(), content_type
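A quick sketch of encode_multipart_formdata() with a fixed boundary so the output is deterministic; the field contents are placeholders:

```python
from pip._vendor.urllib3.filepost import encode_multipart_formdata

fields = {
    "token": "abc123",
    "report": ("report.txt", "line one\nline two", "text/plain"),
}
body, content_type = encode_multipart_formdata(fields, boundary="xXxBOUNDARYxXx")
print(content_type)   # multipart/form-data; boundary=xXxBOUNDARYxXx
print(body.decode())  # each part framed by --xXxBOUNDARYxXx, ending with --xXxBOUNDARYxXx--
```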
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/poolmanager.py b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/poolmanager.py
new file mode 100644
index 0000000..3a31a28
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/poolmanager.py
@@ -0,0 +1,536 @@
+from __future__ import absolute_import
+
+import collections
+import functools
+import logging
+
+from ._collections import RecentlyUsedContainer
+from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, port_by_scheme
+from .exceptions import (
+ LocationValueError,
+ MaxRetryError,
+ ProxySchemeUnknown,
+ ProxySchemeUnsupported,
+ URLSchemeUnknown,
+)
+from .packages import six
+from .packages.six.moves.urllib.parse import urljoin
+from .request import RequestMethods
+from .util.proxy import connection_requires_http_tunnel
+from .util.retry import Retry
+from .util.url import parse_url
+
+__all__ = ["PoolManager", "ProxyManager", "proxy_from_url"]
+
+
+log = logging.getLogger(__name__)
+
+SSL_KEYWORDS = (
+ "key_file",
+ "cert_file",
+ "cert_reqs",
+ "ca_certs",
+ "ssl_version",
+ "ca_cert_dir",
+ "ssl_context",
+ "key_password",
+)
+
+# All known keyword arguments that could be provided to the pool manager, its
+# pools, or the underlying connections. This is used to construct a pool key.
+_key_fields = (
+ "key_scheme", # str
+ "key_host", # str
+ "key_port", # int
+ "key_timeout", # int or float or Timeout
+ "key_retries", # int or Retry
+ "key_strict", # bool
+ "key_block", # bool
+ "key_source_address", # str
+ "key_key_file", # str
+ "key_key_password", # str
+ "key_cert_file", # str
+ "key_cert_reqs", # str
+ "key_ca_certs", # str
+ "key_ssl_version", # str
+ "key_ca_cert_dir", # str
+ "key_ssl_context", # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext
+ "key_maxsize", # int
+ "key_headers", # dict
+ "key__proxy", # parsed proxy url
+ "key__proxy_headers", # dict
+ "key__proxy_config", # class
+ "key_socket_options", # list of (level (int), optname (int), value (int or str)) tuples
+ "key__socks_options", # dict
+ "key_assert_hostname", # bool or string
+ "key_assert_fingerprint", # str
+ "key_server_hostname", # str
+)
+
+#: The namedtuple class used to construct keys for the connection pool.
+#: All custom key schemes should include the fields in this key at a minimum.
+PoolKey = collections.namedtuple("PoolKey", _key_fields)
+
+_proxy_config_fields = ("ssl_context", "use_forwarding_for_https")
+ProxyConfig = collections.namedtuple("ProxyConfig", _proxy_config_fields)
+
+
+def _default_key_normalizer(key_class, request_context):
+ """
+ Create a pool key out of a request context dictionary.
+
+ According to RFC 3986, both the scheme and host are case-insensitive.
+ Therefore, this function normalizes both before constructing the pool
+ key for an HTTPS request. If you wish to change this behaviour, provide
+ alternate callables to ``key_fn_by_scheme``.
+
+ :param key_class:
+ The class to use when constructing the key. This should be a namedtuple
+ with the ``scheme`` and ``host`` keys at a minimum.
+ :type key_class: namedtuple
+ :param request_context:
+ A dictionary-like object that contains the context for a request.
+ :type request_context: dict
+
+ :return: A namedtuple that can be used as a connection pool key.
+ :rtype: PoolKey
+ """
+ # Since we mutate the dictionary, make a copy first
+ context = request_context.copy()
+ context["scheme"] = context["scheme"].lower()
+ context["host"] = context["host"].lower()
+
+ # These are both dictionaries and need to be transformed into frozensets
+ for key in ("headers", "_proxy_headers", "_socks_options"):
+ if key in context and context[key] is not None:
+ context[key] = frozenset(context[key].items())
+
+ # The socket_options key may be a list and needs to be transformed into a
+ # tuple.
+ socket_opts = context.get("socket_options")
+ if socket_opts is not None:
+ context["socket_options"] = tuple(socket_opts)
+
+ # Map the kwargs to the names in the namedtuple - this is necessary since
+ # namedtuples can't have fields starting with '_'.
+ for key in list(context.keys()):
+ context["key_" + key] = context.pop(key)
+
+ # Default to ``None`` for keys missing from the context
+ for field in key_class._fields:
+ if field not in context:
+ context[field] = None
+
+ return key_class(**context)
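To make the normalization above concrete, a small sketch that calls the module-private helper directly (illustrative only; PoolManager does this internally):

```python
from pip._vendor.urllib3.poolmanager import PoolKey, _default_key_normalizer

context = {"scheme": "HTTPS", "host": "Example.COM", "port": 443, "headers": {"x-a": "1"}}
key = _default_key_normalizer(PoolKey, context)

print(key.key_scheme, key.key_host)  # https example.com  (case-normalized)
print(key.key_headers)               # frozenset({('x-a', '1')})  (hashable)
print(key.key_ssl_context)           # None  (missing fields default to None)
```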
+
+
+#: A dictionary that maps a scheme to a callable that creates a pool key.
+#: This can be used to alter the way pool keys are constructed, if desired.
+#: Each PoolManager makes a copy of this dictionary so they can be configured
+#: globally here, or individually on the instance.
+key_fn_by_scheme = {
+ "http": functools.partial(_default_key_normalizer, PoolKey),
+ "https": functools.partial(_default_key_normalizer, PoolKey),
+}
+
+pool_classes_by_scheme = {"http": HTTPConnectionPool, "https": HTTPSConnectionPool}
+
+
+class PoolManager(RequestMethods):
+ """
+ Allows for arbitrary requests while transparently keeping track of
+ necessary connection pools for you.
+
+ :param num_pools:
+ Number of connection pools to cache before discarding the least
+ recently used pool.
+
+ :param headers:
+ Headers to include with all requests, unless other headers are given
+ explicitly.
+
+ :param \\**connection_pool_kw:
+ Additional parameters are used to create fresh
+ :class:`urllib3.connectionpool.ConnectionPool` instances.
+
+ Example::
+
+ >>> manager = PoolManager(num_pools=2)
+ >>> r = manager.request('GET', 'http://google.com/')
+ >>> r = manager.request('GET', 'http://google.com/mail')
+ >>> r = manager.request('GET', 'http://yahoo.com/')
+ >>> len(manager.pools)
+ 2
+
+ """
+
+ proxy = None
+ proxy_config = None
+
+ def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
+ RequestMethods.__init__(self, headers)
+ self.connection_pool_kw = connection_pool_kw
+ self.pools = RecentlyUsedContainer(num_pools, dispose_func=lambda p: p.close())
+
+ # Locally set the pool classes and keys so other PoolManagers can
+ # override them.
+ self.pool_classes_by_scheme = pool_classes_by_scheme
+ self.key_fn_by_scheme = key_fn_by_scheme.copy()
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.clear()
+ # Return False to re-raise any potential exceptions
+ return False
+
+ def _new_pool(self, scheme, host, port, request_context=None):
+ """
+ Create a new :class:`urllib3.connectionpool.ConnectionPool` based on host, port, scheme, and
+ any additional pool keyword arguments.
+
+ If ``request_context`` is provided, it is provided as keyword arguments
+ to the pool class used. This method is used to actually create the
+ connection pools handed out by :meth:`connection_from_url` and
+ companion methods. It is intended to be overridden for customization.
+ """
+ pool_cls = self.pool_classes_by_scheme[scheme]
+ if request_context is None:
+ request_context = self.connection_pool_kw.copy()
+
+ # Although the context has everything necessary to create the pool,
+ # this function has historically only used the scheme, host, and port
+ # in the positional args. When an API change is acceptable these can
+ # be removed.
+ for key in ("scheme", "host", "port"):
+ request_context.pop(key, None)
+
+ if scheme == "http":
+ for kw in SSL_KEYWORDS:
+ request_context.pop(kw, None)
+
+ return pool_cls(host, port, **request_context)
+
+ def clear(self):
+ """
+ Empty our store of pools and direct them all to close.
+
+ This will not affect in-flight connections, but they will not be
+ re-used after completion.
+ """
+ self.pools.clear()
+
+ def connection_from_host(self, host, port=None, scheme="http", pool_kwargs=None):
+ """
+ Get a :class:`urllib3.connectionpool.ConnectionPool` based on the host, port, and scheme.
+
+ If ``port`` isn't given, it will be derived from the ``scheme`` using
+ ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is
+ provided, it is merged with the instance's ``connection_pool_kw``
+ variable and used to create the new connection pool, if one is
+ needed.
+ """
+
+ if not host:
+ raise LocationValueError("No host specified.")
+
+ request_context = self._merge_pool_kwargs(pool_kwargs)
+ request_context["scheme"] = scheme or "http"
+ if not port:
+ port = port_by_scheme.get(request_context["scheme"].lower(), 80)
+ request_context["port"] = port
+ request_context["host"] = host
+
+ return self.connection_from_context(request_context)
+
+ def connection_from_context(self, request_context):
+ """
+ Get a :class:`urllib3.connectionpool.ConnectionPool` based on the request context.
+
+ ``request_context`` must at least contain the ``scheme`` key and its
+ value must be a key in ``key_fn_by_scheme`` instance variable.
+ """
+ scheme = request_context["scheme"].lower()
+ pool_key_constructor = self.key_fn_by_scheme.get(scheme)
+ if not pool_key_constructor:
+ raise URLSchemeUnknown(scheme)
+ pool_key = pool_key_constructor(request_context)
+
+ return self.connection_from_pool_key(pool_key, request_context=request_context)
+
+ def connection_from_pool_key(self, pool_key, request_context=None):
+ """
+ Get a :class:`urllib3.connectionpool.ConnectionPool` based on the provided pool key.
+
+ ``pool_key`` should be a namedtuple that only contains immutable
+ objects. At a minimum it must have the ``scheme``, ``host``, and
+ ``port`` fields.
+ """
+ with self.pools.lock:
+ # If the scheme, host, or port doesn't match existing open
+ # connections, open a new ConnectionPool.
+ pool = self.pools.get(pool_key)
+ if pool:
+ return pool
+
+ # Make a fresh ConnectionPool of the desired type
+ scheme = request_context["scheme"]
+ host = request_context["host"]
+ port = request_context["port"]
+ pool = self._new_pool(scheme, host, port, request_context=request_context)
+ self.pools[pool_key] = pool
+
+ return pool
+
+ def connection_from_url(self, url, pool_kwargs=None):
+ """
+ Similar to :func:`urllib3.connectionpool.connection_from_url`.
+
+ If ``pool_kwargs`` is not provided and a new pool needs to be
+ constructed, ``self.connection_pool_kw`` is used to initialize
+ the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs``
+ is provided, it is used instead. Note that if a new pool does not
+ need to be created for the request, the provided ``pool_kwargs`` are
+ not used.
+ """
+ u = parse_url(url)
+ return self.connection_from_host(
+ u.host, port=u.port, scheme=u.scheme, pool_kwargs=pool_kwargs
+ )
+
+ def _merge_pool_kwargs(self, override):
+ """
+ Merge a dictionary of override values for self.connection_pool_kw.
+
+ This does not modify self.connection_pool_kw and returns a new dict.
+ Any keys in the override dictionary with a value of ``None`` are
+ removed from the merged dictionary.
+ """
+ base_pool_kwargs = self.connection_pool_kw.copy()
+ if override:
+ for key, value in override.items():
+ if value is None:
+ try:
+ del base_pool_kwargs[key]
+ except KeyError:
+ pass
+ else:
+ base_pool_kwargs[key] = value
+ return base_pool_kwargs
+
+ def _proxy_requires_url_absolute_form(self, parsed_url):
+ """
+ Indicates if the proxy requires the complete destination URL in the
+ request. Normally this is only needed when not using an HTTP CONNECT
+ tunnel.
+ """
+ if self.proxy is None:
+ return False
+
+ return not connection_requires_http_tunnel(
+ self.proxy, self.proxy_config, parsed_url.scheme
+ )
+
+ def _validate_proxy_scheme_url_selection(self, url_scheme):
+ """
+ Validates that were not attempting to do TLS in TLS connections on
+ Python2 or with unsupported SSL implementations.
+ """
+ if self.proxy is None or url_scheme != "https":
+ return
+
+ if self.proxy.scheme != "https":
+ return
+
+ if six.PY2 and not self.proxy_config.use_forwarding_for_https:
+ raise ProxySchemeUnsupported(
+ "Contacting HTTPS destinations through HTTPS proxies "
+ "'via CONNECT tunnels' is not supported in Python 2"
+ )
+
+ def urlopen(self, method, url, redirect=True, **kw):
+ """
+ Same as :meth:`urllib3.HTTPConnectionPool.urlopen`
+ with custom cross-host redirect logic and only sends the request-uri
+ portion of the ``url``.
+
+ The given ``url`` parameter must be absolute, such that an appropriate
+ :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
+ """
+ u = parse_url(url)
+ self._validate_proxy_scheme_url_selection(u.scheme)
+
+ conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
+
+ kw["assert_same_host"] = False
+ kw["redirect"] = False
+
+ if "headers" not in kw:
+ kw["headers"] = self.headers.copy()
+
+ if self._proxy_requires_url_absolute_form(u):
+ response = conn.urlopen(method, url, **kw)
+ else:
+ response = conn.urlopen(method, u.request_uri, **kw)
+
+ redirect_location = redirect and response.get_redirect_location()
+ if not redirect_location:
+ return response
+
+ # Support relative URLs for redirecting.
+ redirect_location = urljoin(url, redirect_location)
+
+ # RFC 7231, Section 6.4.4
+ if response.status == 303:
+ method = "GET"
+
+ retries = kw.get("retries")
+ if not isinstance(retries, Retry):
+ retries = Retry.from_int(retries, redirect=redirect)
+
+ # Strip headers marked as unsafe to forward to the redirected location.
+ # Check remove_headers_on_redirect to avoid a potential network call within
+ # conn.is_same_host() which may use socket.gethostbyname() in the future.
+ if retries.remove_headers_on_redirect and not conn.is_same_host(
+ redirect_location
+ ):
+ headers = list(six.iterkeys(kw["headers"]))
+ for header in headers:
+ if header.lower() in retries.remove_headers_on_redirect:
+ kw["headers"].pop(header, None)
+
+ try:
+ retries = retries.increment(method, url, response=response, _pool=conn)
+ except MaxRetryError:
+ if retries.raise_on_redirect:
+ response.drain_conn()
+ raise
+ return response
+
+ kw["retries"] = retries
+ kw["redirect"] = redirect
+
+ log.info("Redirecting %s -> %s", url, redirect_location)
+
+ response.drain_conn()
+ return self.urlopen(method, redirect_location, **kw)
+
+
+class ProxyManager(PoolManager):
+ """
+ Behaves just like :class:`PoolManager`, but sends all requests through
+ the defined proxy, using the CONNECT method for HTTPS URLs.
+
+ :param proxy_url:
+ The URL of the proxy to be used.
+
+ :param proxy_headers:
+ A dictionary containing headers that will be sent to the proxy. For
+ HTTP requests they are sent with every request, while for
+ HTTPS/CONNECT they are sent only once. Can be used for proxy
+ authentication.
+
+ :param proxy_ssl_context:
+ The proxy SSL context is used to establish the TLS connection to the
+ proxy when using HTTPS proxies.
+
+ :param use_forwarding_for_https:
+ (Defaults to False) If set to True will forward requests to the HTTPS
+ proxy to be made on behalf of the client instead of creating a TLS
+ tunnel via the CONNECT method. **Enabling this flag means that request
+ and response headers and content will be visible from the HTTPS proxy**
+ whereas tunneling keeps request and response headers and content
+ private. IP address, target hostname, SNI, and port are always visible
+ to an HTTPS proxy even when this flag is disabled.
+
+ Example:
+ >>> proxy = urllib3.ProxyManager('http://localhost:3128/')
+ >>> r1 = proxy.request('GET', 'http://google.com/')
+ >>> r2 = proxy.request('GET', 'http://httpbin.org/')
+ >>> len(proxy.pools)
+ 1
+ >>> r3 = proxy.request('GET', 'https://httpbin.org/')
+ >>> r4 = proxy.request('GET', 'https://twitter.com/')
+ >>> len(proxy.pools)
+ 3
+
+ """
+
+ def __init__(
+ self,
+ proxy_url,
+ num_pools=10,
+ headers=None,
+ proxy_headers=None,
+ proxy_ssl_context=None,
+ use_forwarding_for_https=False,
+ **connection_pool_kw
+ ):
+
+ if isinstance(proxy_url, HTTPConnectionPool):
+ proxy_url = "%s://%s:%i" % (
+ proxy_url.scheme,
+ proxy_url.host,
+ proxy_url.port,
+ )
+ proxy = parse_url(proxy_url)
+
+ if proxy.scheme not in ("http", "https"):
+ raise ProxySchemeUnknown(proxy.scheme)
+
+ if not proxy.port:
+ port = port_by_scheme.get(proxy.scheme, 80)
+ proxy = proxy._replace(port=port)
+
+ self.proxy = proxy
+ self.proxy_headers = proxy_headers or {}
+ self.proxy_ssl_context = proxy_ssl_context
+ self.proxy_config = ProxyConfig(proxy_ssl_context, use_forwarding_for_https)
+
+ connection_pool_kw["_proxy"] = self.proxy
+ connection_pool_kw["_proxy_headers"] = self.proxy_headers
+ connection_pool_kw["_proxy_config"] = self.proxy_config
+
+ super(ProxyManager, self).__init__(num_pools, headers, **connection_pool_kw)
+
+ def connection_from_host(self, host, port=None, scheme="http", pool_kwargs=None):
+ if scheme == "https":
+ return super(ProxyManager, self).connection_from_host(
+ host, port, scheme, pool_kwargs=pool_kwargs
+ )
+
+ return super(ProxyManager, self).connection_from_host(
+ self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs
+ )
+
+ def _set_proxy_headers(self, url, headers=None):
+ """
+ Sets headers needed by proxies: specifically, the Accept and Host
+ headers. Only sets headers not provided by the user.
+ """
+ headers_ = {"Accept": "*/*"}
+
+ netloc = parse_url(url).netloc
+ if netloc:
+ headers_["Host"] = netloc
+
+ if headers:
+ headers_.update(headers)
+ return headers_
+
+ def urlopen(self, method, url, redirect=True, **kw):
+ "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
+ u = parse_url(url)
+ if not connection_requires_http_tunnel(self.proxy, self.proxy_config, u.scheme):
+ # For connections using HTTP CONNECT, httplib sets the necessary
+ # headers on the CONNECT to the proxy. If we're not using CONNECT,
+ # we'll definitely need to set 'Host' at the very least.
+ headers = kw.get("headers", self.headers)
+ kw["headers"] = self._set_proxy_headers(url, headers)
+
+ return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw)
+
+
+def proxy_from_url(url, **kw):
+ return ProxyManager(proxy_url=url, **kw)
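A brief usage sketch of the managers above; the hosts, proxy address, and credential header value are placeholders:

```python
from pip._vendor.urllib3.poolmanager import PoolManager, proxy_from_url

with PoolManager(num_pools=5, maxsize=4, retries=3) as http:
    r1 = http.request("GET", "https://example.com/")
    r2 = http.request("GET", "https://example.com/other")  # same PoolKey -> same pool
    print(len(http.pools))  # 1

# proxy_from_url() is a thin wrapper around ProxyManager:
proxy = proxy_from_url(
    "http://localhost:3128/",
    proxy_headers={"Proxy-Authorization": "Basic placeholder"},
)
r = proxy.request("GET", "http://example.com/")
```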
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/request.py b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/request.py
new file mode 100644
index 0000000..398386a
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/request.py
@@ -0,0 +1,170 @@
+from __future__ import absolute_import
+
+from .filepost import encode_multipart_formdata
+from .packages.six.moves.urllib.parse import urlencode
+
+__all__ = ["RequestMethods"]
+
+
+class RequestMethods(object):
+ """
+ Convenience mixin for classes who implement a :meth:`urlopen` method, such
+ as :class:`urllib3.HTTPConnectionPool` and
+ :class:`urllib3.PoolManager`.
+
+ Provides behavior for making common types of HTTP request methods and
+ decides which type of request field encoding to use.
+
+ Specifically,
+
+ :meth:`.request_encode_url` is for sending requests whose fields are
+ encoded in the URL (such as GET, HEAD, DELETE).
+
+ :meth:`.request_encode_body` is for sending requests whose fields are
+ encoded in the *body* of the request using multipart or www-form-urlencoded
+ (such as for POST, PUT, PATCH).
+
+ :meth:`.request` is for making any kind of request, it will look up the
+ appropriate encoding format and use one of the above two methods to make
+ the request.
+
+ Initializer parameters:
+
+ :param headers:
+ Headers to include with all requests, unless other headers are given
+ explicitly.
+ """
+
+ _encode_url_methods = {"DELETE", "GET", "HEAD", "OPTIONS"}
+
+ def __init__(self, headers=None):
+ self.headers = headers or {}
+
+ def urlopen(
+ self,
+ method,
+ url,
+ body=None,
+ headers=None,
+ encode_multipart=True,
+ multipart_boundary=None,
+ **kw
+ ): # Abstract
+ raise NotImplementedError(
+ "Classes extending RequestMethods must implement "
+ "their own ``urlopen`` method."
+ )
+
+ def request(self, method, url, fields=None, headers=None, **urlopen_kw):
+ """
+ Make a request using :meth:`urlopen` with the appropriate encoding of
+ ``fields`` based on the ``method`` used.
+
+ This is a convenience method that requires the least amount of manual
+ effort. It can be used in most situations, while still having the
+ option to drop down to more specific methods when necessary, such as
+ :meth:`request_encode_url`, :meth:`request_encode_body`,
+ or even the lowest level :meth:`urlopen`.
+ """
+ method = method.upper()
+
+ urlopen_kw["request_url"] = url
+
+ if method in self._encode_url_methods:
+ return self.request_encode_url(
+ method, url, fields=fields, headers=headers, **urlopen_kw
+ )
+ else:
+ return self.request_encode_body(
+ method, url, fields=fields, headers=headers, **urlopen_kw
+ )
+
+ def request_encode_url(self, method, url, fields=None, headers=None, **urlopen_kw):
+ """
+ Make a request using :meth:`urlopen` with the ``fields`` encoded in
+ the url. This is useful for request methods like GET, HEAD, DELETE, etc.
+ """
+ if headers is None:
+ headers = self.headers
+
+ extra_kw = {"headers": headers}
+ extra_kw.update(urlopen_kw)
+
+ if fields:
+ url += "?" + urlencode(fields)
+
+ return self.urlopen(method, url, **extra_kw)
+
+ def request_encode_body(
+ self,
+ method,
+ url,
+ fields=None,
+ headers=None,
+ encode_multipart=True,
+ multipart_boundary=None,
+ **urlopen_kw
+ ):
+ """
+ Make a request using :meth:`urlopen` with the ``fields`` encoded in
+ the body. This is useful for request methods like POST, PUT, PATCH, etc.
+
+ When ``encode_multipart=True`` (default), then
+ :func:`urllib3.encode_multipart_formdata` is used to encode
+ the payload with the appropriate content type. Otherwise
+ :func:`urllib.parse.urlencode` is used with the
+ 'application/x-www-form-urlencoded' content type.
+
+ Multipart encoding must be used when posting files, and it's reasonably
+ safe to use it at other times too. However, it may break request
+ signing, such as with OAuth.
+
+ Supports an optional ``fields`` parameter of key/value strings AND
+ key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
+ the MIME type is optional. For example::
+
+ fields = {
+ 'foo': 'bar',
+ 'fakefile': ('foofile.txt', 'contents of foofile'),
+ 'realfile': ('barfile.txt', open('realfile').read()),
+ 'typedfile': ('bazfile.bin', open('bazfile').read(),
+ 'image/jpeg'),
+ 'nonamefile': 'contents of nonamefile field',
+ }
+
+ When uploading a file, providing a filename (the first parameter of the
+ tuple) is optional but recommended to best mimic behavior of browsers.
+
+ Note that if ``headers`` are supplied, the 'Content-Type' header will
+ be overwritten because it depends on the dynamic random boundary string
+ which is used to compose the body of the request. The random boundary
+ string can be explicitly set with the ``multipart_boundary`` parameter.
+ """
+ if headers is None:
+ headers = self.headers
+
+ extra_kw = {"headers": {}}
+
+ if fields:
+ if "body" in urlopen_kw:
+ raise TypeError(
+ "request got values for both 'fields' and 'body', can only specify one."
+ )
+
+ if encode_multipart:
+ body, content_type = encode_multipart_formdata(
+ fields, boundary=multipart_boundary
+ )
+ else:
+ body, content_type = (
+ urlencode(fields),
+ "application/x-www-form-urlencoded",
+ )
+
+ extra_kw["body"] = body
+ extra_kw["headers"] = {"Content-Type": content_type}
+
+ extra_kw["headers"].update(headers)
+ extra_kw.update(urlopen_kw)
+
+ return self.urlopen(method, url, **extra_kw)
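A sketch of how request() dispatches between the two encoders above; the httpbin.org URLs are placeholders for any HTTP endpoint:

```python
from pip._vendor import urllib3

http = urllib3.PoolManager()

# GET is in _encode_url_methods, so fields become a query string:
r = http.request("GET", "http://httpbin.org/get", fields={"q": "espeon"})
# -> request_encode_url() issues GET /get?q=espeon

# POST is not, so fields are multipart-encoded into the body by default:
r = http.request(
    "POST",
    "http://httpbin.org/post",
    fields={"file": ("notes.txt", "hello", "text/plain")},
)
# Passing encode_multipart=False would urlencode the fields instead
# ("application/x-www-form-urlencoded").
```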
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/response.py b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/response.py
new file mode 100644
index 0000000..38693f4
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/response.py
@@ -0,0 +1,821 @@
+from __future__ import absolute_import
+
+import io
+import logging
+import zlib
+from contextlib import contextmanager
+from socket import error as SocketError
+from socket import timeout as SocketTimeout
+
+try:
+ import brotli
+except ImportError:
+ brotli = None
+
+from ._collections import HTTPHeaderDict
+from .connection import BaseSSLError, HTTPException
+from .exceptions import (
+ BodyNotHttplibCompatible,
+ DecodeError,
+ HTTPError,
+ IncompleteRead,
+ InvalidChunkLength,
+ InvalidHeader,
+ ProtocolError,
+ ReadTimeoutError,
+ ResponseNotChunked,
+ SSLError,
+)
+from .packages import six
+from .util.response import is_fp_closed, is_response_to_head
+
+log = logging.getLogger(__name__)
+
+
+class DeflateDecoder(object):
+ def __init__(self):
+ self._first_try = True
+ self._data = b""
+ self._obj = zlib.decompressobj()
+
+ def __getattr__(self, name):
+ return getattr(self._obj, name)
+
+ def decompress(self, data):
+ if not data:
+ return data
+
+ if not self._first_try:
+ return self._obj.decompress(data)
+
+ self._data += data
+ try:
+ decompressed = self._obj.decompress(data)
+ if decompressed:
+ self._first_try = False
+ self._data = None
+ return decompressed
+ except zlib.error:
+ self._first_try = False
+ self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
+ try:
+ return self.decompress(self._data)
+ finally:
+ self._data = None
+
+
+class GzipDecoderState(object):
+
+ FIRST_MEMBER = 0
+ OTHER_MEMBERS = 1
+ SWALLOW_DATA = 2
+
+
+class GzipDecoder(object):
+ def __init__(self):
+ self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
+ self._state = GzipDecoderState.FIRST_MEMBER
+
+ def __getattr__(self, name):
+ return getattr(self._obj, name)
+
+ def decompress(self, data):
+ ret = bytearray()
+ if self._state == GzipDecoderState.SWALLOW_DATA or not data:
+ return bytes(ret)
+ while True:
+ try:
+ ret += self._obj.decompress(data)
+ except zlib.error:
+ previous_state = self._state
+ # Ignore data after the first error
+ self._state = GzipDecoderState.SWALLOW_DATA
+ if previous_state == GzipDecoderState.OTHER_MEMBERS:
+ # Allow trailing garbage acceptable in other gzip clients
+ return bytes(ret)
+ raise
+ data = self._obj.unused_data
+ if not data:
+ return bytes(ret)
+ self._state = GzipDecoderState.OTHER_MEMBERS
+ self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
+
+
+if brotli is not None:
+
+ class BrotliDecoder(object):
+ # Supports both 'brotlipy' and 'Brotli' packages
+ # since they share an import name. The top branches
+ # are for 'brotlipy' and bottom branches for 'Brotli'
+ def __init__(self):
+ self._obj = brotli.Decompressor()
+ if hasattr(self._obj, "decompress"):
+ self.decompress = self._obj.decompress
+ else:
+ self.decompress = self._obj.process
+
+ def flush(self):
+ if hasattr(self._obj, "flush"):
+ return self._obj.flush()
+ return b""
+
+
+class MultiDecoder(object):
+ """
+ From RFC7231:
+ If one or more encodings have been applied to a representation, the
+ sender that applied the encodings MUST generate a Content-Encoding
+ header field that lists the content codings in the order in which
+ they were applied.
+ """
+
+ def __init__(self, modes):
+ self._decoders = [_get_decoder(m.strip()) for m in modes.split(",")]
+
+ def flush(self):
+ return self._decoders[0].flush()
+
+ def decompress(self, data):
+ for d in reversed(self._decoders):
+ data = d.decompress(data)
+ return data
+
+
+def _get_decoder(mode):
+ if "," in mode:
+ return MultiDecoder(mode)
+
+ if mode == "gzip":
+ return GzipDecoder()
+
+ if brotli is not None and mode == "br":
+ return BrotliDecoder()
+
+ return DeflateDecoder()
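As a sanity check of the decoder selection above, a sketch that exercises MultiDecoder with a two-stage Content-Encoding (calling the private helper directly, for illustration only):

```python
import gzip
import zlib

from pip._vendor.urllib3.response import _get_decoder

data = b"hello hello hello"
wire = gzip.compress(zlib.compress(data))   # "deflate" applied first, then "gzip"

# Content-Encoding lists codings in application order; MultiDecoder undoes
# them in reverse.
decoder = _get_decoder("deflate, gzip")
assert decoder.decompress(wire) == data
```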
+
+
+class HTTPResponse(io.IOBase):
+ """
+ HTTP Response container.
+
+ Backwards-compatible with :class:`http.client.HTTPResponse` but the response ``body`` is
+ loaded and decoded on-demand when the ``data`` property is accessed. This
+ class is also compatible with the Python standard library's :mod:`io`
+ module, and can hence be treated as a readable object in the context of that
+ framework.
+
+ Extra parameters for behaviour not present in :class:`http.client.HTTPResponse`:
+
+ :param preload_content:
+ If True, the response's body will be preloaded during construction.
+
+ :param decode_content:
+ If True, will attempt to decode the body based on the
+ 'content-encoding' header.
+
+ :param original_response:
+ When this HTTPResponse wrapper is generated from an :class:`http.client.HTTPResponse`
+ object, it's convenient to include the original for debug purposes. It's
+ otherwise unused.
+
+ :param retries:
+ The retries contains the last :class:`~urllib3.util.retry.Retry` that
+ was used during the request.
+
+ :param enforce_content_length:
+ Enforce content length checking. Body returned by server must match
+ value of Content-Length header, if present. Otherwise, raise error.
+ """
+
+ CONTENT_DECODERS = ["gzip", "deflate"]
+ if brotli is not None:
+ CONTENT_DECODERS += ["br"]
+ REDIRECT_STATUSES = [301, 302, 303, 307, 308]
+
+ def __init__(
+ self,
+ body="",
+ headers=None,
+ status=0,
+ version=0,
+ reason=None,
+ strict=0,
+ preload_content=True,
+ decode_content=True,
+ original_response=None,
+ pool=None,
+ connection=None,
+ msg=None,
+ retries=None,
+ enforce_content_length=False,
+ request_method=None,
+ request_url=None,
+ auto_close=True,
+ ):
+
+ if isinstance(headers, HTTPHeaderDict):
+ self.headers = headers
+ else:
+ self.headers = HTTPHeaderDict(headers)
+ self.status = status
+ self.version = version
+ self.reason = reason
+ self.strict = strict
+ self.decode_content = decode_content
+ self.retries = retries
+ self.enforce_content_length = enforce_content_length
+ self.auto_close = auto_close
+
+ self._decoder = None
+ self._body = None
+ self._fp = None
+ self._original_response = original_response
+ self._fp_bytes_read = 0
+ self.msg = msg
+ self._request_url = request_url
+
+ if body and isinstance(body, (six.string_types, bytes)):
+ self._body = body
+
+ self._pool = pool
+ self._connection = connection
+
+ if hasattr(body, "read"):
+ self._fp = body
+
+ # Are we using the chunked-style of transfer encoding?
+ self.chunked = False
+ self.chunk_left = None
+ tr_enc = self.headers.get("transfer-encoding", "").lower()
+ # Don't incur the penalty of creating a list and then discarding it
+ encodings = (enc.strip() for enc in tr_enc.split(","))
+ if "chunked" in encodings:
+ self.chunked = True
+
+ # Determine length of response
+ self.length_remaining = self._init_length(request_method)
+
+ # If requested, preload the body.
+ if preload_content and not self._body:
+ self._body = self.read(decode_content=decode_content)
+
+ def get_redirect_location(self):
+ """
+ Should we redirect and where to?
+
+ :returns: Truthy redirect location string if we got a redirect status
+ code and valid location. ``None`` if redirect status and no
+ location. ``False`` if not a redirect status code.
+ """
+ if self.status in self.REDIRECT_STATUSES:
+ return self.headers.get("location")
+
+ return False
+
+ def release_conn(self):
+ if not self._pool or not self._connection:
+ return
+
+ self._pool._put_conn(self._connection)
+ self._connection = None
+
+ def drain_conn(self):
+ """
+ Read and discard any remaining HTTP response data in the response connection.
+
+ Unread data in the HTTPResponse connection blocks the connection from being released back to the pool.
+ """
+ try:
+ self.read()
+ except (HTTPError, SocketError, BaseSSLError, HTTPException):
+ pass
+
+ @property
+ def data(self):
+ # For backwards-compat with urllib3 0.4 and earlier.
+ if self._body:
+ return self._body
+
+ if self._fp:
+ return self.read(cache_content=True)
+
+ @property
+ def connection(self):
+ return self._connection
+
+ def isclosed(self):
+ return is_fp_closed(self._fp)
+
+ def tell(self):
+ """
+ Obtain the number of bytes pulled over the wire so far. May differ from
+ the amount of content returned by :meth:`urllib3.response.HTTPResponse.read`
+ if bytes are encoded on the wire (e.g., compressed).
+ """
+ return self._fp_bytes_read
+
+ def _init_length(self, request_method):
+ """
+ Set initial length value for Response content if available.
+ """
+ length = self.headers.get("content-length")
+
+ if length is not None:
+ if self.chunked:
+ # This Response will fail with an IncompleteRead if it can't be
+ # received as chunked. This method falls back to attempt reading
+ # the response before raising an exception.
+ log.warning(
+ "Received response with both Content-Length and "
+ "Transfer-Encoding set. This is expressly forbidden "
+ "by RFC 7230 sec 3.3.2. Ignoring Content-Length and "
+ "attempting to process response as Transfer-Encoding: "
+ "chunked."
+ )
+ return None
+
+ try:
+ # RFC 7230 section 3.3.2 specifies multiple content lengths can
+ # be sent in a single Content-Length header
+ # (e.g. Content-Length: 42, 42). This line ensures the values
+ # are all valid ints and that as long as the `set` length is 1,
+ # all values are the same. Otherwise, the header is invalid.
+ lengths = set([int(val) for val in length.split(",")])
+ if len(lengths) > 1:
+ raise InvalidHeader(
+ "Content-Length contained multiple "
+ "unmatching values (%s)" % length
+ )
+ length = lengths.pop()
+ except ValueError:
+ length = None
+ else:
+ if length < 0:
+ length = None
+
+ # Convert status to int for comparison
+ # In some cases, httplib returns a status of "_UNKNOWN"
+ try:
+ status = int(self.status)
+ except ValueError:
+ status = 0
+
+ # Check for responses that shouldn't include a body
+ if status in (204, 304) or 100 <= status < 200 or request_method == "HEAD":
+ length = 0
+
+ return length
+
+ def _init_decoder(self):
+ """
+ Set-up the _decoder attribute if necessary.
+ """
+ # Note: content-encoding value should be case-insensitive, per RFC 7230
+ # Section 3.2
+ content_encoding = self.headers.get("content-encoding", "").lower()
+ if self._decoder is None:
+ if content_encoding in self.CONTENT_DECODERS:
+ self._decoder = _get_decoder(content_encoding)
+ elif "," in content_encoding:
+ encodings = [
+ e.strip()
+ for e in content_encoding.split(",")
+ if e.strip() in self.CONTENT_DECODERS
+ ]
+ if len(encodings):
+ self._decoder = _get_decoder(content_encoding)
+
+ DECODER_ERROR_CLASSES = (IOError, zlib.error)
+ if brotli is not None:
+ DECODER_ERROR_CLASSES += (brotli.error,)
+
+ def _decode(self, data, decode_content, flush_decoder):
+ """
+ Decode the data passed in and potentially flush the decoder.
+ """
+ if not decode_content:
+ return data
+
+ try:
+ if self._decoder:
+ data = self._decoder.decompress(data)
+ except self.DECODER_ERROR_CLASSES as e:
+ content_encoding = self.headers.get("content-encoding", "").lower()
+ raise DecodeError(
+ "Received response with content-encoding: %s, but "
+ "failed to decode it." % content_encoding,
+ e,
+ )
+ if flush_decoder:
+ data += self._flush_decoder()
+
+ return data
+
+ def _flush_decoder(self):
+ """
+ Flushes the decoder. Should only be called if the decoder is actually
+ being used.
+ """
+ if self._decoder:
+ buf = self._decoder.decompress(b"")
+ return buf + self._decoder.flush()
+
+ return b""
+
+ @contextmanager
+ def _error_catcher(self):
+ """
+ Catch low-level python exceptions, instead re-raising urllib3
+ variants, so that low-level exceptions are not leaked in the
+ high-level api.
+
+ On exit, release the connection back to the pool.
+ """
+ clean_exit = False
+
+ try:
+ try:
+ yield
+
+ except SocketTimeout:
+ # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
+ # there is yet no clean way to get at it from this context.
+ raise ReadTimeoutError(self._pool, None, "Read timed out.")
+
+ except BaseSSLError as e:
+ # FIXME: Is there a better way to differentiate between SSLErrors?
+ if "read operation timed out" not in str(e):
+ # SSL errors related to framing/MAC get wrapped and reraised here
+ raise SSLError(e)
+
+ raise ReadTimeoutError(self._pool, None, "Read timed out.")
+
+ except (HTTPException, SocketError) as e:
+ # This includes IncompleteRead.
+ raise ProtocolError("Connection broken: %r" % e, e)
+
+ # If no exception is thrown, we should avoid cleaning up
+ # unnecessarily.
+ clean_exit = True
+ finally:
+ # If we didn't terminate cleanly, we need to throw away our
+ # connection.
+ if not clean_exit:
+ # The response may not be closed but we're not going to use it
+ # anymore so close it now to ensure that the connection is
+ # released back to the pool.
+ if self._original_response:
+ self._original_response.close()
+
+ # Closing the response may not actually be sufficient to close
+ # everything, so if we have a hold of the connection close that
+ # too.
+ if self._connection:
+ self._connection.close()
+
+ # If we hold the original response but it's closed now, we should
+ # return the connection back to the pool.
+ if self._original_response and self._original_response.isclosed():
+ self.release_conn()
+
+ def read(self, amt=None, decode_content=None, cache_content=False):
+ """
+ Similar to :meth:`http.client.HTTPResponse.read`, but with two additional
+ parameters: ``decode_content`` and ``cache_content``.
+
+ :param amt:
+ How much of the content to read. If specified, caching is skipped
+ because it doesn't make sense to cache partial content as the full
+ response.
+
+ :param decode_content:
+ If True, will attempt to decode the body based on the
+ 'content-encoding' header.
+
+ :param cache_content:
+ If True, will save the returned data such that the same result is
+ returned regardless of the state of the underlying file object. This
+ is useful if you want the ``.data`` property to continue working
+ after having ``.read()`` the file object. (Overridden if ``amt`` is
+ set.)
+ """
+ self._init_decoder()
+ if decode_content is None:
+ decode_content = self.decode_content
+
+ if self._fp is None:
+ return
+
+ flush_decoder = False
+ fp_closed = getattr(self._fp, "closed", False)
+
+ with self._error_catcher():
+ if amt is None:
+ # cStringIO doesn't like amt=None
+ data = self._fp.read() if not fp_closed else b""
+ flush_decoder = True
+ else:
+ cache_content = False
+ data = self._fp.read(amt) if not fp_closed else b""
+ if (
+ amt != 0 and not data
+ ): # Platform-specific: Buggy versions of Python.
+ # Close the connection when no data is returned
+ #
+ # This is redundant to what httplib/http.client _should_
+ # already do. However, versions of python released before
+ # December 15, 2012 (http://bugs.python.org/issue16298) do
+ # not properly close the connection in all cases. There is
+ # no harm in redundantly calling close.
+ self._fp.close()
+ flush_decoder = True
+ if self.enforce_content_length and self.length_remaining not in (
+ 0,
+ None,
+ ):
+ # This is an edge case that httplib failed to cover due
+ # to concerns of backward compatibility. We're
+ # addressing it here to make sure IncompleteRead is
+ # raised during streaming, so all calls with incorrect
+ # Content-Length are caught.
+ raise IncompleteRead(self._fp_bytes_read, self.length_remaining)
+
+ if data:
+ self._fp_bytes_read += len(data)
+ if self.length_remaining is not None:
+ self.length_remaining -= len(data)
+
+ data = self._decode(data, decode_content, flush_decoder)
+
+ if cache_content:
+ self._body = data
+
+ return data
+
+ def stream(self, amt=2 ** 16, decode_content=None):
+ """
+ A generator wrapper for the read() method. A call will block until
+ ``amt`` bytes have been read from the connection or until the
+ connection is closed.
+
+ :param amt:
+ How much of the content to read. The generator will return up to
+ this much data per iteration, but may return less. This is particularly
+ likely when using compressed data. However, the empty string will
+ never be returned.
+
+ :param decode_content:
+ If True, will attempt to decode the body based on the
+ 'content-encoding' header.
+ """
+ if self.chunked and self.supports_chunked_reads():
+ for line in self.read_chunked(amt, decode_content=decode_content):
+ yield line
+ else:
+ while not is_fp_closed(self._fp):
+ data = self.read(amt=amt, decode_content=decode_content)
+
+ if data:
+ yield data
+
+ @classmethod
+ def from_httplib(ResponseCls, r, **response_kw):
+ """
+ Given an :class:`http.client.HTTPResponse` instance ``r``, return a
+ corresponding :class:`urllib3.response.HTTPResponse` object.
+
+ Remaining parameters are passed to the HTTPResponse constructor, along
+ with ``original_response=r``.
+ """
+ headers = r.msg
+
+ if not isinstance(headers, HTTPHeaderDict):
+ if six.PY2:
+ # Python 2.7
+ headers = HTTPHeaderDict.from_httplib(headers)
+ else:
+ headers = HTTPHeaderDict(headers.items())
+
+ # HTTPResponse objects in Python 3 don't have a .strict attribute
+ strict = getattr(r, "strict", 0)
+ resp = ResponseCls(
+ body=r,
+ headers=headers,
+ status=r.status,
+ version=r.version,
+ reason=r.reason,
+ strict=strict,
+ original_response=r,
+ **response_kw
+ )
+ return resp
+
+ # Backwards-compatibility methods for http.client.HTTPResponse
+ def getheaders(self):
+ return self.headers
+
+ def getheader(self, name, default=None):
+ return self.headers.get(name, default)
+
+ # Backwards compatibility for http.cookiejar
+ def info(self):
+ return self.headers
+
+ # Overrides from io.IOBase
+ def close(self):
+ if not self.closed:
+ self._fp.close()
+
+ if self._connection:
+ self._connection.close()
+
+ if not self.auto_close:
+ io.IOBase.close(self)
+
+ @property
+ def closed(self):
+ if not self.auto_close:
+ return io.IOBase.closed.__get__(self)
+ elif self._fp is None:
+ return True
+ elif hasattr(self._fp, "isclosed"):
+ return self._fp.isclosed()
+ elif hasattr(self._fp, "closed"):
+ return self._fp.closed
+ else:
+ return True
+
+ def fileno(self):
+ if self._fp is None:
+ raise IOError("HTTPResponse has no file to get a fileno from")
+ elif hasattr(self._fp, "fileno"):
+ return self._fp.fileno()
+ else:
+ raise IOError(
+ "The file-like object this HTTPResponse is wrapped "
+ "around has no file descriptor"
+ )
+
+ def flush(self):
+ if (
+ self._fp is not None
+ and hasattr(self._fp, "flush")
+ and not getattr(self._fp, "closed", False)
+ ):
+ return self._fp.flush()
+
+ def readable(self):
+ # This method is required for `io` module compatibility.
+ return True
+
+ def readinto(self, b):
+ # This method is required for `io` module compatibility.
+ temp = self.read(len(b))
+ if len(temp) == 0:
+ return 0
+ else:
+ b[: len(temp)] = temp
+ return len(temp)
+
+ def supports_chunked_reads(self):
+ """
+ Checks if the underlying file-like object looks like a
+ :class:`http.client.HTTPResponse` object. We do this by testing for
+ the fp attribute. If it is present we assume it returns raw chunks as
+ processed by read_chunked().
+ """
+ return hasattr(self._fp, "fp")
+
+ def _update_chunk_length(self):
+ # First, we'll figure out length of a chunk and then
+ # we'll try to read it from socket.
+ if self.chunk_left is not None:
+ return
+ line = self._fp.fp.readline()
+ line = line.split(b";", 1)[0]
+ try:
+ self.chunk_left = int(line, 16)
+ except ValueError:
+ # Invalid chunked protocol response, abort.
+ self.close()
+ raise InvalidChunkLength(self, line)
+
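+    # For reference, a chunked body frames every chunk as
+    #     <hex size>[;extensions]\r\n<data>\r\n
+    # so a size line of b"1a;name=val\r\n" parses to int(b"1a", 16) == 26 data
+    # bytes, followed by a trailing CRLF that _handle_chunk() discards.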
+ def _handle_chunk(self, amt):
+ returned_chunk = None
+ if amt is None:
+ chunk = self._fp._safe_read(self.chunk_left)
+ returned_chunk = chunk
+ self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
+ self.chunk_left = None
+ elif amt < self.chunk_left:
+ value = self._fp._safe_read(amt)
+ self.chunk_left = self.chunk_left - amt
+ returned_chunk = value
+ elif amt == self.chunk_left:
+ value = self._fp._safe_read(amt)
+ self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
+ self.chunk_left = None
+ returned_chunk = value
+ else: # amt > self.chunk_left
+ returned_chunk = self._fp._safe_read(self.chunk_left)
+ self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
+ self.chunk_left = None
+ return returned_chunk
+
+ def read_chunked(self, amt=None, decode_content=None):
+ """
+ Similar to :meth:`HTTPResponse.read`, but with an additional
+ parameter: ``decode_content``.
+
+ :param amt:
+ How much of the content to read. If specified, caching is skipped
+ because it doesn't make sense to cache partial content as the full
+ response.
+
+ :param decode_content:
+ If True, will attempt to decode the body based on the
+ 'content-encoding' header.
+ """
+ self._init_decoder()
+ # FIXME: Rewrite this method and make it a class with better-structured logic.
+ if not self.chunked:
+ raise ResponseNotChunked(
+ "Response is not chunked. "
+ "Header 'transfer-encoding: chunked' is missing."
+ )
+ if not self.supports_chunked_reads():
+ raise BodyNotHttplibCompatible(
+ "Body should be http.client.HTTPResponse like. "
+ "It should have an fp attribute which returns raw chunks."
+ )
+
+ with self._error_catcher():
+ # Don't bother reading the body of a HEAD request.
+ if self._original_response and is_response_to_head(self._original_response):
+ self._original_response.close()
+ return
+
+ # If a response is already read and closed
+ # then return immediately.
+ if self._fp.fp is None:
+ return
+
+ while True:
+ self._update_chunk_length()
+ if self.chunk_left == 0:
+ break
+ chunk = self._handle_chunk(amt)
+ decoded = self._decode(
+ chunk, decode_content=decode_content, flush_decoder=False
+ )
+ if decoded:
+ yield decoded
+
+ if decode_content:
+ # On CPython and PyPy, we should never need to flush the
+ # decoder. However, on Jython we *might* need to, so
+ # lets defensively do it anyway.
+ decoded = self._flush_decoder()
+ if decoded: # Platform-specific: Jython.
+ yield decoded
+
+ # Chunk content ends with \r\n: discard it.
+ while True:
+ line = self._fp.fp.readline()
+ if not line:
+ # Some sites may not end with '\r\n'.
+ break
+ if line == b"\r\n":
+ break
+
+ # We read everything; close the "file".
+ if self._original_response:
+ self._original_response.close()
+
+ def geturl(self):
+ """
+ Returns the URL that was the source of this response.
+ If the request that generated this response redirected, this method
+ will return the final redirect location.
+ """
+ if self.retries is not None and len(self.retries.history):
+ return self.retries.history[-1].redirect_location
+ else:
+ return self._request_url
+
+ def __iter__(self):
+ buffer = []
+ for chunk in self.stream(decode_content=True):
+ if b"\n" in chunk:
+ chunk = chunk.split(b"\n")
+ yield b"".join(buffer) + chunk[0] + b"\n"
+ for x in chunk[1:-1]:
+ yield x + b"\n"
+ if chunk[-1]:
+ buffer = [chunk[-1]]
+ else:
+ buffer = []
+ else:
+ buffer.append(chunk)
+ if buffer:
+ yield b"".join(buffer)
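+
+
+# A minimal usage sketch (not part of upstream urllib3): streaming the response
+# body through HTTPResponse.stream().  The import path assumes pip's vendored
+# copy of urllib3, and the URL is a placeholder.
+def _example_stream_response():  # pragma: no cover
+    from pip._vendor import urllib3
+
+    http = urllib3.PoolManager()
+    # preload_content=False defers the body read so stream() can yield chunks.
+    resp = http.request("GET", "https://example.com/", preload_content=False)
+    try:
+        for chunk in resp.stream(2 ** 14, decode_content=True):
+            print(len(chunk))
+    finally:
+        resp.release_conn()  # return the connection to the pool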
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/__init__.py b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/__init__.py
new file mode 100644
index 0000000..4547fc5
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/__init__.py
@@ -0,0 +1,49 @@
+from __future__ import absolute_import
+
+# For backwards compatibility, provide imports that used to be here.
+from .connection import is_connection_dropped
+from .request import SKIP_HEADER, SKIPPABLE_HEADERS, make_headers
+from .response import is_fp_closed
+from .retry import Retry
+from .ssl_ import (
+ ALPN_PROTOCOLS,
+ HAS_SNI,
+ IS_PYOPENSSL,
+ IS_SECURETRANSPORT,
+ PROTOCOL_TLS,
+ SSLContext,
+ assert_fingerprint,
+ resolve_cert_reqs,
+ resolve_ssl_version,
+ ssl_wrap_socket,
+)
+from .timeout import Timeout, current_time
+from .url import Url, get_host, parse_url, split_first
+from .wait import wait_for_read, wait_for_write
+
+__all__ = (
+ "HAS_SNI",
+ "IS_PYOPENSSL",
+ "IS_SECURETRANSPORT",
+ "SSLContext",
+ "PROTOCOL_TLS",
+ "ALPN_PROTOCOLS",
+ "Retry",
+ "Timeout",
+ "Url",
+ "assert_fingerprint",
+ "current_time",
+ "is_connection_dropped",
+ "is_fp_closed",
+ "get_host",
+ "parse_url",
+ "make_headers",
+ "resolve_cert_reqs",
+ "resolve_ssl_version",
+ "split_first",
+ "ssl_wrap_socket",
+ "wait_for_read",
+ "wait_for_write",
+ "SKIP_HEADER",
+ "SKIPPABLE_HEADERS",
+)
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/connection.py b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/connection.py
new file mode 100644
index 0000000..6af1138
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/connection.py
@@ -0,0 +1,149 @@
+from __future__ import absolute_import
+
+import socket
+
+from ..contrib import _appengine_environ
+from ..exceptions import LocationParseError
+from ..packages import six
+from .wait import NoWayToWaitForSocketError, wait_for_read
+
+
+def is_connection_dropped(conn): # Platform-specific
+ """
+ Returns True if the connection is dropped and should be closed.
+
+ :param conn:
+ :class:`http.client.HTTPConnection` object.
+
+ Note: For platforms like AppEngine, this will always return ``False`` to
+ let the platform handle connection recycling transparently for us.
+ """
+ sock = getattr(conn, "sock", False)
+ if sock is False: # Platform-specific: AppEngine
+ return False
+ if sock is None: # Connection already closed (such as by httplib).
+ return True
+ try:
+ # Returns True if readable, which here means it's been dropped
+ return wait_for_read(sock, timeout=0.0)
+ except NoWayToWaitForSocketError: # Platform-specific: AppEngine
+ return False
+
+
+# This function is copied from socket.py in the Python 2.7 standard
+# library test suite. Added to its signature is only `socket_options`.
+# One additional modification is that we avoid binding to IPv6 servers
+# discovered in DNS if the system doesn't have IPv6 functionality.
+def create_connection(
+ address,
+ timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
+ source_address=None,
+ socket_options=None,
+):
+ """Connect to *address* and return the socket object.
+
+ Convenience function. Connect to *address* (a 2-tuple ``(host,
+ port)``) and return the socket object. Passing the optional
+ *timeout* parameter will set the timeout on the socket instance
+ before attempting to connect. If no *timeout* is supplied, the
+ global default timeout setting returned by :func:`socket.getdefaulttimeout`
+ is used. If *source_address* is set it must be a tuple of (host, port)
+ for the socket to bind as a source address before making the connection.
+ A host of '' or port 0 tells the OS to use the default.
+ """
+
+ host, port = address
+ if host.startswith("["):
+ host = host.strip("[]")
+ err = None
+
+ # Using the value from allowed_gai_family() in the context of getaddrinfo lets
+ # us select whether to work with IPv4 DNS records, IPv6 records, or both.
+ # The original create_connection function always returns all records.
+ family = allowed_gai_family()
+
+ try:
+ host.encode("idna")
+ except UnicodeError:
+ return six.raise_from(
+ LocationParseError(u"'%s', label empty or too long" % host), None
+ )
+
+ for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
+ af, socktype, proto, canonname, sa = res
+ sock = None
+ try:
+ sock = socket.socket(af, socktype, proto)
+
+ # If provided, set socket level options before connecting.
+ _set_socket_options(sock, socket_options)
+
+ if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
+ sock.settimeout(timeout)
+ if source_address:
+ sock.bind(source_address)
+ sock.connect(sa)
+ return sock
+
+ except socket.error as e:
+ err = e
+ if sock is not None:
+ sock.close()
+ sock = None
+
+ if err is not None:
+ raise err
+
+ raise socket.error("getaddrinfo returns an empty list")
+
+
+def _set_socket_options(sock, options):
+ if options is None:
+ return
+
+ for opt in options:
+ sock.setsockopt(*opt)
+
+
+def allowed_gai_family():
+ """This function is designed to work in the context of
+ getaddrinfo, where family=socket.AF_UNSPEC is the default and
+ will perform a DNS search for both IPv6 and IPv4 records."""
+
+ family = socket.AF_INET
+ if HAS_IPV6:
+ family = socket.AF_UNSPEC
+ return family
+
+
+def _has_ipv6(host):
+ """Returns True if the system can bind an IPv6 address."""
+ sock = None
+ has_ipv6 = False
+
+ # App Engine doesn't support IPV6 sockets and actually has a quota on the
+ # number of sockets that can be used, so just early out here instead of
+ # creating a socket needlessly.
+ # See https://github.com/urllib3/urllib3/issues/1446
+ if _appengine_environ.is_appengine_sandbox():
+ return False
+
+ if socket.has_ipv6:
+ # has_ipv6 returns true if cPython was compiled with IPv6 support.
+ # It does not tell us if the system has IPv6 support enabled. To
+ # determine that we must bind to an IPv6 address.
+ # https://github.com/urllib3/urllib3/pull/611
+ # https://bugs.python.org/issue658327
+ try:
+ sock = socket.socket(socket.AF_INET6)
+ sock.bind((host, 0))
+ has_ipv6 = True
+ except Exception:
+ pass
+
+ if sock:
+ sock.close()
+ return has_ipv6
+
+
+HAS_IPV6 = _has_ipv6("::1")
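+
+
+# A minimal sketch (not part of upstream urllib3) of create_connection() with
+# socket options applied before connect(); host and port are placeholders.
+def _example_create_connection():  # pragma: no cover
+    sock = create_connection(
+        ("example.com", 80),
+        timeout=5.0,
+        socket_options=[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)],
+    )
+    sock.close()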
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/proxy.py b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/proxy.py
new file mode 100644
index 0000000..2199cc7
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/proxy.py
@@ -0,0 +1,57 @@
+from .ssl_ import create_urllib3_context, resolve_cert_reqs, resolve_ssl_version
+
+
+def connection_requires_http_tunnel(
+ proxy_url=None, proxy_config=None, destination_scheme=None
+):
+ """
+ Returns True if the connection requires an HTTP CONNECT through the proxy.
+
+ :param URL proxy_url:
+ URL of the proxy.
+ :param ProxyConfig proxy_config:
+ Proxy configuration from poolmanager.py
+ :param str destination_scheme:
+ The scheme of the destination (e.g. https, http).
+ """
+ # If we're not using a proxy, no way to use a tunnel.
+ if proxy_url is None:
+ return False
+
+ # HTTP destinations never require tunneling, we always forward.
+ if destination_scheme == "http":
+ return False
+
+ # Support for forwarding with HTTPS proxies and HTTPS destinations.
+ if (
+ proxy_url.scheme == "https"
+ and proxy_config
+ and proxy_config.use_forwarding_for_https
+ ):
+ return False
+
+ # Otherwise always use a tunnel.
+ return True
+
+
+def create_proxy_ssl_context(
+ ssl_version, cert_reqs, ca_certs=None, ca_cert_dir=None, ca_cert_data=None
+):
+ """
+ Generates a default proxy ssl context if one hasn't been provided by the
+ user.
+ """
+ ssl_context = create_urllib3_context(
+ ssl_version=resolve_ssl_version(ssl_version),
+ cert_reqs=resolve_cert_reqs(cert_reqs),
+ )
+
+ if (
+ not ca_certs
+ and not ca_cert_dir
+ and not ca_cert_data
+ and hasattr(ssl_context, "load_default_certs")
+ ):
+ ssl_context.load_default_certs()
+
+ return ssl_context
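+
+
+# A minimal sketch (not part of upstream urllib3): plain HTTP destinations are
+# forwarded through the proxy, while HTTPS destinations default to an HTTP
+# CONNECT tunnel.  The proxy URL is a placeholder.
+def _example_tunnel_decision():  # pragma: no cover
+    from .url import parse_url
+
+    proxy = parse_url("http://proxy.internal:3128")
+    assert connection_requires_http_tunnel(proxy, None, "http") is False
+    assert connection_requires_http_tunnel(proxy, None, "https") is True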
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/queue.py b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/queue.py
new file mode 100644
index 0000000..4178410
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/queue.py
@@ -0,0 +1,22 @@
+import collections
+
+from ..packages import six
+from ..packages.six.moves import queue
+
+if six.PY2:
+ # Queue is imported for side effects on MS Windows. See issue #229.
+ import Queue as _unused_module_Queue # noqa: F401
+
+
+class LifoQueue(queue.Queue):
+ def _init(self, _):
+ self.queue = collections.deque()
+
+ def _qsize(self, len=len):
+ return len(self.queue)
+
+ def _put(self, item):
+ self.queue.append(item)
+
+ def _get(self):
+ return self.queue.pop()
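+
+
+# A minimal sketch (not part of upstream urllib3): the pool hands back the most
+# recently used connection first, which keeps its sockets warm.
+def _example_lifo_order():  # pragma: no cover
+    q = LifoQueue()
+    q.put("older connection")
+    q.put("newer connection")
+    assert q.get() == "newer connection"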
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/request.py b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/request.py
new file mode 100644
index 0000000..2510338
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/request.py
@@ -0,0 +1,143 @@
+from __future__ import absolute_import
+
+from base64 import b64encode
+
+from ..exceptions import UnrewindableBodyError
+from ..packages.six import b, integer_types
+
+# Pass as a value within ``headers`` to skip
+# emitting some HTTP headers that are added automatically.
+# The only headers that are supported are ``Accept-Encoding``,
+# ``Host``, and ``User-Agent``.
+SKIP_HEADER = "@@@SKIP_HEADER@@@"
+SKIPPABLE_HEADERS = frozenset(["accept-encoding", "host", "user-agent"])
+
+ACCEPT_ENCODING = "gzip,deflate"
+try:
+ import brotli as _unused_module_brotli # noqa: F401
+except ImportError:
+ pass
+else:
+ ACCEPT_ENCODING += ",br"
+
+_FAILEDTELL = object()
+
+
+def make_headers(
+ keep_alive=None,
+ accept_encoding=None,
+ user_agent=None,
+ basic_auth=None,
+ proxy_basic_auth=None,
+ disable_cache=None,
+):
+ """
+ Shortcuts for generating request headers.
+
+ :param keep_alive:
+ If ``True``, adds 'connection: keep-alive' header.
+
+ :param accept_encoding:
+ Can be a boolean, list, or string.
+ ``True`` translates to 'gzip,deflate'.
+ List will get joined by comma.
+ String will be used as provided.
+
+ :param user_agent:
+ String representing the user-agent you want, such as
+ "python-urllib3/0.6"
+
+ :param basic_auth:
+ Colon-separated username:password string for 'authorization: basic ...'
+ auth header.
+
+ :param proxy_basic_auth:
+ Colon-separated username:password string for 'proxy-authorization: basic ...'
+ auth header.
+
+ :param disable_cache:
+ If ``True``, adds 'cache-control: no-cache' header.
+
+ Example::
+
+ >>> make_headers(keep_alive=True, user_agent="Batman/1.0")
+ {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
+ >>> make_headers(accept_encoding=True)
+ {'accept-encoding': 'gzip,deflate'}
+ """
+ headers = {}
+ if accept_encoding:
+ if isinstance(accept_encoding, str):
+ pass
+ elif isinstance(accept_encoding, list):
+ accept_encoding = ",".join(accept_encoding)
+ else:
+ accept_encoding = ACCEPT_ENCODING
+ headers["accept-encoding"] = accept_encoding
+
+ if user_agent:
+ headers["user-agent"] = user_agent
+
+ if keep_alive:
+ headers["connection"] = "keep-alive"
+
+ if basic_auth:
+ headers["authorization"] = "Basic " + b64encode(b(basic_auth)).decode("utf-8")
+
+ if proxy_basic_auth:
+ headers["proxy-authorization"] = "Basic " + b64encode(
+ b(proxy_basic_auth)
+ ).decode("utf-8")
+
+ if disable_cache:
+ headers["cache-control"] = "no-cache"
+
+ return headers
+
+
+def set_file_position(body, pos):
+ """
+ If a position is provided, move file to that point.
+ Otherwise, we'll attempt to record a position for future use.
+ """
+ if pos is not None:
+ rewind_body(body, pos)
+ elif getattr(body, "tell", None) is not None:
+ try:
+ pos = body.tell()
+ except (IOError, OSError):
+ # This differentiates from None, allowing us to catch
+ # a failed `tell()` later when trying to rewind the body.
+ pos = _FAILEDTELL
+
+ return pos
+
+
+def rewind_body(body, body_pos):
+ """
+ Attempt to rewind body to a certain position.
+ Primarily used for request redirects and retries.
+
+ :param body:
+ File-like object that supports seek.
+
+ :param int body_pos:
+ Position to seek to in file.
+ """
+ body_seek = getattr(body, "seek", None)
+ if body_seek is not None and isinstance(body_pos, integer_types):
+ try:
+ body_seek(body_pos)
+ except (IOError, OSError):
+ raise UnrewindableBodyError(
+ "An error occurred when rewinding request body for redirect/retry."
+ )
+ elif body_pos is _FAILEDTELL:
+ raise UnrewindableBodyError(
+ "Unable to record file position for rewinding "
+ "request body during a redirect/retry."
+ )
+ else:
+ raise ValueError(
+ "body_pos must be of type integer, instead it was %s." % type(body_pos)
+ )
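+
+
+# A minimal sketch (not part of upstream urllib3) of how a file-like request
+# body is repositioned for a redirect or retry.
+def _example_rewind_body():  # pragma: no cover
+    import io
+
+    body = io.BytesIO(b"payload")
+    pos = set_file_position(body, None)  # records position 0 before sending
+    body.read()                          # the first attempt consumes the stream
+    rewind_body(body, pos)               # redirect/retry: seek back to the start
+    assert body.read() == b"payload"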
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/response.py b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/response.py
new file mode 100644
index 0000000..5ea609c
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/response.py
@@ -0,0 +1,107 @@
+from __future__ import absolute_import
+
+from email.errors import MultipartInvariantViolationDefect, StartBoundaryNotFoundDefect
+
+from ..exceptions import HeaderParsingError
+from ..packages.six.moves import http_client as httplib
+
+
+def is_fp_closed(obj):
+ """
+ Checks whether a given file-like object is closed.
+
+ :param obj:
+ The file-like object to check.
+ """
+
+ try:
+ # Check `isclosed()` first, in case Python3 doesn't set `closed`.
+ # GH Issue #928
+ return obj.isclosed()
+ except AttributeError:
+ pass
+
+ try:
+ # Check via the official file-like-object way.
+ return obj.closed
+ except AttributeError:
+ pass
+
+ try:
+ # Check if the object is a container for another file-like object that
+ # gets released on exhaustion (e.g. HTTPResponse).
+ return obj.fp is None
+ except AttributeError:
+ pass
+
+ raise ValueError("Unable to determine whether fp is closed.")
+
+
+def assert_header_parsing(headers):
+ """
+ Asserts whether all headers have been successfully parsed.
+ Extracts encountered errors from the result of parsing headers.
+
+ Only works on Python 3.
+
+ :param http.client.HTTPMessage headers: Headers to verify.
+
+ :raises urllib3.exceptions.HeaderParsingError:
+ If parsing errors are found.
+ """
+
+ # This will fail silently if we pass in the wrong kind of parameter.
+ # To make debugging easier add an explicit check.
+ if not isinstance(headers, httplib.HTTPMessage):
+ raise TypeError("expected httplib.HTTPMessage, got {0}.".format(type(headers)))
+
+ defects = getattr(headers, "defects", None)
+ get_payload = getattr(headers, "get_payload", None)
+
+ unparsed_data = None
+ if get_payload:
+ # get_payload is actually email.message.Message.get_payload;
+ # we're only interested in the result if it's not a multipart message
+ if not headers.is_multipart():
+ payload = get_payload()
+
+ if isinstance(payload, (bytes, str)):
+ unparsed_data = payload
+ if defects:
+ # httplib is assuming a response body is available
+ # when parsing headers even when httplib only sends
+ # header data to parse_headers(). This results in
+ # defects on multipart responses in particular.
+ # See: https://github.com/urllib3/urllib3/issues/800
+
+ # So we ignore the following defects:
+ # - StartBoundaryNotFoundDefect:
+ # The claimed start boundary was never found.
+ # - MultipartInvariantViolationDefect:
+ # A message claimed to be a multipart but no subparts were found.
+ defects = [
+ defect
+ for defect in defects
+ if not isinstance(
+ defect, (StartBoundaryNotFoundDefect, MultipartInvariantViolationDefect)
+ )
+ ]
+
+ if defects or unparsed_data:
+ raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
+
+
+def is_response_to_head(response):
+ """
+ Checks whether the request of a response has been a HEAD-request.
+ Handles the quirks of AppEngine.
+
+ :param http.client.HTTPResponse response:
+ Response to check if the originating request
+ used 'HEAD' as a method.
+ """
+ # FIXME: Can we do this somehow without accessing private httplib _method?
+ method = response._method
+ if isinstance(method, int): # Platform-specific: Appengine
+ return method == 3
+ return method.upper() == "HEAD"
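+
+
+# A minimal sketch (not part of upstream urllib3) of is_fp_closed() on a plain
+# file-like object that only exposes the standard ``closed`` attribute.
+def _example_is_fp_closed():  # pragma: no cover
+    import io
+
+    fp = io.BytesIO(b"data")
+    assert is_fp_closed(fp) is False
+    fp.close()
+    assert is_fp_closed(fp) is True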
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/retry.py b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/retry.py
new file mode 100644
index 0000000..3398323
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/retry.py
@@ -0,0 +1,620 @@
+from __future__ import absolute_import
+
+import email
+import logging
+import re
+import time
+import warnings
+from collections import namedtuple
+from itertools import takewhile
+
+from ..exceptions import (
+ ConnectTimeoutError,
+ InvalidHeader,
+ MaxRetryError,
+ ProtocolError,
+ ProxyError,
+ ReadTimeoutError,
+ ResponseError,
+)
+from ..packages import six
+
+log = logging.getLogger(__name__)
+
+
+# Data structure for representing the metadata of requests that result in a retry.
+RequestHistory = namedtuple(
+ "RequestHistory", ["method", "url", "error", "status", "redirect_location"]
+)
+
+
+# TODO: In v2 we can remove this sentinel and metaclass with deprecated options.
+_Default = object()
+
+
+class _RetryMeta(type):
+ @property
+ def DEFAULT_METHOD_WHITELIST(cls):
+ warnings.warn(
+ "Using 'Retry.DEFAULT_METHOD_WHITELIST' is deprecated and "
+ "will be removed in v2.0. Use 'Retry.DEFAULT_ALLOWED_METHODS' instead",
+ DeprecationWarning,
+ )
+ return cls.DEFAULT_ALLOWED_METHODS
+
+ @DEFAULT_METHOD_WHITELIST.setter
+ def DEFAULT_METHOD_WHITELIST(cls, value):
+ warnings.warn(
+ "Using 'Retry.DEFAULT_METHOD_WHITELIST' is deprecated and "
+ "will be removed in v2.0. Use 'Retry.DEFAULT_ALLOWED_METHODS' instead",
+ DeprecationWarning,
+ )
+ cls.DEFAULT_ALLOWED_METHODS = value
+
+ @property
+ def DEFAULT_REDIRECT_HEADERS_BLACKLIST(cls):
+ warnings.warn(
+ "Using 'Retry.DEFAULT_REDIRECT_HEADERS_BLACKLIST' is deprecated and "
+ "will be removed in v2.0. Use 'Retry.DEFAULT_REMOVE_HEADERS_ON_REDIRECT' instead",
+ DeprecationWarning,
+ )
+ return cls.DEFAULT_REMOVE_HEADERS_ON_REDIRECT
+
+ @DEFAULT_REDIRECT_HEADERS_BLACKLIST.setter
+ def DEFAULT_REDIRECT_HEADERS_BLACKLIST(cls, value):
+ warnings.warn(
+ "Using 'Retry.DEFAULT_REDIRECT_HEADERS_BLACKLIST' is deprecated and "
+ "will be removed in v2.0. Use 'Retry.DEFAULT_REMOVE_HEADERS_ON_REDIRECT' instead",
+ DeprecationWarning,
+ )
+ cls.DEFAULT_REMOVE_HEADERS_ON_REDIRECT = value
+
+ @property
+ def BACKOFF_MAX(cls):
+ warnings.warn(
+ "Using 'Retry.BACKOFF_MAX' is deprecated and "
+ "will be removed in v2.0. Use 'Retry.DEFAULT_BACKOFF_MAX' instead",
+ DeprecationWarning,
+ )
+ return cls.DEFAULT_BACKOFF_MAX
+
+ @BACKOFF_MAX.setter
+ def BACKOFF_MAX(cls, value):
+ warnings.warn(
+ "Using 'Retry.BACKOFF_MAX' is deprecated and "
+ "will be removed in v2.0. Use 'Retry.DEFAULT_BACKOFF_MAX' instead",
+ DeprecationWarning,
+ )
+ cls.DEFAULT_BACKOFF_MAX = value
+
+
+@six.add_metaclass(_RetryMeta)
+class Retry(object):
+ """Retry configuration.
+
+ Each retry attempt will create a new Retry object with updated values, so
+ they can be safely reused.
+
+ Retries can be defined as a default for a pool::
+
+ retries = Retry(connect=5, read=2, redirect=5)
+ http = PoolManager(retries=retries)
+ response = http.request('GET', 'http://example.com/')
+
+ Or per-request (which overrides the default for the pool)::
+
+ response = http.request('GET', 'http://example.com/', retries=Retry(10))
+
+ Retries can be disabled by passing ``False``::
+
+ response = http.request('GET', 'http://example.com/', retries=False)
+
+ Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
+ retries are disabled, in which case the causing exception will be raised.
+
+ :param int total:
+ Total number of retries to allow. Takes precedence over other counts.
+
+ Set to ``None`` to remove this constraint and fall back on other
+ counts.
+
+ Set to ``0`` to fail on the first retry.
+
+ Set to ``False`` to disable and imply ``raise_on_redirect=False``.
+
+ :param int connect:
+ How many connection-related errors to retry on.
+
+ These are errors raised before the request is sent to the remote server,
+ which we assume has not triggered the server to process the request.
+
+ Set to ``0`` to fail on the first retry of this type.
+
+ :param int read:
+ How many times to retry on read errors.
+
+ These errors are raised after the request was sent to the server, so the
+ request may have side-effects.
+
+ Set to ``0`` to fail on the first retry of this type.
+
+ :param int redirect:
+ How many redirects to perform. Limit this to avoid infinite redirect
+ loops.
+
+ A redirect is an HTTP response with a status code 301, 302, 303, 307 or
+ 308.
+
+ Set to ``0`` to fail on the first retry of this type.
+
+ Set to ``False`` to disable and imply ``raise_on_redirect=False``.
+
+ :param int status:
+ How many times to retry on bad status codes.
+
+ These are retries made on responses, where status code matches
+ ``status_forcelist``.
+
+ Set to ``0`` to fail on the first retry of this type.
+
+ :param int other:
+ How many times to retry on other errors.
+
+ Other errors are errors that are not connect, read, redirect or status errors.
+ These errors might be raised after the request was sent to the server, so the
+ request might have side-effects.
+
+ Set to ``0`` to fail on the first retry of this type.
+
+ If ``total`` is not set, it's a good idea to set this to 0 to account
+ for unexpected edge cases and avoid infinite retry loops.
+
+ :param iterable allowed_methods:
+ Set of uppercased HTTP method verbs that we should retry on.
+
+ By default, we only retry on methods which are considered to be
+ idempotent (multiple requests with the same parameters end with the
+ same state). See :attr:`Retry.DEFAULT_ALLOWED_METHODS`.
+
+ Set to a ``False`` value to retry on any verb.
+
+ .. warning::
+
+ Previously this parameter was named ``method_whitelist``; that
+ usage is deprecated in v1.26.0 and will be removed in v2.0.
+
+ :param iterable status_forcelist:
+ A set of integer HTTP status codes that we should force a retry on.
+ A retry is initiated if the request method is in ``allowed_methods``
+ and the response status code is in ``status_forcelist``.
+
+ By default, this is disabled with ``None``.
+
+ :param float backoff_factor:
+ A backoff factor to apply between attempts after the second try
+ (most errors are resolved immediately by a second try without a
+ delay). urllib3 will sleep for::
+
+ {backoff factor} * (2 ** ({number of total retries} - 1))
+
+ seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
+ for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer
+ than :attr:`Retry.DEFAULT_BACKOFF_MAX`.
+
+ By default, backoff is disabled (set to 0).
+
+ :param bool raise_on_redirect: Whether, if the number of redirects is
+ exhausted, to raise a MaxRetryError, or to return a response with a
+ response code in the 3xx range.
+
+ :param bool raise_on_status: Similar meaning to ``raise_on_redirect``:
+ whether we should raise an exception, or return a response,
+ if status falls in ``status_forcelist`` range and retries have
+ been exhausted.
+
+ :param tuple history: The history of the request encountered during
+ each call to :meth:`~Retry.increment`. The list is in the order
+ the requests occurred. Each list item is of class :class:`RequestHistory`.
+
+ :param bool respect_retry_after_header:
+ Whether to respect Retry-After header on status codes defined as
+ :attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.
+
+ :param iterable remove_headers_on_redirect:
+ Sequence of headers to remove from the request when a response
+ indicating a redirect is returned before firing off the redirected
+ request.
+ """
+
+ #: Default methods to be used for ``allowed_methods``
+ DEFAULT_ALLOWED_METHODS = frozenset(
+ ["HEAD", "GET", "PUT", "DELETE", "OPTIONS", "TRACE"]
+ )
+
+ #: Status codes for which a Retry-After header is respected
+ RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])
+
+ #: Default headers to be used for ``remove_headers_on_redirect``
+ DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset(["Authorization"])
+
+ #: Maximum backoff time.
+ DEFAULT_BACKOFF_MAX = 120
+
+ def __init__(
+ self,
+ total=10,
+ connect=None,
+ read=None,
+ redirect=None,
+ status=None,
+ other=None,
+ allowed_methods=_Default,
+ status_forcelist=None,
+ backoff_factor=0,
+ raise_on_redirect=True,
+ raise_on_status=True,
+ history=None,
+ respect_retry_after_header=True,
+ remove_headers_on_redirect=_Default,
+ # TODO: Deprecated, remove in v2.0
+ method_whitelist=_Default,
+ ):
+
+ if method_whitelist is not _Default:
+ if allowed_methods is not _Default:
+ raise ValueError(
+ "Using both 'allowed_methods' and "
+ "'method_whitelist' together is not allowed. "
+ "Instead only use 'allowed_methods'"
+ )
+ warnings.warn(
+ "Using 'method_whitelist' with Retry is deprecated and "
+ "will be removed in v2.0. Use 'allowed_methods' instead",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ allowed_methods = method_whitelist
+ if allowed_methods is _Default:
+ allowed_methods = self.DEFAULT_ALLOWED_METHODS
+ if remove_headers_on_redirect is _Default:
+ remove_headers_on_redirect = self.DEFAULT_REMOVE_HEADERS_ON_REDIRECT
+
+ self.total = total
+ self.connect = connect
+ self.read = read
+ self.status = status
+ self.other = other
+
+ if redirect is False or total is False:
+ redirect = 0
+ raise_on_redirect = False
+
+ self.redirect = redirect
+ self.status_forcelist = status_forcelist or set()
+ self.allowed_methods = allowed_methods
+ self.backoff_factor = backoff_factor
+ self.raise_on_redirect = raise_on_redirect
+ self.raise_on_status = raise_on_status
+ self.history = history or tuple()
+ self.respect_retry_after_header = respect_retry_after_header
+ self.remove_headers_on_redirect = frozenset(
+ [h.lower() for h in remove_headers_on_redirect]
+ )
+
+ def new(self, **kw):
+ params = dict(
+ total=self.total,
+ connect=self.connect,
+ read=self.read,
+ redirect=self.redirect,
+ status=self.status,
+ other=self.other,
+ status_forcelist=self.status_forcelist,
+ backoff_factor=self.backoff_factor,
+ raise_on_redirect=self.raise_on_redirect,
+ raise_on_status=self.raise_on_status,
+ history=self.history,
+ remove_headers_on_redirect=self.remove_headers_on_redirect,
+ respect_retry_after_header=self.respect_retry_after_header,
+ )
+
+ # TODO: If already given in **kw we use what's given to us
+ # If not given we need to figure out what to pass. We decide
+ # based on whether our class has the 'method_whitelist' property
+ # and if so we pass the deprecated 'method_whitelist' otherwise
+ # we use 'allowed_methods'. Remove in v2.0
+ if "method_whitelist" not in kw and "allowed_methods" not in kw:
+ if "method_whitelist" in self.__dict__:
+ warnings.warn(
+ "Using 'method_whitelist' with Retry is deprecated and "
+ "will be removed in v2.0. Use 'allowed_methods' instead",
+ DeprecationWarning,
+ )
+ params["method_whitelist"] = self.allowed_methods
+ else:
+ params["allowed_methods"] = self.allowed_methods
+
+ params.update(kw)
+ return type(self)(**params)
+
+ @classmethod
+ def from_int(cls, retries, redirect=True, default=None):
+ """Backwards-compatibility for the old retries format."""
+ if retries is None:
+ retries = default if default is not None else cls.DEFAULT
+
+ if isinstance(retries, Retry):
+ return retries
+
+ redirect = bool(redirect) and None
+ new_retries = cls(retries, redirect=redirect)
+ log.debug("Converted retries value: %r -> %r", retries, new_retries)
+ return new_retries
+
+ def get_backoff_time(self):
+ """Formula for computing the current backoff
+
+ :rtype: float
+ """
+ # We want to consider only the last consecutive errors sequence (Ignore redirects).
+ consecutive_errors_len = len(
+ list(
+ takewhile(lambda x: x.redirect_location is None, reversed(self.history))
+ )
+ )
+ if consecutive_errors_len <= 1:
+ return 0
+
+ backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1))
+ return min(self.DEFAULT_BACKOFF_MAX, backoff_value)
+
+ def parse_retry_after(self, retry_after):
+ # Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4
+ if re.match(r"^\s*[0-9]+\s*$", retry_after):
+ seconds = int(retry_after)
+ else:
+ retry_date_tuple = email.utils.parsedate_tz(retry_after)
+ if retry_date_tuple is None:
+ raise InvalidHeader("Invalid Retry-After header: %s" % retry_after)
+ if retry_date_tuple[9] is None: # Python 2
+ # Assume UTC if no timezone was specified
+ # On Python2.7, parsedate_tz returns None for a timezone offset
+ # instead of 0 if no timezone is given, where mktime_tz treats
+ # a None timezone offset as local time.
+ retry_date_tuple = retry_date_tuple[:9] + (0,) + retry_date_tuple[10:]
+
+ retry_date = email.utils.mktime_tz(retry_date_tuple)
+ seconds = retry_date - time.time()
+
+ if seconds < 0:
+ seconds = 0
+
+ return seconds
+
+ def get_retry_after(self, response):
+ """Get the value of Retry-After in seconds."""
+
+ retry_after = response.getheader("Retry-After")
+
+ if retry_after is None:
+ return None
+
+ return self.parse_retry_after(retry_after)
+
+ def sleep_for_retry(self, response=None):
+ retry_after = self.get_retry_after(response)
+ if retry_after:
+ time.sleep(retry_after)
+ return True
+
+ return False
+
+ def _sleep_backoff(self):
+ backoff = self.get_backoff_time()
+ if backoff <= 0:
+ return
+ time.sleep(backoff)
+
+ def sleep(self, response=None):
+ """Sleep between retry attempts.
+
+ This method will respect a server's ``Retry-After`` response header
+ and sleep the duration of the time requested. If that is not present, it
+ will use an exponential backoff. By default, the backoff factor is 0 and
+ this method will return immediately.
+ """
+
+ if self.respect_retry_after_header and response:
+ slept = self.sleep_for_retry(response)
+ if slept:
+ return
+
+ self._sleep_backoff()
+
+ def _is_connection_error(self, err):
+ """Errors when we're fairly sure that the server did not receive the
+ request, so it should be safe to retry.
+ """
+ if isinstance(err, ProxyError):
+ err = err.original_error
+ return isinstance(err, ConnectTimeoutError)
+
+ def _is_read_error(self, err):
+ """Errors that occur after the request has been started, so we should
+ assume that the server began processing it.
+ """
+ return isinstance(err, (ReadTimeoutError, ProtocolError))
+
+ def _is_method_retryable(self, method):
+ """Checks if a given HTTP method should be retried upon, depending if
+ it is included in the allowed_methods
+ """
+ # TODO: For now favor if the Retry implementation sets its own method_whitelist
+ # property outside of our constructor to avoid breaking custom implementations.
+ if "method_whitelist" in self.__dict__:
+ warnings.warn(
+ "Using 'method_whitelist' with Retry is deprecated and "
+ "will be removed in v2.0. Use 'allowed_methods' instead",
+ DeprecationWarning,
+ )
+ allowed_methods = self.method_whitelist
+ else:
+ allowed_methods = self.allowed_methods
+
+ if allowed_methods and method.upper() not in allowed_methods:
+ return False
+ return True
+
+ def is_retry(self, method, status_code, has_retry_after=False):
+ """Is this method/status code retryable? (Based on allowlists and control
+ variables such as the number of total retries to allow, whether to
+ respect the Retry-After header, whether this header is present, and
+ whether the returned status code is on the list of status codes to
+ be retried upon on the presence of the aforementioned header)
+ """
+ if not self._is_method_retryable(method):
+ return False
+
+ if self.status_forcelist and status_code in self.status_forcelist:
+ return True
+
+ return (
+ self.total
+ and self.respect_retry_after_header
+ and has_retry_after
+ and (status_code in self.RETRY_AFTER_STATUS_CODES)
+ )
+
+ def is_exhausted(self):
+ """Are we out of retries?"""
+ retry_counts = (
+ self.total,
+ self.connect,
+ self.read,
+ self.redirect,
+ self.status,
+ self.other,
+ )
+ retry_counts = list(filter(None, retry_counts))
+ if not retry_counts:
+ return False
+
+ return min(retry_counts) < 0
+
+ def increment(
+ self,
+ method=None,
+ url=None,
+ response=None,
+ error=None,
+ _pool=None,
+ _stacktrace=None,
+ ):
+ """Return a new Retry object with incremented retry counters.
+
+ :param response: A response object, or None, if the server did not
+ return a response.
+ :type response: :class:`~urllib3.response.HTTPResponse`
+ :param Exception error: An error encountered during the request, or
+ None if the response was received successfully.
+
+ :return: A new ``Retry`` object.
+ """
+ if self.total is False and error:
+ # Disabled, indicate to re-raise the error.
+ raise six.reraise(type(error), error, _stacktrace)
+
+ total = self.total
+ if total is not None:
+ total -= 1
+
+ connect = self.connect
+ read = self.read
+ redirect = self.redirect
+ status_count = self.status
+ other = self.other
+ cause = "unknown"
+ status = None
+ redirect_location = None
+
+ if error and self._is_connection_error(error):
+ # Connect retry?
+ if connect is False:
+ raise six.reraise(type(error), error, _stacktrace)
+ elif connect is not None:
+ connect -= 1
+
+ elif error and self._is_read_error(error):
+ # Read retry?
+ if read is False or not self._is_method_retryable(method):
+ raise six.reraise(type(error), error, _stacktrace)
+ elif read is not None:
+ read -= 1
+
+ elif error:
+ # Other retry?
+ if other is not None:
+ other -= 1
+
+ elif response and response.get_redirect_location():
+ # Redirect retry?
+ if redirect is not None:
+ redirect -= 1
+ cause = "too many redirects"
+ redirect_location = response.get_redirect_location()
+ status = response.status
+
+ else:
+ # Incrementing because of a server error like a 500 in
+ # status_forcelist and the given method is in the allowed_methods
+ cause = ResponseError.GENERIC_ERROR
+ if response and response.status:
+ if status_count is not None:
+ status_count -= 1
+ cause = ResponseError.SPECIFIC_ERROR.format(status_code=response.status)
+ status = response.status
+
+ history = self.history + (
+ RequestHistory(method, url, error, status, redirect_location),
+ )
+
+ new_retry = self.new(
+ total=total,
+ connect=connect,
+ read=read,
+ redirect=redirect,
+ status=status_count,
+ other=other,
+ history=history,
+ )
+
+ if new_retry.is_exhausted():
+ raise MaxRetryError(_pool, url, error or ResponseError(cause))
+
+ log.debug("Incremented Retry for (url='%s'): %r", url, new_retry)
+
+ return new_retry
+
+ def __repr__(self):
+ return (
+ "{cls.__name__}(total={self.total}, connect={self.connect}, "
+ "read={self.read}, redirect={self.redirect}, status={self.status})"
+ ).format(cls=type(self), self=self)
+
+ def __getattr__(self, item):
+ if item == "method_whitelist":
+ # TODO: Remove this deprecated alias in v2.0
+ warnings.warn(
+ "Using 'method_whitelist' with Retry is deprecated and "
+ "will be removed in v2.0. Use 'allowed_methods' instead",
+ DeprecationWarning,
+ )
+ return self.allowed_methods
+ try:
+ return getattr(super(Retry, self), item)
+ except AttributeError:
+ return getattr(Retry, item)
+
+
+# For backwards compatibility (equivalent to pre-v1.9):
+Retry.DEFAULT = Retry(3)
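+
+
+# A minimal usage sketch (not part of upstream urllib3): wiring a Retry policy
+# into a PoolManager.  The import path assumes pip's vendored copy of urllib3,
+# and the URL and status codes are placeholders.
+def _example_retry_policy():  # pragma: no cover
+    from pip._vendor import urllib3
+
+    retries = Retry(
+        total=5,
+        backoff_factor=0.5,  # sleeps 0.0, 1.0, 2.0, 4.0, ... capped at DEFAULT_BACKOFF_MAX
+        status_forcelist=[502, 503, 504],
+        allowed_methods=["GET", "HEAD"],
+    )
+    # Retry-After may be a delay in seconds or an HTTP-date.
+    assert retries.parse_retry_after("120") == 120
+
+    http = urllib3.PoolManager(retries=retries)
+    return http.request("GET", "https://example.com/")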
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/ssl_.py b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/ssl_.py
new file mode 100644
index 0000000..2b45d39
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/ssl_.py
@@ -0,0 +1,495 @@
+from __future__ import absolute_import
+
+import hmac
+import os
+import sys
+import warnings
+from binascii import hexlify, unhexlify
+from hashlib import md5, sha1, sha256
+
+from ..exceptions import (
+ InsecurePlatformWarning,
+ ProxySchemeUnsupported,
+ SNIMissingWarning,
+ SSLError,
+)
+from ..packages import six
+from .url import BRACELESS_IPV6_ADDRZ_RE, IPV4_RE
+
+SSLContext = None
+SSLTransport = None
+HAS_SNI = False
+IS_PYOPENSSL = False
+IS_SECURETRANSPORT = False
+ALPN_PROTOCOLS = ["http/1.1"]
+
+# Maps the length of a digest to a possible hash function producing this digest
+HASHFUNC_MAP = {32: md5, 40: sha1, 64: sha256}
+
+
+def _const_compare_digest_backport(a, b):
+ """
+ Compare two digests of equal length in constant time.
+
+ The digests must be of type str/bytes.
+ Returns True if the digests match, and False otherwise.
+ """
+ result = abs(len(a) - len(b))
+ for left, right in zip(bytearray(a), bytearray(b)):
+ result |= left ^ right
+ return result == 0
+
+
+_const_compare_digest = getattr(hmac, "compare_digest", _const_compare_digest_backport)
+
+try: # Test for SSL features
+ import ssl
+ from ssl import CERT_REQUIRED, wrap_socket
+except ImportError:
+ pass
+
+try:
+ from ssl import HAS_SNI # Has SNI?
+except ImportError:
+ pass
+
+try:
+ from .ssltransport import SSLTransport
+except ImportError:
+ pass
+
+
+try: # Platform-specific: Python 3.6
+ from ssl import PROTOCOL_TLS
+
+ PROTOCOL_SSLv23 = PROTOCOL_TLS
+except ImportError:
+ try:
+ from ssl import PROTOCOL_SSLv23 as PROTOCOL_TLS
+
+ PROTOCOL_SSLv23 = PROTOCOL_TLS
+ except ImportError:
+ PROTOCOL_SSLv23 = PROTOCOL_TLS = 2
+
+try:
+ from ssl import PROTOCOL_TLS_CLIENT
+except ImportError:
+ PROTOCOL_TLS_CLIENT = PROTOCOL_TLS
+
+
+try:
+ from ssl import OP_NO_COMPRESSION, OP_NO_SSLv2, OP_NO_SSLv3
+except ImportError:
+ OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000
+ OP_NO_COMPRESSION = 0x20000
+
+
+try: # OP_NO_TICKET was added in Python 3.6
+ from ssl import OP_NO_TICKET
+except ImportError:
+ OP_NO_TICKET = 0x4000
+
+
+# A secure default.
+# Sources for more information on TLS ciphers:
+#
+# - https://wiki.mozilla.org/Security/Server_Side_TLS
+# - https://www.ssllabs.com/projects/best-practices/index.html
+# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
+#
+# The general intent is:
+# - prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
+# - prefer ECDHE over DHE for better performance,
+# - prefer any AES-GCM and ChaCha20 over any AES-CBC for better performance and
+# security,
+# - prefer AES-GCM over ChaCha20 because hardware-accelerated AES is common,
+# - disable NULL authentication, MD5 MACs, DSS, and other
+# insecure ciphers for security reasons.
+# - NOTE: TLS 1.3 cipher suites are managed through a different interface
+# not exposed by CPython (yet!) and are enabled by default if they're available.
+DEFAULT_CIPHERS = ":".join(
+ [
+ "ECDHE+AESGCM",
+ "ECDHE+CHACHA20",
+ "DHE+AESGCM",
+ "DHE+CHACHA20",
+ "ECDH+AESGCM",
+ "DH+AESGCM",
+ "ECDH+AES",
+ "DH+AES",
+ "RSA+AESGCM",
+ "RSA+AES",
+ "!aNULL",
+ "!eNULL",
+ "!MD5",
+ "!DSS",
+ ]
+)
+
+try:
+ from ssl import SSLContext # Modern SSL?
+except ImportError:
+
+ class SSLContext(object): # Platform-specific: Python 2
+ def __init__(self, protocol_version):
+ self.protocol = protocol_version
+ # Use default values from a real SSLContext
+ self.check_hostname = False
+ self.verify_mode = ssl.CERT_NONE
+ self.ca_certs = None
+ self.options = 0
+ self.certfile = None
+ self.keyfile = None
+ self.ciphers = None
+
+ def load_cert_chain(self, certfile, keyfile):
+ self.certfile = certfile
+ self.keyfile = keyfile
+
+ def load_verify_locations(self, cafile=None, capath=None, cadata=None):
+ self.ca_certs = cafile
+
+ if capath is not None:
+ raise SSLError("CA directories not supported in older Pythons")
+
+ if cadata is not None:
+ raise SSLError("CA data not supported in older Pythons")
+
+ def set_ciphers(self, cipher_suite):
+ self.ciphers = cipher_suite
+
+ def wrap_socket(self, socket, server_hostname=None, server_side=False):
+ warnings.warn(
+ "A true SSLContext object is not available. This prevents "
+ "urllib3 from configuring SSL appropriately and may cause "
+ "certain SSL connections to fail. You can upgrade to a newer "
+ "version of Python to solve this. For more information, see "
+ "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html"
+ "#ssl-warnings",
+ InsecurePlatformWarning,
+ )
+ kwargs = {
+ "keyfile": self.keyfile,
+ "certfile": self.certfile,
+ "ca_certs": self.ca_certs,
+ "cert_reqs": self.verify_mode,
+ "ssl_version": self.protocol,
+ "server_side": server_side,
+ }
+ return wrap_socket(socket, ciphers=self.ciphers, **kwargs)
+
+
+def assert_fingerprint(cert, fingerprint):
+ """
+ Checks if given fingerprint matches the supplied certificate.
+
+ :param cert:
+ Certificate as bytes object.
+ :param fingerprint:
+ Fingerprint as string of hexdigits, can be interspersed by colons.
+ """
+
+ fingerprint = fingerprint.replace(":", "").lower()
+ digest_length = len(fingerprint)
+ hashfunc = HASHFUNC_MAP.get(digest_length)
+ if not hashfunc:
+ raise SSLError("Fingerprint of invalid length: {0}".format(fingerprint))
+
+ # We need encode() here for py32; works on py2 and py3.
+ fingerprint_bytes = unhexlify(fingerprint.encode())
+
+ cert_digest = hashfunc(cert).digest()
+
+ if not _const_compare_digest(cert_digest, fingerprint_bytes):
+ raise SSLError(
+ 'Fingerprints did not match. Expected "{0}", got "{1}".'.format(
+ fingerprint, hexlify(cert_digest)
+ )
+ )
+
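+# Illustrative sketch, not part of upstream urllib3: pinning a peer certificate
+# with assert_fingerprint(). `tls_sock` and `expected_fingerprint` are
+# hypothetical placeholders for an established TLS socket and a hex digest.
+#
+#     der_cert = tls_sock.getpeercert(binary_form=True)
+#     # 64 hex digits selects SHA-256 via HASHFUNC_MAP; raises SSLError on mismatch,
+#     # returns None on success.
+#     assert_fingerprint(der_cert, expected_fingerprint)
+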
+
+def resolve_cert_reqs(candidate):
+ """
+ Resolves the argument to a numeric constant, which can be passed to
+ the wrap_socket function/method from the ssl module.
+ Defaults to :data:`ssl.CERT_REQUIRED`.
+ If given a string it is assumed to be the name of the constant in the
+ :mod:`ssl` module or its abbreviation.
+ (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.)
+ If it's neither `None` nor a string we assume it is already the numeric
+ constant which can directly be passed to wrap_socket.
+ """
+ if candidate is None:
+ return CERT_REQUIRED
+
+ if isinstance(candidate, str):
+ res = getattr(ssl, candidate, None)
+ if res is None:
+ res = getattr(ssl, "CERT_" + candidate)
+ return res
+
+ return candidate
+
+
+def resolve_ssl_version(candidate):
+ """
+ Like resolve_cert_reqs, but resolves the argument to a numeric ssl protocol
+ version constant. Defaults to :data:`PROTOCOL_TLS`.
+ """
+ if candidate is None:
+ return PROTOCOL_TLS
+
+ if isinstance(candidate, str):
+ res = getattr(ssl, candidate, None)
+ if res is None:
+ res = getattr(ssl, "PROTOCOL_" + candidate)
+ return res
+
+ return candidate
+
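+# Illustrative sketch (not upstream code): how the two resolvers above map
+# their inputs, assuming a standard CPython ssl module.
+#
+#     resolve_cert_reqs(None)           # -> ssl.CERT_REQUIRED (the default)
+#     resolve_cert_reqs("REQUIRED")     # -> ssl.CERT_REQUIRED ("CERT_" prefix added)
+#     resolve_cert_reqs(ssl.CERT_NONE)  # -> ssl.CERT_NONE (passed through as-is)
+#     resolve_ssl_version("TLSv1_2")    # -> ssl.PROTOCOL_TLSv1_2
+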
+
+def create_urllib3_context(
+ ssl_version=None, cert_reqs=None, options=None, ciphers=None
+):
+ """All arguments have the same meaning as ``ssl_wrap_socket``.
+
+ By default, this function does a lot of the same work that
+ ``ssl.create_default_context`` does on Python 3.4+. It:
+
+ - Disables SSLv2, SSLv3, and compression
+ - Sets a restricted set of server ciphers
+
+ If you wish to enable SSLv3, you can do::
+
+ from pip._vendor.urllib3.util import ssl_
+ context = ssl_.create_urllib3_context()
+ context.options &= ~ssl_.OP_NO_SSLv3
+
+ You can do the same to enable compression (substituting ``COMPRESSION``
+ for ``SSLv3`` in the last line above).
+
+ :param ssl_version:
+ The desired protocol version to use. This will default to
+ PROTOCOL_SSLv23 which will negotiate the highest protocol that both
+ the server and your installation of OpenSSL support.
+ :param cert_reqs:
+ Whether to require the certificate verification. This defaults to
+ ``ssl.CERT_REQUIRED``.
+ :param options:
+ Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,
+ ``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``, and ``ssl.OP_NO_TICKET``.
+ :param ciphers:
+ Which cipher suites to allow the server to select.
+ :returns:
+ Constructed SSLContext object with specified options
+ :rtype: SSLContext
+ """
+ # PROTOCOL_TLS is deprecated in Python 3.10
+ if not ssl_version or ssl_version == PROTOCOL_TLS:
+ ssl_version = PROTOCOL_TLS_CLIENT
+
+ context = SSLContext(ssl_version)
+
+ context.set_ciphers(ciphers or DEFAULT_CIPHERS)
+
+ # Setting the default here, as we may have no ssl module on import
+ cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs
+
+ if options is None:
+ options = 0
+ # SSLv2 is easily broken and is considered harmful and dangerous
+ options |= OP_NO_SSLv2
+ # SSLv3 has several problems and is now dangerous
+ options |= OP_NO_SSLv3
+ # Disable compression to prevent CRIME attacks for OpenSSL 1.0+
+ # (issue #309)
+ options |= OP_NO_COMPRESSION
+ # TLSv1.2 only. Unless set explicitly, do not request tickets.
+ # This may save some bandwidth on the wire, and although the ticket is
+ # encrypted, there is a risk associated with it being on the wire
+ # if the server is not rotating its ticketing keys properly.
+ options |= OP_NO_TICKET
+
+ context.options |= options
+
+ # Enable post-handshake authentication for TLS 1.3, see GH #1634. PHA is
+ # necessary for conditional client cert authentication with TLS 1.3.
+ # The attribute is None for OpenSSL <= 1.1.0 or does not exist in older
+ # versions of Python. We only enable on Python 3.7.4+ or if certificate
+ # verification is enabled to work around Python issue #37428
+ # See: https://bugs.python.org/issue37428
+ if (cert_reqs == ssl.CERT_REQUIRED or sys.version_info >= (3, 7, 4)) and getattr(
+ context, "post_handshake_auth", None
+ ) is not None:
+ context.post_handshake_auth = True
+
+ def disable_check_hostname():
+ if (
+ getattr(context, "check_hostname", None) is not None
+ ): # Platform-specific: Python 3.2
+ # We do our own verification, including fingerprints and alternative
+ # hostnames. So disable it here
+ context.check_hostname = False
+
+ # The order of the below lines setting verify_mode and check_hostname
+ # matter due to safe-guards SSLContext has to prevent an SSLContext with
+ # check_hostname=True, verify_mode=NONE/OPTIONAL. This is made even more
+ # complex because we don't know whether PROTOCOL_TLS_CLIENT will be used
+ # or not so we don't know the initial state of the freshly created SSLContext.
+ if cert_reqs == ssl.CERT_REQUIRED:
+ context.verify_mode = cert_reqs
+ disable_check_hostname()
+ else:
+ disable_check_hostname()
+ context.verify_mode = cert_reqs
+
+ # Enable logging of TLS session keys via defacto standard environment variable
+ # 'SSLKEYLOGFILE', if the feature is available (Python 3.8+). Skip empty values.
+ if hasattr(context, "keylog_filename"):
+ sslkeylogfile = os.environ.get("SSLKEYLOGFILE")
+ if sslkeylogfile:
+ context.keylog_filename = sslkeylogfile
+
+ return context
+
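+# Illustrative sketch (not upstream code): building and using a hardened
+# context. `raw_sock` is assumed to be an already-connected socket and the
+# CA bundle path is a hypothetical placeholder.
+#
+#     ctx = create_urllib3_context(cert_reqs=ssl.CERT_REQUIRED)
+#     ctx.load_verify_locations(cafile="/path/to/ca-bundle.pem")
+#     tls_sock = ctx.wrap_socket(raw_sock, server_hostname="example.com")
+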
+
+def ssl_wrap_socket(
+ sock,
+ keyfile=None,
+ certfile=None,
+ cert_reqs=None,
+ ca_certs=None,
+ server_hostname=None,
+ ssl_version=None,
+ ciphers=None,
+ ssl_context=None,
+ ca_cert_dir=None,
+ key_password=None,
+ ca_cert_data=None,
+ tls_in_tls=False,
+):
+ """
+ All arguments except for server_hostname, ssl_context, and ca_cert_dir have
+ the same meaning as they do when using :func:`ssl.wrap_socket`.
+
+ :param server_hostname:
+ When SNI is supported, the expected hostname of the certificate
+ :param ssl_context:
+ A pre-made :class:`SSLContext` object. If none is provided, one will
+ be created using :func:`create_urllib3_context`.
+ :param ciphers:
+ A string of ciphers we wish the client to support.
+ :param ca_cert_dir:
+ A directory containing CA certificates in multiple separate files, as
+ supported by OpenSSL's -CApath flag or the capath argument to
+ SSLContext.load_verify_locations().
+ :param key_password:
+ Optional password if the keyfile is encrypted.
+ :param ca_cert_data:
+ Optional string containing CA certificates in PEM format suitable for
+ passing as the cadata parameter to SSLContext.load_verify_locations()
+ :param tls_in_tls:
+ Use SSLTransport to wrap the existing socket.
+ """
+ context = ssl_context
+ if context is None:
+ # Note: This branch of code and all the variables in it are no longer
+ # used by urllib3 itself. We should consider deprecating and removing
+ # this code.
+ context = create_urllib3_context(ssl_version, cert_reqs, ciphers=ciphers)
+
+ if ca_certs or ca_cert_dir or ca_cert_data:
+ try:
+ context.load_verify_locations(ca_certs, ca_cert_dir, ca_cert_data)
+ except (IOError, OSError) as e:
+ raise SSLError(e)
+
+ elif ssl_context is None and hasattr(context, "load_default_certs"):
+ # try to load OS default certs; works well on Windows (require Python3.4+)
+ context.load_default_certs()
+
+ # Attempt to detect if we get the goofy behavior of the
+ # keyfile being encrypted and OpenSSL asking for the
+ # passphrase via the terminal and instead error out.
+ if keyfile and key_password is None and _is_key_file_encrypted(keyfile):
+ raise SSLError("Client private key is encrypted, password is required")
+
+ if certfile:
+ if key_password is None:
+ context.load_cert_chain(certfile, keyfile)
+ else:
+ context.load_cert_chain(certfile, keyfile, key_password)
+
+ try:
+ if hasattr(context, "set_alpn_protocols"):
+ context.set_alpn_protocols(ALPN_PROTOCOLS)
+ except NotImplementedError: # Defensive: in CI, we always have set_alpn_protocols
+ pass
+
+ # If we detect server_hostname is an IP address then the SNI
+ # extension should not be used according to RFC3546 Section 3.1
+ use_sni_hostname = server_hostname and not is_ipaddress(server_hostname)
+ # SecureTransport uses server_hostname in certificate verification.
+ send_sni = (use_sni_hostname and HAS_SNI) or (
+ IS_SECURETRANSPORT and server_hostname
+ )
+ # Do not warn the user if server_hostname is an invalid SNI hostname.
+ if not HAS_SNI and use_sni_hostname:
+ warnings.warn(
+ "An HTTPS request has been made, but the SNI (Server Name "
+ "Indication) extension to TLS is not available on this platform. "
+ "This may cause the server to present an incorrect TLS "
+ "certificate, which can cause validation failures. You can upgrade to "
+ "a newer version of Python to solve this. For more information, see "
+ "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html"
+ "#ssl-warnings",
+ SNIMissingWarning,
+ )
+
+ if send_sni:
+ ssl_sock = _ssl_wrap_socket_impl(
+ sock, context, tls_in_tls, server_hostname=server_hostname
+ )
+ else:
+ ssl_sock = _ssl_wrap_socket_impl(sock, context, tls_in_tls)
+ return ssl_sock
+
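+# Illustrative sketch (not upstream code) of the one-shot helper above,
+# assuming pip's vendored `certifi` is available to supply a CA bundle.
+#
+#     import socket
+#     from pip._vendor import certifi
+#
+#     sock = socket.create_connection(("example.com", 443))
+#     tls_sock = ssl_wrap_socket(sock, ca_certs=certifi.where(),
+#                                server_hostname="example.com")
+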
+
+def is_ipaddress(hostname):
+ """Detects whether the hostname given is an IPv4 or IPv6 address.
+ Also detects IPv6 addresses with Zone IDs.
+
+ :param str hostname: Hostname to examine.
+ :return: True if the hostname is an IP address, False otherwise.
+ """
+ if not six.PY2 and isinstance(hostname, bytes):
+ # IDN A-label bytes are ASCII compatible.
+ hostname = hostname.decode("ascii")
+ return bool(IPV4_RE.match(hostname) or BRACELESS_IPV6_ADDRZ_RE.match(hostname))
+
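+# Examples (illustrative, not upstream doctests):
+#
+#     is_ipaddress("10.0.0.1")     # True  (matches IPV4_RE)
+#     is_ipaddress("2001:db8::1")  # True  (matches BRACELESS_IPV6_ADDRZ_RE)
+#     is_ipaddress("example.com")  # False
+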
+
+def _is_key_file_encrypted(key_file):
+ """Detects if a key file is encrypted or not."""
+ with open(key_file, "r") as f:
+ for line in f:
+ # Look for Proc-Type: 4,ENCRYPTED
+ if "ENCRYPTED" in line:
+ return True
+
+ return False
+
+
+def _ssl_wrap_socket_impl(sock, ssl_context, tls_in_tls, server_hostname=None):
+ if tls_in_tls:
+ if not SSLTransport:
+ # Import error, ssl is not available.
+ raise ProxySchemeUnsupported(
+ "TLS in TLS requires support for the 'ssl' module"
+ )
+
+ SSLTransport._validate_ssl_context_for_tls_in_tls(ssl_context)
+ return SSLTransport(sock, ssl_context, server_hostname)
+
+ if server_hostname:
+ return ssl_context.wrap_socket(sock, server_hostname=server_hostname)
+ else:
+ return ssl_context.wrap_socket(sock)
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/ssl_match_hostname.py b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/ssl_match_hostname.py
new file mode 100644
index 0000000..a4b4a56
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/ssl_match_hostname.py
@@ -0,0 +1,161 @@
+"""The match_hostname() function from Python 3.3.3, essential when using SSL."""
+
+# Note: This file is under the PSF license as the code comes from the python
+# stdlib. http://docs.python.org/3/license.html
+
+import re
+import sys
+
+# ipaddress has been backported to 2.6+ in pypi. If it is installed on the
+# system, use it to handle IPAddress ServerAltnames (this was added in
+# python-3.5) otherwise only do DNS matching. This allows
+# util.ssl_match_hostname to continue to be used in Python 2.7.
+try:
+ import ipaddress
+except ImportError:
+ ipaddress = None
+
+__version__ = "3.5.0.1"
+
+
+class CertificateError(ValueError):
+ pass
+
+
+def _dnsname_match(dn, hostname, max_wildcards=1):
+ """Matching according to RFC 6125, section 6.4.3
+
+ http://tools.ietf.org/html/rfc6125#section-6.4.3
+ """
+ pats = []
+ if not dn:
+ return False
+
+ # Ported from python3-syntax:
+ # leftmost, *remainder = dn.split(r'.')
+ parts = dn.split(r".")
+ leftmost = parts[0]
+ remainder = parts[1:]
+
+ wildcards = leftmost.count("*")
+ if wildcards > max_wildcards:
+ # Issue #17980: avoid denials of service by refusing more
+ # than one wildcard per fragment. A survey of established
+ # policy among SSL implementations showed it to be a
+ # reasonable choice.
+ raise CertificateError(
+ "too many wildcards in certificate DNS name: " + repr(dn)
+ )
+
+ # speed up common case w/o wildcards
+ if not wildcards:
+ return dn.lower() == hostname.lower()
+
+ # RFC 6125, section 6.4.3, subitem 1.
+ # The client SHOULD NOT attempt to match a presented identifier in which
+ # the wildcard character comprises a label other than the left-most label.
+ if leftmost == "*":
+ # When '*' is a fragment by itself, it matches a non-empty dotless
+ # fragment.
+ pats.append("[^.]+")
+ elif leftmost.startswith("xn--") or hostname.startswith("xn--"):
+ # RFC 6125, section 6.4.3, subitem 3.
+ # The client SHOULD NOT attempt to match a presented identifier
+ # where the wildcard character is embedded within an A-label or
+ # U-label of an internationalized domain name.
+ pats.append(re.escape(leftmost))
+ else:
+ # Otherwise, '*' matches any dotless string, e.g. www*
+ pats.append(re.escape(leftmost).replace(r"\*", "[^.]*"))
+
+ # add the remaining fragments, ignore any wildcards
+ for frag in remainder:
+ pats.append(re.escape(frag))
+
+ pat = re.compile(r"\A" + r"\.".join(pats) + r"\Z", re.IGNORECASE)
+ return pat.match(hostname)
+
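+# Examples (illustrative, not upstream doctests): the wildcard only spans a
+# single left-most label.
+#
+#     _dnsname_match("*.example.com", "www.example.com")  # truthy match object
+#     _dnsname_match("*.example.com", "a.b.example.com")  # None ('*' cannot span dots)
+#     _dnsname_match("example.com", "EXAMPLE.COM")        # True (case-insensitive)
+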
+
+def _to_unicode(obj):
+ if isinstance(obj, str) and sys.version_info < (3,):
+ # ignored flake8 # F821 to support python 2.7 function
+ obj = unicode(obj, encoding="ascii", errors="strict") # noqa: F821
+ return obj
+
+
+def _ipaddress_match(ipname, host_ip):
+ """Exact matching of IP addresses.
+
+ RFC 6125 explicitly doesn't define an algorithm for this
+ (section 1.7.2 - "Out of Scope").
+ """
+ # OpenSSL may add a trailing newline to a subjectAltName's IP address
+ # Divergence from upstream: ipaddress can't handle byte str
+ ip = ipaddress.ip_address(_to_unicode(ipname).rstrip())
+ return ip == host_ip
+
+
+def match_hostname(cert, hostname):
+ """Verify that *cert* (in decoded format as returned by
+ SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
+ rules are followed, but IP addresses are not accepted for *hostname*.
+
+ CertificateError is raised on failure. On success, the function
+ returns nothing.
+ """
+ if not cert:
+ raise ValueError(
+ "empty or no certificate, match_hostname needs a "
+ "SSL socket or SSL context with either "
+ "CERT_OPTIONAL or CERT_REQUIRED"
+ )
+ try:
+ # Divergence from upstream: ipaddress can't handle byte str
+ host_ip = ipaddress.ip_address(_to_unicode(hostname))
+ except ValueError:
+ # Not an IP address (common case)
+ host_ip = None
+ except UnicodeError:
+ # Divergence from upstream: Have to deal with ipaddress not taking
+ # byte strings. addresses should be all ascii, so we consider it not
+ # an ipaddress in this case
+ host_ip = None
+ except AttributeError:
+ # Divergence from upstream: Make ipaddress library optional
+ if ipaddress is None:
+ host_ip = None
+ else:
+ raise
+ dnsnames = []
+ san = cert.get("subjectAltName", ())
+ for key, value in san:
+ if key == "DNS":
+ if host_ip is None and _dnsname_match(value, hostname):
+ return
+ dnsnames.append(value)
+ elif key == "IP Address":
+ if host_ip is not None and _ipaddress_match(value, host_ip):
+ return
+ dnsnames.append(value)
+ if not dnsnames:
+ # The subject is only checked when there is no dNSName entry
+ # in subjectAltName
+ for sub in cert.get("subject", ()):
+ for key, value in sub:
+ # XXX according to RFC 2818, the most specific Common Name
+ # must be used.
+ if key == "commonName":
+ if _dnsname_match(value, hostname):
+ return
+ dnsnames.append(value)
+ if len(dnsnames) > 1:
+ raise CertificateError(
+ "hostname %r "
+ "doesn't match either of %s" % (hostname, ", ".join(map(repr, dnsnames)))
+ )
+ elif len(dnsnames) == 1:
+ raise CertificateError("hostname %r doesn't match %r" % (hostname, dnsnames[0]))
+ else:
+ raise CertificateError(
+ "no appropriate commonName or subjectAltName fields were found"
+ )
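+
+
+# Illustrative sketch (not upstream code): `cert` stands in for the dict that
+# SSLSocket.getpeercert() would return for a hypothetical certificate.
+#
+#     cert = {"subjectAltName": (("DNS", "example.com"), ("DNS", "*.example.net"))}
+#     match_hostname(cert, "example.com")      # returns None (exact match)
+#     match_hostname(cert, "www.example.net")  # returns None (wildcard match)
+#     match_hostname(cert, "evil.test")        # raises CertificateError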
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/ssltransport.py b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/ssltransport.py
new file mode 100644
index 0000000..4a7105d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/ssltransport.py
@@ -0,0 +1,221 @@
+import io
+import socket
+import ssl
+
+from ..exceptions import ProxySchemeUnsupported
+from ..packages import six
+
+SSL_BLOCKSIZE = 16384
+
+
+class SSLTransport:
+ """
+ The SSLTransport wraps an existing socket and establishes an SSL connection.
+
+ Contrary to Python's implementation of SSLSocket, it allows you to chain
+ multiple TLS connections together. It's particularly useful if you need to
+ implement TLS within TLS.
+
+ The class supports most of the socket API operations.
+ """
+
+ @staticmethod
+ def _validate_ssl_context_for_tls_in_tls(ssl_context):
+ """
+ Raises a ProxySchemeUnsupported if the provided ssl_context can't be used
+ for TLS in TLS.
+
+ The only requirement is that the ssl_context provides the 'wrap_bio'
+ methods.
+ """
+
+ if not hasattr(ssl_context, "wrap_bio"):
+ if six.PY2:
+ raise ProxySchemeUnsupported(
+ "TLS in TLS requires SSLContext.wrap_bio() which isn't "
+ "supported on Python 2"
+ )
+ else:
+ raise ProxySchemeUnsupported(
+ "TLS in TLS requires SSLContext.wrap_bio() which isn't "
+ "available on non-native SSLContext"
+ )
+
+ def __init__(
+ self, socket, ssl_context, server_hostname=None, suppress_ragged_eofs=True
+ ):
+ """
+ Create an SSLTransport around socket using the provided ssl_context.
+ """
+ self.incoming = ssl.MemoryBIO()
+ self.outgoing = ssl.MemoryBIO()
+
+ self.suppress_ragged_eofs = suppress_ragged_eofs
+ self.socket = socket
+
+ self.sslobj = ssl_context.wrap_bio(
+ self.incoming, self.outgoing, server_hostname=server_hostname
+ )
+
+ # Perform initial handshake.
+ self._ssl_io_loop(self.sslobj.do_handshake)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *_):
+ self.close()
+
+ def fileno(self):
+ return self.socket.fileno()
+
+ def read(self, len=1024, buffer=None):
+ return self._wrap_ssl_read(len, buffer)
+
+ def recv(self, len=1024, flags=0):
+ if flags != 0:
+ raise ValueError("non-zero flags not allowed in calls to recv")
+ return self._wrap_ssl_read(len)
+
+ def recv_into(self, buffer, nbytes=None, flags=0):
+ if flags != 0:
+ raise ValueError("non-zero flags not allowed in calls to recv_into")
+ if buffer and (nbytes is None):
+ nbytes = len(buffer)
+ elif nbytes is None:
+ nbytes = 1024
+ return self.read(nbytes, buffer)
+
+ def sendall(self, data, flags=0):
+ if flags != 0:
+ raise ValueError("non-zero flags not allowed in calls to sendall")
+ count = 0
+ with memoryview(data) as view, view.cast("B") as byte_view:
+ amount = len(byte_view)
+ while count < amount:
+ v = self.send(byte_view[count:])
+ count += v
+
+ def send(self, data, flags=0):
+ if flags != 0:
+ raise ValueError("non-zero flags not allowed in calls to send")
+ response = self._ssl_io_loop(self.sslobj.write, data)
+ return response
+
+ def makefile(
+ self, mode="r", buffering=None, encoding=None, errors=None, newline=None
+ ):
+ """
+ Python's httpclient uses makefile and buffered io when reading HTTP
+ messages and we need to support it.
+
+ This is unfortunately a copy and paste of socket.py makefile with small
+ changes to point to the socket directly.
+ """
+ if not set(mode) <= {"r", "w", "b"}:
+ raise ValueError("invalid mode %r (only r, w, b allowed)" % (mode,))
+
+ writing = "w" in mode
+ reading = "r" in mode or not writing
+ assert reading or writing
+ binary = "b" in mode
+ rawmode = ""
+ if reading:
+ rawmode += "r"
+ if writing:
+ rawmode += "w"
+ raw = socket.SocketIO(self, rawmode)
+ self.socket._io_refs += 1
+ if buffering is None:
+ buffering = -1
+ if buffering < 0:
+ buffering = io.DEFAULT_BUFFER_SIZE
+ if buffering == 0:
+ if not binary:
+ raise ValueError("unbuffered streams must be binary")
+ return raw
+ if reading and writing:
+ buffer = io.BufferedRWPair(raw, raw, buffering)
+ elif reading:
+ buffer = io.BufferedReader(raw, buffering)
+ else:
+ assert writing
+ buffer = io.BufferedWriter(raw, buffering)
+ if binary:
+ return buffer
+ text = io.TextIOWrapper(buffer, encoding, errors, newline)
+ text.mode = mode
+ return text
+
+ def unwrap(self):
+ self._ssl_io_loop(self.sslobj.unwrap)
+
+ def close(self):
+ self.socket.close()
+
+ def getpeercert(self, binary_form=False):
+ return self.sslobj.getpeercert(binary_form)
+
+ def version(self):
+ return self.sslobj.version()
+
+ def cipher(self):
+ return self.sslobj.cipher()
+
+ def selected_alpn_protocol(self):
+ return self.sslobj.selected_alpn_protocol()
+
+ def selected_npn_protocol(self):
+ return self.sslobj.selected_npn_protocol()
+
+ def shared_ciphers(self):
+ return self.sslobj.shared_ciphers()
+
+ def compression(self):
+ return self.sslobj.compression()
+
+ def settimeout(self, value):
+ self.socket.settimeout(value)
+
+ def gettimeout(self):
+ return self.socket.gettimeout()
+
+ def _decref_socketios(self):
+ self.socket._decref_socketios()
+
+ def _wrap_ssl_read(self, len, buffer=None):
+ try:
+ return self._ssl_io_loop(self.sslobj.read, len, buffer)
+ except ssl.SSLError as e:
+ if e.errno == ssl.SSL_ERROR_EOF and self.suppress_ragged_eofs:
+ return 0 # eof, return 0.
+ else:
+ raise
+
+ def _ssl_io_loop(self, func, *args):
+ """Performs an I/O loop between incoming/outgoing and the socket."""
+ should_loop = True
+ ret = None
+
+ while should_loop:
+ errno = None
+ try:
+ ret = func(*args)
+ except ssl.SSLError as e:
+ if e.errno not in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE):
+ # WANT_READ, and WANT_WRITE are expected, others are not.
+ raise e
+ errno = e.errno
+
+ buf = self.outgoing.read()
+ self.socket.sendall(buf)
+
+ if errno is None:
+ should_loop = False
+ elif errno == ssl.SSL_ERROR_WANT_READ:
+ buf = self.socket.recv(SSL_BLOCKSIZE)
+ if buf:
+ self.incoming.write(buf)
+ else:
+ self.incoming.write_eof()
+ return ret
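+
+
+# Illustrative sketch (not upstream code): tunnelling TLS inside an already
+# established TLS connection to a proxy. `proxy_sock` is assumed to be a
+# connected, TLS-wrapped socket to an HTTPS proxy, and `ctx` a native
+# ssl.SSLContext (it must provide wrap_bio()).
+#
+#     transport = SSLTransport(proxy_sock, ctx, server_hostname="example.com")
+#     transport.sendall(b"GET / HTTP/1.1\r\nHost: example.com\r\n\r\n")
+#     data = transport.recv(SSL_BLOCKSIZE)
+#     transport.close()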
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/timeout.py b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/timeout.py
new file mode 100644
index 0000000..ff69593
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/timeout.py
@@ -0,0 +1,268 @@
+from __future__ import absolute_import
+
+import time
+
+# The default socket timeout, used by httplib to indicate that no timeout was
+# specified by the user
+from socket import _GLOBAL_DEFAULT_TIMEOUT
+
+from ..exceptions import TimeoutStateError
+
+# A sentinel value to indicate that no timeout was specified by the user in
+# urllib3
+_Default = object()
+
+
+# Use time.monotonic if available.
+current_time = getattr(time, "monotonic", time.time)
+
+
+class Timeout(object):
+ """Timeout configuration.
+
+ Timeouts can be defined as a default for a pool:
+
+ .. code-block:: python
+
+ timeout = Timeout(connect=2.0, read=7.0)
+ http = PoolManager(timeout=timeout)
+ response = http.request('GET', 'http://example.com/')
+
+ Or per-request (which overrides the default for the pool):
+
+ .. code-block:: python
+
+ response = http.request('GET', 'http://example.com/', timeout=Timeout(10))
+
+ Timeouts can be disabled by setting all the parameters to ``None``:
+
+ .. code-block:: python
+
+ no_timeout = Timeout(connect=None, read=None)
+ response = http.request('GET', 'http://example.com/', timeout=no_timeout)
+
+
+ :param total:
+ This combines the connect and read timeouts into one; the read timeout
+ will be set to the time leftover from the connect attempt. In the
+ event that both a connect timeout and a total are specified, or a read
+ timeout and a total are specified, the shorter timeout will be applied.
+
+ Defaults to None.
+
+ :type total: int, float, or None
+
+ :param connect:
+ The maximum amount of time (in seconds) to wait for a connection
+ attempt to a server to succeed. Omitting the parameter will default the
+ connect timeout to the system default, probably the global default
+ timeout in socket.py.
+ None will set an infinite timeout for connection attempts.
+
+ :type connect: int, float, or None
+
+ :param read:
+ The maximum amount of time (in seconds) to wait between consecutive
+ read operations for a response from the server. Omitting the parameter
+ will default the read timeout to the system default, probably the
+ global default timeout in socket.py.
+ None will set an infinite timeout.
+
+ :type read: int, float, or None
+
+ .. note::
+
+ Many factors can affect the total amount of time for urllib3 to return
+ an HTTP response.
+
+ For example, Python's DNS resolver does not obey the timeout specified
+ on the socket. Other factors that can affect total request time include
+ high CPU load, high swap, the program running at a low priority level,
+ or other behaviors.
+
+ In addition, the read and total timeouts only measure the time between
+ read operations on the socket connecting the client and the server,
+ not the total amount of time for the request to return a complete
+ response. For most requests, the timeout is raised because the server
+ has not sent the first byte in the specified time. This is not always
+ the case; if a server streams one byte every fifteen seconds, a timeout
+ of 20 seconds will not trigger, even though the request will take
+ several minutes to complete.
+
+ If your goal is to cut off any request after a set amount of wall clock
+ time, consider having a second "watcher" thread to cut off a slow
+ request.
+ """
+
+ #: A sentinel object representing the default timeout value
+ DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT
+
+ def __init__(self, total=None, connect=_Default, read=_Default):
+ self._connect = self._validate_timeout(connect, "connect")
+ self._read = self._validate_timeout(read, "read")
+ self.total = self._validate_timeout(total, "total")
+ self._start_connect = None
+
+ def __repr__(self):
+ return "%s(connect=%r, read=%r, total=%r)" % (
+ type(self).__name__,
+ self._connect,
+ self._read,
+ self.total,
+ )
+
+ # __str__ provided for backwards compatibility
+ __str__ = __repr__
+
+ @classmethod
+ def _validate_timeout(cls, value, name):
+ """Check that a timeout attribute is valid.
+
+ :param value: The timeout value to validate
+ :param name: The name of the timeout attribute to validate. This is
+ used to specify in error messages.
+ :return: The validated and casted version of the given value.
+ :raises ValueError: If it is a numeric value less than or equal to
+ zero, or the type is not an integer, float, or None.
+ """
+ if value is _Default:
+ return cls.DEFAULT_TIMEOUT
+
+ if value is None or value is cls.DEFAULT_TIMEOUT:
+ return value
+
+ if isinstance(value, bool):
+ raise ValueError(
+ "Timeout cannot be a boolean value. It must "
+ "be an int, float or None."
+ )
+ try:
+ float(value)
+ except (TypeError, ValueError):
+ raise ValueError(
+ "Timeout value %s was %s, but it must be an "
+ "int, float or None." % (name, value)
+ )
+
+ try:
+ if value <= 0:
+ raise ValueError(
+ "Attempted to set %s timeout to %s, but the "
+ "timeout cannot be set to a value less "
+ "than or equal to 0." % (name, value)
+ )
+ except TypeError:
+ # Python 3
+ raise ValueError(
+ "Timeout value %s was %s, but it must be an "
+ "int, float or None." % (name, value)
+ )
+
+ return value
+
+ @classmethod
+ def from_float(cls, timeout):
+ """Create a new Timeout from a legacy timeout value.
+
+ The timeout value used by httplib.py sets the same timeout on the
+ connect(), and recv() socket requests. This creates a :class:`Timeout`
+ object that sets the individual timeouts to the ``timeout`` value
+ passed to this function.
+
+ :param timeout: The legacy timeout value.
+ :type timeout: integer, float, sentinel default object, or None
+ :return: Timeout object
+ :rtype: :class:`Timeout`
+ """
+ return Timeout(read=timeout, connect=timeout)
+
+ def clone(self):
+ """Create a copy of the timeout object
+
+ Timeout properties are stored per-pool but each request needs a fresh
+ Timeout object to ensure each one has its own start/stop configured.
+
+ :return: a copy of the timeout object
+ :rtype: :class:`Timeout`
+ """
+ # We can't use copy.deepcopy because that will also create a new object
+ # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
+ # detect the user default.
+ return Timeout(connect=self._connect, read=self._read, total=self.total)
+
+ def start_connect(self):
+ """Start the timeout clock, used during a connect() attempt
+
+ :raises urllib3.exceptions.TimeoutStateError: if you attempt
+ to start a timer that has been started already.
+ """
+ if self._start_connect is not None:
+ raise TimeoutStateError("Timeout timer has already been started.")
+ self._start_connect = current_time()
+ return self._start_connect
+
+ def get_connect_duration(self):
+ """Gets the time elapsed since the call to :meth:`start_connect`.
+
+ :return: Elapsed time in seconds.
+ :rtype: float
+ :raises urllib3.exceptions.TimeoutStateError: if you attempt
+ to get duration for a timer that hasn't been started.
+ """
+ if self._start_connect is None:
+ raise TimeoutStateError(
+ "Can't get connect duration for timer that has not started."
+ )
+ return current_time() - self._start_connect
+
+ @property
+ def connect_timeout(self):
+ """Get the value to use when setting a connection timeout.
+
+ This will be a positive float or integer, the value None
+ (never timeout), or the default system timeout.
+
+ :return: Connect timeout.
+ :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
+ """
+ if self.total is None:
+ return self._connect
+
+ if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
+ return self.total
+
+ return min(self._connect, self.total)
+
+ @property
+ def read_timeout(self):
+ """Get the value for the read timeout.
+
+ This assumes some time has elapsed in the connection timeout and
+ computes the read timeout appropriately.
+
+ If self.total is set, the read timeout is dependent on the amount of
+ time taken by the connect timeout. If the connection time has not been
+ established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
+ raised.
+
+ :return: Value to use for the read timeout.
+ :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
+ :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
+ has not yet been called on this object.
+ """
+ if (
+ self.total is not None
+ and self.total is not self.DEFAULT_TIMEOUT
+ and self._read is not None
+ and self._read is not self.DEFAULT_TIMEOUT
+ ):
+ # In case the connect timeout has not yet been established.
+ if self._start_connect is None:
+ return self._read
+ return max(0, min(self.total - self.get_connect_duration(), self._read))
+ elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
+ return max(0, self.total - self.get_connect_duration())
+ else:
+ return self._read
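+
+
+# Worked example (illustrative, not upstream code): with
+# Timeout(connect=5, read=10, total=12), connect_timeout is min(5, 12) == 5.
+# If the connect phase then takes 4 seconds, read_timeout becomes
+# min(12 - 4, 10) == 8, so the total budget is never exceeded.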
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/url.py b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/url.py
new file mode 100644
index 0000000..3651c43
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/url.py
@@ -0,0 +1,432 @@
+from __future__ import absolute_import
+
+import re
+from collections import namedtuple
+
+from ..exceptions import LocationParseError
+from ..packages import six
+
+url_attrs = ["scheme", "auth", "host", "port", "path", "query", "fragment"]
+
+# We only want to normalize urls with an HTTP(S) scheme.
+# urllib3 infers URLs without a scheme (None) to be http.
+NORMALIZABLE_SCHEMES = ("http", "https", None)
+
+# Almost all of these patterns were derived from the
+# 'rfc3986' module: https://github.com/python-hyper/rfc3986
+PERCENT_RE = re.compile(r"%[a-fA-F0-9]{2}")
+SCHEME_RE = re.compile(r"^(?:[a-zA-Z][a-zA-Z0-9+-]*:|/)")
+URI_RE = re.compile(
+ r"^(?:([a-zA-Z][a-zA-Z0-9+.-]*):)?"
+ r"(?://([^\\/?#]*))?"
+ r"([^?#]*)"
+ r"(?:\?([^#]*))?"
+ r"(?:#(.*))?$",
+ re.UNICODE | re.DOTALL,
+)
+
+IPV4_PAT = r"(?:[0-9]{1,3}\.){3}[0-9]{1,3}"
+HEX_PAT = "[0-9A-Fa-f]{1,4}"
+LS32_PAT = "(?:{hex}:{hex}|{ipv4})".format(hex=HEX_PAT, ipv4=IPV4_PAT)
+_subs = {"hex": HEX_PAT, "ls32": LS32_PAT}
+_variations = [
+ # 6( h16 ":" ) ls32
+ "(?:%(hex)s:){6}%(ls32)s",
+ # "::" 5( h16 ":" ) ls32
+ "::(?:%(hex)s:){5}%(ls32)s",
+ # [ h16 ] "::" 4( h16 ":" ) ls32
+ "(?:%(hex)s)?::(?:%(hex)s:){4}%(ls32)s",
+ # [ *1( h16 ":" ) h16 ] "::" 3( h16 ":" ) ls32
+ "(?:(?:%(hex)s:)?%(hex)s)?::(?:%(hex)s:){3}%(ls32)s",
+ # [ *2( h16 ":" ) h16 ] "::" 2( h16 ":" ) ls32
+ "(?:(?:%(hex)s:){0,2}%(hex)s)?::(?:%(hex)s:){2}%(ls32)s",
+ # [ *3( h16 ":" ) h16 ] "::" h16 ":" ls32
+ "(?:(?:%(hex)s:){0,3}%(hex)s)?::%(hex)s:%(ls32)s",
+ # [ *4( h16 ":" ) h16 ] "::" ls32
+ "(?:(?:%(hex)s:){0,4}%(hex)s)?::%(ls32)s",
+ # [ *5( h16 ":" ) h16 ] "::" h16
+ "(?:(?:%(hex)s:){0,5}%(hex)s)?::%(hex)s",
+ # [ *6( h16 ":" ) h16 ] "::"
+ "(?:(?:%(hex)s:){0,6}%(hex)s)?::",
+]
+
+UNRESERVED_PAT = r"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._!\-~"
+IPV6_PAT = "(?:" + "|".join([x % _subs for x in _variations]) + ")"
+ZONE_ID_PAT = "(?:%25|%)(?:[" + UNRESERVED_PAT + "]|%[a-fA-F0-9]{2})+"
+IPV6_ADDRZ_PAT = r"\[" + IPV6_PAT + r"(?:" + ZONE_ID_PAT + r")?\]"
+REG_NAME_PAT = r"(?:[^\[\]%:/?#]|%[a-fA-F0-9]{2})*"
+TARGET_RE = re.compile(r"^(/[^?#]*)(?:\?([^#]*))?(?:#.*)?$")
+
+IPV4_RE = re.compile("^" + IPV4_PAT + "$")
+IPV6_RE = re.compile("^" + IPV6_PAT + "$")
+IPV6_ADDRZ_RE = re.compile("^" + IPV6_ADDRZ_PAT + "$")
+BRACELESS_IPV6_ADDRZ_RE = re.compile("^" + IPV6_ADDRZ_PAT[2:-2] + "$")
+ZONE_ID_RE = re.compile("(" + ZONE_ID_PAT + r")\]$")
+
+_HOST_PORT_PAT = ("^(%s|%s|%s)(?::([0-9]{0,5}))?$") % (
+ REG_NAME_PAT,
+ IPV4_PAT,
+ IPV6_ADDRZ_PAT,
+)
+_HOST_PORT_RE = re.compile(_HOST_PORT_PAT, re.UNICODE | re.DOTALL)
+
+UNRESERVED_CHARS = set(
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._-~"
+)
+SUB_DELIM_CHARS = set("!$&'()*+,;=")
+USERINFO_CHARS = UNRESERVED_CHARS | SUB_DELIM_CHARS | {":"}
+PATH_CHARS = USERINFO_CHARS | {"@", "/"}
+QUERY_CHARS = FRAGMENT_CHARS = PATH_CHARS | {"?"}
+
+
+class Url(namedtuple("Url", url_attrs)):
+ """
+ Data structure for representing an HTTP URL. Used as a return value for
+ :func:`parse_url`. Both the scheme and host are normalized as they are
+ both case-insensitive according to RFC 3986.
+ """
+
+ __slots__ = ()
+
+ def __new__(
+ cls,
+ scheme=None,
+ auth=None,
+ host=None,
+ port=None,
+ path=None,
+ query=None,
+ fragment=None,
+ ):
+ if path and not path.startswith("/"):
+ path = "/" + path
+ if scheme is not None:
+ scheme = scheme.lower()
+ return super(Url, cls).__new__(
+ cls, scheme, auth, host, port, path, query, fragment
+ )
+
+ @property
+ def hostname(self):
+ """For backwards-compatibility with urlparse. We're nice like that."""
+ return self.host
+
+ @property
+ def request_uri(self):
+ """Absolute path including the query string."""
+ uri = self.path or "/"
+
+ if self.query is not None:
+ uri += "?" + self.query
+
+ return uri
+
+ @property
+ def netloc(self):
+ """Network location including host and port"""
+ if self.port:
+ return "%s:%d" % (self.host, self.port)
+ return self.host
+
+ @property
+ def url(self):
+ """
+ Convert self into a url
+
+ This function should more or less round-trip with :func:`.parse_url`. The
+ returned url may not be exactly the same as the url inputted to
+ :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
+ with a blank port will have : removed).
+
+ Example: ::
+
+ >>> U = parse_url('http://google.com/mail/')
+ >>> U.url
+ 'http://google.com/mail/'
+ >>> Url('http', 'username:password', 'host.com', 80,
+ ... '/path', 'query', 'fragment').url
+ 'http://username:password@host.com:80/path?query#fragment'
+ """
+ scheme, auth, host, port, path, query, fragment = self
+ url = u""
+
+ # We use "is not None" because we want things to happen with empty strings (or 0 port)
+ if scheme is not None:
+ url += scheme + u"://"
+ if auth is not None:
+ url += auth + u"@"
+ if host is not None:
+ url += host
+ if port is not None:
+ url += u":" + str(port)
+ if path is not None:
+ url += path
+ if query is not None:
+ url += u"?" + query
+ if fragment is not None:
+ url += u"#" + fragment
+
+ return url
+
+ def __str__(self):
+ return self.url
+
+
+def split_first(s, delims):
+ """
+ .. deprecated:: 1.25
+
+ Given a string and an iterable of delimiters, split on the first found
+ delimiter. Return two split parts and the matched delimiter.
+
+ If not found, then the first part is the full input string.
+
+ Example::
+
+ >>> split_first('foo/bar?baz', '?/=')
+ ('foo', 'bar?baz', '/')
+ >>> split_first('foo/bar?baz', '123')
+ ('foo/bar?baz', '', None)
+
+ Scales linearly with the number of delims; not ideal for a large number of delims.
+ """
+ min_idx = None
+ min_delim = None
+ for d in delims:
+ idx = s.find(d)
+ if idx < 0:
+ continue
+
+ if min_idx is None or idx < min_idx:
+ min_idx = idx
+ min_delim = d
+
+ if min_idx is None or min_idx < 0:
+ return s, "", None
+
+ return s[:min_idx], s[min_idx + 1 :], min_delim
+
+
+def _encode_invalid_chars(component, allowed_chars, encoding="utf-8"):
+ """Percent-encodes a URI component without reapplying
+ onto an already percent-encoded component.
+ """
+ if component is None:
+ return component
+
+ component = six.ensure_text(component)
+
+ # Normalize existing percent-encoded bytes.
+ # Try to see if the component we're encoding is already percent-encoded
+ # so we can skip all '%' characters but still encode all others.
+ component, percent_encodings = PERCENT_RE.subn(
+ lambda match: match.group(0).upper(), component
+ )
+
+ uri_bytes = component.encode("utf-8", "surrogatepass")
+ is_percent_encoded = percent_encodings == uri_bytes.count(b"%")
+ encoded_component = bytearray()
+
+ for i in range(0, len(uri_bytes)):
+ # Will return a single character bytestring on both Python 2 & 3
+ byte = uri_bytes[i : i + 1]
+ byte_ord = ord(byte)
+ if (is_percent_encoded and byte == b"%") or (
+ byte_ord < 128 and byte.decode() in allowed_chars
+ ):
+ encoded_component += byte
+ continue
+ encoded_component.extend(b"%" + (hex(byte_ord)[2:].encode().zfill(2).upper()))
+
+ return encoded_component.decode(encoding)
+
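+# Example (illustrative, not an upstream doctest): characters outside the
+# allowed set are percent-encoded, while existing escapes are left intact.
+#
+#     _encode_invalid_chars("/a path/%7Euser", PATH_CHARS)  # '/a%20path/%7Euser'
+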
+
+def _remove_path_dot_segments(path):
+ # See http://tools.ietf.org/html/rfc3986#section-5.2.4 for pseudo-code
+ segments = path.split("/") # Turn the path into a list of segments
+ output = [] # Initialize the variable to use to store output
+
+ for segment in segments:
+ # '.' is the current directory, so ignore it, it is superfluous
+ if segment == ".":
+ continue
+ # Anything other than '..', should be appended to the output
+ elif segment != "..":
+ output.append(segment)
+ # In this case segment == '..', if we can, we should pop the last
+ # element
+ elif output:
+ output.pop()
+
+ # If the path starts with '/' and the output is empty or the first string
+ # is non-empty
+ if path.startswith("/") and (not output or output[0]):
+ output.insert(0, "")
+
+ # If the path starts with '/.' or '/..' ensure we add one more empty
+ # string to add a trailing '/'
+ if path.endswith(("/.", "/..")):
+ output.append("")
+
+ return "/".join(output)
+
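+# Example (illustrative, not an upstream doctest): dot segments are resolved
+# per RFC 3986 section 5.2.4.
+#
+#     _remove_path_dot_segments("/a/b/../c/./d")  # '/a/c/d'
+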
+
+def _normalize_host(host, scheme):
+ if host:
+ if isinstance(host, six.binary_type):
+ host = six.ensure_str(host)
+
+ if scheme in NORMALIZABLE_SCHEMES:
+ is_ipv6 = IPV6_ADDRZ_RE.match(host)
+ if is_ipv6:
+ match = ZONE_ID_RE.search(host)
+ if match:
+ start, end = match.span(1)
+ zone_id = host[start:end]
+
+ if zone_id.startswith("%25") and zone_id != "%25":
+ zone_id = zone_id[3:]
+ else:
+ zone_id = zone_id[1:]
+ zone_id = "%" + _encode_invalid_chars(zone_id, UNRESERVED_CHARS)
+ return host[:start].lower() + zone_id + host[end:]
+ else:
+ return host.lower()
+ elif not IPV4_RE.match(host):
+ return six.ensure_str(
+ b".".join([_idna_encode(label) for label in host.split(".")])
+ )
+ return host
+
+
+def _idna_encode(name):
+ if name and any([ord(x) > 128 for x in name]):
+ try:
+ from pip._vendor import idna
+ except ImportError:
+ six.raise_from(
+ LocationParseError("Unable to parse URL without the 'idna' module"),
+ None,
+ )
+ try:
+ return idna.encode(name.lower(), strict=True, std3_rules=True)
+ except idna.IDNAError:
+ six.raise_from(
+ LocationParseError(u"Name '%s' is not a valid IDNA label" % name), None
+ )
+ return name.lower().encode("ascii")
+
+
+def _encode_target(target):
+ """Percent-encodes a request target so that there are no invalid characters"""
+ path, query = TARGET_RE.match(target).groups()
+ target = _encode_invalid_chars(path, PATH_CHARS)
+ query = _encode_invalid_chars(query, QUERY_CHARS)
+ if query is not None:
+ target += "?" + query
+ return target
+
+
+def parse_url(url):
+ """
+ Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
+ performed to parse incomplete urls. Fields not provided will be None.
+ This parser is RFC 3986 compliant.
+
+ The parser logic and helper functions are based heavily on
+ work done in the ``rfc3986`` module.
+
+ :param str url: URL to parse into a :class:`.Url` namedtuple.
+
+ Partly backwards-compatible with :mod:`urlparse`.
+
+ Example::
+
+ >>> parse_url('http://google.com/mail/')
+ Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
+ >>> parse_url('google.com:80')
+ Url(scheme=None, host='google.com', port=80, path=None, ...)
+ >>> parse_url('/foo?bar')
+ Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
+ """
+ if not url:
+ # Empty
+ return Url()
+
+ source_url = url
+ if not SCHEME_RE.search(url):
+ url = "//" + url
+
+ try:
+ scheme, authority, path, query, fragment = URI_RE.match(url).groups()
+ normalize_uri = scheme is None or scheme.lower() in NORMALIZABLE_SCHEMES
+
+ if scheme:
+ scheme = scheme.lower()
+
+ if authority:
+ auth, _, host_port = authority.rpartition("@")
+ auth = auth or None
+ host, port = _HOST_PORT_RE.match(host_port).groups()
+ if auth and normalize_uri:
+ auth = _encode_invalid_chars(auth, USERINFO_CHARS)
+ if port == "":
+ port = None
+ else:
+ auth, host, port = None, None, None
+
+ if port is not None:
+ port = int(port)
+ if not (0 <= port <= 65535):
+ raise LocationParseError(url)
+
+ host = _normalize_host(host, scheme)
+
+ if normalize_uri and path:
+ path = _remove_path_dot_segments(path)
+ path = _encode_invalid_chars(path, PATH_CHARS)
+ if normalize_uri and query:
+ query = _encode_invalid_chars(query, QUERY_CHARS)
+ if normalize_uri and fragment:
+ fragment = _encode_invalid_chars(fragment, FRAGMENT_CHARS)
+
+ except (ValueError, AttributeError):
+ return six.raise_from(LocationParseError(source_url), None)
+
+ # For the sake of backwards compatibility we put empty
+ # string values for path if there are any defined values
+ # beyond the path in the URL.
+ # TODO: Remove this when we break backwards compatibility.
+ if not path:
+ if query is not None or fragment is not None:
+ path = ""
+ else:
+ path = None
+
+ # Ensure that each part of the URL is a `str` for
+ # backwards compatibility.
+ if isinstance(url, six.text_type):
+ ensure_func = six.ensure_text
+ else:
+ ensure_func = six.ensure_str
+
+ def ensure_type(x):
+ return x if x is None else ensure_func(x)
+
+ return Url(
+ scheme=ensure_type(scheme),
+ auth=ensure_type(auth),
+ host=ensure_type(host),
+ port=port,
+ path=ensure_type(path),
+ query=ensure_type(query),
+ fragment=ensure_type(fragment),
+ )
+
+
+def get_host(url):
+ """
+ Deprecated. Use :func:`parse_url` instead.
+ """
+ p = parse_url(url)
+ return p.scheme or "http", p.hostname, p.port
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/wait.py b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/wait.py
new file mode 100644
index 0000000..c280646
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/wait.py
@@ -0,0 +1,153 @@
+import errno
+import select
+import sys
+from functools import partial
+
+try:
+ from time import monotonic
+except ImportError:
+ from time import time as monotonic
+
+__all__ = ["NoWayToWaitForSocketError", "wait_for_read", "wait_for_write"]
+
+
+class NoWayToWaitForSocketError(Exception):
+ pass
+
+
+# How should we wait on sockets?
+#
+# There are two types of APIs you can use for waiting on sockets: the fancy
+# modern stateful APIs like epoll/kqueue, and the older stateless APIs like
+# select/poll. The stateful APIs are more efficient when you have a lot of
+# sockets to keep track of, because you can set them up once and then use them
+# lots of times. But we only ever want to wait on a single socket at a time
+# and don't want to keep track of state, so the stateless APIs are actually
+# more efficient. So we want to use select() or poll().
+#
+# Now, how do we choose between select() and poll()? On traditional Unixes,
+# select() has a strange calling convention that makes it slow, or fail
+# altogether, for high-numbered file descriptors. The point of poll() is to fix
+# that, so on Unixes, we prefer poll().
+#
+# On Windows, there is no poll() (or at least Python doesn't provide a wrapper
+# for it), but that's OK, because on Windows, select() doesn't have this
+# strange calling convention; plain select() works fine.
+#
+# So: on Windows we use select(), and everywhere else we use poll(). We also
+# fall back to select() in case poll() is somehow broken or missing.
+
+if sys.version_info >= (3, 5):
+ # Modern Python, that retries syscalls by default
+ def _retry_on_intr(fn, timeout):
+ return fn(timeout)
+
+
+else:
+ # Old and broken Pythons.
+ def _retry_on_intr(fn, timeout):
+ if timeout is None:
+ deadline = float("inf")
+ else:
+ deadline = monotonic() + timeout
+
+ while True:
+ try:
+ return fn(timeout)
+ # OSError for 3 <= pyver < 3.5, select.error for pyver <= 2.7
+ except (OSError, select.error) as e:
+ # 'e.args[0]' incantation works for both OSError and select.error
+ if e.args[0] != errno.EINTR:
+ raise
+ else:
+ timeout = deadline - monotonic()
+ if timeout < 0:
+ timeout = 0
+ if timeout == float("inf"):
+ timeout = None
+ continue
+
+
+def select_wait_for_socket(sock, read=False, write=False, timeout=None):
+ if not read and not write:
+ raise RuntimeError("must specify at least one of read=True, write=True")
+ rcheck = []
+ wcheck = []
+ if read:
+ rcheck.append(sock)
+ if write:
+ wcheck.append(sock)
+ # When doing a non-blocking connect, most systems signal success by
+# marking the socket writable. Windows, though, signals success by marking
+ # it as "exceptional". We paper over the difference by checking the write
+ # sockets for both conditions. (The stdlib selectors module does the same
+ # thing.)
+ fn = partial(select.select, rcheck, wcheck, wcheck)
+ rready, wready, xready = _retry_on_intr(fn, timeout)
+ return bool(rready or wready or xready)
+
+
+def poll_wait_for_socket(sock, read=False, write=False, timeout=None):
+ if not read and not write:
+ raise RuntimeError("must specify at least one of read=True, write=True")
+ mask = 0
+ if read:
+ mask |= select.POLLIN
+ if write:
+ mask |= select.POLLOUT
+ poll_obj = select.poll()
+ poll_obj.register(sock, mask)
+
+ # For some reason, poll() takes timeout in milliseconds
+ def do_poll(t):
+ if t is not None:
+ t *= 1000
+ return poll_obj.poll(t)
+
+ return bool(_retry_on_intr(do_poll, timeout))
+
+
+def null_wait_for_socket(*args, **kwargs):
+ raise NoWayToWaitForSocketError("no select-equivalent available")
+
+
+def _have_working_poll():
+ # Apparently some systems have a select.poll that fails as soon as you try
+ # to use it, either due to strange configuration or broken monkeypatching
+ # from libraries like eventlet/greenlet.
+ try:
+ poll_obj = select.poll()
+ _retry_on_intr(poll_obj.poll, 0)
+ except (AttributeError, OSError):
+ return False
+ else:
+ return True
+
+
+def wait_for_socket(*args, **kwargs):
+ # We delay choosing which implementation to use until the first time we're
+ # called. We could do it at import time, but then we might make the wrong
+ # decision if someone goes wild with monkeypatching select.poll after
+ # we're imported.
+ global wait_for_socket
+ if _have_working_poll():
+ wait_for_socket = poll_wait_for_socket
+ elif hasattr(select, "select"):
+ wait_for_socket = select_wait_for_socket
+ else: # Platform-specific: Appengine.
+ wait_for_socket = null_wait_for_socket
+ return wait_for_socket(*args, **kwargs)
+
+
+def wait_for_read(sock, timeout=None):
+ """Waits for reading to be available on a given socket.
+ Returns True if the socket is readable, or False if the timeout expired.
+ """
+ return wait_for_socket(sock, read=True, timeout=timeout)
+
+
+def wait_for_write(sock, timeout=None):
+ """Waits for writing to be available on a given socket.
+ Returns True if the socket is writable, or False if the timeout expired.
+ """
+ return wait_for_socket(sock, write=True, timeout=timeout)
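+
+
+# Illustrative sketch (not upstream code): waiting for a response on a
+# connected, non-blocking socket named `sock` (an assumption for this example).
+#
+#     if wait_for_read(sock, timeout=5.0):
+#         data = sock.recv(4096)
+#     else:
+#         pass  # timed out; the caller decides how to handle it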
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/vendor.txt b/venv/lib/python3.9/site-packages/pip/_vendor/vendor.txt
new file mode 100644
index 0000000..2c93c0f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/vendor.txt
@@ -0,0 +1,25 @@
+CacheControl==0.12.10 # Make sure to update the license in pyproject.toml for this.
+colorama==0.4.4
+distlib==0.3.3
+distro==1.6.0
+html5lib==1.1
+msgpack==1.0.3
+packaging==21.3
+pep517==0.12.0
+platformdirs==2.4.1
+progress==1.6
+pyparsing==3.0.7
+requests==2.27.1
+ certifi==2021.10.08
+ chardet==4.0.0
+ idna==3.3
+ urllib3==1.26.8
+rich==11.0.0
+ pygments==2.11.2
+ typing_extensions==4.0.1
+resolvelib==0.8.1
+setuptools==44.0.0
+six==1.16.0
+tenacity==8.0.1
+tomli==1.0.3
+webencodings==0.5.1
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/webencodings/__init__.py b/venv/lib/python3.9/site-packages/pip/_vendor/webencodings/__init__.py
new file mode 100644
index 0000000..d21d697
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/webencodings/__init__.py
@@ -0,0 +1,342 @@
+# coding: utf-8
+"""
+
+ webencodings
+ ~~~~~~~~~~~~
+
+ This is a Python implementation of the WHATWG Encoding standard.
+ See README for details.
+
+ :copyright: Copyright 2012 by Simon Sapin
+ :license: BSD, see LICENSE for details.
+
+"""
+
+from __future__ import unicode_literals
+
+import codecs
+
+from .labels import LABELS
+
+
+VERSION = '0.5.1'
+
+
+# Some names in Encoding are not valid Python aliases. Remap these.
+PYTHON_NAMES = {
+ 'iso-8859-8-i': 'iso-8859-8',
+ 'x-mac-cyrillic': 'mac-cyrillic',
+ 'macintosh': 'mac-roman',
+ 'windows-874': 'cp874'}
+
+CACHE = {}
+
+
+def ascii_lower(string):
+ r"""Transform (only) ASCII letters to lower case: A-Z is mapped to a-z.
+
+ :param string: A Unicode string.
+ :returns: A new Unicode string.
+
+ This is used for ASCII case-insensitive
+ matching of encoding labels.
+ The same matching is also used, among other things,
+ for CSS keywords.
+
+ This is different from the :meth:`~py:str.lower` method of Unicode strings
+ which also affects non-ASCII characters,
+ sometimes mapping them into the ASCII range:
+
+ >>> keyword = u'Bac\N{KELVIN SIGN}ground'
+ >>> assert keyword.lower() == u'background'
+ >>> assert ascii_lower(keyword) != keyword.lower()
+ >>> assert ascii_lower(keyword) == u'bac\N{KELVIN SIGN}ground'
+
+ """
+ # This turns out to be faster than unicode.translate()
+ return string.encode('utf8').lower().decode('utf8')
+
+
+def lookup(label):
+ """
+ Look for an encoding by its label.
+ This is the spec’s "get an encoding" algorithm.
+ Supported labels are listed there.
+
+ :param label: A string.
+ :returns:
+ An :class:`Encoding` object, or :obj:`None` for an unknown label.
+
+ """
+ # Only strip ASCII whitespace: U+0009, U+000A, U+000C, U+000D, and U+0020.
+ label = ascii_lower(label.strip('\t\n\f\r '))
+ name = LABELS.get(label)
+ if name is None:
+ return None
+ encoding = CACHE.get(name)
+ if encoding is None:
+ if name == 'x-user-defined':
+ from .x_user_defined import codec_info
+ else:
+ python_name = PYTHON_NAMES.get(name, name)
+ # Any python_name value that gets to here should be valid.
+ codec_info = codecs.lookup(python_name)
+ encoding = Encoding(name, codec_info)
+ CACHE[name] = encoding
+ return encoding
+
+
+def _get_encoding(encoding_or_label):
+ """
+ Accept either an encoding object or label.
+
+ :param encoding: An :class:`Encoding` object or a label string.
+ :returns: An :class:`Encoding` object.
+ :raises: :exc:`~exceptions.LookupError` for an unknown label.
+
+ """
+ if hasattr(encoding_or_label, 'codec_info'):
+ return encoding_or_label
+
+ encoding = lookup(encoding_or_label)
+ if encoding is None:
+ raise LookupError('Unknown encoding label: %r' % encoding_or_label)
+ return encoding
+
+
+class Encoding(object):
+ """Reresents a character encoding such as UTF-8,
+ that can be used for decoding or encoding.
+
+ .. attribute:: name
+
+ Canonical name of the encoding
+
+ .. attribute:: codec_info
+
+ The actual implementation of the encoding,
+ a stdlib :class:`~codecs.CodecInfo` object.
+ See :func:`codecs.register`.
+
+ """
+ def __init__(self, name, codec_info):
+ self.name = name
+ self.codec_info = codec_info
+
+ def __repr__(self):
+ return '<Encoding %s>' % self.name
+
+
+#: The UTF-8 encoding. Should be used for new content and formats.
+UTF8 = lookup('utf-8')
+
+_UTF16LE = lookup('utf-16le')
+_UTF16BE = lookup('utf-16be')
+
+
+def decode(input, fallback_encoding, errors='replace'):
+ """
+ Decode a single string.
+
+ :param input: A byte string
+ :param fallback_encoding:
+ An :class:`Encoding` object or a label string.
+ The encoding to use if :obj:`input` does not have a BOM.
+ :param errors: Type of error handling. See :func:`codecs.register`.
+ :raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
+ :return:
+ A ``(output, encoding)`` tuple of a Unicode string
+ and an :obj:`Encoding`.
+
+ """
+ # Fail early if `encoding` is an invalid label.
+ fallback_encoding = _get_encoding(fallback_encoding)
+ bom_encoding, input = _detect_bom(input)
+ encoding = bom_encoding or fallback_encoding
+ return encoding.codec_info.decode(input, errors)[0], encoding
+
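+# Example (illustrative, not an upstream doctest): a UTF-8 BOM overrides the
+# fallback label, and the BOM itself is not part of the output. The label
+# 'latin1' resolves to windows-1252 per the WHATWG Encoding standard.
+#
+#     decode(b'\xef\xbb\xbfhello', 'latin1')  # (u'hello', <Encoding utf-8>)
+#     decode(b'caf\xe9', 'latin1')            # (u'café', <Encoding windows-1252>)
+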
+
+def _detect_bom(input):
+ """Return (bom_encoding, input), with any BOM removed from the input."""
+ if input.startswith(b'\xFF\xFE'):
+ return _UTF16LE, input[2:]
+ if input.startswith(b'\xFE\xFF'):
+ return _UTF16BE, input[2:]
+ if input.startswith(b'\xEF\xBB\xBF'):
+ return UTF8, input[3:]
+ return None, input
+
+
+def encode(input, encoding=UTF8, errors='strict'):
+ """
+ Encode a single string.
+
+ :param input: A Unicode string.
+ :param encoding: An :class:`Encoding` object or a label string.
+ :param errors: Type of error handling. See :func:`codecs.register`.
+ :raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
+ :return: A byte string.
+
+ """
+ return _get_encoding(encoding).codec_info.encode(input, errors)[0]
+
+
+def iter_decode(input, fallback_encoding, errors='replace'):
+ """
+ "Pull"-based decoder.
+
+ :param input:
+ An iterable of byte strings.
+
+ The input is first consumed just enough to determine the encoding
+ based on the presence of a BOM,
+ then consumed on demand as the return value is iterated.
+ :param fallback_encoding:
+ An :class:`Encoding` object or a label string.
+ The encoding to use if :obj:`input` does not have a BOM.
+ :param errors: Type of error handling. See :func:`codecs.register`.
+ :raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
+ :returns:
+ An ``(output, encoding)`` tuple.
+ :obj:`output` is an iterable of Unicode strings,
+ :obj:`encoding` is the :obj:`Encoding` that is being used.
+
+ """
+
+ decoder = IncrementalDecoder(fallback_encoding, errors)
+ generator = _iter_decode_generator(input, decoder)
+ encoding = next(generator)
+ return generator, encoding
+
+
+def _iter_decode_generator(input, decoder):
+ """Return a generator that first yields the :obj:`Encoding`,
+ then yields output chunks as Unicode strings.
+
+ """
+ decode = decoder.decode
+ input = iter(input)
+ for chunk in input:
+ output = decode(chunk)
+ if output:
+ assert decoder.encoding is not None
+ yield decoder.encoding
+ yield output
+ break
+ else:
+ # Input exhausted without determining the encoding
+ output = decode(b'', final=True)
+ assert decoder.encoding is not None
+ yield decoder.encoding
+ if output:
+ yield output
+ return
+
+ for chunk in input:
+ output = decode(chunk)
+ if output:
+ yield output
+ output = decode(b'', final=True)
+ if output:
+ yield output
+
+
+def iter_encode(input, encoding=UTF8, errors='strict'):
+ """
+ “Pull”-based encoder.
+
+ :param input: An iterable of Unicode strings.
+ :param encoding: An :class:`Encoding` object or a label string.
+ :param errors: Type of error handling. See :func:`codecs.register`.
+ :raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
+ :returns: An iterable of byte strings.
+
+ """
+ # Fail early if `encoding` is an invalid label.
+ encode = IncrementalEncoder(encoding, errors).encode
+ return _iter_encode_generator(input, encode)
+
+
+def _iter_encode_generator(input, encode):
+ for chunk in input:
+ output = encode(chunk)
+ if output:
+ yield output
+ output = encode('', final=True)
+ if output:
+ yield output
+
+
+class IncrementalDecoder(object):
+ """
+ “Push”-based decoder.
+
+ :param fallback_encoding:
+ An :class:`Encoding` object or a label string.
+ The encoding to use if :obj:`input` does not have a BOM.
+ :param errors: Type of error handling. See :func:`codecs.register`.
+ :raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
+
+ """
+ def __init__(self, fallback_encoding, errors='replace'):
+ # Fail early if `encoding` is an invalid label.
+ self._fallback_encoding = _get_encoding(fallback_encoding)
+ self._errors = errors
+ self._buffer = b''
+ self._decoder = None
+ #: The actual :class:`Encoding` that is being used,
+ #: or :obj:`None` if that is not determined yet.
+ #: (I.e. if there is not enough input yet to determine
+ #: if there is a BOM.)
+ self.encoding = None # Not known yet.
+
+ def decode(self, input, final=False):
+ """Decode one chunk of the input.
+
+ :param input: A byte string.
+ :param final:
+ Indicate that no more input is available.
+ Must be :obj:`True` if this is the last call.
+ :returns: A Unicode string.
+
+ """
+ decoder = self._decoder
+ if decoder is not None:
+ return decoder(input, final)
+
+ input = self._buffer + input
+ encoding, input = _detect_bom(input)
+ if encoding is None:
+ if len(input) < 3 and not final: # Not enough data yet.
+ self._buffer = input
+ return ''
+ else: # No BOM
+ encoding = self._fallback_encoding
+ decoder = encoding.codec_info.incrementaldecoder(self._errors).decode
+ self._decoder = decoder
+ self.encoding = encoding
+ return decoder(input, final)
+
+
+class IncrementalEncoder(object):
+ """
+ “Push”-based encoder.
+
+ :param encoding: An :class:`Encoding` object or a label string.
+ :param errors: Type of error handling. See :func:`codecs.register`.
+ :raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
+
+ .. method:: encode(input, final=False)
+
+ :param input: A Unicode string.
+ :param final:
+ Indicate that no more input is available.
+ Must be :obj:`True` if this is the last call.
+ :returns: A byte string.
+
+ """
+ def __init__(self, encoding=UTF8, errors='strict'):
+ encoding = _get_encoding(encoding)
+ self.encode = encoding.codec_info.incrementalencoder(errors).encode
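
For orientation, a minimal usage sketch of the API defined above. The bare ``webencodings`` import path is an assumption (inside pip this copy is vendored as ``pip._vendor.webencodings``); the behaviour shown follows the functions defined in this module.

from webencodings import decode, encode, IncrementalDecoder

# decode() strips a recognised BOM and reports the encoding actually used.
text, enc = decode(b'\xef\xbb\xbfcaf\xc3\xa9', fallback_encoding='latin1')
assert text == 'café' and enc.name == 'utf-8'

# Without a BOM the fallback label decides; note that 'latin1' is an alias
# for windows-1252 in the WHATWG Encoding standard.
text, enc = decode(b'caf\xe9', fallback_encoding='latin1')
assert text == 'café' and enc.name == 'windows-1252'

# encode() defaults to UTF-8.
assert encode('café') == b'caf\xc3\xa9'

# The "push" decoder buffers input until the BOM question is settled.
dec = IncrementalDecoder(fallback_encoding='utf-8')
out = dec.decode(b'\xef\xbb') + dec.decode(b'\xbfcaf\xc3\xa9', final=True)
assert out == 'café'
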
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/webencodings/labels.py b/venv/lib/python3.9/site-packages/pip/_vendor/webencodings/labels.py
new file mode 100644
index 0000000..29cbf91
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/webencodings/labels.py
@@ -0,0 +1,231 @@
+"""
+
+ webencodings.labels
+ ~~~~~~~~~~~~~~~~~~~
+
+ Map encoding labels to their name.
+
+ :copyright: Copyright 2012 by Simon Sapin
+ :license: BSD, see LICENSE for details.
+
+"""
+
+# XXX Do not edit!
+# This file is automatically generated by mklabels.py
+
+LABELS = {
+ 'unicode-1-1-utf-8': 'utf-8',
+ 'utf-8': 'utf-8',
+ 'utf8': 'utf-8',
+ '866': 'ibm866',
+ 'cp866': 'ibm866',
+ 'csibm866': 'ibm866',
+ 'ibm866': 'ibm866',
+ 'csisolatin2': 'iso-8859-2',
+ 'iso-8859-2': 'iso-8859-2',
+ 'iso-ir-101': 'iso-8859-2',
+ 'iso8859-2': 'iso-8859-2',
+ 'iso88592': 'iso-8859-2',
+ 'iso_8859-2': 'iso-8859-2',
+ 'iso_8859-2:1987': 'iso-8859-2',
+ 'l2': 'iso-8859-2',
+ 'latin2': 'iso-8859-2',
+ 'csisolatin3': 'iso-8859-3',
+ 'iso-8859-3': 'iso-8859-3',
+ 'iso-ir-109': 'iso-8859-3',
+ 'iso8859-3': 'iso-8859-3',
+ 'iso88593': 'iso-8859-3',
+ 'iso_8859-3': 'iso-8859-3',
+ 'iso_8859-3:1988': 'iso-8859-3',
+ 'l3': 'iso-8859-3',
+ 'latin3': 'iso-8859-3',
+ 'csisolatin4': 'iso-8859-4',
+ 'iso-8859-4': 'iso-8859-4',
+ 'iso-ir-110': 'iso-8859-4',
+ 'iso8859-4': 'iso-8859-4',
+ 'iso88594': 'iso-8859-4',
+ 'iso_8859-4': 'iso-8859-4',
+ 'iso_8859-4:1988': 'iso-8859-4',
+ 'l4': 'iso-8859-4',
+ 'latin4': 'iso-8859-4',
+ 'csisolatincyrillic': 'iso-8859-5',
+ 'cyrillic': 'iso-8859-5',
+ 'iso-8859-5': 'iso-8859-5',
+ 'iso-ir-144': 'iso-8859-5',
+ 'iso8859-5': 'iso-8859-5',
+ 'iso88595': 'iso-8859-5',
+ 'iso_8859-5': 'iso-8859-5',
+ 'iso_8859-5:1988': 'iso-8859-5',
+ 'arabic': 'iso-8859-6',
+ 'asmo-708': 'iso-8859-6',
+ 'csiso88596e': 'iso-8859-6',
+ 'csiso88596i': 'iso-8859-6',
+ 'csisolatinarabic': 'iso-8859-6',
+ 'ecma-114': 'iso-8859-6',
+ 'iso-8859-6': 'iso-8859-6',
+ 'iso-8859-6-e': 'iso-8859-6',
+ 'iso-8859-6-i': 'iso-8859-6',
+ 'iso-ir-127': 'iso-8859-6',
+ 'iso8859-6': 'iso-8859-6',
+ 'iso88596': 'iso-8859-6',
+ 'iso_8859-6': 'iso-8859-6',
+ 'iso_8859-6:1987': 'iso-8859-6',
+ 'csisolatingreek': 'iso-8859-7',
+ 'ecma-118': 'iso-8859-7',
+ 'elot_928': 'iso-8859-7',
+ 'greek': 'iso-8859-7',
+ 'greek8': 'iso-8859-7',
+ 'iso-8859-7': 'iso-8859-7',
+ 'iso-ir-126': 'iso-8859-7',
+ 'iso8859-7': 'iso-8859-7',
+ 'iso88597': 'iso-8859-7',
+ 'iso_8859-7': 'iso-8859-7',
+ 'iso_8859-7:1987': 'iso-8859-7',
+ 'sun_eu_greek': 'iso-8859-7',
+ 'csiso88598e': 'iso-8859-8',
+ 'csisolatinhebrew': 'iso-8859-8',
+ 'hebrew': 'iso-8859-8',
+ 'iso-8859-8': 'iso-8859-8',
+ 'iso-8859-8-e': 'iso-8859-8',
+ 'iso-ir-138': 'iso-8859-8',
+ 'iso8859-8': 'iso-8859-8',
+ 'iso88598': 'iso-8859-8',
+ 'iso_8859-8': 'iso-8859-8',
+ 'iso_8859-8:1988': 'iso-8859-8',
+ 'visual': 'iso-8859-8',
+ 'csiso88598i': 'iso-8859-8-i',
+ 'iso-8859-8-i': 'iso-8859-8-i',
+ 'logical': 'iso-8859-8-i',
+ 'csisolatin6': 'iso-8859-10',
+ 'iso-8859-10': 'iso-8859-10',
+ 'iso-ir-157': 'iso-8859-10',
+ 'iso8859-10': 'iso-8859-10',
+ 'iso885910': 'iso-8859-10',
+ 'l6': 'iso-8859-10',
+ 'latin6': 'iso-8859-10',
+ 'iso-8859-13': 'iso-8859-13',
+ 'iso8859-13': 'iso-8859-13',
+ 'iso885913': 'iso-8859-13',
+ 'iso-8859-14': 'iso-8859-14',
+ 'iso8859-14': 'iso-8859-14',
+ 'iso885914': 'iso-8859-14',
+ 'csisolatin9': 'iso-8859-15',
+ 'iso-8859-15': 'iso-8859-15',
+ 'iso8859-15': 'iso-8859-15',
+ 'iso885915': 'iso-8859-15',
+ 'iso_8859-15': 'iso-8859-15',
+ 'l9': 'iso-8859-15',
+ 'iso-8859-16': 'iso-8859-16',
+ 'cskoi8r': 'koi8-r',
+ 'koi': 'koi8-r',
+ 'koi8': 'koi8-r',
+ 'koi8-r': 'koi8-r',
+ 'koi8_r': 'koi8-r',
+ 'koi8-u': 'koi8-u',
+ 'csmacintosh': 'macintosh',
+ 'mac': 'macintosh',
+ 'macintosh': 'macintosh',
+ 'x-mac-roman': 'macintosh',
+ 'dos-874': 'windows-874',
+ 'iso-8859-11': 'windows-874',
+ 'iso8859-11': 'windows-874',
+ 'iso885911': 'windows-874',
+ 'tis-620': 'windows-874',
+ 'windows-874': 'windows-874',
+ 'cp1250': 'windows-1250',
+ 'windows-1250': 'windows-1250',
+ 'x-cp1250': 'windows-1250',
+ 'cp1251': 'windows-1251',
+ 'windows-1251': 'windows-1251',
+ 'x-cp1251': 'windows-1251',
+ 'ansi_x3.4-1968': 'windows-1252',
+ 'ascii': 'windows-1252',
+ 'cp1252': 'windows-1252',
+ 'cp819': 'windows-1252',
+ 'csisolatin1': 'windows-1252',
+ 'ibm819': 'windows-1252',
+ 'iso-8859-1': 'windows-1252',
+ 'iso-ir-100': 'windows-1252',
+ 'iso8859-1': 'windows-1252',
+ 'iso88591': 'windows-1252',
+ 'iso_8859-1': 'windows-1252',
+ 'iso_8859-1:1987': 'windows-1252',
+ 'l1': 'windows-1252',
+ 'latin1': 'windows-1252',
+ 'us-ascii': 'windows-1252',
+ 'windows-1252': 'windows-1252',
+ 'x-cp1252': 'windows-1252',
+ 'cp1253': 'windows-1253',
+ 'windows-1253': 'windows-1253',
+ 'x-cp1253': 'windows-1253',
+ 'cp1254': 'windows-1254',
+ 'csisolatin5': 'windows-1254',
+ 'iso-8859-9': 'windows-1254',
+ 'iso-ir-148': 'windows-1254',
+ 'iso8859-9': 'windows-1254',
+ 'iso88599': 'windows-1254',
+ 'iso_8859-9': 'windows-1254',
+ 'iso_8859-9:1989': 'windows-1254',
+ 'l5': 'windows-1254',
+ 'latin5': 'windows-1254',
+ 'windows-1254': 'windows-1254',
+ 'x-cp1254': 'windows-1254',
+ 'cp1255': 'windows-1255',
+ 'windows-1255': 'windows-1255',
+ 'x-cp1255': 'windows-1255',
+ 'cp1256': 'windows-1256',
+ 'windows-1256': 'windows-1256',
+ 'x-cp1256': 'windows-1256',
+ 'cp1257': 'windows-1257',
+ 'windows-1257': 'windows-1257',
+ 'x-cp1257': 'windows-1257',
+ 'cp1258': 'windows-1258',
+ 'windows-1258': 'windows-1258',
+ 'x-cp1258': 'windows-1258',
+ 'x-mac-cyrillic': 'x-mac-cyrillic',
+ 'x-mac-ukrainian': 'x-mac-cyrillic',
+ 'chinese': 'gbk',
+ 'csgb2312': 'gbk',
+ 'csiso58gb231280': 'gbk',
+ 'gb2312': 'gbk',
+ 'gb_2312': 'gbk',
+ 'gb_2312-80': 'gbk',
+ 'gbk': 'gbk',
+ 'iso-ir-58': 'gbk',
+ 'x-gbk': 'gbk',
+ 'gb18030': 'gb18030',
+ 'hz-gb-2312': 'hz-gb-2312',
+ 'big5': 'big5',
+ 'big5-hkscs': 'big5',
+ 'cn-big5': 'big5',
+ 'csbig5': 'big5',
+ 'x-x-big5': 'big5',
+ 'cseucpkdfmtjapanese': 'euc-jp',
+ 'euc-jp': 'euc-jp',
+ 'x-euc-jp': 'euc-jp',
+ 'csiso2022jp': 'iso-2022-jp',
+ 'iso-2022-jp': 'iso-2022-jp',
+ 'csshiftjis': 'shift_jis',
+ 'ms_kanji': 'shift_jis',
+ 'shift-jis': 'shift_jis',
+ 'shift_jis': 'shift_jis',
+ 'sjis': 'shift_jis',
+ 'windows-31j': 'shift_jis',
+ 'x-sjis': 'shift_jis',
+ 'cseuckr': 'euc-kr',
+ 'csksc56011987': 'euc-kr',
+ 'euc-kr': 'euc-kr',
+ 'iso-ir-149': 'euc-kr',
+ 'korean': 'euc-kr',
+ 'ks_c_5601-1987': 'euc-kr',
+ 'ks_c_5601-1989': 'euc-kr',
+ 'ksc5601': 'euc-kr',
+ 'ksc_5601': 'euc-kr',
+ 'windows-949': 'euc-kr',
+ 'csiso2022kr': 'iso-2022-kr',
+ 'iso-2022-kr': 'iso-2022-kr',
+ 'utf-16be': 'utf-16be',
+ 'utf-16': 'utf-16le',
+ 'utf-16le': 'utf-16le',
+ 'x-user-defined': 'x-user-defined',
+}
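
A brief illustration of how this table is consumed by ``lookup()`` (defined in ``__init__.py``); the bare ``webencodings`` import path is again an assumption for the vendored copy.

from webencodings import LABELS, lookup

assert LABELS['latin1'] == 'windows-1252'   # legacy Latin-1 aliases fold into cp1252
assert LABELS['utf-16'] == 'utf-16le'       # a bare "utf-16" label means little-endian
assert lookup('  Latin1\t').name == 'windows-1252'   # lookup() strips ASCII whitespace and lower-cases
assert lookup('no-such-label') is None
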
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/webencodings/mklabels.py b/venv/lib/python3.9/site-packages/pip/_vendor/webencodings/mklabels.py
new file mode 100644
index 0000000..295dc92
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/webencodings/mklabels.py
@@ -0,0 +1,59 @@
+"""
+
+ webencodings.mklabels
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Regenerate the webencodings.labels module.
+
+ :copyright: Copyright 2012 by Simon Sapin
+ :license: BSD, see LICENSE for details.
+
+"""
+
+import json
+try:
+ from urllib import urlopen
+except ImportError:
+ from urllib.request import urlopen
+
+
+def assert_lower(string):
+ assert string == string.lower()
+ return string
+
+
+def generate(url):
+ parts = ['''\
+"""
+
+ webencodings.labels
+ ~~~~~~~~~~~~~~~~~~~
+
+ Map encoding labels to their name.
+
+ :copyright: Copyright 2012 by Simon Sapin
+ :license: BSD, see LICENSE for details.
+
+"""
+
+# XXX Do not edit!
+# This file is automatically generated by mklabels.py
+
+LABELS = {
+''']
+ labels = [
+ (repr(assert_lower(label)).lstrip('u'),
+ repr(encoding['name']).lstrip('u'))
+ for category in json.loads(urlopen(url).read().decode('ascii'))
+ for encoding in category['encodings']
+ for label in encoding['labels']]
+ max_len = max(len(label) for label, name in labels)
+ parts.extend(
+ ' %s:%s %s,\n' % (label, ' ' * (max_len - len(label)), name)
+ for label, name in labels)
+ parts.append('}')
+ return ''.join(parts)
+
+
+if __name__ == '__main__':
+ print(generate('http://encoding.spec.whatwg.org/encodings.json'))
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/webencodings/tests.py b/venv/lib/python3.9/site-packages/pip/_vendor/webencodings/tests.py
new file mode 100644
index 0000000..e12c10d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/webencodings/tests.py
@@ -0,0 +1,153 @@
+# coding: utf-8
+"""
+
+ webencodings.tests
+ ~~~~~~~~~~~~~~~~~~
+
+ A basic test suite for Encoding.
+
+ :copyright: Copyright 2012 by Simon Sapin
+ :license: BSD, see LICENSE for details.
+
+"""
+
+from __future__ import unicode_literals
+
+from . import (lookup, LABELS, decode, encode, iter_decode, iter_encode,
+ IncrementalDecoder, IncrementalEncoder, UTF8)
+
+
+def assert_raises(exception, function, *args, **kwargs):
+ try:
+ function(*args, **kwargs)
+ except exception:
+ return
+ else: # pragma: no cover
+ raise AssertionError('Did not raise %s.' % exception)
+
+
+def test_labels():
+ assert lookup('utf-8').name == 'utf-8'
+ assert lookup('Utf-8').name == 'utf-8'
+ assert lookup('UTF-8').name == 'utf-8'
+ assert lookup('utf8').name == 'utf-8'
+ assert lookup('utf8').name == 'utf-8'
+ assert lookup('utf8 ').name == 'utf-8'
+ assert lookup(' \r\nutf8\t').name == 'utf-8'
+ assert lookup('u8') is None # Python label.
+ assert lookup('utf-8\u00a0') is None # Non-ASCII white space.
+
+ assert lookup('US-ASCII').name == 'windows-1252'
+ assert lookup('iso-8859-1').name == 'windows-1252'
+ assert lookup('latin1').name == 'windows-1252'
+ assert lookup('LATIN1').name == 'windows-1252'
+ assert lookup('latin-1') is None
+ assert lookup('LATİN1') is None # ASCII-only case insensitivity.
+
+
+def test_all_labels():
+ for label in LABELS:
+ assert decode(b'', label) == ('', lookup(label))
+ assert encode('', label) == b''
+ for repeat in [0, 1, 12]:
+ output, _ = iter_decode([b''] * repeat, label)
+ assert list(output) == []
+ assert list(iter_encode([''] * repeat, label)) == []
+ decoder = IncrementalDecoder(label)
+ assert decoder.decode(b'') == ''
+ assert decoder.decode(b'', final=True) == ''
+ encoder = IncrementalEncoder(label)
+ assert encoder.encode('') == b''
+ assert encoder.encode('', final=True) == b''
+ # All encoding names are valid labels too:
+ for name in set(LABELS.values()):
+ assert lookup(name).name == name
+
+
+def test_invalid_label():
+ assert_raises(LookupError, decode, b'\xEF\xBB\xBF\xc3\xa9', 'invalid')
+ assert_raises(LookupError, encode, 'é', 'invalid')
+ assert_raises(LookupError, iter_decode, [], 'invalid')
+ assert_raises(LookupError, iter_encode, [], 'invalid')
+ assert_raises(LookupError, IncrementalDecoder, 'invalid')
+ assert_raises(LookupError, IncrementalEncoder, 'invalid')
+
+
+def test_decode():
+ assert decode(b'\x80', 'latin1') == ('€', lookup('latin1'))
+ assert decode(b'\x80', lookup('latin1')) == ('€', lookup('latin1'))
+ assert decode(b'\xc3\xa9', 'utf8') == ('é', lookup('utf8'))
+ assert decode(b'\xc3\xa9', UTF8) == ('é', lookup('utf8'))
+ assert decode(b'\xc3\xa9', 'ascii') == ('é', lookup('ascii'))
+ assert decode(b'\xEF\xBB\xBF\xc3\xa9', 'ascii') == ('é', lookup('utf8')) # UTF-8 with BOM
+
+ assert decode(b'\xFE\xFF\x00\xe9', 'ascii') == ('é', lookup('utf-16be')) # UTF-16-BE with BOM
+ assert decode(b'\xFF\xFE\xe9\x00', 'ascii') == ('é', lookup('utf-16le')) # UTF-16-LE with BOM
+ assert decode(b'\xFE\xFF\xe9\x00', 'ascii') == ('\ue900', lookup('utf-16be'))
+ assert decode(b'\xFF\xFE\x00\xe9', 'ascii') == ('\ue900', lookup('utf-16le'))
+
+ assert decode(b'\x00\xe9', 'UTF-16BE') == ('é', lookup('utf-16be'))
+ assert decode(b'\xe9\x00', 'UTF-16LE') == ('é', lookup('utf-16le'))
+ assert decode(b'\xe9\x00', 'UTF-16') == ('é', lookup('utf-16le'))
+
+ assert decode(b'\xe9\x00', 'UTF-16BE') == ('\ue900', lookup('utf-16be'))
+ assert decode(b'\x00\xe9', 'UTF-16LE') == ('\ue900', lookup('utf-16le'))
+ assert decode(b'\x00\xe9', 'UTF-16') == ('\ue900', lookup('utf-16le'))
+
+
+def test_encode():
+ assert encode('é', 'latin1') == b'\xe9'
+ assert encode('é', 'utf8') == b'\xc3\xa9'
+ assert encode('é', 'utf8') == b'\xc3\xa9'
+ assert encode('é', 'utf-16') == b'\xe9\x00'
+ assert encode('é', 'utf-16le') == b'\xe9\x00'
+ assert encode('é', 'utf-16be') == b'\x00\xe9'
+
+
+def test_iter_decode():
+ def iter_decode_to_string(input, fallback_encoding):
+ output, _encoding = iter_decode(input, fallback_encoding)
+ return ''.join(output)
+ assert iter_decode_to_string([], 'latin1') == ''
+ assert iter_decode_to_string([b''], 'latin1') == ''
+ assert iter_decode_to_string([b'\xe9'], 'latin1') == 'é'
+ assert iter_decode_to_string([b'hello'], 'latin1') == 'hello'
+ assert iter_decode_to_string([b'he', b'llo'], 'latin1') == 'hello'
+ assert iter_decode_to_string([b'hell', b'o'], 'latin1') == 'hello'
+ assert iter_decode_to_string([b'\xc3\xa9'], 'latin1') == 'é'
+ assert iter_decode_to_string([b'\xEF\xBB\xBF\xc3\xa9'], 'latin1') == 'é'
+ assert iter_decode_to_string([
+ b'\xEF\xBB\xBF', b'\xc3', b'\xa9'], 'latin1') == 'é'
+ assert iter_decode_to_string([
+ b'\xEF\xBB\xBF', b'a', b'\xc3'], 'latin1') == 'a\uFFFD'
+ assert iter_decode_to_string([
+ b'', b'\xEF', b'', b'', b'\xBB\xBF\xc3', b'\xa9'], 'latin1') == 'é'
+ assert iter_decode_to_string([b'\xEF\xBB\xBF'], 'latin1') == ''
+ assert iter_decode_to_string([b'\xEF\xBB'], 'latin1') == 'ï»'
+ assert iter_decode_to_string([b'\xFE\xFF\x00\xe9'], 'latin1') == 'é'
+ assert iter_decode_to_string([b'\xFF\xFE\xe9\x00'], 'latin1') == 'é'
+ assert iter_decode_to_string([
+ b'', b'\xFF', b'', b'', b'\xFE\xe9', b'\x00'], 'latin1') == 'é'
+ assert iter_decode_to_string([
+ b'', b'h\xe9', b'llo'], 'x-user-defined') == 'h\uF7E9llo'
+
+
+def test_iter_encode():
+ assert b''.join(iter_encode([], 'latin1')) == b''
+ assert b''.join(iter_encode([''], 'latin1')) == b''
+ assert b''.join(iter_encode(['é'], 'latin1')) == b'\xe9'
+ assert b''.join(iter_encode(['', 'é', '', ''], 'latin1')) == b'\xe9'
+ assert b''.join(iter_encode(['', 'é', '', ''], 'utf-16')) == b'\xe9\x00'
+ assert b''.join(iter_encode(['', 'é', '', ''], 'utf-16le')) == b'\xe9\x00'
+ assert b''.join(iter_encode(['', 'é', '', ''], 'utf-16be')) == b'\x00\xe9'
+ assert b''.join(iter_encode([
+ '', 'h\uF7E9', '', 'llo'], 'x-user-defined')) == b'h\xe9llo'
+
+
+def test_x_user_defined():
+ encoded = b'2,\x0c\x0b\x1aO\xd9#\xcb\x0f\xc9\xbbt\xcf\xa8\xca'
+ decoded = '2,\x0c\x0b\x1aO\uf7d9#\uf7cb\x0f\uf7c9\uf7bbt\uf7cf\uf7a8\uf7ca'
+ encoded = b'aa'
+ decoded = 'aa'
+ assert decode(encoded, 'x-user-defined') == (decoded, lookup('x-user-defined'))
+ assert encode(decoded, 'x-user-defined') == encoded
diff --git a/venv/lib/python3.9/site-packages/pip/_vendor/webencodings/x_user_defined.py b/venv/lib/python3.9/site-packages/pip/_vendor/webencodings/x_user_defined.py
new file mode 100644
index 0000000..d16e326
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/_vendor/webencodings/x_user_defined.py
@@ -0,0 +1,325 @@
+# coding: utf-8
+"""
+
+ webencodings.x_user_defined
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ An implementation of the x-user-defined encoding.
+
+ :copyright: Copyright 2012 by Simon Sapin
+ :license: BSD, see LICENSE for details.
+
+"""
+
+from __future__ import unicode_literals
+
+import codecs
+
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+ def encode(self, input, errors='strict'):
+ return codecs.charmap_encode(input, errors, encoding_table)
+
+ def decode(self, input, errors='strict'):
+ return codecs.charmap_decode(input, errors, decoding_table)
+
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ return codecs.charmap_encode(input, self.errors, encoding_table)[0]
+
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ return codecs.charmap_decode(input, self.errors, decoding_table)[0]
+
+
+class StreamWriter(Codec, codecs.StreamWriter):
+ pass
+
+
+class StreamReader(Codec, codecs.StreamReader):
+ pass
+
+
+### encodings module API
+
+codec_info = codecs.CodecInfo(
+ name='x-user-defined',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+)
+
+
+### Decoding Table
+
+# Python 3:
+# for c in range(256): print(' %r' % chr(c if c < 128 else c + 0xF700))
+decoding_table = (
+ '\x00'
+ '\x01'
+ '\x02'
+ '\x03'
+ '\x04'
+ '\x05'
+ '\x06'
+ '\x07'
+ '\x08'
+ '\t'
+ '\n'
+ '\x0b'
+ '\x0c'
+ '\r'
+ '\x0e'
+ '\x0f'
+ '\x10'
+ '\x11'
+ '\x12'
+ '\x13'
+ '\x14'
+ '\x15'
+ '\x16'
+ '\x17'
+ '\x18'
+ '\x19'
+ '\x1a'
+ '\x1b'
+ '\x1c'
+ '\x1d'
+ '\x1e'
+ '\x1f'
+ ' '
+ '!'
+ '"'
+ '#'
+ '$'
+ '%'
+ '&'
+ "'"
+ '('
+ ')'
+ '*'
+ '+'
+ ','
+ '-'
+ '.'
+ '/'
+ '0'
+ '1'
+ '2'
+ '3'
+ '4'
+ '5'
+ '6'
+ '7'
+ '8'
+ '9'
+ ':'
+ ';'
+ '<'
+ '='
+ '>'
+ '?'
+ '@'
+ 'A'
+ 'B'
+ 'C'
+ 'D'
+ 'E'
+ 'F'
+ 'G'
+ 'H'
+ 'I'
+ 'J'
+ 'K'
+ 'L'
+ 'M'
+ 'N'
+ 'O'
+ 'P'
+ 'Q'
+ 'R'
+ 'S'
+ 'T'
+ 'U'
+ 'V'
+ 'W'
+ 'X'
+ 'Y'
+ 'Z'
+ '['
+ '\\'
+ ']'
+ '^'
+ '_'
+ '`'
+ 'a'
+ 'b'
+ 'c'
+ 'd'
+ 'e'
+ 'f'
+ 'g'
+ 'h'
+ 'i'
+ 'j'
+ 'k'
+ 'l'
+ 'm'
+ 'n'
+ 'o'
+ 'p'
+ 'q'
+ 'r'
+ 's'
+ 't'
+ 'u'
+ 'v'
+ 'w'
+ 'x'
+ 'y'
+ 'z'
+ '{'
+ '|'
+ '}'
+ '~'
+ '\x7f'
+ '\uf780'
+ '\uf781'
+ '\uf782'
+ '\uf783'
+ '\uf784'
+ '\uf785'
+ '\uf786'
+ '\uf787'
+ '\uf788'
+ '\uf789'
+ '\uf78a'
+ '\uf78b'
+ '\uf78c'
+ '\uf78d'
+ '\uf78e'
+ '\uf78f'
+ '\uf790'
+ '\uf791'
+ '\uf792'
+ '\uf793'
+ '\uf794'
+ '\uf795'
+ '\uf796'
+ '\uf797'
+ '\uf798'
+ '\uf799'
+ '\uf79a'
+ '\uf79b'
+ '\uf79c'
+ '\uf79d'
+ '\uf79e'
+ '\uf79f'
+ '\uf7a0'
+ '\uf7a1'
+ '\uf7a2'
+ '\uf7a3'
+ '\uf7a4'
+ '\uf7a5'
+ '\uf7a6'
+ '\uf7a7'
+ '\uf7a8'
+ '\uf7a9'
+ '\uf7aa'
+ '\uf7ab'
+ '\uf7ac'
+ '\uf7ad'
+ '\uf7ae'
+ '\uf7af'
+ '\uf7b0'
+ '\uf7b1'
+ '\uf7b2'
+ '\uf7b3'
+ '\uf7b4'
+ '\uf7b5'
+ '\uf7b6'
+ '\uf7b7'
+ '\uf7b8'
+ '\uf7b9'
+ '\uf7ba'
+ '\uf7bb'
+ '\uf7bc'
+ '\uf7bd'
+ '\uf7be'
+ '\uf7bf'
+ '\uf7c0'
+ '\uf7c1'
+ '\uf7c2'
+ '\uf7c3'
+ '\uf7c4'
+ '\uf7c5'
+ '\uf7c6'
+ '\uf7c7'
+ '\uf7c8'
+ '\uf7c9'
+ '\uf7ca'
+ '\uf7cb'
+ '\uf7cc'
+ '\uf7cd'
+ '\uf7ce'
+ '\uf7cf'
+ '\uf7d0'
+ '\uf7d1'
+ '\uf7d2'
+ '\uf7d3'
+ '\uf7d4'
+ '\uf7d5'
+ '\uf7d6'
+ '\uf7d7'
+ '\uf7d8'
+ '\uf7d9'
+ '\uf7da'
+ '\uf7db'
+ '\uf7dc'
+ '\uf7dd'
+ '\uf7de'
+ '\uf7df'
+ '\uf7e0'
+ '\uf7e1'
+ '\uf7e2'
+ '\uf7e3'
+ '\uf7e4'
+ '\uf7e5'
+ '\uf7e6'
+ '\uf7e7'
+ '\uf7e8'
+ '\uf7e9'
+ '\uf7ea'
+ '\uf7eb'
+ '\uf7ec'
+ '\uf7ed'
+ '\uf7ee'
+ '\uf7ef'
+ '\uf7f0'
+ '\uf7f1'
+ '\uf7f2'
+ '\uf7f3'
+ '\uf7f4'
+ '\uf7f5'
+ '\uf7f6'
+ '\uf7f7'
+ '\uf7f8'
+ '\uf7f9'
+ '\uf7fa'
+ '\uf7fb'
+ '\uf7fc'
+ '\uf7fd'
+ '\uf7fe'
+ '\uf7ff'
+)
+
+### Encoding table
+encoding_table = codecs.charmap_build(decoding_table)
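
A small self-contained check of the mapping implemented above: ASCII bytes decode to themselves, bytes 0x80-0xFF map to the private-use range U+F780-U+F7FF (byte value + 0xF700), and encoding reverses it. The table is rebuilt locally so the sketch runs on its own.

import codecs

# Equivalent of the decoding_table above, rebuilt for a standalone demonstration.
table = ''.join(chr(b if b < 128 else b + 0xF700) for b in range(256))

assert codecs.charmap_decode(b'a\x80\xff', 'strict', table)[0] == 'a\uf780\uf7ff'
assert codecs.charmap_encode('a\uf780\uf7ff', 'strict',
                             codecs.charmap_build(table))[0] == b'a\x80\xff'
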
diff --git a/venv/lib/python3.9/site-packages/pip/py.typed b/venv/lib/python3.9/site-packages/pip/py.typed
new file mode 100644
index 0000000..493b53e
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pip/py.typed
@@ -0,0 +1,4 @@
+pip is a command line program. While it is implemented in Python, and so is
+available for import, you must not use pip's internal APIs in this way. Typing
+information is provided as a convenience only and is not a guarantee. Expect
+unannounced changes to the API and types in releases.
diff --git a/venv/lib/python3.9/site-packages/pkg_resources/__init__.py b/venv/lib/python3.9/site-packages/pkg_resources/__init__.py
new file mode 100644
index 0000000..c84f1dd
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pkg_resources/__init__.py
@@ -0,0 +1,3288 @@
+"""
+Package resource API
+--------------------
+
+A resource is a logical file contained within a package, or a logical
+subdirectory thereof. The package resource API expects resource names
+to have their path parts separated with ``/``, *not* whatever the local
+path separator is. Do not use os.path operations to manipulate resource
+names being passed into the API.
+
+The package resource API is designed to work with normal filesystem packages,
+.egg files, and unpacked .egg files. It can also work in a limited way with
+.zip files and with custom PEP 302 loaders that support the ``get_data()``
+method.
+"""
+
+import sys
+import os
+import io
+import time
+import re
+import types
+import zipfile
+import zipimport
+import warnings
+import stat
+import functools
+import pkgutil
+import operator
+import platform
+import collections
+import plistlib
+import email.parser
+import errno
+import tempfile
+import textwrap
+import itertools
+import inspect
+import ntpath
+import posixpath
+import importlib
+from pkgutil import get_importer
+
+try:
+ import _imp
+except ImportError:
+ # Python 3.2 compatibility
+ import imp as _imp
+
+try:
+ FileExistsError
+except NameError:
+ FileExistsError = OSError
+
+# capture these to bypass sandboxing
+from os import utime
+try:
+ from os import mkdir, rename, unlink
+ WRITE_SUPPORT = True
+except ImportError:
+ # no write support, probably under GAE
+ WRITE_SUPPORT = False
+
+from os import open as os_open
+from os.path import isdir, split
+
+try:
+ import importlib.machinery as importlib_machinery
+ # access attribute to force import under delayed import mechanisms.
+ importlib_machinery.__name__
+except ImportError:
+ importlib_machinery = None
+
+from pkg_resources.extern import appdirs
+from pkg_resources.extern import packaging
+__import__('pkg_resources.extern.packaging.version')
+__import__('pkg_resources.extern.packaging.specifiers')
+__import__('pkg_resources.extern.packaging.requirements')
+__import__('pkg_resources.extern.packaging.markers')
+
+if sys.version_info < (3, 5):
+ raise RuntimeError("Python 3.5 or later is required")
+
+# declare some globals that will be defined later to
+# satisfy the linters.
+require = None
+working_set = None
+add_activation_listener = None
+resources_stream = None
+cleanup_resources = None
+resource_dir = None
+resource_stream = None
+set_extraction_path = None
+resource_isdir = None
+resource_string = None
+iter_entry_points = None
+resource_listdir = None
+resource_filename = None
+resource_exists = None
+_distribution_finders = None
+_namespace_handlers = None
+_namespace_packages = None
+
+
+class PEP440Warning(RuntimeWarning):
+ """
+ Used when there is an issue with a version or specifier not complying with
+ PEP 440.
+ """
+
+
+def parse_version(v):
+ try:
+ return packaging.version.Version(v)
+ except packaging.version.InvalidVersion:
+ return packaging.version.LegacyVersion(v)
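
For example (a hedged sketch; ``parse_version`` is part of the public ``pkg_resources`` API):

from pkg_resources import parse_version

assert parse_version('1.10') > parse_version('1.9')       # numeric, not lexicographic
assert parse_version('2.0.dev1') < parse_version('2.0')   # dev/pre-releases sort before the release
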
+
+
+_state_vars = {}
+
+
+def _declare_state(vartype, **kw):
+ globals().update(kw)
+ _state_vars.update(dict.fromkeys(kw, vartype))
+
+
+def __getstate__():
+ state = {}
+ g = globals()
+ for k, v in _state_vars.items():
+ state[k] = g['_sget_' + v](g[k])
+ return state
+
+
+def __setstate__(state):
+ g = globals()
+ for k, v in state.items():
+ g['_sset_' + _state_vars[k]](k, g[k], v)
+ return state
+
+
+def _sget_dict(val):
+ return val.copy()
+
+
+def _sset_dict(key, ob, state):
+ ob.clear()
+ ob.update(state)
+
+
+def _sget_object(val):
+ return val.__getstate__()
+
+
+def _sset_object(key, ob, state):
+ ob.__setstate__(state)
+
+
+_sget_none = _sset_none = lambda *args: None
+
+
+def get_supported_platform():
+ """Return this platform's maximum compatible version.
+
+ distutils.util.get_platform() normally reports the minimum version
+ of macOS that would be required to *use* extensions produced by
+ distutils. But what we want when checking compatibility is to know the
+ version of macOS that we are *running*. To allow usage of packages that
+ explicitly require a newer version of macOS, we must also know the
+ current version of the OS.
+
+ If this condition occurs for any other platform with a version in its
+ platform strings, this function should be extended accordingly.
+ """
+ plat = get_build_platform()
+ m = macosVersionString.match(plat)
+ if m is not None and sys.platform == "darwin":
+ try:
+ plat = 'macosx-%s-%s' % ('.'.join(_macos_vers()[:2]), m.group(3))
+ except ValueError:
+ # not macOS
+ pass
+ return plat
+
+
+__all__ = [
+ # Basic resource access and distribution/entry point discovery
+ 'require', 'run_script', 'get_provider', 'get_distribution',
+ 'load_entry_point', 'get_entry_map', 'get_entry_info',
+ 'iter_entry_points',
+ 'resource_string', 'resource_stream', 'resource_filename',
+ 'resource_listdir', 'resource_exists', 'resource_isdir',
+
+ # Environmental control
+ 'declare_namespace', 'working_set', 'add_activation_listener',
+ 'find_distributions', 'set_extraction_path', 'cleanup_resources',
+ 'get_default_cache',
+
+ # Primary implementation classes
+ 'Environment', 'WorkingSet', 'ResourceManager',
+ 'Distribution', 'Requirement', 'EntryPoint',
+
+ # Exceptions
+ 'ResolutionError', 'VersionConflict', 'DistributionNotFound',
+ 'UnknownExtra', 'ExtractionError',
+
+ # Warnings
+ 'PEP440Warning',
+
+ # Parsing functions and string utilities
+ 'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
+ 'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
+ 'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
+
+ # filesystem utilities
+ 'ensure_directory', 'normalize_path',
+
+ # Distribution "precedence" constants
+ 'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
+
+ # "Provider" interfaces, implementations, and registration/lookup APIs
+ 'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
+ 'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
+ 'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
+ 'register_finder', 'register_namespace_handler', 'register_loader_type',
+ 'fixup_namespace_packages', 'get_importer',
+
+ # Warnings
+ 'PkgResourcesDeprecationWarning',
+
+ # Deprecated/backward compatibility only
+ 'run_main', 'AvailableDistributions',
+]
+
+
+class ResolutionError(Exception):
+ """Abstract base for dependency resolution errors"""
+
+ def __repr__(self):
+ return self.__class__.__name__ + repr(self.args)
+
+
+class VersionConflict(ResolutionError):
+ """
+ An already-installed version conflicts with the requested version.
+
+ Should be initialized with the installed Distribution and the requested
+ Requirement.
+ """
+
+ _template = "{self.dist} is installed but {self.req} is required"
+
+ @property
+ def dist(self):
+ return self.args[0]
+
+ @property
+ def req(self):
+ return self.args[1]
+
+ def report(self):
+ return self._template.format(**locals())
+
+ def with_context(self, required_by):
+ """
+ If required_by is non-empty, return a version of self that is a
+ ContextualVersionConflict.
+ """
+ if not required_by:
+ return self
+ args = self.args + (required_by,)
+ return ContextualVersionConflict(*args)
+
+
+class ContextualVersionConflict(VersionConflict):
+ """
+ A VersionConflict that accepts a third parameter, the set of the
+ requirements that required the installed Distribution.
+ """
+
+ _template = VersionConflict._template + ' by {self.required_by}'
+
+ @property
+ def required_by(self):
+ return self.args[2]
+
+
+class DistributionNotFound(ResolutionError):
+ """A requested distribution was not found"""
+
+ _template = ("The '{self.req}' distribution was not found "
+ "and is required by {self.requirers_str}")
+
+ @property
+ def req(self):
+ return self.args[0]
+
+ @property
+ def requirers(self):
+ return self.args[1]
+
+ @property
+ def requirers_str(self):
+ if not self.requirers:
+ return 'the application'
+ return ', '.join(self.requirers)
+
+ def report(self):
+ return self._template.format(**locals())
+
+ def __str__(self):
+ return self.report()
+
+
+class UnknownExtra(ResolutionError):
+ """Distribution doesn't have an "extra feature" of the given name"""
+
+
+_provider_factories = {}
+
+PY_MAJOR = '{}.{}'.format(*sys.version_info)
+EGG_DIST = 3
+BINARY_DIST = 2
+SOURCE_DIST = 1
+CHECKOUT_DIST = 0
+DEVELOP_DIST = -1
+
+
+def register_loader_type(loader_type, provider_factory):
+ """Register `provider_factory` to make providers for `loader_type`
+
+ `loader_type` is the type or class of a PEP 302 ``module.__loader__``,
+ and `provider_factory` is a function that, passed a *module* object,
+ returns an ``IResourceProvider`` for that module.
+ """
+ _provider_factories[loader_type] = provider_factory
+
+
+def get_provider(moduleOrReq):
+ """Return an IResourceProvider for the named module or requirement"""
+ if isinstance(moduleOrReq, Requirement):
+ return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
+ try:
+ module = sys.modules[moduleOrReq]
+ except KeyError:
+ __import__(moduleOrReq)
+ module = sys.modules[moduleOrReq]
+ loader = getattr(module, '__loader__', None)
+ return _find_adapter(_provider_factories, loader)(module)
+
+
+def _macos_vers(_cache=[]):
+ if not _cache:
+ version = platform.mac_ver()[0]
+ # fallback for MacPorts
+ if version == '':
+ plist = '/System/Library/CoreServices/SystemVersion.plist'
+ if os.path.exists(plist):
+ if hasattr(plistlib, 'readPlist'):
+ plist_content = plistlib.readPlist(plist)
+ if 'ProductVersion' in plist_content:
+ version = plist_content['ProductVersion']
+
+ _cache.append(version.split('.'))
+ return _cache[0]
+
+
+def _macos_arch(machine):
+ return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
+
+
+def get_build_platform():
+ """Return this platform's string for platform-specific distributions
+
+ XXX Currently this is the same as ``distutils.util.get_platform()``, but it
+ needs some hacks for Linux and macOS.
+ """
+ from sysconfig import get_platform
+
+ plat = get_platform()
+ if sys.platform == "darwin" and not plat.startswith('macosx-'):
+ try:
+ version = _macos_vers()
+ machine = os.uname()[4].replace(" ", "_")
+ return "macosx-%d.%d-%s" % (
+ int(version[0]), int(version[1]),
+ _macos_arch(machine),
+ )
+ except ValueError:
+ # if someone is running a non-Mac darwin system, this will fall
+ # through to the default implementation
+ pass
+ return plat
+
+
+macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
+darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
+# XXX backward compat
+get_platform = get_build_platform
+
+
+def compatible_platforms(provided, required):
+ """Can code for the `provided` platform run on the `required` platform?
+
+ Returns true if either platform is ``None``, or the platforms are equal.
+
+ XXX Needs compatibility checks for Linux and other unixy OSes.
+ """
+ if provided is None or required is None or provided == required:
+ # easy case
+ return True
+
+ # macOS special cases
+ reqMac = macosVersionString.match(required)
+ if reqMac:
+ provMac = macosVersionString.match(provided)
+
+ # is this a Mac package?
+ if not provMac:
+ # this is backwards compatibility for packages built before
+ # setuptools 0.6. All packages built after this point will
+ # use the new macOS designation.
+ provDarwin = darwinVersionString.match(provided)
+ if provDarwin:
+ dversion = int(provDarwin.group(1))
+ macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
+ if dversion == 7 and macosversion >= "10.3" or \
+ dversion == 8 and macosversion >= "10.4":
+ return True
+ # egg isn't macOS or legacy darwin
+ return False
+
+ # are they the same major version and machine type?
+ if provMac.group(1) != reqMac.group(1) or \
+ provMac.group(3) != reqMac.group(3):
+ return False
+
+ # is the required OS major update >= the provided one?
+ if int(provMac.group(2)) > int(reqMac.group(2)):
+ return False
+
+ return True
+
+ # XXX Linux and other platforms' special cases should go here
+ return False
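
Illustrative checks against the rules above (a sketch; macOS is the only platform family with special handling here):

from pkg_resources import compatible_platforms

assert compatible_platforms(None, 'linux-x86_64')                        # None matches anything
assert compatible_platforms('macosx-10.3-ppc', 'macosx-10.9-ppc')        # built for an older macOS
assert not compatible_platforms('macosx-10.9-ppc', 'macosx-10.3-ppc')    # needs a newer macOS
assert not compatible_platforms('macosx-10.9-ppc', 'macosx-10.9-intel')  # machine type differs
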
+
+
+def run_script(dist_spec, script_name):
+ """Locate distribution `dist_spec` and run its `script_name` script"""
+ ns = sys._getframe(1).f_globals
+ name = ns['__name__']
+ ns.clear()
+ ns['__name__'] = name
+ require(dist_spec)[0].run_script(script_name, ns)
+
+
+# backward compatibility
+run_main = run_script
+
+
+def get_distribution(dist):
+ """Return a current distribution object for a Requirement or string"""
+ if isinstance(dist, str):
+ dist = Requirement.parse(dist)
+ if isinstance(dist, Requirement):
+ dist = get_provider(dist)
+ if not isinstance(dist, Distribution):
+ raise TypeError("Expected string, Requirement, or Distribution", dist)
+ return dist
+
+
+def load_entry_point(dist, group, name):
+ """Return `name` entry point of `group` for `dist` or raise ImportError"""
+ return get_distribution(dist).load_entry_point(group, name)
+
+
+def get_entry_map(dist, group=None):
+ """Return the entry point map for `group`, or the full entry map"""
+ return get_distribution(dist).get_entry_map(group)
+
+
+def get_entry_info(dist, group, name):
+ """Return the EntryPoint object for `group`+`name`, or ``None``"""
+ return get_distribution(dist).get_entry_info(group, name)
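
A hedged sketch of the convenience wrappers above, assuming ``setuptools`` (which ships ``pkg_resources``) is installed:

from pkg_resources import get_distribution, get_entry_map

dist = get_distribution('setuptools')   # accepts a name, a Requirement, or a Distribution
print(dist.project_name, dist.version)
print(sorted(get_entry_map(dist, 'console_scripts')))  # [] if the group is absent
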
+
+
+class IMetadataProvider:
+ def has_metadata(name):
+ """Does the package's distribution contain the named metadata?"""
+
+ def get_metadata(name):
+ """The named metadata resource as a string"""
+
+ def get_metadata_lines(name):
+ """Yield named metadata resource as list of non-blank non-comment lines
+
+ Leading and trailing whitespace is stripped from each line, and lines
+ with ``#`` as the first non-blank character are omitted."""
+
+ def metadata_isdir(name):
+ """Is the named metadata a directory? (like ``os.path.isdir()``)"""
+
+ def metadata_listdir(name):
+ """List of metadata names in the directory (like ``os.listdir()``)"""
+
+ def run_script(script_name, namespace):
+ """Execute the named script in the supplied namespace dictionary"""
+
+
+class IResourceProvider(IMetadataProvider):
+ """An object that provides access to package resources"""
+
+ def get_resource_filename(manager, resource_name):
+ """Return a true filesystem path for `resource_name`
+
+ `manager` must be an ``IResourceManager``"""
+
+ def get_resource_stream(manager, resource_name):
+ """Return a readable file-like object for `resource_name`
+
+ `manager` must be an ``IResourceManager``"""
+
+ def get_resource_string(manager, resource_name):
+ """Return a string containing the contents of `resource_name`
+
+ `manager` must be an ``IResourceManager``"""
+
+ def has_resource(resource_name):
+ """Does the package contain the named resource?"""
+
+ def resource_isdir(resource_name):
+ """Is the named resource a directory? (like ``os.path.isdir()``)"""
+
+ def resource_listdir(resource_name):
+ """List of resource names in the directory (like ``os.listdir()``)"""
+
+
+class WorkingSet:
+ """A collection of active distributions on sys.path (or a similar list)"""
+
+ def __init__(self, entries=None):
+ """Create working set from list of path entries (default=sys.path)"""
+ self.entries = []
+ self.entry_keys = {}
+ self.by_key = {}
+ self.callbacks = []
+
+ if entries is None:
+ entries = sys.path
+
+ for entry in entries:
+ self.add_entry(entry)
+
+ @classmethod
+ def _build_master(cls):
+ """
+ Prepare the master working set.
+ """
+ ws = cls()
+ try:
+ from __main__ import __requires__
+ except ImportError:
+ # The main program does not list any requirements
+ return ws
+
+ # ensure the requirements are met
+ try:
+ ws.require(__requires__)
+ except VersionConflict:
+ return cls._build_from_requirements(__requires__)
+
+ return ws
+
+ @classmethod
+ def _build_from_requirements(cls, req_spec):
+ """
+ Build a working set from a requirement spec. Rewrites sys.path.
+ """
+ # try it without defaults already on sys.path
+ # by starting with an empty path
+ ws = cls([])
+ reqs = parse_requirements(req_spec)
+ dists = ws.resolve(reqs, Environment())
+ for dist in dists:
+ ws.add(dist)
+
+ # add any missing entries from sys.path
+ for entry in sys.path:
+ if entry not in ws.entries:
+ ws.add_entry(entry)
+
+ # then copy back to sys.path
+ sys.path[:] = ws.entries
+ return ws
+
+ def add_entry(self, entry):
+ """Add a path item to ``.entries``, finding any distributions on it
+
+ ``find_distributions(entry, True)`` is used to find distributions
+ corresponding to the path entry, and they are added. `entry` is
+ always appended to ``.entries``, even if it is already present.
+ (This is because ``sys.path`` can contain the same value more than
+ once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
+ equal ``sys.path``.)
+ """
+ self.entry_keys.setdefault(entry, [])
+ self.entries.append(entry)
+ for dist in find_distributions(entry, True):
+ self.add(dist, entry, False)
+
+ def __contains__(self, dist):
+ """True if `dist` is the active distribution for its project"""
+ return self.by_key.get(dist.key) == dist
+
+ def find(self, req):
+ """Find a distribution matching requirement `req`
+
+ If there is an active distribution for the requested project, this
+ returns it as long as it meets the version requirement specified by
+ `req`. But, if there is an active distribution for the project and it
+ does *not* meet the `req` requirement, ``VersionConflict`` is raised.
+ If there is no active distribution for the requested project, ``None``
+ is returned.
+ """
+ dist = self.by_key.get(req.key)
+ if dist is not None and dist not in req:
+ # XXX add more info
+ raise VersionConflict(dist, req)
+ return dist
+
+ def iter_entry_points(self, group, name=None):
+ """Yield entry point objects from `group` matching `name`
+
+ If `name` is None, yields all entry points in `group` from all
+ distributions in the working set, otherwise only ones matching
+ both `group` and `name` are yielded (in distribution order).
+ """
+ return (
+ entry
+ for dist in self
+ for entry in dist.get_entry_map(group).values()
+ if name is None or name == entry.name
+ )
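
A short sketch of entry-point discovery via the module-level ``working_set`` built further down in this file; ``console_scripts`` is a common group, but any group name works:

from pkg_resources import working_set

for ep in working_set.iter_entry_points('console_scripts'):
    print(ep.name, '->', ep.module_name)
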
+
+ def run_script(self, requires, script_name):
+ """Locate distribution for `requires` and run `script_name` script"""
+ ns = sys._getframe(1).f_globals
+ name = ns['__name__']
+ ns.clear()
+ ns['__name__'] = name
+ self.require(requires)[0].run_script(script_name, ns)
+
+ def __iter__(self):
+ """Yield distributions for non-duplicate projects in the working set
+
+ The yield order is the order in which the items' path entries were
+ added to the working set.
+ """
+ seen = {}
+ for item in self.entries:
+ if item not in self.entry_keys:
+ # workaround a cache issue
+ continue
+
+ for key in self.entry_keys[item]:
+ if key not in seen:
+ seen[key] = 1
+ yield self.by_key[key]
+
+ def add(self, dist, entry=None, insert=True, replace=False):
+ """Add `dist` to working set, associated with `entry`
+
+ If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
+ On exit from this routine, `entry` is added to the end of the working
+ set's ``.entries`` (if it wasn't already present).
+
+ `dist` is only added to the working set if it's for a project that
+ doesn't already have a distribution in the set, unless `replace=True`.
+ If it's added, any callbacks registered with the ``subscribe()`` method
+ will be called.
+ """
+ if insert:
+ dist.insert_on(self.entries, entry, replace=replace)
+
+ if entry is None:
+ entry = dist.location
+ keys = self.entry_keys.setdefault(entry, [])
+ keys2 = self.entry_keys.setdefault(dist.location, [])
+ if not replace and dist.key in self.by_key:
+ # ignore hidden distros
+ return
+
+ self.by_key[dist.key] = dist
+ if dist.key not in keys:
+ keys.append(dist.key)
+ if dist.key not in keys2:
+ keys2.append(dist.key)
+ self._added_new(dist)
+
+ # FIXME: 'WorkingSet.resolve' is too complex (11)
+ def resolve(self, requirements, env=None, installer=None, # noqa: C901
+ replace_conflicting=False, extras=None):
+ """List all distributions needed to (recursively) meet `requirements`
+
+ `requirements` must be a sequence of ``Requirement`` objects. `env`,
+ if supplied, should be an ``Environment`` instance. If
+ not supplied, it defaults to all distributions available within any
+ entry or distribution in the working set. `installer`, if supplied,
+ will be invoked with each requirement that cannot be met by an
+ already-installed distribution; it should return a ``Distribution`` or
+ ``None``.
+
+ Unless `replace_conflicting=True`, raises a VersionConflict exception
+ if
+ any requirements are found on the path that have the correct name but
+ the wrong version. Otherwise, if an `installer` is supplied it will be
+ invoked to obtain the correct version of the requirement and activate
+ it.
+
+ `extras` is a list of the extras to be used with these requirements.
+ This is important because extra requirements may look like `my_req;
+ extra = "my_extra"`, which would otherwise be interpreted as a purely
+ optional requirement. Instead, we want to be able to assert that these
+ requirements are truly required.
+ """
+
+ # set up the stack
+ requirements = list(requirements)[::-1]
+ # set of processed requirements
+ processed = {}
+ # key -> dist
+ best = {}
+ to_activate = []
+
+ req_extras = _ReqExtras()
+
+ # Mapping of requirement to set of distributions that required it;
+ # useful for reporting info about conflicts.
+ required_by = collections.defaultdict(set)
+
+ while requirements:
+ # process dependencies breadth-first
+ req = requirements.pop(0)
+ if req in processed:
+ # Ignore cyclic or redundant dependencies
+ continue
+
+ if not req_extras.markers_pass(req, extras):
+ continue
+
+ dist = best.get(req.key)
+ if dist is None:
+ # Find the best distribution and add it to the map
+ dist = self.by_key.get(req.key)
+ if dist is None or (dist not in req and replace_conflicting):
+ ws = self
+ if env is None:
+ if dist is None:
+ env = Environment(self.entries)
+ else:
+ # Use an empty environment and workingset to avoid
+ # any further conflicts with the conflicting
+ # distribution
+ env = Environment([])
+ ws = WorkingSet([])
+ dist = best[req.key] = env.best_match(
+ req, ws, installer,
+ replace_conflicting=replace_conflicting
+ )
+ if dist is None:
+ requirers = required_by.get(req, None)
+ raise DistributionNotFound(req, requirers)
+ to_activate.append(dist)
+ if dist not in req:
+ # Oops, the "best" so far conflicts with a dependency
+ dependent_req = required_by[req]
+ raise VersionConflict(dist, req).with_context(dependent_req)
+
+ # push the new requirements onto the stack
+ new_requirements = dist.requires(req.extras)[::-1]
+ requirements.extend(new_requirements)
+
+ # Register the new requirements needed by req
+ for new_requirement in new_requirements:
+ required_by[new_requirement].add(req.project_name)
+ req_extras[new_requirement] = req.extras
+
+ processed[req] = True
+
+ # return list of distros to activate
+ return to_activate
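
A hedged sketch of ``resolve()`` in use: given parsed ``Requirement`` objects it returns the distributions, in activation order, needed to satisfy them, raising ``DistributionNotFound`` or ``VersionConflict`` otherwise. The ``requests>=2.0`` requirement is purely illustrative.

from pkg_resources import WorkingSet, parse_requirements, DistributionNotFound

ws = WorkingSet()  # defaults to scanning sys.path
try:
    dists = ws.resolve(parse_requirements('requests>=2.0'))
    print([d.project_name for d in dists])
except DistributionNotFound as exc:
    print('missing:', exc.req)
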
+
+ def find_plugins(
+ self, plugin_env, full_env=None, installer=None, fallback=True):
+ """Find all activatable distributions in `plugin_env`
+
+ Example usage::
+
+ distributions, errors = working_set.find_plugins(
+ Environment(plugin_dirlist)
+ )
+ # add plugins+libs to sys.path
+ map(working_set.add, distributions)
+ # display errors
+ print('Could not load', errors)
+
+ The `plugin_env` should be an ``Environment`` instance that contains
+ only distributions that are in the project's "plugin directory" or
+ directories. The `full_env`, if supplied, should be an ``Environment``
+ that contains all currently-available distributions. If `full_env` is not
+ supplied, one is created automatically from the ``WorkingSet`` this
+ method is called on, which will typically mean that every directory on
+ ``sys.path`` will be scanned for distributions.
+
+ `installer` is a standard installer callback as used by the
+ ``resolve()`` method. The `fallback` flag indicates whether we should
+ attempt to resolve older versions of a plugin if the newest version
+ cannot be resolved.
+
+ This method returns a 2-tuple: (`distributions`, `error_info`), where
+ `distributions` is a list of the distributions found in `plugin_env`
+ that were loadable, along with any other distributions that are needed
+ to resolve their dependencies. `error_info` is a dictionary mapping
+ unloadable plugin distributions to an exception instance describing the
+ error that occurred. Usually this will be a ``DistributionNotFound`` or
+ ``VersionConflict`` instance.
+ """
+
+ plugin_projects = list(plugin_env)
+ # scan project names in alphabetic order
+ plugin_projects.sort()
+
+ error_info = {}
+ distributions = {}
+
+ if full_env is None:
+ env = Environment(self.entries)
+ env += plugin_env
+ else:
+ env = full_env + plugin_env
+
+ shadow_set = self.__class__([])
+ # put all our entries in shadow_set
+ list(map(shadow_set.add, self))
+
+ for project_name in plugin_projects:
+
+ for dist in plugin_env[project_name]:
+
+ req = [dist.as_requirement()]
+
+ try:
+ resolvees = shadow_set.resolve(req, env, installer)
+
+ except ResolutionError as v:
+ # save error info
+ error_info[dist] = v
+ if fallback:
+ # try the next older version of project
+ continue
+ else:
+ # give up on this project, keep going
+ break
+
+ else:
+ list(map(shadow_set.add, resolvees))
+ distributions.update(dict.fromkeys(resolvees))
+
+ # success, no need to try any more versions of this project
+ break
+
+ distributions = list(distributions)
+ distributions.sort()
+
+ return distributions, error_info
+
+ def require(self, *requirements):
+ """Ensure that distributions matching `requirements` are activated
+
+ `requirements` must be a string or a (possibly-nested) sequence
+ thereof, specifying the distributions and versions required. The
+ return value is a sequence of the distributions that needed to be
+ activated to fulfill the requirements; all relevant distributions are
+ included, even if they were already activated in this working set.
+ """
+ needed = self.resolve(parse_requirements(requirements))
+
+ for dist in needed:
+ self.add(dist)
+
+ return needed
+
+ def subscribe(self, callback, existing=True):
+ """Invoke `callback` for all distributions
+
+ If `existing=True` (default),
+ call on all existing ones, as well.
+ """
+ if callback in self.callbacks:
+ return
+ self.callbacks.append(callback)
+ if not existing:
+ return
+ for dist in self:
+ callback(dist)
+
+ def _added_new(self, dist):
+ for callback in self.callbacks:
+ callback(dist)
+
+ def __getstate__(self):
+ return (
+ self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
+ self.callbacks[:]
+ )
+
+ def __setstate__(self, e_k_b_c):
+ entries, keys, by_key, callbacks = e_k_b_c
+ self.entries = entries[:]
+ self.entry_keys = keys.copy()
+ self.by_key = by_key.copy()
+ self.callbacks = callbacks[:]
+
+
+class _ReqExtras(dict):
+ """
+ Map each requirement to the extras that demanded it.
+ """
+
+ def markers_pass(self, req, extras=None):
+ """
+ Evaluate markers for req against each extra that
+ demanded it.
+
+ Return False if the req has a marker and fails
+ evaluation. Otherwise, return True.
+ """
+ extra_evals = (
+ req.marker.evaluate({'extra': extra})
+ for extra in self.get(req, ()) + (extras or (None,))
+ )
+ return not req.marker or any(extra_evals)
+
+
+class Environment:
+ """Searchable snapshot of distributions on a search path"""
+
+ def __init__(
+ self, search_path=None, platform=get_supported_platform(),
+ python=PY_MAJOR):
+ """Snapshot distributions available on a search path
+
+ Any distributions found on `search_path` are added to the environment.
+ `search_path` should be a sequence of ``sys.path`` items. If not
+ supplied, ``sys.path`` is used.
+
+ `platform` is an optional string specifying the name of the platform
+ that platform-specific distributions must be compatible with. If
+ unspecified, it defaults to the current platform. `python` is an
+ optional string naming the desired version of Python (e.g. ``'3.6'``);
+ it defaults to the current version.
+
+ You may explicitly set `platform` (and/or `python`) to ``None`` if you
+ wish to map *all* distributions, not just those compatible with the
+ running platform or Python version.
+ """
+ self._distmap = {}
+ self.platform = platform
+ self.python = python
+ self.scan(search_path)
+
+ def can_add(self, dist):
+ """Is distribution `dist` acceptable for this environment?
+
+ The distribution must match the platform and python version
+ requirements specified when this environment was created, or False
+ is returned.
+ """
+ py_compat = (
+ self.python is None
+ or dist.py_version is None
+ or dist.py_version == self.python
+ )
+ return py_compat and compatible_platforms(dist.platform, self.platform)
+
+ def remove(self, dist):
+ """Remove `dist` from the environment"""
+ self._distmap[dist.key].remove(dist)
+
+ def scan(self, search_path=None):
+ """Scan `search_path` for distributions usable in this environment
+
+ Any distributions found are added to the environment.
+ `search_path` should be a sequence of ``sys.path`` items. If not
+ supplied, ``sys.path`` is used. Only distributions conforming to
+ the platform/python version defined at initialization are added.
+ """
+ if search_path is None:
+ search_path = sys.path
+
+ for item in search_path:
+ for dist in find_distributions(item):
+ self.add(dist)
+
+ def __getitem__(self, project_name):
+ """Return a newest-to-oldest list of distributions for `project_name`
+
+ Uses case-insensitive `project_name` comparison, assuming all the
+ project's distributions use their project's name converted to all
+ lowercase as their key.
+
+ """
+ distribution_key = project_name.lower()
+ return self._distmap.get(distribution_key, [])
+
+ def add(self, dist):
+ """Add `dist` if we ``can_add()`` it and it has not already been added
+ """
+ if self.can_add(dist) and dist.has_version():
+ dists = self._distmap.setdefault(dist.key, [])
+ if dist not in dists:
+ dists.append(dist)
+ dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
+
+ def best_match(
+ self, req, working_set, installer=None, replace_conflicting=False):
+ """Find distribution best matching `req` and usable on `working_set`
+
+ This calls the ``find(req)`` method of the `working_set` to see if a
+ suitable distribution is already active. (This may raise
+ ``VersionConflict`` if an unsuitable version of the project is already
+ active in the specified `working_set`.) If a suitable distribution
+ isn't active, this method returns the newest distribution in the
+ environment that meets the ``Requirement`` in `req`. If no suitable
+ distribution is found, and `installer` is supplied, then the result of
+ calling the environment's ``obtain(req, installer)`` method will be
+ returned.
+ """
+ try:
+ dist = working_set.find(req)
+ except VersionConflict:
+ if not replace_conflicting:
+ raise
+ dist = None
+ if dist is not None:
+ return dist
+ for dist in self[req.key]:
+ if dist in req:
+ return dist
+ # try to download/install
+ return self.obtain(req, installer)
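
A sketch of ``best_match()``: it prefers an already-active distribution from the working set, then the newest compatible snapshot entry, and finally falls back to ``obtain()``. The ``wheel`` requirement is illustrative only.

from pkg_resources import Environment, Requirement, WorkingSet

env = Environment()                 # snapshot of sys.path
ws = WorkingSet()
req = Requirement.parse('wheel')
print(env.best_match(req, ws))      # a Distribution, or None if nothing matches
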
+
+ def obtain(self, requirement, installer=None):
+ """Obtain a distribution matching `requirement` (e.g. via download)
+
+ Obtain a distro that matches requirement (e.g. via download). In the
+ base ``Environment`` class, this routine just returns
+ ``installer(requirement)``, unless `installer` is None, in which case
+ None is returned instead. This method is a hook that allows subclasses
+ to attempt other ways of obtaining a distribution before falling back
+ to the `installer` argument."""
+ if installer is not None:
+ return installer(requirement)
+
+ def __iter__(self):
+ """Yield the unique project names of the available distributions"""
+ for key in self._distmap.keys():
+ if self[key]:
+ yield key
+
+ def __iadd__(self, other):
+ """In-place addition of a distribution or environment"""
+ if isinstance(other, Distribution):
+ self.add(other)
+ elif isinstance(other, Environment):
+ for project in other:
+ for dist in other[project]:
+ self.add(dist)
+ else:
+ raise TypeError("Can't add %r to environment" % (other,))
+ return self
+
+ def __add__(self, other):
+ """Add an environment or distribution to an environment"""
+ new = self.__class__([], platform=None, python=None)
+ for env in self, other:
+ new += env
+ return new
+
+
+# XXX backward compatibility
+AvailableDistributions = Environment
+
+
+class ExtractionError(RuntimeError):
+ """An error occurred extracting a resource
+
+ The following attributes are available from instances of this exception:
+
+ manager
+ The resource manager that raised this exception
+
+ cache_path
+ The base directory for resource extraction
+
+ original_error
+ The exception instance that caused extraction to fail
+ """
+
+
+class ResourceManager:
+ """Manage resource extraction and packages"""
+ extraction_path = None
+
+ def __init__(self):
+ self.cached_files = {}
+
+ def resource_exists(self, package_or_requirement, resource_name):
+ """Does the named resource exist?"""
+ return get_provider(package_or_requirement).has_resource(resource_name)
+
+ def resource_isdir(self, package_or_requirement, resource_name):
+ """Is the named resource an existing directory?"""
+ return get_provider(package_or_requirement).resource_isdir(
+ resource_name
+ )
+
+ def resource_filename(self, package_or_requirement, resource_name):
+ """Return a true filesystem path for specified resource"""
+ return get_provider(package_or_requirement).get_resource_filename(
+ self, resource_name
+ )
+
+ def resource_stream(self, package_or_requirement, resource_name):
+ """Return a readable file-like object for specified resource"""
+ return get_provider(package_or_requirement).get_resource_stream(
+ self, resource_name
+ )
+
+ def resource_string(self, package_or_requirement, resource_name):
+ """Return specified resource as a string"""
+ return get_provider(package_or_requirement).get_resource_string(
+ self, resource_name
+ )
+
+ def resource_listdir(self, package_or_requirement, resource_name):
+ """List the contents of the named resource directory"""
+ return get_provider(package_or_requirement).resource_listdir(
+ resource_name
+ )
+
+ def extraction_error(self):
+ """Give an error message for problems extracting file(s)"""
+
+ old_exc = sys.exc_info()[1]
+ cache_path = self.extraction_path or get_default_cache()
+
+ tmpl = textwrap.dedent("""
+ Can't extract file(s) to egg cache
+
+ The following error occurred while trying to extract file(s)
+ to the Python egg cache:
+
+ {old_exc}
+
+ The Python egg cache directory is currently set to:
+
+ {cache_path}
+
+ Perhaps your account does not have write access to this directory?
+ You can change the cache directory by setting the PYTHON_EGG_CACHE
+ environment variable to point to an accessible directory.
+ """).lstrip()
+ err = ExtractionError(tmpl.format(**locals()))
+ err.manager = self
+ err.cache_path = cache_path
+ err.original_error = old_exc
+ raise err
+
+ def get_cache_path(self, archive_name, names=()):
+ """Return absolute location in cache for `archive_name` and `names`
+
+ The parent directory of the resulting path will be created if it does
+ not already exist. `archive_name` should be the base filename of the
+ enclosing egg (which may not be the name of the enclosing zipfile!),
+ including its ".egg" extension. `names`, if provided, should be a
+ sequence of path name parts "under" the egg's extraction location.
+
+ This method should only be called by resource providers that need to
+ obtain an extraction location, and only for names they intend to
+ extract, as it tracks the generated names for possible cleanup later.
+ """
+ extract_path = self.extraction_path or get_default_cache()
+ target_path = os.path.join(extract_path, archive_name + '-tmp', *names)
+ try:
+ _bypass_ensure_directory(target_path)
+ except Exception:
+ self.extraction_error()
+
+ self._warn_unsafe_extraction_path(extract_path)
+
+ self.cached_files[target_path] = 1
+ return target_path
+
+ @staticmethod
+ def _warn_unsafe_extraction_path(path):
+ """
+ If the default extraction path is overridden and set to an insecure
+ location, such as /tmp, it opens up an opportunity for an attacker to
+ replace an extracted file with an unauthorized payload. Warn the user
+ if a known insecure location is used.
+
+ See Distribute #375 for more details.
+ """
+ if os.name == 'nt' and not path.startswith(os.environ['windir']):
+ # On Windows, permissions are generally restrictive by default
+ # and temp directories are not writable by other users, so
+ # bypass the warning.
+ return
+ mode = os.stat(path).st_mode
+ if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
+ msg = (
+ "Extraction path is writable by group/others "
+ "and vulnerable to attack when "
+ "used with get_resource_filename ({path}). "
+ "Consider a more secure "
+ "location (set with .set_extraction_path or the "
+ "PYTHON_EGG_CACHE environment variable)."
+ ).format(**locals())
+ warnings.warn(msg, UserWarning)
+
+ def postprocess(self, tempname, filename):
+ """Perform any platform-specific postprocessing of `tempname`
+
+ This is where Mac header rewrites should be done; other platforms don't
+ have anything special they should do.
+
+ Resource providers should call this method ONLY after successfully
+ extracting a compressed resource. They must NOT call it on resources
+ that are already in the filesystem.
+
+ `tempname` is the current (temporary) name of the file, and `filename`
+ is the name it will be renamed to by the caller after this routine
+ returns.
+ """
+
+ if os.name == 'posix':
+ # Make the resource executable
+ mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
+ os.chmod(tempname, mode)
+
+ def set_extraction_path(self, path):
+ """Set the base path where resources will be extracted to, if needed.
+
+ If you do not call this routine before any extractions take place, the
+ path defaults to the return value of ``get_default_cache()``. (Which
+ is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
+ platform-specific fallbacks. See that routine's documentation for more
+ details.)
+
+ Resources are extracted to subdirectories of this path based upon
+ information given by the ``IResourceProvider``. You may set this to a
+ temporary directory, but then you must call ``cleanup_resources()`` to
+ delete the extracted files when done. There is no guarantee that
+ ``cleanup_resources()`` will be able to remove all extracted files.
+
+ (Note: you may not change the extraction path for a given resource
+ manager once resources have been extracted, unless you first call
+ ``cleanup_resources()``.)
+ """
+ if self.cached_files:
+ raise ValueError(
+ "Can't change extraction path, files already extracted"
+ )
+
+ self.extraction_path = path
+
+ def cleanup_resources(self, force=False):
+ """
+ Delete all extracted resource files and directories, returning a list
+ of the file and directory names that could not be successfully removed.
+ This function does not have any concurrency protection, so it should
+ generally only be called when the extraction path is a temporary
+ directory exclusive to a single process. This method is not
+ automatically called; you must call it explicitly or register it as an
+ ``atexit`` function if you wish to ensure cleanup of a temporary
+ directory used for extractions.
+ """
+ # XXX
+
+
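+# Illustrative sketch (editor's note, not part of upstream pkg_resources):
+# the ResourceManager methods above back the module-level convenience
+# functions installed by _initialize() near the end of this file. Assuming a
+# hypothetical installed package "mypkg" that ships "data/config.json":
+#
+#     import pkg_resources
+#     if pkg_resources.resource_exists('mypkg', 'data/config.json'):
+#         raw = pkg_resources.resource_string('mypkg', 'data/config.json')
+#         path = pkg_resources.resource_filename('mypkg', 'data/config.json')
+
+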
+def get_default_cache():
+ """
+ Return the ``PYTHON_EGG_CACHE`` environment variable
+ or a platform-relevant user cache dir for an app
+ named "Python-Eggs".
+ """
+ return (
+ os.environ.get('PYTHON_EGG_CACHE')
+ or appdirs.user_cache_dir(appname='Python-Eggs')
+ )
+
+
+def safe_name(name):
+ """Convert an arbitrary string to a standard distribution name
+
+ Any runs of non-alphanumeric/. characters are replaced with a single '-'.
+ """
+ return re.sub('[^A-Za-z0-9.]+', '-', name)
+
+
+def safe_version(version):
+ """
+ Convert an arbitrary string to a standard version string
+ """
+ try:
+ # normalize the version
+ return str(packaging.version.Version(version))
+ except packaging.version.InvalidVersion:
+ version = version.replace(' ', '.')
+ return re.sub('[^A-Za-z0-9.]+', '-', version)
+
+
+def safe_extra(extra):
+ """Convert an arbitrary string to a standard 'extra' name
+
+ Any runs of non-alphanumeric characters are replaced with a single '_',
+ and the result is always lowercased.
+ """
+ return re.sub('[^A-Za-z0-9.-]+', '_', extra).lower()
+
+
+def to_filename(name):
+ """Convert a project or version name to its filename-escaped form
+
+ Any '-' characters are currently replaced with '_'.
+ """
+ return name.replace('-', '_')
+
+
+def invalid_marker(text):
+ """
+ Validate text as a PEP 508 environment marker; return an exception
+ if invalid or False otherwise.
+ """
+ try:
+ evaluate_marker(text)
+ except SyntaxError as e:
+ e.filename = None
+ e.lineno = None
+ return e
+ return False
+
+
+def evaluate_marker(text, extra=None):
+ """
+ Evaluate a PEP 508 environment marker.
+ Return a boolean indicating the marker result in this environment.
+ Raise SyntaxError if marker is invalid.
+
+ This implementation uses the 'pyparsing' module.
+ """
+ try:
+ marker = packaging.markers.Marker(text)
+ return marker.evaluate()
+ except packaging.markers.InvalidMarker as e:
+ raise SyntaxError(e) from e
+
+
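+# Illustrative sketch (editor's note, not part of upstream pkg_resources):
+# expected behaviour of the normalization and marker helpers above.
+#
+#     safe_name('my project??')        # -> 'my-project-'
+#     safe_version('1.0-beta')         # -> '1.0b0' (PEP 440 normalization)
+#     to_filename('my-project')        # -> 'my_project'
+#     evaluate_marker('python_version >= "3"')   # -> True on Python 3
+
+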
+class NullProvider:
+ """Try to implement resources and metadata for arbitrary PEP 302 loaders"""
+
+ egg_name = None
+ egg_info = None
+ loader = None
+
+ def __init__(self, module):
+ self.loader = getattr(module, '__loader__', None)
+ self.module_path = os.path.dirname(getattr(module, '__file__', ''))
+
+ def get_resource_filename(self, manager, resource_name):
+ return self._fn(self.module_path, resource_name)
+
+ def get_resource_stream(self, manager, resource_name):
+ return io.BytesIO(self.get_resource_string(manager, resource_name))
+
+ def get_resource_string(self, manager, resource_name):
+ return self._get(self._fn(self.module_path, resource_name))
+
+ def has_resource(self, resource_name):
+ return self._has(self._fn(self.module_path, resource_name))
+
+ def _get_metadata_path(self, name):
+ return self._fn(self.egg_info, name)
+
+ def has_metadata(self, name):
+ if not self.egg_info:
+ return self.egg_info
+
+ path = self._get_metadata_path(name)
+ return self._has(path)
+
+ def get_metadata(self, name):
+ if not self.egg_info:
+ return ""
+ path = self._get_metadata_path(name)
+ value = self._get(path)
+ try:
+ return value.decode('utf-8')
+ except UnicodeDecodeError as exc:
+ # Include the path in the error message to simplify
+ # troubleshooting, and without changing the exception type.
+ exc.reason += ' in {} file at path: {}'.format(name, path)
+ raise
+
+ def get_metadata_lines(self, name):
+ return yield_lines(self.get_metadata(name))
+
+ def resource_isdir(self, resource_name):
+ return self._isdir(self._fn(self.module_path, resource_name))
+
+ def metadata_isdir(self, name):
+ return self.egg_info and self._isdir(self._fn(self.egg_info, name))
+
+ def resource_listdir(self, resource_name):
+ return self._listdir(self._fn(self.module_path, resource_name))
+
+ def metadata_listdir(self, name):
+ if self.egg_info:
+ return self._listdir(self._fn(self.egg_info, name))
+ return []
+
+ def run_script(self, script_name, namespace):
+ script = 'scripts/' + script_name
+ if not self.has_metadata(script):
+ raise ResolutionError(
+ "Script {script!r} not found in metadata at {self.egg_info!r}"
+ .format(**locals()),
+ )
+ script_text = self.get_metadata(script).replace('\r\n', '\n')
+ script_text = script_text.replace('\r', '\n')
+ script_filename = self._fn(self.egg_info, script)
+ namespace['__file__'] = script_filename
+ if os.path.exists(script_filename):
+ with open(script_filename) as fid:
+ source = fid.read()
+ code = compile(source, script_filename, 'exec')
+ exec(code, namespace, namespace)
+ else:
+ from linecache import cache
+ cache[script_filename] = (
+ len(script_text), 0, script_text.split('\n'), script_filename
+ )
+ script_code = compile(script_text, script_filename, 'exec')
+ exec(script_code, namespace, namespace)
+
+ def _has(self, path):
+ raise NotImplementedError(
+ "Can't perform this operation for unregistered loader type"
+ )
+
+ def _isdir(self, path):
+ raise NotImplementedError(
+ "Can't perform this operation for unregistered loader type"
+ )
+
+ def _listdir(self, path):
+ raise NotImplementedError(
+ "Can't perform this operation for unregistered loader type"
+ )
+
+ def _fn(self, base, resource_name):
+ self._validate_resource_path(resource_name)
+ if resource_name:
+ return os.path.join(base, *resource_name.split('/'))
+ return base
+
+ @staticmethod
+ def _validate_resource_path(path):
+ """
+ Validate the resource paths according to the docs.
+ https://setuptools.readthedocs.io/en/latest/pkg_resources.html#basic-resource-access
+
+ >>> warned = getfixture('recwarn')
+ >>> warnings.simplefilter('always')
+ >>> vrp = NullProvider._validate_resource_path
+ >>> vrp('foo/bar.txt')
+ >>> bool(warned)
+ False
+ >>> vrp('../foo/bar.txt')
+ >>> bool(warned)
+ True
+ >>> warned.clear()
+ >>> vrp('/foo/bar.txt')
+ >>> bool(warned)
+ True
+ >>> vrp('foo/../../bar.txt')
+ >>> bool(warned)
+ True
+ >>> warned.clear()
+ >>> vrp('foo/f../bar.txt')
+ >>> bool(warned)
+ False
+
+ Windows path separators are straight-up disallowed.
+ >>> vrp(r'\\foo/bar.txt')
+ Traceback (most recent call last):
+ ...
+ ValueError: Use of .. or absolute path in a resource path \
+is not allowed.
+
+ >>> vrp(r'C:\\foo/bar.txt')
+ Traceback (most recent call last):
+ ...
+ ValueError: Use of .. or absolute path in a resource path \
+is not allowed.
+
+ Blank values are allowed
+
+ >>> vrp('')
+ >>> bool(warned)
+ False
+
+ Non-string values are not.
+
+ >>> vrp(None)
+ Traceback (most recent call last):
+ ...
+ AttributeError: ...
+ """
+ invalid = (
+ os.path.pardir in path.split(posixpath.sep) or
+ posixpath.isabs(path) or
+ ntpath.isabs(path)
+ )
+ if not invalid:
+ return
+
+ msg = "Use of .. or absolute path in a resource path is not allowed."
+
+ # Aggressively disallow Windows absolute paths
+ if ntpath.isabs(path) and not posixpath.isabs(path):
+ raise ValueError(msg)
+
+ # for compatibility, warn; in future
+ # raise ValueError(msg)
+ warnings.warn(
+ msg[:-1] + " and will raise exceptions in a future release.",
+ DeprecationWarning,
+ stacklevel=4,
+ )
+
+ def _get(self, path):
+ if hasattr(self.loader, 'get_data'):
+ return self.loader.get_data(path)
+ raise NotImplementedError(
+ "Can't perform this operation for loaders without 'get_data()'"
+ )
+
+
+register_loader_type(object, NullProvider)
+
+
+def _parents(path):
+ """
+ yield all parents of path including path
+ """
+ last = None
+ while path != last:
+ yield path
+ last = path
+ path, _ = os.path.split(path)
+
+
+class EggProvider(NullProvider):
+ """Provider based on a virtual filesystem"""
+
+ def __init__(self, module):
+ NullProvider.__init__(self, module)
+ self._setup_prefix()
+
+ def _setup_prefix(self):
+ # Assume that metadata may be nested inside a "basket"
+ # of multiple eggs and use module_path instead of .archive.
+ eggs = filter(_is_egg_path, _parents(self.module_path))
+ egg = next(eggs, None)
+ egg and self._set_egg(egg)
+
+ def _set_egg(self, path):
+ self.egg_name = os.path.basename(path)
+ self.egg_info = os.path.join(path, 'EGG-INFO')
+ self.egg_root = path
+
+
+class DefaultProvider(EggProvider):
+ """Provides access to package resources in the filesystem"""
+
+ def _has(self, path):
+ return os.path.exists(path)
+
+ def _isdir(self, path):
+ return os.path.isdir(path)
+
+ def _listdir(self, path):
+ return os.listdir(path)
+
+ def get_resource_stream(self, manager, resource_name):
+ return open(self._fn(self.module_path, resource_name), 'rb')
+
+ def _get(self, path):
+ with open(path, 'rb') as stream:
+ return stream.read()
+
+ @classmethod
+ def _register(cls):
+ loader_names = 'SourceFileLoader', 'SourcelessFileLoader',
+ for name in loader_names:
+ loader_cls = getattr(importlib_machinery, name, type(None))
+ register_loader_type(loader_cls, cls)
+
+
+DefaultProvider._register()
+
+
+class EmptyProvider(NullProvider):
+ """Provider that returns nothing for all requests"""
+
+ module_path = None
+
+ _isdir = _has = lambda self, path: False
+
+ def _get(self, path):
+ return ''
+
+ def _listdir(self, path):
+ return []
+
+ def __init__(self):
+ pass
+
+
+empty_provider = EmptyProvider()
+
+
+class ZipManifests(dict):
+ """
+ zip manifest builder
+ """
+
+ @classmethod
+ def build(cls, path):
+ """
+ Build a dictionary similar to the zipimport directory
+ caches, except instead of tuples, store ZipInfo objects.
+
+ Use a platform-specific path separator (os.sep) for the path keys
+ for compatibility with pypy on Windows.
+ """
+ with zipfile.ZipFile(path) as zfile:
+ items = (
+ (
+ name.replace('/', os.sep),
+ zfile.getinfo(name),
+ )
+ for name in zfile.namelist()
+ )
+ return dict(items)
+
+ load = build
+
+
+class MemoizedZipManifests(ZipManifests):
+ """
+ Memoized zipfile manifests.
+ """
+ manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')
+
+ def load(self, path):
+ """
+ Load a manifest at path or return a suitable manifest already loaded.
+ """
+ path = os.path.normpath(path)
+ mtime = os.stat(path).st_mtime
+
+ if path not in self or self[path].mtime != mtime:
+ manifest = self.build(path)
+ self[path] = self.manifest_mod(manifest, mtime)
+
+ return self[path].manifest
+
+
+class ZipProvider(EggProvider):
+ """Resource support for zips and eggs"""
+
+ eagers = None
+ _zip_manifests = MemoizedZipManifests()
+
+ def __init__(self, module):
+ EggProvider.__init__(self, module)
+ self.zip_pre = self.loader.archive + os.sep
+
+ def _zipinfo_name(self, fspath):
+ # Convert a virtual filename (full path to file) into a zipfile subpath
+ # usable with the zipimport directory cache for our target archive
+ fspath = fspath.rstrip(os.sep)
+ if fspath == self.loader.archive:
+ return ''
+ if fspath.startswith(self.zip_pre):
+ return fspath[len(self.zip_pre):]
+ raise AssertionError(
+ "%s is not a subpath of %s" % (fspath, self.zip_pre)
+ )
+
+ def _parts(self, zip_path):
+ # Convert a zipfile subpath into an egg-relative path part list.
+ # pseudo-fs path
+ fspath = self.zip_pre + zip_path
+ if fspath.startswith(self.egg_root + os.sep):
+ return fspath[len(self.egg_root) + 1:].split(os.sep)
+ raise AssertionError(
+ "%s is not a subpath of %s" % (fspath, self.egg_root)
+ )
+
+ @property
+ def zipinfo(self):
+ return self._zip_manifests.load(self.loader.archive)
+
+ def get_resource_filename(self, manager, resource_name):
+ if not self.egg_name:
+ raise NotImplementedError(
+ "resource_filename() only supported for .egg, not .zip"
+ )
+ # no need to lock for extraction, since we use temp names
+ zip_path = self._resource_to_zip(resource_name)
+ eagers = self._get_eager_resources()
+ if '/'.join(self._parts(zip_path)) in eagers:
+ for name in eagers:
+ self._extract_resource(manager, self._eager_to_zip(name))
+ return self._extract_resource(manager, zip_path)
+
+ @staticmethod
+ def _get_date_and_size(zip_stat):
+ size = zip_stat.file_size
+ # ymdhms+wday, yday, dst
+ date_time = zip_stat.date_time + (0, 0, -1)
+ # 1980 offset already done
+ timestamp = time.mktime(date_time)
+ return timestamp, size
+
+ # FIXME: 'ZipProvider._extract_resource' is too complex (12)
+ def _extract_resource(self, manager, zip_path): # noqa: C901
+
+ if zip_path in self._index():
+ for name in self._index()[zip_path]:
+ last = self._extract_resource(
+ manager, os.path.join(zip_path, name)
+ )
+ # return the extracted directory name
+ return os.path.dirname(last)
+
+ timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
+
+ if not WRITE_SUPPORT:
+ raise IOError('"os.rename" and "os.unlink" are not supported '
+ 'on this platform')
+ try:
+
+ real_path = manager.get_cache_path(
+ self.egg_name, self._parts(zip_path)
+ )
+
+ if self._is_current(real_path, zip_path):
+ return real_path
+
+ outf, tmpnam = _mkstemp(
+ ".$extract",
+ dir=os.path.dirname(real_path),
+ )
+ os.write(outf, self.loader.get_data(zip_path))
+ os.close(outf)
+ utime(tmpnam, (timestamp, timestamp))
+ manager.postprocess(tmpnam, real_path)
+
+ try:
+ rename(tmpnam, real_path)
+
+ except os.error:
+ if os.path.isfile(real_path):
+ if self._is_current(real_path, zip_path):
+ # the file became current since it was checked above,
+ # so proceed.
+ return real_path
+ # Windows, del old file and retry
+ elif os.name == 'nt':
+ unlink(real_path)
+ rename(tmpnam, real_path)
+ return real_path
+ raise
+
+ except os.error:
+ # report a user-friendly error
+ manager.extraction_error()
+
+ return real_path
+
+ def _is_current(self, file_path, zip_path):
+ """
+ Return True if the file_path is current for this zip_path
+ """
+ timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
+ if not os.path.isfile(file_path):
+ return False
+ stat = os.stat(file_path)
+ if stat.st_size != size or stat.st_mtime != timestamp:
+ return False
+ # check that the contents match
+ zip_contents = self.loader.get_data(zip_path)
+ with open(file_path, 'rb') as f:
+ file_contents = f.read()
+ return zip_contents == file_contents
+
+ def _get_eager_resources(self):
+ if self.eagers is None:
+ eagers = []
+ for name in ('native_libs.txt', 'eager_resources.txt'):
+ if self.has_metadata(name):
+ eagers.extend(self.get_metadata_lines(name))
+ self.eagers = eagers
+ return self.eagers
+
+ def _index(self):
+ try:
+ return self._dirindex
+ except AttributeError:
+ ind = {}
+ for path in self.zipinfo:
+ parts = path.split(os.sep)
+ while parts:
+ parent = os.sep.join(parts[:-1])
+ if parent in ind:
+ ind[parent].append(parts[-1])
+ break
+ else:
+ ind[parent] = [parts.pop()]
+ self._dirindex = ind
+ return ind
+
+ def _has(self, fspath):
+ zip_path = self._zipinfo_name(fspath)
+ return zip_path in self.zipinfo or zip_path in self._index()
+
+ def _isdir(self, fspath):
+ return self._zipinfo_name(fspath) in self._index()
+
+ def _listdir(self, fspath):
+ return list(self._index().get(self._zipinfo_name(fspath), ()))
+
+ def _eager_to_zip(self, resource_name):
+ return self._zipinfo_name(self._fn(self.egg_root, resource_name))
+
+ def _resource_to_zip(self, resource_name):
+ return self._zipinfo_name(self._fn(self.module_path, resource_name))
+
+
+register_loader_type(zipimport.zipimporter, ZipProvider)
+
+
+class FileMetadata(EmptyProvider):
+ """Metadata handler for standalone PKG-INFO files
+
+ Usage::
+
+ metadata = FileMetadata("/path/to/PKG-INFO")
+
+ This provider rejects all data and metadata requests except for PKG-INFO,
+ which is treated as existing, and will be the contents of the file at
+ the provided location.
+ """
+
+ def __init__(self, path):
+ self.path = path
+
+ def _get_metadata_path(self, name):
+ return self.path
+
+ def has_metadata(self, name):
+ return name == 'PKG-INFO' and os.path.isfile(self.path)
+
+ def get_metadata(self, name):
+ if name != 'PKG-INFO':
+ raise KeyError("No metadata except PKG-INFO is available")
+
+ with io.open(self.path, encoding='utf-8', errors="replace") as f:
+ metadata = f.read()
+ self._warn_on_replacement(metadata)
+ return metadata
+
+ def _warn_on_replacement(self, metadata):
+ replacement_char = '�'
+ if replacement_char in metadata:
+ tmpl = "{self.path} could not be properly decoded in UTF-8"
+ msg = tmpl.format(**locals())
+ warnings.warn(msg)
+
+ def get_metadata_lines(self, name):
+ return yield_lines(self.get_metadata(name))
+
+
+class PathMetadata(DefaultProvider):
+ """Metadata provider for egg directories
+
+ Usage::
+
+ # Development eggs:
+
+ egg_info = "/path/to/PackageName.egg-info"
+ base_dir = os.path.dirname(egg_info)
+ metadata = PathMetadata(base_dir, egg_info)
+ dist_name = os.path.splitext(os.path.basename(egg_info))[0]
+ dist = Distribution(basedir, project_name=dist_name, metadata=metadata)
+
+ # Unpacked egg directories:
+
+ egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
+ metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
+ dist = Distribution.from_filename(egg_path, metadata=metadata)
+ """
+
+ def __init__(self, path, egg_info):
+ self.module_path = path
+ self.egg_info = egg_info
+
+
+class EggMetadata(ZipProvider):
+ """Metadata provider for .egg files"""
+
+ def __init__(self, importer):
+ """Create a metadata provider from a zipimporter"""
+
+ self.zip_pre = importer.archive + os.sep
+ self.loader = importer
+ if importer.prefix:
+ self.module_path = os.path.join(importer.archive, importer.prefix)
+ else:
+ self.module_path = importer.archive
+ self._setup_prefix()
+
+
+_declare_state('dict', _distribution_finders={})
+
+
+def register_finder(importer_type, distribution_finder):
+ """Register `distribution_finder` to find distributions in sys.path items
+
+ `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
+ handler), and `distribution_finder` is a callable that, passed a path
+ item and the importer instance, yields ``Distribution`` instances found on
+ that path item. See ``pkg_resources.find_on_path`` for an example."""
+ _distribution_finders[importer_type] = distribution_finder
+
+
+def find_distributions(path_item, only=False):
+ """Yield distributions accessible via `path_item`"""
+ importer = get_importer(path_item)
+ finder = _find_adapter(_distribution_finders, importer)
+ return finder(importer, path_item, only)
+
+
+def find_eggs_in_zip(importer, path_item, only=False):
+ """
+ Find eggs in zip files; possibly multiple nested eggs.
+ """
+ if importer.archive.endswith('.whl'):
+ # wheels are not supported with this finder
+ # they don't have PKG-INFO metadata, and won't ever contain eggs
+ return
+ metadata = EggMetadata(importer)
+ if metadata.has_metadata('PKG-INFO'):
+ yield Distribution.from_filename(path_item, metadata=metadata)
+ if only:
+ # don't yield nested distros
+ return
+ for subitem in metadata.resource_listdir(''):
+ if _is_egg_path(subitem):
+ subpath = os.path.join(path_item, subitem)
+ dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath)
+ for dist in dists:
+ yield dist
+ elif subitem.lower().endswith(('.dist-info', '.egg-info')):
+ subpath = os.path.join(path_item, subitem)
+ submeta = EggMetadata(zipimport.zipimporter(subpath))
+ submeta.egg_info = subpath
+ yield Distribution.from_location(path_item, subitem, submeta)
+
+
+register_finder(zipimport.zipimporter, find_eggs_in_zip)
+
+
+def find_nothing(importer, path_item, only=False):
+ return ()
+
+
+register_finder(object, find_nothing)
+
+
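+# Illustrative sketch (editor's note, not part of upstream pkg_resources):
+# the finder machinery above is normally driven through find_distributions(),
+# e.g. to enumerate what is importable from a directory (path is hypothetical):
+#
+#     for dist in find_distributions('/path/to/site-packages'):
+#         print(dist.project_name, dist.version)
+
+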
+def _by_version_descending(names):
+ """
+ Given a list of filenames, return them in descending order
+ by version number.
+
+ >>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg'
+ >>> _by_version_descending(names)
+ ['Python-2.7.10.egg', 'Python-2.7.2.egg', 'foo', 'bar']
+ >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg'
+ >>> _by_version_descending(names)
+ ['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg']
+ >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg'
+ >>> _by_version_descending(names)
+ ['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg']
+ """
+ def _by_version(name):
+ """
+ Parse each component of the filename
+ """
+ name, ext = os.path.splitext(name)
+ parts = itertools.chain(name.split('-'), [ext])
+ return [packaging.version.parse(part) for part in parts]
+
+ return sorted(names, key=_by_version, reverse=True)
+
+
+def find_on_path(importer, path_item, only=False):
+ """Yield distributions accessible on a sys.path directory"""
+ path_item = _normalize_cached(path_item)
+
+ if _is_unpacked_egg(path_item):
+ yield Distribution.from_filename(
+ path_item, metadata=PathMetadata(
+ path_item, os.path.join(path_item, 'EGG-INFO')
+ )
+ )
+ return
+
+ entries = (
+ os.path.join(path_item, child)
+ for child in safe_listdir(path_item)
+ )
+
+ # for performance, before sorting by version,
+ # screen entries for only those that will yield
+ # distributions
+ filtered = (
+ entry
+ for entry in entries
+ if dist_factory(path_item, entry, only)
+ )
+
+ # scan for .egg and .egg-info in directory
+ path_item_entries = _by_version_descending(filtered)
+ for entry in path_item_entries:
+ fullpath = os.path.join(path_item, entry)
+ factory = dist_factory(path_item, entry, only)
+ for dist in factory(fullpath):
+ yield dist
+
+
+def dist_factory(path_item, entry, only):
+ """Return a dist_factory for the given entry."""
+ lower = entry.lower()
+ is_egg_info = lower.endswith('.egg-info')
+ is_dist_info = (
+ lower.endswith('.dist-info') and
+ os.path.isdir(os.path.join(path_item, entry))
+ )
+ is_meta = is_egg_info or is_dist_info
+ return (
+ distributions_from_metadata
+ if is_meta else
+ find_distributions
+ if not only and _is_egg_path(entry) else
+ resolve_egg_link
+ if not only and lower.endswith('.egg-link') else
+ NoDists()
+ )
+
+
+class NoDists:
+ """
+ >>> bool(NoDists())
+ False
+
+ >>> list(NoDists()('anything'))
+ []
+ """
+ def __bool__(self):
+ return False
+
+ def __call__(self, fullpath):
+ return iter(())
+
+
+def safe_listdir(path):
+ """
+ Attempt to list contents of path, but suppress some exceptions.
+ """
+ try:
+ return os.listdir(path)
+ except (PermissionError, NotADirectoryError):
+ pass
+ except OSError as e:
+ # Ignore the directory if it does not exist, is not a directory,
+ # or permission is denied
+ if e.errno not in (errno.ENOTDIR, errno.EACCES, errno.ENOENT):
+ raise
+ return ()
+
+
+def distributions_from_metadata(path):
+ root = os.path.dirname(path)
+ if os.path.isdir(path):
+ if len(os.listdir(path)) == 0:
+ # empty metadata dir; skip
+ return
+ metadata = PathMetadata(root, path)
+ else:
+ metadata = FileMetadata(path)
+ entry = os.path.basename(path)
+ yield Distribution.from_location(
+ root, entry, metadata, precedence=DEVELOP_DIST,
+ )
+
+
+def non_empty_lines(path):
+ """
+ Yield non-empty lines from file at path
+ """
+ with open(path) as f:
+ for line in f:
+ line = line.strip()
+ if line:
+ yield line
+
+
+def resolve_egg_link(path):
+ """
+ Given a path to an .egg-link, resolve distributions
+ present in the referenced path.
+ """
+ referenced_paths = non_empty_lines(path)
+ resolved_paths = (
+ os.path.join(os.path.dirname(path), ref)
+ for ref in referenced_paths
+ )
+ dist_groups = map(find_distributions, resolved_paths)
+ return next(dist_groups, ())
+
+
+register_finder(pkgutil.ImpImporter, find_on_path)
+
+if hasattr(importlib_machinery, 'FileFinder'):
+ register_finder(importlib_machinery.FileFinder, find_on_path)
+
+_declare_state('dict', _namespace_handlers={})
+_declare_state('dict', _namespace_packages={})
+
+
+def register_namespace_handler(importer_type, namespace_handler):
+ """Register `namespace_handler` to declare namespace packages
+
+ `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
+ handler), and `namespace_handler` is a callable like this::
+
+ def namespace_handler(importer, path_entry, moduleName, module):
+ # return a path_entry to use for child packages
+
+ Namespace handlers are only called if the importer object has already
+ agreed that it can handle the relevant path item, and they should only
+ return a subpath if the module __path__ does not already contain an
+ equivalent subpath. For an example namespace handler, see
+ ``pkg_resources.file_ns_handler``.
+ """
+ _namespace_handlers[importer_type] = namespace_handler
+
+
+def _handle_ns(packageName, path_item):
+ """Ensure that named package includes a subpath of path_item (if needed)"""
+
+ importer = get_importer(path_item)
+ if importer is None:
+ return None
+
+ # use find_spec (PEP 451) and fall-back to find_module (PEP 302)
+ try:
+ loader = importer.find_spec(packageName).loader
+ except AttributeError:
+ # capture warnings due to #1111
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ loader = importer.find_module(packageName)
+
+ if loader is None:
+ return None
+ module = sys.modules.get(packageName)
+ if module is None:
+ module = sys.modules[packageName] = types.ModuleType(packageName)
+ module.__path__ = []
+ _set_parent_ns(packageName)
+ elif not hasattr(module, '__path__'):
+ raise TypeError("Not a package:", packageName)
+ handler = _find_adapter(_namespace_handlers, importer)
+ subpath = handler(importer, path_item, packageName, module)
+ if subpath is not None:
+ path = module.__path__
+ path.append(subpath)
+ importlib.import_module(packageName)
+ _rebuild_mod_path(path, packageName, module)
+ return subpath
+
+
+def _rebuild_mod_path(orig_path, package_name, module):
+ """
+ Rebuild module.__path__ ensuring that all entries are ordered
+ corresponding to their sys.path order
+ """
+ sys_path = [_normalize_cached(p) for p in sys.path]
+
+ def safe_sys_path_index(entry):
+ """
+ Workaround for #520 and #513.
+ """
+ try:
+ return sys_path.index(entry)
+ except ValueError:
+ return float('inf')
+
+ def position_in_sys_path(path):
+ """
+ Return the ordinal of the path based on its position in sys.path
+ """
+ path_parts = path.split(os.sep)
+ module_parts = package_name.count('.') + 1
+ parts = path_parts[:-module_parts]
+ return safe_sys_path_index(_normalize_cached(os.sep.join(parts)))
+
+ new_path = sorted(orig_path, key=position_in_sys_path)
+ new_path = [_normalize_cached(p) for p in new_path]
+
+ if isinstance(module.__path__, list):
+ module.__path__[:] = new_path
+ else:
+ module.__path__ = new_path
+
+
+def declare_namespace(packageName):
+ """Declare that package 'packageName' is a namespace package"""
+
+ _imp.acquire_lock()
+ try:
+ if packageName in _namespace_packages:
+ return
+
+ path = sys.path
+ parent, _, _ = packageName.rpartition('.')
+
+ if parent:
+ declare_namespace(parent)
+ if parent not in _namespace_packages:
+ __import__(parent)
+ try:
+ path = sys.modules[parent].__path__
+ except AttributeError as e:
+ raise TypeError("Not a package:", parent) from e
+
+ # Track what packages are namespaces, so when new path items are added,
+ # they can be updated
+ _namespace_packages.setdefault(parent or None, []).append(packageName)
+ _namespace_packages.setdefault(packageName, [])
+
+ for path_item in path:
+ # Ensure all the parent's path items are reflected in the child,
+ # if they apply
+ _handle_ns(packageName, path_item)
+
+ finally:
+ _imp.release_lock()
+
+
+def fixup_namespace_packages(path_item, parent=None):
+ """Ensure that previously-declared namespace packages include path_item"""
+ _imp.acquire_lock()
+ try:
+ for package in _namespace_packages.get(parent, ()):
+ subpath = _handle_ns(package, path_item)
+ if subpath:
+ fixup_namespace_packages(subpath, package)
+ finally:
+ _imp.release_lock()
+
+
+def file_ns_handler(importer, path_item, packageName, module):
+ """Compute an ns-package subpath for a filesystem or zipfile importer"""
+
+ subpath = os.path.join(path_item, packageName.split('.')[-1])
+ normalized = _normalize_cached(subpath)
+ for item in module.__path__:
+ if _normalize_cached(item) == normalized:
+ break
+ else:
+ # Only return the path if it's not already there
+ return subpath
+
+
+register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
+register_namespace_handler(zipimport.zipimporter, file_ns_handler)
+
+if hasattr(importlib_machinery, 'FileFinder'):
+ register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
+
+
+def null_ns_handler(importer, path_item, packageName, module):
+ return None
+
+
+register_namespace_handler(object, null_ns_handler)
+
+
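+# Illustrative sketch (editor's note, not part of upstream pkg_resources):
+# legacy namespace-package portions invoke declare_namespace() from their
+# __init__.py so the handlers above can merge each portion's path:
+#
+#     # mypkg/__init__.py in every distribution providing the "mypkg" namespace
+#     __import__('pkg_resources').declare_namespace(__name__)
+
+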
+def normalize_path(filename):
+ """Normalize a file/dir name for comparison purposes"""
+ return os.path.normcase(os.path.realpath(os.path.normpath(
+ _cygwin_patch(filename))))
+
+
+def _cygwin_patch(filename): # pragma: nocover
+ """
+ Contrary to POSIX 2008, on Cygwin, getcwd (3) contains
+ symlink components. Using
+ os.path.abspath() works around this limitation. A fix in os.getcwd()
+ would probably be better, in Cygwin even more so, except
+ that this seems to be by design...
+ """
+ return os.path.abspath(filename) if sys.platform == 'cygwin' else filename
+
+
+def _normalize_cached(filename, _cache={}):
+ try:
+ return _cache[filename]
+ except KeyError:
+ _cache[filename] = result = normalize_path(filename)
+ return result
+
+
+def _is_egg_path(path):
+ """
+ Determine if given path appears to be an egg.
+ """
+ return _is_zip_egg(path) or _is_unpacked_egg(path)
+
+
+def _is_zip_egg(path):
+ return (
+ path.lower().endswith('.egg') and
+ os.path.isfile(path) and
+ zipfile.is_zipfile(path)
+ )
+
+
+def _is_unpacked_egg(path):
+ """
+ Determine if given path appears to be an unpacked egg.
+ """
+ return (
+ path.lower().endswith('.egg') and
+ os.path.isfile(os.path.join(path, 'EGG-INFO', 'PKG-INFO'))
+ )
+
+
+def _set_parent_ns(packageName):
+ parts = packageName.split('.')
+ name = parts.pop()
+ if parts:
+ parent = '.'.join(parts)
+ setattr(sys.modules[parent], name, sys.modules[packageName])
+
+
+def yield_lines(strs):
+ """Yield non-empty/non-comment lines of a string or sequence"""
+ if isinstance(strs, str):
+ for s in strs.splitlines():
+ s = s.strip()
+ # skip blank lines/comments
+ if s and not s.startswith('#'):
+ yield s
+ else:
+ for ss in strs:
+ for s in yield_lines(ss):
+ yield s
+
+
+MODULE = re.compile(r"\w+(\.\w+)*$").match
+EGG_NAME = re.compile(
+ r"""
+ (?P<name>[^-]+) (
+ -(?P<ver>[^-]+) (
+ -py(?P<pyver>[^-]+) (
+ -(?P<plat>.+)
+ )?
+ )?
+ )?
+ """,
+ re.VERBOSE | re.IGNORECASE,
+).match
+
+
+class EntryPoint:
+ """Object representing an advertised importable object"""
+
+ def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
+ if not MODULE(module_name):
+ raise ValueError("Invalid module name", module_name)
+ self.name = name
+ self.module_name = module_name
+ self.attrs = tuple(attrs)
+ self.extras = tuple(extras)
+ self.dist = dist
+
+ def __str__(self):
+ s = "%s = %s" % (self.name, self.module_name)
+ if self.attrs:
+ s += ':' + '.'.join(self.attrs)
+ if self.extras:
+ s += ' [%s]' % ','.join(self.extras)
+ return s
+
+ def __repr__(self):
+ return "EntryPoint.parse(%r)" % str(self)
+
+ def load(self, require=True, *args, **kwargs):
+ """
+ Require packages for this EntryPoint, then resolve it.
+ """
+ if not require or args or kwargs:
+ warnings.warn(
+ "Parameters to load are deprecated. Call .resolve and "
+ ".require separately.",
+ PkgResourcesDeprecationWarning,
+ stacklevel=2,
+ )
+ if require:
+ self.require(*args, **kwargs)
+ return self.resolve()
+
+ def resolve(self):
+ """
+ Resolve the entry point from its module and attrs.
+ """
+ module = __import__(self.module_name, fromlist=['__name__'], level=0)
+ try:
+ return functools.reduce(getattr, self.attrs, module)
+ except AttributeError as exc:
+ raise ImportError(str(exc)) from exc
+
+ def require(self, env=None, installer=None):
+ if self.extras and not self.dist:
+ raise UnknownExtra("Can't require() without a distribution", self)
+
+ # Get the requirements for this entry point with all its extras and
+ # then resolve them. We have to pass `extras` along when resolving so
+ # that the working set knows what extras we want. Otherwise, for
+ # dist-info distributions, the working set will assume that the
+ # requirements for that extra are purely optional and skip over them.
+ reqs = self.dist.requires(self.extras)
+ items = working_set.resolve(reqs, env, installer, extras=self.extras)
+ list(map(working_set.add, items))
+
+ pattern = re.compile(
+ r'\s*'
+ r'(?P<name>.+?)\s*'
+ r'=\s*'
+ r'(?P<module>[\w.]+)\s*'
+ r'(:\s*(?P<attr>[\w.]+))?\s*'
+ r'(?P<extras>\[.*\])?\s*$'
+ )
+
+ @classmethod
+ def parse(cls, src, dist=None):
+ """Parse a single entry point from string `src`
+
+ Entry point syntax follows the form::
+
+ name = some.module:some.attr [extra1, extra2]
+
+ The entry name and module name are required, but the ``:attrs`` and
+ ``[extras]`` parts are optional
+ """
+ m = cls.pattern.match(src)
+ if not m:
+ msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
+ raise ValueError(msg, src)
+ res = m.groupdict()
+ extras = cls._parse_extras(res['extras'])
+ attrs = res['attr'].split('.') if res['attr'] else ()
+ return cls(res['name'], res['module'], attrs, extras, dist)
+
+ @classmethod
+ def _parse_extras(cls, extras_spec):
+ if not extras_spec:
+ return ()
+ req = Requirement.parse('x' + extras_spec)
+ if req.specs:
+ raise ValueError()
+ return req.extras
+
+ @classmethod
+ def parse_group(cls, group, lines, dist=None):
+ """Parse an entry point group"""
+ if not MODULE(group):
+ raise ValueError("Invalid group name", group)
+ this = {}
+ for line in yield_lines(lines):
+ ep = cls.parse(line, dist)
+ if ep.name in this:
+ raise ValueError("Duplicate entry point", group, ep.name)
+ this[ep.name] = ep
+ return this
+
+ @classmethod
+ def parse_map(cls, data, dist=None):
+ """Parse a map of entry point groups"""
+ if isinstance(data, dict):
+ data = data.items()
+ else:
+ data = split_sections(data)
+ maps = {}
+ for group, lines in data:
+ if group is None:
+ if not lines:
+ continue
+ raise ValueError("Entry points must be listed in groups")
+ group = group.strip()
+ if group in maps:
+ raise ValueError("Duplicate group name", group)
+ maps[group] = cls.parse_group(group, lines, dist)
+ return maps
+
+
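+# Illustrative sketch (editor's note, not part of upstream pkg_resources):
+# parsing an entry point with the syntax documented in EntryPoint.parse();
+# the names used here are hypothetical.
+#
+#     ep = EntryPoint.parse('mytool = mypkg.cli:main [extra1]')
+#     ep.name         # 'mytool'
+#     ep.module_name  # 'mypkg.cli'
+#     ep.attrs        # ('main',)
+#     ep.extras       # ('extra1',)
+
+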
+def _version_from_file(lines):
+ """
+ Given an iterable of lines from a Metadata file, return
+ the value of the Version field, if present, or None otherwise.
+ """
+ def is_version_line(line):
+ return line.lower().startswith('version:')
+ version_lines = filter(is_version_line, lines)
+ line = next(iter(version_lines), '')
+ _, _, value = line.partition(':')
+ return safe_version(value.strip()) or None
+
+
+class Distribution:
+ """Wrap an actual or potential sys.path entry w/metadata"""
+ PKG_INFO = 'PKG-INFO'
+
+ def __init__(
+ self, location=None, metadata=None, project_name=None,
+ version=None, py_version=PY_MAJOR, platform=None,
+ precedence=EGG_DIST):
+ self.project_name = safe_name(project_name or 'Unknown')
+ if version is not None:
+ self._version = safe_version(version)
+ self.py_version = py_version
+ self.platform = platform
+ self.location = location
+ self.precedence = precedence
+ self._provider = metadata or empty_provider
+
+ @classmethod
+ def from_location(cls, location, basename, metadata=None, **kw):
+ project_name, version, py_version, platform = [None] * 4
+ basename, ext = os.path.splitext(basename)
+ if ext.lower() in _distributionImpl:
+ cls = _distributionImpl[ext.lower()]
+
+ match = EGG_NAME(basename)
+ if match:
+ project_name, version, py_version, platform = match.group(
+ 'name', 'ver', 'pyver', 'plat'
+ )
+ return cls(
+ location, metadata, project_name=project_name, version=version,
+ py_version=py_version, platform=platform, **kw
+ )._reload_version()
+
+ def _reload_version(self):
+ return self
+
+ @property
+ def hashcmp(self):
+ return (
+ self.parsed_version,
+ self.precedence,
+ self.key,
+ self.location,
+ self.py_version or '',
+ self.platform or '',
+ )
+
+ def __hash__(self):
+ return hash(self.hashcmp)
+
+ def __lt__(self, other):
+ return self.hashcmp < other.hashcmp
+
+ def __le__(self, other):
+ return self.hashcmp <= other.hashcmp
+
+ def __gt__(self, other):
+ return self.hashcmp > other.hashcmp
+
+ def __ge__(self, other):
+ return self.hashcmp >= other.hashcmp
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ # It's not a Distribution, so they are not equal
+ return False
+ return self.hashcmp == other.hashcmp
+
+ def __ne__(self, other):
+ return not self == other
+
+ # These properties have to be lazy so that we don't have to load any
+ # metadata until/unless it's actually needed. (i.e., some distributions
+ # may not know their name or version without loading PKG-INFO)
+
+ @property
+ def key(self):
+ try:
+ return self._key
+ except AttributeError:
+ self._key = key = self.project_name.lower()
+ return key
+
+ @property
+ def parsed_version(self):
+ if not hasattr(self, "_parsed_version"):
+ self._parsed_version = parse_version(self.version)
+
+ return self._parsed_version
+
+ def _warn_legacy_version(self):
+ LV = packaging.version.LegacyVersion
+ is_legacy = isinstance(self._parsed_version, LV)
+ if not is_legacy:
+ return
+
+ # While an empty version is technically a legacy version and
+ # is not a valid PEP 440 version, it's also unlikely to
+ # actually come from someone and instead it is more likely that
+ # it comes from setuptools attempting to parse a filename and
+ # including it in the list. So for that we'll gate this warning
+ # on if the version is anything at all or not.
+ if not self.version:
+ return
+
+ tmpl = textwrap.dedent("""
+ '{project_name} ({version})' is being parsed as a legacy,
+ non PEP 440,
+ version. You may find odd behavior and sort order.
+ In particular it will be sorted as less than 0.0. It
+ is recommended to migrate to PEP 440 compatible
+ versions.
+ """).strip().replace('\n', ' ')
+
+ warnings.warn(tmpl.format(**vars(self)), PEP440Warning)
+
+ @property
+ def version(self):
+ try:
+ return self._version
+ except AttributeError as e:
+ version = self._get_version()
+ if version is None:
+ path = self._get_metadata_path_for_display(self.PKG_INFO)
+ msg = (
+ "Missing 'Version:' header and/or {} file at path: {}"
+ ).format(self.PKG_INFO, path)
+ raise ValueError(msg, self) from e
+
+ return version
+
+ @property
+ def _dep_map(self):
+ """
+ A map of extra to its list of (direct) requirements
+ for this distribution, including the null extra.
+ """
+ try:
+ return self.__dep_map
+ except AttributeError:
+ self.__dep_map = self._filter_extras(self._build_dep_map())
+ return self.__dep_map
+
+ @staticmethod
+ def _filter_extras(dm):
+ """
+ Given a mapping of extras to dependencies, strip off
+ environment markers and filter out any dependencies
+ not matching the markers.
+ """
+ for extra in list(filter(None, dm)):
+ new_extra = extra
+ reqs = dm.pop(extra)
+ new_extra, _, marker = extra.partition(':')
+ fails_marker = marker and (
+ invalid_marker(marker)
+ or not evaluate_marker(marker)
+ )
+ if fails_marker:
+ reqs = []
+ new_extra = safe_extra(new_extra) or None
+
+ dm.setdefault(new_extra, []).extend(reqs)
+ return dm
+
+ def _build_dep_map(self):
+ dm = {}
+ for name in 'requires.txt', 'depends.txt':
+ for extra, reqs in split_sections(self._get_metadata(name)):
+ dm.setdefault(extra, []).extend(parse_requirements(reqs))
+ return dm
+
+ def requires(self, extras=()):
+ """List of Requirements needed for this distro if `extras` are used"""
+ dm = self._dep_map
+ deps = []
+ deps.extend(dm.get(None, ()))
+ for ext in extras:
+ try:
+ deps.extend(dm[safe_extra(ext)])
+ except KeyError as e:
+ raise UnknownExtra(
+ "%s has no such extra feature %r" % (self, ext)
+ ) from e
+ return deps
+
+ def _get_metadata_path_for_display(self, name):
+ """
+ Return the path to the given metadata file, if available.
+ """
+ try:
+ # We need to access _get_metadata_path() on the provider object
+ # directly rather than through this class's __getattr__()
+ # since _get_metadata_path() is marked private.
+ path = self._provider._get_metadata_path(name)
+
+ # Handle exceptions e.g. in case the distribution's metadata
+ # provider doesn't support _get_metadata_path().
+ except Exception:
+ return '[could not detect]'
+
+ return path
+
+ def _get_metadata(self, name):
+ if self.has_metadata(name):
+ for line in self.get_metadata_lines(name):
+ yield line
+
+ def _get_version(self):
+ lines = self._get_metadata(self.PKG_INFO)
+ version = _version_from_file(lines)
+
+ return version
+
+ def activate(self, path=None, replace=False):
+ """Ensure distribution is importable on `path` (default=sys.path)"""
+ if path is None:
+ path = sys.path
+ self.insert_on(path, replace=replace)
+ if path is sys.path:
+ fixup_namespace_packages(self.location)
+ for pkg in self._get_metadata('namespace_packages.txt'):
+ if pkg in sys.modules:
+ declare_namespace(pkg)
+
+ def egg_name(self):
+ """Return what this distribution's standard .egg filename should be"""
+ filename = "%s-%s-py%s" % (
+ to_filename(self.project_name), to_filename(self.version),
+ self.py_version or PY_MAJOR
+ )
+
+ if self.platform:
+ filename += '-' + self.platform
+ return filename
+
+ def __repr__(self):
+ if self.location:
+ return "%s (%s)" % (self, self.location)
+ else:
+ return str(self)
+
+ def __str__(self):
+ try:
+ version = getattr(self, 'version', None)
+ except ValueError:
+ version = None
+ version = version or "[unknown version]"
+ return "%s %s" % (self.project_name, version)
+
+ def __getattr__(self, attr):
+ """Delegate all unrecognized public attributes to .metadata provider"""
+ if attr.startswith('_'):
+ raise AttributeError(attr)
+ return getattr(self._provider, attr)
+
+ def __dir__(self):
+ return list(
+ set(super(Distribution, self).__dir__())
+ | set(
+ attr for attr in self._provider.__dir__()
+ if not attr.startswith('_')
+ )
+ )
+
+ @classmethod
+ def from_filename(cls, filename, metadata=None, **kw):
+ return cls.from_location(
+ _normalize_cached(filename), os.path.basename(filename), metadata,
+ **kw
+ )
+
+ def as_requirement(self):
+ """Return a ``Requirement`` that matches this distribution exactly"""
+ if isinstance(self.parsed_version, packaging.version.Version):
+ spec = "%s==%s" % (self.project_name, self.parsed_version)
+ else:
+ spec = "%s===%s" % (self.project_name, self.parsed_version)
+
+ return Requirement.parse(spec)
+
+ def load_entry_point(self, group, name):
+ """Return the `name` entry point of `group` or raise ImportError"""
+ ep = self.get_entry_info(group, name)
+ if ep is None:
+ raise ImportError("Entry point %r not found" % ((group, name),))
+ return ep.load()
+
+ def get_entry_map(self, group=None):
+ """Return the entry point map for `group`, or the full entry map"""
+ try:
+ ep_map = self._ep_map
+ except AttributeError:
+ ep_map = self._ep_map = EntryPoint.parse_map(
+ self._get_metadata('entry_points.txt'), self
+ )
+ if group is not None:
+ return ep_map.get(group, {})
+ return ep_map
+
+ def get_entry_info(self, group, name):
+ """Return the EntryPoint object for `group`+`name`, or ``None``"""
+ return self.get_entry_map(group).get(name)
+
+ # FIXME: 'Distribution.insert_on' is too complex (13)
+ def insert_on(self, path, loc=None, replace=False): # noqa: C901
+ """Ensure self.location is on path
+
+ If replace=False (default):
+ - If location is already in path anywhere, do nothing.
+ - Else:
+ - If it's an egg and its parent directory is on path,
+ insert just ahead of the parent.
+ - Else: add to the end of path.
+ If replace=True:
+ - If location is already on path anywhere (not eggs)
+ or higher priority than its parent (eggs)
+ do nothing.
+ - Else:
+ - If it's an egg and its parent directory is on path,
+ insert just ahead of the parent,
+ removing any lower-priority entries.
+ - Else: add it to the front of path.
+ """
+
+ loc = loc or self.location
+ if not loc:
+ return
+
+ nloc = _normalize_cached(loc)
+ bdir = os.path.dirname(nloc)
+ npath = [(p and _normalize_cached(p) or p) for p in path]
+
+ for p, item in enumerate(npath):
+ if item == nloc:
+ if replace:
+ break
+ else:
+ # don't modify path (even removing duplicates) if
+ # found and not replace
+ return
+ elif item == bdir and self.precedence == EGG_DIST:
+ # if it's an .egg, give it precedence over its directory
+ # UNLESS it's already been added to sys.path and replace=False
+ if (not replace) and nloc in npath[p:]:
+ return
+ if path is sys.path:
+ self.check_version_conflict()
+ path.insert(p, loc)
+ npath.insert(p, nloc)
+ break
+ else:
+ if path is sys.path:
+ self.check_version_conflict()
+ if replace:
+ path.insert(0, loc)
+ else:
+ path.append(loc)
+ return
+
+ # p is the spot where we found or inserted loc; now remove duplicates
+ while True:
+ try:
+ np = npath.index(nloc, p + 1)
+ except ValueError:
+ break
+ else:
+ del npath[np], path[np]
+ # ha!
+ p = np
+
+ return
+
+ def check_version_conflict(self):
+ if self.key == 'setuptools':
+ # ignore the inevitable setuptools self-conflicts :(
+ return
+
+ nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
+ loc = normalize_path(self.location)
+ for modname in self._get_metadata('top_level.txt'):
+ if (modname not in sys.modules or modname in nsp
+ or modname in _namespace_packages):
+ continue
+ if modname in ('pkg_resources', 'setuptools', 'site'):
+ continue
+ fn = getattr(sys.modules[modname], '__file__', None)
+ if fn and (normalize_path(fn).startswith(loc) or
+ fn.startswith(self.location)):
+ continue
+ issue_warning(
+ "Module %s was already imported from %s, but %s is being added"
+ " to sys.path" % (modname, fn, self.location),
+ )
+
+ def has_version(self):
+ try:
+ self.version
+ except ValueError:
+ issue_warning("Unbuilt egg for " + repr(self))
+ return False
+ return True
+
+ def clone(self, **kw):
+ """Copy this distribution, substituting in any changed keyword args"""
+ names = 'project_name version py_version platform location precedence'
+ for attr in names.split():
+ kw.setdefault(attr, getattr(self, attr, None))
+ kw.setdefault('metadata', self._provider)
+ return self.__class__(**kw)
+
+ @property
+ def extras(self):
+ return [dep for dep in self._dep_map if dep]
+
+
+class EggInfoDistribution(Distribution):
+ def _reload_version(self):
+ """
+ Packages installed by distutils (e.g. numpy or scipy),
+ which uses an old safe_version, can have their version numbers
+ mangled when converted to filenames (e.g., 1.11.0.dev0+2329eae to
+ 1.11.0.dev0_2329eae). These distributions will not be parsed
+ properly downstream by Distribution and safe_version, so take an
+ extra step and try to get the version number from the metadata
+ file itself instead of the filename.
+ """
+ md_version = self._get_version()
+ if md_version:
+ self._version = md_version
+ return self
+
+
+class DistInfoDistribution(Distribution):
+ """
+ Wrap an actual or potential sys.path entry
+ w/metadata, .dist-info style.
+ """
+ PKG_INFO = 'METADATA'
+ EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
+
+ @property
+ def _parsed_pkg_info(self):
+ """Parse and cache metadata"""
+ try:
+ return self._pkg_info
+ except AttributeError:
+ metadata = self.get_metadata(self.PKG_INFO)
+ self._pkg_info = email.parser.Parser().parsestr(metadata)
+ return self._pkg_info
+
+ @property
+ def _dep_map(self):
+ try:
+ return self.__dep_map
+ except AttributeError:
+ self.__dep_map = self._compute_dependencies()
+ return self.__dep_map
+
+ def _compute_dependencies(self):
+ """Recompute this distribution's dependencies."""
+ dm = self.__dep_map = {None: []}
+
+ reqs = []
+ # Including any condition expressions
+ for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
+ reqs.extend(parse_requirements(req))
+
+ def reqs_for_extra(extra):
+ for req in reqs:
+ if not req.marker or req.marker.evaluate({'extra': extra}):
+ yield req
+
+ common = frozenset(reqs_for_extra(None))
+ dm[None].extend(common)
+
+ for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
+ s_extra = safe_extra(extra.strip())
+ dm[s_extra] = list(frozenset(reqs_for_extra(extra)) - common)
+
+ return dm
+
+
+_distributionImpl = {
+ '.egg': Distribution,
+ '.egg-info': EggInfoDistribution,
+ '.dist-info': DistInfoDistribution,
+}
+
+
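+# Illustrative sketch (editor's note, not part of upstream pkg_resources):
+# constructing a Distribution from an egg filename using the classes above
+# (the filename is hypothetical).
+#
+#     dist = Distribution.from_filename('FooPkg-1.2-py3.9.egg')
+#     dist.project_name       # 'FooPkg'
+#     dist.version            # '1.2'
+#     dist.py_version         # '3.9'
+#     dist.as_requirement()   # Requirement.parse('FooPkg==1.2')
+
+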
+def issue_warning(*args, **kw):
+ level = 1
+ g = globals()
+ try:
+ # find the first stack frame that is *not* code in
+ # the pkg_resources module, to use for the warning
+ while sys._getframe(level).f_globals is g:
+ level += 1
+ except ValueError:
+ pass
+ warnings.warn(stacklevel=level + 1, *args, **kw)
+
+
+def parse_requirements(strs):
+ """Yield ``Requirement`` objects for each specification in `strs`
+
+ `strs` must be a string, or a (possibly-nested) iterable thereof.
+ """
+ # create a steppable iterator, so we can handle \-continuations
+ lines = iter(yield_lines(strs))
+
+ for line in lines:
+ # Drop comments -- a hash without a space may be in a URL.
+ if ' #' in line:
+ line = line[:line.find(' #')]
+ # If there is a line continuation, drop it, and append the next line.
+ if line.endswith('\\'):
+ line = line[:-2].strip()
+ try:
+ line += next(lines)
+ except StopIteration:
+ return
+ yield Requirement(line)
+
+
+class RequirementParseError(packaging.requirements.InvalidRequirement):
+ "Compatibility wrapper for InvalidRequirement"
+
+
+class Requirement(packaging.requirements.Requirement):
+ def __init__(self, requirement_string):
+ """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
+ super(Requirement, self).__init__(requirement_string)
+ self.unsafe_name = self.name
+ project_name = safe_name(self.name)
+ self.project_name, self.key = project_name, project_name.lower()
+ self.specs = [
+ (spec.operator, spec.version) for spec in self.specifier]
+ self.extras = tuple(map(safe_extra, self.extras))
+ self.hashCmp = (
+ self.key,
+ self.url,
+ self.specifier,
+ frozenset(self.extras),
+ str(self.marker) if self.marker else None,
+ )
+ self.__hash = hash(self.hashCmp)
+
+ def __eq__(self, other):
+ return (
+ isinstance(other, Requirement) and
+ self.hashCmp == other.hashCmp
+ )
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __contains__(self, item):
+ if isinstance(item, Distribution):
+ if item.key != self.key:
+ return False
+
+ item = item.version
+
+ # Allow prereleases always in order to match the previous behavior of
+ # this method. In the future this should be smarter and follow PEP 440
+ # more accurately.
+ return self.specifier.contains(item, prereleases=True)
+
+ def __hash__(self):
+ return self.__hash
+
+ def __repr__(self):
+ return "Requirement.parse(%r)" % str(self)
+
+ @staticmethod
+ def parse(s):
+ req, = parse_requirements(s)
+ return req
+
+
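+# Illustrative sketch (editor's note, not part of upstream pkg_resources):
+# typical use of the Requirement wrapper defined above.
+#
+#     req = Requirement.parse('SomePackage[extra1]>=1.0')
+#     req.key        # 'somepackage'
+#     req.specs      # [('>=', '1.0')]
+#     req.extras     # ('extra1',)
+#     dist in req    # True if dist.key matches and its version satisfies >=1.0
+
+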
+def _always_object(classes):
+ """
+ Ensure object appears in the mro even
+ for old-style classes.
+ """
+ if object not in classes:
+ return classes + (object,)
+ return classes
+
+
+def _find_adapter(registry, ob):
+ """Return an adapter factory for `ob` from `registry`"""
+ types = _always_object(inspect.getmro(getattr(ob, '__class__', type(ob))))
+ for t in types:
+ if t in registry:
+ return registry[t]
+
+
+def ensure_directory(path):
+ """Ensure that the parent directory of `path` exists"""
+ dirname = os.path.dirname(path)
+ os.makedirs(dirname, exist_ok=True)
+
+
+def _bypass_ensure_directory(path):
+ """Sandbox-bypassing version of ensure_directory()"""
+ if not WRITE_SUPPORT:
+ raise IOError('"os.mkdir" not supported on this platform.')
+ dirname, filename = split(path)
+ if dirname and filename and not isdir(dirname):
+ _bypass_ensure_directory(dirname)
+ try:
+ mkdir(dirname, 0o755)
+ except FileExistsError:
+ pass
+
+
+def split_sections(s):
+ """Split a string or iterable thereof into (section, content) pairs
+
+ Each ``section`` is a stripped version of the section header ("[section]")
+ and each ``content`` is a list of stripped lines excluding blank lines and
+ comment-only lines. If there are any such lines before the first section
+ header, they're returned in a first ``section`` of ``None``.
+ """
+ section = None
+ content = []
+ for line in yield_lines(s):
+ if line.startswith("["):
+ if line.endswith("]"):
+ if section or content:
+ yield section, content
+ section = line[1:-1].strip()
+ content = []
+ else:
+ raise ValueError("Invalid section heading", line)
+ else:
+ content.append(line)
+
+ # wrap up last segment
+ yield section, content
+
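+# A short sketch of split_sections() on entry-points-style text (the section
+# and entry names are hypothetical):
+#
+#     list(split_sections(["[console_scripts]", "tool = pkg.mod:main"]))
+#     # -> [('console_scripts', ['tool = pkg.mod:main'])]
+#     list(split_sections(["x = 1", "[a]", "y = 2"]))
+#     # -> [(None, ['x = 1']), ('a', ['y = 2'])]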
+
+def _mkstemp(*args, **kw):
+ old_open = os.open
+ try:
+ # temporarily bypass sandboxing
+ os.open = os_open
+ return tempfile.mkstemp(*args, **kw)
+ finally:
+ # and then put it back
+ os.open = old_open
+
+
+# Silence the PEP440Warning by default, so that end users don't get hit by it
+# randomly just because they use pkg_resources. We want to append the rule
+# because we want earlier uses of filterwarnings to take precedence over this
+# one.
+warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
+
+
+# from jaraco.functools 1.3
+def _call_aside(f, *args, **kwargs):
+ f(*args, **kwargs)
+ return f
+
+
+@_call_aside
+def _initialize(g=globals()):
+ "Set up global resource manager (deliberately not state-saved)"
+ manager = ResourceManager()
+ g['_manager'] = manager
+ g.update(
+ (name, getattr(manager, name))
+ for name in dir(manager)
+ if not name.startswith('_')
+ )
+
+
+@_call_aside
+def _initialize_master_working_set():
+ """
+ Prepare the master working set and make the ``require()``
+ API available.
+
+ This function has explicit effects on the global state
+ of pkg_resources. It is intended to be invoked once at
+ the initialization of this module.
+
+ Invocation by other packages is unsupported and done
+ at their own risk.
+ """
+ working_set = WorkingSet._build_master()
+ _declare_state('object', working_set=working_set)
+
+ require = working_set.require
+ iter_entry_points = working_set.iter_entry_points
+ add_activation_listener = working_set.subscribe
+ run_script = working_set.run_script
+ # backward compatibility
+ run_main = run_script
+ # Activate all distributions already on sys.path with replace=False and
+ # ensure that all distributions added to the working set in the future
+ # (e.g. by calling ``require()``) will get activated as well,
+ # with higher priority (replace=True).
+ tuple(
+ dist.activate(replace=False)
+ for dist in working_set
+ )
+ add_activation_listener(
+ lambda dist: dist.activate(replace=True),
+ existing=False,
+ )
+ working_set.entries = []
+ # match order
+ list(map(working_set.add_entry, sys.path))
+ globals().update(locals())
+
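+# Once the module is imported, the master working set backs the module-level
+# helpers bound above; a minimal sketch (the requirement string is illustrative):
+#
+#     import pkg_resources
+#     pkg_resources.require("setuptools")          # resolve against sys.path
+#     for ep in pkg_resources.iter_entry_points("console_scripts"):
+#         print(ep.name, ep.module_name)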
+
+class PkgResourcesDeprecationWarning(Warning):
+ """
+ Base class for warning about deprecations in ``pkg_resources``
+
+ This class is not derived from ``DeprecationWarning``, and as such is
+ visible by default.
+ """
diff --git a/venv/lib/python3.9/site-packages/pkg_resources/_vendor/__init__.py b/venv/lib/python3.9/site-packages/pkg_resources/_vendor/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/venv/lib/python3.9/site-packages/pkg_resources/_vendor/appdirs.py b/venv/lib/python3.9/site-packages/pkg_resources/_vendor/appdirs.py
new file mode 100644
index 0000000..ae67001
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pkg_resources/_vendor/appdirs.py
@@ -0,0 +1,608 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2005-2010 ActiveState Software Inc.
+# Copyright (c) 2013 Eddy Petrișor
+
+"""Utilities for determining application-specific dirs.
+
+See <http://github.com/ActiveState/appdirs> for details and usage.
+"""
+# Dev Notes:
+# - MSDN on where to store app data files:
+# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
+# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
+# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
+
+__version_info__ = (1, 4, 3)
+__version__ = '.'.join(map(str, __version_info__))
+
+
+import sys
+import os
+
+PY3 = sys.version_info[0] == 3
+
+if PY3:
+ unicode = str
+
+if sys.platform.startswith('java'):
+ import platform
+ os_name = platform.java_ver()[3][0]
+ if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc.
+ system = 'win32'
+ elif os_name.startswith('Mac'): # "Mac OS X", etc.
+ system = 'darwin'
+ else: # "Linux", "SunOS", "FreeBSD", etc.
+ # Setting this to "linux2" is not ideal, but only Windows or Mac
+ # are actually checked for and the rest of the module expects
+ # *sys.platform* style strings.
+ system = 'linux2'
+else:
+ system = sys.platform
+
+
+
+def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
+ r"""Return full path to the user-specific data dir for this application.
+
+ "appname" is the name of application.
+ If None, just the system directory is returned.
+ "appauthor" (only used on Windows) is the name of the
+ appauthor or distributing body for this application. Typically
+ it is the owning company name. This falls back to appname. You may
+ pass False to disable it.
+ "version" is an optional version path element to append to the
+ path. You might want to use this if you want multiple versions
+ of your app to be able to run independently. If used, this
+ would typically be "<major>.<minor>".
+ Only applied when appname is present.
+ "roaming" (boolean, default False) can be set True to use the Windows
+ roaming appdata directory. That means that for users on a Windows
+ network setup for roaming profiles, this user data will be
+ sync'd on login. See
+ <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
+ for a discussion of issues.
+
+ Typical user data directories are:
+ Mac OS X: ~/Library/Application Support/<AppName>
+ Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined
+ Win XP (not roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
+ Win XP (roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
+ Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
+ Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
+
+ For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
+ That means, by default "~/.local/share/<AppName>".
+ """
+ if system == "win32":
+ if appauthor is None:
+ appauthor = appname
+ const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
+ path = os.path.normpath(_get_win_folder(const))
+ if appname:
+ if appauthor is not False:
+ path = os.path.join(path, appauthor, appname)
+ else:
+ path = os.path.join(path, appname)
+ elif system == 'darwin':
+ path = os.path.expanduser('~/Library/Application Support/')
+ if appname:
+ path = os.path.join(path, appname)
+ else:
+ path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
+ if appname:
+ path = os.path.join(path, appname)
+ if appname and version:
+ path = os.path.join(path, version)
+ return path
+
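+# A sketch of typical user_data_dir() results (names are placeholders; the
+# exact path depends on the platform and environment):
+#
+#     user_data_dir("MyApp", "MyCompany", version="1.0")
+#     # Linux:   ~/.local/share/MyApp/1.0          (or under $XDG_DATA_HOME)
+#     # macOS:   ~/Library/Application Support/MyApp/1.0
+#     # Windows: C:\Users\<username>\AppData\Local\MyCompany\MyApp\1.0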
+
+def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
+ r"""Return full path to the user-shared data dir for this application.
+
+ "appname" is the name of application.
+ If None, just the system directory is returned.
+ "appauthor" (only used on Windows) is the name of the
+ appauthor or distributing body for this application. Typically
+ it is the owning company name. This falls back to appname. You may
+ pass False to disable it.
+ "version" is an optional version path element to append to the
+ path. You might want to use this if you want multiple versions
+ of your app to be able to run independently. If used, this
+ would typically be "<major>.<minor>".
+ Only applied when appname is present.
+ "multipath" is an optional parameter only applicable to *nix
+ which indicates that the entire list of data dirs should be
+ returned. By default, the first item from XDG_DATA_DIRS is
+ returned, or '/usr/local/share/<AppName>',
+ if XDG_DATA_DIRS is not set
+
+ Typical site data directories are:
+ Mac OS X: /Library/Application Support/<AppName>
+ Unix: /usr/local/share/<AppName> or /usr/share/<AppName>
+ Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
+ Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
+ Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7.
+
+ For Unix, this is using the $XDG_DATA_DIRS[0] default.
+
+ WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
+ """
+ if system == "win32":
+ if appauthor is None:
+ appauthor = appname
+ path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
+ if appname:
+ if appauthor is not False:
+ path = os.path.join(path, appauthor, appname)
+ else:
+ path = os.path.join(path, appname)
+ elif system == 'darwin':
+ path = os.path.expanduser('/Library/Application Support')
+ if appname:
+ path = os.path.join(path, appname)
+ else:
+ # XDG default for $XDG_DATA_DIRS
+ # only first, if multipath is False
+ path = os.getenv('XDG_DATA_DIRS',
+ os.pathsep.join(['/usr/local/share', '/usr/share']))
+ pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
+ if appname:
+ if version:
+ appname = os.path.join(appname, version)
+ pathlist = [os.sep.join([x, appname]) for x in pathlist]
+
+ if multipath:
+ path = os.pathsep.join(pathlist)
+ else:
+ path = pathlist[0]
+ return path
+
+ if appname and version:
+ path = os.path.join(path, version)
+ return path
+
+
+def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
+ r"""Return full path to the user-specific config dir for this application.
+
+ "appname" is the name of application.
+ If None, just the system directory is returned.
+ "appauthor" (only used on Windows) is the name of the
+ appauthor or distributing body for this application. Typically
+ it is the owning company name. This falls back to appname. You may
+ pass False to disable it.
+ "version" is an optional version path element to append to the
+ path. You might want to use this if you want multiple versions
+ of your app to be able to run independently. If used, this
+ would typically be "<major>.<minor>".
+ Only applied when appname is present.
+ "roaming" (boolean, default False) can be set True to use the Windows
+ roaming appdata directory. That means that for users on a Windows
+ network setup for roaming profiles, this user data will be
+ sync'd on login. See
+ <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
+ for a discussion of issues.
+
+ Typical user config directories are:
+ Mac OS X: same as user_data_dir
+ Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined
+ Win *: same as user_data_dir
+
+ For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
+ That means, by default "~/.config/<AppName>".
+ """
+ if system in ["win32", "darwin"]:
+ path = user_data_dir(appname, appauthor, None, roaming)
+ else:
+ path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
+ if appname:
+ path = os.path.join(path, appname)
+ if appname and version:
+ path = os.path.join(path, version)
+ return path
+
+
+def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
+ r"""Return full path to the user-shared data dir for this application.
+
+ "appname" is the name of application.
+ If None, just the system directory is returned.
+ "appauthor" (only used on Windows) is the name of the
+ appauthor or distributing body for this application. Typically
+ it is the owning company name. This falls back to appname. You may
+ pass False to disable it.
+ "version" is an optional version path element to append to the
+ path. You might want to use this if you want multiple versions
+ of your app to be able to run independently. If used, this
+ would typically be "<major>.<minor>".
+ Only applied when appname is present.
+ "multipath" is an optional parameter only applicable to *nix
+ which indicates that the entire list of config dirs should be
+ returned. By default, the first item from XDG_CONFIG_DIRS is
+ returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set
+
+ Typical site config directories are:
+ Mac OS X: same as site_data_dir
+ Unix: /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
+ $XDG_CONFIG_DIRS
+ Win *: same as site_data_dir
+ Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
+
+ For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False
+
+ WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
+ """
+ if system in ["win32", "darwin"]:
+ path = site_data_dir(appname, appauthor)
+ if appname and version:
+ path = os.path.join(path, version)
+ else:
+ # XDG default for $XDG_CONFIG_DIRS
+ # only first, if multipath is False
+ path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
+ pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
+ if appname:
+ if version:
+ appname = os.path.join(appname, version)
+ pathlist = [os.sep.join([x, appname]) for x in pathlist]
+
+ if multipath:
+ path = os.pathsep.join(pathlist)
+ else:
+ path = pathlist[0]
+ return path
+
+
+def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
+ r"""Return full path to the user-specific cache dir for this application.
+
+ "appname" is the name of application.
+ If None, just the system directory is returned.
+ "appauthor" (only used on Windows) is the name of the
+ appauthor or distributing body for this application. Typically
+ it is the owning company name. This falls back to appname. You may
+ pass False to disable it.
+ "version" is an optional version path element to append to the
+ path. You might want to use this if you want multiple versions
+ of your app to be able to run independently. If used, this
+ would typically be "<major>.<minor>".
+ Only applied when appname is present.
+ "opinion" (boolean) can be False to disable the appending of
+ "Cache" to the base app data dir for Windows. See
+ discussion below.
+
+ Typical user cache directories are:
+ Mac OS X: ~/Library/Caches/<AppName>
+ Unix: ~/.cache/<AppName> (XDG default)
+ Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
+ Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
+
+ On Windows the only suggestion in the MSDN docs is that local settings go in
+ the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
+ app data dir (the default returned by `user_data_dir` above). Apps typically
+ put cache data somewhere *under* the given dir here. Some examples:
+ ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
+ ...\Acme\SuperApp\Cache\1.0
+ OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
+ This can be disabled with the `opinion=False` option.
+ """
+ if system == "win32":
+ if appauthor is None:
+ appauthor = appname
+ path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
+ if appname:
+ if appauthor is not False:
+ path = os.path.join(path, appauthor, appname)
+ else:
+ path = os.path.join(path, appname)
+ if opinion:
+ path = os.path.join(path, "Cache")
+ elif system == 'darwin':
+ path = os.path.expanduser('~/Library/Caches')
+ if appname:
+ path = os.path.join(path, appname)
+ else:
+ path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
+ if appname:
+ path = os.path.join(path, appname)
+ if appname and version:
+ path = os.path.join(path, version)
+ return path
+
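+# A sketch of user_cache_dir() results (placeholder names; platform-dependent):
+#
+#     user_cache_dir("MyApp", "MyCompany")
+#     # Linux:   ~/.cache/MyApp                    (or under $XDG_CACHE_HOME)
+#     # macOS:   ~/Library/Caches/MyApp
+#     # Windows: C:\Users\<username>\AppData\Local\MyCompany\MyApp\Cache
+#     #          (the trailing "Cache" comes from the default opinion=True)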
+
+def user_state_dir(appname=None, appauthor=None, version=None, roaming=False):
+ r"""Return full path to the user-specific state dir for this application.
+
+ "appname" is the name of application.
+ If None, just the system directory is returned.
+ "appauthor" (only used on Windows) is the name of the
+ appauthor or distributing body for this application. Typically
+ it is the owning company name. This falls back to appname. You may
+ pass False to disable it.
+ "version" is an optional version path element to append to the
+ path. You might want to use this if you want multiple versions
+ of your app to be able to run independently. If used, this
+ would typically be "<major>.<minor>".
+ Only applied when appname is present.
+ "roaming" (boolean, default False) can be set True to use the Windows
+ roaming appdata directory. That means that for users on a Windows
+ network setup for roaming profiles, this user data will be
+ sync'd on login. See
+ <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
+ for a discussion of issues.
+
+ Typical user state directories are:
+ Mac OS X: same as user_data_dir
+ Unix: ~/.local/state/<AppName> # or in $XDG_STATE_HOME, if defined
+ Win *: same as user_data_dir
+
+ For Unix, we follow this Debian proposal <https://wiki.debian.org/XDGBaseDirectorySpecification#state>
+ to extend the XDG spec and support $XDG_STATE_HOME.
+
+ That means, by default "~/.local/state/<AppName>".
+ """
+ if system in ["win32", "darwin"]:
+ path = user_data_dir(appname, appauthor, None, roaming)
+ else:
+ path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state"))
+ if appname:
+ path = os.path.join(path, appname)
+ if appname and version:
+ path = os.path.join(path, version)
+ return path
+
+
+def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
+ r"""Return full path to the user-specific log dir for this application.
+
+ "appname" is the name of application.
+ If None, just the system directory is returned.
+ "appauthor" (only used on Windows) is the name of the
+ appauthor or distributing body for this application. Typically
+ it is the owning company name. This falls back to appname. You may
+ pass False to disable it.
+ "version" is an optional version path element to append to the
+ path. You might want to use this if you want multiple versions
+ of your app to be able to run independently. If used, this
+ would typically be "<major>.<minor>".
+ Only applied when appname is present.
+ "opinion" (boolean) can be False to disable the appending of
+ "Logs" to the base app data dir for Windows, and "log" to the
+ base cache dir for Unix. See discussion below.
+
+ Typical user log directories are:
+ Mac OS X: ~/Library/Logs/<AppName>
+ Unix: ~/.cache/<AppName>/log # or under $XDG_CACHE_HOME if defined
+ Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
+ Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
+
+ On Windows the only suggestion in the MSDN docs is that local settings
+ go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
+ examples of what some windows apps use for a logs dir.)
+
+ OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
+ value for Windows and appends "log" to the user cache dir for Unix.
+ This can be disabled with the `opinion=False` option.
+ """
+ if system == "darwin":
+ path = os.path.join(
+ os.path.expanduser('~/Library/Logs'),
+ appname)
+ elif system == "win32":
+ path = user_data_dir(appname, appauthor, version)
+ version = False
+ if opinion:
+ path = os.path.join(path, "Logs")
+ else:
+ path = user_cache_dir(appname, appauthor, version)
+ version = False
+ if opinion:
+ path = os.path.join(path, "log")
+ if appname and version:
+ path = os.path.join(path, version)
+ return path
+
+
+class AppDirs(object):
+ """Convenience wrapper for getting application dirs."""
+ def __init__(self, appname=None, appauthor=None, version=None,
+ roaming=False, multipath=False):
+ self.appname = appname
+ self.appauthor = appauthor
+ self.version = version
+ self.roaming = roaming
+ self.multipath = multipath
+
+ @property
+ def user_data_dir(self):
+ return user_data_dir(self.appname, self.appauthor,
+ version=self.version, roaming=self.roaming)
+
+ @property
+ def site_data_dir(self):
+ return site_data_dir(self.appname, self.appauthor,
+ version=self.version, multipath=self.multipath)
+
+ @property
+ def user_config_dir(self):
+ return user_config_dir(self.appname, self.appauthor,
+ version=self.version, roaming=self.roaming)
+
+ @property
+ def site_config_dir(self):
+ return site_config_dir(self.appname, self.appauthor,
+ version=self.version, multipath=self.multipath)
+
+ @property
+ def user_cache_dir(self):
+ return user_cache_dir(self.appname, self.appauthor,
+ version=self.version)
+
+ @property
+ def user_state_dir(self):
+ return user_state_dir(self.appname, self.appauthor,
+ version=self.version)
+
+ @property
+ def user_log_dir(self):
+ return user_log_dir(self.appname, self.appauthor,
+ version=self.version)
+
+
+#---- internal support stuff
+
+def _get_win_folder_from_registry(csidl_name):
+ """This is a fallback technique at best. I'm not sure if using the
+ registry for this guarantees us the correct answer for all CSIDL_*
+ names.
+ """
+ if PY3:
+ import winreg as _winreg
+ else:
+ import _winreg
+
+ shell_folder_name = {
+ "CSIDL_APPDATA": "AppData",
+ "CSIDL_COMMON_APPDATA": "Common AppData",
+ "CSIDL_LOCAL_APPDATA": "Local AppData",
+ }[csidl_name]
+
+ key = _winreg.OpenKey(
+ _winreg.HKEY_CURRENT_USER,
+ r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
+ )
+ dir, type = _winreg.QueryValueEx(key, shell_folder_name)
+ return dir
+
+
+def _get_win_folder_with_pywin32(csidl_name):
+ from win32com.shell import shellcon, shell
+ dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
+ # Try to make this a unicode path because SHGetFolderPath does
+ # not return unicode strings when there is unicode data in the
+ # path.
+ try:
+ dir = unicode(dir)
+
+ # Downgrade to short path name if have highbit chars. See
+ # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
+ has_high_char = False
+ for c in dir:
+ if ord(c) > 255:
+ has_high_char = True
+ break
+ if has_high_char:
+ try:
+ import win32api
+ dir = win32api.GetShortPathName(dir)
+ except ImportError:
+ pass
+ except UnicodeError:
+ pass
+ return dir
+
+
+def _get_win_folder_with_ctypes(csidl_name):
+ import ctypes
+
+ csidl_const = {
+ "CSIDL_APPDATA": 26,
+ "CSIDL_COMMON_APPDATA": 35,
+ "CSIDL_LOCAL_APPDATA": 28,
+ }[csidl_name]
+
+ buf = ctypes.create_unicode_buffer(1024)
+ ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
+
+ # Downgrade to short path name if have highbit chars. See
+ # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
+ has_high_char = False
+ for c in buf:
+ if ord(c) > 255:
+ has_high_char = True
+ break
+ if has_high_char:
+ buf2 = ctypes.create_unicode_buffer(1024)
+ if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
+ buf = buf2
+
+ return buf.value
+
+def _get_win_folder_with_jna(csidl_name):
+ import array
+ from com.sun import jna
+ from com.sun.jna.platform import win32
+
+ buf_size = win32.WinDef.MAX_PATH * 2
+ buf = array.zeros('c', buf_size)
+ shell = win32.Shell32.INSTANCE
+ shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
+ dir = jna.Native.toString(buf.tostring()).rstrip("\0")
+
+ # Downgrade to short path name if have highbit chars. See
+ # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
+ has_high_char = False
+ for c in dir:
+ if ord(c) > 255:
+ has_high_char = True
+ break
+ if has_high_char:
+ buf = array.zeros('c', buf_size)
+ kernel = win32.Kernel32.INSTANCE
+ if kernel.GetShortPathName(dir, buf, buf_size):
+ dir = jna.Native.toString(buf.tostring()).rstrip("\0")
+
+ return dir
+
+if system == "win32":
+ try:
+ import win32com.shell
+ _get_win_folder = _get_win_folder_with_pywin32
+ except ImportError:
+ try:
+ from ctypes import windll
+ _get_win_folder = _get_win_folder_with_ctypes
+ except ImportError:
+ try:
+ import com.sun.jna
+ _get_win_folder = _get_win_folder_with_jna
+ except ImportError:
+ _get_win_folder = _get_win_folder_from_registry
+
+
+#---- self test code
+
+if __name__ == "__main__":
+ appname = "MyApp"
+ appauthor = "MyCompany"
+
+ props = ("user_data_dir",
+ "user_config_dir",
+ "user_cache_dir",
+ "user_state_dir",
+ "user_log_dir",
+ "site_data_dir",
+ "site_config_dir")
+
+ print("-- app dirs %s --" % __version__)
+
+ print("-- app dirs (with optional 'version')")
+ dirs = AppDirs(appname, appauthor, version="1.0")
+ for prop in props:
+ print("%s: %s" % (prop, getattr(dirs, prop)))
+
+ print("\n-- app dirs (without optional 'version')")
+ dirs = AppDirs(appname, appauthor)
+ for prop in props:
+ print("%s: %s" % (prop, getattr(dirs, prop)))
+
+ print("\n-- app dirs (without optional 'appauthor')")
+ dirs = AppDirs(appname)
+ for prop in props:
+ print("%s: %s" % (prop, getattr(dirs, prop)))
+
+ print("\n-- app dirs (with disabled 'appauthor')")
+ dirs = AppDirs(appname, appauthor=False)
+ for prop in props:
+ print("%s: %s" % (prop, getattr(dirs, prop)))
diff --git a/venv/lib/python3.9/site-packages/pkg_resources/_vendor/packaging/__about__.py b/venv/lib/python3.9/site-packages/pkg_resources/_vendor/packaging/__about__.py
new file mode 100644
index 0000000..4d99857
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pkg_resources/_vendor/packaging/__about__.py
@@ -0,0 +1,27 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+__all__ = [
+ "__title__",
+ "__summary__",
+ "__uri__",
+ "__version__",
+ "__author__",
+ "__email__",
+ "__license__",
+ "__copyright__",
+]
+
+__title__ = "packaging"
+__summary__ = "Core utilities for Python packages"
+__uri__ = "https://github.com/pypa/packaging"
+
+__version__ = "20.4"
+
+__author__ = "Donald Stufft and individual contributors"
+__email__ = "donald@stufft.io"
+
+__license__ = "BSD-2-Clause or Apache-2.0"
+__copyright__ = "Copyright 2014-2019 %s" % __author__
diff --git a/venv/lib/python3.9/site-packages/pkg_resources/_vendor/packaging/__init__.py b/venv/lib/python3.9/site-packages/pkg_resources/_vendor/packaging/__init__.py
new file mode 100644
index 0000000..a0cf67d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pkg_resources/_vendor/packaging/__init__.py
@@ -0,0 +1,26 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+from .__about__ import (
+ __author__,
+ __copyright__,
+ __email__,
+ __license__,
+ __summary__,
+ __title__,
+ __uri__,
+ __version__,
+)
+
+__all__ = [
+ "__title__",
+ "__summary__",
+ "__uri__",
+ "__version__",
+ "__author__",
+ "__email__",
+ "__license__",
+ "__copyright__",
+]
diff --git a/venv/lib/python3.9/site-packages/pkg_resources/_vendor/packaging/_compat.py b/venv/lib/python3.9/site-packages/pkg_resources/_vendor/packaging/_compat.py
new file mode 100644
index 0000000..e54bd4e
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pkg_resources/_vendor/packaging/_compat.py
@@ -0,0 +1,38 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import sys
+
+from ._typing import TYPE_CHECKING
+
+if TYPE_CHECKING: # pragma: no cover
+ from typing import Any, Dict, Tuple, Type
+
+
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+
+# flake8: noqa
+
+if PY3:
+ string_types = (str,)
+else:
+ string_types = (basestring,)
+
+
+def with_metaclass(meta, *bases):
+ # type: (Type[Any], Tuple[Type[Any], ...]) -> Any
+ """
+ Create a base class with a metaclass.
+ """
+ # This requires a bit of explanation: the basic idea is to make a dummy
+ # metaclass for one level of class instantiation that replaces itself with
+ # the actual metaclass.
+ class metaclass(meta): # type: ignore
+ def __new__(cls, name, this_bases, d):
+ # type: (Type[Any], str, Tuple[Any], Dict[Any, Any]) -> Any
+ return meta(name, bases, d)
+
+ return type.__new__(metaclass, "temporary_class", (), {})
diff --git a/venv/lib/python3.9/site-packages/pkg_resources/_vendor/packaging/_structures.py b/venv/lib/python3.9/site-packages/pkg_resources/_vendor/packaging/_structures.py
new file mode 100644
index 0000000..800d5c5
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pkg_resources/_vendor/packaging/_structures.py
@@ -0,0 +1,86 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+
+class InfinityType(object):
+ def __repr__(self):
+ # type: () -> str
+ return "Infinity"
+
+ def __hash__(self):
+ # type: () -> int
+ return hash(repr(self))
+
+ def __lt__(self, other):
+ # type: (object) -> bool
+ return False
+
+ def __le__(self, other):
+ # type: (object) -> bool
+ return False
+
+ def __eq__(self, other):
+ # type: (object) -> bool
+ return isinstance(other, self.__class__)
+
+ def __ne__(self, other):
+ # type: (object) -> bool
+ return not isinstance(other, self.__class__)
+
+ def __gt__(self, other):
+ # type: (object) -> bool
+ return True
+
+ def __ge__(self, other):
+ # type: (object) -> bool
+ return True
+
+ def __neg__(self):
+ # type: (object) -> NegativeInfinityType
+ return NegativeInfinity
+
+
+Infinity = InfinityType()
+
+
+class NegativeInfinityType(object):
+ def __repr__(self):
+ # type: () -> str
+ return "-Infinity"
+
+ def __hash__(self):
+ # type: () -> int
+ return hash(repr(self))
+
+ def __lt__(self, other):
+ # type: (object) -> bool
+ return True
+
+ def __le__(self, other):
+ # type: (object) -> bool
+ return True
+
+ def __eq__(self, other):
+ # type: (object) -> bool
+ return isinstance(other, self.__class__)
+
+ def __ne__(self, other):
+ # type: (object) -> bool
+ return not isinstance(other, self.__class__)
+
+ def __gt__(self, other):
+ # type: (object) -> bool
+ return False
+
+ def __ge__(self, other):
+ # type: (object) -> bool
+ return False
+
+ def __neg__(self):
+ # type: (object) -> InfinityType
+ return Infinity
+
+
+NegativeInfinity = NegativeInfinityType()
diff --git a/venv/lib/python3.9/site-packages/pkg_resources/_vendor/packaging/_typing.py b/venv/lib/python3.9/site-packages/pkg_resources/_vendor/packaging/_typing.py
new file mode 100644
index 0000000..77a8b91
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pkg_resources/_vendor/packaging/_typing.py
@@ -0,0 +1,48 @@
+"""For neatly implementing static typing in packaging.
+
+`mypy` - the static type analysis tool we use - uses the `typing` module, which
+provides core functionality fundamental to mypy's functioning.
+
+Generally, `typing` would be imported at runtime and used in that fashion -
+it acts as a no-op at runtime and does not have any run-time overhead by
+design.
+
+As it turns out, `typing` is not vendorable - it uses separate sources for
+Python 2/Python 3. Thus, this codebase can not expect it to be present.
+To work around this, mypy allows the typing import to be behind a False-y
+optional to prevent it from running at runtime and type-comments can be used
+to remove the need for the types to be accessible directly during runtime.
+
+This module provides the False-y guard in a nicely named fashion so that a
+curious maintainer can reach here to read this.
+
+In packaging, all static-typing related imports should be guarded as follows:
+
+ from packaging._typing import TYPE_CHECKING
+
+ if TYPE_CHECKING:
+ from typing import ...
+
+Ref: https://github.com/python/mypy/issues/3216
+"""
+
+__all__ = ["TYPE_CHECKING", "cast"]
+
+# The TYPE_CHECKING constant defined by the typing module is False at runtime
+# but True while type checking.
+if False: # pragma: no cover
+ from typing import TYPE_CHECKING
+else:
+ TYPE_CHECKING = False
+
+# typing's cast syntax requires calling typing.cast at runtime, but we don't
+# want to import typing at runtime. Here, we inform the type checkers that
+# we're importing `typing.cast` as `cast` and re-implement typing.cast's
+# runtime behavior in a block that is ignored by type checkers.
+if TYPE_CHECKING: # pragma: no cover
+ # not executed at runtime
+ from typing import cast
+else:
+ # executed at runtime
+ def cast(type_, value): # noqa
+ return value
diff --git a/venv/lib/python3.9/site-packages/pkg_resources/_vendor/packaging/markers.py b/venv/lib/python3.9/site-packages/pkg_resources/_vendor/packaging/markers.py
new file mode 100644
index 0000000..fd1559c
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pkg_resources/_vendor/packaging/markers.py
@@ -0,0 +1,328 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import operator
+import os
+import platform
+import sys
+
+from pkg_resources.extern.pyparsing import ParseException, ParseResults, stringStart, stringEnd
+from pkg_resources.extern.pyparsing import ZeroOrMore, Group, Forward, QuotedString
+from pkg_resources.extern.pyparsing import Literal as L # noqa
+
+from ._compat import string_types
+from ._typing import TYPE_CHECKING
+from .specifiers import Specifier, InvalidSpecifier
+
+if TYPE_CHECKING: # pragma: no cover
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+
+ Operator = Callable[[str, str], bool]
+
+
+__all__ = [
+ "InvalidMarker",
+ "UndefinedComparison",
+ "UndefinedEnvironmentName",
+ "Marker",
+ "default_environment",
+]
+
+
+class InvalidMarker(ValueError):
+ """
+ An invalid marker was found, users should refer to PEP 508.
+ """
+
+
+class UndefinedComparison(ValueError):
+ """
+ An invalid operation was attempted on a value that doesn't support it.
+ """
+
+
+class UndefinedEnvironmentName(ValueError):
+ """
+ A name was attempted to be used that does not exist inside of the
+ environment.
+ """
+
+
+class Node(object):
+ def __init__(self, value):
+ # type: (Any) -> None
+ self.value = value
+
+ def __str__(self):
+ # type: () -> str
+ return str(self.value)
+
+ def __repr__(self):
+ # type: () -> str
+ return "<{0}({1!r})>".format(self.__class__.__name__, str(self))
+
+ def serialize(self):
+ # type: () -> str
+ raise NotImplementedError
+
+
+class Variable(Node):
+ def serialize(self):
+ # type: () -> str
+ return str(self)
+
+
+class Value(Node):
+ def serialize(self):
+ # type: () -> str
+ return '"{0}"'.format(self)
+
+
+class Op(Node):
+ def serialize(self):
+ # type: () -> str
+ return str(self)
+
+
+VARIABLE = (
+ L("implementation_version")
+ | L("platform_python_implementation")
+ | L("implementation_name")
+ | L("python_full_version")
+ | L("platform_release")
+ | L("platform_version")
+ | L("platform_machine")
+ | L("platform_system")
+ | L("python_version")
+ | L("sys_platform")
+ | L("os_name")
+ | L("os.name") # PEP-345
+ | L("sys.platform") # PEP-345
+ | L("platform.version") # PEP-345
+ | L("platform.machine") # PEP-345
+ | L("platform.python_implementation") # PEP-345
+ | L("python_implementation") # undocumented setuptools legacy
+ | L("extra") # PEP-508
+)
+ALIASES = {
+ "os.name": "os_name",
+ "sys.platform": "sys_platform",
+ "platform.version": "platform_version",
+ "platform.machine": "platform_machine",
+ "platform.python_implementation": "platform_python_implementation",
+ "python_implementation": "platform_python_implementation",
+}
+VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))
+
+VERSION_CMP = (
+ L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<")
+)
+
+MARKER_OP = VERSION_CMP | L("not in") | L("in")
+MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))
+
+MARKER_VALUE = QuotedString("'") | QuotedString('"')
+MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))
+
+BOOLOP = L("and") | L("or")
+
+MARKER_VAR = VARIABLE | MARKER_VALUE
+
+MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
+MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))
+
+LPAREN = L("(").suppress()
+RPAREN = L(")").suppress()
+
+MARKER_EXPR = Forward()
+MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
+MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)
+
+MARKER = stringStart + MARKER_EXPR + stringEnd
+
+
+def _coerce_parse_result(results):
+ # type: (Union[ParseResults, List[Any]]) -> List[Any]
+ if isinstance(results, ParseResults):
+ return [_coerce_parse_result(i) for i in results]
+ else:
+ return results
+
+
+def _format_marker(marker, first=True):
+ # type: (Union[List[str], Tuple[Node, ...], str], Optional[bool]) -> str
+
+ assert isinstance(marker, (list, tuple, string_types))
+
+ # Sometimes we have a structure like [[...]] which is a single item list
+ # where the single item is itself its own list. In that case we want to skip
+ # the rest of this function so that we don't get extraneous () on the
+ # outside.
+ if (
+ isinstance(marker, list)
+ and len(marker) == 1
+ and isinstance(marker[0], (list, tuple))
+ ):
+ return _format_marker(marker[0])
+
+ if isinstance(marker, list):
+ inner = (_format_marker(m, first=False) for m in marker)
+ if first:
+ return " ".join(inner)
+ else:
+ return "(" + " ".join(inner) + ")"
+ elif isinstance(marker, tuple):
+ return " ".join([m.serialize() for m in marker])
+ else:
+ return marker
+
+
+_operators = {
+ "in": lambda lhs, rhs: lhs in rhs,
+ "not in": lambda lhs, rhs: lhs not in rhs,
+ "<": operator.lt,
+ "<=": operator.le,
+ "==": operator.eq,
+ "!=": operator.ne,
+ ">=": operator.ge,
+ ">": operator.gt,
+} # type: Dict[str, Operator]
+
+
+def _eval_op(lhs, op, rhs):
+ # type: (str, Op, str) -> bool
+ try:
+ spec = Specifier("".join([op.serialize(), rhs]))
+ except InvalidSpecifier:
+ pass
+ else:
+ return spec.contains(lhs)
+
+ oper = _operators.get(op.serialize()) # type: Optional[Operator]
+ if oper is None:
+ raise UndefinedComparison(
+ "Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs)
+ )
+
+ return oper(lhs, rhs)
+
+
+class Undefined(object):
+ pass
+
+
+_undefined = Undefined()
+
+
+def _get_env(environment, name):
+ # type: (Dict[str, str], str) -> str
+ value = environment.get(name, _undefined) # type: Union[str, Undefined]
+
+ if isinstance(value, Undefined):
+ raise UndefinedEnvironmentName(
+ "{0!r} does not exist in evaluation environment.".format(name)
+ )
+
+ return value
+
+
+def _evaluate_markers(markers, environment):
+ # type: (List[Any], Dict[str, str]) -> bool
+ groups = [[]] # type: List[List[bool]]
+
+ for marker in markers:
+ assert isinstance(marker, (list, tuple, string_types))
+
+ if isinstance(marker, list):
+ groups[-1].append(_evaluate_markers(marker, environment))
+ elif isinstance(marker, tuple):
+ lhs, op, rhs = marker
+
+ if isinstance(lhs, Variable):
+ lhs_value = _get_env(environment, lhs.value)
+ rhs_value = rhs.value
+ else:
+ lhs_value = lhs.value
+ rhs_value = _get_env(environment, rhs.value)
+
+ groups[-1].append(_eval_op(lhs_value, op, rhs_value))
+ else:
+ assert marker in ["and", "or"]
+ if marker == "or":
+ groups.append([])
+
+ return any(all(item) for item in groups)
+
+
+def format_full_version(info):
+ # type: (sys._version_info) -> str
+ version = "{0.major}.{0.minor}.{0.micro}".format(info)
+ kind = info.releaselevel
+ if kind != "final":
+ version += kind[0] + str(info.serial)
+ return version
+
+
+def default_environment():
+ # type: () -> Dict[str, str]
+ if hasattr(sys, "implementation"):
+ # Ignoring the `sys.implementation` reference for type checking due to
+ # mypy not liking that the attribute doesn't exist in Python 2.7 when
+ # run with the `--py27` flag.
+ iver = format_full_version(sys.implementation.version) # type: ignore
+ implementation_name = sys.implementation.name # type: ignore
+ else:
+ iver = "0"
+ implementation_name = ""
+
+ return {
+ "implementation_name": implementation_name,
+ "implementation_version": iver,
+ "os_name": os.name,
+ "platform_machine": platform.machine(),
+ "platform_release": platform.release(),
+ "platform_system": platform.system(),
+ "platform_version": platform.version(),
+ "python_full_version": platform.python_version(),
+ "platform_python_implementation": platform.python_implementation(),
+ "python_version": ".".join(platform.python_version_tuple()[:2]),
+ "sys_platform": sys.platform,
+ }
+
+
+class Marker(object):
+ def __init__(self, marker):
+ # type: (str) -> None
+ try:
+ self._markers = _coerce_parse_result(MARKER.parseString(marker))
+ except ParseException as e:
+ err_str = "Invalid marker: {0!r}, parse error at {1!r}".format(
+ marker, marker[e.loc : e.loc + 8]
+ )
+ raise InvalidMarker(err_str)
+
+ def __str__(self):
+ # type: () -> str
+ return _format_marker(self._markers)
+
+ def __repr__(self):
+ # type: () -> str
+ return "".format(str(self))
+
+ def evaluate(self, environment=None):
+ # type: (Optional[Dict[str, str]]) -> bool
+ """Evaluate a marker.
+
+ Return the boolean from evaluating the given marker against the
+ environment. environment is an optional argument to override all or
+ part of the determined environment.
+
+ The environment is determined from the current Python process.
+ """
+ current_environment = default_environment()
+ if environment is not None:
+ current_environment.update(environment)
+
+ return _evaluate_markers(self._markers, current_environment)
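+
+# A minimal sketch of evaluating a marker (the marker string is hypothetical):
+#
+#     m = Marker("python_version >= '3.6' and os_name == 'posix'")
+#     m.evaluate()                    # against default_environment()
+#     m.evaluate({"os_name": "nt"})   # override part of the environment -> False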
diff --git a/venv/lib/python3.9/site-packages/pkg_resources/_vendor/packaging/requirements.py b/venv/lib/python3.9/site-packages/pkg_resources/_vendor/packaging/requirements.py
new file mode 100644
index 0000000..9495a1d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pkg_resources/_vendor/packaging/requirements.py
@@ -0,0 +1,145 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import string
+import re
+
+from pkg_resources.extern.pyparsing import stringStart, stringEnd, originalTextFor, ParseException
+from pkg_resources.extern.pyparsing import ZeroOrMore, Word, Optional, Regex, Combine
+from pkg_resources.extern.pyparsing import Literal as L # noqa
+from urllib import parse as urlparse
+
+from ._typing import TYPE_CHECKING
+from .markers import MARKER_EXPR, Marker
+from .specifiers import LegacySpecifier, Specifier, SpecifierSet
+
+if TYPE_CHECKING: # pragma: no cover
+ from typing import List
+
+
+class InvalidRequirement(ValueError):
+ """
+ An invalid requirement was found, users should refer to PEP 508.
+ """
+
+
+ALPHANUM = Word(string.ascii_letters + string.digits)
+
+LBRACKET = L("[").suppress()
+RBRACKET = L("]").suppress()
+LPAREN = L("(").suppress()
+RPAREN = L(")").suppress()
+COMMA = L(",").suppress()
+SEMICOLON = L(";").suppress()
+AT = L("@").suppress()
+
+PUNCTUATION = Word("-_.")
+IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM)
+IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END))
+
+NAME = IDENTIFIER("name")
+EXTRA = IDENTIFIER
+
+URI = Regex(r"[^ ]+")("url")
+URL = AT + URI
+
+EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA)
+EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras")
+
+VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE)
+VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE)
+
+VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY
+VERSION_MANY = Combine(
+ VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), joinString=",", adjacent=False
+)("_raw_spec")
+_VERSION_SPEC = Optional(((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY))
+_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or "")
+
+VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier")
+VERSION_SPEC.setParseAction(lambda s, l, t: t[1])
+
+MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker")
+MARKER_EXPR.setParseAction(
+ lambda s, l, t: Marker(s[t._original_start : t._original_end])
+)
+MARKER_SEPARATOR = SEMICOLON
+MARKER = MARKER_SEPARATOR + MARKER_EXPR
+
+VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER)
+URL_AND_MARKER = URL + Optional(MARKER)
+
+NAMED_REQUIREMENT = NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER)
+
+REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd
+# pkg_resources.extern.pyparsing isn't thread safe during initialization, so we do it eagerly, see
+# issue #104
+REQUIREMENT.parseString("x[]")
+
+
+class Requirement(object):
+ """Parse a requirement.
+
+ Parse a given requirement string into its parts, such as name, specifier,
+ URL, and extras. Raises InvalidRequirement on a badly-formed requirement
+ string.
+ """
+
+ # TODO: Can we test whether something is contained within a requirement?
+ # If so how do we do that? Do we need to test against the _name_ of
+ # the thing as well as the version? What about the markers?
+ # TODO: Can we normalize the name and extra name?
+
+ def __init__(self, requirement_string):
+ # type: (str) -> None
+ try:
+ req = REQUIREMENT.parseString(requirement_string)
+ except ParseException as e:
+ raise InvalidRequirement(
+ 'Parse error at "{0!r}": {1}'.format(
+ requirement_string[e.loc : e.loc + 8], e.msg
+ )
+ )
+
+ self.name = req.name
+ if req.url:
+ parsed_url = urlparse.urlparse(req.url)
+ if parsed_url.scheme == "file":
+ if urlparse.urlunparse(parsed_url) != req.url:
+ raise InvalidRequirement("Invalid URL given")
+ elif not (parsed_url.scheme and parsed_url.netloc) or (
+ not parsed_url.scheme and not parsed_url.netloc
+ ):
+ raise InvalidRequirement("Invalid URL: {0}".format(req.url))
+ self.url = req.url
+ else:
+ self.url = None
+ self.extras = set(req.extras.asList() if req.extras else [])
+ self.specifier = SpecifierSet(req.specifier)
+ self.marker = req.marker if req.marker else None
+
+ def __str__(self):
+ # type: () -> str
+ parts = [self.name] # type: List[str]
+
+ if self.extras:
+ parts.append("[{0}]".format(",".join(sorted(self.extras))))
+
+ if self.specifier:
+ parts.append(str(self.specifier))
+
+ if self.url:
+ parts.append("@ {0}".format(self.url))
+ if self.marker:
+ parts.append(" ")
+
+ if self.marker:
+ parts.append("; {0}".format(self.marker))
+
+ return "".join(parts)
+
+ def __repr__(self):
+ # type: () -> str
+ return "".format(str(self))
diff --git a/venv/lib/python3.9/site-packages/pkg_resources/_vendor/packaging/specifiers.py b/venv/lib/python3.9/site-packages/pkg_resources/_vendor/packaging/specifiers.py
new file mode 100644
index 0000000..fe09bb1
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pkg_resources/_vendor/packaging/specifiers.py
@@ -0,0 +1,863 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import abc
+import functools
+import itertools
+import re
+
+from ._compat import string_types, with_metaclass
+from ._typing import TYPE_CHECKING
+from .utils import canonicalize_version
+from .version import Version, LegacyVersion, parse
+
+if TYPE_CHECKING: # pragma: no cover
+ from typing import (
+ List,
+ Dict,
+ Union,
+ Iterable,
+ Iterator,
+ Optional,
+ Callable,
+ Tuple,
+ FrozenSet,
+ )
+
+ ParsedVersion = Union[Version, LegacyVersion]
+ UnparsedVersion = Union[Version, LegacyVersion, str]
+ CallableOperator = Callable[[ParsedVersion, str], bool]
+
+
+class InvalidSpecifier(ValueError):
+ """
+ An invalid specifier was found, users should refer to PEP 440.
+ """
+
+
+class BaseSpecifier(with_metaclass(abc.ABCMeta, object)): # type: ignore
+ @abc.abstractmethod
+ def __str__(self):
+ # type: () -> str
+ """
+ Returns the str representation of this Specifier like object. This
+ should be representative of the Specifier itself.
+ """
+
+ @abc.abstractmethod
+ def __hash__(self):
+ # type: () -> int
+ """
+ Returns a hash value for this Specifier like object.
+ """
+
+ @abc.abstractmethod
+ def __eq__(self, other):
+ # type: (object) -> bool
+ """
+ Returns a boolean representing whether or not the two Specifier like
+ objects are equal.
+ """
+
+ @abc.abstractmethod
+ def __ne__(self, other):
+ # type: (object) -> bool
+ """
+ Returns a boolean representing whether or not the two Specifier like
+ objects are not equal.
+ """
+
+ @abc.abstractproperty
+ def prereleases(self):
+ # type: () -> Optional[bool]
+ """
+ Returns whether or not pre-releases as a whole are allowed by this
+ specifier.
+ """
+
+ @prereleases.setter
+ def prereleases(self, value):
+ # type: (bool) -> None
+ """
+ Sets whether or not pre-releases as a whole are allowed by this
+ specifier.
+ """
+
+ @abc.abstractmethod
+ def contains(self, item, prereleases=None):
+ # type: (str, Optional[bool]) -> bool
+ """
+ Determines if the given item is contained within this specifier.
+ """
+
+ @abc.abstractmethod
+ def filter(self, iterable, prereleases=None):
+ # type: (Iterable[UnparsedVersion], Optional[bool]) -> Iterable[UnparsedVersion]
+ """
+ Takes an iterable of items and filters them so that only items which
+ are contained within this specifier are allowed in it.
+ """
+
+
+class _IndividualSpecifier(BaseSpecifier):
+
+ _operators = {} # type: Dict[str, str]
+
+ def __init__(self, spec="", prereleases=None):
+ # type: (str, Optional[bool]) -> None
+ match = self._regex.search(spec)
+ if not match:
+ raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec))
+
+ self._spec = (
+ match.group("operator").strip(),
+ match.group("version").strip(),
+ ) # type: Tuple[str, str]
+
+ # Store whether or not this Specifier should accept prereleases
+ self._prereleases = prereleases
+
+ def __repr__(self):
+ # type: () -> str
+ pre = (
+ ", prereleases={0!r}".format(self.prereleases)
+ if self._prereleases is not None
+ else ""
+ )
+
+ return "<{0}({1!r}{2})>".format(self.__class__.__name__, str(self), pre)
+
+ def __str__(self):
+ # type: () -> str
+ return "{0}{1}".format(*self._spec)
+
+ @property
+ def _canonical_spec(self):
+ # type: () -> Tuple[str, Union[Version, str]]
+ return self._spec[0], canonicalize_version(self._spec[1])
+
+ def __hash__(self):
+ # type: () -> int
+ return hash(self._canonical_spec)
+
+ def __eq__(self, other):
+ # type: (object) -> bool
+ if isinstance(other, string_types):
+ try:
+ other = self.__class__(str(other))
+ except InvalidSpecifier:
+ return NotImplemented
+ elif not isinstance(other, self.__class__):
+ return NotImplemented
+
+ return self._canonical_spec == other._canonical_spec
+
+ def __ne__(self, other):
+ # type: (object) -> bool
+ if isinstance(other, string_types):
+ try:
+ other = self.__class__(str(other))
+ except InvalidSpecifier:
+ return NotImplemented
+ elif not isinstance(other, self.__class__):
+ return NotImplemented
+
+ return self._spec != other._spec
+
+ def _get_operator(self, op):
+ # type: (str) -> CallableOperator
+ operator_callable = getattr(
+ self, "_compare_{0}".format(self._operators[op])
+ ) # type: CallableOperator
+ return operator_callable
+
+ def _coerce_version(self, version):
+ # type: (UnparsedVersion) -> ParsedVersion
+ if not isinstance(version, (LegacyVersion, Version)):
+ version = parse(version)
+ return version
+
+ @property
+ def operator(self):
+ # type: () -> str
+ return self._spec[0]
+
+ @property
+ def version(self):
+ # type: () -> str
+ return self._spec[1]
+
+ @property
+ def prereleases(self):
+ # type: () -> Optional[bool]
+ return self._prereleases
+
+ @prereleases.setter
+ def prereleases(self, value):
+ # type: (bool) -> None
+ self._prereleases = value
+
+ def __contains__(self, item):
+ # type: (str) -> bool
+ return self.contains(item)
+
+ def contains(self, item, prereleases=None):
+ # type: (UnparsedVersion, Optional[bool]) -> bool
+
+ # Determine if prereleases are to be allowed or not.
+ if prereleases is None:
+ prereleases = self.prereleases
+
+ # Normalize item to a Version or LegacyVersion, this allows us to have
+ # a shortcut for ``"2.0" in Specifier(">=2")``.
+ normalized_item = self._coerce_version(item)
+
+ # Determine if we should be supporting prereleases in this specifier
+ # or not; if we do not support prereleases then we can short-circuit
+ # the logic if this version is a prerelease.
+ if normalized_item.is_prerelease and not prereleases:
+ return False
+
+ # Actually do the comparison to determine if this item is contained
+ # within this Specifier or not.
+ operator_callable = self._get_operator(self.operator) # type: CallableOperator
+ return operator_callable(normalized_item, self.version)
+
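+ # For example, Specifier(">=1.0").contains("1.1a1") is False by default
+ # because pre-releases are excluded, but True when prereleases=True is
+ # passed (a sketch of the rule implemented in contains() above).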
+ def filter(self, iterable, prereleases=None):
+ # type: (Iterable[UnparsedVersion], Optional[bool]) -> Iterable[UnparsedVersion]
+
+ yielded = False
+ found_prereleases = []
+
+ kw = {"prereleases": prereleases if prereleases is not None else True}
+
+ # Attempt to iterate over all the values in the iterable and if any of
+ # them match, yield them.
+ for version in iterable:
+ parsed_version = self._coerce_version(version)
+
+ if self.contains(parsed_version, **kw):
+ # If our version is a prerelease, and we were not set to allow
+ # prereleases, then we'll store it for later in case nothing
+ # else matches this specifier.
+ if parsed_version.is_prerelease and not (
+ prereleases or self.prereleases
+ ):
+ found_prereleases.append(version)
+ # Either this is not a prerelease, or we should have been
+ # accepting prereleases from the beginning.
+ else:
+ yielded = True
+ yield version
+
+ # Now that we've iterated over everything, determine if we've yielded
+ # any values, and if we have not and we have any prereleases stored up
+ # then we will go ahead and yield the prereleases.
+ if not yielded and found_prereleases:
+ for version in found_prereleases:
+ yield version
+
+
+class LegacySpecifier(_IndividualSpecifier):
+
+ _regex_str = r"""
+ (?P(==|!=|<=|>=|<|>))
+ \s*
+ (?P
+ [^,;\s)]* # Since this is a "legacy" specifier, and the version
+ # string can be just about anything, we match everything
+ # except for whitespace, a semi-colon for marker support,
+ # a closing paren since versions can be enclosed in
+ # them, and a comma since it's a version separator.
+ )
+ """
+
+ _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+ _operators = {
+ "==": "equal",
+ "!=": "not_equal",
+ "<=": "less_than_equal",
+ ">=": "greater_than_equal",
+ "<": "less_than",
+ ">": "greater_than",
+ }
+
+ def _coerce_version(self, version):
+ # type: (Union[ParsedVersion, str]) -> LegacyVersion
+ if not isinstance(version, LegacyVersion):
+ version = LegacyVersion(str(version))
+ return version
+
+ def _compare_equal(self, prospective, spec):
+ # type: (LegacyVersion, str) -> bool
+ return prospective == self._coerce_version(spec)
+
+ def _compare_not_equal(self, prospective, spec):
+ # type: (LegacyVersion, str) -> bool
+ return prospective != self._coerce_version(spec)
+
+ def _compare_less_than_equal(self, prospective, spec):
+ # type: (LegacyVersion, str) -> bool
+ return prospective <= self._coerce_version(spec)
+
+ def _compare_greater_than_equal(self, prospective, spec):
+ # type: (LegacyVersion, str) -> bool
+ return prospective >= self._coerce_version(spec)
+
+ def _compare_less_than(self, prospective, spec):
+ # type: (LegacyVersion, str) -> bool
+ return prospective < self._coerce_version(spec)
+
+ def _compare_greater_than(self, prospective, spec):
+ # type: (LegacyVersion, str) -> bool
+ return prospective > self._coerce_version(spec)
+
+
+def _require_version_compare(
+ fn # type: (Callable[[Specifier, ParsedVersion, str], bool])
+):
+ # type: (...) -> Callable[[Specifier, ParsedVersion, str], bool]
+ @functools.wraps(fn)
+ def wrapped(self, prospective, spec):
+ # type: (Specifier, ParsedVersion, str) -> bool
+ if not isinstance(prospective, Version):
+ return False
+ return fn(self, prospective, spec)
+
+ return wrapped
+
+
+class Specifier(_IndividualSpecifier):
+
+ _regex_str = r"""
+ (?P(~=|==|!=|<=|>=|<|>|===))
+ (?P
+ (?:
+ # The identity operators allow for an escape hatch that will
+ # do an exact string match of the version you wish to install.
+ # This will not be parsed by PEP 440 and we cannot determine
+ # any semantic meaning from it. This operator is discouraged
+ # but included entirely as an escape hatch.
+ (?<====) # Only match for the identity operator
+ \s*
+ [^\s]* # We just match everything, except for whitespace
+ # since we are only testing for strict identity.
+ )
+ |
+ (?:
+ # The (non)equality operators allow for wild card and local
+ # versions to be specified so we have to define these two
+ # operators separately to enable that.
+ (?<===|!=) # Only match for equals and not equals
+
+ \s*
+ v?
+ (?:[0-9]+!)? # epoch
+ [0-9]+(?:\.[0-9]+)* # release
+ (?: # pre release
+ [-_\.]?
+ (a|b|c|rc|alpha|beta|pre|preview)
+ [-_\.]?
+ [0-9]*
+ )?
+ (?: # post release
+ (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+ )?
+
+ # You cannot use a wild card and a dev or local version
+ # together so group them with a | and make them optional.
+ (?:
+ (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
+ (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
+ |
+ \.\* # Wild card syntax of .*
+ )?
+ )
+ |
+ (?:
+ # The compatible operator requires at least two digits in the
+ # release segment.
+ (?<=~=) # Only match for the compatible operator
+
+ \s*
+ v?
+ (?:[0-9]+!)? # epoch
+ [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *)
+ (?: # pre release
+ [-_\.]?
+ (a|b|c|rc|alpha|beta|pre|preview)
+ [-_\.]?
+ [0-9]*
+ )?
+ (?: # post release
+ (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+ )?
+ (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
+ )
+ |
+ (?:
+ # All other operators only allow a sub set of what the
+ # (non)equality operators do. Specifically they do not allow
+ # local versions to be specified nor do they allow the prefix
+ # matching wild cards.
+ (?=": "greater_than_equal",
+ "<": "less_than",
+ ">": "greater_than",
+ "===": "arbitrary",
+ }
+
+ @_require_version_compare
+ def _compare_compatible(self, prospective, spec):
+ # type: (ParsedVersion, str) -> bool
+
+ # Compatible releases have an equivalent combination of >= and ==. That
+ # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
+ # implement this in terms of the other specifiers instead of
+ # implementing it ourselves. The only thing we need to do is construct
+ # the other specifiers.
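+ # For example (illustrative only), "~=2.2.post3" behaves like
+ # ">=2.2.post3,==2.*" and "~=1.4.5" behaves like ">=1.4.5,==1.4.*".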
+
+ # We want everything but the last item in the version, but we want to
+ # ignore post and dev releases and we want to treat the pre-release as
+ # its own separate segment.
+ prefix = ".".join(
+ list(
+ itertools.takewhile(
+ lambda x: (not x.startswith("post") and not x.startswith("dev")),
+ _version_split(spec),
+ )
+ )[:-1]
+ )
+
+ # Add the prefix notation to the end of our string
+ prefix += ".*"
+
+ return self._get_operator(">=")(prospective, spec) and self._get_operator("==")(
+ prospective, prefix
+ )
+
+ @_require_version_compare
+ def _compare_equal(self, prospective, spec):
+ # type: (ParsedVersion, str) -> bool
+
+ # We need special logic to handle prefix matching
+ if spec.endswith(".*"):
+ # In the case of prefix matching we want to ignore local segment.
+ prospective = Version(prospective.public)
+ # Split the spec out by dots, and pretend that there is an implicit
+ # dot in between a release segment and a pre-release segment.
+ split_spec = _version_split(spec[:-2]) # Remove the trailing .*
+
+ # Split the prospective version out by dots, and pretend that there
+ # is an implicit dot in between a release segment and a pre-release
+ # segment.
+ split_prospective = _version_split(str(prospective))
+
+ # Shorten the prospective version to be the same length as the spec
+ # so that we can determine if the specifier is a prefix of the
+ # prospective version or not.
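+ # Illustrative walk-through of the steps below: a spec of "2.2.*" splits
+ # to ["2", "2"], and a prospective "2.2.1" shortens to ["2", "2"], so the
+ # prefix comparison succeeds.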
+ shortened_prospective = split_prospective[: len(split_spec)]
+
+ # Pad out our two sides with zeros so that they both equal the same
+ # length.
+ padded_spec, padded_prospective = _pad_version(
+ split_spec, shortened_prospective
+ )
+
+ return padded_prospective == padded_spec
+ else:
+ # Convert our spec string into a Version
+ spec_version = Version(spec)
+
+ # If the specifier does not have a local segment, then we want to
+ # act as if the prospective version also does not have a local
+ # segment.
+ if not spec_version.local:
+ prospective = Version(prospective.public)
+
+ return prospective == spec_version
+
+ @_require_version_compare
+ def _compare_not_equal(self, prospective, spec):
+ # type: (ParsedVersion, str) -> bool
+ return not self._compare_equal(prospective, spec)
+
+ @_require_version_compare
+ def _compare_less_than_equal(self, prospective, spec):
+ # type: (ParsedVersion, str) -> bool
+
+ # NB: Local version identifiers are NOT permitted in the version
+ # specifier, so local version labels can be universally removed from
+ # the prospective version.
+ return Version(prospective.public) <= Version(spec)
+
+ @_require_version_compare
+ def _compare_greater_than_equal(self, prospective, spec):
+ # type: (ParsedVersion, str) -> bool
+
+ # NB: Local version identifiers are NOT permitted in the version
+ # specifier, so local version labels can be universally removed from
+ # the prospective version.
+ return Version(prospective.public) >= Version(spec)
+
+ @_require_version_compare
+ def _compare_less_than(self, prospective, spec_str):
+ # type: (ParsedVersion, str) -> bool
+
+ # Convert our spec to a Version instance, since we'll want to work with
+ # it as a version.
+ spec = Version(spec_str)
+
+ # Check to see if the prospective version is less than the spec
+ # version. If it's not we can short circuit and just return False now
+ # instead of doing extra unneeded work.
+ if not prospective < spec:
+ return False
+
+ # This special case is here so that, unless the specifier itself
+ # includes a pre-release version, we do not accept pre-release
+ # versions for the version mentioned in the specifier (e.g. <3.1 should
+ # not match 3.1.dev0, but should match 3.0.dev0).
+ if not spec.is_prerelease and prospective.is_prerelease:
+ if Version(prospective.base_version) == Version(spec.base_version):
+ return False
+
+ # If we've gotten to here, it means that prospective version is both
+ # less than the spec version *and* it's not a pre-release of the same
+ # version in the spec.
+ return True
+
+ @_require_version_compare
+ def _compare_greater_than(self, prospective, spec_str):
+ # type: (ParsedVersion, str) -> bool
+
+ # Convert our spec to a Version instance, since we'll want to work with
+ # it as a version.
+ spec = Version(spec_str)
+
+ # Check to see if the prospective version is greater than the spec
+ # version. If it's not we can short circuit and just return False now
+ # instead of doing extra unneeded work.
+ if not prospective > spec:
+ return False
+
+ # This special case is here so that, unless the specifier itself
+ # includes a post-release version, we do not accept
+ # post-release versions for the version mentioned in the specifier
+ # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
+ if not spec.is_postrelease and prospective.is_postrelease:
+ if Version(prospective.base_version) == Version(spec.base_version):
+ return False
+
+ # Ensure that we do not allow a local version of the version mentioned
+ # in the specifier, which is technically greater than, to match.
+ if prospective.local is not None:
+ if Version(prospective.base_version) == Version(spec.base_version):
+ return False
+
+ # If we've gotten to here, it means that prospective version is both
+ # greater than the spec version *and* it's not a pre-release of the
+ # same version in the spec.
+ return True
+
+ def _compare_arbitrary(self, prospective, spec):
+ # type: (Version, str) -> bool
+ return str(prospective).lower() == str(spec).lower()
+
+ @property
+ def prereleases(self):
+ # type: () -> bool
+
+ # If there is an explicit prereleases set for this, then we'll just
+ # blindly use that.
+ if self._prereleases is not None:
+ return self._prereleases
+
+ # Look at all of our specifiers and determine if they are inclusive
+ # operators, and if they are if they are including an explicit
+ # prerelease.
+ operator, version = self._spec
+ if operator in ["==", ">=", "<=", "~=", "==="]:
+ # The == specifier can include a trailing .*, if it does we
+ # want to remove before parsing.
+ if operator == "==" and version.endswith(".*"):
+ version = version[:-2]
+
+ # Parse the version, and if it is a pre-release than this
+ # specifier allows pre-releases.
+ if parse(version).is_prerelease:
+ return True
+
+ return False
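+ # For example (illustrative), Specifier(">=1.0a1").prereleases is True
+ # because the pinned version is itself a pre-release.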
+
+ @prereleases.setter
+ def prereleases(self, value):
+ # type: (bool) -> None
+ self._prereleases = value
+
+
+_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
+
+
+def _version_split(version):
+ # type: (str) -> List[str]
+ result = [] # type: List[str]
+ for item in version.split("."):
+ match = _prefix_regex.search(item)
+ if match:
+ result.extend(match.groups())
+ else:
+ result.append(item)
+ return result
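+ # Illustrative behaviour of _version_split: "1.2rc3" becomes
+ # ["1", "2", "rc3"], splitting the pre-release suffix out of the last
+ # numeric component.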
+
+
+def _pad_version(left, right):
+ # type: (List[str], List[str]) -> Tuple[List[str], List[str]]
+ left_split, right_split = [], []
+
+ # Get the release segment of our versions
+ left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
+ right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
+
+ # Get the rest of our versions
+ left_split.append(left[len(left_split[0]) :])
+ right_split.append(right[len(right_split[0]) :])
+
+ # Insert our padding
+ left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))
+ right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))
+
+ return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split)))
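+ # Illustrative behaviour of _pad_version: (["1", "2"], ["1", "2", "3"])
+ # becomes (["1", "2", "0"], ["1", "2", "3"]), zero-padding the shorter
+ # release segment.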
+
+
+class SpecifierSet(BaseSpecifier):
+ def __init__(self, specifiers="", prereleases=None):
+ # type: (str, Optional[bool]) -> None
+
+ # Split on "," to break each individual specifier into its own item, and
+ # strip each item to remove leading/trailing whitespace.
+ split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
+
+ # Parse each individual specifier, attempting first to make it a
+ # Specifier and falling back to a LegacySpecifier.
+ parsed = set()
+ for specifier in split_specifiers:
+ try:
+ parsed.add(Specifier(specifier))
+ except InvalidSpecifier:
+ parsed.add(LegacySpecifier(specifier))
+
+ # Turn our parsed specifiers into a frozen set and save them for later.
+ self._specs = frozenset(parsed)
+
+ # Store our prereleases value so we can use it later to determine if
+ # we accept prereleases or not.
+ self._prereleases = prereleases
+
+ def __repr__(self):
+ # type: () -> str
+ pre = (
+ ", prereleases={0!r}".format(self.prereleases)
+ if self._prereleases is not None
+ else ""
+ )
+
+ return "<SpecifierSet({0!r}{1})>".format(str(self), pre)
+
+ def __str__(self):
+ # type: () -> str
+ return ",".join(sorted(str(s) for s in self._specs))
+
+ def __hash__(self):
+ # type: () -> int
+ return hash(self._specs)
+
+ def __and__(self, other):
+ # type: (Union[SpecifierSet, str]) -> SpecifierSet
+ if isinstance(other, string_types):
+ other = SpecifierSet(other)
+ elif not isinstance(other, SpecifierSet):
+ return NotImplemented
+
+ specifier = SpecifierSet()
+ specifier._specs = frozenset(self._specs | other._specs)
+
+ if self._prereleases is None and other._prereleases is not None:
+ specifier._prereleases = other._prereleases
+ elif self._prereleases is not None and other._prereleases is None:
+ specifier._prereleases = self._prereleases
+ elif self._prereleases == other._prereleases:
+ specifier._prereleases = self._prereleases
+ else:
+ raise ValueError(
+ "Cannot combine SpecifierSets with True and False prerelease "
+ "overrides."
+ )
+
+ return specifier
+
+ def __eq__(self, other):
+ # type: (object) -> bool
+ if isinstance(other, (string_types, _IndividualSpecifier)):
+ other = SpecifierSet(str(other))
+ elif not isinstance(other, SpecifierSet):
+ return NotImplemented
+
+ return self._specs == other._specs
+
+ def __ne__(self, other):
+ # type: (object) -> bool
+ if isinstance(other, (string_types, _IndividualSpecifier)):
+ other = SpecifierSet(str(other))
+ elif not isinstance(other, SpecifierSet):
+ return NotImplemented
+
+ return self._specs != other._specs
+
+ def __len__(self):
+ # type: () -> int
+ return len(self._specs)
+
+ def __iter__(self):
+ # type: () -> Iterator[FrozenSet[_IndividualSpecifier]]
+ return iter(self._specs)
+
+ @property
+ def prereleases(self):
+ # type: () -> Optional[bool]
+
+ # If we have been given an explicit prerelease modifier, then we'll
+ # pass that through here.
+ if self._prereleases is not None:
+ return self._prereleases
+
+ # If we don't have any specifiers, and we don't have a forced value,
+ # then we'll just return None since we don't know if this should have
+ # pre-releases or not.
+ if not self._specs:
+ return None
+
+ # Otherwise we'll see if any of the given specifiers accept
+ # prereleases, if any of them do we'll return True, otherwise False.
+ return any(s.prereleases for s in self._specs)
+
+ @prereleases.setter
+ def prereleases(self, value):
+ # type: (bool) -> None
+ self._prereleases = value
+
+ def __contains__(self, item):
+ # type: (Union[ParsedVersion, str]) -> bool
+ return self.contains(item)
+
+ def contains(self, item, prereleases=None):
+ # type: (Union[ParsedVersion, str], Optional[bool]) -> bool
+
+ # Ensure that our item is a Version or LegacyVersion instance.
+ if not isinstance(item, (LegacyVersion, Version)):
+ item = parse(item)
+
+ # Determine if we're forcing a prerelease or not, if we're not forcing
+ # one for this particular filter call, then we'll use whatever the
+ # SpecifierSet thinks for whether or not we should support prereleases.
+ if prereleases is None:
+ prereleases = self.prereleases
+
+ # We can determine if we're going to allow pre-releases by looking to
+ # see if any of the underlying items supports them. If none of them do
+ # and this item is a pre-release then we do not allow it and we can
+ # short circuit that here.
+ # Note: This means that 1.0.dev1 would not be contained in something
+ # like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0
+ if not prereleases and item.is_prerelease:
+ return False
+
+ # We simply dispatch to the underlying specs here to make sure that the
+ # given version is contained within all of them.
+ # Note: This use of all() here means that an empty set of specifiers
+ # will always return True, this is an explicit design decision.
+ return all(s.contains(item, prereleases=prereleases) for s in self._specs)
+
+ def filter(
+ self,
+ iterable, # type: Iterable[Union[ParsedVersion, str]]
+ prereleases=None, # type: Optional[bool]
+ ):
+ # type: (...) -> Iterable[Union[ParsedVersion, str]]
+
+ # Determine if we're forcing a prerelease or not, if we're not forcing
+ # one for this particular filter call, then we'll use whatever the
+ # SpecifierSet thinks for whether or not we should support prereleases.
+ if prereleases is None:
+ prereleases = self.prereleases
+
+ # If we have any specifiers, then we want to wrap our iterable in the
+ # filter method for each one, this will act as a logical AND amongst
+ # each specifier.
+ if self._specs:
+ for spec in self._specs:
+ iterable = spec.filter(iterable, prereleases=bool(prereleases))
+ return iterable
+ # If we do not have any specifiers, then we need to have a rough filter
+ # which will filter out any pre-releases, unless there are no final
+ # releases, and which will filter out LegacyVersion in general.
+ else:
+ filtered = [] # type: List[Union[ParsedVersion, str]]
+ found_prereleases = [] # type: List[Union[ParsedVersion, str]]
+
+ for item in iterable:
+ # Ensure that we have some kind of Version class for this item.
+ if not isinstance(item, (LegacyVersion, Version)):
+ parsed_version = parse(item)
+ else:
+ parsed_version = item
+
+ # Filter out any item which is parsed as a LegacyVersion
+ if isinstance(parsed_version, LegacyVersion):
+ continue
+
+ # Store any item which is a pre-release for later unless we've
+ # already found a final version or we are accepting prereleases
+ if parsed_version.is_prerelease and not prereleases:
+ if not filtered:
+ found_prereleases.append(item)
+ else:
+ filtered.append(item)
+
+ # If we've found no items except for pre-releases, then we'll go
+ # ahead and use the pre-releases
+ if not filtered and found_prereleases and prereleases is None:
+ return found_prereleases
+
+ return filtered
diff --git a/venv/lib/python3.9/site-packages/pkg_resources/_vendor/packaging/tags.py b/venv/lib/python3.9/site-packages/pkg_resources/_vendor/packaging/tags.py
new file mode 100644
index 0000000..9064910
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pkg_resources/_vendor/packaging/tags.py
@@ -0,0 +1,751 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+from __future__ import absolute_import
+
+import distutils.util
+
+try:
+ from importlib.machinery import EXTENSION_SUFFIXES
+except ImportError: # pragma: no cover
+ import imp
+
+ EXTENSION_SUFFIXES = [x[0] for x in imp.get_suffixes()]
+ del imp
+import logging
+import os
+import platform
+import re
+import struct
+import sys
+import sysconfig
+import warnings
+
+from ._typing import TYPE_CHECKING, cast
+
+if TYPE_CHECKING: # pragma: no cover
+ from typing import (
+ Dict,
+ FrozenSet,
+ IO,
+ Iterable,
+ Iterator,
+ List,
+ Optional,
+ Sequence,
+ Tuple,
+ Union,
+ )
+
+ PythonVersion = Sequence[int]
+ MacVersion = Tuple[int, int]
+ GlibcVersion = Tuple[int, int]
+
+
+logger = logging.getLogger(__name__)
+
+INTERPRETER_SHORT_NAMES = {
+ "python": "py", # Generic.
+ "cpython": "cp",
+ "pypy": "pp",
+ "ironpython": "ip",
+ "jython": "jy",
+} # type: Dict[str, str]
+
+
+_32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32
+
+
+class Tag(object):
+ """
+ A representation of the tag triple for a wheel.
+
+ Instances are considered immutable and thus are hashable. Equality checking
+ is also supported.
+ """
+
+ __slots__ = ["_interpreter", "_abi", "_platform"]
+
+ def __init__(self, interpreter, abi, platform):
+ # type: (str, str, str) -> None
+ self._interpreter = interpreter.lower()
+ self._abi = abi.lower()
+ self._platform = platform.lower()
+
+ @property
+ def interpreter(self):
+ # type: () -> str
+ return self._interpreter
+
+ @property
+ def abi(self):
+ # type: () -> str
+ return self._abi
+
+ @property
+ def platform(self):
+ # type: () -> str
+ return self._platform
+
+ def __eq__(self, other):
+ # type: (object) -> bool
+ if not isinstance(other, Tag):
+ return NotImplemented
+
+ return (
+ (self.platform == other.platform)
+ and (self.abi == other.abi)
+ and (self.interpreter == other.interpreter)
+ )
+
+ def __hash__(self):
+ # type: () -> int
+ return hash((self._interpreter, self._abi, self._platform))
+
+ def __str__(self):
+ # type: () -> str
+ return "{}-{}-{}".format(self._interpreter, self._abi, self._platform)
+
+ def __repr__(self):
+ # type: () -> str
+ return "<{self} @ {self_id}>".format(self=self, self_id=id(self))
+
+
+def parse_tag(tag):
+ # type: (str) -> FrozenSet[Tag]
+ """
+ Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances.
+
+ Returning a set is required due to the possibility that the tag is a
+ compressed tag set.
+ """
+ tags = set()
+ interpreters, abis, platforms = tag.split("-")
+ for interpreter in interpreters.split("."):
+ for abi in abis.split("."):
+ for platform_ in platforms.split("."):
+ tags.add(Tag(interpreter, abi, platform_))
+ return frozenset(tags)
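+ # For example (illustrative), parse_tag("py2.py3-none-any") returns the
+ # frozenset {Tag("py2", "none", "any"), Tag("py3", "none", "any")}.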
+
+
+def _warn_keyword_parameter(func_name, kwargs):
+ # type: (str, Dict[str, bool]) -> bool
+ """
+ Backwards-compatibility with Python 2.7 to allow treating 'warn' as keyword-only.
+ """
+ if not kwargs:
+ return False
+ elif len(kwargs) > 1 or "warn" not in kwargs:
+ kwargs.pop("warn", None)
+ arg = next(iter(kwargs.keys()))
+ raise TypeError(
+ "{}() got an unexpected keyword argument {!r}".format(func_name, arg)
+ )
+ return kwargs["warn"]
+
+
+def _get_config_var(name, warn=False):
+ # type: (str, bool) -> Union[int, str, None]
+ value = sysconfig.get_config_var(name)
+ if value is None and warn:
+ logger.debug(
+ "Config variable '%s' is unset, Python ABI tag may be incorrect", name
+ )
+ return value
+
+
+def _normalize_string(string):
+ # type: (str) -> str
+ return string.replace(".", "_").replace("-", "_")
+
+
+def _abi3_applies(python_version):
+ # type: (PythonVersion) -> bool
+ """
+ Determine if the Python version supports abi3.
+
+ PEP 384 was first implemented in Python 3.2.
+ """
+ return len(python_version) > 1 and tuple(python_version) >= (3, 2)
+
+
+def _cpython_abis(py_version, warn=False):
+ # type: (PythonVersion, bool) -> List[str]
+ py_version = tuple(py_version) # To allow for version comparison.
+ abis = []
+ version = _version_nodot(py_version[:2])
+ debug = pymalloc = ucs4 = ""
+ with_debug = _get_config_var("Py_DEBUG", warn)
+ has_refcount = hasattr(sys, "gettotalrefcount")
+ # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled
+ # extension modules is the best option.
+ # https://github.com/pypa/pip/issues/3383#issuecomment-173267692
+ has_ext = "_d.pyd" in EXTENSION_SUFFIXES
+ if with_debug or (with_debug is None and (has_refcount or has_ext)):
+ debug = "d"
+ if py_version < (3, 8):
+ with_pymalloc = _get_config_var("WITH_PYMALLOC", warn)
+ if with_pymalloc or with_pymalloc is None:
+ pymalloc = "m"
+ if py_version < (3, 3):
+ unicode_size = _get_config_var("Py_UNICODE_SIZE", warn)
+ if unicode_size == 4 or (
+ unicode_size is None and sys.maxunicode == 0x10FFFF
+ ):
+ ucs4 = "u"
+ elif debug:
+ # Debug builds can also load "normal" extension modules.
+ # We can also assume no UCS-4 or pymalloc requirement.
+ abis.append("cp{version}".format(version=version))
+ abis.insert(
+ 0,
+ "cp{version}{debug}{pymalloc}{ucs4}".format(
+ version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4
+ ),
+ )
+ return abis
+
+
+def cpython_tags(
+ python_version=None, # type: Optional[PythonVersion]
+ abis=None, # type: Optional[Iterable[str]]
+ platforms=None, # type: Optional[Iterable[str]]
+ **kwargs # type: bool
+):
+ # type: (...) -> Iterator[Tag]
+ """
+ Yields the tags for a CPython interpreter.
+
+ The tags consist of:
+ - cp<python_version>-<abi>-<platform>
+ - cp<python_version>-abi3-<platform>
+ - cp<python_version>-none-<platform>
+ - cp<less than python_version>-abi3-<platform> # Older Python versions down to 3.2.
+
+ If python_version only specifies a major version then user-provided ABIs and
+ the 'none' ABI will be used.
+
+ If 'abi3' or 'none' are specified in 'abis' then they will be yielded at
+ their normal position and not at the beginning.
+ """
+ warn = _warn_keyword_parameter("cpython_tags", kwargs)
+ if not python_version:
+ python_version = sys.version_info[:2]
+
+ interpreter = "cp{}".format(_version_nodot(python_version[:2]))
+
+ if abis is None:
+ if len(python_version) > 1:
+ abis = _cpython_abis(python_version, warn)
+ else:
+ abis = []
+ abis = list(abis)
+ # 'abi3' and 'none' are explicitly handled later.
+ for explicit_abi in ("abi3", "none"):
+ try:
+ abis.remove(explicit_abi)
+ except ValueError:
+ pass
+
+ platforms = list(platforms or _platform_tags())
+ for abi in abis:
+ for platform_ in platforms:
+ yield Tag(interpreter, abi, platform_)
+ if _abi3_applies(python_version):
+ for tag in (Tag(interpreter, "abi3", platform_) for platform_ in platforms):
+ yield tag
+ for tag in (Tag(interpreter, "none", platform_) for platform_ in platforms):
+ yield tag
+
+ if _abi3_applies(python_version):
+ for minor_version in range(python_version[1] - 1, 1, -1):
+ for platform_ in platforms:
+ interpreter = "cp{version}".format(
+ version=_version_nodot((python_version[0], minor_version))
+ )
+ yield Tag(interpreter, "abi3", platform_)
+
+
+def _generic_abi():
+ # type: () -> Iterator[str]
+ abi = sysconfig.get_config_var("SOABI")
+ if abi:
+ yield _normalize_string(abi)
+
+
+def generic_tags(
+ interpreter=None, # type: Optional[str]
+ abis=None, # type: Optional[Iterable[str]]
+ platforms=None, # type: Optional[Iterable[str]]
+ **kwargs # type: bool
+):
+ # type: (...) -> Iterator[Tag]
+ """
+ Yields the tags for a generic interpreter.
+
+ The tags consist of:
+ - <interpreter>-<abi>-<platform>
+
+ The "none" ABI will be added if it was not explicitly provided.
+ """
+ warn = _warn_keyword_parameter("generic_tags", kwargs)
+ if not interpreter:
+ interp_name = interpreter_name()
+ interp_version = interpreter_version(warn=warn)
+ interpreter = "".join([interp_name, interp_version])
+ if abis is None:
+ abis = _generic_abi()
+ platforms = list(platforms or _platform_tags())
+ abis = list(abis)
+ if "none" not in abis:
+ abis.append("none")
+ for abi in abis:
+ for platform_ in platforms:
+ yield Tag(interpreter, abi, platform_)
+
+
+def _py_interpreter_range(py_version):
+ # type: (PythonVersion) -> Iterator[str]
+ """
+ Yields Python versions in descending order.
+
+ After the latest version, the major-only version will be yielded, and then
+ all previous versions of that major version.
+ """
+ if len(py_version) > 1:
+ yield "py{version}".format(version=_version_nodot(py_version[:2]))
+ yield "py{major}".format(major=py_version[0])
+ if len(py_version) > 1:
+ for minor in range(py_version[1] - 1, -1, -1):
+ yield "py{version}".format(version=_version_nodot((py_version[0], minor)))
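+ # For example (illustrative), _py_interpreter_range((3, 8)) yields "py38",
+ # "py3", "py37", "py36", and so on down to "py30".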
+
+
+def compatible_tags(
+ python_version=None, # type: Optional[PythonVersion]
+ interpreter=None, # type: Optional[str]
+ platforms=None, # type: Optional[Iterable[str]]
+):
+ # type: (...) -> Iterator[Tag]
+ """
+ Yields the sequence of tags that are compatible with a specific version of Python.
+
+ The tags consist of:
+ - py*-none-<platform>
+ - <interpreter>-none-any # ... if `interpreter` is provided.
+ - py*-none-any
+ """
+ if not python_version:
+ python_version = sys.version_info[:2]
+ platforms = list(platforms or _platform_tags())
+ for version in _py_interpreter_range(python_version):
+ for platform_ in platforms:
+ yield Tag(version, "none", platform_)
+ if interpreter:
+ yield Tag(interpreter, "none", "any")
+ for version in _py_interpreter_range(python_version):
+ yield Tag(version, "none", "any")
+
+
+def _mac_arch(arch, is_32bit=_32_BIT_INTERPRETER):
+ # type: (str, bool) -> str
+ if not is_32bit:
+ return arch
+
+ if arch.startswith("ppc"):
+ return "ppc"
+
+ return "i386"
+
+
+def _mac_binary_formats(version, cpu_arch):
+ # type: (MacVersion, str) -> List[str]
+ formats = [cpu_arch]
+ if cpu_arch == "x86_64":
+ if version < (10, 4):
+ return []
+ formats.extend(["intel", "fat64", "fat32"])
+
+ elif cpu_arch == "i386":
+ if version < (10, 4):
+ return []
+ formats.extend(["intel", "fat32", "fat"])
+
+ elif cpu_arch == "ppc64":
+ # TODO: Need to care about 32-bit PPC for ppc64 through 10.2?
+ if version > (10, 5) or version < (10, 4):
+ return []
+ formats.append("fat64")
+
+ elif cpu_arch == "ppc":
+ if version > (10, 6):
+ return []
+ formats.extend(["fat32", "fat"])
+
+ formats.append("universal")
+ return formats
+
+
+def mac_platforms(version=None, arch=None):
+ # type: (Optional[MacVersion], Optional[str]) -> Iterator[str]
+ """
+ Yields the platform tags for a macOS system.
+
+ The `version` parameter is a two-item tuple specifying the macOS version to
+ generate platform tags for. The `arch` parameter is the CPU architecture to
+ generate platform tags for. Both parameters default to the appropriate value
+ for the current system.
+ """
+ version_str, _, cpu_arch = platform.mac_ver() # type: ignore
+ if version is None:
+ version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2])))
+ else:
+ version = version
+ if arch is None:
+ arch = _mac_arch(cpu_arch)
+ else:
+ arch = arch
+ for minor_version in range(version[1], -1, -1):
+ compat_version = version[0], minor_version
+ binary_formats = _mac_binary_formats(compat_version, arch)
+ for binary_format in binary_formats:
+ yield "macosx_{major}_{minor}_{binary_format}".format(
+ major=compat_version[0],
+ minor=compat_version[1],
+ binary_format=binary_format,
+ )
+
+
+# From PEP 513.
+def _is_manylinux_compatible(name, glibc_version):
+ # type: (str, GlibcVersion) -> bool
+ # Check for presence of _manylinux module.
+ try:
+ import _manylinux # noqa
+
+ return bool(getattr(_manylinux, name + "_compatible"))
+ except (ImportError, AttributeError):
+ # Fall through to heuristic check below.
+ pass
+
+ return _have_compatible_glibc(*glibc_version)
+
+
+def _glibc_version_string():
+ # type: () -> Optional[str]
+ # Returns glibc version string, or None if not using glibc.
+ return _glibc_version_string_confstr() or _glibc_version_string_ctypes()
+
+
+def _glibc_version_string_confstr():
+ # type: () -> Optional[str]
+ """
+ Primary implementation of glibc_version_string using os.confstr.
+ """
+ # os.confstr is quite a bit faster than ctypes.DLL. It's also less likely
+ # to be broken or missing. This strategy is used in the standard library
+ # platform module.
+ # https://github.com/python/cpython/blob/fcf1d003bf4f0100c9d0921ff3d70e1127ca1b71/Lib/platform.py#L175-L183
+ try:
+ # os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17".
+ version_string = os.confstr( # type: ignore[attr-defined] # noqa: F821
+ "CS_GNU_LIBC_VERSION"
+ )
+ assert version_string is not None
+ _, version = version_string.split() # type: Tuple[str, str]
+ except (AssertionError, AttributeError, OSError, ValueError):
+ # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...
+ return None
+ return version
+
+
+def _glibc_version_string_ctypes():
+ # type: () -> Optional[str]
+ """
+ Fallback implementation of glibc_version_string using ctypes.
+ """
+ try:
+ import ctypes
+ except ImportError:
+ return None
+
+ # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
+ # manpage says, "If filename is NULL, then the returned handle is for the
+ # main program". This way we can let the linker do the work to figure out
+ # which libc our process is actually using.
+ #
+ # Note: typeshed is wrong here so we are ignoring this line.
+ process_namespace = ctypes.CDLL(None) # type: ignore
+ try:
+ gnu_get_libc_version = process_namespace.gnu_get_libc_version
+ except AttributeError:
+ # Symbol doesn't exist -> therefore, we are not linked to
+ # glibc.
+ return None
+
+ # Call gnu_get_libc_version, which returns a string like "2.5"
+ gnu_get_libc_version.restype = ctypes.c_char_p
+ version_str = gnu_get_libc_version() # type: str
+ # py2 / py3 compatibility:
+ if not isinstance(version_str, str):
+ version_str = version_str.decode("ascii")
+
+ return version_str
+
+
+# Separated out from have_compatible_glibc for easier unit testing.
+def _check_glibc_version(version_str, required_major, minimum_minor):
+ # type: (str, int, int) -> bool
+ # Parse string and check against requested version.
+ #
+ # We use a regexp instead of str.split because we want to discard any
+ # random junk that might come after the minor version -- this might happen
+ # in patched/forked versions of glibc (e.g. Linaro's version of glibc
+ # uses version strings like "2.20-2014.11"). See gh-3588.
+ m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
+ if not m:
+ warnings.warn(
+ "Expected glibc version with 2 components major.minor,"
+ " got: %s" % version_str,
+ RuntimeWarning,
+ )
+ return False
+ return (
+ int(m.group("major")) == required_major
+ and int(m.group("minor")) >= minimum_minor
+ )
+
+
+def _have_compatible_glibc(required_major, minimum_minor):
+ # type: (int, int) -> bool
+ version_str = _glibc_version_string()
+ if version_str is None:
+ return False
+ return _check_glibc_version(version_str, required_major, minimum_minor)
+
+
+# Python does not provide platform information at sufficient granularity to
+# identify the architecture of the running executable in some cases, so we
+# determine it dynamically by reading the information from the running
+# process. This only applies on Linux, which uses the ELF format.
+class _ELFFileHeader(object):
+ # https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header
+ class _InvalidELFFileHeader(ValueError):
+ """
+ An invalid ELF file header was found.
+ """
+
+ ELF_MAGIC_NUMBER = 0x7F454C46
+ ELFCLASS32 = 1
+ ELFCLASS64 = 2
+ ELFDATA2LSB = 1
+ ELFDATA2MSB = 2
+ EM_386 = 3
+ EM_S390 = 22
+ EM_ARM = 40
+ EM_X86_64 = 62
+ EF_ARM_ABIMASK = 0xFF000000
+ EF_ARM_ABI_VER5 = 0x05000000
+ EF_ARM_ABI_FLOAT_HARD = 0x00000400
+
+ def __init__(self, file):
+ # type: (IO[bytes]) -> None
+ def unpack(fmt):
+ # type: (str) -> int
+ try:
+ (result,) = struct.unpack(
+ fmt, file.read(struct.calcsize(fmt))
+ ) # type: (int, )
+ except struct.error:
+ raise _ELFFileHeader._InvalidELFFileHeader()
+ return result
+
+ self.e_ident_magic = unpack(">I")
+ if self.e_ident_magic != self.ELF_MAGIC_NUMBER:
+ raise _ELFFileHeader._InvalidELFFileHeader()
+ self.e_ident_class = unpack("B")
+ if self.e_ident_class not in {self.ELFCLASS32, self.ELFCLASS64}:
+ raise _ELFFileHeader._InvalidELFFileHeader()
+ self.e_ident_data = unpack("B")
+ if self.e_ident_data not in {self.ELFDATA2LSB, self.ELFDATA2MSB}:
+ raise _ELFFileHeader._InvalidELFFileHeader()
+ self.e_ident_version = unpack("B")
+ self.e_ident_osabi = unpack("B")
+ self.e_ident_abiversion = unpack("B")
+ self.e_ident_pad = file.read(7)
+ format_h = "H"
+ format_i = "I"
+ format_q = "Q"
+ format_p = format_i if self.e_ident_class == self.ELFCLASS32 else format_q
+ self.e_type = unpack(format_h)
+ self.e_machine = unpack(format_h)
+ self.e_version = unpack(format_i)
+ self.e_entry = unpack(format_p)
+ self.e_phoff = unpack(format_p)
+ self.e_shoff = unpack(format_p)
+ self.e_flags = unpack(format_i)
+ self.e_ehsize = unpack(format_h)
+ self.e_phentsize = unpack(format_h)
+ self.e_phnum = unpack(format_h)
+ self.e_shentsize = unpack(format_h)
+ self.e_shnum = unpack(format_h)
+ self.e_shstrndx = unpack(format_h)
+
+
+def _get_elf_header():
+ # type: () -> Optional[_ELFFileHeader]
+ try:
+ with open(sys.executable, "rb") as f:
+ elf_header = _ELFFileHeader(f)
+ except (IOError, OSError, TypeError, _ELFFileHeader._InvalidELFFileHeader):
+ return None
+ return elf_header
+
+
+def _is_linux_armhf():
+ # type: () -> bool
+ # hard-float ABI can be detected from the ELF header of the running
+ # process
+ # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf
+ elf_header = _get_elf_header()
+ if elf_header is None:
+ return False
+ result = elf_header.e_ident_class == elf_header.ELFCLASS32
+ result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB
+ result &= elf_header.e_machine == elf_header.EM_ARM
+ result &= (
+ elf_header.e_flags & elf_header.EF_ARM_ABIMASK
+ ) == elf_header.EF_ARM_ABI_VER5
+ result &= (
+ elf_header.e_flags & elf_header.EF_ARM_ABI_FLOAT_HARD
+ ) == elf_header.EF_ARM_ABI_FLOAT_HARD
+ return result
+
+
+def _is_linux_i686():
+ # type: () -> bool
+ elf_header = _get_elf_header()
+ if elf_header is None:
+ return False
+ result = elf_header.e_ident_class == elf_header.ELFCLASS32
+ result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB
+ result &= elf_header.e_machine == elf_header.EM_386
+ return result
+
+
+def _have_compatible_manylinux_abi(arch):
+ # type: (str) -> bool
+ if arch == "armv7l":
+ return _is_linux_armhf()
+ if arch == "i686":
+ return _is_linux_i686()
+ return True
+
+
+def _linux_platforms(is_32bit=_32_BIT_INTERPRETER):
+ # type: (bool) -> Iterator[str]
+ linux = _normalize_string(distutils.util.get_platform())
+ if is_32bit:
+ if linux == "linux_x86_64":
+ linux = "linux_i686"
+ elif linux == "linux_aarch64":
+ linux = "linux_armv7l"
+ manylinux_support = []
+ _, arch = linux.split("_", 1)
+ if _have_compatible_manylinux_abi(arch):
+ if arch in {"x86_64", "i686", "aarch64", "armv7l", "ppc64", "ppc64le", "s390x"}:
+ manylinux_support.append(
+ ("manylinux2014", (2, 17))
+ ) # CentOS 7 w/ glibc 2.17 (PEP 599)
+ if arch in {"x86_64", "i686"}:
+ manylinux_support.append(
+ ("manylinux2010", (2, 12))
+ ) # CentOS 6 w/ glibc 2.12 (PEP 571)
+ manylinux_support.append(
+ ("manylinux1", (2, 5))
+ ) # CentOS 5 w/ glibc 2.5 (PEP 513)
+ manylinux_support_iter = iter(manylinux_support)
+ for name, glibc_version in manylinux_support_iter:
+ if _is_manylinux_compatible(name, glibc_version):
+ yield linux.replace("linux", name)
+ break
+ # Support for a later manylinux implies support for an earlier version.
+ for name, _ in manylinux_support_iter:
+ yield linux.replace("linux", name)
+ yield linux
+
+
+def _generic_platforms():
+ # type: () -> Iterator[str]
+ yield _normalize_string(distutils.util.get_platform())
+
+
+def _platform_tags():
+ # type: () -> Iterator[str]
+ """
+ Provides the platform tags for this installation.
+ """
+ if platform.system() == "Darwin":
+ return mac_platforms()
+ elif platform.system() == "Linux":
+ return _linux_platforms()
+ else:
+ return _generic_platforms()
+
+
+def interpreter_name():
+ # type: () -> str
+ """
+ Returns the name of the running interpreter.
+ """
+ try:
+ name = sys.implementation.name # type: ignore
+ except AttributeError: # pragma: no cover
+ # Python 2.7 compatibility.
+ name = platform.python_implementation().lower()
+ return INTERPRETER_SHORT_NAMES.get(name) or name
+
+
+def interpreter_version(**kwargs):
+ # type: (bool) -> str
+ """
+ Returns the version of the running interpreter.
+ """
+ warn = _warn_keyword_parameter("interpreter_version", kwargs)
+ version = _get_config_var("py_version_nodot", warn=warn)
+ if version:
+ version = str(version)
+ else:
+ version = _version_nodot(sys.version_info[:2])
+ return version
+
+
+def _version_nodot(version):
+ # type: (PythonVersion) -> str
+ if any(v >= 10 for v in version):
+ sep = "_"
+ else:
+ sep = ""
+ return sep.join(map(str, version))
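+ # For example (illustrative), _version_nodot((3, 9)) gives "39" while
+ # _version_nodot((3, 10)) gives "3_10", avoiding the ambiguity of "310".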
+
+
+def sys_tags(**kwargs):
+ # type: (bool) -> Iterator[Tag]
+ """
+ Returns the sequence of tag triples for the running interpreter.
+
+ The order of the sequence corresponds to priority order for the
+ interpreter, from most to least important.
+ """
+ warn = _warn_keyword_parameter("sys_tags", kwargs)
+
+ interp_name = interpreter_name()
+ if interp_name == "cp":
+ for tag in cpython_tags(warn=warn):
+ yield tag
+ else:
+ for tag in generic_tags():
+ yield tag
+
+ for tag in compatible_tags():
+ yield tag
diff --git a/venv/lib/python3.9/site-packages/pkg_resources/_vendor/packaging/utils.py b/venv/lib/python3.9/site-packages/pkg_resources/_vendor/packaging/utils.py
new file mode 100644
index 0000000..19579c1
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pkg_resources/_vendor/packaging/utils.py
@@ -0,0 +1,65 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import re
+
+from ._typing import TYPE_CHECKING, cast
+from .version import InvalidVersion, Version
+
+if TYPE_CHECKING: # pragma: no cover
+ from typing import NewType, Union
+
+ NormalizedName = NewType("NormalizedName", str)
+
+_canonicalize_regex = re.compile(r"[-_.]+")
+
+
+def canonicalize_name(name):
+ # type: (str) -> NormalizedName
+ # This is taken from PEP 503.
+ value = _canonicalize_regex.sub("-", name).lower()
+ return cast("NormalizedName", value)
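+ # For example (illustrative), canonicalize_name("Foo.Bar_baz") returns
+ # "foo-bar-baz", collapsing runs of ".", "-" and "_" into single hyphens.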
+
+
+def canonicalize_version(_version):
+ # type: (str) -> Union[Version, str]
+ """
+ This is very similar to Version.__str__, but has one subtle difference
+ with the way it handles the release segment.
+ """
+
+ try:
+ version = Version(_version)
+ except InvalidVersion:
+ # Legacy versions cannot be normalized
+ return _version
+
+ parts = []
+
+ # Epoch
+ if version.epoch != 0:
+ parts.append("{0}!".format(version.epoch))
+
+ # Release segment
+ # NB: This strips trailing '.0's to normalize
+ parts.append(re.sub(r"(\.0)+$", "", ".".join(str(x) for x in version.release)))
+
+ # Pre-release
+ if version.pre is not None:
+ parts.append("".join(str(x) for x in version.pre))
+
+ # Post-release
+ if version.post is not None:
+ parts.append(".post{0}".format(version.post))
+
+ # Development release
+ if version.dev is not None:
+ parts.append(".dev{0}".format(version.dev))
+
+ # Local version segment
+ if version.local is not None:
+ parts.append("+{0}".format(version.local))
+
+ return "".join(parts)
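+ # For example (illustrative), canonicalize_version("1.4.0") returns "1.4",
+ # while str(Version("1.4.0")) stays "1.4.0"; only trailing ".0" release
+ # components are stripped here.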
diff --git a/venv/lib/python3.9/site-packages/pkg_resources/_vendor/packaging/version.py b/venv/lib/python3.9/site-packages/pkg_resources/_vendor/packaging/version.py
new file mode 100644
index 0000000..00371e8
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pkg_resources/_vendor/packaging/version.py
@@ -0,0 +1,535 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import collections
+import itertools
+import re
+
+from ._structures import Infinity, NegativeInfinity
+from ._typing import TYPE_CHECKING
+
+if TYPE_CHECKING: # pragma: no cover
+ from typing import Callable, Iterator, List, Optional, SupportsInt, Tuple, Union
+
+ from ._structures import InfinityType, NegativeInfinityType
+
+ InfiniteTypes = Union[InfinityType, NegativeInfinityType]
+ PrePostDevType = Union[InfiniteTypes, Tuple[str, int]]
+ SubLocalType = Union[InfiniteTypes, int, str]
+ LocalType = Union[
+ NegativeInfinityType,
+ Tuple[
+ Union[
+ SubLocalType,
+ Tuple[SubLocalType, str],
+ Tuple[NegativeInfinityType, SubLocalType],
+ ],
+ ...,
+ ],
+ ]
+ CmpKey = Tuple[
+ int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType
+ ]
+ LegacyCmpKey = Tuple[int, Tuple[str, ...]]
+ VersionComparisonMethod = Callable[
+ [Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool
+ ]
+
+__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"]
+
+
+_Version = collections.namedtuple(
+ "_Version", ["epoch", "release", "dev", "pre", "post", "local"]
+)
+
+
+def parse(version):
+ # type: (str) -> Union[LegacyVersion, Version]
+ """
+ Parse the given version string and return either a :class:`Version` object
+ or a :class:`LegacyVersion` object depending on if the given version is
+ a valid PEP 440 version or a legacy version.
+ """
+ try:
+ return Version(version)
+ except InvalidVersion:
+ return LegacyVersion(version)
+
+
+class InvalidVersion(ValueError):
+ """
+ An invalid version was found, users should refer to PEP 440.
+ """
+
+
+class _BaseVersion(object):
+ _key = None # type: Union[CmpKey, LegacyCmpKey]
+
+ def __hash__(self):
+ # type: () -> int
+ return hash(self._key)
+
+ def __lt__(self, other):
+ # type: (_BaseVersion) -> bool
+ return self._compare(other, lambda s, o: s < o)
+
+ def __le__(self, other):
+ # type: (_BaseVersion) -> bool
+ return self._compare(other, lambda s, o: s <= o)
+
+ def __eq__(self, other):
+ # type: (object) -> bool
+ return self._compare(other, lambda s, o: s == o)
+
+ def __ge__(self, other):
+ # type: (_BaseVersion) -> bool
+ return self._compare(other, lambda s, o: s >= o)
+
+ def __gt__(self, other):
+ # type: (_BaseVersion) -> bool
+ return self._compare(other, lambda s, o: s > o)
+
+ def __ne__(self, other):
+ # type: (object) -> bool
+ return self._compare(other, lambda s, o: s != o)
+
+ def _compare(self, other, method):
+ # type: (object, VersionComparisonMethod) -> Union[bool, NotImplemented]
+ if not isinstance(other, _BaseVersion):
+ return NotImplemented
+
+ return method(self._key, other._key)
+
+
+class LegacyVersion(_BaseVersion):
+ def __init__(self, version):
+ # type: (str) -> None
+ self._version = str(version)
+ self._key = _legacy_cmpkey(self._version)
+
+ def __str__(self):
+ # type: () -> str
+ return self._version
+
+ def __repr__(self):
+ # type: () -> str
+ return "<LegacyVersion({0})>".format(repr(str(self)))
+
+ @property
+ def public(self):
+ # type: () -> str
+ return self._version
+
+ @property
+ def base_version(self):
+ # type: () -> str
+ return self._version
+
+ @property
+ def epoch(self):
+ # type: () -> int
+ return -1
+
+ @property
+ def release(self):
+ # type: () -> None
+ return None
+
+ @property
+ def pre(self):
+ # type: () -> None
+ return None
+
+ @property
+ def post(self):
+ # type: () -> None
+ return None
+
+ @property
+ def dev(self):
+ # type: () -> None
+ return None
+
+ @property
+ def local(self):
+ # type: () -> None
+ return None
+
+ @property
+ def is_prerelease(self):
+ # type: () -> bool
+ return False
+
+ @property
+ def is_postrelease(self):
+ # type: () -> bool
+ return False
+
+ @property
+ def is_devrelease(self):
+ # type: () -> bool
+ return False
+
+
+_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE)
+
+_legacy_version_replacement_map = {
+ "pre": "c",
+ "preview": "c",
+ "-": "final-",
+ "rc": "c",
+ "dev": "@",
+}
+
+
+def _parse_version_parts(s):
+ # type: (str) -> Iterator[str]
+ for part in _legacy_version_component_re.split(s):
+ part = _legacy_version_replacement_map.get(part, part)
+
+ if not part or part == ".":
+ continue
+
+ if part[:1] in "0123456789":
+ # pad for numeric comparison
+ yield part.zfill(8)
+ else:
+ yield "*" + part
+
+ # ensure that alpha/beta/candidate are before final
+ yield "*final"
+
+
+def _legacy_cmpkey(version):
+ # type: (str) -> LegacyCmpKey
+
+ # We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch
+ # greater than or equal to 0. This effectively sorts the LegacyVersion,
+ # which uses the de facto standard originally implemented by setuptools,
+ # before all PEP 440 versions.
+ epoch = -1
+
+ # This scheme is taken from setuptools' pkg_resources.parse_version, prior
+ # to its adoption of the packaging library.
+ parts = [] # type: List[str]
+ for part in _parse_version_parts(version.lower()):
+ if part.startswith("*"):
+ # remove "-" before a prerelease tag
+ if part < "*final":
+ while parts and parts[-1] == "*final-":
+ parts.pop()
+
+ # remove trailing zeros from each series of numeric parts
+ while parts and parts[-1] == "00000000":
+ parts.pop()
+
+ parts.append(part)
+
+ return epoch, tuple(parts)
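+ # Illustrative: _legacy_cmpkey("1.0") is (-1, ("00000001", "*final"));
+ # numeric parts are zero-padded for string comparison and trailing zero
+ # components are dropped before the final marker.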
+
+
+# Deliberately not anchored to the start and end of the string, to make it
+# easier for 3rd party code to reuse
+VERSION_PATTERN = r"""
+ v?
+ (?:
+ (?:(?P<epoch>[0-9]+)!)? # epoch
+ (?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
+ (?P<pre> # pre-release
+ [-_\.]?
+ (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
+ [-_\.]?
+ (?P<pre_n>[0-9]+)?
+ )?
+ (?P<post> # post release
+ (?:-(?P<post_n1>[0-9]+))
+ |
+ (?:
+ [-_\.]?
+ (?P<post_l>post|rev|r)
+ [-_\.]?
+ (?P<post_n2>[0-9]+)?
+ )
+ )?
+ (?P<dev> # dev release
+ [-_\.]?
+ (?P<dev_l>dev)
+ [-_\.]?
+ (?P<dev_n>[0-9]+)?
+ )?
+ )
+ (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
+"""
+
+
+class Version(_BaseVersion):
+
+ _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+ def __init__(self, version):
+ # type: (str) -> None
+
+ # Validate the version and parse it into pieces
+ match = self._regex.search(version)
+ if not match:
+ raise InvalidVersion("Invalid version: '{0}'".format(version))
+
+ # Store the parsed out pieces of the version
+ self._version = _Version(
+ epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+ release=tuple(int(i) for i in match.group("release").split(".")),
+ pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
+ post=_parse_letter_version(
+ match.group("post_l"), match.group("post_n1") or match.group("post_n2")
+ ),
+ dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
+ local=_parse_local_version(match.group("local")),
+ )
+
+ # Generate a key which will be used for sorting
+ self._key = _cmpkey(
+ self._version.epoch,
+ self._version.release,
+ self._version.pre,
+ self._version.post,
+ self._version.dev,
+ self._version.local,
+ )
+
+ def __repr__(self):
+ # type: () -> str
+ return "<Version({0})>".format(repr(str(self)))
+
+ def __str__(self):
+ # type: () -> str
+ parts = []
+
+ # Epoch
+ if self.epoch != 0:
+ parts.append("{0}!".format(self.epoch))
+
+ # Release segment
+ parts.append(".".join(str(x) for x in self.release))
+
+ # Pre-release
+ if self.pre is not None:
+ parts.append("".join(str(x) for x in self.pre))
+
+ # Post-release
+ if self.post is not None:
+ parts.append(".post{0}".format(self.post))
+
+ # Development release
+ if self.dev is not None:
+ parts.append(".dev{0}".format(self.dev))
+
+ # Local version segment
+ if self.local is not None:
+ parts.append("+{0}".format(self.local))
+
+ return "".join(parts)
+
+ @property
+ def epoch(self):
+ # type: () -> int
+ _epoch = self._version.epoch # type: int
+ return _epoch
+
+ @property
+ def release(self):
+ # type: () -> Tuple[int, ...]
+ _release = self._version.release # type: Tuple[int, ...]
+ return _release
+
+ @property
+ def pre(self):
+ # type: () -> Optional[Tuple[str, int]]
+ _pre = self._version.pre # type: Optional[Tuple[str, int]]
+ return _pre
+
+ @property
+ def post(self):
+ # type: () -> Optional[Tuple[str, int]]
+ return self._version.post[1] if self._version.post else None
+
+ @property
+ def dev(self):
+ # type: () -> Optional[Tuple[str, int]]
+ return self._version.dev[1] if self._version.dev else None
+
+ @property
+ def local(self):
+ # type: () -> Optional[str]
+ if self._version.local:
+ return ".".join(str(x) for x in self._version.local)
+ else:
+ return None
+
+ @property
+ def public(self):
+ # type: () -> str
+ return str(self).split("+", 1)[0]
+
+ @property
+ def base_version(self):
+ # type: () -> str
+ parts = []
+
+ # Epoch
+ if self.epoch != 0:
+ parts.append("{0}!".format(self.epoch))
+
+ # Release segment
+ parts.append(".".join(str(x) for x in self.release))
+
+ return "".join(parts)
+
+ @property
+ def is_prerelease(self):
+ # type: () -> bool
+ return self.dev is not None or self.pre is not None
+
+ @property
+ def is_postrelease(self):
+ # type: () -> bool
+ return self.post is not None
+
+ @property
+ def is_devrelease(self):
+ # type: () -> bool
+ return self.dev is not None
+
+ @property
+ def major(self):
+ # type: () -> int
+ return self.release[0] if len(self.release) >= 1 else 0
+
+ @property
+ def minor(self):
+ # type: () -> int
+ return self.release[1] if len(self.release) >= 2 else 0
+
+ @property
+ def micro(self):
+ # type: () -> int
+ return self.release[2] if len(self.release) >= 3 else 0
+
+
+def _parse_letter_version(
+ letter, # type: str
+ number, # type: Union[str, bytes, SupportsInt]
+):
+ # type: (...) -> Optional[Tuple[str, int]]
+
+ if letter:
+ # We consider there to be an implicit 0 in a pre-release if there is
+ # not a numeral associated with it.
+ if number is None:
+ number = 0
+
+ # We normalize any letters to their lower case form
+ letter = letter.lower()
+
+ # We consider some words to be alternate spellings of other words and
+ # in those cases we want to normalize the spellings to our preferred
+ # spelling.
+ if letter == "alpha":
+ letter = "a"
+ elif letter == "beta":
+ letter = "b"
+ elif letter in ["c", "pre", "preview"]:
+ letter = "rc"
+ elif letter in ["rev", "r"]:
+ letter = "post"
+
+ return letter, int(number)
+ if not letter and number:
+ # We assume if we are given a number, but we are not given a letter
+ # then this is using the implicit post release syntax (e.g. 1.0-1)
+ letter = "post"
+
+ return letter, int(number)
+
+ return None
+
+
+_local_version_separators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local):
+ # type: (str) -> Optional[LocalType]
+ """
+ Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+ """
+ if local is not None:
+ return tuple(
+ part.lower() if not part.isdigit() else int(part)
+ for part in _local_version_separators.split(local)
+ )
+ return None
+
+
+def _cmpkey(
+ epoch, # type: int
+ release, # type: Tuple[int, ...]
+ pre, # type: Optional[Tuple[str, int]]
+ post, # type: Optional[Tuple[str, int]]
+ dev, # type: Optional[Tuple[str, int]]
+ local, # type: Optional[Tuple[SubLocalType]]
+):
+ # type: (...) -> CmpKey
+
+ # When we compare a release version, we want to compare it with all of the
+ # trailing zeros removed. So we'll reverse the list, drop all of the now
+ # leading zeros until we come to something non-zero, then re-reverse the
+ # rest back into the correct order, make it a tuple, and use that for our
+ # sorting key.
+ _release = tuple(
+ reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
+ )
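+ # For example (illustrative), a release of (1, 0, 0) collapses to (1,)
+ # here, so that "1.0.0" and "1" compare as equal.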
+
+ # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+ # We'll do this by abusing the pre segment, but we _only_ want to do this
+ # if there is not a pre or a post segment. If we have one of those then
+ # the normal sorting rules will handle this case correctly.
+ if pre is None and post is None and dev is not None:
+ _pre = NegativeInfinity # type: PrePostDevType
+ # Versions without a pre-release (except as noted above) should sort after
+ # those with one.
+ elif pre is None:
+ _pre = Infinity
+ else:
+ _pre = pre
+
+ # Versions without a post segment should sort before those with one.
+ if post is None:
+ _post = NegativeInfinity # type: PrePostDevType
+
+ else:
+ _post = post
+
+ # Versions without a development segment should sort after those with one.
+ if dev is None:
+ _dev = Infinity # type: PrePostDevType
+
+ else:
+ _dev = dev
+
+ if local is None:
+ # Versions without a local segment should sort before those with one.
+ _local = NegativeInfinity # type: LocalType
+ else:
+ # Versions with a local segment need that segment parsed to implement
+ # the sorting rules in PEP440.
+ # - Alpha numeric segments sort before numeric segments
+ # - Alpha numeric segments sort lexicographically
+ # - Numeric segments sort numerically
+ # - Shorter versions sort before longer versions when the prefixes
+ # match exactly
+ _local = tuple(
+ (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
+ )
+
+ return epoch, _release, _pre, _post, _dev, _local
diff --git a/venv/lib/python3.9/site-packages/pkg_resources/_vendor/pyparsing.py b/venv/lib/python3.9/site-packages/pkg_resources/_vendor/pyparsing.py
new file mode 100644
index 0000000..4cae788
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/pkg_resources/_vendor/pyparsing.py
@@ -0,0 +1,5742 @@
+# module pyparsing.py
+#
+# Copyright (c) 2003-2018 Paul T. McGuire
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+
+__doc__ = \
+"""
+pyparsing module - Classes and methods to define and execute parsing grammars
+=============================================================================
+
+The pyparsing module is an alternative approach to creating and executing simple grammars,
+vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you
+don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
+provides a library of classes that you use to construct the grammar directly in Python.
+
+Here is a program to parse "Hello, World!" (or any greeting of the form
+C{"<salutation>, <addressee>!"}), built up using L{Word}, L{Literal}, and L{And} elements
+(L{'+'} operator gives L{And} expressions, strings are auto-converted to
+L{Literal} expressions)::
+
+ from pyparsing import Word, alphas
+
+ # define grammar of a greeting
+ greet = Word(alphas) + "," + Word(alphas) + "!"
+
+ hello = "Hello, World!"
+ print (hello, "->", greet.parseString(hello))
+
+The program outputs the following::
+
+ Hello, World! -> ['Hello', ',', 'World', '!']
+
+The Python representation of the grammar is quite readable, owing to the self-explanatory
+class names, and the use of '+', '|' and '^' operators.
+
+The L{ParseResults} object returned from L{ParserElement.parseString} can be accessed as a nested list, a dictionary, or an
+object with named attributes.
+
+The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
+ - extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.)
+ - quoted strings
+ - embedded comments
+
+
+Getting Started -
+-----------------
+Visit the classes L{ParserElement} and L{ParseResults} to see the base classes that most other pyparsing
+classes inherit from. Use the docstrings for examples of how to:
+ - construct literal match expressions from L{Literal} and L{CaselessLiteral} classes
+ - construct character word-group expressions using the L{Word} class
+ - see how to create repetitive expressions using L{ZeroOrMore} and L{OneOrMore} classes
+ - use L{'+'}, L{'|'}, L{'^'}, and L{'&'} operators to combine simple expressions into more complex ones
+ - associate names with your parsed results using L{ParserElement.setResultsName}
+ - find some helpful expression short-cuts like L{delimitedList} and L{oneOf}
+ - find more useful common expressions in the L{pyparsing_common} namespace class
+"""
+
+__version__ = "2.2.1"
+__versionTime__ = "18 Sep 2018 00:49 UTC"
+__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>"
+
+import string
+from weakref import ref as wkref
+import copy
+import sys
+import warnings
+import re
+import sre_constants
+import collections
+import pprint
+import traceback
+import types
+from datetime import datetime
+
+try:
+ from _thread import RLock
+except ImportError:
+ from threading import RLock
+
+try:
+ # Python 3
+ from collections.abc import Iterable
+ from collections.abc import MutableMapping
+except ImportError:
+ # Python 2.7
+ from collections import Iterable
+ from collections import MutableMapping
+
+try:
+ from collections import OrderedDict as _OrderedDict
+except ImportError:
+ try:
+ from ordereddict import OrderedDict as _OrderedDict
+ except ImportError:
+ _OrderedDict = None
+
+#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
+
+__all__ = [
+'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
+'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
+'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
+'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
+'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
+'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter',
+'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',
+'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
+'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
+'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums',
+'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno',
+'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
+'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
+'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity',
+'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
+'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
+'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass',
+'CloseMatch', 'tokenMap', 'pyparsing_common',
+]
+
+system_version = tuple(sys.version_info)[:3]
+PY_3 = system_version[0] == 3
+if PY_3:
+ _MAX_INT = sys.maxsize
+ basestring = str
+ unichr = chr
+ _ustr = str
+
+ # build list of single arg builtins, that can be used as parse actions
+ singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max]
+
+else:
+ _MAX_INT = sys.maxint
+ range = xrange
+
+ def _ustr(obj):
+ """Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
+ str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
+ then < returns the unicode object | encodes it with the default encoding | ... >.
+ """
+ if isinstance(obj,unicode):
+ return obj
+
+ try:
+ # If this works, then _ustr(obj) has the same behaviour as str(obj), so
+ # it won't break any existing code.
+ return str(obj)
+
+ except UnicodeEncodeError:
+ # Else encode it
+ ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace')
+ xmlcharref = Regex(r'&#\d+;')
+ xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:])
+ return xmlcharref.transformString(ret)
+
+ # build list of single arg builtins, tolerant of Python version, that can be used as parse actions
+ singleArgBuiltins = []
+ import __builtin__
+ for fname in "sum len sorted reversed list tuple set any all min max".split():
+ try:
+ singleArgBuiltins.append(getattr(__builtin__,fname))
+ except AttributeError:
+ continue
+
+_generatorType = type((y for y in range(1)))
+
+def _xml_escape(data):
+ """Escape &, <, >, ", ', etc. in a string of data."""
+
+ # ampersand must be replaced first
+ from_symbols = '&><"\''
+ to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split())
+ for from_,to_ in zip(from_symbols, to_symbols):
+ data = data.replace(from_, to_)
+ return data
+
+class _Constants(object):
+ pass
+
+alphas = string.ascii_uppercase + string.ascii_lowercase
+nums = "0123456789"
+hexnums = nums + "ABCDEFabcdef"
+alphanums = alphas + nums
+_bslash = chr(92)
+printables = "".join(c for c in string.printable if c not in string.whitespace)
+
+class ParseBaseException(Exception):
+ """base exception class for all parsing runtime exceptions"""
+ # Performance tuning: we construct a *lot* of these, so keep this
+ # constructor as small and fast as possible
+ def __init__( self, pstr, loc=0, msg=None, elem=None ):
+ self.loc = loc
+ if msg is None:
+ self.msg = pstr
+ self.pstr = ""
+ else:
+ self.msg = msg
+ self.pstr = pstr
+ self.parserElement = elem
+ self.args = (pstr, loc, msg)
+
+ @classmethod
+ def _from_exception(cls, pe):
+ """
+ internal factory method to simplify creating one type of ParseException
+ from another - avoids having __init__ signature conflicts among subclasses
+ """
+ return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)
+
+ def __getattr__( self, aname ):
+ """supported attributes by name are:
+ - lineno - returns the line number of the exception text
+ - col - returns the column number of the exception text
+ - line - returns the line containing the exception text
+ """
+ if( aname == "lineno" ):
+ return lineno( self.loc, self.pstr )
+ elif( aname in ("col", "column") ):
+ return col( self.loc, self.pstr )
+ elif( aname == "line" ):
+ return line( self.loc, self.pstr )
+ else:
+ raise AttributeError(aname)
+
+ def __str__( self ):
+ return "%s (at char %d), (line:%d, col:%d)" % \
+ ( self.msg, self.loc, self.lineno, self.column )
+ def __repr__( self ):
+ return _ustr(self)
+ def markInputline( self, markerString = ">!<" ):
+ """Extracts the exception line from the input string, and marks
+ the location of the exception with a special symbol.
+ """
+ line_str = self.line
+ line_column = self.column - 1
+ if markerString:
+ line_str = "".join((line_str[:line_column],
+ markerString, line_str[line_column:]))
+ return line_str.strip()
+ def __dir__(self):
+ return "lineno col line".split() + dir(type(self))
+
+class ParseException(ParseBaseException):
+ """
+ Exception thrown when parse expressions don't match class;
+ supported attributes by name are:
+ - lineno - returns the line number of the exception text
+ - col - returns the column number of the exception text
+ - line - returns the line containing the exception text
+
+ Example::
+ try:
+ Word(nums).setName("integer").parseString("ABC")
+ except ParseException as pe:
+ print(pe)
+ print("column: {}".format(pe.col))
+
+ prints::
+ Expected integer (at char 0), (line:1, col:1)
+ column: 1
+ """
+ pass
+
+class ParseFatalException(ParseBaseException):
+ """user-throwable exception thrown when inconsistent parse content
+ is found; stops all parsing immediately"""
+ pass
+
+class ParseSyntaxException(ParseFatalException):
+ """just like L{ParseFatalException}, but thrown internally when an
+ L{ErrorStop} ('-' operator) indicates that parsing is to stop
+ immediately because an unbacktrackable syntax error has been found"""
+ pass
+
+#~ class ReparseException(ParseBaseException):
+ #~ """Experimental class - parse actions can raise this exception to cause
+ #~ pyparsing to reparse the input string:
+ #~ - with a modified input string, and/or
+ #~ - with a modified start location
+ #~ Set the values of the ReparseException in the constructor, and raise the
+ #~ exception in a parse action to cause pyparsing to use the new string/location.
+ #~ Setting the values as None causes no change to be made.
+ #~ """
+ #~ def __init_( self, newstring, restartLoc ):
+ #~ self.newParseText = newstring
+ #~ self.reparseLoc = restartLoc
+
+class RecursiveGrammarException(Exception):
+ """exception thrown by L{ParserElement.validate} if the grammar could be improperly recursive"""
+ def __init__( self, parseElementList ):
+ self.parseElementTrace = parseElementList
+
+ def __str__( self ):
+ return "RecursiveGrammarException: %s" % self.parseElementTrace
+
+class _ParseResultsWithOffset(object):
+ def __init__(self,p1,p2):
+ self.tup = (p1,p2)
+ def __getitem__(self,i):
+ return self.tup[i]
+ def __repr__(self):
+ return repr(self.tup[0])
+ def setOffset(self,i):
+ self.tup = (self.tup[0],i)
+
+class ParseResults(object):
+ """
+ Structured parse results, to provide multiple means of access to the parsed data:
+ - as a list (C{len(results)})
+ - by list index (C{results[0], results[1]}, etc.)
+ - by attribute (C{results.<resultsName>} - see L{ParserElement.setResultsName})
+
+ Example::
+ integer = Word(nums)
+ date_str = (integer.setResultsName("year") + '/'
+ + integer.setResultsName("month") + '/'
+ + integer.setResultsName("day"))
+ # equivalent form:
+ # date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+ # parseString returns a ParseResults object
+ result = date_str.parseString("1999/12/31")
+
+ def test(s, fn=repr):
+ print("%s -> %s" % (s, fn(eval(s))))
+ test("list(result)")
+ test("result[0]")
+ test("result['month']")
+ test("result.day")
+ test("'month' in result")
+ test("'minutes' in result")
+ test("result.dump()", str)
+ prints::
+ list(result) -> ['1999', '/', '12', '/', '31']
+ result[0] -> '1999'
+ result['month'] -> '12'
+ result.day -> '31'
+ 'month' in result -> True
+ 'minutes' in result -> False
+ result.dump() -> ['1999', '/', '12', '/', '31']
+ - day: 31
+ - month: 12
+ - year: 1999
+ """
+ def __new__(cls, toklist=None, name=None, asList=True, modal=True ):
+ if isinstance(toklist, cls):
+ return toklist
+ retobj = object.__new__(cls)
+ retobj.__doinit = True
+ return retobj
+
+ # Performance tuning: we construct a *lot* of these, so keep this
+ # constructor as small and fast as possible
+ def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance ):
+ if self.__doinit:
+ self.__doinit = False
+ self.__name = None
+ self.__parent = None
+ self.__accumNames = {}
+ self.__asList = asList
+ self.__modal = modal
+ if toklist is None:
+ toklist = []
+ if isinstance(toklist, list):
+ self.__toklist = toklist[:]
+ elif isinstance(toklist, _generatorType):
+ self.__toklist = list(toklist)
+ else:
+ self.__toklist = [toklist]
+ self.__tokdict = dict()
+
+ if name is not None and name:
+ if not modal:
+ self.__accumNames[name] = 0
+ if isinstance(name,int):
+ name = _ustr(name) # will always return a str, but use _ustr for consistency
+ self.__name = name
+ if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None,'',[])):
+ if isinstance(toklist,basestring):
+ toklist = [ toklist ]
+ if asList:
+ if isinstance(toklist,ParseResults):
+ self[name] = _ParseResultsWithOffset(toklist.copy(),0)
+ else:
+ self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
+ self[name].__name = name
+ else:
+ try:
+ self[name] = toklist[0]
+ except (KeyError,TypeError,IndexError):
+ self[name] = toklist
+
+ def __getitem__( self, i ):
+ if isinstance( i, (int,slice) ):
+ return self.__toklist[i]
+ else:
+ if i not in self.__accumNames:
+ return self.__tokdict[i][-1][0]
+ else:
+ return ParseResults([ v[0] for v in self.__tokdict[i] ])
+
+ def __setitem__( self, k, v, isinstance=isinstance ):
+ if isinstance(v,_ParseResultsWithOffset):
+ self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
+ sub = v[0]
+ elif isinstance(k,(int,slice)):
+ self.__toklist[k] = v
+ sub = v
+ else:
+ self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
+ sub = v
+ if isinstance(sub,ParseResults):
+ sub.__parent = wkref(self)
+
+ def __delitem__( self, i ):
+ if isinstance(i,(int,slice)):
+ mylen = len( self.__toklist )
+ del self.__toklist[i]
+
+ # convert int to slice
+ if isinstance(i, int):
+ if i < 0:
+ i += mylen
+ i = slice(i, i+1)
+ # get removed indices
+ removed = list(range(*i.indices(mylen)))
+ removed.reverse()
+ # fixup indices in token dictionary
+ for name,occurrences in self.__tokdict.items():
+ for j in removed:
+ for k, (value, position) in enumerate(occurrences):
+ occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
+ else:
+ del self.__tokdict[i]
+
+ def __contains__( self, k ):
+ return k in self.__tokdict
+
+ def __len__( self ): return len( self.__toklist )
+ def __bool__(self): return ( not not self.__toklist )
+ __nonzero__ = __bool__
+ def __iter__( self ): return iter( self.__toklist )
+ def __reversed__( self ): return iter( self.__toklist[::-1] )
+ def _iterkeys( self ):
+ if hasattr(self.__tokdict, "iterkeys"):
+ return self.__tokdict.iterkeys()
+ else:
+ return iter(self.__tokdict)
+
+ def _itervalues( self ):
+ return (self[k] for k in self._iterkeys())
+
+ def _iteritems( self ):
+ return ((k, self[k]) for k in self._iterkeys())
+
+ if PY_3:
+ keys = _iterkeys
+ """Returns an iterator of all named result keys (Python 3.x only)."""
+
+ values = _itervalues
+ """Returns an iterator of all named result values (Python 3.x only)."""
+
+ items = _iteritems
+ """Returns an iterator of all named result key-value tuples (Python 3.x only)."""
+
+ else:
+ iterkeys = _iterkeys
+ """Returns an iterator of all named result keys (Python 2.x only)."""
+
+ itervalues = _itervalues
+ """Returns an iterator of all named result values (Python 2.x only)."""
+
+ iteritems = _iteritems
+ """Returns an iterator of all named result key-value tuples (Python 2.x only)."""
+
+ def keys( self ):
+ """Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x)."""
+ return list(self.iterkeys())
+
+ def values( self ):
+ """Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x)."""
+ return list(self.itervalues())
+
+ def items( self ):
+ """Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x)."""
+ return list(self.iteritems())
+
+ def haskeys( self ):
+ """Since keys() returns an iterator, this method is helpful in bypassing
+ code that looks for the existence of any defined results names."""
+ return bool(self.__tokdict)
+
+ def pop( self, *args, **kwargs):
+ """
+ Removes and returns item at specified index (default=C{last}).
+ Supports both C{list} and C{dict} semantics for C{pop()}. If passed no
+ argument or an integer argument, it will use C{list} semantics
+ and pop tokens from the list of parsed tokens. If passed a
+ non-integer argument (most likely a string), it will use C{dict}
+ semantics and pop the corresponding value from any defined
+ results names. A second default return value argument is
+ supported, just as in C{dict.pop()}.
+
+ Example::
+ def remove_first(tokens):
+ tokens.pop(0)
+ print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
+ print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321']
+
+ label = Word(alphas)
+ patt = label("LABEL") + OneOrMore(Word(nums))
+ print(patt.parseString("AAB 123 321").dump())
+
+ # Use pop() in a parse action to remove named result (note that corresponding value is not
+ # removed from list form of results)
+ def remove_LABEL(tokens):
+ tokens.pop("LABEL")
+ return tokens
+ patt.addParseAction(remove_LABEL)
+ print(patt.parseString("AAB 123 321").dump())
+ prints::
+ ['AAB', '123', '321']
+ - LABEL: AAB
+
+ ['AAB', '123', '321']
+ """
+ if not args:
+ args = [-1]
+ for k,v in kwargs.items():
+ if k == 'default':
+ args = (args[0], v)
+ else:
+ raise TypeError("pop() got an unexpected keyword argument '%s'" % k)
+ if (isinstance(args[0], int) or
+ len(args) == 1 or
+ args[0] in self):
+ index = args[0]
+ ret = self[index]
+ del self[index]
+ return ret
+ else:
+ defaultvalue = args[1]
+ return defaultvalue
+
+ def get(self, key, defaultValue=None):
+ """
+ Returns named result matching the given key, or if there is no
+ such name, then returns the given C{defaultValue} or C{None} if no
+ C{defaultValue} is specified.
+
+ Similar to C{dict.get()}.
+
+ Example::
+ integer = Word(nums)
+ date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+ result = date_str.parseString("1999/12/31")
+ print(result.get("year")) # -> '1999'
+ print(result.get("hour", "not specified")) # -> 'not specified'
+ print(result.get("hour")) # -> None
+ """
+ if key in self:
+ return self[key]
+ else:
+ return defaultValue
+
+ def insert( self, index, insStr ):
+ """
+ Inserts new element at location index in the list of parsed tokens.
+
+ Similar to C{list.insert()}.
+
+ Example::
+ print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
+
+ # use a parse action to insert the parse location in the front of the parsed results
+ def insert_locn(locn, tokens):
+ tokens.insert(0, locn)
+ print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321']
+ """
+ self.__toklist.insert(index, insStr)
+ # fixup indices in token dictionary
+ for name,occurrences in self.__tokdict.items():
+ for k, (value, position) in enumerate(occurrences):
+ occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))
+
+ def append( self, item ):
+ """
+ Add single element to end of ParseResults list of elements.
+
+ Example::
+ print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
+
+ # use a parse action to compute the sum of the parsed integers, and add it to the end
+ def append_sum(tokens):
+ tokens.append(sum(map(int, tokens)))
+ print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444]
+ """
+ self.__toklist.append(item)
+
+ def extend( self, itemseq ):
+ """
+ Add sequence of elements to end of ParseResults list of elements.
+
+ Example::
+ patt = OneOrMore(Word(alphas))
+
+ # use a parse action to append the reverse of the matched strings, to make a palindrome
+ def make_palindrome(tokens):
+ tokens.extend(reversed([t[::-1] for t in tokens]))
+ return ''.join(tokens)
+ print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
+ """
+ if isinstance(itemseq, ParseResults):
+ self += itemseq
+ else:
+ self.__toklist.extend(itemseq)
+
+ def clear( self ):
+ """
+ Clear all elements and results names.
+ """
+ del self.__toklist[:]
+ self.__tokdict.clear()
+
+ def __getattr__( self, name ):
+ try:
+ return self[name]
+ except KeyError:
+ return ""
+
+ if name in self.__tokdict:
+ if name not in self.__accumNames:
+ return self.__tokdict[name][-1][0]
+ else:
+ return ParseResults([ v[0] for v in self.__tokdict[name] ])
+ else:
+ return ""
+
+ def __add__( self, other ):
+ ret = self.copy()
+ ret += other
+ return ret
+
+ def __iadd__( self, other ):
+ if other.__tokdict:
+ offset = len(self.__toklist)
+ addoffset = lambda a: offset if a<0 else a+offset
+ otheritems = other.__tokdict.items()
+ otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
+ for (k,vlist) in otheritems for v in vlist]
+ for k,v in otherdictitems:
+ self[k] = v
+ if isinstance(v[0],ParseResults):
+ v[0].__parent = wkref(self)
+
+ self.__toklist += other.__toklist
+ self.__accumNames.update( other.__accumNames )
+ return self
+
+ def __radd__(self, other):
+ if isinstance(other,int) and other == 0:
+ # useful for merging many ParseResults using sum() builtin
+ return self.copy()
+ else:
+ # this may raise a TypeError - so be it
+ return other + self
+
+ def __repr__( self ):
+ return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )
+
+ def __str__( self ):
+ return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']'
+
+ def _asStringList( self, sep='' ):
+ out = []
+ for item in self.__toklist:
+ if out and sep:
+ out.append(sep)
+ if isinstance( item, ParseResults ):
+ out += item._asStringList()
+ else:
+ out.append( _ustr(item) )
+ return out
+
+ def asList( self ):
+ """
+ Returns the parse results as a nested list of matching tokens, all converted to strings.
+
+ Example::
+ patt = OneOrMore(Word(alphas))
+ result = patt.parseString("sldkj lsdkj sldkj")
+ # even though the result prints in string-like form, it is actually a pyparsing ParseResults
+ print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
+
+ # Use asList() to create an actual list
+ result_list = result.asList()
+ print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
+ """
+ return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist]
+
+ def asDict( self ):
+ """
+ Returns the named parse results as a nested dictionary.
+
+ Example::
+ integer = Word(nums)
+ date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+ result = date_str.parseString('12/31/1999')
+ print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
+
+ result_dict = result.asDict()
+ print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}
+
+ # even though a ParseResults supports dict-like access, sometime you just need to have a dict
+ import json
+ print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
+ print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}
+ """
+ if PY_3:
+ item_fn = self.items
+ else:
+ item_fn = self.iteritems
+
+ def toItem(obj):
+ if isinstance(obj, ParseResults):
+ if obj.haskeys():
+ return obj.asDict()
+ else:
+ return [toItem(v) for v in obj]
+ else:
+ return obj
+
+ return dict((k,toItem(v)) for k,v in item_fn())
+
+ def copy( self ):
+ """
+ Returns a new copy of a C{ParseResults} object.
+ """
+ ret = ParseResults( self.__toklist )
+ ret.__tokdict = self.__tokdict.copy()
+ ret.__parent = self.__parent
+ ret.__accumNames.update( self.__accumNames )
+ ret.__name = self.__name
+ return ret
+
+ def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
+ """
+ (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.
+ """
+ nl = "\n"
+ out = []
+ namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items()
+ for v in vlist)
+ nextLevelIndent = indent + " "
+
+ # collapse out indents if formatting is not desired
+ if not formatted:
+ indent = ""
+ nextLevelIndent = ""
+ nl = ""
+
+ selfTag = None
+ if doctag is not None:
+ selfTag = doctag
+ else:
+ if self.__name:
+ selfTag = self.__name
+
+ if not selfTag:
+ if namedItemsOnly:
+ return ""
+ else:
+ selfTag = "ITEM"
+
+ out += [ nl, indent, "<", selfTag, ">" ]
+
+ for i,res in enumerate(self.__toklist):
+ if isinstance(res,ParseResults):
+ if i in namedItems:
+ out += [ res.asXML(namedItems[i],
+ namedItemsOnly and doctag is None,
+ nextLevelIndent,
+ formatted)]
+ else:
+ out += [ res.asXML(None,
+ namedItemsOnly and doctag is None,
+ nextLevelIndent,
+ formatted)]
+ else:
+ # individual token, see if there is a name for it
+ resTag = None
+ if i in namedItems:
+ resTag = namedItems[i]
+ if not resTag:
+ if namedItemsOnly:
+ continue
+ else:
+ resTag = "ITEM"
+ xmlBodyText = _xml_escape(_ustr(res))
+ out += [ nl, nextLevelIndent, "<", resTag, ">",
+ xmlBodyText,
+ "", resTag, ">" ]
+
+ out += [ nl, indent, "", selfTag, ">" ]
+ return "".join(out)
+
+ def __lookup(self,sub):
+ for k,vlist in self.__tokdict.items():
+ for v,loc in vlist:
+ if sub is v:
+ return k
+ return None
+
+ def getName(self):
+ r"""
+ Returns the results name for this token expression. Useful when several
+ different expressions might match at a particular location.
+
+ Example::
+ integer = Word(nums)
+ ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
+ house_number_expr = Suppress('#') + Word(nums, alphanums)
+ user_data = (Group(house_number_expr)("house_number")
+ | Group(ssn_expr)("ssn")
+ | Group(integer)("age"))
+ user_info = OneOrMore(user_data)
+
+ result = user_info.parseString("22 111-22-3333 #221B")
+ for item in result:
+ print(item.getName(), ':', item[0])
+ prints::
+ age : 22
+ ssn : 111-22-3333
+ house_number : 221B
+ """
+ if self.__name:
+ return self.__name
+ elif self.__parent:
+ par = self.__parent()
+ if par:
+ return par.__lookup(self)
+ else:
+ return None
+ elif (len(self) == 1 and
+ len(self.__tokdict) == 1 and
+ next(iter(self.__tokdict.values()))[0][1] in (0,-1)):
+ return next(iter(self.__tokdict.keys()))
+ else:
+ return None
+
+ def dump(self, indent='', depth=0, full=True):
+ """
+ Diagnostic method for listing out the contents of a C{ParseResults}.
+ Accepts an optional C{indent} argument so that this string can be embedded
+ in a nested display of other data.
+
+ Example::
+ integer = Word(nums)
+ date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+ result = date_str.parseString('12/31/1999')
+ print(result.dump())
+ prints::
+ ['12', '/', '31', '/', '1999']
+ - day: 1999
+ - month: 31
+ - year: 12
+ """
+ out = []
+ NL = '\n'
+ out.append( indent+_ustr(self.asList()) )
+ if full:
+ if self.haskeys():
+ items = sorted((str(k), v) for k,v in self.items())
+ for k,v in items:
+ if out:
+ out.append(NL)
+ out.append( "%s%s- %s: " % (indent,(' '*depth), k) )
+ if isinstance(v,ParseResults):
+ if v:
+ out.append( v.dump(indent,depth+1) )
+ else:
+ out.append(_ustr(v))
+ else:
+ out.append(repr(v))
+ elif any(isinstance(vv,ParseResults) for vv in self):
+ v = self
+ for i,vv in enumerate(v):
+ if isinstance(vv,ParseResults):
+ out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),vv.dump(indent,depth+1) ))
+ else:
+ out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),_ustr(vv)))
+
+ return "".join(out)
+
+ def pprint(self, *args, **kwargs):
+ """
+ Pretty-printer for parsed results as a list, using the C{pprint} module.
+ Accepts additional positional or keyword args as defined for the
+ C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint})
+
+ Example::
+ ident = Word(alphas, alphanums)
+ num = Word(nums)
+ func = Forward()
+ term = ident | num | Group('(' + func + ')')
+ func <<= ident + Group(Optional(delimitedList(term)))
+ result = func.parseString("fna a,b,(fnb c,d,200),100")
+ result.pprint(width=40)
+ prints::
+ ['fna',
+ ['a',
+ 'b',
+ ['(', 'fnb', ['c', 'd', '200'], ')'],
+ '100']]
+ """
+ pprint.pprint(self.asList(), *args, **kwargs)
+
+ # add support for pickle protocol
+ def __getstate__(self):
+ return ( self.__toklist,
+ ( self.__tokdict.copy(),
+ self.__parent is not None and self.__parent() or None,
+ self.__accumNames,
+ self.__name ) )
+
+ def __setstate__(self,state):
+ self.__toklist = state[0]
+ (self.__tokdict,
+ par,
+ inAccumNames,
+ self.__name) = state[1]
+ self.__accumNames = {}
+ self.__accumNames.update(inAccumNames)
+ if par is not None:
+ self.__parent = wkref(par)
+ else:
+ self.__parent = None
+
+ def __getnewargs__(self):
+ return self.__toklist, self.__name, self.__asList, self.__modal
+
+ def __dir__(self):
+ return (dir(type(self)) + list(self.keys()))
+
+MutableMapping.register(ParseResults)
+
+def col (loc,strg):
+ """Returns current column within a string, counting newlines as line separators.
+ The first column is number 1.
+
+ Note: the default parsing behavior is to expand tabs in the input string
+ before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
+ on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
+ consistent view of the parsed string, the parse location, and line and column
+ positions within the parsed string.
+ """
+ s = strg
+ return 1 if 0<loc<len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc)
+
+def lineno(loc,strg):
+ """Returns current line number within a string, counting newlines as line separators.
+ The first line is number 1.
+
+ Note: the default parsing behavior is to expand tabs in the input string
+ before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
+ on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
+ consistent view of the parsed string, the parse location, and line and column
+ positions within the parsed string.
+ """
+ return strg.count("\n",0,loc) + 1
+
+def line( loc, strg ):
+ """Returns the line of text containing loc within a string, counting newlines as line separators.
+ """
+ lastCR = strg.rfind("\n", 0, loc)
+ nextCR = strg.find("\n", loc)
+ if nextCR >= 0:
+ return strg[lastCR+1:nextCR]
+ else:
+ return strg[lastCR+1:]
+
+def _defaultStartDebugAction( instring, loc, expr ):
+ print (("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )))
+
+def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
+ print ("Matched " + _ustr(expr) + " -> " + str(toks.asList()))
+
+def _defaultExceptionDebugAction( instring, loc, expr, exc ):
+ print ("Exception raised:" + _ustr(exc))
+
+def nullDebugAction(*args):
+ """'Do-nothing' debug action, to suppress debugging output during parsing."""
+ pass
+
+# Only works on Python 3.x - nonlocal is toxic to Python 2 installs
+#~ 'decorator to trim function calls to match the arity of the target'
+#~ def _trim_arity(func, maxargs=3):
+ #~ if func in singleArgBuiltins:
+ #~ return lambda s,l,t: func(t)
+ #~ limit = 0
+ #~ foundArity = False
+ #~ def wrapper(*args):
+ #~ nonlocal limit,foundArity
+ #~ while 1:
+ #~ try:
+ #~ ret = func(*args[limit:])
+ #~ foundArity = True
+ #~ return ret
+ #~ except TypeError:
+ #~ if limit == maxargs or foundArity:
+ #~ raise
+ #~ limit += 1
+ #~ continue
+ #~ return wrapper
+
+# this version is Python 2.x-3.x cross-compatible
+'decorator to trim function calls to match the arity of the target'
+def _trim_arity(func, maxargs=2):
+ if func in singleArgBuiltins:
+ return lambda s,l,t: func(t)
+ limit = [0]
+ foundArity = [False]
+
+ # traceback return data structure changed in Py3.5 - normalize back to plain tuples
+ if system_version[:2] >= (3,5):
+ def extract_stack(limit=0):
+ # special handling for Python 3.5.0 - extra deep call stack by 1
+ offset = -3 if system_version == (3,5,0) else -2
+ frame_summary = traceback.extract_stack(limit=-offset+limit-1)[offset]
+ return [frame_summary[:2]]
+ def extract_tb(tb, limit=0):
+ frames = traceback.extract_tb(tb, limit=limit)
+ frame_summary = frames[-1]
+ return [frame_summary[:2]]
+ else:
+ extract_stack = traceback.extract_stack
+ extract_tb = traceback.extract_tb
+
+ # synthesize what would be returned by traceback.extract_stack at the call to
+ # user's parse action 'func', so that we don't incur call penalty at parse time
+
+ LINE_DIFF = 6
+ # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND
+ # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
+ this_line = extract_stack(limit=2)[-1]
+ pa_call_line_synth = (this_line[0], this_line[1]+LINE_DIFF)
+
+ def wrapper(*args):
+ while 1:
+ try:
+ ret = func(*args[limit[0]:])
+ foundArity[0] = True
+ return ret
+ except TypeError:
+ # re-raise TypeErrors if they did not come from our arity testing
+ if foundArity[0]:
+ raise
+ else:
+ try:
+ tb = sys.exc_info()[-1]
+ if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth:
+ raise
+ finally:
+ del tb
+
+ if limit[0] <= maxargs:
+ limit[0] += 1
+ continue
+ raise
+
+ # copy func name to wrapper for sensible debug output
+ func_name = ""
+ try:
+ func_name = getattr(func, '__name__',
+ getattr(func, '__class__').__name__)
+ except Exception:
+ func_name = str(func)
+ wrapper.__name__ = func_name
+
+ return wrapper
+
+class ParserElement(object):
+ """Abstract base level parser element class."""
+ DEFAULT_WHITE_CHARS = " \n\t\r"
+ verbose_stacktrace = False
+
+ @staticmethod
+ def setDefaultWhitespaceChars( chars ):
+ r"""
+ Overrides the default whitespace chars
+
+ Example::
+ # default whitespace chars are space, <TAB> and newline
+ OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl']
+
+ # change to just treat newline as significant
+ ParserElement.setDefaultWhitespaceChars(" \t")
+ OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def']
+ """
+ ParserElement.DEFAULT_WHITE_CHARS = chars
+
+ @staticmethod
+ def inlineLiteralsUsing(cls):
+ """
+ Set class to be used for inclusion of string literals into a parser.
+
+ Example::
+ # default literal class used is Literal
+ integer = Word(nums)
+ date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+ date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31']
+
+
+ # change to Suppress
+ ParserElement.inlineLiteralsUsing(Suppress)
+ date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+ date_str.parseString("1999/12/31") # -> ['1999', '12', '31']
+ """
+ ParserElement._literalStringClass = cls
+
+ def __init__( self, savelist=False ):
+ self.parseAction = list()
+ self.failAction = None
+ #~ self.name = "" # don't define self.name, let subclasses try/except upcall
+ self.strRepr = None
+ self.resultsName = None
+ self.saveAsList = savelist
+ self.skipWhitespace = True
+ self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
+ self.copyDefaultWhiteChars = True
+ self.mayReturnEmpty = False # used when checking for left-recursion
+ self.keepTabs = False
+ self.ignoreExprs = list()
+ self.debug = False
+ self.streamlined = False
+ self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
+ self.errmsg = ""
+ self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
+ self.debugActions = ( None, None, None ) #custom debug actions
+ self.re = None
+ self.callPreparse = True # used to avoid redundant calls to preParse
+ self.callDuringTry = False
+
+ def copy( self ):
+ """
+ Make a copy of this C{ParserElement}. Useful for defining different parse actions
+ for the same parsing pattern, using copies of the original parse element.
+
+ Example::
+ integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
+ integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K")
+ integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
+
+ print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))
+ prints::
+ [5120, 100, 655360, 268435456]
+ Equivalent form of C{expr.copy()} is just C{expr()}::
+ integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
+ """
+ cpy = copy.copy( self )
+ cpy.parseAction = self.parseAction[:]
+ cpy.ignoreExprs = self.ignoreExprs[:]
+ if self.copyDefaultWhiteChars:
+ cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
+ return cpy
+
+ def setName( self, name ):
+ """
+ Define name for this expression, makes debugging and exception messages clearer.
+
+ Example::
+ Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)
+ Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1)
+ """
+ self.name = name
+ self.errmsg = "Expected " + self.name
+ if hasattr(self,"exception"):
+ self.exception.msg = self.errmsg
+ return self
+
+ def setResultsName( self, name, listAllMatches=False ):
+ """
+ Define name for referencing matching tokens as a nested attribute
+ of the returned parse results.
+ NOTE: this returns a *copy* of the original C{ParserElement} object;
+ this is so that the client can define a basic element, such as an
+ integer, and reference it in multiple places with different names.
+
+ You can also set results names using the abbreviated syntax,
+ C{expr("name")} in place of C{expr.setResultsName("name")} -
+ see L{I{__call__}<__call__>}.
+
+ Example::
+ date_str = (integer.setResultsName("year") + '/'
+ + integer.setResultsName("month") + '/'
+ + integer.setResultsName("day"))
+
+ # equivalent form:
+ date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+ """
+ newself = self.copy()
+ if name.endswith("*"):
+ name = name[:-1]
+ listAllMatches=True
+ newself.resultsName = name
+ newself.modalResults = not listAllMatches
+ return newself
+
+ def setBreak(self,breakFlag = True):
+ """Method to invoke the Python pdb debugger when this element is
+ about to be parsed. Set C{breakFlag} to True to enable, False to
+ disable.
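+
+ Example (an illustrative sketch)::
+ # drop into pdb just before 'integer' is matched
+ integer = Word(nums).setName("integer").setBreak()
+ #~ integer.parseString("123") # uncomment to stop in the debugger here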
+ """
+ if breakFlag:
+ _parseMethod = self._parse
+ def breaker(instring, loc, doActions=True, callPreParse=True):
+ import pdb
+ pdb.set_trace()
+ return _parseMethod( instring, loc, doActions, callPreParse )
+ breaker._originalParseMethod = _parseMethod
+ self._parse = breaker
+ else:
+ if hasattr(self._parse,"_originalParseMethod"):
+ self._parse = self._parse._originalParseMethod
+ return self
+
+ def setParseAction( self, *fns, **kwargs ):
+ """
+ Define one or more actions to perform when successfully matching parse element definition.
+ Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},
+ C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:
+ - s = the original string being parsed (see note below)
+ - loc = the location of the matching substring
+ - toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object
+ If the functions in fns modify the tokens, they can return them as the return
+ value from fn, and the modified list of tokens will replace the original.
+ Otherwise, fn does not need to return any value.
+
+ Optional keyword arguments:
+ - callDuringTry = (default=C{False}) indicate if parse action should be run during lookaheads and alternate testing
+
+ Note: the default parsing behavior is to expand tabs in the input string
+ before starting the parsing process. See L{I{parseString}} for more information
+ on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
+ consistent view of the parsed string, the parse location, and line and column
+ positions within the parsed string.
+
+ Example::
+ integer = Word(nums)
+ date_str = integer + '/' + integer + '/' + integer
+
+ date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31']
+
+ # use parse action to convert to ints at parse time
+ integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
+ date_str = integer + '/' + integer + '/' + integer
+
+ # note that integer fields are now ints, not strings
+ date_str.parseString("1999/12/31") # -> [1999, '/', 12, '/', 31]
+ """
+ self.parseAction = list(map(_trim_arity, list(fns)))
+ self.callDuringTry = kwargs.get("callDuringTry", False)
+ return self
+
+ def addParseAction( self, *fns, **kwargs ):
+ """
+ Add one or more parse actions to expression's list of parse actions. See L{I{setParseAction}}.
+
+ See examples in L{I{copy}}.
+ """
+ self.parseAction += list(map(_trim_arity, list(fns)))
+ self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
+ return self
+
+ def addCondition(self, *fns, **kwargs):
+ """Add a boolean predicate function to expression's list of parse actions. See
+ L{I{setParseAction}} for function call signatures. Unlike C{setParseAction},
+ functions passed to C{addCondition} need to return boolean success/fail of the condition.
+
+ Optional keyword arguments:
+ - message = define a custom message to be used in the raised exception
+ - fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException
+
+ Example::
+ integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
+ year_int = integer.copy()
+ year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
+ date_str = year_int + '/' + integer + '/' + integer
+
+ result = date_str.parseString("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)
+ """
+ msg = kwargs.get("message", "failed user-defined condition")
+ exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException
+ for fn in fns:
+ def pa(s,l,t):
+ if not bool(_trim_arity(fn)(s,l,t)):
+ raise exc_type(s,l,msg)
+ self.parseAction.append(pa)
+ self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
+ return self
+
+ def setFailAction( self, fn ):
+ """Define action to perform if parsing fails at this expression.
+ Fail action fn is a callable function that takes the arguments
+ C{fn(s,loc,expr,err)} where:
+ - s = string being parsed
+ - loc = location where expression match was attempted and failed
+ - expr = the parse expression that failed
+ - err = the exception thrown
+ The function returns no value. It may throw C{L{ParseFatalException}}
+ if it is desired to stop parsing immediately."""
+ self.failAction = fn
+ return self
+
+ def _skipIgnorables( self, instring, loc ):
+ exprsFound = True
+ while exprsFound:
+ exprsFound = False
+ for e in self.ignoreExprs:
+ try:
+ while 1:
+ loc,dummy = e._parse( instring, loc )
+ exprsFound = True
+ except ParseException:
+ pass
+ return loc
+
+ def preParse( self, instring, loc ):
+ if self.ignoreExprs:
+ loc = self._skipIgnorables( instring, loc )
+
+ if self.skipWhitespace:
+ wt = self.whiteChars
+ instrlen = len(instring)
+ while loc < instrlen and instring[loc] in wt:
+ loc += 1
+
+ return loc
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ return loc, []
+
+ def postParse( self, instring, loc, tokenlist ):
+ return tokenlist
+
+ #~ @profile
+ def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
+ debugging = ( self.debug ) #and doActions )
+
+ if debugging or self.failAction:
+ #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
+ if (self.debugActions[0] ):
+ self.debugActions[0]( instring, loc, self )
+ if callPreParse and self.callPreparse:
+ preloc = self.preParse( instring, loc )
+ else:
+ preloc = loc
+ tokensStart = preloc
+ try:
+ try:
+ loc,tokens = self.parseImpl( instring, preloc, doActions )
+ except IndexError:
+ raise ParseException( instring, len(instring), self.errmsg, self )
+ except ParseBaseException as err:
+ #~ print ("Exception raised:", err)
+ if self.debugActions[2]:
+ self.debugActions[2]( instring, tokensStart, self, err )
+ if self.failAction:
+ self.failAction( instring, tokensStart, self, err )
+ raise
+ else:
+ if callPreParse and self.callPreparse:
+ preloc = self.preParse( instring, loc )
+ else:
+ preloc = loc
+ tokensStart = preloc
+ if self.mayIndexError or preloc >= len(instring):
+ try:
+ loc,tokens = self.parseImpl( instring, preloc, doActions )
+ except IndexError:
+ raise ParseException( instring, len(instring), self.errmsg, self )
+ else:
+ loc,tokens = self.parseImpl( instring, preloc, doActions )
+
+ tokens = self.postParse( instring, loc, tokens )
+
+ retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
+ if self.parseAction and (doActions or self.callDuringTry):
+ if debugging:
+ try:
+ for fn in self.parseAction:
+ tokens = fn( instring, tokensStart, retTokens )
+ if tokens is not None:
+ retTokens = ParseResults( tokens,
+ self.resultsName,
+ asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
+ modal=self.modalResults )
+ except ParseBaseException as err:
+ #~ print "Exception raised in user parse action:", err
+ if (self.debugActions[2] ):
+ self.debugActions[2]( instring, tokensStart, self, err )
+ raise
+ else:
+ for fn in self.parseAction:
+ tokens = fn( instring, tokensStart, retTokens )
+ if tokens is not None:
+ retTokens = ParseResults( tokens,
+ self.resultsName,
+ asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
+ modal=self.modalResults )
+ if debugging:
+ #~ print ("Matched",self,"->",retTokens.asList())
+ if (self.debugActions[1] ):
+ self.debugActions[1]( instring, tokensStart, loc, self, retTokens )
+
+ return loc, retTokens
+
+ def tryParse( self, instring, loc ):
+ try:
+ return self._parse( instring, loc, doActions=False )[0]
+ except ParseFatalException:
+ raise ParseException( instring, loc, self.errmsg, self)
+
+ def canParseNext(self, instring, loc):
+ try:
+ self.tryParse(instring, loc)
+ except (ParseException, IndexError):
+ return False
+ else:
+ return True
+
+ class _UnboundedCache(object):
+ def __init__(self):
+ cache = {}
+ self.not_in_cache = not_in_cache = object()
+
+ def get(self, key):
+ return cache.get(key, not_in_cache)
+
+ def set(self, key, value):
+ cache[key] = value
+
+ def clear(self):
+ cache.clear()
+
+ def cache_len(self):
+ return len(cache)
+
+ self.get = types.MethodType(get, self)
+ self.set = types.MethodType(set, self)
+ self.clear = types.MethodType(clear, self)
+ self.__len__ = types.MethodType(cache_len, self)
+
+ if _OrderedDict is not None:
+ class _FifoCache(object):
+ def __init__(self, size):
+ self.not_in_cache = not_in_cache = object()
+
+ cache = _OrderedDict()
+
+ def get(self, key):
+ return cache.get(key, not_in_cache)
+
+ def set(self, key, value):
+ cache[key] = value
+ while len(cache) > size:
+ try:
+ cache.popitem(False)
+ except KeyError:
+ pass
+
+ def clear(self):
+ cache.clear()
+
+ def cache_len(self):
+ return len(cache)
+
+ self.get = types.MethodType(get, self)
+ self.set = types.MethodType(set, self)
+ self.clear = types.MethodType(clear, self)
+ self.__len__ = types.MethodType(cache_len, self)
+
+ else:
+ class _FifoCache(object):
+ def __init__(self, size):
+ self.not_in_cache = not_in_cache = object()
+
+ cache = {}
+ key_fifo = collections.deque([], size)
+
+ def get(self, key):
+ return cache.get(key, not_in_cache)
+
+ def set(self, key, value):
+ cache[key] = value
+ while len(key_fifo) > size:
+ cache.pop(key_fifo.popleft(), None)
+ key_fifo.append(key)
+
+ def clear(self):
+ cache.clear()
+ key_fifo.clear()
+
+ def cache_len(self):
+ return len(cache)
+
+ self.get = types.MethodType(get, self)
+ self.set = types.MethodType(set, self)
+ self.clear = types.MethodType(clear, self)
+ self.__len__ = types.MethodType(cache_len, self)
+
+ # argument cache for optimizing repeated calls when backtracking through recursive expressions
+ packrat_cache = {} # this is set later by enablePackrat(); this is here so that resetCache() doesn't fail
+ packrat_cache_lock = RLock()
+ packrat_cache_stats = [0, 0]
+
+ # this method gets repeatedly called during backtracking with the same arguments -
+ # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
+ def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
+ HIT, MISS = 0, 1
+ lookup = (self, instring, loc, callPreParse, doActions)
+ with ParserElement.packrat_cache_lock:
+ cache = ParserElement.packrat_cache
+ value = cache.get(lookup)
+ if value is cache.not_in_cache:
+ ParserElement.packrat_cache_stats[MISS] += 1
+ try:
+ value = self._parseNoCache(instring, loc, doActions, callPreParse)
+ except ParseBaseException as pe:
+ # cache a copy of the exception, without the traceback
+ cache.set(lookup, pe.__class__(*pe.args))
+ raise
+ else:
+ cache.set(lookup, (value[0], value[1].copy()))
+ return value
+ else:
+ ParserElement.packrat_cache_stats[HIT] += 1
+ if isinstance(value, Exception):
+ raise value
+ return (value[0], value[1].copy())
+
+ _parse = _parseNoCache
+
+ @staticmethod
+ def resetCache():
+ ParserElement.packrat_cache.clear()
+ ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats)
+
+ _packratEnabled = False
+ @staticmethod
+ def enablePackrat(cache_size_limit=128):
+ """Enables "packrat" parsing, which adds memoizing to the parsing logic.
+ Repeated parse attempts at the same string location (which happens
+ often in many complex grammars) can immediately return a cached value,
+ instead of re-executing parsing/validating code. Memoizing is done of
+ both valid results and parsing exceptions.
+
+ Parameters:
+ - cache_size_limit - (default=C{128}) - if an integer value is provided
+ will limit the size of the packrat cache; if None is passed, then
+ the cache size will be unbounded; if 0 is passed, the cache will
+ be effectively disabled.
+
+ This speedup may break existing programs that use parse actions that
+ have side-effects. For this reason, packrat parsing is disabled when
+ you first import pyparsing. To activate the packrat feature, your
+ program must call the class method C{ParserElement.enablePackrat()}. If
+ your program uses C{psyco} to "compile as you go", you must call
+ C{enablePackrat} before calling C{psyco.full()}. If you do not do this,
+ Python will crash. For best results, call C{enablePackrat()} immediately
+ after importing pyparsing.
+
+ Example::
+ import pyparsing
+ pyparsing.ParserElement.enablePackrat()
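+
+ # or, as an illustrative variant, keep an unbounded memoization cache
+ pyparsing.ParserElement.enablePackrat(cache_size_limit=None)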
+ """
+ if not ParserElement._packratEnabled:
+ ParserElement._packratEnabled = True
+ if cache_size_limit is None:
+ ParserElement.packrat_cache = ParserElement._UnboundedCache()
+ else:
+ ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)
+ ParserElement._parse = ParserElement._parseCache
+
+ def parseString( self, instring, parseAll=False ):
+ """
+ Execute the parse expression with the given string.
+ This is the main interface to the client code, once the complete
+ expression has been built.
+
+ If you want the grammar to require that the entire input string be
+ successfully parsed, then set C{parseAll} to True (equivalent to ending
+ the grammar with C{L{StringEnd()}}).
+
+ Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
+ in order to report proper column numbers in parse actions.
+ If the input string contains tabs and
+ the grammar uses parse actions that use the C{loc} argument to index into the
+ string being parsed, you can ensure you have a consistent view of the input
+ string by:
+ - calling C{parseWithTabs} on your grammar before calling C{parseString}
+ (see L{I{parseWithTabs}})
+ - define your parse action using the full C{(s,loc,toks)} signature, and
+ reference the input string using the parse action's C{s} argument
+ - explicitly expand the tabs in your input string before calling
+ C{parseString}
+
+ Example::
+ Word('a').parseString('aaaaabaaa') # -> ['aaaaa']
+ Word('a').parseString('aaaaabaaa', parseAll=True) # -> Exception: Expected end of text
+ """
+ ParserElement.resetCache()
+ if not self.streamlined:
+ self.streamline()
+ #~ self.saveAsList = True
+ for e in self.ignoreExprs:
+ e.streamline()
+ if not self.keepTabs:
+ instring = instring.expandtabs()
+ try:
+ loc, tokens = self._parse( instring, 0 )
+ if parseAll:
+ loc = self.preParse( instring, loc )
+ se = Empty() + StringEnd()
+ se._parse( instring, loc )
+ except ParseBaseException as exc:
+ if ParserElement.verbose_stacktrace:
+ raise
+ else:
+ # catch and re-raise exception from here, clears out pyparsing internal stack trace
+ raise exc
+ else:
+ return tokens
+
+ def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):
+ """
+ Scan the input string for expression matches. Each match will return the
+ matching tokens, start location, and end location. May be called with optional
+ C{maxMatches} argument, to clip scanning after 'n' matches are found. If
+ C{overlap} is specified, then overlapping matches will be reported.
+
+ Note that the start and end locations are reported relative to the string
+ being parsed. See L{I{parseString}} for more information on parsing
+ strings with embedded tabs.
+
+ Example::
+ source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
+ print(source)
+ for tokens,start,end in Word(alphas).scanString(source):
+ print(' '*start + '^'*(end-start))
+ print(' '*start + tokens[0])
+
+ prints::
+
+ sldjf123lsdjjkf345sldkjf879lkjsfd987
+ ^^^^^
+ sldjf
+ ^^^^^^^
+ lsdjjkf
+ ^^^^^^
+ sldkjf
+ ^^^^^^
+ lkjsfd
+ """
+ if not self.streamlined:
+ self.streamline()
+ for e in self.ignoreExprs:
+ e.streamline()
+
+ if not self.keepTabs:
+ instring = _ustr(instring).expandtabs()
+ instrlen = len(instring)
+ loc = 0
+ preparseFn = self.preParse
+ parseFn = self._parse
+ ParserElement.resetCache()
+ matches = 0
+ try:
+ while loc <= instrlen and matches < maxMatches:
+ try:
+ preloc = preparseFn( instring, loc )
+ nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
+ except ParseException:
+ loc = preloc+1
+ else:
+ if nextLoc > loc:
+ matches += 1
+ yield tokens, preloc, nextLoc
+ if overlap:
+ nextloc = preparseFn( instring, loc )
+ if nextloc > loc:
+ loc = nextLoc
+ else:
+ loc += 1
+ else:
+ loc = nextLoc
+ else:
+ loc = preloc+1
+ except ParseBaseException as exc:
+ if ParserElement.verbose_stacktrace:
+ raise
+ else:
+ # catch and re-raise exception from here, clears out pyparsing internal stack trace
+ raise exc
+
+ def transformString( self, instring ):
+ """
+ Extension to C{L{scanString}}, to modify matching text with modified tokens that may
+ be returned from a parse action. To use C{transformString}, define a grammar and
+ attach a parse action to it that modifies the returned token list.
+ Invoking C{transformString()} on a target string will then scan for matches,
+ and replace the matched text patterns according to the logic in the parse
+ action. C{transformString()} returns the resulting transformed string.
+
+ Example::
+ wd = Word(alphas)
+ wd.setParseAction(lambda toks: toks[0].title())
+
+ print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))
+ Prints::
+ Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
+ """
+ out = []
+ lastE = 0
+ # force preservation of s, to minimize unwanted transformation of string, and to
+ # keep string locs straight between transformString and scanString
+ self.keepTabs = True
+ try:
+ for t,s,e in self.scanString( instring ):
+ out.append( instring[lastE:s] )
+ if t:
+ if isinstance(t,ParseResults):
+ out += t.asList()
+ elif isinstance(t,list):
+ out += t
+ else:
+ out.append(t)
+ lastE = e
+ out.append(instring[lastE:])
+ out = [o for o in out if o]
+ return "".join(map(_ustr,_flatten(out)))
+ except ParseBaseException as exc:
+ if ParserElement.verbose_stacktrace:
+ raise
+ else:
+ # catch and re-raise exception from here, clears out pyparsing internal stack trace
+ raise exc
+
+ def searchString( self, instring, maxMatches=_MAX_INT ):
+ """
+ Another extension to C{L{scanString}}, simplifying the access to the tokens found
+ to match the given parse expression. May be called with optional
+ C{maxMatches} argument, to clip searching after 'n' matches are found.
+
+ Example::
+ # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
+ cap_word = Word(alphas.upper(), alphas.lower())
+
+ print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))
+
+ # the sum() builtin can be used to merge results into a single ParseResults object
+ print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")))
+ prints::
+ [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
+ ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']
+ """
+ try:
+ return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
+ except ParseBaseException as exc:
+ if ParserElement.verbose_stacktrace:
+ raise
+ else:
+ # catch and re-raise exception from here, clears out pyparsing internal stack trace
+ raise exc
+
+ def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False):
+ """
+ Generator method to split a string using the given expression as a separator.
+ May be called with optional C{maxsplit} argument, to limit the number of splits;
+ and the optional C{includeSeparators} argument (default=C{False}), if the separating
+ matching text should be included in the split results.
+
+ Example::
+ punc = oneOf(list(".,;:/-!?"))
+ print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
+ prints::
+ ['This', ' this', '', ' this sentence', ' is badly punctuated', '']
+ """
+ splits = 0
+ last = 0
+ for t,s,e in self.scanString(instring, maxMatches=maxsplit):
+ yield instring[last:s]
+ if includeSeparators:
+ yield t[0]
+ last = e
+ yield instring[last:]
+
+ def __add__(self, other ):
+ """
+ Implementation of + operator - returns C{L{And}}. Adding strings to a ParserElement
+ converts them to L{Literal}s by default.
+
+ Example::
+ greet = Word(alphas) + "," + Word(alphas) + "!"
+ hello = "Hello, World!"
+ print (hello, "->", greet.parseString(hello))
+ Prints::
+ Hello, World! -> ['Hello', ',', 'World', '!']
+ """
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ if not isinstance( other, ParserElement ):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return And( [ self, other ] )
+
+ def __radd__(self, other ):
+ """
+ Implementation of + operator when left operand is not a C{L{ParserElement}}
+ """
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ if not isinstance( other, ParserElement ):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return other + self
+
+ def __sub__(self, other):
+ """
+ Implementation of - operator, returns C{L{And}} with error stop
+ """
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ if not isinstance( other, ParserElement ):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return self + And._ErrorStop() + other
+
+ def __rsub__(self, other ):
+ """
+ Implementation of - operator when left operand is not a C{L{ParserElement}}
+ """
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ if not isinstance( other, ParserElement ):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return other - self
+
+ def __mul__(self,other):
+ """
+ Implementation of * operator, allows use of C{expr * 3} in place of
+ C{expr + expr + expr}. Expressions may also be multiplied by a 2-integer
+ tuple, similar to C{{min,max}} multipliers in regular expressions. Tuples
+ may also include C{None} as in:
+ - C{expr*(n,None)} or C{expr*(n,)} is equivalent
+ to C{expr*n + L{ZeroOrMore}(expr)}
+ (read as "at least n instances of C{expr}")
+ - C{expr*(None,n)} is equivalent to C{expr*(0,n)}
+ (read as "0 to n instances of C{expr}")
+ - C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)}
+ - C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)}
+
+ Note that C{expr*(None,n)} does not raise an exception if
+ more than n exprs exist in the input stream; that is,
+ C{expr*(None,n)} does not enforce a maximum number of expr
+ occurrences. If this behavior is desired, then write
+ C{expr*(None,n) + ~expr}
+ """
+ if isinstance(other,int):
+ minElements, optElements = other,0
+ elif isinstance(other,tuple):
+ other = (other + (None, None))[:2]
+ if other[0] is None:
+ other = (0, other[1])
+ if isinstance(other[0],int) and other[1] is None:
+ if other[0] == 0:
+ return ZeroOrMore(self)
+ if other[0] == 1:
+ return OneOrMore(self)
+ else:
+ return self*other[0] + ZeroOrMore(self)
+ elif isinstance(other[0],int) and isinstance(other[1],int):
+ minElements, optElements = other
+ optElements -= minElements
+ else:
+ raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1]))
+ else:
+ raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other))
+
+ if minElements < 0:
+ raise ValueError("cannot multiply ParserElement by negative value")
+ if optElements < 0:
+ raise ValueError("second tuple value must be greater or equal to first tuple value")
+ if minElements == optElements == 0:
+ raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
+
+ if (optElements):
+ def makeOptionalList(n):
+ if n>1:
+ return Optional(self + makeOptionalList(n-1))
+ else:
+ return Optional(self)
+ if minElements:
+ if minElements == 1:
+ ret = self + makeOptionalList(optElements)
+ else:
+ ret = And([self]*minElements) + makeOptionalList(optElements)
+ else:
+ ret = makeOptionalList(optElements)
+ else:
+ if minElements == 1:
+ ret = self
+ else:
+ ret = And([self]*minElements)
+ return ret
+
+ def __rmul__(self, other):
+ return self.__mul__(other)
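+
+ # An illustrative sketch of the '*' multipliers described above (assuming Word/nums
+ # from this module):
+ #   Word(nums) * 3          # exactly three integers
+ #   Word(nums) * (2, 4)     # two to four integers
+ #   Word(nums) * (2, None)  # at least two integers
+ #   (Word(nums) * 3).parseString("10 20 30")   # -> ['10', '20', '30']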
+
+ def __or__(self, other ):
+ """
+ Implementation of | operator - returns C{L{MatchFirst}}
+ """
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ if not isinstance( other, ParserElement ):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return MatchFirst( [ self, other ] )
+
+ def __ror__(self, other ):
+ """
+ Implementation of | operator when left operand is not a C{L{ParserElement}}
+ """
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ if not isinstance( other, ParserElement ):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return other | self
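+
+ # Sketch of '|' (MatchFirst): alternatives are tried in order and the first match wins,
+ # so place longer literals first when they share a prefix:
+ #   comparison = Literal("<=") | Literal("<")
+ #   comparison.parseString("<= 5")   # -> ['<=']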
+
+ def __xor__(self, other ):
+ """
+ Implementation of ^ operator - returns C{L{Or}}
+ """
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ if not isinstance( other, ParserElement ):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return Or( [ self, other ] )
+
+ def __rxor__(self, other ):
+ """
+ Implementation of ^ operator when left operand is not a C{L{ParserElement}}
+ """
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ if not isinstance( other, ParserElement ):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return other ^ self
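+
+ # Sketch of '^' (Or): all alternatives are evaluated and the longest match wins,
+ # unlike '|' which stops at the first successful alternative:
+ #   number = Word(nums) ^ Combine(Word(nums) + "." + Word(nums))
+ #   number.parseString("3.1416")   # -> ['3.1416']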
+
+ def __and__(self, other ):
+ """
+ Implementation of & operator - returns C{L{Each}}
+ """
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ if not isinstance( other, ParserElement ):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return Each( [ self, other ] )
+
+ def __rand__(self, other ):
+ """
+ Implementation of & operator when left operand is not a C{L{ParserElement}}
+ """
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ if not isinstance( other, ParserElement ):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return other & self
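+
+ # Sketch of '&' (Each): all pieces must be present, but may appear in any order,
+ # e.g. options that can be given in either sequence:
+ #   color = Keyword("red") | Keyword("blue")
+ #   size = Word(nums)
+ #   opts = color & size
+ #   opts.parseString("7 blue")   # -> ['7', 'blue']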
+
+ def __invert__( self ):
+ """
+ Implementation of ~ operator - returns C{L{NotAny}}
+ """
+ return NotAny( self )
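+
+ # Sketch of '~' (NotAny): a zero-width check that the following expression does NOT
+ # match at the current position, e.g. an identifier that is not a reserved word:
+ #   identifier = ~Keyword("end") + Word(alphas)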
+
+ def __call__(self, name=None):
+ """
+ Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}.
+
+ If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be
+ passed as C{True}.
+
+ If C{name} is omitted, same as calling C{L{copy}}.
+
+ Example::
+ # these are equivalent
+ userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
+ userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")
+ """
+ if name is not None:
+ return self.setResultsName(name)
+ else:
+ return self.copy()
+
+ def suppress( self ):
+ """
+ Suppresses the output of this C{ParserElement}; useful to keep punctuation from
+ cluttering up returned output.
+ """
+ return Suppress( self )
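+
+ # Minimal sketch: the grammar still requires the punctuation, but the matched text is
+ # dropped from the returned tokens:
+ #   group = Literal("(").suppress() + Word(alphas) + Literal(")").suppress()
+ #   group.parseString("(abc)")   # -> ['abc']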
+
+ def leaveWhitespace( self ):
+ """
+ Disables the skipping of whitespace before matching the characters in the
+ C{ParserElement}'s defined pattern. This is normally only used internally by
+ the pyparsing module, but may be needed in some whitespace-sensitive grammars.
+ """
+ self.skipWhitespace = False
+ return self
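+
+ # Sketch: with leaveWhitespace(), leading whitespace is no longer skipped before this
+ # element, so adjacency becomes significant:
+ #   pair = Word(alphas) + Literal("=").leaveWhitespace() + Word(alphas)
+ #   pair.parseString("a=b")    # -> ['a', '=', 'b']
+ #   pair.parseString("a = b")  # raises ParseException at the space before '='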
+
+ def setWhitespaceChars( self, chars ):
+ """
+ Overrides the default whitespace chars
+ """
+ self.skipWhitespace = True
+ self.whiteChars = chars
+ self.copyDefaultWhiteChars = False
+ return self
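+
+ # Sketch: restrict the skipped whitespace to spaces and tabs, so that newlines become
+ # significant for this expression:
+ #   value = Word(alphas).setWhitespaceChars(" \t")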
+
+ def parseWithTabs( self ):
+ """
+ Overrides default behavior to expand tab characters to spaces before parsing the input string.
+ Must be called before C{parseString} when the input grammar contains elements that
+ match tab characters.
+ """
+ self.keepTabs = True
+ return self
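+
+ # Sketch (assuming the White class from this module): without parseWithTabs(),
+ # parseString() expands tabs to spaces and a grammar matching literal tabs never succeeds:
+ #   row = (Word(alphas) + White("\t") + Word(alphas)).parseWithTabs()
+ #   row.parseString("key\tvalue")   # -> ['key', '\t', 'value']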
+
+ def ignore( self, other ):
+ """
+ Define expression to be ignored (e.g., comments) while doing pattern
+ matching; may be called repeatedly, to define multiple comment or other
+ ignorable patterns.
+
+ Example::
+ patt = OneOrMore(Word(alphas))
+ patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj']
+
+ patt.ignore(cStyleComment)
+ patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']
+ """
+ if isinstance(other, basestring):
+ other = Suppress(other)
+
+ if isinstance( other, Suppress ):
+ if other not in self.ignoreExprs:
+ self.ignoreExprs.append(other)
+ else:
+ self.ignoreExprs.append( Suppress( other.copy() ) )
+ return self
+
+ def setDebugActions( self, startAction, successAction, exceptionAction ):
+ """
+ Enable display of debugging messages while doing pattern matching.
+ """
+ self.debugActions = (startAction or _defaultStartDebugAction,
+ successAction or _defaultSuccessDebugAction,
+ exceptionAction or _defaultExceptionDebugAction)
+ self.debug = True
+ return self
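+
+ # A hedged sketch of a custom start action; the argument signature assumed here
+ # mirrors the default start debug action in this module (instring, loc, expr).
+ # Passing None for the other two positions keeps the default success/exception actions:
+ #   def traceStart(instring, loc, expr):
+ #       print("trying %s at loc %d" % (expr, loc))
+ #   wd = Word(alphas).setDebugActions(traceStart, None, None)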
+
+ def setDebug( self, flag=True ):
+ """
+ Enable display of debugging messages while doing pattern matching.
+ Set C{flag} to True to enable, False to disable.
+
+ Example::
+ wd = Word(alphas).setName("alphaword")
+ integer = Word(nums).setName("numword")
+ term = wd | integer
+
+ # turn on debugging for wd
+ wd.setDebug()
+
+ OneOrMore(term).parseString("abc 123 xyz 890")
+
+ prints::
+ Match alphaword at loc 0(1,1)
+ Matched alphaword -> ['abc']
+ Match alphaword at loc 3(1,4)
+ Exception raised:Expected alphaword (at char 4), (line:1, col:5)
+ Match alphaword at loc 7(1,8)
+ Matched alphaword -> ['xyz']
+ Match alphaword at loc 11(1,12)
+ Exception raised:Expected alphaword (at char 12), (line:1, col:13)
+ Match alphaword at loc 15(1,16)
+ Exception raised:Expected alphaword (at char 15), (line:1, col:16)
+
+ The output shown is that produced by the default debug actions - custom debug actions can be
+ specified using L{setDebugActions}. Prior to attempting
+ to match the C{wd} expression, the debugging message C{"Match at loc