diff --git a/.vscode/settings.json b/.vscode/settings.json index 819a8e5..66f478e 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -12,4 +12,15 @@ ], "python.testing.unittestEnabled": false, "python.testing.pytestEnabled": true, + "yaml.schemas": { + "https://squidfunk.github.io/mkdocs-material/schema.json": "mkdocs.yml" + }, + "yaml.customTags": [ + "!ENV scalar", + "!ENV sequence", + "!relative scalar", + "tag:yaml.org,2002:python/name:material.extensions.emoji.to_svg", + "tag:yaml.org,2002:python/name:material.extensions.emoji.twemoji", + "tag:yaml.org,2002:python/name:pymdownx.superfences.fence_code_format" + ] } \ No newline at end of file diff --git a/README.md b/README.md index 5b81915..2113d1e 100644 --- a/README.md +++ b/README.md @@ -1,303 +1,43 @@ # Rigging -Rigging is a lightweight LLM interaction framework built on Pydantic XML and LiteLLM. It supports useful primitives for validating LLM output and adding tool calling abilities to models that don't natively support it. It also has various helpers for common tasks like structured object parsing, templating chats, overloading generation parameters, stripping chat segments, and continuing conversations. +Rigging is a lightweight LLM interaction framework built on Pydantic XML. The goal is to make leveraging LLMs in production pipelines as simple and effictive as possible. Here are the highlights: -Modern python with type hints, pydantic validation, native serialization support, etc. +- **Structured Pydantic models** can be used interchangably with unstructured text output. +- LiteLLM as the default generator giving you **instant access to a huge array of models**. +- Add easy **tool calling** abilities to models which don't natively support it. +- Store different models and configs as **simple connection strings** just like databases. +- Chat templating, forking, continuations, generation parameter overloads, stripping segments, etc. 
+- Modern python with type hints, async support, pydantic validation, serialization, etc. -``` -pip install rigging -``` - -### Basic Chats - -```python +```py import rigging as rg +from rigging.model import CommaDelimitedAnswer as Answer -generator = rg.get_generator("claude-2.1") -chat = generator.chat( - [ - {"role": "system", "content": "You are a wizard harry."}, - {"role": "user", "content": "Say hello!"}, - ] -).run() - -print(chat.last) -# [assistant]: Hello! - -print(f"{chat.last!r}") -# Message(role='assistant', parts=[], content='Hello!') - -print(chat.prev) -# [ -# Message(role='system', parts=[], content='You are a wizard harry.'), -# Message(role='user', parts=[], content='Say hello!'), -# ] - -print(chat.json) -# [{ ... }] - -``` - -### Model Parsing - -```python -import rigging as rg - -class Answer(rg.Model): - content: str - -chat = ( - rg.get_generator("claude-3-haiku-20240307") - .chat([ - {"role": "user", "content": f"Say your name between {Answer.xml_tags()}."}, - ]) - .until_parsed_as(Answer) +chat = rg.get_generator('gpt-4') \ + .chat(f"Give me 3 famous authors between {Answer.xml_tags()} tags.") \ + .until_parsed_as(Answer) \ .run() -) answer = chat.last.parse(Answer) -print(answer.content) - -# "Claude" - -print(f"{chat.last!r}") - -# Message(role='assistant', parts=[ -# ParsedMessagePart(model=Answer(content='Claude'), ref='Claude') -# ], content='Claude') +print(answer.items) -chat.last.content = "new content" # Updating content strips parsed parts -print(f"{chat.last!r}") - -# Message(role='assistant', parts=[], content='new content') +# ['J. R. R. 
Tolkien', 'Stephen King', 'George Orwell'] ``` -### Mutliple Models - -```python -import rigging as rg - -class Joke(rg.Model): - content: str - -chat = ( - rg.get_generator("claude-2.1") - .chat([{ - "role": "user", - "content": f"Provide 3 short jokes each wrapped with {Joke.xml_tags()} tags."}, - ]) - .run() -) - -jokes = chat.last.parse_set(Joke) - -# [ -# Joke(content="Why don't eggs tell jokes? They'd crack each other up!"), -# Joke(content='What do you call a bear with no teeth? A gummy bear!'), -# Joke(content='What do you call a fake noodle? An Impasta!') -# ] -``` - -### Complex Models - -```python -import rigging as rg - -class Inner(rg.Model): - type: str = rg.attr() - content: str - -class Outer(rg.Model): - name: str = rg.attr() - inners: list[Inner] = rg.element() - -outer = Outer(name="foo", inners=[ - Inner(type="cat", content="meow"), - Inner(type="dog", content="bark") -]) - -print(outer.to_pretty_xml()) - -# -# meow -# bark -# -``` - -### Tools - -```python -from typing import Annotated -import rigging as rg - -class WeatherTool(rg.Tool): - @property - def name(self) -> str: - return "weather" - - @property - def description(self) -> str: - return "A tool to get the weather for a location" - - def get_for_city(self, city: Annotated[str, "The city name to get weather for"]) -> str: - print(f"[=] get_for_city('{city}')") - return f"The weather in {city} is nice today" - -chat = ( - rg.get_generator("mistral/mistral-tiny") - .chat( - [ - {"role": "user", "content": "What is the weather in London?"}, - ] - ) - .using(WeatherTool(), force=True) - .run() -) - -# [=] get_for_city('London') - -print(chat.last.content) - -# "Based on the information I've received, the weather in London is nice today." -``` +Rigging is built and maintained by [dreadnode](https://dreadnode.io) where we use it daily for our work. 
-### Continuing Chats - -```python -import rigging as rg - -generator = rg.get_generator("gpt-3.5-turbo") -chat = generator.chat([ - {"role": "user", "content": "Hello, how are you?"}, -]).run() - -print(chat.last.content) - -# "Hello! I'm an AI language model, ..." - -cont = chat.continue_( - {"role": "user", "content": "That's good, tell me a joke"} -).run() - -print(cont.last.content) - -# "Sure, here's a joke for you: ..." -``` - -### Basic Templating - -```python -import rigging as rg - -template = rg.get_generator("gpt-4").chat([ - {"role": "user", "content": "What is the capitol of $country?"}, -]) - -for country in ["France", "Germany"]: - print(template.apply(country=country).run().last) - -# The capital of France is Paris. -# The capital of Germany is Berlin. -``` - -### Overload Generation Params - -```python -import rigging as rg - -pending = rg.get_generator("gpt-3.5-turbo,max_tokens=50").chat([ - {"role": "user", "content": "Say a haiku about boats"}, -]) - -for temp in [0.1, 0.5, 1.0]: - print(pending.overload(temperature=temp).run().last.content) - -``` - -### Strip Parsed Sections - -```python -import rigging as rg - -class Reasoning(rg.Model): - content: str - -meaning = rg.get_generator("claude-2.1").chat([ - { - "role": "user", - "content": "What is the meaning of life in one sentence? " - f"Document your reasoning between {Reasoning.xml_tags()} tags.", - }, -]).run() - -# Gracefully handle mising models -reasoning = meaning.last.try_parse(Reasoning) -if reasoning: - print("reasoning:", reasoning.content.strip()) - -# Strip parsed content to avoid sharing -# previous thoughts with the model. -without_reasons = meaning.strip(Reasoning) -print("meaning of life:", without_reasons.last.content.strip()) - -# follow_up = without_thoughts.continue_(...) -``` - -### Custom Generator - -Any custom generator simply needs to implement a `complete` function, and -then it can be used anywhere inside rigging. 
- -```python -class Custom(Generator): - # model: str - # api_key: str - # params: GeneratorParams - - custom_field: bool - - def complete( - self, - messages: t.Sequence[rg.Message], - overloads: GenerateParams = GenerateParams(), - ) -> rg.Message: - # Access self vars where needed - api_key = self.api_key - model_id = self.model - - # Merge in args for API overloads - marged: dict[str, t.Any] = self._merge_params(overloads) - - # response: str = ... - - return rg.Message("assistant", response) - - -generator = Custom(model='foo', custom_field=True) -generator.chat(...) +## Installation +We publish every version to Pypi: +```bash +pip install rigging ``` -*Note: we currently don't have anyway to "register" custom generators for `get_generator`.* - -### Logging - -By default rigging disables it's logger with loguru. To enable it run: - -```python -from loguru import logger - -logger.enable('rigging') +If you want to build from source: +```bash +cd rigging/ +poetry install ``` -To configure loguru terminal + file logging format overrides: - -```python -from rigging.logging import configure_logging +## Getting Started -configure_logging( - 'info', # stderr level - 'out.log', # log file (optional) - 'trace' # log file level -) -``` -*(This will remove existing handlers, so you might prefer to configure them yourself)* \ No newline at end of file +Head over to **[our documentation](https://rigging.dreadnode.io) for more information. 
\ No newline at end of file diff --git a/docs/api/chat.md b/docs/api/chat.md new file mode 100644 index 0000000..6bb886e --- /dev/null +++ b/docs/api/chat.md @@ -0,0 +1 @@ +::: rigging.chat \ No newline at end of file diff --git a/docs/api/completion.md b/docs/api/completion.md new file mode 100644 index 0000000..a484659 --- /dev/null +++ b/docs/api/completion.md @@ -0,0 +1 @@ +::: rigging.completion \ No newline at end of file diff --git a/docs/api/error.md b/docs/api/error.md new file mode 100644 index 0000000..3596392 --- /dev/null +++ b/docs/api/error.md @@ -0,0 +1 @@ +::: rigging.error diff --git a/docs/api/generator.md b/docs/api/generator.md new file mode 100644 index 0000000..6b9738a --- /dev/null +++ b/docs/api/generator.md @@ -0,0 +1 @@ +::: rigging.generator \ No newline at end of file diff --git a/docs/api/logging.md b/docs/api/logging.md new file mode 100644 index 0000000..b216e71 --- /dev/null +++ b/docs/api/logging.md @@ -0,0 +1 @@ +::: rigging.logging \ No newline at end of file diff --git a/docs/api/message.md b/docs/api/message.md new file mode 100644 index 0000000..55d2c77 --- /dev/null +++ b/docs/api/message.md @@ -0,0 +1 @@ +::: rigging.message \ No newline at end of file diff --git a/docs/api/model.md b/docs/api/model.md new file mode 100644 index 0000000..e8ea08a --- /dev/null +++ b/docs/api/model.md @@ -0,0 +1 @@ +::: rigging.model \ No newline at end of file diff --git a/docs/api/parsing.md b/docs/api/parsing.md new file mode 100644 index 0000000..53c2c2a --- /dev/null +++ b/docs/api/parsing.md @@ -0,0 +1 @@ +::: rigging.parsing \ No newline at end of file diff --git a/docs/api/tool.md b/docs/api/tool.md new file mode 100644 index 0000000..94e64e1 --- /dev/null +++ b/docs/api/tool.md @@ -0,0 +1 @@ +::: rigging.tool diff --git a/docs/assets/logo_black.png b/docs/assets/logo_black.png new file mode 100644 index 0000000..60a5eb6 Binary files /dev/null and b/docs/assets/logo_black.png differ diff --git a/docs/assets/logo_white.png 
b/docs/assets/logo_white.png new file mode 100644 index 0000000..574f288 Binary files /dev/null and b/docs/assets/logo_white.png differ diff --git a/docs/index.md new file mode 100644 index 0000000..596caa0 --- /dev/null +++ b/docs/index.md @@ -0,0 +1,67 @@ +# Rigging + +Rigging is a lightweight LLM interaction framework built on Pydantic XML. The goal is to make leveraging LLMs in production pipelines as simple and effective as possible. Here are the highlights: + +- **Structured Pydantic models** can be used interchangeably with unstructured text output. +- LiteLLM as the default generator giving you **instant access to a huge array of models**. +- Add easy **tool calling** abilities to models which don't natively support it. +- Store different models and configs as **simple connection strings** just like databases. +- Chat templating, forking, continuations, generation parameter overloads, stripping segments, etc. +- Modern python with type hints, async support, pydantic validation, serialization, etc. + +```py +import rigging as rg +from rigging.model import CommaDelimitedAnswer as Answer + +chat = rg.get_generator('gpt-4') \ + .chat(f"Give me 3 famous authors between {Answer.xml_tags()} tags.") \ + .until_parsed_as(Answer) \ + .run() + +answer = chat.last.parse(Answer) +print(answer.items) + +# ['J. R. R. Tolkien', 'Stephen King', 'George Orwell'] +``` + +Rigging is built and maintained by [dreadnode](https://dreadnode.io) where we use it daily for our work. + +## Installation +We publish every version to PyPI: +```bash +pip install rigging +``` + +If you want to build from source: +```bash +cd rigging/ +poetry install +``` + +## Workflow + +1. Get a [`Generator`][rigging.generator.Generator] object - usually with [`get_generator()`][rigging.generator.get_generator]. +2. Call [`generator.chat()`][rigging.generator.Generator.chat] to produce a [`PendingChat`][rigging.chat.PendingChat] and ready it for generation. +3. 
Call [`pending.run()`][rigging.chat.PendingChat.run] to kick off generation and get your final [`Chat`][rigging.chat.Chat] object. + +[`PendingChat`][rigging.chat.PendingChat] objects hold any messages waiting to be delivered to an LLM in exchange +for a new response message. These objects are also where most of the power in rigging comes from. You'll build a +generation pipeline with options, parsing, callbacks, etc. After prep this pending chat is converted into a +final [`Chat`][rigging.chat.Chat] which holds all messages prior to generation ([`.prev`][rigging.chat.Chat.prev]) +and after generation ([`.next`][rigging.chat.Chat.next]). + +You should think of [`PendingChat`][rigging.chat.PendingChat] objects like the configurable pre-generation step +with calls like [`.with_()`][rigging.chat.PendingChat.with_], [`.apply()`][rigging.chat.PendingChat.apply], +[`.until()`][rigging.chat.PendingChat.until], [`.using()`][rigging.chat.PendingChat.using], etc. Once you call one +of the many [`.run()`][rigging.chat.PendingChat.run] functions, the generator is used to produce the next +message (or many messages) based on the prior context and any constraints you have in place. Once you have a +[`Chat`][rigging.chat.Chat] object, the interaction is "done" and you can inspect and operate on the messages. + +You'll often see us use functional style chaining as most of our +utility functions return the object back to you. + +```python +chat = generator.chat(...) \ + .using(...).until(...).with_(...) 
\ + .run() +``` \ No newline at end of file diff --git a/docs/stylesheets/extra.css b/docs/stylesheets/extra.css new file mode 100644 index 0000000..81e4a4a --- /dev/null +++ b/docs/stylesheets/extra.css @@ -0,0 +1,31 @@ +[data-md-color-scheme="slate"] { + --md-primary-fg-color: #EAEAEA; + --md-accent-fg-color: rgb(149, 133, 227); + + --md-primary-color: #EAEAEA; + --md-primary-bg-color: #191919; + --md-default-bg-color: #191919; + + --md-default-fg-color: hsla(0, 0%, 100%, 0.90); + --md-default-fg-color--light: hsla(0, 0%, 100%, 0.70); + --md-default-fg-color--lighter: hsla(0, 0%, 100%, 0.60); + --md-default-fg-color--lightest: hsla(0, 0%, 100%, 0.40); + + --md-footer-bg-color: hsla(0, 0%, 10%, 0.87); + --md-footer-bg-color--dark: hsla(0, 0%, 8%, 1); + + --md-typeset-a-color: var(--md-accent-fg-color); + + --md-code-hl-number-color: rgb(231, 107, 93); + --md-code-hl-special-color: hsla(340, 83%, 66%, 1); + --md-code-hl-function-color: hsla(291, 57%, 65%, 1); + --md-code-hl-constant-color: hsla(250, 62%, 70%, 1); + --md-code-hl-keyword-color: hsla(219, 66%, 64%, 1); + --md-code-hl-string-color: var(--md-accent-fg-color); + --md-code-hl-name-color: var(--md-default-fg-color--light); + --md-code-hl-operator-color: var(--md-default-fg-color--light); + --md-code-hl-punctuation-color: var(--md-default-fg-color--light); + --md-code-hl-comment-color: rgb(55, 161, 108); + --md-code-hl-generic-color: var(--md-default-fg-color--light); + --md-code-hl-variable-color: var(--md-default-fg-color--light); +} \ No newline at end of file diff --git a/docs/topics/chats.md b/docs/topics/chats.md new file mode 100644 index 0000000..af84250 --- /dev/null +++ b/docs/topics/chats.md @@ -0,0 +1,99 @@ +!!! 
note + This content is currently being refactored + +### Basic Chats + +```python +import rigging as rg + +generator = rg.get_generator("claude-2.1") +chat = generator.chat( + [ + {"role": "system", "content": "You are a wizard harry."}, + {"role": "user", "content": "Say hello!"}, + ] +).run() + +print(chat.last) +# [assistant]: Hello! + +print(f"{chat.last!r}") +# Message(role='assistant', parts=[], content='Hello!') + +print(chat.prev) +# [ +# Message(role='system', parts=[], content='You are a wizard harry.'), +# Message(role='user', parts=[], content='Say hello!'), +# ] + +print(chat.json) +# [{ ... }] + +``` + +### Continuing Chats + +```python +import rigging as rg + +generator = rg.get_generator("gpt-3.5-turbo") +chat = generator.chat([ + {"role": "user", "content": "Hello, how are you?"}, +]) + +# We can fork (continue_) before generation has occured +specific = chat.fork("Be specific please.").run() +poetic = chat.fork("Be as poetic as possible").overload(temperature=1.5).run() + +# We can also fork (continue_) after generation +next_chat = poetic.fork( + {"role": "user", "content": "That's good, tell me a joke"} +) + +update = next_chat.run() +``` + +### Basic Templating + +```python +import rigging as rg + +template = rg.get_generator("gpt-4").chat([ + {"role": "user", "content": "What is the capitol of $country?"}, +]) + +for country in ["France", "Germany"]: + print(template.apply(country=country).run().last) + +# The capital of France is Paris. +# The capital of Germany is Berlin. +``` + +### Strip Parsed Sections + +```python +import rigging as rg + +class Reasoning(rg.Model): + content: str + +meaning = rg.get_generator("claude-2.1").chat([ + { + "role": "user", + "content": "What is the meaning of life in one sentence? 
" + f"Document your reasoning between {Reasoning.xml_tags()} tags.", + }, +]).run() + +# Gracefully handle mising models +reasoning = meaning.last.try_parse(Reasoning) +if reasoning: + print("reasoning:", reasoning.content.strip()) + +# Strip parsed content to avoid sharing +# previous thoughts with the model. +without_reasons = meaning.strip(Reasoning) +print("meaning of life:", without_reasons.last.content.strip()) + +# follow_up = without_thoughts.continue_(...) +``` \ No newline at end of file diff --git a/docs/topics/generators.md b/docs/topics/generators.md new file mode 100644 index 0000000..c65d5be --- /dev/null +++ b/docs/topics/generators.md @@ -0,0 +1,52 @@ +!!! note + This content is currently being refactored + +### Overload Generation Params + +```python +import rigging as rg + +pending = rg.get_generator("gpt-3.5-turbo,max_tokens=50").chat([ + {"role": "user", "content": "Say a haiku about boats"}, +]) + +for temp in [0.1, 0.5, 1.0]: + print(pending.overload(temperature=temp).run().last.content) + +``` + +### Custom Generator + +Any custom generator simply needs to implement a `complete` function, and +then it can be used anywhere inside rigging. + +```python +class Custom(Generator): + # model: str + # api_key: str + # params: GeneratorParams + + custom_field: bool + + def complete( + self, + messages: t.Sequence[rg.Message], + overloads: GenerateParams = GenerateParams(), + ) -> rg.Message: + # Access self vars where needed + api_key = self.api_key + model_id = self.model + + # Merge in args for API overloads + marged: dict[str, t.Any] = self._merge_params(overloads) + + # response: str = ... + + return rg.Message("assistant", response) + + +generator = Custom(model='foo', custom_field=True) +generator.chat(...) 
+``` + +*Note: we currently don't have anyway to "register" custom generators for `get_generator`.* \ No newline at end of file diff --git a/docs/topics/models.md b/docs/topics/models.md new file mode 100644 index 0000000..36ca97e --- /dev/null +++ b/docs/topics/models.md @@ -0,0 +1,88 @@ +!!! note + This content is currently being refactored + +### Model Parsing + +```python +import rigging as rg + +class Answer(rg.Model): + content: str + +chat = ( + rg.get_generator("claude-3-haiku-20240307") + .chat([ + {"role": "user", "content": f"Say your name between {Answer.xml_tags()}."}, + ]) + .until_parsed_as(Answer) + .run() +) + +answer = chat.last.parse(Answer) +print(answer.content) + +# "Claude" + +print(f"{chat.last!r}") + +# Message(role='assistant', parts=[ +# ParsedMessagePart(model=Answer(content='Claude'), ref='Claude') +# ], content='Claude') + +chat.last.content = "new content" # Updating content strips parsed parts +print(f"{chat.last!r}") + +# Message(role='assistant', parts=[], content='new content') +``` + +### Mutliple Models + +```python +import rigging as rg + +class Joke(rg.Model): + content: str + +chat = ( + rg.get_generator("claude-2.1") + .chat([{ + "role": "user", + "content": f"Provide 3 short jokes each wrapped with {Joke.xml_tags()} tags."}, + ]) + .run() +) + +jokes = chat.last.parse_set(Joke) + +# [ +# Joke(content="Why don't eggs tell jokes? They'd crack each other up!"), +# Joke(content='What do you call a bear with no teeth? A gummy bear!'), +# Joke(content='What do you call a fake noodle? 
An Impasta!') +# ] +``` + +### Complex Models + +```python +import rigging as rg + +class Inner(rg.Model): + type: str = rg.attr() + content: str + +class Outer(rg.Model): + name: str = rg.attr() + inners: list[Inner] = rg.element() + +outer = Outer(name="foo", inners=[ + Inner(type="cat", content="meow"), + Inner(type="dog", content="bark") +]) + +print(outer.to_pretty_xml()) + +# +# meow +# bark +# +``` \ No newline at end of file diff --git a/docs/topics/setup_logging.md b/docs/topics/setup_logging.md new file mode 100644 index 0000000..1074a81 --- /dev/null +++ b/docs/topics/setup_logging.md @@ -0,0 +1,23 @@ +Rigging uses [loguru](https://loguru.readthedocs.io/) for it's logging. By default it disables it's logger allowing users to choose when/how to gather messages. + +If you want to let rigging messages flow into loguru, you should enable it: + +```python +from loguru import logger + +logger.enable('rigging') +``` + +If you want to have some sane default handlers with dual console & file logging, +you can use the [rigging.logging.configure_logging][] function. + +```python +from rigging.logging import configure_logging + +configure_logging( + 'info', # stderr level + 'out.log', # log file (optional) + 'trace' # log file level +) +``` +*(This will remove existing handlers, so you might prefer to configure them yourself)* diff --git a/docs/topics/tools.md b/docs/topics/tools.md new file mode 100644 index 0000000..8f232fb --- /dev/null +++ b/docs/topics/tools.md @@ -0,0 +1,39 @@ +!!! 
note + This content is currently being refactored + +### Tools + +```python +from typing import Annotated +import rigging as rg + +class WeatherTool(rg.Tool): + @property + def name(self) -> str: + return "weather" + + @property + def description(self) -> str: + return "A tool to get the weather for a location" + + def get_for_city(self, city: Annotated[str, "The city name to get weather for"]) -> str: + print(f"[=] get_for_city('{city}')") + return f"The weather in {city} is nice today" + +chat = ( + rg.get_generator("mistral/mistral-tiny") + .chat( + [ + {"role": "user", "content": "What is the weather in London?"}, + ] + ) + .using(WeatherTool(), force=True) + .run() +) + +# [=] get_for_city('London') + +print(chat.last.content) + +# "Based on the information I've received, the weather in London is nice today." +``` \ No newline at end of file diff --git a/mkdocs.yml b/mkdocs.yml new file mode 100644 index 0000000..c285add --- /dev/null +++ b/mkdocs.yml @@ -0,0 +1,96 @@ +site_name: Rigging +site_description: A lightweight LLM interaction framework +site_author: Dreadnode +site_url: https://rigging.dreadnode.io +repo_url: https://github.com/dreadnode/rigging + +nav: + - Home: index.md + - Topics: + - Generators: topics/generators.md + - Chats: topics/chats.md + - Models: topics/models.md + - Tools: topics/tools.md + - Logging: topics/setup_logging.md + - API: + - rigging.chat: api/chat.md + - rigging.completion: api/completion.md + - rigging.generator: api/generator.md + - rigging.model: api/model.md + - rigging.message: api/message.md + - rigging.tool: api/tool.md + - rigging.parsing: api/parsing.md + - rigging.logging: api/logging.md + - rigging.error: api/error.md + + +theme: + logo: assets/logo_black.png + favicon: assets/logo_white.png + name: material + icon: + repo: fontawesome/brands/github + palette: + scheme: slate + primary: custom + features: + - content.code.copy + - content.code.annotate + - toc.integrate + - navigation.footer + - navigation.indexes 
+ - navigation.sections + - navigation.expand + - navigation.path + - navigation.top + - navigation.tabs + +plugins: + - search + - section-index + - social + - mkdocstrings: + handlers: + python: + paths: [rigging] + options: + docstring_options: + ignore_init_summary: true + docstring_section_style: list + heading_level: 2 + merge_init_into_class: true + show_signature_annotations: true + show_symbol_type_heading: true + show_symbol_type_toc: true + signature_crossrefs: true + +watch: + - rigging/ + +markdown_extensions: + - admonition + - pymdownx.highlight: + anchor_linenums: true + line_spans: __span + pygments_lang_class: true + - pymdownx.inlinehilite + - pymdownx.snippets + - pymdownx.superfences + - pymdownx.details + - pymdownx.tabbed + +extra_css: + - stylesheets/extra.css + +extra_javascript: + - https://polyfill.io/v3/polyfill.min.js?features=es6 + +extra: + homepage: https://dreadnode.io + social: + - icon: fontawesome/brands/github + link: https://github.com/dreadnode + - icon: fontawesome/brands/twitter + link: https://twitter.com/dreadnode + - icon: fontawesome/brands/python + link: https://pypi.org/project/rigging/ \ No newline at end of file diff --git a/poetry.lock b/poetry.lock index ff77064..266224a 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2,87 +2,87 @@ [[package]] name = "aiohttp" -version = "3.9.3" +version = "3.9.5" description = "Async http client/server framework (asyncio)" optional = false python-versions = ">=3.8" files = [ - {file = "aiohttp-3.9.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:939677b61f9d72a4fa2a042a5eee2a99a24001a67c13da113b2e30396567db54"}, - {file = "aiohttp-3.9.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1f5cd333fcf7590a18334c90f8c9147c837a6ec8a178e88d90a9b96ea03194cc"}, - {file = "aiohttp-3.9.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:82e6aa28dd46374f72093eda8bcd142f7771ee1eb9d1e223ff0fa7177a96b4a5"}, - {file = 
"aiohttp-3.9.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f56455b0c2c7cc3b0c584815264461d07b177f903a04481dfc33e08a89f0c26b"}, - {file = "aiohttp-3.9.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bca77a198bb6e69795ef2f09a5f4c12758487f83f33d63acde5f0d4919815768"}, - {file = "aiohttp-3.9.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e083c285857b78ee21a96ba1eb1b5339733c3563f72980728ca2b08b53826ca5"}, - {file = "aiohttp-3.9.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab40e6251c3873d86ea9b30a1ac6d7478c09277b32e14745d0d3c6e76e3c7e29"}, - {file = "aiohttp-3.9.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:df822ee7feaaeffb99c1a9e5e608800bd8eda6e5f18f5cfb0dc7eeb2eaa6bbec"}, - {file = "aiohttp-3.9.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:acef0899fea7492145d2bbaaaec7b345c87753168589cc7faf0afec9afe9b747"}, - {file = "aiohttp-3.9.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:cd73265a9e5ea618014802ab01babf1940cecb90c9762d8b9e7d2cc1e1969ec6"}, - {file = "aiohttp-3.9.3-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:a78ed8a53a1221393d9637c01870248a6f4ea5b214a59a92a36f18151739452c"}, - {file = "aiohttp-3.9.3-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:6b0e029353361f1746bac2e4cc19b32f972ec03f0f943b390c4ab3371840aabf"}, - {file = "aiohttp-3.9.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7cf5c9458e1e90e3c390c2639f1017a0379a99a94fdfad3a1fd966a2874bba52"}, - {file = "aiohttp-3.9.3-cp310-cp310-win32.whl", hash = "sha256:3e59c23c52765951b69ec45ddbbc9403a8761ee6f57253250c6e1536cacc758b"}, - {file = "aiohttp-3.9.3-cp310-cp310-win_amd64.whl", hash = "sha256:055ce4f74b82551678291473f66dc9fb9048a50d8324278751926ff0ae7715e5"}, - {file = "aiohttp-3.9.3-cp311-cp311-macosx_10_9_universal2.whl", hash = 
"sha256:6b88f9386ff1ad91ace19d2a1c0225896e28815ee09fc6a8932fded8cda97c3d"}, - {file = "aiohttp-3.9.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c46956ed82961e31557b6857a5ca153c67e5476972e5f7190015018760938da2"}, - {file = "aiohttp-3.9.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:07b837ef0d2f252f96009e9b8435ec1fef68ef8b1461933253d318748ec1acdc"}, - {file = "aiohttp-3.9.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad46e6f620574b3b4801c68255492e0159d1712271cc99d8bdf35f2043ec266"}, - {file = "aiohttp-3.9.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5ed3e046ea7b14938112ccd53d91c1539af3e6679b222f9469981e3dac7ba1ce"}, - {file = "aiohttp-3.9.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:039df344b45ae0b34ac885ab5b53940b174530d4dd8a14ed8b0e2155b9dddccb"}, - {file = "aiohttp-3.9.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7943c414d3a8d9235f5f15c22ace69787c140c80b718dcd57caaade95f7cd93b"}, - {file = "aiohttp-3.9.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:84871a243359bb42c12728f04d181a389718710129b36b6aad0fc4655a7647d4"}, - {file = "aiohttp-3.9.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:5eafe2c065df5401ba06821b9a054d9cb2848867f3c59801b5d07a0be3a380ae"}, - {file = "aiohttp-3.9.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:9d3c9b50f19704552f23b4eaea1fc082fdd82c63429a6506446cbd8737823da3"}, - {file = "aiohttp-3.9.3-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:f033d80bc6283092613882dfe40419c6a6a1527e04fc69350e87a9df02bbc283"}, - {file = "aiohttp-3.9.3-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:2c895a656dd7e061b2fd6bb77d971cc38f2afc277229ce7dd3552de8313a483e"}, - {file = "aiohttp-3.9.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1f5a71d25cd8106eab05f8704cd9167b6e5187bcdf8f090a66c6d88b634802b4"}, - {file = 
"aiohttp-3.9.3-cp311-cp311-win32.whl", hash = "sha256:50fca156d718f8ced687a373f9e140c1bb765ca16e3d6f4fe116e3df7c05b2c5"}, - {file = "aiohttp-3.9.3-cp311-cp311-win_amd64.whl", hash = "sha256:5fe9ce6c09668063b8447f85d43b8d1c4e5d3d7e92c63173e6180b2ac5d46dd8"}, - {file = "aiohttp-3.9.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:38a19bc3b686ad55804ae931012f78f7a534cce165d089a2059f658f6c91fa60"}, - {file = "aiohttp-3.9.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:770d015888c2a598b377bd2f663adfd947d78c0124cfe7b959e1ef39f5b13869"}, - {file = "aiohttp-3.9.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ee43080e75fc92bf36219926c8e6de497f9b247301bbf88c5c7593d931426679"}, - {file = "aiohttp-3.9.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:52df73f14ed99cee84865b95a3d9e044f226320a87af208f068ecc33e0c35b96"}, - {file = "aiohttp-3.9.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc9b311743a78043b26ffaeeb9715dc360335e5517832f5a8e339f8a43581e4d"}, - {file = "aiohttp-3.9.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b955ed993491f1a5da7f92e98d5dad3c1e14dc175f74517c4e610b1f2456fb11"}, - {file = "aiohttp-3.9.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:504b6981675ace64c28bf4a05a508af5cde526e36492c98916127f5a02354d53"}, - {file = "aiohttp-3.9.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a6fe5571784af92b6bc2fda8d1925cccdf24642d49546d3144948a6a1ed58ca5"}, - {file = "aiohttp-3.9.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ba39e9c8627edc56544c8628cc180d88605df3892beeb2b94c9bc857774848ca"}, - {file = "aiohttp-3.9.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:e5e46b578c0e9db71d04c4b506a2121c0cb371dd89af17a0586ff6769d4c58c1"}, - {file = "aiohttp-3.9.3-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = 
"sha256:938a9653e1e0c592053f815f7028e41a3062e902095e5a7dc84617c87267ebd5"}, - {file = "aiohttp-3.9.3-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:c3452ea726c76e92f3b9fae4b34a151981a9ec0a4847a627c43d71a15ac32aa6"}, - {file = "aiohttp-3.9.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ff30218887e62209942f91ac1be902cc80cddb86bf00fbc6783b7a43b2bea26f"}, - {file = "aiohttp-3.9.3-cp312-cp312-win32.whl", hash = "sha256:38f307b41e0bea3294a9a2a87833191e4bcf89bb0365e83a8be3a58b31fb7f38"}, - {file = "aiohttp-3.9.3-cp312-cp312-win_amd64.whl", hash = "sha256:b791a3143681a520c0a17e26ae7465f1b6f99461a28019d1a2f425236e6eedb5"}, - {file = "aiohttp-3.9.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:0ed621426d961df79aa3b963ac7af0d40392956ffa9be022024cd16297b30c8c"}, - {file = "aiohttp-3.9.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7f46acd6a194287b7e41e87957bfe2ad1ad88318d447caf5b090012f2c5bb528"}, - {file = "aiohttp-3.9.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:feeb18a801aacb098220e2c3eea59a512362eb408d4afd0c242044c33ad6d542"}, - {file = "aiohttp-3.9.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f734e38fd8666f53da904c52a23ce517f1b07722118d750405af7e4123933511"}, - {file = "aiohttp-3.9.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b40670ec7e2156d8e57f70aec34a7216407848dfe6c693ef131ddf6e76feb672"}, - {file = "aiohttp-3.9.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fdd215b7b7fd4a53994f238d0f46b7ba4ac4c0adb12452beee724ddd0743ae5d"}, - {file = "aiohttp-3.9.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:017a21b0df49039c8f46ca0971b3a7fdc1f56741ab1240cb90ca408049766168"}, - {file = "aiohttp-3.9.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e99abf0bba688259a496f966211c49a514e65afa9b3073a1fcee08856e04425b"}, - {file = "aiohttp-3.9.3-cp38-cp38-musllinux_1_1_aarch64.whl", 
hash = "sha256:648056db9a9fa565d3fa851880f99f45e3f9a771dd3ff3bb0c048ea83fb28194"}, - {file = "aiohttp-3.9.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8aacb477dc26797ee089721536a292a664846489c49d3ef9725f992449eda5a8"}, - {file = "aiohttp-3.9.3-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:522a11c934ea660ff8953eda090dcd2154d367dec1ae3c540aff9f8a5c109ab4"}, - {file = "aiohttp-3.9.3-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:5bce0dc147ca85caa5d33debc4f4d65e8e8b5c97c7f9f660f215fa74fc49a321"}, - {file = "aiohttp-3.9.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:4b4af9f25b49a7be47c0972139e59ec0e8285c371049df1a63b6ca81fdd216a2"}, - {file = "aiohttp-3.9.3-cp38-cp38-win32.whl", hash = "sha256:298abd678033b8571995650ccee753d9458dfa0377be4dba91e4491da3f2be63"}, - {file = "aiohttp-3.9.3-cp38-cp38-win_amd64.whl", hash = "sha256:69361bfdca5468c0488d7017b9b1e5ce769d40b46a9f4a2eed26b78619e9396c"}, - {file = "aiohttp-3.9.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:0fa43c32d1643f518491d9d3a730f85f5bbaedcbd7fbcae27435bb8b7a061b29"}, - {file = "aiohttp-3.9.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:835a55b7ca49468aaaac0b217092dfdff370e6c215c9224c52f30daaa735c1c1"}, - {file = "aiohttp-3.9.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:06a9b2c8837d9a94fae16c6223acc14b4dfdff216ab9b7202e07a9a09541168f"}, - {file = "aiohttp-3.9.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:abf151955990d23f84205286938796c55ff11bbfb4ccfada8c9c83ae6b3c89a3"}, - {file = "aiohttp-3.9.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:59c26c95975f26e662ca78fdf543d4eeaef70e533a672b4113dd888bd2423caa"}, - {file = "aiohttp-3.9.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f95511dd5d0e05fd9728bac4096319f80615aaef4acbecb35a990afebe953b0e"}, - {file = "aiohttp-3.9.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:595f105710293e76b9dc09f52e0dd896bd064a79346234b521f6b968ffdd8e58"}, - {file = "aiohttp-3.9.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7c8b816c2b5af5c8a436df44ca08258fc1a13b449393a91484225fcb7545533"}, - {file = "aiohttp-3.9.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f1088fa100bf46e7b398ffd9904f4808a0612e1d966b4aa43baa535d1b6341eb"}, - {file = "aiohttp-3.9.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f59dfe57bb1ec82ac0698ebfcdb7bcd0e99c255bd637ff613760d5f33e7c81b3"}, - {file = "aiohttp-3.9.3-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:361a1026c9dd4aba0109e4040e2aecf9884f5cfe1b1b1bd3d09419c205e2e53d"}, - {file = "aiohttp-3.9.3-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:363afe77cfcbe3a36353d8ea133e904b108feea505aa4792dad6585a8192c55a"}, - {file = "aiohttp-3.9.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8e2c45c208c62e955e8256949eb225bd8b66a4c9b6865729a786f2aa79b72e9d"}, - {file = "aiohttp-3.9.3-cp39-cp39-win32.whl", hash = "sha256:f7217af2e14da0856e082e96ff637f14ae45c10a5714b63c77f26d8884cf1051"}, - {file = "aiohttp-3.9.3-cp39-cp39-win_amd64.whl", hash = "sha256:27468897f628c627230dba07ec65dc8d0db566923c48f29e084ce382119802bc"}, - {file = "aiohttp-3.9.3.tar.gz", hash = "sha256:90842933e5d1ff760fae6caca4b2b3edba53ba8f4b71e95dacf2818a2aca06f7"}, + {file = "aiohttp-3.9.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:fcde4c397f673fdec23e6b05ebf8d4751314fa7c24f93334bf1f1364c1c69ac7"}, + {file = "aiohttp-3.9.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5d6b3f1fabe465e819aed2c421a6743d8debbde79b6a8600739300630a01bf2c"}, + {file = "aiohttp-3.9.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6ae79c1bc12c34082d92bf9422764f799aee4746fd7a392db46b7fd357d4a17a"}, + {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d3ebb9e1316ec74277d19c5f482f98cc65a73ccd5430540d6d11682cd857430"}, + {file = 
"aiohttp-3.9.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84dabd95154f43a2ea80deffec9cb44d2e301e38a0c9d331cc4aa0166fe28ae3"}, + {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c8a02fbeca6f63cb1f0475c799679057fc9268b77075ab7cf3f1c600e81dd46b"}, + {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c26959ca7b75ff768e2776d8055bf9582a6267e24556bb7f7bd29e677932be72"}, + {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:714d4e5231fed4ba2762ed489b4aec07b2b9953cf4ee31e9871caac895a839c0"}, + {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e7a6a8354f1b62e15d48e04350f13e726fa08b62c3d7b8401c0a1314f02e3558"}, + {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:c413016880e03e69d166efb5a1a95d40f83d5a3a648d16486592c49ffb76d0db"}, + {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:ff84aeb864e0fac81f676be9f4685f0527b660f1efdc40dcede3c251ef1e867f"}, + {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:ad7f2919d7dac062f24d6f5fe95d401597fbb015a25771f85e692d043c9d7832"}, + {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:702e2c7c187c1a498a4e2b03155d52658fdd6fda882d3d7fbb891a5cf108bb10"}, + {file = "aiohttp-3.9.5-cp310-cp310-win32.whl", hash = "sha256:67c3119f5ddc7261d47163ed86d760ddf0e625cd6246b4ed852e82159617b5fb"}, + {file = "aiohttp-3.9.5-cp310-cp310-win_amd64.whl", hash = "sha256:471f0ef53ccedec9995287f02caf0c068732f026455f07db3f01a46e49d76bbb"}, + {file = "aiohttp-3.9.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e0ae53e33ee7476dd3d1132f932eeb39bf6125083820049d06edcdca4381f342"}, + {file = "aiohttp-3.9.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c088c4d70d21f8ca5c0b8b5403fe84a7bc8e024161febdd4ef04575ef35d474d"}, + 
{file = "aiohttp-3.9.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:639d0042b7670222f33b0028de6b4e2fad6451462ce7df2af8aee37dcac55424"}, + {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f26383adb94da5e7fb388d441bf09c61e5e35f455a3217bfd790c6b6bc64b2ee"}, + {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:66331d00fb28dc90aa606d9a54304af76b335ae204d1836f65797d6fe27f1ca2"}, + {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ff550491f5492ab5ed3533e76b8567f4b37bd2995e780a1f46bca2024223233"}, + {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f22eb3a6c1080d862befa0a89c380b4dafce29dc6cd56083f630073d102eb595"}, + {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a81b1143d42b66ffc40a441379387076243ef7b51019204fd3ec36b9f69e77d6"}, + {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f64fd07515dad67f24b6ea4a66ae2876c01031de91c93075b8093f07c0a2d93d"}, + {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:93e22add827447d2e26d67c9ac0161756007f152fdc5210277d00a85f6c92323"}, + {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:55b39c8684a46e56ef8c8d24faf02de4a2b2ac60d26cee93bc595651ff545de9"}, + {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4715a9b778f4293b9f8ae7a0a7cef9829f02ff8d6277a39d7f40565c737d3771"}, + {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:afc52b8d969eff14e069a710057d15ab9ac17cd4b6753042c407dcea0e40bf75"}, + {file = "aiohttp-3.9.5-cp311-cp311-win32.whl", hash = "sha256:b3df71da99c98534be076196791adca8819761f0bf6e08e07fd7da25127150d6"}, + {file = "aiohttp-3.9.5-cp311-cp311-win_amd64.whl", hash = 
"sha256:88e311d98cc0bf45b62fc46c66753a83445f5ab20038bcc1b8a1cc05666f428a"}, + {file = "aiohttp-3.9.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:c7a4b7a6cf5b6eb11e109a9755fd4fda7d57395f8c575e166d363b9fc3ec4678"}, + {file = "aiohttp-3.9.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:0a158704edf0abcac8ac371fbb54044f3270bdbc93e254a82b6c82be1ef08f3c"}, + {file = "aiohttp-3.9.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d153f652a687a8e95ad367a86a61e8d53d528b0530ef382ec5aaf533140ed00f"}, + {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82a6a97d9771cb48ae16979c3a3a9a18b600a8505b1115cfe354dfb2054468b4"}, + {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:60cdbd56f4cad9f69c35eaac0fbbdf1f77b0ff9456cebd4902f3dd1cf096464c"}, + {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8676e8fd73141ded15ea586de0b7cda1542960a7b9ad89b2b06428e97125d4fa"}, + {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da00da442a0e31f1c69d26d224e1efd3a1ca5bcbf210978a2ca7426dfcae9f58"}, + {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18f634d540dd099c262e9f887c8bbacc959847cfe5da7a0e2e1cf3f14dbf2daf"}, + {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:320e8618eda64e19d11bdb3bd04ccc0a816c17eaecb7e4945d01deee2a22f95f"}, + {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:2faa61a904b83142747fc6a6d7ad8fccff898c849123030f8e75d5d967fd4a81"}, + {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:8c64a6dc3fe5db7b1b4d2b5cb84c4f677768bdc340611eca673afb7cf416ef5a"}, + {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:393c7aba2b55559ef7ab791c94b44f7482a07bf7640d17b341b79081f5e5cd1a"}, + {file = 
"aiohttp-3.9.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:c671dc117c2c21a1ca10c116cfcd6e3e44da7fcde37bf83b2be485ab377b25da"}, + {file = "aiohttp-3.9.5-cp312-cp312-win32.whl", hash = "sha256:5a7ee16aab26e76add4afc45e8f8206c95d1d75540f1039b84a03c3b3800dd59"}, + {file = "aiohttp-3.9.5-cp312-cp312-win_amd64.whl", hash = "sha256:5ca51eadbd67045396bc92a4345d1790b7301c14d1848feaac1d6a6c9289e888"}, + {file = "aiohttp-3.9.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:694d828b5c41255e54bc2dddb51a9f5150b4eefa9886e38b52605a05d96566e8"}, + {file = "aiohttp-3.9.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0605cc2c0088fcaae79f01c913a38611ad09ba68ff482402d3410bf59039bfb8"}, + {file = "aiohttp-3.9.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4558e5012ee03d2638c681e156461d37b7a113fe13970d438d95d10173d25f78"}, + {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9dbc053ac75ccc63dc3a3cc547b98c7258ec35a215a92bd9f983e0aac95d3d5b"}, + {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4109adee842b90671f1b689901b948f347325045c15f46b39797ae1bf17019de"}, + {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6ea1a5b409a85477fd8e5ee6ad8f0e40bf2844c270955e09360418cfd09abac"}, + {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3c2890ca8c59ee683fd09adf32321a40fe1cf164e3387799efb2acebf090c11"}, + {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3916c8692dbd9d55c523374a3b8213e628424d19116ac4308e434dbf6d95bbdd"}, + {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8d1964eb7617907c792ca00b341b5ec3e01ae8c280825deadbbd678447b127e1"}, + {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d5ab8e1f6bee051a4bf6195e38a5c13e5e161cb7bad83d8854524798bd9fcd6e"}, + 
{file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:52c27110f3862a1afbcb2af4281fc9fdc40327fa286c4625dfee247c3ba90156"}, + {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:7f64cbd44443e80094309875d4f9c71d0401e966d191c3d469cde4642bc2e031"}, + {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8b4f72fbb66279624bfe83fd5eb6aea0022dad8eec62b71e7bf63ee1caadeafe"}, + {file = "aiohttp-3.9.5-cp38-cp38-win32.whl", hash = "sha256:6380c039ec52866c06d69b5c7aad5478b24ed11696f0e72f6b807cfb261453da"}, + {file = "aiohttp-3.9.5-cp38-cp38-win_amd64.whl", hash = "sha256:da22dab31d7180f8c3ac7c7635f3bcd53808f374f6aa333fe0b0b9e14b01f91a"}, + {file = "aiohttp-3.9.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:1732102949ff6087589408d76cd6dea656b93c896b011ecafff418c9661dc4ed"}, + {file = "aiohttp-3.9.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c6021d296318cb6f9414b48e6a439a7f5d1f665464da507e8ff640848ee2a58a"}, + {file = "aiohttp-3.9.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:239f975589a944eeb1bad26b8b140a59a3a320067fb3cd10b75c3092405a1372"}, + {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3b7b30258348082826d274504fbc7c849959f1989d86c29bc355107accec6cfb"}, + {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd2adf5c87ff6d8b277814a28a535b59e20bfea40a101db6b3bdca7e9926bc24"}, + {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e9a3d838441bebcf5cf442700e3963f58b5c33f015341f9ea86dcd7d503c07e2"}, + {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e3a1ae66e3d0c17cf65c08968a5ee3180c5a95920ec2731f53343fac9bad106"}, + {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9c69e77370cce2d6df5d12b4e12bdcca60c47ba13d1cbbc8645dd005a20b738b"}, + 
{file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0cbf56238f4bbf49dab8c2dc2e6b1b68502b1e88d335bea59b3f5b9f4c001475"}, + {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:d1469f228cd9ffddd396d9948b8c9cd8022b6d1bf1e40c6f25b0fb90b4f893ed"}, + {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:45731330e754f5811c314901cebdf19dd776a44b31927fa4b4dbecab9e457b0c"}, + {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:3fcb4046d2904378e3aeea1df51f697b0467f2aac55d232c87ba162709478c46"}, + {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8cf142aa6c1a751fcb364158fd710b8a9be874b81889c2bd13aa8893197455e2"}, + {file = "aiohttp-3.9.5-cp39-cp39-win32.whl", hash = "sha256:7b179eea70833c8dee51ec42f3b4097bd6370892fa93f510f76762105568cf09"}, + {file = "aiohttp-3.9.5-cp39-cp39-win_amd64.whl", hash = "sha256:38d80498e2e169bc61418ff36170e0aad0cd268da8b38a17c4cf29d254a8b3f1"}, + {file = "aiohttp-3.9.5.tar.gz", hash = "sha256:edea7d15772ceeb29db4aff55e482d4bcfb6ae160ce144f2682de02f6d693551"}, ] [package.dependencies] @@ -202,6 +202,61 @@ tests = ["attrs[tests-no-zope]", "zope-interface"] tests-mypy = ["mypy (>=1.6)", "pytest-mypy-plugins"] tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"] +[[package]] +name = "babel" +version = "2.14.0" +description = "Internationalization utilities" +optional = false +python-versions = ">=3.7" +files = [ + {file = "Babel-2.14.0-py3-none-any.whl", hash = "sha256:efb1a25b7118e67ce3a259bed20545c29cb68be8ad2c784c83689981b7a57287"}, + {file = "Babel-2.14.0.tar.gz", hash = "sha256:6919867db036398ba21eb5c7a0f6b28ab8cbc3ae7a73a44ebe34ae74a4e7d363"}, +] + +[package.extras] +dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"] + +[[package]] +name = "cairocffi" +version = "1.7.0" +description = "cffi-based cairo bindings for Python" +optional = false 
+python-versions = ">=3.8" +files = [ + {file = "cairocffi-1.7.0-py3-none-any.whl", hash = "sha256:1f29a8d41dbda4090c0aa33bcdea64f3b493e95f74a43ea107c4a8a7b7f632ef"}, + {file = "cairocffi-1.7.0.tar.gz", hash = "sha256:7761863603894305f3160eca68452f373433ca8745ab7dd445bd2c6ce50dcab7"}, +] + +[package.dependencies] +cffi = ">=1.1.0" + +[package.extras] +doc = ["sphinx", "sphinx_rtd_theme"] +test = ["numpy", "pikepdf", "pytest", "ruff"] +xcb = ["xcffib (>=1.4.0)"] + +[[package]] +name = "cairosvg" +version = "2.7.1" +description = "A Simple SVG Converter based on Cairo" +optional = false +python-versions = ">=3.5" +files = [ + {file = "CairoSVG-2.7.1-py3-none-any.whl", hash = "sha256:8a5222d4e6c3f86f1f7046b63246877a63b49923a1cd202184c3a634ef546b3b"}, + {file = "CairoSVG-2.7.1.tar.gz", hash = "sha256:432531d72347291b9a9ebfb6777026b607563fd8719c46ee742db0aef7271ba0"}, +] + +[package.dependencies] +cairocffi = "*" +cssselect2 = "*" +defusedxml = "*" +pillow = "*" +tinycss2 = "*" + +[package.extras] +doc = ["sphinx", "sphinx-rtd-theme"] +test = ["flake8", "isort", "pytest"] + [[package]] name = "certifi" version = "2024.2.2" @@ -418,6 +473,25 @@ traitlets = ">=4" [package.extras] test = ["pytest"] +[[package]] +name = "cssselect2" +version = "0.7.0" +description = "CSS selectors for Python ElementTree" +optional = false +python-versions = ">=3.7" +files = [ + {file = "cssselect2-0.7.0-py3-none-any.whl", hash = "sha256:fd23a65bfd444595913f02fc71f6b286c29261e354c41d722ca7a261a49b5969"}, + {file = "cssselect2-0.7.0.tar.gz", hash = "sha256:1ccd984dab89fc68955043aca4e1b03e0cf29cad9880f6e28e3ba7a74b14aa5a"}, +] + +[package.dependencies] +tinycss2 = "*" +webencodings = "*" + +[package.extras] +doc = ["sphinx", "sphinx_rtd_theme"] +test = ["flake8", "isort", "pytest"] + [[package]] name = "debugpy" version = "1.8.1" @@ -460,6 +534,17 @@ files = [ {file = "decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"}, ] +[[package]] 
+name = "defusedxml" +version = "0.7.1" +description = "XML bomb protection for Python stdlib modules" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ + {file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"}, + {file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"}, +] + [[package]] name = "distro" version = "1.9.0" @@ -473,13 +558,13 @@ files = [ [[package]] name = "exceptiongroup" -version = "1.2.0" +version = "1.2.1" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" files = [ - {file = "exceptiongroup-1.2.0-py3-none-any.whl", hash = "sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14"}, - {file = "exceptiongroup-1.2.0.tar.gz", hash = "sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68"}, + {file = "exceptiongroup-1.2.1-py3-none-any.whl", hash = "sha256:5258b9ed329c5bbdd31a309f53cbfb0b155341807f6ff7606a1e801a891b29ad"}, + {file = "exceptiongroup-1.2.1.tar.gz", hash = "sha256:a4785e48b045528f5bfe627b6ad554ff32def154f42372786903b7abcfe1aa16"}, ] [package.extras] @@ -501,13 +586,13 @@ tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipyth [[package]] name = "filelock" -version = "3.13.3" +version = "3.14.0" description = "A platform independent file lock." 
optional = false python-versions = ">=3.8" files = [ - {file = "filelock-3.13.3-py3-none-any.whl", hash = "sha256:5ffa845303983e7a0b7ae17636509bc97997d58afeafa72fb141a17b152284cb"}, - {file = "filelock-3.13.3.tar.gz", hash = "sha256:a79895a25bbefdf55d1a2a0a80968f7dbb28edcd6d4234a0afb3f37ecde4b546"}, + {file = "filelock-3.14.0-py3-none-any.whl", hash = "sha256:43339835842f110ca7ae60f1e1c160714c5a6afd15a2873419ab185334975c0f"}, + {file = "filelock-3.14.0.tar.gz", hash = "sha256:6ea72da3be9b8c82afd3edcf99f2fffbb5076335a5ae4d03248bb5b6c3eae78a"}, ] [package.extras] @@ -636,6 +721,37 @@ smb = ["smbprotocol"] ssh = ["paramiko"] tqdm = ["tqdm"] +[[package]] +name = "ghp-import" +version = "2.1.0" +description = "Copy your docs directly to the gh-pages branch." +optional = false +python-versions = "*" +files = [ + {file = "ghp-import-2.1.0.tar.gz", hash = "sha256:9c535c4c61193c2df8871222567d7fd7e5014d835f97dc7b7439069e2413d343"}, + {file = "ghp_import-2.1.0-py3-none-any.whl", hash = "sha256:8337dd7b50877f163d4c0289bc1f1c7f127550241988d568c1db512c4324a619"}, +] + +[package.dependencies] +python-dateutil = ">=2.8.1" + +[package.extras] +dev = ["flake8", "markdown", "twine", "wheel"] + +[[package]] +name = "griffe" +version = "0.44.0" +description = "Signatures for entire Python programs. Extract the structure, the frame, the skeleton of your project, to generate API documentation or find breaking changes in your API." +optional = false +python-versions = ">=3.8" +files = [ + {file = "griffe-0.44.0-py3-none-any.whl", hash = "sha256:8a4471c469ba980b87c843f1168850ce39d0c1d0c7be140dca2480f76c8e5446"}, + {file = "griffe-0.44.0.tar.gz", hash = "sha256:34aee1571042f9bf00529bc715de4516fb6f482b164e90d030300601009e0223"}, +] + +[package.dependencies] +colorama = ">=0.4" + [[package]] name = "h11" version = "0.14.0" @@ -649,13 +765,13 @@ files = [ [[package]] name = "httpcore" -version = "1.0.4" +version = "1.0.5" description = "A minimal low-level HTTP client." 
optional = false python-versions = ">=3.8" files = [ - {file = "httpcore-1.0.4-py3-none-any.whl", hash = "sha256:ac418c1db41bade2ad53ae2f3834a3a0f5ae76b56cf5aa497d2d033384fc7d73"}, - {file = "httpcore-1.0.4.tar.gz", hash = "sha256:cb2839ccfcba0d2d3c1131d3c3e26dfc327326fbe7a5dc0dbfe9f6c9151bb022"}, + {file = "httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5"}, + {file = "httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61"}, ] [package.dependencies] @@ -666,7 +782,7 @@ h11 = ">=0.13,<0.15" asyncio = ["anyio (>=4.0,<5.0)"] http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] -trio = ["trio (>=0.22.0,<0.25.0)"] +trio = ["trio (>=0.22.0,<0.26.0)"] [[package]] name = "httpx" @@ -694,13 +810,13 @@ socks = ["socksio (==1.*)"] [[package]] name = "huggingface-hub" -version = "0.22.1" +version = "0.23.0" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" optional = false python-versions = ">=3.8.0" files = [ - {file = "huggingface_hub-0.22.1-py3-none-any.whl", hash = "sha256:eac63947923d15c9a68681d7ed2d9599e058860617064e3ee6bd91a4b954faaf"}, - {file = "huggingface_hub-0.22.1.tar.gz", hash = "sha256:5b8aaee5f3618cd432f49886da9935bbe8fab92d719011826430907b93171dd8"}, + {file = "huggingface_hub-0.23.0-py3-none-any.whl", hash = "sha256:075c30d48ee7db2bba779190dc526d2c11d422aed6f9044c5e2fdc2c432fdb91"}, + {file = "huggingface_hub-0.23.0.tar.gz", hash = "sha256:7126dedd10a4c6fac796ced4d87a8cf004efc722a5125c2c09299017fa366fa9"}, ] [package.dependencies] @@ -713,28 +829,28 @@ tqdm = ">=4.42.1" typing-extensions = ">=3.7.4.3" [package.extras] -all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "minijinja (>=1.0)", "mypy (==1.5.1)", "numpy", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.3.0)", "soundfile", 
"types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "mypy (==1.5.1)", "numpy", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.3.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] cli = ["InquirerPy (==0.3.4)"] -dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "minijinja (>=1.0)", "mypy (==1.5.1)", "numpy", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.3.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "mypy (==1.5.1)", "numpy", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.3.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"] hf-transfer = ["hf-transfer (>=0.1.4)"] inference = ["aiohttp", "minijinja (>=1.0)"] quality = ["mypy (==1.5.1)", "ruff (>=0.3.0)"] tensorflow = ["graphviz", "pydot", "tensorflow"] tensorflow-testing = ["keras (<3.0)", "tensorflow"] -testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "minijinja (>=1.0)", "numpy", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 
(<2.0)"] +testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "numpy", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] torch = ["safetensors", "torch"] typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)"] [[package]] name = "idna" -version = "3.6" +version = "3.7" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.5" files = [ - {file = "idna-3.6-py3-none-any.whl", hash = "sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f"}, - {file = "idna-3.6.tar.gz", hash = "sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca"}, + {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"}, + {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, ] [[package]] @@ -769,13 +885,13 @@ files = [ [[package]] name = "ipykernel" -version = "6.29.3" +version = "6.29.4" description = "IPython Kernel for Jupyter" optional = false python-versions = ">=3.8" files = [ - {file = "ipykernel-6.29.3-py3-none-any.whl", hash = "sha256:5aa086a4175b0229d4eca211e181fb473ea78ffd9869af36ba7694c947302a21"}, - {file = "ipykernel-6.29.3.tar.gz", hash = "sha256:e14c250d1f9ea3989490225cc1a542781b095a18a19447fcf2b5eaf7d0ac5bd2"}, + {file = "ipykernel-6.29.4-py3-none-any.whl", hash = "sha256:1181e653d95c6808039c509ef8e67c4126b3b3af7781496c7cbfb5ed938a27da"}, + {file = "ipykernel-6.29.4.tar.gz", hash = "sha256:3d44070060f9475ac2092b760123fadf105d2e2493c24848b6691a7c4f42af5c"}, ] [package.dependencies] @@ -802,13 +918,13 @@ test = ["flaky", "ipyparallel", "pre-commit", "pytest (>=7.0)", "pytest-asyncio [[package]] name = "ipython" -version = "8.22.2" +version = 
"8.24.0" description = "IPython: Productive Interactive Computing" optional = false python-versions = ">=3.10" files = [ - {file = "ipython-8.22.2-py3-none-any.whl", hash = "sha256:3c86f284c8f3d8f2b6c662f885c4889a91df7cd52056fd02b7d8d6195d7f56e9"}, - {file = "ipython-8.22.2.tar.gz", hash = "sha256:2dcaad9049f9056f1fef63514f176c7d41f930daa78d05b82a176202818f2c14"}, + {file = "ipython-8.24.0-py3-none-any.whl", hash = "sha256:d7bf2f6c4314984e3e02393213bab8703cf163ede39672ce5918c51fe253a2a3"}, + {file = "ipython-8.24.0.tar.gz", hash = "sha256:010db3f8a728a578bb641fdd06c063b9fb8e96a9464c63aec6310fbcb5e80501"}, ] [package.dependencies] @@ -822,18 +938,20 @@ prompt-toolkit = ">=3.0.41,<3.1.0" pygments = ">=2.4.0" stack-data = "*" traitlets = ">=5.13.0" +typing-extensions = {version = ">=4.6", markers = "python_version < \"3.12\""} [package.extras] -all = ["ipython[black,doc,kernel,nbconvert,nbformat,notebook,parallel,qtconsole,terminal]", "ipython[test,test-extra]"] +all = ["ipython[black,doc,kernel,matplotlib,nbconvert,nbformat,notebook,parallel,qtconsole]", "ipython[test,test-extra]"] black = ["black"] doc = ["docrepr", "exceptiongroup", "ipykernel", "ipython[test]", "matplotlib", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "sphinxcontrib-jquery", "stack-data", "typing-extensions"] kernel = ["ipykernel"] +matplotlib = ["matplotlib"] nbconvert = ["nbconvert"] nbformat = ["nbformat"] notebook = ["ipywidgets", "notebook"] parallel = ["ipyparallel"] qtconsole = ["qtconsole"] -test = ["pickleshare", "pytest (<8)", "pytest-asyncio (<0.22)", "testpath"] +test = ["pickleshare", "pytest", "pytest-asyncio (<0.22)", "testpath"] test-extra = ["curio", "ipython[test]", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.23)", "pandas", "trio"] [[package]] @@ -958,6 +1076,21 @@ win32-setctime = {version = ">=1.0.0", markers = "sys_platform == \"win32\""} [package.extras] dev = ["Sphinx (==7.2.5)", "colorama (==0.4.5)", "colorama (==0.4.6)", "exceptiongroup (==1.1.3)", 
"freezegun (==1.1.0)", "freezegun (==1.2.2)", "mypy (==v0.910)", "mypy (==v0.971)", "mypy (==v1.4.1)", "mypy (==v1.5.1)", "pre-commit (==3.4.0)", "pytest (==6.1.2)", "pytest (==7.4.0)", "pytest-cov (==2.12.1)", "pytest-cov (==4.1.0)", "pytest-mypy-plugins (==1.9.3)", "pytest-mypy-plugins (==3.0.0)", "sphinx-autobuild (==2021.3.14)", "sphinx-rtd-theme (==1.3.0)", "tox (==3.27.1)", "tox (==4.11.0)"] +[[package]] +name = "markdown" +version = "3.6" +description = "Python implementation of John Gruber's Markdown." +optional = false +python-versions = ">=3.8" +files = [ + {file = "Markdown-3.6-py3-none-any.whl", hash = "sha256:48f276f4d8cfb8ce6527c8f79e2ee29708508bf4d40aa410fbc3b4ee832c850f"}, + {file = "Markdown-3.6.tar.gz", hash = "sha256:ed4f41f6daecbeeb96e576ce414c41d2d876daa9a16cb35fa8ed8c2ddfad0224"}, +] + +[package.extras] +docs = ["mdx-gh-links (>=0.2)", "mkdocs (>=1.5)", "mkdocs-gen-files", "mkdocs-literate-nav", "mkdocs-nature (>=0.6)", "mkdocs-section-index", "mkdocstrings[python]"] +testing = ["coverage", "pyyaml"] + [[package]] name = "markupsafe" version = "2.1.5" @@ -1029,18 +1162,188 @@ files = [ [[package]] name = "matplotlib-inline" -version = "0.1.6" +version = "0.1.7" description = "Inline Matplotlib backend for Jupyter" optional = false -python-versions = ">=3.5" +python-versions = ">=3.8" files = [ - {file = "matplotlib-inline-0.1.6.tar.gz", hash = "sha256:f887e5f10ba98e8d2b150ddcf4702c1e5f8b3a20005eb0f74bfdbd360ee6f304"}, - {file = "matplotlib_inline-0.1.6-py3-none-any.whl", hash = "sha256:f1f41aab5328aa5aaea9b16d083b128102f8712542f819fe7e6a420ff581b311"}, + {file = "matplotlib_inline-0.1.7-py3-none-any.whl", hash = "sha256:df192d39a4ff8f21b1895d72e6a13f5fcc5099f00fa84384e0ea28c2cc0653ca"}, + {file = "matplotlib_inline-0.1.7.tar.gz", hash = "sha256:8423b23ec666be3d16e16b60bdd8ac4e86e840ebd1dd11a30b9f117f2fa0ab90"}, ] [package.dependencies] traitlets = "*" +[[package]] +name = "mergedeep" +version = "1.3.4" +description = "A deep merge function for 
🐍." +optional = false +python-versions = ">=3.6" +files = [ + {file = "mergedeep-1.3.4-py3-none-any.whl", hash = "sha256:70775750742b25c0d8f36c55aed03d24c3384d17c951b3175d898bd778ef0307"}, + {file = "mergedeep-1.3.4.tar.gz", hash = "sha256:0096d52e9dad9939c3d975a774666af186eda617e6ca84df4c94dec30004f2a8"}, +] + +[[package]] +name = "mkdocs" +version = "1.6.0" +description = "Project documentation with Markdown." +optional = false +python-versions = ">=3.8" +files = [ + {file = "mkdocs-1.6.0-py3-none-any.whl", hash = "sha256:1eb5cb7676b7d89323e62b56235010216319217d4af5ddc543a91beb8d125ea7"}, + {file = "mkdocs-1.6.0.tar.gz", hash = "sha256:a73f735824ef83a4f3bcb7a231dcab23f5a838f88b7efc54a0eef5fbdbc3c512"}, +] + +[package.dependencies] +click = ">=7.0" +colorama = {version = ">=0.4", markers = "platform_system == \"Windows\""} +ghp-import = ">=1.0" +jinja2 = ">=2.11.1" +markdown = ">=3.3.6" +markupsafe = ">=2.0.1" +mergedeep = ">=1.3.4" +mkdocs-get-deps = ">=0.2.0" +packaging = ">=20.5" +pathspec = ">=0.11.1" +pyyaml = ">=5.1" +pyyaml-env-tag = ">=0.1" +watchdog = ">=2.0" + +[package.extras] +i18n = ["babel (>=2.9.0)"] +min-versions = ["babel (==2.9.0)", "click (==7.0)", "colorama (==0.4)", "ghp-import (==1.0)", "importlib-metadata (==4.4)", "jinja2 (==2.11.1)", "markdown (==3.3.6)", "markupsafe (==2.0.1)", "mergedeep (==1.3.4)", "mkdocs-get-deps (==0.2.0)", "packaging (==20.5)", "pathspec (==0.11.1)", "pyyaml (==5.1)", "pyyaml-env-tag (==0.1)", "watchdog (==2.0)"] + +[[package]] +name = "mkdocs-autorefs" +version = "1.0.1" +description = "Automatically link across pages in MkDocs." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "mkdocs_autorefs-1.0.1-py3-none-any.whl", hash = "sha256:aacdfae1ab197780fb7a2dac92ad8a3d8f7ca8049a9cbe56a4218cd52e8da570"}, + {file = "mkdocs_autorefs-1.0.1.tar.gz", hash = "sha256:f684edf847eced40b570b57846b15f0bf57fb93ac2c510450775dcf16accb971"}, +] + +[package.dependencies] +Markdown = ">=3.3" +markupsafe = ">=2.0.1" +mkdocs = ">=1.1" + +[[package]] +name = "mkdocs-get-deps" +version = "0.2.0" +description = "MkDocs extension that lists all dependencies according to a mkdocs.yml file" +optional = false +python-versions = ">=3.8" +files = [ + {file = "mkdocs_get_deps-0.2.0-py3-none-any.whl", hash = "sha256:2bf11d0b133e77a0dd036abeeb06dec8775e46efa526dc70667d8863eefc6134"}, + {file = "mkdocs_get_deps-0.2.0.tar.gz", hash = "sha256:162b3d129c7fad9b19abfdcb9c1458a651628e4b1dea628ac68790fb3061c60c"}, +] + +[package.dependencies] +mergedeep = ">=1.3.4" +platformdirs = ">=2.2.0" +pyyaml = ">=5.1" + +[[package]] +name = "mkdocs-material" +version = "9.5.21" +description = "Documentation that simply works" +optional = false +python-versions = ">=3.8" +files = [ + {file = "mkdocs_material-9.5.21-py3-none-any.whl", hash = "sha256:210e1f179682cd4be17d5c641b2f4559574b9dea2f589c3f0e7c17c5bd1959bc"}, + {file = "mkdocs_material-9.5.21.tar.gz", hash = "sha256:049f82770f40559d3c2aa2259c562ea7257dbb4aaa9624323b5ef27b2d95a450"}, +] + +[package.dependencies] +babel = ">=2.10,<3.0" +cairosvg = {version = ">=2.6,<3.0", optional = true, markers = "extra == \"imaging\""} +colorama = ">=0.4,<1.0" +jinja2 = ">=3.0,<4.0" +markdown = ">=3.2,<4.0" +mkdocs = ">=1.6,<2.0" +mkdocs-material-extensions = ">=1.3,<2.0" +paginate = ">=0.5,<1.0" +pillow = {version = ">=10.2,<11.0", optional = true, markers = "extra == \"imaging\""} +pygments = ">=2.16,<3.0" +pymdown-extensions = ">=10.2,<11.0" +regex = ">=2022.4" +requests = ">=2.26,<3.0" + +[package.extras] +git = ["mkdocs-git-committers-plugin-2 (>=1.1,<2.0)", 
"mkdocs-git-revision-date-localized-plugin (>=1.2.4,<2.0)"] +imaging = ["cairosvg (>=2.6,<3.0)", "pillow (>=10.2,<11.0)"] +recommended = ["mkdocs-minify-plugin (>=0.7,<1.0)", "mkdocs-redirects (>=1.2,<2.0)", "mkdocs-rss-plugin (>=1.6,<2.0)"] + +[[package]] +name = "mkdocs-material-extensions" +version = "1.3.1" +description = "Extension pack for Python Markdown and MkDocs Material." +optional = false +python-versions = ">=3.8" +files = [ + {file = "mkdocs_material_extensions-1.3.1-py3-none-any.whl", hash = "sha256:adff8b62700b25cb77b53358dad940f3ef973dd6db797907c49e3c2ef3ab4e31"}, + {file = "mkdocs_material_extensions-1.3.1.tar.gz", hash = "sha256:10c9511cea88f568257f960358a467d12b970e1f7b2c0e5fb2bb48cab1928443"}, +] + +[[package]] +name = "mkdocs-section-index" +version = "0.3.9" +description = "MkDocs plugin to allow clickable sections that lead to an index page" +optional = false +python-versions = ">=3.8" +files = [ + {file = "mkdocs_section_index-0.3.9-py3-none-any.whl", hash = "sha256:5e5eb288e8d7984d36c11ead5533f376fdf23498f44e903929d72845b24dfe34"}, + {file = "mkdocs_section_index-0.3.9.tar.gz", hash = "sha256:b66128d19108beceb08b226ee1ba0981840d14baf8a652b6c59e650f3f92e4f8"}, +] + +[package.dependencies] +mkdocs = ">=1.2" + +[[package]] +name = "mkdocstrings" +version = "0.25.0" +description = "Automatic documentation from sources, for MkDocs." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "mkdocstrings-0.25.0-py3-none-any.whl", hash = "sha256:df1b63f26675fcde8c1b77e7ea996cd2f93220b148e06455428f676f5dc838f1"}, + {file = "mkdocstrings-0.25.0.tar.gz", hash = "sha256:066986b3fb5b9ef2d37c4417255a808f7e63b40ff8f67f6cab8054d903fbc91d"}, +] + +[package.dependencies] +click = ">=7.0" +Jinja2 = ">=2.11.1" +Markdown = ">=3.3" +MarkupSafe = ">=1.1" +mkdocs = ">=1.4" +mkdocs-autorefs = ">=0.3.1" +platformdirs = ">=2.2.0" +pymdown-extensions = ">=6.3" + +[package.extras] +crystal = ["mkdocstrings-crystal (>=0.3.4)"] +python = ["mkdocstrings-python (>=0.5.2)"] +python-legacy = ["mkdocstrings-python-legacy (>=0.2.1)"] + +[[package]] +name = "mkdocstrings-python" +version = "1.10.0" +description = "A Python handler for mkdocstrings." +optional = false +python-versions = ">=3.8" +files = [ + {file = "mkdocstrings_python-1.10.0-py3-none-any.whl", hash = "sha256:ba833fbd9d178a4b9d5cb2553a4df06e51dc1f51e41559a4d2398c16a6f69ecc"}, + {file = "mkdocstrings_python-1.10.0.tar.gz", hash = "sha256:71678fac657d4d2bb301eed4e4d2d91499c095fd1f8a90fa76422a87a5693828"}, +] + +[package.dependencies] +griffe = ">=0.44" +mkdocstrings = ">=0.24.2" + [[package]] name = "multidict" version = "6.0.5" @@ -1142,38 +1445,38 @@ files = [ [[package]] name = "mypy" -version = "1.9.0" +version = "1.10.0" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" files = [ - {file = "mypy-1.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f8a67616990062232ee4c3952f41c779afac41405806042a8126fe96e098419f"}, - {file = "mypy-1.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d357423fa57a489e8c47b7c85dfb96698caba13d66e086b412298a1a0ea3b0ed"}, - {file = "mypy-1.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49c87c15aed320de9b438ae7b00c1ac91cd393c1b854c2ce538e2a72d55df150"}, - {file = "mypy-1.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = 
"sha256:48533cdd345c3c2e5ef48ba3b0d3880b257b423e7995dada04248725c6f77374"}, - {file = "mypy-1.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:4d3dbd346cfec7cb98e6cbb6e0f3c23618af826316188d587d1c1bc34f0ede03"}, - {file = "mypy-1.9.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:653265f9a2784db65bfca694d1edd23093ce49740b2244cde583aeb134c008f3"}, - {file = "mypy-1.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3a3c007ff3ee90f69cf0a15cbcdf0995749569b86b6d2f327af01fd1b8aee9dc"}, - {file = "mypy-1.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2418488264eb41f69cc64a69a745fad4a8f86649af4b1041a4c64ee61fc61129"}, - {file = "mypy-1.9.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:68edad3dc7d70f2f17ae4c6c1b9471a56138ca22722487eebacfd1eb5321d612"}, - {file = "mypy-1.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:85ca5fcc24f0b4aeedc1d02f93707bccc04733f21d41c88334c5482219b1ccb3"}, - {file = "mypy-1.9.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aceb1db093b04db5cd390821464504111b8ec3e351eb85afd1433490163d60cd"}, - {file = "mypy-1.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0235391f1c6f6ce487b23b9dbd1327b4ec33bb93934aa986efe8a9563d9349e6"}, - {file = "mypy-1.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d4d5ddc13421ba3e2e082a6c2d74c2ddb3979c39b582dacd53dd5d9431237185"}, - {file = "mypy-1.9.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:190da1ee69b427d7efa8aa0d5e5ccd67a4fb04038c380237a0d96829cb157913"}, - {file = "mypy-1.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:fe28657de3bfec596bbeef01cb219833ad9d38dd5393fc649f4b366840baefe6"}, - {file = "mypy-1.9.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e54396d70be04b34f31d2edf3362c1edd023246c82f1730bbf8768c28db5361b"}, - {file = "mypy-1.9.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5e6061f44f2313b94f920e91b204ec600982961e07a17e0f6cd83371cb23f5c2"}, - {file = 
"mypy-1.9.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81a10926e5473c5fc3da8abb04119a1f5811a236dc3a38d92015cb1e6ba4cb9e"}, - {file = "mypy-1.9.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b685154e22e4e9199fc95f298661deea28aaede5ae16ccc8cbb1045e716b3e04"}, - {file = "mypy-1.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:5d741d3fc7c4da608764073089e5f58ef6352bedc223ff58f2f038c2c4698a89"}, - {file = "mypy-1.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:587ce887f75dd9700252a3abbc9c97bbe165a4a630597845c61279cf32dfbf02"}, - {file = "mypy-1.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f88566144752999351725ac623471661c9d1cd8caa0134ff98cceeea181789f4"}, - {file = "mypy-1.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61758fabd58ce4b0720ae1e2fea5cfd4431591d6d590b197775329264f86311d"}, - {file = "mypy-1.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e49499be624dead83927e70c756970a0bc8240e9f769389cdf5714b0784ca6bf"}, - {file = "mypy-1.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:571741dc4194b4f82d344b15e8837e8c5fcc462d66d076748142327626a1b6e9"}, - {file = "mypy-1.9.0-py3-none-any.whl", hash = "sha256:a260627a570559181a9ea5de61ac6297aa5af202f06fd7ab093ce74e7181e43e"}, - {file = "mypy-1.9.0.tar.gz", hash = "sha256:3cc5da0127e6a478cddd906068496a97a7618a21ce9b54bde5bf7e539c7af974"}, + {file = "mypy-1.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:da1cbf08fb3b851ab3b9523a884c232774008267b1f83371ace57f412fe308c2"}, + {file = "mypy-1.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:12b6bfc1b1a66095ab413160a6e520e1dc076a28f3e22f7fb25ba3b000b4ef99"}, + {file = "mypy-1.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e36fb078cce9904c7989b9693e41cb9711e0600139ce3970c6ef814b6ebc2b2"}, + {file = "mypy-1.10.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2b0695d605ddcd3eb2f736cd8b4e388288c21e7de85001e9f85df9187f2b50f9"}, + {file = 
"mypy-1.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:cd777b780312ddb135bceb9bc8722a73ec95e042f911cc279e2ec3c667076051"}, + {file = "mypy-1.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3be66771aa5c97602f382230165b856c231d1277c511c9a8dd058be4784472e1"}, + {file = "mypy-1.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8b2cbaca148d0754a54d44121b5825ae71868c7592a53b7292eeb0f3fdae95ee"}, + {file = "mypy-1.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ec404a7cbe9fc0e92cb0e67f55ce0c025014e26d33e54d9e506a0f2d07fe5de"}, + {file = "mypy-1.10.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e22e1527dc3d4aa94311d246b59e47f6455b8729f4968765ac1eacf9a4760bc7"}, + {file = "mypy-1.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:a87dbfa85971e8d59c9cc1fcf534efe664d8949e4c0b6b44e8ca548e746a8d53"}, + {file = "mypy-1.10.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a781f6ad4bab20eef8b65174a57e5203f4be627b46291f4589879bf4e257b97b"}, + {file = "mypy-1.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b808e12113505b97d9023b0b5e0c0705a90571c6feefc6f215c1df9381256e30"}, + {file = "mypy-1.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f55583b12156c399dce2df7d16f8a5095291354f1e839c252ec6c0611e86e2e"}, + {file = "mypy-1.10.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4cf18f9d0efa1b16478c4c129eabec36148032575391095f73cae2e722fcf9d5"}, + {file = "mypy-1.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:bc6ac273b23c6b82da3bb25f4136c4fd42665f17f2cd850771cb600bdd2ebeda"}, + {file = "mypy-1.10.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9fd50226364cd2737351c79807775136b0abe084433b55b2e29181a4c3c878c0"}, + {file = "mypy-1.10.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f90cff89eea89273727d8783fef5d4a934be2fdca11b47def50cf5d311aff727"}, + {file = "mypy-1.10.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:fcfc70599efde5c67862a07a1aaf50e55bce629ace26bb19dc17cece5dd31ca4"}, + {file = "mypy-1.10.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:075cbf81f3e134eadaf247de187bd604748171d6b79736fa9b6c9685b4083061"}, + {file = "mypy-1.10.0-cp38-cp38-win_amd64.whl", hash = "sha256:3f298531bca95ff615b6e9f2fc0333aae27fa48052903a0ac90215021cdcfa4f"}, + {file = "mypy-1.10.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fa7ef5244615a2523b56c034becde4e9e3f9b034854c93639adb667ec9ec2976"}, + {file = "mypy-1.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3236a4c8f535a0631f85f5fcdffba71c7feeef76a6002fcba7c1a8e57c8be1ec"}, + {file = "mypy-1.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a2b5cdbb5dd35aa08ea9114436e0d79aceb2f38e32c21684dcf8e24e1e92821"}, + {file = "mypy-1.10.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:92f93b21c0fe73dc00abf91022234c79d793318b8a96faac147cd579c1671746"}, + {file = "mypy-1.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:28d0e038361b45f099cc086d9dd99c15ff14d0188f44ac883010e172ce86c38a"}, + {file = "mypy-1.10.0-py3-none-any.whl", hash = "sha256:f8c083976eb530019175aabadb60921e73b4f45736760826aa1689dda8208aee"}, + {file = "mypy-1.10.0.tar.gz", hash = "sha256:3d087fcbec056c4ee34974da493a826ce316947485cef3901f511848e687c131"}, ] [package.dependencies] @@ -1211,13 +1514,13 @@ files = [ [[package]] name = "openai" -version = "1.14.3" +version = "1.25.1" description = "The official Python library for the openai API" optional = false python-versions = ">=3.7.1" files = [ - {file = "openai-1.14.3-py3-none-any.whl", hash = "sha256:7a465994a7ccf677a110c6cc2ef9d86229bad42c060b585b67049aa749f3b774"}, - {file = "openai-1.14.3.tar.gz", hash = "sha256:37b514e9c0ff45383ec9b242abd0f7859b1080d4b54b61393ed341ecad1b8eb9"}, + {file = "openai-1.25.1-py3-none-any.whl", hash = "sha256:aa2f381f476f5fa4df8728a34a3e454c321caa064b7b68ab6e9daa1ed082dbf9"}, + {file = "openai-1.25.1.tar.gz", hash = 
"sha256:f561ce86f4b4008eb6c78622d641e4b7e1ab8a8cdb15d2f0b2a49942d40d21a8"}, ] [package.dependencies] @@ -1243,20 +1546,41 @@ files = [ {file = "packaging-24.0.tar.gz", hash = "sha256:eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9"}, ] +[[package]] +name = "paginate" +version = "0.5.6" +description = "Divides large result sets into pages for easier browsing" +optional = false +python-versions = "*" +files = [ + {file = "paginate-0.5.6.tar.gz", hash = "sha256:5e6007b6a9398177a7e1648d04fdd9f8c9766a1a945bceac82f1929e8c78af2d"}, +] + [[package]] name = "parso" -version = "0.8.3" +version = "0.8.4" description = "A Python Parser" optional = false python-versions = ">=3.6" files = [ - {file = "parso-0.8.3-py2.py3-none-any.whl", hash = "sha256:c001d4636cd3aecdaf33cbb40aebb59b094be2a74c556778ef5576c175e19e75"}, - {file = "parso-0.8.3.tar.gz", hash = "sha256:8c07be290bb59f03588915921e29e8a50002acaf2cdc5fa0e0114f91709fafa0"}, + {file = "parso-0.8.4-py2.py3-none-any.whl", hash = "sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18"}, + {file = "parso-0.8.4.tar.gz", hash = "sha256:eb3a7b58240fb99099a345571deecc0f9540ea5f4dd2fe14c2a99d6b281ab92d"}, ] [package.extras] -qa = ["flake8 (==3.8.3)", "mypy (==0.782)"] -testing = ["docopt", "pytest (<6.0.0)"] +qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"] +testing = ["docopt", "pytest"] + +[[package]] +name = "pathspec" +version = "0.12.1" +description = "Utility library for gitignore style pattern matching of file paths." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, + {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, +] [[package]] name = "pexpect" @@ -1272,30 +1596,117 @@ files = [ [package.dependencies] ptyprocess = ">=0.5" +[[package]] +name = "pillow" +version = "10.3.0" +description = "Python Imaging Library (Fork)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pillow-10.3.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:90b9e29824800e90c84e4022dd5cc16eb2d9605ee13f05d47641eb183cd73d45"}, + {file = "pillow-10.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a2c405445c79c3f5a124573a051062300936b0281fee57637e706453e452746c"}, + {file = "pillow-10.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78618cdbccaa74d3f88d0ad6cb8ac3007f1a6fa5c6f19af64b55ca170bfa1edf"}, + {file = "pillow-10.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:261ddb7ca91fcf71757979534fb4c128448b5b4c55cb6152d280312062f69599"}, + {file = "pillow-10.3.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:ce49c67f4ea0609933d01c0731b34b8695a7a748d6c8d186f95e7d085d2fe475"}, + {file = "pillow-10.3.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:b14f16f94cbc61215115b9b1236f9c18403c15dd3c52cf629072afa9d54c1cbf"}, + {file = "pillow-10.3.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d33891be6df59d93df4d846640f0e46f1a807339f09e79a8040bc887bdcd7ed3"}, + {file = "pillow-10.3.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b50811d664d392f02f7761621303eba9d1b056fb1868c8cdf4231279645c25f5"}, + {file = "pillow-10.3.0-cp310-cp310-win32.whl", hash = "sha256:ca2870d5d10d8726a27396d3ca4cf7976cec0f3cb706debe88e3a5bd4610f7d2"}, + {file = "pillow-10.3.0-cp310-cp310-win_amd64.whl", hash = 
"sha256:f0d0591a0aeaefdaf9a5e545e7485f89910c977087e7de2b6c388aec32011e9f"}, + {file = "pillow-10.3.0-cp310-cp310-win_arm64.whl", hash = "sha256:ccce24b7ad89adb5a1e34a6ba96ac2530046763912806ad4c247356a8f33a67b"}, + {file = "pillow-10.3.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:5f77cf66e96ae734717d341c145c5949c63180842a545c47a0ce7ae52ca83795"}, + {file = "pillow-10.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e4b878386c4bf293578b48fc570b84ecfe477d3b77ba39a6e87150af77f40c57"}, + {file = "pillow-10.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fdcbb4068117dfd9ce0138d068ac512843c52295ed996ae6dd1faf537b6dbc27"}, + {file = "pillow-10.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9797a6c8fe16f25749b371c02e2ade0efb51155e767a971c61734b1bf6293994"}, + {file = "pillow-10.3.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:9e91179a242bbc99be65e139e30690e081fe6cb91a8e77faf4c409653de39451"}, + {file = "pillow-10.3.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:1b87bd9d81d179bd8ab871603bd80d8645729939f90b71e62914e816a76fc6bd"}, + {file = "pillow-10.3.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:81d09caa7b27ef4e61cb7d8fbf1714f5aec1c6b6c5270ee53504981e6e9121ad"}, + {file = "pillow-10.3.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:048ad577748b9fa4a99a0548c64f2cb8d672d5bf2e643a739ac8faff1164238c"}, + {file = "pillow-10.3.0-cp311-cp311-win32.whl", hash = "sha256:7161ec49ef0800947dc5570f86568a7bb36fa97dd09e9827dc02b718c5643f09"}, + {file = "pillow-10.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:8eb0908e954d093b02a543dc963984d6e99ad2b5e36503d8a0aaf040505f747d"}, + {file = "pillow-10.3.0-cp311-cp311-win_arm64.whl", hash = "sha256:4e6f7d1c414191c1199f8996d3f2282b9ebea0945693fb67392c75a3a320941f"}, + {file = "pillow-10.3.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:e46f38133e5a060d46bd630faa4d9fa0202377495df1f068a8299fd78c84de84"}, + {file = 
"pillow-10.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:50b8eae8f7334ec826d6eeffaeeb00e36b5e24aa0b9df322c247539714c6df19"}, + {file = "pillow-10.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9d3bea1c75f8c53ee4d505c3e67d8c158ad4df0d83170605b50b64025917f338"}, + {file = "pillow-10.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:19aeb96d43902f0a783946a0a87dbdad5c84c936025b8419da0a0cd7724356b1"}, + {file = "pillow-10.3.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:74d28c17412d9caa1066f7a31df8403ec23d5268ba46cd0ad2c50fb82ae40462"}, + {file = "pillow-10.3.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:ff61bfd9253c3915e6d41c651d5f962da23eda633cf02262990094a18a55371a"}, + {file = "pillow-10.3.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d886f5d353333b4771d21267c7ecc75b710f1a73d72d03ca06df49b09015a9ef"}, + {file = "pillow-10.3.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4b5ec25d8b17217d635f8935dbc1b9aa5907962fae29dff220f2659487891cd3"}, + {file = "pillow-10.3.0-cp312-cp312-win32.whl", hash = "sha256:51243f1ed5161b9945011a7360e997729776f6e5d7005ba0c6879267d4c5139d"}, + {file = "pillow-10.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:412444afb8c4c7a6cc11a47dade32982439925537e483be7c0ae0cf96c4f6a0b"}, + {file = "pillow-10.3.0-cp312-cp312-win_arm64.whl", hash = "sha256:798232c92e7665fe82ac085f9d8e8ca98826f8e27859d9a96b41d519ecd2e49a"}, + {file = "pillow-10.3.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:4eaa22f0d22b1a7e93ff0a596d57fdede2e550aecffb5a1ef1106aaece48e96b"}, + {file = "pillow-10.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cd5e14fbf22a87321b24c88669aad3a51ec052eb145315b3da3b7e3cc105b9a2"}, + {file = "pillow-10.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1530e8f3a4b965eb6a7785cf17a426c779333eb62c9a7d1bbcf3ffd5bf77a4aa"}, + {file = 
"pillow-10.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d512aafa1d32efa014fa041d38868fda85028e3f930a96f85d49c7d8ddc0383"}, + {file = "pillow-10.3.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:339894035d0ede518b16073bdc2feef4c991ee991a29774b33e515f1d308e08d"}, + {file = "pillow-10.3.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:aa7e402ce11f0885305bfb6afb3434b3cd8f53b563ac065452d9d5654c7b86fd"}, + {file = "pillow-10.3.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0ea2a783a2bdf2a561808fe4a7a12e9aa3799b701ba305de596bc48b8bdfce9d"}, + {file = "pillow-10.3.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c78e1b00a87ce43bb37642c0812315b411e856a905d58d597750eb79802aaaa3"}, + {file = "pillow-10.3.0-cp38-cp38-win32.whl", hash = "sha256:72d622d262e463dfb7595202d229f5f3ab4b852289a1cd09650362db23b9eb0b"}, + {file = "pillow-10.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:2034f6759a722da3a3dbd91a81148cf884e91d1b747992ca288ab88c1de15999"}, + {file = "pillow-10.3.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:2ed854e716a89b1afcedea551cd85f2eb2a807613752ab997b9974aaa0d56936"}, + {file = "pillow-10.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dc1a390a82755a8c26c9964d457d4c9cbec5405896cba94cf51f36ea0d855002"}, + {file = "pillow-10.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4203efca580f0dd6f882ca211f923168548f7ba334c189e9eab1178ab840bf60"}, + {file = "pillow-10.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3102045a10945173d38336f6e71a8dc71bcaeed55c3123ad4af82c52807b9375"}, + {file = "pillow-10.3.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:6fb1b30043271ec92dc65f6d9f0b7a830c210b8a96423074b15c7bc999975f57"}, + {file = "pillow-10.3.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:1dfc94946bc60ea375cc39cff0b8da6c7e5f8fcdc1d946beb8da5c216156ddd8"}, + {file = "pillow-10.3.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:b09b86b27a064c9624d0a6c54da01c1beaf5b6cadfa609cf63789b1d08a797b9"}, + {file = "pillow-10.3.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d3b2348a78bc939b4fed6552abfd2e7988e0f81443ef3911a4b8498ca084f6eb"}, + {file = "pillow-10.3.0-cp39-cp39-win32.whl", hash = "sha256:45ebc7b45406febf07fef35d856f0293a92e7417ae7933207e90bf9090b70572"}, + {file = "pillow-10.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:0ba26351b137ca4e0db0342d5d00d2e355eb29372c05afd544ebf47c0956ffeb"}, + {file = "pillow-10.3.0-cp39-cp39-win_arm64.whl", hash = "sha256:50fd3f6b26e3441ae07b7c979309638b72abc1a25da31a81a7fbd9495713ef4f"}, + {file = "pillow-10.3.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:6b02471b72526ab8a18c39cb7967b72d194ec53c1fd0a70b050565a0f366d355"}, + {file = "pillow-10.3.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:8ab74c06ffdab957d7670c2a5a6e1a70181cd10b727cd788c4dd9005b6a8acd9"}, + {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:048eeade4c33fdf7e08da40ef402e748df113fd0b4584e32c4af74fe78baaeb2"}, + {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e2ec1e921fd07c7cda7962bad283acc2f2a9ccc1b971ee4b216b75fad6f0463"}, + {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:4c8e73e99da7db1b4cad7f8d682cf6abad7844da39834c288fbfa394a47bbced"}, + {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:16563993329b79513f59142a6b02055e10514c1a8e86dca8b48a893e33cf91e3"}, + {file = "pillow-10.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:dd78700f5788ae180b5ee8902c6aea5a5726bac7c364b202b4b3e3ba2d293170"}, + {file = "pillow-10.3.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:aff76a55a8aa8364d25400a210a65ff59d0168e0b4285ba6bf2bd83cf675ba32"}, + {file = "pillow-10.3.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = 
"sha256:b7bc2176354defba3edc2b9a777744462da2f8e921fbaf61e52acb95bafa9828"}, + {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:793b4e24db2e8742ca6423d3fde8396db336698c55cd34b660663ee9e45ed37f"}, + {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d93480005693d247f8346bc8ee28c72a2191bdf1f6b5db469c096c0c867ac015"}, + {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c83341b89884e2b2e55886e8fbbf37c3fa5efd6c8907124aeb72f285ae5696e5"}, + {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1a1d1915db1a4fdb2754b9de292642a39a7fb28f1736699527bb649484fb966a"}, + {file = "pillow-10.3.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a0eaa93d054751ee9964afa21c06247779b90440ca41d184aeb5d410f20ff591"}, + {file = "pillow-10.3.0.tar.gz", hash = "sha256:9d2455fbf44c914840c793e89aa82d0e1763a14253a000743719ae5946814b2d"}, +] + +[package.extras] +docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"] +fpx = ["olefile"] +mic = ["olefile"] +tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] +typing = ["typing-extensions"] +xmp = ["defusedxml"] + [[package]] name = "platformdirs" -version = "4.2.0" -description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." +version = "4.2.1" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
optional = false python-versions = ">=3.8" files = [ - {file = "platformdirs-4.2.0-py3-none-any.whl", hash = "sha256:0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068"}, - {file = "platformdirs-4.2.0.tar.gz", hash = "sha256:ef0cc731df711022c174543cb70a9b5bd22e5a9337c8624ef2c2ceb8ddad8768"}, + {file = "platformdirs-4.2.1-py3-none-any.whl", hash = "sha256:17d5a1161b3fd67b390023cb2d3b026bbd40abde6fdb052dfbd3a29c3ba22ee1"}, + {file = "platformdirs-4.2.1.tar.gz", hash = "sha256:031cd18d4ec63ec53e82dceaac0417d218a6863f7745dfcc9efe7793b7039bdf"}, ] [package.extras] docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"] +type = ["mypy (>=1.8)"] [[package]] name = "pluggy" -version = "1.4.0" +version = "1.5.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.8" files = [ - {file = "pluggy-1.4.0-py3-none-any.whl", hash = "sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981"}, - {file = "pluggy-1.4.0.tar.gz", hash = "sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be"}, + {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, + {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, ] [package.extras] @@ -1371,13 +1782,13 @@ tests = ["pytest"] [[package]] name = "pycparser" -version = "2.21" +version = "2.22" description = "C parser in Python" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +python-versions = ">=3.8" files = [ - {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, - {file = "pycparser-2.21.tar.gz", hash = 
"sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, + {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"}, + {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"}, ] [[package]] @@ -1536,28 +1947,45 @@ lxml = ["lxml (>=4.9.0)"] [[package]] name = "pygments" -version = "2.17.2" +version = "2.18.0" description = "Pygments is a syntax highlighting package written in Python." optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "pygments-2.17.2-py3-none-any.whl", hash = "sha256:b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c"}, - {file = "pygments-2.17.2.tar.gz", hash = "sha256:da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367"}, + {file = "pygments-2.18.0-py3-none-any.whl", hash = "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a"}, + {file = "pygments-2.18.0.tar.gz", hash = "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199"}, ] [package.extras] -plugins = ["importlib-metadata"] windows-terminal = ["colorama (>=0.4.6)"] +[[package]] +name = "pymdown-extensions" +version = "10.8.1" +description = "Extension pack for Python Markdown." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "pymdown_extensions-10.8.1-py3-none-any.whl", hash = "sha256:f938326115884f48c6059c67377c46cf631c733ef3629b6eed1349989d1b30cb"}, + {file = "pymdown_extensions-10.8.1.tar.gz", hash = "sha256:3ab1db5c9e21728dabf75192d71471f8e50f216627e9a1fa9535ecb0231b9940"}, +] + +[package.dependencies] +markdown = ">=3.6" +pyyaml = "*" + +[package.extras] +extra = ["pygments (>=2.12)"] + [[package]] name = "pytest" -version = "8.1.1" +version = "8.2.0" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.8" files = [ - {file = "pytest-8.1.1-py3-none-any.whl", hash = "sha256:2a8386cfc11fa9d2c50ee7b2a57e7d898ef90470a7a34c4b949ff59662bb78b7"}, - {file = "pytest-8.1.1.tar.gz", hash = "sha256:ac978141a75948948817d360297b7aae0fcb9d6ff6bc9ec6d514b85d5a65c044"}, + {file = "pytest-8.2.0-py3-none-any.whl", hash = "sha256:1733f0620f6cda4095bbf0d9ff8022486e91892245bb9e7d5542c018f612f233"}, + {file = "pytest-8.2.0.tar.gz", hash = "sha256:d507d4482197eac0ba2bae2e9babf0672eb333017bcedaa5fb1a3d42c1174b3f"}, ] [package.dependencies] @@ -1565,11 +1993,11 @@ colorama = {version = "*", markers = "sys_platform == \"win32\""} exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} iniconfig = "*" packaging = "*" -pluggy = ">=1.4,<2.0" +pluggy = ">=1.5,<2.0" tomli = {version = ">=1", markers = "python_version < \"3.11\""} [package.extras] -testing = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] +dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] [[package]] name = "python-dateutil" @@ -1682,106 +2110,115 @@ files = [ {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, ] +[[package]] +name = "pyyaml-env-tag" +version = "0.1" +description = "A custom 
YAML tag for referencing environment variables in YAML files. " +optional = false +python-versions = ">=3.6" +files = [ + {file = "pyyaml_env_tag-0.1-py3-none-any.whl", hash = "sha256:af31106dec8a4d68c60207c1886031cbf839b68aa7abccdb19868200532c2069"}, + {file = "pyyaml_env_tag-0.1.tar.gz", hash = "sha256:70092675bda14fdec33b31ba77e7543de9ddc88f2e5b99160396572d11525bdb"}, +] + +[package.dependencies] +pyyaml = "*" + [[package]] name = "pyzmq" -version = "25.1.2" +version = "26.0.3" description = "Python bindings for 0MQ" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" files = [ - {file = "pyzmq-25.1.2-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:e624c789359f1a16f83f35e2c705d07663ff2b4d4479bad35621178d8f0f6ea4"}, - {file = "pyzmq-25.1.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:49151b0efece79f6a79d41a461d78535356136ee70084a1c22532fc6383f4ad0"}, - {file = "pyzmq-25.1.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d9a5f194cf730f2b24d6af1f833c14c10f41023da46a7f736f48b6d35061e76e"}, - {file = "pyzmq-25.1.2-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:faf79a302f834d9e8304fafdc11d0d042266667ac45209afa57e5efc998e3872"}, - {file = "pyzmq-25.1.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f51a7b4ead28d3fca8dda53216314a553b0f7a91ee8fc46a72b402a78c3e43d"}, - {file = "pyzmq-25.1.2-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:0ddd6d71d4ef17ba5a87becf7ddf01b371eaba553c603477679ae817a8d84d75"}, - {file = "pyzmq-25.1.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:246747b88917e4867e2367b005fc8eefbb4a54b7db363d6c92f89d69abfff4b6"}, - {file = "pyzmq-25.1.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:00c48ae2fd81e2a50c3485de1b9d5c7c57cd85dc8ec55683eac16846e57ac979"}, - {file = "pyzmq-25.1.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5a68d491fc20762b630e5db2191dd07ff89834086740f70e978bb2ef2668be08"}, - {file = 
"pyzmq-25.1.2-cp310-cp310-win32.whl", hash = "sha256:09dfe949e83087da88c4a76767df04b22304a682d6154de2c572625c62ad6886"}, - {file = "pyzmq-25.1.2-cp310-cp310-win_amd64.whl", hash = "sha256:fa99973d2ed20417744fca0073390ad65ce225b546febb0580358e36aa90dba6"}, - {file = "pyzmq-25.1.2-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:82544e0e2d0c1811482d37eef297020a040c32e0687c1f6fc23a75b75db8062c"}, - {file = "pyzmq-25.1.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:01171fc48542348cd1a360a4b6c3e7d8f46cdcf53a8d40f84db6707a6768acc1"}, - {file = "pyzmq-25.1.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bc69c96735ab501419c432110016329bf0dea8898ce16fab97c6d9106dc0b348"}, - {file = "pyzmq-25.1.2-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3e124e6b1dd3dfbeb695435dff0e383256655bb18082e094a8dd1f6293114642"}, - {file = "pyzmq-25.1.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7598d2ba821caa37a0f9d54c25164a4fa351ce019d64d0b44b45540950458840"}, - {file = "pyzmq-25.1.2-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:d1299d7e964c13607efd148ca1f07dcbf27c3ab9e125d1d0ae1d580a1682399d"}, - {file = "pyzmq-25.1.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4e6f689880d5ad87918430957297c975203a082d9a036cc426648fcbedae769b"}, - {file = "pyzmq-25.1.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:cc69949484171cc961e6ecd4a8911b9ce7a0d1f738fcae717177c231bf77437b"}, - {file = "pyzmq-25.1.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9880078f683466b7f567b8624bfc16cad65077be046b6e8abb53bed4eeb82dd3"}, - {file = "pyzmq-25.1.2-cp311-cp311-win32.whl", hash = "sha256:4e5837af3e5aaa99a091302df5ee001149baff06ad22b722d34e30df5f0d9097"}, - {file = "pyzmq-25.1.2-cp311-cp311-win_amd64.whl", hash = "sha256:25c2dbb97d38b5ac9fd15586e048ec5eb1e38f3d47fe7d92167b0c77bb3584e9"}, - {file = "pyzmq-25.1.2-cp312-cp312-macosx_10_15_universal2.whl", hash = 
"sha256:11e70516688190e9c2db14fcf93c04192b02d457b582a1f6190b154691b4c93a"}, - {file = "pyzmq-25.1.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:313c3794d650d1fccaaab2df942af9f2c01d6217c846177cfcbc693c7410839e"}, - {file = "pyzmq-25.1.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b3cbba2f47062b85fe0ef9de5b987612140a9ba3a9c6d2543c6dec9f7c2ab27"}, - {file = "pyzmq-25.1.2-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fc31baa0c32a2ca660784d5af3b9487e13b61b3032cb01a115fce6588e1bed30"}, - {file = "pyzmq-25.1.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02c9087b109070c5ab0b383079fa1b5f797f8d43e9a66c07a4b8b8bdecfd88ee"}, - {file = "pyzmq-25.1.2-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:f8429b17cbb746c3e043cb986328da023657e79d5ed258b711c06a70c2ea7537"}, - {file = "pyzmq-25.1.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:5074adeacede5f810b7ef39607ee59d94e948b4fd954495bdb072f8c54558181"}, - {file = "pyzmq-25.1.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:7ae8f354b895cbd85212da245f1a5ad8159e7840e37d78b476bb4f4c3f32a9fe"}, - {file = "pyzmq-25.1.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:b264bf2cc96b5bc43ce0e852be995e400376bd87ceb363822e2cb1964fcdc737"}, - {file = "pyzmq-25.1.2-cp312-cp312-win32.whl", hash = "sha256:02bbc1a87b76e04fd780b45e7f695471ae6de747769e540da909173d50ff8e2d"}, - {file = "pyzmq-25.1.2-cp312-cp312-win_amd64.whl", hash = "sha256:ced111c2e81506abd1dc142e6cd7b68dd53747b3b7ae5edbea4578c5eeff96b7"}, - {file = "pyzmq-25.1.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:7b6d09a8962a91151f0976008eb7b29b433a560fde056ec7a3db9ec8f1075438"}, - {file = "pyzmq-25.1.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:967668420f36878a3c9ecb5ab33c9d0ff8d054f9c0233d995a6d25b0e95e1b6b"}, - {file = "pyzmq-25.1.2-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:5edac3f57c7ddaacdb4d40f6ef2f9e299471fc38d112f4bc6d60ab9365445fb0"}, - {file = "pyzmq-25.1.2-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:0dabfb10ef897f3b7e101cacba1437bd3a5032ee667b7ead32bbcdd1a8422fe7"}, - {file = "pyzmq-25.1.2-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:2c6441e0398c2baacfe5ba30c937d274cfc2dc5b55e82e3749e333aabffde561"}, - {file = "pyzmq-25.1.2-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:16b726c1f6c2e7625706549f9dbe9b06004dfbec30dbed4bf50cbdfc73e5b32a"}, - {file = "pyzmq-25.1.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:a86c2dd76ef71a773e70551a07318b8e52379f58dafa7ae1e0a4be78efd1ff16"}, - {file = "pyzmq-25.1.2-cp36-cp36m-win32.whl", hash = "sha256:359f7f74b5d3c65dae137f33eb2bcfa7ad9ebefd1cab85c935f063f1dbb245cc"}, - {file = "pyzmq-25.1.2-cp36-cp36m-win_amd64.whl", hash = "sha256:55875492f820d0eb3417b51d96fea549cde77893ae3790fd25491c5754ea2f68"}, - {file = "pyzmq-25.1.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b8c8a419dfb02e91b453615c69568442e897aaf77561ee0064d789705ff37a92"}, - {file = "pyzmq-25.1.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8807c87fa893527ae8a524c15fc505d9950d5e856f03dae5921b5e9aa3b8783b"}, - {file = "pyzmq-25.1.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5e319ed7d6b8f5fad9b76daa0a68497bc6f129858ad956331a5835785761e003"}, - {file = "pyzmq-25.1.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:3c53687dde4d9d473c587ae80cc328e5b102b517447456184b485587ebd18b62"}, - {file = "pyzmq-25.1.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:9add2e5b33d2cd765ad96d5eb734a5e795a0755f7fc49aa04f76d7ddda73fd70"}, - {file = "pyzmq-25.1.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:e690145a8c0c273c28d3b89d6fb32c45e0d9605b2293c10e650265bf5c11cfec"}, - {file = "pyzmq-25.1.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = 
"sha256:00a06faa7165634f0cac1abb27e54d7a0b3b44eb9994530b8ec73cf52e15353b"}, - {file = "pyzmq-25.1.2-cp37-cp37m-win32.whl", hash = "sha256:0f97bc2f1f13cb16905a5f3e1fbdf100e712d841482b2237484360f8bc4cb3d7"}, - {file = "pyzmq-25.1.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6cc0020b74b2e410287e5942e1e10886ff81ac77789eb20bec13f7ae681f0fdd"}, - {file = "pyzmq-25.1.2-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:bef02cfcbded83473bdd86dd8d3729cd82b2e569b75844fb4ea08fee3c26ae41"}, - {file = "pyzmq-25.1.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e10a4b5a4b1192d74853cc71a5e9fd022594573926c2a3a4802020360aa719d8"}, - {file = "pyzmq-25.1.2-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:8c5f80e578427d4695adac6fdf4370c14a2feafdc8cb35549c219b90652536ae"}, - {file = "pyzmq-25.1.2-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:5dde6751e857910c1339890f3524de74007958557593b9e7e8c5f01cd919f8a7"}, - {file = "pyzmq-25.1.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea1608dd169da230a0ad602d5b1ebd39807ac96cae1845c3ceed39af08a5c6df"}, - {file = "pyzmq-25.1.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0f513130c4c361201da9bc69df25a086487250e16b5571ead521b31ff6b02220"}, - {file = "pyzmq-25.1.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:019744b99da30330798bb37df33549d59d380c78e516e3bab9c9b84f87a9592f"}, - {file = "pyzmq-25.1.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2e2713ef44be5d52dd8b8e2023d706bf66cb22072e97fc71b168e01d25192755"}, - {file = "pyzmq-25.1.2-cp38-cp38-win32.whl", hash = "sha256:07cd61a20a535524906595e09344505a9bd46f1da7a07e504b315d41cd42eb07"}, - {file = "pyzmq-25.1.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb7e49a17fb8c77d3119d41a4523e432eb0c6932187c37deb6fbb00cc3028088"}, - {file = "pyzmq-25.1.2-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:94504ff66f278ab4b7e03e4cba7e7e400cb73bfa9d3d71f58d8972a8dc67e7a6"}, - {file = 
"pyzmq-25.1.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6dd0d50bbf9dca1d0bdea219ae6b40f713a3fb477c06ca3714f208fd69e16fd8"}, - {file = "pyzmq-25.1.2-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:004ff469d21e86f0ef0369717351073e0e577428e514c47c8480770d5e24a565"}, - {file = "pyzmq-25.1.2-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c0b5ca88a8928147b7b1e2dfa09f3b6c256bc1135a1338536cbc9ea13d3b7add"}, - {file = "pyzmq-25.1.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c9a79f1d2495b167119d02be7448bfba57fad2a4207c4f68abc0bab4b92925b"}, - {file = "pyzmq-25.1.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:518efd91c3d8ac9f9b4f7dd0e2b7b8bf1a4fe82a308009016b07eaa48681af82"}, - {file = "pyzmq-25.1.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:1ec23bd7b3a893ae676d0e54ad47d18064e6c5ae1fadc2f195143fb27373f7f6"}, - {file = "pyzmq-25.1.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:db36c27baed588a5a8346b971477b718fdc66cf5b80cbfbd914b4d6d355e44e2"}, - {file = "pyzmq-25.1.2-cp39-cp39-win32.whl", hash = "sha256:39b1067f13aba39d794a24761e385e2eddc26295826530a8c7b6c6c341584289"}, - {file = "pyzmq-25.1.2-cp39-cp39-win_amd64.whl", hash = "sha256:8e9f3fabc445d0ce320ea2c59a75fe3ea591fdbdeebec5db6de530dd4b09412e"}, - {file = "pyzmq-25.1.2-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a8c1d566344aee826b74e472e16edae0a02e2a044f14f7c24e123002dcff1c05"}, - {file = "pyzmq-25.1.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:759cfd391a0996345ba94b6a5110fca9c557ad4166d86a6e81ea526c376a01e8"}, - {file = "pyzmq-25.1.2-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7c61e346ac34b74028ede1c6b4bcecf649d69b707b3ff9dc0fab453821b04d1e"}, - {file = "pyzmq-25.1.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4cb8fc1f8d69b411b8ec0b5f1ffbcaf14c1db95b6bccea21d83610987435f1a4"}, - 
{file = "pyzmq-25.1.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:3c00c9b7d1ca8165c610437ca0c92e7b5607b2f9076f4eb4b095c85d6e680a1d"}, - {file = "pyzmq-25.1.2-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:df0c7a16ebb94452d2909b9a7b3337940e9a87a824c4fc1c7c36bb4404cb0cde"}, - {file = "pyzmq-25.1.2-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:45999e7f7ed5c390f2e87ece7f6c56bf979fb213550229e711e45ecc7d42ccb8"}, - {file = "pyzmq-25.1.2-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ac170e9e048b40c605358667aca3d94e98f604a18c44bdb4c102e67070f3ac9b"}, - {file = "pyzmq-25.1.2-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1b604734bec94f05f81b360a272fc824334267426ae9905ff32dc2be433ab96"}, - {file = "pyzmq-25.1.2-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:a793ac733e3d895d96f865f1806f160696422554e46d30105807fdc9841b9f7d"}, - {file = "pyzmq-25.1.2-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0806175f2ae5ad4b835ecd87f5f85583316b69f17e97786f7443baaf54b9bb98"}, - {file = "pyzmq-25.1.2-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:ef12e259e7bc317c7597d4f6ef59b97b913e162d83b421dd0db3d6410f17a244"}, - {file = "pyzmq-25.1.2-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ea253b368eb41116011add00f8d5726762320b1bda892f744c91997b65754d73"}, - {file = "pyzmq-25.1.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b9b1f2ad6498445a941d9a4fee096d387fee436e45cc660e72e768d3d8ee611"}, - {file = "pyzmq-25.1.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:8b14c75979ce932c53b79976a395cb2a8cd3aaf14aef75e8c2cb55a330b9b49d"}, - {file = "pyzmq-25.1.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:889370d5174a741a62566c003ee8ddba4b04c3f09a97b8000092b7ca83ec9c49"}, - {file = "pyzmq-25.1.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:9a18fff090441a40ffda8a7f4f18f03dc56ae73f148f1832e109f9bffa85df15"}, - {file = "pyzmq-25.1.2-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:99a6b36f95c98839ad98f8c553d8507644c880cf1e0a57fe5e3a3f3969040882"}, - {file = "pyzmq-25.1.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4345c9a27f4310afbb9c01750e9461ff33d6fb74cd2456b107525bbeebcb5be3"}, - {file = "pyzmq-25.1.2-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3516e0b6224cf6e43e341d56da15fd33bdc37fa0c06af4f029f7d7dfceceabbc"}, - {file = "pyzmq-25.1.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:146b9b1f29ead41255387fb07be56dc29639262c0f7344f570eecdcd8d683314"}, - {file = "pyzmq-25.1.2.tar.gz", hash = "sha256:93f1aa311e8bb912e34f004cf186407a4e90eec4f0ecc0efd26056bf7eda0226"}, + {file = "pyzmq-26.0.3-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:44dd6fc3034f1eaa72ece33588867df9e006a7303725a12d64c3dff92330f625"}, + {file = "pyzmq-26.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:acb704195a71ac5ea5ecf2811c9ee19ecdc62b91878528302dd0be1b9451cc90"}, + {file = "pyzmq-26.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dbb9c997932473a27afa93954bb77a9f9b786b4ccf718d903f35da3232317de"}, + {file = "pyzmq-26.0.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6bcb34f869d431799c3ee7d516554797f7760cb2198ecaa89c3f176f72d062be"}, + {file = "pyzmq-26.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38ece17ec5f20d7d9b442e5174ae9f020365d01ba7c112205a4d59cf19dc38ee"}, + {file = "pyzmq-26.0.3-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:ba6e5e6588e49139a0979d03a7deb9c734bde647b9a8808f26acf9c547cab1bf"}, + {file = "pyzmq-26.0.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3bf8b000a4e2967e6dfdd8656cd0757d18c7e5ce3d16339e550bd462f4857e59"}, + {file = "pyzmq-26.0.3-cp310-cp310-musllinux_1_1_i686.whl", hash = 
"sha256:2136f64fbb86451dbbf70223635a468272dd20075f988a102bf8a3f194a411dc"}, + {file = "pyzmq-26.0.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e8918973fbd34e7814f59143c5f600ecd38b8038161239fd1a3d33d5817a38b8"}, + {file = "pyzmq-26.0.3-cp310-cp310-win32.whl", hash = "sha256:0aaf982e68a7ac284377d051c742610220fd06d330dcd4c4dbb4cdd77c22a537"}, + {file = "pyzmq-26.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:f1a9b7d00fdf60b4039f4455afd031fe85ee8305b019334b72dcf73c567edc47"}, + {file = "pyzmq-26.0.3-cp310-cp310-win_arm64.whl", hash = "sha256:80b12f25d805a919d53efc0a5ad7c0c0326f13b4eae981a5d7b7cc343318ebb7"}, + {file = "pyzmq-26.0.3-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:a72a84570f84c374b4c287183debc776dc319d3e8ce6b6a0041ce2e400de3f32"}, + {file = "pyzmq-26.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7ca684ee649b55fd8f378127ac8462fb6c85f251c2fb027eb3c887e8ee347bcd"}, + {file = "pyzmq-26.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e222562dc0f38571c8b1ffdae9d7adb866363134299264a1958d077800b193b7"}, + {file = "pyzmq-26.0.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f17cde1db0754c35a91ac00b22b25c11da6eec5746431d6e5092f0cd31a3fea9"}, + {file = "pyzmq-26.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b7c0c0b3244bb2275abe255d4a30c050d541c6cb18b870975553f1fb6f37527"}, + {file = "pyzmq-26.0.3-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:ac97a21de3712afe6a6c071abfad40a6224fd14fa6ff0ff8d0c6e6cd4e2f807a"}, + {file = "pyzmq-26.0.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:88b88282e55fa39dd556d7fc04160bcf39dea015f78e0cecec8ff4f06c1fc2b5"}, + {file = "pyzmq-26.0.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:72b67f966b57dbd18dcc7efbc1c7fc9f5f983e572db1877081f075004614fcdd"}, + {file = "pyzmq-26.0.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:f4b6cecbbf3b7380f3b61de3a7b93cb721125dc125c854c14ddc91225ba52f83"}, + {file = "pyzmq-26.0.3-cp311-cp311-win32.whl", hash = "sha256:eed56b6a39216d31ff8cd2f1d048b5bf1700e4b32a01b14379c3b6dde9ce3aa3"}, + {file = "pyzmq-26.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:3191d312c73e3cfd0f0afdf51df8405aafeb0bad71e7ed8f68b24b63c4f36500"}, + {file = "pyzmq-26.0.3-cp311-cp311-win_arm64.whl", hash = "sha256:b6907da3017ef55139cf0e417c5123a84c7332520e73a6902ff1f79046cd3b94"}, + {file = "pyzmq-26.0.3-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:068ca17214038ae986d68f4a7021f97e187ed278ab6dccb79f837d765a54d753"}, + {file = "pyzmq-26.0.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:7821d44fe07335bea256b9f1f41474a642ca55fa671dfd9f00af8d68a920c2d4"}, + {file = "pyzmq-26.0.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eeb438a26d87c123bb318e5f2b3d86a36060b01f22fbdffd8cf247d52f7c9a2b"}, + {file = "pyzmq-26.0.3-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:69ea9d6d9baa25a4dc9cef5e2b77b8537827b122214f210dd925132e34ae9b12"}, + {file = "pyzmq-26.0.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7daa3e1369355766dea11f1d8ef829905c3b9da886ea3152788dc25ee6079e02"}, + {file = "pyzmq-26.0.3-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:6ca7a9a06b52d0e38ccf6bca1aeff7be178917893f3883f37b75589d42c4ac20"}, + {file = "pyzmq-26.0.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1b7d0e124948daa4d9686d421ef5087c0516bc6179fdcf8828b8444f8e461a77"}, + {file = "pyzmq-26.0.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:e746524418b70f38550f2190eeee834db8850088c834d4c8406fbb9bc1ae10b2"}, + {file = "pyzmq-26.0.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:6b3146f9ae6af82c47a5282ac8803523d381b3b21caeae0327ed2f7ecb718798"}, + {file = "pyzmq-26.0.3-cp312-cp312-win32.whl", hash = "sha256:2b291d1230845871c00c8462c50565a9cd6026fe1228e77ca934470bb7d70ea0"}, + {file = 
"pyzmq-26.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:926838a535c2c1ea21c903f909a9a54e675c2126728c21381a94ddf37c3cbddf"}, + {file = "pyzmq-26.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:5bf6c237f8c681dfb91b17f8435b2735951f0d1fad10cc5dfd96db110243370b"}, + {file = "pyzmq-26.0.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0c0991f5a96a8e620f7691e61178cd8f457b49e17b7d9cfa2067e2a0a89fc1d5"}, + {file = "pyzmq-26.0.3-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:dbf012d8fcb9f2cf0643b65df3b355fdd74fc0035d70bb5c845e9e30a3a4654b"}, + {file = "pyzmq-26.0.3-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:01fbfbeb8249a68d257f601deb50c70c929dc2dfe683b754659569e502fbd3aa"}, + {file = "pyzmq-26.0.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c8eb19abe87029c18f226d42b8a2c9efdd139d08f8bf6e085dd9075446db450"}, + {file = "pyzmq-26.0.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:5344b896e79800af86ad643408ca9aa303a017f6ebff8cee5a3163c1e9aec987"}, + {file = "pyzmq-26.0.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:204e0f176fd1d067671157d049466869b3ae1fc51e354708b0dc41cf94e23a3a"}, + {file = "pyzmq-26.0.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:a42db008d58530efa3b881eeee4991146de0b790e095f7ae43ba5cc612decbc5"}, + {file = "pyzmq-26.0.3-cp37-cp37m-win32.whl", hash = "sha256:8d7a498671ca87e32b54cb47c82a92b40130a26c5197d392720a1bce1b3c77cf"}, + {file = "pyzmq-26.0.3-cp37-cp37m-win_amd64.whl", hash = "sha256:3b4032a96410bdc760061b14ed6a33613ffb7f702181ba999df5d16fb96ba16a"}, + {file = "pyzmq-26.0.3-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:2cc4e280098c1b192c42a849de8de2c8e0f3a84086a76ec5b07bfee29bda7d18"}, + {file = "pyzmq-26.0.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5bde86a2ed3ce587fa2b207424ce15b9a83a9fa14422dcc1c5356a13aed3df9d"}, + {file = "pyzmq-26.0.3-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = 
"sha256:34106f68e20e6ff253c9f596ea50397dbd8699828d55e8fa18bd4323d8d966e6"}, + {file = "pyzmq-26.0.3-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ebbbd0e728af5db9b04e56389e2299a57ea8b9dd15c9759153ee2455b32be6ad"}, + {file = "pyzmq-26.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f6b1d1c631e5940cac5a0b22c5379c86e8df6a4ec277c7a856b714021ab6cfad"}, + {file = "pyzmq-26.0.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:e891ce81edd463b3b4c3b885c5603c00141151dd9c6936d98a680c8c72fe5c67"}, + {file = "pyzmq-26.0.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:9b273ecfbc590a1b98f014ae41e5cf723932f3b53ba9367cfb676f838038b32c"}, + {file = "pyzmq-26.0.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b32bff85fb02a75ea0b68f21e2412255b5731f3f389ed9aecc13a6752f58ac97"}, + {file = "pyzmq-26.0.3-cp38-cp38-win32.whl", hash = "sha256:f6c21c00478a7bea93caaaef9e7629145d4153b15a8653e8bb4609d4bc70dbfc"}, + {file = "pyzmq-26.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:3401613148d93ef0fd9aabdbddb212de3db7a4475367f49f590c837355343972"}, + {file = "pyzmq-26.0.3-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:2ed8357f4c6e0daa4f3baf31832df8a33334e0fe5b020a61bc8b345a3db7a606"}, + {file = "pyzmq-26.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c1c8f2a2ca45292084c75bb6d3a25545cff0ed931ed228d3a1810ae3758f975f"}, + {file = "pyzmq-26.0.3-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:b63731993cdddcc8e087c64e9cf003f909262b359110070183d7f3025d1c56b5"}, + {file = "pyzmq-26.0.3-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b3cd31f859b662ac5d7f4226ec7d8bd60384fa037fc02aee6ff0b53ba29a3ba8"}, + {file = "pyzmq-26.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:115f8359402fa527cf47708d6f8a0f8234f0e9ca0cab7c18c9c189c194dbf620"}, + {file = "pyzmq-26.0.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:715bdf952b9533ba13dfcf1f431a8f49e63cecc31d91d007bc1deb914f47d0e4"}, + {file = "pyzmq-26.0.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:e1258c639e00bf5e8a522fec6c3eaa3e30cf1c23a2f21a586be7e04d50c9acab"}, + {file = "pyzmq-26.0.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:15c59e780be8f30a60816a9adab900c12a58d79c1ac742b4a8df044ab2a6d920"}, + {file = "pyzmq-26.0.3-cp39-cp39-win32.whl", hash = "sha256:d0cdde3c78d8ab5b46595054e5def32a755fc028685add5ddc7403e9f6de9879"}, + {file = "pyzmq-26.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:ce828058d482ef860746bf532822842e0ff484e27f540ef5c813d516dd8896d2"}, + {file = "pyzmq-26.0.3-cp39-cp39-win_arm64.whl", hash = "sha256:788f15721c64109cf720791714dc14afd0f449d63f3a5487724f024345067381"}, + {file = "pyzmq-26.0.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2c18645ef6294d99b256806e34653e86236eb266278c8ec8112622b61db255de"}, + {file = "pyzmq-26.0.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7e6bc96ebe49604df3ec2c6389cc3876cabe475e6bfc84ced1bf4e630662cb35"}, + {file = "pyzmq-26.0.3-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:971e8990c5cc4ddcff26e149398fc7b0f6a042306e82500f5e8db3b10ce69f84"}, + {file = "pyzmq-26.0.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8416c23161abd94cc7da80c734ad7c9f5dbebdadfdaa77dad78244457448223"}, + {file = "pyzmq-26.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:082a2988364b60bb5de809373098361cf1dbb239623e39e46cb18bc035ed9c0c"}, + {file = "pyzmq-26.0.3-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d57dfbf9737763b3a60d26e6800e02e04284926329aee8fb01049635e957fe81"}, + {file = "pyzmq-26.0.3-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:77a85dca4c2430ac04dc2a2185c2deb3858a34fe7f403d0a946fa56970cf60a1"}, + {file = "pyzmq-26.0.3-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = 
"sha256:4c82a6d952a1d555bf4be42b6532927d2a5686dd3c3e280e5f63225ab47ac1f5"}, + {file = "pyzmq-26.0.3-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4496b1282c70c442809fc1b151977c3d967bfb33e4e17cedbf226d97de18f709"}, + {file = "pyzmq-26.0.3-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:e4946d6bdb7ba972dfda282f9127e5756d4f299028b1566d1245fa0d438847e6"}, + {file = "pyzmq-26.0.3-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:03c0ae165e700364b266876d712acb1ac02693acd920afa67da2ebb91a0b3c09"}, + {file = "pyzmq-26.0.3-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:3e3070e680f79887d60feeda051a58d0ac36622e1759f305a41059eff62c6da7"}, + {file = "pyzmq-26.0.3-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6ca08b840fe95d1c2bd9ab92dac5685f949fc6f9ae820ec16193e5ddf603c3b2"}, + {file = "pyzmq-26.0.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e76654e9dbfb835b3518f9938e565c7806976c07b37c33526b574cc1a1050480"}, + {file = "pyzmq-26.0.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:871587bdadd1075b112e697173e946a07d722459d20716ceb3d1bd6c64bd08ce"}, + {file = "pyzmq-26.0.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d0a2d1bd63a4ad79483049b26514e70fa618ce6115220da9efdff63688808b17"}, + {file = "pyzmq-26.0.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0270b49b6847f0d106d64b5086e9ad5dc8a902413b5dbbb15d12b60f9c1747a4"}, + {file = "pyzmq-26.0.3-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:703c60b9910488d3d0954ca585c34f541e506a091a41930e663a098d3b794c67"}, + {file = "pyzmq-26.0.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:74423631b6be371edfbf7eabb02ab995c2563fee60a80a30829176842e71722a"}, + {file = "pyzmq-26.0.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = 
"sha256:4adfbb5451196842a88fda3612e2c0414134874bffb1c2ce83ab4242ec9e027d"}, + {file = "pyzmq-26.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:3516119f4f9b8671083a70b6afaa0a070f5683e431ab3dc26e9215620d7ca1ad"}, + {file = "pyzmq-26.0.3.tar.gz", hash = "sha256:dba7d9f2e047dfa2bca3b01f4f84aa5246725203d6284e3790f2ca15fba6b40a"}, ] [package.dependencies] @@ -1789,104 +2226,90 @@ cffi = {version = "*", markers = "implementation_name == \"pypy\""} [[package]] name = "regex" -version = "2023.12.25" +version = "2024.4.28" description = "Alternative regular expression module, to replace re." optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "regex-2023.12.25-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0694219a1d54336fd0445ea382d49d36882415c0134ee1e8332afd1529f0baa5"}, - {file = "regex-2023.12.25-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b014333bd0217ad3d54c143de9d4b9a3ca1c5a29a6d0d554952ea071cff0f1f8"}, - {file = "regex-2023.12.25-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d865984b3f71f6d0af64d0d88f5733521698f6c16f445bb09ce746c92c97c586"}, - {file = "regex-2023.12.25-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e0eabac536b4cc7f57a5f3d095bfa557860ab912f25965e08fe1545e2ed8b4c"}, - {file = "regex-2023.12.25-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c25a8ad70e716f96e13a637802813f65d8a6760ef48672aa3502f4c24ea8b400"}, - {file = "regex-2023.12.25-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a9b6d73353f777630626f403b0652055ebfe8ff142a44ec2cf18ae470395766e"}, - {file = "regex-2023.12.25-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9cc99d6946d750eb75827cb53c4371b8b0fe89c733a94b1573c9dd16ea6c9e4"}, - {file = "regex-2023.12.25-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:88d1f7bef20c721359d8675f7d9f8e414ec5003d8f642fdfd8087777ff7f94b5"}, - {file = "regex-2023.12.25-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:cb3fe77aec8f1995611f966d0c656fdce398317f850d0e6e7aebdfe61f40e1cd"}, - {file = "regex-2023.12.25-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:7aa47c2e9ea33a4a2a05f40fcd3ea36d73853a2aae7b4feab6fc85f8bf2c9704"}, - {file = "regex-2023.12.25-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:df26481f0c7a3f8739fecb3e81bc9da3fcfae34d6c094563b9d4670b047312e1"}, - {file = "regex-2023.12.25-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:c40281f7d70baf6e0db0c2f7472b31609f5bc2748fe7275ea65a0b4601d9b392"}, - {file = "regex-2023.12.25-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:d94a1db462d5690ebf6ae86d11c5e420042b9898af5dcf278bd97d6bda065423"}, - {file = "regex-2023.12.25-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ba1b30765a55acf15dce3f364e4928b80858fa8f979ad41f862358939bdd1f2f"}, - {file = "regex-2023.12.25-cp310-cp310-win32.whl", hash = "sha256:150c39f5b964e4d7dba46a7962a088fbc91f06e606f023ce57bb347a3b2d4630"}, - {file = "regex-2023.12.25-cp310-cp310-win_amd64.whl", hash = "sha256:09da66917262d9481c719599116c7dc0c321ffcec4b1f510c4f8a066f8768105"}, - {file = "regex-2023.12.25-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:1b9d811f72210fa9306aeb88385b8f8bcef0dfbf3873410413c00aa94c56c2b6"}, - {file = "regex-2023.12.25-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d902a43085a308cef32c0d3aea962524b725403fd9373dea18110904003bac97"}, - {file = "regex-2023.12.25-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d166eafc19f4718df38887b2bbe1467a4f74a9830e8605089ea7a30dd4da8887"}, - {file = "regex-2023.12.25-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7ad32824b7f02bb3c9f80306d405a1d9b7bb89362d68b3c5a9be53836caebdb"}, - {file = 
"regex-2023.12.25-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:636ba0a77de609d6510235b7f0e77ec494d2657108f777e8765efc060094c98c"}, - {file = "regex-2023.12.25-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fda75704357805eb953a3ee15a2b240694a9a514548cd49b3c5124b4e2ad01b"}, - {file = "regex-2023.12.25-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f72cbae7f6b01591f90814250e636065850c5926751af02bb48da94dfced7baa"}, - {file = "regex-2023.12.25-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:db2a0b1857f18b11e3b0e54ddfefc96af46b0896fb678c85f63fb8c37518b3e7"}, - {file = "regex-2023.12.25-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:7502534e55c7c36c0978c91ba6f61703faf7ce733715ca48f499d3dbbd7657e0"}, - {file = "regex-2023.12.25-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:e8c7e08bb566de4faaf11984af13f6bcf6a08f327b13631d41d62592681d24fe"}, - {file = "regex-2023.12.25-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:283fc8eed679758de38fe493b7d7d84a198b558942b03f017b1f94dda8efae80"}, - {file = "regex-2023.12.25-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:f44dd4d68697559d007462b0a3a1d9acd61d97072b71f6d1968daef26bc744bd"}, - {file = "regex-2023.12.25-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:67d3ccfc590e5e7197750fcb3a2915b416a53e2de847a728cfa60141054123d4"}, - {file = "regex-2023.12.25-cp311-cp311-win32.whl", hash = "sha256:68191f80a9bad283432385961d9efe09d783bcd36ed35a60fb1ff3f1ec2efe87"}, - {file = "regex-2023.12.25-cp311-cp311-win_amd64.whl", hash = "sha256:7d2af3f6b8419661a0c421584cfe8aaec1c0e435ce7e47ee2a97e344b98f794f"}, - {file = "regex-2023.12.25-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:8a0ccf52bb37d1a700375a6b395bff5dd15c50acb745f7db30415bae3c2b0715"}, - {file = "regex-2023.12.25-cp312-cp312-macosx_10_9_x86_64.whl", hash = 
"sha256:c3c4a78615b7762740531c27cf46e2f388d8d727d0c0c739e72048beb26c8a9d"}, - {file = "regex-2023.12.25-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ad83e7545b4ab69216cef4cc47e344d19622e28aabec61574b20257c65466d6a"}, - {file = "regex-2023.12.25-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b7a635871143661feccce3979e1727c4e094f2bdfd3ec4b90dfd4f16f571a87a"}, - {file = "regex-2023.12.25-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d498eea3f581fbe1b34b59c697512a8baef88212f92e4c7830fcc1499f5b45a5"}, - {file = "regex-2023.12.25-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:43f7cd5754d02a56ae4ebb91b33461dc67be8e3e0153f593c509e21d219c5060"}, - {file = "regex-2023.12.25-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51f4b32f793812714fd5307222a7f77e739b9bc566dc94a18126aba3b92b98a3"}, - {file = "regex-2023.12.25-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ba99d8077424501b9616b43a2d208095746fb1284fc5ba490139651f971d39d9"}, - {file = "regex-2023.12.25-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:4bfc2b16e3ba8850e0e262467275dd4d62f0d045e0e9eda2bc65078c0110a11f"}, - {file = "regex-2023.12.25-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8c2c19dae8a3eb0ea45a8448356ed561be843b13cbc34b840922ddf565498c1c"}, - {file = "regex-2023.12.25-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:60080bb3d8617d96f0fb7e19796384cc2467447ef1c491694850ebd3670bc457"}, - {file = "regex-2023.12.25-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b77e27b79448e34c2c51c09836033056a0547aa360c45eeeb67803da7b0eedaf"}, - {file = "regex-2023.12.25-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:518440c991f514331f4850a63560321f833979d145d7d81186dbe2f19e27ae3d"}, - {file = "regex-2023.12.25-cp312-cp312-win32.whl", hash = "sha256:e2610e9406d3b0073636a3a2e80db05a02f0c3169b5632022b4e81c0364bcda5"}, 
- {file = "regex-2023.12.25-cp312-cp312-win_amd64.whl", hash = "sha256:cc37b9aeebab425f11f27e5e9e6cf580be7206c6582a64467a14dda211abc232"}, - {file = "regex-2023.12.25-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:da695d75ac97cb1cd725adac136d25ca687da4536154cdc2815f576e4da11c69"}, - {file = "regex-2023.12.25-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d126361607b33c4eb7b36debc173bf25d7805847346dd4d99b5499e1fef52bc7"}, - {file = "regex-2023.12.25-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4719bb05094d7d8563a450cf8738d2e1061420f79cfcc1fa7f0a44744c4d8f73"}, - {file = "regex-2023.12.25-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5dd58946bce44b53b06d94aa95560d0b243eb2fe64227cba50017a8d8b3cd3e2"}, - {file = "regex-2023.12.25-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22a86d9fff2009302c440b9d799ef2fe322416d2d58fc124b926aa89365ec482"}, - {file = "regex-2023.12.25-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2aae8101919e8aa05ecfe6322b278f41ce2994c4a430303c4cd163fef746e04f"}, - {file = "regex-2023.12.25-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:e692296c4cc2873967771345a876bcfc1c547e8dd695c6b89342488b0ea55cd8"}, - {file = "regex-2023.12.25-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:263ef5cc10979837f243950637fffb06e8daed7f1ac1e39d5910fd29929e489a"}, - {file = "regex-2023.12.25-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:d6f7e255e5fa94642a0724e35406e6cb7001c09d476ab5fce002f652b36d0c39"}, - {file = "regex-2023.12.25-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:88ad44e220e22b63b0f8f81f007e8abbb92874d8ced66f32571ef8beb0643b2b"}, - {file = "regex-2023.12.25-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:3a17d3ede18f9cedcbe23d2daa8a2cd6f59fe2bf082c567e43083bba3fb00347"}, - {file = 
"regex-2023.12.25-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d15b274f9e15b1a0b7a45d2ac86d1f634d983ca40d6b886721626c47a400bf39"}, - {file = "regex-2023.12.25-cp37-cp37m-win32.whl", hash = "sha256:ed19b3a05ae0c97dd8f75a5d8f21f7723a8c33bbc555da6bbe1f96c470139d3c"}, - {file = "regex-2023.12.25-cp37-cp37m-win_amd64.whl", hash = "sha256:a6d1047952c0b8104a1d371f88f4ab62e6275567d4458c1e26e9627ad489b445"}, - {file = "regex-2023.12.25-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:b43523d7bc2abd757119dbfb38af91b5735eea45537ec6ec3a5ec3f9562a1c53"}, - {file = "regex-2023.12.25-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:efb2d82f33b2212898f1659fb1c2e9ac30493ac41e4d53123da374c3b5541e64"}, - {file = "regex-2023.12.25-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b7fca9205b59c1a3d5031f7e64ed627a1074730a51c2a80e97653e3e9fa0d415"}, - {file = "regex-2023.12.25-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:086dd15e9435b393ae06f96ab69ab2d333f5d65cbe65ca5a3ef0ec9564dfe770"}, - {file = "regex-2023.12.25-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e81469f7d01efed9b53740aedd26085f20d49da65f9c1f41e822a33992cb1590"}, - {file = "regex-2023.12.25-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:34e4af5b27232f68042aa40a91c3b9bb4da0eeb31b7632e0091afc4310afe6cb"}, - {file = "regex-2023.12.25-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9852b76ab558e45b20bf1893b59af64a28bd3820b0c2efc80e0a70a4a3ea51c1"}, - {file = "regex-2023.12.25-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ff100b203092af77d1a5a7abe085b3506b7eaaf9abf65b73b7d6905b6cb76988"}, - {file = "regex-2023.12.25-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:cc038b2d8b1470364b1888a98fd22d616fba2b6309c5b5f181ad4483e0017861"}, - {file = 
"regex-2023.12.25-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:094ba386bb5c01e54e14434d4caabf6583334090865b23ef58e0424a6286d3dc"}, - {file = "regex-2023.12.25-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5cd05d0f57846d8ba4b71d9c00f6f37d6b97d5e5ef8b3c3840426a475c8f70f4"}, - {file = "regex-2023.12.25-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:9aa1a67bbf0f957bbe096375887b2505f5d8ae16bf04488e8b0f334c36e31360"}, - {file = "regex-2023.12.25-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:98a2636994f943b871786c9e82bfe7883ecdaba2ef5df54e1450fa9869d1f756"}, - {file = "regex-2023.12.25-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:37f8e93a81fc5e5bd8db7e10e62dc64261bcd88f8d7e6640aaebe9bc180d9ce2"}, - {file = "regex-2023.12.25-cp38-cp38-win32.whl", hash = "sha256:d78bd484930c1da2b9679290a41cdb25cc127d783768a0369d6b449e72f88beb"}, - {file = "regex-2023.12.25-cp38-cp38-win_amd64.whl", hash = "sha256:b521dcecebc5b978b447f0f69b5b7f3840eac454862270406a39837ffae4e697"}, - {file = "regex-2023.12.25-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:f7bc09bc9c29ebead055bcba136a67378f03d66bf359e87d0f7c759d6d4ffa31"}, - {file = "regex-2023.12.25-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e14b73607d6231f3cc4622809c196b540a6a44e903bcfad940779c80dffa7be7"}, - {file = "regex-2023.12.25-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9eda5f7a50141291beda3edd00abc2d4a5b16c29c92daf8d5bd76934150f3edc"}, - {file = "regex-2023.12.25-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc6bb9aa69aacf0f6032c307da718f61a40cf970849e471254e0e91c56ffca95"}, - {file = "regex-2023.12.25-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:298dc6354d414bc921581be85695d18912bea163a8b23cac9a2562bbcd5088b1"}, - {file = "regex-2023.12.25-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2f4e475a80ecbd15896a976aa0b386c5525d0ed34d5c600b6d3ebac0a67c7ddf"}, - {file = 
"regex-2023.12.25-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:531ac6cf22b53e0696f8e1d56ce2396311254eb806111ddd3922c9d937151dae"}, - {file = "regex-2023.12.25-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:22f3470f7524b6da61e2020672df2f3063676aff444db1daa283c2ea4ed259d6"}, - {file = "regex-2023.12.25-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:89723d2112697feaa320c9d351e5f5e7b841e83f8b143dba8e2d2b5f04e10923"}, - {file = "regex-2023.12.25-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0ecf44ddf9171cd7566ef1768047f6e66975788258b1c6c6ca78098b95cf9a3d"}, - {file = "regex-2023.12.25-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:905466ad1702ed4acfd67a902af50b8db1feeb9781436372261808df7a2a7bca"}, - {file = "regex-2023.12.25-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:4558410b7a5607a645e9804a3e9dd509af12fb72b9825b13791a37cd417d73a5"}, - {file = "regex-2023.12.25-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:7e316026cc1095f2a3e8cc012822c99f413b702eaa2ca5408a513609488cb62f"}, - {file = "regex-2023.12.25-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3b1de218d5375cd6ac4b5493e0b9f3df2be331e86520f23382f216c137913d20"}, - {file = "regex-2023.12.25-cp39-cp39-win32.whl", hash = "sha256:11a963f8e25ab5c61348d090bf1b07f1953929c13bd2309a0662e9ff680763c9"}, - {file = "regex-2023.12.25-cp39-cp39-win_amd64.whl", hash = "sha256:e693e233ac92ba83a87024e1d32b5f9ab15ca55ddd916d878146f4e3406b5c91"}, - {file = "regex-2023.12.25.tar.gz", hash = "sha256:29171aa128da69afdf4bde412d5bedc335f2ca8fcfe4489038577d05f16181e5"}, + {file = "regex-2024.4.28-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cd196d056b40af073d95a2879678585f0b74ad35190fac04ca67954c582c6b61"}, + {file = "regex-2024.4.28-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8bb381f777351bd534462f63e1c6afb10a7caa9fa2a421ae22c26e796fe31b1f"}, + 
{file = "regex-2024.4.28-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:47af45b6153522733aa6e92543938e97a70ce0900649ba626cf5aad290b737b6"}, + {file = "regex-2024.4.28-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99d6a550425cc51c656331af0e2b1651e90eaaa23fb4acde577cf15068e2e20f"}, + {file = "regex-2024.4.28-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bf29304a8011feb58913c382902fde3395957a47645bf848eea695839aa101b7"}, + {file = "regex-2024.4.28-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:92da587eee39a52c91aebea8b850e4e4f095fe5928d415cb7ed656b3460ae79a"}, + {file = "regex-2024.4.28-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6277d426e2f31bdbacb377d17a7475e32b2d7d1f02faaecc48d8e370c6a3ff31"}, + {file = "regex-2024.4.28-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:28e1f28d07220c0f3da0e8fcd5a115bbb53f8b55cecf9bec0c946eb9a059a94c"}, + {file = "regex-2024.4.28-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:aaa179975a64790c1f2701ac562b5eeb733946eeb036b5bcca05c8d928a62f10"}, + {file = "regex-2024.4.28-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6f435946b7bf7a1b438b4e6b149b947c837cb23c704e780c19ba3e6855dbbdd3"}, + {file = "regex-2024.4.28-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:19d6c11bf35a6ad077eb23852827f91c804eeb71ecb85db4ee1386825b9dc4db"}, + {file = "regex-2024.4.28-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:fdae0120cddc839eb8e3c15faa8ad541cc6d906d3eb24d82fb041cfe2807bc1e"}, + {file = "regex-2024.4.28-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:e672cf9caaf669053121f1766d659a8813bd547edef6e009205378faf45c67b8"}, + {file = "regex-2024.4.28-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f57515750d07e14743db55d59759893fdb21d2668f39e549a7d6cad5d70f9fea"}, + {file = 
"regex-2024.4.28-cp310-cp310-win32.whl", hash = "sha256:a1409c4eccb6981c7baabc8888d3550df518add6e06fe74fa1d9312c1838652d"}, + {file = "regex-2024.4.28-cp310-cp310-win_amd64.whl", hash = "sha256:1f687a28640f763f23f8a9801fe9e1b37338bb1ca5d564ddd41619458f1f22d1"}, + {file = "regex-2024.4.28-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:84077821c85f222362b72fdc44f7a3a13587a013a45cf14534df1cbbdc9a6796"}, + {file = "regex-2024.4.28-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b45d4503de8f4f3dc02f1d28a9b039e5504a02cc18906cfe744c11def942e9eb"}, + {file = "regex-2024.4.28-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:457c2cd5a646dd4ed536c92b535d73548fb8e216ebee602aa9f48e068fc393f3"}, + {file = "regex-2024.4.28-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2b51739ddfd013c6f657b55a508de8b9ea78b56d22b236052c3a85a675102dc6"}, + {file = "regex-2024.4.28-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:459226445c7d7454981c4c0ce0ad1a72e1e751c3e417f305722bbcee6697e06a"}, + {file = "regex-2024.4.28-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:670fa596984b08a4a769491cbdf22350431970d0112e03d7e4eeaecaafcd0fec"}, + {file = "regex-2024.4.28-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe00f4fe11c8a521b173e6324d862ee7ee3412bf7107570c9b564fe1119b56fb"}, + {file = "regex-2024.4.28-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:36f392dc7763fe7924575475736bddf9ab9f7a66b920932d0ea50c2ded2f5636"}, + {file = "regex-2024.4.28-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:23a412b7b1a7063f81a742463f38821097b6a37ce1e5b89dd8e871d14dbfd86b"}, + {file = "regex-2024.4.28-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:f1d6e4b7b2ae3a6a9df53efbf199e4bfcff0959dbdb5fd9ced34d4407348e39a"}, + {file = "regex-2024.4.28-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = 
"sha256:499334ad139557de97cbc4347ee921c0e2b5e9c0f009859e74f3f77918339257"}, + {file = "regex-2024.4.28-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:0940038bec2fe9e26b203d636c44d31dd8766abc1fe66262da6484bd82461ccf"}, + {file = "regex-2024.4.28-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:66372c2a01782c5fe8e04bff4a2a0121a9897e19223d9eab30c54c50b2ebeb7f"}, + {file = "regex-2024.4.28-cp311-cp311-win32.whl", hash = "sha256:c77d10ec3c1cf328b2f501ca32583625987ea0f23a0c2a49b37a39ee5c4c4630"}, + {file = "regex-2024.4.28-cp311-cp311-win_amd64.whl", hash = "sha256:fc0916c4295c64d6890a46e02d4482bb5ccf33bf1a824c0eaa9e83b148291f90"}, + {file = "regex-2024.4.28-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:08a1749f04fee2811c7617fdd46d2e46d09106fa8f475c884b65c01326eb15c5"}, + {file = "regex-2024.4.28-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b8eb28995771c087a73338f695a08c9abfdf723d185e57b97f6175c5051ff1ae"}, + {file = "regex-2024.4.28-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:dd7ef715ccb8040954d44cfeff17e6b8e9f79c8019daae2fd30a8806ef5435c0"}, + {file = "regex-2024.4.28-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb0315a2b26fde4005a7c401707c5352df274460f2f85b209cf6024271373013"}, + {file = "regex-2024.4.28-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f2fc053228a6bd3a17a9b0a3f15c3ab3cf95727b00557e92e1cfe094b88cc662"}, + {file = "regex-2024.4.28-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7fe9739a686dc44733d52d6e4f7b9c77b285e49edf8570754b322bca6b85b4cc"}, + {file = "regex-2024.4.28-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a74fcf77d979364f9b69fcf8200849ca29a374973dc193a7317698aa37d8b01c"}, + {file = "regex-2024.4.28-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:965fd0cf4694d76f6564896b422724ec7b959ef927a7cb187fc6b3f4e4f59833"}, + {file = 
"regex-2024.4.28-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:2fef0b38c34ae675fcbb1b5db760d40c3fc3612cfa186e9e50df5782cac02bcd"}, + {file = "regex-2024.4.28-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bc365ce25f6c7c5ed70e4bc674f9137f52b7dd6a125037f9132a7be52b8a252f"}, + {file = "regex-2024.4.28-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:ac69b394764bb857429b031d29d9604842bc4cbfd964d764b1af1868eeebc4f0"}, + {file = "regex-2024.4.28-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:144a1fc54765f5c5c36d6d4b073299832aa1ec6a746a6452c3ee7b46b3d3b11d"}, + {file = "regex-2024.4.28-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2630ca4e152c221072fd4a56d4622b5ada876f668ecd24d5ab62544ae6793ed6"}, + {file = "regex-2024.4.28-cp312-cp312-win32.whl", hash = "sha256:7f3502f03b4da52bbe8ba962621daa846f38489cae5c4a7b5d738f15f6443d17"}, + {file = "regex-2024.4.28-cp312-cp312-win_amd64.whl", hash = "sha256:0dd3f69098511e71880fb00f5815db9ed0ef62c05775395968299cb400aeab82"}, + {file = "regex-2024.4.28-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:374f690e1dd0dbdcddea4a5c9bdd97632cf656c69113f7cd6a361f2a67221cb6"}, + {file = "regex-2024.4.28-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:25f87ae6b96374db20f180eab083aafe419b194e96e4f282c40191e71980c666"}, + {file = "regex-2024.4.28-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5dbc1bcc7413eebe5f18196e22804a3be1bfdfc7e2afd415e12c068624d48247"}, + {file = "regex-2024.4.28-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f85151ec5a232335f1be022b09fbbe459042ea1951d8a48fef251223fc67eee1"}, + {file = "regex-2024.4.28-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:57ba112e5530530fd175ed550373eb263db4ca98b5f00694d73b18b9a02e7185"}, + {file = "regex-2024.4.28-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:224803b74aab56aa7be313f92a8d9911dcade37e5f167db62a738d0c85fdac4b"}, + {file = 
"regex-2024.4.28-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a54a047b607fd2d2d52a05e6ad294602f1e0dec2291152b745870afc47c1397"}, + {file = "regex-2024.4.28-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a2a512d623f1f2d01d881513af9fc6a7c46e5cfffb7dc50c38ce959f9246c94"}, + {file = "regex-2024.4.28-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c06bf3f38f0707592898428636cbb75d0a846651b053a1cf748763e3063a6925"}, + {file = "regex-2024.4.28-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:1031a5e7b048ee371ab3653aad3030ecfad6ee9ecdc85f0242c57751a05b0ac4"}, + {file = "regex-2024.4.28-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d7a353ebfa7154c871a35caca7bfd8f9e18666829a1dc187115b80e35a29393e"}, + {file = "regex-2024.4.28-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:7e76b9cfbf5ced1aca15a0e5b6f229344d9b3123439ffce552b11faab0114a02"}, + {file = "regex-2024.4.28-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:5ce479ecc068bc2a74cb98dd8dba99e070d1b2f4a8371a7dfe631f85db70fe6e"}, + {file = "regex-2024.4.28-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:7d77b6f63f806578c604dca209280e4c54f0fa9a8128bb8d2cc5fb6f99da4150"}, + {file = "regex-2024.4.28-cp38-cp38-win32.whl", hash = "sha256:d84308f097d7a513359757c69707ad339da799e53b7393819ec2ea36bc4beb58"}, + {file = "regex-2024.4.28-cp38-cp38-win_amd64.whl", hash = "sha256:2cc1b87bba1dd1a898e664a31012725e48af826bf3971e786c53e32e02adae6c"}, + {file = "regex-2024.4.28-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7413167c507a768eafb5424413c5b2f515c606be5bb4ef8c5dee43925aa5718b"}, + {file = "regex-2024.4.28-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:108e2dcf0b53a7c4ab8986842a8edcb8ab2e59919a74ff51c296772e8e74d0ae"}, + {file = "regex-2024.4.28-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:f1c5742c31ba7d72f2dedf7968998730664b45e38827637e0f04a2ac7de2f5f1"}, + {file = "regex-2024.4.28-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ecc6148228c9ae25ce403eade13a0961de1cb016bdb35c6eafd8e7b87ad028b1"}, + {file = "regex-2024.4.28-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b7d893c8cf0e2429b823ef1a1d360a25950ed11f0e2a9df2b5198821832e1947"}, + {file = "regex-2024.4.28-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4290035b169578ffbbfa50d904d26bec16a94526071ebec3dadbebf67a26b25e"}, + {file = "regex-2024.4.28-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:44a22ae1cfd82e4ffa2066eb3390777dc79468f866f0625261a93e44cdf6482b"}, + {file = "regex-2024.4.28-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fd24fd140b69f0b0bcc9165c397e9b2e89ecbeda83303abf2a072609f60239e2"}, + {file = "regex-2024.4.28-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:39fb166d2196413bead229cd64a2ffd6ec78ebab83fff7d2701103cf9f4dfd26"}, + {file = "regex-2024.4.28-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9301cc6db4d83d2c0719f7fcda37229691745168bf6ae849bea2e85fc769175d"}, + {file = "regex-2024.4.28-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7c3d389e8d76a49923683123730c33e9553063d9041658f23897f0b396b2386f"}, + {file = "regex-2024.4.28-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:99ef6289b62042500d581170d06e17f5353b111a15aa6b25b05b91c6886df8fc"}, + {file = "regex-2024.4.28-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:b91d529b47798c016d4b4c1d06cc826ac40d196da54f0de3c519f5a297c5076a"}, + {file = "regex-2024.4.28-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:43548ad74ea50456e1c68d3c67fff3de64c6edb85bcd511d1136f9b5376fc9d1"}, + {file = "regex-2024.4.28-cp39-cp39-win32.whl", hash = 
"sha256:05d9b6578a22db7dedb4df81451f360395828b04f4513980b6bd7a1412c679cc"}, + {file = "regex-2024.4.28-cp39-cp39-win_amd64.whl", hash = "sha256:3986217ec830c2109875be740531feb8ddafe0dfa49767cdcd072ed7e8927962"}, + {file = "regex-2024.4.28.tar.gz", hash = "sha256:83ab366777ea45d58f72593adf35d36ca911ea8bd838483c1823b883a121b0e4"}, ] [[package]] @@ -2029,132 +2452,140 @@ requests = ">=2.26.0" [package.extras] blobfile = ["blobfile (>=2)"] +[[package]] +name = "tinycss2" +version = "1.3.0" +description = "A tiny CSS parser" +optional = false +python-versions = ">=3.8" +files = [ + {file = "tinycss2-1.3.0-py3-none-any.whl", hash = "sha256:54a8dbdffb334d536851be0226030e9505965bb2f30f21a4a82c55fb2a80fae7"}, + {file = "tinycss2-1.3.0.tar.gz", hash = "sha256:152f9acabd296a8375fbca5b84c961ff95971fcfc32e79550c8df8e29118c54d"}, +] + +[package.dependencies] +webencodings = ">=0.4" + +[package.extras] +doc = ["sphinx", "sphinx_rtd_theme"] +test = ["pytest", "ruff"] + [[package]] name = "tokenizers" -version = "0.15.2" +version = "0.19.1" description = "" optional = false python-versions = ">=3.7" files = [ - {file = "tokenizers-0.15.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:52f6130c9cbf70544287575a985bf44ae1bda2da7e8c24e97716080593638012"}, - {file = "tokenizers-0.15.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:054c1cc9c6d68f7ffa4e810b3d5131e0ba511b6e4be34157aa08ee54c2f8d9ee"}, - {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:a9b9b070fdad06e347563b88c278995735292ded1132f8657084989a4c84a6d5"}, - {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea621a7eef4b70e1f7a4e84dd989ae3f0eeb50fc8690254eacc08acb623e82f1"}, - {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cf7fd9a5141634fa3aa8d6b7be362e6ae1b4cda60da81388fa533e0b552c98fd"}, - {file = 
"tokenizers-0.15.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:44f2a832cd0825295f7179eaf173381dc45230f9227ec4b44378322d900447c9"}, - {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8b9ec69247a23747669ec4b0ca10f8e3dfb3545d550258129bd62291aabe8605"}, - {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40b6a4c78da863ff26dbd5ad9a8ecc33d8a8d97b535172601cf00aee9d7ce9ce"}, - {file = "tokenizers-0.15.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5ab2a4d21dcf76af60e05af8063138849eb1d6553a0d059f6534357bce8ba364"}, - {file = "tokenizers-0.15.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a47acfac7e511f6bbfcf2d3fb8c26979c780a91e06fb5b9a43831b2c0153d024"}, - {file = "tokenizers-0.15.2-cp310-none-win32.whl", hash = "sha256:064ff87bb6acdbd693666de9a4b692add41308a2c0ec0770d6385737117215f2"}, - {file = "tokenizers-0.15.2-cp310-none-win_amd64.whl", hash = "sha256:3b919afe4df7eb6ac7cafd2bd14fb507d3f408db7a68c43117f579c984a73843"}, - {file = "tokenizers-0.15.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:89cd1cb93e4b12ff39bb2d626ad77e35209de9309a71e4d3d4672667b4b256e7"}, - {file = "tokenizers-0.15.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:cfed5c64e5be23d7ee0f0e98081a25c2a46b0b77ce99a4f0605b1ec43dd481fa"}, - {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:a907d76dcfda37023ba203ab4ceeb21bc5683436ebefbd895a0841fd52f6f6f2"}, - {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20ea60479de6fc7b8ae756b4b097572372d7e4032e2521c1bbf3d90c90a99ff0"}, - {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:48e2b9335be2bc0171df9281385c2ed06a15f5cf121c44094338306ab7b33f2c"}, - {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:112a1dd436d2cc06e6ffdc0b06d55ac019a35a63afd26475205cb4b1bf0bfbff"}, - {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4620cca5c2817177ee8706f860364cc3a8845bc1e291aaf661fb899e5d1c45b0"}, - {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ccd73a82751c523b3fc31ff8194702e4af4db21dc20e55b30ecc2079c5d43cb7"}, - {file = "tokenizers-0.15.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:107089f135b4ae7817affe6264f8c7a5c5b4fd9a90f9439ed495f54fcea56fb4"}, - {file = "tokenizers-0.15.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0ff110ecc57b7aa4a594396525a3451ad70988e517237fe91c540997c4e50e29"}, - {file = "tokenizers-0.15.2-cp311-none-win32.whl", hash = "sha256:6d76f00f5c32da36c61f41c58346a4fa7f0a61be02f4301fd30ad59834977cc3"}, - {file = "tokenizers-0.15.2-cp311-none-win_amd64.whl", hash = "sha256:cc90102ed17271cf0a1262babe5939e0134b3890345d11a19c3145184b706055"}, - {file = "tokenizers-0.15.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f86593c18d2e6248e72fb91c77d413a815153b8ea4e31f7cd443bdf28e467670"}, - {file = "tokenizers-0.15.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0774bccc6608eca23eb9d620196687c8b2360624619623cf4ba9dc9bd53e8b51"}, - {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:d0222c5b7c9b26c0b4822a82f6a7011de0a9d3060e1da176f66274b70f846b98"}, - {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3835738be1de66624fff2f4f6f6684775da4e9c00bde053be7564cbf3545cc66"}, - {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0143e7d9dcd811855c1ce1ab9bf5d96d29bf5e528fd6c7824d0465741e8c10fd"}, - {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:db35825f6d54215f6b6009a7ff3eedee0848c99a6271c870d2826fbbedf31a38"}, - {file = 
"tokenizers-0.15.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3f5e64b0389a2be47091d8cc53c87859783b837ea1a06edd9d8e04004df55a5c"}, - {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e0480c452217edd35eca56fafe2029fb4d368b7c0475f8dfa3c5c9c400a7456"}, - {file = "tokenizers-0.15.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a33ab881c8fe70474980577e033d0bc9a27b7ab8272896e500708b212995d834"}, - {file = "tokenizers-0.15.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a308a607ca9de2c64c1b9ba79ec9a403969715a1b8ba5f998a676826f1a7039d"}, - {file = "tokenizers-0.15.2-cp312-none-win32.whl", hash = "sha256:b8fcfa81bcb9447df582c5bc96a031e6df4da2a774b8080d4f02c0c16b42be0b"}, - {file = "tokenizers-0.15.2-cp312-none-win_amd64.whl", hash = "sha256:38d7ab43c6825abfc0b661d95f39c7f8af2449364f01d331f3b51c94dcff7221"}, - {file = "tokenizers-0.15.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:38bfb0204ff3246ca4d5e726e8cc8403bfc931090151e6eede54d0e0cf162ef0"}, - {file = "tokenizers-0.15.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9c861d35e8286a53e06e9e28d030b5a05bcbf5ac9d7229e561e53c352a85b1fc"}, - {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:936bf3842db5b2048eaa53dade907b1160f318e7c90c74bfab86f1e47720bdd6"}, - {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:620beacc3373277700d0e27718aa8b25f7b383eb8001fba94ee00aeea1459d89"}, - {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2735ecbbf37e52db4ea970e539fd2d450d213517b77745114f92867f3fc246eb"}, - {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:473c83c5e2359bb81b0b6fde870b41b2764fcdd36d997485e07e72cc3a62264a"}, - {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:968fa1fb3c27398b28a4eca1cbd1e19355c4d3a6007f7398d48826bbe3a0f728"}, - {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:865c60ae6eaebdde7da66191ee9b7db52e542ed8ee9d2c653b6d190a9351b980"}, - {file = "tokenizers-0.15.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7c0d8b52664ab2d4a8d6686eb5effc68b78608a9008f086a122a7b2996befbab"}, - {file = "tokenizers-0.15.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:f33dfbdec3784093a9aebb3680d1f91336c56d86cc70ddf88708251da1fe9064"}, - {file = "tokenizers-0.15.2-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:d44ba80988ff9424e33e0a49445072ac7029d8c0e1601ad25a0ca5f41ed0c1d6"}, - {file = "tokenizers-0.15.2-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:dce74266919b892f82b1b86025a613956ea0ea62a4843d4c4237be2c5498ed3a"}, - {file = "tokenizers-0.15.2-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0ef06b9707baeb98b316577acb04f4852239d856b93e9ec3a299622f6084e4be"}, - {file = "tokenizers-0.15.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c73e2e74bbb07910da0d37c326869f34113137b23eadad3fc00856e6b3d9930c"}, - {file = "tokenizers-0.15.2-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4eeb12daf02a59e29f578a865f55d87cd103ce62bd8a3a5874f8fdeaa82e336b"}, - {file = "tokenizers-0.15.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9ba9f6895af58487ca4f54e8a664a322f16c26bbb442effd01087eba391a719e"}, - {file = "tokenizers-0.15.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ccec77aa7150e38eec6878a493bf8c263ff1fa8a62404e16c6203c64c1f16a26"}, - {file = "tokenizers-0.15.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3f40604f5042ff210ba82743dda2b6aa3e55aa12df4e9f2378ee01a17e2855e"}, - {file = "tokenizers-0.15.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = 
"sha256:5645938a42d78c4885086767c70923abad047163d809c16da75d6b290cb30bbe"}, - {file = "tokenizers-0.15.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:05a77cbfebe28a61ab5c3891f9939cc24798b63fa236d84e5f29f3a85a200c00"}, - {file = "tokenizers-0.15.2-cp37-none-win32.whl", hash = "sha256:361abdc068e8afe9c5b818769a48624687fb6aaed49636ee39bec4e95e1a215b"}, - {file = "tokenizers-0.15.2-cp37-none-win_amd64.whl", hash = "sha256:7ef789f83eb0f9baeb4d09a86cd639c0a5518528f9992f38b28e819df397eb06"}, - {file = "tokenizers-0.15.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:4fe1f74a902bee74a3b25aff180fbfbf4f8b444ab37c4d496af7afd13a784ed2"}, - {file = "tokenizers-0.15.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4c4b89038a684f40a6b15d6b09f49650ac64d951ad0f2a3ea9169687bbf2a8ba"}, - {file = "tokenizers-0.15.2-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:d05a1b06f986d41aed5f2de464c003004b2df8aaf66f2b7628254bcbfb72a438"}, - {file = "tokenizers-0.15.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:508711a108684111ec8af89d3a9e9e08755247eda27d0ba5e3c50e9da1600f6d"}, - {file = "tokenizers-0.15.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:daa348f02d15160cb35439098ac96e3a53bacf35885072611cd9e5be7d333daa"}, - {file = "tokenizers-0.15.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:494fdbe5932d3416de2a85fc2470b797e6f3226c12845cadf054dd906afd0442"}, - {file = "tokenizers-0.15.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c2d60f5246f4da9373f75ff18d64c69cbf60c3bca597290cea01059c336d2470"}, - {file = "tokenizers-0.15.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93268e788825f52de4c7bdcb6ebc1fcd4a5442c02e730faa9b6b08f23ead0e24"}, - {file = "tokenizers-0.15.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6fc7083ab404019fc9acafe78662c192673c1e696bd598d16dc005bd663a5cf9"}, - {file = 
"tokenizers-0.15.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:41e39b41e5531d6b2122a77532dbea60e171ef87a3820b5a3888daa847df4153"}, - {file = "tokenizers-0.15.2-cp38-none-win32.whl", hash = "sha256:06cd0487b1cbfabefb2cc52fbd6b1f8d4c37799bd6c6e1641281adaa6b2504a7"}, - {file = "tokenizers-0.15.2-cp38-none-win_amd64.whl", hash = "sha256:5179c271aa5de9c71712e31cb5a79e436ecd0d7532a408fa42a8dbfa4bc23fd9"}, - {file = "tokenizers-0.15.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:82f8652a74cc107052328b87ea8b34291c0f55b96d8fb261b3880216a9f9e48e"}, - {file = "tokenizers-0.15.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:02458bee6f5f3139f1ebbb6d042b283af712c0981f5bc50edf771d6b762d5e4f"}, - {file = "tokenizers-0.15.2-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:c9a09cd26cca2e1c349f91aa665309ddb48d71636370749414fbf67bc83c5343"}, - {file = "tokenizers-0.15.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:158be8ea8554e5ed69acc1ce3fbb23a06060bd4bbb09029431ad6b9a466a7121"}, - {file = "tokenizers-0.15.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1ddba9a2b0c8c81633eca0bb2e1aa5b3a15362b1277f1ae64176d0f6eba78ab1"}, - {file = "tokenizers-0.15.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3ef5dd1d39797044642dbe53eb2bc56435308432e9c7907728da74c69ee2adca"}, - {file = "tokenizers-0.15.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:454c203164e07a860dbeb3b1f4a733be52b0edbb4dd2e5bd75023ffa8b49403a"}, - {file = "tokenizers-0.15.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0cf6b7f1d4dc59af960e6ffdc4faffe6460bbfa8dce27a58bf75755ffdb2526d"}, - {file = "tokenizers-0.15.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2ef09bbc16519f6c25d0c7fc0c6a33a6f62923e263c9d7cca4e58b8c61572afb"}, - {file = "tokenizers-0.15.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = 
"sha256:c9a2ebdd2ad4ec7a68e7615086e633857c85e2f18025bd05d2a4399e6c5f7169"}, - {file = "tokenizers-0.15.2-cp39-none-win32.whl", hash = "sha256:918fbb0eab96fe08e72a8c2b5461e9cce95585d82a58688e7f01c2bd546c79d0"}, - {file = "tokenizers-0.15.2-cp39-none-win_amd64.whl", hash = "sha256:524e60da0135e106b254bd71f0659be9f89d83f006ea9093ce4d1fab498c6d0d"}, - {file = "tokenizers-0.15.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:6a9b648a58281c4672212fab04e60648fde574877d0139cd4b4f93fe28ca8944"}, - {file = "tokenizers-0.15.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:7c7d18b733be6bbca8a55084027f7be428c947ddf871c500ee603e375013ffba"}, - {file = "tokenizers-0.15.2-pp310-pypy310_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:13ca3611de8d9ddfbc4dc39ef54ab1d2d4aaa114ac8727dfdc6a6ec4be017378"}, - {file = "tokenizers-0.15.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:237d1bf3361cf2e6463e6c140628e6406766e8b27274f5fcc62c747ae3c6f094"}, - {file = "tokenizers-0.15.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67a0fe1e49e60c664915e9fb6b0cb19bac082ab1f309188230e4b2920230edb3"}, - {file = "tokenizers-0.15.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4e022fe65e99230b8fd89ebdfea138c24421f91c1a4f4781a8f5016fd5cdfb4d"}, - {file = "tokenizers-0.15.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:d857be2df69763362ac699f8b251a8cd3fac9d21893de129bc788f8baaef2693"}, - {file = "tokenizers-0.15.2-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:708bb3e4283177236309e698da5fcd0879ce8fd37457d7c266d16b550bcbbd18"}, - {file = "tokenizers-0.15.2-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:64c35e09e9899b72a76e762f9854e8750213f67567787d45f37ce06daf57ca78"}, - {file = "tokenizers-0.15.2-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:c1257f4394be0d3b00de8c9e840ca5601d0a4a8438361ce9c2b05c7d25f6057b"}, - {file = "tokenizers-0.15.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02272fe48280e0293a04245ca5d919b2c94a48b408b55e858feae9618138aeda"}, - {file = "tokenizers-0.15.2-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:dc3ad9ebc76eabe8b1d7c04d38be884b8f9d60c0cdc09b0aa4e3bcf746de0388"}, - {file = "tokenizers-0.15.2-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:32e16bdeffa7c4f46bf2152172ca511808b952701d13e7c18833c0b73cb5c23f"}, - {file = "tokenizers-0.15.2-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:fb16ba563d59003028b678d2361a27f7e4ae0ab29c7a80690efa20d829c81fdb"}, - {file = "tokenizers-0.15.2-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:2277c36d2d6cdb7876c274547921a42425b6810d38354327dd65a8009acf870c"}, - {file = "tokenizers-0.15.2-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:1cf75d32e8d250781940d07f7eece253f2fe9ecdb1dc7ba6e3833fa17b82fcbc"}, - {file = "tokenizers-0.15.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1b3b31884dc8e9b21508bb76da80ebf7308fdb947a17affce815665d5c4d028"}, - {file = "tokenizers-0.15.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b10122d8d8e30afb43bb1fe21a3619f62c3e2574bff2699cf8af8b0b6c5dc4a3"}, - {file = "tokenizers-0.15.2-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d88b96ff0fe8e91f6ef01ba50b0d71db5017fa4e3b1d99681cec89a85faf7bf7"}, - {file = "tokenizers-0.15.2-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:37aaec5a52e959892870a7c47cef80c53797c0db9149d458460f4f31e2fb250e"}, - {file = "tokenizers-0.15.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e2ea752f2b0fe96eb6e2f3adbbf4d72aaa1272079b0dfa1145507bd6a5d537e6"}, - {file = "tokenizers-0.15.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = 
"sha256:4b19a808d8799fda23504a5cd31d2f58e6f52f140380082b352f877017d6342b"}, - {file = "tokenizers-0.15.2-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:64c86e5e068ac8b19204419ed8ca90f9d25db20578f5881e337d203b314f4104"}, - {file = "tokenizers-0.15.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:de19c4dc503c612847edf833c82e9f73cd79926a384af9d801dcf93f110cea4e"}, - {file = "tokenizers-0.15.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea09acd2fe3324174063d61ad620dec3bcf042b495515f27f638270a7d466e8b"}, - {file = "tokenizers-0.15.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:cf27fd43472e07b57cf420eee1e814549203d56de00b5af8659cb99885472f1f"}, - {file = "tokenizers-0.15.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:7ca22bd897537a0080521445d91a58886c8c04084a6a19e6c78c586e0cfa92a5"}, - {file = "tokenizers-0.15.2.tar.gz", hash = "sha256:e6e9c6e019dd5484be5beafc775ae6c925f4c69a3487040ed09b45e13df2cb91"}, + {file = "tokenizers-0.19.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:952078130b3d101e05ecfc7fc3640282d74ed26bcf691400f872563fca15ac97"}, + {file = "tokenizers-0.19.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:82c8b8063de6c0468f08e82c4e198763e7b97aabfe573fd4cf7b33930ca4df77"}, + {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f03727225feaf340ceeb7e00604825addef622d551cbd46b7b775ac834c1e1c4"}, + {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:453e4422efdfc9c6b6bf2eae00d5e323f263fff62b29a8c9cd526c5003f3f642"}, + {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:02e81bf089ebf0e7f4df34fa0207519f07e66d8491d963618252f2e0729e0b46"}, + {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:b07c538ba956843833fee1190cf769c60dc62e1cf934ed50d77d5502194d63b1"}, + {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e28cab1582e0eec38b1f38c1c1fb2e56bce5dc180acb1724574fc5f47da2a4fe"}, + {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b01afb7193d47439f091cd8f070a1ced347ad0f9144952a30a41836902fe09e"}, + {file = "tokenizers-0.19.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:7fb297edec6c6841ab2e4e8f357209519188e4a59b557ea4fafcf4691d1b4c98"}, + {file = "tokenizers-0.19.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2e8a3dd055e515df7054378dc9d6fa8c8c34e1f32777fb9a01fea81496b3f9d3"}, + {file = "tokenizers-0.19.1-cp310-none-win32.whl", hash = "sha256:7ff898780a155ea053f5d934925f3902be2ed1f4d916461e1a93019cc7250837"}, + {file = "tokenizers-0.19.1-cp310-none-win_amd64.whl", hash = "sha256:bea6f9947e9419c2fda21ae6c32871e3d398cba549b93f4a65a2d369662d9403"}, + {file = "tokenizers-0.19.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:5c88d1481f1882c2e53e6bb06491e474e420d9ac7bdff172610c4f9ad3898059"}, + {file = "tokenizers-0.19.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ddf672ed719b4ed82b51499100f5417d7d9f6fb05a65e232249268f35de5ed14"}, + {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:dadc509cc8a9fe460bd274c0e16ac4184d0958117cf026e0ea8b32b438171594"}, + {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dfedf31824ca4915b511b03441784ff640378191918264268e6923da48104acc"}, + {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ac11016d0a04aa6487b1513a3a36e7bee7eec0e5d30057c9c0408067345c48d2"}, + {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:76951121890fea8330d3a0df9a954b3f2a37e3ec20e5b0530e9a0044ca2e11fe"}, + {file = 
"tokenizers-0.19.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b342d2ce8fc8d00f376af068e3274e2e8649562e3bc6ae4a67784ded6b99428d"}, + {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d16ff18907f4909dca9b076b9c2d899114dd6abceeb074eca0c93e2353f943aa"}, + {file = "tokenizers-0.19.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:706a37cc5332f85f26efbe2bdc9ef8a9b372b77e4645331a405073e4b3a8c1c6"}, + {file = "tokenizers-0.19.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:16baac68651701364b0289979ecec728546133e8e8fe38f66fe48ad07996b88b"}, + {file = "tokenizers-0.19.1-cp311-none-win32.whl", hash = "sha256:9ed240c56b4403e22b9584ee37d87b8bfa14865134e3e1c3fb4b2c42fafd3256"}, + {file = "tokenizers-0.19.1-cp311-none-win_amd64.whl", hash = "sha256:ad57d59341710b94a7d9dbea13f5c1e7d76fd8d9bcd944a7a6ab0b0da6e0cc66"}, + {file = "tokenizers-0.19.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:621d670e1b1c281a1c9698ed89451395d318802ff88d1fc1accff0867a06f153"}, + {file = "tokenizers-0.19.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d924204a3dbe50b75630bd16f821ebda6a5f729928df30f582fb5aade90c818a"}, + {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:4f3fefdc0446b1a1e6d81cd4c07088ac015665d2e812f6dbba4a06267d1a2c95"}, + {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9620b78e0b2d52ef07b0d428323fb34e8ea1219c5eac98c2596311f20f1f9266"}, + {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04ce49e82d100594715ac1b2ce87d1a36e61891a91de774755f743babcd0dd52"}, + {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c5c2ff13d157afe413bf7e25789879dd463e5a4abfb529a2d8f8473d8042e28f"}, + {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:3174c76efd9d08f836bfccaca7cfec3f4d1c0a4cf3acbc7236ad577cc423c840"}, + {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c9d5b6c0e7a1e979bec10ff960fae925e947aab95619a6fdb4c1d8ff3708ce3"}, + {file = "tokenizers-0.19.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a179856d1caee06577220ebcfa332af046d576fb73454b8f4d4b0ba8324423ea"}, + {file = "tokenizers-0.19.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:952b80dac1a6492170f8c2429bd11fcaa14377e097d12a1dbe0ef2fb2241e16c"}, + {file = "tokenizers-0.19.1-cp312-none-win32.whl", hash = "sha256:01d62812454c188306755c94755465505836fd616f75067abcae529c35edeb57"}, + {file = "tokenizers-0.19.1-cp312-none-win_amd64.whl", hash = "sha256:b70bfbe3a82d3e3fb2a5e9b22a39f8d1740c96c68b6ace0086b39074f08ab89a"}, + {file = "tokenizers-0.19.1-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:bb9dfe7dae85bc6119d705a76dc068c062b8b575abe3595e3c6276480e67e3f1"}, + {file = "tokenizers-0.19.1-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:1f0360cbea28ea99944ac089c00de7b2e3e1c58f479fb8613b6d8d511ce98267"}, + {file = "tokenizers-0.19.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:71e3ec71f0e78780851fef28c2a9babe20270404c921b756d7c532d280349214"}, + {file = "tokenizers-0.19.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b82931fa619dbad979c0ee8e54dd5278acc418209cc897e42fac041f5366d626"}, + {file = "tokenizers-0.19.1-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e8ff5b90eabdcdaa19af697885f70fe0b714ce16709cf43d4952f1f85299e73a"}, + {file = "tokenizers-0.19.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e742d76ad84acbdb1a8e4694f915fe59ff6edc381c97d6dfdd054954e3478ad4"}, + {file = "tokenizers-0.19.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d8c5d59d7b59885eab559d5bc082b2985555a54cda04dda4c65528d90ad252ad"}, + {file = 
"tokenizers-0.19.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b2da5c32ed869bebd990c9420df49813709e953674c0722ff471a116d97b22d"}, + {file = "tokenizers-0.19.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:638e43936cc8b2cbb9f9d8dde0fe5e7e30766a3318d2342999ae27f68fdc9bd6"}, + {file = "tokenizers-0.19.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:78e769eb3b2c79687d9cb0f89ef77223e8e279b75c0a968e637ca7043a84463f"}, + {file = "tokenizers-0.19.1-cp37-none-win32.whl", hash = "sha256:72791f9bb1ca78e3ae525d4782e85272c63faaef9940d92142aa3eb79f3407a3"}, + {file = "tokenizers-0.19.1-cp37-none-win_amd64.whl", hash = "sha256:f3bbb7a0c5fcb692950b041ae11067ac54826204318922da754f908d95619fbc"}, + {file = "tokenizers-0.19.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:07f9295349bbbcedae8cefdbcfa7f686aa420be8aca5d4f7d1ae6016c128c0c5"}, + {file = "tokenizers-0.19.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:10a707cc6c4b6b183ec5dbfc5c34f3064e18cf62b4a938cb41699e33a99e03c1"}, + {file = "tokenizers-0.19.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6309271f57b397aa0aff0cbbe632ca9d70430839ca3178bf0f06f825924eca22"}, + {file = "tokenizers-0.19.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ad23d37d68cf00d54af184586d79b84075ada495e7c5c0f601f051b162112dc"}, + {file = "tokenizers-0.19.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:427c4f0f3df9109314d4f75b8d1f65d9477033e67ffaec4bca53293d3aca286d"}, + {file = "tokenizers-0.19.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e83a31c9cf181a0a3ef0abad2b5f6b43399faf5da7e696196ddd110d332519ee"}, + {file = "tokenizers-0.19.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c27b99889bd58b7e301468c0838c5ed75e60c66df0d4db80c08f43462f82e0d3"}, + {file = "tokenizers-0.19.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:bac0b0eb952412b0b196ca7a40e7dce4ed6f6926489313414010f2e6b9ec2adf"}, + {file = "tokenizers-0.19.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8a6298bde623725ca31c9035a04bf2ef63208d266acd2bed8c2cb7d2b7d53ce6"}, + {file = "tokenizers-0.19.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:08a44864e42fa6d7d76d7be4bec62c9982f6f6248b4aa42f7302aa01e0abfd26"}, + {file = "tokenizers-0.19.1-cp38-none-win32.whl", hash = "sha256:1de5bc8652252d9357a666e609cb1453d4f8e160eb1fb2830ee369dd658e8975"}, + {file = "tokenizers-0.19.1-cp38-none-win_amd64.whl", hash = "sha256:0bcce02bf1ad9882345b34d5bd25ed4949a480cf0e656bbd468f4d8986f7a3f1"}, + {file = "tokenizers-0.19.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:0b9394bd204842a2a1fd37fe29935353742be4a3460b6ccbaefa93f58a8df43d"}, + {file = "tokenizers-0.19.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4692ab92f91b87769d950ca14dbb61f8a9ef36a62f94bad6c82cc84a51f76f6a"}, + {file = "tokenizers-0.19.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6258c2ef6f06259f70a682491c78561d492e885adeaf9f64f5389f78aa49a051"}, + {file = "tokenizers-0.19.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c85cf76561fbd01e0d9ea2d1cbe711a65400092bc52b5242b16cfd22e51f0c58"}, + {file = "tokenizers-0.19.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:670b802d4d82bbbb832ddb0d41df7015b3e549714c0e77f9bed3e74d42400fbe"}, + {file = "tokenizers-0.19.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:85aa3ab4b03d5e99fdd31660872249df5e855334b6c333e0bc13032ff4469c4a"}, + {file = "tokenizers-0.19.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cbf001afbbed111a79ca47d75941e9e5361297a87d186cbfc11ed45e30b5daba"}, + {file = "tokenizers-0.19.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4c89aa46c269e4e70c4d4f9d6bc644fcc39bb409cb2a81227923404dd6f5227"}, + {file = 
"tokenizers-0.19.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:39c1ec76ea1027438fafe16ecb0fb84795e62e9d643444c1090179e63808c69d"}, + {file = "tokenizers-0.19.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c2a0d47a89b48d7daa241e004e71fb5a50533718897a4cd6235cb846d511a478"}, + {file = "tokenizers-0.19.1-cp39-none-win32.whl", hash = "sha256:61b7fe8886f2e104d4caf9218b157b106207e0f2a4905c9c7ac98890688aabeb"}, + {file = "tokenizers-0.19.1-cp39-none-win_amd64.whl", hash = "sha256:f97660f6c43efd3e0bfd3f2e3e5615bf215680bad6ee3d469df6454b8c6e8256"}, + {file = "tokenizers-0.19.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3b11853f17b54c2fe47742c56d8a33bf49ce31caf531e87ac0d7d13d327c9334"}, + {file = "tokenizers-0.19.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d26194ef6c13302f446d39972aaa36a1dda6450bc8949f5eb4c27f51191375bd"}, + {file = "tokenizers-0.19.1-pp310-pypy310_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:e8d1ed93beda54bbd6131a2cb363a576eac746d5c26ba5b7556bc6f964425594"}, + {file = "tokenizers-0.19.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca407133536f19bdec44b3da117ef0d12e43f6d4b56ac4c765f37eca501c7bda"}, + {file = "tokenizers-0.19.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce05fde79d2bc2e46ac08aacbc142bead21614d937aac950be88dc79f9db9022"}, + {file = "tokenizers-0.19.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:35583cd46d16f07c054efd18b5d46af4a2f070a2dd0a47914e66f3ff5efb2b1e"}, + {file = "tokenizers-0.19.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:43350270bfc16b06ad3f6f07eab21f089adb835544417afda0f83256a8bf8b75"}, + {file = "tokenizers-0.19.1-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b4399b59d1af5645bcee2072a463318114c39b8547437a7c2d6a186a1b5a0e2d"}, + {file = "tokenizers-0.19.1-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = 
"sha256:6852c5b2a853b8b0ddc5993cd4f33bfffdca4fcc5d52f89dd4b8eada99379285"}, + {file = "tokenizers-0.19.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bcd266ae85c3d39df2f7e7d0e07f6c41a55e9a3123bb11f854412952deacd828"}, + {file = "tokenizers-0.19.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ecb2651956eea2aa0a2d099434134b1b68f1c31f9a5084d6d53f08ed43d45ff2"}, + {file = "tokenizers-0.19.1-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:b279ab506ec4445166ac476fb4d3cc383accde1ea152998509a94d82547c8e2a"}, + {file = "tokenizers-0.19.1-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:89183e55fb86e61d848ff83753f64cded119f5d6e1f553d14ffee3700d0a4a49"}, + {file = "tokenizers-0.19.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b2edbc75744235eea94d595a8b70fe279dd42f3296f76d5a86dde1d46e35f574"}, + {file = "tokenizers-0.19.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:0e64bfde9a723274e9a71630c3e9494ed7b4c0f76a1faacf7fe294cd26f7ae7c"}, + {file = "tokenizers-0.19.1-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0b5ca92bfa717759c052e345770792d02d1f43b06f9e790ca0a1db62838816f3"}, + {file = "tokenizers-0.19.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f8a20266e695ec9d7a946a019c1d5ca4eddb6613d4f466888eee04f16eedb85"}, + {file = "tokenizers-0.19.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63c38f45d8f2a2ec0f3a20073cccb335b9f99f73b3c69483cd52ebc75369d8a1"}, + {file = "tokenizers-0.19.1-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:dd26e3afe8a7b61422df3176e06664503d3f5973b94f45d5c45987e1cb711876"}, + {file = "tokenizers-0.19.1-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:eddd5783a4a6309ce23432353cdb36220e25cbb779bfa9122320666508b44b88"}, + {file = "tokenizers-0.19.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = 
"sha256:56ae39d4036b753994476a1b935584071093b55c7a72e3b8288e68c313ca26e7"}, + {file = "tokenizers-0.19.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:f9939ca7e58c2758c01b40324a59c034ce0cebad18e0d4563a9b1beab3018243"}, + {file = "tokenizers-0.19.1-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6c330c0eb815d212893c67a032e9dc1b38a803eccb32f3e8172c19cc69fbb439"}, + {file = "tokenizers-0.19.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec11802450a2487cdf0e634b750a04cbdc1c4d066b97d94ce7dd2cb51ebb325b"}, + {file = "tokenizers-0.19.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2b718f316b596f36e1dae097a7d5b91fc5b85e90bf08b01ff139bd8953b25af"}, + {file = "tokenizers-0.19.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:ed69af290c2b65169f0ba9034d1dc39a5db9459b32f1dd8b5f3f32a3fcf06eab"}, + {file = "tokenizers-0.19.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f8a9c828277133af13f3859d1b6bf1c3cb6e9e1637df0e45312e6b7c2e622b1f"}, + {file = "tokenizers-0.19.1.tar.gz", hash = "sha256:ee59e6680ed0fdbe6b724cf38bd70400a0c1dd623b07ac729087270caeac88e3"}, ] [package.dependencies] -huggingface_hub = ">=0.16.4,<1.0" +huggingface-hub = ">=0.16.4,<1.0" [package.extras] dev = ["tokenizers[testing]"] -docs = ["setuptools_rust", "sphinx", "sphinx_rtd_theme"] -testing = ["black (==22.3)", "datasets", "numpy", "pytest", "requests"] +docs = ["setuptools-rust", "sphinx", "sphinx-rtd-theme"] +testing = ["black (==22.3)", "datasets", "numpy", "pytest", "requests", "ruff"] [[package]] name = "tomli" @@ -2189,13 +2620,13 @@ files = [ [[package]] name = "tqdm" -version = "4.66.2" +version = "4.66.4" description = "Fast, Extensible Progress Meter" optional = false python-versions = ">=3.7" files = [ - {file = "tqdm-4.66.2-py3-none-any.whl", hash = "sha256:1ee4f8a893eb9bef51c6e35730cebf234d5d0b6bd112b0271e10ed7c24a02bd9"}, - {file = "tqdm-4.66.2.tar.gz", hash = 
"sha256:6cd52cdf0fef0e0f543299cfc96fec90d7b8a7e88745f411ec33eb44d5ed3531"}, + {file = "tqdm-4.66.4-py3-none-any.whl", hash = "sha256:b75ca56b413b030bc3f00af51fd2c1a1a5eac6a0c1cca83cbb37a5c52abce644"}, + {file = "tqdm-4.66.4.tar.gz", hash = "sha256:e4d936c9de8727928f3be6079590e97d9abfe8d39a590be678eb5919ffc186bb"}, ] [package.dependencies] @@ -2209,28 +2640,28 @@ telegram = ["requests"] [[package]] name = "traitlets" -version = "5.14.2" +version = "5.14.3" description = "Traitlets Python configuration system" optional = false python-versions = ">=3.8" files = [ - {file = "traitlets-5.14.2-py3-none-any.whl", hash = "sha256:fcdf85684a772ddeba87db2f398ce00b40ff550d1528c03c14dbf6a02003cd80"}, - {file = "traitlets-5.14.2.tar.gz", hash = "sha256:8cdd83c040dab7d1dee822678e5f5d100b514f7b72b01615b26fc5718916fdf9"}, + {file = "traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f"}, + {file = "traitlets-5.14.3.tar.gz", hash = "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7"}, ] [package.extras] docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] -test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0,<8.1)", "pytest-mock", "pytest-mypy-testing"] +test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0,<8.2)", "pytest-mock", "pytest-mypy-testing"] [[package]] name = "typing-extensions" -version = "4.10.0" +version = "4.11.0" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" files = [ - {file = "typing_extensions-4.10.0-py3-none-any.whl", hash = "sha256:69b1a937c3a517342112fb4c6df7e72fc39a38e7891a5730ed4985b5214b5475"}, - {file = "typing_extensions-4.10.0.tar.gz", hash = "sha256:b0abd7c89e8fb96f98db18d86106ff1d90ab692004eb746cf6eda2682f91b3cb"}, + {file = "typing_extensions-4.11.0-py3-none-any.whl", hash = 
"sha256:c1f94d72897edaf4ce775bb7558d5b79d8126906a14ea5ed1635921406c0387a"}, + {file = "typing_extensions-4.11.0.tar.gz", hash = "sha256:83f085bd5ca59c80295fc2a82ab5dac679cbe02b9f33f7d83af68e241bea51b0"}, ] [[package]] @@ -2250,6 +2681,47 @@ h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] +[[package]] +name = "watchdog" +version = "4.0.0" +description = "Filesystem events monitoring" +optional = false +python-versions = ">=3.8" +files = [ + {file = "watchdog-4.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:39cb34b1f1afbf23e9562501673e7146777efe95da24fab5707b88f7fb11649b"}, + {file = "watchdog-4.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c522392acc5e962bcac3b22b9592493ffd06d1fc5d755954e6be9f4990de932b"}, + {file = "watchdog-4.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6c47bdd680009b11c9ac382163e05ca43baf4127954c5f6d0250e7d772d2b80c"}, + {file = "watchdog-4.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8350d4055505412a426b6ad8c521bc7d367d1637a762c70fdd93a3a0d595990b"}, + {file = "watchdog-4.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c17d98799f32e3f55f181f19dd2021d762eb38fdd381b4a748b9f5a36738e935"}, + {file = "watchdog-4.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4986db5e8880b0e6b7cd52ba36255d4793bf5cdc95bd6264806c233173b1ec0b"}, + {file = "watchdog-4.0.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:11e12fafb13372e18ca1bbf12d50f593e7280646687463dd47730fd4f4d5d257"}, + {file = "watchdog-4.0.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5369136a6474678e02426bd984466343924d1df8e2fd94a9b443cb7e3aa20d19"}, + {file = "watchdog-4.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:76ad8484379695f3fe46228962017a7e1337e9acadafed67eb20aabb175df98b"}, + {file = "watchdog-4.0.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:45cc09cc4c3b43fb10b59ef4d07318d9a3ecdbff03abd2e36e77b6dd9f9a5c85"}, + {file = 
"watchdog-4.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:eed82cdf79cd7f0232e2fdc1ad05b06a5e102a43e331f7d041e5f0e0a34a51c4"}, + {file = "watchdog-4.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ba30a896166f0fee83183cec913298151b73164160d965af2e93a20bbd2ab605"}, + {file = "watchdog-4.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d18d7f18a47de6863cd480734613502904611730f8def45fc52a5d97503e5101"}, + {file = "watchdog-4.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2895bf0518361a9728773083908801a376743bcc37dfa252b801af8fd281b1ca"}, + {file = "watchdog-4.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:87e9df830022488e235dd601478c15ad73a0389628588ba0b028cb74eb72fed8"}, + {file = "watchdog-4.0.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6e949a8a94186bced05b6508faa61b7adacc911115664ccb1923b9ad1f1ccf7b"}, + {file = "watchdog-4.0.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6a4db54edea37d1058b08947c789a2354ee02972ed5d1e0dca9b0b820f4c7f92"}, + {file = "watchdog-4.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d31481ccf4694a8416b681544c23bd271f5a123162ab603c7d7d2dd7dd901a07"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:8fec441f5adcf81dd240a5fe78e3d83767999771630b5ddfc5867827a34fa3d3"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:6a9c71a0b02985b4b0b6d14b875a6c86ddea2fdbebd0c9a720a806a8bbffc69f"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:557ba04c816d23ce98a06e70af6abaa0485f6d94994ec78a42b05d1c03dcbd50"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:d0f9bd1fd919134d459d8abf954f63886745f4660ef66480b9d753a7c9d40927"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:f9b2fdca47dc855516b2d66eef3c39f2672cbf7e7a42e7e67ad2cbfcd6ba107d"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_s390x.whl", hash = 
"sha256:73c7a935e62033bd5e8f0da33a4dcb763da2361921a69a5a95aaf6c93aa03a87"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:6a80d5cae8c265842c7419c560b9961561556c4361b297b4c431903f8c33b269"}, + {file = "watchdog-4.0.0-py3-none-win32.whl", hash = "sha256:8f9a542c979df62098ae9c58b19e03ad3df1c9d8c6895d96c0d51da17b243b1c"}, + {file = "watchdog-4.0.0-py3-none-win_amd64.whl", hash = "sha256:f970663fa4f7e80401a7b0cbeec00fa801bf0287d93d48368fc3e6fa32716245"}, + {file = "watchdog-4.0.0-py3-none-win_ia64.whl", hash = "sha256:9a03e16e55465177d416699331b0f3564138f1807ecc5f2de9d55d8f188d08c7"}, + {file = "watchdog-4.0.0.tar.gz", hash = "sha256:e3e7065cbdabe6183ab82199d7a4f6b3ba0a438c5a512a68559846ccb76a78ec"}, +] + +[package.extras] +watchmedo = ["PyYAML (>=3.10)"] + [[package]] name = "wcwidth" version = "0.2.13" @@ -2261,6 +2733,17 @@ files = [ {file = "wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5"}, ] +[[package]] +name = "webencodings" +version = "0.5.1" +description = "Character encoding aliases for legacy web content" +optional = false +python-versions = "*" +files = [ + {file = "webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78"}, + {file = "webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"}, +] + [[package]] name = "win32-setctime" version = "1.1.0" @@ -2396,4 +2879,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p [metadata] lock-version = "2.0" python-versions = "<3.13,>=3.10" -content-hash = "9f90e0b50761687dd70040e3d1bde55558e2d3e2743440deb63d4b37d0a4b051" +content-hash = "544f9df84f9b877c3bcbb425bf0970f630c2b7191de9ab84ef54e8c17f406911" diff --git a/pyproject.toml b/pyproject.toml index f183211..5dbea84 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,11 +1,14 @@ [tool.poetry] name = "rigging" -version = 
"0.2.2" +version = "1.0.0rc0" description = "LLM Interaction Framework" authors = ["Nick Landers "] license = "MIT" repository = "https://github.com/dreadnode/rigging" readme = "README.md" +packages = [ + {include = "rigging"} +] [tool.poetry.dependencies] python = "<3.13,>=3.10" @@ -20,6 +23,15 @@ mypy = "^1.8.0" ruff = "^0.1.14" pytest = "^8.0.0" +[tool.poetry.group.docs.dependencies] +mkdocs = "^1.6.0" +mkdocs-material = {extras = ["imaging"], version = "^9.5.20"} +mkdocstrings = "^0.25.0" +mkdocstrings-python = "^1.10.0" +mkdocs-section-index = "^0.3.9" +pymdown-extensions = "^10.8.1" +pygments = "^2.18.0" + [build-system] requires = ["poetry-core"] build-backend = "poetry.core.masonry.api" diff --git a/rigging/__init__.py b/rigging/__init__.py index a2dee15..7a83c57 100644 --- a/rigging/__init__.py +++ b/rigging/__init__.py @@ -1,5 +1,6 @@ from rigging.chat import Chat, PendingChat -from rigging.generator import GenerateParams, Generator, get_generator +from rigging.completion import Completion, PendingCompletion +from rigging.generator import GenerateParams, Generator, chat, complete, get_generator from rigging.message import Message, MessageDict, Messages from rigging.model import Model, attr, element, wrapped from rigging.tool import Tool @@ -18,6 +19,10 @@ "PendingChat", "Generator", "GenerateParams", + "chat", + "complete", + "Completion", + "PendingCompletion", ] from loguru import logger diff --git a/rigging/chat.py b/rigging/chat.py index 764d807..7307309 100644 --- a/rigging/chat.py +++ b/rigging/chat.py @@ -1,7 +1,25 @@ +""" +Chats are used pre and post generation to hold messages. + +They are the primary way to interact with the generator. 
+""" + +import asyncio import typing as t +from copy import deepcopy +from dataclasses import dataclass +from datetime import datetime +from typing import runtime_checkable +from uuid import UUID, uuid4 from loguru import logger -from pydantic import ValidationError +from pydantic import ( + BaseModel, + ConfigDict, + Field, + ValidationError, + computed_field, +) from rigging.error import ExhaustedMaxRoundsError from rigging.message import Message, MessageDict, Messages @@ -20,83 +38,221 @@ DEFAULT_MAX_ROUNDS = 5 -class Chat: +class Chat(BaseModel): + """ + Represents a completed chat conversation. + """ + + model_config = ConfigDict(arbitrary_types_allowed=True) + + uuid: UUID = Field(default_factory=uuid4) + """The unique identifier for the chat.""" + timestamp: datetime = Field(default_factory=datetime.now, repr=False) + """The timestamp when the chat was created.""" + messages: list[Message] + """The list of messages prior to generation.""" + generated: list[Message] = Field(default_factory=list) + """The list of messages resulting from the generation.""" + metadata: dict[str, t.Any] = Field(default_factory=dict) + """Additional metadata for the chat.""" + + generator: t.Optional["Generator"] = Field(None, exclude=True, repr=False) + """The generator associated with the chat.""" + params: t.Optional["GenerateParams"] = Field(None, exclude=True, repr=False) + """Any additional generation params used for this chat.""" + + @computed_field(repr=False) + def generator_id(self) -> str | None: + """The identifier of the generator used to create the chat""" + if self.generator is not None: + return self.generator.to_identifier(self.params) + return None + def __init__( self, messages: Messages, - next_messages: Messages | None = None, - pending: t.Optional["PendingChat"] = None, + generated: Messages | None = None, + generator: t.Optional["Generator"] = None, + **kwargs: t.Any, ): - self.messages: list[Message] = Message.fit_list(messages) - self.next_messages: 
list[Message] = [] - if next_messages is not None: - self.next_messages = Message.fit_list(next_messages) - self.pending_chat = pending + """ + Initialize a Chat object. + + Args: + messages: The messages for the chat. + generated: The next messages for the chat. + generator: The generator associated with this chat. + **kwargs: Additional keyword arguments (typically used for deserialization) + """ + from rigging.generator import get_generator + + if "generator_id" in kwargs and generator is None: + # TODO: Should we move params to self.params? + generator = get_generator(kwargs.pop("generator_id")) + + super().__init__( + messages=Message.fit_as_list(messages), + generated=Message.fit_as_list(generated) if generated is not None else [], + generator=generator, + **kwargs, + ) def __len__(self) -> int: - return len(self.messages) + len(self.next_messages) + return len(self.messages) + len(self.generated) @property def all(self) -> list[Message]: - return self.messages + self.next_messages + """Returns all messages in the chat, including the next messages.""" + return self.messages + self.generated @property def prev(self) -> list[Message]: + """Alias for the .messages property""" return self.messages @property def next(self) -> list[Message]: - return self.next_messages + """Alias for the .generated property""" + return self.generated @property def last(self) -> Message: - return self.next_messages[-1] + """Alias for .generated[-1]""" + return self.generated[-1] @property - def json(self) -> list[MessageDict]: - return [t.cast(MessageDict, message.model_dump()) for message in self.all] + def conversation(self) -> str: + """Returns a string representation of the chat.""" + return "\n\n".join([str(m) for m in self.all]) + + def meta(self, **kwargs: t.Any) -> "Chat": + """ + Updates the metadata of the chat with the provided key-value pairs. + + Args: + **kwargs: Key-value pairs representing the metadata to be updated. + + Returns: + The updated chat object. 
+ """ + self.metadata.update(kwargs) + return self + + def restart(self, *, generator: t.Optional["Generator"] = None, include_all: bool = False) -> "PendingChat": + """ + Attempt to convert back to a PendingChat for further generation. - def restart(self) -> "PendingChat": - if self.pending_chat is None: - raise ValueError("Cannot restart chat that was not created with a PendingChat") - return PendingChat(self.pending_chat.generator, self.messages, self.pending_chat.params) + Args: + generator: The generator to use for the restarted chat. Otherwise + the generator from the original PendingChat will be used. + include_all: Whether to include the next messages in the restarted chat. - # TODO: Why are these overloads here? I wonder if IDEs preferred them + Returns: + The restarted chat. + + Raises: + ValueError: If the chat was not created with a PendingChat and no generator is provided. + """ + messages = self.all if include_all else self.messages + if generator is None: + generator = self.generator + if generator is None: + raise ValueError("Cannot restart a chat without an associated generator") + return generator.chat(messages, self.params) def fork( - self, messages: t.Sequence[Message] | t.Sequence[MessageDict] | Message | MessageDict | str + self, + messages: t.Sequence[Message] | t.Sequence[MessageDict] | Message | MessageDict | str, + *, + include_all: bool = False, ) -> "PendingChat": - if self.pending_chat is None: - raise ValueError("Cannot continue chat that was not created with a PendingChat") + """ + Forks the chat by creating calling [rigging.chat.Chat.restart][] and appending the specified messages. - pending = PendingChat(self.pending_chat.generator, self.all, self.pending_chat.params) - pending.add(messages) - return pending + Args: + messages: + The messages to be added to the new `PendingChat` instance. + include_all: Whether to include the next messages in the restarted chat. 
- def continue_(self, messages: t.Sequence[Message] | t.Sequence[MessageDict] | Message | str) -> "PendingChat": - return self.fork(messages) + Returns: + A new instance of `PendingChat` with the specified messages added. + + """ + return self.restart(include_all=include_all).add(messages) - def clone(self) -> "Chat": - return Chat( - [m.model_copy() for m in self.messages], [m.model_copy() for m in self.next_messages], self.pending_chat + def continue_(self, messages: t.Sequence[Message] | t.Sequence[MessageDict] | Message | str) -> "PendingChat": + """Alias for the [rigging.chat.Chat.fork][] with `include_all=True`.""" + return self.fork(messages, include_all=True) + + def clone(self, *, only_messages: bool = False) -> "Chat": + """Creates a deep copy of the chat.""" + new = Chat( + [m.model_copy() for m in self.messages], + [m.model_copy() for m in self.generated], + self.generator, ) + if not only_messages: + new.metadata = deepcopy(self.metadata) + return new def apply(self, **kwargs: str) -> "Chat": - self.messages[-1].apply(**kwargs) + """ + Calls [rigging.message.Message.apply][] on the last message in the chat with the given keyword arguments. + + Args: + **kwargs: The string mapping of replacements. + + Returns: + The modified Chat object. + """ + self.last.apply(**kwargs) return self def apply_to_all(self, **kwargs: str) -> "Chat": - for message in self.messages: + """ + Calls [rigging.message.Message.apply][] on all messages in the chat with the given keyword arguments. + + Args: + **kwargs: The string mapping of replacements. + + Returns: + The modified chat object. + """ + for message in self.all: message.apply(**kwargs) return self def strip(self, model_type: type[Model], fail_on_missing: bool = False) -> "Chat": + """ + Strips all parsed parts of a particular type from the message content. + + Args: + model_type: The type of model to keep in the chat. 
+ fail_on_missing: Whether to raise an exception if a message of the specified model type is not found. + + Returns: + A new Chat object with only the messages of the specified model type. + """ new = self.clone() for message in new.all: - message.strip(model_type, fail_on_missing) + message.strip(model_type, fail_on_missing=fail_on_missing) return new def inject_system_content(self, content: str) -> Message: + """ + Injects content into the chat as a system message. + + Note: + If the chat is empty or the first message is not a system message, + a new system message with the given content is inserted at the beginning of the chat. + If the first message is a system message, the content is appended to it. + + Args: + content: The content to be injected. + + Returns: + The updated system message. + """ if len(self.messages) == 0 or self.messages[0].role != "system": self.messages.insert(0, Message(role="system", content=content)) elif self.messages[0].role == "system": @@ -104,90 +260,368 @@ def inject_system_content(self, content: str) -> Message: return self.messages[0] def inject_tool_prompt(self, tools: t.Sequence[Tool]) -> None: + """ + Injects a default tool use prompt into the system prompt. + + Args: + tools: A sequence of Tool objects. 
+ """ call_format = ToolCalls.xml_example() tool_description_list = ToolDescriptionList(tools=[t.get_description() for t in tools]) tool_system_prompt = system_tool_extension(call_format, tool_description_list.to_pretty_xml()) self.inject_system_content(tool_system_prompt) -# Passed the next message, returns whether or not to continue -# and an optional list of messages to append before continuing -UntilCallback = t.Callable[[Message], tuple[bool, list[Message]]] +# Callbacks for pending chat + + +class UntilMessageCallback(t.Protocol): + def __call__(self, message: Message) -> tuple[bool, list[Message]]: + """ + Passed the next message, returns whether or not to continue and an + optional list of messages to append before continuing. + """ + ... + + +@runtime_checkable +class ThenChatCallback(t.Protocol): + def __call__(self, chat: Chat) -> Chat | None: + """ + Passed a finalized chat to process and can return a new chat to replace it. + """ + ... + + +@runtime_checkable +class AsyncThenChatCallback(t.Protocol): + async def __call__(self, chat: Chat) -> Chat | None: + """ + async variant of the [rigging.chat.ThenChatCallback][] protocol. + """ + ... + + +@runtime_checkable +class MapChatCallback(t.Protocol): + def __call__(self, chats: list[Chat]) -> list[Chat]: + """ + Passed a finalized chats to process. Can replace chats in the pipeline by returning + a new chat object. + """ + ... + + +@runtime_checkable +class AsyncMapChatCallback(t.Protocol): + async def __call__(self, chats: list[Chat]) -> list[Chat]: + """ + async variant of the [rigging.chat.MapChatCallback][] protocol. + """ + ... 
+ + +PostRunCallbacks = ThenChatCallback | AsyncThenChatCallback | MapChatCallback | AsyncMapChatCallback + +MessageProducer = t.Generator[t.Sequence[Message], None, None] +MessagesProducer = t.Generator[t.Sequence[t.Sequence[Message]], None, None] + +# Helper classes to manage complexity inside the run functions + + +@dataclass +class RunState: + messages: list[Message] + params: "GenerateParams" + processor: t.Generator[list[Message], Message, list[Message]] + chat: Chat | None = None + done: bool = False + + +@dataclass +class BatchRunState: + inputs: list[Message] + messages: list[Message] + params: "GenerateParams" + processor: t.Generator[list[Message], Message, list[Message]] + chat: Chat | None = None + done: bool = False class PendingChat: - def __init__(self, generator: "Generator", messages: t.Sequence[Message], params: "GenerateParams"): + """ + Represents a pending chat that can be modified and executed. + """ + + def __init__( + self, generator: "Generator", messages: t.Sequence[Message], params: t.Optional["GenerateParams"] = None + ): self.generator: "Generator" = generator + """The generator object responsible for generating the chat.""" self.chat: Chat = Chat(messages, pending=self) + """The chat object representing the conversation.""" + self.params = params + """The parameters for generating messages.""" + self.metadata: dict[str, t.Any] = {} + """Additional metadata associated with the chat.""" # (callback, attempt_recovery, drop_dialog, max_rounds) - self.until_callbacks: list[tuple[UntilCallback, bool, bool, int]] = [] + self.until_callbacks: list[tuple[UntilMessageCallback, bool, bool, int]] = [] self.until_types: list[type[Model]] = [] self.until_tools: list[Tool] = [] self.inject_tool_prompt: bool = True self.force_tool: bool = False + self.post_run_callbacks: list[PostRunCallbacks] = [] + # self.producer: MessageProducer | None = None - self.params = params + def with_(self, params: t.Optional["GenerateParams"] = None, **kwargs: t.Any) -> 
"PendingChat": + """ + Assign specific generation parameter overloads for this chat. + + Note: + This will trigger a `clone` if overload params have already been set. - def overload(self, **kwargs: t.Any) -> "PendingChat": + Args: + params: The parameters to set for the chat. + **kwargs: An alternative way to pass parameters as keyword arguments. + + Returns: + A new instance of PendingChat with the updated parameters. + """ from rigging.generator import GenerateParams - return self.with_params(GenerateParams(**kwargs)) + if params is None: + params = GenerateParams(**kwargs) + + if self.params is not None: + new = self.clone() + new.params = self.params.merge_with(params) + return new - def with_params(self, params: "GenerateParams") -> "PendingChat": - if params is not None: - self.params = params + self.params = params return self def add( self, messages: t.Sequence[MessageDict] | t.Sequence[Message] | MessageDict | Message | str ) -> "PendingChat": - message_list: list[Message] = ( - [Message.fit(messages)] - if not isinstance(messages, t.Sequence) or isinstance(messages, str) - else Message.fit_list(messages) - ) + """ + Appends new message(s) to the internal chat before generation. + + Note: + If the last message in the chat is the same role as the first new message, + the content will be appended. instead of a new message being created. + + Args: + messages: The messages to be added to the chat. It can be a single message or a sequence of messages. + + Returns: + The updated PendingChat object. 
+ """ + message_list = Message.fit_as_list(messages) # If the last message is the same role as the first new message, append to it - if self.chat.next_messages and self.chat.next_messages[-1].role == message_list[0].role: - self.chat.next_messages[-1].content += "\n" + message_list[0].content + if self.chat.all and self.chat.all[-1].role == message_list[0].role: + self.chat.all[-1].content += "\n" + message_list[0].content message_list = message_list[1:] else: - self.chat.next_messages += message_list + self.chat.generated += message_list return self def fork( self, messages: t.Sequence[MessageDict] | t.Sequence[Message] | MessageDict | Message | str ) -> "PendingChat": + """ + Creates a new instance of `PendingChat` by forking the current chat and adding the specified messages. + + This is a convenience method for calling `clone().add(messages)`. + + Args: + messages: A sequence of messages or a single message to be added to the new chat. + + Returns: + A new instance the pending chat with the specified messages added. + """ return self.clone().add(messages) - def continue_( - self, messages: t.Sequence[MessageDict] | t.Sequence[Message] | MessageDict | Message | str - ) -> "PendingChat": - return self.fork(messages) + def clone(self, *, only_messages: bool = False) -> "PendingChat": + """ + Creates a clone of the current `PendingChat` instance. - def clone(self) -> "PendingChat": + Args: + only_messages: If True, only the messages will be cloned. + If False (default), the entire `PendingChat` instance will be cloned + including until callbacks, types, and tools. + + Returns: + A new instance of `PendingChat` that is a clone of the current instance. 
+ """ new = PendingChat(self.generator, [], self.params) new.chat = self.chat.clone() + if not only_messages: + new.until_callbacks = self.until_callbacks.copy() + new.until_types = self.until_types.copy() + new.until_tools = self.until_tools.copy() + new.inject_tool_prompt = self.inject_tool_prompt + new.force_tool = self.force_tool + new.metadata = deepcopy(self.metadata) return new + def meta(self, **kwargs: t.Any) -> "PendingChat": + """ + Updates the metadata of the chat with the provided key-value pairs. + + Args: + **kwargs: Key-value pairs representing the metadata to be updated. + + Returns: + The updated chat object. + """ + self.metadata.update(kwargs) + return self + + def then(self, callback: ThenChatCallback | AsyncThenChatCallback) -> "PendingChat": + """ + Registers a callback to be executed after the generation process completes. + + Note: + Returning a Chat object from the callback will replace the current chat. + for the remainder of the callbacks + return value of `run()`. This is + optional. + + Warning: + If you implement an async callback, you must use the async variant of the + run methods when executing the generation process. + + ``` + def process(chat: Chat) -> Chat | None: + ... + + pending.then(process).run() + ``` + + Args: + callback: The callback function to be executed. + + Returns: + The current instance of the chat. + """ + self.post_run_callbacks.append(callback) + return self + + def map(self, callback: MapChatCallback | AsyncMapChatCallback) -> "PendingChat": + """ + Registers a callback to be executed after the generation process completes. + + Note: + You must return a list of Chat objects from the callback which will + represent the state of chats for the remainder of the callbacks and return. + + Warning: + If you implement an async callback, you must use the async variant of the + run methods when executing the generation process. + + ``` + def process(chats: list[Chat]) -> list[Chat]: + ... 
+ + pending.map(process).run() + ``` + + Args: + callback: The callback function to be executed. + + Returns: + The current instance of the chat. + """ + self.post_run_callbacks.append(callback) + return self + + # def from_(self, producer: MessageProducer) -> "PendingChat": + # """ + # Adds a generator to the chat to produce messages. + + # Args: + # producer: The generator that produces messages. + + # Returns: + # The current instance of the chat. + + # Raises: + # ValueError: If a producer has already been set. + # """ + # if self.producer is not None: + # raise ValueError("A producer has already been set") + # self.producer = producer + # return self + def apply(self, **kwargs: str) -> "PendingChat": + """ + Clones this pending chat and calls [rigging.chat.Chat.apply][] with the given keyword arguments. + + Args: + **kwargs: Keyword arguments to be applied to the chat. + + Returns: + A new instance of PendingChat with the applied arguments. + """ new = self.clone() new.chat.apply(**kwargs) return new def apply_to_all(self, **kwargs: str) -> "PendingChat": + """ + Clones this pending chat and calls [rigging.chat.Chat.apply_to_all][] with the given keyword arguments. + + Args: + **kwargs: Keyword arguments to be applied to the chat. + + Returns: + A new instance of PendingChat with the applied arguments. + """ new = self.clone() new.chat.apply_to_all(**kwargs) return new def until( self, - callback: UntilCallback, + callback: UntilMessageCallback, *, attempt_recovery: bool = False, drop_dialog: bool = True, max_rounds: int = DEFAULT_MAX_ROUNDS, ) -> "PendingChat": + """ + Registers a callback to participate in validating the generation process. 
+ + ```python + # Takes the next message being generated, and returns whether or not to continue + # generating new messages in addition to a list of messages to append before continuing + + def callback(message: Message) -> tuple[bool, list[Message]]: + if is_valid(message): + return (False, [message]) + else: + return (True, [message, ...]) + + pending.until(callback).run() + ``` + + Note: + In general, your callback function should always include the message that was passed to it. + + Whether these messages get used or discarded in the next round depends on `attempt_recovery`. + + Args: + callback: The callback function to be executed. + attempt_recovery: Whether to attempt recovery by continuing to append prior messages + before the next round of generation. + drop_dialog: Whether to drop the intermediate dialog of recovery before returning + the final chat back to the caller. + max_rounds: The maximum number of rounds to attempt generation + callbacks + before giving uop. + + Returns: + The current instance of the chat. + """ self.until_callbacks.append((callback, attempt_recovery, drop_dialog, max_rounds)) return self @@ -201,6 +635,24 @@ def using( max_rounds: int = DEFAULT_MAX_ROUNDS, inject_prompt: bool | None = None, ) -> "PendingChat": + """ + Adds a tool or a sequence of tools to participate in the generation process. + + Args: + tool: The tool or sequence of tools to be added. + force: Whether to force the use of the tool(s) at least once. + attempt_recovery: Whether to attempt recovery if the tool(s) fail by providing + validation feedback to the model before the next round. + drop_dialog: Whether to drop the intermediate dialog of recovery efforts + before returning the final chat to the caller. + max_rounds: The maximum number of rounds to attempt recovery. + inject_prompt: Whether to inject the tool guidance prompt into a + system message.and will override self.inject_tool_prompt if provided. + + Returns: + The updated PendingChat object. 
+ + """ self.until_tools += tool if isinstance(tool, t.Sequence) else [tool] self.inject_tool_prompt = inject_prompt or self.inject_tool_prompt self.force_tool = force @@ -217,34 +669,49 @@ def using( def until_parsed_as( self, - types: type[ModelT] | t.Sequence[type[ModelT]], - *, + *types: type[ModelT], attempt_recovery: bool = False, drop_dialog: bool = True, max_rounds: int = DEFAULT_MAX_ROUNDS, ) -> "PendingChat": - self.until_types += types if isinstance(types, t.Sequence) else [types] + """ + Adds the specified types to the list of types which should successfully parse + before the generation process completes. + + Args: + *types: The type or types of models to wait for. + attempt_recovery: Whether to attempt recovery if parsing fails by providing + validation feedback to the model before the next round. + drop_dialog: Whether to drop the intermediate dialog of recovery efforts + before returning the final chat to the caller. + max_rounds: The maximum number of rounds to try to parse + successfully. + + Returns: + The updated PendingChat object. 
+ """ + self.until_types += types if next((c for c in self.until_callbacks if c[0] == self._until_parse_callback), None) is None: self.until_callbacks.append((self._until_parse_callback, attempt_recovery, drop_dialog, max_rounds)) return self def _until_tools_callback(self, message: Message) -> tuple[bool, list[Message]]: - next_messages: list[Message] = [message] + generated: list[Message] = [message] try: tool_calls = message.try_parse(ToolCalls) except ValidationError as e: - next_messages.append(Message.from_model(ValidationErrorModel(content=e))) - return (True, next_messages) + generated.append(Message.from_model(ValidationErrorModel(content=e))) + return (True, generated) if tool_calls is None: if self.force_tool: logger.debug("No tool calls or types, returning error") - next_messages.append(Message.from_model(SystemErrorModel(content="You must use a tool"))) + generated.append(Message.from_model(SystemErrorModel(content="You must use a tool"))) else: logger.debug("No tool calls or types, returning message") - return (self.force_tool, next_messages) + return (self.force_tool, generated) self.force_tool = False @@ -265,21 +732,21 @@ def _until_tools_callback(self, message: Message) -> tuple[bool, list[Message]]: tool_results.append(tool(call)) if errors: - next_messages.append(Message.from_model(errors, suffix="Rewrite your message with all the required tags.")) + generated.append(Message.from_model(errors, suffix="Rewrite your message with all the required tags.")) else: - next_messages.append(Message.from_model(ToolResults(results=tool_results))) + generated.append(Message.from_model(ToolResults(results=tool_results))) - return (True, next_messages) + return (True, generated) def _until_parse_callback(self, message: Message) -> tuple[bool, list[Message]]: should_continue: bool = False - next_messages: list[Message] = [message] + generated: list[Message] = [message] try: - message.parse_many(self.until_types) + message.parse_many(*self.until_types) except 
ValidationError as e: should_continue = True - next_messages.append( + generated.append( Message.from_model( ValidationErrorModel(content=e), suffix="Rewrite your entire message with all the required elements.", @@ -287,23 +754,23 @@ def _until_parse_callback(self, message: Message) -> tuple[bool, list[Message]]: ) except Exception as e: should_continue = True - next_messages.append( + generated.append( Message.from_model( SystemErrorModel(content=e), suffix="Rewrite your entire message with all the required elements." ) ) - return (should_continue, next_messages) + return (should_continue, generated) def _until( self, - messages: list[Message], - callback: UntilCallback, + message: Message, + callback: UntilMessageCallback, attempt_recovery: bool, drop_dialog: bool, max_rounds: int, - ) -> list[Message]: - should_continue, step_messages = callback(messages[-1]) + ) -> t.Generator[list[Message], Message, list[Message]]: + should_continue, step_messages = callback(message) if not should_continue: return step_messages @@ -311,9 +778,9 @@ def _until( for _ in range(max_rounds): logger.trace( - f"_until({callback.__name__}) round {_ + 1}/{max_rounds} (attempt_recovery={attempt_recovery})" + f"_until({callback.__call__.__name__}) round {_ + 1}/{max_rounds} (attempt_recovery={attempt_recovery})" ) - next_message = self.generator.complete(messages[:-1] + running_messages, self.params) + next_message = yield running_messages should_continue, step_messages = callback(next_message) logger.trace(f" |- returned {should_continue} with {len(step_messages)} new messages)") @@ -326,8 +793,50 @@ def _until( logger.warning(f"Exhausted max rounds ({max_rounds})") raise ExhaustedMaxRoundsError(max_rounds) - def _execute(self) -> list[Message]: + # TODO: Much like the PendingCompletion code, it's opaque + # exactly how multiple callbacks should be blended together + # when generating. 
I think we should look at limiting it to + # one callback in total, but I'll leave the behavior as is + # for now with the knowledge that behavior might be a bit + # unpredictable. + def _process(self) -> t.Generator[list[Message], Message, list[Message]]: + first_response = yield [] + new_messages = [first_response] + for callback, reset_between, drop_internal, max_rounds in self.until_callbacks: + generated = yield from self._until(new_messages[-1], callback, reset_between, drop_internal, max_rounds) + new_messages = new_messages[:-1] + generated + return new_messages + + def _post_run(self, chats: list[Chat]) -> list[Chat]: + for callback in self.post_run_callbacks: + if isinstance(callback, ThenChatCallback): + chats = [callback(chat) or chat for chat in chats] + elif isinstance(callback, MapChatCallback): + chats = callback(chats) + + return chats + + async def _apost_run(self, chats: list[Chat]) -> list[Chat]: + if not all( + isinstance(callback, AsyncThenChatCallback | AsyncMapChatCallback) for callback in self.post_run_callbacks + ): + raise ValueError("Cannot use async then()/map() callbacks inside a non-async run call") + + for callback in self.post_run_callbacks: + if isinstance(callback, AsyncThenChatCallback): + updated = await asyncio.gather(*[callback(chat) for chat in chats]) + chats = [updated[i] or chat for i, chat in enumerate(chats)] + elif isinstance(callback, AsyncMapChatCallback): + chats = await callback(chats) + + return chats + + def _pre_run(self) -> None: if self.until_tools: + if self.inject_tool_prompt: + self.chat.inject_tool_prompt(self.until_tools) + self.inject_tool_prompt = False + # TODO: This can cause issues when certain APIs do not return # the stop sequence as part of the response. 
This behavior # seems like a larger issue than the model continuining after @@ -335,57 +844,248 @@ def _execute(self) -> list[Message]: # # self.params.stop = [ToolCalls.xml_end_tag()] - if self.inject_tool_prompt: - self.chat.inject_tool_prompt(self.until_tools) + def _fit_params( + self, count: int, params: t.Sequence[t.Optional["GenerateParams"] | None] | None = None + ) -> list["GenerateParams"]: + from rigging.generator import GenerateParams - new_messages: list[Message] = [self.generator.complete(self.chat.all, self.params)] + params = [None] * count if params is None else list(params) + if len(params) != count: + raise ValueError(f"The number of params must be {count}") + if self.params is not None: + params = [self.params.merge_with(p) for p in params] + return [(p or GenerateParams()) for p in params] - for callback, reset_between, drop_internal, max_rounds in self.until_callbacks: - next_messages = self._until( - self.chat.all + new_messages, callback, reset_between, drop_internal, max_rounds - ) - new_messages = new_messages[:-1] + next_messages + def _fit_many( + self, + count: int, + many: t.Sequence[t.Sequence[Message]] | t.Sequence[Message] | t.Sequence[MessageDict] | t.Sequence[str], + ) -> list[list[Message]]: + many = [Message.fit_as_list(m) for m in many] + if len(many) < count: + if len(many) != 1: + raise ValueError(f"Can't fit many of length {len(many)} to {count}") + many = many * count + return many - return new_messages + # TODO: There is an embarrassing amount of code duplication here + # between the async and non-async methods, batch and many, etc. - @t.overload - def run_with( - self, - messages: t.Sequence[MessageDict] | t.Sequence[Message] | MessageDict | Message | str, - count: t.Literal[None] = None, - ) -> Chat: - ... + # Single messages + + def run(self) -> Chat: + """ + Execute the generation process to produce the final chat. + + Returns: + The generated Chat. 
+ """ + return self.run_many(1)[0] - @t.overload - def run_with( + async def arun(self) -> Chat: + """async variant of the [rigging.chat.PendingChat.run][] method.""" + return (await self.arun_many(1))[0] + + __call__ = run + + # Many messages + + def run_many( self, - messages: t.Sequence[MessageDict] | t.Sequence[Message] | MessageDict | Message | str, count: int, + *, + params: t.Sequence[t.Optional["GenerateParams"]] | None = None, + skip_failed: bool = False, ) -> list[Chat]: - ... + """ + Executes the generation process multiple times with the same inputs. + + Parameters: + count: The number of times to execute the generation process. + params: A sequence of parameters to be used for each execution. + skip_failed: Enable to ignore any max rounds errors and return only successful chats. + + Returns: + A list of generatated Chats. + """ + states: list[RunState] = [RunState([], p, self._process()) for p in self._fit_params(count, params)] + _ = [next(state.processor) for state in states] + + pending_states = states + while pending_states: + inbounds = self.generator.generate_messages( + [s.messages for s in pending_states], [s.params for s in pending_states], prefix=self.chat.all + ) - def run_with( + for inbound, state in zip(inbounds, pending_states, strict=True): + try: + state.messages = state.processor.send(inbound) + except StopIteration as stop: + state.done = True + state.chat = Chat( + self.chat.all, + t.cast(list[Message], stop.value), + generator=self.generator, + metadata=self.metadata, + params=state.params, + ) + except ExhaustedMaxRoundsError: + if not skip_failed: + raise + state.done = True + + pending_states = [s for s in pending_states if not s.done] + + return self._post_run([s.chat for s in states if s.chat is not None]) + + async def arun_many( self, - messages: t.Sequence[MessageDict] | t.Sequence[Message] | MessageDict | Message | str, - count: int | None = None, - ) -> Chat | list[Chat]: - return self.add(messages).run(count) - - 
@t.overload - def run(self, count: t.Literal[None] = None) -> Chat: - ... - - @t.overload - def run(self, count: int) -> list[Chat]: - ... + count: int, + *, + params: t.Sequence[t.Optional["GenerateParams"]] | None = None, + skip_failed: bool = False, + ) -> list[Chat]: + """async variant of the [rigging.chat.PendingChat.run_many][] method.""" + states: list[RunState] = [RunState([], p, self._process()) for p in self._fit_params(count, params)] + _ = [next(state.processor) for state in states] + + pending_states = states + while pending_states: + inbounds = await self.generator.agenerate_messages( + [s.messages for s in pending_states], [s.params for s in pending_states], prefix=self.chat.all + ) - def run(self, count: int | None = None) -> Chat | list[Chat]: - if count is not None: - return self.run_many(count) - else: - return Chat(self.chat.all, self._execute(), pending=self) + for inbound, state in zip(inbounds, pending_states, strict=True): + try: + state.messages = state.processor.send(inbound) + except StopIteration as stop: + state.done = True + state.chat = Chat( + self.chat.all, + t.cast(list[Message], stop.value), + generator=self.generator, + metadata=self.metadata, + params=state.params, + ) + except ExhaustedMaxRoundsError: + if not skip_failed: + raise + state.done = True + + pending_states = [s for s in pending_states if not s.done] + + return await self._apost_run([s.chat for s in states if s.chat is not None]) + + # Batch messages + + def run_batch( + self, + many: t.Sequence[t.Sequence[Message]] | t.Sequence[Message] | t.Sequence[MessageDict] | t.Sequence[str], + params: t.Sequence[t.Optional["GenerateParams"]] | None = None, + *, + skip_failed: bool = False, + ) -> list[Chat]: + """ + Executes the generation process accross multiple input messages. + + Note: + Anything already in this pending chat will be used as the `prefix` parameter + to [rigging.generator.Generator.generate_messages][]. 
+ + Parameters: + many: A sequence of sequences of messages to be generated. + params: A sequence of parameters to be used for each set of messages. + skip_failed: Enable to ignore any max rounds errors and return only successful chats. + + Returns: + A list of generatated Chats. + """ + if isinstance(many, str | dict): + raise ValueError("many must be a sequence, even if it only contains one item") + + count = max(len(many), len(params) if params is not None else 0) + many = self._fit_many(count, many) + params = self._fit_params(count, params) + + states: list[BatchRunState] = [ + BatchRunState(m, [], p, self._process()) for m, p in zip(many, params, strict=True) + ] + _ = [next(state.processor) for state in states] + + pending_states = states + while pending_states: + inbounds = self.generator.generate_messages( + [s.inputs + s.messages for s in pending_states], + [s.params for s in pending_states], + prefix=self.chat.all, + ) - def run_many(self, count: int) -> list[Chat]: - return [Chat(self.chat.all, self._execute(), pending=self) for _ in range(count)] + for inbound, state in zip(inbounds, pending_states, strict=True): + try: + state.messages = state.processor.send(inbound) + except StopIteration as stop: + state.done = True + state.chat = Chat( + self.chat.all, + t.cast(list[Message], stop.value), + generator=self.generator, + metadata=self.metadata, + params=state.params, + ) + except ExhaustedMaxRoundsError: + if not skip_failed: + raise + state.done = True + + pending_states = [s for s in pending_states if not s.done] + + return self._post_run([s.chat for s in states if s.chat is not None]) + + async def arun_batch( + self, + many: t.Sequence[t.Sequence[Message]] | t.Sequence[Message] | t.Sequence[MessageDict] | t.Sequence[str], + params: t.Sequence[t.Optional["GenerateParams"]] | None = None, + *, + skip_failed: bool = False, + ) -> list[Chat]: + """async variant of the [rigging.chat.PendingChat.run_batch][] method.""" + if isinstance(many, str | dict): 
+ raise ValueError("many must be a sequence, even if it only contains one item") + + count = max(len(many), len(params) if params is not None else 0) + many = self._fit_many(count, many) + params = self._fit_params(count, params) + + states: list[BatchRunState] = [ + BatchRunState(m, [], p, self._process()) for m, p in zip(many, params, strict=True) + ] + _ = [next(state.processor) for state in states] + + pending_states = states + while pending_states: + inbounds = await self.generator.agenerate_messages( + [s.inputs + s.messages for s in pending_states], + [s.params for s in pending_states], + prefix=self.chat.all, + ) - __call__ = run + for inbound, state in zip(inbounds, pending_states, strict=True): + try: + state.messages = state.processor.send(inbound) + except StopIteration as stop: + state.done = True + state.chat = Chat( + self.chat.all, + t.cast(list[Message], stop.value), + generator=self.generator, + metadata=self.metadata, + params=state.params, + ) + except ExhaustedMaxRoundsError: + if not skip_failed: + raise + state.done = True + + pending_states = [s for s in pending_states if not s.done] + + return await self._apost_run([s.chat for s in states if s.chat is not None]) diff --git a/rigging/completion.py b/rigging/completion.py new file mode 100644 index 0000000..45a82c0 --- /dev/null +++ b/rigging/completion.py @@ -0,0 +1,603 @@ +""" +Completions work with isolated strings of text pre and post generation. 
+""" + +import string +import typing as t +from copy import deepcopy +from dataclasses import dataclass +from datetime import datetime +from uuid import UUID, uuid4 + +from loguru import logger +from pydantic import ( + BaseModel, + ConfigDict, + Field, + computed_field, +) + +from rigging.error import ExhaustedMaxRoundsError +from rigging.model import ( + Model, + ModelT, +) +from rigging.parsing import parse_many + +if t.TYPE_CHECKING: + from rigging.generator import GenerateParams, Generator + +DEFAULT_MAX_ROUNDS = 5 + +# TODO: Chats and Completions share a lot of structure and code. +# Ideally we should build out a base class which they both inherit from. + + +class Completion(BaseModel): + """ + Represents a completed text generation. + """ + + model_config = ConfigDict(arbitrary_types_allowed=True) + + uuid: UUID = Field(default_factory=uuid4) + """The unique identifier.""" + timestamp: datetime = Field(default_factory=datetime.now, repr=False) + """The timestamp when the completion was created.""" + text: str + """The original text.""" + generated: str + """The generated text.""" + metadata: dict[str, t.Any] = Field(default_factory=dict) + """Additional metadata for the chat.""" + + pending: t.Optional["PendingCompletion"] = Field(None, exclude=True, repr=False) + """The pending completion associated with this completion.""" + + @computed_field(repr=False) + def generator_id(self) -> str | None: + """The identifier of the generator used to create the completion""" + if self.pending is not None: + return self.pending.generator.to_identifier(self.pending.params) + return None + + def __init__( + self, + text: str, + generated: str, + pending: t.Optional["PendingCompletion"] = None, + **kwargs: t.Any, + ): + """ + Initialize a Chat object. + + Args: + text: The original text. + generated: The generated text. + pending: The pending completion associated with this completion. + **kwargs: Additional keyword arguments (typically used for serialization). 
+ """ + from rigging.generator import get_generator + + if "generator_id" in kwargs and pending is None: + generator = get_generator(kwargs.pop("generator_id")) + pending = generator.complete(text) + + super().__init__( + text=text, + generated=generated, + pending=pending, + **kwargs, + ) + + def __len__(self) -> int: + return len(self.text) + len(self.generated) + + @property + def all(self) -> str: + """Returns both the text and the generation.""" + return self.text + self.generated + + def restart(self, *, generator: t.Optional["Generator"] = None, include_all: bool = False) -> "PendingCompletion": + """ + Attempt to convert back to a PendingCompletion for further generation. + + Args: + generator: The generator to use for the restarted chat. Otherwise + the generator from the original PendingCompletion will be used. + include_all: Whether to include the generation before the next round. + + Returns: + The restarted completion. + + Raises: + ValueError: If the completion was not created with a PendingCompletion and no generator is provided. + """ + + text = self.all if include_all else self.text + if generator is not None: + return generator.complete(text) + elif self.pending is None: + raise ValueError("Cannot restart Completion that was not created with a PendingCompletion") + return PendingCompletion(self.pending.generator, text, self.pending.params) + + def fork(self, text: str) -> "PendingCompletion": + """ + Forks the completion by creating calling [rigging.completion.Completion.restart][] and appends the specified text. + + Args: + text: The text to append. + + Returns: + A new instance of a pending competion with the specified messages added. 
+ """ + return self.restart().add(text) + + def clone(self) -> "Completion": + """Creates a deep copy of the chat.""" + return Completion(self.text, self.generated, self.pending) + + +# Passed the next message, returns whether or not to continue +# and an optional list of messages to append before continuing +UntilCompletionCallback = t.Callable[[str], bool] + +ThenCompletionCallback = t.Callable[[Completion], Completion | None] + + +@dataclass +class RunState: + text: str + params: "GenerateParams" + processor: t.Generator[None, str, str] + completion: Completion | None = None + done: bool = False + + +class PendingCompletion: + """ + Represents a pending completion that can be modified and executed. + """ + + def __init__(self, generator: "Generator", text: str, params: t.Optional["GenerateParams"] = None): + self.generator: "Generator" = generator + """The generator object responsible for generating the completion.""" + self.text = text + """The text to be completed.""" + self.params = params + """The parameters for generating the completion.""" + self.metadata: dict[str, t.Any] = {} + """Additional metadata associated with the completion.""" + + # (callback, all_text, max_rounds) + self.until_callbacks: list[tuple[UntilCompletionCallback, bool, int]] = [] + self.until_types: list[type[Model]] = [] + self.then_callbacks: list[ThenCompletionCallback] = [] + + def with_(self, params: t.Optional["GenerateParams"] = None, **kwargs: t.Any) -> "PendingCompletion": + """ + Assign specific generation parameter overloads for this completion. + + Note: + This will trigger a `clone` if overload params have already been set. + + Args: + params: The parameters to set for the completion. + **kwargs: An alternative way to pass parameters as keyword arguments. + + Returns: + The current (or cloned) instance of the completion. 
+ """ + from rigging.generator import GenerateParams + + if params is None: + params = GenerateParams(**kwargs) + + if self.params is not None: + new = self.clone() + new.params = params + return new + + self.params = params + return self + + def then(self, callback: ThenCompletionCallback) -> "PendingCompletion": + """ + Registers a callback to be executed after the generation process completes. + + Note: + Returning a Completion object from the callback will replace the current completion. + for the remainder of the callbacks + return value of `run()`. + + ``` + def process(chat: Completion) -> Completion | None: + ... + + pending.then(process).run() + ``` + + Args: + callback: The callback function to be executed. + + Returns: + The current instance of the pending completion. + """ + self.then_callbacks.append(callback) + return self + + def add(self, text: str) -> "PendingCompletion": + """ + Appends new text to the internal text before generation. + + Args: + text: The text to be added to the completion. + + Returns: + The updated PendingCompletion object. + """ + self.text += text + return self + + def fork(self, text: str) -> "PendingCompletion": + """ + Creates a new instance of `PendingCompletion` by forking the current completion and adding the specified text. + + This is a convenience method for calling `clone().add(text)`. + + Args: + text: The text to be added to the new completion. + + Returns: + A new instance of `PendingCompletion` with the specified text added. + """ + return self.clone().add(text) + + def clone(self, *, only_text: bool = False) -> "PendingCompletion": + """ + Creates a clone of the current `PendingCompletion` instance. + + Args: + only_text: If True, only the text will be cloned. + If False (default), the entire `PendingCompletion` instance will be cloned + including until callbacks and types. + + Returns: + A new instance of `PendingCompletion` that is a clone of the current instance. 
+ """ + new = PendingCompletion(self.generator, self.text, self.params) + if not only_text: + new.until_callbacks = self.until_callbacks.copy() + new.until_types = self.until_types.copy() + new.metadata = deepcopy(self.metadata) + return new + + def meta(self, **kwargs: t.Any) -> "PendingCompletion": + """ + Updates the metadata of the completion with the provided key-value pairs. + + Args: + **kwargs: Key-value pairs representing the metadata to be updated. + + Returns: + The updated completion object. + """ + self.metadata.update(kwargs) + return self + + def apply(self, **kwargs: str) -> "PendingCompletion": + """ + Applies keyword arguments to the text using string template substitution. + + Args: + **kwargs: Keyword arguments to be applied to the text. + + Returns: + A new instance of PendingCompletion with the applied arguments. + """ + new = self.clone() + template = string.Template(self.text) + new.text = template.safe_substitute(**kwargs) + return new + + def until( + self, + callback: UntilCompletionCallback, + *, + use_all_text: bool = False, + max_rounds: int = DEFAULT_MAX_ROUNDS, + ) -> "PendingCompletion": + """ + Registers a callback to participate in validating the generation process. + + ```python + # Takes the generated text, and returns whether or not to retry generation. + + def callback(text: str) -> bool: + if is_valid(text): + return False + else: + return True + + pending.until(callback).run() + ``` + + Args: + callback: The callback function to be executed. + use_all_text: Whether to pass the entire text (including prompt) to the callback. + + max_rounds: The maximum number of rounds to attempt generation + callbacks + before giving up. + + Returns: + The current instance of the completion. 
+ """ + self.until_callbacks.append((callback, use_all_text, max_rounds)) + return self + + def until_parsed_as( + self, + *types: type[ModelT], + use_all_text: bool = False, + max_rounds: int = DEFAULT_MAX_ROUNDS, + ) -> "PendingCompletion": + """ + Adds the specified types to the list of types which should successfully parse + before the generation process completes. + + Args: + *types: The type or types of models to wait for. + use_all_text: Whether to pass the entire text (including prompt) to the parser. + + max_rounds: The maximum number of rounds to try to parse + successfully. + + Returns: + The updated PendingCompletion object. + """ + self.until_types += types + if next((c for c in self.until_callbacks if c[0] == self._until_parse_callback), None) is None: + self.until_callbacks.append((self._until_parse_callback, use_all_text, max_rounds)) + + return self + + def _until_parse_callback(self, text: str) -> bool: + try: + parse_many(text, *self.until_types) + except Exception: + return True + return False + + def _then(self, chat: Completion) -> Completion: + # TODO: Adding async support here would be nice + for callback in self.then_callbacks: + chat = callback(chat) or chat + return chat + + def _fit_params( + self, count: int, params: t.Sequence[t.Optional["GenerateParams"] | None] | None = None + ) -> list["GenerateParams"]: + from rigging.generator import GenerateParams + + params = [None] * count if params is None else list(params) + if len(params) != count: + raise ValueError(f"The number of params must be {count}") + if self.params is not None: + params = [self.params.merge_with(p) for p in params] + return [(p or GenerateParams()) for p in params] + + # TODO: It's opaque exactly how we should blend multiple + # until callbacks together, so here is the current implementation: + # + # - We take the lowest max_rounds from all until_callbacks + # - Each loop, we let every callback run, if any tell us to retry, we do + # - If we leave the loop with 
should_retry still True, we raise an error + # - Assuming every should_retry is False, we break out of the loop and return + + def _process(self) -> t.Generator[None, str, str]: + # If there are no until_callbacks, we can just yield the text + if not self.until_callbacks: + generated = yield + return generated + + lowest_max_rounds = min((c[2] for c in self.until_callbacks), default=1) + + current_round = 0 + should_retry = True + while should_retry and current_round < lowest_max_rounds: + current_round += 1 + generated = yield + for callback, use_all_text, _ in self.until_callbacks: + should_retry = callback(self.text + generated if use_all_text else generated) + if should_retry: + continue + + if should_retry: + logger.warning(f"Exhausted lowest max rounds ({lowest_max_rounds})") + raise ExhaustedMaxRoundsError(lowest_max_rounds) + + return generated + + def run(self) -> Completion: + """ + Execute the generation process to produce the final completion. + + Returns: + The generated Completion. + """ + return self.run_many(1)[0] + + async def arun(self) -> Completion: + """async variant of the [rigging.chat.PendingChat.run][] method.""" + return (await self.arun_many(1))[0] + + __call__ = run + + # Many messages + + def run_many( + self, + count: int, + *, + params: t.Sequence[t.Optional["GenerateParams"]] | None = None, + skip_failed: bool = False, + ) -> list[Completion]: + """ + Executes the generation process multiple times with the same inputs. + + Parameters: + count: The number of times to execute the generation process. + params: A sequence of parameters to be used for each execution. + skip_failed: Enable to ignore any max rounds errors and return only successful completions. + + Returns: + A list of generatated Completions. 
+ """ + states: list[RunState] = [RunState(self.text, p, self._process()) for p in self._fit_params(count, params)] + _ = [next(state.processor) for state in states] + + pending_states = states + while pending_states: + inbounds = self.generator.generate_texts( + [s.text for s in pending_states], [s.params for s in pending_states] + ) + + for inbound, state in zip(inbounds, pending_states, strict=True): + try: + state.processor.send(inbound) + except StopIteration as stop: + state.done = True + state.completion = Completion( + self.text, t.cast(str, stop.value), pending=self, metadata=self.metadata + ) + except ExhaustedMaxRoundsError: + if not skip_failed: + raise + state.done = True + + pending_states = [s for s in pending_states if not s.done] + + return [self._then(s.completion) for s in states if s.completion is not None] + + async def arun_many( + self, + count: int, + *, + params: t.Sequence[t.Optional["GenerateParams"]] | None = None, + skip_failed: bool = False, + ) -> list[Completion]: + """async variant of the [rigging.completion.PendingCompletion.run_many][] method.""" + states: list[RunState] = [RunState(self.text, p, self._process()) for p in self._fit_params(count, params)] + _ = [next(state.processor) for state in states] + + pending_states = states + while pending_states: + inbounds = await self.generator.agenerate_texts( + [s.text for s in pending_states], [s.params for s in pending_states] + ) + + for inbound, state in zip(inbounds, pending_states, strict=True): + try: + state.processor.send(inbound) + except StopIteration as stop: + state.done = True + state.completion = Completion( + self.text, t.cast(str, stop.value), pending=self, metadata=self.metadata + ) + except ExhaustedMaxRoundsError: + if not skip_failed: + raise + state.done = True + + pending_states = [s for s in pending_states if not s.done] + + return [self._then(s.completion) for s in states if s.completion is not None] + + # Batch completions + + def run_batch( + self, + many: 
t.Sequence[str], + params: t.Sequence[t.Optional["GenerateParams"]] | None = None, + *, + skip_failed: bool = False, + ) -> list[Completion]: + """ + Executes the generation process accross multiple input messages. + + Note: + Anything already in this pending completion will be used as the `prefix` parameter + to [rigging.generator.Generator.generate_messages][]. + + Parameters: + many: A sequence of texts to generate with. + params: A sequence of parameters to be used for each text. + skip_failed: Enable to ignore any max rounds errors and return only successful completions. + + Returns: + A list of generatated Completions. + """ + params = self._fit_params(len(many), params) + states: list[RunState] = [RunState(m, p, self._process()) for m, p in zip(many, params, strict=True)] + _ = [next(state.processor) for state in states] + + pending_states = states + while pending_states: + inbounds = self.generator.generate_texts( + [s.text for s in pending_states], + [s.params for s in pending_states], + prefix=self.text, + ) + + for inbound, state in zip(inbounds, pending_states, strict=True): + try: + state.processor.send(inbound) + except StopIteration as stop: + state.done = True + state.completion = Completion( + self.text, t.cast(str, stop.value), pending=self, metadata=self.metadata + ) + except ExhaustedMaxRoundsError: + if not skip_failed: + raise + state.done = True + + pending_states = [s for s in pending_states if not s.done] + + return [self._then(s.completion) for s in states if s.completion is not None] + + async def arun_batch( + self, + many: t.Sequence[str], + params: t.Sequence[t.Optional["GenerateParams"]] | None = None, + *, + skip_failed: bool = False, + ) -> list[Completion]: + """async variant of the [rigging.chat.PendingChat.run_batch][] method.""" + params = self._fit_params(len(many), params) + states: list[RunState] = [RunState(m, p, self._process()) for m, p in zip(many, params, strict=True)] + _ = [next(state.processor) for state in states] + 
+ pending_states = states + while pending_states: + inbounds = await self.generator.agenerate_texts( + [s.text for s in pending_states], + [s.params for s in pending_states], + prefix=self.text, + ) + + for inbound, state in zip(inbounds, pending_states, strict=True): + try: + state.processor.send(inbound) + except StopIteration as stop: + state.done = True + state.completion = Completion( + self.text, t.cast(str, stop.value), pending=self, metadata=self.metadata + ) + except ExhaustedMaxRoundsError: + if not skip_failed: + raise + state.done = True + + pending_states = [s for s in pending_states if not s.done] + + return [self._then(s.completion) for s in states if s.completion is not None] diff --git a/rigging/error.py b/rigging/error.py index 9e6e272..821e743 100644 --- a/rigging/error.py +++ b/rigging/error.py @@ -1,14 +1,33 @@ +""" +We try to avoid creating custom exceptions unless they are necessary. + +We use the built-in and pydantic exceptions as much as possible. +""" + + class ExhaustedMaxRoundsError(Exception): + """ + Raised when the maximum number of rounds is exceeded while generating. + """ + def __init__(self, max_rounds: int): super().__init__(f"Exhausted max rounds ({max_rounds}) while generating") self.max_rounds = max_rounds class InvalidModelSpecifiedError(Exception): + """ + Raised when an invalid identifier is specified when getting a generator. + """ + def __init__(self, model: str): super().__init__(f"Invalid model specified: {model}") class MissingModelError(Exception): + """ + Raised when a model is missing when parsing a message. + """ + def __init__(self, content: str): super().__init__(content) diff --git a/rigging/generator.py b/rigging/generator.py index ee8b154..b9f9036 100644 --- a/rigging/generator.py +++ b/rigging/generator.py @@ -1,11 +1,16 @@ -import abc +""" +Generators produce completions for a given set of messages or text. 
+""" + +import asyncio import typing as t import litellm # type: ignore from loguru import logger -from pydantic import BaseModel, ConfigDict, field_validator +from pydantic import BaseModel, ConfigDict, Field, field_validator -from rigging.chat import PendingChat +from rigging.chat import Chat, PendingChat +from rigging.completion import Completion, PendingCompletion from rigging.error import InvalidModelSpecifiedError from rigging.message import ( Message, @@ -17,6 +22,9 @@ # fix it to prevent confusion litellm.drop_params = True +# Global provider map +g_providers: dict[str, type["Generator"]] = {} + # TODO: Ideally we flex this to support arbitrary # generator params, but we'll limit things @@ -26,17 +34,47 @@ # parallel generation eventually -> need to # update our interfaces to support that class GenerateParams(BaseModel): + """ + Parameters for generating text using a language model. + + These are designed to generally overlap with underlying + APIs like litellm, but will be extended as needed. + + Note: + Use the `extra` field to pass additional parameters to the API. 
+ """ + model_config = ConfigDict(extra="forbid") temperature: float | None = None + """The sampling temperature.""" + max_tokens: int | None = None + """The maximum number of tokens to generate.""" + top_p: float | None = None + """The nucleus sampling probability.""" + stop: list[str] | None = None + """A list of stop sequences to stop generation at.""" + presence_penalty: float | None = None + """The presence penalty.""" + frequency_penalty: float | None = None + """The frequency penalty.""" + api_base: str | None = None + """The base URL for the API.""" + timeout: int | None = None + """The timeout for the API request.""" + seed: int | None = None + """The random seed.""" + + extra: dict[str, t.Any] = Field(default_factory=dict) + """Extra parameters to be passed to the API.""" @field_validator("stop", mode="before") def validate_stop(cls, value: t.Any) -> t.Any: @@ -46,83 +84,316 @@ def validate_stop(cls, value: t.Any) -> t.Any: return value raise ValueError("Stop sequences must be a list or a string separated by ';'") + def merge_with(self, *others: t.Optional["GenerateParams"]) -> "GenerateParams": + """ + Apply a series of parameter overrides to the current instance and return a copy. + + Args: + *others: The parameters to be merged with the current instance's parameters. + Can be multiple and overrides will be applied in order. + + Returns: + The merged parameters instance. + """ + if len(others) == 0 or all(p is None for p in others): + return self + + updates: dict[str, t.Any] = {} + for other in [o for o in others if o is not None]: + other_dict = other.model_dump(exclude_unset=True) + for name, value in other_dict.items(): + if value is not None: + updates[name] = value + + return self.model_copy(update=updates) + + def to_dict(self) -> dict[str, t.Any]: + """ + Convert the parameters to a dictionary. + + Returns: + The parameters as a dictionary. 
+ """ + params = self.model_dump(exclude_unset=True) + if "extra" in params: + params.update(params.pop("extra")) + return params -class Generator(BaseModel, abc.ABC): - model: str - api_key: str | None = None - params: GenerateParams - def _merge_params(self, overloads: GenerateParams) -> dict[str, t.Any]: - params: dict[str, t.Any] = self.params.model_dump(exclude_unset=True) if self.params else {} - for name, value in overloads.model_dump(exclude_unset=True).items(): - if value is not None: - params[name] = value - return params +class Generator(BaseModel): + """ + Base class for all rigging generators. - @abc.abstractmethod - def complete(self, messages: t.Sequence[Message], overloads: GenerateParams) -> Message: - ... + This class provides common functionality and methods for generating completion messages. + + A subclass of this can implement any of the following: + + - `generate_messages`: Process a batch of messages. + - `generate_texts`: Process a batch of texts. + + (In addition to async variants of these functions) + """ + + model: str + """The model name to be used by the generator.""" + api_key: str | None = Field(None, exclude=True) + """The API key used for authentication.""" + params: GenerateParams + """The parameters used for generating completion messages.""" + + def to_identifier(self, params: GenerateParams | None = None) -> str: + """ + Converts the generator instance back into a rigging identifier string. + + This calls [rigging.generator.get_identifier][] with the current instance. + + Args: + params: The generation parameters. + + Returns: + The identifier string. + """ + return get_identifier(self, params) + + def generate_messages( + self, + messages: t.Sequence[t.Sequence[Message]], + params: t.Sequence[GenerateParams], + *, + prefix: t.Sequence[Message] | None = None, + ) -> t.Sequence[Message]: + """ + Generate a batch of messages using the specified parameters. 
+ + Note: + The length of `params` must be the same as the length of `many`. + + Args: + messages: A sequence of sequences of messages. + params: A sequence of GenerateParams objects. + prefix: A sequence of fixed messages to be prefixed before every item of `many`. + + Returns: + A sequence of generated messages. + + Raises: + NotImplementedError: This method is not supported by this generator. + """ + raise NotImplementedError("`generate_messages` is not supported by this generator.") + + async def agenerate_messages( + self, + messages: t.Sequence[t.Sequence[Message]], + params: t.Sequence[GenerateParams], + *, + prefix: t.Sequence[Message] | None = None, + ) -> t.Sequence[Message]: + """async version of [rigging.generator.Generator.generate_messages][]""" + raise NotImplementedError("`agenerate_messages` is not supported by this generator.") + + def generate_texts( + self, + texts: t.Sequence[str], + params: t.Sequence[GenerateParams], + *, + prefix: str | None = None, + ) -> t.Sequence[str]: + """ + Generate a batch of text completions using the generator. + + Note: + This method falls back to looping over the inputs and calling `generate_text` for each item. + + Note: + If supplied, the length of `params` must be the same as the length of `many`. + + Args: + texts: The input texts for generating the batch. + params: Additional parameters for generating each text in the batch. + prefix: A fixed input text to be used as a prefix for all of `many`. + + Returns: + The generated texts. + + Raises: + NotImplementedError: This method is not supported by this generator. 
+ """ + raise NotImplementedError("`generate_texts` is not supported by this generator.") + + async def agenerate_texts( + self, + texts: t.Sequence[str], + params: t.Sequence[GenerateParams], + *, + prefix: str | None = None, + ) -> t.Sequence[str]: + """async version of [rigging.generator.Generator.generate_texts][]""" + raise NotImplementedError("`agenerate_texts` is not supported by this generator.") + + # Helper alternative to chat(generator) -> generator.chat(...) + # + # params seem odd, but mypy doesn't like the TypedDict in a list otherwise @t.overload - def chat(self, messages: t.Sequence[MessageDict], overloads: GenerateParams | None = None) -> PendingChat: + def chat( + self, + messages: t.Sequence[MessageDict], + params: GenerateParams | None = None, + ) -> PendingChat: ... @t.overload - def chat(self, messages: t.Sequence[Message], overloads: GenerateParams | None = None) -> PendingChat: + def chat( + self, + messages: t.Sequence[Message] | MessageDict | Message | str | None = None, + params: GenerateParams | None = None, + ) -> PendingChat: ... def chat( - self, messages: t.Sequence[MessageDict] | t.Sequence[Message], overloads: GenerateParams | None = None + self, + messages: t.Sequence[MessageDict] | t.Sequence[Message] | MessageDict | Message | str | None = None, + params: GenerateParams | None = None, ) -> PendingChat: - return PendingChat(self, Message.fit_list(messages), overloads or GenerateParams()) + """ + Build a pending chat with the given messages and optional params overloads. + Args: + messages: The messages to be sent in the chat. + params: Optional parameters for generating responses. 
-class LiteLLMGenerator(Generator): - def complete(self, messages: t.Sequence[Message], overloads: GenerateParams = GenerateParams()) -> Message: - logger.trace("--- Conversation ---") - logger.trace("\n".join([str(msg) for msg in messages])) - logger.trace("---") - - messages_as_dicts = [message.model_dump() for message in messages] - complete_params = self._merge_params(overloads) - result = litellm.completion(self.model, messages_as_dicts, api_key=self.api_key, **complete_params) - response = result.choices[-1].message.content.strip() - next_message = Message(role="assistant", content=response) + Returns: + Pending chat to run. + """ + return PendingChat(self, Message.fit_as_list(messages) if messages else [], params) - logger.trace("--- Response ---") - logger.trace(str(next_message)) - logger.trace("---") + # Helper alternative to complete(generator) -> generator.complete(...) - return next_message + def complete(self, text: str, params: GenerateParams | None = None) -> PendingCompletion: + """ + Build a pending string completion of the given text with optional param overloads. + + Args: + text: The input text to be completed. + params: The parameters to be used for completion. + + Returns: + The completed text. + """ + return PendingCompletion(self, text, params) + + +@t.overload +def chat( + generator: "Generator", + messages: t.Sequence[MessageDict], + params: GenerateParams | None = None, +) -> PendingChat: + ... + + +@t.overload +def chat( + generator: "Generator", + messages: t.Sequence[Message] | MessageDict | Message | str | None = None, + params: GenerateParams | None = None, +) -> PendingChat: + ... + + +def chat( + generator: "Generator", + messages: t.Sequence[MessageDict] | t.Sequence[Message] | MessageDict | Message | str | None = None, + params: GenerateParams | None = None, +) -> PendingChat: + """ + Creates a pending chat using the given generator, messages, and params. + + Args: + generator: The generator to use for creating the chat. 
+ messages: + The messages to include in the chat. Can be a single message or a sequence of messages. + params: Additional parameters for generating the chat. + + Returns: + Pending chat to run. + """ + return generator.chat(messages, params) + + +def complete( + generator: Generator, + text: str, + params: GenerateParams | None = None, +) -> PendingCompletion: + return generator.complete(text, params) + + +def get_identifier(generator: Generator, params: GenerateParams | None = None) -> str: + """ + Converts the generator instance back into a rigging identifier string. + + Warning: + The `extra` parameter field is not currently supported in identifiers. + + Args: + generator: The generator object. + params: The generation parameters. + + Returns: + The identifier string for the generator. + """ + + provider = next(name for name, klass in g_providers.items() if isinstance(generator, klass)) + identifier = f"{provider}!{generator.model}" + + merged_params = generator.params.merge_with(params) + if merged_params.extra: + logger.warning("Extra parameters are not supported in identifiers.") + merged_params.extra = {} + + params_dict = merged_params.to_dict() + if params_dict: + if "stop" in params_dict: + params_dict["stop"] = ";".join(params_dict["stop"]) + identifier += f",{','.join([f'{k}={v}' for k, v in params_dict.items()])}" + + return identifier def get_generator(identifier: str) -> Generator: """ Get a generator by an identifier string. Uses LiteLLM by default. 
- !,<**kwargs> + Identifier strings are formatted like `!,<**kwargs>` - (provider is optional and defaults to "litellm" if not specified) + (provider is optional andif not specified) - :param identifier: The identifier string to use to get a generator - :return: The generator + Examples: - :raises InvalidModelSpecified: If the identifier is invalid + - "gpt-3.5-turbo" -> `LiteLLMGenerator(model="gpt-3.5-turbo")` + - "litellm!claude-2.1" -> `LiteLLMGenerator(model="claude-2.1")` + - "mistral/mistral-tiny" -> `LiteLLMGenerator(model="mistral/mistral-tiny")` - Examples: - "gpt-3.5-turbo" -> LiteLLMGenerator(model="gpt-3.5-turbo") - "litellm!claude-2.1" -> LiteLLMGenerator(model="claude-2.1") - "mistral/mistral-tiny" -> LiteLLMGenerator(model="mistral/mistral-tiny") + You can also specify arguments to the generator by comma-separating them: + + - "mistral/mistral-medium,max_tokens=1024" + - "gpt-4-0613,temperature=0.9,max_tokens=512" + - "claude-2.1,stop_sequences=Human:;test,max_tokens=100" + + (These get parsed as [rigging.generator.GenerateParams][]) + + Args: + identifier: The identifier string to use to get a generator. - You can also specify arguments to the generator by comma-separating them# - "mistral/mistral-medium,max_tokens=1024" - "gpt-4-0613,temperature=0.9,max_tokens=512" - "claude-2.1,stop_sequences=Human:;test,max_tokens=100" + Returns: + The generator object. - (These get parsed as GenerateParams) + Raises: + InvalidModelSpecified: If the identifier is invalid. 
""" - provider: str = "litellm" + provider: str = list(g_providers.keys())[0] model: str = identifier api_key: str | None = None params: GenerateParams = GenerateParams() @@ -141,7 +412,185 @@ def get_generator(identifier: str) -> Generator: except Exception as e: raise InvalidModelSpecifiedError(identifier) from e - if provider == "litellm": - return LiteLLMGenerator(model=model, api_key=api_key, params=params) - else: + if provider not in g_providers: raise InvalidModelSpecifiedError(identifier) + + generator_cls = g_providers[provider] + return generator_cls(model=model, api_key=api_key, params=params) + + +def register_generator(provider: str, generator_cls: type[Generator]) -> None: + """ + Register a generator class for a provider id. + + This let's you use [rigging.generator.get_generator][] with a custom generator class. + + Args: + provider: The name of the provider. + generator_cls: The generator class to register. + """ + global g_providers + g_providers[provider] = generator_cls + + +def trace_messages(messages: t.Sequence[Message], title: str) -> None: + """ + Helper function to trace log a sequence of Message objects. + + Args: + messages: A sequence of Message objects to be logged. + title: The title to be displayed in the log. + + Returns: + None + """ + logger.trace(f"--- {title} ---") + logger.trace("\n".join([str(msg) for msg in messages])) + logger.trace("---") + + +def trace_str(content: str, title: str) -> None: + """ + Helper function to trace log a string. + + Parameters: + content: The string content to be logged. + title: The title of the log entry. + + Returns: + None + """ + logger.trace(f"--- {title} ---") + logger.trace(content) + logger.trace("---") + + +class LiteLLMGenerator(Generator): + """ + Generator backed by the LiteLLM library. + + Note: + Find more information about supported models and formats [in their docs.](https://docs.litellm.ai/docs/providers). 
+ + Note: + Batching support is not performant and simply a loop over inputs. + """ + + def _generate_message(self, messages: t.Sequence[Message], params: GenerateParams) -> Message: + result = litellm.completion( + self.model, + [message.model_dump(include={"role", "content"}) for message in messages], + api_key=self.api_key, + **self.params.merge_with(params).to_dict(), + ) + response = result.choices[-1].message.content.strip() + return Message(role="assistant", content=response) + + async def _agenerate_message(self, messages: t.Sequence[Message], params: GenerateParams) -> Message: + result = await litellm.acompletion( + self.model, + [message.model_dump(include={"role", "content"}) for message in messages], + api_key=self.api_key, + **self.params.merge_with(params).to_dict(), + ) + response = result.choices[-1].message.content.strip() + return Message(role="assistant", content=response) + + def _generate_text(self, text: str, params: GenerateParams) -> str: + result = litellm.text_completion( + text, self.model, api_key=self.api_key, **self.params.merge_with(params).to_dict() + ) + return t.cast(str, result.choices[-1]["text"]) + + async def _agenerate_text(self, text: str, params: GenerateParams) -> str: + result = await litellm.atext_completion( + text, self.model, api_key=self.api_key, **self.params.merge_with(params).to_dict() + ) + return t.cast(str, result.choices[-1]["text"]) + + def generate_messages( + self, + messages: t.Sequence[t.Sequence[Message]], + params: t.Sequence[GenerateParams], + *, + prefix: t.Sequence[Message] | None = None, + ) -> t.Sequence[Message]: + if prefix is not None: + messages = [list(prefix) + list(messages) for messages in messages] + + generated: list[Message] = [] + for i, (_messages, _params) in enumerate(zip(messages, params, strict=True)): + trace_messages(_messages, f"Messages {i+1}/{len(messages)}") + next_message = self._generate_message(_messages, _params) + generated.append(next_message) + 
trace_messages([next_message], f"Response {i+1}/{len(messages)}") + + return generated + + async def agenerate_messages( + self, + messages: t.Sequence[t.Sequence[Message]], + params: t.Sequence[GenerateParams], + *, + prefix: t.Sequence[Message] | None = None, + ) -> t.Sequence[Message]: + if prefix is not None: + messages = [list(prefix) + list(messages) for messages in messages] + + generated: list[Message] = await asyncio.gather( + *[self._agenerate_message(_messages, _params) for _messages, _params in zip(messages, params, strict=True)] + ) + + for i, (_messages, _generated) in enumerate(zip(messages, generated, strict=True)): + trace_messages(_messages, f"Messages {i+1}/{len(messages)}") + trace_messages([_generated], f"Response {i+1}/{len(messages)}") + + return generated + + def generate_texts( + self, + texts: t.Sequence[str], + params: t.Sequence[GenerateParams], + *, + prefix: str | None = None, + ) -> t.Sequence[str]: + if prefix is not None: + texts = [prefix + text for text in texts] + + generated: list[str] = [] + for i, (text, _params) in enumerate(zip(texts, params, strict=True)): + trace_str(text, f"Text {i+1}/{len(texts)}") + response = self._generate_text(text, _params) + generated.append(response) + trace_str(response, f"Generated {i+1}/{len(texts)}") + + return generated + + async def agenerate_texts( + self, + texts: t.Sequence[str], + params: t.Sequence[GenerateParams], + *, + prefix: str | None = None, + ) -> t.Sequence[str]: + if prefix is not None: + texts = [prefix + text for text in texts] + + generated: list[str] = await asyncio.gather( + *[self._agenerate_text(text, _params) for text, _params in zip(texts, params, strict=True)] + ) + + for i, (text, response) in enumerate(zip(texts, generated, strict=True)): + trace_str(text, f"Text {i+1}/{len(texts)}") + trace_str(response, f"Generated {i+1}/{len(texts)}") + + return generated + + +g_providers["litellm"] = LiteLLMGenerator + +# TODO: This fixes some almost-circular import issues and 
+# typed forwardrefs we use in the other module + +Chat.model_rebuild() +Completion.model_rebuild() diff --git a/rigging/logging.py b/rigging/logging.py index 7ba1127..8b7a265 100644 --- a/rigging/logging.py +++ b/rigging/logging.py @@ -1,3 +1,9 @@ +""" +We use loguru for logging. This module provides a function to configure the logging settings. + +To enable rigging logging, call `logger.enable("rigging")` after importing the module. +""" + import pathlib import sys import typing as t @@ -15,6 +21,20 @@ def configure_logging( log_file: pathlib.Path | None = None, log_file_level: LogLevelLiteral = "debug", ) -> None: + """ + Configures the loguru settings for the rigging module. + + This is optional, and calling `logger.enable("rigging")` will enable the logging + and you can control the formatting and log levels using the loguru API. + + Args: + log_level: The desired log level. Valid values are 'TRACE', 'DEBUG', 'INFO', + 'SUCCESS', 'WARNING', 'ERROR', and 'CRITICAL'. + log_file: The path to the log file. If None, logging + will only be done to the console. + log_file_level: The log level for the log file. Valid values + are 'TRACE', 'DEBUG', 'INFO', 'SUCCESS', 'WARNING', 'ERROR', and 'CRITICAL'. + """ global g_configured if g_configured: diff --git a/rigging/message.py b/rigging/message.py index 9685790..2d27e74 100644 --- a/rigging/message.py +++ b/rigging/message.py @@ -1,3 +1,7 @@ +""" +This module covers core message objects and handling. +""" + import string import typing as t @@ -14,24 +18,38 @@ from rigging.error import MissingModelError from rigging.model import Model, ModelT +from rigging.parsing import try_parse_many Role = t.Literal["system", "user", "assistant"] +"""The role of a message. Can be 'system', 'user', or 'assistant'.""" # Helper type for messages structured # more similarly to other libraries class MessageDict(t.TypedDict): + """ + Helper to represent a [rigging.message.Message][] as a dictionary. 
+ """ + role: Role + """The role of the message.""" content: str + """The content of the message.""" # Structured portion of a message with # a slice indicating where is it located class ParsedMessagePart(BaseModel): + """ + Represents a parsed message part. + """ + model_config = ConfigDict(arbitrary_types_allowed=True) model: SerializeAsAny[Model] + """The rigging/pydantic model associated with the message part.""" slice_: slice + """The slice representing the range into the message content.""" @field_serializer("slice_") def serialize_slice(self, slice_: slice, _info: FieldSerializationInfo) -> list[int]: @@ -48,8 +66,14 @@ def validate_slice(cls, value: t.Any) -> slice: class Message(BaseModel): + """ + Represents a message with role, content, and parsed message parts. + """ + role: Role - parts: list[ParsedMessagePart] = Field(default_factory=list, exclude=True) + """The role of the message.""" + parts: list[ParsedMessagePart] = Field(default_factory=list) + """The parsed message parts.""" _content: str = "" @@ -60,9 +84,24 @@ def __init__(self, role: Role, content: str, parts: t.Sequence[ParsedMessagePart def __str__(self) -> str: return f"[{self.role}]: {self.content}" + # TODO: In general the add/remove/sync_part methods are + # overly complicated. We should probably just update content, + # then reparse all the models to get their fresh slices. + # + # I don't like all this manual slice recalculation logic, seems brittle. 
+ def _remove_part(self, part: ParsedMessagePart) -> str: + removed_length = part.slice_.stop - part.slice_.start self._content = self._content[: part.slice_.start] + self._content[part.slice_.stop :] self.parts.remove(part) + + # Update slices of any parts that come after the removed part + for other_part in self.parts: + if other_part.slice_.start > part.slice_.start: + other_part.slice_ = slice( + other_part.slice_.start - removed_length, other_part.slice_.stop - removed_length + ) + return self._content def _add_part(self, part: ParsedMessagePart) -> None: @@ -104,9 +143,12 @@ def _sync_parts(self) -> None: shift += new_length - old_length + self.parts = sorted(self.parts, key=lambda p: p.slice_.start) + @computed_field # type: ignore[misc] @property def content(self) -> str: + """The content of the message.""" # We used to sync the models and content each time it was accessed, # hence the getter. Now we just return the stored content. # I'll leave it as is for now in case we want to add any @@ -124,10 +166,31 @@ def content(self, value: str) -> None: self._content = value def apply(self, **kwargs: str) -> None: + """ + Applies the given keyword arguments with string templating to the content of the message. + + Uses [string.Template.safe_substitute](https://docs.python.org/3/library/string.html#string.Template.safe_substitute) underneath. + + Args: + **kwargs: Keyword arguments to substitute in the message content. + """ template = string.Template(self.content) self.content = template.safe_substitute(**kwargs) - def strip(self, model_type: type[Model], fail_on_missing: bool = False) -> list[ParsedMessagePart]: + def strip(self, model_type: type[Model], *, fail_on_missing: bool = False) -> list[ParsedMessagePart]: + """ + Removes and returns a list of ParsedMessagePart objects from the message that match the specified model type. + + Args: + model_type: The type of model to match. + fail_on_missing: If True, raises a TypeError if no matching model is found. 
+ + Returns: + A list of removed ParsedMessagePart objects. + + Raises: + TypeError: If no matching model is found and fail_on_missing is True. + """ removed: list[ParsedMessagePart] = [] for part in self.parts[:]: if isinstance(part.model, model_type): @@ -141,52 +204,129 @@ def strip(self, model_type: type[Model], fail_on_missing: bool = False) -> list[ @property def models(self) -> list[Model]: + """Returns a list of models parsed from the message.""" return [part.model for part in self.parts] + # TODO: Many of these functions are duplicates from the parsing + # module, but here we don't hand back slices and want there + # to be a convient access model. We should probably consolidate. + def parse(self, model_type: type[ModelT]) -> ModelT: - for model in self.models: - if isinstance(model, model_type): - return model - return self.try_parse_many([model_type], fail_on_missing=True)[0] + """ + Parses a model from the message content. + + Args: + model_type: The type of model to parse. + + Returns: + The parsed model. + + Raises: + ValueError: If no models of the given type are found and `fail_on_missing` is set to `True`. + """ + return self.try_parse_many(model_type, fail_on_missing=True)[0] def try_parse(self, model_type: type[ModelT]) -> ModelT | None: - for model in self.models: - if isinstance(model, model_type): - return model - return next(iter(self.try_parse_many([model_type])), None) + """ + Tries to parse a model from the message content. + + Args: + model_type: The type of model to search for. + + Returns: + The first model that matches the given model type, or None if no match is found. + """ + return next(iter(self.try_parse_many(model_type)), None) def parse_set(self, model_type: type[ModelT], minimum: int | None = None) -> list[ModelT]: + """ + Parses a set of models of the specified identical type from the message content. + + Args: + model_type: The type of models to parse. + minimum: The minimum number of models required. 
+ + Returns: + A list of parsed models. + + Raises: + MissingModelError: If the minimum number of models is not met. + """ return self.try_parse_set(model_type, minimum=minimum, fail_on_missing=True) def try_parse_set( self, model_type: type[ModelT], minimum: int | None = None, fail_on_missing: bool = False ) -> list[ModelT]: - models = self.try_parse_many([model_type], fail_on_missing=fail_on_missing) + """ + Tries to parse a set of models from the message content. + + Args: + model_type: The type of model to parse. + minimum: The minimum number of models expected. + fail_on_missing: Whether to raise an exception if models are missing. + + Returns: + The parsed models. + + Raises: + MissingModelError: If the number of parsed models is less than the minimum required. + """ + models = self.try_parse_many(model_type, fail_on_missing=fail_on_missing) if minimum is not None and len(models) < minimum: raise MissingModelError(f"Expected at least {minimum} {model_type.__name__} in message") return models - def parse_many(self, types: t.Sequence[type[ModelT]]) -> list[ModelT]: - return self.try_parse_many(types, fail_on_missing=True) + def parse_many(self, *types: type[ModelT]) -> list[ModelT]: + """ + Parses multiple models of the specified non-identical types from the message content. - def try_parse_many(self, types: t.Sequence[type[ModelT]], fail_on_missing: bool = False) -> list[ModelT]: + Args: + *types: The types of models to parse. + + Returns: + A list of parsed models. + + Raises: + MissingModelError: If any of the models are missing. + """ + return self.try_parse_many(*types, fail_on_missing=True) + + def try_parse_many(self, *types: type[ModelT], fail_on_missing: bool = False) -> list[ModelT]: + """ + Tries to parse multiple models from the content of the message. + + Args: + *types: The types of models to parse. + fail_on_missing: Whether to raise an exception if a model type is missing. + + Returns: + A list of parsed models. 
+ + Raises: + MissingModelError: If a model type is missing and `fail_on_missing` is True. + """ model: ModelT - parsed: list[ModelT] = [] - for model_class in types: - try: - for model, slice_ in model_class.from_text(self.content): - self._add_part(ParsedMessagePart(model=model, slice_=slice_)) - parsed.append(model) - except MissingModelError as e: - if fail_on_missing: - raise e + parsed: list[tuple[ModelT, slice]] = try_parse_many(self.content, *types, fail_on_missing=fail_on_missing) + for model, slice_ in parsed: + self._add_part(ParsedMessagePart(model=model, slice_=slice_)) self._sync_parts() - return parsed + return [p[0] for p in parsed] @classmethod def from_model( cls: type["Message"], models: Model | t.Sequence[Model], role: Role = "user", suffix: str | None = None ) -> "Message": + """ + Create a Message object from one or more Model objects. + + Args: + models: The Model object(s) to convert to a Message. + role: The role of the Message. + suffix: A suffix to append to the content. + + Returns: + The created Message object. 
+ """ parts: list[ParsedMessagePart] = [] content: str = "" for model in models if isinstance(models, list) else [models]: @@ -201,11 +341,17 @@ def from_model( return cls(role=role, content=content, parts=parts) @classmethod - def fit_list(cls, messages: t.Sequence["Message"] | t.Sequence[MessageDict]) -> list["Message"]: + def fit_as_list( + cls, messages: t.Sequence[MessageDict] | t.Sequence["Message"] | MessageDict | "Message" | str + ) -> list["Message"]: + """Helper function to convert various common types to a strict list of Message objects.""" + if isinstance(messages, Message | dict | str): + return [cls.fit(messages)] return [cls.fit(message) for message in messages] @classmethod def fit(cls, message: t.Union["Message", MessageDict, str]) -> "Message": + """Helper function to convert various common types to a Message object.""" if isinstance(message, str): return cls(role="user", content=message) return cls(**message) if isinstance(message, dict) else message diff --git a/rigging/model.py b/rigging/model.py index 81cefb7..3478978 100644 --- a/rigging/model.py +++ b/rigging/model.py @@ -1,3 +1,7 @@ +""" +Models are the core datatypes for structured parsing. +""" + import re import typing as t from xml.etree import ElementTree as ET @@ -9,7 +13,7 @@ from pydantic_xml import element as element from pydantic_xml import wrapped as wrapped from pydantic_xml.element import SearchMode # type: ignore [attr-defined] -from pydantic_xml.typedefs import NsMap +from pydantic_xml.typedefs import EntityLocation, NsMap from rigging.error import MissingModelError @@ -73,8 +77,15 @@ def __init_subclass__( cls.__xml_tag__ = XmlTagDescriptor() # type: ignore [assignment] # to_xml() doesn't prettify normally, and extended - # requirements like lxml seemed like poor form + # requirements like lxml seemed like poor form for + # just this feature def to_pretty_xml(self) -> str: + """ + Converts the model to a pretty XML string with indents and newlines. 
+ + Returns: + The pretty XML representation of the model. + """ tree = self.to_xml_tree() ET.indent(tree, " ") pretty_encoded_xml = ET.tostring(tree).decode() @@ -89,29 +100,65 @@ def to_pretty_xml(self) -> str: # So we'll handle easy cases here and mark the model as "simple" # if it only contains a single basic field. It makes our parsing # much more consistent and is likely the most popular model type. + # + # TODO: lxml with the recover option is likely a better approach @classmethod def is_simple(cls) -> bool: + """ + Check if the model is "simple", meaning it has a single field with a basic datatype. + + Until we refactor our XML parsing, this helps make the parsing more consistent for models + which can support it. + + Returns: + True if the model is simple, False otherwise. + """ field_values = list(cls.model_fields.values()) return len(field_values) == 1 and field_values[0].annotation in BASIC_TYPES @classmethod def xml_start_tag(cls) -> str: + """Helper method which wrapped the class tag in XML braces.""" return f"<{cls.__xml_tag__}>" @classmethod def xml_end_tag(cls) -> str: + """Helper method which wrapped the class tag in XML braces with a leading slash.""" return f"" @classmethod def xml_tags(cls) -> str: + """Helper method which returns the full XML tags for the class.""" return cls.xml_start_tag() + cls.xml_end_tag() # This can be overridden to provide a more complex example # to a model when it's required. @classmethod def xml_example(cls) -> str: + """ + Returns an example XML representation of the given class. + + Models should typically override this method to provide a more complex example. + + By default, this method just returns the XML tags for the class. + + Returns: + A string containing the XML representation of the class. 
+ """ return cls.xml_tags() + @classmethod + def ensure_valid(cls) -> None: + # Do a sanity check for models with a single + # attr field, which our parsing currently doesn't support + # + # TODO: Add support for style models + + if len(cls.model_fields) == 1: + field_info = next(iter(cls.model_fields.values())) + if hasattr(field_info, "location") and field_info.location == EntityLocation.ATTRIBUTE: + raise ValueError(f"Model '{cls.__name__}' has a single attr() field which is not supported") + # Attempt to extract this object from an arbitrary string # which may contain other XML elements or text, returns # the object and the string from which is was parsed. @@ -124,6 +171,22 @@ def xml_example(cls) -> str: @classmethod def from_text(cls, content: str) -> list[tuple[ModelT, slice]]: + """ + The core parsing method which attempts to extract and parse as many + valid instances of a model from semi-structured text. + + Args: + content: The text content to parse. + + Returns: + A list of tuples containing the extracted models and their corresponding slices. + + Raises: + MissingModelError: If the specified model tags are not found in the message. + ValidationError: If an error occurs while parsing the content. + """ + cls.ensure_valid() + pattern = r"(<([\w-]+).*?>((.*?)))" matches = [m for m in re.finditer(pattern, content, flags=re.DOTALL) if m.group(2) == cls.__xml_tag__] @@ -146,6 +209,9 @@ def from_text(cls, content: str) -> list[tuple[ModelT, slice]]: # # Example: "Sure I'll use tags: hello" # + # TODO: The opposite could be true, and we could greedily parse + # backwards if we get failures. This is a simple solution for now. 
+ inner_match: re.Match[str] | None = match while inner_match is not None: inner_matches = re.finditer(pattern, inner_with_end_tag, flags=re.DOTALL) @@ -173,7 +239,20 @@ def from_text(cls, content: str) -> list[tuple[ModelT, slice]]: return extracted @classmethod - def one_from_text(cls, content: str, fail_on_many: bool = False) -> tuple[ModelT, slice]: + def one_from_text(cls, content: str, *, fail_on_many: bool = False) -> tuple[ModelT, slice]: + """ + Finds and returns a single match from the given text content. + + Args: + content: The text content to search for matches. + fail_on_many: If True, raises a ValidationError if multiple matches are found. + + Returns: + A tuple containing the matched model and the slice indicating the match location. + + Raises: + ValidationError: If multiple matches are found and fail_on_many is True. + """ matches = cls.from_text(content) # type: ignore [var-annotated] if fail_on_many and len(matches) > 1: raise ValidationError("Multiple matches found with 'fail_on_many=True'") @@ -207,27 +286,41 @@ class ValidationErrorModel(ErrorModel, tag="validation_error"): class Thinking(Model): + """Quick model for thinking messages.""" + content: str class Question(Model): + """Quick model for questions.""" + content: str class Answer(Model): + """Quick model for answers.""" + content: str class QuestionAnswer(Model): - question: Question - answer: Answer + """Quick model for question-answer pairs.""" + + question: Question = element() + """The question""" + answer: Answer = element() + """The answer""" class Description(Model): + """Quick model for descriptions.""" + content: str class Instructions(Model): + """Quick model for instructions.""" + content: str @@ -239,6 +332,7 @@ class DelimitedAnswer(Model): @property def items(self) -> list[str]: + """Parsed items from the content.""" split_sizes: dict[str, int] = {} for delimiter in self._delimiters: split_sizes[delimiter] = len(self.content.split(delimiter)) @@ -269,6 +363,7 @@ class 
YesNoAnswer(Model): "Yes/No answer answer with coercion" boolean: bool + """The boolean value of the answer.""" @field_validator("boolean", mode="before") def parse_str_to_bool(cls, v: t.Any) -> t.Any: diff --git a/rigging/parsing.py b/rigging/parsing.py new file mode 100644 index 0000000..47bd21d --- /dev/null +++ b/rigging/parsing.py @@ -0,0 +1,124 @@ +""" +Parsing helpers for extracting rigging models from text +""" + +from rigging.error import MissingModelError +from rigging.model import ModelT + + +def parse(text: str, model_type: type[ModelT]) -> tuple[ModelT, slice]: + """ + Parses a single model from text. + + Args: + text: The content to parse. + model_type: The type of model to parse. + + Returns: + The parsed model. + + Raises: + ValueError: If no models of the given type are found and `fail_on_missing` is set to `True`. + """ + return try_parse_many(text, model_type, fail_on_missing=True)[0] + + +def try_parse(text: str, model_type: type[ModelT]) -> tuple[ModelT, slice] | None: + """ + Tries to parse a model from text. + + Args: + text: The content to parse. + model_type: The type of model to search for. + + Returns: + The first model that matches the given model type, or None if no match is found. + """ + return next(iter(try_parse_many(text, model_type)), None) + + +def parse_set(text: str, model_type: type[ModelT], *, minimum: int | None = None) -> list[tuple[ModelT, slice]]: + """ + Parses a set of models with the specified identical type from text. + + Args: + text: The content to parse. + model_type: The type of models to parse. + minimum: The minimum number of models required. + + Returns: + A list of parsed models. + + Raises: + MissingModelError: If the minimum number of models is not met. 
+ """ + return try_parse_set(text, model_type, minimum=minimum, fail_on_missing=True) + + +def try_parse_set( + text: str, model_type: type[ModelT], *, minimum: int | None = None, fail_on_missing: bool = False +) -> list[tuple[ModelT, slice]]: + """ + Tries to parse a set of models with the specified identical type from text. + + Args: + text: The content to parse. + model_type: The type of model to parse. + minimum: The minimum number of models expected. + fail_on_missing: Whether to raise an exception if models are missing. + + Returns: + The parsed models. + + Raises: + MissingModelError: If the number of parsed models is less than the minimum required. + """ + models = try_parse_many(text, model_type, fail_on_missing=fail_on_missing) + if minimum is not None and len(models) < minimum: + raise MissingModelError(f"Expected at least {minimum} {model_type.__name__} in message") + return models + + +def parse_many(text: str, *types: type[ModelT]) -> list[tuple[ModelT, slice]]: + """ + Parses multiple models of the specified non-identical types from text. + + Args: + text: The content to parse. + *types: The types of models to parse. + + Returns: + A list of parsed models. + + Raises: + MissingModelError: If any of the models are missing. + """ + return try_parse_many(text, *types, fail_on_missing=True) + + +def try_parse_many(text: str, *types: type[ModelT], fail_on_missing: bool = False) -> list[tuple[ModelT, slice]]: + """ + Tries to parses multiple models of the specified non-identical types from text. + + Args: + text: The content to parse. + *types: The types of models to parse. + fail_on_missing: Whether to raise an exception if a model type is missing. + + Returns: + A list of parsed models. + + Raises: + MissingModelError: If a model type is missing and `fail_on_missing` is True. 
+ """ + model: ModelT + parsed: list[tuple[ModelT, slice]] = [] + for model_class in types: + try: + for model, slice_ in model_class.from_text(text): + parsed.append((model, slice_)) + except MissingModelError as e: + if fail_on_missing: + raise e + + return parsed diff --git a/rigging/tool.py b/rigging/tool.py index 18deb25..7cd0afe 100644 --- a/rigging/tool.py +++ b/rigging/tool.py @@ -1,4 +1,7 @@ -import abc +""" +This module handles tool interaction with rigging generation. +""" + import inspect import typing as t @@ -57,6 +60,7 @@ class ToolCalls(Model, tag="tool_calls"): # TODO: We should consider building a base model # interface for both simple tags () # and full examples will filled in template vars + @classmethod def xml_example(cls) -> str: return cls( @@ -124,19 +128,56 @@ class ToolResults(Model, tag="tool_results"): # -class Tool(abc.ABC): - # TODO: I don't love having these defined as property getters, - # I would prefer to have them as class attributes, but I'm not - # sure how we can hint/enforce that to derived classes - @property - @abc.abstractmethod - def name(self) -> str: - ... +class Tool: + """ + Base class for implementing tools in the Rigging system. + + You should subclass this to define your own tools: + + ```python + class Hammer(Tool): + name = "Hammer" + description = "A tool for hitting things." + + def hit(self, target: Annotated[str, "Target of the hit"]) -> str: + return f"Hit {target} with a hammer." + + chat = generator.chat(...).using(Hammer()).run() + ``` + + Note: + The `name` and `description` attributes are required and can be defined + as class attributes or properties. If you define them as properties, + you must also define a getter for them. + + Note: + All functions on the tool must have type hints for their parameters and + use the `Annotated` type hint to provide a description for each parameter. 
+ """ + + name: str + """Name of the tool""" + description: str + """Description of the tool""" + + def __init_subclass__(cls, *, name: str | None = None, description: str | None = None, **kwargs: t.Any) -> None: + super().__init_subclass__(**kwargs) + if name is not None: + cls.name = name + if description is not None: + cls.description = description + + # Ensure name and description are defined + if not (hasattr(cls, "name") or hasattr(cls, "name_property")): + raise TypeError(f"{cls.__name__} must define 'name' attribute or 'name' property.") + if not (hasattr(cls, "description") or hasattr(cls, "description_property")): + raise TypeError(f"{cls.__name__} must define 'description' attribute or 'description' property.") - @property - @abc.abstractmethod - def description(self) -> str: - ... + # Check that they aren't empty or unset + if not getattr(cls, "name", None): + raise ValueError(f"{cls.__name__}.name must not be empty.") + if not getattr(cls, "description", None): + raise ValueError(f"{cls.__name__}.description must not be empty.") # TODO: We could alternatively use the get_description() # object and check against that (or even cast into it first) @@ -170,6 +211,7 @@ def _execute(self, call: ToolCall) -> str: return str(result) def execute(self, call: ToolCall) -> ToolResult: + """Executes a function call on the tool.""" try: content = self._execute(call) return ToolResult(tool=call.tool, function=call.function, error=False, content=content) @@ -183,6 +225,7 @@ def execute(self, call: ToolCall) -> ToolResult: # build a ToolDescription object that can be serialized # and passed to a model def get_description(self) -> ToolDescription: + """Creates a full description of the tool for use in prompting""" functions: list[ToolFunction] = [] for method_name, method in inspect.getmembers(self.__class__, predicate=inspect.isfunction): if not method.__qualname__.startswith(self.__class__.__name__): diff --git a/tests/test_generation.py b/tests/test_generation.py 
index 93c7b37..3bc417f 100644 --- a/tests/test_generation.py +++ b/tests/test_generation.py @@ -8,16 +8,36 @@ class EchoGenerator(Generator): - def complete(self, messages: t.Sequence[Message], overloads: GenerateParams) -> Message: - return Message(role="assistant", content=messages[-1].content) + def generate_messages( + self, + messages: t.Sequence[t.Sequence[Message]], + params: t.Sequence[GenerateParams], + *, + prefix: t.Sequence[Message] | None = None, + ) -> t.Sequence[Message]: + if prefix is not None: + messages = [list(m) + list(prefix) for m in messages] + + assert len(messages) == 1 + return [Message(role="assistant", content=messages[-1][-1].content) for m in messages] class CallbackGenerator(Generator): callback: t.Callable[["CallbackGenerator", t.Sequence[Message]], str] | None = None - def complete(self, messages: t.Sequence[Message], overloads: GenerateParams) -> Message: - assert self.callback is not None, "Callback must be defined for CallbackGenerator" - return Message(role="assistant", content=self.callback(self, messages)) + def generate_messages( + self, + messages: t.Sequence[t.Sequence[Message]], + params: t.Sequence[GenerateParams], + *, + prefix: t.Sequence[Message] | None = None, + ) -> t.Sequence[Message]: + if prefix is not None: + messages = [list(prefix) + list(m) for m in messages] + + assert len(messages) == 1 + assert self.callback is not None + return [Message(role="assistant", content=self.callback(self, m)) for m in messages] def test_until_parsed_as_with_reset() -> None: diff --git a/tests/test_generator_creation.py b/tests/test_generator_creation.py index e1767f4..1da584e 100644 --- a/tests/test_generator_creation.py +++ b/tests/test_generator_creation.py @@ -1,7 +1,8 @@ import pytest from rigging.error import InvalidModelSpecifiedError -from rigging.generator import GenerateParams, LiteLLMGenerator, get_generator +from rigging.generator import GenerateParams, LiteLLMGenerator, get_generator, get_identifier, 
register_generator +from tests.test_generation import EchoGenerator @pytest.mark.parametrize("identifier", ["test_model", "litellm!test_model"]) @@ -22,7 +23,7 @@ def test_get_generator_invalid_provider(identifier: str) -> None: [ ("litellm!test_model,max_tokens=123,top_p=10", GenerateParams(max_tokens=123, top_p=10)), ("litellm!test_model,temperature=0.5", GenerateParams(temperature=0.5)), - ("test_model,max_tokens=100,temperature=1.0", GenerateParams(max_tokens=100, temperature=1.0)), + ("test_model,temperature=1.0,max_tokens=100", GenerateParams(max_tokens=100, temperature=1.0)), ], ) def test_get_generator_with_params(identifier: str, valid_params: GenerateParams) -> None: @@ -32,6 +33,26 @@ def test_get_generator_with_params(identifier: str, valid_params: GenerateParams assert generator.params == valid_params +@pytest.mark.parametrize( + "identifier", + [ + ("litellm!test_model,max_tokens=1024,top_p=0.1"), + ("litellm!custom,temperature=1.0,max_tokens=100,api_base=https://localhost:8000"), + ("litellm!many/model/slashes,stop=a;b;c;"), + ], +) +def test_identifier_roundtrip(identifier: str) -> None: + generator = get_generator(identifier) + assert generator.to_identifier() == identifier + + +def test_get_identifier_no_extra() -> None: + generator = get_generator("testing_model,temperature=0.5") + generator.params.extra = {"abc": 123} + identifier = get_identifier(generator) + assert "extra" not in identifier + + @pytest.mark.parametrize("identifier", ["litellm:invalid,stuff:test,t1/123", "litellm:invalid,stuff:test,t1/123"]) def test_get_generator_invalid_structure_format(identifier: str) -> None: with pytest.raises(InvalidModelSpecifiedError): @@ -44,3 +65,12 @@ def test_get_generator_invalid_structure_format(identifier: str) -> None: def test_get_generator_invalid_params(identifier: str) -> None: with pytest.raises(InvalidModelSpecifiedError): get_generator(identifier) + + +def test_register_generator() -> None: + with 
pytest.raises(InvalidModelSpecifiedError): + get_generator("echo!test") + + register_generator("echo", EchoGenerator) + generator = get_generator("echo!test") + assert isinstance(generator, EchoGenerator) diff --git a/tests/test_messages.py b/tests/test_messages.py index 99c8019..4f4a3e1 100644 --- a/tests/test_messages.py +++ b/tests/test_messages.py @@ -105,7 +105,7 @@ def test_message_from_model() -> None: def test_messages_fit_list() -> None: messages: t.Any = [{"role": "system", "content": "You are an AI assistant."}, Message("user", "Hello!")] - fitted = Message.fit_list(messages) + fitted = Message.fit_as_list(messages) assert len(fitted) == 2 assert isinstance(fitted[0], Message) assert isinstance(fitted[1], Message) @@ -150,7 +150,7 @@ def test_chat_continue() -> None: Message("user", "Hello"), Message("assistant", "Hi there!"), ], - pending=PendingChat(get_generator("gpt-3.5"), [], GenerateParams()), + generator=get_generator("gpt-3.5"), ) continued = chat.continue_([Message("user", "How are you?")]).chat @@ -163,7 +163,7 @@ def test_chat_continue() -> None: def test_pending_chat_continue() -> None: pending = PendingChat(get_generator("gpt-3.5"), [], GenerateParams()) - continued = pending.continue_([Message("user", "Hello")]) + continued = pending.fork([Message("user", "Hello")]) assert continued != pending assert len(continued.chat) == 1 @@ -171,12 +171,17 @@ def test_pending_chat_continue() -> None: def test_pending_chat_add() -> None: - pending = PendingChat(get_generator("gpt-3.5"), [Message("user", "Hello")], GenerateParams()) - added = pending.add(Message("user", "Hello")) + pending = PendingChat(get_generator("gpt-3.5"), [Message("user", "Hello")]) + added = pending.add(Message("user", "There")) assert added == pending - assert len(added.chat) == 2 - assert added.chat.all[0].content == "Hello" + assert len(added.chat) == 1 + assert added.chat.all[0].content == "Hello\nThere" + + diff_added = pending.add(Message("assistant", "Hi there!")) + assert 
diff_added == added == pending + assert len(diff_added.chat) == 2 + assert diff_added.chat.all[1].content == "Hi there!" def test_chat_continue_maintains_parsed_models() -> None: @@ -185,7 +190,7 @@ def test_chat_continue_maintains_parsed_models() -> None: Message("user", "30"), Message("assistant", "
123 Main StAnytown
"), ], - pending=PendingChat(get_generator("gpt-3.5"), [], GenerateParams()), + generator=get_generator("gpt-3.5"), ) chat.all[0].parse(Person)