From 2410240954ed21e49957ab301f2f01392e60544f Mon Sep 17 00:00:00 2001
From: Nick Landers
Date: Thu, 25 Apr 2024 17:12:50 -0600
Subject: [PATCH 01/16] Update README.md

---
 README.md | 96 ++++++++++++++++++++++++++++++++++---------------------
 1 file changed, 60 insertions(+), 36 deletions(-)

diff --git a/README.md b/README.md
index 5b81915..28d83d0 100644
--- a/README.md
+++ b/README.md
@@ -8,6 +8,31 @@ Modern python with type hints, pydantic validation, native serialization support

 pip install rigging
 ```

+### Overview
+
+The basic flow in rigging is:
+
+1. Get a generator object
+2. Call `.chat()` to produce a `PendingChat`
+3. Call `.run()` on a `PendingChat` to get a `Chat`
+
+`PendingChat` objects hold any messages waiting to be delivered to an LLM in exchange
+for a new response message. Afterwards, the pending chat is converted into a `Chat`, which holds
+all messages prior to generation (`.prev`) and after generation (`.next`).
+
+You should think of `PendingChat` objects as the configurable pre-generation step,
+with calls like `.overload()`, `.apply()`, `.until()`, `.using()`, etc. Once you call
+`.run()`, the generator is used to produce the next message based on the prior context
+and any constraints you have in place. Once you have a `Chat` object, the interaction
+is "done" and you can inspect/parse the messages.
+
+You'll often see us use functional-style chaining, as most of our
+utility functions return the object back to you.
+
+```python
+chat = generator.chat(...).using(...).until(...).overload(...).run()
+```
+
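+A minimal end-to-end pass looks like this (the model id is just an illustration):
+
+```python
+import rigging as rg
+
+generator = rg.get_generator("gpt-3.5-turbo")
+pending = generator.chat([
+    {"role": "user", "content": "Say hello!"},
+])  # PendingChat
+chat = pending.run()  # Chat
+
+print(chat.last.content)
+```
+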
 ### Basic Chats

 ```python
@@ -98,32 +123,6 @@
 jokes = chat.last.parse_set(Joke)
 # ]
 ```

-### Complex Models
-
-```python
-import rigging as rg
-
-class Inner(rg.Model):
-    type: str = rg.attr()
-    content: str
-
-class Outer(rg.Model):
-    name: str = rg.attr()
-    inners: list[Inner] = rg.element()
-
-outer = Outer(name="foo", inners=[
-    Inner(type="cat", content="meow"),
-    Inner(type="dog", content="bark")
-])
-
-print(outer.to_pretty_xml())
-
-# <outer name="foo">
-#   <inner type="cat">meow</inner>
-#   <inner type="dog">bark</inner>
-# </outer>
-```
-
 ### Tools

 ```python
@@ -169,19 +168,18 @@
 import rigging as rg

 generator = rg.get_generator("gpt-3.5-turbo")
 chat = generator.chat([
     {"role": "user", "content": "Hello, how are you?"},
-]).run()
-
-print(chat.last.content)
+])

-# "Hello! I'm an AI language model, ..."
+# We can fork (continue_) before generation has occurred
+specific = chat.fork("Be specific please.").run()
+poetic = chat.fork("Be as poetic as possible").overload(temperature=1.5).run()

-cont = chat.continue_(
+# We can also fork (continue_) after generation
+next_chat = poetic.fork(
     {"role": "user", "content": "That's good, tell me a joke"}
-).run()
-
-print(cont.last.content)
+)

-# "Sure, here's a joke for you: ..."
+update = next_chat.run()
 ```

 ### Basic Templating

 ```python
 import rigging as rg

 template = rg.get_generator("gpt-4").chat([
     {"role": "user", "content": "What is the capital of $country?"},
 ])

 for country in ["France", "Germany"]:
     print(template.apply(country=country).run().last)

 # The capital of France is Paris.
 # The capital of Germany is Berlin.
 ```

@@ -214,6 +212,32 @@ for temp in [0.1, 0.5, 1.0]:
 ```

+### Complex Models
+
+```python
+import rigging as rg
+
+class Inner(rg.Model):
+    type: str = rg.attr()
+    content: str
+
+class Outer(rg.Model):
+    name: str = rg.attr()
+    inners: list[Inner] = rg.element()
+
+outer = Outer(name="foo", inners=[
+    Inner(type="cat", content="meow"),
+    Inner(type="dog", content="bark")
+])
+
+print(outer.to_pretty_xml())
+
+# <outer name="foo">
+#   <inner type="cat">meow</inner>
+#   <inner type="dog">bark</inner>
+# </outer>
+```
+
 ### Strip Parsed Sections

 ```python
@@ -300,4 +324,4 @@ configure_logging(
     'trace'    # log file level
 )
 ```
-*(This will remove existing handlers, so you might prefer to configure them yourself)*
\ No newline at end of file
+*(This will remove existing handlers, so you might prefer to configure them yourself)*

From c26287136f79b50e8355f99c64d9a9560e6ff298 Mon Sep 17 00:00:00 2001
From: moo
Date: Tue, 30 Apr 2024 09:28:41 -0600
Subject: [PATCH 02/16] initial docs

---
 .vscode/settings.json            |  11 ++
 docs/docs/api/chat.md            |   0
 docs/docs/api/errors.md          |   3 +
 docs/docs/api/generators.md      |   1 +
 docs/docs/api/logging.md         |   1 +
 docs/docs/api/message.md         |   1 +
 docs/docs/api/model.md           |  14 +++
 docs/docs/api/prompt.md          |   1 +
 docs/docs/api/tools.md           |  10 ++
 docs/docs/images/logo.png        | Bin 0 -> 23903 bytes
 docs/docs/index.md               | 178 +++++++++++++++++++++++++++++++
 docs/docs/tutorial/chat.md       |  29 +++++
 docs/docs/tutorial/generators.md |   0
 docs/docs/tutorial/logging.md    |  22 ++++
 docs/docs/tutorial/model.md      |  59 ++++++++++
 docs/docs/tutorial/tools.md      |  36 +++++++
 docs/mkdocs.yml                  |  54 ++++++++++
 rigging/chat.py                  |   6 ++
 rigging/error.py                 |   8 ++
 rigging/generator.py             |   2 +-
 rigging/tool.py                  |  41 +++++++
 21 files changed, 476 insertions(+), 1 deletion(-)
 create mode 100644 docs/docs/api/chat.md
 create mode 100644 docs/docs/api/errors.md
 create mode 100644 docs/docs/api/generators.md
 create mode 100644 docs/docs/api/logging.md
 create mode 100644 docs/docs/api/message.md
 create mode 100644 docs/docs/api/model.md
 create mode 100644 docs/docs/api/prompt.md
 create mode 100644 docs/docs/api/tools.md
 create mode 100644 docs/docs/images/logo.png
 create mode 100644 docs/docs/index.md
 create mode 100644 docs/docs/tutorial/chat.md
 create mode 100644 docs/docs/tutorial/generators.md
 create mode 100644 docs/docs/tutorial/logging.md
 create mode 100644 docs/docs/tutorial/model.md
 create mode 100644 docs/docs/tutorial/tools.md
 create mode 100644 docs/mkdocs.yml

diff --git a/.vscode/settings.json b/.vscode/settings.json
index 819a8e5..66f478e 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -12,4 +12,15 @@
     ],
     "python.testing.unittestEnabled": false,
     "python.testing.pytestEnabled": true,
+    "yaml.schemas": {
+        "https://squidfunk.github.io/mkdocs-material/schema.json": "mkdocs.yml"
+    },
+    "yaml.customTags": [
+        "!ENV scalar",
+        "!ENV sequence",
+        "!relative scalar",
+        "tag:yaml.org,2002:python/name:material.extensions.emoji.to_svg",
+        "tag:yaml.org,2002:python/name:material.extensions.emoji.twemoji",
+        "tag:yaml.org,2002:python/name:pymdownx.superfences.fence_code_format"
+    ]
 }
\ No newline at end of file
diff --git a/docs/docs/api/chat.md b/docs/docs/api/chat.md
new file mode 100644
index 0000000..e69de29
diff --git a/docs/docs/api/errors.md b/docs/docs/api/errors.md
new file mode 100644
index 0000000..0cdfa39
--- /dev/null
+++ b/docs/docs/api/errors.md
@@ -0,0 +1,3 @@
+::: rigging.error.ExhaustedMaxRoundsError
+::: rigging.error.InvalidModelSpecifiedError
+::: rigging.error.MissingModelError
diff --git a/docs/docs/api/generators.md
b/docs/docs/api/generators.md new file mode 100644 index 0000000..aab0e9b --- /dev/null +++ b/docs/docs/api/generators.md @@ -0,0 +1 @@ +:::rigging.generator \ No newline at end of file diff --git a/docs/docs/api/logging.md b/docs/docs/api/logging.md new file mode 100644 index 0000000..e899f27 --- /dev/null +++ b/docs/docs/api/logging.md @@ -0,0 +1 @@ +:::rigging.logging \ No newline at end of file diff --git a/docs/docs/api/message.md b/docs/docs/api/message.md new file mode 100644 index 0000000..2ac4fd2 --- /dev/null +++ b/docs/docs/api/message.md @@ -0,0 +1 @@ +:::rigging.message \ No newline at end of file diff --git a/docs/docs/api/model.md b/docs/docs/api/model.md new file mode 100644 index 0000000..09aee8a --- /dev/null +++ b/docs/docs/api/model.md @@ -0,0 +1,14 @@ +::: rigging.model.Model +::: rigging.model.Model.to_pretty_xml +::: rigging.model.ErrorModel +::: rigging.model.SystemErrorModel +::: rigging.model.ValidationErrorModel +::: rigging.model.Thinking +::: rigging.model.Question +::: rigging.model.Answer +::: rigging.model.QuestionAnswer +::: rigging.model.Description +::: rigging.model.Instructions +::: rigging.model.DelimitedAnswer +::: rigging.model.CommaDelimitedAnswer +::: rigging.model.YesNoAnswer \ No newline at end of file diff --git a/docs/docs/api/prompt.md b/docs/docs/api/prompt.md new file mode 100644 index 0000000..6c24012 --- /dev/null +++ b/docs/docs/api/prompt.md @@ -0,0 +1 @@ +:::rigging.prompt.system_tool_extension \ No newline at end of file diff --git a/docs/docs/api/tools.md b/docs/docs/api/tools.md new file mode 100644 index 0000000..163b7b1 --- /dev/null +++ b/docs/docs/api/tools.md @@ -0,0 +1,10 @@ +::: rigging.tool +::: rigging.tool.ToolCallParameter +::: rigging.tool.ToolCall +::: rigging.tool.ToolCalls +::: rigging.tool.ToolParameter +::: rigging.tool.ToolFunction +::: rigging.tool.ToolDescription +::: rigging.tool.ToolDescriptionList +::: rigging.tool.ToolResult +::: rigging.tool.ToolResults diff --git a/docs/docs/images/logo.png b/docs/docs/images/logo.png new file mode 100644 index 0000000000000000000000000000000000000000..f66abe732b484f0de3bad74c801e83d168387bf5 GIT binary patch literal 23903 zcmeFZX*iZ``!0SHl~f{BA{i=Eq>yADGE_2Vo>PX5k<3FSC1XlS$dGv+GfRmyAVbKU zLPRC=u=cCxdH=udzlLpnSYOtM^={ky)|=aPU)Oma=W*=EzVF9%TTNAQ+ZLuRBob+x zvXZ<8iA0XSl9M)X#2>TQ+L!PLm7|j06%vVNJMmw#!Uelf{No-M1zi_Shl?(5X3mx* zH#awaYkS)(7G{o?{0`2S9{&($B9Zo!l;vf#uEkFFxm|nbwk|dEtx2@|_DiM9=Gv1g zFB_#*HU`KCZ;vcVJ8M1uq%AQ;%a+sT{{BBX7?M8>51W6k_TC%KZ}j3-@GhstUzVJQ4vl<%exgwvqwr}4))|Ntz|99ZiCT|4>diwn*PVBjT`?k`VGxAOLy=uW z9A7|p?D+B52{P9Q$H%EWJw3a(ey2{!%iAF+DER65l|#fYeEw{h&Oysfhv>g8Dk^IF zDbH-?*RKG7fB$wT&WQW>Z}wHVSu{U5Eb2Lz)$SBTPuxWiA0OY~_wT`G^?^^_e}C0@ z9dTbXRn_eG*LOj8?|wNi$4;9^+D|9*E=Kt0kBaHx>ok&n>FMe1P8X)I3!e%ssXKE` z!rs0;lRmOT+WSUw26tygsA>hYm%bwbLRuHva)5S;Y!yLy4K3gG87aP`wt&JJHaKd zFQ+&`lJAjE(J)(>86vhsYQ^&9yLa1KTU*CIriQhv2Ni8pBn7=`YG~Lv;62Y$?)v?p zE4|0f!v-a$Yb%S_eto%(&s+$>-JB2=6JzJ%+AM6_86~#me*I1AT}Q4?>KZLzP)>DP0w%l%6FQwrQ`85wDjnb!1 zZQLi}aRWi|Wpq?x{w|K>-H;Hoix)RiQ&YF+80`%V45T~2*wB%wBWV6|^XE!;O*~a3 zHfwhM&t&&bUY(rwp3`2$HK=#-?>}&WlAD{m%xm$ehK5FXOw65tfB=x&ly#at&6 zaJ=)1ixoy1LRu@`uN2vKZAao@TH6^kf{?j3T{Z8BgPxwWL-`Jh22tDiOsWktbAFsX zd-m1Ok=OC70tXH#6jlG3ot^2i%_R=~lap?NRaFub?dcr)>)L+?>o(kFK0&9ftgNBP z7P*z0q#^F$;J{~Gx!c3TLqpL$B{GtZ^WZ^tF)^m)xv6?YVoK=jU6mBsTiaO%hlc#C zy_XZQf*OiEnpl{A_irqWWXZaGB&F8Ms;ZxTZUapoj|Ucif1_n$x`U-pe)dd-CDqW_ zb4vJ{hsW@{;{!KBLPFY|j3+iCuX5!Ul| zASH=gwc_ejp6mDXdfDazTwJvcA$#*}I(Wpd%}7s(1{CDy*L}*j5Of(+OY5*n9}&Ou zFA# 
zZ!VHuL=s(DUFEH^i&T0%!%x!Kx^w5w8#it=Uc9-4Bq@0D?e57>`5RCyG+xshoj=dG zoki>hGH8-^=J_<8+_?6LmOL}L_6%+6n$@|-IA|2F@$@o^ikpxg9bH^-&T>p@Lh;0A zi@!#%&5lJNXS%Jgc@tMXZvR>O+xsVGmoM{K$6cN3VUv(x@m*gnO+FEHqk)^Gabx^r zDsjjnFLLqmy~K|A;ah)d#6;)i<<(Ah7YbgRF;%`oPftIH{qC4x*)4C0e9~e3Ax`2a zl8&sRVtGX_@%7xA&-M!nsuyhSdD+{$7fJX!9_YfJAAJ?Kd{&o+uz8V|`|0Q$HYD3* z+xL|(ASH9I&V=2$6Hr=uTxx|EfQURUE>6^ErR>5Uo35OY(dOvprEspqj6o(+M#=1- zKlwhZ9zu3KS_NBiWg?}%kCzhA+h}+H(WBdld|9OAg3L#Ig{=H4E5%oSzGjV_J;%w( zNYb)98!JkCedD%Bq3bx%8@KKz+1lDNpS*gvx0xpmWf~#&3c=r+sUx6V9dc4kY`8Ik zL!2`xgC&P7!6yCeW0B3&)b~0BYr1kxUeAtyT(m2QoSd4(<2ZOUbZ1x6;$cc}-GNQ#a}8=v1yPeGdu?Q@nb$MCo?1 ziLvp%#KgpwY=h&UK79)6bja2(o$Dcx2kD4|ci0z(iJS z4>LBZuCZ~m)Y^i@qk(ea@y=`mBITG|i}S^8_^kfXF|52sE8#Kg_w?xyZ|~6~NBnNx z+N57(<^SoEZjOjRDiJftH)CD7@mA|o+qP~swYI*EQyARF8li@Y(Gd_IAD<>y*WKNH zZGCkz+zUPhCPS+ayl+W?Gug__so%?fzTW*09!w4d^ z@!h+dg@uJ&tHRc86jW4HW$wS9ajnu?Gf|V;Y>I8W8ro7$EA+-Nu(F1IkX+t+;K2Qp z(!Y_ykSjOc2@W0_98~*S+JS;>-Wbltg|r&)yQVkpbAR}Q)Vh3ciJihpi>E+q`1ma@ zZ(O#E6n_5q{Jg*w|0mCqQ&R~Plk+RwOGbw5!C`|yWY6J=j!a9*-1C`&77bf34^$h? z9uDn!mYhrhh*}@A_qhCc?&)ZL0Nvof*TZe2O-Pk2yz|%Z-}QSPD%n&_kYpDS z`ojJ8K0cB@%TvdJy^}oWe;he_)MNCwO)-lpLJ;j-W z-oo~uccYpP+xaKEB8c>rO-xwV*47ed?}vx`VT)vzKUKI1EX_>?Z`iaw>bv{7bLS>| zOKGY+=NW~fMGT%hr&!&hxc$d1Lg}6^77JB#r{#dJx;kz3%CFPvCBrGHsTMfc4<)@l zo`+KAXlF8Z<`@O%)~qo#G&BT!`zW%wxVV5YPXv4o2d_}#&Z4EyRUK+C7XkXXUp8&9DDLde)J6h%)x`JW8c49jI}%s zn3qdd=0O(y!ONcAhU&yD=AtyS!DOG5&s{VURWoTOf@9<3L%`g%uUUN!zYfWe)O2-& zT)%e?El=0%vLlNXIU+3l3R|pJc=4>vvhwNE*YDlixrLgVa_W|nCb3kP@I8sKb0y}l zwlqnu`1?w>=;RuQ93Sx72~35c02V}441SCEWwc|h6lr|(MpjKNd~g+=nPQP#&_B<~ za9`!Xn{0Qt>Hb$=-#wd28Vv0K5)m;qCfuY;24k>PAvhQf<>0UNNz9LrpI2p|LU4 zL28-*T*A=Eh;rHj@UQAv7J(4aRSY)WJ7g(oQsd)&xv!j|@Y07s{cJ|Ir&GyuJb11Y$H+Bqgf1(iqiV}o9F%(A0Ho3 zCw@AG}#2c`j3^tuvw?*uFHut>lukt+5H*cc7OThR>SQrgr`+QDP zFkt2FmKHVib*G}PFnk#Xavn97*VF4OcfAh0fkoB6@4VJ=@BEdkR~c92CQ{>l=!(1r ziJR>%u-q-?!j)rOrT_Ie681wui;X1?1EHrvn5bCQ4qpB7ncHgl>({SV1JzZ;br=vS zTVw?cPb(|e;cFx4{LsByI}fQ2qD%$M4So5-X|*h`{R+z=3$R9M z=ZpX4;!Zd2GQVJH$+mw#39DRRe~YHstG~US-o$rR_Te-U4Z)NAs6F<=X{ZoEY!jz(-B6Ux7?2Vf@hhE;^sattXkeQj; zcri;Q<;BcUJz_8@T{CqUD2(C9hfkk&@7uR;=I2itP$N9EA)+Xu%k$HUy1KhjF)1G< z-VO<2C?buur|+aDMIJ_8pDecT({Gsr0(Qf?Jl=cUe(;G~f8C3l)XlD6^0TuK%gV_S zwFY@EI+F%C{S2TZcnab^%H5`ohlU)F9XqzLuweQR%^<(NwkqsXVdmvkd8&6=;AyZ9 z@G3p2$ryMWwd;^l@18w-#=z7(pL?gHIMrMF8cldhk@ZJsiS;fMU$dSfYb9l6_wi$^ zOMmK-%J>@(-#`wo&C)A$9C|?+Qgq4QJ`@Wn3v|UXwc3SRF8{nbChaIPWWh!J3|+{U z&b54t*YwLnG$cINab%WT=ueZ%%F5n;JLxj!hmfls7^rD4b;9#_0lRG8x|M!%i}|Ka zn?OI}MpID3X&Y{kF6hkA<_)1)UVGdkS1Vv{YfFv$_|)5* zgj|=GpYI!Jay~xQ3GV;qNM;ES`e4SxJilKYkdu>p z8zW4elaoUnr62uO)O+{tz5Kb17VN|hAzl*DFY$BG9t)F&A2KS{j9dnuU=$-vjJGIe*MHYi9qw4XoWGvARjEmcc2jArDyB;04BQ-ad2HOPGnE)WR zFjG%M@NYxU%JSqNFotf95Cwv$cKVXl)bthejc6|&#!!=uwCayE{q_^L)v$Br3{V;5mT8YPU)epZ4&U865 z3j{9wFinVUefvKTD$F(@o@@A=eJtA%Jpi#bMC&T#&|j%ym)>D>msM&X+Sv?egHc)r z21)?|0RYl7Gu0V3wSHs|MV-u3OnlK9@`UVEA~P9+O;ySZ*NndJsT!YMy3023X@xE+NdvSv);6nl#~SY4&1rQ8cGT`M5fW( z`Rg6d!sCFV%-q~C1YUi8yc=E3Ub7`%0j-h^%~pkfMwzv zQ{tDul=i%MAsh19X+7%kM3n`?eH>}9geJc?(e6p(JWDeg3M=mqxQA|Wt)ViR(LGxUlJryDz_Vpis8 zNJix@tyZ24DM?8j3!lC29Xy+GJ`;sclgERbR*|H18>sa%)#9`Crm9IG(gM**g|WvA zm+kB*?M&_=LFzn}^S`l=Z18^V-~OfK2D3=V1v0f3P?iLNJ3d?n+&cB~? 
zZkz}D3GiKb@EXh~h28qE(PmstxzxUo&dX9Nq6Ac7GTG}e*65=3#6kRJsfrUg&aN0A)~UC}UCB*Cvob-fEg#>66%wrtr#+z_8$fqc^I?eUV{O#S`+qVB(v8+x*`2m;EdrV5p% z;qIR6sNMCQo#(;27rek~?SJ*^73%2Aa5gzdAbg(U`}gi$M|Chm=UX6pb@#%hWDD?b zM6zsGf770Cp@`hE39Vn{!&_tP*gDQUuo~3pDPD}VCegC8nmqNMb^GyI2FrmqAP;0@ z_qz%ykJ-%}ilHy>^XhPpOQAv$CFRHG=bB#&QBWQV+cE}Rziwt{cUV@8-TmV|F;WCw zR8-Vpt^Wp5$3YT=slmy~tx#N0Dwqki>gG*KqF#eAi;AL;mGERly(S)alBWMTWT}3S z?}xKYYK+?z+<=T;BKNBq8g50~K*$ML8O!STuU=6`L_{nMtgqejUY@%h9;Flq(c?Cu zauEH@OtV0Z5{rUG+sBVTzdm*_|N8b3c4B8yE(9LpB2uff9}7*I8yjn*xHSn@;%&SX zDQ z@J(`M{V+OIv?`;C9sOmBfH~PEu`I2lnA6O3(gm1#%ZcYI5|0)kbqhm=U$-LnFBxXK5ox^ z@XSLJO6Rtf^q`DQh;>c~3FJiXnDwMU2*{O9nizf|4o&L)P}RY6L$s&V4C z$u?{>MbAVi0K{ob-VF|W^XAR93qMDG&J4*xkNT@GaI3nw2#HHb@D}BTrrttmz9i+tg)#nLTr1)7`8gX{C-pvam8yWlsb1k0Oz`%gHsmBA6Cme_f=ZvZu4A+ilBjlH#43ZPr&mDE>1zT$9(!lN)x-~ z6LG}v{ApEWCG^O=k`gF!%E>q}pNegFdHk6;U?uh0@^2}ne+L-@N6PW)Rea{QQi?_$ zutLqsFKV=RoUY95%0MBn3JVHe1a8}V1g(iG6_+1WXF|LL3YYmSM9N2E$8Pvm(}qI6 zWMgAXGpZirA$S*roNhVp|eYJmiP)nKOAxa-Uu1NATgkW}OK)9A6@QUQWyeSYN<@df*U`?G-%(>bEr zD%>21qzY~(Qf$j=Ft~jrieTs$b;vdoq93?&ZlaUgW%&Mmu~^pf4QfK%AxXSV7t1C`^+F2 zk+vXRJS{8JMbXNANlR)ID=8__%{QkcTmkrgLU=zXxk!N)RYNiTftQ!p5r=;9-Fx;Z zv!p~pXEl2h#f{o~Pi%|&a|B`b1>B)P^#VEc@%vaCV44?b(IXx{`LS z=~F^ARLsoS2#Nv}r_c98*)OuC4b`1n9Gukb7M+~go7{{gie$m+8|hCU@6cR=U@L_O#BSAj-| zLR0_t(J@QyK?zqt4zFKLhyIFT>OnlP3use=mRMN&o@;qR-fQ#?1JT%D`Ve`o`( zdp&r?4Ip3BlcHX*`DXB=!RwF(4?XvzFBluxXdmsDl5zkVV#K|B_1yl>;3hW!FI{kt zoz6BWof*2tY7WYV`PZTB*M#^(_&A9BeE0Y`#C>k)=nqd^Ifxve63Twknf2EINZ*U^ zW1GgAGeiib=7PC-0Gd7v^lKW5+{k{E=(lR|FZlf4^XJbKYT4k#1P>PG*RNlBR;}y+ zRHs>PDkTv&+E$ZF0n7k({E%z%B>;ZG3ynKesaAXej}(-YHiKl~Ai_;U1A%u7ng;Qk zBgFTD!aA|Cwmd!7UsaKn^RwgQ$LlC~NIW+H&=mIV$%GsP1$gMOL$yCdw{|BLDjJ&C z04z=fz)RXe6`)DllBAJt3IJW^enwMMlZaVx8N}Jae|Agxh)3SLho@k|i3cA_kA5Kn zKLf}n&%W#sFts_wHS`{y8fA&20GP?E;DB_3`j^ z;AP?&PM$nT$jo>KfBo!0@YOF~yaW)=inywu8CpCplb>iqLqmgZ z-Ewp1TUTv%E83S%9>^PoDSWX&z$Xp2M??|v-Yraa2z;u%G``BfdsCUQ0qWjrVnJA z)JPKH8XJ2TM6q(XDiaa~P&GK>jUckN15TuKNLawP1pS6XSh!d}i<-C}z!-v>L%68M zONP*LoEpRiXeiQ#z}3RYr|Sn0R5Md~6Yb{^8ruc1fSgK-=H~eJ>FQlHLN@K2L8a+0zH6_oB>`0|to;69W@@_MHTD}&t0_3nheFnL z`_NjsGPgqMrH4r*ps?^aZ~PUwk^l?~3JT;^PHC8uslDs|^vU$kM2A{tN3Pe8@hE8! 
zfb_=?I6~o{{QP_;oF4LjT};1Yks^Qo{Hgy(rlZJOli-@Xmu86wq+ykOd4p;vk!KYc zDnKLZmfEur9qHFXXgcWC>wxqWlul`w;oDFcsj+u_^S`$-3Hbs0L3++jh5m;W0&3o7z^Rrcya)+v=*f^z`NlO zAEu?vlcfxuAay(h|A>TRkghpHRDg@`mA9*@5pTG9Ls3TtnL4 zJ_p%eP)O({3ZdomCqEIZ0hH7kquYa{t=rRR;Wuf4>ZMY(hM$iO)YJrOMi@F$Brq*c zWahS@UsI_%55UpizYlg8B2-mXLjylHbs^sKZM*m@=Xz`jMA_~X_p#j7b`}m7Gz{0k zIN)E>jVeT5)Ymu18a$BI*JnBvwl4(B&Sld$0Ot^(voxwGZ{I1yorIiAYCpk&vWWTo}hBa(Oc_OzcTFRdomQb8+G_^6~_zMIF!`jZZq*0izvM(6Xyj zO#DXW07Rs9!-p0&He1lv=$(ycKH*G3bV%?D;e^Kc#+RB}S?P|>0lB24U5|*mK`AXP zZ1#czq@`{0VGa(CtxQ6jaR@}OPChjMAtS)eO_)x?!^26JE?s&U6Qja1zGuUR4YS|7 zXb=N3XU^RHGH@B3w4lbM7H}FM4dFDU`6}s9li$Hc7 ztR;i6dX8f%!YhxI7D(6^E;P}FXsw@0_V zpOzenNfwmt_d=)bhK{4YsYy9}Zgc`i9DUDgu-ulVTm>k!p9(M0K!-{WB_E6*_d|nh zj)GR^@h2nAdm0ako@xPFto)RY3UZ1?!(F1g$;!&27*B#wP;jy?A|sT8)X@SVm$J&B z4~Z4cA$o*5=)K&!QqC^eaAqkVW;h#;2nt4sy+4MqhuBge_I4c4LCC3QsCWqJ%}ZpF z@$q4(bR(I$)sdsysh;j;Vj=+8Zgf@&QYI)VM1i_fen^H&lw9o{Qe!0;s(^Vo6?iKg zUYIK@DM{D(tnS#mm)AAw8-5H;&I~G+w*F$aGQz12Xl!VDIt;S!=C;=;$by$Y9YOWv zwP|EsxHNTB_hwo}MFrPt_%(#)F#slp%e!X=&S&f2L=bQ84>N?sS`S1{kdCiPr+kr3 zS>`6Y{{ai)SKy2UlWgTB?2i6vX?{BF(?{SCQ(s$g{yeO#T~D46H^E0l8mH|yV=><2~z?n=kwD8yNYP+qT=GVOG!!TsZzO&wUB{}mW8U9 z68h^c!T8v;bK#h1jSZ*I%s5{2^c-1P{7wGsqbh1aKMy0gZ@e0kMk%`BO*oF{B6RLFRxscf_tICZa16 zuv$hQqW|}r@;E^vA|e6BH6R=GjCX=Vu+w|ewrkg}P4L2XPbv6{9oRufM-DBU@Oh0i zY^(hJ{-kc1BO4q5r&oP6H0Xe6iDXFCW*zsTw>K~hbM_}un}h)!a@+7PJmPG>TzgGUZBtnO-+TM-M>^*ZPzQZ zu(UM%<~}doGtao^&rcB1mzQ^>`*NvQ2Gm=Kh;&)rPq**putqL91rrh2(@D)fV}L1)7113e?7IU!&kyLgu`bbPfGbx`x!nTU>2iCz4Le!02R zh4BW`(zVozl*w(#?L^fCZc;#;#F*Kmk&hX*<01@Lo#k@qHouo*Uo}@Hv z%{GhVbKax%aah>7N1EEQNaQjIL<-w@WPWO#R?g$cby}W|0wo2{o$Ud82mY)cx}}h_ zW5|;yEZC+C#pV?W2_%@)UMjMBb5?Ns6V@Q)_J`u`N8wxiEn;SEy?B`?Im7n{IzM(H zA^Pdc-=TqR_usvHcedqv+%*=*KZ&uv-CH2RZL8}kflJO(wF(&+dUyLmYw@#Z&(O}^ z%u=o2uy(>U9hQTFidIz}om~jN23%1s^ZeoTki^{z`Ykv;Fqn zJK@eB%_tWLYiH+zgsFRFL$K3wdMX}{;R1YxMSY8_coagx|vqR6#L&U`5KeJZe_!f_~o z_#PK{cx$4>D6CaMAPqwP*Cl)eZ~?x+=@IU+ho>v=>XbgP%GmR&RoQ$U33~dJ4Ji@8-4C2c^Vi0HWH&P;K4J+vY`j=lSX`!h7>ITr7$^MkV_w%} z9_yYWZnQ@7Mw4lonbd$<#77X19~*mUyuH2A)o9ElX{wz)YD8(PCEYj(zL}PzYu0)^nERGpZe2D3cz0^ z^3gJR57HhRXvbOf15wpIr4C_)!Sv9fO>npHUD@6D{{40$=U$5UK3I{d0G~BBo0>5E z!PosBa4H{W1z7Rl*a)wEW0j zGH^T)V=WO8WXRS+@V$Yg@q=yYf?Uohj?;ERF8XIuz_=0pABb`yV-s8vtV5^)oMYNW zF)am_rzt2Y|ITwLx<5s|c=`JE2wG54?fb??S&04;r-ZUk&f=u?J*!a_mYPCisO81+YC3b_ukev8lQ6$J)H#^H+T>UyN4 z3laxR5zTv!UEExKsag!}Vnp8w1})cE^0a68POi*g$1f9Tpa}OXE`eLN2ia4P7;6Qg zN?3pi>R@cFwyB8<-i3lVx@+Yn!$8{wz4qLS@KF*;s$Ect^qEraEOetGaPJzg!;1lj zDi^;#h5-Z(>CKenWYd-e8DyCXA{`?}(ArZl_nqBEG>K$ z`Ig6_BQq3T@e%JO7c86hwfTTp)Tm)3gEFU)jj>OjSh`l`15s?G*>@Lenx1d%#S0hq zi;FWObprBV&tly4Mn*+tOZCzOuim+Fwb6W1T7!Lm8#v3K zA-C{8-uWY0OG6_71P0*{MNbz4ul^5cq#tq;?(c=~4ZTpXkFdSMDu}w7+1Zz@zAJ35 z^m?C49azCu3b<~i+o4Fxc-~v;P!FJ;dCo2~0}815iyKrBWzsuesr|#*@iudl0W<EaMMU=jL`EtLMl*xpWfR>@*-5WP32wO9r9iEto z_6Ye>aCG=x-HF|tM^wqUq@iM=AU#Vns3cl@q|&>)1<9fJMTmutMMg#*F|8#b!W2O$ zV|D^_1)Q3hix-Ej9Y65biuegn$m%3TO7%nAY_48H{?> z!Ivq}OZhlikB>zE1+-Wo_;i>6&{aRgK}VTnJ8?p9f~&yk>)Smb=r*G}2p^4-PC6Ar z^j_a-Gf;Gg5!sH;&H;bttSAUy5YgL2a9mV*MJ{ayhb%G4Py`Fs1Q(O`KOMB!T=yl0 zYVEUB#H2t=OUtGCqm9Z4ZGbL+tSW55r&+$G09xI|aVfItP?_l!c{S(KIzSW$S9&^H zY7(WG%h(PiR8w19URU}MDD|%}3l)UV@6-LYh2XvJ+#x6maHJPb90-q#uHu$6&%E$b(>fRfz&k=TbT&{ z5PUo~fkszXSBd4@9%R0<=g+NQ5#(1QpPRmx*2RklS5{UsI_h+_e6X+?@HayLlLZTh z!&z)!1J|JliorwfK~{I z-NphdJQzv1RZ}AcvhJ`eedK1akf+a_c?sznrov=l#f1O3_F*zb-g1ZB!zOgvP!*r}z3c~k<`5hm7mr*R>LowHkNqZZ(xFyIp zY-mLwfNZDRw=WdtaAxdAgxDKixbv7!Kj4!=vyJXi1L6l!rt|Il#B!q9LP;%B$aR>K z^+)X2K{m4~)}K)Na62sQHL>4NS-YpFa&>a|;TCT}_l%0s|ECJm5XK(_arcTIOlQGB z1#g^hE~73kEL{6%AvR*k!|G;4ji 
zw`3a$teOERV&#}ahYxQv@%l;m^+UYC_v7oS@B&^UUly~i?x%l5%KTeKV z@`UJul9C2Oxa~A>xGl-<2owqww-4D+5yV}Zw@zSnwWQ?Q!iWZvKoiJw!ckaZumT4q zp%KFmrvzmO=K}qB!{K5@wQ>q>!*1W#Cp{d9uH(*5DLls4RQm`q~^PnF@a7sr+uSw!6xewbwgE#fD* zsb*&*B{b3pRbzwTL{?!*F@yqj6X&MBuaAW=s{i@zLctF2G$?`CZ>c))Krs-*D}+xQ zWV;N2Vy2g-KF(4o{4gN#35P>L|J)yTV6<0>3c-YRYqTT>@DXQ)jhkC}Lz58v+!(>2 ziT9p+^yQ<>`h5(MuCGq}7F5oRS#I6B6{!0LDmZZm5LmSBF`~MQa6>?$+RiNMhX}KV zp%Juxh7bU1kzFq%!sZm37j>AE+OrLI0jS=DCIqT7F8ecK0|eJW%=qC6*-xC*N-vv){25|{D4o79QYe&&l!pQ%)=vONFmW;cTT~4`S8)Bx|c60{vC#d ziL*UFEoO+Hl(^+zX@KdtJ)e~nYGJ^C+7WmA6DA=Bsj4hn_Z+1lD4Y2J$08JQt?mEt-vky`I4RhQi5)P9|4z7A{$>LtR)(*E{Qdj)|9|4}q=mmy z4zw<L@mz42RpUop>+6t(9gC+&(e?1O`(Ec{^Dz&cj34URjxn7`)A-PQk{+ zpQSP^cTpzXKd`@zQ6hkjTzbz8kfSLQ%{dO)2!q-%hwws>uE`J-mGw!yHaSAu2uSMkFsjutz`4fF|5YQp4?`@SBbb;yl( z@x)4)cs&ox@2mhJRGi3>E zO1B8#=(t6?c^k7RA(AT^8h$Rca7$m=xScr&Adt}h@${qc+`j-ra0BKvBnJ(IH9^v# zjf17PzN;%*lbiu%DiFE>%%vN!+6w?HDWUS(wcMn`7qODy&WXUqPf%re{?@t_hA-k@ z;(I=sqr* z`*@MVVM{a!pyY4_m%4Kq7#J|#v>9TX*D=qVz;))Y?l4@ND(*_g`Gvvsh|kJ`jZ^HD zFXSW|v{J7jlkr1Rqoboc;{NM`USHMzU6Ai|VZ6oKt53`mP5>Z4BGm_6(9(`2Z_yyS zI}9NG{G2;JHkJU+W9;+uW9H`O;rH%wsEaK!p@M_^;b>-C2h%j16egKb!YT`m7BKhL zqepv)Yz@KKpXYUIa-4T2`}5m*!s#96xqUjsS2B30Al^D{Hib()zyOx7v(!skEHnayi6kedii+QA_ViV@NG-eFca28|57v!K(k{Mf|AgYUvDVlK6?Tf9tH zJX}vM-HcYj{ACdBsK?xZ!$p)Km=YL8pR5p0e{cW=OX2P|F!I1m&HiJcMnFG)orTJ} zuUyz;Zt^w(B0Of5ikJxy1y1rtdO9~TUSl(0X$_tLqLAaCBBdX*xZ zTUb;i58oFoTHI2N%4Xrajx!(Z5e>=|1)O#Lv@+w^s-`%@5o!CPS@1;KhJFKr zO4iUYc1-R$ScsRXo|yRaBaF#C^O@*>6HqiEZRM6|l4AglpnZXCHB*H5;J0C<8Rqh1 z#P>dcRf8CM!|bK{XbEv7V2I9^F#W0>xOGneF=x`)sDwU9Tp~aO4OZluriR9wKZgH> zAGYirqx|9}S&v92PE)thH7y3>G?iT51F2uAtsg%C9(`l+pqL_zyps+yY)p$Yn@Ioo z^O2~vIi{NGKHfYH?l`jVmhbuVorMm%8r};!@(`Q}cu5Rb_Pb$jl<*UQdt^IyOmlY4 z|$V5&KrXO056oPrD_=JtcnHEN3>JW7C6 z1O_2Ss>3DG1X^krwKnIITF5c+a|A#sYZbI6F`vuOzr=9>oDnfrLa^7+CQ?Fw$HKDg zHQTBPiF(k`iUC3n1{BLD83$qX*aa4iXwSGZHMg0Z!Yd~L?sgx&5R|Zr2(s$%yd6n= zPfTo*=N@$MMGgbTGd@C3Vd^59R-`jdB9HFeNf079EaxV;I#e+3148oe+XbS&VRC20 zcYT?aNRAc3p=+7{OqA_R5rAVved%cioD!5TxkykX?qV_PmeVsZg-CSVfL8yOKg<;? zV%$IKQWG^NmJ0GSF#rzP9iyJa;hPK|L((OZe!%?5@G$!)`VH1_zC9`|DLI9_O)$-iBnh#wkN@FxyX>3Uzh%#bS z4U`4?AX+c8RCrnlLxvTrmMb6Wg6bHrEy37c`Ff`HLqKj2IvoxS&J)9$q7(NorXRRS|A6}9^=EcW>@gZ`7uoJa>Qap)Kfiw`(U01i{qq3p!1Fb} z=1NDY=23ChW2=iNrsvOa%>Oy@&Fj7U!qBZJ_M@!to}BDz`z^KN>bNIMj^z0KH2?FL z1^Jm78HP^M?*@#nyc2%Y)T9o7z{l*^r(j3XJuiW@vddl`yiUWz$M?1~#{UAq1{1In zh6aDQ@cM$OPDCqc1FDH^|m%PZ0n1}`}&NYRDE$^5y13>7~pYEe`yEiIor zNBf>S6$s*_2z+kugvTXWSPCc0e{cW%tf)u}bt<&a>Y50;AZ;{&InTc`r5Y>-<25tQ z4Oe^Lvb>ikIiBFHIehiw@s57ha!efwjh#Bmt9AbTM1vSIVe1eVN;$cQ79EJ#~o&PhYvpfPMPWgUYwz*|k1_v%~EJ-!6|6sa9qx`E(Y5? 
zhP%~4Gk*Ax{#qY&3Ye3)bbRt#K@V#_-u1*8WVL}vNpH@ThDV!8{BCf6G&k6H)Xsfc)4(6;#`TV*AUNvltKSw_BY%XZ!J42te%p$jHQlXD8PSk72|M z5LgW`w4iFPJJzk=T?OMLfOkWU2S4n!FRLrahm!8l{b`1V+^?BM!lS()9|WL&bHmzk zyuJlKxVDsNV;j$@!t~6{k0+-qlR%AOBH=?xIaqdLo=^y53|%ZvlMY~aRFsul!O1XF z#iO2SAVgtCxSYFc0)M*ca$FeNjz43q^#wnn5nBEGm3oU!dO%Qio_lS(3sRn)i_ZZ9 z;N|3e`5@*Sp+u^vs-8y=X=7_^m+1+hYXfO{qPH~HPPi`tM_3;OHw3@Sxl>gc?jU1x z4Cu;+@9-A6jB9M$C;sVy2NbLa@5PR6_!~XFNTHS%Zj+zXu@I@2Bc3;J-2(Zh2p2aSw zxI{&Df!a$ido@Xz>=_!MB^kb-$h2hjoD@9mz<(MK0U`E#X9-k{`*Csh%TYC)RbYLP zU?YEarFcYBfX10_jFfhA%EwH8rbG2&!(7k0C208v#-2~Y!%Y{XkEf)hJO~X75JHZvnr%Cs^Y|C0@N_VM0&}0;lV9`0cV}zLr%HOG?%hiQ z(b;QUk&&T-JM99NFmmI#h*Em^1YHG`G>;$8zpaRUmFh0CzBek|as(00%g_Jd^+)jq68y-Z%gKdJ0IJ$5DtFLLd~2+e`QDYQgWH&j_lpbO zwC|cUrq9dK-F}xi1+nB$)1mlJ1A`R*!dD4z7x>e&lb~U} zK?bWRDdoAGlkp}7prD<^h}gw^=a+i%2=O%wq2Yu^ujh01mc7i z0^~jNcwh~x*!T--fug+nb(H8W1``Qpn@MD~IZ0sWw zbwUS@S$O?2G1&u)hc1wunzr_R$UD#Qk?(Bbj>|4+QKN3cYtQWgTA!v#H;ez@QCbiNq@{@~$ zLk{VSM?k=>oJGf=@L~wat*#Vaj5h>Z=@jg3c<|vdV7R(ApoM_Z$?vm zHaQ`|PtUJL54Hxh%SGUOQVsmsDqkxdI(YDCuM1QE+6Ef3{k8Cax4M=-JW+RYG(I-= z^7lGPynH2Ot;aa_F_;OsKmtpRmBEDdlsP4Rw5uiVxDZ3P-hcL zs-p{6hKgY?8i|evFPe(Xd9rWgggg$dnl(`8sLFlN`S3#?DnC!BR1T%Cr72w0BdV&cb_i>?-+CJ zQYcs7-(6b0552$lyD2ln&TXIKiXht_@T5I*1f~ z#Jnxu3&WY(E4^U{iTo4ti|d~bo#oYP$Ky@S%^wRtNZNjha*-J2>{b|_ikcO=*=`5{>v?vT+8aCUv)*zB8a|u$~*)Nfc!zoe}C@2e-PZv26AZ*H6x>Jh<~R9cZNqtr~0fejU#&$WnZ+i zO2Z1JVnSc!&A7hOE+dx+$(0#7iTTf0N{oWPYinu}ucNcZ%ZtQ75@r{#T0`Yx!uO#Q znRMZu*@+g zu?KhV-Yu$Hi#G8=ba654+J*A+ROIrfgj@SOlsxAqoyr*&Xvq?2p~|&to|9m3ycLPr zE`<0g$b8k=ACuM9)%8m*Kgube;OVt_;Z770ykJ2X4GJoAuf8XcD)^>6;{6^^#NFFE zd;X{!7-Yg?Phi80tSpyumW4fJ2@yZ9iKlKq&|TwO^KmLx+sNn%Ci$`<#8}};OS+~z zGNY$H);WfuQ5gO5g$pkq1aDWIZGZM`2o4JU&Yek^6z`o_vH=o76=*@px+``C(Ae)( zMnZx#+SE=0B|cxYLPfa~7Dg1$#S!w?Le_2apekyxal#;Fja*t4N;=^3DFhLSGXEpv z!?=x&4a}11%RefJH{Ga~ZlI~sr)A@eqJlm!Cwc+CIE2C7agFYn1WHU)Mgg8?y@sD3!SE|)j7 zL2SMMr$ivWeJh+8~+H+ljnuv(Naks{+04k^U-XWPLU;-KE!}@cXQ~9?zPnQo8 z^Jy4~5nEsRy?WHg-TgU{h!5kXgOE_Oepbi>_rg%_OUNqF0k$rDT=5^p_( z>em5PTtQww4b`C>7G2jq*Dh`#$Zvu$=jC9c%L)cIebj*MfSoemHlyRYuC1eEJKmO} zU+$8QaPCMsy{pgV0=i%mV1$8ltS<^D- zk*DKOye#6wUiOvPJ^oQOZyYT4;<>2vmmtY?jQm3Xj9hbj>4cWE{i)n0q-kT^h9+8y zbB9fc;by#XG9xXm1%3zB)z=1+Yj~@$^7?YW8&>#I>@Nmp<}9Kh!xXBC&Ym6}9CjA_ z{ofV^Y!*FwYuE7O*EjxhtHMVuz|rKGznqFp%!~}_p}^en=~L12+YRzp)ENvWGn>TK zurf$AF)%zRVPQBRz{0?=T$zEPO@V>ILY9%ifMqrV;~b#H58@4(8BF3pN^}^ZQj81- zUUD!rC~`0`_yJF#;0c^BU(CdCp`uVoA)y19-RI{|E&fzj#c)jtXdvj2rpvkv1%lHU z7)-3@R5JWA2OXr$mc+@h;U=)U{t8^v_v=f~@y8c|lW)4OPFMjKPXGr>HrLcCr|)Fi zAjb$)2CVB}m2hvf1U4m>`OJJ&BF*rR(Hm%pLH19s$M#Y&j# zf!*{gz%m5567 z+xb)s*rfRRCkD8jfbqanz1?p%RsG!$+>NZra6kf>f*2AMz|jv(8bmW6X(UJh`>(#c zJ{;n%tpYaURxDa{Xi?t-ecUQe_=>v0{fXmX}7w+N&8m>=zjF1w0 gqiTsu<`@1mikXSt%b&X?7#QUYp00i_>zopr0IKfC7XSbN literal 0 HcmV?d00001 diff --git a/docs/docs/index.md b/docs/docs/index.md new file mode 100644 index 0000000..076e202 --- /dev/null +++ b/docs/docs/index.md @@ -0,0 +1,178 @@ +# Rigging + +Rigging is a lightweight LLM interaction framework built on Pydantic XML and LiteLLM. It supports useful primitives for validating LLM output and adding tool calling abilities to models that don't natively support it. It also has various helpers for common tasks like structured object parsing, templating chats, overloading generation parameters, stripping chat segments, and continuing conversations. + +Modern python with type hints, pydantic validation, native serialization support, etc. + +``` +pip install rigging +``` + +### Overview + +The basic flow in rigging is: + +1. Get a generator object +2. 
 Call `.chat()` to produce a `PendingChat`
+3. Call `.run()` on a `PendingChat` to get a `Chat`
+
+`PendingChat` objects hold any messages waiting to be delivered to an LLM in exchange
+for a new response message. Afterwards, the pending chat is converted into a `Chat`, which holds
+all messages prior to generation (`.prev`) and after generation (`.next`).
+
+You should think of `PendingChat` objects as the configurable pre-generation step,
+with calls like `.overload()`, `.apply()`, `.until()`, `.using()`, etc. Once you call
+`.run()`, the generator is used to produce the next message based on the prior context
+and any constraints you have in place. Once you have a `Chat` object, the interaction
+is "done" and you can inspect/parse the messages.
+
+You'll often see us use functional-style chaining, as most of our
+utility functions return the object back to you.
+
+```python
+chat = generator.chat(...).using(...).until(...).overload(...).run()
+```
+
+### Continuing Chats
+
+```python
+import rigging as rg
+
+generator = rg.get_generator("gpt-3.5-turbo")
+chat = generator.chat([
+    {"role": "user", "content": "Hello, how are you?"},
+])
+
+# We can fork (continue_) before generation has occurred
+specific = chat.fork("Be specific please.").run()
+poetic = chat.fork("Be as poetic as possible").overload(temperature=1.5).run()
+
+# We can also fork (continue_) after generation
+next_chat = poetic.fork(
+    {"role": "user", "content": "That's good, tell me a joke"}
+)
+
+update = next_chat.run()
+```
+
+### Basic Templating
+
+```python
+import rigging as rg
+
+template = rg.get_generator("gpt-4").chat([
+    {"role": "user", "content": "What is the capital of $country?"},
+])
+
+for country in ["France", "Germany"]:
+    print(template.apply(country=country).run().last)
+
+# The capital of France is Paris.
+# The capital of Germany is Berlin.
+```
+
+### Overload Generation Params
+
+```python
+import rigging as rg
+
+pending = rg.get_generator("gpt-3.5-turbo,max_tokens=50").chat([
+    {"role": "user", "content": "Say a haiku about boats"},
+])
+
+for temp in [0.1, 0.5, 1.0]:
+    print(pending.overload(temperature=temp).run().last.content)
+
+```
+
+### Complex Models
+
+```python
+import rigging as rg
+
+class Inner(rg.Model):
+    type: str = rg.attr()
+    content: str
+
+class Outer(rg.Model):
+    name: str = rg.attr()
+    inners: list[Inner] = rg.element()
+
+outer = Outer(name="foo", inners=[
+    Inner(type="cat", content="meow"),
+    Inner(type="dog", content="bark")
+])
+
+print(outer.to_pretty_xml())
+
+# <outer name="foo">
+#   <inner type="cat">meow</inner>
+#   <inner type="dog">bark</inner>
+# </outer>
+```
+
+### Strip Parsed Sections
+
+```python
+import rigging as rg
+
+class Reasoning(rg.Model):
+    content: str
+
+meaning = rg.get_generator("claude-2.1").chat([
+    {
+        "role": "user",
+        "content": "What is the meaning of life in one sentence? "
+        f"Document your reasoning between {Reasoning.xml_tags()} tags.",
+    },
+]).run()
+
+# Gracefully handle missing models
+reasoning = meaning.last.try_parse(Reasoning)
+if reasoning:
+    print("reasoning:", reasoning.content.strip())
+
+# Strip parsed content to avoid sharing
+# previous thoughts with the model.
+without_reasons = meaning.strip(Reasoning)
+print("meaning of life:", without_reasons.last.content.strip())
+
+# follow_up = without_reasons.continue_(...)
+```
+
+### Custom Generator
+
+Any custom generator simply needs to implement a `complete` function, and
+then it can be used anywhere inside rigging.
+
+```python
+import typing as t
+
+import rigging as rg
+from rigging.generator import GenerateParams, Generator
+
+class Custom(Generator):
+    # model: str
+    # api_key: str
+    # params: GenerateParams
+
+    custom_field: bool
+
+    def complete(
+        self,
+        messages: t.Sequence[rg.Message],
+        overloads: GenerateParams = GenerateParams(),
+    ) -> rg.Message:
+        # Access self vars where needed
+        api_key = self.api_key
+        model_id = self.model
+
+        # Merge in args for API overloads
+        merged: dict[str, t.Any] = self._merge_params(overloads)
+
+        # response: str = ...
+
+        return rg.Message("assistant", response)
+
+
+generator = Custom(model='foo', custom_field=True)
+generator.chat(...)
+```
+
+*Note: we currently don't have any way to "register" custom generators for `get_generator`.*
+
diff --git a/docs/docs/tutorial/chat.md b/docs/docs/tutorial/chat.md
new file mode 100644
index 0000000..cb82549
--- /dev/null
+++ b/docs/docs/tutorial/chat.md
@@ -0,0 +1,29 @@
+### Basic Chats
+
+```python
+import rigging as rg
+
+generator = rg.get_generator("claude-2.1")
+chat = generator.chat(
+    [
+        {"role": "system", "content": "You are a wizard harry."},
+        {"role": "user", "content": "Say hello!"},
+    ]
+).run()
+
+print(chat.last)
+# [assistant]: Hello!
+
+print(f"{chat.last!r}")
+# Message(role='assistant', parts=[], content='Hello!')
+
+print(chat.prev)
+# [
+#   Message(role='system', parts=[], content='You are a wizard harry.'),
+#   Message(role='user', parts=[], content='Say hello!'),
+# ]
+
+print(chat.json)
+# [{ ... }]
+
+```
\ No newline at end of file
diff --git a/docs/docs/tutorial/generators.md b/docs/docs/tutorial/generators.md
new file mode 100644
index 0000000..e69de29
diff --git a/docs/docs/tutorial/logging.md b/docs/docs/tutorial/logging.md
new file mode 100644
index 0000000..8ce8c22
--- /dev/null
+++ b/docs/docs/tutorial/logging.md
@@ -0,0 +1,22 @@
+### Logging
+
+By default rigging disables its logger with loguru. To enable it run:
+
+```python
+from loguru import logger
+
+logger.enable('rigging')
+```
+
+To configure loguru terminal + file logging format overrides:
+
+```python
+from rigging.logging import configure_logging
+
+configure_logging(
+    'info',      # stderr level
+    'out.log',   # log file (optional)
+    'trace'      # log file level
+)
+```
+*(This will remove existing handlers, so you might prefer to configure them yourself)*
diff --git a/docs/docs/tutorial/model.md b/docs/docs/tutorial/model.md
new file mode 100644
index 0000000..07eaa1d
--- /dev/null
+++ b/docs/docs/tutorial/model.md
@@ -0,0 +1,59 @@
+### Model Parsing
+
+```python
+import rigging as rg
+
+class Answer(rg.Model):
+    content: str
+
+chat = (
+    rg.get_generator("claude-3-haiku-20240307")
+    .chat([
+        {"role": "user", "content": f"Say your name between {Answer.xml_tags()}."},
+    ])
+    .until_parsed_as(Answer)
+    .run()
+)
+
+answer = chat.last.parse(Answer)
+print(answer.content)
+
+# "Claude"
+
+print(f"{chat.last!r}")
+
+# Message(role='assistant', parts=[
+#   ParsedMessagePart(model=Answer(content='Claude'), ref='<answer>Claude</answer>')
+# ], content='<answer>Claude</answer>')
+
+chat.last.content = "new content"  # Updating content strips parsed parts
+print(f"{chat.last!r}")
+
+# Message(role='assistant', parts=[], content='new content')
+```
+
+### Multiple Models
+
+```python
+import rigging as rg
+
+class Joke(rg.Model):
+    content: str
+
+chat = (
+    rg.get_generator("claude-2.1")
+    .chat([{
+        "role": "user",
+        "content": f"Provide 3 short jokes each wrapped with {Joke.xml_tags()} tags."},
+    ])
+    .run()
+)
+
+jokes = chat.last.parse_set(Joke)
+
+# [
+#   Joke(content="Why don't eggs tell jokes? They'd crack each other up!"),
+#   Joke(content='What do you call a bear with no teeth? A gummy bear!'),
+#   Joke(content='What do you call a fake noodle? An Impasta!')
+# ]
+```
\ No newline at end of file
diff --git a/docs/docs/tutorial/tools.md b/docs/docs/tutorial/tools.md
new file mode 100644
index 0000000..9f7c475
--- /dev/null
+++ b/docs/docs/tutorial/tools.md
@@ -0,0 +1,36 @@
+### Tools
+
+```python
+from typing import Annotated
+import rigging as rg
+
+class WeatherTool(rg.Tool):
+    @property
+    def name(self) -> str:
+        return "weather"
+
+    @property
+    def description(self) -> str:
+        return "A tool to get the weather for a location"
+
+    def get_for_city(self, city: Annotated[str, "The city name to get weather for"]) -> str:
+        print(f"[=] get_for_city('{city}')")
+        return f"The weather in {city} is nice today"
+
+chat = (
+    rg.get_generator("mistral/mistral-tiny")
+    .chat(
+        [
+            {"role": "user", "content": "What is the weather in London?"},
+        ]
+    )
+    .using(WeatherTool(), force=True)
+    .run()
+)
+
+# [=] get_for_city('London')
+
+print(chat.last.content)
+
+# "Based on the information I've received, the weather in London is nice today."
+```
\ No newline at end of file
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml
new file mode 100644
index 0000000..5f408da
--- /dev/null
+++ b/docs/mkdocs.yml
@@ -0,0 +1,54 @@
+site_name: Rigging
+site_url: https://rigging.dreadnode.io
+repo_url: https://github.com/dreadnode/rigging
+theme:
+  logo: images/logo.png
+  name: material
+  icon:
+    repo: fontawesome/brands/github
+  palette:
+    scheme: slate
+  features:
+    - toc.integrate
+    - navigation.footer
+    - navigation.indexes
+    - navigation.sections
+    - navigation.expand
+    - navigation.path
+    - content.code.copy
+    - navigation.top
+plugins:
+  - search
+  - mkdocstrings:
+      handlers:
+        python:
+          rendering:
+            show_source: true
+
+markdown_extensions:
+  - pymdownx.highlight:
+      anchor_linenums: true
+      line_spans: __span
+      pygments_lang_class: true
+  - pymdownx.inlinehilite
+  - pymdownx.snippets
+  - pymdownx.superfences
+  - admonition
+  - pymdownx.details
+
+nav:
+  - introduction: index.md
+  - generators: tutorial/generators.md
+  - chats: tutorial/chat.md
+  - models: tutorial/model.md
+  - tools: tutorial/tools.md
+  - logging: tutorial/logging.md
+  - API:
+    - chat: api/chat.md
+    - generator: api/generators.md
+    - message: api/message.md
+    - model: api/model.md
+    - prompt: api/prompt.md
+    - tools: api/tools.md
+    - logging: api/logging.md
+    - exceptions: api/errors.md
diff --git a/rigging/chat.py b/rigging/chat.py
index 764d807..eebaff3 100644
--- a/rigging/chat.py
+++ b/rigging/chat.py
@@ -21,6 +21,10 @@

 class Chat:
+    """A chat object that contains a list of messages and a list of next messages. The next messages are messages that
+    are generated by the generator and are not part of the original chat. This object is used to keep track of the
+    conversation and generate new messages based on the current state of the conversation.
+    """
     def __init__(
         self,
         messages: Messages,
@@ -38,6 +42,8 @@ def __len__(self) -> int:

     @property
     def all(self) -> list[Message]:
+        """Returns all messages in the chat, including the next messages.
+        """
         return self.messages + self.next_messages

     @property
diff --git a/rigging/error.py b/rigging/error.py
index 9e6e272..1d9b078 100644
--- a/rigging/error.py
+++ b/rigging/error.py
@@ -1,14 +1,22 @@
 class ExhaustedMaxRoundsError(Exception):
+    """Raised when the maximum number of rounds is exceeded while generating.
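+
+    A rough sketch of handling this during constrained generation
+    (`pending` and `Answer` are illustrative names):
+
+        try:
+            chat = pending.until_parsed_as(Answer).run()
+        except ExhaustedMaxRoundsError as error:
+            print(f"Gave up after {error.max_rounds} rounds")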
+ """ def __init__(self, max_rounds: int): + """Initializes the exception with the maximum number of rounds that was exceeded. + """ super().__init__(f"Exhausted max rounds ({max_rounds}) while generating") self.max_rounds = max_rounds class InvalidModelSpecifiedError(Exception): + """Raised when an invalid model is specified. + """ def __init__(self, model: str): super().__init__(f"Invalid model specified: {model}") class MissingModelError(Exception): + """Raised when a model is missing. + """ def __init__(self, content: str): super().__init__(content) diff --git a/rigging/generator.py b/rigging/generator.py index ee8b154..df3d8bd 100644 --- a/rigging/generator.py +++ b/rigging/generator.py @@ -110,7 +110,7 @@ def get_generator(identifier: str) -> Generator: :raises InvalidModelSpecified: If the identifier is invalid Examples: - "gpt-3.5-turbo" -> LiteLLMGenerator(model="gpt-3.5-turbo") + `gpt-3.5-turbo" -> LiteLLMGenerator(model="gpt-3.5-turbo")` "litellm!claude-2.1" -> LiteLLMGenerator(model="claude-2.1") "mistral/mistral-tiny" -> LiteLLMGenerator(model="mistral/mistral-tiny") diff --git a/rigging/tool.py b/rigging/tool.py index 18deb25..5fbad1c 100644 --- a/rigging/tool.py +++ b/rigging/tool.py @@ -22,6 +22,24 @@ class ToolCallParameter(Model): + """ + Represents a parameter for a tool call. + + Attributes: + name (str): The name of the parameter. + attr_value (SUPPORTED_TOOL_ARGUMENT_TYPES | None): The attribute value of the parameter. + text_value (SUPPORTED_TOOL_ARGUMENT_TYPES | None): The text value of the parameter. + + Computed Attributes: + value (SUPPORTED_TOOL_ARGUMENT_TYPES): The computed value of the parameter. + + Methods: + validate_value: Validates the value of the parameter. + + Raises: + ValueError: If the parameter value is missing. + """ + name: str = attr() attr_value: SUPPORTED_TOOL_ARGUMENT_TYPES | None = attr("value", default=None, exclude=True) text_value: SUPPORTED_TOOL_ARGUMENT_TYPES | None = Field(default=None, exclude=True) @@ -49,6 +67,13 @@ class ToolCall(Model, tag="call"): class ToolCalls(Model, tag="tool_calls"): + """ + Represents a collection of tool calls. + + Attributes: + calls (list[ToolCall]): The list of tool calls. + """ + calls: list[ToolCall] = element() # This can be used in prompts to teach the model @@ -57,8 +82,15 @@ class ToolCalls(Model, tag="tool_calls"): # TODO: We should consider building a base model # interface for both simple tags () # and full examples will filled in template vars + @classmethod def xml_example(cls) -> str: + """ + Generates an XML example of the ToolCalls structure. + + Returns: + str: The XML example. + """ return cls( calls=[ ToolCall( @@ -82,6 +114,15 @@ def xml_example(cls) -> str: # Description of a single tool parameter class ToolParameter(Model, tag="parameter"): + """ + Represents a parameter for a tool. + + Attributes: + name (str): The name of the parameter. + type (str): The type of the parameter. + description (str): A description of the parameter. 
+ """ + name: str = attr() type: str = attr() description: str = attr() From 1b79b55565f2f66297882f6784207d90a781be84 Mon Sep 17 00:00:00 2001 From: monoxgas Date: Thu, 2 May 2024 11:50:54 -0600 Subject: [PATCH 03/16] Early refactor efforts --- rigging/chat.py | 75 ++++++++++++++++++++---------------------- rigging/generator.py | 23 +++++++------ rigging/message.py | 4 ++- tests/test_messages.py | 2 +- 4 files changed, 52 insertions(+), 52 deletions(-) diff --git a/rigging/chat.py b/rigging/chat.py index 764d807..7afaefb 100644 --- a/rigging/chat.py +++ b/rigging/chat.py @@ -1,7 +1,8 @@ import typing as t +from uuid import UUID, uuid4 from loguru import logger -from pydantic import ValidationError +from pydantic import BaseModel, Field, ValidationError from rigging.error import ExhaustedMaxRoundsError from rigging.message import Message, MessageDict, Messages @@ -20,17 +21,21 @@ DEFAULT_MAX_ROUNDS = 5 -class Chat: +class Chat(BaseModel): + messages: list[Message] + next_messages: list[Message] = Field(default_factory=list) + pending_chat: "PendingChat" | None = None + def __init__( self, messages: Messages, next_messages: Messages | None = None, pending: t.Optional["PendingChat"] = None, ): - self.messages: list[Message] = Message.fit_list(messages) + self.messages: list[Message] = Message.fit_as_list(messages) self.next_messages: list[Message] = [] if next_messages is not None: - self.next_messages = Message.fit_list(next_messages) + self.next_messages = Message.fit_as_list(next_messages) self.pending_chat = pending def __len__(self) -> int: @@ -115,10 +120,17 @@ def inject_tool_prompt(self, tools: t.Sequence[Tool]) -> None: UntilCallback = t.Callable[[Message], tuple[bool, list[Message]]] -class PendingChat: - def __init__(self, generator: "Generator", messages: t.Sequence[Message], params: "GenerateParams"): +class PendingChat(BaseModel): + uuid: UUID = Field(default_factory=uuid4) + parent: "PendingChat" | None = None + generator: "Generator" + params: "GenerateParams" | None = None + chat: Chat + + def __init__(self, generator: "Generator", messages: t.Sequence[Message], params: "GenerateParams" | None = None): self.generator: "Generator" = generator self.chat: Chat = Chat(messages, pending=self) + self.params = params # (callback, attempt_recovery, drop_dialog, max_rounds) self.until_callbacks: list[tuple[UntilCallback, bool, bool, int]] = [] @@ -127,26 +139,24 @@ def __init__(self, generator: "Generator", messages: t.Sequence[Message], params self.inject_tool_prompt: bool = True self.force_tool: bool = False - self.params = params - def overload(self, **kwargs: t.Any) -> "PendingChat": from rigging.generator import GenerateParams return self.with_params(GenerateParams(**kwargs)) def with_params(self, params: "GenerateParams") -> "PendingChat": - if params is not None: - self.params = params + if self.params is not None: + new = self.clone() + new.params = params + return new + + self.params = params return self def add( self, messages: t.Sequence[MessageDict] | t.Sequence[Message] | MessageDict | Message | str ) -> "PendingChat": - message_list: list[Message] = ( - [Message.fit(messages)] - if not isinstance(messages, t.Sequence) or isinstance(messages, str) - else Message.fit_list(messages) - ) + message_list = Message.fit_as_list(messages) # If the last message is the same role as the first new message, append to it if self.chat.next_messages and self.chat.next_messages[-1].role == message_list[0].role: self.chat.next_messages[-1].content += "\n" + message_list[0].content @@ 
-168,6 +178,11 @@ def continue_( def clone(self) -> "PendingChat": new = PendingChat(self.generator, [], self.params) new.chat = self.chat.clone() + new.until_callbacks = self.until_callbacks.copy() + new.until_types = self.until_types.copy() + new.until_tools = self.until_tools.copy() + new.inject_tool_prompt = self.inject_tool_prompt + new.force_tool = self.force_tool return new def apply(self, **kwargs: str) -> "PendingChat": @@ -313,7 +328,7 @@ def _until( logger.trace( f"_until({callback.__name__}) round {_ + 1}/{max_rounds} (attempt_recovery={attempt_recovery})" ) - next_message = self.generator.complete(messages[:-1] + running_messages, self.params) + next_message = self.generator.complete(messages[:-1] + running_messages, self.params or GenerateParams()) should_continue, step_messages = callback(next_message) logger.trace(f" |- returned {should_continue} with {len(step_messages)} new messages)") @@ -338,7 +353,7 @@ def _execute(self) -> list[Message]: if self.inject_tool_prompt: self.chat.inject_tool_prompt(self.until_tools) - new_messages: list[Message] = [self.generator.complete(self.chat.all, self.params)] + new_messages: list[Message] = [self.generator.complete(self.chat.all, self.params or GenerateParams())] for callback, reset_between, drop_internal, max_rounds in self.until_callbacks: next_messages = self._until( @@ -348,29 +363,6 @@ def _execute(self) -> list[Message]: return new_messages - @t.overload - def run_with( - self, - messages: t.Sequence[MessageDict] | t.Sequence[Message] | MessageDict | Message | str, - count: t.Literal[None] = None, - ) -> Chat: - ... - - @t.overload - def run_with( - self, - messages: t.Sequence[MessageDict] | t.Sequence[Message] | MessageDict | Message | str, - count: int, - ) -> list[Chat]: - ... - - def run_with( - self, - messages: t.Sequence[MessageDict] | t.Sequence[Message] | MessageDict | Message | str, - count: int | None = None, - ) -> Chat | list[Chat]: - return self.add(messages).run(count) - @t.overload def run(self, count: t.Literal[None] = None) -> Chat: ... @@ -389,3 +381,6 @@ def run_many(self, count: int) -> list[Chat]: return [Chat(self.chat.all, self._execute(), pending=self) for _ in range(count)] __call__ = run + + + diff --git a/rigging/generator.py b/rigging/generator.py index ee8b154..0c629d8 100644 --- a/rigging/generator.py +++ b/rigging/generator.py @@ -63,18 +63,21 @@ def _merge_params(self, overloads: GenerateParams) -> dict[str, t.Any]: def complete(self, messages: t.Sequence[Message], overloads: GenerateParams) -> Message: ... - @t.overload - def chat(self, messages: t.Sequence[MessageDict], overloads: GenerateParams | None = None) -> PendingChat: - ... - - @t.overload - def chat(self, messages: t.Sequence[Message], overloads: GenerateParams | None = None) -> PendingChat: - ... 
-    def chat(
-        self, messages: t.Sequence[MessageDict] | t.Sequence[Message], overloads: GenerateParams | None = None
-    ) -> PendingChat:
-        return PendingChat(self, Message.fit_list(messages), overloads or GenerateParams())
+    def chat(
+        self, messages: t.Sequence[MessageDict] | t.Sequence[Message] | str, overloads: GenerateParams | None = None
+    ) -> PendingChat:
+        return PendingChat(self, Message.fit_as_list(messages), overloads)
+
+
+# Helper function external to a generator
+
+
+def chat(
+    generator: "Generator",
+    messages: t.Sequence[MessageDict] | t.Sequence[Message] | MessageDict | Message | str,
+    overloads: GenerateParams | None = None,
+) -> PendingChat:
+    return PendingChat(generator, Message.fit_as_list(messages), overloads)


 class LiteLLMGenerator(Generator):
diff --git a/rigging/message.py b/rigging/message.py
index 9685790..725c6a7 100644
--- a/rigging/message.py
+++ b/rigging/message.py
@@ -201,7 +201,9 @@ def from_model(
         return cls(role=role, content=content, parts=parts)

     @classmethod
-    def fit_list(cls, messages: t.Sequence["Message"] | t.Sequence[MessageDict]) -> list["Message"]:
+    def fit_as_list(
+        cls, messages: t.Sequence[MessageDict] | t.Sequence["Message"] | MessageDict | "Message" | str
+    ) -> list["Message"]:
+        # Normalize a single message (or string) before mapping over a sequence,
+        # otherwise a bare string would be iterated character by character.
+        if isinstance(messages, (Message, dict, str)):
+            return [cls.fit(messages)]
         return [cls.fit(message) for message in messages]

     @classmethod
diff --git a/tests/test_messages.py b/tests/test_messages.py
index 99c8019..98fce70 100644
--- a/tests/test_messages.py
+++ b/tests/test_messages.py
@@ -105,7 +105,7 @@ def test_message_from_model() -> None:
 def test_messages_fit_list() -> None:
     messages: t.Any = [{"role": "system", "content": "You are an AI assistant."}, Message("user", "Hello!")]
-    fitted = Message.fit_list(messages)
+    fitted = Message.fit_as_list(messages)
     assert len(fitted) == 2
     assert isinstance(fitted[0], Message)
     assert isinstance(fitted[1], Message)

From 41bcb8b8257d8bd63ca0a07d175407f09d816ee4 Mon Sep 17 00:00:00 2001
From: monoxgas
Date: Thu, 2 May 2024 13:20:57 -0600
Subject: [PATCH 04/16] Added async support to generators

Added optional override for text completion
Improved docs on some functions
Added register_generator
---
 rigging/generator.py | 193 +++++++++++++++++++++++++++++++++++++++----
 1 file changed, 178 insertions(+), 15 deletions(-)

diff --git a/rigging/generator.py b/rigging/generator.py
index 0c629d8..be13612 100644
--- a/rigging/generator.py
+++ b/rigging/generator.py
@@ -26,6 +26,10 @@
 # parallel generation eventually -> need to
 # update our interfaces to support that
 class GenerateParams(BaseModel):
+    """
+    Common parameters for generating text using a language model.
+    """
+
     model_config = ConfigDict(extra="forbid")

     temperature: float | None = None
@@ -52,20 +56,107 @@ class Generator(BaseModel, abc.ABC):
     api_key: str | None = None
     params: GenerateParams

-    def _merge_params(self, overloads: GenerateParams) -> dict[str, t.Any]:
+    def _merge_params(self, overloads: GenerateParams | None = None) -> dict[str, t.Any]:
+        """
+        Helper to merge the parameters of the current instance with the provided `overloads` parameters.
+
+        Typically used to prepare a dictionary of API parameters for a request.
+
+        Args:
+            overloads (GenerateParams): The parameters to be merged with the current instance's parameters.
+
+        Returns:
+            dict[str, t.Any]: The merged parameters.
+ + """ params: dict[str, t.Any] = self.params.model_dump(exclude_unset=True) if self.params else {} + if overloads is None: + return params + for name, value in overloads.model_dump(exclude_unset=True).items(): if value is not None: params[name] = value + return params + def complete_text(self, text: str, overloads: GenerateParams | None = None) -> str: + """ + Generates a string completion of the given text. + + Args: + text (str): The input text to be completed. + overloads (GenerateParams | None, optional): The parameters to be used for completion. + + Returns: + str: The completed text. + + Raises: + NotImplementedError: This generator does not support the `complete_text` method. + """ + raise NotImplementedError("complete_text is not supported by this generator.") + + def acomplete_text(self, text: str, overloads: GenerateParams | None = None) -> t.Coroutine[None, None, str]: + """ + Asynchronously generates a string completion of the given text. + + Args: + text (str): The input text to be completed. + overloads (GenerateParams | None, optional): The parameters to be used for completion. + + Returns: + Coroutine[None, None, str]: A coroutine that yields the completed text. + + Raises: + NotImplementedError: This generator does not support the `acomplete_text` method. + """ + raise NotImplementedError("acomplete_text is not supported by this generator.") + + @abc.abstractmethod + def complete(self, messages: t.Sequence[Message], overloads: GenerateParams | None = None) -> Message: + """ + Generates the next message for a given set of messages. + + Args: + messages (Sequence[Message]): The list of messages to generate completion for. + overloads (GenerateParams | None, optional): The parameters to be used for completion. + + Returns: + Message: The generated completion message. + + """ + ... + @abc.abstractmethod - def complete(self, messages: t.Sequence[Message], overloads: GenerateParams) -> Message: + def acomplete( + self, messages: t.Sequence[Message], overloads: GenerateParams | None = None + ) -> t.Coroutine[None, None, Message]: + """ + Asynchronously generates the next message for a given set of messages. + + Args: + messages (Sequence[Message]): A sequence of messages. + overloads (GenerateParams | None, optional): The parameters to be used for completion. + + Returns: + Coroutine[None, None, Message]: A coroutine that yields completion messages. + + """ ... def chat( self, messages: t.Sequence[MessageDict] | t.Sequence[Message] | str, overloads: GenerateParams | None = None ) -> PendingChat: + """ + Initiates a pending chat with the given messages and optional overloads. + + Args: + messages (Sequence[MessageDict] | Sequence[Message] | str): The messages to be sent in the chat. + overloads (GenerateParams | None, optional): Optional parameters for generating responses. Defaults to None. + + Returns: + PendingChat: A PendingChat object representing the ongoing chat. + + """ return PendingChat(self, Message.fit_as_list(messages), overloads) @@ -77,27 +168,83 @@ def chat( messages: t.Sequence[MessageDict] | t.Sequence[Message] | MessageDict | Message | str, overloads: GenerateParams | None = None, ) -> PendingChat: + """ + Creates a pending chat using the given generator, messages, and overloads. + + Args: + generator (Generator): The generator to use for creating the chat. + messages (Sequence[MessageDict] | Sequence[Message] | MessageDict | Message | str): + The messages to include in the chat. Can be a single message or a sequence of messages. 
+
+
+g_providers: dict[str, type["Generator"]] = {
+    "litellm": LiteLLMGenerator,
+}
+

 def get_generator(identifier: str) -> Generator:
     """
     (These get parsed as GenerateParams)
     """
-    provider: str = "litellm"
+    provider: str = next(iter(g_providers))
     model: str = identifier
     api_key: str | None = None
     params: GenerateParams = GenerateParams()

     except Exception as e:
         raise InvalidModelSpecifiedError(identifier) from e

-    if provider == "litellm":
-        return LiteLLMGenerator(model=model, api_key=api_key, params=params)
-    else:
+    if provider not in g_providers:
         raise InvalidModelSpecifiedError(identifier)
+
+    generator_cls = g_providers[provider]
+    return generator_cls(model=model, api_key=api_key, params=params)
+
+
+def register_generator(provider: str, generator_cls: type[Generator]) -> None:
+    """
+    Register a generator class for a provider id.
+
+    Args:
+        provider (str): The name of the provider.
+        generator_cls (type[Generator]): The generator class to register.
+
+    Returns:
+        None
+    """
+    global g_providers
+    g_providers[provider] = generator_cls
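+
+
+# A short usage sketch (illustrative only; assumes `Custom` is a
+# Generator subclass like the one shown in the docs):
+#
+#   register_generator("custom", Custom)
+#   generator = get_generator("custom!some-model,temperature=0.5")
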
From 27d5abcd00f74fb33ebe3d6ea7e4093f086c069b Mon Sep 17 00:00:00 2001
From: monoxgas
Date: Thu, 2 May 2024 14:49:13 -0600
Subject: [PATCH 05/16] Wrap initial async support

Bug fixes in part strip slicing and add()
---
 rigging/__init__.py              |   7 +-
 rigging/chat.py                  | 159 +++++++++++++++++++------------
 rigging/generator.py             | 105 ++++++++++++++++++--
 rigging/message.py               |  17 ++++
 tests/test_generation.py         |  10 +-
 tests/test_generator_creation.py |  12 ++-
 tests/test_messages.py           |  13 ++-
 7 files changed, 244 insertions(+), 79 deletions(-)

diff --git a/rigging/__init__.py b/rigging/__init__.py
index a2dee15..49ac7eb 100644
--- a/rigging/__init__.py
+++ b/rigging/__init__.py
@@ -1,5 +1,5 @@
-from rigging.chat import Chat, PendingChat
-from rigging.generator import GenerateParams, Generator, get_generator
+from rigging.chat import AsyncPendingChat, Chat, PendingChat
+from rigging.generator import GenerateParams, Generator, achat, chat, get_generator
 from rigging.message import Message, MessageDict, Messages
 from rigging.model import Model, attr, element, wrapped
 from rigging.tool import Tool
@@ -16,8 +16,11 @@
     "wrapped",
     "Chat",
     "PendingChat",
+    "AsyncPendingChat",
     "Generator",
     "GenerateParams",
+    "chat",
+    "achat",
 ]

 from loguru import logger
diff --git a/rigging/chat.py b/rigging/chat.py
index 7afaefb..3990f00 100644
--- a/rigging/chat.py
+++ b/rigging/chat.py
@@ -1,8 +1,9 @@
+import asyncio
 import typing as t
-from uuid import UUID, uuid4

 from loguru import logger
-from pydantic import BaseModel, Field, ValidationError
+from pydantic import BaseModel, ConfigDict, Field, ValidationError
+from typing_extensions import Self

 from rigging.error import ExhaustedMaxRoundsError
 from rigging.message import Message, MessageDict, Messages

 class Chat(BaseModel):
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
     messages: list[Message]
     next_messages: list[Message] = Field(default_factory=list)
-    pending_chat: "PendingChat" | None = None
+    pending: t.Optional["PendingChatBase"] = Field(None, exclude=True)

     def __init__(
         self,
         messages: Messages,
         next_messages: Messages | None = None,
-        pending: t.Optional["PendingChat"] = None,
+        pending: t.Optional["PendingChatBase"] = None,
     ):
-        self.messages: list[Message] = Message.fit_as_list(messages)
-        self.next_messages: list[Message] = []
-        if next_messages is not None:
-            self.next_messages = Message.fit_as_list(next_messages)
-        self.pending_chat = pending
+        super().__init__(
+            messages=Message.fit_as_list(messages),
+            next_messages=Message.fit_as_list(next_messages) if next_messages is not None else [],
+            pending=pending,
+        )

     def __len__(self) -> int:
         return len(self.messages) + len(self.next_messages)

     def last(self) -> Message:
         return self.next_messages[-1]

-    @property
-    def json(self) -> list[MessageDict]:
-        return [t.cast(MessageDict, message.model_dump()) for message in self.all]
-
     def restart(self,
generator: t.Optional["Generator"] = None) -> "PendingChat": + if generator is not None: + return generator.chat(self.messages) + elif self.pending is None: raise ValueError("Cannot restart chat that was not created with a PendingChat") - return PendingChat(self.pending_chat.generator, self.messages, self.pending_chat.params) + return PendingChat(self.pending.generator, self.messages, self.pending.params) - # TODO: Why are these overloads here? I wonder if IDEs preferred them + def arestart(self, generator: t.Optional["Generator"] = None) -> "AsyncPendingChat": + if generator is not None: + return generator.achat(self.messages) + elif self.pending is None: + raise ValueError("Cannot restart chat that was not created with a PendingChat") + return AsyncPendingChat(self.pending.generator, self.messages, self.pending.params) def fork( self, messages: t.Sequence[Message] | t.Sequence[MessageDict] | Message | MessageDict | str ) -> "PendingChat": - if self.pending_chat is None: - raise ValueError("Cannot continue chat that was not created with a PendingChat") + return self.restart().add(messages) - pending = PendingChat(self.pending_chat.generator, self.all, self.pending_chat.params) - pending.add(messages) - return pending + def afork( + self, messages: t.Sequence[Message] | t.Sequence[MessageDict] | Message | MessageDict | str + ) -> "AsyncPendingChat": + return self.arestart().add(messages) def continue_(self, messages: t.Sequence[Message] | t.Sequence[MessageDict] | Message | str) -> "PendingChat": return self.fork(messages) def clone(self) -> "Chat": - return Chat( - [m.model_copy() for m in self.messages], [m.model_copy() for m in self.next_messages], self.pending_chat - ) + return Chat([m.model_copy() for m in self.messages], [m.model_copy() for m in self.next_messages], self.pending) def apply(self, **kwargs: str) -> "Chat": self.messages[-1].apply(**kwargs) @@ -120,14 +124,10 @@ def inject_tool_prompt(self, tools: t.Sequence[Tool]) -> None: UntilCallback = t.Callable[[Message], tuple[bool, list[Message]]] -class PendingChat(BaseModel): - uuid: UUID = Field(default_factory=uuid4) - parent: "PendingChat" | None = None - generator: "Generator" - params: "GenerateParams" | None = None - chat: Chat - - def __init__(self, generator: "Generator", messages: t.Sequence[Message], params: "GenerateParams" | None = None): +class PendingChatBase: + def __init__( + self, generator: "Generator", messages: t.Sequence[Message], params: t.Optional["GenerateParams"] = None + ): self.generator: "Generator" = generator self.chat: Chat = Chat(messages, pending=self) self.params = params @@ -139,12 +139,12 @@ def __init__(self, generator: "Generator", messages: t.Sequence[Message], params self.inject_tool_prompt: bool = True self.force_tool: bool = False - def overload(self, **kwargs: t.Any) -> "PendingChat": + def overload(self, **kwargs: t.Any) -> Self: from rigging.generator import GenerateParams return self.with_params(GenerateParams(**kwargs)) - def with_params(self, params: "GenerateParams") -> "PendingChat": + def with_params(self, params: "GenerateParams") -> Self: if self.params is not None: new = self.clone() new.params = params @@ -153,30 +153,24 @@ def with_params(self, params: "GenerateParams") -> "PendingChat": self.params = params return self - def add( - self, messages: t.Sequence[MessageDict] | t.Sequence[Message] | MessageDict | Message | str - ) -> "PendingChat": + def add(self, messages: t.Sequence[MessageDict] | t.Sequence[Message] | MessageDict | Message | str) -> Self: message_list = 
Message.fit_as_list(messages) # If the last message is the same role as the first new message, append to it - if self.chat.next_messages and self.chat.next_messages[-1].role == message_list[0].role: - self.chat.next_messages[-1].content += "\n" + message_list[0].content + if self.chat.all and self.chat.all[-1].role == message_list[0].role: + self.chat.all[-1].content += "\n" + message_list[0].content message_list = message_list[1:] else: self.chat.next_messages += message_list return self - def fork( - self, messages: t.Sequence[MessageDict] | t.Sequence[Message] | MessageDict | Message | str - ) -> "PendingChat": + def fork(self, messages: t.Sequence[MessageDict] | t.Sequence[Message] | MessageDict | Message | str) -> Self: return self.clone().add(messages) - def continue_( - self, messages: t.Sequence[MessageDict] | t.Sequence[Message] | MessageDict | Message | str - ) -> "PendingChat": + def continue_(self, messages: t.Sequence[MessageDict] | t.Sequence[Message] | MessageDict | Message | str) -> Self: return self.fork(messages) - def clone(self) -> "PendingChat": - new = PendingChat(self.generator, [], self.params) + def clone(self) -> Self: + new = self.__class__(self.generator, [], self.params) new.chat = self.chat.clone() new.until_callbacks = self.until_callbacks.copy() new.until_types = self.until_types.copy() @@ -185,12 +179,12 @@ def clone(self) -> "PendingChat": new.force_tool = self.force_tool return new - def apply(self, **kwargs: str) -> "PendingChat": + def apply(self, **kwargs: str) -> Self: new = self.clone() new.chat.apply(**kwargs) return new - def apply_to_all(self, **kwargs: str) -> "PendingChat": + def apply_to_all(self, **kwargs: str) -> Self: new = self.clone() new.chat.apply_to_all(**kwargs) return new @@ -202,7 +196,7 @@ def until( attempt_recovery: bool = False, drop_dialog: bool = True, max_rounds: int = DEFAULT_MAX_ROUNDS, - ) -> "PendingChat": + ) -> Self: self.until_callbacks.append((callback, attempt_recovery, drop_dialog, max_rounds)) return self @@ -215,7 +209,7 @@ def using( drop_dialog: bool = False, max_rounds: int = DEFAULT_MAX_ROUNDS, inject_prompt: bool | None = None, - ) -> "PendingChat": + ) -> Self: self.until_tools += tool if isinstance(tool, t.Sequence) else [tool] self.inject_tool_prompt = inject_prompt or self.inject_tool_prompt self.force_tool = force @@ -237,7 +231,7 @@ def until_parsed_as( attempt_recovery: bool = False, drop_dialog: bool = True, max_rounds: int = DEFAULT_MAX_ROUNDS, - ) -> "PendingChat": + ) -> Self: self.until_types += types if isinstance(types, t.Sequence) else [types] if next((c for c in self.until_callbacks if c[0] == self._until_parse_callback), None) is None: self.until_callbacks.append((self._until_parse_callback, attempt_recovery, drop_dialog, max_rounds)) @@ -317,7 +311,7 @@ def _until( attempt_recovery: bool, drop_dialog: bool, max_rounds: int, - ) -> list[Message]: + ) -> t.Generator[list[Message], Message, list[Message]]: should_continue, step_messages = callback(messages[-1]) if not should_continue: return step_messages @@ -328,7 +322,7 @@ def _until( logger.trace( f"_until({callback.__name__}) round {_ + 1}/{max_rounds} (attempt_recovery={attempt_recovery})" ) - next_message = self.generator.complete(messages[:-1] + running_messages, self.params or GenerateParams()) + next_message = yield messages[:-1] + running_messages should_continue, step_messages = callback(next_message) logger.trace(f" |- returned {should_continue} with {len(step_messages)} new messages)") @@ -341,7 +335,7 @@ def _until( 
logger.warning(f"Exhausted max rounds ({max_rounds})") raise ExhaustedMaxRoundsError(max_rounds) - def _execute(self) -> list[Message]: + def _execute(self) -> t.Generator[list[Message], Message, list[Message]]: if self.until_tools: # TODO: This can cause issues when certain APIs do not return # the stop sequence as part of the response. This behavior @@ -352,17 +346,21 @@ def _execute(self) -> list[Message]: if self.inject_tool_prompt: self.chat.inject_tool_prompt(self.until_tools) + self.inject_tool_prompt = False - new_messages: list[Message] = [self.generator.complete(self.chat.all, self.params or GenerateParams())] + first_message = yield self.chat.all + new_messages = [first_message] for callback, reset_between, drop_internal, max_rounds in self.until_callbacks: - next_messages = self._until( + next_messages = yield from self._until( self.chat.all + new_messages, callback, reset_between, drop_internal, max_rounds ) new_messages = new_messages[:-1] + next_messages return new_messages + +class PendingChat(PendingChatBase): @t.overload def run(self, count: t.Literal[None] = None) -> Chat: ... @@ -374,13 +372,52 @@ def run(self, count: int) -> list[Chat]: def run(self, count: int | None = None) -> Chat | list[Chat]: if count is not None: return self.run_many(count) - else: - return Chat(self.chat.all, self._execute(), pending=self) + + executor = self._execute() + outbound = next(executor) + + try: + while True: + inbound = self.generator.complete(outbound, self.params) + outbound = executor.send(inbound) + except StopIteration as stop: + outbound = t.cast(list[Message], stop.value) + + return Chat(self.chat.all, outbound, pending=self) def run_many(self, count: int) -> list[Chat]: - return [Chat(self.chat.all, self._execute(), pending=self) for _ in range(count)] + return [self.run() for _ in range(count)] __call__ = run +class AsyncPendingChat(PendingChatBase): + @t.overload + async def run(self, count: t.Literal[None] = None) -> Chat: + ... + + @t.overload + async def run(self, count: int) -> list[Chat]: + ... + + async def run(self, count: int | None = None) -> Chat | list[Chat]: + if count is not None: + return await self.run_many(count) + executor = self._execute() + outbound = next(executor) + + try: + while True: + inbound = await self.generator.acomplete(outbound, self.params) + outbound = executor.send(inbound) + except StopIteration as stop: + outbound = t.cast(list[Message], stop.value) + + return Chat(self.chat.all, outbound, pending=self) + + async def run_many(self, count: int) -> list[Chat]: + chats = await asyncio.gather(*[self.run() for _ in range(count)]) + return list(chats) + + __call__ = run diff --git a/rigging/generator.py b/rigging/generator.py index be13612..4527077 100644 --- a/rigging/generator.py +++ b/rigging/generator.py @@ -5,7 +5,7 @@ from loguru import logger from pydantic import BaseModel, ConfigDict, field_validator -from rigging.chat import PendingChat +from rigging.chat import AsyncPendingChat, PendingChat from rigging.error import InvalidModelSpecifiedError from rigging.message import ( Message, @@ -95,7 +95,7 @@ def complete_text(self, text: str, overloads: GenerateParams | None = None) -> s """ raise NotImplementedError("complete_text is not supported by this generator.") - def acomplete_text(self, text: str, overloads: GenerateParams | None = None) -> t.Coroutine[None, None, str]: + async def acomplete_text(self, text: str, overloads: GenerateParams | None = None) -> str: """ Asynchronously generates a string completion of the given text. 
@@ -127,9 +127,7 @@ def complete(self, messages: t.Sequence[Message], overloads: GenerateParams | No ... @abc.abstractmethod - def acomplete( - self, messages: t.Sequence[Message], overloads: GenerateParams | None = None - ) -> t.Coroutine[None, None, Message]: + async def acomplete(self, messages: t.Sequence[Message], overloads: GenerateParams | None = None) -> Message: """ Asynchronously generates the next message for a given set of messages. @@ -143,6 +141,17 @@ def acomplete( """ ... + # These type overloads look unnecessary, but mypy + # doesn't pick up on MessageDict args for some reason + + @t.overload + def chat(self, messages: t.Sequence[MessageDict]) -> PendingChat: + ... + + @t.overload + def chat(self, messages: t.Sequence[Message] | str) -> PendingChat: + ... + def chat( self, messages: t.Sequence[MessageDict] | t.Sequence[Message] | str, overloads: GenerateParams | None = None ) -> PendingChat: @@ -154,15 +163,57 @@ def chat( overloads (GenerateParams | None, optional): Optional parameters for generating responses. Defaults to None. Returns: - PendingChat: A PendingChat object representing the ongoing chat. + PendingChat: Pending chat to run. """ return PendingChat(self, Message.fit_as_list(messages), overloads) + @t.overload + def achat(self, messages: t.Sequence[MessageDict]) -> AsyncPendingChat: + ... + + @t.overload + def achat(self, messages: t.Sequence[Message] | str) -> AsyncPendingChat: + ... + + def achat( + self, messages: t.Sequence[MessageDict] | t.Sequence[Message] | str, overloads: GenerateParams | None = None + ) -> AsyncPendingChat: + """ + Initiates an async pending chat with the given messages and optional overloads. + + Args: + messages (Sequence[MessageDict] | Sequence[Message] | str): The messages to be sent in the chat. + overloads (GenerateParams | None, optional): Optional parameters for generating responses. Defaults to None. + + Returns: + AsyncPendingChat: Pending chat to run. + + """ + return AsyncPendingChat(self, Message.fit_as_list(messages), overloads) + # Helper function external to a generator +@t.overload +def chat( + generator: "Generator", + messages: t.Sequence[MessageDict], + overloads: GenerateParams | None = None, +) -> PendingChat: + ... + + +@t.overload +def chat( + generator: "Generator", + messages: t.Sequence[Message] | str, + overloads: GenerateParams | None = None, +) -> PendingChat: + ... + + def chat( generator: "Generator", messages: t.Sequence[MessageDict] | t.Sequence[Message] | MessageDict | Message | str, @@ -179,12 +230,48 @@ def chat( Defaults to None. Returns: - PendingChat: The pending chat object. + PendingChat: Pending chat to run. """ return PendingChat(generator, Message.fit_as_list(messages), overloads) +@t.overload +def achat( + generator: "Generator", messages: t.Sequence[MessageDict], overloads: GenerateParams | None = None +) -> AsyncPendingChat: + ... + + +@t.overload +def achat( + generator: "Generator", messages: t.Sequence[Message] | str, overloads: GenerateParams | None = None +) -> AsyncPendingChat: + ... + + +def achat( + generator: "Generator", + messages: t.Sequence[MessageDict] | t.Sequence[Message] | MessageDict | Message | str, + overloads: GenerateParams | None = None, +) -> AsyncPendingChat: + """ + Creates an async pending chat using the given generator, messages, and overloads. + + Args: + generator (Generator): The generator to use for creating the chat. + messages (Sequence[MessageDict] | Sequence[Message] | MessageDict | Message | str): + The messages to include in the chat. 
Can be a single message or a sequence of messages. + overloads (GenerateParams | None, optional): Additional parameters for generating the chat. + Defaults to None. + + Returns: + AsyncPendingChat: Pending chat to run. + + """ + return AsyncPendingChat(generator, Message.fit_as_list(messages), overloads) + + def trace_messages(messages: t.Sequence[Message], title: str) -> None: logger.trace(f"--- {title} ---") logger.trace("\n".join([str(msg) for msg in messages])) @@ -242,7 +329,7 @@ async def acomplete_text(self, text: str, overloads: GenerateParams | None = Non g_providers: dict[str, type["Generator"]] = { - "litellm": "LiteLLMGenerator", + "litellm": LiteLLMGenerator, } @@ -272,7 +359,7 @@ def get_generator(identifier: str) -> Generator: (These get parsed as GenerateParams) """ - provider: str = g_providers.keys()[0] + provider: str = list(g_providers.keys())[0] model: str = identifier api_key: str | None = None params: GenerateParams = GenerateParams() diff --git a/rigging/message.py b/rigging/message.py index 725c6a7..5a8d46d 100644 --- a/rigging/message.py +++ b/rigging/message.py @@ -60,9 +60,24 @@ def __init__(self, role: Role, content: str, parts: t.Sequence[ParsedMessagePart def __str__(self) -> str: return f"[{self.role}]: {self.content}" + # TODO: In general the add/remove/sync_part methods are + # overly complicated. We should probably just update content, + # then reparse all the models to get their fresh slices. + # + # I don't like all this manual slice recalculation logic, seems brittle. + def _remove_part(self, part: ParsedMessagePart) -> str: + removed_length = part.slice_.stop - part.slice_.start self._content = self._content[: part.slice_.start] + self._content[part.slice_.stop :] self.parts.remove(part) + + # Update slices of any parts that come after the removed part + for other_part in self.parts: + if other_part.slice_.start > part.slice_.start: + other_part.slice_ = slice( + other_part.slice_.start - removed_length, other_part.slice_.stop - removed_length + ) + return self._content def _add_part(self, part: ParsedMessagePart) -> None: @@ -204,6 +219,8 @@ def from_model( def fit_as_list( cls, messages: t.Sequence[MessageDict] | t.Sequence["Message"] | MessageDict | "Message" | str ) -> list["Message"]: + if isinstance(messages, Message | dict | str): + return [cls.fit(messages)] return [cls.fit(message) for message in messages] @classmethod diff --git a/tests/test_generation.py b/tests/test_generation.py index 93c7b37..53e5600 100644 --- a/tests/test_generation.py +++ b/tests/test_generation.py @@ -8,17 +8,23 @@ class EchoGenerator(Generator): - def complete(self, messages: t.Sequence[Message], overloads: GenerateParams) -> Message: + def complete(self, messages: t.Sequence[Message], overloads: GenerateParams | None = None) -> Message: return Message(role="assistant", content=messages[-1].content) + async def acomplete(self, messages: t.Sequence[Message], overloads: GenerateParams | None = None) -> Message: + return self.complete(messages, overloads) + class CallbackGenerator(Generator): callback: t.Callable[["CallbackGenerator", t.Sequence[Message]], str] | None = None - def complete(self, messages: t.Sequence[Message], overloads: GenerateParams) -> Message: + def complete(self, messages: t.Sequence[Message], overloads: GenerateParams | None = None) -> Message: assert self.callback is not None, "Callback must be defined for CallbackGenerator" return Message(role="assistant", content=self.callback(self, messages)) + async def acomplete(self, messages: 
t.Sequence[Message], overloads: GenerateParams | None = None) -> Message: + return self.complete(messages, overloads) + def test_until_parsed_as_with_reset() -> None: generator = CallbackGenerator(model="callback", params=GenerateParams()) diff --git a/tests/test_generator_creation.py b/tests/test_generator_creation.py index e1767f4..db07016 100644 --- a/tests/test_generator_creation.py +++ b/tests/test_generator_creation.py @@ -1,7 +1,8 @@ import pytest from rigging.error import InvalidModelSpecifiedError -from rigging.generator import GenerateParams, LiteLLMGenerator, get_generator +from rigging.generator import GenerateParams, LiteLLMGenerator, get_generator, register_generator +from tests.test_generation import EchoGenerator @pytest.mark.parametrize("identifier", ["test_model", "litellm!test_model"]) @@ -44,3 +45,12 @@ def test_get_generator_invalid_structure_format(identifier: str) -> None: def test_get_generator_invalid_params(identifier: str) -> None: with pytest.raises(InvalidModelSpecifiedError): get_generator(identifier) + + +def test_register_generator() -> None: + with pytest.raises(InvalidModelSpecifiedError): + get_generator("echo!test") + + register_generator("echo", EchoGenerator) + generator = get_generator("echo!test") + assert isinstance(generator, EchoGenerator) diff --git a/tests/test_messages.py b/tests/test_messages.py index 98fce70..8ff2f26 100644 --- a/tests/test_messages.py +++ b/tests/test_messages.py @@ -171,12 +171,17 @@ def test_pending_chat_continue() -> None: def test_pending_chat_add() -> None: - pending = PendingChat(get_generator("gpt-3.5"), [Message("user", "Hello")], GenerateParams()) - added = pending.add(Message("user", "Hello")) + pending = PendingChat(get_generator("gpt-3.5"), [Message("user", "Hello")]) + added = pending.add(Message("user", "There")) assert added == pending - assert len(added.chat) == 2 - assert added.chat.all[0].content == "Hello" + assert len(added.chat) == 1 + assert added.chat.all[0].content == "Hello\nThere" + + diff_added = pending.add(Message("assistant", "Hi there!")) + assert diff_added == added == pending + assert len(diff_added.chat) == 2 + assert diff_added.chat.all[1].content == "Hi there!" 
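The updated `test_pending_chat_add` above pins down the merge rule in `PendingChat.add()`: an incoming message with the same role as the trailing message is folded into it, joined by a newline, while a different role appends a new message. A minimal sketch of the same behavior through the public API (the model id is illustrative, and nothing is generated here, so no API key is required):

```python
import rigging as rg

pending = rg.get_generator("gpt-3.5-turbo").chat("Hello")

# Same role as the trailing message: contents merge with a newline.
pending.add("There")
assert pending.chat.all[-1].content == "Hello\nThere"

# Different role: appended as a separate message.
pending.add({"role": "assistant", "content": "Hi there!"})
assert len(pending.chat) == 2
```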
def test_chat_continue_maintains_parsed_models() -> None: From 38601ece3c31d65a0a010dd26eb622510f307106 Mon Sep 17 00:00:00 2001 From: monoxgas Date: Thu, 2 May 2024 15:02:23 -0600 Subject: [PATCH 06/16] Simplified the async chat interface --- rigging/__init__.py | 5 ++-- rigging/chat.py | 67 ++++++++++++++++++-------------------------- rigging/generator.py | 62 +--------------------------------------- 3 files changed, 30 insertions(+), 104 deletions(-) diff --git a/rigging/__init__.py b/rigging/__init__.py index 49ac7eb..fcff37d 100644 --- a/rigging/__init__.py +++ b/rigging/__init__.py @@ -1,5 +1,5 @@ -from rigging.chat import AsyncPendingChat, Chat, PendingChat -from rigging.generator import GenerateParams, Generator, achat, chat, get_generator +from rigging.chat import Chat, PendingChat +from rigging.generator import GenerateParams, Generator, chat, get_generator from rigging.message import Message, MessageDict, Messages from rigging.model import Model, attr, element, wrapped from rigging.tool import Tool @@ -16,7 +16,6 @@ "wrapped", "Chat", "PendingChat", - "AsyncPendingChat", "Generator", "GenerateParams", "chat", diff --git a/rigging/chat.py b/rigging/chat.py index 3990f00..8d0d077 100644 --- a/rigging/chat.py +++ b/rigging/chat.py @@ -3,7 +3,6 @@ from loguru import logger from pydantic import BaseModel, ConfigDict, Field, ValidationError -from typing_extensions import Self from rigging.error import ExhaustedMaxRoundsError from rigging.message import Message, MessageDict, Messages @@ -27,13 +26,13 @@ class Chat(BaseModel): messages: list[Message] next_messages: list[Message] = Field(default_factory=list) - pending: t.Optional["PendingChatBase"] = Field(None, exclude=True) + pending: t.Optional["PendingChat"] = Field(None, exclude=True) def __init__( self, messages: Messages, next_messages: Messages | None = None, - pending: t.Optional["PendingChatBase"] = None, + pending: t.Optional["PendingChat"] = None, ): super().__init__( messages=Message.fit_as_list(messages), @@ -67,23 +66,11 @@ def restart(self, generator: t.Optional["Generator"] = None) -> "PendingChat": raise ValueError("Cannot restart chat that was not created with a PendingChat") return PendingChat(self.pending.generator, self.messages, self.pending.params) - def arestart(self, generator: t.Optional["Generator"] = None) -> "AsyncPendingChat": - if generator is not None: - return generator.achat(self.messages) - elif self.pending is None: - raise ValueError("Cannot restart chat that was not created with a PendingChat") - return AsyncPendingChat(self.pending.generator, self.messages, self.pending.params) - def fork( self, messages: t.Sequence[Message] | t.Sequence[MessageDict] | Message | MessageDict | str ) -> "PendingChat": return self.restart().add(messages) - def afork( - self, messages: t.Sequence[Message] | t.Sequence[MessageDict] | Message | MessageDict | str - ) -> "AsyncPendingChat": - return self.arestart().add(messages) - def continue_(self, messages: t.Sequence[Message] | t.Sequence[MessageDict] | Message | str) -> "PendingChat": return self.fork(messages) @@ -124,7 +111,7 @@ def inject_tool_prompt(self, tools: t.Sequence[Tool]) -> None: UntilCallback = t.Callable[[Message], tuple[bool, list[Message]]] -class PendingChatBase: +class PendingChat: def __init__( self, generator: "Generator", messages: t.Sequence[Message], params: t.Optional["GenerateParams"] = None ): @@ -139,12 +126,12 @@ def __init__( self.inject_tool_prompt: bool = True self.force_tool: bool = False - def overload(self, **kwargs: t.Any) -> Self: 
+ def overload(self, **kwargs: t.Any) -> "PendingChat": from rigging.generator import GenerateParams return self.with_params(GenerateParams(**kwargs)) - def with_params(self, params: "GenerateParams") -> Self: + def with_params(self, params: "GenerateParams") -> "PendingChat": if self.params is not None: new = self.clone() new.params = params @@ -153,7 +140,9 @@ def with_params(self, params: "GenerateParams") -> Self: self.params = params return self - def add(self, messages: t.Sequence[MessageDict] | t.Sequence[Message] | MessageDict | Message | str) -> Self: + def add( + self, messages: t.Sequence[MessageDict] | t.Sequence[Message] | MessageDict | Message | str + ) -> "PendingChat": message_list = Message.fit_as_list(messages) # If the last message is the same role as the first new message, append to it if self.chat.all and self.chat.all[-1].role == message_list[0].role: @@ -163,14 +152,18 @@ def add(self, messages: t.Sequence[MessageDict] | t.Sequence[Message] | MessageD self.chat.next_messages += message_list return self - def fork(self, messages: t.Sequence[MessageDict] | t.Sequence[Message] | MessageDict | Message | str) -> Self: + def fork( + self, messages: t.Sequence[MessageDict] | t.Sequence[Message] | MessageDict | Message | str + ) -> "PendingChat": return self.clone().add(messages) - def continue_(self, messages: t.Sequence[MessageDict] | t.Sequence[Message] | MessageDict | Message | str) -> Self: + def continue_( + self, messages: t.Sequence[MessageDict] | t.Sequence[Message] | MessageDict | Message | str + ) -> "PendingChat": return self.fork(messages) - def clone(self) -> Self: - new = self.__class__(self.generator, [], self.params) + def clone(self) -> "PendingChat": + new = PendingChat(self.generator, [], self.params) new.chat = self.chat.clone() new.until_callbacks = self.until_callbacks.copy() new.until_types = self.until_types.copy() @@ -179,12 +172,12 @@ def clone(self) -> Self: new.force_tool = self.force_tool return new - def apply(self, **kwargs: str) -> Self: + def apply(self, **kwargs: str) -> "PendingChat": new = self.clone() new.chat.apply(**kwargs) return new - def apply_to_all(self, **kwargs: str) -> Self: + def apply_to_all(self, **kwargs: str) -> "PendingChat": new = self.clone() new.chat.apply_to_all(**kwargs) return new @@ -196,7 +189,7 @@ def until( attempt_recovery: bool = False, drop_dialog: bool = True, max_rounds: int = DEFAULT_MAX_ROUNDS, - ) -> Self: + ) -> "PendingChat": self.until_callbacks.append((callback, attempt_recovery, drop_dialog, max_rounds)) return self @@ -209,7 +202,7 @@ def using( drop_dialog: bool = False, max_rounds: int = DEFAULT_MAX_ROUNDS, inject_prompt: bool | None = None, - ) -> Self: + ) -> "PendingChat": self.until_tools += tool if isinstance(tool, t.Sequence) else [tool] self.inject_tool_prompt = inject_prompt or self.inject_tool_prompt self.force_tool = force @@ -231,7 +224,7 @@ def until_parsed_as( attempt_recovery: bool = False, drop_dialog: bool = True, max_rounds: int = DEFAULT_MAX_ROUNDS, - ) -> Self: + ) -> "PendingChat": self.until_types += types if isinstance(types, t.Sequence) else [types] if next((c for c in self.until_callbacks if c[0] == self._until_parse_callback), None) is None: self.until_callbacks.append((self._until_parse_callback, attempt_recovery, drop_dialog, max_rounds)) @@ -359,8 +352,6 @@ def _execute(self) -> t.Generator[list[Message], Message, list[Message]]: return new_messages - -class PendingChat(PendingChatBase): @t.overload def run(self, count: t.Literal[None] = None) -> Chat: ... 
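With `AsyncPendingChat` folded back into `PendingChat`, a single class now exposes `run()`/`run_many()` next to `arun()`/`arun_many()`, all driven through the same `_execute()` generator. A rough usage sketch (method names as in this patch; the model id is illustrative, and actually running it assumes LiteLLM credentials are configured):

```python
import asyncio

import rigging as rg

pending = rg.get_generator("gpt-3.5-turbo").chat("Say hello.")

# Synchronous path: drives the executor with Generator.complete().
chat = pending.run()

# Async path on the same object: arun_many() fans the generations
# out concurrently via asyncio.gather().
chats = asyncio.run(pending.arun_many(3))
```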
@@ -390,19 +381,17 @@ def run_many(self, count: int) -> list[Chat]: __call__ = run - -class AsyncPendingChat(PendingChatBase): @t.overload - async def run(self, count: t.Literal[None] = None) -> Chat: + async def arun(self, count: t.Literal[None] = None) -> Chat: ... @t.overload - async def run(self, count: int) -> list[Chat]: + async def arun(self, count: int) -> list[Chat]: ... - async def run(self, count: int | None = None) -> Chat | list[Chat]: + async def arun(self, count: int | None = None) -> Chat | list[Chat]: if count is not None: - return await self.run_many(count) + return await self.arun_many(count) executor = self._execute() outbound = next(executor) @@ -416,8 +405,6 @@ async def run(self, count: int | None = None) -> Chat | list[Chat]: return Chat(self.chat.all, outbound, pending=self) - async def run_many(self, count: int) -> list[Chat]: - chats = await asyncio.gather(*[self.run() for _ in range(count)]) + async def arun_many(self, count: int) -> list[Chat]: + chats = await asyncio.gather(*[self.arun() for _ in range(count)]) return list(chats) - - __call__ = run diff --git a/rigging/generator.py b/rigging/generator.py index 4527077..99d4e30 100644 --- a/rigging/generator.py +++ b/rigging/generator.py @@ -5,7 +5,7 @@ from loguru import logger from pydantic import BaseModel, ConfigDict, field_validator -from rigging.chat import AsyncPendingChat, PendingChat +from rigging.chat import PendingChat from rigging.error import InvalidModelSpecifiedError from rigging.message import ( Message, @@ -168,30 +168,6 @@ def chat( """ return PendingChat(self, Message.fit_as_list(messages), overloads) - @t.overload - def achat(self, messages: t.Sequence[MessageDict]) -> AsyncPendingChat: - ... - - @t.overload - def achat(self, messages: t.Sequence[Message] | str) -> AsyncPendingChat: - ... - - def achat( - self, messages: t.Sequence[MessageDict] | t.Sequence[Message] | str, overloads: GenerateParams | None = None - ) -> AsyncPendingChat: - """ - Initiates an async pending chat with the given messages and optional overloads. - - Args: - messages (Sequence[MessageDict] | Sequence[Message] | str): The messages to be sent in the chat. - overloads (GenerateParams | None, optional): Optional parameters for generating responses. Defaults to None. - - Returns: - AsyncPendingChat: Pending chat to run. - - """ - return AsyncPendingChat(self, Message.fit_as_list(messages), overloads) - # Helper function external to a generator @@ -236,42 +212,6 @@ def chat( return PendingChat(generator, Message.fit_as_list(messages), overloads) -@t.overload -def achat( - generator: "Generator", messages: t.Sequence[MessageDict], overloads: GenerateParams | None = None -) -> AsyncPendingChat: - ... - - -@t.overload -def achat( - generator: "Generator", messages: t.Sequence[Message] | str, overloads: GenerateParams | None = None -) -> AsyncPendingChat: - ... - - -def achat( - generator: "Generator", - messages: t.Sequence[MessageDict] | t.Sequence[Message] | MessageDict | Message | str, - overloads: GenerateParams | None = None, -) -> AsyncPendingChat: - """ - Creates an async pending chat using the given generator, messages, and overloads. - - Args: - generator (Generator): The generator to use for creating the chat. - messages (Sequence[MessageDict] | Sequence[Message] | MessageDict | Message | str): - The messages to include in the chat. Can be a single message or a sequence of messages. - overloads (GenerateParams | None, optional): Additional parameters for generating the chat. - Defaults to None. 
-
-    Returns:
-        AsyncPendingChat: Pending chat to run.
-
-    """
-    return AsyncPendingChat(generator, Message.fit_as_list(messages), overloads)
-
-
 def trace_messages(messages: t.Sequence[Message], title: str) -> None:
     logger.trace(f"--- {title} ---")
     logger.trace("\n".join([str(msg) for msg in messages]))

From 876754e9111614b45c9a9cc916b3bb568e4743ae Mon Sep 17 00:00:00 2001
From: monoxgas
Date: Thu, 2 May 2024 18:15:05 -0600
Subject: [PATCH 07/16] Add some sanity checks for single attr-only models
 until we can handle them

---
 rigging/model.py | 24 ++++++++++++++++++++++--
 1 file changed, 22 insertions(+), 2 deletions(-)

diff --git a/rigging/model.py b/rigging/model.py
index 81cefb7..08c7171 100644
--- a/rigging/model.py
+++ b/rigging/model.py
@@ -9,7 +9,7 @@
 from pydantic_xml import element as element
 from pydantic_xml import wrapped as wrapped
 from pydantic_xml.element import SearchMode  # type: ignore [attr-defined]
-from pydantic_xml.typedefs import NsMap
+from pydantic_xml.typedefs import EntityLocation, NsMap
 
 from rigging.error import MissingModelError
 
@@ -73,7 +73,8 @@ def __init_subclass__(
         cls.__xml_tag__ = XmlTagDescriptor()  # type: ignore [assignment]
 
     # to_xml() doesn't prettify normally, and extended
-    # requirements like lxml seemed like poor form
+    # requirements like lxml seemed like poor form for
+    # just this feature
     def to_pretty_xml(self) -> str:
         tree = self.to_xml_tree()
         ET.indent(tree, "  ")
@@ -89,6 +90,8 @@ def to_pretty_xml(self) -> str:
     # So we'll handle easy cases here and mark the model as "simple"
     # if it only contains a single basic field. It makes our parsing
     # much more consistent and is likely the most popular model type.
+    #
+    # TODO: lxml with the recover option is likely a better approach
     @classmethod
     def is_simple(cls) -> bool:
         field_values = list(cls.model_fields.values())
@@ -112,6 +115,18 @@ def xml_tags(cls) -> str:
     def xml_example(cls) -> str:
         return cls.xml_tags()
 
+    @classmethod
+    def ensure_valid(cls) -> None:
+        # Do a sanity check for models with a single
+        # attr field, which our parsing currently doesn't support
+        #
+        # TODO: Add support for <tag attr="value" /> style models
+
+        if len(cls.model_fields) == 1:
+            field_info = next(iter(cls.model_fields.values()))
+            if hasattr(field_info, "location") and field_info.location == EntityLocation.ATTRIBUTE:
+                raise ValueError(f"Model '{cls.__name__}' has a single attr() field which is not supported")
+
     # Attempt to extract this object from an arbitrary string
     # which may contain other XML elements or text, returns
     # the object and the string from which it was parsed.
 
     @classmethod
     def from_text(cls, content: str) -> list[tuple[ModelT, slice]]:
+        cls.ensure_valid()
+
         pattern = r"(<([\w-]+).*?>((.*?)</\2>))"
         matches = [m for m in re.finditer(pattern, content, flags=re.DOTALL) if m.group(2) == cls.__xml_tag__]
@@ -146,6 +163,9 @@ def from_text(cls, content: str) -> list[tuple[ModelT, slice]]:
             #
             # Example: "Sure I'll use <foo> tags: <foo>hello</foo>"
             #
+            # TODO: The opposite could be true, and we could greedily parse
+            # backwards if we get failures. This is a simple solution for now.
+
             inner_match: re.Match[str] | None = match
             while inner_match is not None:
                 inner_matches = re.finditer(pattern, inner_with_end_tag, flags=re.DOTALL)

From 2027b89a41fc67ff990ba4da9e4fcb7e5031b9f0 Mon Sep 17 00:00:00 2001
From: monoxgas
Date: Thu, 2 May 2024 18:15:45 -0600
Subject: [PATCH 08/16] Improve message serialization with generator id
 strings, timestamps, uuids, etc.
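Chats now carry a `uuid`, a `timestamp`, and a computed `generator_id`, so a serialized chat records exactly which generator and parameters produced it. A hedged sketch of the intended round trip (identifier format per `to_identifier()` in this patch; the model id is illustrative and the `.run()` call assumes credentials are configured):

```python
import rigging as rg

chat = rg.get_generator("litellm!gpt-3.5-turbo,temperature=0.5").chat("Hi there.").run()

chat.generator_id  # -> "litellm!gpt-3.5-turbo,temperature=0.5"
chat.model_dump()  # now includes uuid, timestamp, and generator_id

# Passing generator_id back to Chat() rebuilds a pending generator,
# so the rehydrated chat can be restarted or continued.
restored = rg.Chat(chat.messages, chat.next, generator_id=chat.generator_id)
```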
--- rigging/chat.py | 36 +++++++++++++++++++++++++++++++----- rigging/generator.py | 20 ++++++++++++++++++-- rigging/message.py | 2 +- 3 files changed, 50 insertions(+), 8 deletions(-) diff --git a/rigging/chat.py b/rigging/chat.py index 8d0d077..2be3a6d 100644 --- a/rigging/chat.py +++ b/rigging/chat.py @@ -1,8 +1,16 @@ import asyncio import typing as t +from datetime import datetime +from uuid import UUID, uuid4 from loguru import logger -from pydantic import BaseModel, ConfigDict, Field, ValidationError +from pydantic import ( + BaseModel, + ConfigDict, + Field, + ValidationError, + computed_field, +) from rigging.error import ExhaustedMaxRoundsError from rigging.message import Message, MessageDict, Messages @@ -24,20 +32,37 @@ class Chat(BaseModel): model_config = ConfigDict(arbitrary_types_allowed=True) + uuid: UUID = Field(default_factory=uuid4) + timestamp: datetime = Field(default_factory=datetime.now, repr=False) messages: list[Message] next_messages: list[Message] = Field(default_factory=list) - pending: t.Optional["PendingChat"] = Field(None, exclude=True) + + pending: t.Optional["PendingChat"] = Field(None, exclude=True, repr=False) + + @computed_field(repr=False) + def generator_id(self) -> str | None: + if self.pending is not None: + return self.pending.generator.to_identifier(self.pending.params) + return None def __init__( self, messages: Messages, next_messages: Messages | None = None, pending: t.Optional["PendingChat"] = None, + **kwargs: t.Any, ): + from rigging.generator import get_generator + + if "generator_id" in kwargs and pending is None: + generator = get_generator(kwargs.pop("generator_id")) + pending = generator.chat(messages) + super().__init__( messages=Message.fit_as_list(messages), next_messages=Message.fit_as_list(next_messages) if next_messages is not None else [], pending=pending, + **kwargs, ) def __len__(self) -> int: @@ -59,12 +84,13 @@ def next(self) -> list[Message]: def last(self) -> Message: return self.next_messages[-1] - def restart(self, generator: t.Optional["Generator"] = None) -> "PendingChat": + def restart(self, *, generator: t.Optional["Generator"] = None, include_next: bool = False) -> "PendingChat": + messages = self.all if include_next else self.messages if generator is not None: - return generator.chat(self.messages) + return generator.chat(messages) elif self.pending is None: raise ValueError("Cannot restart chat that was not created with a PendingChat") - return PendingChat(self.pending.generator, self.messages, self.pending.params) + return PendingChat(self.pending.generator, messages, self.pending.params) def fork( self, messages: t.Sequence[Message] | t.Sequence[MessageDict] | Message | MessageDict | str diff --git a/rigging/generator.py b/rigging/generator.py index 99d4e30..fa0a721 100644 --- a/rigging/generator.py +++ b/rigging/generator.py @@ -3,7 +3,7 @@ import litellm # type: ignore from loguru import logger -from pydantic import BaseModel, ConfigDict, field_validator +from pydantic import BaseModel, ConfigDict, Field, field_validator from rigging.chat import PendingChat from rigging.error import InvalidModelSpecifiedError @@ -53,9 +53,21 @@ def validate_stop(cls, value: t.Any) -> t.Any: class Generator(BaseModel, abc.ABC): model: str - api_key: str | None = None + api_key: str | None = Field(None, exclude=True) params: GenerateParams + def to_identifier(self, overloads: GenerateParams | None = None) -> str: + provider = next(name for name, klass in g_providers.items() if isinstance(self, klass)) + params_dict = 
self._merge_params(overloads) + if not params_dict: + return f"{provider}!{self.model}" + + if "stop" in params_dict: + params_dict["stop"] = ";".join(params_dict["stop"]) + params = ",".join([f"{k}={v}" for k, v in params_dict.items()]) + + return f"{provider}!{self.model},{params}" + def _merge_params(self, overloads: GenerateParams | None = None) -> dict[str, t.Any]: """ Helper to merge the parameters of the current instance with the provided `overloads` parameters. @@ -273,6 +285,10 @@ async def acomplete_text(self, text: str, overloads: GenerateParams | None = Non } +def get_identifier(generator: Generator, overloads: GenerateParams | None = None) -> str: + return generator.to_identifier(overloads) + + def get_generator(identifier: str) -> Generator: """ Get a generator by an identifier string. Uses LiteLLM by default. diff --git a/rigging/message.py b/rigging/message.py index 5a8d46d..6b28885 100644 --- a/rigging/message.py +++ b/rigging/message.py @@ -49,7 +49,7 @@ def validate_slice(cls, value: t.Any) -> slice: class Message(BaseModel): role: Role - parts: list[ParsedMessagePart] = Field(default_factory=list, exclude=True) + parts: list[ParsedMessagePart] = Field(default_factory=list) _content: str = "" From a08f11e0b07fcda02f73c844b9a5abd7e28a62c3 Mon Sep 17 00:00:00 2001 From: monoxgas Date: Fri, 3 May 2024 10:22:04 -0600 Subject: [PATCH 09/16] Start on metadata tracking --- rigging/chat.py | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/rigging/chat.py b/rigging/chat.py index 2be3a6d..1f27b7b 100644 --- a/rigging/chat.py +++ b/rigging/chat.py @@ -1,5 +1,6 @@ import asyncio import typing as t +from copy import deepcopy from datetime import datetime from uuid import UUID, uuid4 @@ -144,6 +145,7 @@ def __init__( self.generator: "Generator" = generator self.chat: Chat = Chat(messages, pending=self) self.params = params + self.metadata: dict[str, t.Any] = {} # (callback, attempt_recovery, drop_dialog, max_rounds) self.until_callbacks: list[tuple[UntilCallback, bool, bool, int]] = [] @@ -188,16 +190,22 @@ def continue_( ) -> "PendingChat": return self.fork(messages) - def clone(self) -> "PendingChat": + def clone(self, *, only_messages: bool = False) -> "PendingChat": new = PendingChat(self.generator, [], self.params) new.chat = self.chat.clone() - new.until_callbacks = self.until_callbacks.copy() - new.until_types = self.until_types.copy() - new.until_tools = self.until_tools.copy() - new.inject_tool_prompt = self.inject_tool_prompt - new.force_tool = self.force_tool + if not only_messages: + new.until_callbacks = self.until_callbacks.copy() + new.until_types = self.until_types.copy() + new.until_tools = self.until_tools.copy() + new.inject_tool_prompt = self.inject_tool_prompt + new.force_tool = self.force_tool + new.metadata = deepcopy(self.metadata) return new + def meta(self, **kwargs: t.Any) -> "PendingChat": + self.metadata.update(kwargs) + return self + def apply(self, **kwargs: str) -> "PendingChat": new = self.clone() new.chat.apply(**kwargs) From 841e8af243cb9a209fe922d480244aa801d93fcd Mon Sep 17 00:00:00 2001 From: monoxgas Date: Fri, 3 May 2024 13:59:13 -0600 Subject: [PATCH 10/16] Cleaning and preparing documentation --- docs/api/chat.md | 1 + docs/api/error.md | 1 + docs/api/generator.md | 1 + docs/api/logging.md | 1 + docs/api/message.md | 1 + docs/api/model.md | 1 + docs/api/tool.md | 1 + docs/assets/logo_black.png | Bin 0 -> 32746 bytes docs/assets/logo_white.png | Bin 0 -> 23700 bytes docs/docs/api/chat.md | 0 
docs/docs/api/errors.md | 3 - docs/docs/api/generators.md | 1 - docs/docs/api/logging.md | 1 - docs/docs/api/message.md | 1 - docs/docs/api/model.md | 14 - docs/docs/api/prompt.md | 1 - docs/docs/api/tools.md | 10 - docs/docs/images/logo.png | Bin 23903 -> 0 bytes docs/{docs => }/index.md | 0 docs/mkdocs.yml | 54 - docs/stylesheets/extra.css | 18 + docs/{docs => }/tutorial/chat.md | 0 docs/{docs => }/tutorial/generators.md | 0 docs/{docs => }/tutorial/logging.md | 0 docs/{docs => }/tutorial/model.md | 0 docs/{docs => }/tutorial/tools.md | 0 mkdocs.yml | 68 ++ poetry.lock | 1246 +++++++++++++++--------- pyproject.toml | 10 + rigging/chat.py | 83 +- rigging/error.py | 19 + rigging/generator.py | 139 ++- rigging/logging.py | 23 + rigging/message.py | 159 ++- rigging/tool.py | 69 +- 35 files changed, 1319 insertions(+), 607 deletions(-) create mode 100644 docs/api/chat.md create mode 100644 docs/api/error.md create mode 100644 docs/api/generator.md create mode 100644 docs/api/logging.md create mode 100644 docs/api/message.md create mode 100644 docs/api/model.md create mode 100644 docs/api/tool.md create mode 100644 docs/assets/logo_black.png create mode 100644 docs/assets/logo_white.png delete mode 100644 docs/docs/api/chat.md delete mode 100644 docs/docs/api/errors.md delete mode 100644 docs/docs/api/generators.md delete mode 100644 docs/docs/api/logging.md delete mode 100644 docs/docs/api/message.md delete mode 100644 docs/docs/api/model.md delete mode 100644 docs/docs/api/prompt.md delete mode 100644 docs/docs/api/tools.md delete mode 100644 docs/docs/images/logo.png rename docs/{docs => }/index.md (100%) delete mode 100644 docs/mkdocs.yml create mode 100644 docs/stylesheets/extra.css rename docs/{docs => }/tutorial/chat.md (100%) rename docs/{docs => }/tutorial/generators.md (100%) rename docs/{docs => }/tutorial/logging.md (100%) rename docs/{docs => }/tutorial/model.md (100%) rename docs/{docs => }/tutorial/tools.md (100%) create mode 100644 mkdocs.yml diff --git a/docs/api/chat.md b/docs/api/chat.md new file mode 100644 index 0000000..6bb886e --- /dev/null +++ b/docs/api/chat.md @@ -0,0 +1 @@ +::: rigging.chat \ No newline at end of file diff --git a/docs/api/error.md b/docs/api/error.md new file mode 100644 index 0000000..3596392 --- /dev/null +++ b/docs/api/error.md @@ -0,0 +1 @@ +::: rigging.error diff --git a/docs/api/generator.md b/docs/api/generator.md new file mode 100644 index 0000000..6b9738a --- /dev/null +++ b/docs/api/generator.md @@ -0,0 +1 @@ +::: rigging.generator \ No newline at end of file diff --git a/docs/api/logging.md b/docs/api/logging.md new file mode 100644 index 0000000..b216e71 --- /dev/null +++ b/docs/api/logging.md @@ -0,0 +1 @@ +::: rigging.logging \ No newline at end of file diff --git a/docs/api/message.md b/docs/api/message.md new file mode 100644 index 0000000..55d2c77 --- /dev/null +++ b/docs/api/message.md @@ -0,0 +1 @@ +::: rigging.message \ No newline at end of file diff --git a/docs/api/model.md b/docs/api/model.md new file mode 100644 index 0000000..e8ea08a --- /dev/null +++ b/docs/api/model.md @@ -0,0 +1 @@ +::: rigging.model \ No newline at end of file diff --git a/docs/api/tool.md b/docs/api/tool.md new file mode 100644 index 0000000..94e64e1 --- /dev/null +++ b/docs/api/tool.md @@ -0,0 +1 @@ +::: rigging.tool diff --git a/docs/assets/logo_black.png b/docs/assets/logo_black.png new file mode 100644 index 0000000000000000000000000000000000000000..60a5eb6f9067365c7ef1ce100b169009a6b522a2 GIT binary patch literal 32746 
zxybVQyF_ll>`K5y(Y|@_9(nf_K2#9^?b`tC3~{TyJ}!DrylD2rG!+4{j_F`T&$o-4 z4ESq`3$RwrH`Ii11c}=NDy$5I*&T#}VvG!jBVybYT1SNv`i1hJUbkP+`ygB#c9KR0fY7KTCt3R!(n!F5Um#9t)=D30J6r(>6g+w=yUb@XE_ zv**1|(TxP?8;!o0z34;X2v$d#{8z*p#zNiPzpMGq7&V3N#C5;*s)_udX~AdAZYYiAl@&n+Sh+9McBbx!D!KMd zPJ$Qq;Q@(xND?%o3`7EA{q6LISQz+5B#Oopr>T(hIG##_l*%YYqiNXo zhh04-+}0d9oLl8_wd!7XIjUW7kX|M32yCvM{R%T6DofAkaSoNb%ah1pkwkf3Qp2GP z!}pfSxN_%!JYmA1Q9tUG9KQ*|zJsk0u$qa;8Fn9?rFyOqYy( z+8X&7926?&_?%F~@_oU9$5g|zDM{G)d03Bb7w|gi&Tj8Y$Pi!jc^Lg*4xqFOS%Cwl zz!9oI5(5s!eID{G^1*enbH~-SKug`6)oj@yqa>CZ zK=fbF*6KV1T*_X>z7uD1>wKYhxDOE3p$HT^SNc&!z&`53*7o-=1f=Zo@7?Kta$|ba zGBx{p#Sdh6LcGi;Q?}jTbj{JBegduhAyU&W zB|Poq^cNDFndf~vHa=NT2v3k#9!?4GH%A^MI^{oiY7+OK(#Pw6--yqAu&9G1PxTu= z{*6O{pr@syehV%r27tXilXafIC94Yn7bh414u(~h*u`0de zCkkL3MKhc-+;M_j_l56>Qi+q)Hx$_c(t6o@zC`596X@6OO zBlUSLv%zgwCs~BnMdLJ#Cz4$r|H0KU4mwK+!7w=U%ipV0ND&`AN4v^bar)&U;Xea)ayhLJWE;a#`DX7mx!YWrAwp z1N_aNGhcZ<1Wclf$bLUX?s9J67I4cS3?pk+SdlydE{V~-mAX1{FpPxrrtsU`xZ&Y; z{rcl0!%tLlC`j(~_Jj79Q@xbSAo)_Cz>{0A+mS0`20tpcH4wG|RYlFd!hng2+@*3n^0Qka67ug66)^u; zDVomEJ5dW9XH=KtRj83(h9Z6g5>fayDSWkJmmbF1XwYo-X!JX?J?JWN)bU--u2;}v z>qjoR4UGsOI1i{PK6y*o7jbX5b-2af;;4?LX(~o0-1L5ShYcg%zErBnlS1UuP48ry z^ntq2!$HB|&0e*3sj+DPARkLb`(P>uiFNG1sjXy-K28Bz6}Fg|m(7&O_yJbMk}J6Z zWX<%nxWhZ`i{HXK{ACF~I>xW-4aF)yC{EWsyLs1)93>UgvboT&wgkN~ZsqHvdLJB) z{*5qp)zN7S;mj7+kN;g};1hF~TwH*#QNh;@s4#yVAR!rpjg3E^DK2t7ihvnu(~uWY zmkB!GHZmiK?mMJZX#JYP-GD}XBqaauYaIFfxWF4Dn|t=lBXWmfBs8n<_D?4lkW`T1 zr+S8*Vyg}84800t(`@^(e>`^u8O}L3yRysKcro#wCPNjmwD+XC=;&t%-9p?x8JDw4N2Sz6PB5IwFNxYkDX9@j0*}nCxiEXRn}#~%#Up(=g~0z zg;olqaZw!{fFHr4&Fp)X!BIYu)=l%{){wQ@$GzY052I&zf?3@D?(S+OJ5-u4OwbtD zhSf8`#ia8)AV9}4IZ3=j=cgR9g>(_o?bekmD5>6%{c&UgO2KHmEZ%ER*jE#3YHGx? zZGZkqRMScy3IT_94s9Sr^2V1K5g0rw<9JxfzVkxe@GmTkEVD|Si`wj+YgkN@87!bm zce&4tAo6vBc=7jd$_f3KxvuMQsFKF|@yRUts3JJ}>O={Vx5#{Syb*0B3R&-cTuv4z zsi=~y18zZut|qcX%bmzObhk%u8cv~odXyBWh?}JIEx^{T;BPQ?Xl zHQcP4*B7jQXnwD?_F*3yGSdQ=qJaxNXa!s8JC&kvfASkWiuUzkE=VJ+h9p)vaxJb~ zooD6_*fdQ|@!-p8v|HppIi`gt{(+3C-_f=o0zFB<7Z01tya7F<8f{w}(1GZ_+Hk45 zXy97q%pDAAZt4A$6l|@DTw2ye2J`nWG@skr-mZ%{Fo7Vi*}$9^ydRhwYs|O2%`4LA zUO#Eioy>EW;<#}VGwj2@__UR4|6_v@04g~mr1%dQ97S7B5n%O(g# z=3=Zwcu?NivF{fVQ5XqrON@=$Ce(+V7tLl;@*Ih#-n(xj((TRL4vzcVZ)99qf&bjqxv=?u+rI0wkQ60gCf0 z*HMc2%X~$G1Uzw?VmT7aampvqIJb&r?#b8Pg3s=1wBwH57NK~}kQNdU0ZTkt24}S(j zN~Ub@f)&3x+S#6P2}YOgA#xk0x9qiT_48g`a&oxn55F{cy6y=ybR(N1{J18_1$Bit zg<~5rc(Yi9 z<&YEP<_CqRyy1HwVs^d109Ait>BtRuj8*%xdKZU1%2|&=@~X)|kn8l_!6V92LdJW@ zUKUP`NnI&+t&?V!poc=MQxpQy0hiUcVn)~M2{<3h0=%I0tiloBt;gU7LGf8Z$gZSu z$8jAicgsvA%;*v=E8DX$C*&yf1}p!hgv7@vSUw_LxBE(pVQ#xF^_ zocP8*^Z9f3-iLTwyZ**9KI;Ydb*)-t0w|LIJ7Rsz0SysN0>!njN?vt>!psMAW;{p0 ztWvj&XhqGyO4UGWEnj@44*d`la1G`0Vl&;W54FaMZJ00wk^YU{7qHKS^aGQk{;ca- zKX)KUN_K|qW3QIXm}1B9z)nCUChnMSxh8~K0%3B9PU24|!(IGYtrIi@2 z9cPt>#2p>{grtiDNNub$($Tp&+_WFwpe}#B>DW}=!B#O#8_6#Ei&(YgpOJniq@{b| z-)duM&zbl2FahhCViwLb$3oU~bKDyS=KMv{Le`sk>}vnJK855jm3|;c*yz~U%iiYU z0_}*5NjRi;p_6_cY6k64ZM8R$apq1uoz&|sX>uBuK0Q5M15*SQ2mFt-LO0WH(gT_H zm6*_kv^s-nV?h_tgF;%N@B1)>8oD7auXctYo}3EAvxkUfZ!g(u;~-{$%t|kBpBlH9 z(VIH=rugHtxY$}a_SsBU${s#^?)sQK@xDOBoz@3@+fGncTqG5&k1W|gvAC}j0hZ-k z>?usF?18sWA{mocZb$>2Gv3X5aOpqTIjSeg8n6#99b|jlI-v5WDut)+GdIB-JczMYB3oJJ@ zG$;ZZrO2Z`t5sNJz}l?__{{0P#|y1$k+(~5qo2i2B=jqMFnucNt0xnv8N5|YCMA)%Krh7sLnBuJAk?_a&cR2!% z`O7rhAF>Ap?P)A4nL_rcsLl0!XJ2;;gk2Q#gZu3eq|V$P9UWElzoPsKhO|1Epos#hrLfb4tnV>;RcBa+@L1-HH#;_Gx7hM3*#zeIXcQyXhZm~7ZENC*a{ zAtJUf=LuogxmUEO>2ujenmIgpA%ODkjzDwkrXRD_*QuxGTVspLXrW^AA*NkwgnrHw zz=j0LHwhHvexet!=o ziuLz)e^6b=OLbFTr%N!zlnd2_y?hXGtv8qY{q^bU7Fm_D0O``G@E*AaCp7o0w)Zfm 
zgFy^yDO7L*AoF<0FZk;gc$4M!%0#d>=v9g!VB8$fzIeZ3O##@wD+r7U(?h?L8RotM z&R-CWnONUwZ@s^HuVwRJTgp8nZ(ycm-ZF*3XoYXzzNLd0hP40)VVZ0>L%*V~tCeMg z1m5Ki2%Ax}C#CQ-&htT5-G-W^h|nr#gu!8x8RG0vtRGDj@MigXT-+>LGC%g|)do3V3HmcyuG1?%ZgpV+E_Vl6p?4v)hgO~6$%%6borCuN@Fa=j}v_OI_@^vb# za$CkX($~9txX(N^yB| z?Uam2Ea7f^M{_D8WmV?+4W9DRE(EsO9|92K*j{N2+@p}@0V1OSh$NxOC=mt4!5=Ho zBSv6QthKDEbOelMbW+lXX=%=~rl=CN7VzHRLCoQiypl?K?sHu_G{E5SluHP0zW40! z>*EJ;#8RCxjRE2&DVBMoKY%9X7-k8RiO)K8)5AAp^=+KI;p|N4oBRTJ6T}nzlySX+ z;q63VCvICxm^%IcHhubkCKdjlMpOTmNnN%fIe3rS@#p-0-g};v^;t7^9 zs`J5*Exu!9^soqi1^FD6C1l_gEFJl`8Zv4H8i$DPsjsVBc=NRxj<5DY>qD*C#SzVRCgO@zapsx0A<^mY3Hz+ z^JSoAq_(*paN04Z_X@myxMTnUFHS@^B99ZcYX?w+FLAiJ!yQgA4%G~$uzfm&CC0Sd zz$!d(*qPu30lWX@Xv*ZC`M;0V{NDmOZ=z5eJfNJIIzfbBFvyv|7FvBPt1t|ykQwRq z+Q5TJ%MV#*i^$DE6b7M~5_q_;&Yh$@!f3fPKOYbuA4tiS zW-Ijp)DlGv1I~WNY94$8!hHy~0t>q;$o{1+-sp#MgLj=FYVe`?%ax;f_mHKQ1l*b` zq-8*wIu%GEhj z*XlZ;urkX&6}waVExRaE^6+6Vb5$1t{Yj1~kPTNsn)XiRH&0CWcXP2Xc9=8G2nN&+ zf*L_kYJlMlbf~nERIX-L5vLx5HE<980vU`r-|B03otEb6T%LbsnWe@iOkn*n) t{=W!8{okI}|F8Je|Nc*k(7msRlzF~|9|g}B4U@rtx|&8BZ?X0f{{xFqPTc?i literal 0 HcmV?d00001 diff --git a/docs/assets/logo_white.png b/docs/assets/logo_white.png new file mode 100644 index 0000000000000000000000000000000000000000..574f288ad97c7f28d46caf055057b0581151eec4 GIT binary patch literal 23700 zcmbrlRa9GV^etMXG*BdX@DN;zJH_4I3Y6ldXeh1)Qrz90AGAnuEl?nMDN>3{2~gaj zIGo-89p~YWdtc5t4;dk0XYcjNT5GO3*B7O&sf33^iSy*i6Fe1VdEF;Z&?5i6Kv=*x zrgGlQz#H0oT_xEkH9x3#fDf4VG8!^Zp425gzq5P_e8zTDHhKT#2?74U7h1!WbNrJh zW^5|*GWtHR5AvS*Q1{OdGUOdXBk+@VOnMuv{=Ab#`gHR-f)-Rn z!Gnp8&BRE8@gL=4BO-YsCgcMz+LDc0`Cf9@YNhGP@E^`)6=DB|j-w;rKUuI_7uS*7 z0$*?M|2|h%?mdexW{s{F`K4LK3j7sQh+YJ|lgK}n)ge(}i^M?y|J4PPf)sQ}%Ao9E z;1X3CeJF{7PHJd{^uJ#`)d|rCKH?PrzkhhxUwVaqn_+sb zWrb;p@z5w1ZYD2V;g0H=lF+`QB}{sRkk%Id9p-<}3 z=c0W`WTBRFxbCS^7i1ys*s{Urv?6APM)8im1a&@mbyqBwIGY72j+iD!gj#+{{kVjcGwLuTf@eri zFIb7w#g?-x{LM!D<7ZYfV7XdxI1u!p-ky1EQi#R04o zK%Lk`fb}8Ka){!mMXeeiWj~Ps%R&hLm}b)?9l#l0z4Z{PD+VJuBaGf-NE*V6H44-> zgyR9$S4|Y`+Vm%m*^M)EEWX-~E_yj6eJ@6hZ=so~EH-KWnH3AzG$V?9S=4eKf`Oke zpF6Ai?tHGP38G38>27E^^F>wZ)v($sJ(Z;p$Wpfpi;s^V-B#;JOsnQ5>C>LgE7IN$ zQD!`SU(bJ0Cf<+94j%bXbcA@{wsZ|l0(HFxX6N{CF85$7{L0AZgxmhblW#}Zj8f}? 
z$-)K6TRO*1y#Ti4B5=Egv=MsQ6B2Z|*p8NmU{UIGz`9)mpH2&k8eEU?_VSX9t}}_L ziVnNipM1SXzdQcZPRWLk!u)^@L-whx2@H~9ESA@mHNRbLlL6bl$8w!izSStD;bvM+ z5!-Bg6#_BvV2!GIJvtoa*wKfh~c6dfK4>xJ_Xg6OsQ6Y)m=(l8Ky6C##WS8 z=TfV>2oDH0*ve;1Cbun1rG6<{rder+Xp_KO$L{-pberqj3jJOT%CN;7QZ&T`Wy z!6gY|zj7NfWQg5(Kg0OC zyrg7ZDb?RIW8n)p3XXi#Ptv{tCSeB)K9|>Ztdu+o{%UpJ)%&dz0a9lOA}qP@f_z;W z=;o$peY$IxB)0b65gdaDGCk{D1iSWiQ}h(PtlLH1Y{|ot!rK1q_Noi+o&P1Esj*gDrEa^FSH=wGiI2`#W z!!~?WmQ}Px2S&_}ACpk8>Os8+RgfB(#LqAMCWO!Bt)A{${MaKas{NzFidZu@B=Uz1 zS@@Z!rkxg|6Y}t=bJ#S@m-FcEB~`atR&yN>7g6gvtqqz5d}om+ev3zZRp+=niT4Q-=@^ zWycR~b+3r=&T2XeF{YJy;E(3;BlsZcjydaY#hw=ZKfflGmo{#7TO03NJO0SHcgA!m zXE6%L>mG!@mhr2BC~axs4803AKx*TjGXbFx68-Kh7*1Ei6!w;8=tBL!v3kuqA|Al} z4-Fp&Fc>+mYx>B)t7?7wt={w7CYP`535=w1{sFY@S!z^T0mO#u2Umv%8s(96rW+f* zCpsa=qTdybhHUuU-D0W#>JetZ1~z#O@ye6$bn!Ick;>5j80zQu$B^}#ehP;g6R&@o zs0`)&Jv(JmO`PxtKQQNxZo2f%RcW^CIxhoh>AlqKNnEttN#yMd5}e@Q#*~+F9kx*-TC-`kaH*`E4<+@ zTG_~#SD=-!C4EoJ@y(g=I$FxItTm=RGXSTdi;l0bfWD2h2djjW=HTqDfFGaLN$$!O zCmx~U!ujCza)I&}nWV}TaYF&Reyci~>%%V>YoWgjIY4y1(0H&5)ZwS-lsipjneHbg zUU#FE^lIi*7-30l>jQ@6a#?%(O#w;U&0C&(2WQOPP>s--=Ko;et&~}+LMAZIN1MxE zf#4|eq4qzYOX3{2EoPy*a(S@+tqSR@*r(6Q%oI0vd!!)o!o`!@AxM56+Q*o+g`D$e zcb~?~)31a*zbkU9l>bZ-6BnOlejCh5v}=riOR?3gJfXNpPb6jgw8GWo*^^F{0|uff zgp)a(+?ib*(cW)bMg4cv-=3(87!!9G9Q$aCC>j2HsaQ*$TbuZ6(6@^90Wrtv9@JQG zQ?XT)rT&K(^_IQGN%rORqWN zHB#H{#dDr37QG|n{F>T2hg;lG*&>3DxfrKhL!1GD_lRN6qr^UD0Y09~9stmH~Kg>~0829Gd0 zQ1{cc2M``C5$ASq3AJ1J9pAmfFYdBW_Uvc+Hgf#in6p_F-tYa?@Ou&Z8U{Gr&Mv7u z(PPt&Tk$JKpW8tFC>O^-w^j)^jNTPfsbumVG^i(#Gm$}RO%r==sf?RzQYntEXIa6yH%55$llf*6klVj%&= zpDp{j!*bKXX3h?2FqAxV!jxNC%aqXxM*T#{v}4t^(iIt}8Q;30Rs0kNpRFbdbZLkj z@MIex$pQ*$U;*JPVaHs8--|7*-CUFxDw0YQ?l8g{gkijcZ0zCz5!8tdv5+PJ-eIuq zjt6sb&yM}c{;}~Wq3^PGiZSgVkTU#IDUdQaDxP#yE?~AUE-tPbSQiYBznePA!y6FX zZ|@j2V&s&Zoe!QutV@LH`!^9uNfP1lzr_=Zw;g2*68cV$(ya~N6QIL6d?g&Mcq@Ji zta&6qKV$kMJ$L!(%NL8ZX6+mlLnE(*s=t2+M0Y*t3fSTu;1c0T^LbHyL~%{cAsWJJ z4=7KDNiN#P!H=(LBP95el)uV;tz6+WCxHvn7q_JBAJL8%7Se2-_cENXBz9|)r#!QS z)_iw=5KMN5B=SPep9A3{ozXsZec=2Fxa2)i_eHSs=rv(r4S0Lh{bpgztl{hPTByZm zXs-L`)ynDd1)DexoE`4f0js(=<)9pHov-o0wgFW}$VJ6U35M3CQT?cSjTFDd8TSrQVHs^Zn?R+S$nnmAWb!C4Ir>UFzBZ#>rh>K@aOYgOSpVh>wo0N z8crXLmlSn^{bdX+YZV!YxUFjOmCWK?B`o3!cX&tW`kOmZrz_nz2W;0CsA z%sbo-Nu=VkT4bL_6>Y2(rKq;C!jK6ui)Cyyu<#DJGjzOWr|#y7z^uwJD!-0zWnBqcC=0(?PV<{7&w3e;gNRUOP7x=f zpz?r>dT3~jytHd;9rZOXJOZ~isC9$=9MuCx;*g{>Bhoxk)>&5p&I!2R` z5G^K|JeS4k>C%J_KWb7?!UuXP4V&{n0_gCBXGR1uAGA~bX5?c@<;P8ksThzUK5aFC zOMJQWvPMVKnBKH&zldr9ax{o@wgtu~G{-|$U};wt9913q2RA>x^7Ij(qSoMgA+7uKhe$bqr3F+aNAzHNUnEi_C0P*!|c zqyJBUim&B=_h_qi2NOkd&pc-u540l;s*-I4;Vmx3hGo7Y!oe(~u6gT5_!mpo*BBhcvv1OwB#|FMNK?pI zp|kkpZB^9laK=Fu_PjQ4o@RHrOg9ljH;%%oD>-?X)eaeM*L#ec9{H|doYhX!BF_7e zUwtuVCA`991)Z90H3o1xQqX7*k3b(NSup?!8O$&>HN|MV%lL&^2@-X*@@U1i*EnwR zi@l0tJRHaiqbIa*Erg**#*^fenST$XKo3~g?1Zc(9ZQ~Kr^Pt9i&DLK5MqYveb z*t{0QwMt278R;OXY0tRQa3s=1PAvC1-HrjyHGkssQf5guldZwV?)NM&2HmKXE0Pu8 zKQxmodAc5`?M_|~6=Z3}UgLXd#W>ZR*(jg8 zInupg=)k0YDJRif&y`-{w^AYR($Se|+y&aMovpcy4>7Q4mUisIqH&GH1IIrBKq(X( z_Q}=;Z{W--woeaI|6+(>+F*dm@YkuRee}eTeUgyZy54S>$#n_jhcn^CAnWe5P}kUzdyhbj4Xq? 
z>aC7&`jf2A9+?oQg*I2oBnA(9a)rronZ#ZP(-CIfICIeN0|5*`Z7#iXA#=*&mil&< zcOyYRTgu8d`H+@U$piSYYYTfkk>2q8y(}Tg`s{BXg1)^go@-!c?|(Nv%mz>^NrSF2 zXeYPfLqix(Kr_0#R;SDhZ^`<0WmXmh?e2I2EYkd+CMh%itpP}>_|g-N1>|vUa^^Qz z*4=|YJ+gtG1%e@Ux1u`_D_IpF(An_x5uXWq4llO`U`+y^@;~ZQT{gv8e>i zI}%Jmp^t0mt5VZxrWis|xqsV@J{*0{7P}?`LcsgRj+XL=ZaP0^R%duDvD{0qT<|c1 z&+$y)pQL?ofdN@+593Y@6&%0=|qzo^&u0&sN@{u@k1;wPE@t&!cHVLV#98B;=o@7&|ApU9Hl zFj?+XUOBaO!x9o-Nf94zZyR@zkP}~IaLvAR$2R8vJOUKOwNxosMur5oyr4*pWguC_ z9Z(o>g5*^c>-4Yo1tVXc^pZRCS+C$BGHeh05;h1f__=r^~K z`4OS-;LSL5>%i>sbe6K0ymq;yzb34nqN}@JzwT=iQR(LZHa#xJ$cCD`ZO7v`&yiaK z(gGSQ@TLldbzpl*iJVa1*>}#&_P02$>O-)S~V3l#%!Kx;aiX~sb22P}F z-!mTxg?yTY3hwD}UBp%P;%XvoSxM?o_i_5g0g=O&o-9UVz{hZ3+b?#F!`a!np$#jA zt5E$VZE)TkmiMayZ94885;Z2sziZJ#d^DFR897iTgHTCrQ_~#x`hhoP=jcg0-dI?8 z9FdO`zb8!Gc%vJA>B?yNVc&0sC;nGFR!v>6aE-o&DS&}gxs-G4;Q~<%pTyHCm zqetecEE^S2{19ig1<6kRqH2rm+uZS3NMrFb{uD63vchGrXDTkP1Ua-7v~7xHTagC# z_{$4S9IWEOjkB>JhL;eCkS2F~38IqKvphSB*ewppHPnTuQnOXBNzV+(CPtXNfKXo2Xy31)!~Go!a7v&7SN2t%B$JojM_~YqL!;u$-K+Syq97>} zsYgRYGn+`4%xwrT-XYqt9|*N#ZlB5d5zfy9?EGGPzbZ@Z#VDFJUQ-cM-qv>g4s(|x zd8W%%I`AW5q+ZJKMgwRdLD?Vm@_9c#AYr9=ypax8nyYJXf3-pfIkwI)H#f(Y+?oPt z3Jk@g)gtYo@SIxFnst$&MaSeHwl)~)jQTRTBYG=baOYiaWXagjEB+>&DRjbZS>^VZZ=|42}Q-!^mFkvvEe|soyVjw`}`uF=)l``Fu z2wV!?Bo??IZTEhZf0%N>ESrD1bHu9dDDHg*AhY1s*9ACSulehOrW68Z$EvEj{Bw=3Z)Qk)XYDnf(^ZUkTP;M2~c#ESrvB7S{?EJl?LLpvr-D#o&ye&37Ft zBp2RiX3UYdP0`&*$=x6u&SOsU<-+NUK zqaq)k)3a}nBp)eJci9-!az8%kNB<$b*;T3^F&53!e9SVY`)EK0IGfH$EW|;UNG^~{ zwvK+lPil(bn8kO!u z6QfQ4Z3$2epzI9tv<7QUd0#?95ld8V$%Iomo?c!qTv~)lud^oP+g^IHZ<)7DaTQor zNe#4iPfbovb~03g;_tRXdcEbZs`&Y`zJIjYyfsrg`XBHV15{cpv28~#w3nsik}Qt|(Z#%w8k!`xKNN-Fn%t54$~`iD$7 zLH7=u`8MpYAVD!Yp_w7TJ$7WavK1JF;`H(`W{jF^)$>{u|J$wT>Pkw34xjXQv2l@N zmuuIh^16o{VR^gz`C_aUWUIHN|a zVH?KosY(KFrUnG=Yg$O6ZU3fx0JceBeg8TX2PONJAfy$MX2q|ttE(Fg$aQR>@KWZC z(Y*E6+!@`B6yc)#=J#f*w@C&W$+|>h+i6{ZCwoD%*s7Sltg$G)Mo4cz!Oc_?CcX9& zjetw|fDFxB6{-BPii^ed1FN#XM5wgch^lsh>i?5}u?;n@T5E&}Bi6V3j-dFW<;EvqphgJs zil!$b=Qn*B=EQ07?&(yBoh4}CecU*ZYHiN?qg5*QUfuL6oMZ107*%|QiW=ka(k6q!_^g+BHoc$KEL^8G*~!04pBJsSmaK9l>PPg{ z=UeIXF<*cVv^QJrJO9|ISLH(jpl*6tPn8ZXk_Hm_g0+5Z^SpH3{YKY5ezUx@v(rA5 zC-==Ss>#NCwZ_gWMyZ)wyaxbyluWpIg8Iw8GiyN|b4GX61~2bI+jB=SQbGpQnctJ1 zB-uE>bKQv%HN=~x$M2?J%;q+@`v956!u{3|*Y*>Db)%)-2Uv0nu>ru7(XBw}9VOuWv3kwev<(^%} zRi2#GEP)@=8r$f(C8Td;B=zD@KDi)ehGp)bRG?KzLj}lef)En8%=90%YH4{CKkjgy z+4`uUpx{lbH6iq`M&s5I_@QrTVLE?T7YA4o$D8sM z17|bW^sCtDPqQQyF*9IASv}sl)0&qQTb6TCQxh9F;rp?ZhA<}+k$r8>l5|aoA55+RF;e; zl|P&dj{-tPs0R`2whL->CioCwG)nTJSp_edeV^^bvDObi8IJKJn3_G7_ z=^p}bcOIUheOP_LRUT*KrTmJvL_Z_Ne@JxF9?Zq;ysT3wU2yzFcFQC@%xx{p`{W`U0TM}zzi2PAN(znTII}D zo&ZF~n}Q%wr4J9np>7-(UBo7fhBeNyAq{O^{-GPXucihf&bwJEp83kaWx5z!G5h-& zU1BQqX9LMOFL7_&@=x zBkRt2_YQ^MKv#PqhtkxUw|H|QVgomHS7Y8uX<_MPz9JvIlK+ZDd7X83G)SS>g#xv= zk~Mf!XM7-F__ByLbl-hp&cx1DIw_8e1@8a-??Q5cW4vw)IX{2PwRXXC9C2k@CxI43*D0g?@$ybxv4{w|gz<^3Ml zZVFSLP^az{;+uulTAD$il*aPvW?SvZd}V67IoD^^(o1$Z&>TPN@Ca?oh2Wm?kVVkP zmnStjJY{mO$9FyzIPhsCH`7EdDnkE*FP980Fynuj__0X1N=E`OT$P86W+VoDI3GbO z%u8T;bOS(NDRKWB>dNnF57a#67^XrPnOyi<%p`R68#I|k6P-Wow|C5_gC17ISK}5* z1)=s_&CP{BbAQOzvNv+K)r_Nt$>uz-b;W-#p!;eHIM+p2ZGaY&UYJzvxE_`2ewvgE z$4$ER2mr zbwJ4ZSnVTkWafeCJII{RU59}@f4TrCIVnLSdgi;jL47kAhA4kSFiQ9Y)umwe{k5q6*qeR6jGLqDFUq7UGKaz(tqkzY>@%nrXUD zGNau6p7B<;jz;Z8>KJW}aHcz%Ftah8S{ULD{xlrd<_>|!@cgkY$DAi^xg4#v4G9uV z`umc&=^;}N`&0e=qvvo|@-`>B zBiGoODXa4_yp|sjYTY#&!K1@fl+By|n?Z8gNEzBHk*6{N2+3Nq>-t4ONq+)l&wn(} z%z84=lhm0ni%TGm>XQB@xRdwDtrK&ZQ;m^PMDRDzK@{4J^69k|$J=5Jo4>5w@`g*B z^7kH=g^y8nD?pKKG3?2dD@749K7K4geipd?w%`%d)^qfzX3U0<017tJ+ZxlX!%kOX 
z6tcE``^$RtzmpPa{eCOn`KfOe3qL7rt+^=>AwFic&v*kMMdZ9ei}Yl{Yvi9IH-bD<; z4W*uT^`KC;55Voa8U>_7>E&8>Y5bzr=o!}fXn2f}W zZ<%@OH{jka5`xyQt3WFV_n*rs_+2O%l%Y$1b@2RCjGV@$j3L;OfPzn6=(zHn_o-825b$!Pg_|}Fiqrt%&t>j7*`u3$Sa?BWmPp>UH zYsX$DzmBvQfDw9{zi6Z5In|qaBwgVJVeE^xDHK-ndeCE>Vy`tzwk~T%EN<=iFz$&1w9fckx3X; zaT)pnbxj#{aF9oh;~q;TR=1807p1hfn;H@$wGxj;e-HLc^dE^NpU=YhuC2<1|4~d8 zRMuLtI*)s`(b^%$tZPMb5jE0I^3cpWiTRlsW;K&aEEXCs6EPS5B2<^oCn`)2c171` zu`7BcCj6tD3+?8b|0;_UF+}ub@F(t@Ut81j#=GifjH}|@xqbl=-A6;+of<4GfpYii zMfrb{1^5&xtoxj{gbB12|)lkN+{+`6KAgug_K%Y z9r{OTuM4wIT9UAV;jR0di;$K+W1@T-k3Ev7cEOzQLNnlQ9DhKe-m%Z3|e_CJ2 zCgc2H0U$`13@wgnPJt#>37$}tf)Kfe%@XsnhXU{->JO7SvR}*ags0yU`@XX$ldU7* zw-%ir{ZEO%tjmU()AQX}`k}5pQLd9h(YO*}NF`yF_z;;`p3d&^+nc2`WRB?{cXp0O zmH6KqcL}SQFgbf6p^B@wZG$>VD?Q&F-#l4CLOWCTPZ3=;a#;Hdkk0a`1eWH_%paWm z>#IE91_-;Lt{>4z-p&@f?5warZb>ULl#E4(@Xovb(xgaU%4KC92mkm|nv=4Zcw&ic zp437$gIcJrRG~ciDk-~~gQhJ+{O)$H%g;8B@9W2W*f73?P@Ld-yqjOK%ukbE+kHh=3|p!UbFcw!^9e%Xu8RrA-_ghGcS?~nS9_O%MuzjT zD^{_@-*nfV@*_0HlG-t5blA4NW&F>jlzf3nUce_W+1=wen0BA1neJzsR7h86wJj{T z7Y`uUn!9pOdn+g0J^{kmQ^q&}Xr5~Z(0{=!Y^MM@E#uL>+KD-iD6Y);AtA68eHX+cHLVOvdI#<3UB7NYs~WnWWc$5AYgP9&LWwk>WXK{By12=snm% zW@ZB(ry#coU>vw1kvJdNqI4*`XQ=68)_uNOUQ~yT!S2}4^@yOaSlyd!%q)XRo}092 zo{%#9;QqNJ(tU#KL-#7fQgdeT7df68<| z>xnPO4>U?jbjK3txUpY9c!-`@U{~sH`1udHS|*NYAO|UG9f{djSZtouCto3&mq?2e zO^OV-Sauz8FPOAM<4!)0vH`d~N+(UJyoGu1uPeA^d1ksuJzA4?1A@aJUbEG*uQs#2 zffJIc8jL&rDGRCbNWEXrki<=?|n)$tqhgqNR#i_rhb^rJzdSMWUuN5hXY2o z4KAF9VDe~&Db?cz=)&!0zc04PdYCepA{Bd$g!u&%-SLJklKg=2UhXBW6i!n{;Nvo+ zRX60((hmM&jcY3K&}`)Q(uWW?tTpZW8qPg6w30jKFqbWQx$s621=0vf_M@<>D4EtU zzr|$OtcBq|hN!Ck-)ym{_8gVk8|WwBvp>t!G5zvx&b$5s@CU&}4WMa2f2d{;f4LfDH^GnipDok$gJRX*#qbx=NI9{oe^E z&7D+f|Ii2pW+mZFncgU$k98X3;nl8(FcSWd-6Rpnj9&E zjz?8ZchZzMjs~!##PZx42Gh6et{!+=t+!{oBme4t@&wKbWn6Tx1)0DVn4f> zE3_Qu=Rae2A{^rS6qcsyJgB}-m@EghHPdGnby7vi2dSEOs%%{%*ZpBc5Roj6m0{1v zOc(&ZwZyo}xt-0TuyrG1lR8yKUYj#SI_G`Wr(QVQ^bqSbdjGyb(8Z|bcv)kYHWOPb zW^mn|&!~{3o5Wb?G0l%MjL?6K0|kwowCz9RZD==-bS)feCg1ZKyc*Hd6lbt8l-y+W zR9=))yf?YT>Tu=}a*6J@8u-!HOtFfCn3$esblrT_G5rPV{M{|V{?GOf#&O#rX4oZX z&>*i(H`CmyxQ$R({mV^+a&AuJ7NT1r(X0HSOZsic>YH*}3E?-%rzl#ln?T^MBq^vMdDup1(C4L!u9rL>XJbRFtxHjf zg?xR#p`0M(%G~{-|LE-z!w_XKR~M!{qh!fp!X~9f^RK+YrYL_Wh23LAc!Cbewo2NP zT5k~b0x({2`Da8H9hsO|n~oknzAUiXQTy1R0d}>rTAPmU%n7H(SW)vWI%U{^HZp- z_;~#JoylhV?zrB<;HU7!turZ`<7ZanszH_1J`A^aODYSq@L-$wV@b|=?tO-aglt(Af;#qF2Fs%9P?2M_0 ziKhWQJ;wFte6Z%!rJ5r0w`uG+8nQg`qFU{f6l7aM%P~*dFutUzjCKR-I2)kw9SB-; z?S5w97t@ZH4PCktp5;hww&XLWu$Qw=VA?&UABz1a=gOuhz(~?opyMD&|4zJ4E+I*V zZm@DkL-O1NT+~@P2?(wW`!aS_%A8^+mIAS#sZ#sy#KNOy_EcZo$6`#SezikO0M#TSC z%6DXkuDLxIb63+$Qy5IO`g&2L;*t^N#u&JvGoW4Ae0~@M#X)$4vxZy2Xzr!!g(#v! 
zPD%4_eZ(#sAQT5nfzpekuQio;^d4zUIDTv%hB&HC1d&h1x6~Hb`f63Z8~EX4NYc$D zk@|bbo4Is_2JWNYZ>fXM2F7=PI4eWwshI9yaADcyr)*D`C*6fM{xa2A42{G*=;Ceo zEw22S2=f$8Q~n~BtGpo8V$RkG!vc>`3YdO_w;vBw4csqtUFZPrddS9^!x8 z;t?SjD7+iAgTHBRBeHx18bJErX~c3B-*DshZELf72rCG6GI40($ElU+y1%|MguLOP z8$C3L5Z8&g)ZXY9j_YWlu*Cf&!Wjbn0&P#q0*AplRYd1Uh)r>ML{fhQ+rmsdDdS3ch3S@r=LAyLLF+T#xLsVhs7(pnpvJh5clRc8N_6pyzYC0U}ARB zjUD;qKw3=_4|HyDsE1qZ!6_tqFS9Pn*x2_hZtpwa&$j&dK1PIc`) zK!>)A{4y64s_PIwoKEaopPlF|y)v*pfBHi)VRVOYYU-~LJ@-T2#}CWa9ivfVW)ZyS zrZom$k=2cJKT21)P}w$2cFvvikvK>qH-wc4&7G6j44g!eaET<^B(ir5Gb?KR>_dS- zY+-(PrrIaStLwO}6RPTy2DQaA@#c-}4Q!gl*EoA{(Y%qSvASQIjLH&IEJ7@jF}wol zj-D5t3|&VI%tkE}-G_fD(c6~4ztIWVezvm#i7rAun;0fwc|r9+F^`pXv|T9I{mtn{ z6QwlN;$Nhv$ISqO*@!383^~;eZs+y5J`AL1%`_gUhpAfkOVjJ8sD_qYC9$Z?|NVMM`9`x#d z3}PFG2tjNsMx$aUOiKsZXEX-!FVj@h@j0}LhGhO05UrH)cMq@H68AKE*kPX*z5cLD zHf-0>jr7TvX_mAQ^SN457wNVgVzQ~8Gfr~cosZb|nqpuBbRrMoN^a3TIhQNb)reBb zywX@Y-&2)=i2(%O@MvdDQS4J+d%L74@?fq#)`HI>|J>8HiG z=ex%-rOzijF#MbN8E{a#?Rhe77$(FunD@4ll?+{_ePqr#n8$EeA_X46d_lNwq!;8>Sl{&hd^pWU(D2On&_-o-M z)RHth_L8Q{KFdQakQ<#|X@EY`iOGxuCF`3re*!aUC8e!g!2%kFPmE-4@bH;Q`O7;f}2$@4D+o0}TZxm^o&uF=3Xs9EzG=f=z#%_s|i9bNnwnMaO^>jZ$*o zXq&e`@$_4rvoV)Z#n!Z@ZfrI_Q|>rT)va@rV8dOUhcuTA1v%G=fJ_kkpMiuN*!qKF zquO^Q%Fj#;XwdlRms;NISN`EE`wcy4IYkIU%HsP~lScFQf+w<85#b z?PL?B(-=hlQM4Cx;WAa9eRKY7Kc?p=2S~@=fiozK+Mey)9t#u7itQ5o7KAG z9-vUrDq6PsKhU$q^VqLs%+8=6Q6LNv|GxAB8XhuS*TgaZwRb zL!2;3pajV;d_BWJHIflTx9iBwEYxv3Ipd=P{p?W75p^4&o)FAH)w!%ief8rPuVmyI9nxs$Le$D@7vo7MybKX{-G&CCw`M`g|v>{(Y4 zUG+#4t2V-aRTp)}&tD|-9{jHj*Gj-zz9SA!iDsJW#fIL@djG4G^Neb8`PR4sB6^d?AiPIJ^X2Rg5#M_hFOSZwClRK;L<)fsS;I*=e~s7x-xe6*+5#- zR!Vb_Ff#1Xxv4>o+a1zVh)iRGAv8{R-MwPl(6fT&eG<-dsLoqoz9*n42e?mtSc39P zADxhY^C2s~TWwUqA0KF^ooj6{fol1v_cO;&3^^}n#OTg?9%SmCki)b-+oKxtvxNC> z_0LMsFNJfYZlneyW;E<9Vji!K;;Na#5~-rJ4Bj?%=zD`#2DA3bw(*78`w4?rys-H* zN(3_tBopn%0yQ6O+DLKO*ePuQihgv_Zu#Q$pzT&r^Z^^;OrhPrHuJI5Ap6f^vuF%U z6Dn))aVM!{pzq-mnd89X>W`Z}Lma@{FQ^_69Oy1iUsC8utm$CQ2-Jy_VOANl$c}eg zesJ=#*=n&7$Upu>W}VEvYRsuEwks0&h=GIotK}GQ7Wt+uqs@@-0oRDX~*}sX0W9 zQge)`T7I%h4~}b@{UufO8?Ds)H1$#@RK-f#(VkDKFl)-PV?TuTyz3$IX5q!xJAy1TnuBO7-M0}s1t6`x;il&5qn zge&JQ5zsqee6_1>`QpRPh6z?U&t83znIZ0}{$GIe#0XCk|Ku-|jU?2BkROH)L%V(F z;16ZntH$(TW~7#!ig5Ax)M24DS1FTtTINp-k;1LytIJ;CITc0m>g@MCFonx81gzoe z9eVLi;G)QvW7AdDch8Ty#T+e3)+}8-@lUvS~0wwB^z5^eWZaZUE0Uz$#9z`8>Rq5||1XTD5zy?vsj( zlW&NW2}PCnF}kq6e|h)Dgx&&qVb(5QFcx1{c zN@dzw=OZ`OH0`COjm|Gljl_&!q{QjmOjxr<%H6WN75cX2O-u#J@ANfcxbm!C;@c?t z3O$z<8uSfed>H!{YLz2&J%N$TkzJ6Gsr1#Dudc2R-;Hi(!FzfwL3w4ebD2}LGCD^;3GA=B`o#YEY$R85ar!I%$t9B~Z7_Ca6WdTzjLEfvUqGji zABq}SxB9!nJr4AL>=Z<^^d8oot054JAJn>kssCzR# zEj;S)s{LoQ zL(u+-Ex}7Yn~-GIDSR?s#*?rW*5$?C=2bh_efZ*l(eTR~y@aqjwWHDh@FjV_(L{#}_o1J=2htH>l$P`0tk0me%x)@;{{o54#>f z*mvQfk1xV4aU_;}p|(*>E}eNG3I>$b=N_LV?O`%I;+f_3M?aX_t?G-cdPQ_U6(Jl# zSHzR4-c{$T)~ch8FGO8{TYy{j>tIx)bYi)ef3L)ibIrpR8Klf9^HdO!0vfn|?cpCy zGT^R#E1yLS)pP13|1FCU1jiJ-i&tVQaRSX+cz%gK1=4W&VXx04SmLc7Eb)pr0V0O*5@4h{Hpe_(Z*2ab0V6wAa zIpU9J(1PCFL1p){S_`_+w(~`x7^0w@)tlij5}a5{NdW!BSr*x%-;u!HE@_(449Xon zka-F!aM!iZh9rwNKJe_T>Tw#do>`*_nA*S zpov+ZkG(Lc+0KVn^)IX0px@ax9x*-JToyYCrTdxgv6oQ30lrQhYrwy$UCu~>p0EJ; zGJ1C7LyrLG9(9>;T2gyR)V@a50NC9J`lanDo7&!L^b1G5#!Gu97cW*|$sl^km!Z7y zn0LpsKCjv*Z`S85@%FrjkDC= z2fU-qhHWsG>7c3)AGV7VxoZ0Bh7Nq~IV=39u{?wMikEdDwy7$@7?)Ar^WYu8jK^`$ z=muugO^>R!c>DI7zr}zBWASn)Ez&}$s(Ce+NFRy(76olY_WDTAJOXXm&|_5ztV}?Y z$qqqe=L7T@pLTkNgS_9Gy6I<7Y4`OxKO98;5 z<^5<9#l&%Lh8z(OJKwS9V@&$?(mJ9tF|I=~QzCX?oeSkoXf-jXRa928YiqrA+^yi( zB(rGRT~r^Ekf^`$w9zB`IThLFHI+AuI5nr5s&$EuwdV_eDa5m4Ly|MQ7teUR%1<_Q3Gmh|}w+8JW@}eECFJWn}XA+1$eMv9DZnMTp)n 
z`gLmhf-6Fx%|(erUEe|G!c`-?D*2vozBv7cJodQd?fVVGMkQ_5;0n0r^%fT6dUUy2 zS&a0vm9q~xT)1H>Z^KQ128hriDs00zqwcy*lzY9TWaX1xW}x&qg@<{S0U{BqHLZsy zJw8mu0odVt>grtWL+(NID=RD442;#m+yhIRpxrqqn=sdR6i#T9&o)&*=VYTl_Qe}l zxVl%U+76|rj7OFsXhXc^+kLshtZnxWB=R08D!L!EbBxr%%P3EnKlf=s8~_3Pm=ir(f+5RLKjJjg9MWaPrvH75i1n9JN3* z2f#YIOq$6V!z}52Pk_x>J6FE*xOyWe3%Nj#JXTg!jbgi%*}H13F7qHj(^{CAZgow3 zc-_}vYUPhN`R7$kJ&GQPv0KCsE$bFO;gKHqvbLXgh7YNP_TFr6~GARc!&LA$4x36D1}T>NxpvZM?sbI9#H z+-MUg;rfKC&w5W&Mlnr|`4-;ENHc~lWFqxW4r`*S9b3L{oSELzy5S%Q`MHi+3|@pH zlbQ8^8pbG(-{FE%agIjIRx4;n+YbsA839tr0Uu0S87IhM;`{eHSxdVgP$W18#Js2b z_Eo4zJ<$ic{YA-dyGsgZF}M5s%(AKf$5U&A(X4cI(T==wikHh5Qso&>U~_fu1{&sV z&q$Cu5$OiSJDgdA7&01f=(g@wsV)@26*6_Owrgu^lPLQyRtq}ItuX&4De9Y3-^=dU zvE0M8iP*Gmv<2YjpOJ7$NJ-hXfiw=$H)awZkS~dYdOd9Xp=QfI4FJhtDa|xrq_oQ> zeX?v;&I4RDrx^HQBK+M^Tc{8!1cDDgAAp}uBT-frE>z_(^bu*J^*W(sIB}@cg|Z}Y z^zK{^ei-W2ae$h;AAJh0&nxfiFr&K#pa(^c!U1o*6x5A5tqst9AwWqQapr0F%jAbP z68tDx%6NM|l{l_n!25e%r#>C!)Y3BqaVbnog@rGEZ zYRjoDFY)f!GAleF+8D9u5mN};@lm0>RL?&N0z$|uqS&IA@PK9<6rD|>nR3{CRftUE ziCC1r0q!KS#!7dRWrGOn{0Sjfrv0OCP6JaVF#5w@1_<24qKsUboxnU{+eK@P7cHBk>R$l?4- zf?7sovSxF;IRi6mK630j86C)BePQgs6+Qh_fM5k};km&WNBh^%sy?j5dpBxT(Srv% zOz3;}X&O9L?g+)|#`2p;G~f4T1MQs?XN$9D&~G`GM||f2(TK8Z45I6(`%J4Ea^@;J0y$lqW&`bIU@HyosLk$6Pzd=uEtd_kso(_{HuHhk5S-}}w z&Nd#RD$J0!-xwA#?BIR2rjs1z7;H=U^S5O5^v%BCw?Ce0Y)(y)7Fta*Wja`sCNL#$ z{fej2j4~SD)<>^q&h&1b)D#>WLXEm(O^1>|5PeQgYl)|f0#)NS;()oG;;wXbkV)|@ z_G&u@=ki#K#XONPqcfI|v|p@c%YPR3?avS&S=Bx?<7GIbBQIP8aBLWSVlw13f29(3 zQPfNfFsI%TBlmY67rliTRbxPc?SCGtZvimqUvTZv~ zQz`y9YpaWoC==T?FVdhJwN?T-lXsoiV0R?}i3@>9HV47$Bdpuep*wQ8O>%P(>$g8t*!?i2 z^5Pa~zvDfX-8+lR81(o!b6H3l9;4XTq~y$A99ul|E{Gj2qmPTzXl; za09JDr5l4L7s{IVQl`#I3vU%6X4D&mJqD_Bma1L$vyS$s7LOChk1Ix@r_T-ESEt_m zyVJk6wKbFe$+GgQY}G4Pg`nk2@9xK_zA;*K0XOW25I4iASYpK+cR>$`=nSPAOaWm% z*XPRK741Q<5Tcvayqi^h1L@CuCk`pB8umhuho?w&!X7-q=dKOruA+)y)qaqt$Vg{$ z!Dj^@As^)>*Oz)&NfW@1|40)8_s}BF zDP7;UunRlr1Auf3Udx)lUL2nC06LGZ%d~NqK2DL0nfP^s)GCbm$k!sws!xM1?2UQL zf!_InXSF~Ob3QkP5cn0pu;yC$&G`9Tp57*-{gR$xt$`5re6b=c?SLcp1$$N;)>TUhv~^$C8@QLY_Uaj7nZ^z7{OHnirfL;tI{7nehENll=Du4s}B{yhDX(;&3(_0Jv zH4>%tG;wN@oKr0k5h=bvQO@mnuECaY5xM!{3$Wp( z0yhA<>-b&7MLt^;bd@Dt4_td{KKPD=+Fp^E=VT@EHzD`$V_q9AyS}raGH!> z(nUqN*(9wk0%_JzXxzE|5~klt?6#4euB^Ixn^(?jgW@AMPfyz)zz6`$PBqq5c%Njd z*^{#^jg#s{xGr4TSP=DpAyofmpN!LHw+FHcK#f4;PQ|(haZM|om6VolxV1l@dz1(K z$|%y%v|t;^aEo@XL9rr_bh1>*5&K&9Sm@48z@m0ht9y^POLXoUVm{h z^m_CN;2d;{Xgtb}0h3$cFs)04XsI#>Xz|A)_&Dxv=$4W9qw zoDkI;u8aayqmcj|AzlIOhe8zyu_Roi9@BorO|g*yI5Ro$w_HbR4-AZrf#AjezRZhN zgD@9s-2hCTu4){rwNK`2RY}^Ju$|=txe+4(5BnITI5PJ+nMkTOXZt_t2cp#DWZGKf zXd0kjqfHCRnEc?^vh&W9c|>@DbvkMgUuP4cY>fMV6T+UKsqF#Z12}L2S4!pG?&`dh zPCJeQ!YDB`#gP@TB{uD@-O3tI7KH+03SUiE*Hu-(ab=qhdR5~Zr1k3|DdAy3X{nBE z!u79*X5@ea(mzhxE!Y1QApda|{}=w+t+&E|N1WTGHQ_k0K7f<>6>W6`wF(vc$o~TG Ckyi}> literal 0 HcmV?d00001 diff --git a/docs/docs/api/chat.md b/docs/docs/api/chat.md deleted file mode 100644 index e69de29..0000000 diff --git a/docs/docs/api/errors.md b/docs/docs/api/errors.md deleted file mode 100644 index 0cdfa39..0000000 --- a/docs/docs/api/errors.md +++ /dev/null @@ -1,3 +0,0 @@ -::: rigging.error.ExhaustedMaxRoundsError -::: rigging.error.InvalidModelSpecifiedError -::: rigging.error.MissingModelError diff --git a/docs/docs/api/generators.md b/docs/docs/api/generators.md deleted file mode 100644 index aab0e9b..0000000 --- a/docs/docs/api/generators.md +++ /dev/null @@ -1 +0,0 @@ -:::rigging.generator \ No newline at end of file diff --git a/docs/docs/api/logging.md b/docs/docs/api/logging.md deleted file mode 100644 index e899f27..0000000 --- 
--- a/docs/docs/api/logging.md
+++ /dev/null
@@ -1 +0,0 @@
-:::rigging.logging
\ No newline at end of file
diff --git a/docs/docs/api/message.md b/docs/docs/api/message.md
deleted file mode 100644
index 2ac4fd2..0000000
--- a/docs/docs/api/message.md
+++ /dev/null
@@ -1 +0,0 @@
-:::rigging.message
\ No newline at end of file
diff --git a/docs/docs/api/model.md b/docs/docs/api/model.md
deleted file mode 100644
index 09aee8a..0000000
--- a/docs/docs/api/model.md
+++ /dev/null
@@ -1,14 +0,0 @@
-::: rigging.model.Model
-::: rigging.model.Model.to_pretty_xml
-::: rigging.model.ErrorModel
-::: rigging.model.SystemErrorModel
-::: rigging.model.ValidationErrorModel
-::: rigging.model.Thinking
-::: rigging.model.Question
-::: rigging.model.Answer
-::: rigging.model.QuestionAnswer
-::: rigging.model.Description
-::: rigging.model.Instructions
-::: rigging.model.DelimitedAnswer
-::: rigging.model.CommaDelimitedAnswer
-::: rigging.model.YesNoAnswer
\ No newline at end of file
diff --git a/docs/docs/api/prompt.md b/docs/docs/api/prompt.md
deleted file mode 100644
index 6c24012..0000000
--- a/docs/docs/api/prompt.md
+++ /dev/null
@@ -1 +0,0 @@
-:::rigging.prompt.system_tool_extension
\ No newline at end of file
diff --git a/docs/docs/api/tools.md b/docs/docs/api/tools.md
deleted file mode 100644
index 163b7b1..0000000
--- a/docs/docs/api/tools.md
+++ /dev/null
@@ -1,10 +0,0 @@
-::: rigging.tool
-::: rigging.tool.ToolCallParameter
-::: rigging.tool.ToolCall
-::: rigging.tool.ToolCalls
-::: rigging.tool.ToolParameter
-::: rigging.tool.ToolFunction
-::: rigging.tool.ToolDescription
-::: rigging.tool.ToolDescriptionList
-::: rigging.tool.ToolResult
-::: rigging.tool.ToolResults
diff --git a/docs/docs/images/logo.png b/docs/docs/images/logo.png
deleted file mode 100644
index f66abe732b484f0de3bad74c801e83d168387bf5..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 23903
[binary PNG data omitted]

diff --git a/docs/docs/index.md b/docs/index.md
similarity index 100%
rename from docs/docs/index.md
rename to docs/index.md
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml
deleted file mode 100644
index 5f408da..0000000
--- a/docs/mkdocs.yml
+++ /dev/null
@@ -1,54 +0,0 @@
-site_name: Rigging
-site_url: https://rigging.dreadnode.io
-repo_url: https://github.com/dreadnode/rigging
-theme:
-  logo: images/logo.png
-  name: material
-  icon:
-    repo: fontawesome/brands/github
-  palette:
-    scheme: slate
-  features:
-    - toc.integrate
-    - navigation.footer
-    - navigation.indexes
-    - navigation.sections
-    - navigation.expand
-    - navigation.path
-    - content.code.copy
-    - navigation.top
-plugins:
-  - search
-  - mkdocstrings:
-      handlers:
-        python:
-          rendering:
-            show_source: true
-
-markdown_extensions:
-  - pymdownx.highlight:
-      anchor_linenums: true
-      line_spans: __span
-      pygments_lang_class: true
-  - pymdownx.inlinehilite
-  - pymdownx.snippets
-  - pymdownx.superfences
-  - admonition
-  - pymdownx.details
-
-nav:
-  - introduction: index.md
-  - generators: tutorial/generators.md
-  - chats: tutorial/chat.md
-  - models: tutorial/model.md
-  - tools: tutorial/tools.md
-  - logging: tutorial/logging.md
-  - API:
-    - chat: api/chat.md
-    - generator: api/generators.md
-    - message: api/message.md
-    - model: api/model.md
-    - prompt: api/prompt.md
-    - tools: api/tools.md
-    - logging: api/logging.md
-    - exceptions: api/errors.md
diff --git a/docs/stylesheets/extra.css b/docs/stylesheets/extra.css
new file mode 100644
index 0000000..75e20a4
--- /dev/null
+++ b/docs/stylesheets/extra.css
@@ -0,0 +1,18 @@
+[data-md-color-scheme="slate"] {
+    --md-primary-fg-color: #EAEAEA;
+    --md-accent-fg-color: hsla(250, 62%, 70%, 1);
+
+    --md-primary-color: #EAEAEA;
+    --md-primary-bg-color: #191919;
+    --md-default-bg-color: #191919;
+
+    --md-default-fg-color: hsla(0, 0%, 100%, 0.90);
+    --md-default-fg-color--light: hsla(0, 0%, 100%, 0.70);
+    --md-default-fg-color--lighter: hsla(0, 0%, 100%, 0.60);
+    --md-default-fg-color--lightest: hsla(0, 0%, 100%, 0.40);
+
+    --md-footer-bg-color: hsla(0, 0%, 10%, 0.87);
+    --md-footer-bg-color--dark: hsla(0, 0%, 8%, 1);
+
+    --md-typeset-a-color: var(--md-accent-fg-color);
+}
\ No newline at end of file
diff --git a/docs/docs/tutorial/chat.md b/docs/tutorial/chat.md
similarity index 100%
rename from docs/docs/tutorial/chat.md
rename to docs/tutorial/chat.md
diff --git a/docs/docs/tutorial/generators.md b/docs/tutorial/generators.md
similarity index 100%
rename from docs/docs/tutorial/generators.md
rename to docs/tutorial/generators.md
diff --git a/docs/docs/tutorial/logging.md b/docs/tutorial/logging.md
similarity index 100%
rename from docs/docs/tutorial/logging.md
rename to docs/tutorial/logging.md
diff --git a/docs/docs/tutorial/model.md b/docs/tutorial/model.md
similarity index 100%
rename from docs/docs/tutorial/model.md
rename to docs/tutorial/model.md
diff --git a/docs/docs/tutorial/tools.md b/docs/tutorial/tools.md
similarity index 100%
rename from docs/docs/tutorial/tools.md
rename to docs/tutorial/tools.md
diff --git a/mkdocs.yml b/mkdocs.yml
new file mode 100644
index 0000000..c019f6c
--- /dev/null
+++ b/mkdocs.yml
@@ -0,0 +1,68 @@
+site_name: Rigging
+site_description: A lightweight LLM interaction framework
+site_author: Dreadnode
+site_url: https://rigging.dreadnode.io
+repo_url: https://github.com/dreadnode/rigging
+
+nav:
+  - Introduction: index.md
+  - Generators: tutorial/generators.md
+  - Chats: tutorial/chat.md
+  - Models: tutorial/model.md
+  - Tools: tutorial/tools.md
+  - Logging: tutorial/logging.md
+  - API:
+    - rigging.chat: api/chat.md
+    - rigging.generator: api/generator.md
+    - rigging.message: api/message.md
+    - rigging.model: api/model.md
+    - rigging.tool: api/tool.md
+    - rigging.logging: api/logging.md
+    - rigging.error: api/error.md
+
+theme:
+  logo: assets/logo_black.png
+  favicon: assets/logo_white.png
+  name: material
+  icon:
+    repo: fontawesome/brands/github
+  palette:
+    scheme: slate
+    primary: custom
+  features:
+    - toc.integrate
+    - navigation.footer
+    - navigation.indexes
+    - navigation.sections
+    - navigation.expand
+    - navigation.path
+    - content.code.copy
+    - navigation.top
+
+plugins:
+  - search
+  - section-index
+  - mkdocstrings
+
+watch:
+  - rigging/
+
+markdown_extensions:
+  - pymdownx.highlight:
+      anchor_linenums: true
+      line_spans: __span
+      pygments_lang_class: true
+  - pymdownx.inlinehilite
+  - pymdownx.snippets
+  - pymdownx.superfences
+  - admonition
+  - pymdownx.details
+
+extra_css:
+  - stylesheets/extra.css
+
+extra_javascript:
+  - https://polyfill.io/v3/polyfill.min.js?features=es6
+
+extra:
+  homepage: https://dreadnode.io
\ No newline at end of file
diff --git a/poetry.lock b/poetry.lock
index ff77064..2d99fc3 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -2,87 +2,87 @@

 [[package]]
 name = "aiohttp"
-version = "3.9.3"
+version = "3.9.5"
 description = "Async http client/server framework (asyncio)"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "aiohttp-3.9.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:939677b61f9d72a4fa2a042a5eee2a99a24001a67c13da113b2e30396567db54"},
-    {file = "aiohttp-3.9.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1f5cd333fcf7590a18334c90f8c9147c837a6ec8a178e88d90a9b96ea03194cc"},
-    {file = "aiohttp-3.9.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:82e6aa28dd46374f72093eda8bcd142f7771ee1eb9d1e223ff0fa7177a96b4a5"},
-    {file = "aiohttp-3.9.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f56455b0c2c7cc3b0c584815264461d07b177f903a04481dfc33e08a89f0c26b"},
-    {file = "aiohttp-3.9.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bca77a198bb6e69795ef2f09a5f4c12758487f83f33d63acde5f0d4919815768"},
-    {file = "aiohttp-3.9.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e083c285857b78ee21a96ba1eb1b5339733c3563f72980728ca2b08b53826ca5"},
-    {file = "aiohttp-3.9.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab40e6251c3873d86ea9b30a1ac6d7478c09277b32e14745d0d3c6e76e3c7e29"},
-    {file = "aiohttp-3.9.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:df822ee7feaaeffb99c1a9e5e608800bd8eda6e5f18f5cfb0dc7eeb2eaa6bbec"},
-    {file = "aiohttp-3.9.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:acef0899fea7492145d2bbaaaec7b345c87753168589cc7faf0afec9afe9b747"},
-    {file = "aiohttp-3.9.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:cd73265a9e5ea618014802ab01babf1940cecb90c9762d8b9e7d2cc1e1969ec6"},
-    {file = "aiohttp-3.9.3-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:a78ed8a53a1221393d9637c01870248a6f4ea5b214a59a92a36f18151739452c"},
-    {file = "aiohttp-3.9.3-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:6b0e029353361f1746bac2e4cc19b32f972ec03f0f943b390c4ab3371840aabf"},
-    {file = "aiohttp-3.9.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7cf5c9458e1e90e3c390c2639f1017a0379a99a94fdfad3a1fd966a2874bba52"},
-    {file = "aiohttp-3.9.3-cp310-cp310-win32.whl", hash = "sha256:3e59c23c52765951b69ec45ddbbc9403a8761ee6f57253250c6e1536cacc758b"},
-    {file = "aiohttp-3.9.3-cp310-cp310-win_amd64.whl", hash = "sha256:055ce4f74b82551678291473f66dc9fb9048a50d8324278751926ff0ae7715e5"},
-    {file = "aiohttp-3.9.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6b88f9386ff1ad91ace19d2a1c0225896e28815ee09fc6a8932fded8cda97c3d"},
-    {file = "aiohttp-3.9.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c46956ed82961e31557b6857a5ca153c67e5476972e5f7190015018760938da2"},
-    {file = "aiohttp-3.9.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:07b837ef0d2f252f96009e9b8435ec1fef68ef8b1461933253d318748ec1acdc"},
-    {file = "aiohttp-3.9.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad46e6f620574b3b4801c68255492e0159d1712271cc99d8bdf35f2043ec266"},
"aiohttp-3.9.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5ed3e046ea7b14938112ccd53d91c1539af3e6679b222f9469981e3dac7ba1ce"}, - {file = "aiohttp-3.9.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:039df344b45ae0b34ac885ab5b53940b174530d4dd8a14ed8b0e2155b9dddccb"}, - {file = "aiohttp-3.9.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7943c414d3a8d9235f5f15c22ace69787c140c80b718dcd57caaade95f7cd93b"}, - {file = "aiohttp-3.9.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:84871a243359bb42c12728f04d181a389718710129b36b6aad0fc4655a7647d4"}, - {file = "aiohttp-3.9.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:5eafe2c065df5401ba06821b9a054d9cb2848867f3c59801b5d07a0be3a380ae"}, - {file = "aiohttp-3.9.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:9d3c9b50f19704552f23b4eaea1fc082fdd82c63429a6506446cbd8737823da3"}, - {file = "aiohttp-3.9.3-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:f033d80bc6283092613882dfe40419c6a6a1527e04fc69350e87a9df02bbc283"}, - {file = "aiohttp-3.9.3-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:2c895a656dd7e061b2fd6bb77d971cc38f2afc277229ce7dd3552de8313a483e"}, - {file = "aiohttp-3.9.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1f5a71d25cd8106eab05f8704cd9167b6e5187bcdf8f090a66c6d88b634802b4"}, - {file = "aiohttp-3.9.3-cp311-cp311-win32.whl", hash = "sha256:50fca156d718f8ced687a373f9e140c1bb765ca16e3d6f4fe116e3df7c05b2c5"}, - {file = "aiohttp-3.9.3-cp311-cp311-win_amd64.whl", hash = "sha256:5fe9ce6c09668063b8447f85d43b8d1c4e5d3d7e92c63173e6180b2ac5d46dd8"}, - {file = "aiohttp-3.9.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:38a19bc3b686ad55804ae931012f78f7a534cce165d089a2059f658f6c91fa60"}, - {file = "aiohttp-3.9.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:770d015888c2a598b377bd2f663adfd947d78c0124cfe7b959e1ef39f5b13869"}, - {file = "aiohttp-3.9.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ee43080e75fc92bf36219926c8e6de497f9b247301bbf88c5c7593d931426679"}, - {file = "aiohttp-3.9.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:52df73f14ed99cee84865b95a3d9e044f226320a87af208f068ecc33e0c35b96"}, - {file = "aiohttp-3.9.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc9b311743a78043b26ffaeeb9715dc360335e5517832f5a8e339f8a43581e4d"}, - {file = "aiohttp-3.9.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b955ed993491f1a5da7f92e98d5dad3c1e14dc175f74517c4e610b1f2456fb11"}, - {file = "aiohttp-3.9.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:504b6981675ace64c28bf4a05a508af5cde526e36492c98916127f5a02354d53"}, - {file = "aiohttp-3.9.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a6fe5571784af92b6bc2fda8d1925cccdf24642d49546d3144948a6a1ed58ca5"}, - {file = "aiohttp-3.9.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ba39e9c8627edc56544c8628cc180d88605df3892beeb2b94c9bc857774848ca"}, - {file = "aiohttp-3.9.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:e5e46b578c0e9db71d04c4b506a2121c0cb371dd89af17a0586ff6769d4c58c1"}, - {file = "aiohttp-3.9.3-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:938a9653e1e0c592053f815f7028e41a3062e902095e5a7dc84617c87267ebd5"}, - {file = "aiohttp-3.9.3-cp312-cp312-musllinux_1_1_s390x.whl", hash = 
"sha256:c3452ea726c76e92f3b9fae4b34a151981a9ec0a4847a627c43d71a15ac32aa6"}, - {file = "aiohttp-3.9.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ff30218887e62209942f91ac1be902cc80cddb86bf00fbc6783b7a43b2bea26f"}, - {file = "aiohttp-3.9.3-cp312-cp312-win32.whl", hash = "sha256:38f307b41e0bea3294a9a2a87833191e4bcf89bb0365e83a8be3a58b31fb7f38"}, - {file = "aiohttp-3.9.3-cp312-cp312-win_amd64.whl", hash = "sha256:b791a3143681a520c0a17e26ae7465f1b6f99461a28019d1a2f425236e6eedb5"}, - {file = "aiohttp-3.9.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:0ed621426d961df79aa3b963ac7af0d40392956ffa9be022024cd16297b30c8c"}, - {file = "aiohttp-3.9.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7f46acd6a194287b7e41e87957bfe2ad1ad88318d447caf5b090012f2c5bb528"}, - {file = "aiohttp-3.9.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:feeb18a801aacb098220e2c3eea59a512362eb408d4afd0c242044c33ad6d542"}, - {file = "aiohttp-3.9.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f734e38fd8666f53da904c52a23ce517f1b07722118d750405af7e4123933511"}, - {file = "aiohttp-3.9.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b40670ec7e2156d8e57f70aec34a7216407848dfe6c693ef131ddf6e76feb672"}, - {file = "aiohttp-3.9.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fdd215b7b7fd4a53994f238d0f46b7ba4ac4c0adb12452beee724ddd0743ae5d"}, - {file = "aiohttp-3.9.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:017a21b0df49039c8f46ca0971b3a7fdc1f56741ab1240cb90ca408049766168"}, - {file = "aiohttp-3.9.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e99abf0bba688259a496f966211c49a514e65afa9b3073a1fcee08856e04425b"}, - {file = "aiohttp-3.9.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:648056db9a9fa565d3fa851880f99f45e3f9a771dd3ff3bb0c048ea83fb28194"}, - {file = "aiohttp-3.9.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8aacb477dc26797ee089721536a292a664846489c49d3ef9725f992449eda5a8"}, - {file = "aiohttp-3.9.3-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:522a11c934ea660ff8953eda090dcd2154d367dec1ae3c540aff9f8a5c109ab4"}, - {file = "aiohttp-3.9.3-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:5bce0dc147ca85caa5d33debc4f4d65e8e8b5c97c7f9f660f215fa74fc49a321"}, - {file = "aiohttp-3.9.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:4b4af9f25b49a7be47c0972139e59ec0e8285c371049df1a63b6ca81fdd216a2"}, - {file = "aiohttp-3.9.3-cp38-cp38-win32.whl", hash = "sha256:298abd678033b8571995650ccee753d9458dfa0377be4dba91e4491da3f2be63"}, - {file = "aiohttp-3.9.3-cp38-cp38-win_amd64.whl", hash = "sha256:69361bfdca5468c0488d7017b9b1e5ce769d40b46a9f4a2eed26b78619e9396c"}, - {file = "aiohttp-3.9.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:0fa43c32d1643f518491d9d3a730f85f5bbaedcbd7fbcae27435bb8b7a061b29"}, - {file = "aiohttp-3.9.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:835a55b7ca49468aaaac0b217092dfdff370e6c215c9224c52f30daaa735c1c1"}, - {file = "aiohttp-3.9.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:06a9b2c8837d9a94fae16c6223acc14b4dfdff216ab9b7202e07a9a09541168f"}, - {file = "aiohttp-3.9.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:abf151955990d23f84205286938796c55ff11bbfb4ccfada8c9c83ae6b3c89a3"}, - {file = "aiohttp-3.9.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:59c26c95975f26e662ca78fdf543d4eeaef70e533a672b4113dd888bd2423caa"}, - 
{file = "aiohttp-3.9.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f95511dd5d0e05fd9728bac4096319f80615aaef4acbecb35a990afebe953b0e"}, - {file = "aiohttp-3.9.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:595f105710293e76b9dc09f52e0dd896bd064a79346234b521f6b968ffdd8e58"}, - {file = "aiohttp-3.9.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7c8b816c2b5af5c8a436df44ca08258fc1a13b449393a91484225fcb7545533"}, - {file = "aiohttp-3.9.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f1088fa100bf46e7b398ffd9904f4808a0612e1d966b4aa43baa535d1b6341eb"}, - {file = "aiohttp-3.9.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f59dfe57bb1ec82ac0698ebfcdb7bcd0e99c255bd637ff613760d5f33e7c81b3"}, - {file = "aiohttp-3.9.3-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:361a1026c9dd4aba0109e4040e2aecf9884f5cfe1b1b1bd3d09419c205e2e53d"}, - {file = "aiohttp-3.9.3-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:363afe77cfcbe3a36353d8ea133e904b108feea505aa4792dad6585a8192c55a"}, - {file = "aiohttp-3.9.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8e2c45c208c62e955e8256949eb225bd8b66a4c9b6865729a786f2aa79b72e9d"}, - {file = "aiohttp-3.9.3-cp39-cp39-win32.whl", hash = "sha256:f7217af2e14da0856e082e96ff637f14ae45c10a5714b63c77f26d8884cf1051"}, - {file = "aiohttp-3.9.3-cp39-cp39-win_amd64.whl", hash = "sha256:27468897f628c627230dba07ec65dc8d0db566923c48f29e084ce382119802bc"}, - {file = "aiohttp-3.9.3.tar.gz", hash = "sha256:90842933e5d1ff760fae6caca4b2b3edba53ba8f4b71e95dacf2818a2aca06f7"}, + {file = "aiohttp-3.9.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:fcde4c397f673fdec23e6b05ebf8d4751314fa7c24f93334bf1f1364c1c69ac7"}, + {file = "aiohttp-3.9.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5d6b3f1fabe465e819aed2c421a6743d8debbde79b6a8600739300630a01bf2c"}, + {file = "aiohttp-3.9.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6ae79c1bc12c34082d92bf9422764f799aee4746fd7a392db46b7fd357d4a17a"}, + {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d3ebb9e1316ec74277d19c5f482f98cc65a73ccd5430540d6d11682cd857430"}, + {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84dabd95154f43a2ea80deffec9cb44d2e301e38a0c9d331cc4aa0166fe28ae3"}, + {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c8a02fbeca6f63cb1f0475c799679057fc9268b77075ab7cf3f1c600e81dd46b"}, + {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c26959ca7b75ff768e2776d8055bf9582a6267e24556bb7f7bd29e677932be72"}, + {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:714d4e5231fed4ba2762ed489b4aec07b2b9953cf4ee31e9871caac895a839c0"}, + {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e7a6a8354f1b62e15d48e04350f13e726fa08b62c3d7b8401c0a1314f02e3558"}, + {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:c413016880e03e69d166efb5a1a95d40f83d5a3a648d16486592c49ffb76d0db"}, + {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:ff84aeb864e0fac81f676be9f4685f0527b660f1efdc40dcede3c251ef1e867f"}, + {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:ad7f2919d7dac062f24d6f5fe95d401597fbb015a25771f85e692d043c9d7832"}, + {file = 
"aiohttp-3.9.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:702e2c7c187c1a498a4e2b03155d52658fdd6fda882d3d7fbb891a5cf108bb10"}, + {file = "aiohttp-3.9.5-cp310-cp310-win32.whl", hash = "sha256:67c3119f5ddc7261d47163ed86d760ddf0e625cd6246b4ed852e82159617b5fb"}, + {file = "aiohttp-3.9.5-cp310-cp310-win_amd64.whl", hash = "sha256:471f0ef53ccedec9995287f02caf0c068732f026455f07db3f01a46e49d76bbb"}, + {file = "aiohttp-3.9.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e0ae53e33ee7476dd3d1132f932eeb39bf6125083820049d06edcdca4381f342"}, + {file = "aiohttp-3.9.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c088c4d70d21f8ca5c0b8b5403fe84a7bc8e024161febdd4ef04575ef35d474d"}, + {file = "aiohttp-3.9.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:639d0042b7670222f33b0028de6b4e2fad6451462ce7df2af8aee37dcac55424"}, + {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f26383adb94da5e7fb388d441bf09c61e5e35f455a3217bfd790c6b6bc64b2ee"}, + {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:66331d00fb28dc90aa606d9a54304af76b335ae204d1836f65797d6fe27f1ca2"}, + {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ff550491f5492ab5ed3533e76b8567f4b37bd2995e780a1f46bca2024223233"}, + {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f22eb3a6c1080d862befa0a89c380b4dafce29dc6cd56083f630073d102eb595"}, + {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a81b1143d42b66ffc40a441379387076243ef7b51019204fd3ec36b9f69e77d6"}, + {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f64fd07515dad67f24b6ea4a66ae2876c01031de91c93075b8093f07c0a2d93d"}, + {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:93e22add827447d2e26d67c9ac0161756007f152fdc5210277d00a85f6c92323"}, + {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:55b39c8684a46e56ef8c8d24faf02de4a2b2ac60d26cee93bc595651ff545de9"}, + {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4715a9b778f4293b9f8ae7a0a7cef9829f02ff8d6277a39d7f40565c737d3771"}, + {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:afc52b8d969eff14e069a710057d15ab9ac17cd4b6753042c407dcea0e40bf75"}, + {file = "aiohttp-3.9.5-cp311-cp311-win32.whl", hash = "sha256:b3df71da99c98534be076196791adca8819761f0bf6e08e07fd7da25127150d6"}, + {file = "aiohttp-3.9.5-cp311-cp311-win_amd64.whl", hash = "sha256:88e311d98cc0bf45b62fc46c66753a83445f5ab20038bcc1b8a1cc05666f428a"}, + {file = "aiohttp-3.9.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:c7a4b7a6cf5b6eb11e109a9755fd4fda7d57395f8c575e166d363b9fc3ec4678"}, + {file = "aiohttp-3.9.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:0a158704edf0abcac8ac371fbb54044f3270bdbc93e254a82b6c82be1ef08f3c"}, + {file = "aiohttp-3.9.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d153f652a687a8e95ad367a86a61e8d53d528b0530ef382ec5aaf533140ed00f"}, + {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82a6a97d9771cb48ae16979c3a3a9a18b600a8505b1115cfe354dfb2054468b4"}, + {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:60cdbd56f4cad9f69c35eaac0fbbdf1f77b0ff9456cebd4902f3dd1cf096464c"}, + {file = 
"aiohttp-3.9.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8676e8fd73141ded15ea586de0b7cda1542960a7b9ad89b2b06428e97125d4fa"}, + {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da00da442a0e31f1c69d26d224e1efd3a1ca5bcbf210978a2ca7426dfcae9f58"}, + {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18f634d540dd099c262e9f887c8bbacc959847cfe5da7a0e2e1cf3f14dbf2daf"}, + {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:320e8618eda64e19d11bdb3bd04ccc0a816c17eaecb7e4945d01deee2a22f95f"}, + {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:2faa61a904b83142747fc6a6d7ad8fccff898c849123030f8e75d5d967fd4a81"}, + {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:8c64a6dc3fe5db7b1b4d2b5cb84c4f677768bdc340611eca673afb7cf416ef5a"}, + {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:393c7aba2b55559ef7ab791c94b44f7482a07bf7640d17b341b79081f5e5cd1a"}, + {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:c671dc117c2c21a1ca10c116cfcd6e3e44da7fcde37bf83b2be485ab377b25da"}, + {file = "aiohttp-3.9.5-cp312-cp312-win32.whl", hash = "sha256:5a7ee16aab26e76add4afc45e8f8206c95d1d75540f1039b84a03c3b3800dd59"}, + {file = "aiohttp-3.9.5-cp312-cp312-win_amd64.whl", hash = "sha256:5ca51eadbd67045396bc92a4345d1790b7301c14d1848feaac1d6a6c9289e888"}, + {file = "aiohttp-3.9.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:694d828b5c41255e54bc2dddb51a9f5150b4eefa9886e38b52605a05d96566e8"}, + {file = "aiohttp-3.9.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0605cc2c0088fcaae79f01c913a38611ad09ba68ff482402d3410bf59039bfb8"}, + {file = "aiohttp-3.9.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4558e5012ee03d2638c681e156461d37b7a113fe13970d438d95d10173d25f78"}, + {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9dbc053ac75ccc63dc3a3cc547b98c7258ec35a215a92bd9f983e0aac95d3d5b"}, + {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4109adee842b90671f1b689901b948f347325045c15f46b39797ae1bf17019de"}, + {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6ea1a5b409a85477fd8e5ee6ad8f0e40bf2844c270955e09360418cfd09abac"}, + {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3c2890ca8c59ee683fd09adf32321a40fe1cf164e3387799efb2acebf090c11"}, + {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3916c8692dbd9d55c523374a3b8213e628424d19116ac4308e434dbf6d95bbdd"}, + {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8d1964eb7617907c792ca00b341b5ec3e01ae8c280825deadbbd678447b127e1"}, + {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d5ab8e1f6bee051a4bf6195e38a5c13e5e161cb7bad83d8854524798bd9fcd6e"}, + {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:52c27110f3862a1afbcb2af4281fc9fdc40327fa286c4625dfee247c3ba90156"}, + {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:7f64cbd44443e80094309875d4f9c71d0401e966d191c3d469cde4642bc2e031"}, + {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8b4f72fbb66279624bfe83fd5eb6aea0022dad8eec62b71e7bf63ee1caadeafe"}, + 
{file = "aiohttp-3.9.5-cp38-cp38-win32.whl", hash = "sha256:6380c039ec52866c06d69b5c7aad5478b24ed11696f0e72f6b807cfb261453da"}, + {file = "aiohttp-3.9.5-cp38-cp38-win_amd64.whl", hash = "sha256:da22dab31d7180f8c3ac7c7635f3bcd53808f374f6aa333fe0b0b9e14b01f91a"}, + {file = "aiohttp-3.9.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:1732102949ff6087589408d76cd6dea656b93c896b011ecafff418c9661dc4ed"}, + {file = "aiohttp-3.9.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c6021d296318cb6f9414b48e6a439a7f5d1f665464da507e8ff640848ee2a58a"}, + {file = "aiohttp-3.9.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:239f975589a944eeb1bad26b8b140a59a3a320067fb3cd10b75c3092405a1372"}, + {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3b7b30258348082826d274504fbc7c849959f1989d86c29bc355107accec6cfb"}, + {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd2adf5c87ff6d8b277814a28a535b59e20bfea40a101db6b3bdca7e9926bc24"}, + {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e9a3d838441bebcf5cf442700e3963f58b5c33f015341f9ea86dcd7d503c07e2"}, + {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e3a1ae66e3d0c17cf65c08968a5ee3180c5a95920ec2731f53343fac9bad106"}, + {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9c69e77370cce2d6df5d12b4e12bdcca60c47ba13d1cbbc8645dd005a20b738b"}, + {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0cbf56238f4bbf49dab8c2dc2e6b1b68502b1e88d335bea59b3f5b9f4c001475"}, + {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:d1469f228cd9ffddd396d9948b8c9cd8022b6d1bf1e40c6f25b0fb90b4f893ed"}, + {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:45731330e754f5811c314901cebdf19dd776a44b31927fa4b4dbecab9e457b0c"}, + {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:3fcb4046d2904378e3aeea1df51f697b0467f2aac55d232c87ba162709478c46"}, + {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8cf142aa6c1a751fcb364158fd710b8a9be874b81889c2bd13aa8893197455e2"}, + {file = "aiohttp-3.9.5-cp39-cp39-win32.whl", hash = "sha256:7b179eea70833c8dee51ec42f3b4097bd6370892fa93f510f76762105568cf09"}, + {file = "aiohttp-3.9.5-cp39-cp39-win_amd64.whl", hash = "sha256:38d80498e2e169bc61418ff36170e0aad0cd268da8b38a17c4cf29d254a8b3f1"}, + {file = "aiohttp-3.9.5.tar.gz", hash = "sha256:edea7d15772ceeb29db4aff55e482d4bcfb6ae160ce144f2682de02f6d693551"}, ] [package.dependencies] @@ -202,6 +202,20 @@ tests = ["attrs[tests-no-zope]", "zope-interface"] tests-mypy = ["mypy (>=1.6)", "pytest-mypy-plugins"] tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"] +[[package]] +name = "babel" +version = "2.14.0" +description = "Internationalization utilities" +optional = false +python-versions = ">=3.7" +files = [ + {file = "Babel-2.14.0-py3-none-any.whl", hash = "sha256:efb1a25b7118e67ce3a259bed20545c29cb68be8ad2c784c83689981b7a57287"}, + {file = "Babel-2.14.0.tar.gz", hash = "sha256:6919867db036398ba21eb5c7a0f6b28ab8cbc3ae7a73a44ebe34ae74a4e7d363"}, +] + +[package.extras] +dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"] + [[package]] name = "certifi" version = "2024.2.2" @@ -473,13 +487,13 @@ files = [ [[package]] name = "exceptiongroup" -version = 
"1.2.0" +version = "1.2.1" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" files = [ - {file = "exceptiongroup-1.2.0-py3-none-any.whl", hash = "sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14"}, - {file = "exceptiongroup-1.2.0.tar.gz", hash = "sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68"}, + {file = "exceptiongroup-1.2.1-py3-none-any.whl", hash = "sha256:5258b9ed329c5bbdd31a309f53cbfb0b155341807f6ff7606a1e801a891b29ad"}, + {file = "exceptiongroup-1.2.1.tar.gz", hash = "sha256:a4785e48b045528f5bfe627b6ad554ff32def154f42372786903b7abcfe1aa16"}, ] [package.extras] @@ -501,13 +515,13 @@ tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipyth [[package]] name = "filelock" -version = "3.13.3" +version = "3.14.0" description = "A platform independent file lock." optional = false python-versions = ">=3.8" files = [ - {file = "filelock-3.13.3-py3-none-any.whl", hash = "sha256:5ffa845303983e7a0b7ae17636509bc97997d58afeafa72fb141a17b152284cb"}, - {file = "filelock-3.13.3.tar.gz", hash = "sha256:a79895a25bbefdf55d1a2a0a80968f7dbb28edcd6d4234a0afb3f37ecde4b546"}, + {file = "filelock-3.14.0-py3-none-any.whl", hash = "sha256:43339835842f110ca7ae60f1e1c160714c5a6afd15a2873419ab185334975c0f"}, + {file = "filelock-3.14.0.tar.gz", hash = "sha256:6ea72da3be9b8c82afd3edcf99f2fffbb5076335a5ae4d03248bb5b6c3eae78a"}, ] [package.extras] @@ -636,6 +650,37 @@ smb = ["smbprotocol"] ssh = ["paramiko"] tqdm = ["tqdm"] +[[package]] +name = "ghp-import" +version = "2.1.0" +description = "Copy your docs directly to the gh-pages branch." +optional = false +python-versions = "*" +files = [ + {file = "ghp-import-2.1.0.tar.gz", hash = "sha256:9c535c4c61193c2df8871222567d7fd7e5014d835f97dc7b7439069e2413d343"}, + {file = "ghp_import-2.1.0-py3-none-any.whl", hash = "sha256:8337dd7b50877f163d4c0289bc1f1c7f127550241988d568c1db512c4324a619"}, +] + +[package.dependencies] +python-dateutil = ">=2.8.1" + +[package.extras] +dev = ["flake8", "markdown", "twine", "wheel"] + +[[package]] +name = "griffe" +version = "0.44.0" +description = "Signatures for entire Python programs. Extract the structure, the frame, the skeleton of your project, to generate API documentation or find breaking changes in your API." +optional = false +python-versions = ">=3.8" +files = [ + {file = "griffe-0.44.0-py3-none-any.whl", hash = "sha256:8a4471c469ba980b87c843f1168850ce39d0c1d0c7be140dca2480f76c8e5446"}, + {file = "griffe-0.44.0.tar.gz", hash = "sha256:34aee1571042f9bf00529bc715de4516fb6f482b164e90d030300601009e0223"}, +] + +[package.dependencies] +colorama = ">=0.4" + [[package]] name = "h11" version = "0.14.0" @@ -649,13 +694,13 @@ files = [ [[package]] name = "httpcore" -version = "1.0.4" +version = "1.0.5" description = "A minimal low-level HTTP client." 
optional = false python-versions = ">=3.8" files = [ - {file = "httpcore-1.0.4-py3-none-any.whl", hash = "sha256:ac418c1db41bade2ad53ae2f3834a3a0f5ae76b56cf5aa497d2d033384fc7d73"}, - {file = "httpcore-1.0.4.tar.gz", hash = "sha256:cb2839ccfcba0d2d3c1131d3c3e26dfc327326fbe7a5dc0dbfe9f6c9151bb022"}, + {file = "httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5"}, + {file = "httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61"}, ] [package.dependencies] @@ -666,7 +711,7 @@ h11 = ">=0.13,<0.15" asyncio = ["anyio (>=4.0,<5.0)"] http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] -trio = ["trio (>=0.22.0,<0.25.0)"] +trio = ["trio (>=0.22.0,<0.26.0)"] [[package]] name = "httpx" @@ -694,13 +739,13 @@ socks = ["socksio (==1.*)"] [[package]] name = "huggingface-hub" -version = "0.22.1" +version = "0.23.0" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" optional = false python-versions = ">=3.8.0" files = [ - {file = "huggingface_hub-0.22.1-py3-none-any.whl", hash = "sha256:eac63947923d15c9a68681d7ed2d9599e058860617064e3ee6bd91a4b954faaf"}, - {file = "huggingface_hub-0.22.1.tar.gz", hash = "sha256:5b8aaee5f3618cd432f49886da9935bbe8fab92d719011826430907b93171dd8"}, + {file = "huggingface_hub-0.23.0-py3-none-any.whl", hash = "sha256:075c30d48ee7db2bba779190dc526d2c11d422aed6f9044c5e2fdc2c432fdb91"}, + {file = "huggingface_hub-0.23.0.tar.gz", hash = "sha256:7126dedd10a4c6fac796ced4d87a8cf004efc722a5125c2c09299017fa366fa9"}, ] [package.dependencies] @@ -713,28 +758,28 @@ tqdm = ">=4.42.1" typing-extensions = ">=3.7.4.3" [package.extras] -all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "minijinja (>=1.0)", "mypy (==1.5.1)", "numpy", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.3.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "mypy (==1.5.1)", "numpy", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.3.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] cli = ["InquirerPy (==0.3.4)"] -dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "minijinja (>=1.0)", "mypy (==1.5.1)", "numpy", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.3.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "mypy (==1.5.1)", "numpy", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.3.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"] hf-transfer = ["hf-transfer (>=0.1.4)"] 
inference = ["aiohttp", "minijinja (>=1.0)"] quality = ["mypy (==1.5.1)", "ruff (>=0.3.0)"] tensorflow = ["graphviz", "pydot", "tensorflow"] tensorflow-testing = ["keras (<3.0)", "tensorflow"] -testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "minijinja (>=1.0)", "numpy", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] +testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "numpy", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] torch = ["safetensors", "torch"] typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)"] [[package]] name = "idna" -version = "3.6" +version = "3.7" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.5" files = [ - {file = "idna-3.6-py3-none-any.whl", hash = "sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f"}, - {file = "idna-3.6.tar.gz", hash = "sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca"}, + {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"}, + {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, ] [[package]] @@ -769,13 +814,13 @@ files = [ [[package]] name = "ipykernel" -version = "6.29.3" +version = "6.29.4" description = "IPython Kernel for Jupyter" optional = false python-versions = ">=3.8" files = [ - {file = "ipykernel-6.29.3-py3-none-any.whl", hash = "sha256:5aa086a4175b0229d4eca211e181fb473ea78ffd9869af36ba7694c947302a21"}, - {file = "ipykernel-6.29.3.tar.gz", hash = "sha256:e14c250d1f9ea3989490225cc1a542781b095a18a19447fcf2b5eaf7d0ac5bd2"}, + {file = "ipykernel-6.29.4-py3-none-any.whl", hash = "sha256:1181e653d95c6808039c509ef8e67c4126b3b3af7781496c7cbfb5ed938a27da"}, + {file = "ipykernel-6.29.4.tar.gz", hash = "sha256:3d44070060f9475ac2092b760123fadf105d2e2493c24848b6691a7c4f42af5c"}, ] [package.dependencies] @@ -802,13 +847,13 @@ test = ["flaky", "ipyparallel", "pre-commit", "pytest (>=7.0)", "pytest-asyncio [[package]] name = "ipython" -version = "8.22.2" +version = "8.24.0" description = "IPython: Productive Interactive Computing" optional = false python-versions = ">=3.10" files = [ - {file = "ipython-8.22.2-py3-none-any.whl", hash = "sha256:3c86f284c8f3d8f2b6c662f885c4889a91df7cd52056fd02b7d8d6195d7f56e9"}, - {file = "ipython-8.22.2.tar.gz", hash = "sha256:2dcaad9049f9056f1fef63514f176c7d41f930daa78d05b82a176202818f2c14"}, + {file = "ipython-8.24.0-py3-none-any.whl", hash = "sha256:d7bf2f6c4314984e3e02393213bab8703cf163ede39672ce5918c51fe253a2a3"}, + {file = "ipython-8.24.0.tar.gz", hash = "sha256:010db3f8a728a578bb641fdd06c063b9fb8e96a9464c63aec6310fbcb5e80501"}, ] [package.dependencies] @@ -822,18 +867,20 @@ prompt-toolkit = ">=3.0.41,<3.1.0" pygments = ">=2.4.0" stack-data = "*" traitlets = ">=5.13.0" +typing-extensions = {version = ">=4.6", markers = "python_version < \"3.12\""} [package.extras] -all = ["ipython[black,doc,kernel,nbconvert,nbformat,notebook,parallel,qtconsole,terminal]", "ipython[test,test-extra]"] +all = ["ipython[black,doc,kernel,matplotlib,nbconvert,nbformat,notebook,parallel,qtconsole]", "ipython[test,test-extra]"] black = ["black"] 
doc = ["docrepr", "exceptiongroup", "ipykernel", "ipython[test]", "matplotlib", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "sphinxcontrib-jquery", "stack-data", "typing-extensions"] kernel = ["ipykernel"] +matplotlib = ["matplotlib"] nbconvert = ["nbconvert"] nbformat = ["nbformat"] notebook = ["ipywidgets", "notebook"] parallel = ["ipyparallel"] qtconsole = ["qtconsole"] -test = ["pickleshare", "pytest (<8)", "pytest-asyncio (<0.22)", "testpath"] +test = ["pickleshare", "pytest", "pytest-asyncio (<0.22)", "testpath"] test-extra = ["curio", "ipython[test]", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.23)", "pandas", "trio"] [[package]] @@ -958,6 +1005,21 @@ win32-setctime = {version = ">=1.0.0", markers = "sys_platform == \"win32\""} [package.extras] dev = ["Sphinx (==7.2.5)", "colorama (==0.4.5)", "colorama (==0.4.6)", "exceptiongroup (==1.1.3)", "freezegun (==1.1.0)", "freezegun (==1.2.2)", "mypy (==v0.910)", "mypy (==v0.971)", "mypy (==v1.4.1)", "mypy (==v1.5.1)", "pre-commit (==3.4.0)", "pytest (==6.1.2)", "pytest (==7.4.0)", "pytest-cov (==2.12.1)", "pytest-cov (==4.1.0)", "pytest-mypy-plugins (==1.9.3)", "pytest-mypy-plugins (==3.0.0)", "sphinx-autobuild (==2021.3.14)", "sphinx-rtd-theme (==1.3.0)", "tox (==3.27.1)", "tox (==4.11.0)"] +[[package]] +name = "markdown" +version = "3.6" +description = "Python implementation of John Gruber's Markdown." +optional = false +python-versions = ">=3.8" +files = [ + {file = "Markdown-3.6-py3-none-any.whl", hash = "sha256:48f276f4d8cfb8ce6527c8f79e2ee29708508bf4d40aa410fbc3b4ee832c850f"}, + {file = "Markdown-3.6.tar.gz", hash = "sha256:ed4f41f6daecbeeb96e576ce414c41d2d876daa9a16cb35fa8ed8c2ddfad0224"}, +] + +[package.extras] +docs = ["mdx-gh-links (>=0.2)", "mkdocs (>=1.5)", "mkdocs-gen-files", "mkdocs-literate-nav", "mkdocs-nature (>=0.6)", "mkdocs-section-index", "mkdocstrings[python]"] +testing = ["coverage", "pyyaml"] + [[package]] name = "markupsafe" version = "2.1.5" @@ -1029,18 +1091,186 @@ files = [ [[package]] name = "matplotlib-inline" -version = "0.1.6" +version = "0.1.7" description = "Inline Matplotlib backend for Jupyter" optional = false -python-versions = ">=3.5" +python-versions = ">=3.8" files = [ - {file = "matplotlib-inline-0.1.6.tar.gz", hash = "sha256:f887e5f10ba98e8d2b150ddcf4702c1e5f8b3a20005eb0f74bfdbd360ee6f304"}, - {file = "matplotlib_inline-0.1.6-py3-none-any.whl", hash = "sha256:f1f41aab5328aa5aaea9b16d083b128102f8712542f819fe7e6a420ff581b311"}, + {file = "matplotlib_inline-0.1.7-py3-none-any.whl", hash = "sha256:df192d39a4ff8f21b1895d72e6a13f5fcc5099f00fa84384e0ea28c2cc0653ca"}, + {file = "matplotlib_inline-0.1.7.tar.gz", hash = "sha256:8423b23ec666be3d16e16b60bdd8ac4e86e840ebd1dd11a30b9f117f2fa0ab90"}, ] [package.dependencies] traitlets = "*" +[[package]] +name = "mergedeep" +version = "1.3.4" +description = "A deep merge function for 🐍." +optional = false +python-versions = ">=3.6" +files = [ + {file = "mergedeep-1.3.4-py3-none-any.whl", hash = "sha256:70775750742b25c0d8f36c55aed03d24c3384d17c951b3175d898bd778ef0307"}, + {file = "mergedeep-1.3.4.tar.gz", hash = "sha256:0096d52e9dad9939c3d975a774666af186eda617e6ca84df4c94dec30004f2a8"}, +] + +[[package]] +name = "mkdocs" +version = "1.6.0" +description = "Project documentation with Markdown." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "mkdocs-1.6.0-py3-none-any.whl", hash = "sha256:1eb5cb7676b7d89323e62b56235010216319217d4af5ddc543a91beb8d125ea7"}, + {file = "mkdocs-1.6.0.tar.gz", hash = "sha256:a73f735824ef83a4f3bcb7a231dcab23f5a838f88b7efc54a0eef5fbdbc3c512"}, +] + +[package.dependencies] +click = ">=7.0" +colorama = {version = ">=0.4", markers = "platform_system == \"Windows\""} +ghp-import = ">=1.0" +jinja2 = ">=2.11.1" +markdown = ">=3.3.6" +markupsafe = ">=2.0.1" +mergedeep = ">=1.3.4" +mkdocs-get-deps = ">=0.2.0" +packaging = ">=20.5" +pathspec = ">=0.11.1" +pyyaml = ">=5.1" +pyyaml-env-tag = ">=0.1" +watchdog = ">=2.0" + +[package.extras] +i18n = ["babel (>=2.9.0)"] +min-versions = ["babel (==2.9.0)", "click (==7.0)", "colorama (==0.4)", "ghp-import (==1.0)", "importlib-metadata (==4.4)", "jinja2 (==2.11.1)", "markdown (==3.3.6)", "markupsafe (==2.0.1)", "mergedeep (==1.3.4)", "mkdocs-get-deps (==0.2.0)", "packaging (==20.5)", "pathspec (==0.11.1)", "pyyaml (==5.1)", "pyyaml-env-tag (==0.1)", "watchdog (==2.0)"] + +[[package]] +name = "mkdocs-autorefs" +version = "1.0.1" +description = "Automatically link across pages in MkDocs." +optional = false +python-versions = ">=3.8" +files = [ + {file = "mkdocs_autorefs-1.0.1-py3-none-any.whl", hash = "sha256:aacdfae1ab197780fb7a2dac92ad8a3d8f7ca8049a9cbe56a4218cd52e8da570"}, + {file = "mkdocs_autorefs-1.0.1.tar.gz", hash = "sha256:f684edf847eced40b570b57846b15f0bf57fb93ac2c510450775dcf16accb971"}, +] + +[package.dependencies] +Markdown = ">=3.3" +markupsafe = ">=2.0.1" +mkdocs = ">=1.1" + +[[package]] +name = "mkdocs-get-deps" +version = "0.2.0" +description = "MkDocs extension that lists all dependencies according to a mkdocs.yml file" +optional = false +python-versions = ">=3.8" +files = [ + {file = "mkdocs_get_deps-0.2.0-py3-none-any.whl", hash = "sha256:2bf11d0b133e77a0dd036abeeb06dec8775e46efa526dc70667d8863eefc6134"}, + {file = "mkdocs_get_deps-0.2.0.tar.gz", hash = "sha256:162b3d129c7fad9b19abfdcb9c1458a651628e4b1dea628ac68790fb3061c60c"}, +] + +[package.dependencies] +mergedeep = ">=1.3.4" +platformdirs = ">=2.2.0" +pyyaml = ">=5.1" + +[[package]] +name = "mkdocs-material" +version = "9.5.20" +description = "Documentation that simply works" +optional = false +python-versions = ">=3.8" +files = [ + {file = "mkdocs_material-9.5.20-py3-none-any.whl", hash = "sha256:ad0094a7597bcb5d0cc3e8e543a10927c2581f7f647b9bb4861600f583180f9b"}, + {file = "mkdocs_material-9.5.20.tar.gz", hash = "sha256:986eef0250d22f70fb06ce0f4eac64cc92bd797a589ec3892ce31fad976fe3da"}, +] + +[package.dependencies] +babel = ">=2.10,<3.0" +colorama = ">=0.4,<1.0" +jinja2 = ">=3.0,<4.0" +markdown = ">=3.2,<4.0" +mkdocs = ">=1.6,<2.0" +mkdocs-material-extensions = ">=1.3,<2.0" +paginate = ">=0.5,<1.0" +pygments = ">=2.16,<3.0" +pymdown-extensions = ">=10.2,<11.0" +regex = ">=2022.4" +requests = ">=2.26,<3.0" + +[package.extras] +git = ["mkdocs-git-committers-plugin-2 (>=1.1,<2.0)", "mkdocs-git-revision-date-localized-plugin (>=1.2.4,<2.0)"] +imaging = ["cairosvg (>=2.6,<3.0)", "pillow (>=10.2,<11.0)"] +recommended = ["mkdocs-minify-plugin (>=0.7,<1.0)", "mkdocs-redirects (>=1.2,<2.0)", "mkdocs-rss-plugin (>=1.6,<2.0)"] + +[[package]] +name = "mkdocs-material-extensions" +version = "1.3.1" +description = "Extension pack for Python Markdown and MkDocs Material." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "mkdocs_material_extensions-1.3.1-py3-none-any.whl", hash = "sha256:adff8b62700b25cb77b53358dad940f3ef973dd6db797907c49e3c2ef3ab4e31"}, + {file = "mkdocs_material_extensions-1.3.1.tar.gz", hash = "sha256:10c9511cea88f568257f960358a467d12b970e1f7b2c0e5fb2bb48cab1928443"}, +] + +[[package]] +name = "mkdocs-section-index" +version = "0.3.9" +description = "MkDocs plugin to allow clickable sections that lead to an index page" +optional = false +python-versions = ">=3.8" +files = [ + {file = "mkdocs_section_index-0.3.9-py3-none-any.whl", hash = "sha256:5e5eb288e8d7984d36c11ead5533f376fdf23498f44e903929d72845b24dfe34"}, + {file = "mkdocs_section_index-0.3.9.tar.gz", hash = "sha256:b66128d19108beceb08b226ee1ba0981840d14baf8a652b6c59e650f3f92e4f8"}, +] + +[package.dependencies] +mkdocs = ">=1.2" + +[[package]] +name = "mkdocstrings" +version = "0.25.0" +description = "Automatic documentation from sources, for MkDocs." +optional = false +python-versions = ">=3.8" +files = [ + {file = "mkdocstrings-0.25.0-py3-none-any.whl", hash = "sha256:df1b63f26675fcde8c1b77e7ea996cd2f93220b148e06455428f676f5dc838f1"}, + {file = "mkdocstrings-0.25.0.tar.gz", hash = "sha256:066986b3fb5b9ef2d37c4417255a808f7e63b40ff8f67f6cab8054d903fbc91d"}, +] + +[package.dependencies] +click = ">=7.0" +Jinja2 = ">=2.11.1" +Markdown = ">=3.3" +MarkupSafe = ">=1.1" +mkdocs = ">=1.4" +mkdocs-autorefs = ">=0.3.1" +platformdirs = ">=2.2.0" +pymdown-extensions = ">=6.3" + +[package.extras] +crystal = ["mkdocstrings-crystal (>=0.3.4)"] +python = ["mkdocstrings-python (>=0.5.2)"] +python-legacy = ["mkdocstrings-python-legacy (>=0.2.1)"] + +[[package]] +name = "mkdocstrings-python" +version = "1.10.0" +description = "A Python handler for mkdocstrings." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "mkdocstrings_python-1.10.0-py3-none-any.whl", hash = "sha256:ba833fbd9d178a4b9d5cb2553a4df06e51dc1f51e41559a4d2398c16a6f69ecc"}, + {file = "mkdocstrings_python-1.10.0.tar.gz", hash = "sha256:71678fac657d4d2bb301eed4e4d2d91499c095fd1f8a90fa76422a87a5693828"}, +] + +[package.dependencies] +griffe = ">=0.44" +mkdocstrings = ">=0.24.2" + [[package]] name = "multidict" version = "6.0.5" @@ -1142,38 +1372,38 @@ files = [ [[package]] name = "mypy" -version = "1.9.0" +version = "1.10.0" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" files = [ - {file = "mypy-1.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f8a67616990062232ee4c3952f41c779afac41405806042a8126fe96e098419f"}, - {file = "mypy-1.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d357423fa57a489e8c47b7c85dfb96698caba13d66e086b412298a1a0ea3b0ed"}, - {file = "mypy-1.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49c87c15aed320de9b438ae7b00c1ac91cd393c1b854c2ce538e2a72d55df150"}, - {file = "mypy-1.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:48533cdd345c3c2e5ef48ba3b0d3880b257b423e7995dada04248725c6f77374"}, - {file = "mypy-1.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:4d3dbd346cfec7cb98e6cbb6e0f3c23618af826316188d587d1c1bc34f0ede03"}, - {file = "mypy-1.9.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:653265f9a2784db65bfca694d1edd23093ce49740b2244cde583aeb134c008f3"}, - {file = "mypy-1.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3a3c007ff3ee90f69cf0a15cbcdf0995749569b86b6d2f327af01fd1b8aee9dc"}, - {file = "mypy-1.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2418488264eb41f69cc64a69a745fad4a8f86649af4b1041a4c64ee61fc61129"}, - {file = "mypy-1.9.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:68edad3dc7d70f2f17ae4c6c1b9471a56138ca22722487eebacfd1eb5321d612"}, - {file = "mypy-1.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:85ca5fcc24f0b4aeedc1d02f93707bccc04733f21d41c88334c5482219b1ccb3"}, - {file = "mypy-1.9.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aceb1db093b04db5cd390821464504111b8ec3e351eb85afd1433490163d60cd"}, - {file = "mypy-1.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0235391f1c6f6ce487b23b9dbd1327b4ec33bb93934aa986efe8a9563d9349e6"}, - {file = "mypy-1.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d4d5ddc13421ba3e2e082a6c2d74c2ddb3979c39b582dacd53dd5d9431237185"}, - {file = "mypy-1.9.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:190da1ee69b427d7efa8aa0d5e5ccd67a4fb04038c380237a0d96829cb157913"}, - {file = "mypy-1.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:fe28657de3bfec596bbeef01cb219833ad9d38dd5393fc649f4b366840baefe6"}, - {file = "mypy-1.9.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e54396d70be04b34f31d2edf3362c1edd023246c82f1730bbf8768c28db5361b"}, - {file = "mypy-1.9.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5e6061f44f2313b94f920e91b204ec600982961e07a17e0f6cd83371cb23f5c2"}, - {file = "mypy-1.9.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81a10926e5473c5fc3da8abb04119a1f5811a236dc3a38d92015cb1e6ba4cb9e"}, - {file = "mypy-1.9.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b685154e22e4e9199fc95f298661deea28aaede5ae16ccc8cbb1045e716b3e04"}, - {file = "mypy-1.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:5d741d3fc7c4da608764073089e5f58ef6352bedc223ff58f2f038c2c4698a89"}, - 
{file = "mypy-1.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:587ce887f75dd9700252a3abbc9c97bbe165a4a630597845c61279cf32dfbf02"}, - {file = "mypy-1.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f88566144752999351725ac623471661c9d1cd8caa0134ff98cceeea181789f4"}, - {file = "mypy-1.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61758fabd58ce4b0720ae1e2fea5cfd4431591d6d590b197775329264f86311d"}, - {file = "mypy-1.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e49499be624dead83927e70c756970a0bc8240e9f769389cdf5714b0784ca6bf"}, - {file = "mypy-1.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:571741dc4194b4f82d344b15e8837e8c5fcc462d66d076748142327626a1b6e9"}, - {file = "mypy-1.9.0-py3-none-any.whl", hash = "sha256:a260627a570559181a9ea5de61ac6297aa5af202f06fd7ab093ce74e7181e43e"}, - {file = "mypy-1.9.0.tar.gz", hash = "sha256:3cc5da0127e6a478cddd906068496a97a7618a21ce9b54bde5bf7e539c7af974"}, + {file = "mypy-1.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:da1cbf08fb3b851ab3b9523a884c232774008267b1f83371ace57f412fe308c2"}, + {file = "mypy-1.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:12b6bfc1b1a66095ab413160a6e520e1dc076a28f3e22f7fb25ba3b000b4ef99"}, + {file = "mypy-1.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e36fb078cce9904c7989b9693e41cb9711e0600139ce3970c6ef814b6ebc2b2"}, + {file = "mypy-1.10.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2b0695d605ddcd3eb2f736cd8b4e388288c21e7de85001e9f85df9187f2b50f9"}, + {file = "mypy-1.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:cd777b780312ddb135bceb9bc8722a73ec95e042f911cc279e2ec3c667076051"}, + {file = "mypy-1.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3be66771aa5c97602f382230165b856c231d1277c511c9a8dd058be4784472e1"}, + {file = "mypy-1.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8b2cbaca148d0754a54d44121b5825ae71868c7592a53b7292eeb0f3fdae95ee"}, + {file = "mypy-1.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ec404a7cbe9fc0e92cb0e67f55ce0c025014e26d33e54d9e506a0f2d07fe5de"}, + {file = "mypy-1.10.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e22e1527dc3d4aa94311d246b59e47f6455b8729f4968765ac1eacf9a4760bc7"}, + {file = "mypy-1.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:a87dbfa85971e8d59c9cc1fcf534efe664d8949e4c0b6b44e8ca548e746a8d53"}, + {file = "mypy-1.10.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a781f6ad4bab20eef8b65174a57e5203f4be627b46291f4589879bf4e257b97b"}, + {file = "mypy-1.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b808e12113505b97d9023b0b5e0c0705a90571c6feefc6f215c1df9381256e30"}, + {file = "mypy-1.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f55583b12156c399dce2df7d16f8a5095291354f1e839c252ec6c0611e86e2e"}, + {file = "mypy-1.10.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4cf18f9d0efa1b16478c4c129eabec36148032575391095f73cae2e722fcf9d5"}, + {file = "mypy-1.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:bc6ac273b23c6b82da3bb25f4136c4fd42665f17f2cd850771cb600bdd2ebeda"}, + {file = "mypy-1.10.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9fd50226364cd2737351c79807775136b0abe084433b55b2e29181a4c3c878c0"}, + {file = "mypy-1.10.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f90cff89eea89273727d8783fef5d4a934be2fdca11b47def50cf5d311aff727"}, + {file = "mypy-1.10.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:fcfc70599efde5c67862a07a1aaf50e55bce629ace26bb19dc17cece5dd31ca4"}, + {file = "mypy-1.10.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:075cbf81f3e134eadaf247de187bd604748171d6b79736fa9b6c9685b4083061"}, + {file = "mypy-1.10.0-cp38-cp38-win_amd64.whl", hash = "sha256:3f298531bca95ff615b6e9f2fc0333aae27fa48052903a0ac90215021cdcfa4f"}, + {file = "mypy-1.10.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fa7ef5244615a2523b56c034becde4e9e3f9b034854c93639adb667ec9ec2976"}, + {file = "mypy-1.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3236a4c8f535a0631f85f5fcdffba71c7feeef76a6002fcba7c1a8e57c8be1ec"}, + {file = "mypy-1.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a2b5cdbb5dd35aa08ea9114436e0d79aceb2f38e32c21684dcf8e24e1e92821"}, + {file = "mypy-1.10.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:92f93b21c0fe73dc00abf91022234c79d793318b8a96faac147cd579c1671746"}, + {file = "mypy-1.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:28d0e038361b45f099cc086d9dd99c15ff14d0188f44ac883010e172ce86c38a"}, + {file = "mypy-1.10.0-py3-none-any.whl", hash = "sha256:f8c083976eb530019175aabadb60921e73b4f45736760826aa1689dda8208aee"}, + {file = "mypy-1.10.0.tar.gz", hash = "sha256:3d087fcbec056c4ee34974da493a826ce316947485cef3901f511848e687c131"}, ] [package.dependencies] @@ -1211,13 +1441,13 @@ files = [ [[package]] name = "openai" -version = "1.14.3" +version = "1.25.1" description = "The official Python library for the openai API" optional = false python-versions = ">=3.7.1" files = [ - {file = "openai-1.14.3-py3-none-any.whl", hash = "sha256:7a465994a7ccf677a110c6cc2ef9d86229bad42c060b585b67049aa749f3b774"}, - {file = "openai-1.14.3.tar.gz", hash = "sha256:37b514e9c0ff45383ec9b242abd0f7859b1080d4b54b61393ed341ecad1b8eb9"}, + {file = "openai-1.25.1-py3-none-any.whl", hash = "sha256:aa2f381f476f5fa4df8728a34a3e454c321caa064b7b68ab6e9daa1ed082dbf9"}, + {file = "openai-1.25.1.tar.gz", hash = "sha256:f561ce86f4b4008eb6c78622d641e4b7e1ab8a8cdb15d2f0b2a49942d40d21a8"}, ] [package.dependencies] @@ -1243,20 +1473,41 @@ files = [ {file = "packaging-24.0.tar.gz", hash = "sha256:eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9"}, ] +[[package]] +name = "paginate" +version = "0.5.6" +description = "Divides large result sets into pages for easier browsing" +optional = false +python-versions = "*" +files = [ + {file = "paginate-0.5.6.tar.gz", hash = "sha256:5e6007b6a9398177a7e1648d04fdd9f8c9766a1a945bceac82f1929e8c78af2d"}, +] + [[package]] name = "parso" -version = "0.8.3" +version = "0.8.4" description = "A Python Parser" optional = false python-versions = ">=3.6" files = [ - {file = "parso-0.8.3-py2.py3-none-any.whl", hash = "sha256:c001d4636cd3aecdaf33cbb40aebb59b094be2a74c556778ef5576c175e19e75"}, - {file = "parso-0.8.3.tar.gz", hash = "sha256:8c07be290bb59f03588915921e29e8a50002acaf2cdc5fa0e0114f91709fafa0"}, + {file = "parso-0.8.4-py2.py3-none-any.whl", hash = "sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18"}, + {file = "parso-0.8.4.tar.gz", hash = "sha256:eb3a7b58240fb99099a345571deecc0f9540ea5f4dd2fe14c2a99d6b281ab92d"}, ] [package.extras] -qa = ["flake8 (==3.8.3)", "mypy (==0.782)"] -testing = ["docopt", "pytest (<6.0.0)"] +qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"] +testing = ["docopt", "pytest"] + +[[package]] +name = "pathspec" +version = "0.12.1" +description = "Utility library for gitignore style pattern matching of file paths." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, + {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, +] [[package]] name = "pexpect" @@ -1274,28 +1525,29 @@ ptyprocess = ">=0.5" [[package]] name = "platformdirs" -version = "4.2.0" -description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." +version = "4.2.1" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." optional = false python-versions = ">=3.8" files = [ - {file = "platformdirs-4.2.0-py3-none-any.whl", hash = "sha256:0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068"}, - {file = "platformdirs-4.2.0.tar.gz", hash = "sha256:ef0cc731df711022c174543cb70a9b5bd22e5a9337c8624ef2c2ceb8ddad8768"}, + {file = "platformdirs-4.2.1-py3-none-any.whl", hash = "sha256:17d5a1161b3fd67b390023cb2d3b026bbd40abde6fdb052dfbd3a29c3ba22ee1"}, + {file = "platformdirs-4.2.1.tar.gz", hash = "sha256:031cd18d4ec63ec53e82dceaac0417d218a6863f7745dfcc9efe7793b7039bdf"}, ] [package.extras] docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"] +type = ["mypy (>=1.8)"] [[package]] name = "pluggy" -version = "1.4.0" +version = "1.5.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.8" files = [ - {file = "pluggy-1.4.0-py3-none-any.whl", hash = "sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981"}, - {file = "pluggy-1.4.0.tar.gz", hash = "sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be"}, + {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, + {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, ] [package.extras] @@ -1371,13 +1623,13 @@ tests = ["pytest"] [[package]] name = "pycparser" -version = "2.21" +version = "2.22" description = "C parser in Python" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +python-versions = ">=3.8" files = [ - {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, - {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, + {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"}, + {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"}, ] [[package]] @@ -1549,15 +1801,33 @@ files = [ plugins = ["importlib-metadata"] windows-terminal = ["colorama (>=0.4.6)"] +[[package]] +name = "pymdown-extensions" +version = "10.8.1" +description = "Extension pack for Python Markdown." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "pymdown_extensions-10.8.1-py3-none-any.whl", hash = "sha256:f938326115884f48c6059c67377c46cf631c733ef3629b6eed1349989d1b30cb"}, + {file = "pymdown_extensions-10.8.1.tar.gz", hash = "sha256:3ab1db5c9e21728dabf75192d71471f8e50f216627e9a1fa9535ecb0231b9940"}, +] + +[package.dependencies] +markdown = ">=3.6" +pyyaml = "*" + +[package.extras] +extra = ["pygments (>=2.12)"] + [[package]] name = "pytest" -version = "8.1.1" +version = "8.2.0" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.8" files = [ - {file = "pytest-8.1.1-py3-none-any.whl", hash = "sha256:2a8386cfc11fa9d2c50ee7b2a57e7d898ef90470a7a34c4b949ff59662bb78b7"}, - {file = "pytest-8.1.1.tar.gz", hash = "sha256:ac978141a75948948817d360297b7aae0fcb9d6ff6bc9ec6d514b85d5a65c044"}, + {file = "pytest-8.2.0-py3-none-any.whl", hash = "sha256:1733f0620f6cda4095bbf0d9ff8022486e91892245bb9e7d5542c018f612f233"}, + {file = "pytest-8.2.0.tar.gz", hash = "sha256:d507d4482197eac0ba2bae2e9babf0672eb333017bcedaa5fb1a3d42c1174b3f"}, ] [package.dependencies] @@ -1565,11 +1835,11 @@ colorama = {version = "*", markers = "sys_platform == \"win32\""} exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} iniconfig = "*" packaging = "*" -pluggy = ">=1.4,<2.0" +pluggy = ">=1.5,<2.0" tomli = {version = ">=1", markers = "python_version < \"3.11\""} [package.extras] -testing = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] +dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] [[package]] name = "python-dateutil" @@ -1682,106 +1952,115 @@ files = [ {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, ] +[[package]] +name = "pyyaml-env-tag" +version = "0.1" +description = "A custom YAML tag for referencing environment variables in YAML files. 
" +optional = false +python-versions = ">=3.6" +files = [ + {file = "pyyaml_env_tag-0.1-py3-none-any.whl", hash = "sha256:af31106dec8a4d68c60207c1886031cbf839b68aa7abccdb19868200532c2069"}, + {file = "pyyaml_env_tag-0.1.tar.gz", hash = "sha256:70092675bda14fdec33b31ba77e7543de9ddc88f2e5b99160396572d11525bdb"}, +] + +[package.dependencies] +pyyaml = "*" + [[package]] name = "pyzmq" -version = "25.1.2" +version = "26.0.3" description = "Python bindings for 0MQ" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" files = [ - {file = "pyzmq-25.1.2-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:e624c789359f1a16f83f35e2c705d07663ff2b4d4479bad35621178d8f0f6ea4"}, - {file = "pyzmq-25.1.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:49151b0efece79f6a79d41a461d78535356136ee70084a1c22532fc6383f4ad0"}, - {file = "pyzmq-25.1.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d9a5f194cf730f2b24d6af1f833c14c10f41023da46a7f736f48b6d35061e76e"}, - {file = "pyzmq-25.1.2-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:faf79a302f834d9e8304fafdc11d0d042266667ac45209afa57e5efc998e3872"}, - {file = "pyzmq-25.1.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f51a7b4ead28d3fca8dda53216314a553b0f7a91ee8fc46a72b402a78c3e43d"}, - {file = "pyzmq-25.1.2-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:0ddd6d71d4ef17ba5a87becf7ddf01b371eaba553c603477679ae817a8d84d75"}, - {file = "pyzmq-25.1.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:246747b88917e4867e2367b005fc8eefbb4a54b7db363d6c92f89d69abfff4b6"}, - {file = "pyzmq-25.1.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:00c48ae2fd81e2a50c3485de1b9d5c7c57cd85dc8ec55683eac16846e57ac979"}, - {file = "pyzmq-25.1.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5a68d491fc20762b630e5db2191dd07ff89834086740f70e978bb2ef2668be08"}, - {file = "pyzmq-25.1.2-cp310-cp310-win32.whl", hash = "sha256:09dfe949e83087da88c4a76767df04b22304a682d6154de2c572625c62ad6886"}, - {file = "pyzmq-25.1.2-cp310-cp310-win_amd64.whl", hash = "sha256:fa99973d2ed20417744fca0073390ad65ce225b546febb0580358e36aa90dba6"}, - {file = "pyzmq-25.1.2-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:82544e0e2d0c1811482d37eef297020a040c32e0687c1f6fc23a75b75db8062c"}, - {file = "pyzmq-25.1.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:01171fc48542348cd1a360a4b6c3e7d8f46cdcf53a8d40f84db6707a6768acc1"}, - {file = "pyzmq-25.1.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bc69c96735ab501419c432110016329bf0dea8898ce16fab97c6d9106dc0b348"}, - {file = "pyzmq-25.1.2-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3e124e6b1dd3dfbeb695435dff0e383256655bb18082e094a8dd1f6293114642"}, - {file = "pyzmq-25.1.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7598d2ba821caa37a0f9d54c25164a4fa351ce019d64d0b44b45540950458840"}, - {file = "pyzmq-25.1.2-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:d1299d7e964c13607efd148ca1f07dcbf27c3ab9e125d1d0ae1d580a1682399d"}, - {file = "pyzmq-25.1.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4e6f689880d5ad87918430957297c975203a082d9a036cc426648fcbedae769b"}, - {file = "pyzmq-25.1.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:cc69949484171cc961e6ecd4a8911b9ce7a0d1f738fcae717177c231bf77437b"}, - {file = "pyzmq-25.1.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:9880078f683466b7f567b8624bfc16cad65077be046b6e8abb53bed4eeb82dd3"}, - {file = "pyzmq-25.1.2-cp311-cp311-win32.whl", hash = "sha256:4e5837af3e5aaa99a091302df5ee001149baff06ad22b722d34e30df5f0d9097"}, - {file = "pyzmq-25.1.2-cp311-cp311-win_amd64.whl", hash = "sha256:25c2dbb97d38b5ac9fd15586e048ec5eb1e38f3d47fe7d92167b0c77bb3584e9"}, - {file = "pyzmq-25.1.2-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:11e70516688190e9c2db14fcf93c04192b02d457b582a1f6190b154691b4c93a"}, - {file = "pyzmq-25.1.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:313c3794d650d1fccaaab2df942af9f2c01d6217c846177cfcbc693c7410839e"}, - {file = "pyzmq-25.1.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b3cbba2f47062b85fe0ef9de5b987612140a9ba3a9c6d2543c6dec9f7c2ab27"}, - {file = "pyzmq-25.1.2-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fc31baa0c32a2ca660784d5af3b9487e13b61b3032cb01a115fce6588e1bed30"}, - {file = "pyzmq-25.1.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02c9087b109070c5ab0b383079fa1b5f797f8d43e9a66c07a4b8b8bdecfd88ee"}, - {file = "pyzmq-25.1.2-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:f8429b17cbb746c3e043cb986328da023657e79d5ed258b711c06a70c2ea7537"}, - {file = "pyzmq-25.1.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:5074adeacede5f810b7ef39607ee59d94e948b4fd954495bdb072f8c54558181"}, - {file = "pyzmq-25.1.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:7ae8f354b895cbd85212da245f1a5ad8159e7840e37d78b476bb4f4c3f32a9fe"}, - {file = "pyzmq-25.1.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:b264bf2cc96b5bc43ce0e852be995e400376bd87ceb363822e2cb1964fcdc737"}, - {file = "pyzmq-25.1.2-cp312-cp312-win32.whl", hash = "sha256:02bbc1a87b76e04fd780b45e7f695471ae6de747769e540da909173d50ff8e2d"}, - {file = "pyzmq-25.1.2-cp312-cp312-win_amd64.whl", hash = "sha256:ced111c2e81506abd1dc142e6cd7b68dd53747b3b7ae5edbea4578c5eeff96b7"}, - {file = "pyzmq-25.1.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:7b6d09a8962a91151f0976008eb7b29b433a560fde056ec7a3db9ec8f1075438"}, - {file = "pyzmq-25.1.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:967668420f36878a3c9ecb5ab33c9d0ff8d054f9c0233d995a6d25b0e95e1b6b"}, - {file = "pyzmq-25.1.2-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5edac3f57c7ddaacdb4d40f6ef2f9e299471fc38d112f4bc6d60ab9365445fb0"}, - {file = "pyzmq-25.1.2-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:0dabfb10ef897f3b7e101cacba1437bd3a5032ee667b7ead32bbcdd1a8422fe7"}, - {file = "pyzmq-25.1.2-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:2c6441e0398c2baacfe5ba30c937d274cfc2dc5b55e82e3749e333aabffde561"}, - {file = "pyzmq-25.1.2-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:16b726c1f6c2e7625706549f9dbe9b06004dfbec30dbed4bf50cbdfc73e5b32a"}, - {file = "pyzmq-25.1.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:a86c2dd76ef71a773e70551a07318b8e52379f58dafa7ae1e0a4be78efd1ff16"}, - {file = "pyzmq-25.1.2-cp36-cp36m-win32.whl", hash = "sha256:359f7f74b5d3c65dae137f33eb2bcfa7ad9ebefd1cab85c935f063f1dbb245cc"}, - {file = "pyzmq-25.1.2-cp36-cp36m-win_amd64.whl", hash = "sha256:55875492f820d0eb3417b51d96fea549cde77893ae3790fd25491c5754ea2f68"}, - {file = "pyzmq-25.1.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b8c8a419dfb02e91b453615c69568442e897aaf77561ee0064d789705ff37a92"}, - {file = "pyzmq-25.1.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:8807c87fa893527ae8a524c15fc505d9950d5e856f03dae5921b5e9aa3b8783b"}, - {file = "pyzmq-25.1.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5e319ed7d6b8f5fad9b76daa0a68497bc6f129858ad956331a5835785761e003"}, - {file = "pyzmq-25.1.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:3c53687dde4d9d473c587ae80cc328e5b102b517447456184b485587ebd18b62"}, - {file = "pyzmq-25.1.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:9add2e5b33d2cd765ad96d5eb734a5e795a0755f7fc49aa04f76d7ddda73fd70"}, - {file = "pyzmq-25.1.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:e690145a8c0c273c28d3b89d6fb32c45e0d9605b2293c10e650265bf5c11cfec"}, - {file = "pyzmq-25.1.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:00a06faa7165634f0cac1abb27e54d7a0b3b44eb9994530b8ec73cf52e15353b"}, - {file = "pyzmq-25.1.2-cp37-cp37m-win32.whl", hash = "sha256:0f97bc2f1f13cb16905a5f3e1fbdf100e712d841482b2237484360f8bc4cb3d7"}, - {file = "pyzmq-25.1.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6cc0020b74b2e410287e5942e1e10886ff81ac77789eb20bec13f7ae681f0fdd"}, - {file = "pyzmq-25.1.2-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:bef02cfcbded83473bdd86dd8d3729cd82b2e569b75844fb4ea08fee3c26ae41"}, - {file = "pyzmq-25.1.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e10a4b5a4b1192d74853cc71a5e9fd022594573926c2a3a4802020360aa719d8"}, - {file = "pyzmq-25.1.2-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:8c5f80e578427d4695adac6fdf4370c14a2feafdc8cb35549c219b90652536ae"}, - {file = "pyzmq-25.1.2-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:5dde6751e857910c1339890f3524de74007958557593b9e7e8c5f01cd919f8a7"}, - {file = "pyzmq-25.1.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea1608dd169da230a0ad602d5b1ebd39807ac96cae1845c3ceed39af08a5c6df"}, - {file = "pyzmq-25.1.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0f513130c4c361201da9bc69df25a086487250e16b5571ead521b31ff6b02220"}, - {file = "pyzmq-25.1.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:019744b99da30330798bb37df33549d59d380c78e516e3bab9c9b84f87a9592f"}, - {file = "pyzmq-25.1.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2e2713ef44be5d52dd8b8e2023d706bf66cb22072e97fc71b168e01d25192755"}, - {file = "pyzmq-25.1.2-cp38-cp38-win32.whl", hash = "sha256:07cd61a20a535524906595e09344505a9bd46f1da7a07e504b315d41cd42eb07"}, - {file = "pyzmq-25.1.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb7e49a17fb8c77d3119d41a4523e432eb0c6932187c37deb6fbb00cc3028088"}, - {file = "pyzmq-25.1.2-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:94504ff66f278ab4b7e03e4cba7e7e400cb73bfa9d3d71f58d8972a8dc67e7a6"}, - {file = "pyzmq-25.1.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6dd0d50bbf9dca1d0bdea219ae6b40f713a3fb477c06ca3714f208fd69e16fd8"}, - {file = "pyzmq-25.1.2-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:004ff469d21e86f0ef0369717351073e0e577428e514c47c8480770d5e24a565"}, - {file = "pyzmq-25.1.2-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c0b5ca88a8928147b7b1e2dfa09f3b6c256bc1135a1338536cbc9ea13d3b7add"}, - {file = "pyzmq-25.1.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c9a79f1d2495b167119d02be7448bfba57fad2a4207c4f68abc0bab4b92925b"}, - {file = "pyzmq-25.1.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:518efd91c3d8ac9f9b4f7dd0e2b7b8bf1a4fe82a308009016b07eaa48681af82"}, - {file = 
"pyzmq-25.1.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:1ec23bd7b3a893ae676d0e54ad47d18064e6c5ae1fadc2f195143fb27373f7f6"}, - {file = "pyzmq-25.1.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:db36c27baed588a5a8346b971477b718fdc66cf5b80cbfbd914b4d6d355e44e2"}, - {file = "pyzmq-25.1.2-cp39-cp39-win32.whl", hash = "sha256:39b1067f13aba39d794a24761e385e2eddc26295826530a8c7b6c6c341584289"}, - {file = "pyzmq-25.1.2-cp39-cp39-win_amd64.whl", hash = "sha256:8e9f3fabc445d0ce320ea2c59a75fe3ea591fdbdeebec5db6de530dd4b09412e"}, - {file = "pyzmq-25.1.2-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a8c1d566344aee826b74e472e16edae0a02e2a044f14f7c24e123002dcff1c05"}, - {file = "pyzmq-25.1.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:759cfd391a0996345ba94b6a5110fca9c557ad4166d86a6e81ea526c376a01e8"}, - {file = "pyzmq-25.1.2-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7c61e346ac34b74028ede1c6b4bcecf649d69b707b3ff9dc0fab453821b04d1e"}, - {file = "pyzmq-25.1.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4cb8fc1f8d69b411b8ec0b5f1ffbcaf14c1db95b6bccea21d83610987435f1a4"}, - {file = "pyzmq-25.1.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:3c00c9b7d1ca8165c610437ca0c92e7b5607b2f9076f4eb4b095c85d6e680a1d"}, - {file = "pyzmq-25.1.2-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:df0c7a16ebb94452d2909b9a7b3337940e9a87a824c4fc1c7c36bb4404cb0cde"}, - {file = "pyzmq-25.1.2-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:45999e7f7ed5c390f2e87ece7f6c56bf979fb213550229e711e45ecc7d42ccb8"}, - {file = "pyzmq-25.1.2-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ac170e9e048b40c605358667aca3d94e98f604a18c44bdb4c102e67070f3ac9b"}, - {file = "pyzmq-25.1.2-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1b604734bec94f05f81b360a272fc824334267426ae9905ff32dc2be433ab96"}, - {file = "pyzmq-25.1.2-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:a793ac733e3d895d96f865f1806f160696422554e46d30105807fdc9841b9f7d"}, - {file = "pyzmq-25.1.2-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0806175f2ae5ad4b835ecd87f5f85583316b69f17e97786f7443baaf54b9bb98"}, - {file = "pyzmq-25.1.2-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:ef12e259e7bc317c7597d4f6ef59b97b913e162d83b421dd0db3d6410f17a244"}, - {file = "pyzmq-25.1.2-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ea253b368eb41116011add00f8d5726762320b1bda892f744c91997b65754d73"}, - {file = "pyzmq-25.1.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b9b1f2ad6498445a941d9a4fee096d387fee436e45cc660e72e768d3d8ee611"}, - {file = "pyzmq-25.1.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:8b14c75979ce932c53b79976a395cb2a8cd3aaf14aef75e8c2cb55a330b9b49d"}, - {file = "pyzmq-25.1.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:889370d5174a741a62566c003ee8ddba4b04c3f09a97b8000092b7ca83ec9c49"}, - {file = "pyzmq-25.1.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9a18fff090441a40ffda8a7f4f18f03dc56ae73f148f1832e109f9bffa85df15"}, - {file = "pyzmq-25.1.2-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:99a6b36f95c98839ad98f8c553d8507644c880cf1e0a57fe5e3a3f3969040882"}, - {file = "pyzmq-25.1.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash 
= "sha256:4345c9a27f4310afbb9c01750e9461ff33d6fb74cd2456b107525bbeebcb5be3"}, - {file = "pyzmq-25.1.2-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3516e0b6224cf6e43e341d56da15fd33bdc37fa0c06af4f029f7d7dfceceabbc"}, - {file = "pyzmq-25.1.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:146b9b1f29ead41255387fb07be56dc29639262c0f7344f570eecdcd8d683314"}, - {file = "pyzmq-25.1.2.tar.gz", hash = "sha256:93f1aa311e8bb912e34f004cf186407a4e90eec4f0ecc0efd26056bf7eda0226"}, + {file = "pyzmq-26.0.3-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:44dd6fc3034f1eaa72ece33588867df9e006a7303725a12d64c3dff92330f625"}, + {file = "pyzmq-26.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:acb704195a71ac5ea5ecf2811c9ee19ecdc62b91878528302dd0be1b9451cc90"}, + {file = "pyzmq-26.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dbb9c997932473a27afa93954bb77a9f9b786b4ccf718d903f35da3232317de"}, + {file = "pyzmq-26.0.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6bcb34f869d431799c3ee7d516554797f7760cb2198ecaa89c3f176f72d062be"}, + {file = "pyzmq-26.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38ece17ec5f20d7d9b442e5174ae9f020365d01ba7c112205a4d59cf19dc38ee"}, + {file = "pyzmq-26.0.3-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:ba6e5e6588e49139a0979d03a7deb9c734bde647b9a8808f26acf9c547cab1bf"}, + {file = "pyzmq-26.0.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3bf8b000a4e2967e6dfdd8656cd0757d18c7e5ce3d16339e550bd462f4857e59"}, + {file = "pyzmq-26.0.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:2136f64fbb86451dbbf70223635a468272dd20075f988a102bf8a3f194a411dc"}, + {file = "pyzmq-26.0.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e8918973fbd34e7814f59143c5f600ecd38b8038161239fd1a3d33d5817a38b8"}, + {file = "pyzmq-26.0.3-cp310-cp310-win32.whl", hash = "sha256:0aaf982e68a7ac284377d051c742610220fd06d330dcd4c4dbb4cdd77c22a537"}, + {file = "pyzmq-26.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:f1a9b7d00fdf60b4039f4455afd031fe85ee8305b019334b72dcf73c567edc47"}, + {file = "pyzmq-26.0.3-cp310-cp310-win_arm64.whl", hash = "sha256:80b12f25d805a919d53efc0a5ad7c0c0326f13b4eae981a5d7b7cc343318ebb7"}, + {file = "pyzmq-26.0.3-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:a72a84570f84c374b4c287183debc776dc319d3e8ce6b6a0041ce2e400de3f32"}, + {file = "pyzmq-26.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7ca684ee649b55fd8f378127ac8462fb6c85f251c2fb027eb3c887e8ee347bcd"}, + {file = "pyzmq-26.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e222562dc0f38571c8b1ffdae9d7adb866363134299264a1958d077800b193b7"}, + {file = "pyzmq-26.0.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f17cde1db0754c35a91ac00b22b25c11da6eec5746431d6e5092f0cd31a3fea9"}, + {file = "pyzmq-26.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b7c0c0b3244bb2275abe255d4a30c050d541c6cb18b870975553f1fb6f37527"}, + {file = "pyzmq-26.0.3-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:ac97a21de3712afe6a6c071abfad40a6224fd14fa6ff0ff8d0c6e6cd4e2f807a"}, + {file = "pyzmq-26.0.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:88b88282e55fa39dd556d7fc04160bcf39dea015f78e0cecec8ff4f06c1fc2b5"}, + {file = "pyzmq-26.0.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:72b67f966b57dbd18dcc7efbc1c7fc9f5f983e572db1877081f075004614fcdd"}, + {file = 
"pyzmq-26.0.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f4b6cecbbf3b7380f3b61de3a7b93cb721125dc125c854c14ddc91225ba52f83"}, + {file = "pyzmq-26.0.3-cp311-cp311-win32.whl", hash = "sha256:eed56b6a39216d31ff8cd2f1d048b5bf1700e4b32a01b14379c3b6dde9ce3aa3"}, + {file = "pyzmq-26.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:3191d312c73e3cfd0f0afdf51df8405aafeb0bad71e7ed8f68b24b63c4f36500"}, + {file = "pyzmq-26.0.3-cp311-cp311-win_arm64.whl", hash = "sha256:b6907da3017ef55139cf0e417c5123a84c7332520e73a6902ff1f79046cd3b94"}, + {file = "pyzmq-26.0.3-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:068ca17214038ae986d68f4a7021f97e187ed278ab6dccb79f837d765a54d753"}, + {file = "pyzmq-26.0.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:7821d44fe07335bea256b9f1f41474a642ca55fa671dfd9f00af8d68a920c2d4"}, + {file = "pyzmq-26.0.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eeb438a26d87c123bb318e5f2b3d86a36060b01f22fbdffd8cf247d52f7c9a2b"}, + {file = "pyzmq-26.0.3-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:69ea9d6d9baa25a4dc9cef5e2b77b8537827b122214f210dd925132e34ae9b12"}, + {file = "pyzmq-26.0.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7daa3e1369355766dea11f1d8ef829905c3b9da886ea3152788dc25ee6079e02"}, + {file = "pyzmq-26.0.3-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:6ca7a9a06b52d0e38ccf6bca1aeff7be178917893f3883f37b75589d42c4ac20"}, + {file = "pyzmq-26.0.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1b7d0e124948daa4d9686d421ef5087c0516bc6179fdcf8828b8444f8e461a77"}, + {file = "pyzmq-26.0.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:e746524418b70f38550f2190eeee834db8850088c834d4c8406fbb9bc1ae10b2"}, + {file = "pyzmq-26.0.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:6b3146f9ae6af82c47a5282ac8803523d381b3b21caeae0327ed2f7ecb718798"}, + {file = "pyzmq-26.0.3-cp312-cp312-win32.whl", hash = "sha256:2b291d1230845871c00c8462c50565a9cd6026fe1228e77ca934470bb7d70ea0"}, + {file = "pyzmq-26.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:926838a535c2c1ea21c903f909a9a54e675c2126728c21381a94ddf37c3cbddf"}, + {file = "pyzmq-26.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:5bf6c237f8c681dfb91b17f8435b2735951f0d1fad10cc5dfd96db110243370b"}, + {file = "pyzmq-26.0.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0c0991f5a96a8e620f7691e61178cd8f457b49e17b7d9cfa2067e2a0a89fc1d5"}, + {file = "pyzmq-26.0.3-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:dbf012d8fcb9f2cf0643b65df3b355fdd74fc0035d70bb5c845e9e30a3a4654b"}, + {file = "pyzmq-26.0.3-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:01fbfbeb8249a68d257f601deb50c70c929dc2dfe683b754659569e502fbd3aa"}, + {file = "pyzmq-26.0.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c8eb19abe87029c18f226d42b8a2c9efdd139d08f8bf6e085dd9075446db450"}, + {file = "pyzmq-26.0.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:5344b896e79800af86ad643408ca9aa303a017f6ebff8cee5a3163c1e9aec987"}, + {file = "pyzmq-26.0.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:204e0f176fd1d067671157d049466869b3ae1fc51e354708b0dc41cf94e23a3a"}, + {file = "pyzmq-26.0.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:a42db008d58530efa3b881eeee4991146de0b790e095f7ae43ba5cc612decbc5"}, + {file = "pyzmq-26.0.3-cp37-cp37m-win32.whl", hash = "sha256:8d7a498671ca87e32b54cb47c82a92b40130a26c5197d392720a1bce1b3c77cf"}, + {file = 
"pyzmq-26.0.3-cp37-cp37m-win_amd64.whl", hash = "sha256:3b4032a96410bdc760061b14ed6a33613ffb7f702181ba999df5d16fb96ba16a"}, + {file = "pyzmq-26.0.3-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:2cc4e280098c1b192c42a849de8de2c8e0f3a84086a76ec5b07bfee29bda7d18"}, + {file = "pyzmq-26.0.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5bde86a2ed3ce587fa2b207424ce15b9a83a9fa14422dcc1c5356a13aed3df9d"}, + {file = "pyzmq-26.0.3-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:34106f68e20e6ff253c9f596ea50397dbd8699828d55e8fa18bd4323d8d966e6"}, + {file = "pyzmq-26.0.3-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ebbbd0e728af5db9b04e56389e2299a57ea8b9dd15c9759153ee2455b32be6ad"}, + {file = "pyzmq-26.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f6b1d1c631e5940cac5a0b22c5379c86e8df6a4ec277c7a856b714021ab6cfad"}, + {file = "pyzmq-26.0.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:e891ce81edd463b3b4c3b885c5603c00141151dd9c6936d98a680c8c72fe5c67"}, + {file = "pyzmq-26.0.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:9b273ecfbc590a1b98f014ae41e5cf723932f3b53ba9367cfb676f838038b32c"}, + {file = "pyzmq-26.0.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b32bff85fb02a75ea0b68f21e2412255b5731f3f389ed9aecc13a6752f58ac97"}, + {file = "pyzmq-26.0.3-cp38-cp38-win32.whl", hash = "sha256:f6c21c00478a7bea93caaaef9e7629145d4153b15a8653e8bb4609d4bc70dbfc"}, + {file = "pyzmq-26.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:3401613148d93ef0fd9aabdbddb212de3db7a4475367f49f590c837355343972"}, + {file = "pyzmq-26.0.3-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:2ed8357f4c6e0daa4f3baf31832df8a33334e0fe5b020a61bc8b345a3db7a606"}, + {file = "pyzmq-26.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c1c8f2a2ca45292084c75bb6d3a25545cff0ed931ed228d3a1810ae3758f975f"}, + {file = "pyzmq-26.0.3-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:b63731993cdddcc8e087c64e9cf003f909262b359110070183d7f3025d1c56b5"}, + {file = "pyzmq-26.0.3-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b3cd31f859b662ac5d7f4226ec7d8bd60384fa037fc02aee6ff0b53ba29a3ba8"}, + {file = "pyzmq-26.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:115f8359402fa527cf47708d6f8a0f8234f0e9ca0cab7c18c9c189c194dbf620"}, + {file = "pyzmq-26.0.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:715bdf952b9533ba13dfcf1f431a8f49e63cecc31d91d007bc1deb914f47d0e4"}, + {file = "pyzmq-26.0.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:e1258c639e00bf5e8a522fec6c3eaa3e30cf1c23a2f21a586be7e04d50c9acab"}, + {file = "pyzmq-26.0.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:15c59e780be8f30a60816a9adab900c12a58d79c1ac742b4a8df044ab2a6d920"}, + {file = "pyzmq-26.0.3-cp39-cp39-win32.whl", hash = "sha256:d0cdde3c78d8ab5b46595054e5def32a755fc028685add5ddc7403e9f6de9879"}, + {file = "pyzmq-26.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:ce828058d482ef860746bf532822842e0ff484e27f540ef5c813d516dd8896d2"}, + {file = "pyzmq-26.0.3-cp39-cp39-win_arm64.whl", hash = "sha256:788f15721c64109cf720791714dc14afd0f449d63f3a5487724f024345067381"}, + {file = "pyzmq-26.0.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2c18645ef6294d99b256806e34653e86236eb266278c8ec8112622b61db255de"}, + {file = "pyzmq-26.0.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7e6bc96ebe49604df3ec2c6389cc3876cabe475e6bfc84ced1bf4e630662cb35"}, + {file = 
"pyzmq-26.0.3-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:971e8990c5cc4ddcff26e149398fc7b0f6a042306e82500f5e8db3b10ce69f84"}, + {file = "pyzmq-26.0.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8416c23161abd94cc7da80c734ad7c9f5dbebdadfdaa77dad78244457448223"}, + {file = "pyzmq-26.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:082a2988364b60bb5de809373098361cf1dbb239623e39e46cb18bc035ed9c0c"}, + {file = "pyzmq-26.0.3-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d57dfbf9737763b3a60d26e6800e02e04284926329aee8fb01049635e957fe81"}, + {file = "pyzmq-26.0.3-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:77a85dca4c2430ac04dc2a2185c2deb3858a34fe7f403d0a946fa56970cf60a1"}, + {file = "pyzmq-26.0.3-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4c82a6d952a1d555bf4be42b6532927d2a5686dd3c3e280e5f63225ab47ac1f5"}, + {file = "pyzmq-26.0.3-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4496b1282c70c442809fc1b151977c3d967bfb33e4e17cedbf226d97de18f709"}, + {file = "pyzmq-26.0.3-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:e4946d6bdb7ba972dfda282f9127e5756d4f299028b1566d1245fa0d438847e6"}, + {file = "pyzmq-26.0.3-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:03c0ae165e700364b266876d712acb1ac02693acd920afa67da2ebb91a0b3c09"}, + {file = "pyzmq-26.0.3-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:3e3070e680f79887d60feeda051a58d0ac36622e1759f305a41059eff62c6da7"}, + {file = "pyzmq-26.0.3-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6ca08b840fe95d1c2bd9ab92dac5685f949fc6f9ae820ec16193e5ddf603c3b2"}, + {file = "pyzmq-26.0.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e76654e9dbfb835b3518f9938e565c7806976c07b37c33526b574cc1a1050480"}, + {file = "pyzmq-26.0.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:871587bdadd1075b112e697173e946a07d722459d20716ceb3d1bd6c64bd08ce"}, + {file = "pyzmq-26.0.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d0a2d1bd63a4ad79483049b26514e70fa618ce6115220da9efdff63688808b17"}, + {file = "pyzmq-26.0.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0270b49b6847f0d106d64b5086e9ad5dc8a902413b5dbbb15d12b60f9c1747a4"}, + {file = "pyzmq-26.0.3-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:703c60b9910488d3d0954ca585c34f541e506a091a41930e663a098d3b794c67"}, + {file = "pyzmq-26.0.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:74423631b6be371edfbf7eabb02ab995c2563fee60a80a30829176842e71722a"}, + {file = "pyzmq-26.0.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:4adfbb5451196842a88fda3612e2c0414134874bffb1c2ce83ab4242ec9e027d"}, + {file = "pyzmq-26.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:3516119f4f9b8671083a70b6afaa0a070f5683e431ab3dc26e9215620d7ca1ad"}, + {file = "pyzmq-26.0.3.tar.gz", hash = "sha256:dba7d9f2e047dfa2bca3b01f4f84aa5246725203d6284e3790f2ca15fba6b40a"}, ] [package.dependencies] @@ -1789,104 +2068,90 @@ cffi = {version = "*", markers = "implementation_name == \"pypy\""} [[package]] name = "regex" -version = "2023.12.25" +version = "2024.4.28" description = "Alternative regular expression module, to replace re." 
optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "regex-2023.12.25-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0694219a1d54336fd0445ea382d49d36882415c0134ee1e8332afd1529f0baa5"}, - {file = "regex-2023.12.25-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b014333bd0217ad3d54c143de9d4b9a3ca1c5a29a6d0d554952ea071cff0f1f8"}, - {file = "regex-2023.12.25-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d865984b3f71f6d0af64d0d88f5733521698f6c16f445bb09ce746c92c97c586"}, - {file = "regex-2023.12.25-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e0eabac536b4cc7f57a5f3d095bfa557860ab912f25965e08fe1545e2ed8b4c"}, - {file = "regex-2023.12.25-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c25a8ad70e716f96e13a637802813f65d8a6760ef48672aa3502f4c24ea8b400"}, - {file = "regex-2023.12.25-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a9b6d73353f777630626f403b0652055ebfe8ff142a44ec2cf18ae470395766e"}, - {file = "regex-2023.12.25-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9cc99d6946d750eb75827cb53c4371b8b0fe89c733a94b1573c9dd16ea6c9e4"}, - {file = "regex-2023.12.25-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88d1f7bef20c721359d8675f7d9f8e414ec5003d8f642fdfd8087777ff7f94b5"}, - {file = "regex-2023.12.25-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:cb3fe77aec8f1995611f966d0c656fdce398317f850d0e6e7aebdfe61f40e1cd"}, - {file = "regex-2023.12.25-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:7aa47c2e9ea33a4a2a05f40fcd3ea36d73853a2aae7b4feab6fc85f8bf2c9704"}, - {file = "regex-2023.12.25-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:df26481f0c7a3f8739fecb3e81bc9da3fcfae34d6c094563b9d4670b047312e1"}, - {file = "regex-2023.12.25-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:c40281f7d70baf6e0db0c2f7472b31609f5bc2748fe7275ea65a0b4601d9b392"}, - {file = "regex-2023.12.25-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:d94a1db462d5690ebf6ae86d11c5e420042b9898af5dcf278bd97d6bda065423"}, - {file = "regex-2023.12.25-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ba1b30765a55acf15dce3f364e4928b80858fa8f979ad41f862358939bdd1f2f"}, - {file = "regex-2023.12.25-cp310-cp310-win32.whl", hash = "sha256:150c39f5b964e4d7dba46a7962a088fbc91f06e606f023ce57bb347a3b2d4630"}, - {file = "regex-2023.12.25-cp310-cp310-win_amd64.whl", hash = "sha256:09da66917262d9481c719599116c7dc0c321ffcec4b1f510c4f8a066f8768105"}, - {file = "regex-2023.12.25-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:1b9d811f72210fa9306aeb88385b8f8bcef0dfbf3873410413c00aa94c56c2b6"}, - {file = "regex-2023.12.25-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d902a43085a308cef32c0d3aea962524b725403fd9373dea18110904003bac97"}, - {file = "regex-2023.12.25-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d166eafc19f4718df38887b2bbe1467a4f74a9830e8605089ea7a30dd4da8887"}, - {file = "regex-2023.12.25-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7ad32824b7f02bb3c9f80306d405a1d9b7bb89362d68b3c5a9be53836caebdb"}, - {file = "regex-2023.12.25-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:636ba0a77de609d6510235b7f0e77ec494d2657108f777e8765efc060094c98c"}, - {file = "regex-2023.12.25-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:0fda75704357805eb953a3ee15a2b240694a9a514548cd49b3c5124b4e2ad01b"}, - {file = "regex-2023.12.25-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f72cbae7f6b01591f90814250e636065850c5926751af02bb48da94dfced7baa"}, - {file = "regex-2023.12.25-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:db2a0b1857f18b11e3b0e54ddfefc96af46b0896fb678c85f63fb8c37518b3e7"}, - {file = "regex-2023.12.25-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:7502534e55c7c36c0978c91ba6f61703faf7ce733715ca48f499d3dbbd7657e0"}, - {file = "regex-2023.12.25-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:e8c7e08bb566de4faaf11984af13f6bcf6a08f327b13631d41d62592681d24fe"}, - {file = "regex-2023.12.25-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:283fc8eed679758de38fe493b7d7d84a198b558942b03f017b1f94dda8efae80"}, - {file = "regex-2023.12.25-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:f44dd4d68697559d007462b0a3a1d9acd61d97072b71f6d1968daef26bc744bd"}, - {file = "regex-2023.12.25-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:67d3ccfc590e5e7197750fcb3a2915b416a53e2de847a728cfa60141054123d4"}, - {file = "regex-2023.12.25-cp311-cp311-win32.whl", hash = "sha256:68191f80a9bad283432385961d9efe09d783bcd36ed35a60fb1ff3f1ec2efe87"}, - {file = "regex-2023.12.25-cp311-cp311-win_amd64.whl", hash = "sha256:7d2af3f6b8419661a0c421584cfe8aaec1c0e435ce7e47ee2a97e344b98f794f"}, - {file = "regex-2023.12.25-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:8a0ccf52bb37d1a700375a6b395bff5dd15c50acb745f7db30415bae3c2b0715"}, - {file = "regex-2023.12.25-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c3c4a78615b7762740531c27cf46e2f388d8d727d0c0c739e72048beb26c8a9d"}, - {file = "regex-2023.12.25-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ad83e7545b4ab69216cef4cc47e344d19622e28aabec61574b20257c65466d6a"}, - {file = "regex-2023.12.25-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b7a635871143661feccce3979e1727c4e094f2bdfd3ec4b90dfd4f16f571a87a"}, - {file = "regex-2023.12.25-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d498eea3f581fbe1b34b59c697512a8baef88212f92e4c7830fcc1499f5b45a5"}, - {file = "regex-2023.12.25-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:43f7cd5754d02a56ae4ebb91b33461dc67be8e3e0153f593c509e21d219c5060"}, - {file = "regex-2023.12.25-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51f4b32f793812714fd5307222a7f77e739b9bc566dc94a18126aba3b92b98a3"}, - {file = "regex-2023.12.25-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ba99d8077424501b9616b43a2d208095746fb1284fc5ba490139651f971d39d9"}, - {file = "regex-2023.12.25-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:4bfc2b16e3ba8850e0e262467275dd4d62f0d045e0e9eda2bc65078c0110a11f"}, - {file = "regex-2023.12.25-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8c2c19dae8a3eb0ea45a8448356ed561be843b13cbc34b840922ddf565498c1c"}, - {file = "regex-2023.12.25-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:60080bb3d8617d96f0fb7e19796384cc2467447ef1c491694850ebd3670bc457"}, - {file = "regex-2023.12.25-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b77e27b79448e34c2c51c09836033056a0547aa360c45eeeb67803da7b0eedaf"}, - {file = "regex-2023.12.25-cp312-cp312-musllinux_1_1_x86_64.whl", hash = 
"sha256:518440c991f514331f4850a63560321f833979d145d7d81186dbe2f19e27ae3d"}, - {file = "regex-2023.12.25-cp312-cp312-win32.whl", hash = "sha256:e2610e9406d3b0073636a3a2e80db05a02f0c3169b5632022b4e81c0364bcda5"}, - {file = "regex-2023.12.25-cp312-cp312-win_amd64.whl", hash = "sha256:cc37b9aeebab425f11f27e5e9e6cf580be7206c6582a64467a14dda211abc232"}, - {file = "regex-2023.12.25-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:da695d75ac97cb1cd725adac136d25ca687da4536154cdc2815f576e4da11c69"}, - {file = "regex-2023.12.25-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d126361607b33c4eb7b36debc173bf25d7805847346dd4d99b5499e1fef52bc7"}, - {file = "regex-2023.12.25-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4719bb05094d7d8563a450cf8738d2e1061420f79cfcc1fa7f0a44744c4d8f73"}, - {file = "regex-2023.12.25-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5dd58946bce44b53b06d94aa95560d0b243eb2fe64227cba50017a8d8b3cd3e2"}, - {file = "regex-2023.12.25-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22a86d9fff2009302c440b9d799ef2fe322416d2d58fc124b926aa89365ec482"}, - {file = "regex-2023.12.25-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2aae8101919e8aa05ecfe6322b278f41ce2994c4a430303c4cd163fef746e04f"}, - {file = "regex-2023.12.25-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:e692296c4cc2873967771345a876bcfc1c547e8dd695c6b89342488b0ea55cd8"}, - {file = "regex-2023.12.25-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:263ef5cc10979837f243950637fffb06e8daed7f1ac1e39d5910fd29929e489a"}, - {file = "regex-2023.12.25-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:d6f7e255e5fa94642a0724e35406e6cb7001c09d476ab5fce002f652b36d0c39"}, - {file = "regex-2023.12.25-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:88ad44e220e22b63b0f8f81f007e8abbb92874d8ced66f32571ef8beb0643b2b"}, - {file = "regex-2023.12.25-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:3a17d3ede18f9cedcbe23d2daa8a2cd6f59fe2bf082c567e43083bba3fb00347"}, - {file = "regex-2023.12.25-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d15b274f9e15b1a0b7a45d2ac86d1f634d983ca40d6b886721626c47a400bf39"}, - {file = "regex-2023.12.25-cp37-cp37m-win32.whl", hash = "sha256:ed19b3a05ae0c97dd8f75a5d8f21f7723a8c33bbc555da6bbe1f96c470139d3c"}, - {file = "regex-2023.12.25-cp37-cp37m-win_amd64.whl", hash = "sha256:a6d1047952c0b8104a1d371f88f4ab62e6275567d4458c1e26e9627ad489b445"}, - {file = "regex-2023.12.25-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:b43523d7bc2abd757119dbfb38af91b5735eea45537ec6ec3a5ec3f9562a1c53"}, - {file = "regex-2023.12.25-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:efb2d82f33b2212898f1659fb1c2e9ac30493ac41e4d53123da374c3b5541e64"}, - {file = "regex-2023.12.25-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b7fca9205b59c1a3d5031f7e64ed627a1074730a51c2a80e97653e3e9fa0d415"}, - {file = "regex-2023.12.25-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:086dd15e9435b393ae06f96ab69ab2d333f5d65cbe65ca5a3ef0ec9564dfe770"}, - {file = "regex-2023.12.25-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e81469f7d01efed9b53740aedd26085f20d49da65f9c1f41e822a33992cb1590"}, - {file = "regex-2023.12.25-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:34e4af5b27232f68042aa40a91c3b9bb4da0eeb31b7632e0091afc4310afe6cb"}, - {file = "regex-2023.12.25-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9852b76ab558e45b20bf1893b59af64a28bd3820b0c2efc80e0a70a4a3ea51c1"}, - {file = "regex-2023.12.25-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ff100b203092af77d1a5a7abe085b3506b7eaaf9abf65b73b7d6905b6cb76988"}, - {file = "regex-2023.12.25-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:cc038b2d8b1470364b1888a98fd22d616fba2b6309c5b5f181ad4483e0017861"}, - {file = "regex-2023.12.25-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:094ba386bb5c01e54e14434d4caabf6583334090865b23ef58e0424a6286d3dc"}, - {file = "regex-2023.12.25-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5cd05d0f57846d8ba4b71d9c00f6f37d6b97d5e5ef8b3c3840426a475c8f70f4"}, - {file = "regex-2023.12.25-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:9aa1a67bbf0f957bbe096375887b2505f5d8ae16bf04488e8b0f334c36e31360"}, - {file = "regex-2023.12.25-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:98a2636994f943b871786c9e82bfe7883ecdaba2ef5df54e1450fa9869d1f756"}, - {file = "regex-2023.12.25-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:37f8e93a81fc5e5bd8db7e10e62dc64261bcd88f8d7e6640aaebe9bc180d9ce2"}, - {file = "regex-2023.12.25-cp38-cp38-win32.whl", hash = "sha256:d78bd484930c1da2b9679290a41cdb25cc127d783768a0369d6b449e72f88beb"}, - {file = "regex-2023.12.25-cp38-cp38-win_amd64.whl", hash = "sha256:b521dcecebc5b978b447f0f69b5b7f3840eac454862270406a39837ffae4e697"}, - {file = "regex-2023.12.25-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:f7bc09bc9c29ebead055bcba136a67378f03d66bf359e87d0f7c759d6d4ffa31"}, - {file = "regex-2023.12.25-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e14b73607d6231f3cc4622809c196b540a6a44e903bcfad940779c80dffa7be7"}, - {file = "regex-2023.12.25-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9eda5f7a50141291beda3edd00abc2d4a5b16c29c92daf8d5bd76934150f3edc"}, - {file = "regex-2023.12.25-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc6bb9aa69aacf0f6032c307da718f61a40cf970849e471254e0e91c56ffca95"}, - {file = "regex-2023.12.25-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:298dc6354d414bc921581be85695d18912bea163a8b23cac9a2562bbcd5088b1"}, - {file = "regex-2023.12.25-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2f4e475a80ecbd15896a976aa0b386c5525d0ed34d5c600b6d3ebac0a67c7ddf"}, - {file = "regex-2023.12.25-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:531ac6cf22b53e0696f8e1d56ce2396311254eb806111ddd3922c9d937151dae"}, - {file = "regex-2023.12.25-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:22f3470f7524b6da61e2020672df2f3063676aff444db1daa283c2ea4ed259d6"}, - {file = "regex-2023.12.25-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:89723d2112697feaa320c9d351e5f5e7b841e83f8b143dba8e2d2b5f04e10923"}, - {file = "regex-2023.12.25-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0ecf44ddf9171cd7566ef1768047f6e66975788258b1c6c6ca78098b95cf9a3d"}, - {file = "regex-2023.12.25-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:905466ad1702ed4acfd67a902af50b8db1feeb9781436372261808df7a2a7bca"}, - {file = 
"regex-2023.12.25-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:4558410b7a5607a645e9804a3e9dd509af12fb72b9825b13791a37cd417d73a5"}, - {file = "regex-2023.12.25-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:7e316026cc1095f2a3e8cc012822c99f413b702eaa2ca5408a513609488cb62f"}, - {file = "regex-2023.12.25-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3b1de218d5375cd6ac4b5493e0b9f3df2be331e86520f23382f216c137913d20"}, - {file = "regex-2023.12.25-cp39-cp39-win32.whl", hash = "sha256:11a963f8e25ab5c61348d090bf1b07f1953929c13bd2309a0662e9ff680763c9"}, - {file = "regex-2023.12.25-cp39-cp39-win_amd64.whl", hash = "sha256:e693e233ac92ba83a87024e1d32b5f9ab15ca55ddd916d878146f4e3406b5c91"}, - {file = "regex-2023.12.25.tar.gz", hash = "sha256:29171aa128da69afdf4bde412d5bedc335f2ca8fcfe4489038577d05f16181e5"}, + {file = "regex-2024.4.28-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cd196d056b40af073d95a2879678585f0b74ad35190fac04ca67954c582c6b61"}, + {file = "regex-2024.4.28-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8bb381f777351bd534462f63e1c6afb10a7caa9fa2a421ae22c26e796fe31b1f"}, + {file = "regex-2024.4.28-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:47af45b6153522733aa6e92543938e97a70ce0900649ba626cf5aad290b737b6"}, + {file = "regex-2024.4.28-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99d6a550425cc51c656331af0e2b1651e90eaaa23fb4acde577cf15068e2e20f"}, + {file = "regex-2024.4.28-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bf29304a8011feb58913c382902fde3395957a47645bf848eea695839aa101b7"}, + {file = "regex-2024.4.28-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:92da587eee39a52c91aebea8b850e4e4f095fe5928d415cb7ed656b3460ae79a"}, + {file = "regex-2024.4.28-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6277d426e2f31bdbacb377d17a7475e32b2d7d1f02faaecc48d8e370c6a3ff31"}, + {file = "regex-2024.4.28-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:28e1f28d07220c0f3da0e8fcd5a115bbb53f8b55cecf9bec0c946eb9a059a94c"}, + {file = "regex-2024.4.28-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:aaa179975a64790c1f2701ac562b5eeb733946eeb036b5bcca05c8d928a62f10"}, + {file = "regex-2024.4.28-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6f435946b7bf7a1b438b4e6b149b947c837cb23c704e780c19ba3e6855dbbdd3"}, + {file = "regex-2024.4.28-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:19d6c11bf35a6ad077eb23852827f91c804eeb71ecb85db4ee1386825b9dc4db"}, + {file = "regex-2024.4.28-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:fdae0120cddc839eb8e3c15faa8ad541cc6d906d3eb24d82fb041cfe2807bc1e"}, + {file = "regex-2024.4.28-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:e672cf9caaf669053121f1766d659a8813bd547edef6e009205378faf45c67b8"}, + {file = "regex-2024.4.28-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f57515750d07e14743db55d59759893fdb21d2668f39e549a7d6cad5d70f9fea"}, + {file = "regex-2024.4.28-cp310-cp310-win32.whl", hash = "sha256:a1409c4eccb6981c7baabc8888d3550df518add6e06fe74fa1d9312c1838652d"}, + {file = "regex-2024.4.28-cp310-cp310-win_amd64.whl", hash = "sha256:1f687a28640f763f23f8a9801fe9e1b37338bb1ca5d564ddd41619458f1f22d1"}, + {file = "regex-2024.4.28-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:84077821c85f222362b72fdc44f7a3a13587a013a45cf14534df1cbbdc9a6796"}, + {file = 
"regex-2024.4.28-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b45d4503de8f4f3dc02f1d28a9b039e5504a02cc18906cfe744c11def942e9eb"}, + {file = "regex-2024.4.28-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:457c2cd5a646dd4ed536c92b535d73548fb8e216ebee602aa9f48e068fc393f3"}, + {file = "regex-2024.4.28-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2b51739ddfd013c6f657b55a508de8b9ea78b56d22b236052c3a85a675102dc6"}, + {file = "regex-2024.4.28-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:459226445c7d7454981c4c0ce0ad1a72e1e751c3e417f305722bbcee6697e06a"}, + {file = "regex-2024.4.28-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:670fa596984b08a4a769491cbdf22350431970d0112e03d7e4eeaecaafcd0fec"}, + {file = "regex-2024.4.28-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe00f4fe11c8a521b173e6324d862ee7ee3412bf7107570c9b564fe1119b56fb"}, + {file = "regex-2024.4.28-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:36f392dc7763fe7924575475736bddf9ab9f7a66b920932d0ea50c2ded2f5636"}, + {file = "regex-2024.4.28-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:23a412b7b1a7063f81a742463f38821097b6a37ce1e5b89dd8e871d14dbfd86b"}, + {file = "regex-2024.4.28-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:f1d6e4b7b2ae3a6a9df53efbf199e4bfcff0959dbdb5fd9ced34d4407348e39a"}, + {file = "regex-2024.4.28-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:499334ad139557de97cbc4347ee921c0e2b5e9c0f009859e74f3f77918339257"}, + {file = "regex-2024.4.28-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:0940038bec2fe9e26b203d636c44d31dd8766abc1fe66262da6484bd82461ccf"}, + {file = "regex-2024.4.28-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:66372c2a01782c5fe8e04bff4a2a0121a9897e19223d9eab30c54c50b2ebeb7f"}, + {file = "regex-2024.4.28-cp311-cp311-win32.whl", hash = "sha256:c77d10ec3c1cf328b2f501ca32583625987ea0f23a0c2a49b37a39ee5c4c4630"}, + {file = "regex-2024.4.28-cp311-cp311-win_amd64.whl", hash = "sha256:fc0916c4295c64d6890a46e02d4482bb5ccf33bf1a824c0eaa9e83b148291f90"}, + {file = "regex-2024.4.28-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:08a1749f04fee2811c7617fdd46d2e46d09106fa8f475c884b65c01326eb15c5"}, + {file = "regex-2024.4.28-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b8eb28995771c087a73338f695a08c9abfdf723d185e57b97f6175c5051ff1ae"}, + {file = "regex-2024.4.28-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:dd7ef715ccb8040954d44cfeff17e6b8e9f79c8019daae2fd30a8806ef5435c0"}, + {file = "regex-2024.4.28-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb0315a2b26fde4005a7c401707c5352df274460f2f85b209cf6024271373013"}, + {file = "regex-2024.4.28-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f2fc053228a6bd3a17a9b0a3f15c3ab3cf95727b00557e92e1cfe094b88cc662"}, + {file = "regex-2024.4.28-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7fe9739a686dc44733d52d6e4f7b9c77b285e49edf8570754b322bca6b85b4cc"}, + {file = "regex-2024.4.28-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a74fcf77d979364f9b69fcf8200849ca29a374973dc193a7317698aa37d8b01c"}, + {file = "regex-2024.4.28-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:965fd0cf4694d76f6564896b422724ec7b959ef927a7cb187fc6b3f4e4f59833"}, + {file = 
"regex-2024.4.28-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:2fef0b38c34ae675fcbb1b5db760d40c3fc3612cfa186e9e50df5782cac02bcd"}, + {file = "regex-2024.4.28-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bc365ce25f6c7c5ed70e4bc674f9137f52b7dd6a125037f9132a7be52b8a252f"}, + {file = "regex-2024.4.28-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:ac69b394764bb857429b031d29d9604842bc4cbfd964d764b1af1868eeebc4f0"}, + {file = "regex-2024.4.28-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:144a1fc54765f5c5c36d6d4b073299832aa1ec6a746a6452c3ee7b46b3d3b11d"}, + {file = "regex-2024.4.28-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2630ca4e152c221072fd4a56d4622b5ada876f668ecd24d5ab62544ae6793ed6"}, + {file = "regex-2024.4.28-cp312-cp312-win32.whl", hash = "sha256:7f3502f03b4da52bbe8ba962621daa846f38489cae5c4a7b5d738f15f6443d17"}, + {file = "regex-2024.4.28-cp312-cp312-win_amd64.whl", hash = "sha256:0dd3f69098511e71880fb00f5815db9ed0ef62c05775395968299cb400aeab82"}, + {file = "regex-2024.4.28-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:374f690e1dd0dbdcddea4a5c9bdd97632cf656c69113f7cd6a361f2a67221cb6"}, + {file = "regex-2024.4.28-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:25f87ae6b96374db20f180eab083aafe419b194e96e4f282c40191e71980c666"}, + {file = "regex-2024.4.28-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5dbc1bcc7413eebe5f18196e22804a3be1bfdfc7e2afd415e12c068624d48247"}, + {file = "regex-2024.4.28-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f85151ec5a232335f1be022b09fbbe459042ea1951d8a48fef251223fc67eee1"}, + {file = "regex-2024.4.28-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:57ba112e5530530fd175ed550373eb263db4ca98b5f00694d73b18b9a02e7185"}, + {file = "regex-2024.4.28-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:224803b74aab56aa7be313f92a8d9911dcade37e5f167db62a738d0c85fdac4b"}, + {file = "regex-2024.4.28-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a54a047b607fd2d2d52a05e6ad294602f1e0dec2291152b745870afc47c1397"}, + {file = "regex-2024.4.28-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a2a512d623f1f2d01d881513af9fc6a7c46e5cfffb7dc50c38ce959f9246c94"}, + {file = "regex-2024.4.28-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c06bf3f38f0707592898428636cbb75d0a846651b053a1cf748763e3063a6925"}, + {file = "regex-2024.4.28-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:1031a5e7b048ee371ab3653aad3030ecfad6ee9ecdc85f0242c57751a05b0ac4"}, + {file = "regex-2024.4.28-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d7a353ebfa7154c871a35caca7bfd8f9e18666829a1dc187115b80e35a29393e"}, + {file = "regex-2024.4.28-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:7e76b9cfbf5ced1aca15a0e5b6f229344d9b3123439ffce552b11faab0114a02"}, + {file = "regex-2024.4.28-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:5ce479ecc068bc2a74cb98dd8dba99e070d1b2f4a8371a7dfe631f85db70fe6e"}, + {file = "regex-2024.4.28-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:7d77b6f63f806578c604dca209280e4c54f0fa9a8128bb8d2cc5fb6f99da4150"}, + {file = "regex-2024.4.28-cp38-cp38-win32.whl", hash = "sha256:d84308f097d7a513359757c69707ad339da799e53b7393819ec2ea36bc4beb58"}, + {file = "regex-2024.4.28-cp38-cp38-win_amd64.whl", hash = "sha256:2cc1b87bba1dd1a898e664a31012725e48af826bf3971e786c53e32e02adae6c"}, + {file = 
"regex-2024.4.28-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7413167c507a768eafb5424413c5b2f515c606be5bb4ef8c5dee43925aa5718b"}, + {file = "regex-2024.4.28-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:108e2dcf0b53a7c4ab8986842a8edcb8ab2e59919a74ff51c296772e8e74d0ae"}, + {file = "regex-2024.4.28-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f1c5742c31ba7d72f2dedf7968998730664b45e38827637e0f04a2ac7de2f5f1"}, + {file = "regex-2024.4.28-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ecc6148228c9ae25ce403eade13a0961de1cb016bdb35c6eafd8e7b87ad028b1"}, + {file = "regex-2024.4.28-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b7d893c8cf0e2429b823ef1a1d360a25950ed11f0e2a9df2b5198821832e1947"}, + {file = "regex-2024.4.28-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4290035b169578ffbbfa50d904d26bec16a94526071ebec3dadbebf67a26b25e"}, + {file = "regex-2024.4.28-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:44a22ae1cfd82e4ffa2066eb3390777dc79468f866f0625261a93e44cdf6482b"}, + {file = "regex-2024.4.28-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fd24fd140b69f0b0bcc9165c397e9b2e89ecbeda83303abf2a072609f60239e2"}, + {file = "regex-2024.4.28-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:39fb166d2196413bead229cd64a2ffd6ec78ebab83fff7d2701103cf9f4dfd26"}, + {file = "regex-2024.4.28-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9301cc6db4d83d2c0719f7fcda37229691745168bf6ae849bea2e85fc769175d"}, + {file = "regex-2024.4.28-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7c3d389e8d76a49923683123730c33e9553063d9041658f23897f0b396b2386f"}, + {file = "regex-2024.4.28-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:99ef6289b62042500d581170d06e17f5353b111a15aa6b25b05b91c6886df8fc"}, + {file = "regex-2024.4.28-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:b91d529b47798c016d4b4c1d06cc826ac40d196da54f0de3c519f5a297c5076a"}, + {file = "regex-2024.4.28-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:43548ad74ea50456e1c68d3c67fff3de64c6edb85bcd511d1136f9b5376fc9d1"}, + {file = "regex-2024.4.28-cp39-cp39-win32.whl", hash = "sha256:05d9b6578a22db7dedb4df81451f360395828b04f4513980b6bd7a1412c679cc"}, + {file = "regex-2024.4.28-cp39-cp39-win_amd64.whl", hash = "sha256:3986217ec830c2109875be740531feb8ddafe0dfa49767cdcd072ed7e8927962"}, + {file = "regex-2024.4.28.tar.gz", hash = "sha256:83ab366777ea45d58f72593adf35d36ca911ea8bd838483c1823b883a121b0e4"}, ] [[package]] @@ -2031,130 +2296,120 @@ blobfile = ["blobfile (>=2)"] [[package]] name = "tokenizers" -version = "0.15.2" +version = "0.19.1" description = "" optional = false python-versions = ">=3.7" files = [ - {file = "tokenizers-0.15.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:52f6130c9cbf70544287575a985bf44ae1bda2da7e8c24e97716080593638012"}, - {file = "tokenizers-0.15.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:054c1cc9c6d68f7ffa4e810b3d5131e0ba511b6e4be34157aa08ee54c2f8d9ee"}, - {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:a9b9b070fdad06e347563b88c278995735292ded1132f8657084989a4c84a6d5"}, - {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea621a7eef4b70e1f7a4e84dd989ae3f0eeb50fc8690254eacc08acb623e82f1"}, - {file = 
"tokenizers-0.15.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cf7fd9a5141634fa3aa8d6b7be362e6ae1b4cda60da81388fa533e0b552c98fd"}, - {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:44f2a832cd0825295f7179eaf173381dc45230f9227ec4b44378322d900447c9"}, - {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8b9ec69247a23747669ec4b0ca10f8e3dfb3545d550258129bd62291aabe8605"}, - {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40b6a4c78da863ff26dbd5ad9a8ecc33d8a8d97b535172601cf00aee9d7ce9ce"}, - {file = "tokenizers-0.15.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5ab2a4d21dcf76af60e05af8063138849eb1d6553a0d059f6534357bce8ba364"}, - {file = "tokenizers-0.15.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a47acfac7e511f6bbfcf2d3fb8c26979c780a91e06fb5b9a43831b2c0153d024"}, - {file = "tokenizers-0.15.2-cp310-none-win32.whl", hash = "sha256:064ff87bb6acdbd693666de9a4b692add41308a2c0ec0770d6385737117215f2"}, - {file = "tokenizers-0.15.2-cp310-none-win_amd64.whl", hash = "sha256:3b919afe4df7eb6ac7cafd2bd14fb507d3f408db7a68c43117f579c984a73843"}, - {file = "tokenizers-0.15.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:89cd1cb93e4b12ff39bb2d626ad77e35209de9309a71e4d3d4672667b4b256e7"}, - {file = "tokenizers-0.15.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:cfed5c64e5be23d7ee0f0e98081a25c2a46b0b77ce99a4f0605b1ec43dd481fa"}, - {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:a907d76dcfda37023ba203ab4ceeb21bc5683436ebefbd895a0841fd52f6f6f2"}, - {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20ea60479de6fc7b8ae756b4b097572372d7e4032e2521c1bbf3d90c90a99ff0"}, - {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:48e2b9335be2bc0171df9281385c2ed06a15f5cf121c44094338306ab7b33f2c"}, - {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:112a1dd436d2cc06e6ffdc0b06d55ac019a35a63afd26475205cb4b1bf0bfbff"}, - {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4620cca5c2817177ee8706f860364cc3a8845bc1e291aaf661fb899e5d1c45b0"}, - {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ccd73a82751c523b3fc31ff8194702e4af4db21dc20e55b30ecc2079c5d43cb7"}, - {file = "tokenizers-0.15.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:107089f135b4ae7817affe6264f8c7a5c5b4fd9a90f9439ed495f54fcea56fb4"}, - {file = "tokenizers-0.15.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0ff110ecc57b7aa4a594396525a3451ad70988e517237fe91c540997c4e50e29"}, - {file = "tokenizers-0.15.2-cp311-none-win32.whl", hash = "sha256:6d76f00f5c32da36c61f41c58346a4fa7f0a61be02f4301fd30ad59834977cc3"}, - {file = "tokenizers-0.15.2-cp311-none-win_amd64.whl", hash = "sha256:cc90102ed17271cf0a1262babe5939e0134b3890345d11a19c3145184b706055"}, - {file = "tokenizers-0.15.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f86593c18d2e6248e72fb91c77d413a815153b8ea4e31f7cd443bdf28e467670"}, - {file = "tokenizers-0.15.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0774bccc6608eca23eb9d620196687c8b2360624619623cf4ba9dc9bd53e8b51"}, - {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.whl", 
hash = "sha256:d0222c5b7c9b26c0b4822a82f6a7011de0a9d3060e1da176f66274b70f846b98"}, - {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3835738be1de66624fff2f4f6f6684775da4e9c00bde053be7564cbf3545cc66"}, - {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0143e7d9dcd811855c1ce1ab9bf5d96d29bf5e528fd6c7824d0465741e8c10fd"}, - {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:db35825f6d54215f6b6009a7ff3eedee0848c99a6271c870d2826fbbedf31a38"}, - {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3f5e64b0389a2be47091d8cc53c87859783b837ea1a06edd9d8e04004df55a5c"}, - {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e0480c452217edd35eca56fafe2029fb4d368b7c0475f8dfa3c5c9c400a7456"}, - {file = "tokenizers-0.15.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a33ab881c8fe70474980577e033d0bc9a27b7ab8272896e500708b212995d834"}, - {file = "tokenizers-0.15.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a308a607ca9de2c64c1b9ba79ec9a403969715a1b8ba5f998a676826f1a7039d"}, - {file = "tokenizers-0.15.2-cp312-none-win32.whl", hash = "sha256:b8fcfa81bcb9447df582c5bc96a031e6df4da2a774b8080d4f02c0c16b42be0b"}, - {file = "tokenizers-0.15.2-cp312-none-win_amd64.whl", hash = "sha256:38d7ab43c6825abfc0b661d95f39c7f8af2449364f01d331f3b51c94dcff7221"}, - {file = "tokenizers-0.15.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:38bfb0204ff3246ca4d5e726e8cc8403bfc931090151e6eede54d0e0cf162ef0"}, - {file = "tokenizers-0.15.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9c861d35e8286a53e06e9e28d030b5a05bcbf5ac9d7229e561e53c352a85b1fc"}, - {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:936bf3842db5b2048eaa53dade907b1160f318e7c90c74bfab86f1e47720bdd6"}, - {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:620beacc3373277700d0e27718aa8b25f7b383eb8001fba94ee00aeea1459d89"}, - {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2735ecbbf37e52db4ea970e539fd2d450d213517b77745114f92867f3fc246eb"}, - {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:473c83c5e2359bb81b0b6fde870b41b2764fcdd36d997485e07e72cc3a62264a"}, - {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:968fa1fb3c27398b28a4eca1cbd1e19355c4d3a6007f7398d48826bbe3a0f728"}, - {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:865c60ae6eaebdde7da66191ee9b7db52e542ed8ee9d2c653b6d190a9351b980"}, - {file = "tokenizers-0.15.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7c0d8b52664ab2d4a8d6686eb5effc68b78608a9008f086a122a7b2996befbab"}, - {file = "tokenizers-0.15.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:f33dfbdec3784093a9aebb3680d1f91336c56d86cc70ddf88708251da1fe9064"}, - {file = "tokenizers-0.15.2-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:d44ba80988ff9424e33e0a49445072ac7029d8c0e1601ad25a0ca5f41ed0c1d6"}, - {file = "tokenizers-0.15.2-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:dce74266919b892f82b1b86025a613956ea0ea62a4843d4c4237be2c5498ed3a"}, - {file = "tokenizers-0.15.2-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = 
"sha256:0ef06b9707baeb98b316577acb04f4852239d856b93e9ec3a299622f6084e4be"}, - {file = "tokenizers-0.15.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c73e2e74bbb07910da0d37c326869f34113137b23eadad3fc00856e6b3d9930c"}, - {file = "tokenizers-0.15.2-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4eeb12daf02a59e29f578a865f55d87cd103ce62bd8a3a5874f8fdeaa82e336b"}, - {file = "tokenizers-0.15.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9ba9f6895af58487ca4f54e8a664a322f16c26bbb442effd01087eba391a719e"}, - {file = "tokenizers-0.15.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ccec77aa7150e38eec6878a493bf8c263ff1fa8a62404e16c6203c64c1f16a26"}, - {file = "tokenizers-0.15.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3f40604f5042ff210ba82743dda2b6aa3e55aa12df4e9f2378ee01a17e2855e"}, - {file = "tokenizers-0.15.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:5645938a42d78c4885086767c70923abad047163d809c16da75d6b290cb30bbe"}, - {file = "tokenizers-0.15.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:05a77cbfebe28a61ab5c3891f9939cc24798b63fa236d84e5f29f3a85a200c00"}, - {file = "tokenizers-0.15.2-cp37-none-win32.whl", hash = "sha256:361abdc068e8afe9c5b818769a48624687fb6aaed49636ee39bec4e95e1a215b"}, - {file = "tokenizers-0.15.2-cp37-none-win_amd64.whl", hash = "sha256:7ef789f83eb0f9baeb4d09a86cd639c0a5518528f9992f38b28e819df397eb06"}, - {file = "tokenizers-0.15.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:4fe1f74a902bee74a3b25aff180fbfbf4f8b444ab37c4d496af7afd13a784ed2"}, - {file = "tokenizers-0.15.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4c4b89038a684f40a6b15d6b09f49650ac64d951ad0f2a3ea9169687bbf2a8ba"}, - {file = "tokenizers-0.15.2-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:d05a1b06f986d41aed5f2de464c003004b2df8aaf66f2b7628254bcbfb72a438"}, - {file = "tokenizers-0.15.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:508711a108684111ec8af89d3a9e9e08755247eda27d0ba5e3c50e9da1600f6d"}, - {file = "tokenizers-0.15.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:daa348f02d15160cb35439098ac96e3a53bacf35885072611cd9e5be7d333daa"}, - {file = "tokenizers-0.15.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:494fdbe5932d3416de2a85fc2470b797e6f3226c12845cadf054dd906afd0442"}, - {file = "tokenizers-0.15.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c2d60f5246f4da9373f75ff18d64c69cbf60c3bca597290cea01059c336d2470"}, - {file = "tokenizers-0.15.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93268e788825f52de4c7bdcb6ebc1fcd4a5442c02e730faa9b6b08f23ead0e24"}, - {file = "tokenizers-0.15.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6fc7083ab404019fc9acafe78662c192673c1e696bd598d16dc005bd663a5cf9"}, - {file = "tokenizers-0.15.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:41e39b41e5531d6b2122a77532dbea60e171ef87a3820b5a3888daa847df4153"}, - {file = "tokenizers-0.15.2-cp38-none-win32.whl", hash = "sha256:06cd0487b1cbfabefb2cc52fbd6b1f8d4c37799bd6c6e1641281adaa6b2504a7"}, - {file = "tokenizers-0.15.2-cp38-none-win_amd64.whl", hash = "sha256:5179c271aa5de9c71712e31cb5a79e436ecd0d7532a408fa42a8dbfa4bc23fd9"}, - {file = "tokenizers-0.15.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:82f8652a74cc107052328b87ea8b34291c0f55b96d8fb261b3880216a9f9e48e"}, - 
{file = "tokenizers-0.15.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:02458bee6f5f3139f1ebbb6d042b283af712c0981f5bc50edf771d6b762d5e4f"}, - {file = "tokenizers-0.15.2-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:c9a09cd26cca2e1c349f91aa665309ddb48d71636370749414fbf67bc83c5343"}, - {file = "tokenizers-0.15.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:158be8ea8554e5ed69acc1ce3fbb23a06060bd4bbb09029431ad6b9a466a7121"}, - {file = "tokenizers-0.15.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1ddba9a2b0c8c81633eca0bb2e1aa5b3a15362b1277f1ae64176d0f6eba78ab1"}, - {file = "tokenizers-0.15.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3ef5dd1d39797044642dbe53eb2bc56435308432e9c7907728da74c69ee2adca"}, - {file = "tokenizers-0.15.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:454c203164e07a860dbeb3b1f4a733be52b0edbb4dd2e5bd75023ffa8b49403a"}, - {file = "tokenizers-0.15.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0cf6b7f1d4dc59af960e6ffdc4faffe6460bbfa8dce27a58bf75755ffdb2526d"}, - {file = "tokenizers-0.15.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2ef09bbc16519f6c25d0c7fc0c6a33a6f62923e263c9d7cca4e58b8c61572afb"}, - {file = "tokenizers-0.15.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c9a2ebdd2ad4ec7a68e7615086e633857c85e2f18025bd05d2a4399e6c5f7169"}, - {file = "tokenizers-0.15.2-cp39-none-win32.whl", hash = "sha256:918fbb0eab96fe08e72a8c2b5461e9cce95585d82a58688e7f01c2bd546c79d0"}, - {file = "tokenizers-0.15.2-cp39-none-win_amd64.whl", hash = "sha256:524e60da0135e106b254bd71f0659be9f89d83f006ea9093ce4d1fab498c6d0d"}, - {file = "tokenizers-0.15.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:6a9b648a58281c4672212fab04e60648fde574877d0139cd4b4f93fe28ca8944"}, - {file = "tokenizers-0.15.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:7c7d18b733be6bbca8a55084027f7be428c947ddf871c500ee603e375013ffba"}, - {file = "tokenizers-0.15.2-pp310-pypy310_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:13ca3611de8d9ddfbc4dc39ef54ab1d2d4aaa114ac8727dfdc6a6ec4be017378"}, - {file = "tokenizers-0.15.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:237d1bf3361cf2e6463e6c140628e6406766e8b27274f5fcc62c747ae3c6f094"}, - {file = "tokenizers-0.15.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67a0fe1e49e60c664915e9fb6b0cb19bac082ab1f309188230e4b2920230edb3"}, - {file = "tokenizers-0.15.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4e022fe65e99230b8fd89ebdfea138c24421f91c1a4f4781a8f5016fd5cdfb4d"}, - {file = "tokenizers-0.15.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:d857be2df69763362ac699f8b251a8cd3fac9d21893de129bc788f8baaef2693"}, - {file = "tokenizers-0.15.2-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:708bb3e4283177236309e698da5fcd0879ce8fd37457d7c266d16b550bcbbd18"}, - {file = "tokenizers-0.15.2-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:64c35e09e9899b72a76e762f9854e8750213f67567787d45f37ce06daf57ca78"}, - {file = "tokenizers-0.15.2-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c1257f4394be0d3b00de8c9e840ca5601d0a4a8438361ce9c2b05c7d25f6057b"}, - {file = "tokenizers-0.15.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:02272fe48280e0293a04245ca5d919b2c94a48b408b55e858feae9618138aeda"}, - {file = "tokenizers-0.15.2-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:dc3ad9ebc76eabe8b1d7c04d38be884b8f9d60c0cdc09b0aa4e3bcf746de0388"}, - {file = "tokenizers-0.15.2-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:32e16bdeffa7c4f46bf2152172ca511808b952701d13e7c18833c0b73cb5c23f"}, - {file = "tokenizers-0.15.2-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:fb16ba563d59003028b678d2361a27f7e4ae0ab29c7a80690efa20d829c81fdb"}, - {file = "tokenizers-0.15.2-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:2277c36d2d6cdb7876c274547921a42425b6810d38354327dd65a8009acf870c"}, - {file = "tokenizers-0.15.2-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:1cf75d32e8d250781940d07f7eece253f2fe9ecdb1dc7ba6e3833fa17b82fcbc"}, - {file = "tokenizers-0.15.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1b3b31884dc8e9b21508bb76da80ebf7308fdb947a17affce815665d5c4d028"}, - {file = "tokenizers-0.15.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b10122d8d8e30afb43bb1fe21a3619f62c3e2574bff2699cf8af8b0b6c5dc4a3"}, - {file = "tokenizers-0.15.2-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d88b96ff0fe8e91f6ef01ba50b0d71db5017fa4e3b1d99681cec89a85faf7bf7"}, - {file = "tokenizers-0.15.2-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:37aaec5a52e959892870a7c47cef80c53797c0db9149d458460f4f31e2fb250e"}, - {file = "tokenizers-0.15.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e2ea752f2b0fe96eb6e2f3adbbf4d72aaa1272079b0dfa1145507bd6a5d537e6"}, - {file = "tokenizers-0.15.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:4b19a808d8799fda23504a5cd31d2f58e6f52f140380082b352f877017d6342b"}, - {file = "tokenizers-0.15.2-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:64c86e5e068ac8b19204419ed8ca90f9d25db20578f5881e337d203b314f4104"}, - {file = "tokenizers-0.15.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:de19c4dc503c612847edf833c82e9f73cd79926a384af9d801dcf93f110cea4e"}, - {file = "tokenizers-0.15.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea09acd2fe3324174063d61ad620dec3bcf042b495515f27f638270a7d466e8b"}, - {file = "tokenizers-0.15.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:cf27fd43472e07b57cf420eee1e814549203d56de00b5af8659cb99885472f1f"}, - {file = "tokenizers-0.15.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:7ca22bd897537a0080521445d91a58886c8c04084a6a19e6c78c586e0cfa92a5"}, - {file = "tokenizers-0.15.2.tar.gz", hash = "sha256:e6e9c6e019dd5484be5beafc775ae6c925f4c69a3487040ed09b45e13df2cb91"}, + {file = "tokenizers-0.19.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:952078130b3d101e05ecfc7fc3640282d74ed26bcf691400f872563fca15ac97"}, + {file = "tokenizers-0.19.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:82c8b8063de6c0468f08e82c4e198763e7b97aabfe573fd4cf7b33930ca4df77"}, + {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f03727225feaf340ceeb7e00604825addef622d551cbd46b7b775ac834c1e1c4"}, + {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:453e4422efdfc9c6b6bf2eae00d5e323f263fff62b29a8c9cd526c5003f3f642"}, + {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:02e81bf089ebf0e7f4df34fa0207519f07e66d8491d963618252f2e0729e0b46"}, + {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b07c538ba956843833fee1190cf769c60dc62e1cf934ed50d77d5502194d63b1"}, + {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e28cab1582e0eec38b1f38c1c1fb2e56bce5dc180acb1724574fc5f47da2a4fe"}, + {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b01afb7193d47439f091cd8f070a1ced347ad0f9144952a30a41836902fe09e"}, + {file = "tokenizers-0.19.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:7fb297edec6c6841ab2e4e8f357209519188e4a59b557ea4fafcf4691d1b4c98"}, + {file = "tokenizers-0.19.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2e8a3dd055e515df7054378dc9d6fa8c8c34e1f32777fb9a01fea81496b3f9d3"}, + {file = "tokenizers-0.19.1-cp310-none-win32.whl", hash = "sha256:7ff898780a155ea053f5d934925f3902be2ed1f4d916461e1a93019cc7250837"}, + {file = "tokenizers-0.19.1-cp310-none-win_amd64.whl", hash = "sha256:bea6f9947e9419c2fda21ae6c32871e3d398cba549b93f4a65a2d369662d9403"}, + {file = "tokenizers-0.19.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:5c88d1481f1882c2e53e6bb06491e474e420d9ac7bdff172610c4f9ad3898059"}, + {file = "tokenizers-0.19.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ddf672ed719b4ed82b51499100f5417d7d9f6fb05a65e232249268f35de5ed14"}, + {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:dadc509cc8a9fe460bd274c0e16ac4184d0958117cf026e0ea8b32b438171594"}, + {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dfedf31824ca4915b511b03441784ff640378191918264268e6923da48104acc"}, + {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ac11016d0a04aa6487b1513a3a36e7bee7eec0e5d30057c9c0408067345c48d2"}, + {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:76951121890fea8330d3a0df9a954b3f2a37e3ec20e5b0530e9a0044ca2e11fe"}, + {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b342d2ce8fc8d00f376af068e3274e2e8649562e3bc6ae4a67784ded6b99428d"}, + {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d16ff18907f4909dca9b076b9c2d899114dd6abceeb074eca0c93e2353f943aa"}, + {file = "tokenizers-0.19.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:706a37cc5332f85f26efbe2bdc9ef8a9b372b77e4645331a405073e4b3a8c1c6"}, + {file = "tokenizers-0.19.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:16baac68651701364b0289979ecec728546133e8e8fe38f66fe48ad07996b88b"}, + {file = "tokenizers-0.19.1-cp311-none-win32.whl", hash = "sha256:9ed240c56b4403e22b9584ee37d87b8bfa14865134e3e1c3fb4b2c42fafd3256"}, + {file = "tokenizers-0.19.1-cp311-none-win_amd64.whl", hash = "sha256:ad57d59341710b94a7d9dbea13f5c1e7d76fd8d9bcd944a7a6ab0b0da6e0cc66"}, + {file = "tokenizers-0.19.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:621d670e1b1c281a1c9698ed89451395d318802ff88d1fc1accff0867a06f153"}, + {file = "tokenizers-0.19.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d924204a3dbe50b75630bd16f821ebda6a5f729928df30f582fb5aade90c818a"}, + {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:4f3fefdc0446b1a1e6d81cd4c07088ac015665d2e812f6dbba4a06267d1a2c95"}, + 
{file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9620b78e0b2d52ef07b0d428323fb34e8ea1219c5eac98c2596311f20f1f9266"}, + {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04ce49e82d100594715ac1b2ce87d1a36e61891a91de774755f743babcd0dd52"}, + {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c5c2ff13d157afe413bf7e25789879dd463e5a4abfb529a2d8f8473d8042e28f"}, + {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3174c76efd9d08f836bfccaca7cfec3f4d1c0a4cf3acbc7236ad577cc423c840"}, + {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c9d5b6c0e7a1e979bec10ff960fae925e947aab95619a6fdb4c1d8ff3708ce3"}, + {file = "tokenizers-0.19.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a179856d1caee06577220ebcfa332af046d576fb73454b8f4d4b0ba8324423ea"}, + {file = "tokenizers-0.19.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:952b80dac1a6492170f8c2429bd11fcaa14377e097d12a1dbe0ef2fb2241e16c"}, + {file = "tokenizers-0.19.1-cp312-none-win32.whl", hash = "sha256:01d62812454c188306755c94755465505836fd616f75067abcae529c35edeb57"}, + {file = "tokenizers-0.19.1-cp312-none-win_amd64.whl", hash = "sha256:b70bfbe3a82d3e3fb2a5e9b22a39f8d1740c96c68b6ace0086b39074f08ab89a"}, + {file = "tokenizers-0.19.1-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:bb9dfe7dae85bc6119d705a76dc068c062b8b575abe3595e3c6276480e67e3f1"}, + {file = "tokenizers-0.19.1-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:1f0360cbea28ea99944ac089c00de7b2e3e1c58f479fb8613b6d8d511ce98267"}, + {file = "tokenizers-0.19.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:71e3ec71f0e78780851fef28c2a9babe20270404c921b756d7c532d280349214"}, + {file = "tokenizers-0.19.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b82931fa619dbad979c0ee8e54dd5278acc418209cc897e42fac041f5366d626"}, + {file = "tokenizers-0.19.1-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e8ff5b90eabdcdaa19af697885f70fe0b714ce16709cf43d4952f1f85299e73a"}, + {file = "tokenizers-0.19.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e742d76ad84acbdb1a8e4694f915fe59ff6edc381c97d6dfdd054954e3478ad4"}, + {file = "tokenizers-0.19.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d8c5d59d7b59885eab559d5bc082b2985555a54cda04dda4c65528d90ad252ad"}, + {file = "tokenizers-0.19.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b2da5c32ed869bebd990c9420df49813709e953674c0722ff471a116d97b22d"}, + {file = "tokenizers-0.19.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:638e43936cc8b2cbb9f9d8dde0fe5e7e30766a3318d2342999ae27f68fdc9bd6"}, + {file = "tokenizers-0.19.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:78e769eb3b2c79687d9cb0f89ef77223e8e279b75c0a968e637ca7043a84463f"}, + {file = "tokenizers-0.19.1-cp37-none-win32.whl", hash = "sha256:72791f9bb1ca78e3ae525d4782e85272c63faaef9940d92142aa3eb79f3407a3"}, + {file = "tokenizers-0.19.1-cp37-none-win_amd64.whl", hash = "sha256:f3bbb7a0c5fcb692950b041ae11067ac54826204318922da754f908d95619fbc"}, + {file = "tokenizers-0.19.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:07f9295349bbbcedae8cefdbcfa7f686aa420be8aca5d4f7d1ae6016c128c0c5"}, + {file = "tokenizers-0.19.1-cp38-cp38-macosx_11_0_arm64.whl", 
hash = "sha256:10a707cc6c4b6b183ec5dbfc5c34f3064e18cf62b4a938cb41699e33a99e03c1"}, + {file = "tokenizers-0.19.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6309271f57b397aa0aff0cbbe632ca9d70430839ca3178bf0f06f825924eca22"}, + {file = "tokenizers-0.19.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ad23d37d68cf00d54af184586d79b84075ada495e7c5c0f601f051b162112dc"}, + {file = "tokenizers-0.19.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:427c4f0f3df9109314d4f75b8d1f65d9477033e67ffaec4bca53293d3aca286d"}, + {file = "tokenizers-0.19.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e83a31c9cf181a0a3ef0abad2b5f6b43399faf5da7e696196ddd110d332519ee"}, + {file = "tokenizers-0.19.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c27b99889bd58b7e301468c0838c5ed75e60c66df0d4db80c08f43462f82e0d3"}, + {file = "tokenizers-0.19.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bac0b0eb952412b0b196ca7a40e7dce4ed6f6926489313414010f2e6b9ec2adf"}, + {file = "tokenizers-0.19.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8a6298bde623725ca31c9035a04bf2ef63208d266acd2bed8c2cb7d2b7d53ce6"}, + {file = "tokenizers-0.19.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:08a44864e42fa6d7d76d7be4bec62c9982f6f6248b4aa42f7302aa01e0abfd26"}, + {file = "tokenizers-0.19.1-cp38-none-win32.whl", hash = "sha256:1de5bc8652252d9357a666e609cb1453d4f8e160eb1fb2830ee369dd658e8975"}, + {file = "tokenizers-0.19.1-cp38-none-win_amd64.whl", hash = "sha256:0bcce02bf1ad9882345b34d5bd25ed4949a480cf0e656bbd468f4d8986f7a3f1"}, + {file = "tokenizers-0.19.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:0b9394bd204842a2a1fd37fe29935353742be4a3460b6ccbaefa93f58a8df43d"}, + {file = "tokenizers-0.19.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4692ab92f91b87769d950ca14dbb61f8a9ef36a62f94bad6c82cc84a51f76f6a"}, + {file = "tokenizers-0.19.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6258c2ef6f06259f70a682491c78561d492e885adeaf9f64f5389f78aa49a051"}, + {file = "tokenizers-0.19.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c85cf76561fbd01e0d9ea2d1cbe711a65400092bc52b5242b16cfd22e51f0c58"}, + {file = "tokenizers-0.19.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:670b802d4d82bbbb832ddb0d41df7015b3e549714c0e77f9bed3e74d42400fbe"}, + {file = "tokenizers-0.19.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:85aa3ab4b03d5e99fdd31660872249df5e855334b6c333e0bc13032ff4469c4a"}, + {file = "tokenizers-0.19.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cbf001afbbed111a79ca47d75941e9e5361297a87d186cbfc11ed45e30b5daba"}, + {file = "tokenizers-0.19.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4c89aa46c269e4e70c4d4f9d6bc644fcc39bb409cb2a81227923404dd6f5227"}, + {file = "tokenizers-0.19.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:39c1ec76ea1027438fafe16ecb0fb84795e62e9d643444c1090179e63808c69d"}, + {file = "tokenizers-0.19.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c2a0d47a89b48d7daa241e004e71fb5a50533718897a4cd6235cb846d511a478"}, + {file = "tokenizers-0.19.1-cp39-none-win32.whl", hash = "sha256:61b7fe8886f2e104d4caf9218b157b106207e0f2a4905c9c7ac98890688aabeb"}, + {file = "tokenizers-0.19.1-cp39-none-win_amd64.whl", hash = 
"sha256:f97660f6c43efd3e0bfd3f2e3e5615bf215680bad6ee3d469df6454b8c6e8256"}, + {file = "tokenizers-0.19.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3b11853f17b54c2fe47742c56d8a33bf49ce31caf531e87ac0d7d13d327c9334"}, + {file = "tokenizers-0.19.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d26194ef6c13302f446d39972aaa36a1dda6450bc8949f5eb4c27f51191375bd"}, + {file = "tokenizers-0.19.1-pp310-pypy310_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:e8d1ed93beda54bbd6131a2cb363a576eac746d5c26ba5b7556bc6f964425594"}, + {file = "tokenizers-0.19.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca407133536f19bdec44b3da117ef0d12e43f6d4b56ac4c765f37eca501c7bda"}, + {file = "tokenizers-0.19.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce05fde79d2bc2e46ac08aacbc142bead21614d937aac950be88dc79f9db9022"}, + {file = "tokenizers-0.19.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:35583cd46d16f07c054efd18b5d46af4a2f070a2dd0a47914e66f3ff5efb2b1e"}, + {file = "tokenizers-0.19.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:43350270bfc16b06ad3f6f07eab21f089adb835544417afda0f83256a8bf8b75"}, + {file = "tokenizers-0.19.1-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b4399b59d1af5645bcee2072a463318114c39b8547437a7c2d6a186a1b5a0e2d"}, + {file = "tokenizers-0.19.1-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6852c5b2a853b8b0ddc5993cd4f33bfffdca4fcc5d52f89dd4b8eada99379285"}, + {file = "tokenizers-0.19.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bcd266ae85c3d39df2f7e7d0e07f6c41a55e9a3123bb11f854412952deacd828"}, + {file = "tokenizers-0.19.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ecb2651956eea2aa0a2d099434134b1b68f1c31f9a5084d6d53f08ed43d45ff2"}, + {file = "tokenizers-0.19.1-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:b279ab506ec4445166ac476fb4d3cc383accde1ea152998509a94d82547c8e2a"}, + {file = "tokenizers-0.19.1-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:89183e55fb86e61d848ff83753f64cded119f5d6e1f553d14ffee3700d0a4a49"}, + {file = "tokenizers-0.19.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b2edbc75744235eea94d595a8b70fe279dd42f3296f76d5a86dde1d46e35f574"}, + {file = "tokenizers-0.19.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:0e64bfde9a723274e9a71630c3e9494ed7b4c0f76a1faacf7fe294cd26f7ae7c"}, + {file = "tokenizers-0.19.1-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0b5ca92bfa717759c052e345770792d02d1f43b06f9e790ca0a1db62838816f3"}, + {file = "tokenizers-0.19.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f8a20266e695ec9d7a946a019c1d5ca4eddb6613d4f466888eee04f16eedb85"}, + {file = "tokenizers-0.19.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63c38f45d8f2a2ec0f3a20073cccb335b9f99f73b3c69483cd52ebc75369d8a1"}, + {file = "tokenizers-0.19.1-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:dd26e3afe8a7b61422df3176e06664503d3f5973b94f45d5c45987e1cb711876"}, + {file = "tokenizers-0.19.1-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:eddd5783a4a6309ce23432353cdb36220e25cbb779bfa9122320666508b44b88"}, + {file = "tokenizers-0.19.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:56ae39d4036b753994476a1b935584071093b55c7a72e3b8288e68c313ca26e7"}, + 
{file = "tokenizers-0.19.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:f9939ca7e58c2758c01b40324a59c034ce0cebad18e0d4563a9b1beab3018243"}, + {file = "tokenizers-0.19.1-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6c330c0eb815d212893c67a032e9dc1b38a803eccb32f3e8172c19cc69fbb439"}, + {file = "tokenizers-0.19.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec11802450a2487cdf0e634b750a04cbdc1c4d066b97d94ce7dd2cb51ebb325b"}, + {file = "tokenizers-0.19.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2b718f316b596f36e1dae097a7d5b91fc5b85e90bf08b01ff139bd8953b25af"}, + {file = "tokenizers-0.19.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:ed69af290c2b65169f0ba9034d1dc39a5db9459b32f1dd8b5f3f32a3fcf06eab"}, + {file = "tokenizers-0.19.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f8a9c828277133af13f3859d1b6bf1c3cb6e9e1637df0e45312e6b7c2e622b1f"}, + {file = "tokenizers-0.19.1.tar.gz", hash = "sha256:ee59e6680ed0fdbe6b724cf38bd70400a0c1dd623b07ac729087270caeac88e3"}, ] [package.dependencies] -huggingface_hub = ">=0.16.4,<1.0" +huggingface-hub = ">=0.16.4,<1.0" [package.extras] dev = ["tokenizers[testing]"] -docs = ["setuptools_rust", "sphinx", "sphinx_rtd_theme"] -testing = ["black (==22.3)", "datasets", "numpy", "pytest", "requests"] +docs = ["setuptools-rust", "sphinx", "sphinx-rtd-theme"] +testing = ["black (==22.3)", "datasets", "numpy", "pytest", "requests", "ruff"] [[package]] name = "tomli" @@ -2189,13 +2444,13 @@ files = [ [[package]] name = "tqdm" -version = "4.66.2" +version = "4.66.4" description = "Fast, Extensible Progress Meter" optional = false python-versions = ">=3.7" files = [ - {file = "tqdm-4.66.2-py3-none-any.whl", hash = "sha256:1ee4f8a893eb9bef51c6e35730cebf234d5d0b6bd112b0271e10ed7c24a02bd9"}, - {file = "tqdm-4.66.2.tar.gz", hash = "sha256:6cd52cdf0fef0e0f543299cfc96fec90d7b8a7e88745f411ec33eb44d5ed3531"}, + {file = "tqdm-4.66.4-py3-none-any.whl", hash = "sha256:b75ca56b413b030bc3f00af51fd2c1a1a5eac6a0c1cca83cbb37a5c52abce644"}, + {file = "tqdm-4.66.4.tar.gz", hash = "sha256:e4d936c9de8727928f3be6079590e97d9abfe8d39a590be678eb5919ffc186bb"}, ] [package.dependencies] @@ -2209,28 +2464,28 @@ telegram = ["requests"] [[package]] name = "traitlets" -version = "5.14.2" +version = "5.14.3" description = "Traitlets Python configuration system" optional = false python-versions = ">=3.8" files = [ - {file = "traitlets-5.14.2-py3-none-any.whl", hash = "sha256:fcdf85684a772ddeba87db2f398ce00b40ff550d1528c03c14dbf6a02003cd80"}, - {file = "traitlets-5.14.2.tar.gz", hash = "sha256:8cdd83c040dab7d1dee822678e5f5d100b514f7b72b01615b26fc5718916fdf9"}, + {file = "traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f"}, + {file = "traitlets-5.14.3.tar.gz", hash = "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7"}, ] [package.extras] docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] -test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0,<8.1)", "pytest-mock", "pytest-mypy-testing"] +test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0,<8.2)", "pytest-mock", "pytest-mypy-testing"] [[package]] name = "typing-extensions" -version = "4.10.0" +version = "4.11.0" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" files = [ - {file = 
"typing_extensions-4.10.0-py3-none-any.whl", hash = "sha256:69b1a937c3a517342112fb4c6df7e72fc39a38e7891a5730ed4985b5214b5475"}, - {file = "typing_extensions-4.10.0.tar.gz", hash = "sha256:b0abd7c89e8fb96f98db18d86106ff1d90ab692004eb746cf6eda2682f91b3cb"}, + {file = "typing_extensions-4.11.0-py3-none-any.whl", hash = "sha256:c1f94d72897edaf4ce775bb7558d5b79d8126906a14ea5ed1635921406c0387a"}, + {file = "typing_extensions-4.11.0.tar.gz", hash = "sha256:83f085bd5ca59c80295fc2a82ab5dac679cbe02b9f33f7d83af68e241bea51b0"}, ] [[package]] @@ -2250,6 +2505,47 @@ h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] +[[package]] +name = "watchdog" +version = "4.0.0" +description = "Filesystem events monitoring" +optional = false +python-versions = ">=3.8" +files = [ + {file = "watchdog-4.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:39cb34b1f1afbf23e9562501673e7146777efe95da24fab5707b88f7fb11649b"}, + {file = "watchdog-4.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c522392acc5e962bcac3b22b9592493ffd06d1fc5d755954e6be9f4990de932b"}, + {file = "watchdog-4.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6c47bdd680009b11c9ac382163e05ca43baf4127954c5f6d0250e7d772d2b80c"}, + {file = "watchdog-4.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8350d4055505412a426b6ad8c521bc7d367d1637a762c70fdd93a3a0d595990b"}, + {file = "watchdog-4.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c17d98799f32e3f55f181f19dd2021d762eb38fdd381b4a748b9f5a36738e935"}, + {file = "watchdog-4.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4986db5e8880b0e6b7cd52ba36255d4793bf5cdc95bd6264806c233173b1ec0b"}, + {file = "watchdog-4.0.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:11e12fafb13372e18ca1bbf12d50f593e7280646687463dd47730fd4f4d5d257"}, + {file = "watchdog-4.0.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5369136a6474678e02426bd984466343924d1df8e2fd94a9b443cb7e3aa20d19"}, + {file = "watchdog-4.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:76ad8484379695f3fe46228962017a7e1337e9acadafed67eb20aabb175df98b"}, + {file = "watchdog-4.0.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:45cc09cc4c3b43fb10b59ef4d07318d9a3ecdbff03abd2e36e77b6dd9f9a5c85"}, + {file = "watchdog-4.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:eed82cdf79cd7f0232e2fdc1ad05b06a5e102a43e331f7d041e5f0e0a34a51c4"}, + {file = "watchdog-4.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ba30a896166f0fee83183cec913298151b73164160d965af2e93a20bbd2ab605"}, + {file = "watchdog-4.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d18d7f18a47de6863cd480734613502904611730f8def45fc52a5d97503e5101"}, + {file = "watchdog-4.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2895bf0518361a9728773083908801a376743bcc37dfa252b801af8fd281b1ca"}, + {file = "watchdog-4.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:87e9df830022488e235dd601478c15ad73a0389628588ba0b028cb74eb72fed8"}, + {file = "watchdog-4.0.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6e949a8a94186bced05b6508faa61b7adacc911115664ccb1923b9ad1f1ccf7b"}, + {file = "watchdog-4.0.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6a4db54edea37d1058b08947c789a2354ee02972ed5d1e0dca9b0b820f4c7f92"}, + {file = "watchdog-4.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d31481ccf4694a8416b681544c23bd271f5a123162ab603c7d7d2dd7dd901a07"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_aarch64.whl", hash = 
"sha256:8fec441f5adcf81dd240a5fe78e3d83767999771630b5ddfc5867827a34fa3d3"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:6a9c71a0b02985b4b0b6d14b875a6c86ddea2fdbebd0c9a720a806a8bbffc69f"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:557ba04c816d23ce98a06e70af6abaa0485f6d94994ec78a42b05d1c03dcbd50"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:d0f9bd1fd919134d459d8abf954f63886745f4660ef66480b9d753a7c9d40927"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:f9b2fdca47dc855516b2d66eef3c39f2672cbf7e7a42e7e67ad2cbfcd6ba107d"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:73c7a935e62033bd5e8f0da33a4dcb763da2361921a69a5a95aaf6c93aa03a87"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:6a80d5cae8c265842c7419c560b9961561556c4361b297b4c431903f8c33b269"}, + {file = "watchdog-4.0.0-py3-none-win32.whl", hash = "sha256:8f9a542c979df62098ae9c58b19e03ad3df1c9d8c6895d96c0d51da17b243b1c"}, + {file = "watchdog-4.0.0-py3-none-win_amd64.whl", hash = "sha256:f970663fa4f7e80401a7b0cbeec00fa801bf0287d93d48368fc3e6fa32716245"}, + {file = "watchdog-4.0.0-py3-none-win_ia64.whl", hash = "sha256:9a03e16e55465177d416699331b0f3564138f1807ecc5f2de9d55d8f188d08c7"}, + {file = "watchdog-4.0.0.tar.gz", hash = "sha256:e3e7065cbdabe6183ab82199d7a4f6b3ba0a438c5a512a68559846ccb76a78ec"}, +] + +[package.extras] +watchmedo = ["PyYAML (>=3.10)"] + [[package]] name = "wcwidth" version = "0.2.13" @@ -2396,4 +2692,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p [metadata] lock-version = "2.0" python-versions = "<3.13,>=3.10" -content-hash = "9f90e0b50761687dd70040e3d1bde55558e2d3e2743440deb63d4b37d0a4b051" +content-hash = "ff99446b72f4c067feddd1ac3abf93f984b8d6e29ac96adde3633b804871c538" diff --git a/pyproject.toml b/pyproject.toml index f183211..a01b84a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,6 +6,9 @@ authors = ["Nick Landers "] license = "MIT" repository = "https://github.com/dreadnode/rigging" readme = "README.md" +packages = [ + {include = "rigging"} +] [tool.poetry.dependencies] python = "<3.13,>=3.10" @@ -20,6 +23,13 @@ mypy = "^1.8.0" ruff = "^0.1.14" pytest = "^8.0.0" +[tool.poetry.group.docs.dependencies] +mkdocs = "^1.6.0" +mkdocs-material = "^9.5.20" +mkdocstrings = "^0.25.0" +mkdocstrings-python = "^1.10.0" +mkdocs-section-index = "^0.3.9" + [build-system] requires = ["poetry-core"] build-backend = "poetry.core.masonry.api" diff --git a/rigging/chat.py b/rigging/chat.py index 1f27b7b..149c210 100644 --- a/rigging/chat.py +++ b/rigging/chat.py @@ -1,3 +1,7 @@ +""" +Chats are used pre and post generation to hold messages, and are the primary way to interact with the generator. +""" + import asyncio import typing as t from copy import deepcopy @@ -31,6 +35,18 @@ class Chat(BaseModel): + """ + Represents a completed chat conversation. + + Attributes: + uuid (UUID): The unique identifier for the chat. + timestamp (datetime): The timestamp when the chat was created. + messages (list[Message]): The list of messages prior to generation. + next_messages (list[Message]): The list of messages resulting from the generation. + pending (Optional[PendingChat]): The pending chat associated with the chat. 
+ generator_id (Optional[str]): The identifier of the generator used to create the chat. + """ + model_config = ConfigDict(arbitrary_types_allowed=True) uuid: UUID = Field(default_factory=uuid4) @@ -53,6 +69,16 @@ def __init__( pending: t.Optional["PendingChat"] = None, **kwargs: t.Any, ): + """ + Initialize a Chat object. + + Args: + messages (Messages): The messages for the chat. + next_messages (Messages | None, optional): The next messages for the chat. Defaults to None. + pending (Optional[PendingChat], optional): The pending chat. Defaults to None. + **kwargs (Any): Additional keyword arguments (typically used for deserialization) + + """ from rigging.generator import get_generator if "generator_id" in kwargs and pending is None: @@ -71,21 +97,40 @@ def __len__(self) -> int: @property def all(self) -> list[Message]: + """Returns all messages in the chat, including the next messages.""" return self.messages + self.next_messages @property def prev(self) -> list[Message]: + """Alias for the .messages property""" return self.messages @property def next(self) -> list[Message]: + """Alias for the .next_messages property""" return self.next_messages @property def last(self) -> Message: + """Alias for .next_messages[-1]""" return self.next_messages[-1] def restart(self, *, generator: t.Optional["Generator"] = None, include_next: bool = False) -> "PendingChat": + """ + Attempt to convert back to a PendingChat for further generation. + + Args: + generator (Optional[Generator]): The generator to use for the restarted chat. Otherwise + the generator from the original PendingChat will be used. + include_next (bool): Whether to include the next messages in the restarted chat. Defaults to False. + + Returns: + PendingChat: The restarted chat. + + Raises: + ValueError: If the chat was not created with a PendingChat and no generator is provided. + """ + messages = self.all if include_next else self.messages if generator is not None: return generator.chat(messages) @@ -96,27 +141,59 @@ def restart(self, *, generator: t.Optional["Generator"] = None, include_next: bo def fork( self, messages: t.Sequence[Message] | t.Sequence[MessageDict] | Message | MessageDict | str ) -> "PendingChat": + """ + Forks the chat by calling [rigging.chat.Chat.restart][] and appending the specified messages. + + Args: + messages (Union[Sequence[Message], Sequence[MessageDict], Message, MessageDict, str]): + The messages to be added to the new `PendingChat` instance. + + Returns: + PendingChat: A new instance of `PendingChat` with the specified messages added. + + """ return self.restart().add(messages) def continue_(self, messages: t.Sequence[Message] | t.Sequence[MessageDict] | Message | str) -> "PendingChat": + """Alias for the [rigging.chat.Chat.fork][].""" return self.fork(messages) def clone(self) -> "Chat": + """Creates a deep copy of the chat.""" return Chat([m.model_copy() for m in self.messages], [m.model_copy() for m in self.next_messages], self.pending) def apply(self, **kwargs: str) -> "Chat": - self.messages[-1].apply(**kwargs) + """ + Calls [rigging.message.Message.apply][] on the last message in the chat with the given keyword arguments. + + Args: + **kwargs: The string mapping of replacements. + + Returns: + Chat: The modified Chat object. + """ + self.last.apply(**kwargs) return self def apply_to_all(self, **kwargs: str) -> "Chat": - for message in self.messages: + """ + Calls [rigging.message.Message.apply][] on all messages in the chat with the given keyword arguments.
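As a usage sketch of the `Chat` navigation and `restart`/`fork` semantics documented in this hunk (the model name and prompts are illustrative, and a configured LiteLLM-compatible API key is assumed):

```python
import rigging as rg

generator = rg.get_generator("gpt-3.5-turbo")
chat = generator.chat([{"role": "user", "content": "Name three prime numbers"}]).run()

# restart() converts the finished Chat back into a PendingChat (dropping the
# generated response), and fork() is restart() plus the new messages appended
branch = chat.fork({"role": "user", "content": "Now explain why they are prime"}).run()

print(branch.last.content)
```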
+ + Args: + **kwargs: The string mapping of replacements. + + Returns: + Chat: The modified chat object. + + """ + for message in self.all: message.apply(**kwargs) return self def strip(self, model_type: type[Model], fail_on_missing: bool = False) -> "Chat": new = self.clone() for message in new.all: - message.strip(model_type, fail_on_missing) + message.strip(model_type, fail_on_missing=fail_on_missing) return new def inject_system_content(self, content: str) -> Message: diff --git a/rigging/error.py b/rigging/error.py index 9e6e272..821e743 100644 --- a/rigging/error.py +++ b/rigging/error.py @@ -1,14 +1,33 @@ +""" +We try to avoid creating custom exceptions unless they are necessary. + +We use the built-in and pydantic exceptions as much as possible. +""" + + class ExhaustedMaxRoundsError(Exception): + """ + Raised when the maximum number of rounds is exceeded while generating. + """ + def __init__(self, max_rounds: int): super().__init__(f"Exhausted max rounds ({max_rounds}) while generating") self.max_rounds = max_rounds class InvalidModelSpecifiedError(Exception): + """ + Raised when an invalid identifier is specified when getting a generator. + """ + def __init__(self, model: str): super().__init__(f"Invalid model specified: {model}") class MissingModelError(Exception): + """ + Raised when a model is missing when parsing a message. + """ + def __init__(self, content: str): super().__init__(content) diff --git a/rigging/generator.py b/rigging/generator.py index fa0a721..fa804c8 100644 --- a/rigging/generator.py +++ b/rigging/generator.py @@ -1,3 +1,7 @@ +""" +Generators produce completions for a given set of messages or text. +""" + import abc import typing as t @@ -27,7 +31,22 @@ # update our interfaces to support that class GenerateParams(BaseModel): """ - Common parameters for generating text using a language model. + Parameters for generating text using a language model. + + These are designed to generally overlap with underlying + APIs like litellm, but will be extended as needed. + + Attributes: + temperature (float | None): The sampling temperature. + max_tokens (int | None): The maximum number of tokens to generate. + top_p (float | None): The nucleus sampling probability. + stop (list[str] | None): A list of stop sequences to stop generation at. + presence_penalty (float | None): The presence penalty. + frequency_penalty (float | None): The frequency penalty. + api_base (str | None): The base URL for the API. + timeout (int | None): The timeout for the API request. + seed (int | None): The seed. + extra (dict[str, t.Any]): Extra parameters. """ model_config = ConfigDict(extra="forbid") @@ -41,6 +60,7 @@ class GenerateParams(BaseModel): api_base: str | None = None timeout: int | None = None seed: int | None = None + extra: dict[str, t.Any] = Field(default_factory=dict) @field_validator("stop", mode="before") def validate_stop(cls, value: t.Any) -> t.Any: @@ -52,18 +72,46 @@ def validate_stop(cls, value: t.Any) -> t.Any: class Generator(BaseModel, abc.ABC): + """ + Base class for all rigging generators. + + This class provides common functionality and methods for generating completion messages. + + Attributes: + model (str): The model used by the generator. + api_key (str | None): The API key used for authentication. Defaults to None. + params (GenerateParams): The parameters used for generating completion messages. 
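Putting the error and parameter docstrings above together, a minimal sketch of how they might be used (the `logit_bias` key is a hypothetical provider-specific argument, not something this patch defines):

```python
from rigging.error import ExhaustedMaxRoundsError
from rigging.generator import GenerateParams

# Core sampling knobs map onto the attributes documented above
params = GenerateParams(temperature=0.9, max_tokens=512, stop=["Human:"])

# `extra` is the catch-all for provider-specific values outside the common fields
overloads = GenerateParams(extra={"logit_bias": {50256: -100}})

try:
    ...  # a constrained .run() against a generator would go here
except ExhaustedMaxRoundsError as error:
    print(f"Gave up after {error.max_rounds} rounds")
```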
+ """ + model: str api_key: str | None = Field(None, exclude=True) params: GenerateParams def to_identifier(self, overloads: GenerateParams | None = None) -> str: + """ + Converts the generator instance back into a rigging identifier string. + + Note: + Extra parameters are not supported in identifiers. + + Args: + overloads (GenerateParams | None, optional): The parameters to be used for generating the identifier. + + Returns: + str: The identifier string. + """ provider = next(name for name, klass in g_providers.items() if isinstance(self, klass)) params_dict = self._merge_params(overloads) if not params_dict: return f"{provider}!{self.model}" + if "extra" in params_dict: + logger.warning("Extra parameters are not supported in identifiers.") + params_dict.pop("extra") + if "stop" in params_dict: params_dict["stop"] = ";".join(params_dict["stop"]) + params = ",".join([f"{k}={v}" for k, v in params_dict.items()]) return f"{provider}!{self.model},{params}" @@ -79,13 +127,16 @@ def _merge_params(self, overloads: GenerateParams | None = None) -> dict[str, t. Returns: dict[str, t.Any]: The merged parameters. - """ params: dict[str, t.Any] = self.params.model_dump(exclude_unset=True) if self.params else {} if overloads is None: return params - for name, value in overloads.model_dump(exclude_unset=True).items(): + overloads_dict = overloads.model_dump(exclude_unset=True) + if "extra" in overloads_dict: + params.update(overloads_dict.pop("extra")) + + for name, value in overloads_dict.items(): if value is not None: params[name] = value @@ -134,7 +185,6 @@ def complete(self, messages: t.Sequence[Message], overloads: GenerateParams | No Returns: Message: The generated completion message. - """ ... @@ -149,13 +199,9 @@ async def acomplete(self, messages: t.Sequence[Message], overloads: GeneratePara Returns: Coroutine[None, None, Message]: A coroutine that yields completion messages. - """ ... - # These type overloads look unnecessary, but mypy - # doesn't pick up on MessageDict args for some reason - @t.overload def chat(self, messages: t.Sequence[MessageDict]) -> PendingChat: ... @@ -168,7 +214,7 @@ def chat( self, messages: t.Sequence[MessageDict] | t.Sequence[Message] | str, overloads: GenerateParams | None = None ) -> PendingChat: """ - Initiates a pending chat with the given messages and optional overloads. + Builds a pending chat with the given messages and optional overloads. Args: messages (Sequence[MessageDict] | Sequence[Message] | str): The messages to be sent in the chat. @@ -176,7 +222,6 @@ def chat( Returns: PendingChat: Pending chat to run. - """ return PendingChat(self, Message.fit_as_list(messages), overloads) @@ -219,24 +264,50 @@ def chat( Returns: PendingChat: Pending chat to run. - """ return PendingChat(generator, Message.fit_as_list(messages), overloads) def trace_messages(messages: t.Sequence[Message], title: str) -> None: + """ + Helper function to trace log a sequence of Message objects. + + Args: + messages (Sequence[Message]): A sequence of Message objects to be logged. + title (str): The title to be displayed in the log. + + Returns: + None + """ logger.trace(f"--- {title} ---") logger.trace("\n".join([str(msg) for msg in messages])) logger.trace("---") def trace_str(content: str, title: str) -> None: + """ + Helper function to trace log a string. + + Parameters: + content (str): The string content to be logged. + title (str): The title of the log entry. 
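Reading `_merge_params` together with `chat()` above, the intended flow is roughly: identifier-level params act as generator defaults and per-call overloads win. A small sketch under that assumption (model name illustrative):

```python
import rigging as rg

# Params in the identifier act as generator-level defaults ...
base = rg.get_generator("gpt-3.5-turbo,temperature=0.2")

# ... and per-call overloads are merged on top at generation time
pending = base.chat("Write a haiku about lock files", overloads=rg.GenerateParams(temperature=1.0))
chat = pending.run()
```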
+ + Returns: + None + """ logger.trace(f"--- {title} ---") logger.trace(content) logger.trace("---") class LiteLLMGenerator(Generator): + """ + Generator backed by the LiteLLM library. + + !!! note + Find more information about supported models and formats [in their docs](https://docs.litellm.ai/docs/providers). + """ + def complete(self, messages: t.Sequence[Message], overloads: GenerateParams | None = None) -> Message: trace_messages(messages, "Conversations") @@ -286,6 +357,18 @@ async def acomplete_text(self, text: str, overloads: GenerateParams | None = Non def get_identifier(generator: Generator, overloads: GenerateParams | None = None) -> str: + """ + Returns the identifier for the given generator. + + Delegates to [rigging.generator.Generator.to_identifier][]. + + Args: + generator (Generator): The generator object. + overloads (GenerateParams | None, optional): The generate parameters. Defaults to None. + + Returns: + str: The identifier for the generator. + """ return generator.to_identifier(overloads) @@ -293,26 +376,32 @@ def get_generator(identifier: str) -> Generator: """ Get a generator by an identifier string. Uses LiteLLM by default. - <provider>!<model>,<**kwargs> + Identifier strings are formatted like `<provider>!<model>,<**kwargs>` (provider is optional and defaults to "litellm" if not specified) - :param identifier: The identifier string to use to get a generator - :return: The generator + Examples: - :raises InvalidModelSpecified: If the identifier is invalid + - "gpt-3.5-turbo" -> `LiteLLMGenerator(model="gpt-3.5-turbo")` + - "litellm!claude-2.1" -> `LiteLLMGenerator(model="claude-2.1")` + - "mistral/mistral-tiny" -> `LiteLLMGenerator(model="mistral/mistral-tiny")` You can also specify arguments to the generator by comma-separating them: - "mistral/mistral-medium,max_tokens=1024" - "gpt-4-0613,temperature=0.9,max_tokens=512" - "claude-2.1,stop_sequences=Human:;test,max_tokens=100" + - "mistral/mistral-medium,max_tokens=1024" + - "gpt-4-0613,temperature=0.9,max_tokens=512" + - "claude-2.1,stop_sequences=Human:;test,max_tokens=100" (These get parsed as GenerateParams) + (These get parsed as [rigging.generator.GenerateParams][]) + + Args: + identifier (str): The identifier string to use to get a generator. + + Returns: + Generator: The generator object. + + Raises: + InvalidModelSpecifiedError: If the identifier is invalid. """ provider: str = list(g_providers.keys())[0] @@ -345,6 +434,8 @@ def register_generator(provider: str, generator_cls: type[Generator]) -> None: """ Register a generator class for a provider id. + This lets you use [rigging.generator.get_generator][] with a custom generator class. + Args: provider (str): The name of the provider. generator_cls (type[Generator]): The generator class to register. diff --git a/rigging/logging.py b/rigging/logging.py index 7ba1127..c1d2f2b 100644 --- a/rigging/logging.py +++ b/rigging/logging.py @@ -1,3 +1,7 @@ +""" +We use loguru for logging. This module provides a function to configure the logging settings. +""" + import pathlib import sys import typing as t @@ -15,6 +19,25 @@ def configure_logging( log_file: pathlib.Path | None = None, log_file_level: LogLevelLiteral = "debug", ) -> None: + """ + Configures the loguru settings for the rigging module.
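A quick sketch of the identifier round trip described in this hunk; based on the docstrings above, parameter ordering in the output should follow `GenerateParams` field order, and `extra` values are dropped with a warning:

```python
import rigging as rg

generator = rg.get_generator("litellm!gpt-4-0613,temperature=0.9,max_tokens=512")
print(generator.to_identifier())
# litellm!gpt-4-0613,temperature=0.9,max_tokens=512
```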
+ + This is optional; calling `logger.enable("rigging")` will enable logging, + and you can control the formatting and log levels using the loguru API. + + Args: + log_level (str): The desired log level. Valid values are 'TRACE', 'DEBUG', 'INFO', + 'SUCCESS', 'WARNING', 'ERROR', and 'CRITICAL'. + log_file (pathlib.Path | None, optional): The path to the log file. If None, logging + will only be done to the console. Defaults to None. + log_file_level (LogLevelLiteral, optional): The log level for the log file. Valid values + are 'TRACE', 'DEBUG', 'INFO', 'SUCCESS', 'WARNING', 'ERROR', and 'CRITICAL'. + Defaults to 'debug'. + + Returns: + None: This function does not return anything. + + """ global g_configured if g_configured: diff --git a/rigging/message.py b/rigging/message.py index 6b28885..c939fa9 100644 --- a/rigging/message.py +++ b/rigging/message.py @@ -1,3 +1,7 @@ +""" +This module covers core message objects and handling. +""" + import string import typing as t @@ -21,6 +25,14 @@ # Helper type for messages structured # more similarly to other libraries class MessageDict(t.TypedDict): + """ + Helper to represent a [rigging.message.Message][] as a dictionary. + + Attributes: + role (Role): The role of the message. + content (str): The content of the message. + """ + role: Role content: str @@ -28,6 +40,14 @@ # Structured portion of a message with # a slice indicating where is it located class ParsedMessagePart(BaseModel): + """ + Represents a parsed message part. + + Attributes: + model (SerializeAsAny[Model]): The rigging/pydantic model associated with the message part. + slice_ (slice): The slice representing the range into the message content. + """ + model_config = ConfigDict(arbitrary_types_allowed=True) model: SerializeAsAny[Model] @@ -48,6 +68,15 @@ def validate_slice(cls, value: t.Any) -> slice: class Message(BaseModel): + """ + Represents a message with role, content, and parsed message parts. + + Attributes: + role (Role): The role of the message. + content (str): The content of the message. + parts (List[ParsedMessagePart], optional): List of parsed part objects. + """ + role: Role parts: list[ParsedMessagePart] = Field(default_factory=list) @@ -139,10 +168,34 @@ def content(self, value: str) -> None: self._content = value def apply(self, **kwargs: str) -> None: + """ + Applies the given keyword arguments with string templating to the content of the message. + + Uses [string.Template.safe_substitute](https://docs.python.org/3/library/string.html#string.Template.safe_substitute) underneath. + + Args: + **kwargs: Keyword arguments to substitute in the message content. + + Returns: + None + """ template = string.Template(self.content) self.content = template.safe_substitute(**kwargs) - def strip(self, model_type: type[Model], fail_on_missing: bool = False) -> list[ParsedMessagePart]: + def strip(self, model_type: type[Model], *, fail_on_missing: bool = False) -> list[ParsedMessagePart]: + """ + Removes and returns a list of ParsedMessagePart objects from the message that match the specified model type. + + Args: + model_type (type[Model]): The type of model to match. + fail_on_missing (bool, optional): If True, raises a TypeError if no matching model is found. Defaults to False. + + Returns: + list[ParsedMessagePart]: A list of removed ParsedMessagePart objects. + + Raises: + TypeError: If no matching model is found and fail_on_missing is True.
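For example, `Message.apply` as documented in this hunk is plain `$var` templating. A minimal sketch:

```python
from rigging.message import Message

message = Message(role="user", content="Hello $name, summarize $topic in one line")
message.apply(name="Ada", topic="tokenizers")  # safe_substitute leaves unknown $vars intact
print(message.content)
# Hello Ada, summarize tokenizers in one line
```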
+ """ + removed: list[ParsedMessagePart] = [] for part in self.parts[:]: if isinstance(part.model, model_type): @@ -156,35 +209,113 @@ def strip(self, model_type: type[Model], fail_on_missing: bool = False) -> list[ @property def models(self) -> list[Model]: + """Returns a list of models parsed from the message.""" return [part.model for part in self.parts] def parse(self, model_type: type[ModelT]) -> ModelT: - for model in self.models: - if isinstance(model, model_type): - return model + """ + Parses a model from the message content. + + Args: + model_type (type[ModelT]): The type of model to parse. + + Returns: + ModelT: The parsed model. + + Raises: + MissingModelError: If no models of the given type are found. + """ + # TODO: We should validate, but I only predict problems + # grabbing existing models from the message. We should + # probably always just reparse - it's a cheap operation. + + # for model in self.models: + # if isinstance(model, model_type): + # return model return self.try_parse_many([model_type], fail_on_missing=True)[0] def try_parse(self, model_type: type[ModelT]) -> ModelT | None: - for model in self.models: - if isinstance(model, model_type): - return model + """ + Tries to parse a model from the message content. + + Args: + model_type (type[ModelT]): The type of model to search for. + + Returns: + ModelT | None: The first model that matches the given model type, or None if no match is found. + """ + # for model in self.models: + # if isinstance(model, model_type): + # return model return next(iter(self.try_parse_many([model_type])), None) def parse_set(self, model_type: type[ModelT], minimum: int | None = None) -> list[ModelT]: + """ + Parses a set of models of the specified identical type from the message content. + + Args: + model_type (type[ModelT]): The type of models to parse. + minimum (int | None, optional): The minimum number of models required. Defaults to None. + + Returns: + list[ModelT]: A list of parsed models. + + Raises: + MissingModelError: If the minimum number of models is not met. + """ return self.try_parse_set(model_type, minimum=minimum, fail_on_missing=True) def try_parse_set( self, model_type: type[ModelT], minimum: int | None = None, fail_on_missing: bool = False ) -> list[ModelT]: + """ + Tries to parse a set of models from the message content. + + Args: + model_type (type[ModelT]): The type of model to parse. + minimum (int | None, optional): The minimum number of models expected. Defaults to None. + fail_on_missing (bool, optional): Whether to raise an exception if models are missing. Defaults to False. + + Returns: + list[ModelT]: The parsed models. + + Raises: + MissingModelError: If the number of parsed models is less than the minimum required. + """ models = self.try_parse_many([model_type], fail_on_missing=fail_on_missing) if minimum is not None and len(models) < minimum: raise MissingModelError(f"Expected at least {minimum} {model_type.__name__} in message") return models def parse_many(self, types: t.Sequence[type[ModelT]]) -> list[ModelT]: + """ + Parses multiple models of the specified non-identical types from the message content. + + Args: + types (Sequence[type[ModelT]]): A sequence of model types to parse. + + Returns: + list[ModelT]: A list of parsed models. + + Raises: + MissingModelError: If any of the models are missing.
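Taken together, the parse family documented here behaves like the following sketch (assuming, as with the other models in this series, that the XML tag derives from the class name):

```python
import rigging as rg

class Item(rg.Model):
    content: str

message = rg.Message(role="assistant", content="<item>one</item><item>two</item>")

items = message.parse_set(Item, minimum=2)  # raises MissingModelError below the minimum
maybe = message.try_parse(Item)             # returns None instead of raising
```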
+ """ + return self.try_parse_many(types, fail_on_missing=True) def try_parse_many(self, types: t.Sequence[type[ModelT]], fail_on_missing: bool = False) -> list[ModelT]: + """ + Tries to parse multiple models from the content of the message. + + Args: + types (Sequence[type[ModelT]]): A sequence of model types to parse. + fail_on_missing (bool, optional): Whether to raise an exception if a model type is missing. Defaults to False. + + Returns: + list[ModelT]: A list of parsed models. + + Raises: + MissingModelError: If a model type is missing and `fail_on_missing` is True. + """ model: ModelT parsed: list[ModelT] = [] for model_class in types: @@ -202,6 +333,18 @@ def try_parse_many(self, types: t.Sequence[type[ModelT]], fail_on_missing: bool def from_model( cls: type["Message"], models: Model | t.Sequence[Model], role: Role = "user", suffix: str | None = None ) -> "Message": + """ + Create a Message object from one or more Model objects. + + Args: + cls (type["Message"]): The class of the Message object. + models (Model | t.Sequence[Model]): The Model object(s) to convert to a Message. + role (Role, optional): The role of the Message. Defaults to "user". + suffix (str | None, optional): A suffix to append to the content. Defaults to None. + + Returns: + Message: The created Message object. + """ parts: list[ParsedMessagePart] = [] content: str = "" for model in models if isinstance(models, list) else [models]: @@ -219,12 +362,14 @@ def from_model( def fit_as_list( cls, messages: t.Sequence[MessageDict] | t.Sequence["Message"] | MessageDict | "Message" | str ) -> list["Message"]: + """Helper function to convert various common types to a strict list of Message objects.""" if isinstance(messages, Message | dict | str): return [cls.fit(messages)] return [cls.fit(message) for message in messages] @classmethod def fit(cls, message: t.Union["Message", MessageDict, str]) -> "Message": + """Helper function to convert various common types to a Message object.""" if isinstance(message, str): return cls(role="user", content=message) return cls(**message) if isinstance(message, dict) else message diff --git a/rigging/tool.py b/rigging/tool.py index 18deb25..d17282c 100644 --- a/rigging/tool.py +++ b/rigging/tool.py @@ -1,4 +1,7 @@ -import abc +""" +This module handles tool interaction with rigging generation. +""" + import inspect import typing as t @@ -57,6 +60,7 @@ class ToolCalls(Model, tag="tool_calls"): # TODO: We should consider building a base model # interface for both simple tags () # and full examples will filled in template vars + @classmethod def xml_example(cls) -> str: return cls( @@ -124,19 +128,58 @@ class ToolResults(Model, tag="tool_results"): # -class Tool(abc.ABC): - # TODO: I don't love having these defined as property getters, - # I would prefer to have them as class attributes, but I'm not - # sure how we can hint/enforce that to derived classes - @property - @abc.abstractmethod - def name(self) -> str: - ... +class Tool: + """ + Base class for implementing tools in the Rigging system. + + You should subclass this to define your own tools: + + ```python + class Hammer(Tool): + name = "Hammer" + description = "A tool for hitting things." + + def hit(self, target: Annotated[str, "Target of the hit"]) -> str: + return f"Hit {target} with a hammer." + + chat = generator.chat(...).using(Hammer()).run() + ``` + + !!! note + The `name` and `description` attributes are required and can be defined + as class attributes or properties.
If you define them as properties, + you must also define a getter for them. + + !!! note + All functions on the tool must have type hints for their parameters and + use the `Annotated` type hint to provide a description for each parameter. + + Attributes: + name (str): The name of the tool. + description (str): A description of the tool. + """ + + name: str + description: str + + def __init_subclass__(cls, *, name: str | None = None, description: str | None = None, **kwargs: t.Any) -> None: + super().__init_subclass__(**kwargs) + if name is not None: + cls.name = name + if description is not None: + cls.description = description + + # Ensure name and description are defined + if not (hasattr(cls, "name") or hasattr(cls, "name_property")): + raise TypeError(f"{cls.__name__} must define 'name' attribute or 'name' property.") + if not (hasattr(cls, "description") or hasattr(cls, "description_property")): + raise TypeError(f"{cls.__name__} must define 'description' attribute or 'description' property.") - @property - @abc.abstractmethod - def description(self) -> str: - ... + # Check that they aren't empty or unset + if not getattr(cls, "name", None): + raise ValueError(f"{cls.__name__}.name must not be empty.") + if not getattr(cls, "description", None): + raise ValueError(f"{cls.__name__}.description must not be empty.") # TODO: We could alternatively use the get_description() # object and check against that (or even cast into it first) From 52cbf2e7429b7295fd8d2babd3a7e76b0cf96b32 Mon Sep 17 00:00:00 2001 From: monoxgas Date: Sat, 4 May 2024 13:22:55 -0600 Subject: [PATCH 11/16] Added completions api More work on batch support Lots of docstring updates Separated parsing into its own module --- rigging/__init__.py | 7 +- rigging/chat.py | 372 ++++++++++++++++++++++++++++----- rigging/completion.py | 459 +++++++++++++++++++++++++++++++++++++++++ rigging/generator.py | 465 +++++++++++++++++++++++++++++++----------- rigging/logging.py | 2 + rigging/message.py | 45 ++-- rigging/model.py | 61 +++++- rigging/parsing.py | 127 ++++++++++++ rigging/tool.py | 4 +- 9 files changed, 1340 insertions(+), 202 deletions(-) create mode 100644 rigging/completion.py create mode 100644 rigging/parsing.py diff --git a/rigging/__init__.py b/rigging/__init__.py index fcff37d..7a83c57 100644 --- a/rigging/__init__.py +++ b/rigging/__init__.py @@ -1,5 +1,6 @@ from rigging.chat import Chat, PendingChat -from rigging.generator import GenerateParams, Generator, chat, get_generator +from rigging.completion import Completion, PendingCompletion +from rigging.generator import GenerateParams, Generator, chat, complete, get_generator from rigging.message import Message, MessageDict, Messages from rigging.model import Model, attr, element, wrapped from rigging.tool import Tool @@ -19,7 +20,9 @@ "Generator", "GenerateParams", "chat", - "achat", + "complete", + "Completion", + "PendingCompletion", ] from loguru import logger diff --git a/rigging/chat.py b/rigging/chat.py index 149c210..363dd8f 100644 --- a/rigging/chat.py +++ b/rigging/chat.py @@ -1,5 +1,7 @@ """ -Chats are used pre and post generation to hold messages, and are the primary way to interact with the generator. +Chats are used pre and post generation to hold messages. + +They are the primary way to interact with the generator. """ import asyncio @@ -42,7 +44,8 @@ class Chat(BaseModel): uuid (UUID): The unique identifier for the chat. timestamp (datetime): The timestamp when the chat was created. messages (list[Message]): The list of messages prior to generation.
@@ -42,7 +44,8 @@ class Chat(BaseModel):
         uuid (UUID): The unique identifier for the chat.
         timestamp (datetime): The timestamp when the chat was created.
         messages (list[Message]): The list of messages prior to generation.
-        next_messages (list[Message]): The list of messages resulting from the generation.
+        generated (list[Message]): The list of messages resulting from the generation.
+        metadata (dict[str, Any]): Additional metadata for the chat.
         pending (Optional[PendingChat]): The pending chat associated with the chat.
         generator_id (Optional[str]): The identifier of the generator used to create the chat
     """
@@ -52,7 +55,8 @@ class Chat(BaseModel):
     uuid: UUID = Field(default_factory=uuid4)
     timestamp: datetime = Field(default_factory=datetime.now, repr=False)
     messages: list[Message]
-    next_messages: list[Message] = Field(default_factory=list)
+    generated: list[Message] = Field(default_factory=list)
+    metadata: dict[str, t.Any] = Field(default_factory=dict)
 
     pending: t.Optional["PendingChat"] = Field(None, exclude=True, repr=False)
 
@@ -65,7 +69,7 @@ def generator_id(self) -> str | None:
     def __init__(
         self,
         messages: Messages,
-        next_messages: Messages | None = None,
+        generated: Messages | None = None,
         pending: t.Optional["PendingChat"] = None,
         **kwargs: t.Any,
     ):
@@ -74,7 +78,7 @@ def __init__(
 
         Args:
             messages (Messages): The messages for the chat.
-            next_messages (Messages | None, optional): The next messages for the chat. Defaults to None.
+            generated (Messages | None, optional): The generated messages for the chat. Defaults to None.
             pending (Optional[PendingChat], optional): The pending chat. Defaults to None.
             **kwargs (Any): Additional keyword arguments (typically used for deserialization)
         """
@@ -87,18 +91,18 @@ def __init__(
 
         super().__init__(
            messages=Message.fit_as_list(messages),
-            next_messages=Message.fit_as_list(next_messages) if next_messages is not None else [],
+            generated=Message.fit_as_list(generated) if generated is not None else [],
            pending=pending,
            **kwargs,
        )
 
     def __len__(self) -> int:
-        return len(self.messages) + len(self.next_messages)
+        return len(self.messages) + len(self.generated)
 
     @property
     def all(self) -> list[Message]:
         """Returns all messages in the chat, including the next messages."""
-        return self.messages + self.next_messages
+        return self.messages + self.generated
 
     @property
     def prev(self) -> list[Message]:
@@ -107,22 +111,40 @@ def prev(self) -> list[Message]:
 
     @property
     def next(self) -> list[Message]:
-        """Alias for the .next_messages property"""
-        return self.next_messages
+        """Alias for the .generated property"""
+        return self.generated
 
     @property
     def last(self) -> Message:
-        """Alias for .next_messages[-1]"""
-        return self.next_messages[-1]
+        """Alias for .generated[-1]"""
+        return self.generated[-1]
+
+    @property
+    def conversation(self) -> str:
+        """Returns a string representation of the chat."""
+        return "\n\n".join([str(m) for m in self.all])
 
-    def restart(self, *, generator: t.Optional["Generator"] = None, include_next: bool = False) -> "PendingChat":
+    def meta(self, **kwargs: t.Any) -> "Chat":
+        """
+        Updates the metadata of the chat with the provided key-value pairs.
+
+        Args:
+            **kwargs: Key-value pairs representing the metadata to be updated.
+
+        Returns:
+            Chat: The updated chat object.
+        """
+        self.metadata.update(kwargs)
+        return self
+
+    def restart(self, *, generator: t.Optional["Generator"] = None, include_all: bool = False) -> "PendingChat":
         """
         Attempt to convert back to a PendingChat for further generation.
 
         Args:
             generator (Optional[Generator]): The generator to use for the restarted chat. Otherwise
                 the generator from the original PendingChat will be used.
-            include_next (bool): Whether to include the next messages in the restarted chat. Defaults to False.
+            include_all (bool): Whether to include the generated messages in the restarted chat. Defaults to False.
 
         Returns:
             PendingChat: The restarted chat.
 
         Raises:
             ValueError: If the chat was not created with a PendingChat and no generator is provided.
         """
 
-        messages = self.all if include_next else self.messages
+        messages = self.all if include_all else self.messages
         if generator is not None:
             return generator.chat(messages)
         elif self.pending is None:
             raise ValueError("Cannot restart Chat that was not created with a PendingChat")
         return PendingChat(self.pending.generator, messages, self.pending.params)
 
     def fork(
-        self, messages: t.Sequence[Message] | t.Sequence[MessageDict] | Message | MessageDict | str
+        self,
+        messages: t.Sequence[Message] | t.Sequence[MessageDict] | Message | MessageDict | str,
+        *,
+        include_all: bool = False,
     ) -> "PendingChat":
         """
         Forks the chat by calling [rigging.chat.Chat.restart][] and appending the specified messages.
 
         Args:
             messages (Union[Sequence[Message], Sequence[MessageDict], Message, MessageDict, str]):
                 The messages to be added to the new `PendingChat` instance.
+            include_all (bool, optional): Whether to include the generated messages in the restarted chat. Defaults to False.
 
         Returns:
             PendingChat: A new instance of `PendingChat` with the specified messages added.
         """
-        return self.restart().add(messages)
+        return self.restart(include_all=include_all).add(messages)
 
     def continue_(self, messages: t.Sequence[Message] | t.Sequence[MessageDict] | Message | str) -> "PendingChat":
-        """Alias for the [rigging.chat.Chat.fork][]."""
-        return self.fork(messages)
+        """Alias for the [rigging.chat.Chat.fork][] with `include_all=True`."""
+        return self.fork(messages, include_all=True)
 
-    def clone(self) -> "Chat":
+    def clone(self, *, only_messages: bool = False) -> "Chat":
         """Creates a deep copy of the chat."""
-        return Chat([m.model_copy() for m in self.messages], [m.model_copy() for m in self.next_messages], self.pending)
+        new = Chat(
+            [m.model_copy() for m in self.messages],
+            [m.model_copy() for m in self.generated],
+            self.pending,
+        )
+        if not only_messages:
+            new.metadata = deepcopy(self.metadata)
+        return new
 
     def apply(self, **kwargs: str) -> "Chat":
         """
@@ -212,10 +245,28 @@ def inject_tool_prompt(self, tools: t.Sequence[Tool]) -> None:
 
 # Passed the next message, returns whether or not to continue
 # and an optional list of messages to append before continuing
-UntilCallback = t.Callable[[Message], tuple[bool, list[Message]]]
+UntilMessageCallback = t.Callable[[Message], tuple[bool, list[Message]]]
+
+ThenChatCallback = t.Callable[[Chat], Chat | None]
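`ThenChatCallback` is the hook consumed by the new `PendingChat.then()` method further below; a minimal sketch of a conforming callback (the refusal check is hypothetical):

```python
from rigging.chat import Chat

def tag_refusals(chat: Chat) -> Chat | None:
    # Returning a Chat replaces the result for later callbacks and for run();
    # returning None keeps the chat unchanged.
    if "cannot help" in chat.last.content.lower():
        return chat.meta(refusal=True)
    return None

# pending.then(tag_refusals).run()
```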
 class PendingChat:
+    """
+    Represents a pending chat that can be modified and executed.
+
+    Attributes:
+        generator (Generator): The generator object responsible for generating the chat.
+        chat (Chat): The chat object representing the conversation.
+        params (Optional[GenerateParams]): The parameters for generating the chat.
+        metadata (dict[str, Any]): Additional metadata associated with the chat.
+        until_callbacks (list[tuple[UntilMessageCallback, bool, bool, int]]): List of until message callbacks.
+        until_types (list[type[Model]]): List of until message types.
+        until_tools (list[Tool]): List of until tools.
+        inject_tool_prompt (bool): Flag indicating whether to inject tool prompts. Default is True.
+        force_tool (bool): Flag indicating whether to force the use of a tool. Default is False.
+        then_callbacks (list[ThenChatCallback]): List of callbacks to be executed after generation.
+    """
+
     def __init__(
         self, generator: "Generator", messages: t.Sequence[Message], params: t.Optional["GenerateParams"] = None
     ):
@@ -225,18 +276,45 @@ def __init__(
         self.metadata: dict[str, t.Any] = {}
 
         # (callback, attempt_recovery, drop_dialog, max_rounds)
-        self.until_callbacks: list[tuple[UntilCallback, bool, bool, int]] = []
+        self.until_callbacks: list[tuple[UntilMessageCallback, bool, bool, int]] = []
         self.until_types: list[type[Model]] = []
         self.until_tools: list[Tool] = []
         self.inject_tool_prompt: bool = True
         self.force_tool: bool = False
+        self.then_callbacks: list[ThenChatCallback] = []
 
     def overload(self, **kwargs: t.Any) -> "PendingChat":
+        """
+        Overloads the current chat with the given parameters.
+
+        This is a convenience method for calling `with_params(GenerateParams(**kwargs))`.
+
+        Note:
+            This will trigger a `clone` if overload params have already been set.
+
+        Args:
+            **kwargs: Keyword arguments representing the parameters to be overloaded.
+
+        Returns:
+            PendingChat: A new instance of PendingChat with the overloaded parameters.
+        """
         from rigging.generator import GenerateParams
 
         return self.with_params(GenerateParams(**kwargs))
 
     def with_params(self, params: "GenerateParams") -> "PendingChat":
+        """
+        Sets the generation parameter overloads for the chat.
+
+        Note:
+            This will trigger a `clone` if overload params have already been set.
+
+        Args:
+            params (GenerateParams): The parameters to set for the chat.
+
+        Returns:
+            PendingChat: A new instance of PendingChat with the updated parameters.
+        """
         if self.params is not None:
             new = self.clone()
             new.params = params
@@ -248,26 +326,57 @@ def with_params(self, params: "GenerateParams") -> "PendingChat":
     def add(
         self, messages: t.Sequence[MessageDict] | t.Sequence[Message] | MessageDict | Message | str
     ) -> "PendingChat":
+        """
+        Appends new message(s) to the internal chat before generation.
+
+        Note:
+            If the last message in the chat is the same role as the first new message,
+            the content will be appended instead of a new message being created.
+
+        Args:
+            messages (Union[Sequence[MessageDict], Sequence[Message], MessageDict, Message, str]):
+                The messages to be added to the chat. It can be a single message or a sequence of messages.
+
+        Returns:
+            PendingChat: The updated PendingChat object.
+        """
         message_list = Message.fit_as_list(messages)
         # If the last message is the same role as the first new message, append to it
         if self.chat.all and self.chat.all[-1].role == message_list[0].role:
             self.chat.all[-1].content += "\n" + message_list[0].content
             message_list = message_list[1:]
         else:
-            self.chat.next_messages += message_list
+            self.chat.generated += message_list
         return self
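A short sketch of the role-merge behavior documented on `add()` (assuming a `generator` as in the earlier snippets; the content is illustrative):

```python
pending = generator.chat([{"role": "user", "content": "First line."}])

# A bare string becomes another user message, so it is merged into the
# existing user message rather than appended as a new one.
pending.add("Second line.")
```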
     def fork(
         self, messages: t.Sequence[MessageDict] | t.Sequence[Message] | MessageDict | Message | str
     ) -> "PendingChat":
-        return self.clone().add(messages)
+        """
+        Creates a new instance of `PendingChat` by forking the current chat and adding the specified messages.
 
-    def continue_(
-        self, messages: t.Sequence[MessageDict] | t.Sequence[Message] | MessageDict | Message | str
-    ) -> "PendingChat":
-        return self.fork(messages)
+        This is a convenience method for calling `clone().add(messages)`.
+
+        Args:
+            messages: A sequence of messages or a single message to be added to the new chat.
+
+        Returns:
+            A new instance of `PendingChat` with the specified messages added.
+        """
+        return self.clone().add(messages)
 
     def clone(self, *, only_messages: bool = False) -> "PendingChat":
+        """
+        Creates a clone of the current `PendingChat` instance.
+
+        Args:
+            only_messages (bool, optional): If True, only the messages will be cloned.
+                If False (default), the entire `PendingChat` instance will be cloned
+                including until callbacks, types, and tools.
+
+        Returns:
+            PendingChat: A new instance of `PendingChat` that is a clone of the current instance.
+        """
         new = PendingChat(self.generator, [], self.params)
         new.chat = self.chat.clone()
         if not only_messages:
@@ -280,27 +389,111 @@ def clone(self, *, only_messages: bool = False) -> "PendingChat":
         return new
 
     def meta(self, **kwargs: t.Any) -> "PendingChat":
+        """
+        Updates the metadata of the chat with the provided key-value pairs.
+
+        Args:
+            **kwargs: Key-value pairs representing the metadata to be updated.
+
+        Returns:
+            PendingChat: The updated chat object.
+        """
         self.metadata.update(kwargs)
         return self
 
+    def then(self, callback: ThenChatCallback) -> "PendingChat":
+        """
+        Registers a callback to be executed after the generation process completes.
+
+        Note:
+            Returning a Chat object from the callback will replace the current chat
+            for the remainder of the callbacks and the return value of `run()`.
+
+        ```
+        def process(chat: Chat) -> Chat | None:
+            ...
+
+        pending.then(process).run()
+        ```
+
+        Args:
+            callback (ThenChatCallback): The callback function to be executed.
+
+        Returns:
+            PendingChat: The current instance of the chat.
+        """
+        self.then_callbacks.append(callback)
+        return self
+
     def apply(self, **kwargs: str) -> "PendingChat":
+        """
+        Clones this pending chat and calls [rigging.chat.Chat.apply][] with the given keyword arguments.
+
+        Args:
+            **kwargs: Keyword arguments to be applied to the chat.
+
+        Returns:
+            PendingChat: A new instance of PendingChat with the applied arguments.
+        """
         new = self.clone()
         new.chat.apply(**kwargs)
         return new
 
     def apply_to_all(self, **kwargs: str) -> "PendingChat":
+        """
+        Clones this pending chat and calls [rigging.chat.Chat.apply_to_all][] with the given keyword arguments.
+
+        Args:
+            **kwargs: Keyword arguments to be applied to the chat.
+
+        Returns:
+            PendingChat: A new instance of PendingChat with the applied arguments.
+        """
         new = self.clone()
         new.chat.apply_to_all(**kwargs)
         return new
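A sketch of the templating flow enabled by `apply()`, assuming it performs `string.Template` substitution as its completion counterpart does (the template values are hypothetical):

```python
pending = generator.chat("Tell me a $length joke about $topic.")

# apply() clones first, so the original pending chat is left untouched.
pending = pending.apply(length="short", topic="pirates")
```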
     def until(
         self,
-        callback: UntilCallback,
+        callback: UntilMessageCallback,
         *,
         attempt_recovery: bool = False,
         drop_dialog: bool = True,
         max_rounds: int = DEFAULT_MAX_ROUNDS,
     ) -> "PendingChat":
+        """
+        Registers a callback to participate in validating the generation process.
+
+        ```python
+        # Takes the next message being generated, and returns whether or not to continue
+        # generating new messages in addition to a list of messages to append before continuing
+
+        def callback(message: Message) -> tuple[bool, list[Message]]:
+            if is_valid(message):
+                return (False, [message])
+            else:
+                return (True, [message, ...])
+
+        pending.until(callback).run()
+        ```
+
+        Note:
+            In general, your callback function should always include the message that was passed to it.
+
+            Whether these messages get used or discarded in the next round depends on `attempt_recovery`.
+
+        Args:
+            callback (UntilMessageCallback): The callback function to be executed.
+            attempt_recovery (bool, optional): Whether to attempt recovery by continuing to append prior messages
+                before the next round of generation. Defaults to False.
+            drop_dialog (bool, optional): Whether to drop the intermediate dialog of recovery before returning
+                the final chat back to the caller. Defaults to True.
+            max_rounds (int, optional): The maximum number of rounds to attempt generation + callbacks
+                before giving up. Defaults to DEFAULT_MAX_ROUNDS.
+
+        Returns:
+            PendingChat: The current instance of the chat.
+        """
         self.until_callbacks.append((callback, attempt_recovery, drop_dialog, max_rounds))
         return self
 
@@ -314,6 +507,24 @@ def using(
         max_rounds: int = DEFAULT_MAX_ROUNDS,
         inject_prompt: bool | None = None,
     ) -> "PendingChat":
+        """
+        Adds a tool or a sequence of tools to participate in the generation process.
+
+        Args:
+            tool (Tool | Sequence[Tool]): The tool or sequence of tools to be added.
+            force (bool, optional): Whether to force the use of the tool(s) at least once. Defaults to False.
+            attempt_recovery (bool, optional): Whether to attempt recovery if the tool(s) fail by providing
+                validation feedback to the model before the next round. Defaults to True.
+            drop_dialog (bool, optional): Whether to drop the intermediate dialog of recovery efforts
+                before returning the final chat to the caller. Defaults to False.
+            max_rounds (int, optional): The maximum number of rounds to attempt recovery. Defaults to DEFAULT_MAX_ROUNDS.
+            inject_prompt (bool | None, optional): Whether to inject the tool guidance prompt into a
+                system message. Defaults to None and will override self.inject_tool_prompt if provided.
+
+        Returns:
+            PendingChat: The updated PendingChat object.
+
+        """
         self.until_tools += tool if isinstance(tool, t.Sequence) else [tool]
         self.inject_tool_prompt = inject_prompt or self.inject_tool_prompt
         self.force_tool = force
@@ -330,34 +541,49 @@ def using(
 
     def until_parsed_as(
         self,
-        types: type[ModelT] | t.Sequence[type[ModelT]],
-        *,
+        *types: type[ModelT],
         attempt_recovery: bool = False,
         drop_dialog: bool = True,
         max_rounds: int = DEFAULT_MAX_ROUNDS,
     ) -> "PendingChat":
-        self.until_types += types if isinstance(types, t.Sequence) else [types]
+        """
+        Adds the specified types to the list of types which should successfully parse
+        before the generation process completes.
+
+        Args:
+            *types (type[ModelT]): The type or types of models to wait for.
+            attempt_recovery (bool, optional): Whether to attempt recovery if parsing fails by providing
+                validation feedback to the model before the next round. Defaults to False.
+            drop_dialog (bool, optional): Whether to drop the intermediate dialog of recovery efforts
+                before returning the final chat to the caller. Defaults to True.
+            max_rounds (int, optional): The maximum number of rounds to try to parse
+                successfully. Defaults to DEFAULT_MAX_ROUNDS.
+
+        Returns:
+            PendingChat: The updated PendingChat object.
+        """
+        self.until_types += types
         if next((c for c in self.until_callbacks if c[0] == self._until_parse_callback), None) is None:
             self.until_callbacks.append((self._until_parse_callback, attempt_recovery, drop_dialog, max_rounds))
 
         return self
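With the new `*types` signature, one or more models can be required in a single call; a sketch of the intended use (the `Joke` model and prompt are hypothetical):

```python
import rigging as rg

class Joke(rg.Model):
    content: str

chat = (
    generator.chat("Tell me a joke, wrapped in <joke> tags.")
    .until_parsed_as(Joke)
    .run()
)
joke = chat.last.parse(Joke)
```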
     def _until_tools_callback(self, message: Message) -> tuple[bool, list[Message]]:
-        next_messages: list[Message] = [message]
+        generated: list[Message] = [message]
 
         try:
             tool_calls = message.try_parse(ToolCalls)
         except ValidationError as e:
-            next_messages.append(Message.from_model(ValidationErrorModel(content=e)))
-            return (True, next_messages)
+            generated.append(Message.from_model(ValidationErrorModel(content=e)))
+            return (True, generated)
 
         if tool_calls is None:
             if self.force_tool:
                 logger.debug("No tool calls or types, returning error")
-                next_messages.append(Message.from_model(SystemErrorModel(content="You must use a tool")))
+                generated.append(Message.from_model(SystemErrorModel(content="You must use a tool")))
             else:
                 logger.debug("No tool calls or types, returning message")
 
-            return (self.force_tool, next_messages)
+            return (self.force_tool, generated)
 
         self.force_tool = False
 
@@ -378,21 +604,21 @@ def _until_tools_callback(self, message: Message) -> tuple[bool, list[Message]]:
                 tool_results.append(tool(call))
 
         if errors:
-            next_messages.append(Message.from_model(errors, suffix="Rewrite your message with all the required tags."))
+            generated.append(Message.from_model(errors, suffix="Rewrite your message with all the required tags."))
         else:
-            next_messages.append(Message.from_model(ToolResults(results=tool_results)))
+            generated.append(Message.from_model(ToolResults(results=tool_results)))
 
-        return (True, next_messages)
+        return (True, generated)
 
     def _until_parse_callback(self, message: Message) -> tuple[bool, list[Message]]:
         should_continue: bool = False
-        next_messages: list[Message] = [message]
+        generated: list[Message] = [message]
 
         try:
-            message.parse_many(self.until_types)
+            message.parse_many(*self.until_types)
         except ValidationError as e:
             should_continue = True
-            next_messages.append(
+            generated.append(
                 Message.from_model(
                     ValidationErrorModel(content=e),
                     suffix="Rewrite your entire message with all the required elements.",
                 )
             )
         except Exception as e:
             should_continue = True
-            next_messages.append(
+            generated.append(
                 Message.from_model(
                     SystemErrorModel(content=e), suffix="Rewrite your entire message with all the required elements."
                 )
             )
 
-        return (should_continue, next_messages)
+        return (should_continue, generated)
 
     def _until(
         self,
         messages: list[Message],
-        callback: UntilCallback,
+        callback: UntilMessageCallback,
         attempt_recovery: bool,
         drop_dialog: bool,
         max_rounds: int,
@@ -439,7 +665,20 @@ def _until(
         logger.warning(f"Exhausted max rounds ({max_rounds})")
         raise ExhaustedMaxRoundsError(max_rounds)
 
+    def _then(self, chat: Chat) -> Chat:
+        # TODO: Adding async support here would be nice
+        for callback in self.then_callbacks:
+            chat = callback(chat) or chat
+        return chat
+
     def _execute(self) -> t.Generator[list[Message], Message, list[Message]]:
+        # TODO: Much like the PendingCompletion code, it's opaque
+        # exactly how multiple callbacks should be blended together
+        # when generating. I think we should look at limiting it to
+        # one callback in total, but I'll leave the behavior as is
+        # for now with the knowledge that behavior might be a bit
+        # unpredictable.
+
+        if self.until_tools:
+            # TODO: This can cause issues when certain APIs do not return
+            # the stop sequence as part of the response. This behavior
 
             new_messages = [first_message]
 
         for callback, reset_between, drop_internal, max_rounds in self.until_callbacks:
-            next_messages = yield from self._until(
+            generated = yield from self._until(
                 self.chat.all + new_messages, callback, reset_between, drop_internal, max_rounds
             )
-            new_messages = new_messages[:-1] + next_messages
+            new_messages = new_messages[:-1] + generated
 
         return new_messages
 
     @t.overload
     def run(self, count: t.Literal[None] = None) -> Chat:
         ...
 
     @t.overload
     def run(self, count: int) -> list[Chat]:
         ...
 
     def run(self, count: int | None = None) -> Chat | list[Chat]:
+        """
+        Execute the generation process to produce the final chat.
+
+        If `count` is provided, `run_many` will be called instead.
+
+        Args:
+            count (int | None, optional): The number of times to generate using the same inputs.
+
+        Returns:
+            Chat | list[Chat]: The chat object or a list of chat objects, depending on the value of `count`.
+        """
+
         if count is not None:
             return self.run_many(count)
 
         executor = self._execute()
         outbound = next(executor)
 
         try:
             while True:
-                inbound = self.generator.complete(outbound, self.params)
+                inbound = self.generator.generate_message(outbound, self.params)
                 outbound = executor.send(inbound)
         except StopIteration as stop:
             outbound = t.cast(list[Message], stop.value)
 
-        return Chat(self.chat.all, outbound, pending=self)
+        return self._then(Chat(self.chat.all, outbound, pending=self, metadata=self.metadata))
 
     def run_many(self, count: int) -> list[Chat]:
+        """
+        Executes the generation process multiple times with the same inputs.
+
+        Args:
+            count (int): The number of times to execute the generation process.
+
+        Returns:
+            list[Chat]: A list of Chat objects representing the results of each execution.
+        """
         return [self.run() for _ in range(count)]
 
     __call__ = run
 
     @t.overload
     async def arun(self, count: t.Literal[None] = None) -> Chat:
         ...
 
     @t.overload
     async def arun(self, count: int) -> list[Chat]:
         ...
 
     async def arun(self, count: int | None = None) -> Chat | list[Chat]:
+        """async variant of the [rigging.chat.PendingChat.run][] method."""
         if count is not None:
             return await self.arun_many(count)
 
         executor = self._execute()
         outbound = next(executor)
 
         try:
             while True:
-                inbound = await self.generator.acomplete(outbound, self.params)
+                inbound = await self.generator.agenerate_message(outbound, self.params)
                 outbound = executor.send(inbound)
         except StopIteration as stop:
             outbound = t.cast(list[Message], stop.value)
 
-        return Chat(self.chat.all, outbound, pending=self)
+        return self._then(Chat(self.chat.all, outbound, pending=self, metadata=self.metadata))
 
     async def arun_many(self, count: int) -> list[Chat]:
+        """async variant of the [rigging.chat.PendingChat.run_many][] method."""
         chats = await asyncio.gather(*[self.arun() for _ in range(count)])
         return list(chats)
diff --git a/rigging/completion.py b/rigging/completion.py
new file mode 100644
index 0000000..012716d
--- /dev/null
+++ b/rigging/completion.py
@@ -0,0 +1,459 @@
+"""
+Completions work with isolated strings of text pre and post generation.
+"""
+
+import asyncio
+import string
+import typing as t
+from copy import deepcopy
+from datetime import datetime
+from uuid import UUID, uuid4
+
+from loguru import logger
+from pydantic import (
+    BaseModel,
+    ConfigDict,
+    Field,
+    computed_field,
+)
+
+from rigging.error import ExhaustedMaxRoundsError
+from rigging.model import (
+    Model,
+    ModelT,
+)
+from rigging.parsing import parse_many
+
+if t.TYPE_CHECKING:
+    from rigging.generator import GenerateParams, Generator
+
+DEFAULT_MAX_ROUNDS = 5
+
+
+class Completion(BaseModel):
+    """
+    Represents a completed text generation.
+
+    Attributes:
+        uuid (UUID): The unique identifier.
+        timestamp (datetime): The timestamp when the completion was created.
+        text (str): The original text.
+        generated (str): The generated text.
+        pending (Optional[PendingCompletion]): The pending completion associated with this completion.
+        generator_id (Optional[str]): The identifier of the generator used to create the completion
+    """
+
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
+    uuid: UUID = Field(default_factory=uuid4)
+    timestamp: datetime = Field(default_factory=datetime.now, repr=False)
+    text: str
+    generated: str
+
+    pending: t.Optional["PendingCompletion"] = Field(None, exclude=True, repr=False)
+
+    @computed_field(repr=False)
+    def generator_id(self) -> str | None:
+        if self.pending is not None:
+            return self.pending.generator.to_identifier(self.pending.params)
+        return None
+
+    def __init__(
+        self,
+        text: str,
+        generated: str,
+        pending: t.Optional["PendingCompletion"] = None,
+        **kwargs: t.Any,
+    ):
+        """
+        Initialize a Completion object.
+
+        Args:
+            text (str): The original text.
+            generated (str): The generated text.
+            pending (Optional[PendingCompletion]): The pending completion associated with this completion
+            **kwargs: Additional keyword arguments (typically used for deserialization).
+        """
+        from rigging.generator import get_generator
+
+        if "generator_id" in kwargs and pending is None:
+            generator = get_generator(kwargs.pop("generator_id"))
+            pending = generator.complete(text)
+
+        super().__init__(
+            text=text,
+            generated=generated,
+            pending=pending,
+            **kwargs,
+        )
+
+    def __len__(self) -> int:
+        return len(self.text) + len(self.generated)
+
+    @property
+    def all(self) -> str:
+        """Returns both the text and the generation."""
+        return self.text + self.generated
+
+    def restart(self, *, generator: t.Optional["Generator"] = None, include_all: bool = False) -> "PendingCompletion":
+        """
+        Attempt to convert back to a PendingCompletion for further generation.
+
+        Args:
+            generator (Optional[Generator]): The generator to use for the restarted completion. Otherwise
+                the generator from the original PendingCompletion will be used.
+            include_all (bool): Whether to include the generation before the next round. Defaults to False.
+
+        Returns:
+            PendingCompletion: The restarted completion.
+
+        Raises:
+            ValueError: If the completion was not created with a PendingCompletion and no generator is provided.
+        """
+
+        text = self.all if include_all else self.text
+        if generator is not None:
+            return generator.complete(text)
+        elif self.pending is None:
+            raise ValueError("Cannot restart Completion that was not created with a PendingCompletion")
+        return PendingCompletion(self.pending.generator, text, self.pending.params)
+
+    def fork(self, text: str) -> "PendingCompletion":
+        """
+        Forks the completion by calling [rigging.completion.Completion.restart][] and appending the specified text.
+
+        Args:
+            text (str): The text to append.
+
+        Returns:
+            PendingCompletion: A new instance of `PendingCompletion` with the specified text added.
+
+        """
+        return self.restart().add(text)
+
+    def clone(self) -> "Completion":
+        """Creates a deep copy of the completion."""
+        return Completion(self.text, self.generated, self.pending)
+
+
+# Passed the generated text, returns whether or not
+# generation should be retried
+UntilCompletionCallback = t.Callable[[str], bool]
+
+
+class PendingCompletion:
+    """
+    Represents a pending completion that can be modified and executed.
+
+    Attributes:
+        generator (Generator): The generator object responsible for generating the completion.
+        text (str): The text to be completed.
+        params (Optional[GenerateParams]): The parameters for generating the completion.
+        metadata (dict[str, Any]): Additional metadata associated with the completion.
+        until_callbacks (list[tuple[UntilCompletionCallback, bool, int]]): List of until completion callbacks.
+        until_types (list[type[Model]]): List of until completion types.
+    """
+
+    def __init__(self, generator: "Generator", text: str, params: t.Optional["GenerateParams"] = None):
+        self.generator: "Generator" = generator
+        self.text = text
+        self.params = params
+        self.metadata: dict[str, t.Any] = {}
+
+        # (callback, all_text, max_rounds)
+        self.until_callbacks: list[tuple[UntilCompletionCallback, bool, int]] = []
+        self.until_types: list[type[Model]] = []
+
+    def overload(self, **kwargs: t.Any) -> "PendingCompletion":
+        """
+        Overloads the current completion with the given parameters.
+
+        This is a convenience method for calling `with_params(GenerateParams(**kwargs))`.
+
+        Note:
+            This will trigger a `clone` if overload params have already been set.
+
+        Args:
+            **kwargs: Keyword arguments representing the parameters to be overloaded.
+
+        Returns:
+            PendingCompletion: A new instance of PendingCompletion with the overloaded parameters.
+        """
+        from rigging.generator import GenerateParams
+
+        return self.with_params(GenerateParams(**kwargs))
+
+    def with_params(self, params: "GenerateParams") -> "PendingCompletion":
+        """
+        Sets the generation parameter overloads for the completion.
+
+        Note:
+            This will trigger a `clone` if overload params have already been set.
+
+        Args:
+            params (GenerateParams): The parameters to set for the completion.
+
+        Returns:
+            PendingCompletion: A new instance of PendingCompletion with the updated parameters.
+        """
+        if self.params is not None:
+            new = self.clone()
+            new.params = params
+            return new
+
+        self.params = params
+        return self
+
+    def add(self, text: str) -> "PendingCompletion":
+        """
+        Appends new text to the internal text before generation.
+
+        Args:
+            text (str): The text to be added to the completion.
+
+        Returns:
+            PendingCompletion: The updated PendingCompletion object.
+        """
+        self.text += text
+        return self
+
+    def fork(self, text: str) -> "PendingCompletion":
+        """
+        Creates a new instance of `PendingCompletion` by forking the current completion and adding the specified text.
+
+        This is a convenience method for calling `clone().add(text)`.
+
+        Args:
+            text: The text to be added to the new completion.
+
+        Returns:
+            A new instance of `PendingCompletion` with the specified text added.
+        """
+        return self.clone().add(text)
+
+    def clone(self, *, only_text: bool = False) -> "PendingCompletion":
+        """
+        Creates a clone of the current `PendingCompletion` instance.
+ + Args: + only_text (bool, optional): If True, only the text will be cloned. + If False (default), the entire `PendingCompletion` instance will be cloned + including until callbacks and types. + + Returns: + PendingCompletion: A new instance of `PendingCompletion` that is a clone of the current instance. + """ + new = PendingCompletion(self.generator, self.text, self.params) + if not only_text: + new.until_callbacks = self.until_callbacks.copy() + new.until_types = self.until_types.copy() + new.metadata = deepcopy(self.metadata) + return new + + def meta(self, **kwargs: t.Any) -> "PendingCompletion": + """ + Updates the metadata of the completion with the provided key-value pairs. + + Args: + **kwargs: Key-value pairs representing the metadata to be updated. + + Returns: + PendingCompletion: The updated completion object. + """ + self.metadata.update(kwargs) + return self + + def apply(self, **kwargs: str) -> "PendingCompletion": + """ + Applies keyword arguments to the text using string template substitution. + + Args: + **kwargs: Keyword arguments to be applied to the text. + + Returns: + PendingCompletion: A new instance of PendingCompletion with the applied arguments. + """ + new = self.clone() + template = string.Template(self.text) + new.text = template.safe_substitute(**kwargs) + return new + + def until( + self, + callback: UntilCompletionCallback, + *, + use_all_text: bool = False, + max_rounds: int = DEFAULT_MAX_ROUNDS, + ) -> "PendingCompletion": + """ + Registers a callback to participate in validating the generation process. + + ```python + # Takes the generated text, and returns whether or not to retry generation. + + def callback(text: str) -> bool: + if is_valid(text): + return False + else: + return True + + pending.until(callback).run() + ``` + + Args: + callback (UntilCompletionCallback): The callback function to be executed. + use_all_text (bool, optional): Whether to pass the entire text (including prompt) to the callback. + Defaults to False. + max_rounds (int, optional): The maximum number of rounds to attempt generation + callbacks + before giving up. Defaults to DEFAULT_MAX_ROUNDS. + + Returns: + PendingCompletion: The current instance of the completion. + """ + self.until_callbacks.append((callback, use_all_text, max_rounds)) + return self + + def until_parsed_as( + self, + *types: type[ModelT], + use_all_text: bool = False, + max_rounds: int = DEFAULT_MAX_ROUNDS, + ) -> "PendingCompletion": + """ + Adds the specified types to the list of types which should successfully parse + before the generation process completes. + + Args: + *types (type[ModelT]): The type or types of models to wait for. + use_all_text (bool, optional): Whether to pass the entire text (including prompt) to the parser. + Defaults to False. + max_rounds (int, optional): The maximum number of rounds to try to parse + successfully. Defaults to DEFAULT_MAX_ROUNDS. + + Returns: + PendingCompletion: The updated PendingCompletion object. 
+        """
+        self.until_types += types
+        if next((c for c in self.until_callbacks if c[0] == self._until_parse_callback), None) is None:
+            self.until_callbacks.append((self._until_parse_callback, use_all_text, max_rounds))
+
+        return self
+
+    def _until_parse_callback(self, text: str) -> bool:
+        try:
+            parse_many(text, *self.until_types)
+        except Exception:
+            return True
+        return False
+
+    def _execute(self) -> t.Generator[str, str, str]:
+        # If there are no until_callbacks, we can just yield the text
+        if not self.until_callbacks:
+            generated = yield self.text
+            return generated
+
+        # It's opaque exactly how we should blend multiple
+        # until callbacks together, so here is the current implementation:
+        #
+        # - We take the lowest max_rounds from all until_callbacks
+        # - Each loop, we let every callback run, if any tell us to retry, we do
+        # - If we leave the loop with should_retry still True, we raise an error
+        # - Assuming every should_retry is False, we break out of the loop and return
+
+        lowest_max_rounds = min((c[2] for c in self.until_callbacks), default=1)
+
+        current_round = 0
+        should_retry = True
+        while should_retry and current_round < lowest_max_rounds:
+            current_round += 1
+            generated = yield self.text
+            for callback, use_all_text, _ in self.until_callbacks:
+                should_retry = callback(self.text + generated if use_all_text else generated)
+                if should_retry:
+                    break
+
+        if should_retry:
+            logger.warning(f"Exhausted lowest max rounds ({lowest_max_rounds})")
+            raise ExhaustedMaxRoundsError(lowest_max_rounds)
+
+        return generated
+
+    @t.overload
+    def run(self, count: t.Literal[None] = None) -> Completion:
+        ...
+
+    @t.overload
+    def run(self, count: int) -> list[Completion]:
+        ...
+
+    def run(self, count: int | None = None) -> Completion | list[Completion]:
+        """
+        Execute the generation process to produce the final completion.
+
+        If `count` is provided, `run_many` will be called instead.
+
+        Args:
+            count (int | None, optional): The number of times to generate using the same inputs.
+
+        Returns:
+            Completion | list[Completion]: The completion object or a list of completion objects,
+            depending on the value of `count`.
+        """
+        if count is not None:
+            return self.run_many(count)
+
+        executor = self._execute()
+        outbound = next(executor)
+
+        try:
+            while True:
+                inbound = self.generator.generate_text(outbound, self.params)
+                outbound = executor.send(inbound)
+        except StopIteration as stop:
+            outbound = t.cast(str, stop.value)
+
+        return Completion(self.text, outbound, pending=self)
+
+    def run_many(self, count: int) -> list[Completion]:
+        """
+        Executes the generation process multiple times with the same inputs.
+
+        Args:
+            count (int): The number of times to execute the generation process.
+
+        Returns:
+            list[Completion]: A list of Completion objects representing the results of each execution.
+        """
+        return [self.run() for _ in range(count)]
+
+    __call__ = run
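The async variants below mirror `run()`/`run_many()`; a sketch of driving both the sync and async paths (model id and prompt are hypothetical):

```python
import asyncio

import rigging as rg

pending = rg.get_generator("gpt-3.5-turbo-instruct").complete("2 + 2 =")

completion = pending.run()  # synchronous, one generation

async def main() -> None:
    one = await pending.arun()         # a single async generation
    many = await pending.arun_many(5)  # several generations over the same input

asyncio.run(main())
```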
+
+    @t.overload
+    async def arun(self, count: t.Literal[None] = None) -> Completion:
+        ...
+
+    @t.overload
+    async def arun(self, count: int) -> list[Completion]:
+        ...
+
+    async def arun(self, count: int | None = None) -> Completion | list[Completion]:
+        """async variant of the [rigging.completion.PendingCompletion.run][] method."""
+        if count is not None:
+            return await self.arun_many(count)
+
+        executor = self._execute()
+        outbound = next(executor)
+
+        try:
+            while True:
+                inbound = await self.generator.agenerate_text(outbound, self.params)
+                outbound = executor.send(inbound)
+        except StopIteration as stop:
+            outbound = t.cast(str, stop.value)
+
+        return Completion(self.text, outbound, pending=self)
+
+    async def arun_many(self, count: int) -> list[Completion]:
+        """async variant of the [rigging.completion.PendingCompletion.run_many][] method."""
+        completions = await asyncio.gather(*[self.arun() for _ in range(count)])
+        return list(completions)
diff --git a/rigging/generator.py b/rigging/generator.py
index fa804c8..89b2862 100644
--- a/rigging/generator.py
+++ b/rigging/generator.py
@@ -2,7 +2,7 @@
 Generators produce completions for a given set of messages or text.
 """
 
-import abc
+import asyncio
 import typing as t
 
 import litellm  # type: ignore
@@ -10,6 +10,7 @@
 from pydantic import BaseModel, ConfigDict, Field, field_validator
 
 from rigging.chat import PendingChat
+from rigging.completion import PendingCompletion
 from rigging.error import InvalidModelSpecifiedError
 from rigging.message import (
     Message,
@@ -21,6 +22,9 @@
 # fix it to prevent confusion
 litellm.drop_params = True
 
+# Global provider map
+g_providers: dict[str, type["Generator"]] = {}
+
 
 # TODO: Ideally we flex this to support arbitrary
 # generator params, but we'll limit things
@@ -36,6 +40,9 @@ class GenerateParams(BaseModel):
     These are designed to generally overlap with underlying
     APIs like litellm, but will be extended as needed.
 
+    Note:
+        Use the `extra` field to pass additional parameters to the API.
+
     Attributes:
         temperature (float | None): The sampling temperature.
         max_tokens (int | None): The maximum number of tokens to generate.
@@ -71,12 +78,21 @@ def validate_stop(cls, value: t.Any) -> t.Any:
         raise ValueError("Stop sequences must be a list or a string separated by ';'")
 
 
-class Generator(BaseModel, abc.ABC):
+class Generator(BaseModel):
     """
     Base class for all rigging generators.
 
     This class provides common functionality and methods for generating completion messages.
 
+    A subclass of this can implement any of the following:
+
+    - `generate_message`: Generate the next message for a given set of messages.
+    - `generate_text`: Generate a string completion of the given text.
+    - `batch_messages`: Process a batch of messages.
+    - `batch_texts`: Process a batch of texts.
+
+    (In addition to async variants of these functions)
+
     Attributes:
         model (str): The model used by the generator.
         api_key (str | None): The API key used for authentication. Defaults to None.
@@ -142,7 +158,42 @@ def _merge_params(self, overloads: GenerateParams | None = None) -> dict[str, t.
 
         return params
 
-    def complete_text(self, text: str, overloads: GenerateParams | None = None) -> str:
+    # Message generation
+
+    def generate_message(self, messages: t.Sequence[Message], overloads: GenerateParams | None = None) -> Message:
+        """
+        Generates the next message for a given set of messages.
+
+        Args:
+            messages (Sequence[Message]): The list of messages to generate completion for.
+            overloads (GenerateParams | None, optional): The parameters to be used for completion.
+
+        Returns:
+            Message: The generated completion message.
+
+        Raises:
+            NotImplementedError: This generator does not support this method.
+ """ + raise NotImplementedError("generate_message is not supported by this generator.") + + async def agenerate_message( + self, messages: t.Sequence[Message], overloads: GenerateParams | None = None + ) -> Message: + """ + Asynchronously generates the next message for a given set of messages. + + Args: + messages (Sequence[Message]): A sequence of messages. + overloads (GenerateParams | None, optional): The parameters to be used for completion. + + Returns: + Coroutine[None, None, Message]: A coroutine that yields completion messages. + """ + raise NotImplementedError("agenerate_message is not supported by this generator.") + + # Text generation + + def generate_text(self, text: str, overloads: GenerateParams | None = None) -> str: """ Generates a string completion of the given text. @@ -154,11 +205,11 @@ def complete_text(self, text: str, overloads: GenerateParams | None = None) -> s str: The completed text. Raises: - NotImplementedError: This generator does not support the `complete_text` method. + NotImplementedError: This generator does not support this method. """ - raise NotImplementedError("complete_text is not supported by this generator.") + raise NotImplementedError("generate_text is not supported by this generator.") - async def acomplete_text(self, text: str, overloads: GenerateParams | None = None) -> str: + async def agenerate_text(self, text: str, overloads: GenerateParams | None = None) -> str: """ Asynchronously generates a string completion of the given text. @@ -170,48 +221,137 @@ async def acomplete_text(self, text: str, overloads: GenerateParams | None = Non Coroutine[None, None, str]: A coroutine that yields the completed text. Raises: - NotImplementedError: This generator does not support the `acomplete_text` method. + NotImplementedError: This generator does not support this method. """ - raise NotImplementedError("acomplete_text is not supported by this generator.") + raise NotImplementedError("agenerate_text is not supported by this generator.") + + # Batching messages - @abc.abstractmethod - def complete(self, messages: t.Sequence[Message], overloads: GenerateParams | None = None) -> Message: + def batch_messages( + self, + many: t.Sequence[t.Sequence[Message]], + overloads: t.Sequence[GenerateParams | None] | None = None, + *, + fixed: t.Sequence[Message] | None = None, + ) -> t.Sequence[Message]: """ - Generates the next message for a given set of messages. + Generate a batch of messages using the specified parameters. + + Note: + If supplied, the length of `overloads` must be the same as the length of `many`. Args: - messages (Sequence[Message]): The list of messages to generate completion for. - overloads (GenerateParams | None, optional): The parameters to be used for completion. + many (Sequence[Sequence[Message]]): A sequence of sequences of messages. + overloads (Sequence[GenerateParams | None], optional): A sequence of GenerateParams objects or None. Defaults to None. + fixed (Sequence[Message], optional): A sequence of fixed messages to be prefixed before every item of `many`. Defaults to None. Returns: - Message: The generated completion message. + Sequence[Message]: A sequence of generated messages. + + Raises: + NotImplementedError: This method is not supported by this generator. """ - ... 
+        raise NotImplementedError("batch_messages is not supported by this generator.")
+
+    async def abatch_messages(
+        self,
+        many: t.Sequence[t.Sequence[Message]],
+        overloads: t.Sequence[GenerateParams | None] | None = None,
+        *,
+        fixed: t.Sequence[Message] | None = None,
+    ) -> t.Sequence[Message]:
+        """
+        Asynchronously generate a batch of messages based on the given parameters.
+
+        Note:
+            If supplied, the length of `overloads` must be the same as the length of `many`.
+
+        Args:
+            many (Sequence[Sequence[Message]]): A sequence of sequences of messages.
+            overloads (Sequence[GenerateParams | None], optional): A sequence of GenerateParams or None. Defaults to None.
+            fixed (Sequence[Message] | None, optional): A sequence of fixed messages to be prefixed before every item of `many`. Defaults to None.
+
+        Returns:
+            Sequence[Message]: A sequence of generated messages.
+
+        Raises:
+            NotImplementedError: This method is not supported by this generator.
+        """
+        raise NotImplementedError("abatch_messages is not supported by this generator.")
+
+    # Batching texts
+
+    def batch_texts(
+        self,
+        many: t.Sequence[str],
+        overloads: t.Sequence[GenerateParams | None] | None = None,
+        *,
+        fixed: str | None = None,
+    ) -> t.Sequence[str]:
+        """
+        Generate a batch of texts using the generator.
+
+        Note:
+            If supplied, the length of `overloads` must be the same as the length of `many`.
+
+        Args:
+            many (Sequence[str]): The input texts for generating the batch.
+            overloads (Sequence[GenerateParams | None] | None, optional): Additional parameters for generating each text in the batch. Defaults to None.
+            fixed (str | None, optional): A fixed input text to be used as a prefix for all of `many`. Defaults to None.
+
+        Returns:
+            Sequence[str]: The generated texts in the batch.
+
+        Raises:
+            NotImplementedError: This method is not supported by this generator.
+        """
+        raise NotImplementedError("batch_texts is not supported by this generator.")
+
+    async def abatch_texts(
+        self,
+        many: t.Sequence[str],
+        overloads: t.Sequence[GenerateParams | None] | None = None,
+        *,
+        fixed: str | None = None,
+    ) -> t.Sequence[str]:
+        """
+        Asynchronously generate multiple texts in batch.
+
+        Args:
+            many (Sequence[str]): A sequence of texts to generate.
+            overloads (Sequence[GenerateParams | None] | None, optional): A sequence of optional parameters for each text. Defaults to None.
+            fixed (str | None, optional): A fixed parameter for all texts. Defaults to None.
+
+        Returns:
+            Sequence[str]: A sequence of generated texts.
+
+        Raises:
+            NotImplementedError: This method is not supported by this generator.
+        """
+        raise NotImplementedError("abatch_texts is not supported by this generator.")
+
+    # Helper alternative to chat(generator) -> generator.chat(...)
+    #
+    # Overloads seem odd, but mypy doesn't like the TypedDict in a list otherwise
 
     @t.overload
-    def chat(self, messages: t.Sequence[MessageDict]) -> PendingChat:
+    def chat(
+        self,
+        messages: t.Sequence[MessageDict],
+        overloads: GenerateParams | None = None,
+    ) -> PendingChat:
         ...
@t.overload - def chat(self, messages: t.Sequence[Message] | str) -> PendingChat: + def chat( + self, messages: t.Sequence[Message] | MessageDict | Message | str, overloads: GenerateParams | None = None + ) -> PendingChat: ... def chat( - self, messages: t.Sequence[MessageDict] | t.Sequence[Message] | str, overloads: GenerateParams | None = None + self, + messages: t.Sequence[MessageDict] | t.Sequence[Message] | MessageDict | Message | str, + overloads: GenerateParams | None = None, ) -> PendingChat: """ Builds a pending chat with the given messages and optional overloads. @@ -225,8 +365,20 @@ def chat( """ return PendingChat(self, Message.fit_as_list(messages), overloads) + # Helper alternative to complete(generator) -> generator.complete(...) + + def complete(self, text: str, overloads: GenerateParams | None = None) -> PendingCompletion: + """ + Generates a pending string completion of the given text. + + Args: + text (str): The input text to be completed. + overloads (GenerateParams | None, optional): The parameters to be used for completion. -# Helper function external to a generator + Returns: + str: The completed text. + """ + return PendingCompletion(self, text, overloads) @t.overload @@ -241,7 +393,7 @@ def chat( @t.overload def chat( generator: "Generator", - messages: t.Sequence[Message] | str, + messages: t.Sequence[Message] | MessageDict | Message | str, overloads: GenerateParams | None = None, ) -> PendingChat: ... @@ -265,95 +417,15 @@ def chat( Returns: PendingChat: Pending chat to run. """ - return PendingChat(generator, Message.fit_as_list(messages), overloads) - - -def trace_messages(messages: t.Sequence[Message], title: str) -> None: - """ - Helper function to trace log a sequence of Message objects. - - Args: - messages (Sequence[Message]): A sequence of Message objects to be logged. - title (str): The title to be displayed in the log. - - Returns: - None - """ - logger.trace(f"--- {title} ---") - logger.trace("\n".join([str(msg) for msg in messages])) - logger.trace("---") - - -def trace_str(content: str, title: str) -> None: - """ - Helper function to trace log a string. - - Parameters: - content (str): The string content to be logged. - title (str): The title of the log entry. - - Returns: - None - """ - logger.trace(f"--- {title} ---") - logger.trace(content) - logger.trace("---") - - -class LiteLLMGenerator(Generator): - """ - Generator backed by the LiteLLM library. - - !!! note - Find more information about supported models and formats [in their docs.](https://docs.litellm.ai/docs/providers). 
- """ - - def complete(self, messages: t.Sequence[Message], overloads: GenerateParams | None = None) -> Message: - trace_messages(messages, "Conversations") - - messages_as_dicts = [message.model_dump() for message in messages] - params = self._merge_params(overloads) - result = litellm.completion(self.model, messages_as_dicts, api_key=self.api_key, **params) - response = result.choices[-1].message.content.strip() - next_message = Message(role="assistant", content=response) - - trace_messages([next_message], "Response") - - return next_message - - async def acomplete(self, messages: t.Sequence[Message], overloads: GenerateParams | None = None) -> Message: - trace_messages(messages, "Conversations") - - messages_as_dicts = [message.model_dump() for message in messages] - params = self._merge_params(overloads) - result = await litellm.acompletion(self.model, messages_as_dicts, api_key=self.api_key, **params) - response = result.choices[-1].message.content.strip() - next_message = Message(role="assistant", content=response) - - trace_messages([next_message], "Response") - - return next_message - - def complete_text(self, text: str, overloads: GenerateParams | None = None) -> str: - trace_str(text, "Text") - params = self._merge_params(overloads) - result = litellm.text_completion(self.model, text, api_key=self.api_key, **params) - completion: str = result.choices[-1]["text"] - trace_str(completion, "Completion") - return completion - - async def acomplete_text(self, text: str, overloads: GenerateParams | None = None) -> str: - trace_str(text, "Text") - params = self._merge_params(overloads) - result = await litellm.atext_completion(self.model, text, api_key=self.api_key, **params) - completion: str = result.choices[-1]["text"] - trace_str(completion, "Completion") - return completion + return generator.chat(messages, overloads) -g_providers: dict[str, type["Generator"]] = { - "litellm": LiteLLMGenerator, -} +def complete( + generator: Generator, + text: str, + overloads: GenerateParams | None = None, +) -> PendingCompletion: + return generator.complete(text, overloads) def get_identifier(generator: Generator, overloads: GenerateParams | None = None) -> str: @@ -445,3 +517,168 @@ def register_generator(provider: str, generator_cls: type[Generator]) -> None: """ global g_providers g_providers[provider] = generator_cls + + +def trace_messages(messages: t.Sequence[Message], title: str) -> None: + """ + Helper function to trace log a sequence of Message objects. + + Args: + messages (Sequence[Message]): A sequence of Message objects to be logged. + title (str): The title to be displayed in the log. + + Returns: + None + """ + logger.trace(f"--- {title} ---") + logger.trace("\n".join([str(msg) for msg in messages])) + logger.trace("---") + + +def trace_str(content: str, title: str) -> None: + """ + Helper function to trace log a string. + + Parameters: + content (str): The string content to be logged. + title (str): The title of the log entry. + + Returns: + None + """ + logger.trace(f"--- {title} ---") + logger.trace(content) + logger.trace("---") + + +class LiteLLMGenerator(Generator): + """ + Generator backed by the LiteLLM library. + + Note: + Find more information about supported models and formats [in their docs.](https://docs.litellm.ai/docs/providers). + + Note: + While this generator implements the batch methods, they are not performant and simply loop over the inputs. 
+ """ + + def generate_message(self, messages: t.Sequence[Message], overloads: GenerateParams | None = None) -> Message: + trace_messages(messages, "Conversations") + + messages_as_dicts = [message.model_dump(include={"role", "content"}) for message in messages] + params = self._merge_params(overloads) + result = litellm.completion(self.model, messages_as_dicts, api_key=self.api_key, **params) + response = result.choices[-1].message.content.strip() + next_message = Message(role="assistant", content=response) + + trace_messages([next_message], "Response") + + return next_message + + async def agenerate_message( + self, messages: t.Sequence[Message], overloads: GenerateParams | None = None + ) -> Message: + trace_messages(messages, "Conversations") + + messages_as_dicts = [message.model_dump(include={"role", "content"}) for message in messages] + params = self._merge_params(overloads) + result = await litellm.acompletion(self.model, messages_as_dicts, api_key=self.api_key, **params) + response = result.choices[-1].message.content.strip() + next_message = Message(role="assistant", content=response) + + trace_messages([next_message], "Response") + + return next_message + + def generate_text(self, text: str, overloads: GenerateParams | None = None) -> str: + trace_str(text, "Text") + + params = self._merge_params(overloads) + result = litellm.text_completion(text, self.model, api_key=self.api_key, **params) + completion: str = result.choices[-1]["text"] + + trace_str(completion, "Completion") + + return completion + + async def agenerate_text(self, text: str, overloads: GenerateParams | None = None) -> str: + trace_str(text, "Text") + + params = self._merge_params(overloads) + result = await litellm.atext_completion(text, self.model, api_key=self.api_key, **params) + completion: str = result.choices[-1]["text"] + + trace_str(completion, "Completion") + + return completion + + def batch_messages( + self, + many: t.Sequence[t.Sequence[Message]], + overloads: t.Sequence[GenerateParams | None] | None = None, + *, + fixed: t.Sequence[Message] | None = None, + ) -> t.Sequence[Message]: + if overloads is not None and len(overloads) != len(many): + raise ValueError("Length of overloads must match the length of many.") + + overloads = [None] * len(many) if overloads is None else overloads + if fixed is not None: + many = [list(fixed) + list(messages) for messages in many] + + return [self.generate_message(messages, overload) for messages, overload in zip(many, overloads, strict=True)] + + async def abatch_messages( + self, + many: t.Sequence[t.Sequence[Message]], + overloads: t.Sequence[GenerateParams | None] | None = None, + *, + fixed: t.Sequence[Message], + ) -> t.Sequence[Message]: + if overloads is not None and len(overloads) != len(many): + raise ValueError("Length of overloads must match the length of many.") + + overloads = [None] * len(many) if overloads is None else overloads + if fixed is not None: + many = [list(fixed) + list(messages) for messages in many] + + return await asyncio.gather( + *[self.agenerate_message(messages, overload) for messages, overload in zip(many, overloads, strict=True)] + ) + + def batch_texts( + self, + many: t.Sequence[str], + overloads: t.Sequence[GenerateParams | None] | None = None, + *, + fixed: str | None = None, + ) -> t.Sequence[str]: + if overloads is not None and len(overloads) != len(many): + raise ValueError("Length of overloads must match the length of many.") + + overloads = [None] * len(many) if overloads is None else overloads + if fixed is not 
None:
+            many = [fixed + message for message in many]
+
+        return [self.generate_text(message, overload) for message, overload in zip(many, overloads, strict=True)]
+
+    async def abatch_texts(
+        self,
+        many: t.Sequence[str],
+        overloads: t.Sequence[GenerateParams | None] | None = None,
+        *,
+        fixed: str | None = None,
+    ) -> t.Sequence[str]:
+        if overloads is not None and len(overloads) != len(many):
+            raise ValueError("Length of overloads must match the length of many.")
+
+        overloads = [None] * len(many) if overloads is None else overloads
+        if fixed is not None:
+            many = [fixed + message for message in many]
+
+        return await asyncio.gather(
+            *[self.agenerate_text(message, overload) for message, overload in zip(many, overloads, strict=True)]
+        )
+
+
+g_providers["litellm"] = LiteLLMGenerator
diff --git a/rigging/logging.py b/rigging/logging.py
index c1d2f2b..d113f7d 100644
--- a/rigging/logging.py
+++ b/rigging/logging.py
@@ -1,5 +1,7 @@
 """
 We use loguru for logging. This module provides a function to configure the logging settings.
+
+To enable rigging logging, call `logger.enable("rigging")` after importing the module.
 """
 
 import pathlib
diff --git a/rigging/message.py b/rigging/message.py
index c939fa9..9f85f4a 100644
--- a/rigging/message.py
+++ b/rigging/message.py
@@ -18,6 +18,7 @@
 
 from rigging.error import MissingModelError
 from rigging.model import Model, ModelT
+from rigging.parsing import try_parse_many
 
 Role = t.Literal["system", "user", "assistant"]
 
@@ -212,6 +213,10 @@ def models(self) -> list[Model]:
         """Returns a list of models parsed from the message."""
         return [part.model for part in self.parts]
 
+    # TODO: Many of these functions are duplicates from the parsing
+    # module, but here we don't hand back slices and want there
+    # to be a convenient access model. We should probably consolidate.
+
     def parse(self, model_type: type[ModelT]) -> ModelT:
         """
         Parses a model from the message content.
@@ -225,14 +230,7 @@ def parse(self, model_type: type[ModelT]) -> ModelT:
         Raises:
             ValueError: If no models of the given type are found and `fail_on_missing` is set to `True`.
         """
-        # TODO: We should validate, but I only predict problems
-        # grabbing existing models from the message. We should
-        # probably always just reparse - it's a cheap operation.
-
-        # for model in self.models:
-        #     if isinstance(model, model_type):
-        #         return model
-        return self.try_parse_many([model_type], fail_on_missing=True)[0]
+        return self.try_parse_many(model_type, fail_on_missing=True)[0]
 
     def try_parse(self, model_type: type[ModelT]) -> ModelT | None:
         """
@@ -244,10 +242,7 @@ def try_parse(self, model_type: type[ModelT]) -> ModelT | None:
         Returns:
             ModelT | None: The first model that matches the given model type, or None if no match is found.
         """
-        # for model in self.models:
-        #     if isinstance(model, model_type):
-        #         return model
-        return next(iter(self.try_parse_many([model_type])), None)
+        return next(iter(self.try_parse_many(model_type)), None)
 
     def parse_set(self, model_type: type[ModelT], minimum: int | None = None) -> list[ModelT]:
         """
@@ -282,17 +277,17 @@ def try_parse_set(
 
         Raises:
             MissingModelError: If the number of parsed models is less than the minimum required.
""" - models = self.try_parse_many([model_type], fail_on_missing=fail_on_missing) + models = self.try_parse_many(model_type, fail_on_missing=fail_on_missing) if minimum is not None and len(models) < minimum: raise MissingModelError(f"Expected at least {minimum} {model_type.__name__} in message") return models - def parse_many(self, types: t.Sequence[type[ModelT]]) -> list[ModelT]: + def parse_many(self, *types: type[ModelT]) -> list[ModelT]: """ Parses multiple models of the specified non-identical types from the message content. Args: - types (Sequence[type[ModelT]]): A sequence of model types to parse. + *types (type[ModelT]): The types of models to parse. Returns: list[ModelT]: A list of parsed models. @@ -300,14 +295,14 @@ def parse_many(self, types: t.Sequence[type[ModelT]]) -> list[ModelT]: Raises: MissingModelError: If any of the models are missing. """ - return self.try_parse_many(types, fail_on_missing=True) + return self.try_parse_many(*types, fail_on_missing=True) - def try_parse_many(self, types: t.Sequence[type[ModelT]], fail_on_missing: bool = False) -> list[ModelT]: + def try_parse_many(self, *types: type[ModelT], fail_on_missing: bool = False) -> list[ModelT]: """ Tries to parse multiple models from the content of the message. Args: - types (Sequence[type[ModelT]]): A sequence of model types to parse. + *types (type[ModelT]): The types of models to parse. fail_on_missing (bool, optional): Whether to raise an exception if a model type is missing. Defaults to False. Returns: @@ -317,17 +312,11 @@ def try_parse_many(self, types: t.Sequence[type[ModelT]], fail_on_missing: bool MissingModelError: If a model type is missing and `fail_on_missing` is True. """ model: ModelT - parsed: list[ModelT] = [] - for model_class in types: - try: - for model, slice_ in model_class.from_text(self.content): - self._add_part(ParsedMessagePart(model=model, slice_=slice_)) - parsed.append(model) - except MissingModelError as e: - if fail_on_missing: - raise e + parsed: list[tuple[ModelT, slice]] = try_parse_many(self.content, *types, fail_on_missing=fail_on_missing) + for model, slice_ in parsed: + self._add_part(ParsedMessagePart(model=model, slice_=slice_)) self._sync_parts() - return parsed + return [p[0] for p in parsed] @classmethod def from_model( diff --git a/rigging/model.py b/rigging/model.py index 08c7171..450827e 100644 --- a/rigging/model.py +++ b/rigging/model.py @@ -1,3 +1,7 @@ +""" +Models are the core datatypes for structured parsing. +""" + import re import typing as t from xml.etree import ElementTree as ET @@ -76,6 +80,12 @@ def __init_subclass__( # requirements like lxml seemed like poor form for # just this feature def to_pretty_xml(self) -> str: + """ + Converts the model to a pretty XML string with indents and newlines. + + Returns: + str: The pretty XML representation of the model. + """ tree = self.to_xml_tree() ET.indent(tree, " ") pretty_encoded_xml = ET.tostring(tree).decode() @@ -94,25 +104,47 @@ def to_pretty_xml(self) -> str: # TODO: lxml with the recover option is likely a better approach @classmethod def is_simple(cls) -> bool: + """ + Check if the model is "simple", meaning it has a single field with a basic datatype. + + Until we refactor our XML parsing, this helps make the parsing more consistent for models + which can support it. + + Returns: + bool: True if the model is simple, False otherwise. 
+ """ field_values = list(cls.model_fields.values()) return len(field_values) == 1 and field_values[0].annotation in BASIC_TYPES @classmethod def xml_start_tag(cls) -> str: + """Helper method which wrapped the class tag in XML braces.""" return f"<{cls.__xml_tag__}>" @classmethod def xml_end_tag(cls) -> str: + """Helper method which wrapped the class tag in XML braces with a leading slash.""" return f"" @classmethod def xml_tags(cls) -> str: + """Helper method which returns the full XML tags for the class.""" return cls.xml_start_tag() + cls.xml_end_tag() # This can be overridden to provide a more complex example # to a model when it's required. @classmethod def xml_example(cls) -> str: + """ + Returns an example XML representation of the given class. + + Models should typically override this method to provide a more complex example. + + By default, this method just returns the XML tags for the class. + + Returns: + A string containing the XML representation of the class. + """ return cls.xml_tags() @classmethod @@ -139,6 +171,20 @@ def ensure_valid(cls) -> None: @classmethod def from_text(cls, content: str) -> list[tuple[ModelT, slice]]: + """ + The core parsing method which attempts to extract and parse as many + valid instances of a model from semi-structured text. + + Args: + content (str): The text content to parse. + + Returns: + list[tuple[ModelT, slice]]: A list of tuples containing the extracted models and their corresponding slices. + + Raises: + MissingModelError: If the specified model tags are not found in the message. + ValidationError: If an error occurs while parsing the content. + """ cls.ensure_valid() pattern = r"(<([\w-]+).*?>((.*?)))" @@ -193,7 +239,20 @@ def from_text(cls, content: str) -> list[tuple[ModelT, slice]]: return extracted @classmethod - def one_from_text(cls, content: str, fail_on_many: bool = False) -> tuple[ModelT, slice]: + def one_from_text(cls, content: str, *, fail_on_many: bool = False) -> tuple[ModelT, slice]: + """ + Finds and returns a single match from the given text content. + + Args: + content (str): The text content to search for matches. + fail_on_many (bool, optional): If True, raises a ValidationError if multiple matches are found. Defaults to False. + + Returns: + tuple[ModelT, slice]: A tuple containing the matched model and the slice indicating the match location. + + Raises: + ValidationError: If multiple matches are found and fail_on_many is True. + """ matches = cls.from_text(content) # type: ignore [var-annotated] if fail_on_many and len(matches) > 1: raise ValidationError("Multiple matches found with 'fail_on_many=True'") diff --git a/rigging/parsing.py b/rigging/parsing.py new file mode 100644 index 0000000..18a4897 --- /dev/null +++ b/rigging/parsing.py @@ -0,0 +1,127 @@ +""" +Parsing helpers for extracting rigging models from text +""" + +from rigging.error import MissingModelError +from rigging.model import ModelT + + +def parse(text: str, model_type: type[ModelT]) -> tuple[ModelT, slice]: + """ + Parses a single model from text. + + Args: + text (str): The content to parse. + model_type (type): The type of model to parse. + + Returns: + ModelT: The parsed model. + + Raises: + ValueError: If no models of the given type are found and `fail_on_missing` is set to `True`. + """ + return try_parse_many(text, model_type, fail_on_missing=True)[0] + + +def try_parse(text: str, model_type: type[ModelT]) -> tuple[ModelT, slice] | None: + """ + Tries to parse a model from text. + + Args: + text (str): The content to parse. 
diff --git a/rigging/parsing.py b/rigging/parsing.py
new file mode 100644
index 0000000..18a4897
--- /dev/null
+++ b/rigging/parsing.py
@@ -0,0 +1,127 @@
+"""
+Parsing helpers for extracting rigging models from text
+"""
+
+from rigging.error import MissingModelError
+from rigging.model import ModelT
+
+
+def parse(text: str, model_type: type[ModelT]) -> tuple[ModelT, slice]:
+    """
+    Parses a single model from text.
+
+    Args:
+        text (str): The content to parse.
+        model_type (type[ModelT]): The type of model to parse.
+
+    Returns:
+        tuple[ModelT, slice]: The parsed model and the slice where it was found.
+
+    Raises:
+        MissingModelError: If no model of the given type is found.
+    """
+    return try_parse_many(text, model_type, fail_on_missing=True)[0]
+
+
+def try_parse(text: str, model_type: type[ModelT]) -> tuple[ModelT, slice] | None:
+    """
+    Tries to parse a model from text.
+
+    Args:
+        text (str): The content to parse.
+        model_type (type[ModelT]): The type of model to search for.
+
+    Returns:
+        tuple[ModelT, slice] | None: The first model that matches the given model type, or None if no match is found.
+    """
+    return next(iter(try_parse_many(text, model_type)), None)
+
+
+def parse_set(text: str, model_type: type[ModelT], *, minimum: int | None = None) -> list[tuple[ModelT, slice]]:
+    """
+    Parses a set of models with the specified identical type from text.
+
+    Args:
+        text (str): The content to parse.
+        model_type (type[ModelT]): The type of models to parse.
+        minimum (int | None, optional): The minimum number of models required. Defaults to None.
+
+    Returns:
+        list[tuple[ModelT, slice]]: A list of parsed models.
+
+    Raises:
+        MissingModelError: If the minimum number of models is not met.
+    """
+    return try_parse_set(text, model_type, minimum=minimum, fail_on_missing=True)
+
+
+def try_parse_set(
+    text: str, model_type: type[ModelT], *, minimum: int | None = None, fail_on_missing: bool = False
+) -> list[tuple[ModelT, slice]]:
+    """
+    Tries to parse a set of models with the specified identical type from text.
+
+    Args:
+        text (str): The content to parse.
+        model_type (type[ModelT]): The type of model to parse.
+        minimum (int | None, optional): The minimum number of models expected. Defaults to None.
+        fail_on_missing (bool, optional): Whether to raise an exception if models are missing. Defaults to False.
+
+    Returns:
+        list[tuple[ModelT, slice]]: The parsed models.
+
+    Raises:
+        MissingModelError: If the number of parsed models is less than the minimum required.
+    """
+    models = try_parse_many(text, model_type, fail_on_missing=fail_on_missing)
+    if minimum is not None and len(models) < minimum:
+        raise MissingModelError(f"Expected at least {minimum} {model_type.__name__} in message")
+    return models
+
+
+def parse_many(text: str, *types: type[ModelT]) -> list[tuple[ModelT, slice]]:
+    """
+    Parses multiple models of the specified non-identical types from text.
+
+    Args:
+        text (str): The content to parse.
+        *types (type[ModelT]): The types of models to parse.
+
+    Returns:
+        list[tuple[ModelT, slice]]: A list of parsed models.
+
+    Raises:
+        MissingModelError: If any of the models are missing.
+    """
+    return try_parse_many(text, *types, fail_on_missing=True)
+
+
+def try_parse_many(text: str, *types: type[ModelT], fail_on_missing: bool = False) -> list[tuple[ModelT, slice]]:
+    """
+    Tries to parse multiple models of the specified non-identical types from text.
+
+    Args:
+        text (str): The content to parse.
+        *types (type[ModelT]): The types of models to parse.
+        fail_on_missing (bool, optional): Whether to raise an exception if a model type is missing. Defaults to False.
+
+    Returns:
+        list[tuple[ModelT, slice]]: A list of parsed models.
+
+    Raises:
+        MissingModelError: If a model type is missing and `fail_on_missing` is True.
+    """
+    model: ModelT
+    parsed: list[tuple[ModelT, slice]] = []
+    for model_class in types:
+        try:
+            for model, slice_ in model_class.from_text(text):
+                parsed.append((model, slice_))
+        except MissingModelError as e:
+            if fail_on_missing:
+                raise e
+
+    return parsed
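The module-level helpers mirror the `Message` methods — a short, illustrative sketch reusing the hypothetical `Item` model from above:

```python
from rigging.parsing import parse, parse_set, try_parse

text = "<item>one</item> <item>two</item>"

item, _ = parse(text, Item)               # first match, or raises MissingModelError
maybe = try_parse("no tags here", Item)   # returns None instead of raising
pairs = parse_set(text, Item, minimum=2)  # all matches, paired with their slices
```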
diff --git a/rigging/tool.py b/rigging/tool.py
index d17282c..c75b4a6 100644
--- a/rigging/tool.py
+++ b/rigging/tool.py
@@ -145,12 +145,12 @@ def hit(self, target: Annotated[str, "Target of the hit"]) -> str:
             chat = generator.chat(...).using(Hammer()).run()
             ```
 
-    !!! note
+    Note:
         The `name` and `description` attributes are required and can be defined
         as class attributes or properties. If you define them as properties,
         you must also define a getter for them.
 
-    !!! note
+    Note:
         All functions on the tool must have type hints for their parameters and
         use the `Annotated` type hint to provide a description for each parameter.
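Putting both notes together, a tool definition might look like the following sketch — the `Hammer` class is hypothetical, mirroring the docstring's own example, and assumes `Tool` is exported at the package root:

```python
import typing as t

import rigging as rg

class Hammer(rg.Tool):
    name = "hammer"
    description = "Hit things with a hammer"

    # Type hints plus Annotated descriptions are required on every parameter
    def hit(self, target: t.Annotated[str, "Target of the hit"]) -> str:
        return f"You hit the {target}!"

chat = rg.get_generator("gpt-3.5-turbo").chat("Hit the nail, please.").using(Hammer()).run()
```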
From 9c1f21ee0076f699ae09680a85dabb45ecda51e1 Mon Sep 17 00:00:00 2001
From: monoxgas
Date: Sat, 4 May 2024 16:52:55 -0600
Subject: [PATCH 12/16] Lots more docs updates, cleaning docstrings, etc.
 Implemented then callbacks for chat

---
 docs/api/completion.md                  |   1 +
 docs/api/parsing.md                     |   1 +
 docs/index.md                           |  65 ++++++--
 docs/stylesheets/extra.css              |  15 +-
 docs/{tutorial => topics}/chat.md       |   0
 docs/{tutorial => topics}/generators.md |   0
 docs/{tutorial => topics}/logging.md    |   0
 docs/{tutorial => topics}/model.md      |   0
 docs/{tutorial => topics}/tools.md      |   0
 mkdocs.yml                              |  62 +++++--
 poetry.lock                             | 205 ++++++++++++++++++++++--
 pyproject.toml                          |   4 +-
 rigging/chat.py                         | 177 +++++++++++---------
 rigging/completion.py                   |  98 ++++++-----
 rigging/generator.py                    | 181 ++++++++------------
 rigging/logging.py                      |  13 +-
 rigging/message.py                      |  71 ++++----
 rigging/model.py                        |  34 ++--
 rigging/parsing.py                      |  47 +++---
 rigging/tool.py                         |   8 +-
 20 files changed, 603 insertions(+), 379 deletions(-)
 create mode 100644 docs/api/completion.md
 create mode 100644 docs/api/parsing.md
 rename docs/{tutorial => topics}/chat.md (100%)
 rename docs/{tutorial => topics}/generators.md (100%)
 rename docs/{tutorial => topics}/logging.md (100%)
 rename docs/{tutorial => topics}/model.md (100%)
 rename docs/{tutorial => topics}/tools.md (100%)

diff --git a/docs/api/completion.md b/docs/api/completion.md
new file mode 100644
index 0000000..a484659
--- /dev/null
+++ b/docs/api/completion.md
@@ -0,0 +1 @@
+::: rigging.completion
\ No newline at end of file
diff --git a/docs/api/parsing.md b/docs/api/parsing.md
new file mode 100644
index 0000000..53c2c2a
--- /dev/null
+++ b/docs/api/parsing.md
@@ -0,0 +1 @@
+::: rigging.parsing
\ No newline at end of file
diff --git a/docs/index.md b/docs/index.md
index 076e202..663269f 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -1,36 +1,69 @@
 # Rigging
 
-Rigging is a lightweight LLM interaction framework built on Pydantic XML and LiteLLM. It supports useful primitives for validating LLM output and adding tool calling abilities to models that don't natively support it. It also has various helpers for common tasks like structured object parsing, templating chats, overloading generation parameters, stripping chat segments, and continuing conversations.
+Rigging is a lightweight LLM interaction framework built on Pydantic XML. The goal is to make leveraging LLMs in production pipelines as simple and effective as possible. Here are the highlights:
 
-Modern python with type hints, pydantic validation, native serialization support, etc.
+- **Structured Pydantic models** can be used interchangeably with unstructured text output.
+- LiteLLM as the default generator giving you **instant access to a huge array of models**.
+- Add easy **tool calling** abilities to models which don't natively support it.
+- Store different models and configs as **simple connection strings** just like databases.
+- Chat templating, forking, continuations, generation parameter overloads, stripping segments, etc.
+- Modern python with type hints, async support, pydantic validation, serialization, etc.
+
+```py
+import rigging as rg
+from rigging.model import CommaDelimitedAnswer as Answer
+
+chat = rg.get_generator('gpt-4') \
+    .chat(f"Give me 3 famous authors between {Answer.xml_tags()} tags.") \
+    .until_parsed_as(Answer) \
+    .run()
+
+answer = chat.last.parse(Answer)
+print(answer.items)
+
+# ['J. R. R. Tolkien', 'Stephen King', 'George Orwell']
+```
+
+Rigging is built and maintained by [dreadnode](https://dreadnode.io) where we use it daily for our work.
+
+## Installation
+We publish every version to PyPI:
+```bash
+pip install rigging
+```
+
+If you want to build from source:
+```bash
+cd rigging/
+poetry install
+```
+
+## Workflow
+
+1. Get a [`Generator`][rigging.generator.Generator] object - usually with [`get_generator()`][rigging.generator.get_generator].
+2. Call [`generator.chat()`][rigging.generator.Generator.chat] to produce a [`PendingChat`][rigging.chat.PendingChat] and ready it for generation.
+3. Call [`pending.run()`][rigging.chat.PendingChat.run] to kick off generation and get your final [`Chat`][rigging.chat.Chat] object.
+
+[`PendingChat`][rigging.chat.PendingChat] objects hold any messages waiting to be delivered to an LLM in exchange
+for a new response message. These objects are also where most of the power in rigging comes from. You'll build a
+generation pipeline with options, parsing, callbacks, etc. After prep, this pending chat is converted into a
+final [`Chat`][rigging.chat.Chat] which holds all messages prior to generation ([`.prev`][rigging.chat.Chat.prev])
+and after generation ([`.next`][rigging.chat.Chat.next]).
+
+You should think of [`PendingChat`][rigging.chat.PendingChat] objects like the configurable pre-generation step
+with calls like [`.overload()`][rigging.chat.PendingChat.overload], [`.apply()`][rigging.chat.PendingChat.apply],
+[`.until()`][rigging.chat.PendingChat.until], [`.using()`][rigging.chat.PendingChat.using], etc. Once you call one
+of the many [`.run()`][rigging.chat.PendingChat.run] functions, the generator is used to produce the next
+message (or many messages) based on the prior context and any constraints you have in place. Once you have a
+[`Chat`][rigging.chat.Chat] object, the interaction is "done" and you can inspect and operate on the messages.
+
+You'll often see us use functional style chaining as most of our
+utility functions return the object back to you.
+
+```python
+chat = generator.chat(...) \
+    .using(...).until(...).overload(...)
\ + .run() ``` ### Continuing Chats diff --git a/docs/stylesheets/extra.css b/docs/stylesheets/extra.css index 75e20a4..81e4a4a 100644 --- a/docs/stylesheets/extra.css +++ b/docs/stylesheets/extra.css @@ -1,6 +1,6 @@ [data-md-color-scheme="slate"] { --md-primary-fg-color: #EAEAEA; - --md-accent-fg-color: hsla(250, 62%, 70%, 1); + --md-accent-fg-color: rgb(149, 133, 227); --md-primary-color: #EAEAEA; --md-primary-bg-color: #191919; @@ -15,4 +15,17 @@ --md-footer-bg-color--dark: hsla(0, 0%, 8%, 1); --md-typeset-a-color: var(--md-accent-fg-color); + + --md-code-hl-number-color: rgb(231, 107, 93); + --md-code-hl-special-color: hsla(340, 83%, 66%, 1); + --md-code-hl-function-color: hsla(291, 57%, 65%, 1); + --md-code-hl-constant-color: hsla(250, 62%, 70%, 1); + --md-code-hl-keyword-color: hsla(219, 66%, 64%, 1); + --md-code-hl-string-color: var(--md-accent-fg-color); + --md-code-hl-name-color: var(--md-default-fg-color--light); + --md-code-hl-operator-color: var(--md-default-fg-color--light); + --md-code-hl-punctuation-color: var(--md-default-fg-color--light); + --md-code-hl-comment-color: rgb(55, 161, 108); + --md-code-hl-generic-color: var(--md-default-fg-color--light); + --md-code-hl-variable-color: var(--md-default-fg-color--light); } \ No newline at end of file diff --git a/docs/tutorial/chat.md b/docs/topics/chat.md similarity index 100% rename from docs/tutorial/chat.md rename to docs/topics/chat.md diff --git a/docs/tutorial/generators.md b/docs/topics/generators.md similarity index 100% rename from docs/tutorial/generators.md rename to docs/topics/generators.md diff --git a/docs/tutorial/logging.md b/docs/topics/logging.md similarity index 100% rename from docs/tutorial/logging.md rename to docs/topics/logging.md diff --git a/docs/tutorial/model.md b/docs/topics/model.md similarity index 100% rename from docs/tutorial/model.md rename to docs/topics/model.md diff --git a/docs/tutorial/tools.md b/docs/topics/tools.md similarity index 100% rename from docs/tutorial/tools.md rename to docs/topics/tools.md diff --git a/mkdocs.yml b/mkdocs.yml index c019f6c..6ee69c1 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -5,20 +5,24 @@ site_url: https://rigging.dreadnode.io repo_url: https://github.com/dreadnode/rigging nav: - - Introduction: index.md - - Generators: tutorial/generators.md - - Chats: tutorial/chat.md - - Models: tutorial/model.md - - Tools: tutorial/tools.md - - Logging: tutorial/logging.md + - Home: index.md + - Topics: + - Generators: topics/generators.md + - Chat: topics/chat.md + - Models: topics/model.md + - Tools: topics/tools.md + - Logging: topics/logging.md - API: - - rigging.chat: api/chat.md - - rigging.generator: api/generator.md - - rigging.message: api/message.md - - rigging.model: api/model.md - - rigging.tool: api/tool.md - - rigging.logging: api/logging.md - - rigging.error: api/error.md + - rigging.chat: api/chat.md + - rigging.completion: api/completion.md + - rigging.generator: api/generator.md + - rigging.model: api/model.md + - rigging.message: api/message.md + - rigging.tool: api/tool.md + - rigging.parsing: api/parsing.md + - rigging.logging: api/logging.md + - rigging.error: api/error.md + theme: logo: assets/logo_black.png @@ -30,24 +34,41 @@ theme: scheme: slate primary: custom features: + - content.code.copy + - content.code.annotate - toc.integrate - navigation.footer - navigation.indexes - navigation.sections - navigation.expand - navigation.path - - content.code.copy - navigation.top + - navigation.tabs plugins: - search - section-index - - mkdocstrings + 
- social + - mkdocstrings: + handlers: + python: + paths: [rigging] + options: + docstring_options: + ignore_init_summary: true + docstring_section_style: list + heading_level: 2 + merge_init_into_class: true + show_signature_annotations: true + show_symbol_type_heading: true + show_symbol_type_toc: true + signature_crossrefs: true watch: - rigging/ markdown_extensions: + - admonition - pymdownx.highlight: anchor_linenums: true line_spans: __span @@ -55,8 +76,8 @@ markdown_extensions: - pymdownx.inlinehilite - pymdownx.snippets - pymdownx.superfences - - admonition - pymdownx.details + - pymdownx.tabbed extra_css: - stylesheets/extra.css @@ -65,4 +86,11 @@ extra_javascript: - https://polyfill.io/v3/polyfill.min.js?features=es6 extra: - homepage: https://dreadnode.io \ No newline at end of file + homepage: https://dreadnode.io + social: + - icon: fontawesome/brands/github + link: https://github.com/dreadnode + - icon: fontawesome/brands/twitter + link: https://twitter.com/dreadnode + - icon: fontawesome/brands/python + link: https://pypi.org/project/rigging/ \ No newline at end of file diff --git a/poetry.lock b/poetry.lock index 2d99fc3..266224a 100644 --- a/poetry.lock +++ b/poetry.lock @@ -216,6 +216,47 @@ files = [ [package.extras] dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"] +[[package]] +name = "cairocffi" +version = "1.7.0" +description = "cffi-based cairo bindings for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "cairocffi-1.7.0-py3-none-any.whl", hash = "sha256:1f29a8d41dbda4090c0aa33bcdea64f3b493e95f74a43ea107c4a8a7b7f632ef"}, + {file = "cairocffi-1.7.0.tar.gz", hash = "sha256:7761863603894305f3160eca68452f373433ca8745ab7dd445bd2c6ce50dcab7"}, +] + +[package.dependencies] +cffi = ">=1.1.0" + +[package.extras] +doc = ["sphinx", "sphinx_rtd_theme"] +test = ["numpy", "pikepdf", "pytest", "ruff"] +xcb = ["xcffib (>=1.4.0)"] + +[[package]] +name = "cairosvg" +version = "2.7.1" +description = "A Simple SVG Converter based on Cairo" +optional = false +python-versions = ">=3.5" +files = [ + {file = "CairoSVG-2.7.1-py3-none-any.whl", hash = "sha256:8a5222d4e6c3f86f1f7046b63246877a63b49923a1cd202184c3a634ef546b3b"}, + {file = "CairoSVG-2.7.1.tar.gz", hash = "sha256:432531d72347291b9a9ebfb6777026b607563fd8719c46ee742db0aef7271ba0"}, +] + +[package.dependencies] +cairocffi = "*" +cssselect2 = "*" +defusedxml = "*" +pillow = "*" +tinycss2 = "*" + +[package.extras] +doc = ["sphinx", "sphinx-rtd-theme"] +test = ["flake8", "isort", "pytest"] + [[package]] name = "certifi" version = "2024.2.2" @@ -432,6 +473,25 @@ traitlets = ">=4" [package.extras] test = ["pytest"] +[[package]] +name = "cssselect2" +version = "0.7.0" +description = "CSS selectors for Python ElementTree" +optional = false +python-versions = ">=3.7" +files = [ + {file = "cssselect2-0.7.0-py3-none-any.whl", hash = "sha256:fd23a65bfd444595913f02fc71f6b286c29261e354c41d722ca7a261a49b5969"}, + {file = "cssselect2-0.7.0.tar.gz", hash = "sha256:1ccd984dab89fc68955043aca4e1b03e0cf29cad9880f6e28e3ba7a74b14aa5a"}, +] + +[package.dependencies] +tinycss2 = "*" +webencodings = "*" + +[package.extras] +doc = ["sphinx", "sphinx_rtd_theme"] +test = ["flake8", "isort", "pytest"] + [[package]] name = "debugpy" version = "1.8.1" @@ -474,6 +534,17 @@ files = [ {file = "decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"}, ] +[[package]] +name = "defusedxml" +version = "0.7.1" +description = "XML bomb protection for Python stdlib modules" +optional = 
false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ + {file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"}, + {file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"}, +] + [[package]] name = "distro" version = "1.9.0" @@ -1178,23 +1249,25 @@ pyyaml = ">=5.1" [[package]] name = "mkdocs-material" -version = "9.5.20" +version = "9.5.21" description = "Documentation that simply works" optional = false python-versions = ">=3.8" files = [ - {file = "mkdocs_material-9.5.20-py3-none-any.whl", hash = "sha256:ad0094a7597bcb5d0cc3e8e543a10927c2581f7f647b9bb4861600f583180f9b"}, - {file = "mkdocs_material-9.5.20.tar.gz", hash = "sha256:986eef0250d22f70fb06ce0f4eac64cc92bd797a589ec3892ce31fad976fe3da"}, + {file = "mkdocs_material-9.5.21-py3-none-any.whl", hash = "sha256:210e1f179682cd4be17d5c641b2f4559574b9dea2f589c3f0e7c17c5bd1959bc"}, + {file = "mkdocs_material-9.5.21.tar.gz", hash = "sha256:049f82770f40559d3c2aa2259c562ea7257dbb4aaa9624323b5ef27b2d95a450"}, ] [package.dependencies] babel = ">=2.10,<3.0" +cairosvg = {version = ">=2.6,<3.0", optional = true, markers = "extra == \"imaging\""} colorama = ">=0.4,<1.0" jinja2 = ">=3.0,<4.0" markdown = ">=3.2,<4.0" mkdocs = ">=1.6,<2.0" mkdocs-material-extensions = ">=1.3,<2.0" paginate = ">=0.5,<1.0" +pillow = {version = ">=10.2,<11.0", optional = true, markers = "extra == \"imaging\""} pygments = ">=2.16,<3.0" pymdown-extensions = ">=10.2,<11.0" regex = ">=2022.4" @@ -1523,6 +1596,92 @@ files = [ [package.dependencies] ptyprocess = ">=0.5" +[[package]] +name = "pillow" +version = "10.3.0" +description = "Python Imaging Library (Fork)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pillow-10.3.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:90b9e29824800e90c84e4022dd5cc16eb2d9605ee13f05d47641eb183cd73d45"}, + {file = "pillow-10.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a2c405445c79c3f5a124573a051062300936b0281fee57637e706453e452746c"}, + {file = "pillow-10.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78618cdbccaa74d3f88d0ad6cb8ac3007f1a6fa5c6f19af64b55ca170bfa1edf"}, + {file = "pillow-10.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:261ddb7ca91fcf71757979534fb4c128448b5b4c55cb6152d280312062f69599"}, + {file = "pillow-10.3.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:ce49c67f4ea0609933d01c0731b34b8695a7a748d6c8d186f95e7d085d2fe475"}, + {file = "pillow-10.3.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:b14f16f94cbc61215115b9b1236f9c18403c15dd3c52cf629072afa9d54c1cbf"}, + {file = "pillow-10.3.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d33891be6df59d93df4d846640f0e46f1a807339f09e79a8040bc887bdcd7ed3"}, + {file = "pillow-10.3.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b50811d664d392f02f7761621303eba9d1b056fb1868c8cdf4231279645c25f5"}, + {file = "pillow-10.3.0-cp310-cp310-win32.whl", hash = "sha256:ca2870d5d10d8726a27396d3ca4cf7976cec0f3cb706debe88e3a5bd4610f7d2"}, + {file = "pillow-10.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:f0d0591a0aeaefdaf9a5e545e7485f89910c977087e7de2b6c388aec32011e9f"}, + {file = "pillow-10.3.0-cp310-cp310-win_arm64.whl", hash = "sha256:ccce24b7ad89adb5a1e34a6ba96ac2530046763912806ad4c247356a8f33a67b"}, + {file = "pillow-10.3.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = 
"sha256:5f77cf66e96ae734717d341c145c5949c63180842a545c47a0ce7ae52ca83795"}, + {file = "pillow-10.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e4b878386c4bf293578b48fc570b84ecfe477d3b77ba39a6e87150af77f40c57"}, + {file = "pillow-10.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fdcbb4068117dfd9ce0138d068ac512843c52295ed996ae6dd1faf537b6dbc27"}, + {file = "pillow-10.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9797a6c8fe16f25749b371c02e2ade0efb51155e767a971c61734b1bf6293994"}, + {file = "pillow-10.3.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:9e91179a242bbc99be65e139e30690e081fe6cb91a8e77faf4c409653de39451"}, + {file = "pillow-10.3.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:1b87bd9d81d179bd8ab871603bd80d8645729939f90b71e62914e816a76fc6bd"}, + {file = "pillow-10.3.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:81d09caa7b27ef4e61cb7d8fbf1714f5aec1c6b6c5270ee53504981e6e9121ad"}, + {file = "pillow-10.3.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:048ad577748b9fa4a99a0548c64f2cb8d672d5bf2e643a739ac8faff1164238c"}, + {file = "pillow-10.3.0-cp311-cp311-win32.whl", hash = "sha256:7161ec49ef0800947dc5570f86568a7bb36fa97dd09e9827dc02b718c5643f09"}, + {file = "pillow-10.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:8eb0908e954d093b02a543dc963984d6e99ad2b5e36503d8a0aaf040505f747d"}, + {file = "pillow-10.3.0-cp311-cp311-win_arm64.whl", hash = "sha256:4e6f7d1c414191c1199f8996d3f2282b9ebea0945693fb67392c75a3a320941f"}, + {file = "pillow-10.3.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:e46f38133e5a060d46bd630faa4d9fa0202377495df1f068a8299fd78c84de84"}, + {file = "pillow-10.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:50b8eae8f7334ec826d6eeffaeeb00e36b5e24aa0b9df322c247539714c6df19"}, + {file = "pillow-10.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9d3bea1c75f8c53ee4d505c3e67d8c158ad4df0d83170605b50b64025917f338"}, + {file = "pillow-10.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:19aeb96d43902f0a783946a0a87dbdad5c84c936025b8419da0a0cd7724356b1"}, + {file = "pillow-10.3.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:74d28c17412d9caa1066f7a31df8403ec23d5268ba46cd0ad2c50fb82ae40462"}, + {file = "pillow-10.3.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:ff61bfd9253c3915e6d41c651d5f962da23eda633cf02262990094a18a55371a"}, + {file = "pillow-10.3.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d886f5d353333b4771d21267c7ecc75b710f1a73d72d03ca06df49b09015a9ef"}, + {file = "pillow-10.3.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4b5ec25d8b17217d635f8935dbc1b9aa5907962fae29dff220f2659487891cd3"}, + {file = "pillow-10.3.0-cp312-cp312-win32.whl", hash = "sha256:51243f1ed5161b9945011a7360e997729776f6e5d7005ba0c6879267d4c5139d"}, + {file = "pillow-10.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:412444afb8c4c7a6cc11a47dade32982439925537e483be7c0ae0cf96c4f6a0b"}, + {file = "pillow-10.3.0-cp312-cp312-win_arm64.whl", hash = "sha256:798232c92e7665fe82ac085f9d8e8ca98826f8e27859d9a96b41d519ecd2e49a"}, + {file = "pillow-10.3.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:4eaa22f0d22b1a7e93ff0a596d57fdede2e550aecffb5a1ef1106aaece48e96b"}, + {file = "pillow-10.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cd5e14fbf22a87321b24c88669aad3a51ec052eb145315b3da3b7e3cc105b9a2"}, + {file = "pillow-10.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", 
hash = "sha256:1530e8f3a4b965eb6a7785cf17a426c779333eb62c9a7d1bbcf3ffd5bf77a4aa"}, + {file = "pillow-10.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d512aafa1d32efa014fa041d38868fda85028e3f930a96f85d49c7d8ddc0383"}, + {file = "pillow-10.3.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:339894035d0ede518b16073bdc2feef4c991ee991a29774b33e515f1d308e08d"}, + {file = "pillow-10.3.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:aa7e402ce11f0885305bfb6afb3434b3cd8f53b563ac065452d9d5654c7b86fd"}, + {file = "pillow-10.3.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0ea2a783a2bdf2a561808fe4a7a12e9aa3799b701ba305de596bc48b8bdfce9d"}, + {file = "pillow-10.3.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c78e1b00a87ce43bb37642c0812315b411e856a905d58d597750eb79802aaaa3"}, + {file = "pillow-10.3.0-cp38-cp38-win32.whl", hash = "sha256:72d622d262e463dfb7595202d229f5f3ab4b852289a1cd09650362db23b9eb0b"}, + {file = "pillow-10.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:2034f6759a722da3a3dbd91a81148cf884e91d1b747992ca288ab88c1de15999"}, + {file = "pillow-10.3.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:2ed854e716a89b1afcedea551cd85f2eb2a807613752ab997b9974aaa0d56936"}, + {file = "pillow-10.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dc1a390a82755a8c26c9964d457d4c9cbec5405896cba94cf51f36ea0d855002"}, + {file = "pillow-10.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4203efca580f0dd6f882ca211f923168548f7ba334c189e9eab1178ab840bf60"}, + {file = "pillow-10.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3102045a10945173d38336f6e71a8dc71bcaeed55c3123ad4af82c52807b9375"}, + {file = "pillow-10.3.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:6fb1b30043271ec92dc65f6d9f0b7a830c210b8a96423074b15c7bc999975f57"}, + {file = "pillow-10.3.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:1dfc94946bc60ea375cc39cff0b8da6c7e5f8fcdc1d946beb8da5c216156ddd8"}, + {file = "pillow-10.3.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b09b86b27a064c9624d0a6c54da01c1beaf5b6cadfa609cf63789b1d08a797b9"}, + {file = "pillow-10.3.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d3b2348a78bc939b4fed6552abfd2e7988e0f81443ef3911a4b8498ca084f6eb"}, + {file = "pillow-10.3.0-cp39-cp39-win32.whl", hash = "sha256:45ebc7b45406febf07fef35d856f0293a92e7417ae7933207e90bf9090b70572"}, + {file = "pillow-10.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:0ba26351b137ca4e0db0342d5d00d2e355eb29372c05afd544ebf47c0956ffeb"}, + {file = "pillow-10.3.0-cp39-cp39-win_arm64.whl", hash = "sha256:50fd3f6b26e3441ae07b7c979309638b72abc1a25da31a81a7fbd9495713ef4f"}, + {file = "pillow-10.3.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:6b02471b72526ab8a18c39cb7967b72d194ec53c1fd0a70b050565a0f366d355"}, + {file = "pillow-10.3.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:8ab74c06ffdab957d7670c2a5a6e1a70181cd10b727cd788c4dd9005b6a8acd9"}, + {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:048eeade4c33fdf7e08da40ef402e748df113fd0b4584e32c4af74fe78baaeb2"}, + {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e2ec1e921fd07c7cda7962bad283acc2f2a9ccc1b971ee4b216b75fad6f0463"}, + {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:4c8e73e99da7db1b4cad7f8d682cf6abad7844da39834c288fbfa394a47bbced"}, + {file = 
"pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:16563993329b79513f59142a6b02055e10514c1a8e86dca8b48a893e33cf91e3"}, + {file = "pillow-10.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:dd78700f5788ae180b5ee8902c6aea5a5726bac7c364b202b4b3e3ba2d293170"}, + {file = "pillow-10.3.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:aff76a55a8aa8364d25400a210a65ff59d0168e0b4285ba6bf2bd83cf675ba32"}, + {file = "pillow-10.3.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:b7bc2176354defba3edc2b9a777744462da2f8e921fbaf61e52acb95bafa9828"}, + {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:793b4e24db2e8742ca6423d3fde8396db336698c55cd34b660663ee9e45ed37f"}, + {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d93480005693d247f8346bc8ee28c72a2191bdf1f6b5db469c096c0c867ac015"}, + {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c83341b89884e2b2e55886e8fbbf37c3fa5efd6c8907124aeb72f285ae5696e5"}, + {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1a1d1915db1a4fdb2754b9de292642a39a7fb28f1736699527bb649484fb966a"}, + {file = "pillow-10.3.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a0eaa93d054751ee9964afa21c06247779b90440ca41d184aeb5d410f20ff591"}, + {file = "pillow-10.3.0.tar.gz", hash = "sha256:9d2455fbf44c914840c793e89aa82d0e1763a14253a000743719ae5946814b2d"}, +] + +[package.extras] +docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"] +fpx = ["olefile"] +mic = ["olefile"] +tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] +typing = ["typing-extensions"] +xmp = ["defusedxml"] + [[package]] name = "platformdirs" version = "4.2.1" @@ -1788,17 +1947,16 @@ lxml = ["lxml (>=4.9.0)"] [[package]] name = "pygments" -version = "2.17.2" +version = "2.18.0" description = "Pygments is a syntax highlighting package written in Python." 
optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "pygments-2.17.2-py3-none-any.whl", hash = "sha256:b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c"}, - {file = "pygments-2.17.2.tar.gz", hash = "sha256:da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367"}, + {file = "pygments-2.18.0-py3-none-any.whl", hash = "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a"}, + {file = "pygments-2.18.0.tar.gz", hash = "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199"}, ] [package.extras] -plugins = ["importlib-metadata"] windows-terminal = ["colorama (>=0.4.6)"] [[package]] @@ -2294,6 +2452,24 @@ requests = ">=2.26.0" [package.extras] blobfile = ["blobfile (>=2)"] +[[package]] +name = "tinycss2" +version = "1.3.0" +description = "A tiny CSS parser" +optional = false +python-versions = ">=3.8" +files = [ + {file = "tinycss2-1.3.0-py3-none-any.whl", hash = "sha256:54a8dbdffb334d536851be0226030e9505965bb2f30f21a4a82c55fb2a80fae7"}, + {file = "tinycss2-1.3.0.tar.gz", hash = "sha256:152f9acabd296a8375fbca5b84c961ff95971fcfc32e79550c8df8e29118c54d"}, +] + +[package.dependencies] +webencodings = ">=0.4" + +[package.extras] +doc = ["sphinx", "sphinx_rtd_theme"] +test = ["pytest", "ruff"] + [[package]] name = "tokenizers" version = "0.19.1" @@ -2557,6 +2733,17 @@ files = [ {file = "wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5"}, ] +[[package]] +name = "webencodings" +version = "0.5.1" +description = "Character encoding aliases for legacy web content" +optional = false +python-versions = "*" +files = [ + {file = "webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78"}, + {file = "webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"}, +] + [[package]] name = "win32-setctime" version = "1.1.0" @@ -2692,4 +2879,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p [metadata] lock-version = "2.0" python-versions = "<3.13,>=3.10" -content-hash = "ff99446b72f4c067feddd1ac3abf93f984b8d6e29ac96adde3633b804871c538" +content-hash = "544f9df84f9b877c3bcbb425bf0970f630c2b7191de9ab84ef54e8c17f406911" diff --git a/pyproject.toml b/pyproject.toml index a01b84a..c9f1561 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -25,10 +25,12 @@ pytest = "^8.0.0" [tool.poetry.group.docs.dependencies] mkdocs = "^1.6.0" -mkdocs-material = "^9.5.20" +mkdocs-material = {extras = ["imaging"], version = "^9.5.20"} mkdocstrings = "^0.25.0" mkdocstrings-python = "^1.10.0" mkdocs-section-index = "^0.3.9" +pymdown-extensions = "^10.8.1" +pygments = "^2.18.0" [build-system] requires = ["poetry-core"] diff --git a/rigging/chat.py b/rigging/chat.py index 363dd8f..1397628 100644 --- a/rigging/chat.py +++ b/rigging/chat.py @@ -39,29 +39,27 @@ class Chat(BaseModel): """ Represents a completed chat conversation. - - Attributes: - uuid (UUID): The unique identifier for the chat. - timestamp (datetime): The timestamp when the chat was created. - messages (list[Message]): The list of messages prior to generation. - generated (list[Message]): The list of messages resulting from the generation. - metadata (dict[str, Any]): Additional metadata for the chat. - pending (Optional[PendingChat]): The pending chat associated with the chat. 
- generator_id (Optional[str]): The identifier of the generator used to create the chat """ model_config = ConfigDict(arbitrary_types_allowed=True) uuid: UUID = Field(default_factory=uuid4) + """The unique identifier for the chat.""" timestamp: datetime = Field(default_factory=datetime.now, repr=False) + """The timestamp when the chat was created.""" messages: list[Message] + """The list of messages prior to generation.""" generated: list[Message] = Field(default_factory=list) + """The list of messages resulting from the generation.""" metadata: dict[str, t.Any] = Field(default_factory=dict) + """Additional metadata for the chat.""" pending: t.Optional["PendingChat"] = Field(None, exclude=True, repr=False) + """The pending chat associated with the chat.""" @computed_field(repr=False) def generator_id(self) -> str | None: + """The identifier of the generator used to create the chat""" if self.pending is not None: return self.pending.generator.to_identifier(self.pending.params) return None @@ -77,11 +75,10 @@ def __init__( Initialize a Chat object. Args: - messages (Messages): The messages for the chat. - generated (Messages | None, optional): The next messages for the chat. Defaults to None. - pending (Optional[PendingChat], optional): The pending chat. Defaults to None. - **kwargs (Any): Additional keyword arguments (typically used for deserialization) - + messages: The messages for the chat. + generated: The next messages for the chat. + pending: The pending chat. + **kwargs: Additional keyword arguments (typically used for deserialization) """ from rigging.generator import get_generator @@ -132,7 +129,7 @@ def meta(self, **kwargs: t.Any) -> "Chat": **kwargs: Key-value pairs representing the metadata to be updated. Returns: - Chat: The updated chat object. + The updated chat object. """ self.metadata.update(kwargs) return self @@ -142,12 +139,12 @@ def restart(self, *, generator: t.Optional["Generator"] = None, include_all: boo Attempt to convert back to a PendingChat for further generation. Args: - generator (Optional[Generator]): The generator to use for the restarted chat. Otherwise + generator: The generator to use for the restarted chat. Otherwise the generator from the original PendingChat will be used. - include_all (bool): Whether to include the next messages in the restarted chat. Defaults to False. + include_all: Whether to include the next messages in the restarted chat. Returns: - PendingChat: The restarted chat. + The restarted chat. Raises: ValueError: If the chat was not created with a PendingChat and no generator is provided. @@ -170,12 +167,12 @@ def fork( Forks the chat by creating calling [rigging.chat.Chat.restart][] and appending the specified messages. Args: - messages (Union[Sequence[Message], Sequence[MessageDict], Message, MessageDict, str]): + messages: The messages to be added to the new `PendingChat` instance. - include_all (bool, optional): Whether to include the next messages in the restarted chat. Defaults to False. + include_all: Whether to include the next messages in the restarted chat. Returns: - PendingChat: A new instance of `PendingChat` with the specified messages added. + A new instance of `PendingChat` with the specified messages added. """ return self.restart(include_all=include_all).add(messages) @@ -203,7 +200,7 @@ def apply(self, **kwargs: str) -> "Chat": **kwargs: The string mapping of replacements. Returns: - Chat: The modified Chat object. + The modified Chat object. 
""" self.last.apply(**kwargs) return self @@ -216,20 +213,43 @@ def apply_to_all(self, **kwargs: str) -> "Chat": **kwargs: The string mapping of replacements. Returns: - Chat: The modified chat object. - + The modified chat object. """ for message in self.all: message.apply(**kwargs) return self def strip(self, model_type: type[Model], fail_on_missing: bool = False) -> "Chat": + """ + Strips all parsed parts of a particular type from the message content. + + Args: + model_type: The type of model to keep in the chat. + fail_on_missing: Whether to raise an exception if a message of the specified model type is not found. + + Returns: + A new Chat object with only the messages of the specified model type. + """ new = self.clone() for message in new.all: message.strip(model_type, fail_on_missing=fail_on_missing) return new def inject_system_content(self, content: str) -> Message: + """ + Injects content into the chat as a system message. + + Note: + If the chat is empty or the first message is not a system message, + a new system message with the given content is inserted at the beginning of the chat. + If the first message is a system message, the content is appended to it. + + Args: + content: The content to be injected. + + Returns: + The updated system message. + """ if len(self.messages) == 0 or self.messages[0].role != "system": self.messages.insert(0, Message(role="system", content=content)) elif self.messages[0].role == "system": @@ -237,6 +257,12 @@ def inject_system_content(self, content: str) -> Message: return self.messages[0] def inject_tool_prompt(self, tools: t.Sequence[Tool]) -> None: + """ + Injects a default tool use prompt into the system prompt. + + Args: + tools: A sequence of Tool objects. + """ call_format = ToolCalls.xml_example() tool_description_list = ToolDescriptionList(tools=[t.get_description() for t in tools]) tool_system_prompt = system_tool_extension(call_format, tool_description_list.to_pretty_xml()) @@ -253,27 +279,19 @@ def inject_tool_prompt(self, tools: t.Sequence[Tool]) -> None: class PendingChat: """ Represents a pending chat that can be modified and executed. - - Attributes: - generator (Generator): The generator object responsible for generating the chat. - chat (Chat): The chat object representing the conversation. - params (Optional[GenerateParams]): The parameters for generating the chat. - metadata (dict[str, Any]): Additional metadata associated with the chat. - until_callbacks (list[tuple[UntilMessageCallback, bool, bool, int]]): List of until message callbacks. - until_types (list[type[Model]]): List of until message types. - until_tools (list[Tool]): List of until tools. - inject_tool_prompt (bool): Flag indicating whether to inject tool prompts. Default is True. - force_tool (bool): Flag indicating whether to force the use of a tool. Default is False. - then_callbacks (list[ThenChatCallback]): List of callbacks to be executed after generation. 
""" def __init__( self, generator: "Generator", messages: t.Sequence[Message], params: t.Optional["GenerateParams"] = None ): self.generator: "Generator" = generator + """The generator object responsible for generating the chat.""" self.chat: Chat = Chat(messages, pending=self) + """The chat object representing the conversation.""" self.params = params + """The parameters for generating messages.""" self.metadata: dict[str, t.Any] = {} + """Additional metadata associated with the chat.""" # (callback, attempt_recovery, drop_dialog, max_rounds) self.until_callbacks: list[tuple[UntilMessageCallback, bool, bool, int]] = [] @@ -296,7 +314,7 @@ def overload(self, **kwargs: t.Any) -> "PendingChat": **kwargs: Keyword arguments representing the parameters to be overloaded. Returns: - PendingChat: A new instance of PendingChat with the overloaded parameters. + A new instance of PendingChat with the overloaded parameters. """ from rigging.generator import GenerateParams @@ -310,10 +328,10 @@ def with_params(self, params: "GenerateParams") -> "PendingChat": This will trigger a `clone` if overload params have already been set. Args: - params (GenerateParams): The parameters to set for the chat. + params: The parameters to set for the chat. Returns: - PendingChat: A new instance of PendingChat with the updated parameters. + A new instance of PendingChat with the updated parameters. """ if self.params is not None: new = self.clone() @@ -334,11 +352,10 @@ def add( the content will be appended. instead of a new message being created. Args: - messages (Union[Sequence[MessageDict], Sequence[Message], MessageDict, Message, str]): - The messages to be added to the chat. It can be a single message or a sequence of messages. + messages: The messages to be added to the chat. It can be a single message or a sequence of messages. Returns: - PendingChat: The updated PendingChat object. + The updated PendingChat object. """ message_list = Message.fit_as_list(messages) # If the last message is the same role as the first new message, append to it @@ -361,7 +378,7 @@ def fork( messages: A sequence of messages or a single message to be added to the new chat. Returns: - A new instance of `PendingChat` with the specified messages added. + A new instance the pending chat with the specified messages added. """ return self.clone().add(messages) @@ -370,12 +387,12 @@ def clone(self, *, only_messages: bool = False) -> "PendingChat": Creates a clone of the current `PendingChat` instance. Args: - only_messages (bool, optional): If True, only the messages will be cloned. + only_messages: If True, only the messages will be cloned. If False (default), the entire `PendingChat` instance will be cloned including until callbacks, types, and tools. Returns: - PendingChat: A new instance of `PendingChat` that is a clone of the current instance. + A new instance of `PendingChat` that is a clone of the current instance. """ new = PendingChat(self.generator, [], self.params) new.chat = self.chat.clone() @@ -396,7 +413,7 @@ def meta(self, **kwargs: t.Any) -> "PendingChat": **kwargs: Key-value pairs representing the metadata to be updated. Returns: - PendingChat: The updated chat object. + The updated chat object. """ self.metadata.update(kwargs) return self @@ -417,10 +434,10 @@ def process(chat: Chat) -> Chat | None: ``` Args: - callback (ThenChatCallback): The callback function to be executed. + callback: The callback function to be executed. Returns: - PendingChat: The current instance of the chat. + The current instance of the chat. 
""" self.then_callbacks.append(callback) return self @@ -433,7 +450,7 @@ def apply(self, **kwargs: str) -> "PendingChat": **kwargs: Keyword arguments to be applied to the chat. Returns: - PendingChat: A new instance of PendingChat with the applied arguments. + A new instance of PendingChat with the applied arguments. """ new = self.clone() new.chat.apply(**kwargs) @@ -447,7 +464,7 @@ def apply_to_all(self, **kwargs: str) -> "PendingChat": **kwargs: Keyword arguments to be applied to the chat. Returns: - PendingChat: A new instance of PendingChat with the applied arguments. + A new instance of PendingChat with the applied arguments. """ new = self.clone() new.chat.apply_to_all(**kwargs) @@ -483,16 +500,16 @@ def callback(message: Message) -> tuple[bool, list[Message]]: Whether these messages get used or discarded in the next round depends on `attempt_recovery`. Args: - callback (UntilMessageCallback): The callback function to be executed. - attempt_recovery (bool, optional): Whether to attempt recovery by continuing to append prior messages - before the next round of generation. Defaults to False. - drop_dialog (bool, optional): Whether to drop the intermediate dialog of recovery before returning - the final chat back to the caller. Defaults to True. - max_rounds (int, optional): The maximum number of rounds to attempt generation + callbacks - before giving uop. Defaults to DEFAULT_MAX_ROUNDS. + callback: The callback function to be executed. + attempt_recovery: Whether to attempt recovery by continuing to append prior messages + before the next round of generation. + drop_dialog: Whether to drop the intermediate dialog of recovery before returning + the final chat back to the caller. + max_rounds: The maximum number of rounds to attempt generation + callbacks + before giving uop. Returns: - PendingChat: The current instance of the chat. + The current instance of the chat. """ self.until_callbacks.append((callback, attempt_recovery, drop_dialog, max_rounds)) return self @@ -511,18 +528,18 @@ def using( Adds a tool or a sequence of tools to participate in the generation process. Args: - tool (Tool | Sequence[Tool]): The tool or sequence of tools to be added. - force (bool, optional): Whether to force the use of the tool(s) at least once. Defaults to False. - attempt_recovery (bool, optional): Whether to attempt recovery if the tool(s) fail by providing - validation feedback to the model before the next round. Defaults to True. - drop_dialog (bool, optional): Whether to drop the intermediate dialog of recovery efforts - before returning the final chat to the caller. Defaults to False. - max_rounds (int, optional): The maximum number of rounds to attempt recovery. Defaults to DEFAULT_MAX_ROUNDS. - inject_prompt (bool | None, optional): Whether to inject the tool guidance prompt into a - system message. Defaults to None and will override self.inject_tool_prompt if provided. + tool: The tool or sequence of tools to be added. + force: Whether to force the use of the tool(s) at least once. + attempt_recovery: Whether to attempt recovery if the tool(s) fail by providing + validation feedback to the model before the next round. + drop_dialog: Whether to drop the intermediate dialog of recovery efforts + before returning the final chat to the caller. + max_rounds: The maximum number of rounds to attempt recovery. + inject_prompt: Whether to inject the tool guidance prompt into a + system message.and will override self.inject_tool_prompt if provided. 
@@ -511,18 +528,18 @@ def using(
         Adds a tool or a sequence of tools to participate in the generation process.
 
         Args:
-            tool (Tool | Sequence[Tool]): The tool or sequence of tools to be added.
-            force (bool, optional): Whether to force the use of the tool(s) at least once. Defaults to False.
-            attempt_recovery (bool, optional): Whether to attempt recovery if the tool(s) fail by providing
-                validation feedback to the model before the next round. Defaults to True.
-            drop_dialog (bool, optional): Whether to drop the intermediate dialog of recovery efforts
-                before returning the final chat to the caller. Defaults to False.
-            max_rounds (int, optional): The maximum number of rounds to attempt recovery. Defaults to DEFAULT_MAX_ROUNDS.
-            inject_prompt (bool | None, optional): Whether to inject the tool guidance prompt into a
-                system message. Defaults to None and will override self.inject_tool_prompt if provided.
+            tool: The tool or sequence of tools to be added.
+            force: Whether to force the use of the tool(s) at least once.
+            attempt_recovery: Whether to attempt recovery if the tool(s) fail by providing
+                validation feedback to the model before the next round.
+            drop_dialog: Whether to drop the intermediate dialog of recovery efforts
+                before returning the final chat to the caller.
+            max_rounds: The maximum number of rounds to attempt recovery.
+            inject_prompt: Whether to inject the tool guidance prompt into a
+                system message. If provided, this will override `self.inject_tool_prompt`.
 
         Returns:
-            PendingChat: The updated PendingChat object.
+            The updated PendingChat object.
         """
         self.until_tools += tool if isinstance(tool, t.Sequence) else [tool]
@@ -551,16 +568,16 @@ def until_parsed_as(
         before the generation process completes.
 
         Args:
-            *types (type[ModelT]): The type or types of models to wait for.
-            attempt_recovery (bool, optional): Whether to attempt recovery if parsing fails by providing
-                validation feedback to the model before the next round. Defaults to True.
-            drop_dialog (bool, optional): Whether to drop the intermediate dialog of recovery efforts
-                before returning the final chat to the caller. Defaults to False.
-            max_rounds (int, optional): The maximum number of rounds to try to parse
-                successfully. Defaults to DEFAULT_MAX_ROUNDS.
+            *types: The type or types of models to wait for.
+            attempt_recovery: Whether to attempt recovery if parsing fails by providing
+                validation feedback to the model before the next round.
+            drop_dialog: Whether to drop the intermediate dialog of recovery efforts
+                before returning the final chat to the caller.
+            max_rounds: The maximum number of rounds to try to parse
+                successfully.
 
         Returns:
-            PendingChat: The updated PendingChat object.
+            The updated PendingChat object.
         """
         self.until_types += types
         if next((c for c in self.until_callbacks if c[0] == self._until_parse_callback), None) is None:
@@ -717,7 +734,7 @@ def run(self, count: int | None = None) -> Chat | list[Chat]:
         If `count` is provided, `run_many` will be called instead.
 
         Args:
-            count (int | None, optional): The number of times to generate using the same inputs.
+            count: The number of times to generate using the same inputs.
 
         Returns:
             Chat | list[Chat]: The chat object or a list of chat objects, depending on the value of `count`.
@@ -743,12 +760,12 @@ def run_many(self, count: int) -> list[Chat]:
         Executes the generation process multiple times with the same inputs.
 
         Parameters:
-            count (int): The number of times to execute the generation process.
+            count: The number of times to execute the generation process.
 
         Returns:
-            list[Chat]: A list of Chat objects representing the results of each execution.
+            A list of Chat objects representing the results of each execution.
         """
-        return [self._then(self.run()) for _ in range(count)]
+        return [self.run() for _ in range(count)]
 
     __call__ = run
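And a sketch of fan-out generation with `run_many` — the selection criterion is arbitrary:

```python
import rigging as rg

generator = rg.get_generator("gpt-3.5-turbo")

# Sample the same prompt several times and keep the longest response
chats = generator.chat("Propose a project name.").run_many(3)
best = max(chats, key=lambda c: len(c.last.content))
```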
diff --git a/rigging/completion.py b/rigging/completion.py
index 012716d..07ce7c3 100644
--- a/rigging/completion.py
+++ b/rigging/completion.py
@@ -33,27 +33,25 @@ class Completion(BaseModel):
     """
     Represents a completed text generation.
-
-    Attributes:
-        uuid (UUID): The unique identifier.
-        timestamp (datetime): The timestamp when the completion was created.
-        text (str): The original text.
-        generated (str): The generated text.
-        pending (Optional[PendingCompletion]): The pending completion associated with this completion.
-        generator_id (Optional[str]): The identifier of the generator used to create the completion
     """
 
     model_config = ConfigDict(arbitrary_types_allowed=True)
 
     uuid: UUID = Field(default_factory=uuid4)
+    """The unique identifier."""
     timestamp: datetime = Field(default_factory=datetime.now, repr=False)
+    """The timestamp when the completion was created."""
     text: str
+    """The original text."""
     generated: str
+    """The generated text."""
     pending: t.Optional["PendingCompletion"] = Field(None, exclude=True, repr=False)
+    """The pending completion associated with this completion."""
 
     @computed_field(repr=False)
     def generator_id(self) -> str | None:
+        """The identifier of the generator used to create the completion"""
         if self.pending is not None:
             return self.pending.generator.to_identifier(self.pending.params)
         return None
@@ -69,9 +67,9 @@ def __init__(
         Initialize a Completion object.
 
         Args:
-            text (str): The original text.
-            generated (Optional[str]): The generated text.
-            pending (Optional[PendingCompletion]): The pending completion associated with this completion
+            text: The original text.
+            generated: The generated text.
+            pending: The pending completion associated with this completion
             **kwargs: Additional keyword arguments (typically used for serialization).
         """
         from rigging.generator import get_generator
@@ -100,12 +98,12 @@ def restart(self, *, generator: t.Optional["Generator"] = None, include_all: boo
         Attempt to convert back to a PendingCompletion for further generation.
 
         Args:
-            generator (Optional[Generator]): The generator to use for the restarted chat. Otherwise
+            generator: The generator to use for the restarted completion. Otherwise
                 the generator from the original PendingCompletion will be used.
-            include_all (bool): Whether to include the generation before the next round. Defaults to False.
+            include_all: Whether to include the generation before the next round.
 
         Returns:
-            PendingCompletion: The restarted completion.
+            The restarted completion.
 
         Raises:
             ValueError: If the completion was not created with a PendingCompletion and no generator is provided.
@@ -123,11 +121,10 @@ def fork(self, text: str) -> "PendingCompletion":
         Forks the completion by calling [rigging.completion.Completion.restart][] and appending the specified text.
 
         Args:
-            text (str): The text to append.
+            text: The text to append.
 
         Returns:
-            PendingCompletion: A new instance of `PendingCompletion` with the specified messages added.
-
+            A new instance of a pending completion with the specified text added.
         """
         return self.restart().add(text)
@@ -144,21 +141,17 @@ class PendingCompletion:
     """
     Represents a pending completion that can be modified and executed.
-
-    Attributes:
-        generator (Generator): The generator object responsible for generating the completion.
-        text (str): The text to be completed.
-        params (Optional[GenerateParams]): The parameters for generating the completion.
-        metadata (dict[str, Any]): Additional metadata associated with the completion.
-        until_callbacks (list[tuple[UntilCompletionCallback, bool, int]]): List of until completion callbacks.
-        until_types (list[type[Model]]): List of until completion types.
""" def __init__(self, generator: "Generator", text: str, params: t.Optional["GenerateParams"] = None): self.generator: "Generator" = generator + """The generator object responsible for generating the completion.""" self.text = text + """The text to be completed.""" self.params = params + """The parameters for generating the completion.""" self.metadata: dict[str, t.Any] = {} + """Additional metadata associated with the completion.""" # (callback, all_text, max_rounds) self.until_callbacks: list[tuple[UntilCompletionCallback, bool, int]] = [] @@ -177,7 +170,7 @@ def overload(self, **kwargs: t.Any) -> "PendingCompletion": **kwargs: Keyword arguments representing the parameters to be overloaded. Returns: - PendingCompletion: A new instance of PendingCompletion with the overloaded parameters. + A new instance of PendingCompletion with the overloaded parameters. """ from rigging.generator import GenerateParams @@ -191,10 +184,10 @@ def with_params(self, params: "GenerateParams") -> "PendingCompletion": This will trigger a `clone` if overload params have already been set. Args: - params (GenerateParams): The parameters to set for the completion. + params: The parameters to set for the completion. Returns: - PendingCompletion: A new instance of PendingCompletion with the updated parameters. + A new instance of PendingCompletion with the updated parameters. """ if self.params is not None: new = self.clone() @@ -209,10 +202,10 @@ def add(self, text: str) -> "PendingCompletion": Appends new text to the internal text before generation. Args: - text (str): The text to be added to the completion. + text: The text to be added to the completion. Returns: - PendingCompletion: The updated PendingCompletion object. + The updated PendingCompletion object. """ self.text += text return self @@ -236,12 +229,12 @@ def clone(self, *, only_text: bool = False) -> "PendingCompletion": Creates a clone of the current `PendingCompletion` instance. Args: - only_text (bool, optional): If True, only the text will be cloned. + only_text: If True, only the text will be cloned. If False (default), the entire `PendingCompletion` instance will be cloned including until callbacks and types. Returns: - PendingCompletion: A new instance of `PendingCompletion` that is a clone of the current instance. + A new instance of `PendingCompletion` that is a clone of the current instance. """ new = PendingCompletion(self.generator, self.text, self.params) if not only_text: @@ -258,7 +251,7 @@ def meta(self, **kwargs: t.Any) -> "PendingCompletion": **kwargs: Key-value pairs representing the metadata to be updated. Returns: - PendingCompletion: The updated completion object. + The updated completion object. """ self.metadata.update(kwargs) return self @@ -271,7 +264,7 @@ def apply(self, **kwargs: str) -> "PendingCompletion": **kwargs: Keyword arguments to be applied to the text. Returns: - PendingCompletion: A new instance of PendingCompletion with the applied arguments. + A new instance of PendingCompletion with the applied arguments. """ new = self.clone() template = string.Template(self.text) @@ -301,14 +294,14 @@ def callback(text: str) -> bool: ``` Args: - callback (UntilCompletionCallback): The callback function to be executed. - use_all_text (bool, optional): Whether to pass the entire text (including prompt) to the callback. - Defaults to False. - max_rounds (int, optional): The maximum number of rounds to attempt generation + callbacks - before giving up. Defaults to DEFAULT_MAX_ROUNDS. 
+ callback: The callback function to be executed. + use_all_text: Whether to pass the entire text (including prompt) to the callback. + + max_rounds: The maximum number of rounds to attempt generation + callbacks + before giving up. Returns: - PendingCompletion: The current instance of the completion. + The current instance of the completion. """ self.until_callbacks.append((callback, use_all_text, max_rounds)) return self @@ -324,14 +317,14 @@ def until_parsed_as( before the generation process completes. Args: - *types (type[ModelT]): The type or types of models to wait for. - use_all_text (bool, optional): Whether to pass the entire text (including prompt) to the parser. - Defaults to False. - max_rounds (int, optional): The maximum number of rounds to try to parse - successfully. Defaults to DEFAULT_MAX_ROUNDS. + *types: The type or types of models to wait for. + use_all_text: Whether to pass the entire text (including prompt) to the parser. + + max_rounds: The maximum number of rounds to try to parse + successfully. Returns: - PendingCompletion: The updated PendingCompletion object. + The updated PendingCompletion object. """ self.until_types += types if next((c for c in self.until_callbacks if c[0] == self._until_parse_callback), None) is None: @@ -393,11 +386,10 @@ def run(self, count: int | None = None) -> Completion | list[Completion]: If `count` is provided, `run_many` will be called instead. Args: - count (int | None, optional): The number of times to generate using the same inputs. + count: The number of times to generate using the same inputs. Returns: - Completion | list[Completion]: The completion object or a list of completion objects, - depending on the value of `count`. + The completion object or a list of completion objects, depending on the value of `count`. """ if count is not None: return self.run_many(count) @@ -419,10 +411,10 @@ def run_many(self, count: int) -> list[Completion]: Executes the generation process multiple times with the same inputs. Parameters: - count (int): The number of times to execute the generation process. + count: The number of times to execute the generation process. Returns: - list[Completion]: A list of Completion objects representing the results of each execution. + A list of Completion objects representing the results of each execution. """ return [self.run() for _ in range(count)] @@ -437,7 +429,7 @@ async def arun(self, count: int) -> list[Completion]: ... async def arun(self, count: int | None = None) -> Completion | list[Completion]: - """async variant of the [rigging.chat.PendingCompletion.run][] method.""" + """async variant of the [rigging.completion.PendingCompletion.run][] method.""" if count is not None: return await self.arun_many(count) @@ -454,6 +446,6 @@ async def arun(self, count: int | None = None) -> Completion | list[Completion]: return Completion(self.text, outbound, pending=self) async def arun_many(self, count: int) -> list[Completion]: - """async variant of the [rigging.chat.PendingCompletion.run_many][] method.""" + """async variant of the [rigging.completion.PendingCompletion.run_many][] method.""" chats = await asyncio.gather(*[self.arun() for _ in range(count)]) return list(chats) diff --git a/rigging/generator.py b/rigging/generator.py index 89b2862..914abe6 100644 --- a/rigging/generator.py +++ b/rigging/generator.py @@ -42,32 +42,39 @@ class GenerateParams(BaseModel): Note: Use the `extra` field to pass additional parameters to the API. - - Attributes: - temperature (float | None): The sampling temperature. 
- max_tokens (int | None): The maximum number of tokens to generate. - top_p (float | None): The nucleus sampling probability. - stop (list[str] | None): A list of stop sequences to stop generation at. - presence_penalty (float | None): The presence penalty. - frequency_penalty (float | None): The frequency penalty. - api_base (str | None): The base URL for the API. - timeout (int | None): The timeout for the API request. - seed (int | None): The seed. - extra (dict[str, t.Any]): Extra parameters. """ model_config = ConfigDict(extra="forbid") temperature: float | None = None + """The sampling temperature.""" + max_tokens: int | None = None + """The maximum number of tokens to generate.""" + top_p: float | None = None + """The nucleus sampling probability.""" + stop: list[str] | None = None + """A list of stop sequences to stop generation at.""" + presence_penalty: float | None = None + """The presence penalty.""" + frequency_penalty: float | None = None + """The frequency penalty.""" + api_base: str | None = None + """The base URL for the API.""" + timeout: int | None = None + """The timeout for the API request.""" + seed: int | None = None + """The random seed.""" + extra: dict[str, t.Any] = Field(default_factory=dict) + """Extra parameters to be passed to the API.""" @field_validator("stop", mode="before") def validate_stop(cls, value: t.Any) -> t.Any: @@ -92,16 +99,14 @@ class Generator(BaseModel): - `batch_texts`: Process a batch of texts. (In addition to async variants of these functions) - - Attributes: - model (str): The model used by the generator. - api_key (str | None): The API key used for authentication. Defaults to None. - params (GenerateParams): The parameters used for generating completion messages. """ model: str + """The model name to be used by the generator.""" api_key: str | None = Field(None, exclude=True) + """The API key used for authentication.""" params: GenerateParams + """The parameters used for generating completion messages.""" def to_identifier(self, overloads: GenerateParams | None = None) -> str: """ @@ -111,10 +116,10 @@ def to_identifier(self, overloads: GenerateParams | None = None) -> str: Extra parameters are not supported in identifiers. Args: - overloads (GenerateParams | None, optional): The parameters to be used for generating the identifier. + overloads: The parameters to be used for generating the identifier. Returns: - str: The identifier string. + The identifier string. """ provider = next(name for name, klass in g_providers.items() if isinstance(self, klass)) params_dict = self._merge_params(overloads) @@ -139,10 +144,10 @@ def _merge_params(self, overloads: GenerateParams | None = None) -> dict[str, t. Typically used to prepare a dictionary of API parameters for a request. Args: - overloads (GenerateParams): The parameters to be merged with the current instance's parameters. + overloads: The parameters to be merged with the current instance's parameters. Returns: - dict[str, t.Any]: The merged parameters. + The merged parameters. """ params: dict[str, t.Any] = self.params.model_dump(exclude_unset=True) if self.params else {} if overloads is None: @@ -165,11 +170,11 @@ def generate_message(self, messages: t.Sequence[Message], overloads: GeneratePar Generates the next message for a given set of messages. Args: - messages (Sequence[Message]): The list of messages to generate completion for. - overloads (GenerateParams | None, optional): The parameters to be used for completion. + messages: The list of messages to generate completion for. 
+ overloads: The parameters to be used for completion. Returns: - Message: The generated completion message. + The generated completion message. Raises: NotImplementedError: This generator does not support this method. @@ -179,16 +184,7 @@ def generate_message(self, messages: t.Sequence[Message], overloads: GeneratePar async def agenerate_message( self, messages: t.Sequence[Message], overloads: GenerateParams | None = None ) -> Message: - """ - Asynchronously generates the next message for a given set of messages. - - Args: - messages (Sequence[Message]): A sequence of messages. - overloads (GenerateParams | None, optional): The parameters to be used for completion. - - Returns: - Coroutine[None, None, Message]: A coroutine that yields completion messages. - """ + """async version of [rigging.generator.Generator.generate_message][]""" raise NotImplementedError("agenerate_message is not supported by this generator.") # Text generation def generate_text(self, text: str, overloads: GenerateParams | None = None) -> s """ Generates a string completion of the given text. Args: - text (str): The input text to be completed. - overloads (GenerateParams | None, optional): The parameters to be used for completion. + text: The input text to be completed. + overloads: The parameters to be used for completion. Returns: - str: The completed text. + The completed text. Raises: NotImplementedError: This generator does not support this method. @@ -210,19 +206,7 @@ def generate_text(self, text: str, overloads: GenerateParams | None = None) -> s async def agenerate_text(self, text: str, overloads: GenerateParams | None = None) -> str: - """ - Asynchronously generates a string completion of the given text. - - Args: - text (str): The input text to be completed. - overloads (GenerateParams | None, optional): The parameters to be used for completion. - - Returns: - Coroutine[None, None, str]: A coroutine that yields the completed text. - - Raises: - NotImplementedError: This generator does not support this method. - """ + """async version of [rigging.generator.Generator.generate_text][]""" raise NotImplementedError("agenerate_text is not supported by this generator.") # Batching messages def batch_messages( self, many: t.Sequence[t.Sequence[Message]], overloads: t.Sequence[GenerateParams | None] | None = None, *, fixed: t.Sequence[Message] | None = None, ) -> t.Sequence[Message]: """ Generate a batch of messages based on the given parameters. Note: If supplied, the length of `overloads` must be the same as the length of `many`. Args: - many (Sequence[Sequence[Message]]): A sequence of sequences of messages. - overloads (Sequence[GenerateParams | None], optional): A sequence of GenerateParams objects or None. Defaults to None. - fixed (Sequence[Message], optional): A sequence of fixed messages to be prefixed before every item of `many`. Defaults to None. + many: A sequence of sequences of messages. + overloads: A sequence of GenerateParams objects or None. + fixed: A sequence of fixed messages to be prefixed before every item of `many`. Returns: - Sequence[Message]: A sequence of generated messages. + A sequence of generated messages. Raises: NotImplementedError: This method is not supported by this generator. @@ -260,23 +244,7 @@ async def abatch_messages( self, *, fixed: t.Sequence[Message], ) -> t.Sequence[Message]: - """ - Asynchronously Generate a batch of messages based on the given parameters. - - Note: - If supplied, the length of `overloads` must be the same as the length of `many`. - - Args: - many (Sequence[Sequence[Message]]): A sequence of sequences of messages.
- overloads (Sequence[GenerateParams | None], optional): A sequence of GenerateParams or None. Defaults to None. - fixed (Sequence[Message]): A sequence of fixed messages to be prefixed before every item of `many`. Defaults to None. - - Returns: - Sequence[Message]: A sequence of generated messages. - - Raises: - NotImplementedError: This method is not supported by this generator. - """ + """async version of [rigging.generator.Generator.batch_messages][]""" raise NotImplementedError("abatch_messages is not supported by this generator.") # Batching texts def batch_texts( self, many: t.Sequence[str], overloads: t.Sequence[GenerateParams | None] | None = None, *, fixed: str | None = None, ) -> t.Sequence[str]: """ Note: If supplied, the length of `overloads` must be the same as the length of `many`. Args: - many (Sequence[str]): The input texts for generating the batch. - overloads (Sequence[GenerateParams | None] | None, optional): Additional parameters for generating each text in the batch. Defaults to None. - fixed (str | None, optional): A fixed input text to be used as a prefix for all of `many`. Defaults to None. + many: The input texts for generating the batch. + overloads: Additional parameters for generating each text in the batch. + fixed: A fixed input text to be used as a prefix for all of `many`. Returns: - Sequence[str]: The generated texts in the batch. + The generated texts in the batch. Raises: NotImplementedError: This method is not supported by this generator. @@ -314,20 +282,7 @@ async def abatch_texts( self, many: t.Sequence[str], overloads: t.Sequence[GenerateParams | None] | None = None, *, fixed: str | None = None, ) -> t.Sequence[str]: - """ - Asynchronously Generate multiple texts in batch. - - Args: - many (Sequence[str]): A sequence of texts to generate. - overloads (Sequence[GenerateParams | None] | None, optional): A sequence of optional parameters for each text. Defaults to None. - fixed (str | None, optional): A fixed parameter for all texts. Defaults to None. - - Returns: - Sequence[str]: A sequence of generated texts. - - Raises: - NotImplementedError: This method is not supported by this generator. - """ + """async version of [rigging.generator.Generator.batch_texts][]""" raise NotImplementedError("abatch_texts is not supported by this generator.") # Helper alternative to chat(generator) -> generator.chat(...) def chat( self, messages: t.Sequence[MessageDict] | t.Sequence[Message] | str, overloads: GenerateParams | None = None, ) -> "PendingChat": """ Builds a pending chat with the given messages and optional overloads. Args: - messages (Sequence[MessageDict] | Sequence[Message] | str): The messages to be sent in the chat. - overloads (GenerateParams | None, optional): Optional parameters for generating responses. Defaults to None. + messages: The messages to be sent in the chat. + overloads: Optional parameters for generating responses. Returns: - PendingChat: Pending chat to run. + Pending chat to run. """ return PendingChat(self, Message.fit_as_list(messages), overloads) def complete(self, text: str, overloads: GenerateParams | None = None) -> Pendin """ Generates a pending string completion of the given text. Args: - text (str): The input text to be completed. - overloads (GenerateParams | None, optional): The parameters to be used for completion. + text: The input text to be completed. + overloads: The parameters to be used for completion. Returns: - str: The completed text. + The pending completion to run. """ return PendingCompletion(self, text, overloads) @@ -408,14 +363,13 @@ def chat( """ Creates a pending chat using the given generator, messages, and overloads. Args: - generator (Generator): The generator to use for creating the chat.
- messages (Sequence[MessageDict] | Sequence[Message] | MessageDict | Message | str): + generator: The generator to use for creating the chat. + messages: The messages to include in the chat. Can be a single message or a sequence of messages. - overloads (GenerateParams | None, optional): Additional parameters for generating the chat. - Defaults to None. + overloads: Additional parameters for generating the chat. Returns: - PendingChat: Pending chat to run. + Pending chat to run. """ return generator.chat(messages, overloads) @@ -435,11 +389,11 @@ def get_identifier(generator: Generator, overloads: GenerateParams | None = None Delegates to [rigging.generator.Generator.to_identifier][] Args: - generator (Generator): The generator object. - overloads (GenerateParams | None, optional): The generate parameters. Defaults to None. + generator: The generator object. + overloads: The generate parameters. Returns: - str: The identifier for the generator. + The identifier for the generator. """ return generator.to_identifier(overloads) @@ -450,7 +404,7 @@ def get_generator(identifier: str) -> Generator: Identifier strings are formatted like `<provider>!<model>,<**kwargs>` - (provider is optional and defaults to "litellm" if not specified) + (provider is optional and defaults to "litellm" if not specified) Examples: (These get parsed as [rigging.generator.GenerateParams][]) Args: - identifier (str): The identifier string to use to get a generator. + identifier: The identifier string to use to get a generator. Returns: - Generator: The generator object. + The generator object. Raises: InvalidModelSpecifiedError: If the identifier is invalid. @@ -509,11 +463,8 @@ def register_generator(provider: str, generator_cls: type[Generator]) -> None: This lets you use [rigging.generator.get_generator][] with a custom generator class. Args: - provider (str): The name of the provider. - generator_cls (type[Generator]): The generator class to register. - - Returns: - None + provider: The name of the provider. + generator_cls: The generator class to register. """ global g_providers g_providers[provider] = generator_cls @@ -524,8 +475,8 @@ def trace_messages(messages: t.Sequence[Message], title: str) -> None: """ Helper function to trace log a sequence of Message objects. Args: - messages (Sequence[Message]): A sequence of Message objects to be logged. - title (str): The title to be displayed in the log. + messages: A sequence of Message objects to be logged. + title: The title to be displayed in the log. Returns: None @@ -540,8 +491,8 @@ def trace_str(content: str, title: str) -> None: """ Helper function to trace log a string. Parameters: - content (str): The string content to be logged. - title (str): The title of the log entry. + content: The string content to be logged. + title: The title of the log entry. Returns: None diff --git a/rigging/logging.py b/rigging/logging.py index d113f7d..8b7a265 100644 --- a/rigging/logging.py +++ b/rigging/logging.py @@ -28,17 +28,12 @@ def configure_logging( and you can control the formatting and log levels using the loguru API. Args: - log_level (str): The desired log level. Valid values are 'TRACE', 'DEBUG', 'INFO', + log_level: The desired log level. Valid values are 'TRACE', 'DEBUG', 'INFO', 'SUCCESS', 'WARNING', 'ERROR', and 'CRITICAL'. - log_file (pathlib.Path | None, optional): The path to the log file. If None, logging - will only be done to the console. Defaults to None. - log_file_level (LogLevelLiteral, optional): The log level for the log file.
Valid values + log_file: The path to the log file. If None, logging + will only be done to the console. + log_file_level: The log level for the log file. Valid values are 'TRACE', 'DEBUG', 'INFO', 'SUCCESS', 'WARNING', 'ERROR', and 'CRITICAL'. - Defaults to 'debug'. - - Returns: - None: This function does not return anything. - """ global g_configured diff --git a/rigging/message.py b/rigging/message.py index 9f85f4a..344243f 100644 --- a/rigging/message.py +++ b/rigging/message.py @@ -21,6 +21,7 @@ from rigging.parsing import try_parse_many Role = t.Literal["system", "user", "assistant"] +"""The role of a message. Can be 'system', 'user', or 'assistant'.""" # Helper type for messages structured @@ -28,14 +29,12 @@ class MessageDict(t.TypedDict): """ Helper to represent a [rigging.message.Message][] as a dictionary. - - Attributes: - role (Role): The role of the message. - content (str): The content of the message. """ role: Role + """The role of the message.""" content: str + """The content of the message.""" # Structured portion of a message with @@ -43,16 +42,14 @@ class MessageDict(t.TypedDict): class ParsedMessagePart(BaseModel): """ Represents a parsed message part. - - Attributes: - model (SerializeAsAny[Model]): The rigging/pydantic model associated with the message part. - slice_ (slice): The slice representing the range into the message content. """ model_config = ConfigDict(arbitrary_types_allowed=True) model: SerializeAsAny[Model] + """The rigging/pydantic model associated with the message part.""" slice_: slice + """The slice representing the range into the message content.""" @field_serializer("slice_") def serialize_slice(self, slice_: slice, _info: FieldSerializationInfo) -> list[int]: @@ -71,15 +68,12 @@ def validate_slice(cls, value: t.Any) -> slice: class Message(BaseModel): """ Represents a message with role, content, and parsed message parts. - - Attributes: - role (Role): The role of the message. - content (str): The content of the message. - parts (List[ParsedMessagePart], optional): List of parsed part objects. """ role: Role + """The role of the message.""" parts: list[ParsedMessagePart] = Field(default_factory=list) + """The parsed message parts.""" _content: str = "" @@ -152,6 +146,7 @@ def _sync_parts(self) -> None: @computed_field # type: ignore[misc] @property def content(self) -> str: + """The content of the message.""" # We used to sync the models and content each time it was accessed, # hence the getter. Now we just return the stored content. # I'll leave it as is for now in case we want to add any @@ -176,9 +171,6 @@ def apply(self, **kwargs: str) -> None: Args: **kwargs: Keyword arguments to substitute in the message content. - - Returns: - None """ template = string.Template(self.content) self.content = template.safe_substitute(**kwargs) @@ -188,11 +180,11 @@ def strip(self, model_type: type[Model], *, fail_on_missing: bool = False) -> li Removes and returns a list of ParsedMessagePart objects from the message that match the specified model type. Args: - model_type (type[Model]): The type of model to match. - fail_on_missing (bool, optional): If True, raises a TypeError if no matching model is found. Defaults to False. + model_type: The type of model to match. + fail_on_missing: If True, raises a TypeError if no matching model is found. Returns: - list[ParsedMessagePart]: A list of removed ParsedMessagePart objects. + A list of removed ParsedMessagePart objects. Raises: TypeError: If no matching model is found and fail_on_missing is True. 
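For reviewers, a minimal usage sketch of the `parse`/`strip` flow documented above (the `Fact` model and message content are hypothetical, and this assumes the XML tag defaults to the lowercased class name as in the library's other examples):

```python
import rigging as rg

class Fact(rg.Model):
    content: str

# Hypothetical assistant output containing a <fact> block
message = rg.Message(
    role="assistant",
    content="Sure! <fact>Honey never spoils.</fact>",
)

fact = message.parse(Fact)     # parses the model out of the content
removed = message.strip(Fact)  # removes matching parsed parts and returns them
print(message.content)         # content with the <fact> block stripped out
```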
@@ -222,10 +214,10 @@ def parse(self, model_type: type[ModelT]) -> ModelT: Parses a model from the message content. Args: - model_type (type): The type of model to parse. + model_type: The type of model to parse. Returns: - ModelT: The parsed model. + The parsed model. Raises: ValueError: If no models of the given type are found and `fail_on_missing` is set to `True`. @@ -237,10 +229,10 @@ def try_parse(self, model_type: type[ModelT]) -> ModelT | None: Tries to parse a model from the message content. Args: - model_type (type[ModelT]): The type of model to search for. + model_type: The type of model to search for. Returns: - ModelT | None: The first model that matches the given model type, or None if no match is found. + The first model that matches the given model type, or None if no match is found. """ return next(iter(self.try_parse_many(model_type)), None) @@ -249,11 +241,11 @@ def parse_set(self, model_type: type[ModelT], minimum: int | None = None) -> lis Parses a set of models of the specified identical type from the message content. Args: - model_type (type[ModelT]): The type of models to parse. - minimum (int | None, optional): The minimum number of models required. Defaults to None. + model_type: The type of models to parse. + minimum: The minimum number of models required. Returns: - list[ModelT]: A list of parsed models. + A list of parsed models. Raises: MissingModelError: If the minimum number of models is not met. @@ -267,12 +259,12 @@ def try_parse_set( Tries to parse a set of models from the message content. Args: - model_type (type[ModelT]): The type of model to parse. - minimum (int | None, optional): The minimum number of models expected. Defaults to None. - fail_on_missing (bool, optional): Whether to raise an exception if models are missing. Defaults to False. + model_type: The type of model to parse. + minimum: The minimum number of models expected. + fail_on_missing: Whether to raise an exception if models are missing. Returns: - list[ModelT]: The parsed models. + The parsed models. Raises: MissingModelError: If the number of parsed models is less than the minimum required. @@ -287,10 +279,10 @@ def parse_many(self, *types: type[ModelT]) -> list[ModelT]: Parses multiple models of the specified non-identical types from the message content. Args: - *types (type[ModelT]): The types of models to parse. + *types: The types of models to parse. Returns: - list[ModelT]: A list of parsed models. + A list of parsed models. Raises: MissingModelError: If any of the models are missing. @@ -302,11 +294,11 @@ def try_parse_many(self, *types: type[ModelT], fail_on_missing: bool = False) -> Tries to parse multiple models from the content of the message. Args: - *types (type[ModelT]): The types of models to parse. - fail_on_missing (bool, optional): Whether to raise an exception if a model type is missing. Defaults to False. + *types: The types of models to parse. + fail_on_missing: Whether to raise an exception if a model type is missing. Returns: - list[ModelT]: A list of parsed models. + A list of parsed models. Raises: MissingModelError: If a model type is missing and `fail_on_missing` is True. @@ -326,13 +318,12 @@ def from_model( Create a Message object from one or more Model objects. Args: - cls (type["Message"]): The class of the Message object. - models (Model | t.Sequence[Model]): The Model object(s) to convert to a Message. - role (Role, optional): The role of the Message. Defaults to "user". - suffix (str | None, optional): A suffix to append to the content. 
Defaults to None. + models: The Model object(s) to convert to a Message. + role: The role of the Message. + suffix: A suffix to append to the content. Returns: - Message: The created Message object. + The created Message object. """ parts: list[ParsedMessagePart] = [] content: str = "" diff --git a/rigging/model.py b/rigging/model.py index 450827e..3478978 100644 --- a/rigging/model.py +++ b/rigging/model.py @@ -84,7 +84,7 @@ def to_pretty_xml(self) -> str: Converts the model to a pretty XML string with indents and newlines. Returns: - str: The pretty XML representation of the model. + The pretty XML representation of the model. """ tree = self.to_xml_tree() ET.indent(tree, " ") @@ -111,7 +111,7 @@ def is_simple(cls) -> bool: which can support it. Returns: - bool: True if the model is simple, False otherwise. + True if the model is simple, False otherwise. """ field_values = list(cls.model_fields.values()) return len(field_values) == 1 and field_values[0].annotation in BASIC_TYPES @@ -176,10 +176,10 @@ def from_text(cls, content: str) -> list[tuple[ModelT, slice]]: valid instances of a model from semi-structured text. Args: - content (str): The text content to parse. + content: The text content to parse. Returns: - list[tuple[ModelT, slice]]: A list of tuples containing the extracted models and their corresponding slices. + A list of tuples containing the extracted models and their corresponding slices. Raises: MissingModelError: If the specified model tags are not found in the message. @@ -244,11 +244,11 @@ def one_from_text(cls, content: str, *, fail_on_many: bool = False) -> tuple[Mod Finds and returns a single match from the given text content. Args: - content (str): The text content to search for matches. - fail_on_many (bool, optional): If True, raises a ValidationError if multiple matches are found. Defaults to False. + content: The text content to search for matches. + fail_on_many: If True, raises a ValidationError if multiple matches are found. Returns: - tuple[ModelT, slice]: A tuple containing the matched model and the slice indicating the match location. + A tuple containing the matched model and the slice indicating the match location. Raises: ValidationError: If multiple matches are found and fail_on_many is True. 
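As a quick illustration of the `from_text`/`one_from_text` contract described above (hypothetical `Secret` model; again assuming the default tag is the lowercased class name):

```python
import rigging as rg

class Secret(rg.Model):
    content: str

text = "before <secret>hunter2</secret> after"

# one_from_text returns the parsed model plus the slice it occupies
model, span = Secret.one_from_text(text)
print(model.content)  # "hunter2"
print(text[span])     # "<secret>hunter2</secret>"
```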
@@ -286,27 +286,41 @@ class ValidationErrorModel(ErrorModel, tag="validation_error"): class Thinking(Model): + """Quick model for thinking messages.""" + content: str class Question(Model): + """Quick model for questions.""" + content: str class Answer(Model): + """Quick model for answers.""" + content: str class QuestionAnswer(Model): - question: Question - answer: Answer + """Quick model for question-answer pairs.""" + + question: Question = element() + """The question.""" + answer: Answer = element() + """The answer.""" class Description(Model): + """Quick model for descriptions.""" + content: str class Instructions(Model): + """Quick model for instructions.""" + content: str @@ -318,6 +332,7 @@ class DelimitedAnswer(Model): @property def items(self) -> list[str]: + """Parsed items from the content.""" split_sizes: dict[str, int] = {} for delimiter in self._delimiters: split_sizes[delimiter] = len(self.content.split(delimiter)) @@ -348,6 +363,7 @@ class YesNoAnswer(Model): "Yes/No answer with coercion" boolean: bool + """The boolean value of the answer.""" @field_validator("boolean", mode="before") def parse_str_to_bool(cls, v: t.Any) -> t.Any: diff --git a/rigging/parsing.py b/rigging/parsing.py index 18a4897..47bd21d 100644 --- a/rigging/parsing.py +++ b/rigging/parsing.py @@ -11,11 +11,11 @@ def parse(text: str, model_type: type[ModelT]) -> tuple[ModelT, slice]: Parses a single model from text. Args: - text (str): The content to parse. - model_type (type): The type of model to parse. + text: The content to parse. + model_type: The type of model to parse. Returns: - ModelT: The parsed model. + The parsed model. Raises: ValueError: If no models of the given type are found and `fail_on_missing` is set to `True`. @@ -28,15 +28,12 @@ def try_parse(text: str, model_type: type[ModelT]) -> tuple[ModelT, slice] | Non Tries to parse a model from text. Args: - text (str): The content to parse. - model_type (type[ModelT]): The type of model to search for. + text: The content to parse. + model_type: The type of model to search for. Returns: - ModelT | None: The first model that matches the given model type, or None if no match is found. + The first model that matches the given model type, or None if no match is found. """ - # for model in self.models: - # if isinstance(model, model_type): - # return model return next(iter(try_parse_many(text, model_type)), None) @@ -45,12 +42,12 @@ def parse_set(text: str, model_type: type[ModelT], *, minimum: int | None = None Parses a set of models with the specified identical type from text. Args: - text (str): The content to parse. - model_type (type[ModelT]): The type of models to parse. - minimum (int | None, optional): The minimum number of models required. Defaults to None. + text: The content to parse. + model_type: The type of models to parse. + minimum: The minimum number of models required. Returns: - list[tuple[ModelT, slice]]: A list of parsed models. + A list of parsed models. Raises: MissingModelError: If the minimum number of models is not met. @@ -65,13 +62,13 @@ def try_parse_set( Tries to parse a set of models with the specified identical type from text. Args: - text (str): The content to parse. - model_type (type[ModelT]): The type of model to parse.
+ minimum: The minimum number of models expected. + fail_on_missing: Whether to raise an exception if models are missing. Returns: - list[tuple[ModelT, slice]]: The parsed models. + The parsed models. Raises: MissingModelError: If the number of parsed models is less than the minimum required. @@ -87,11 +84,11 @@ def parse_many(text: str, *types: type[ModelT]) -> list[tuple[ModelT, slice]]: Parses multiple models of the specified non-identical types from text. Args: - text (str): The content to parse. - *types (type[ModelT]): The types of models to parse. + text: The content to parse. + *types: The types of models to parse. Returns: - list[tuple[ModelT, slice]]: A list of parsed models. + A list of parsed models. Raises: MissingModelError: If any of the models are missing. @@ -104,12 +101,12 @@ def try_parse_many(text: str, *types: type[ModelT], fail_on_missing: bool = Fals Tries to parse multiple models of the specified non-identical types from text. Args: - text (str): The content to parse. - *types (type[ModelT]): The types of models to parse. - fail_on_missing (bool, optional): Whether to raise an exception if a model type is missing. Defaults to False. + text: The content to parse. + *types: The types of models to parse. + fail_on_missing: Whether to raise an exception if a model type is missing. Returns: - list[tuple[ModelT, slice]]: A list of parsed models. + A list of parsed models. Raises: MissingModelError: If a model type is missing and `fail_on_missing` is True. diff --git a/rigging/tool.py b/rigging/tool.py index c75b4a6..7cd0afe 100644 --- a/rigging/tool.py +++ b/rigging/tool.py @@ -153,14 +153,12 @@ def hit(self, target: Annotated[str, "Target of the hit"]) -> str: Note: All functions on the tool must have type hints for their parameters and use the `Annotated` type hint to provide a description for each parameter. - - Attributes: - name (str): The name of the tool. - description (str): A description of the tool. """ name: str + """Name of the tool""" description: str + """Description of the tool""" def __init_subclass__(cls, *, name: str | None = None, description: str | None = None, **kwargs: t.Any) -> None: super().__init_subclass__(**kwargs) @@ -213,6 +211,7 @@ def _execute(self, call: ToolCall) -> str: return str(result) def execute(self, call: ToolCall) -> ToolResult: + """Executes a function call on the tool.""" try: content = self._execute(call) return ToolResult(tool=call.tool, function=call.function, error=False, content=content) @@ -226,6 +225,7 @@ # build a ToolDescription object that can be serialized # and passed to a model def get_description(self) -> ToolDescription: + """Creates a full description of the tool for use in prompting""" functions: list[ToolFunction] = [] for method_name, method in inspect.getmembers(self.__class__, predicate=inspect.isfunction): if not method.__qualname__.startswith(self.__class__.__name__): From 72848d4e6d75c99c1331c14c55c9d81a1b0d665a Mon Sep 17 00:00:00 2001 From: monoxgas Date: Sun, 5 May 2024 22:41:53 -0600 Subject: [PATCH 13/16] Folding batch support into PendingChat and PendingCompletion.
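As a rough sketch of the surface this patch introduces, drawn from the signatures in the diff below (the model name and prompts are placeholders; exact behavior may differ):

```python
import rigging as rg

generator = rg.get_generator("gpt-3.5-turbo")

# Anything already on the pending chat acts as a shared prefix,
# and each entry in the batch becomes its own generated Chat
chats = generator.chat(
    [{"role": "system", "content": "You are concise."}]
).run_batch(
    ["Tell me a joke", "Tell me a fact", "Tell me a riddle"],
    skip_failed=True,
)

for chat in chats:
    print(chat.last.content)
```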
--- rigging/chat.py | 348 +++++++++++++++++++++++---------- rigging/completion.py | 330 ++++++++++++++++++++++--------- rigging/generator.py | 445 +++++++++++++++++++----------------------- 3 files changed, 690 insertions(+), 433 deletions(-) diff --git a/rigging/chat.py b/rigging/chat.py index 1397628..06b8e1c 100644 --- a/rigging/chat.py +++ b/rigging/chat.py @@ -4,9 +4,9 @@ They are the primary way to interact with the generator. """ -import asyncio import typing as t from copy import deepcopy +from dataclasses import dataclass from datetime import datetime from uuid import UUID, uuid4 @@ -275,6 +275,28 @@ def inject_tool_prompt(self, tools: t.Sequence[Tool]) -> None: ThenChatCallback = t.Callable[[Chat], Chat | None] +MessageProducer = t.Generator[t.Sequence[Message], None, None] +BatchProducer = t.Generator[t.Sequence[t.Sequence[Message]], None, None] + + +@dataclass +class RunState: + messages: list[Message] + params: "GenerateParams" + processor: t.Generator[list[Message], Message, list[Message]] + chat: Chat | None = None + done: bool = False + + +@dataclass +class BatchRunState: + inputs: list[Message] + messages: list[Message] + params: "GenerateParams" + processor: t.Generator[list[Message], Message, list[Message]] + chat: Chat | None = None + done: bool = False + class PendingChat: """ @@ -300,42 +322,30 @@ def __init__( self.inject_tool_prompt: bool = True self.force_tool: bool = False self.then_callbacks: list[ThenChatCallback] = [] + # self.producer: MessageProducer | None = None - def overload(self, **kwargs: t.Any) -> "PendingChat": + def with_(self, params: t.Optional["GenerateParams"] = None, **kwargs: t.Any) -> "PendingChat": """ - Overloads the current chat with the given parameters. - - This is a convenience method for calling `with_params(GenerateParams(**kwargs))`. + Assign specific generation parameter overloads for this chat. Note: This will trigger a `clone` if overload params have already been set. Args: - **kwargs: Keyword arguments representing the parameters to be overloaded. + params: The parameters to set for the chat. + **kwargs: An alternative way to pass parameters as keyword arguments. Returns: - A new instance of PendingChat with the overloaded parameters. + A new instance of PendingChat with the updated parameters. """ from rigging.generator import GenerateParams - return self.with_params(GenerateParams(**kwargs)) + if params is None: + params = GenerateParams(**kwargs) - def with_params(self, params: "GenerateParams") -> "PendingChat": - """ - Sets the generation parameter overloads for the chat. - - Note: - This will trigger a `clone` if overload params have already been set. - - Args: - params: The parameters to set for the chat. - - Returns: - A new instance of PendingChat with the updated parameters. - """ if self.params is not None: new = self.clone() - new.params = params + new.params = self.params.merge_with(params) return new self.params = params @@ -442,6 +452,24 @@ def process(chat: Chat) -> Chat | None: self.then_callbacks.append(callback) return self + # def from_(self, producer: MessageProducer) -> "PendingChat": + # """ + # Adds a generator to the chat to produce messages. + + # Args: + # producer: The generator that produces messages. + + # Returns: + # The current instance of the chat. + + # Raises: + # ValueError: If a producer has already been set. 
+ # """ + # if self.producer is not None: + # raise ValueError("A producer has already been set") + # self.producer = producer + # return self + def apply(self, **kwargs: str) -> "PendingChat": """ Clones this pending chat and calls [rigging.chat.Chat.apply][] with the given keyword arguments. @@ -653,13 +681,13 @@ def _until_parse_callback(self, message: Message) -> tuple[bool, list[Message]]: def _until( self, - messages: list[Message], + message: Message, callback: UntilMessageCallback, attempt_recovery: bool, drop_dialog: bool, max_rounds: int, ) -> t.Generator[list[Message], Message, list[Message]]: - should_continue, step_messages = callback(messages[-1]) + should_continue, step_messages = callback(message) if not should_continue: return step_messages @@ -669,7 +697,7 @@ def _until( logger.trace( f"_until({callback.__name__}) round {_ + 1}/{max_rounds} (attempt_recovery={attempt_recovery})" ) - next_message = yield messages[:-1] + running_messages + next_message = yield running_messages should_continue, step_messages = callback(next_message) logger.trace(f" |- returned {should_continue} with {len(step_messages)} new messages)") @@ -682,21 +710,32 @@ def _until( logger.warning(f"Exhausted max rounds ({max_rounds})") raise ExhaustedMaxRoundsError(max_rounds) + # TODO: Much like the PendingCompletion code, it's opaque + # exactly how multiple callbacks should be blended together + # when generating. I think we should look at limiting it to + # one callback in total, but I'll leave the behavior as is + # for now with the knowledge that behavior might be a bit + # unpredictable. + def _process(self) -> t.Generator[list[Message], Message, list[Message]]: + first_response = yield [] + new_messages = [first_response] + for callback, reset_between, drop_internal, max_rounds in self.until_callbacks: + generated = yield from self._until(new_messages[-1], callback, reset_between, drop_internal, max_rounds) + new_messages = new_messages[:-1] + generated + return new_messages + def _then(self, chat: Chat) -> Chat: # TODO: Adding async support here would be nice for callback in self.then_callbacks: chat = callback(chat) or chat return chat - def _execute(self) -> t.Generator[list[Message], Message, list[Message]]: - # TODO: Much like the PendingCompletion code, it's opaque - # exactly how multiple callbacks should be blended together - # when generating. I think we should look at limiting it to - # one callback in total, but I'll leave the behavior as is - # for now with the knowledge that behavior might be a bit - # unpredictable. - + def _prepare(self) -> None: if self.until_tools: + if self.inject_tool_prompt: + self.chat.inject_tool_prompt(self.until_tools) + self.inject_tool_prompt = False + # TODO: This can cause issues when certain APIs do not return # the stop sequence as part of the response. 
This behavior # seems like a larger issue than the model continuing after # # self.params.stop = [ToolCalls.xml_end_tag()] - if self.inject_tool_prompt: - self.chat.inject_tool_prompt(self.until_tools) - self.inject_tool_prompt = False - - first_message = yield self.chat.all - - new_messages = [first_message] - for callback, reset_between, drop_internal, max_rounds in self.until_callbacks: - generated = yield from self._until( - self.chat.all + new_messages, callback, reset_between, drop_internal, max_rounds - ) - new_messages = new_messages[:-1] + generated + def _fit_params( + self, count: int, params: t.Sequence[t.Optional["GenerateParams"] | None] | None = None + ) -> list["GenerateParams"]: + from rigging.generator import GenerateParams - return new_messages + params = [None] * count if params is None else list(params) + if len(params) != count: + raise ValueError(f"The number of params must be {count}") + if self.params is not None: + params = [self.params.merge_with(p) for p in params] + return [(p or GenerateParams()) for p in params] - @t.overload - def run(self, count: t.Literal[None] = None) -> Chat: - ... + # TODO: There is an embarrassing amount of code duplication here + # between the async and non-async methods, batch and many, etc. - @t.overload - def run(self, count: int) -> list[Chat]: - ... + # Single messages - def run(self, count: int | None = None) -> Chat | list[Chat]: + def run(self) -> Chat: """ Execute the generation process to produce the final chat. - If `count` is provided, `run_many` will be called instead. - - Args: - count: The number of times to generate using the same inputs. - Returns: - Chat | list[Chat]: The chat object or a list of chat objects, depending on the value of `count`. + The generated Chat. """ + return self.run_many(1)[0] - if count is not None: - return self.run_many(count) - - executor = self._execute() - outbound = next(executor) + async def arun(self) -> Chat: + """async variant of the [rigging.chat.PendingChat.run][] method.""" + return (await self.arun_many(1))[0] - try: - while True: - inbound = self.generator.generate_message(outbound, self.params) - outbound = executor.send(inbound) - except StopIteration as stop: - outbound = t.cast(list[Message], stop.value) + __call__ = run - return self._then(Chat(self.chat.all, outbound, pending=self, metadata=self.metadata)) + # Many messages - def run_many(self, count: int) -> list[Chat]: + def run_many( + self, + count: int, + *, + params: t.Sequence[t.Optional["GenerateParams"]] | None = None, + skip_failed: bool = False, + ) -> list[Chat]: """ Executes the generation process multiple times with the same inputs. Parameters: count: The number of times to execute the generation process. + params: A sequence of parameters to be used for each execution. + skip_failed: Enable to ignore any max rounds errors and return only successful chats. Returns: - A list of Chat objects representing the results of each execution. + A list of generated Chats.
""" - return [self.run() for _ in range(count)] + states: list[RunState] = [RunState([], p, self._process()) for p in self._fit_params(count, params)] + _ = [next(state.processor) for state in states] - __call__ = run + pending_states = states + while pending_states: + inbounds = self.generator.generate_messages( + [s.messages for s in pending_states], [s.params for s in pending_states], prefix=self.chat.all + ) - @t.overload - async def arun(self, count: t.Literal[None] = None) -> Chat: - ... + for inbound, state in zip(inbounds, pending_states, strict=True): + try: + state.messages = state.processor.send(inbound) + except StopIteration as stop: + state.done = True + state.chat = Chat( + self.chat.all, t.cast(list[Message], stop.value), pending=self, metadata=self.metadata + ) + except ExhaustedMaxRoundsError: + if not skip_failed: + raise + state.done = True - @t.overload - async def arun(self, count: int) -> list[Chat]: - ... + pending_states = [s for s in pending_states if not s.done] - async def arun(self, count: int | None = None) -> Chat | list[Chat]: - """async variant of the [rigging.chat.PendingChat.run][] method.""" - if count is not None: - return await self.arun_many(count) + return [self._then(s.chat) for s in states if s.chat is not None] - executor = self._execute() - outbound = next(executor) + async def arun_many( + self, + count: int, + *, + params: t.Sequence[t.Optional["GenerateParams"]] | None = None, + skip_failed: bool = False, + ) -> list[Chat]: + """async variant of the [rigging.chat.PendingChat.run_many][] method.""" + states: list[RunState] = [RunState([], p, self._process()) for p in self._fit_params(count, params)] + _ = [next(state.processor) for state in states] - try: - while True: - inbound = await self.generator.agenerate_message(outbound, self.params) - outbound = executor.send(inbound) - except StopIteration as stop: - outbound = t.cast(list[Message], stop.value) + pending_states = states + while pending_states: + inbounds = await self.generator.agenerate_messages( + [s.messages for s in pending_states], [s.params for s in pending_states], prefix=self.chat.all + ) - return self._then(Chat(self.chat.all, outbound, pending=self, metadata=self.metadata)) + for inbound, state in zip(inbounds, pending_states, strict=True): + try: + state.messages = state.processor.send(inbound) + except StopIteration as stop: + state.done = True + state.chat = Chat( + self.chat.all, t.cast(list[Message], stop.value), pending=self, metadata=self.metadata + ) + except ExhaustedMaxRoundsError: + if not skip_failed: + raise + state.done = True - async def arun_many(self, count: int) -> list[Chat]: - """async variant of the [rigging.chat.PendingChat.run_many][] method.""" - chats = await asyncio.gather(*[self.arun() for _ in range(count)]) - return [self._then(chat) for chat in chats] + pending_states = [s for s in pending_states if not s.done] + + return [self._then(s.chat) for s in states if s.chat is not None] + + # Batch messages + + def run_batch( + self, + many: t.Sequence[t.Sequence[Message]] | t.Sequence[Message] | t.Sequence[MessageDict] | t.Sequence[str], + params: t.Sequence[t.Optional["GenerateParams"]] | None = None, + *, + skip_failed: bool = False, + ) -> list[Chat]: + """ + Executes the generation process accross multiple input messages. + + Note: + Anything already in this pending chat will be used as the `prefix` parameter + to [rigging.generator.Generator.generate_messages][]. + + Parameters: + many: A sequence of sequences of messages to be generated. 
+ params: A sequence of parameters to be used for each set of messages. + skip_failed: Enable to ignore any max rounds errors and return only successful chats. + + Returns: + A list of generated Chats. + """ + many = [Message.fit_as_list(m) for m in many] + params = self._fit_params(len(many), params) + states: list[BatchRunState] = [ + BatchRunState(m, [], p, self._process()) for m, p in zip(many, params, strict=True) + ] + _ = [next(state.processor) for state in states] + + pending_states = states + while pending_states: + inbounds = self.generator.generate_messages( + [s.inputs + s.messages for s in pending_states], + [s.params for s in pending_states], + prefix=self.chat.all, + ) + + for inbound, state in zip(inbounds, pending_states, strict=True): + try: + state.messages = state.processor.send(inbound) + except StopIteration as stop: + state.done = True + state.chat = Chat( + self.chat.all, t.cast(list[Message], stop.value), pending=self, metadata=self.metadata + ) + except ExhaustedMaxRoundsError: + if not skip_failed: + raise + state.done = True + + pending_states = [s for s in pending_states if not s.done] + + return [self._then(s.chat) for s in states if s.chat is not None] + + async def arun_batch( + self, + many: t.Sequence[t.Sequence[Message]] | t.Sequence[Message] | t.Sequence[MessageDict] | t.Sequence[str], + params: t.Sequence[t.Optional["GenerateParams"]] | None = None, + *, + skip_failed: bool = False, + ) -> list[Chat]: + """async variant of the [rigging.chat.PendingChat.run_batch][] method.""" + many = [Message.fit_as_list(m) for m in many] + params = self._fit_params(len(many), params) + states: list[BatchRunState] = [ + BatchRunState(m, [], p, self._process()) for m, p in zip(many, params, strict=True) + ] + _ = [next(state.processor) for state in states] + + pending_states = states + while pending_states: + inbounds = await self.generator.agenerate_messages( + [s.inputs + s.messages for s in pending_states], + [s.params for s in pending_states], + prefix=self.chat.all, + ) + + for inbound, state in zip(inbounds, pending_states, strict=True): + try: + state.messages = state.processor.send(inbound) + except StopIteration as stop: + state.done = True + state.chat = Chat( + self.chat.all, t.cast(list[Message], stop.value), pending=self, metadata=self.metadata + ) + except ExhaustedMaxRoundsError: + if not skip_failed: + raise + state.done = True + + pending_states = [s for s in pending_states if not s.done] + + return [self._then(s.chat) for s in states if s.chat is not None] diff --git a/rigging/completion.py b/rigging/completion.py index 07ce7c3..7a9b671 100644 --- a/rigging/completion.py +++ b/rigging/completion.py @@ -2,10 +2,10 @@ Completions work with isolated strings of text pre and post generation. """ -import asyncio import string import typing as t from copy import deepcopy +from dataclasses import dataclass from datetime import datetime from uuid import UUID, uuid4 @@ -29,6 +29,9 @@ DEFAULT_MAX_ROUNDS = 5 +# TODO: Chats and Completions share a lot of structure and code. +# Ideally we should build out a base class which they both inherit from.
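For reviewers, a hedged sketch of the parallel per-run parameter support this patch adds (mirroring the chat changes above; the prompt text and temperatures are placeholders):

```python
import rigging as rg
from rigging.generator import GenerateParams

generator = rg.get_generator("gpt-3.5-turbo")

# Three runs of the same prompt, each with its own temperature override
chats = generator.chat("Write a haiku about the sea").run_many(
    3,
    params=[GenerateParams(temperature=t) for t in (0.1, 0.5, 1.0)],
    skip_failed=True,
)
```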
class Completion(BaseModel): """ Represents a completed text generation. @@ -45,6 +48,8 @@ class Completion(BaseModel): """The original text.""" generated: str """The generated text.""" + metadata: dict[str, t.Any] = Field(default_factory=dict) + """Additional metadata for the completion.""" pending: t.Optional["PendingCompletion"] = Field(None, exclude=True, repr=False) """The pending completion associated with this completion.""" @@ -69,7 +74,7 @@ def __init__( Args: text: The original text. generated: The generated text. - pending: The pending completion associated with this completion + pending: The pending completion associated with this completion. **kwargs: Additional keyword arguments (typically used for serialization). @@ -137,6 +142,17 @@ def clone(self) -> "Completion": # and an optional list of messages to append before continuing UntilCompletionCallback = t.Callable[[str], bool] +ThenCompletionCallback = t.Callable[[Completion], Completion | None] + + +@dataclass +class RunState: + text: str + params: "GenerateParams" + processor: t.Generator[None, str, str] + completion: Completion | None = None + done: bool = False + class PendingCompletion: """ @@ -156,45 +172,57 @@ def __init__(self, generator: "Generator", text: str, params: t.Optional["Genera # (callback, all_text, max_rounds) self.until_callbacks: list[tuple[UntilCompletionCallback, bool, int]] = [] self.until_types: list[type[Model]] = [] + self.then_callbacks: list[ThenCompletionCallback] = [] - def overload(self, **kwargs: t.Any) -> "PendingCompletion": + def with_(self, params: t.Optional["GenerateParams"] = None, **kwargs: t.Any) -> "PendingCompletion": """ - Overloads the current completion with the given parameters. - - This is a convenience method for calling `with_params(GenerateParams(**kwargs))`. + Assign specific generation parameter overloads for this completion. Note: This will trigger a `clone` if overload params have already been set. Args: - **kwargs: Keyword arguments representing the parameters to be overloaded. + params: The parameters to set for the completion. + **kwargs: An alternative way to pass parameters as keyword arguments. Returns: - A new instance of PendingCompletion with the overloaded parameters. + The current (or cloned) instance of the completion. """ from rigging.generator import GenerateParams - return self.with_params(GenerateParams(**kwargs)) + if params is None: + params = GenerateParams(**kwargs) + + if self.params is not None: + new = self.clone() + new.params = self.params.merge_with(params) + return new + + self.params = params + return self - def with_params(self, params: "GenerateParams") -> "PendingCompletion": + def then(self, callback: ThenCompletionCallback) -> "PendingCompletion": """ - Sets the generation parameter overloads for the completion. + Registers a callback to be executed after the generation process completes. Note: - This will trigger a `clone` if overload params have already been set. + Returning a Completion object from the callback will replace the current completion for the remainder of the callbacks + return value of `run()`. + + ``` + def process(completion: Completion) -> Completion | None: + ... + + pending.then(process).run() + ``` Args: - params: The parameters to set for the completion. + callback: The callback function to be executed. Returns: - A new instance of PendingCompletion with the updated parameters. + The current instance of the pending completion.
""" - if self.params is not None: - new = self.clone() - new.params = params - return new - - self.params = params + self.then_callbacks.append(callback) return self def add(self, text: str) -> "PendingCompletion": @@ -339,27 +367,45 @@ def _until_parse_callback(self, text: str) -> bool: return True return False - def _execute(self) -> t.Generator[str, str, str]: + def _then(self, chat: Completion) -> Completion: + # TODO: Adding async support here would be nice + for callback in self.then_callbacks: + chat = callback(chat) or chat + return chat + + def _fit_params( + self, count: int, params: t.Sequence[t.Optional["GenerateParams"] | None] | None = None + ) -> list["GenerateParams"]: + from rigging.generator import GenerateParams + + params = [None] * count if params is None else list(params) + if len(params) != count: + raise ValueError(f"The number of params must be {count}") + if self.params is not None: + params = [self.params.merge_with(p) for p in params] + return [(p or GenerateParams()) for p in params] + + # TODO: It's opaque exactly how we should blend multiple + # until callbacks together, so here is the current implementation: + # + # - We take the lowest max_rounds from all until_callbacks + # - Each loop, we let every callback run, if any tell us to retry, we do + # - If we leave the loop with should_retry still True, we raise an error + # - Assuming every should_retry is False, we break out of the loop and return + + def _process(self) -> t.Generator[None, str, str]: # If there are no until_callbacks, we can just yield the text if not self.until_callbacks: - generated = yield self.text + generated = yield return generated - # It's opaque exactly how we should blend multiple - # until callbacks together, so here is the current implementation: - # - # - We take the lowest max_rounds from all until_callbacks - # - Each loop, we let every callback run, if any tell us to retry, we do - # - If we leave the loop with should_retry still True, we raise an error - # - Assuming every should_retry is False, we break out of the loop and return - lowest_max_rounds = min((c[2] for c in self.until_callbacks), default=1) current_round = 0 should_retry = True while should_retry and current_round < lowest_max_rounds: current_round += 1 - generated = yield self.text + generated = yield for callback, use_all_text, _ in self.until_callbacks: should_retry = callback(self.text + generated if use_all_text else generated) if should_retry: @@ -371,81 +417,187 @@ def _execute(self) -> t.Generator[str, str, str]: return generated - @t.overload - def run(self, count: t.Literal[None] = None) -> Completion: - ... - - @t.overload - def run(self, count: int) -> list[Completion]: - ... - - def run(self, count: int | None = None) -> Completion | list[Completion]: + def run(self) -> Completion: """ Execute the generation process to produce the final completion. - If `count` is provided, `run_many` will be called instead. - - Args: - count: The number of times to generate using the same inputs. - Returns: - The completion object or a list of completion objects, depending on the value of `count`. + The generated Completion. 
""" - if count is not None: - return self.run_many(count) + return self.run_many(1)[0] - executor = self._execute() - outbound = next(executor) + async def arun(self) -> Completion: + """async variant of the [rigging.chat.PendingChat.run][] method.""" + return (await self.arun_many(1))[0] - try: - while True: - inbound = self.generator.generate_text(outbound, self.params) - outbound = executor.send(inbound) - except StopIteration as stop: - outbound = t.cast(str, stop.value) + __call__ = run - return Completion(self.text, outbound, pending=self) + # Many messages - def run_many(self, count: int) -> list[Completion]: + def run_many( + self, + count: int, + *, + params: t.Sequence[t.Optional["GenerateParams"]] | None = None, + skip_failed: bool = False, + ) -> list[Completion]: """ Executes the generation process multiple times with the same inputs. Parameters: count: The number of times to execute the generation process. + params: A sequence of parameters to be used for each execution. + skip_failed: Enable to ignore any max rounds errors and return only successful completions. Returns: - A list of Completion objects representing the results of each execution. + A list of generatated Completions. """ - return [self.run() for _ in range(count)] - - __call__ = run - - @t.overload - async def arun(self, count: t.Literal[None] = None) -> Completion: - ... - - @t.overload - async def arun(self, count: int) -> list[Completion]: - ... + states: list[RunState] = [RunState(self.text, p, self._process()) for p in self._fit_params(count, params)] + _ = [next(state.processor) for state in states] + + pending_states = states + while pending_states: + inbounds = self.generator.generate_texts( + [s.text for s in pending_states], [s.params for s in pending_states] + ) + + for inbound, state in zip(inbounds, pending_states, strict=True): + try: + state.processor.send(inbound) + except StopIteration as stop: + state.done = True + state.completion = Completion( + self.text, t.cast(str, stop.value), pending=self, metadata=self.metadata + ) + except ExhaustedMaxRoundsError: + if not skip_failed: + raise + state.done = True + + pending_states = [s for s in pending_states if not s.done] + + return [self._then(s.completion) for s in states if s.completion is not None] + + async def arun_many( + self, + count: int, + *, + params: t.Sequence[t.Optional["GenerateParams"]] | None = None, + skip_failed: bool = False, + ) -> list[Completion]: + """async variant of the [rigging.chat.PendingCompletion.run_many][] method.""" + states: list[RunState] = [RunState(self.text, p, self._process()) for p in self._fit_params(count, params)] + _ = [next(state.processor) for state in states] + + pending_states = states + while pending_states: + inbounds = await self.generator.agenerate_texts( + [s.text for s in pending_states], [s.params for s in pending_states] + ) + + for inbound, state in zip(inbounds, pending_states, strict=True): + try: + state.processor.send(inbound) + except StopIteration as stop: + state.done = True + state.completion = Completion( + self.text, t.cast(str, stop.value), pending=self, metadata=self.metadata + ) + except ExhaustedMaxRoundsError: + if not skip_failed: + raise + state.done = True + + pending_states = [s for s in pending_states if not s.done] + + return [self._then(s.completion) for s in states if s.completion is not None] + + # Batch completions + + def run_batch( + self, + many: t.Sequence[str], + params: t.Sequence[t.Optional["GenerateParams"]] | None = None, + *, + skip_failed: bool = False, + 
+    ) -> list[Completion]:
+        """
+        Executes the generation process across multiple input texts.

-    async def arun(self, count: int | None = None) -> Completion | list[Completion]:
-        """async variant of the [rigging.completion.PendingCompletion.run][] method."""
-        if count is not None:
-            return await self.arun_many(count)
+        Note:
+            Anything already in this pending completion will be used as the `prefix` parameter
+            to [rigging.generator.Generator.generate_texts][].

-        executor = self._execute()
-        outbound = next(executor)
+        Parameters:
+            many: A sequence of texts to generate with.
+            params: A sequence of parameters to be used for each text.
+            skip_failed: Enable to ignore any max rounds errors and return only successful completions.

-        try:
-            while True:
-                inbound = await self.generator.agenerate_text(outbound, self.params)
-                outbound = executor.send(inbound)
-        except StopIteration as stop:
-            outbound = t.cast(str, stop.value)
-
-        return Completion(self.text, outbound, pending=self)
-
-    async def arun_many(self, count: int) -> list[Completion]:
-        """async variant of the [rigging.completion.PendingCompletion.run_many][] method."""
-        chats = await asyncio.gather(*[self.arun() for _ in range(count)])
-        return list(chats)
+        Returns:
+            A list of generated Completions.
+        """
+        params = self._fit_params(len(many), params)
+        states: list[RunState] = [RunState(m, p, self._process()) for m, p in zip(many, params, strict=True)]
+        _ = [next(state.processor) for state in states]
+
+        pending_states = states
+        while pending_states:
+            inbounds = self.generator.generate_texts(
+                [s.text for s in pending_states],
+                [s.params for s in pending_states],
+                prefix=self.text,
+            )
+
+            for inbound, state in zip(inbounds, pending_states, strict=True):
+                try:
+                    state.processor.send(inbound)
+                except StopIteration as stop:
+                    state.done = True
+                    state.completion = Completion(
+                        self.text, t.cast(str, stop.value), pending=self, metadata=self.metadata
+                    )
+                except ExhaustedMaxRoundsError:
+                    if not skip_failed:
+                        raise
+                    state.done = True
+
+            pending_states = [s for s in pending_states if not s.done]
+
+        return [self._then(s.completion) for s in states if s.completion is not None]
+
+    async def arun_batch(
+        self,
+        many: t.Sequence[str],
+        params: t.Sequence[t.Optional["GenerateParams"]] | None = None,
+        *,
+        skip_failed: bool = False,
+    ) -> list[Completion]:
+        """async variant of the [rigging.completion.PendingCompletion.run_batch][] method."""
+        params = self._fit_params(len(many), params)
+        states: list[RunState] = [RunState(m, p, self._process()) for m, p in zip(many, params, strict=True)]
+        _ = [next(state.processor) for state in states]
+
+        pending_states = states
+        while pending_states:
+            inbounds = await self.generator.agenerate_texts(
+                [s.text for s in pending_states],
+                [s.params for s in pending_states],
+                prefix=self.text,
+            )
+
+            for inbound, state in zip(inbounds, pending_states, strict=True):
+                try:
+                    state.processor.send(inbound)
+                except StopIteration as stop:
+                    state.done = True
+                    state.completion = Completion(
+                        self.text, t.cast(str, stop.value), pending=self, metadata=self.metadata
+                    )
+                except ExhaustedMaxRoundsError:
+                    if not skip_failed:
+                        raise
+                    state.done = True
+
+            pending_states = [s for s in pending_states if not s.done]
+
+        return [self._then(s.completion) for s in states if s.completion is not None]
diff --git a/rigging/generator.py b/rigging/generator.py
index 914abe6..4d67d7d 100644
--- a/rigging/generator.py
+++ b/rigging/generator.py
@@ -84,6 +84,41 @@ def validate_stop(cls, value: t.Any) ->
t.Any: return value raise ValueError("Stop sequences must be a list or a string separated by ';'") + def merge_with(self, *others: t.Optional["GenerateParams"]) -> "GenerateParams": + """ + Apply a series of parameter overrides to the current instance and return a copy. + + Args: + *others: The parameters to be merged with the current instance's parameters. + Can be multiple and overrides will be applied in order. + + Returns: + The merged parameters instance. + """ + if len(others) == 0 or all(p is None for p in others): + return self + + updates: dict[str, t.Any] = {} + for other in [o for o in others if o is not None]: + other_dict = other.model_dump(exclude_unset=True) + for name, value in other_dict.items(): + if value is not None: + updates[name] = value + + return self.model_copy(update=updates) + + def to_dict(self) -> dict[str, t.Any]: + """ + Convert the parameters to a dictionary. + + Returns: + The parameters as a dictionary. + """ + params = self.model_dump(exclude_unset=True) + if "extra" in params: + params.update(params.pop("extra")) + return params + class Generator(BaseModel): """ @@ -93,10 +128,8 @@ class Generator(BaseModel): A subclass of this can implement any of the following: - - `generate_message`: Generate the next message for a given set of messages. - - `generate_text`: Generate a string completion of the given text. - - `batch_messages`: Process a batch of messages. - - `batch_texts`: Process a batch of texts. + - `generate_messages`: Process a batch of messages. + - `generate_texts`: Process a batch of texts. (In addition to async variants of these functions) """ @@ -108,239 +141,151 @@ class Generator(BaseModel): params: GenerateParams """The parameters used for generating completion messages.""" - def to_identifier(self, overloads: GenerateParams | None = None) -> str: + def to_identifier(self, params: GenerateParams | None = None) -> str: """ Converts the generator instance back into a rigging identifier string. - Note: - Extra parameters are not supported in identifiers. + This calls [rigging.generator.get_identifier][] with the current instance. Args: - overloads: The parameters to be used for generating the identifier. + params: The generation parameters. Returns: The identifier string. """ - provider = next(name for name, klass in g_providers.items() if isinstance(self, klass)) - params_dict = self._merge_params(overloads) - if not params_dict: - return f"{provider}!{self.model}" - - if "extra" in params_dict: - logger.warning("Extra parameters are not supported in identifiers.") - params_dict.pop("extra") - - if "stop" in params_dict: - params_dict["stop"] = ";".join(params_dict["stop"]) - - params = ",".join([f"{k}={v}" for k, v in params_dict.items()]) - - return f"{provider}!{self.model},{params}" - - def _merge_params(self, overloads: GenerateParams | None = None) -> dict[str, t.Any]: - """ - Helper to merge the parameters of the current instance with the provided `overloads` parameters. - - Typically used to prepare a dictionary of API parameters for a request. - - Args: - overloads: The parameters to be merged with the current instance's parameters. - - Returns: - The merged parameters. 
- """ - params: dict[str, t.Any] = self.params.model_dump(exclude_unset=True) if self.params else {} - if overloads is None: - return params - - overloads_dict = overloads.model_dump(exclude_unset=True) - if "extra" in overloads_dict: - params.update(overloads_dict.pop("extra")) - - for name, value in overloads_dict.items(): - if value is not None: - params[name] = value - - return params - - # Message generation - - def generate_message(self, messages: t.Sequence[Message], overloads: GenerateParams | None = None) -> Message: - """ - Generates the next message for a given set of messages. - - Args: - messages: The list of messages to generate completion for. - overloads: The parameters to be used for completion. - - Returns: - The generated completion message. - - Raises: - NotImplementedError: This generator does not support this method. - """ - raise NotImplementedError("generate_message is not supported by this generator.") - - async def agenerate_message( - self, messages: t.Sequence[Message], overloads: GenerateParams | None = None - ) -> Message: - """async version of [rigging.generator.Generator.generate_message][]""" - raise NotImplementedError("agenerate_message is not supported by this generator.") - - # Text generation + return get_identifier(self, params) - def generate_text(self, text: str, overloads: GenerateParams | None = None) -> str: - """ - Generates a string completion of the given text. - - Args: - text: The input text to be completed. - overloads: The parameters to be used for completion. - - Returns: - The completed text. - - Raises: - NotImplementedError: This generator does not support this method. - """ - raise NotImplementedError("generate_text is not supported by this generator.") - - async def agenerate_text(self, text: str, overloads: GenerateParams | None = None) -> str: - """async version of [rigging.generator.Generator.generate_text][]""" - raise NotImplementedError("agenerate_text is not supported by this generator.") - - # Batching messages - - def batch_messages( + def generate_messages( self, - many: t.Sequence[t.Sequence[Message]], - overloads: t.Sequence[GenerateParams | None] | None = None, + messages: t.Sequence[t.Sequence[Message]], + params: t.Sequence[GenerateParams], *, - fixed: t.Sequence[Message] | None = None, + prefix: t.Sequence[Message] | None = None, ) -> t.Sequence[Message]: """ Generate a batch of messages using the specified parameters. Note: - If supplied, the length of `overloads` must be the same as the length of `many`. + The length of `params` must be the same as the length of `many`. Args: - many: A sequence of sequences of messages. - overloads: A sequence of GenerateParams objects or None. - fixed: A sequence of fixed messages to be prefixed before every item of `many`. + messages: A sequence of sequences of messages. + params: A sequence of GenerateParams objects. + prefix: A sequence of fixed messages to be prefixed before every item of `many`. Returns: - Sequence[MessageA sequence of generated messages. + A sequence of generated messages. Raises: NotImplementedError: This method is not supported by this generator. 
""" - raise NotImplementedError("batch_messages is not supported by this generator.") + raise NotImplementedError("`generate_messages` is not supported by this generator.") - async def abatch_messages( + async def agenerate_messages( self, - many: t.Sequence[t.Sequence[Message]], - overloads: t.Sequence[GenerateParams | None] | None = None, + messages: t.Sequence[t.Sequence[Message]], + params: t.Sequence[GenerateParams], *, - fixed: t.Sequence[Message], + prefix: t.Sequence[Message] | None = None, ) -> t.Sequence[Message]: - """async version of [rigging.generator.Generator.batch_messages][]""" - raise NotImplementedError("abatch_messages is not supported by this generator.") - - # Batching texts + """async version of [rigging.generator.Generator.generate_messages][]""" + raise NotImplementedError("`agenerate_messages` is not supported by this generator.") - def batch_texts( + def generate_texts( self, - many: t.Sequence[str], - overloads: t.Sequence[GenerateParams | None] | None = None, + texts: t.Sequence[str], + params: t.Sequence[GenerateParams], *, - fixed: str | None = None, + prefix: str | None = None, ) -> t.Sequence[str]: """ - Generate a batch of texts using the generator. + Generate a batch of text completions using the generator. + + Note: + This method falls back to looping over the inputs and calling `generate_text` for each item. Note: - If supplied, the length of `overloads` must be the same as the length of `many`. + If supplied, the length of `params` must be the same as the length of `many`. Args: - many: The input texts for generating the batch. - overloads: Additional parameters for generating each text in the batch. - fixed: A fixed input text to be used as a prefix for all of `many`. + texts: The input texts for generating the batch. + params: Additional parameters for generating each text in the batch. + prefix: A fixed input text to be used as a prefix for all of `many`. Returns: - Sequence[strThe generated texts in the batch. + The generated texts. Raises: NotImplementedError: This method is not supported by this generator. """ - raise NotImplementedError("batch_texts is not supported by this generator.") + raise NotImplementedError("`generate_texts` is not supported by this generator.") - async def abatch_texts( + async def agenerate_texts( self, - many: t.Sequence[str], - overloads: t.Sequence[GenerateParams | None] | None = None, + texts: t.Sequence[str], + params: t.Sequence[GenerateParams], *, - fixed: str | None = None, + prefix: str | None = None, ) -> t.Sequence[str]: - """async version of [rigging.generator.Generator.batch_texts][]""" - raise NotImplementedError("abatch_texts is not supported by this generator.") + """async version of [rigging.generator.Generator.generate_texts][]""" + raise NotImplementedError("`agenerate_texts` is not supported by this generator.") # Helper alternative to chat(generator) -> generator.chat(...) # - # Overloads seem odd, but mypy doesn't like the TypedDict in a list otherwise + # params seem odd, but mypy doesn't like the TypedDict in a list otherwise @t.overload def chat( self, messages: t.Sequence[MessageDict], - overloads: GenerateParams | None = None, + params: GenerateParams | None = None, ) -> PendingChat: ... @t.overload def chat( - self, messages: t.Sequence[Message] | MessageDict | Message | str, overloads: GenerateParams | None = None + self, messages: t.Sequence[Message] | MessageDict | Message | str, params: GenerateParams | None = None ) -> PendingChat: ... 
def chat( self, messages: t.Sequence[MessageDict] | t.Sequence[Message] | MessageDict | Message | str, - overloads: GenerateParams | None = None, + params: GenerateParams | None = None, ) -> PendingChat: """ - Builds a pending chat with the given messages and optional overloads. + Builds a pending chat with the given messages and optional params. Args: messages: The messages to be sent in the chat. - overloads: Optional parameters for generating responses. + params: Optional parameters for generating responses. Returns: Pending chat to run. """ - return PendingChat(self, Message.fit_as_list(messages), overloads) + return PendingChat(self, Message.fit_as_list(messages), params) # Helper alternative to complete(generator) -> generator.complete(...) - def complete(self, text: str, overloads: GenerateParams | None = None) -> PendingCompletion: + def complete(self, text: str, params: GenerateParams | None = None) -> PendingCompletion: """ Generates a pending string completion of the given text. Args: text: The input text to be completed. - overloads: The parameters to be used for completion. + params: The parameters to be used for completion. Returns: The completed text. """ - return PendingCompletion(self, text, overloads) + return PendingCompletion(self, text, params) @t.overload def chat( generator: "Generator", messages: t.Sequence[MessageDict], - overloads: GenerateParams | None = None, + params: GenerateParams | None = None, ) -> PendingChat: ... @@ -349,7 +294,7 @@ def chat( def chat( generator: "Generator", messages: t.Sequence[Message] | MessageDict | Message | str, - overloads: GenerateParams | None = None, + params: GenerateParams | None = None, ) -> PendingChat: ... @@ -357,45 +302,61 @@ def chat( def chat( generator: "Generator", messages: t.Sequence[MessageDict] | t.Sequence[Message] | MessageDict | Message | str, - overloads: GenerateParams | None = None, + params: GenerateParams | None = None, ) -> PendingChat: """ - Creates a pending chat using the given generator, messages, and overloads. + Creates a pending chat using the given generator, messages, and params. Args: generator: The generator to use for creating the chat. messages: The messages to include in the chat. Can be a single message or a sequence of messages. - overloads: Additional parameters for generating the chat. + params: Additional parameters for generating the chat. Returns: Pending chat to run. """ - return generator.chat(messages, overloads) + return generator.chat(messages, params) def complete( generator: Generator, text: str, - overloads: GenerateParams | None = None, + params: GenerateParams | None = None, ) -> PendingCompletion: - return generator.complete(text, overloads) + return generator.complete(text, params) -def get_identifier(generator: Generator, overloads: GenerateParams | None = None) -> str: +def get_identifier(generator: Generator, params: GenerateParams | None = None) -> str: """ - Returns the identifier for the given generator. + Converts the generator instance back into a rigging identifier string. - Delegates to [rigging.generator.Generator.to_identifier][] + Warning: + The `extra` parameter field is not currently supported in identifiers. Args: generator: The generator object. - overloads: The generate parameters. + params: The generation parameters. Returns: - The identifier for the generator. + The identifier string for the generator. 
""" - return generator.to_identifier(overloads) + + provider = next(name for name, klass in g_providers.items() if isinstance(generator, klass)) + identifier = f"{provider}!{generator.model}" + + merged_params = generator.params.merge_with(params) + if merged_params.extra: + logger.warning("Extra parameters are not supported in identifiers.") + merged_params.extra = {} + + params_dict = merged_params.to_dict() + if params_dict: + if "stop" in params_dict: + params_dict["stop"] = ";".join(params_dict["stop"]) + identifier += f",{','.join([f'{k}={v}' for k, v in params_dict.items()])}" + + return identifier def get_generator(identifier: str) -> Generator: @@ -510,126 +471,118 @@ class LiteLLMGenerator(Generator): Find more information about supported models and formats [in their docs.](https://docs.litellm.ai/docs/providers). Note: - While this generator implements the batch methods, they are not performant and simply loop over the inputs. + Batching support is not performant and simply a loop over inputs. """ - def generate_message(self, messages: t.Sequence[Message], overloads: GenerateParams | None = None) -> Message: - trace_messages(messages, "Conversations") - - messages_as_dicts = [message.model_dump(include={"role", "content"}) for message in messages] - params = self._merge_params(overloads) - result = litellm.completion(self.model, messages_as_dicts, api_key=self.api_key, **params) + def _generate_message(self, messages: t.Sequence[Message], params: GenerateParams) -> Message: + result = litellm.completion( + self.model, + [message.model_dump(include={"role", "content"}) for message in messages], + api_key=self.api_key, + **self.params.merge_with(params).to_dict(), + ) response = result.choices[-1].message.content.strip() - next_message = Message(role="assistant", content=response) - - trace_messages([next_message], "Response") - - return next_message - - async def agenerate_message( - self, messages: t.Sequence[Message], overloads: GenerateParams | None = None - ) -> Message: - trace_messages(messages, "Conversations") - - messages_as_dicts = [message.model_dump(include={"role", "content"}) for message in messages] - params = self._merge_params(overloads) - result = await litellm.acompletion(self.model, messages_as_dicts, api_key=self.api_key, **params) + return Message(role="assistant", content=response) + + async def _agenerate_message(self, messages: t.Sequence[Message], params: GenerateParams) -> Message: + result = await litellm.acompletion( + self.model, + [message.model_dump(include={"role", "content"}) for message in messages], + api_key=self.api_key, + **self.params.merge_with(params).to_dict(), + ) response = result.choices[-1].message.content.strip() - next_message = Message(role="assistant", content=response) - - trace_messages([next_message], "Response") - - return next_message - - def generate_text(self, text: str, overloads: GenerateParams | None = None) -> str: - trace_str(text, "Text") - - params = self._merge_params(overloads) - result = litellm.text_completion(text, self.model, api_key=self.api_key, **params) - completion: str = result.choices[-1]["text"] - - trace_str(completion, "Completion") + return Message(role="assistant", content=response) - return completion - - async def agenerate_text(self, text: str, overloads: GenerateParams | None = None) -> str: - trace_str(text, "Text") - - params = self._merge_params(overloads) - result = await litellm.atext_completion(text, self.model, api_key=self.api_key, **params) - completion: str = 
result.choices[-1]["text"] - - trace_str(completion, "Completion") + def _generate_text(self, text: str, params: GenerateParams) -> str: + result = litellm.text_completion( + text, self.model, api_key=self.api_key, **self.params.merge_with(params).to_dict() + ) + return t.cast(str, result.choices[-1]["text"]) - return completion + async def _agenerate_text(self, text: str, params: GenerateParams) -> str: + result = await litellm.atext_completion( + text, self.model, api_key=self.api_key, **self.params.merge_with(params).to_dict() + ) + return t.cast(str, result.choices[-1]["text"]) - def batch_messages( + def generate_messages( self, - many: t.Sequence[t.Sequence[Message]], - overloads: t.Sequence[GenerateParams | None] | None = None, + messages: t.Sequence[t.Sequence[Message]], + params: t.Sequence[GenerateParams], *, - fixed: t.Sequence[Message] | None = None, + prefix: t.Sequence[Message] | None = None, ) -> t.Sequence[Message]: - if overloads is not None and len(overloads) != len(many): - raise ValueError("Length of overloads must match the length of many.") + if prefix is not None: + messages = [list(prefix) + list(messages) for messages in messages] - overloads = [None] * len(many) if overloads is None else overloads - if fixed is not None: - many = [list(fixed) + list(messages) for messages in many] + generated: list[Message] = [] + for i, (_messages, _params) in enumerate(zip(messages, params, strict=True)): + trace_messages(_messages, f"Messages {i+1}/{len(messages)}") + next_message = self._generate_message(_messages, _params) + generated.append(next_message) + trace_messages([next_message], f"Response {i+1}/{len(messages)}") - return [self.generate_message(messages, overload) for messages, overload in zip(many, overloads, strict=True)] + return generated - async def abatch_messages( + async def agenerate_messages( self, - many: t.Sequence[t.Sequence[Message]], - overloads: t.Sequence[GenerateParams | None] | None = None, + messages: t.Sequence[t.Sequence[Message]], + params: t.Sequence[GenerateParams], *, - fixed: t.Sequence[Message], + prefix: t.Sequence[Message] | None = None, ) -> t.Sequence[Message]: - if overloads is not None and len(overloads) != len(many): - raise ValueError("Length of overloads must match the length of many.") - - overloads = [None] * len(many) if overloads is None else overloads - if fixed is not None: - many = [list(fixed) + list(messages) for messages in many] + if prefix is not None: + messages = [list(prefix) + list(messages) for messages in messages] - return await asyncio.gather( - *[self.agenerate_message(messages, overload) for messages, overload in zip(many, overloads, strict=True)] + generated: list[Message] = await asyncio.gather( + *[self._agenerate_message(_messages, _params) for _messages, _params in zip(messages, params, strict=True)] ) - def batch_texts( + for i, (_messages, _generated) in enumerate(zip(messages, generated, strict=True)): + trace_messages(_messages, f"Messages {i+1}/{len(messages)}") + trace_messages([_generated], f"Response {i+1}/{len(messages)}") + + return generated + + def generate_texts( self, - many: t.Sequence[str], - overloads: t.Sequence[GenerateParams | None] | None = None, + texts: t.Sequence[str], + params: t.Sequence[GenerateParams], *, - fixed: str | None = None, + prefix: str | None = None, ) -> t.Sequence[str]: - if overloads is not None and len(overloads) != len(many): - raise ValueError("Length of overloads must match the length of many.") + if prefix is not None: + texts = [prefix + text for text in 
texts] - overloads = [None] * len(many) if overloads is None else overloads - if fixed is not None: - many = [fixed + message for message in many] + generated: list[str] = [] + for i, (text, _params) in enumerate(zip(texts, params, strict=True)): + trace_str(text, f"Text {i+1}/{len(texts)}") + response = self._generate_text(text, _params) + generated.append(response) + trace_str(response, f"Generated {i+1}/{len(texts)}") - return [self.generate_text(message, overload) for message, overload in zip(many, overloads, strict=True)] + return generated - async def abatch_texts( + async def agenerate_texts( self, - many: t.Sequence[str], - overloads: t.Sequence[GenerateParams | None] | None = None, + texts: t.Sequence[str], + params: t.Sequence[GenerateParams], *, - fixed: str | None = None, + prefix: str | None = None, ) -> t.Sequence[str]: - if overloads is not None and len(overloads) != len(many): - raise ValueError("Length of overloads must match the length of many.") - - overloads = [None] * len(many) if overloads is None else overloads - if fixed is not None: - many = [fixed + message for message in many] + if prefix is not None: + texts = [prefix + text for text in texts] - return await asyncio.gather( - *[self.agenerate_text(message, overload) for message, overload in zip(many, overloads, strict=True)] + generated: list[str] = await asyncio.gather( + *[self._agenerate_text(text, _params) for text, _params in zip(texts, params, strict=True)] ) + for i, (text, response) in enumerate(zip(texts, generated, strict=True)): + trace_str(text, f"Text {i+1}/{len(texts)}") + trace_str(response, f"Generated {i+1}/{len(texts)}") + + return generated + g_providers["litellm"] = LiteLLMGenerator From bc1bf5eed1586951326ee2e5eabda5d08e65b400 Mon Sep 17 00:00:00 2001 From: monoxgas Date: Mon, 6 May 2024 11:11:48 -0600 Subject: [PATCH 14/16] More batch updates and async then/map callbacks --- rigging/chat.py | 227 ++++++++++++++++++++++++++++++++++--------- rigging/generator.py | 26 +++-- 2 files changed, 200 insertions(+), 53 deletions(-) diff --git a/rigging/chat.py b/rigging/chat.py index 06b8e1c..7307309 100644 --- a/rigging/chat.py +++ b/rigging/chat.py @@ -4,10 +4,12 @@ They are the primary way to interact with the generator. 
""" +import asyncio import typing as t from copy import deepcopy from dataclasses import dataclass from datetime import datetime +from typing import runtime_checkable from uuid import UUID, uuid4 from loguru import logger @@ -54,21 +56,23 @@ class Chat(BaseModel): metadata: dict[str, t.Any] = Field(default_factory=dict) """Additional metadata for the chat.""" - pending: t.Optional["PendingChat"] = Field(None, exclude=True, repr=False) - """The pending chat associated with the chat.""" + generator: t.Optional["Generator"] = Field(None, exclude=True, repr=False) + """The generator associated with the chat.""" + params: t.Optional["GenerateParams"] = Field(None, exclude=True, repr=False) + """Any additional generation params used for this chat.""" @computed_field(repr=False) def generator_id(self) -> str | None: """The identifier of the generator used to create the chat""" - if self.pending is not None: - return self.pending.generator.to_identifier(self.pending.params) + if self.generator is not None: + return self.generator.to_identifier(self.params) return None def __init__( self, messages: Messages, generated: Messages | None = None, - pending: t.Optional["PendingChat"] = None, + generator: t.Optional["Generator"] = None, **kwargs: t.Any, ): """ @@ -77,19 +81,19 @@ def __init__( Args: messages: The messages for the chat. generated: The next messages for the chat. - pending: The pending chat. + generator: The generator associated with this chat. **kwargs: Additional keyword arguments (typically used for deserialization) """ from rigging.generator import get_generator - if "generator_id" in kwargs and pending is None: + if "generator_id" in kwargs and generator is None: + # TODO: Should we move params to self.params? generator = get_generator(kwargs.pop("generator_id")) - pending = generator.chat(messages) super().__init__( messages=Message.fit_as_list(messages), generated=Message.fit_as_list(generated) if generated is not None else [], - pending=pending, + generator=generator, **kwargs, ) @@ -149,13 +153,12 @@ def restart(self, *, generator: t.Optional["Generator"] = None, include_all: boo Raises: ValueError: If the chat was not created with a PendingChat and no generator is provided. 
""" - messages = self.all if include_all else self.messages - if generator is not None: - return generator.chat(messages) - elif self.pending is None: - raise ValueError("Cannot restart chat that was not created with a PendingChat") - return PendingChat(self.pending.generator, messages, self.pending.params) + if generator is None: + generator = self.generator + if generator is None: + raise ValueError("Cannot restart a chat without an associated generator") + return generator.chat(messages, self.params) def fork( self, @@ -186,7 +189,7 @@ def clone(self, *, only_messages: bool = False) -> "Chat": new = Chat( [m.model_copy() for m in self.messages], [m.model_copy() for m in self.generated], - self.pending, + self.generator, ) if not only_messages: new.metadata = deepcopy(self.metadata) @@ -269,14 +272,61 @@ def inject_tool_prompt(self, tools: t.Sequence[Tool]) -> None: self.inject_system_content(tool_system_prompt) -# Passed the next message, returns whether or not to continue -# and an optional list of messages to append before continuing -UntilMessageCallback = t.Callable[[Message], tuple[bool, list[Message]]] +# Callbacks for pending chat + + +class UntilMessageCallback(t.Protocol): + def __call__(self, message: Message) -> tuple[bool, list[Message]]: + """ + Passed the next message, returns whether or not to continue and an + optional list of messages to append before continuing. + """ + ... + -ThenChatCallback = t.Callable[[Chat], Chat | None] +@runtime_checkable +class ThenChatCallback(t.Protocol): + def __call__(self, chat: Chat) -> Chat | None: + """ + Passed a finalized chat to process and can return a new chat to replace it. + """ + ... + + +@runtime_checkable +class AsyncThenChatCallback(t.Protocol): + async def __call__(self, chat: Chat) -> Chat | None: + """ + async variant of the [rigging.chat.ThenChatCallback][] protocol. + """ + ... + + +@runtime_checkable +class MapChatCallback(t.Protocol): + def __call__(self, chats: list[Chat]) -> list[Chat]: + """ + Passed a finalized chats to process. Can replace chats in the pipeline by returning + a new chat object. + """ + ... + + +@runtime_checkable +class AsyncMapChatCallback(t.Protocol): + async def __call__(self, chats: list[Chat]) -> list[Chat]: + """ + async variant of the [rigging.chat.MapChatCallback][] protocol. + """ + ... + + +PostRunCallbacks = ThenChatCallback | AsyncThenChatCallback | MapChatCallback | AsyncMapChatCallback MessageProducer = t.Generator[t.Sequence[Message], None, None] -BatchProducer = t.Generator[t.Sequence[t.Sequence[Message]], None, None] +MessagesProducer = t.Generator[t.Sequence[t.Sequence[Message]], None, None] + +# Helper classes to manage complexity inside the run functions @dataclass @@ -321,7 +371,7 @@ def __init__( self.until_tools: list[Tool] = [] self.inject_tool_prompt: bool = True self.force_tool: bool = False - self.then_callbacks: list[ThenChatCallback] = [] + self.post_run_callbacks: list[PostRunCallbacks] = [] # self.producer: MessageProducer | None = None def with_(self, params: t.Optional["GenerateParams"] = None, **kwargs: t.Any) -> "PendingChat": @@ -428,13 +478,18 @@ def meta(self, **kwargs: t.Any) -> "PendingChat": self.metadata.update(kwargs) return self - def then(self, callback: ThenChatCallback) -> "PendingChat": + def then(self, callback: ThenChatCallback | AsyncThenChatCallback) -> "PendingChat": """ Registers a callback to be executed after the generation process completes. Note: Returning a Chat object from the callback will replace the current chat. 
-            for the remainder of the callbacks + return value of `run()`.
+            This replacement holds for the remainder of the callbacks and the
+            return value of `run()`, and is optional.
+
+        Warning:
+            If you implement an async callback, you must use the async variant of the
+            run methods when executing the generation process.

        ```
        def process(chat: Chat) -> Chat | None:
            ...

        pending.then(process).run()
        ```

        Args:
            callback: The callback function to be executed.

        Returns:
            The current instance of the chat.
        """
-        self.then_callbacks.append(callback)
+        self.post_run_callbacks.append(callback)
+        return self
+
+    def map(self, callback: MapChatCallback | AsyncMapChatCallback) -> "PendingChat":
+        """
+        Registers a callback to be executed after the generation process completes.
+
+        Note:
+            You must return a list of Chat objects from the callback, which will
+            represent the state of the chats for the remainder of the callbacks and the return.
+
+        Warning:
+            If you implement an async callback, you must use the async variant of the
+            run methods when executing the generation process.
+
+        ```
+        def process(chats: list[Chat]) -> list[Chat]:
+            ...
+
+        pending.map(process).run()
+        ```
+
+        Args:
+            callback: The callback function to be executed.
+
+        Returns:
+            The current instance of the chat.
+        """
+        self.post_run_callbacks.append(callback)
        return self

    # def from_(self, producer: MessageProducer) -> "PendingChat":
@@ -695,7 +778,7 @@ def _until(
        for _ in range(max_rounds):
            logger.trace(
-                f"_until({callback.__name__}) round {_ + 1}/{max_rounds} (attempt_recovery={attempt_recovery})"
+                f"_until({getattr(callback, '__name__', 'callback')}) round {_ + 1}/{max_rounds} (attempt_recovery={attempt_recovery})"
            )
            next_message = yield running_messages
            should_continue, step_messages = callback(next_message)
@@ -724,13 +807,31 @@ def _process(self) -> t.Generator[list[Message], Message, list[Message]]:
        new_messages = new_messages[:-1] + generated
        return new_messages

-    def _then(self, chat: Chat) -> Chat:
-        # TODO: Adding async support here would be nice
-        for callback in self.then_callbacks:
-            chat = callback(chat) or chat
-        return chat
+    def _post_run(self, chats: list[Chat]) -> list[Chat]:
+        for callback in self.post_run_callbacks:
+            if isinstance(callback, ThenChatCallback):
+                chats = [callback(chat) or chat for chat in chats]
+            elif isinstance(callback, MapChatCallback):
+                chats = callback(chats)
+
+        return chats

-    def _prepare(self) -> None:
+    async def _apost_run(self, chats: list[Chat]) -> list[Chat]:
+        if not all(
+            isinstance(callback, AsyncThenChatCallback | AsyncMapChatCallback) for callback in self.post_run_callbacks
+        ):
+            raise ValueError("Cannot use non-async then()/map() callbacks inside an async run call")
+
+        for callback in self.post_run_callbacks:
+            if isinstance(callback, AsyncThenChatCallback):
+                updated = await asyncio.gather(*[callback(chat) for chat in chats])
+                chats = [updated[i] or chat for i, chat in enumerate(chats)]
+            elif isinstance(callback, AsyncMapChatCallback):
+                chats = await callback(chats)
+
+        return chats
+
+    def _pre_run(self) -> None:
        if self.until_tools:
            if self.inject_tool_prompt:
                self.chat.inject_tool_prompt(self.until_tools)
@@ -755,6 +856,18 @@ def _fit_params(
            params = [self.params.merge_with(p) for p in params]
        return [(p or GenerateParams()) for p in params]

+    def _fit_many(
+        self,
+        count: int,
+        many: t.Sequence[t.Sequence[Message]] | t.Sequence[Message] | t.Sequence[MessageDict] | t.Sequence[str],
+    ) -> list[list[Message]]:
+        many = [Message.fit_as_list(m) for m in many]
+        if len(many) < count:
+            if len(many) != 1:
+                raise ValueError(f"Can't fit many of
length {len(many)} to {count}") + many = many * count + return many + # TODO: There is an embarrassing amount of code duplication here # between the async and non-async methods, batch and many, etc. @@ -810,7 +923,11 @@ def run_many( except StopIteration as stop: state.done = True state.chat = Chat( - self.chat.all, t.cast(list[Message], stop.value), pending=self, metadata=self.metadata + self.chat.all, + t.cast(list[Message], stop.value), + generator=self.generator, + metadata=self.metadata, + params=state.params, ) except ExhaustedMaxRoundsError: if not skip_failed: @@ -819,7 +936,7 @@ def run_many( pending_states = [s for s in pending_states if not s.done] - return [self._then(s.chat) for s in states if s.chat is not None] + return self._post_run([s.chat for s in states if s.chat is not None]) async def arun_many( self, @@ -844,7 +961,11 @@ async def arun_many( except StopIteration as stop: state.done = True state.chat = Chat( - self.chat.all, t.cast(list[Message], stop.value), pending=self, metadata=self.metadata + self.chat.all, + t.cast(list[Message], stop.value), + generator=self.generator, + metadata=self.metadata, + params=state.params, ) except ExhaustedMaxRoundsError: if not skip_failed: @@ -853,7 +974,7 @@ async def arun_many( pending_states = [s for s in pending_states if not s.done] - return [self._then(s.chat) for s in states if s.chat is not None] + return await self._apost_run([s.chat for s in states if s.chat is not None]) # Batch messages @@ -879,8 +1000,13 @@ def run_batch( Returns: A list of generatated Chats. """ - many = [Message.fit_as_list(m) for m in many] - params = self._fit_params(len(many), params) + if isinstance(many, str | dict): + raise ValueError("many must be a sequence, even if it only contains one item") + + count = max(len(many), len(params) if params is not None else 0) + many = self._fit_many(count, many) + params = self._fit_params(count, params) + states: list[BatchRunState] = [ BatchRunState(m, [], p, self._process()) for m, p in zip(many, params, strict=True) ] @@ -900,7 +1026,11 @@ def run_batch( except StopIteration as stop: state.done = True state.chat = Chat( - self.chat.all, t.cast(list[Message], stop.value), pending=self, metadata=self.metadata + self.chat.all, + t.cast(list[Message], stop.value), + generator=self.generator, + metadata=self.metadata, + params=state.params, ) except ExhaustedMaxRoundsError: if not skip_failed: @@ -909,7 +1039,7 @@ def run_batch( pending_states = [s for s in pending_states if not s.done] - return [self._then(s.chat) for s in states if s.chat is not None] + return self._post_run([s.chat for s in states if s.chat is not None]) async def arun_batch( self, @@ -919,8 +1049,13 @@ async def arun_batch( skip_failed: bool = False, ) -> list[Chat]: """async variant of the [rigging.chat.PendingChat.run_batch][] method.""" - many = [Message.fit_as_list(m) for m in many] - params = self._fit_params(len(many), params) + if isinstance(many, str | dict): + raise ValueError("many must be a sequence, even if it only contains one item") + + count = max(len(many), len(params) if params is not None else 0) + many = self._fit_many(count, many) + params = self._fit_params(count, params) + states: list[BatchRunState] = [ BatchRunState(m, [], p, self._process()) for m, p in zip(many, params, strict=True) ] @@ -940,7 +1075,11 @@ async def arun_batch( except StopIteration as stop: state.done = True state.chat = Chat( - self.chat.all, t.cast(list[Message], stop.value), pending=self, metadata=self.metadata + self.chat.all, + 
t.cast(list[Message], stop.value), + generator=self.generator, + metadata=self.metadata, + params=state.params, ) except ExhaustedMaxRoundsError: if not skip_failed: @@ -949,4 +1088,4 @@ async def arun_batch( pending_states = [s for s in pending_states if not s.done] - return [self._then(s.chat) for s in states if s.chat is not None] + return await self._apost_run([s.chat for s in states if s.chat is not None]) diff --git a/rigging/generator.py b/rigging/generator.py index 4d67d7d..b9f9036 100644 --- a/rigging/generator.py +++ b/rigging/generator.py @@ -9,8 +9,8 @@ from loguru import logger from pydantic import BaseModel, ConfigDict, Field, field_validator -from rigging.chat import PendingChat -from rigging.completion import PendingCompletion +from rigging.chat import Chat, PendingChat +from rigging.completion import Completion, PendingCompletion from rigging.error import InvalidModelSpecifiedError from rigging.message import ( Message, @@ -244,17 +244,19 @@ def chat( @t.overload def chat( - self, messages: t.Sequence[Message] | MessageDict | Message | str, params: GenerateParams | None = None + self, + messages: t.Sequence[Message] | MessageDict | Message | str | None = None, + params: GenerateParams | None = None, ) -> PendingChat: ... def chat( self, - messages: t.Sequence[MessageDict] | t.Sequence[Message] | MessageDict | Message | str, + messages: t.Sequence[MessageDict] | t.Sequence[Message] | MessageDict | Message | str | None = None, params: GenerateParams | None = None, ) -> PendingChat: """ - Builds a pending chat with the given messages and optional params. + Build a pending chat with the given messages and optional params overloads. Args: messages: The messages to be sent in the chat. @@ -263,13 +265,13 @@ def chat( Returns: Pending chat to run. """ - return PendingChat(self, Message.fit_as_list(messages), params) + return PendingChat(self, Message.fit_as_list(messages) if messages else [], params) # Helper alternative to complete(generator) -> generator.complete(...) def complete(self, text: str, params: GenerateParams | None = None) -> PendingCompletion: """ - Generates a pending string completion of the given text. + Build a pending string completion of the given text with optional param overloads. Args: text: The input text to be completed. @@ -293,7 +295,7 @@ def chat( @t.overload def chat( generator: "Generator", - messages: t.Sequence[Message] | MessageDict | Message | str, + messages: t.Sequence[Message] | MessageDict | Message | str | None = None, params: GenerateParams | None = None, ) -> PendingChat: ... 
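Taken together, the batch entrypoints above all fan out the same way from user code. A rough usage sketch under the signatures in this patch (the model id and prompts are illustrative):

```python
import rigging as rg
from rigging.generator import GenerateParams

generator = rg.get_generator("gpt-3.5-turbo")

# An empty pending chat acts as the shared prefix for every batch item
chats = generator.chat().run_batch(
    ["Name a color.", "Name a shape.", "Name a metal."],
    params=[GenerateParams(temperature=t) for t in (0.1, 0.5, 1.0)],
    skip_failed=True,  # drop items that exhaust their max rounds instead of raising
)

for chat in chats:
    print(chat.last.content)
```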
@@ -301,7 +303,7 @@ def chat(
 def chat(
     generator: "Generator",
-    messages: t.Sequence[MessageDict] | t.Sequence[Message] | MessageDict | Message | str,
+    messages: t.Sequence[MessageDict] | t.Sequence[Message] | MessageDict | Message | str | None = None,
     params: GenerateParams | None = None,
 ) -> PendingChat:
     """
@@ -586,3 +588,9 @@ async def agenerate_texts(
 g_providers["litellm"] = LiteLLMGenerator
+
+# TODO: This fixes some almost-circular import issues and
+# typed forwardrefs we use in the other module
+
+Chat.model_rebuild()
+Completion.model_rebuild()

From 347c460dfe74a5738864f6be9dd54b142383e8f3 Mon Sep 17 00:00:00 2001
From: monoxgas
Date: Mon, 6 May 2024 12:43:20 -0600
Subject: [PATCH 15/16] Fixing tests

---
 tests/test_generation.py         | 32 +++++++++++++++++++++++---------
 tests/test_generator_creation.py | 24 ++++++++++++++++++++++--
 tests/test_messages.py           |  6 +++---
 3 files changed, 48 insertions(+), 14 deletions(-)

diff --git a/tests/test_generation.py b/tests/test_generation.py
index 53e5600..3bc417f 100644
--- a/tests/test_generation.py
+++ b/tests/test_generation.py
@@ -8,22 +8,36 @@

 class EchoGenerator(Generator):
-    def complete(self, messages: t.Sequence[Message], overloads: GenerateParams | None = None) -> Message:
-        return Message(role="assistant", content=messages[-1].content)
+    def generate_messages(
+        self,
+        messages: t.Sequence[t.Sequence[Message]],
+        params: t.Sequence[GenerateParams],
+        *,
+        prefix: t.Sequence[Message] | None = None,
+    ) -> t.Sequence[Message]:
+        if prefix is not None:
+            messages = [list(prefix) + list(m) for m in messages]

-    async def acomplete(self, messages: t.Sequence[Message], overloads: GenerateParams | None = None) -> Message:
-        return self.complete(messages, overloads)
+        assert len(messages) == 1
+        return [Message(role="assistant", content=m[-1].content) for m in messages]


 class CallbackGenerator(Generator):
     callback: t.Callable[["CallbackGenerator", t.Sequence[Message]], str] | None = None

-    def complete(self, messages: t.Sequence[Message], overloads: GenerateParams | None = None) -> Message:
-        assert self.callback is not None, "Callback must be defined for CallbackGenerator"
-        return Message(role="assistant", content=self.callback(self, messages))
+    def generate_messages(
+        self,
+        messages: t.Sequence[t.Sequence[Message]],
+        params: t.Sequence[GenerateParams],
+        *,
+        prefix: t.Sequence[Message] | None = None,
+    ) -> t.Sequence[Message]:
+        if prefix is not None:
+            messages = [list(prefix) + list(m) for m in messages]

-    async def acomplete(self, messages: t.Sequence[Message], overloads: GenerateParams | None = None) -> Message:
-        return self.complete(messages, overloads)
+        assert len(messages) == 1
+        assert self.callback is not None
+        return [Message(role="assistant", content=self.callback(self, m)) for m in messages]


 def test_until_parsed_as_with_reset() -> None:
diff --git a/tests/test_generator_creation.py b/tests/test_generator_creation.py
index db07016..1da584e 100644
--- a/tests/test_generator_creation.py
+++ b/tests/test_generator_creation.py
@@ -1,7 +1,7 @@
 import pytest

 from rigging.error import InvalidModelSpecifiedError
-from rigging.generator import GenerateParams, LiteLLMGenerator, get_generator, register_generator
+from rigging.generator import GenerateParams, LiteLLMGenerator, get_generator, get_identifier, register_generator
 from tests.test_generation import EchoGenerator

@@ -23,7 +23,7 @@ def test_get_generator_invalid_provider(identifier: str) -> None:
     [
         ("litellm!test_model,max_tokens=123,top_p=10",
GenerateParams(max_tokens=123, top_p=10)), ("litellm!test_model,temperature=0.5", GenerateParams(temperature=0.5)), - ("test_model,max_tokens=100,temperature=1.0", GenerateParams(max_tokens=100, temperature=1.0)), + ("test_model,temperature=1.0,max_tokens=100", GenerateParams(max_tokens=100, temperature=1.0)), ], ) def test_get_generator_with_params(identifier: str, valid_params: GenerateParams) -> None: @@ -33,6 +33,26 @@ def test_get_generator_with_params(identifier: str, valid_params: GenerateParams assert generator.params == valid_params +@pytest.mark.parametrize( + "identifier", + [ + ("litellm!test_model,max_tokens=1024,top_p=0.1"), + ("litellm!custom,temperature=1.0,max_tokens=100,api_base=https://localhost:8000"), + ("litellm!many/model/slashes,stop=a;b;c;"), + ], +) +def test_identifier_roundtrip(identifier: str) -> None: + generator = get_generator(identifier) + assert generator.to_identifier() == identifier + + +def test_get_identifier_no_extra() -> None: + generator = get_generator("testing_model,temperature=0.5") + generator.params.extra = {"abc": 123} + identifier = get_identifier(generator) + assert "extra" not in identifier + + @pytest.mark.parametrize("identifier", ["litellm:invalid,stuff:test,t1/123", "litellm:invalid,stuff:test,t1/123"]) def test_get_generator_invalid_structure_format(identifier: str) -> None: with pytest.raises(InvalidModelSpecifiedError): diff --git a/tests/test_messages.py b/tests/test_messages.py index 8ff2f26..4f4a3e1 100644 --- a/tests/test_messages.py +++ b/tests/test_messages.py @@ -150,7 +150,7 @@ def test_chat_continue() -> None: Message("user", "Hello"), Message("assistant", "Hi there!"), ], - pending=PendingChat(get_generator("gpt-3.5"), [], GenerateParams()), + generator=get_generator("gpt-3.5"), ) continued = chat.continue_([Message("user", "How are you?")]).chat @@ -163,7 +163,7 @@ def test_chat_continue() -> None: def test_pending_chat_continue() -> None: pending = PendingChat(get_generator("gpt-3.5"), [], GenerateParams()) - continued = pending.continue_([Message("user", "Hello")]) + continued = pending.fork([Message("user", "Hello")]) assert continued != pending assert len(continued.chat) == 1 @@ -190,7 +190,7 @@ def test_chat_continue_maintains_parsed_models() -> None: Message("user", "30"), Message("assistant", "
123 Main StAnytown
"), ], - pending=PendingChat(get_generator("gpt-3.5"), [], GenerateParams()), + generator=get_generator("gpt-3.5"), ) chat.all[0].parse(Person) From 7c33d1221803a3bc6d771ffe131770fa00727ed0 Mon Sep 17 00:00:00 2001 From: monoxgas Date: Mon, 6 May 2024 17:16:50 -0600 Subject: [PATCH 16/16] Prepping for 1.0.0rc0 --- README.md | 334 +++------------------------- docs/index.md | 152 +------------ docs/topics/chat.md | 29 --- docs/topics/chats.md | 99 +++++++++ docs/topics/generators.md | 52 +++++ docs/topics/logging.md | 22 -- docs/topics/{model.md => models.md} | 29 +++ docs/topics/setup_logging.md | 23 ++ docs/topics/tools.md | 3 + mkdocs.yml | 6 +- pyproject.toml | 2 +- rigging/completion.py | 2 +- rigging/message.py | 2 + 13 files changed, 242 insertions(+), 513 deletions(-) delete mode 100644 docs/topics/chat.md create mode 100644 docs/topics/chats.md delete mode 100644 docs/topics/logging.md rename docs/topics/{model.md => models.md} (70%) create mode 100644 docs/topics/setup_logging.md diff --git a/README.md b/README.md index 28d83d0..2113d1e 100644 --- a/README.md +++ b/README.md @@ -1,327 +1,43 @@ # Rigging -Rigging is a lightweight LLM interaction framework built on Pydantic XML and LiteLLM. It supports useful primitives for validating LLM output and adding tool calling abilities to models that don't natively support it. It also has various helpers for common tasks like structured object parsing, templating chats, overloading generation parameters, stripping chat segments, and continuing conversations. +Rigging is a lightweight LLM interaction framework built on Pydantic XML. The goal is to make leveraging LLMs in production pipelines as simple and effictive as possible. Here are the highlights: -Modern python with type hints, pydantic validation, native serialization support, etc. +- **Structured Pydantic models** can be used interchangably with unstructured text output. +- LiteLLM as the default generator giving you **instant access to a huge array of models**. +- Add easy **tool calling** abilities to models which don't natively support it. +- Store different models and configs as **simple connection strings** just like databases. +- Chat templating, forking, continuations, generation parameter overloads, stripping segments, etc. +- Modern python with type hints, async support, pydantic validation, serialization, etc. -``` -pip install rigging -``` - -### Overview - -The basic flow in rigging is: - -1. Get a generator object -2. Call `.chat()` to produce a `PendingChat` -3. Call `.run()` on a `PendingChat` to get a `Chat` - -`PendingChat` objects hold any messages waiting to be delivered to an LLM in exchange -for a new response message. Afterwhich it is converted into a `Chat` which holds -all messages prior to generation (`.prev`) and after generation (`.next`). - -You should think of `PendingChat` objects like the configurable pre-generation step -with calls like `.overload()`, `.apply()`, `.until()`, `.using()`, etc. Once you call -`.run()` the generator is used to produce the next message based on the prior context -and any constraints you have in place. Once you have a `Chat` object, the interation -is "done" and you can inspect/parse the messages. - -You'll often see us use functional styling chaining as most of our -utility functions return the object back to you. 
-
-```python
-chat = generator.chat(...).using(...).until(...).overload(...).run()
-```
-
-### Basic Chats
-
-```python
+```py
 import rigging as rg
+from rigging.model import CommaDelimitedAnswer as Answer

-generator = rg.get_generator("claude-2.1")
-chat = generator.chat(
-    [
-        {"role": "system", "content": "You are a wizard harry."},
-        {"role": "user", "content": "Say hello!"},
-    ]
-).run()
-
-print(chat.last)
-# [assistant]: Hello!
-
-print(f"{chat.last!r}")
-# Message(role='assistant', parts=[], content='Hello!')
-
-print(chat.prev)
-# [
-#   Message(role='system', parts=[], content='You are a wizard harry.'),
-#   Message(role='user', parts=[], content='Say hello!'),
-# ]
-
-print(chat.json)
-# [{ ... }]
-
-```
-
-### Model Parsing
-
-```python
-import rigging as rg
-
-class Answer(rg.Model):
-    content: str
-
-chat = (
-    rg.get_generator("claude-3-haiku-20240307")
-    .chat([
-        {"role": "user", "content": f"Say your name between {Answer.xml_tags()}."},
-    ])
-    .until_parsed_as(Answer)
+chat = rg.get_generator('gpt-4') \
+    .chat(f"Give me 3 famous authors between {Answer.xml_tags()} tags.") \
+    .until_parsed_as(Answer) \
     .run()
-)

 answer = chat.last.parse(Answer)
-print(answer.content)
-
-# "Claude"
-
-print(f"{chat.last!r}")
+print(answer.items)

-# Message(role='assistant', parts=[
-#    ParsedMessagePart(model=Answer(content='Claude'), ref='Claude')
-# ], content='Claude')
-
-chat.last.content = "new content" # Updating content strips parsed parts
-print(f"{chat.last!r}")
-
-# Message(role='assistant', parts=[], content='new content')
-```
-
-### Mutliple Models
-
-```python
-import rigging as rg
-
-class Joke(rg.Model):
-    content: str
-
-chat = (
-    rg.get_generator("claude-2.1")
-    .chat([{
-        "role": "user",
-        "content": f"Provide 3 short jokes each wrapped with {Joke.xml_tags()} tags."},
-    ])
-    .run()
-)
-
-jokes = chat.last.parse_set(Joke)
-
-# [
-#    Joke(content="Why don't eggs tell jokes? They'd crack each other up!"),
-#    Joke(content='What do you call a bear with no teeth? A gummy bear!'),
-#    Joke(content='What do you call a fake noodle? An Impasta!')
-# ]
-```
-
-### Tools
-
-```python
-from typing import Annotated
-import rigging as rg
-
-class WeatherTool(rg.Tool):
-    @property
-    def name(self) -> str:
-        return "weather"
-
-    @property
-    def description(self) -> str:
-        return "A tool to get the weather for a location"
-
-    def get_for_city(self, city: Annotated[str, "The city name to get weather for"]) -> str:
-        print(f"[=] get_for_city('{city}')")
-        return f"The weather in {city} is nice today"
-
-chat = (
-    rg.get_generator("mistral/mistral-tiny")
-    .chat(
-        [
-            {"role": "user", "content": "What is the weather in London?"},
-        ]
-    )
-    .using(WeatherTool(), force=True)
-    .run()
-)
-
-# [=] get_for_city('London')
-
-print(chat.last.content)
-
-# "Based on the information I've received, the weather in London is nice today."
-```
-
-### Continuing Chats
-
-```python
-import rigging as rg
-
-generator = rg.get_generator("gpt-3.5-turbo")
-chat = generator.chat([
-    {"role": "user", "content": "Hello, how are you?"},
-])
+Rigging is built and maintained by [dreadnode](https://dreadnode.io) where we use it daily for our work.
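For reference, the `then()`/`map()` callbacks introduced earlier in this series hook into the same chain. A minimal sketch under the signatures above (the model id and prompt are illustrative):

```python
import rigging as rg
from rigging.chat import Chat

def tag(chat: Chat) -> Chat | None:
    chat.metadata["reviewed"] = True  # annotate the finished chat in place
    return None  # returning None keeps the original chat in the pipeline

chats = (
    rg.get_generator("gpt-3.5-turbo")
    .chat("Give me a fun fact about sailing ships.")
    .then(tag)
    .run_many(2)
)
```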
-
-# We can fork (continue_) before generation has occured
-specific = chat.fork("Be specific please.").run()
-poetic = chat.fork("Be as poetic as possible").overload(temperature=1.5).run()
-
-# We can also fork (continue_) after generation
-next_chat = poetic.fork(
-    {"role": "user", "content": "That's good, tell me a joke"}
-)
-
-update = next_chat.run()
-```
-
-### Basic Templating
-
-```python
-import rigging as rg
-
-template = rg.get_generator("gpt-4").chat([
-    {"role": "user", "content": "What is the capitol of $country?"},
-])
-
-for country in ["France", "Germany"]:
-    print(template.apply(country=country).run().last)
-
-# The capital of France is Paris.
-# The capital of Germany is Berlin.
-```
-
-### Overload Generation Params
-
-```python
-import rigging as rg
-
-pending = rg.get_generator("gpt-3.5-turbo,max_tokens=50").chat([
-    {"role": "user", "content": "Say a haiku about boats"},
-])
-
-for temp in [0.1, 0.5, 1.0]:
-    print(pending.overload(temperature=temp).run().last.content)
-
-```
-
-### Complex Models
-
-```python
-import rigging as rg
-
-class Inner(rg.Model):
-    type: str = rg.attr()
-    content: str
-
-class Outer(rg.Model):
-    name: str = rg.attr()
-    inners: list[Inner] = rg.element()
-
-outer = Outer(name="foo", inners=[
-    Inner(type="cat", content="meow"),
-    Inner(type="dog", content="bark")
-])
-
-print(outer.to_pretty_xml())
-
-# <outer name="foo">
-#    <inner type="cat">meow</inner>
-#    <inner type="dog">bark</inner>
-# </outer>
-```
-
-### Strip Parsed Sections
-
-```python
-import rigging as rg
-
-class Reasoning(rg.Model):
-    content: str
-
-meaning = rg.get_generator("claude-2.1").chat([
-    {
-        "role": "user",
-        "content": "What is the meaning of life in one sentence? "
-        f"Document your reasoning between {Reasoning.xml_tags()} tags.",
-    },
-]).run()
-
-# Gracefully handle mising models
-reasoning = meaning.last.try_parse(Reasoning)
-if reasoning:
-    print("reasoning:", reasoning.content.strip())
-
-# Strip parsed content to avoid sharing
-# previous thoughts with the model.
-without_reasons = meaning.strip(Reasoning)
-print("meaning of life:", without_reasons.last.content.strip())
-
-# follow_up = without_thoughts.continue_(...)
-```
-
-### Custom Generator
-
-Any custom generator simply needs to implement a `complete` function, and
-then it can be used anywhere inside rigging.
-
-```python
-class Custom(Generator):
-    # model: str
-    # api_key: str
-    # params: GeneratorParams
-
-    custom_field: bool
-
-    def complete(
-        self,
-        messages: t.Sequence[rg.Message],
-        overloads: GenerateParams = GenerateParams(),
-    ) -> rg.Message:
-        # Access self vars where needed
-        api_key = self.api_key
-        model_id = self.model
-
-        # Merge in args for API overloads
-        marged: dict[str, t.Any] = self._merge_params(overloads)
-
-        # response: str = ...
-
-        return rg.Message("assistant", response)
-
-
-generator = Custom(model='foo', custom_field=True)
-generator.chat(...)
-```
+## Installation
 
+We publish every version to PyPI:
+```bash
+pip install rigging
+```
 
-*Note: we currently don't have anyway to "register" custom generators for `get_generator`.*
-
-### Logging
-
-By default rigging disables it's logger with loguru. To enable it run:
-
-```python
-from loguru import logger
-
-logger.enable('rigging')
-```
-
-To configure loguru terminal + file logging format overrides:
-
-```python
-from rigging.logging import configure_logging
-
-configure_logging(
-    'info',     # stderr level
-    'out.log',  # log file (optional)
-    'trace'     # log file level
-)
-```
-*(This will remove existing handlers, so you might prefer to configure them yourself)*
+If you want to build from source:
+```bash
+cd rigging/
+poetry install
+```
+
+## Getting Started
+
+Head over to **[our documentation](https://rigging.dreadnode.io)** for more information.
\ No newline at end of file
diff --git a/docs/index.md b/docs/index.md
index 663269f..596caa0 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -51,7 +51,7 @@ final [`Chat`][rigging.chat.Chat] which holds all messages prior to generation (
 and after generation ([`.next`][rigging.chat.Chat.next]).
 
 You should think of [`PendingChat`][rigging.chat.PendingChat] objects like the configurable pre-generation step
-with calls like [`.overload()`][rigging.chat.PendingChat.overload], [`.apply()`][rigging.chat.PendingChat.apply],
+with calls like [`.with_()`][rigging.chat.PendingChat.with_], [`.apply()`][rigging.chat.PendingChat.apply],
 [`.until()`][rigging.chat.PendingChat.until], [`.using()`][rigging.chat.PendingChat.using], etc. Once you call
 one of the many [`.run()`][rigging.chat.PendingChat.run] functions, the generator is used to produce the next
 message (or many messages) based on the prior context and any constraints you have in place. Once you have a
@@ -61,151 +61,7 @@ You'll often see us use functional styling chaining as most of our
 utility functions return the object back to you.
 
 ```python
-chat = generator.chat(...) \ (1)
-    .using(...).until(...).overload(...) \
+chat = generator.chat(...) \
+    .using(...).until(...).with_(...) \
     .run()
-```
-
-### Continuing Chats
-
-```python
-import rigging as rg
-
-generator = rg.get_generator("gpt-3.5-turbo")
-chat = generator.chat([
-    {"role": "user", "content": "Hello, how are you?"},
-])
-
-# We can fork (continue_) before generation has occured
-specific = chat.fork("Be specific please.").run()
-poetic = chat.fork("Be as poetic as possible").overload(temperature=1.5).run()
-
-# We can also fork (continue_) after generation
-next_chat = poetic.fork(
-    {"role": "user", "content": "That's good, tell me a joke"}
-)
-
-update = next_chat.run()
-```
-
-### Basic Templating
-
-```python
-import rigging as rg
-
-template = rg.get_generator("gpt-4").chat([
-    {"role": "user", "content": "What is the capitol of $country?"},
-])
-
-for country in ["France", "Germany"]:
-    print(template.apply(country=country).run().last)
-
-# The capital of France is Paris.
-# The capital of Germany is Berlin.
-```
-
-### Overload Generation Params
-
-```python
-import rigging as rg
-
-pending = rg.get_generator("gpt-3.5-turbo,max_tokens=50").chat([
-    {"role": "user", "content": "Say a haiku about boats"},
-])
-
-for temp in [0.1, 0.5, 1.0]:
-    print(pending.overload(temperature=temp).run().last.content)
-
-```
-
-### Complex Models
-
-```python
-import rigging as rg
-
-class Inner(rg.Model):
-    type: str = rg.attr()
-    content: str
-
-class Outer(rg.Model):
-    name: str = rg.attr()
-    inners: list[Inner] = rg.element()
-
-outer = Outer(name="foo", inners=[
-    Inner(type="cat", content="meow"),
-    Inner(type="dog", content="bark")
-])
-
-print(outer.to_pretty_xml())
-
-# <outer name="foo">
-#    <inner type="cat">meow</inner>
-#    <inner type="dog">bark</inner>
-# </outer>
-```
-
-### Strip Parsed Sections
-
-```python
-import rigging as rg
-
-class Reasoning(rg.Model):
-    content: str
-
-meaning = rg.get_generator("claude-2.1").chat([
-    {
-        "role": "user",
-        "content": "What is the meaning of life in one sentence? "
-        f"Document your reasoning between {Reasoning.xml_tags()} tags.",
-    },
-]).run()
-
-# Gracefully handle mising models
-reasoning = meaning.last.try_parse(Reasoning)
-if reasoning:
-    print("reasoning:", reasoning.content.strip())
-
-# Strip parsed content to avoid sharing
-# previous thoughts with the model.
-without_reasons = meaning.strip(Reasoning)
-print("meaning of life:", without_reasons.last.content.strip())
-
-# follow_up = without_thoughts.continue_(...)
-```
-
-### Custom Generator
-
-Any custom generator simply needs to implement a `complete` function, and
-then it can be used anywhere inside rigging.
-
-```python
-class Custom(Generator):
-    # model: str
-    # api_key: str
-    # params: GeneratorParams
-
-    custom_field: bool
-
-    def complete(
-        self,
-        messages: t.Sequence[rg.Message],
-        overloads: GenerateParams = GenerateParams(),
-    ) -> rg.Message:
-        # Access self vars where needed
-        api_key = self.api_key
-        model_id = self.model
-
-        # Merge in args for API overloads
-        marged: dict[str, t.Any] = self._merge_params(overloads)
-
-        # response: str = ...
-
-        return rg.Message("assistant", response)
-
-
-generator = Custom(model='foo', custom_field=True)
-generator.chat(...)
-```
-
-*Note: we currently don't have anyway to "register" custom generators for `get_generator`.*
-
+```
\ No newline at end of file
diff --git a/docs/topics/chat.md b/docs/topics/chat.md
deleted file mode 100644
index cb82549..0000000
--- a/docs/topics/chat.md
+++ /dev/null
@@ -1,29 +0,0 @@
-### Basic Chats
-
-```python
-import rigging as rg
-
-generator = rg.get_generator("claude-2.1")
-chat = generator.chat(
-    [
-        {"role": "system", "content": "You are a wizard harry."},
-        {"role": "user", "content": "Say hello!"},
-    ]
-).run()
-
-print(chat.last)
-# [assistant]: Hello!
-
-print(f"{chat.last!r}")
-# Message(role='assistant', parts=[], content='Hello!')
-
-print(chat.prev)
-# [
-#   Message(role='system', parts=[], content='You are a wizard harry.'),
-#   Message(role='user', parts=[], content='Say hello!'),
-# ]
-
-print(chat.json)
-# [{ ... }]
-
-```
\ No newline at end of file
diff --git a/docs/topics/chats.md b/docs/topics/chats.md
new file mode 100644
index 0000000..af84250
--- /dev/null
+++ b/docs/topics/chats.md
@@ -0,0 +1,99 @@
+!!! note
+    This content is currently being refactored
+
+### Basic Chats
+
+```python
+import rigging as rg
+
+generator = rg.get_generator("claude-2.1")
+chat = generator.chat(
+    [
+        {"role": "system", "content": "You are a wizard harry."},
+        {"role": "user", "content": "Say hello!"},
+    ]
+).run()
+
+print(chat.last)
+# [assistant]: Hello!
+
+print(f"{chat.last!r}")
+# Message(role='assistant', parts=[], content='Hello!')
+
+print(chat.prev)
+# [
+#   Message(role='system', parts=[], content='You are a wizard harry.'),
+#   Message(role='user', parts=[], content='Say hello!'),
+# ]
+
+print(chat.json)
+# [{ ... }]
+
+```
+
+### Continuing Chats
+
+```python
+import rigging as rg
+
+generator = rg.get_generator("gpt-3.5-turbo")
+chat = generator.chat([
+    {"role": "user", "content": "Hello, how are you?"},
+])
+
+# We can fork (continue_) before generation has occurred
+specific = chat.fork("Be specific please.").run()
+poetic = chat.fork("Be as poetic as possible").overload(temperature=1.5).run()
+
+# We can also fork (continue_) after generation
+next_chat = poetic.fork(
+    {"role": "user", "content": "That's good, tell me a joke"}
+)
+
+update = next_chat.run()
+```
+
+### Basic Templating
+
+```python
+import rigging as rg
+
+template = rg.get_generator("gpt-4").chat([
+    {"role": "user", "content": "What is the capital of $country?"},
+])
+
+for country in ["France", "Germany"]:
+    print(template.apply(country=country).run().last)
+
+# The capital of France is Paris.
+# The capital of Germany is Berlin.
+```
+
+### Strip Parsed Sections
+
+```python
+import rigging as rg
+
+class Reasoning(rg.Model):
+    content: str
+
+meaning = rg.get_generator("claude-2.1").chat([
+    {
+        "role": "user",
+        "content": "What is the meaning of life in one sentence? "
+        f"Document your reasoning between {Reasoning.xml_tags()} tags.",
+    },
+]).run()
+
+# Gracefully handle missing models
+reasoning = meaning.last.try_parse(Reasoning)
+if reasoning:
+    print("reasoning:", reasoning.content.strip())
+
+# Strip parsed content to avoid sharing
+# previous thoughts with the model.
+without_reasons = meaning.strip(Reasoning)
+print("meaning of life:", without_reasons.last.content.strip())
+
+# follow_up = without_reasons.continue_(...)
+```
\ No newline at end of file
diff --git a/docs/topics/generators.md b/docs/topics/generators.md
index e69de29..c65d5be 100644
--- a/docs/topics/generators.md
+++ b/docs/topics/generators.md
@@ -0,0 +1,52 @@
+!!! note
+    This content is currently being refactored
+
+### Overload Generation Params
+
+```python
+import rigging as rg
+
+pending = rg.get_generator("gpt-3.5-turbo,max_tokens=50").chat([
+    {"role": "user", "content": "Say a haiku about boats"},
+])
+
+for temp in [0.1, 0.5, 1.0]:
+    print(pending.overload(temperature=temp).run().last.content)
+
+```
+
+### Custom Generator
+
+Any custom generator simply needs to implement a `complete` function, and
+then it can be used anywhere inside rigging.
+
+```python
+import typing as t
+
+import rigging as rg
+from rigging.generator import GenerateParams, Generator  # assumed import path
+
+class Custom(Generator):
+    # model: str
+    # api_key: str
+    # params: GenerateParams
+
+    custom_field: bool
+
+    def complete(
+        self,
+        messages: t.Sequence[rg.Message],
+        overloads: GenerateParams = GenerateParams(),
+    ) -> rg.Message:
+        # Access self vars where needed
+        api_key = self.api_key
+        model_id = self.model
+
+        # Merge in args for API overloads
+        merged: dict[str, t.Any] = self._merge_params(overloads)
+
+        # response: str = ...
+
+        return rg.Message("assistant", response)
+
+
+generator = Custom(model='foo', custom_field=True)
+generator.chat(...)
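+
+# The PendingChat returned by .chat() above behaves the same as one from a
+# built-in generator, so the usual .using(...).until(...).run() chain applies.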
+```
+
+*Note: we currently don't have any way to "register" custom generators for `get_generator`.*
\ No newline at end of file
diff --git a/docs/topics/logging.md b/docs/topics/logging.md
deleted file mode 100644
index 8ce8c22..0000000
--- a/docs/topics/logging.md
+++ /dev/null
@@ -1,22 +0,0 @@
-### Logging
-
-By default rigging disables it's logger with loguru. To enable it run:
-
-```python
-from loguru import logger
-
-logger.enable('rigging')
-```
-
-To configure loguru terminal + file logging format overrides:
-
-```python
-from rigging.logging import configure_logging
-
-configure_logging(
-    'info',     # stderr level
-    'out.log',  # log file (optional)
-    'trace'     # log file level
-)
-```
-*(This will remove existing handlers, so you might prefer to configure them yourself)*
diff --git a/docs/topics/model.md b/docs/topics/models.md
similarity index 70%
rename from docs/topics/model.md
rename to docs/topics/models.md
index 07eaa1d..36ca97e 100644
--- a/docs/topics/model.md
+++ b/docs/topics/models.md
@@ -1,3 +1,6 @@
+!!! note
+    This content is currently being refactored
+
 ### Model Parsing
 
 ```python
@@ -56,4 +59,30 @@ jokes = chat.last.parse_set(Joke)
 #   Joke(content='What do you call a bear with no teeth? A gummy bear!'),
 #   Joke(content='What do you call a fake noodle? An Impasta!')
 # ]
+```
+
+### Complex Models
+
+```python
+import rigging as rg
+
+class Inner(rg.Model):
+    type: str = rg.attr()
+    content: str
+
+class Outer(rg.Model):
+    name: str = rg.attr()
+    inners: list[Inner] = rg.element()
+
+outer = Outer(name="foo", inners=[
+    Inner(type="cat", content="meow"),
+    Inner(type="dog", content="bark")
+])
+
+print(outer.to_pretty_xml())
+
+# <outer name="foo">
+#    <inner type="cat">meow</inner>
+#    <inner type="dog">bark</inner>
+# </outer>
 ```
\ No newline at end of file
diff --git a/docs/topics/setup_logging.md b/docs/topics/setup_logging.md
new file mode 100644
index 0000000..1074a81
--- /dev/null
+++ b/docs/topics/setup_logging.md
@@ -0,0 +1,23 @@
+Rigging uses [loguru](https://loguru.readthedocs.io/) for its logging. By default it disables its logger, allowing users to choose when/how to gather messages.
+
+If you want to let rigging messages flow into loguru, you should enable it:
+
+```python
+from loguru import logger
+
+logger.enable('rigging')
+```
+
+If you want to have some sane default handlers with dual console & file logging,
+you can use the [rigging.logging.configure_logging][] function.
+
+```python
+from rigging.logging import configure_logging
+
+configure_logging(
+    'info',     # stderr level
+    'out.log',  # log file (optional)
+    'trace'     # log file level
+)
+```
+*(This will remove existing handlers, so you might prefer to configure them yourself)*
diff --git a/docs/topics/tools.md b/docs/topics/tools.md
index 9f7c475..8f232fb 100644
--- a/docs/topics/tools.md
+++ b/docs/topics/tools.md
@@ -1,3 +1,6 @@
+!!! note
+    This content is currently being refactored
+
 ### Tools
 
 ```python
diff --git a/mkdocs.yml b/mkdocs.yml
index 6ee69c1..c285add 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -8,10 +8,10 @@ nav:
   - Home: index.md
   - Topics:
     - Generators: topics/generators.md
-    - Chat: topics/chat.md
-    - Models: topics/model.md
+    - Chats: topics/chats.md
+    - Models: topics/models.md
     - Tools: topics/tools.md
-    - Logging: topics/logging.md
+    - Logging: topics/setup_logging.md
   - API:
     - rigging.chat: api/chat.md
     - rigging.completion: api/completion.md
diff --git a/pyproject.toml b/pyproject.toml
index c9f1561..5dbea84 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "rigging"
-version = "0.2.2"
+version = "1.0.0rc0"
 description = "LLM Interaction Framework"
 authors = ["Nick Landers "]
 license = "MIT"
diff --git a/rigging/completion.py b/rigging/completion.py
index 7a9b671..45a82c0 100644
--- a/rigging/completion.py
+++ b/rigging/completion.py
@@ -485,7 +485,7 @@ async def arun_many(
     params: t.Sequence[t.Optional["GenerateParams"]] | None = None,
     skip_failed: bool = False,
 ) -> list[Completion]:
-    """async variant of the [rigging.chat.PendingCompletion.run_many][] method."""
+    """async variant of the [rigging.completion.PendingCompletion.run_many][] method."""
     states: list[RunState] = [RunState(self.text, p, self._process()) for p in self._fit_params(count, params)]
 
     _ = [next(state.processor) for state in states]
diff --git a/rigging/message.py b/rigging/message.py
index 344243f..2d27e74 100644
--- a/rigging/message.py
+++ b/rigging/message.py
@@ -143,6 +143,8 @@ def _sync_parts(self) -> None:
 
         shift += new_length - old_length
 
+        self.parts = sorted(self.parts, key=lambda p: p.slice_.start)
+
 @computed_field  # type: ignore[misc]
 @property
 def content(self) -> str: