Mirror of https://github.com/ArthurDanjou/ArtStudies.git, synced 2026-01-14 11:54:10 +01:00

Compare commits: c8c1bf4807...56fdd5da45 (4 commits)

| SHA1 |
|---|
| 56fdd5da45 |
| 3e6b2e313a |
| 346695212d |
| 8e7bbc1fe9 |
208
M2/Advanced Machine Learning/TP1.ipynb
Normal file
@@ -0,0 +1,208 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "8226e658",
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "7e95cb09",
"metadata": {},
"outputs": [
{
"data": {
"application/vnd.microsoft.datawrangler.viewer.v0+json": {
"columns": [
{
"name": "index",
"rawType": "int64",
"type": "integer"
},
{
"name": "X1",
"rawType": "float64",
"type": "float"
},
{
"name": "X2",
"rawType": "float64",
"type": "float"
},
{
"name": "Y",
"rawType": "float64",
"type": "float"
}
],
"ref": "018727a2-2342-424f-8395-021f40817c5a",
"rows": [
[
"0",
"-0.8363543",
"4.520502",
"-19.868094121443526"
],
[
"1",
"0.4020083",
"3.252834",
"-10.46598545005849"
],
[
"2",
"-0.2492138",
"3.610425",
"-12.91499193423918"
],
[
"3",
"-0.6257167",
"4.58877",
"-20.67839639765537"
],
[
"4",
"-0.9899948",
"4.893924",
"-22.99404413854238"
]
],
"shape": {
"columns": 3,
"rows": 5
}
},
"text/html": [
"<div>\n",
"<style scoped>\n",
"    .dataframe tbody tr th:only-of-type {\n",
"        vertical-align: middle;\n",
"    }\n",
"\n",
"    .dataframe tbody tr th {\n",
"        vertical-align: top;\n",
"    }\n",
"\n",
"    .dataframe thead th {\n",
"        text-align: right;\n",
"    }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
"  <thead>\n",
"    <tr style=\"text-align: right;\">\n",
"      <th></th>\n",
"      <th>X1</th>\n",
"      <th>X2</th>\n",
"      <th>Y</th>\n",
"    </tr>\n",
"  </thead>\n",
"  <tbody>\n",
"    <tr>\n",
"      <th>0</th>\n",
"      <td>-0.836354</td>\n",
"      <td>4.520502</td>\n",
"      <td>-19.868094</td>\n",
"    </tr>\n",
"    <tr>\n",
"      <th>1</th>\n",
"      <td>0.402008</td>\n",
"      <td>3.252834</td>\n",
"      <td>-10.465985</td>\n",
"    </tr>\n",
"    <tr>\n",
"      <th>2</th>\n",
"      <td>-0.249214</td>\n",
"      <td>3.610425</td>\n",
"      <td>-12.914992</td>\n",
"    </tr>\n",
"    <tr>\n",
"      <th>3</th>\n",
"      <td>-0.625717</td>\n",
"      <td>4.588770</td>\n",
"      <td>-20.678396</td>\n",
"    </tr>\n",
"    <tr>\n",
"      <th>4</th>\n",
"      <td>-0.989995</td>\n",
"      <td>4.893924</td>\n",
"      <td>-22.994044</td>\n",
"    </tr>\n",
"  </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
"         X1        X2          Y\n",
"0 -0.836354  4.520502 -19.868094\n",
"1  0.402008  3.252834 -10.465985\n",
"2 -0.249214  3.610425 -12.914992\n",
"3 -0.625717  4.588770 -20.678396\n",
"4 -0.989995  4.893924 -22.994044"
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"data = pd.read_excel(\"./data/data_pdp.xlsx\")\n",
"data.head()"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "4e9a9a97",
"metadata": {},
"outputs": [],
"source": [
"def partial_dependant_function(data: pd.DataFrame, model: object, feature: str, grid_points: list) -> list:\n",
"    \"\"\"Compute the Partial Dependence Plot (PDP) for a given feature.\"\"\"\n",
"    pdp = []\n",
"    for val in grid_points:\n",
"        data_temp = data.copy()\n",
"        data_temp[feature] = val\n",
"        preds = model.predict(data_temp)\n",
"        pdp.append(preds.mean())\n",
"    return pdp"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9553a1d8",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "studies",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.13.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
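For context, a minimal usage sketch for the `partial_dependant_function` defined in the notebook above, assuming scikit-learn's `LinearRegression` as a stand-in estimator (the model choice and grid size are illustrative, not part of the diff):

```python
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression

# partial_dependant_function from the notebook above is assumed to be in scope.
data = pd.read_excel("./data/data_pdp.xlsx")
X, y = data[["X1", "X2"]], data["Y"]

# Any estimator exposing .predict works here; LinearRegression is illustrative.
model = LinearRegression().fit(X, y)

# Partial dependence of the prediction on X1 over its observed range.
grid = np.linspace(X["X1"].min(), X["X1"].max(), num=50)
pdp = partial_dependant_function(X, model, feature="X1", grid_points=list(grid))
print(pdp[:5])  # mean prediction at the first five grid values
```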
BIN
M2/Advanced Machine Learning/data/data_pdp.xlsx
Normal file
Binary file not shown.
851
M2/Generative AI/TP1/TP1 Benchmark.ipynb
Normal file
@@ -0,0 +1,851 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "172a7a9f",
"metadata": {},
"source": [
"# TP2 - Automatic benchmarking\n",
"\n",
"In this TP we define a function that measures the performance of a language model by running several benchmarks. In class we saw three ways to measure the performance of a language model, which can be summarized as:\n",
"1. **Automatic evaluation**: via a set of questions whose answers are known\n",
"2. **Human evaluation**: a human rates a model's answer to a question\n",
"3. **Evaluation by a language model**: another model grades or compares the answers of one or more models\n",
"\n",
"Here we focus on the first approach, in particular with the [GSM8K](https://huggingface.co/datasets/openai/gsm8k) and [HellaSwag](https://huggingface.co/datasets/Rowan/hellaswag) benchmarks.\n",
"Throughout the notebook we use the LangChain library.\n",
"\n",
"Keep in mind that this notebook is purely pedagogical: the field evolves quickly, so it is not necessarily up to date, and the practices shown are not necessarily the ones validated by industry.\n",
"\n",
"## Standardizing the benchmarks\n",
"\n",
"For each benchmark we consider, we need several pieces of information:\n",
"* **Dataset**: a function that loads the benchmark questions\n",
"* **Reference**: a function able to identify the expected answer\n",
"* **Prompt**: a prompt that properly asks the model to answer the question\n",
"* **Chain**: a function that returns the LangChain processing chain\n",
"* **Score**: a function that scores the model's performance on a question\n",
"\n",
"We start by creating a class that brings these requirements together:\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cd75374d",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.prompts import PromptTemplate\n",
"from langchain_core.runnables import Runnable\n",
"\n",
"\n",
"class Benchmark:\n",
"    \"\"\"Base class for benchmarks.\"\"\"\n",
"\n",
"    name: str\n",
"\n",
"    def __init__(self, prompt: PromptTemplate) -> None:\n",
"        \"\"\"Initialize the benchmark with a prompt template.\"\"\"\n",
"        self.prompt = prompt\n",
"\n",
"    def load_data(self) -> list:\n",
"        \"\"\"Load and return the benchmark data samples.\"\"\"\n",
"        raise NotImplementedError\n",
"\n",
"    def build_chain(self, model) -> Runnable:\n",
"        \"\"\"Build and return the evaluation chain using the provided model.\"\"\"\n",
"        raise NotImplementedError\n",
"\n",
"    def get_reference(self, sample) -> str:\n",
"        \"\"\"Extract and return the reference answer from a data sample.\"\"\"\n",
"        raise NotImplementedError\n",
"\n",
"    def score(self, prediction, reference) -> float:\n",
"        \"\"\"Score the prediction against the reference answer.\"\"\"\n",
"        raise NotImplementedError"
]
},
{
"cell_type": "markdown",
"id": "e2ab41df",
"metadata": {},
"source": [
"To make this class more concrete, let's start with the [GSM8K](https://huggingface.co/datasets/openai/gsm8k) benchmark.\n",
"\n",
"### GSM8K benchmark\n",
"\n",
"We start by loading the dataset and looking at one question.\n",
"\n",
"**Task**: Solve the question *by hand* and check your answer. We recommend exploring several questions."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "93979ba0",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Number of questions: 1319\n",
"Example of question:\n",
" Janet’s ducks lay 16 eggs per day. She eats three for breakfast every morning and bakes muffins for her friends every day with four. She sells the remainder at the farmers' market daily for $2 per fresh duck egg. How much in dollars does she make every day at the farmers' market?\n",
"And its answer:\n",
" Janet sells 16 - 3 - 4 = <<16-3-4=9>>9 duck eggs a day.\n",
"She makes 9 * 2 = $<<9*2=18>>18 every day at the farmer’s market.\n",
"#### 18\n"
]
}
],
"source": [
"import numpy as np\n",
"\n",
"from datasets import load_dataset\n",
"\n",
"np.random.seed(42)\n",
"\n",
"dataset = load_dataset(\"gsm8k\", \"main\")\n",
"dataset = dataset[\"test\"]\n",
"\n",
"print(f\"Number of questions: {len(dataset)}\")\n",
"index = 0\n",
"print(\"Example of question:\\n\", dataset[index][\"question\"])\n",
"print(\"And its answer:\\n\", dataset[index][\"answer\"])"
]
},
{
"cell_type": "markdown",
"id": "82d797f0",
"metadata": {},
"source": [
"After inspecting several elements of the dataset, we notice that the final answer is placed after the string \"####\".\n",
"\n",
"**Task**: Build a function `get_reference` that takes a GSM8K element (a dictionary with a question and an answer) and returns the expected answer (a string). You may use the [`search`](https://docs.python.org/3/library/re.html#re.search) function from the [`re`](https://docs.python.org/3/library/re.html#) library.\n",
"Then test this function on the previous example."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b336056a",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Reference: 18\n"
]
}
],
"source": [
"from re import search\n",
"\n",
"\n",
"def get_reference(sample: dict) -> str:\n",
"    \"\"\"Extract the reference answer from a data sample.\"\"\"\n",
"    match = search(r\"#### (\\d+)\", sample[\"answer\"])\n",
"    return match.group(1) if match else None\n",
"\n",
"\n",
"index = 0\n",
"reference = get_reference(sample=dataset[index])\n",
"print(f\"Reference: {reference}\")"
]
},
{
"cell_type": "markdown",
"id": "4c137e6a",
"metadata": {},
"source": [
"It now remains to define a prompt so that we can call a model and test our machinery."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0b899872",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.prompts import PromptTemplate\n",
"\n",
"prompt = PromptTemplate(\n",
"    input_variables=[\"question\"],\n",
"    template=(\n",
"        \"\"\"You are a careful mathematician. Solve the problem step by step, then display your answer in the end.\n",
"        Question: {question}\n",
"        Answer:\"\"\"\n",
"    ),\n",
")"
]
},
{
"cell_type": "markdown",
"id": "36433b53",
"metadata": {},
"source": [
"By wiring in a call to a model served locally with Ollama, we can define with LangChain:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2f0676b6",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Model answer : Here's how we can solve this problem step by step:\n",
"\n",
"1. **Calculate the total number of eggs laid:** Janet's ducks lay 16 eggs per day.\n",
"\n",
"2. **Calculate the number of eggs eaten:** She eats 3 eggs per day.\n",
"\n",
"3. **Calculate the number of eggs remaining after breakfast:** 16 eggs (laid) - 3 eggs (eaten) = 13 eggs\n",
"\n",
"4. **Calculate the number of eggs used for baking:** She uses 4 eggs for baking.\n",
"\n",
"5. **Calculate the number of eggs remaining after baking:** 13 eggs - 4 eggs (baking) = 9 eggs\n",
"\n",
"6. **Calculate the earnings from selling the remaining eggs:** She sells 9 eggs at $2 per egg. So she makes 9 * $2 = $18.\n",
"\n",
"**Answer:** $18\n",
"The answer was : 18\n"
]
}
],
"source": [
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"from langchain_ollama import OllamaLLM\n",
"\n",
"model = OllamaLLM(model=\"gemma3:4b\")\n",
"\n",
"chain = {\"question\": RunnablePassthrough()} | prompt | model | StrOutputParser()\n",
"\n",
"index = 0\n",
"\n",
"question = dataset[index][\"question\"]\n",
"answer = get_reference(dataset[index])\n",
"response = chain.invoke(question)\n",
"print(f\"Model answer : {response}\")\n",
"print(f\"The answer was : {answer}\")\n"
]
},
{
"cell_type": "markdown",
"id": "97dd7db7",
"metadata": {},
"source": [
"We need to extract the last numeric value to obtain the model's answer automatically.\n",
"\n",
"**Task**: Define a function `score` that takes the model's answer and the expected answer and returns whether the two are identical (1 / 0). You may use the [`findall`](https://docs.python.org/3/library/re.html#re.findall) function from the `re` library.\n",
"Then apply it to the previous example."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ad43cf84",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"The model scored 1.0\n"
]
}
],
"source": [
"from re import findall\n",
"\n",
"\n",
"def score(prediction, reference):\n",
"    if reference is None:\n",
"        return 0.0\n",
"\n",
"    numbers = findall(r\"\\d+\", prediction)\n",
"    return 1.0 if numbers and numbers[-1] == reference else 0.0\n",
"\n",
"\n",
"value = score(response, answer)\n",
"print(f\"The model scored {value}\")"
]
},
{
"cell_type": "markdown",
"id": "a2ec5088",
"metadata": {},
"source": [
"We now have everything we need to define the `GSM8KBenchmark` class from the `Benchmark` class defined earlier.\n",
"\n",
"**Task**: Define this class as a subclass of `Benchmark`."
]
},
{
"cell_type": "code",
"execution_count": 41,
"id": "d83f4394",
"metadata": {},
"outputs": [],
"source": [
"class GSM8KBenchmark(Benchmark):\n",
"    name = \"GSM8K\"\n",
"\n",
"    def load_data(self):\n",
"        return load_dataset(\"gsm8k\", \"main\", split=\"test\")\n",
"\n",
"    def build_chain(self, model):\n",
"        return (\n",
"            {\"question\": RunnablePassthrough()}\n",
"            | self.prompt\n",
"            | model\n",
"            | StrOutputParser()\n",
"        )\n",
"\n",
"    def get_reference(self, sample):\n",
"        match = search(r\"#### (\\d+)\", sample[\"answer\"])\n",
"        return match.group(1) if match else None\n",
"\n",
"    def score(self, prediction, reference):\n",
"        if reference is None:\n",
"            return 0.0\n",
"        numbers = findall(r\"\\d+\", prediction)\n",
"        return 1.0 if numbers and numbers[-1] == reference else 0.0"
]
},
{
"cell_type": "markdown",
"id": "dfc3cb78",
"metadata": {},
"source": [
"It is now time to define a function that actually *runs* the benchmark.\n",
"\n",
"**Task**: Define a function `run_benchmark` that takes as parameters:\n",
"* `model_name`: the name of the Ollama model to test\n",
"* `benchmark`: the benchmark we want to run\n",
"* `max_samples`: the maximum number of questions to use\n",
"\n",
"Since the object we are working with is a HuggingFace dataset, to select $n$ rows we use\n",
"```python\n",
"dataset = dataset.select(range(max_samples))\n",
"```\n",
"This way the structure is preserved."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2d7125af",
"metadata": {},
"outputs": [],
"source": [
"from tqdm import tqdm\n",
"import numpy as np\n",
"\n",
"\n",
"def run_benchmark(\n",
"    model_name: str, benchmark: Benchmark, max_samples: int | None = None\n",
") -> dict:\n",
"    model = OllamaLLM(model=model_name)\n",
"\n",
"    data = benchmark.load_data()\n",
"    if max_samples:\n",
"        data = data.select(range(max_samples))\n",
"    chain = benchmark.build_chain(model)\n",
"\n",
"    scores = []\n",
"\n",
"    for sample in tqdm(data, desc=f\"Running {benchmark.name}\"):\n",
"        prediction = chain.invoke(sample)\n",
"        reference = benchmark.get_reference(sample)\n",
"        scores.append(benchmark.score(prediction, reference))\n",
"\n",
"    results = {\n",
"        \"benchmark\": benchmark.name,\n",
"        \"model\": model_name,\n",
"        \"num_samples\": len(scores),\n",
"        \"accuracy\": np.mean(scores),\n",
"    }\n",
"    return results\n"
]
},
{
"cell_type": "markdown",
"id": "81de8940",
"metadata": {},
"source": [
"**Task**: Use the `run_benchmark` function, defining a prompt for GSM8K."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f6bbeb53",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Running GSM8K: 100%|██████████| 5/5 [00:50<00:00, 10.18s/it]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'benchmark': 'GSM8K', 'model': 'gemma3:4b', 'num_samples': 5, 'accuracy': 0.8}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n"
]
}
],
"source": [
"prompt_GSM8K = PromptTemplate(\n",
"    input_variables=[\"question\"],\n",
"    template=(\n",
"        \"\"\"You are a careful mathematician. Solve the problem step by step, then display your answer in the end.\n",
"        Question: {question}\n",
"        Answer:\"\"\"\n",
"    ),\n",
")\n",
"\n",
"benchmark_GSM8K = GSM8KBenchmark(prompt=prompt_GSM8K)\n",
"results = run_benchmark(\n",
"    model_name=\"gemma3:4b\", benchmark=benchmark_GSM8K, max_samples=5\n",
")\n",
"print(results)"
]
},
{
"cell_type": "markdown",
"id": "0c943124",
"metadata": {},
"source": [
"### HellaSwag\n",
"\n",
"Now that we have made this work for the GSM8K dataset, let's tackle [HellaSwag](https://huggingface.co/datasets/Rowan/hellaswag).\n",
"\n",
"**Task**: Following the same approach as before, implement a `HellaSwagBenchmark` subclass of `Benchmark`. Then use the `run_benchmark` function to validate your work."
]
},
{
"cell_type": "code",
"execution_count": 44,
"id": "32886901",
"metadata": {},
"outputs": [],
"source": [
"class HellaSwagBenchmark(Benchmark):\n",
"    name = \"HellaSwag\"\n",
"\n",
"    def load_data(self):\n",
"        return load_dataset(\"hellaswag\", split=\"validation\")\n",
"\n",
"    def build_chain(self, model):\n",
"        return (\n",
"            {\n",
"                \"context\": lambda x: x[\"ctx\"],\n",
"                \"choices\": lambda x: \"\\n\".join(\n",
"                    f\"{index}: {choice}\" for index, choice in enumerate(x[\"endings\"])\n",
"                ),\n",
"            }\n",
"            | self.prompt\n",
"            | model\n",
"            | StrOutputParser()\n",
"        )\n",
"\n",
"    def get_reference(self, sample):\n",
"        return str(sample[\"label\"])\n",
"\n",
"    def score(self, prediction, reference):\n",
"        match = search(r\"\\d\", prediction)\n",
"        return 1.0 if match and match.group(0) == reference else 0.0\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "96a3031a",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Running HellaSwag: 100%|██████████| 5/5 [00:02<00:00, 2.08it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'benchmark': 'HellaSwag', 'model': 'gemma3:4b', 'num_samples': 5, 'accuracy': 1.0}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n"
]
}
],
"source": [
"prompt_HellaSwag = PromptTemplate(\n",
"    input_variables=[\"context\", \"choices\"],\n",
"    template=(\n",
"        \"\"\"You will be given a context and then different choices. You need to find the most likely continuation to the context. Answer with the number of the most likely choice only.\n",
"        Context: {context}\n",
"        Choices: {choices}\n",
"        Answer:\"\"\"\n",
"    ),\n",
")\n",
"\n",
"benchmark_HellaSwag = HellaSwagBenchmark(prompt=prompt_HellaSwag)\n",
"\n",
"results = run_benchmark(\n",
"    model_name=\"gemma3:4b\", benchmark=benchmark_HellaSwag, max_samples=5\n",
")\n",
"print(results)"
]
},
{
"cell_type": "markdown",
"id": "c542783c",
"metadata": {},
"source": [
"## Structured outputs\n",
"\n",
"On a few examples everything seems to work! But there is at least one weak point in our approach: recovering the answer is unreliable and heavily dependent on the prompts.\n",
"\n",
"\n",
"For GSM8K, for example, we would like an answer in the form of a JSON object:\n",
"```json\n",
"{\n",
"    \"reasoning\": \"reasoning steps\",\n",
"    \"final_answer\": 18\n",
"}\n",
"```\n",
"\n",
"This would make it particularly easy to extract the answer without giving up the model's *reasoning*. For HellaSwag, on the other hand, an extremely simple JSON is enough:\n",
"```json\n",
"{\n",
"    \"choice\": 2\n",
"}\n",
"```\n",
"\n",
"To force the model to follow these formats, we use the [Pydantic](https://docs.langchain.com/oss/python/langchain/structured-output) option. For GSM8K, it is used as follows:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "988dbca3",
"metadata": {},
"outputs": [],
"source": [
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class GSM8KOutput(BaseModel):\n",
"    reasoning: str = Field(description=\"Step-by-step reasoning\")\n",
"    final_answer: float = Field(description=\"Final numeric answer\")\n"
]
},
{
"cell_type": "markdown",
"id": "d855adfe",
"metadata": {},
"source": [
"As for integrating it into the prompt:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f25afddc",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"The output should be formatted as a JSON instance that conforms to the JSON schema below.\n",
"\n",
"As an example, for the schema {\"properties\": {\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\": \"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\n",
"the object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema. The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n",
"\n",
"Here is the output schema:\n",
"```\n",
"{\"properties\": {\"reasoning\": {\"description\": \"Step-by-step reasoning\", \"title\": \"Reasoning\", \"type\": \"string\"}, \"final_answer\": {\"description\": \"Final numeric answer\", \"title\": \"Final Answer\", \"type\": \"number\"}}, \"required\": [\"reasoning\", \"final_answer\"]}\n",
"```\n"
]
}
],
"source": [
"from langchain.output_parsers import PydanticOutputParser\n",
"\n",
"parser_gsm8k = PydanticOutputParser(pydantic_object=GSM8KOutput)\n",
"\n",
"prompt_gsm8k = PromptTemplate(\n",
"    input_variables=[\"question\"],\n",
"    partial_variables={\"format_instructions\": parser_gsm8k.get_format_instructions()},\n",
"    template=(\n",
"        \"\"\"You are a careful mathematician. Solve the problem step by step.\n",
"        Question: {question}\n",
"        {format_instructions}\"\"\"\n",
"    ),\n",
")\n",
"\n",
"print(parser_gsm8k.get_format_instructions())"
]
},
{
"cell_type": "markdown",
"id": "d1dcc480",
"metadata": {},
"source": [
"**Task**: Modify the `Benchmark` class and the `GSM8KBenchmark` subclass to integrate these changes."
]
},
{
"cell_type": "code",
"execution_count": 67,
"id": "542a31d6",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.runnables import Runnable\n",
"from langchain_core.prompts import PromptTemplate\n",
"\n",
"\n",
"class Benchmark:\n",
"    name: str\n",
"\n",
"    def __init__(self, prompt: PromptTemplate, parser: PydanticOutputParser):\n",
"        self.prompt = prompt\n",
"        self.parser = parser\n",
"\n",
"    def load_data(self):\n",
"        raise NotImplementedError\n",
"\n",
"    def build_chain(self, model) -> Runnable:\n",
"        raise NotImplementedError\n",
"\n",
"    def get_reference(self, sample):\n",
"        raise NotImplementedError\n",
"\n",
"    def score(self, prediction, reference):\n",
"        raise NotImplementedError"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c94f1dd1",
"metadata": {},
"outputs": [],
"source": [
"class GSM8KBenchmark(Benchmark):\n",
"    name = \"GSM8K\"\n",
"\n",
"    def load_data(self):\n",
"        return load_dataset(\"gsm8k\", \"main\", split=\"test\")\n",
"\n",
"    def build_chain(self, model):\n",
"        return {\"question\": RunnablePassthrough()} | self.prompt | model | self.parser\n",
"\n",
"    def get_reference(self, sample):\n",
"        match = search(r\"#### (\\d+)\", sample[\"answer\"])\n",
"        return float(match.group(1)) if match else None\n",
"\n",
"    def score(self, prediction: GSM8KOutput, reference: float | None):\n",
"        if reference is None:\n",
"            return 0.0\n",
"        return 1.0 if prediction.final_answer == reference else 0.0"
]
},
{
"cell_type": "markdown",
"id": "b2076f24",
"metadata": {},
"source": [
"**Task**: Use the `run_benchmark` function and check that everything works."
]
},
{
"cell_type": "code",
"execution_count": 69,
"id": "31e433b0",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Running GSM8K: 100%|██████████| 5/5 [01:01<00:00, 12.25s/it]\n"
]
},
{
"data": {
"text/plain": [
"{'benchmark': 'GSM8K', 'model': 'gemma3:4b', 'num_samples': 5, 'accuracy': 0.8}"
]
},
"execution_count": 69,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gsm8k = GSM8KBenchmark(\n",
"    prompt=prompt_gsm8k,\n",
"    parser=parser_gsm8k,\n",
")\n",
"\n",
"run_benchmark(\"gemma3:4b\", gsm8k, max_samples=5)"
]
},
{
"cell_type": "markdown",
"id": "b7ed90cd",
"metadata": {},
"source": [
"**Task**: Make the same modification for HellaSwag, and check that it works."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e678bed2",
"metadata": {},
"outputs": [],
"source": [
"class HellaSwagOutput(BaseModel):\n",
"    choice: int = Field(description=\"Index of the chosen continuation\")\n",
"\n",
"\n",
"class HellaSwagBenchmark(Benchmark):\n",
"    name = \"HellaSwag\"\n",
"\n",
"    def load_data(self):\n",
"        return load_dataset(\"hellaswag\", split=\"validation\")\n",
"\n",
"    def build_chain(self, model):\n",
"        return (\n",
"            {\n",
"                \"context\": lambda x: x[\"ctx\"],\n",
"                \"choices\": lambda x: \"\\n\".join(\n",
"                    f\"{index}: {choice}\" for index, choice in enumerate(x[\"endings\"])\n",
"                ),\n",
"            }\n",
"            | self.prompt\n",
"            | model\n",
"            | self.parser\n",
"        )\n",
"\n",
"    def get_reference(self, sample):\n",
"        return str(sample[\"label\"])\n",
"\n",
"    def score(self, prediction: HellaSwagOutput, reference: str) -> float:\n",
"        return 1.0 if str(prediction.choice) == reference else 0.0\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2455f816",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Running HellaSwag: 100%|██████████| 5/5 [00:15<00:00, 3.12s/it]\n"
]
},
{
"data": {
"text/plain": [
"{'benchmark': 'HellaSwag',\n",
" 'model': 'gemma3:4b',\n",
" 'num_samples': 5,\n",
" 'accuracy': 1.0}"
]
},
"execution_count": 65,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"parser_hellaswag = PydanticOutputParser(pydantic_object=HellaSwagOutput)\n",
"\n",
"prompt_HellaSwag = PromptTemplate(\n",
"    input_variables=[\"context\", \"choices\"],\n",
"    partial_variables={\n",
"        \"format_instructions\": parser_hellaswag.get_format_instructions()\n",
"    },\n",
"    template=(\n",
"        \"\"\"You will be given a context and then different choices. You need to find the most likely continuation to the context.\n",
"        Context: {context}\n",
"        Choices: {choices}\n",
"        {format_instructions}\"\"\"\n",
"    ),\n",
")\n",
"\n",
"hella_swag = HellaSwagBenchmark(\n",
"    prompt=prompt_HellaSwag,\n",
"    parser=parser_hellaswag,\n",
")\n",
"\n",
"run_benchmark(\"gemma3:4b\", hella_swag, max_samples=5)"
]
},
{
"cell_type": "markdown",
"id": "ba9acd54",
"metadata": {},
"source": [
"## Going further\n",
"\n",
"We could implement other benchmarks, genuinely compare models against each other, compare prompts against each other..."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.13"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
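The closing "Going further" cell above suggests comparing models and prompts head to head; here is a minimal sketch of such a comparison loop, reusing `run_benchmark` and the `gsm8k` / `hella_swag` instances defined in the notebook (the second model name is illustrative and assumes that model has been pulled into Ollama locally):

```python
# Assumes run_benchmark, gsm8k and hella_swag from the notebook above are in scope.
models = ["gemma3:4b", "llama3.2:3b"]  # second model name is illustrative
benchmarks = [gsm8k, hella_swag]

# Run every (model, benchmark) pair on a small sample budget.
results = [
    run_benchmark(model_name=m, benchmark=b, max_samples=20)
    for m in models
    for b in benchmarks
]

# Rank the runs by accuracy to compare models across benchmarks.
for r in sorted(results, key=lambda r: r["accuracy"], reverse=True):
    print(f"{r['benchmark']:<10} {r['model']:<12} accuracy={r['accuracy']:.2f} (n={r['num_samples']})")
```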
1395
M2/Generative AI/TP1/TP1 RAG.ipynb
Normal file
File diff suppressed because one or more lines are too long
@@ -1,456 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "172a7a9f",
"metadata": {},
"source": [
"# TP2 - Automatic benchmarking\n",
"\n",
"In this TP we define a function that measures the performance of a language model by running several benchmarks. In class we saw three ways to measure the performance of a language model, which can be summarized as:\n",
"1. **Automatic evaluation**: via a set of questions whose answers are known\n",
"2. **Human evaluation**: a human rates a model's answer to a question\n",
"3. **Evaluation by a language model**: another model grades or compares the answers of one or more models\n",
"\n",
"Here we focus on the first approach, in particular with the [GSM8K](https://huggingface.co/datasets/openai/gsm8k) and [HellaSwag](https://huggingface.co/datasets/Rowan/hellaswag) benchmarks.\n",
"Throughout the notebook we use the LangChain library.\n",
"\n",
"Keep in mind that this notebook is purely pedagogical: the field evolves quickly, so it is not necessarily up to date, and the practices shown are not necessarily the ones validated by industry.\n",
"\n",
"## Standardizing the benchmarks\n",
"\n",
"For each benchmark we consider, we need several pieces of information:\n",
"* **Dataset**: a function that loads the benchmark questions\n",
"* **Reference**: a function able to identify the expected answer\n",
"* **Prompt**: a prompt that properly asks the model to answer the question\n",
"* **Chain**: a function that returns the LangChain processing chain\n",
"* **Score**: a function that scores the model's performance on a question\n",
"\n",
"We start by creating a class that brings these requirements together:\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cd75374d",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.runnables import Runnable\n",
"from langchain_core.prompts import PromptTemplate\n",
"\n",
"\n",
"class Benchmark:\n",
"    name: str\n",
"\n",
"    def __init__(self, prompt: PromptTemplate):\n",
"        self.prompt = prompt\n",
"\n",
"    def load_data(self):\n",
"        raise NotImplementedError\n",
"\n",
"    def build_chain(self, model) -> Runnable:\n",
"        raise NotImplementedError\n",
"\n",
"    def get_reference(self, sample):\n",
"        raise NotImplementedError\n",
"\n",
"    def score(self, prediction, reference):\n",
"        raise NotImplementedError"
]
},
{
"cell_type": "markdown",
"id": "e2ab41df",
"metadata": {},
"source": [
"To make this class more concrete, let's start with the [GSM8K](https://huggingface.co/datasets/openai/gsm8k) benchmark.\n",
"\n",
"### GSM8K benchmark\n",
"\n",
"We start by loading the dataset and looking at one question.\n",
"\n",
"**Task**: Solve the question *by hand* and check your answer. We recommend exploring several questions."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "93979ba0",
"metadata": {},
"outputs": [],
"source": [
"import numpy as np; np.random.seed(42)\n",
"from datasets import load_dataset\n",
"\n",
"dataset = load_dataset(\"gsm8k\", \"main\")\n",
"dataset = dataset[\"test\"]\n",
"\n",
"print(f\"Number of questions: {len(dataset)}\")\n",
"index = 0\n",
"print(\"Example of question:\\n\", dataset[index][\"question\"])\n",
"print(\"And its answer:\\n\", dataset[index][\"answer\"])"
]
},
{
"cell_type": "markdown",
"id": "82d797f0",
"metadata": {},
"source": [
"After inspecting several elements of the dataset, we notice that the final answer is placed after the string \"####\".\n",
"\n",
"**Task**: Build a function `get_reference` that takes a GSM8K element (a dictionary with a question and an answer) and returns the expected answer (a string). You may use the [`search`](https://docs.python.org/3/library/re.html#re.search) function from the [`re`](https://docs.python.org/3/library/re.html#) library.\n",
"Then test this function on the previous example."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b336056a",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"id": "4c137e6a",
"metadata": {},
"source": [
"It now remains to define a prompt so that we can call a model and test our machinery."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0b899872",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.prompts import PromptTemplate\n",
"\n",
"prompt = PromptTemplate(\n",
"    input_variables=[\"question\"],\n",
"    template=(\n",
"        \"\"\"You are a careful mathematician. Solve the problem step by step, then display your answer in the end.\n",
"        Question: {question}\n",
"        Answer:\"\"\"\n",
"    )\n",
")"
]
},
{
"cell_type": "markdown",
"id": "36433b53",
"metadata": {},
"source": [
"By wiring in a call to a model served locally with Ollama, we can define with LangChain:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2f0676b6",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.runnables import RunnablePassthrough\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_ollama import OllamaLLM\n",
"\n",
"model = OllamaLLM(model=\"gemma3:4b\")\n",
"\n",
"chain = (\n",
"    {\"question\": RunnablePassthrough()}\n",
"    | prompt\n",
"    | model\n",
"    | StrOutputParser()\n",
")\n",
"\n",
"index = 0\n",
"\n",
"question = dataset[index][\"question\"]\n",
"answer = get_reference(dataset[index])\n",
"response = chain.invoke(question)\n",
"print(f\"Model answer : {response}\")\n",
"print(f\"The answer was : {answer}\")\n"
]
},
{
"cell_type": "markdown",
"id": "97dd7db7",
"metadata": {},
"source": [
"We need to extract the last numeric value to obtain the model's answer automatically.\n",
"\n",
"**Task**: Define a function `score` that takes the model's answer and the expected answer and returns whether the two are identical (1 / 0). You may use the [`findall`](https://docs.python.org/3/library/re.html#re.findall) function from the `re` library.\n",
"Then apply it to the previous example."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ad43cf84",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"id": "a2ec5088",
"metadata": {},
"source": [
"We now have everything we need to define the `GSM8KBenchmark` class from the `Benchmark` class defined earlier.\n",
"\n",
"**Task**: Define this class as a subclass of `Benchmark`."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d83f4394",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"id": "dfc3cb78",
"metadata": {},
"source": [
"It is now time to define a function that actually *runs* the benchmark.\n",
"\n",
"**Task**: Define a function `run_benchmark` that takes as parameters:\n",
"* `model_name`: the name of the Ollama model to test\n",
"* `benchmark`: the benchmark we want to run\n",
"* `max_samples`: the maximum number of questions to use\n",
"\n",
"Since the object we are working with is a HuggingFace dataset, to select $n$ rows we use\n",
"```python\n",
"dataset = dataset.select(range(max_samples))\n",
"```\n",
"This way the structure is preserved."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2d7125af",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"id": "81de8940",
"metadata": {},
"source": [
"**Task**: Use the `run_benchmark` function, defining a prompt for GSM8K."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f6bbeb53",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"id": "0c943124",
"metadata": {},
"source": [
"### HellaSwag\n",
"\n",
"Now that we have made this work for the GSM8K dataset, let's tackle [HellaSwag](https://huggingface.co/datasets/Rowan/hellaswag).\n",
"\n",
"**Task**: Following the same approach as before, implement a `HellaSwagBenchmark` subclass of `Benchmark`. Then use the `run_benchmark` function to validate your work."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "32886901",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "96a3031a",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"id": "c542783c",
"metadata": {},
"source": [
"## Structured outputs\n",
"\n",
"On a few examples everything seems to work! But there is at least one weak point in our approach: recovering the answer is unreliable and heavily dependent on the prompts.\n",
"\n",
"\n",
"For GSM8K, for example, we would like an answer in the form of a JSON object:\n",
"```json\n",
"{\n",
"    \"reasoning\": \"reasoning steps\",\n",
"    \"final_answer\": 18\n",
"}\n",
"```\n",
"\n",
"This would make it particularly easy to extract the answer without giving up the model's *reasoning*. For HellaSwag, on the other hand, an extremely simple JSON is enough:\n",
"```json\n",
"{\n",
"    \"choice\": 2\n",
"}\n",
"```\n",
"\n",
"To force the model to follow these formats, we use the [Pydantic](https://docs.langchain.com/oss/python/langchain/structured-output) option. For GSM8K, it is used as follows:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "988dbca3",
"metadata": {},
"outputs": [],
"source": [
"from pydantic import BaseModel, Field\n",
"\n",
"class GSM8KOutput(BaseModel):\n",
"    reasoning: str = Field(description=\"Step-by-step reasoning\")\n",
"    final_answer: float = Field(description=\"Final numeric answer\")\n"
]
},
{
"cell_type": "markdown",
"id": "d855adfe",
"metadata": {},
"source": [
"As for integrating it into the prompt:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f25afddc",
"metadata": {},
"outputs": [],
"source": [
"from langchain.output_parsers import PydanticOutputParser\n",
"\n",
"parser_gsm8k = PydanticOutputParser(pydantic_object=GSM8KOutput)\n",
"\n",
"prompt_gsm8k = PromptTemplate(\n",
"    input_variables=[\"question\"],\n",
"    partial_variables={\"format_instructions\": parser_gsm8k.get_format_instructions()},\n",
"    template=(\n",
"        \"\"\"You are a careful mathematician. Solve the problem step by step.\n",
"        Question: {question}\n",
"        {format_instructions}\"\"\"\n",
"    ),\n",
")\n",
"\n",
"print(parser_gsm8k.get_format_instructions())"
]
},
{
"cell_type": "markdown",
"id": "d1dcc480",
"metadata": {},
"source": [
"**Task**: Modify the `Benchmark` class and the `GSM8KBenchmark` subclass to integrate these changes."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "542a31d6",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "c94f1dd1",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"id": "b2076f24",
"metadata": {},
"source": [
"**Task**: Use the `run_benchmark` function and check that everything works."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "31e433b0",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"id": "b7ed90cd",
"metadata": {},
"source": [
"**Task**: Make the same modification for HellaSwag, and check that it works."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e678bed2",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "2455f816",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"id": "ba9acd54",
"metadata": {},
"source": [
"## Going further\n",
"\n",
"We could implement other benchmarks, genuinely compare models against each other, compare prompts against each other..."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.13"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
File diff suppressed because one or more lines are too long
@@ -13,13 +13,16 @@ dependencies = [
    "langchain-community>=0.4.1",
    "langchain-huggingface>=1.2.0",
    "langchain-ollama>=1.0.1",
    "langchain-text-splitters>=1.1.0",
    "matplotlib>=3.10.1",
    "nbformat>=5.10.4",
    "numpy>=2.2.5",
    "opencv-python>=4.11.0.86",
    "openpyxl>=3.1.5",
    "pandas>=2.2.3",
    "pandas-stubs>=2.3.2.250926",
    "plotly>=6.3.0",
    "polars>=1.37.0",
    "pypdf>=6.5.0",
    "scikit-learn>=1.6.1",
    "scipy>=1.15.2",
55
uv.lock
generated
@@ -514,6 +514,15 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/4e/8c/f3147f5c4b73e7550fe5f9352eaa956ae838d5c51eb58e7a25b9f3e2643b/decorator-5.2.1-py3-none-any.whl", hash = "sha256:d316bb415a2d9e2d2b3abcc4084c6502fc09240e292cd76a76afc106a1c8e04a", size = 9190, upload-time = "2025-02-24T04:41:32.565Z" },
]

[[package]]
name = "et-xmlfile"
version = "2.0.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/d3/38/af70d7ab1ae9d4da450eeec1fa3918940a5fafb9055e934af8d6eb0c2313/et_xmlfile-2.0.0.tar.gz", hash = "sha256:dab3f4764309081ce75662649be815c4c9081e88f0837825f90fd28317d4da54", size = 17234, upload-time = "2024-10-25T17:25:40.039Z" }
wheels = [
    { url = "https://files.pythonhosted.org/packages/c1/8b/5fe2cc11fee489817272089c4203e679c63b570a5aaeb18d852ae3cbba6a/et_xmlfile-2.0.0-py3-none-any.whl", hash = "sha256:7a91720bc756843502c3b7504c77b8fe44217c85c537d85037f0f536151b2caa", size = 18059, upload-time = "2024-10-25T17:25:39.051Z" },
]

[[package]]
name = "executing"
version = "2.2.0"
@@ -2101,6 +2110,18 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/a4/7d/f1c30a92854540bf789e9cd5dde7ef49bbe63f855b85a2e6b3db8135c591/opencv_python-4.11.0.86-cp37-abi3-win_amd64.whl", hash = "sha256:085ad9b77c18853ea66283e98affefe2de8cc4c1f43eda4c100cf9b2721142ec", size = 39488044, upload-time = "2025-01-16T13:52:21.928Z" },
]

[[package]]
name = "openpyxl"
version = "3.1.5"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "et-xmlfile" },
]
sdist = { url = "https://files.pythonhosted.org/packages/3d/f9/88d94a75de065ea32619465d2f77b29a0469500e99012523b91cc4141cd1/openpyxl-3.1.5.tar.gz", hash = "sha256:cf0e3cf56142039133628b5acffe8ef0c12bc902d2aadd3e0fe5878dc08d1050", size = 186464, upload-time = "2024-06-28T14:03:44.161Z" }
wheels = [
    { url = "https://files.pythonhosted.org/packages/c0/da/977ded879c29cbd04de313843e76868e6e13408a94ed6b987245dc7c8506/openpyxl-3.1.5-py2.py3-none-any.whl", hash = "sha256:5282c12b107bffeef825f4617dc029afaf41d0ea60823bbb665ef3079dc79de2", size = 250910, upload-time = "2024-06-28T14:03:41.161Z" },
]

[[package]]
name = "opt-einsum"
version = "3.4.0"
@@ -2473,6 +2494,34 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/95/a9/12e2dc726ba1ba775a2c6922d5d5b4488ad60bdab0888c337c194c8e6de8/plotly-6.3.0-py3-none-any.whl", hash = "sha256:7ad806edce9d3cdd882eaebaf97c0c9e252043ed1ed3d382c3e3520ec07806d4", size = 9791257, upload-time = "2025-08-12T20:22:09.205Z" },
]

[[package]]
name = "polars"
version = "1.37.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "polars-runtime-32" },
]
sdist = { url = "https://files.pythonhosted.org/packages/6c/b5/ce40267c54b66f93572d84f7ba1c216b72a71cb2235e3724fab0911541fe/polars-1.37.0.tar.gz", hash = "sha256:6bbbeefb6f02f848d46ad4f4e922a92573986fd38611801c696bae98b02be4c8", size = 715429, upload-time = "2026-01-10T12:28:06.741Z" }
wheels = [
    { url = "https://files.pythonhosted.org/packages/31/07/d890382bbfdeb25db039ef4a8c8f93b3faf0016e18130513274204954203/polars-1.37.0-py3-none-any.whl", hash = "sha256:fcc549b9923ef1bd6fd99b5fd0a00dfedf85406f4758ae018a69bcd18a91f113", size = 805614, upload-time = "2026-01-10T12:26:47.897Z" },
]

[[package]]
name = "polars-runtime-32"
version = "1.37.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/30/92/b818590a5ebcc55657f5483f26133174bd2b9ca88457b60c93669a9d0c75/polars_runtime_32-1.37.0.tar.gz", hash = "sha256:954ddb056e3a2db2cbcaae501225ac5604d1599b6debd9c6dbdf8efbac0e6511", size = 2820371, upload-time = "2026-01-10T12:28:08.195Z" }
wheels = [
    { url = "https://files.pythonhosted.org/packages/f0/67/76162c9fcc71b917bdfd2804eaf0ab7cdb264a89b89af4f195a918f9f97d/polars_runtime_32-1.37.0-cp310-abi3-macosx_10_12_x86_64.whl", hash = "sha256:3591f4b8e734126d713a12869d3727360acbbcd1d440b45d830497a317a5a8b3", size = 43518436, upload-time = "2026-01-10T12:26:51.442Z" },
    { url = "https://files.pythonhosted.org/packages/cb/ec/56f328e8fa4ebea453f5bc10c579774dff774a873ff224b3108d53c514f9/polars_runtime_32-1.37.0-cp310-abi3-macosx_11_0_arm64.whl", hash = "sha256:47849420859159681e94589daad3a04ff66a2379c116ccd812d043f7ffe0094c", size = 39663939, upload-time = "2026-01-10T12:26:54.664Z" },
    { url = "https://files.pythonhosted.org/packages/4c/b2/f1ea0edba327a92ce0158b7a0e4abe21f541e44c9fb8ec932cc47592ca5c/polars_runtime_32-1.37.0-cp310-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4648ea1e821b9a841b2a562f27bcf54ff1ad21f9c217adcf0f7d0b3c33dc6400", size = 41481348, upload-time = "2026-01-10T12:26:57.598Z" },
    { url = "https://files.pythonhosted.org/packages/3b/21/788a3dd724bb21cf42e2f4daa6510a47787e8b30dd535aa6cae20ea968d0/polars_runtime_32-1.37.0-cp310-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5272b6f1680a3e0d77c9f07cb5a54f307079eb5d519c71aa3c37b9af0ee03a9e", size = 45168069, upload-time = "2026-01-10T12:27:00.98Z" },
    { url = "https://files.pythonhosted.org/packages/8a/73/823d6534a20ebdcec4b7706ab2b3f2cfb8e07571305f4e7381cc22d83e31/polars_runtime_32-1.37.0-cp310-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:73301ef4fe80d8d748085259a4063ac52ff058088daa702e2a75e7d1ab7f14fc", size = 41675645, upload-time = "2026-01-10T12:27:04.334Z" },
    { url = "https://files.pythonhosted.org/packages/30/54/1bacad96dc2b67d33b886a45b249777212782561493718785cb27c7c362a/polars_runtime_32-1.37.0-cp310-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:c60d523d738a7b3660d9abdfaff798f7602488f469d427865965b0bd2e40473a", size = 44737715, upload-time = "2026-01-10T12:27:08.152Z" },
    { url = "https://files.pythonhosted.org/packages/38/e3/aad525d8d89b903fcfa2bd0b4cb66b8a6e83e80b3d1348c5a428092d2983/polars_runtime_32-1.37.0-cp310-abi3-win_amd64.whl", hash = "sha256:f87f76f16e8030d277ecca0c0976aca62ec2b6ba2099ee9c6f75dfc97e7dc1b1", size = 45018403, upload-time = "2026-01-10T12:27:11.292Z" },
    { url = "https://files.pythonhosted.org/packages/0e/4d/ddcaa5f2e18763e02e66d0fd2efca049a42fe96fbeda188e89aeb38dd6fa/polars_runtime_32-1.37.0-cp310-abi3-win_arm64.whl", hash = "sha256:7ffbd9487e3668b0a57519f7ab5ab53ab656086db9f62dceaab41393a07be721", size = 41026243, upload-time = "2026-01-10T12:27:14.563Z" },
]

[[package]]
name = "prompt-toolkit"
version = "3.0.51"
@@ -3482,13 +3531,16 @@ dependencies = [
    { name = "langchain-community" },
    { name = "langchain-huggingface" },
    { name = "langchain-ollama" },
    { name = "langchain-text-splitters" },
    { name = "matplotlib" },
    { name = "nbformat" },
    { name = "numpy" },
    { name = "opencv-python" },
    { name = "openpyxl" },
    { name = "pandas" },
    { name = "pandas-stubs" },
    { name = "plotly" },
    { name = "polars" },
    { name = "pypdf" },
    { name = "scikit-learn" },
    { name = "scipy" },
@@ -3517,13 +3569,16 @@ requires-dist = [
    { name = "langchain-community", specifier = ">=0.4.1" },
    { name = "langchain-huggingface", specifier = ">=1.2.0" },
    { name = "langchain-ollama", specifier = ">=1.0.1" },
    { name = "langchain-text-splitters", specifier = ">=1.1.0" },
    { name = "matplotlib", specifier = ">=3.10.1" },
    { name = "nbformat", specifier = ">=5.10.4" },
    { name = "numpy", specifier = ">=2.2.5" },
    { name = "opencv-python", specifier = ">=4.11.0.86" },
    { name = "openpyxl", specifier = ">=3.1.5" },
    { name = "pandas", specifier = ">=2.2.3" },
    { name = "pandas-stubs", specifier = ">=2.3.2.250926" },
    { name = "plotly", specifier = ">=6.3.0" },
    { name = "polars", specifier = ">=1.37.0" },
    { name = "pypdf", specifier = ">=6.5.0" },
    { name = "scikit-learn", specifier = ">=1.6.1" },
    { name = "scipy", specifier = ">=1.15.2" },