10 Commits

Author SHA1 Message Date
f9dccb42cb fix: corriger le type de projet de 'Personal Project' à 'Academic Project' 2026-03-11 13:40:27 +01:00
b6f791e633 feat: ajouter le projet Intelligent HR Onboarding Assistant avec documentation complète 2026-03-11 13:32:04 +01:00
263081ea6b fix: mettre à jour la date de compatibilité et supprimer les journaux de débogage dans la page d'état 2026-03-11 10:45:24 +01:00
266fc63482 fix: supprimer le débogage de l'environnement et ajouter un journal pour la récupération des données de la page d'état 2026-03-11 10:40:00 +01:00
6e763e96ab Add debug 2026-03-11 10:34:41 +01:00
4ad3bedd2a fix: mettre à jour la durée de lecture et améliorer la documentation du projet hackathon-natixis 2026-03-11 10:10:06 +01:00
f8f9d26254 fix: mettre à jour les dépendances et ajouter la configuration du sitemap 2026-03-10 14:39:09 +01:00
c45b1d6f25 fix: mettre à jour les liens vers le dépôt GitHub et l'application en direct dans le projet de visualisation de la tuberculose 2026-03-10 12:25:20 +01:00
1537343e44 fix: supprimer le fichier de projet "Data Visualisation Project" 2026-03-10 12:23:32 +01:00
ac5ccb3555 Refactor project documentation and structure
- Updated data visualization project documentation to remove incomplete warning.
- Deleted the glm-financial-assets project file and replaced it with glm-implied-volatility project file, detailing a comprehensive study on implied volatility prediction using GLMs and machine learning.
- Marked n8n automations project as completed.
- Added new project on reinforcement learning applied to Atari Tennis, detailing agent comparisons and results.
- Removed outdated rl-tennis project file.
- Updated package dependencies in package.json for improved stability and performance.
2026-03-10 12:07:09 +01:00
15 changed files with 2391 additions and 943 deletions

View File

@@ -1,7 +1,7 @@
<script lang="ts" setup>
import type { StatusPageData } from '~~/types'
const { data, status, error } = await useAsyncData<StatusPageData>('home-status', () =>
const { data, status } = await useAsyncData<StatusPageData>('home-status', () =>
$fetch('/api/status-page'),
{ lazy: true }
)
@@ -49,7 +49,7 @@ const statusState = computed(() => {
<template>
<ClientOnly>
<UCard v-if="!error" class="h-full flex flex-col overflow-hidden">
<UCard v-if="data" class="h-full flex flex-col overflow-hidden">
<div class="p-5 border-b border-neutral-200 dark:border-neutral-800">
<div class="flex items-center justify-between mb-2">
<h3 class="font-bold text-neutral-900 dark:text-white text-sm">

1349
bun.lock

File diff suppressed because it is too large Load Diff

View File

@@ -1,58 +0,0 @@
---
slug: data-visualisation
title: Data Visualisation Project
type: Academic Project
description: An interactive data visualization project built with R, R Shiny, and ggplot2 for creating dynamic, explorable visualizations.
shortDescription: An interactive data visualization project using R and R Shiny.
publishedAt: 2026-01-05
readingTime: 1
status: Completed
tags:
- R
- R Shiny
- Data Visualization
- ggplot2
icon: i-ph-chart-bar-duotone
---
::warning
The project is complete, but the documentation is still being expanded with more details.
::
This project involves building an interactive data visualization application using R and R Shiny. The goal is to deliver dynamic, explorable visualizations that let users interact with the data in meaningful ways.
::BackgroundTitle{title="Technologies & Tools"}
::
- **[R](https://www.r-project.org)**: A statistical computing environment, perfect for data analysis and visualization.
- **[R Shiny](https://shiny.rstudio.com)**: A web application framework for R that enables the creation of interactive web applications directly from R.
- **[ggplot2](https://ggplot2.tidyverse.org)**: A powerful R package for creating static and dynamic visualizations using the Grammar of Graphics.
- **[dplyr](https://dplyr.tidyverse.org)**: An R package for data manipulation, providing a consistent set of verbs to help you solve common data manipulation challenges.
- **[tidyr](https://tidyr.tidyverse.org)**: An R package for tidying data, making it easier to work with and visualize.
- **[tidyverse](https://www.tidyverse.org)**: A collection of R packages designed for data science that share an underlying design philosophy, grammar, and data structures.
- **[sf](https://r-spatial.github.io/sf/)**: An R package for working with simple features, providing support for spatial data manipulation and analysis.
- **[rnaturalearth](https://docs.ropensci.org/rnaturalearth/)**: An R package that provides easy access to natural earth map data for creating geographical visualizations.
- **[rnaturalearthdata](https://github.com/ropensci/rnaturalearthdata)**: Companion package to rnaturalearth containing large natural earth datasets.
- **[knitr](https://yihui.org/knitr/)**: An R package for dynamic report generation, enabling the integration of code and text.
- **[kableExtra](https://haozhu233.github.io/kableExtra/)**: An R package for customizing tables and enhancing their visual presentation.
- **[gridExtra](https://cran.r-project.org/web/packages/gridExtra/)**: An R package for arranging multiple grid-based plots on a single page.
- **[moments](https://cran.r-project.org/web/packages/moments/)**: An R package for computing moments, skewness, kurtosis and related statistics.
- **[factoextra](http://www.sthda.com/english/rpkgs/factoextra/)**: An R package for multivariate data analysis and visualization, including PCA and clustering methods.
- **[shinydashboard](https://rstudio.github.io/shinydashboard/)**: An R package for creating dashboards with Shiny.
- **[leaflet](https://rstudio.github.io/leaflet/)**: An R package for creating interactive maps using the Leaflet JavaScript library.
- **[plotly](https://plotly.com/r/)**: An R package for creating interactive visualizations with the Plotly library.
- **[RColorBrewer](https://cran.r-project.org/web/packages/RColorBrewer/)**: An R package providing color palettes for maps and other graphics.
- **[DT](https://rstudio.github.io/DT/)**: An R package for creating interactive data tables.
::BackgroundTitle{title="Resources"}
::
You can find the code here: [Data Visualisation Code](https://go.arthurdanjou.fr/datavis-code)
And the online application here: [Data Visualisation App](https://go.arthurdanjou.fr/datavis-app)
::BackgroundTitle{title="Detailed Report"}
::
<iframe src="/projects/datavis.pdf" width="100%" height="1000px">
</iframe>

View File

@@ -0,0 +1,97 @@
---
slug: dataviz-tuberculose
title: Monitoring & Segmentation of Tuberculosis Cases
type: Academic Project
description: An interactive data visualization project built with R, R Shiny, and ggplot2 for creating dynamic, explorable visualizations.
shortDescription: An interactive data visualization project using R and R Shiny.
publishedAt: 2026-01-05
readingTime: 1
status: Completed
tags:
- R
- R Shiny
- Data Visualization
- ggplot2
icon: i-ph-chart-bar-duotone
---
Interactive Shiny dashboard for WHO tuberculosis data analysis and clustering.
- **GitHub Repository:** [Tuberculose-Visualisation](https://github.com/ArthurDanjou/Tuberculose-Visualisation)
- **Live Application:** [Tuberculose Data Visualization](https://go.arthurdanjou.fr/datavis-app)
::BackgroundTitle{title="Overview"}
::
This project provides an interactive visualization tool for monitoring and segmenting global tuberculosis data from the World Health Organization (WHO). It applies multivariate analysis to reveal operational typologies of global health risks.
**Author:** Arthur Danjou
**Program:** M2 ISF - Dauphine PSL
**Course:** Data Visualisation (2025-2026)
::BackgroundTitle{title="Features"}
::
- Interactive world map with cluster visualization
- K-means clustering for country segmentation (Low/Moderate/Critical Impact)
- Time series analysis with year selector (animated)
- Region filtering by WHO regions
- Key Performance Indicators (KPIs) dashboard
- Raw data exploration with data tables
::BackgroundTitle{title="Project Structure"}
::
```
├── app.R # Shiny application
├── NoticeTechnique.Rmd # Technical report (R Markdown)
├── NoticeTechnique.pdf # Compiled technical report
├── data/
│ ├── TB_analysis_ready.RData # Processed data with clusters
│ └── TB_burden_countries_2025-12-09.csv # Raw WHO data
└── renv/ # R package management
```
::BackgroundTitle{title="Requirements"}
::
- R (>= 4.0.0)
- R packages (see `renv.lock`):
- shiny
- shinydashboard
- leaflet
- plotly
- dplyr
- sf
- RColorBrewer
- DT
- rnaturalearth
::BackgroundTitle{title="Installation"}
::
1. Clone this repository
2. Open R/RStudio in the project directory
3. Restore packages with `renv::restore()`
4. Run the application:
```r
shiny::runApp("app.R")
```
::BackgroundTitle{title="Detailed Report"}
::
<iframe src="/projects/datavis.pdf" width="100%" height="1000px">
</iframe>
::BackgroundTitle{title="License"}
::
© 2026 Arthur Danjou. All rights reserved.
::BackgroundTitle{title="Resources"}
::
You can find the code here: [Data Visualisation Code](https://go.arthurdanjou.fr/datavis-code)
And the online application here: [Data Visualisation App](https://go.arthurdanjou.fr/datavis-app)

View File

@@ -1,71 +0,0 @@
---
slug: implied-volatility-modeling
title: Implied Volatility Surface Modeling
type: Academic Project
description: A large-scale statistical study comparing Generalized Linear Models (GLMs) and black-box machine learning architectures to predict the implied volatility of S&P 500 options.
shortDescription: Predicting the SPX volatility surface using GLMs and black-box models on 1.2 million observations.
publishedAt: 2026-02-28
readingTime: 3
status: In progress
tags:
- R
- GLM
- Finance
- Machine Learning
icon: i-ph-graph-duotone
---
This project targets high-precision calibration of the **Implied Volatility Surface** using a large-scale dataset of S&P 500 (SPX) European options.
The core objective is to stress-test classic statistical models against modern predictive algorithms. **Generalized Linear Models (GLMs)** provide a transparent baseline, while more complex "black-box" architectures are evaluated on whether their accuracy gains justify reduced interpretability in a risk management context.
::BackgroundTitle{title="Dataset & Scale"}
::
The modeling is performed on a high-dimensional dataset with over **1.2 million observations**.
- **Target Variable**: `implied_vol_ref` (implied volatility).
- **Features**: Option strike price ($K$), underlying asset price ($S$), and time to maturity ($\tau$).
- **Volume**: A training set of $1,251,307$ rows and a test set of identical size.
::BackgroundTitle{title="Modeling Methodology"}
::
The project follows a rigorous statistical pipeline to compare two modeling philosophies:
### 1. The Statistical Baseline (GLM)
Using R's GLM framework, I implement models with targeted link functions and error distributions (such as **Gamma** or **Inverse Gaussian**) to capture the global structure of the volatility surface. These models serve as the benchmark for transparency and stability.
### 2. The Black-Box Challenge
To capture local non-linearities such as the volatility smile and skew, I explore more complex architectures. Performance is evaluated by **Root Mean Squared Error (RMSE)** relative to the GLM baselines.
### 3. Feature Engineering
Key financial indicators are derived from the raw data:
- **Moneyness**: Calculated as the ratio $K/S$.
- **Temporal Dynamics**: Transformations of time to maturity to linearize the term structure.
::BackgroundTitle{title="Evaluation & Reproducibility"}
::
Performance is measured strictly via RMSE on the original scale of the target variable. To ensure reproducibility and precise comparisons across model iterations, a fixed random seed is maintained throughout the workflow.
```r
set.seed(2025)
TrainData <- read.csv("train_ISF.csv", stringsAsFactors = FALSE)
TestX <- read.csv("test_ISF.csv", stringsAsFactors = FALSE)
rmse_eval <- function(actual, predicted) {
sqrt(mean((actual - predicted)^2))
}
```
::BackgroundTitle{title="Critical Analysis"}
::
Beyond pure prediction, the project addresses:
- Model Limits: Identifying market regimes where models fail (e.g., deep out-of-the-money options).
- Interpretability: Quantifying the trade-off between complexity and practical utility in a risk management context.
- Future Extensions: Considering richer dynamics, such as historical volatility or skew-specific targets.

View File

@@ -0,0 +1,336 @@
---
slug: implied-volatility-prediction-from-options-data
title: Implied Volatility Prediction from Options Data
type: Academic Project
description: A large-scale statistical study comparing Generalized Linear Models (GLMs) and black-box machine learning architectures to predict the implied volatility of S&P 500 options.
shortDescription: Predicting implied volatility using advanced regression techniques and machine learning models on financial options data.
publishedAt: 2026-02-28
readingTime: 3
status: Completed
tags:
- R
- GLM
- Finance
- Machine Learning
- Statistical Modeling
icon: i-ph-graph-duotone
---
> **M2 Master's Project:** Predicting implied volatility using advanced regression techniques and machine learning models on financial options data.
This project explores the prediction of **implied volatility** from options market data, combining classical statistical methods with modern machine learning approaches. The analysis covers data preprocessing, feature engineering, model benchmarking, and interpretability analysis using real-world financial panel data.
- **GitHub Repository:** [Implied-Volatility-from-Options-Data](https://github.com/ArthurDanjou/Implied-Volatility-from-Options-Data)
---
::BackgroundTitle{title="Project Overview"}
::
### Problem Statement
Implied volatility represents the market's forward-looking expectation of an asset's future volatility. Accurate prediction is crucial for:
- **Option pricing** and valuation
- **Risk management** and hedging strategies
- **Trading strategies** based on volatility arbitrage
### Dataset
The project uses a comprehensive panel dataset tracking **3,887 assets** across **544 observation dates** (2019-2022):
| File | Description | Shape |
|------|-------------|-------|
| `Train_ISF.csv` | Training data with target variable | 1,909,465 rows × 21 columns |
| `Test_ISF.csv` | Test data for prediction | 1,251,308 rows × 18 columns |
| `hat_y.csv` | Final predictions from both models | 1,251,308 rows × 2 columns |
### Key Variables
**Target Variable:**
- `implied_vol_ref` — The implied volatility to predict
**Feature Categories:**
- **Identifiers:** `asset_id`, `obs_date`
- **Market Activity:** `call_volume`, `put_volume`, `call_oi`, `put_oi`, `total_contracts`
- **Volatility Metrics:** `realized_vol_short`, `realized_vol_mid1-3`, `realized_vol_long1-4`, `market_vol_index`
- **Option Structure:** `strike_dispersion`, `maturity_count`
---
::BackgroundTitle{title="Methodology"}
::
### Data Pipeline
```
Raw Data
┌─────────────────────────────────────────────────────────┐
│ Data Splitting (Chronological 80/20) │
│ - Training: 2019-10 to 2021-07 │
│ - Validation: 2021-07 to 2022-03 │
└─────────────────────────────────────────────────────────┘
┌─────────────────────────────────────────────────────────┐
│ Feature Engineering │
│ - Aggregation of volatility horizons │
│ - Creation of financial indicators │
└─────────────────────────────────────────────────────────┘
┌─────────────────────────────────────────────────────────┐
│ Data Preprocessing (tidymodels) │
│ - Winsorization (99.5th percentile) │
│ - Log/Yeo-Johnson transformations │
│ - Z-score normalization │
│ - PCA (95% variance retention) │
└─────────────────────────────────────────────────────────┘
Three Datasets Generated:
├── Tree-based (raw, scale-invariant)
├── Linear (normalized, winsorized)
└── PCA (dimensionality-reduced)
```
### Feature Engineering
New financial indicators created to capture market dynamics:
| Feature | Description | Formula |
|---------|-------------|---------|
| `pulse_ratio` | Volatility trend direction | RV_short / RV_long |
| `stress_spread` | Asset vs market stress | RV_short - Market_VIX |
| `put_call_ratio_volume` | Immediate market stress | Put_Volume / Call_Volume |
| `put_call_ratio_oi` | Long-term risk structure | Put_OI / Call_OI |
| `liquidity_ratio` | Market depth | Total_Volume / Total_OI |
| `option_dispersion` | Market uncertainty | Strike_Dispersion / Total_Contracts |
| `put_low_strike` | Downside protection density | Strike_Dispersion / Put_OI |
| `put_proportion` | Hedging vs speculation | Put_Volume / Total_Volume |
---
::BackgroundTitle{title="Models Implemented"}
::
### Linear Models
| Model | Description | Best RMSE |
|-------|-------------|-----------|
| **OLS** | Ordinary Least Squares | 11.26 |
| **Ridge** | L2 regularization | 12.48 |
| **Lasso** | L1 regularization (variable selection) | 12.03 |
| **Elastic Net** | L1 + L2 combined | ~12.03 |
| **PLS** | Partial Least Squares (on PCA) | 12.79 |
### Linear Mixed-Effects Models (LMM)
Advanced panel data models accounting for asset-specific effects:
| Model | Features | RMSE |
|-------|----------|------|
| LMM Baseline | All variables + Random Intercept | 8.77 |
| LMM Reduced | Collinearity removal | ~8.77 |
| LMM Interactions | Financial interaction terms | ~8.77 |
| LMM + Quadratic | Convexity terms (vol of vol) | 8.41 |
| **LMM + Random Slopes (mod_lmm_5)** | Asset-specific betas | **8.10** ⭐ |
### Tree-Based Models
| Model | Strategy | Validation RMSE | Training RMSE |
|-------|----------|-----------------|---------------|
| **XGBoost** | Level-wise, Bayesian tuning | 10.70 | 0.57 |
| **LightGBM** | Leaf-wise, feature regularization | **10.61** ⭐ | 10.90 |
| Random Forest | Bagging | DNF* | - |
*DNF: Did Not Finish (computational constraints)
### Neural Networks
| Model | Architecture | Status |
|-------|--------------|--------|
| MLP | 128-64 units, tanh activation | Failed to converge |
---
::BackgroundTitle{title="Results Summary"}
::
### Model Comparison
```
RMSE Performance (Lower is Better)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Linear Mixed-Effects (LMM5) 8.38 ████████████████████ Best Linear
Linear Mixed-Effects (LMM4) 8.41 ███████████████████
Linear Mixed-Effects (Baseline) 8.77 ██████████████████
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
LightGBM 10.61 ███████████████ Best Non-Linear
XGBoost 10.70 ██████████████
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
OLS (with interactions) 11.26 █████████████
OLS (baseline)                  12.01 ███████████
Lasso                           12.03 ███████████
Ridge 12.48 ██████████
PLS 12.79 █████████
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
```
### Key Findings
1. **Best Linear Model:** LMM with Random Slopes (RMSE = 8.38)
- Captures asset-specific volatility sensitivities
- Includes quadratic terms for convexity effects
2. **Best Non-Linear Model:** LightGBM (RMSE = 10.61)
- Superior generalization vs XGBoost
- Feature regularization prevents overfitting
3. **Interpretability Insights (SHAP Analysis):**
- `realized_vol_mid` dominates (57% of gain)
- Volatility clustering confirmed as primary driver
- Non-linear regime switching in stress_spread
---
::BackgroundTitle{title="Repository Structure"}
::
```
PROJECT/
├── Projet_MRC_DANJOU_LEGRAND_MERIC_VONSIEMENS.qmd # Main analysis (Quarto)
├── Projet_MRC_DANJOU_LEGRAND_MERIC_VONSIEMENS.html # Rendered report
├── packages.R # R dependencies installer
├── Train_ISF.csv # Training data (~1.9M rows)
├── Test_ISF.csv # Test data (~1.25M rows)
├── hat_y.csv # Final predictions
├── README.md # This file
└── results/
├── lightgbm/ # LightGBM model outputs
└── xgboost/ # XGBoost model outputs
```
---
::BackgroundTitle{title="Getting Started"}
::
### Prerequisites
- **R** ≥ 4.0
- Required packages (auto-installed via `packages.R`)
### Installation
```r
# Install all dependencies
source("packages.R")
```
Or manually install key packages:
```r
install.packages(c(
"tidyverse", "tidymodels", "caret", "glmnet",
"lme4", "lmerTest", "xgboost", "lightgbm",
"ranger", "pls", "shapviz", "rBayesianOptimization"
))
```
### Running the Analysis
1. **Open the Quarto document:**
```r
# In RStudio
rstudioapi::navigateToFile("Projet_MRC_DANJOU_LEGRAND_MERIC_VONSIEMENS.qmd")
```
2. **Render the document:**
```r
quarto::quarto_render("Projet_MRC_DANJOU_LEGRAND_MERIC_VONSIEMENS.qmd")
```
3. **Or run specific sections interactively** using the code chunks in the `.qmd` file
---
::BackgroundTitle{title="Technical Details"}
::
### Data Split Strategy
- **Chronological split** at 80th percentile of dates
- Prevents look-ahead bias and data leakage
- Training: ~1.53M observations
- Validation: ~376K observations
### Hyperparameter Tuning
- **Method:** Bayesian Optimization (Gaussian Processes)
- **Acquisition:** Expected Improvement (UCB)
- **Goal:** Maximize negative RMSE
### Evaluation Metric
**Exponential RMSE** on original scale:
$$
RMSE_{real} = \sqrt{\frac{1}{n} \sum_{i=1}^{n} \left( \exp(\hat{y}_{\log, i}) - y_i \right)^2}
$$
Models trained on log-transformed target for variance stabilization.
---
::BackgroundTitle{title="Key Concepts"}
::
### Financial Theories Applied
1. **Volatility Clustering** — Past volatility predicts future volatility
2. **Variance Risk Premium** — Spread between implied and realized volatility
3. **Fear Gauge** — Put-call ratio as sentiment indicator
4. **Mean Reversion** — Volatility tends to return to long-term average
5. **Liquidity Premium** — Illiquid assets command higher volatility
### Statistical Methods
- Panel data modeling with fixed and random effects
- Principal Component Analysis (PCA)
- Bayesian hyperparameter optimization
- SHAP values for model interpretability
---
::BackgroundTitle{title="Authors"}
::
**Team:**
- Arthur DANJOU
- Camille LEGRAND
- Axelle MERIC
- Moritz VON SIEMENS
**Course:** Classification and Regression (M2)
**Academic Year:** 2025-2026
---
::BackgroundTitle{title="Notes"}
::
- **Computational Constraints:** Some models (Random Forest, MLP) failed due to hardware limitations (16GB RAM, CPU-only)
- **Reproducibility:** Set `seed = 2025` for consistent results
- **Language:** Analysis documented in English, course materials in French
---
::BackgroundTitle{title="References"}
::
Key R packages used:
- `tidymodels` — Modern modeling framework
- `glmnet` — Regularized regression
- `lme4` / `lmerTest` — Mixed-effects models
- `xgboost` / `lightgbm` — Gradient boosting
- `shapviz` — Model interpretability
- `rBayesianOptimization` — Hyperparameter tuning

View File

@@ -5,7 +5,7 @@ type: Hackathon
description: An intensive 4-week challenge to build an AI-powered data assistant. Our team developed a GenAI agent that transforms natural language into executable SQL queries, interactive visualizations, and natural language insights.
shortDescription: A team-based project building an NL-to-SQL agent with Nuxt, Ollama, and Vercel AI SDK.
publishedAt: 2026-03-07
readingTime: 4
readingTime: 15
status: Completed
tags:
- Nuxt
@@ -23,24 +23,45 @@ Organized by **Natixis**, this hackathon followed a high-intensity format: **thr
Working in a **team of four**, our goal was to bridge the gap between non-technical stakeholders and complex financial databases by creating an autonomous "Data Talk" agent.
::BackgroundTitle{title="Core Features"}
::BackgroundTitle{title="How We Built It"}
::
### 1. Data Engineering & Schema Design
### Data Engineering & Schema Design
Before building the AI layer, we handled a significant data migration task. I led the effort to:
* **ETL Pipeline:** Convert fragmented datasets from **.xlsx** and **.csv** formats into a structured **SQL database**.
* **Schema Optimization:** Design robust SQL schemas that allow an LLM to understand relationships (foreign keys, indexing) for accurate query generation.
### 2. Natural Language to SQL (NL-to-SQL)
### Natural Language to SQL (NL-to-SQL)
Using the **Vercel AI SDK** and **Ollama**, we implemented an agentic workflow:
* **Prompt Engineering:** Fine-tuning the agent to translate complex business questions (e.g., "What was our highest growth margin last quarter?") into valid, optimized SQL.
* **Self-Correction:** If a query fails, the agent analyzes the SQL error and self-corrects the syntax before returning a result.
### 3. Automated Insights & Visualization
Data is only useful if its readable. Our Nuxt application goes beyond raw tables:
### Automated Insights & Visualization
Data is only useful if it's readable. Our Nuxt application goes beyond raw tables:
* **Dynamic Charts:** The agent automatically determines the best visualization type (Bar, Line, Pie) based on the query result and renders it using interactive components.
* **Narrative Explanations:** A final LLM pass summarizes the data findings in plain English, highlighting anomalies or key trends.
::BackgroundTitle{title="Impact & Results"}
::
This project demonstrated that a modern stack (Nuxt + local LLMs) can drastically reduce the time needed for data discovery. By the final Saturday, our team presented a working prototype capable of handling multi-table joins and generating real-time financial dashboards from simple chat prompts.
::BackgroundTitle{title="Features"}
::
- **Natural Language Queries**: Ask questions about anomalies in plain French or English
- **SQL Execution**: Automatic SQL query generation and execution against MySQL database
- **Visualizations**: Automatic chart generation (Line, Bar, Area, Donut, Bubble, Gantt)
- **KPI Cards**: Dynamic KPI generation with trends and icons
- **AI-Powered**: Uses Ollama models with tool calling capabilities
- **Dark Mode**: Full light/dark theme support
::BackgroundTitle{title="Technical Stack"}
::
@@ -49,10 +70,467 @@ Data is only useful if its readable. Our Nuxt application goes beyond raw tab
* **Inference:** **Ollama** for running LLMs locally, ensuring data privacy during development.
* **Storage:** **PostgreSQL** for the converted data warehouse.
::BackgroundTitle{title="Impact & Results"}
::BackgroundTitle{title="Quick Start"}
::
This project demonstrated that a modern stack (Nuxt + local LLMs) can drastically reduce the time needed for data discovery. By the final Saturday, our team presented a working prototype capable of handling multi-table joins and generating real-time financial dashboards from simple chat prompts.
### Prerequisites
- **Docker & Docker Compose** (for MySQL database)
- **Python 3.13+** (project managed with `uv`; `pip` works too)
- **Bun** (package manager): `npm install -g bun`
- **Ollama** running locally with a compatible model (e.g., `llama3.2`, `qwen2.5`, `mistral`)
### 1. Start MySQL Database
```bash
docker compose up -d mysql
```
The `natixis` database is created automatically from `init.sql`:
- Default: `mysql://root:@localhost:3306/natixis`
- Root password is empty for local development only
### 2. Load Data into Database
Install Python dependencies:
```bash
uv sync # or: pip install -e .
```
Place source files in `./data/` directory:
- `Configuration.xlsx` - Control and typology configuration
- `anomaly_dump_result.csv` - Anomaly data
- `GenericAnomaly_dump_result_chunk_*.xlsx` - Generic anomaly chunks
These datasets are not tracked in the repository - use the files shared with the project.
Then run the Jupyter notebook:
```bash
jupyter notebook data_exploration.ipynb
```
Execute the `insert_into_sql` and `reset_and_load` cells to populate `generic_anomalies`, `anomalies`, and configuration tables.
### 3. Configure Environment
Create `.env` file in `/chat`:
```env
DATABASE_URL="mysql://root:@localhost:3306/natixis"
```
### 4. Run the Chat Application
```bash
cd chat
bun install
bun run dev --host
```
The app will be available at `http://localhost:3000`
::BackgroundTitle{title="Project Structure"}
::
```
.
├── data_exploration.ipynb # Jupyter notebook for data loading
├── init.sql # MySQL initialization script
├── docker-compose.yml # Docker services configuration
├── data/ # Source data files (not tracked)
│ ├── Configuration.xlsx
│ ├── anomaly_dump_result.csv
│ └── GenericAnomaly_dump_result_chunk_*.xlsx
└── chat/ # Nuxt application
├── app/ # Vue components and pages
├── server/ # API endpoints
├── shared/ # Shared utilities and tools
└── nuxt.config.ts # Nuxt configuration
```
::BackgroundTitle{title="Database Schema"}
::
The database contains the following main tables:
### Core Tables
| Table | Description |
|-------|-------------|
| `anomalies` | Standard anomaly records |
| `generic_anomalies` | Generic anomaly records (default for analysis) |
| `typologies` | Anomaly classification typologies |
| `functional_controls` | Control definitions and ownership |
| `business_objects` | Business object definitions |
| `business_object_fields` | Field definitions for business objects |
| `business_data` | Business data definitions |
| `business_data_field_link` | Links between fields and business data |
### Key Fields in Anomalies
- `anomaly_kuid` - Unique identifier (primary key)
- `title_txt`, `description_txt` - Anomaly details
- `priority_typ` - Priority level (CRITICAL, HIGH, etc.)
- `detection_time` - When anomaly was detected
- `hotfix_flg` - Hotfix eligibility flag
- `object_identification_fields` - JSON with contract/object context
- `error_fields` - JSON with error details and resolution status
::BackgroundTitle{title="Chat Application Architecture"}
::
```
chat/
├── app/
│ ├── app.vue # Root component with UI providers
│ ├── components/
│ │ ├── Helper.vue # Help tooltip component
│ │ ├── ModelSelect.vue # Model selector dropdown
│ │ ├── Reasoning.vue # AI reasoning display
│ │ └── tool/ # Tool UI components
│ │ ├── SqlDisplay.vue # SQL execution display
│ │ ├── Chart.vue # Chart visualizations
│ │ └── KPI.vue # KPI cards display
│ └── pages/
│ ├── index.vue # Landing page with prompt suggestions
│ └── chat.vue # Main chat interface
├── server/
│ └── api/chat.ts # Chat API endpoint with streaming
├── shared/utils/tools/
│ ├── executeSql.ts # SQL query execution tool
│ ├── chart.ts # Chart visualization tool
│ └── kpi.ts # KPI display tool
├── nuxt.config.ts # Nuxt configuration
└── package.json # Dependencies
```
### Data Flow
```
User Message
AI Streaming (server/api/chat.ts)
Tool Selection (toolChoice: 'auto')
Tool Execution (server-side)
Stream Results to Client
groupParts() in chat.vue
Component Selection
├── type: 'reasoning' → <Reasoning />
├── type: 'tool-executeSqlTool' → <ToolSqlDisplay />
├── type: 'tool-chartTool' → <ToolChart />
└── type: 'tool-kpiTool' → <ToolKPI />
Rendered Message
```
::BackgroundTitle{title="Available Tools"}
::
### 1. `executeSqlTool`
**Server Tool**: `shared/utils/tools/executeSql.ts`
Executes SQL SELECT queries against the database.
**Parameters:**
- `query`: SQL SELECT query (MySQL syntax)
- `reason`: Explanation for debugging
**Example usage by AI:**
```sql
SELECT priority_typ, COUNT(*) AS anomaly_count
FROM generic_anomalies
GROUP BY priority_typ
LIMIT 10
```
**Security:** Only SELECT queries are allowed. Results are limited to 50 rows.
**UI Component**: `app/components/tool/SqlDisplay.vue`
### 2. `chartTool`
**Server Tool**: `shared/utils/tools/chart.ts`
Generates data visualizations.
**Parameters:**
- `chartType`: `line`, `bar`, `area`, `donut`, `bubble`, `gantt`
- `title`: Chart title
- `data`: Array of data objects
- `xKey`: Field for X-axis
- `xKeyStart`, `xKeyEnd`: Start/end fields for Gantt charts
- `radiusKey`: Size field for Bubble charts
- `series`: Array of `{ key, name, color? }` for Y-axis values
- `showMarkers`, `showLegend`, `isStacked`: Display options
- `xLabel`, `yLabel`: Axis labels
**Example:**
```typescript
{
chartType: 'donut',
title: 'Anomalies by Priority',
data: [{ priority: 'CRITICAL', count: 150 }, { priority: 'HIGH', count: 89 }],
xKey: 'priority',
series: [{ key: 'count', name: 'Anomalies' }]
}
```
**UI Component**: `app/components/tool/Chart.vue`
**Supported Chart Types:**
| Type | Use Case | Example |
|------|----------|---------|
| `line` | Time series trends | Anomalies over time |
| `bar` | Category comparisons | Anomalies by priority |
| `area` | Cumulative metrics | Volume over time |
| `donut` | Proportions | Distribution by type |
| `bubble` | Multi-dimensional data | Risk vs. volume vs. severity |
| `gantt` | Timelines | Remediation schedules |
### 3. `kpiTool`
**Server Tool**: `shared/utils/tools/kpi.ts`
Displays KPI cards with metrics.
**Parameters:**
- `kpis`: Array of KPI objects (max 6 recommended)
**KPI Object:**
```typescript
{
label: 'Critical Anomalies', // Short metric name
value: '150', // Formatted value (string or number)
description: 'Active critical issues', // Context description
icon: 'i-lucide-alert-triangle', // Lucide icon name
trend: 'up', // 'up' | 'down' | 'stable'
trendValue: '+12%' // Optional trend percentage
}
```
**UI Component**: `app/components/tool/KPI.vue`
::BackgroundTitle{title="Adding a New Tool"}
::
To add a new tool, implement both server-side and client-side layers:
### Step 1: Define Server Tool
Create a new file in `shared/utils/tools/`:
```typescript
// shared/utils/tools/myTool.ts
import { tool, type UIToolInvocation } from 'ai'
import { z } from 'zod'
export type MyUIToolInvocation = UIToolInvocation<typeof myTool>
export const myTool = tool({
description: 'Brief description of what the tool does and when to use it.',
inputSchema: z.object({
param1: z.string().describe('Parameter description'),
param2: z.number().optional().describe('Optional parameter')
}),
outputSchema: z.object({
result: z.string()
}),
execute: async ({ param1, param2 }) => {
return { result: 'processed data' }
}
})
```
### Step 2: Export Tool
Add to `shared/utils/index.ts`:
```typescript
export * from './tools/myTool'
```
### Step 3: Register in Chat API
Update `server/api/chat.ts`:
```typescript
import { myTool } from '~/shared/utils'
const result = await streamText({
model: ollama(model, { /* ... */ }),
tools: {
executeSqlTool,
chartTool,
kpiTool,
myTool
},
})
```
### Step 4: Update System Prompt
Add tool documentation to the system prompt in `server/api/chat.ts`:
```typescript
## myTool
- Purpose: What the tool does
- When to use: Specific use cases
- Required parameters: param1, param2
- Output: Description of result format
```
### Step 5: Create UI Component
Create `app/components/tool/MyTool.vue`:
```vue
<script setup lang="ts">
import type { MyUIToolInvocation } from '~/shared/utils'
const props = defineProps<{
invocation: MyUIToolInvocation
isStreaming?: boolean
}>()
const output = computed(() => props.invocation.output)
const state = computed(() => props.invocation.state)
</script>
<template>
<div v-if="state !== 'output-available'" class="my-4 flex items-center gap-2 text-gray-500">
<UIcon name="i-lucide-loader-2" class="animate-spin" />
<span>Processing...</span>
</div>
<div v-else-if="output" class="my-4 p-4 rounded-lg border bg-gray-50 dark:bg-gray-900">
<h3 class="font-semibold">Tool Result</h3>
<p>{{ output.result }}</p>
</div>
</template>
```
### Step 6: Register Component in Chat Page
Update `app/pages/chat.vue`:
```vue
<ToolMyTool
v-else-if="block.type === 'tool' && block.part.type === 'tool-myTool'"
:invocation="(block.part as any).toolInvocation || block.part"
:is-streaming="block.isStreaming"
/>
```
::BackgroundTitle{title="System Prompt Guidelines"}
::
The system prompt (`server/api/chat.ts`) controls AI behavior:
### Key Rules
1. **Schema Compliance**: Only use columns/tables defined in `init.sql`
2. **Default Table**: Use `generic_anomalies` for general analysis
3. **Language**: Respond in the user's language
4. **No SQL Visibility**: Never show raw SQL to users
5. **Explicit Requests**: Only use charts/KPIs when explicitly requested
6. **Proactive Suggestions**: Offer visualizations without auto-executing
### Critical Definitions
- **Critical Anomaly**: `priority_typ` IN ('CRITICAL', 'CRITIQUE', 'HIGH', 'HAUTE') OR `hotfix_flg = 1`
- **Open/Unresolved**: Check `error_fields.resolved_value_txt` for resolution status
- **Owner**: Use `functional_controls.responsible_login_id` as default owner
::BackgroundTitle{title="Development"}
::
### Scripts
```bash
bun run dev # Start development server
bun run build # Build for production
bun run preview # Preview production build
bun run lint # Run ESLint
bun run typecheck # Type check with vue-tsc
```
### Environment Variables
| Variable | Description | Required |
|----------|-------------|----------|
| `DATABASE_URL` | MySQL connection string | Yes |
### Tech Stack
- **Framework**: Nuxt 4 + Vue 3
- **UI**: Nuxt UI 4 (based on Tailwind CSS)
- **AI**: AI SDK + Ollama
- **Database**: MySQL via NuxtHub
- **Charts**: nuxt-charts
- **Utilities**: VueUse, Zod
::BackgroundTitle{title="Deployment"}
::
### NuxtHub (Recommended)
```bash
cd chat
bun run build
npx hub deploy
```
### Manual Deployment
```bash
cd chat
bun run build
bun run preview
```
Set production environment variables for database connection.
::BackgroundTitle{title="Troubleshooting"}
::
### Common Issues
1. **Ollama Connection Failed**
- Ensure Ollama is running: `ollama serve`
- Check model availability: `ollama pull llama3.2`
2. **Database Connection Error**
- Verify MySQL is running: `docker ps`
- Check `.env` has correct `DATABASE_URL`
3. **Empty Query Results**
- Ensure data is loaded via Jupyter notebook
- Verify tables exist: `SHOW TABLES;`
4. **Tool Not Called**
- System prompt may need adjustment
- Check `toolChoice: 'auto'` in streamText config
### Debug Mode
View AI reasoning by checking console logs:
```typescript
// In chat.ts, the tool execution logs
console.log('⚡ executing SQL:', query)
```
---

View File

@@ -6,7 +6,7 @@ description: An academic project exploring the automation of GenAI workflows usi
shortDescription: Automating GenAI workflows with n8n and Ollama in a self-hosted environment.
publishedAt: 2026-03-15
readingTime: 2
status: In progress
status: Completed
tags:
- n8n
- Gemini

View File

@@ -0,0 +1,159 @@
---
slug: nlp-hr-onboarding
title: Intelligent HR Onboarding Assistant
type: Academic Project
description: Intelligent HR onboarding assistant using RAG, LangChain agents, and MistralAI embeddings to help new employees navigate company policies, employee directory, and administrative tasks.
shortDescription: An AI-powered assistant for streamlining HR onboarding processes and improving new hire experience.
publishedAt: 2026-03-13
readingTime: 3
favorite: false
status: Completed
tags:
- Python
- NLP
- LangChain
- RAG
icon: i-ph-robot-duotone
---
**NLP Project — Master M2**
*Authors: Arthur DANJOU, Axelle MERIC, Moritz von SIEMENS*
::BackgroundTitle{title="Project Overview"}
::
The **Intelligent HR Onboarding Assistant** is a conversational AI system designed to guide new employees during their first week at **TechCorp**. It combines retrieval-augmented generation, tool-using agents, and conversational memory to provide accurate and actionable HR support.
The assistant can answer policy questions, retrieve employee information, schedule internal meetings, and prepare leave requests from natural-language prompts.
::BackgroundTitle{title="Key Features"}
::
- **Semantic HR policy search** powered by a RAG pipeline.
- **Employee directory lookup** from structured JSON records.
- **Meeting scheduling tools** integrated through LangChain.
- **Automated leave request workflow** from chat instructions.
- **Sliding-window memory** to keep multi-turn context coherent.
- **Interactive Gradio UI** with visible agent actions and tool calls.
::BackgroundTitle{title="Architecture"}
::
```
┌──────────────────────────────────────────────────────────┐
│ HR Onboarding Assistant — TechCorp │
│ │
│ 📝 System prompts (LangChain LCEL) │
│ 🧠 Sliding window conversational memory │
│ 🔧 Tools: │
│ ├── 🔍 Knowledge base search (RAG) │
│ ├── 👤 Employee directory │
│ ├── 📅 Meeting scheduling │
│ ├── 🏖️ Leave request submission │
│ └── 🕐 Current date and time │
│ 🔄 ReAct loop: reason → act → observe │
│ 📊 MistralAI Embeddings + Qdrant Vector Store │
└──────────────────────────────────────────────────────────┘
```
::BackgroundTitle{title="Prerequisites"}
::
- Python ≥ 3.13
- MistralAI API key
::BackgroundTitle{title="Installation"}
::
1. **Clone the repository**
```bash
git clone <repository-url>
cd NLP-Intelligent-HR-Onboarding-Assistant-with-RAG-and-LangChain
```
2. **Install dependencies**
```bash
uv sync
```
3. **Configure MistralAI API key**
Set the environment variable:
```bash
export MISTRAL_API_KEY="your_api_key"
```
::BackgroundTitle{title="Usage"}
::
### Run the Jupyter notebook
```bash
jupyter notebook projet.ipynb
```
Execute cells sequentially to:
1. Analyze tokenization of HR documents
2. Create the Qdrant vector database
3. Initialize the ReAct agent
4. Run demonstrations
5. Launch the Gradio interface (runs on `http://127.0.0.1:7860`)
### Data structure
```
data/
├── entreprise.md # HR knowledge base (leave policy, remote work, etc.)
└── employés.json # TechCorp employee directory
```
::BackgroundTitle{title="Learning Modules"}
::
| TP | Concept | Usage |
|:---|:--------|:------|
| **TP1** | Embeddings | Document vectorization and cosine similarity retrieval |
| **TP2** | BPE Tokenization | Token-cost analysis with FR/EN comparison |
| **TP3** | LLM + LangChain | ChatMistralAI setup, prompts, and LCEL chains |
| **TP4** | Agents + Memory | `@tool` usage, ReAct orchestration, sliding-window memory |
| **TP5** | RAG + Gradio | Qdrant indexing, semantic retrieval, interactive UI |
::BackgroundTitle{title="Technologies"}
::
- **LangChain**: LLM orchestration framework
- **MistralAI**: LLM inference and embeddings (`mistral-embed`)
- **Qdrant**: In-memory vector database
- **Gradio**: Interactive web interface
- **tiktoken**: BPE tokenization analysis
- **pandas**: Employee data manipulation
::BackgroundTitle{title="Main Dependencies"}
::
```
langchain>=1.2.11
langchain-mistralai>=1.1.1
langchain-qdrant>=1.1.0
gradio>=6.9.0
tiktoken>=0.12.0
pandas>=3.0.1
```
::BackgroundTitle{title="Example Prompts"}
::
- "How many days of annual leave do I have?"
- "What is the remote work policy?"
- "Give me Claire Petit's contact information"
- "Schedule a meeting with the Data Science team tomorrow at 2pm"
- "I want to request leave from January 15th to 20th"
::BackgroundTitle{title="Authors"}
::
- **Arthur DANJOU**
- **Axelle MERIC**
- **Moritz von SIEMENS**
*Project completed as part of the Natural Language Processing course — Master M2*

View File

@@ -0,0 +1,119 @@
---
slug: rl-tennis-atari-game
title: Reinforcement Learning for Tennis Strategy Optimization
type: Academic Project
description: An academic project exploring the application of reinforcement learning to optimize tennis strategies. The project involves training RL agents on Atari Tennis (ALE) to evaluate strategic decision-making through competitive self-play and baseline benchmarking.
shortDescription: Reinforcement learning algorithms applied to Atari tennis matches for strategy optimization and competitive benchmarking.
publishedAt: 2026-03-13
readingTime: 3
status: Completed
tags:
- Reinforcement Learning
- Python
- Gymnasium
- Atari
- ALE
icon: i-ph-lightning-duotone
---
Comparison of Reinforcement Learning algorithms on Atari Tennis (`ALE/Tennis-v5` via Gymnasium/PettingZoo).
- **GitHub Repository:** [Tennis-Atari-Game](https://github.com/ArthurDanjou/Tennis-Atari-Game)
::BackgroundTitle{title="Overview"}
::
This project implements and compares five RL agents playing Atari Tennis against the built-in AI and in head-to-head tournaments.
::BackgroundTitle{title="Algorithms"}
::
| Agent | Type | Policy | Update Rule |
|-------|------|--------|-------------|
| **Random** | Baseline | Uniform random | None |
| **SARSA** | TD(0), on-policy | ε-greedy | $W_a \leftarrow W_a + \alpha \cdot (r + \gamma \hat{q}(s', a') - \hat{q}(s, a)) \cdot \phi(s)$ |
| **Q-Learning** | TD(0), off-policy | ε-greedy | $W_a \leftarrow W_a + \alpha \cdot (r + \gamma \max_{a'} \hat{q}(s', a') - \hat{q}(s, a)) \cdot \phi(s)$ |
| **Monte Carlo** | First-visit MC | ε-greedy | $W_a \leftarrow W_a + \alpha \cdot (G_t - \hat{q}(s, a)) \cdot \phi(s)$ |
| **DQN** | Deep Q-Network | ε-greedy | MLP (256→256) with experience replay & target network |
::BackgroundTitle{title="Architecture"}
::
- **Linear agents** (SARSA, Q-Learning, Monte Carlo): $\hat{q}(s, a; \mathbf{W}) = \mathbf{W}_a^\top \phi(s)$ with $\phi(s) \in \mathbb{R}^{128}$ (RAM observation)
- **DQN**: MLP network (128 → 128 → 64 → 18) trained with Adam optimizer, Huber loss, and periodic target network sync
::BackgroundTitle{title="Environment"}
::
- **Game**: Atari Tennis via PettingZoo (`tennis_v3`)
- **Observation**: RAM state (128 features)
- **Action Space**: 18 discrete actions
- **Agents**: 2 players (`first_0` and `second_0`)
::BackgroundTitle{title="Project Structure"}
::
```
.
├── Project_RL_DANJOU_VON-SIEMENS.ipynb # Main notebook
├── README.md # This file
├── checkpoints/ # Saved agent weights
│ ├── sarsa.pkl
│ ├── q_learning.pkl
│ ├── montecarlo.pkl
│ └── dqn.pkl
└── plots/ # Training & evaluation plots
├── SARSA_training_curves.png
├── Q-Learning_training_curves.png
├── MonteCarlo_training_curves.png
├── DQN_training_curves.png
├── evaluation_results.png
└── championship_matrix.png
```
::BackgroundTitle{title="Key Results"}
::
### Win Rate vs Random Baseline
| Agent | Win Rate |
|-------|----------|
| SARSA | 88.9% |
| Q-Learning | 41.2% |
| Monte Carlo | 47.1% |
| DQN | 6.2% |
### Championship Tournament
Full round-robin tournament where each agent faces every other agent in both positions (first_0/second_0).
::BackgroundTitle{title="Notebook Sections"}
::
1. **Configuration & Checkpoints** — Incremental training workflow with pickle serialization
2. **Utility Functions** — Observation normalization, ε-greedy policy
3. **Agent Definitions** — `RandomAgent`, `SarsaAgent`, `QLearningAgent`, `MonteCarloAgent`, `DQNAgent`
4. **Training Infrastructure** — `train_agent()`, `plot_training_curves()`
5. **Evaluation** — Match system, random baseline, round-robin tournament
6. **Results & Visualization** — Win rate plots, matchup matrix heatmap
::BackgroundTitle{title="Known Issues"}
::
- **Monte Carlo & DQN**: Checkpoint loading issues — saved weights may not restore properly during evaluation (training works correctly)
::BackgroundTitle{title="Dependencies"}
::
- Python 3.13+
- `numpy`, `matplotlib`
- `torch`
- `gymnasium`, `ale-py`
- `pettingzoo`
- `tqdm`
::BackgroundTitle{title="Authors"}
::
- Arthur DANJOU
- Moritz VON SIEMENS

View File

@@ -1,55 +0,0 @@
---
slug: rl-tennis
title: Reinforcement Learning for Tennis Strategy Optimization
type: Academic Project
description: An academic project exploring the application of reinforcement learning to optimize tennis strategies. The project involves training RL agents on Atari Tennis (ALE) to evaluate strategic decision-making through competitive self-play and baseline benchmarking.
shortDescription: Reinforcement learning algorithms applied to Atari tennis matches for strategy optimization and competitive benchmarking.
publishedAt: 2026-03-13
readingTime: 3
status: In progress
tags:
- Reinforcement Learning
- Python
- Gymnasium
- Atari
- ALE
icon: i-ph-lightning-duotone
---
::BackgroundTitle{title="Overview"}
::
This project serves as a practical application of theoretical Reinforcement Learning (RL) principles. The goal is to develop and train autonomous agents capable of mastering the complex dynamics of **Atari Tennis**, using the **Arcade Learning Environment (ALE)** via Farama Foundation's Gymnasium.
Instead of simply reaching a high score, this project focuses on **strategy optimization** and **comparative performance** through a multi-stage tournament architecture.
::BackgroundTitle{title="Technical Objectives"}
::
The project is divided into three core phases:
### 1. Algorithm Implementation
I am implementing several key RL algorithms covered during my academic curriculum to observe their behavioral differences in a high-dimensional state space:
* **Value-Based Methods:** Deep Q-Networks (DQN) and its variants (Double DQN, Dueling DQN).
* **Policy Gradient Methods:** Proximal Policy Optimization (PPO) for more stable continuous action control.
* **Exploration Strategies:** Implementing epsilon-greedy and entropy-based exploration to handle the sparse reward signals in tennis rallies.
#### 2. The "Grand Slam" Tournament (Self-Play)
To determine the most robust strategy, I developed a competitive framework:
* **Agent vs. Agent:** Different algorithms (e.g., PPO vs. DQN) are pitted against each other in head-to-head matches.
* **Evolutionary Ranking:** Success is measured not just by points won, but by the ability to adapt to the opponent's playstyle (serve-and-volley vs. baseline play).
* **Winner Identification:** The agent with the highest win rate and most stable policy is crowned the "Optimal Strategist."
#### 3. Benchmarking Against Atari Baselines
The final "Boss Level" involves taking my best-performing trained agent and testing it against the pre-trained, high-performance algorithms provided by the Atari/ALE benchmarks. This serves as a validation step to measure the efficiency of my custom implementations against industry-standard baselines.
::BackgroundTitle{title="Tech Stack & Environment"}
::
* **Environment:** [ALE (Arcade Learning Environment) - Tennis](https://ale.farama.org/environments/tennis/)
* **Frameworks:** Python, Gymnasium, PyTorch (for neural network backends).
* **Key Challenges:** Handling the long-horizon dependency of a tennis match and the high-frequency input of the Atari RAM/Pixels.
---
*This project is currently in the training phase. I am fine-tuning the reward function to discourage "passive" play and reward aggressive net approaches.*

View File

@@ -113,7 +113,7 @@ export default defineNuxtConfig({
experimental: {
viewTransition: true
},
compatibilityDate: '2025-12-13',
compatibilityDate: '2026-02-24',
nitro: {
preset: 'cloudflare_module',
@@ -166,6 +166,10 @@ export default defineNuxtConfig({
})
},
sitemap: {
zeroRuntime: true
},
seo: {
redirectToCanonicalSiteUrl: true
},

View File

@@ -18,11 +18,11 @@
},
"dependencies": {
"@libsql/client": "^0.17.0",
"@nuxt/content": "3.11.2",
"@nuxt/eslint": "1.15.1",
"@nuxt/ui": "^4.4.0",
"@nuxthub/core": "0.10.6",
"@nuxtjs/mdc": "0.20.1",
"@nuxt/content": "3.12.0",
"@nuxt/eslint": "1.15.2",
"@nuxt/ui": "4.5.1",
"@nuxthub/core": "0.10.7",
"@nuxtjs/mdc": "0.20.2",
"@nuxtjs/seo": "3.4.0",
"@vueuse/core": "^14.2.1",
"@vueuse/math": "^14.2.1",
@@ -30,23 +30,24 @@
"drizzle-kit": "^0.31.9",
"drizzle-orm": "^0.45.1",
"nuxt": "4.3.1",
"nuxt-studio": "1.3.2",
"vue": "3.5.28",
"vue-router": "5.0.2",
"nuxt-studio": "1.4.0",
"vue": "3.5.30",
"vue-router": "5.0.3",
"zod": "^4.3.6"
},
"devDependencies": {
"@iconify-json/devicon": "1.2.58",
"@iconify-json/devicon": "1.2.59",
"@iconify-json/file-icons": "^1.2.2",
"@iconify-json/logos": "^1.2.10",
"@iconify-json/ph": "^1.2.2",
"@iconify-json/twemoji": "1.2.5",
"@iconify-json/vscode-icons": "1.2.43",
"@types/node": "25.2.3",
"@iconify-json/vscode-icons": "1.2.45",
"@types/node": "25.4.0",
"@vueuse/nuxt": "14.2.1",
"eslint": "10.0.0",
"baseline-browser-mapping": "^2.10.0",
"eslint": "10.0.3",
"typescript": "^5.9.3",
"vue-tsc": "3.2.4",
"wrangler": "4.66.0"
"vue-tsc": "3.2.5",
"wrangler": "4.71.0"
}
}

View File

@@ -1,14 +1,16 @@
/* eslint-disable */
// Generated by Wrangler by running `wrangler types` (hash: 8c48032b4b2801cdbac6e8dbc9d26203)
// Runtime types generated with workerd@1.20251210.0 2025-12-13 nodejs_compat
// Generated by Wrangler by running `wrangler types` (hash: d4a0374e6b0a617a49990dba7b6dc49b)
// Runtime types generated with workerd@1.20260301.1 2025-12-13 nodejs_compat
declare namespace Cloudflare {
interface GlobalProps {
mainModule: typeof import("./.output/server/index");
}
interface Env {
interface PreviewEnv {
CACHE: KVNamespace;
DB: D1Database;
STUDIO_GITHUB_CLIENT_ID: string;
STUDIO_GITHUB_CLIENT_SECRET: string;
STUDIO_GITHUB_MODERATORS: string;
NUXT_DISCORD_USER_ID: string;
NUXT_WAKATIME_CODING: string;
NUXT_WAKATIME_EDITORS: string;
@@ -19,8 +21,24 @@ declare namespace Cloudflare {
NUXT_HUB_CLOUDFLARE_ACCOUNT_ID: string;
NUXT_HUB_CLOUDFLARE_API_TOKEN: string;
NUXT_HUB_CLOUDFLARE_CACHE_NAMESPACE_ID: string;
}
interface Env {
CACHE: KVNamespace;
DB: D1Database;
ASSETS: Fetcher;
STUDIO_GITHUB_CLIENT_ID: string;
STUDIO_GITHUB_CLIENT_SECRET: string;
STUDIO_GITHUB_MODERATORS: string;
NUXT_DISCORD_USER_ID: string;
NUXT_WAKATIME_CODING: string;
NUXT_WAKATIME_EDITORS: string;
NUXT_WAKATIME_LANGUAGES: string;
NUXT_WAKATIME_OS: string;
NUXT_WAKATIME_USER_ID: string;
NUXT_STATUS_PAGE: string;
NUXT_HUB_CLOUDFLARE_ACCOUNT_ID: string;
NUXT_HUB_CLOUDFLARE_API_TOKEN: string;
NUXT_HUB_CLOUDFLARE_CACHE_NAMESPACE_ID: string;
ASSETS?: Fetcher;
}
}
interface Env extends Cloudflare.Env {}
@@ -28,7 +46,7 @@ type StringifyValues<EnvType extends Record<string, unknown>> = {
[Binding in keyof EnvType]: EnvType[Binding] extends string ? EnvType[Binding] : string;
};
declare namespace NodeJS {
interface ProcessEnv extends StringifyValues<Pick<Cloudflare.Env, "STUDIO_GITHUB_CLIENT_ID" | "STUDIO_GITHUB_CLIENT_SECRET" | "NUXT_DISCORD_USER_ID" | "NUXT_WAKATIME_CODING" | "NUXT_WAKATIME_EDITORS" | "NUXT_WAKATIME_LANGUAGES" | "NUXT_WAKATIME_OS" | "NUXT_WAKATIME_USER_ID" | "NUXT_STATUS_PAGE" | "NUXT_HUB_CLOUDFLARE_ACCOUNT_ID" | "NUXT_HUB_CLOUDFLARE_API_TOKEN" | "NUXT_HUB_CLOUDFLARE_CACHE_NAMESPACE_ID">> {}
interface ProcessEnv extends StringifyValues<Pick<Cloudflare.Env, "STUDIO_GITHUB_CLIENT_ID" | "STUDIO_GITHUB_CLIENT_SECRET" | "STUDIO_GITHUB_MODERATORS" | "NUXT_DISCORD_USER_ID" | "NUXT_WAKATIME_CODING" | "NUXT_WAKATIME_EDITORS" | "NUXT_WAKATIME_LANGUAGES" | "NUXT_WAKATIME_OS" | "NUXT_WAKATIME_USER_ID" | "NUXT_STATUS_PAGE" | "NUXT_HUB_CLOUDFLARE_ACCOUNT_ID" | "NUXT_HUB_CLOUDFLARE_API_TOKEN" | "NUXT_HUB_CLOUDFLARE_CACHE_NAMESPACE_ID">> {}
}
// Begin runtime types
@@ -517,8 +535,10 @@ interface DurableObjectNamespaceNewUniqueIdOptions {
jurisdiction?: DurableObjectJurisdiction;
}
type DurableObjectLocationHint = "wnam" | "enam" | "sam" | "weur" | "eeur" | "apac" | "oc" | "afr" | "me";
type DurableObjectRoutingMode = "primary-only";
interface DurableObjectNamespaceGetDurableObjectOptions {
locationHint?: DurableObjectLocationHint;
routingMode?: DurableObjectRoutingMode;
}
interface DurableObjectClass<_T extends Rpc.DurableObjectBranded | undefined = undefined> {
}
@@ -1406,6 +1426,12 @@ declare abstract class PromiseRejectionEvent extends Event {
*/
declare class FormData {
constructor();
/**
* The **`append()`** method of the FormData interface appends a new value onto an existing key inside a `FormData` object, or adds the key if it does not already exist.
*
* [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/append)
*/
append(name: string, value: string | Blob): void;
/**
* The **`append()`** method of the FormData interface appends a new value onto an existing key inside a `FormData` object, or adds the key if it does not already exist.
*
@@ -1442,6 +1468,12 @@ declare class FormData {
* [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/has)
*/
has(name: string): boolean;
/**
* The **`set()`** method of the FormData interface sets a new value for an existing key inside a `FormData` object, or adds the key/value if it does not already exist.
*
* [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/set)
*/
set(name: string, value: string | Blob): void;
/**
* The **`set()`** method of the FormData interface sets a new value for an existing key inside a `FormData` object, or adds the key/value if it does not already exist.
*
@@ -1768,7 +1800,7 @@ interface Request<CfHostMetadata = unknown, Cf = CfProperties<CfHostMetadata>> e
* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/signal)
*/
signal: AbortSignal;
cf: Cf | undefined;
cf?: Cf;
/**
* The **`integrity`** read-only property of the Request interface contains the subresource integrity value of the request.
*
@@ -2103,6 +2135,8 @@ interface Transformer<I = any, O = any> {
expectedLength?: number;
}
interface StreamPipeOptions {
preventAbort?: boolean;
preventCancel?: boolean;
/**
* Pipes this readable stream to a given writable stream destination. The way in which the piping process behaves under various error conditions can be customized with a number of passed options. It returns a promise that fulfills when the piping process completes successfully, or rejects if any errors were encountered.
*
@@ -2121,8 +2155,6 @@ interface StreamPipeOptions {
* The signal option can be set to an AbortSignal to allow aborting an ongoing pipe operation via the corresponding AbortController. In this case, this source readable stream will be canceled, and destination aborted, unless the respective options preventCancel or preventAbort are set.
*/
preventClose?: boolean;
preventAbort?: boolean;
preventCancel?: boolean;
signal?: AbortSignal;
}
type ReadableStreamReadResult<R = any> = {
@@ -2397,13 +2429,13 @@ declare abstract class TransformStreamDefaultController<O = any> {
terminate(): void;
}
interface ReadableWritablePair<R = any, W = any> {
readable: ReadableStream<R>;
/**
* Provides a convenient, chainable way of piping this readable stream through a transform stream (or any other { writable, readable } pair). It simply pipes the stream into the writable side of the supplied pair, and returns the readable side for further use.
*
* Piping a stream will lock it for the duration of the pipe, preventing any other consumer from acquiring a reader.
*/
writable: WritableStream<W>;
readable: ReadableStream<R>;
}
/**
* The **`WritableStream`** interface of the Streams API provides a standard abstraction for writing streaming data to a destination, known as a sink.
@@ -3333,6 +3365,191 @@ declare abstract class Performance {
get timeOrigin(): number;
/* [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/performance/#performancenow) */
now(): number;
/**
* The **`toJSON()`** method of the Performance interface is a Serialization; it returns a JSON representation of the Performance object.
*
* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Performance/toJSON)
*/
toJSON(): object;
}
// AI Search V2 API Error Interfaces
interface AiSearchInternalError extends Error {
}
interface AiSearchNotFoundError extends Error {
}
interface AiSearchNameNotSetError extends Error {
}
// Filter types (shared with AutoRAG for compatibility)
type ComparisonFilter = {
key: string;
type: 'eq' | 'ne' | 'gt' | 'gte' | 'lt' | 'lte';
value: string | number | boolean;
};
type CompoundFilter = {
type: 'and' | 'or';
filters: ComparisonFilter[];
};
// AI Search V2 Request Types
type AiSearchSearchRequest = {
messages: Array<{
role: 'system' | 'developer' | 'user' | 'assistant' | 'tool';
content: string | null;
}>;
ai_search_options?: {
retrieval?: {
retrieval_type?: 'vector' | 'keyword' | 'hybrid';
/** Match threshold (0-1, default 0.4) */
match_threshold?: number;
/** Maximum number of results (1-50, default 10) */
max_num_results?: number;
filters?: CompoundFilter | ComparisonFilter;
/** Context expansion (0-3, default 0) */
context_expansion?: number;
[key: string]: unknown;
};
query_rewrite?: {
enabled?: boolean;
model?: string;
rewrite_prompt?: string;
[key: string]: unknown;
};
reranking?: {
/** Enable reranking (default false) */
enabled?: boolean;
model?: '@cf/baai/bge-reranker-base' | '';
/** Match threshold (0-1, default 0.4) */
match_threshold?: number;
[key: string]: unknown;
};
[key: string]: unknown;
};
};
type AiSearchChatCompletionsRequest = {
messages: Array<{
role: 'system' | 'developer' | 'user' | 'assistant' | 'tool';
content: string | null;
}>;
model?: string;
stream?: boolean;
ai_search_options?: {
retrieval?: {
retrieval_type?: 'vector' | 'keyword' | 'hybrid';
match_threshold?: number;
max_num_results?: number;
filters?: CompoundFilter | ComparisonFilter;
context_expansion?: number;
[key: string]: unknown;
};
query_rewrite?: {
enabled?: boolean;
model?: string;
rewrite_prompt?: string;
[key: string]: unknown;
};
reranking?: {
enabled?: boolean;
model?: '@cf/baai/bge-reranker-base' | '';
match_threshold?: number;
[key: string]: unknown;
};
[key: string]: unknown;
};
[key: string]: unknown;
};
// AI Search V2 Response Types
type AiSearchSearchResponse = {
search_query: string;
chunks: Array<{
id: string;
type: string;
/** Match score (0-1) */
score: number;
text: string;
item: {
timestamp?: number;
key: string;
metadata?: Record<string, unknown>;
};
scoring_details?: {
/** Keyword match score (0-1) */
keyword_score?: number;
/** Vector similarity score (0-1) */
vector_score?: number;
};
}>;
};
type AiSearchListResponse = Array<{
id: string;
internal_id?: string;
account_id?: string;
account_tag?: string;
/** Whether the instance is enabled (default true) */
enable?: boolean;
type?: 'r2' | 'web-crawler';
source?: string;
[key: string]: unknown;
}>;
type AiSearchConfig = {
/** Instance ID (1-32 chars, pattern: ^[a-z0-9_]+(?:-[a-z0-9_]+)*$) */
id: string;
type: 'r2' | 'web-crawler';
source: string;
source_params?: object;
/** Token ID (UUID format) */
token_id?: string;
ai_gateway_id?: string;
/** Enable query rewriting (default false) */
rewrite_query?: boolean;
/** Enable reranking (default false) */
reranking?: boolean;
embedding_model?: string;
ai_search_model?: string;
};
type AiSearchInstance = {
id: string;
enable?: boolean;
type?: 'r2' | 'web-crawler';
source?: string;
[key: string]: unknown;
};
// AI Search Instance Service - Instance-level operations
declare abstract class AiSearchInstanceService {
/**
* Search the AI Search instance for relevant chunks.
* @param params Search request with messages and AI search options
* @returns Search response with matching chunks
*/
search(params: AiSearchSearchRequest): Promise<AiSearchSearchResponse>;
/**
* Generate chat completions with AI Search context.
* @param params Chat completions request with optional streaming
* @returns Response object (if streaming) or chat completion result
*/
chatCompletions(params: AiSearchChatCompletionsRequest): Promise<Response | object>;
/**
* Delete this AI Search instance.
*/
delete(): Promise<void>;
}
// AI Search Account Service - Account-level operations
declare abstract class AiSearchAccountService {
/**
* List all AI Search instances in the account.
* @returns Array of AI Search instances
*/
list(): Promise<AiSearchListResponse>;
/**
* Get an AI Search instance by ID.
* @param name Instance ID
* @returns Instance service for performing operations
*/
get(name: string): AiSearchInstanceService;
/**
* Create a new AI Search instance.
* @param config Instance configuration
* @returns Instance service for performing operations
*/
create(config: AiSearchConfig): Promise<AiSearchInstanceService>;
}
type AiImageClassificationInput = {
image: number[];
@@ -5522,7 +5739,7 @@ interface Ai_Cf_Qwen_Qwq_32B_Messages {
};
})[];
/**
* JSON schema that should be fufilled for the response.
* JSON schema that should be fulfilled for the response.
*/
guided_json?: object;
/**
@@ -5788,7 +6005,7 @@ interface Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct_Messages {
};
})[];
/**
* JSON schema that should be fufilled for the response.
* JSON schema that should be fulfilled for the response.
*/
guided_json?: object;
/**
@@ -5879,7 +6096,7 @@ interface Ai_Cf_Google_Gemma_3_12B_It_Prompt {
*/
prompt: string;
/**
* JSON schema that should be fufilled for the response.
* JSON schema that should be fulfilled for the response.
*/
guided_json?: object;
/**
@@ -6038,7 +6255,7 @@ interface Ai_Cf_Google_Gemma_3_12B_It_Messages {
};
})[];
/**
* JSON schema that should be fufilled for the response.
* JSON schema that should be fulfilled for the response.
*/
guided_json?: object;
/**
@@ -6310,7 +6527,7 @@ interface Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Messages {
})[];
response_format?: Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_JSON_Mode;
/**
* JSON schema that should be fufilled for the response.
* JSON schema that should be fulfilled for the response.
*/
guided_json?: object;
/**
@@ -6540,7 +6757,7 @@ interface Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Messages_Inner {
})[];
response_format?: Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_JSON_Mode;
/**
* JSON schema that should be fufilled for the response.
* JSON schema that should be fulfilled for the response.
*/
guided_json?: object;
/**
@@ -7590,7 +7807,7 @@ interface Ai_Cf_Ai4Bharat_Indictrans2_En_Indic_1B_Input {
*/
text: string | string[];
/**
* Target langauge to translate to
* Target language to translate to
*/
target_language: "asm_Beng" | "awa_Deva" | "ben_Beng" | "bho_Deva" | "brx_Deva" | "doi_Deva" | "eng_Latn" | "gom_Deva" | "gon_Deva" | "guj_Gujr" | "hin_Deva" | "hne_Deva" | "kan_Knda" | "kas_Arab" | "kas_Deva" | "kha_Latn" | "lus_Latn" | "mag_Deva" | "mai_Deva" | "mal_Mlym" | "mar_Deva" | "mni_Beng" | "mni_Mtei" | "npi_Deva" | "ory_Orya" | "pan_Guru" | "san_Deva" | "sat_Olck" | "snd_Arab" | "snd_Deva" | "tam_Taml" | "tel_Telu" | "urd_Arab" | "unr_Deva";
}
@@ -8521,6 +8738,48 @@ type AiModelListType = Record<string, any>;
declare abstract class Ai<AiModelList extends AiModelListType = AiModels> {
aiGatewayLogId: string | null;
gateway(gatewayId: string): AiGateway;
/**
* Access the AI Search API for managing AI-powered search instances.
*
* This is the new API that replaces AutoRAG with better namespace separation:
* - Account-level operations: `list()`, `create()`
* - Instance-level operations: `get(id).search()`, `get(id).chatCompletions()`, `get(id).delete()`
*
* @example
* ```typescript
* // List all AI Search instances
* const instances = await env.AI.aiSearch.list();
*
* // Search an instance
* const results = await env.AI.aiSearch.get('my-search').search({
* messages: [{ role: 'user', content: 'What is the policy?' }],
* ai_search_options: {
* retrieval: { max_num_results: 10 }
* }
* });
*
* // Generate chat completions with AI Search context
* const response = await env.AI.aiSearch.get('my-search').chatCompletions({
* messages: [{ role: 'user', content: 'What is the policy?' }],
* model: '@cf/meta/llama-3.3-70b-instruct-fp8-fast'
* });
* ```
*/
aiSearch(): AiSearchAccountService;
/**
* @deprecated AutoRAG has been replaced by AI Search.
* Use `env.AI.aiSearch` instead for better API design and new features.
*
* Migration guide:
* - `env.AI.autorag().list()` → `env.AI.aiSearch.list()`
* - `env.AI.autorag('id').search({ query: '...' })` → `env.AI.aiSearch.get('id').search({ messages: [{ role: 'user', content: '...' }] })`
* - `env.AI.autorag('id').aiSearch(...)` → `env.AI.aiSearch.get('id').chatCompletions(...)`
*
* Note: The old API continues to work for backwards compatibility, but new projects should use AI Search.
*
* @see AiSearchAccountService
* @param autoragId Optional instance ID (omit for account-level operations)
*/
autorag(autoragId: string): AutoRAG;
run<Name extends keyof AiModelList, Options extends AiOptions, InputOptions extends AiModelList[Name]["inputs"]>(model: Name, inputs: InputOptions, options?: Options): Promise<Options extends {
returnRawResponse: true;
@@ -8629,23 +8888,34 @@ declare abstract class AiGateway {
}): Promise<Response>;
getUrl(provider?: AIGatewayProviders | string): Promise<string>; // eslint-disable-line
}
/**
* @deprecated AutoRAG has been replaced by AI Search. Use AiSearchInternalError instead.
* @see AiSearchInternalError
*/
interface AutoRAGInternalError extends Error {
}
/**
* @deprecated AutoRAG has been replaced by AI Search. Use AiSearchNotFoundError instead.
* @see AiSearchNotFoundError
*/
interface AutoRAGNotFoundError extends Error {
}
/**
* @deprecated This error type is no longer used in the AI Search API.
*/
interface AutoRAGUnauthorizedError extends Error {
}
/**
* @deprecated AutoRAG has been replaced by AI Search. Use AiSearchNameNotSetError instead.
* @see AiSearchNameNotSetError
*/
interface AutoRAGNameNotSetError extends Error {
}
type ComparisonFilter = {
key: string;
type: 'eq' | 'ne' | 'gt' | 'gte' | 'lt' | 'lte';
value: string | number | boolean;
};
type CompoundFilter = {
type: 'and' | 'or';
filters: ComparisonFilter[];
};
/**
* @deprecated AutoRAG has been replaced by AI Search.
* Use AiSearchSearchRequest with the new API instead.
* @see AiSearchSearchRequest
*/
type AutoRagSearchRequest = {
query: string;
filters?: CompoundFilter | ComparisonFilter;
@@ -8660,13 +8930,28 @@ type AutoRagSearchRequest = {
};
rewrite_query?: boolean;
};
/**
* @deprecated AutoRAG has been replaced by AI Search.
* Use AiSearchChatCompletionsRequest with the new API instead.
* @see AiSearchChatCompletionsRequest
*/
type AutoRagAiSearchRequest = AutoRagSearchRequest & {
stream?: boolean;
system_prompt?: string;
};
/**
* @deprecated AutoRAG has been replaced by AI Search.
* Use AiSearchChatCompletionsRequest with stream: true instead.
* @see AiSearchChatCompletionsRequest
*/
type AutoRagAiSearchRequestStreaming = Omit<AutoRagAiSearchRequest, 'stream'> & {
stream: true;
};
/**
* @deprecated AutoRAG has been replaced by AI Search.
* Use AiSearchSearchResponse with the new API instead.
* @see AiSearchSearchResponse
*/
type AutoRagSearchResponse = {
object: 'vector_store.search_results.page';
search_query: string;
@@ -8683,6 +8968,11 @@ type AutoRagSearchResponse = {
has_more: boolean;
next_page: string | null;
};
/**
* @deprecated AutoRAG has been replaced by AI Search.
* Use AiSearchListResponse with the new API instead.
* @see AiSearchListResponse
*/
type AutoRagListResponse = {
id: string;
enable: boolean;
@@ -8692,14 +8982,51 @@ type AutoRagListResponse = {
paused: boolean;
status: string;
}[];
/**
* @deprecated AutoRAG has been replaced by AI Search.
* The new API returns different response formats for chat completions.
*/
type AutoRagAiSearchResponse = AutoRagSearchResponse & {
response: string;
};
/**
* @deprecated AutoRAG has been replaced by AI Search.
* Use the new AI Search API instead: `env.AI.aiSearch`
*
* Migration guide:
* - `env.AI.autorag().list()` → `env.AI.aiSearch.list()`
* - `env.AI.autorag('id').search(...)` → `env.AI.aiSearch.get('id').search(...)`
* - `env.AI.autorag('id').aiSearch(...)` → `env.AI.aiSearch.get('id').chatCompletions(...)`
*
* @see AiSearchAccountService
* @see AiSearchInstanceService
*/
declare abstract class AutoRAG {
/**
* @deprecated Use `env.AI.aiSearch.list()` instead.
* @see AiSearchAccountService.list
*/
list(): Promise<AutoRagListResponse>;
/**
* @deprecated Use `env.AI.aiSearch.get(id).search(...)` instead.
* Note: The new API uses a messages array instead of a query string.
* @see AiSearchInstanceService.search
*/
search(params: AutoRagSearchRequest): Promise<AutoRagSearchResponse>;
/**
* @deprecated Use `env.AI.aiSearch.get(id).chatCompletions(...)` instead.
* @see AiSearchInstanceService.chatCompletions
*/
aiSearch(params: AutoRagAiSearchRequestStreaming): Promise<Response>;
/**
* @deprecated Use `env.AI.aiSearch.get(id).chatCompletions(...)` instead.
* @see AiSearchInstanceService.chatCompletions
*/
aiSearch(params: AutoRagAiSearchRequest): Promise<AutoRagAiSearchResponse>;
/**
* @deprecated Use `env.AI.aiSearch.get(id).chatCompletions(...)` instead.
* @see AiSearchInstanceService.chatCompletions
*/
aiSearch(params: AutoRagAiSearchRequest): Promise<AutoRagAiSearchResponse | Response>;
}
interface BasicImageTransformations {
@@ -9450,6 +9777,10 @@ interface D1Meta {
* The region of the database instance that executed the query.
*/
served_by_region?: string;
/**
* The three letters airport code of the colo that executed the query.
*/
served_by_colo?: string;
/**
* True if-and-only-if the database instance that executed the query was the primary.
*/
@@ -9538,6 +9869,15 @@ declare abstract class D1PreparedStatement {
// ignored when `Disposable` is included in the standard lib.
interface Disposable {
}
/**
* The returned data after sending an email
*/
interface EmailSendResult {
/**
* The Email Message ID
*/
messageId: string;
}
/**
* An email message that can be sent from a Worker.
*/
@@ -9579,19 +9919,50 @@ interface ForwardableEmailMessage extends EmailMessage {
* @param headers A [Headers object](https://developer.mozilla.org/en-US/docs/Web/API/Headers).
* @returns A promise that resolves when the email message is forwarded.
*/
forward(rcptTo: string, headers?: Headers): Promise<void>;
forward(rcptTo: string, headers?: Headers): Promise<EmailSendResult>;
/**
* Reply to the sender of this email message with a new EmailMessage object.
* @param message The reply message.
* @returns A promise that resolves when the email message is replied.
*/
reply(message: EmailMessage): Promise<void>;
reply(message: EmailMessage): Promise<EmailSendResult>;
}
/** A file attachment for an email message */
type EmailAttachment = {
disposition: 'inline';
contentId: string;
filename: string;
type: string;
content: string | ArrayBuffer | ArrayBufferView;
} | {
disposition: 'attachment';
contentId?: undefined;
filename: string;
type: string;
content: string | ArrayBuffer | ArrayBufferView;
};
/** An Email Address */
interface EmailAddress {
name: string;
email: string;
}
/**
* A binding that allows a Worker to send email messages.
*/
interface SendEmail {
send(message: EmailMessage): Promise<void>;
send(message: EmailMessage): Promise<EmailSendResult>;
send(builder: {
from: string | EmailAddress;
to: string | string[];
subject: string;
replyTo?: string | EmailAddress;
cc?: string | string[];
bcc?: string | string[];
headers?: Record<string, string>;
text?: string;
html?: string;
attachments?: EmailAttachment[];
}): Promise<EmailSendResult>;
}
declare abstract class EmailEvent extends ExtendableEvent {
readonly message: ForwardableEmailMessage;
@@ -9624,7 +9995,7 @@ interface Hyperdrive {
/**
* Connect directly to Hyperdrive as if it's your database, returning a TCP socket.
*
* Calling this method returns an idential socket to if you call
* Calling this method returns an identical socket to if you call
* `connect("host:port")` using the `host` and `port` fields from this object.
* Pick whichever approach works better with your preferred DB client library.
*
@@ -9737,6 +10108,83 @@ type ImageOutputOptions = {
background?: string;
anim?: boolean;
};
interface ImageMetadata {
id: string;
filename?: string;
uploaded?: string;
requireSignedURLs: boolean;
meta?: Record<string, unknown>;
variants: string[];
draft?: boolean;
creator?: string;
}
interface ImageUploadOptions {
id?: string;
filename?: string;
requireSignedURLs?: boolean;
metadata?: Record<string, unknown>;
creator?: string;
encoding?: 'base64';
}
interface ImageUpdateOptions {
requireSignedURLs?: boolean;
metadata?: Record<string, unknown>;
creator?: string;
}
interface ImageListOptions {
limit?: number;
cursor?: string;
sortOrder?: 'asc' | 'desc';
creator?: string;
}
interface ImageList {
images: ImageMetadata[];
cursor?: string;
listComplete: boolean;
}
interface HostedImagesBinding {
/**
* Get detailed metadata for a hosted image
* @param imageId The ID of the image (UUID or custom ID)
* @returns Image metadata, or null if not found
*/
details(imageId: string): Promise<ImageMetadata | null>;
/**
* Get the raw image data for a hosted image
* @param imageId The ID of the image (UUID or custom ID)
* @returns ReadableStream of image bytes, or null if not found
*/
image(imageId: string): Promise<ReadableStream<Uint8Array> | null>;
/**
* Upload a new hosted image
* @param image The image file to upload
* @param options Upload configuration
* @returns Metadata for the uploaded image
* @throws {@link ImagesError} if upload fails
*/
upload(image: ReadableStream<Uint8Array> | ArrayBuffer, options?: ImageUploadOptions): Promise<ImageMetadata>;
/**
* Update hosted image metadata
* @param imageId The ID of the image
* @param options Properties to update
* @returns Updated image metadata
* @throws {@link ImagesError} if update fails
*/
update(imageId: string, options: ImageUpdateOptions): Promise<ImageMetadata>;
/**
* Delete a hosted image
* @param imageId The ID of the image
* @returns True if deleted, false if not found
*/
delete(imageId: string): Promise<boolean>;
/**
* List hosted images with pagination
* @param options List configuration
* @returns List of images with pagination info
* @throws {@link ImagesError} if list fails
*/
list(options?: ImageListOptions): Promise<ImageList>;
}
interface ImagesBinding {
/**
* Get image metadata (type, width and height)
@@ -9750,6 +10198,10 @@ interface ImagesBinding {
* @returns A transform handle
*/
input(stream: ReadableStream<Uint8Array>, options?: ImageInputOptions): ImageTransformer;
/**
* Access hosted images CRUD operations
*/
readonly hosted: HostedImagesBinding;
}
interface ImageTransformer {
/**
@@ -9816,7 +10268,13 @@ interface MediaTransformer {
* @param transform - Configuration for how the media should be transformed
* @returns A generator for producing the transformed media output
*/
transform(transform: MediaTransformationInputOptions): MediaTransformationGenerator;
transform(transform?: MediaTransformationInputOptions): MediaTransformationGenerator;
/**
* Generates the final media output with specified options.
* @param output - Configuration for the output format and parameters
* @returns The final transformation result containing the transformed media
*/
output(output?: MediaTransformationOutputOptions): MediaTransformationResult;
}
/**
* Generator for producing media transformation results.
@@ -9828,7 +10286,7 @@ interface MediaTransformationGenerator {
* @param output - Configuration for the output format and parameters
* @returns The final transformation result containing the transformed media
*/
output(output: MediaTransformationOutputOptions): MediaTransformationResult;
output(output?: MediaTransformationOutputOptions): MediaTransformationResult;
}
/**
* Result of a media transformation operation.
@@ -9837,19 +10295,19 @@ interface MediaTransformationGenerator {
interface MediaTransformationResult {
/**
* Returns the transformed media as a readable stream of bytes.
* @returns A stream containing the transformed media data
* @returns A promise containing a readable stream with the transformed media
*/
media(): ReadableStream<Uint8Array>;
media(): Promise<ReadableStream<Uint8Array>>;
/**
* Returns the transformed media as an HTTP response object.
* @returns The transformed media as a Response, ready to store in cache or return to users
* @returns The transformed media as a Promise<Response>, ready to store in cache or return to users
*/
response(): Response;
response(): Promise<Response>;
/**
* Returns the MIME type of the transformed media.
* @returns The content type string (e.g., 'image/jpeg', 'video/mp4')
* @returns A promise containing the content type string (e.g., 'image/jpeg', 'video/mp4')
*/
contentType(): string;
contentType(): Promise<string>;
}
/**
* Configuration options for transforming media input.
@@ -9957,7 +10415,7 @@ declare module "cloudflare:pipelines" {
protected ctx: ExecutionContext;
constructor(ctx: ExecutionContext, env: Env);
/**
* run recieves an array of PipelineRecord which can be
* run receives an array of PipelineRecord which can be
* transformed and returned to the pipeline
* @param records Incoming records from the pipeline to be transformed
* @param metadata Information about the specific pipeline calling the transformation entrypoint
@@ -10238,6 +10696,7 @@ declare namespace CloudflareWorkersModule {
timeout?: WorkflowTimeoutDuration | number;
}): Promise<WorkflowStepEvent<T>>;
}
export type WorkflowInstanceStatus = 'queued' | 'running' | 'paused' | 'errored' | 'terminated' | 'complete' | 'waiting' | 'waitingForPause' | 'unknown';
export abstract class WorkflowEntrypoint<Env = unknown, T extends Rpc.Serializable<T> | unknown = unknown> implements Rpc.WorkflowEntrypointBranded {
[Rpc.__WORKFLOW_ENTRYPOINT_BRAND]: never;
protected ctx: ExecutionContext;
@@ -10271,12 +10730,14 @@ type MarkdownDocument = {
blob: Blob;
};
type ConversionResponse = {
id: string;
name: string;
mimeType: string;
format: 'markdown';
tokens: number;
data: string;
} | {
id: string;
name: string;
mimeType: string;
format: 'error';
@@ -10294,6 +10755,7 @@ type ConversionOptions = {
images?: EmbeddedImageConversionOptions & {
convertOGImage?: boolean;
};
hostname?: string;
};
docx?: {
images?: EmbeddedImageConversionOptions;
@@ -10431,6 +10893,15 @@ declare namespace TailStream {
readonly level: "debug" | "error" | "info" | "log" | "warn";
readonly message: object;
}
interface DroppedEventsDiagnostic {
readonly diagnosticsType: "droppedEvents";
readonly count: number;
}
interface StreamDiagnostic {
readonly type: 'streamDiagnostic';
// To add new diagnostic types, define a new interface and add it to this union type.
readonly diagnostic: DroppedEventsDiagnostic;
}
// This marks the worker handler return information.
// This is separate from Outcome because the worker invocation can live for a long time after
// returning. For example - Websockets that return an http upgrade response but then continue
@@ -10447,7 +10918,7 @@ declare namespace TailStream {
readonly type: "attributes";
readonly info: Attribute[];
}
type EventType = Onset | Outcome | SpanOpen | SpanClose | DiagnosticChannelEvent | Exception | Log | Return | Attributes;
type EventType = Onset | Outcome | SpanOpen | SpanClose | DiagnosticChannelEvent | Exception | Log | StreamDiagnostic | Return | Attributes;
// Context in which this trace event lives.
interface SpanContext {
// Single id for the entire top-level invocation
@@ -10461,7 +10932,7 @@ declare namespace TailStream {
// For Hibernate and Mark this would be the span under which they were emitted.
// spanId is not set ONLY if:
// 1. This is an Onset event
// 2. We are not inherting any SpanContext. (e.g. this is a cross-account service binding or a new top-level invocation)
// 2. We are not inheriting any SpanContext. (e.g. this is a cross-account service binding or a new top-level invocation)
readonly spanId?: string;
}
interface TailEvent<Event extends EventType> {

View File

@@ -1,7 +1,7 @@
{
"$schema": "node_modules/wrangler/config-schema.json",
"name": "artsite",
"compatibility_date": "2025-12-13",
"compatibility_date": "2026-02-24",
"compatibility_flags": [
"nodejs_compat"
],
@@ -73,4 +73,4 @@
]
}
}
}
}