diff --git a/Gemma/[Gemma_2]Finetune_with_LORA.ipynb b/Gemma/[Gemma_2]Finetune_with_LORA.ipynb
new file mode 100644
index 0000000..7866259
--- /dev/null
+++ b/Gemma/[Gemma_2]Finetune_with_LORA.ipynb
@@ -0,0 +1,1159 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Copyright 2025 Google LLC\n",
+ "#\n",
+ "# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
+ "# you may not use this file except in compliance with the License.\n",
+ "# You may obtain a copy of the License at\n",
+ "#\n",
+ "# https://www.apache.org/licenses/LICENSE-2.0\n",
+ "#\n",
+ "# Unless required by applicable law or agreed to in writing, software\n",
+ "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
+ "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
+ "# See the License for the specific language governing permissions and\n",
+ "# limitations under the License."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Using PEFT (Parameter-Efficient Fine-Tuning) with Hugging Face to fine-tune a Gemma model\n",
+ "\n",
+ "<table align=\"left\">\n",
+ "  <td>Open in Colab</td>\n",
+ "  <td>View on GitHub</td>\n",
+ "</table>"
+ ]
+ },
SELECT MAX(salary) AS max_salary, MIN(salary) AS min_\n"
+ ]
+ }
+ ],
+ "source": [
+ "response = tokenizer.decode(response[0],skip_special_tokens=True)\n",
+ "print(response)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "mdm7c8klZZnw"
+ },
+ "source": [
+ "***PEFT***\n",
+ "\n",
+ "***Parameter-Efficient Fine-Tuning, is a technique used to adapt pre-trained language models (LLMs) for specific tasks by only training a small subset of the model's parameters. This is a much more efficient and less resource-intensive alternative to traditional fine-tuning, which would update every parameter in a large model.***\n",
+ "\n",
+ "***By freezing most of the original model's weights and training a small number of new or existing parameters, PEFT methods achieve comparable performance while saving significant computational power and memory.***\n"
+ ]
+ },
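+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "***A minimal, self-contained sketch of the idea (illustrative only, not used by the training flow below): freeze a pre-trained layer and learn a small low-rank update in its place.***"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Toy LoRA layer: the frozen weight W stays fixed, and only the two small\n",
+ "# matrices A and B are trained, so the output is W x + (alpha/r) * B A x.\n",
+ "import torch\n",
+ "import torch.nn as nn\n",
+ "\n",
+ "class ToyLoRALinear(nn.Module):\n",
+ "    def __init__(self, linear, r=8, alpha=16):\n",
+ "        super().__init__()\n",
+ "        self.linear = linear\n",
+ "        for p in self.linear.parameters():\n",
+ "            p.requires_grad_(False)  # freeze the pre-trained weights\n",
+ "        self.lora_A = nn.Parameter(torch.randn(r, linear.in_features) * 0.01)\n",
+ "        self.lora_B = nn.Parameter(torch.zeros(linear.out_features, r))\n",
+ "        self.scale = alpha / r\n",
+ "\n",
+ "    def forward(self, x):\n",
+ "        return self.linear(x) + self.scale * (x @ self.lora_A.T @ self.lora_B.T)\n",
+ "\n",
+ "layer = ToyLoRALinear(nn.Linear(512, 512))\n",
+ "trainable = sum(p.numel() for p in layer.parameters() if p.requires_grad)\n",
+ "total = sum(p.numel() for p in layer.parameters())\n",
+ "print(f'trainable: {trainable:,} of {total:,} parameters')"
+ ]
+ },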
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "H7mqtR45tFfH"
+ },
+ "source": [
+ "#### ***Tuning Phase***"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "model"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "Target_modules = ['q_proj','k_proj','v_proj','o_proj']"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "### QLORA hyperparameters ###\n",
+ "\n",
+ "lora_learning_rate = 1e-4\n",
+ "lora_rank = 8\n",
+ "lora_dropout = 0.2\n",
+ "lora_alpha = 16 # double of lora rank\n",
+ "\n",
+ "# even using QLORA lora config is required because LORA low rank optimization is applied after quantization and alpha should be double the rank"
+ ]
+ },
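+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "***For reference, a sketch of a typical 4-bit quantization config used with QLoRA (assumes bitsandbytes is installed; the base model would be loaded with such a config before the LoRA adapters are attached).***"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Illustrative QLoRA quantization config: 4-bit NF4 storage with bf16 compute\n",
+ "import torch\n",
+ "from transformers import BitsAndBytesConfig\n",
+ "\n",
+ "bnb_config = BitsAndBytesConfig(\n",
+ "    load_in_4bit=True,\n",
+ "    bnb_4bit_quant_type='nf4',\n",
+ "    bnb_4bit_compute_dtype=torch.bfloat16,\n",
+ "    bnb_4bit_use_double_quant=True,\n",
+ ")"
+ ]
+ },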
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "peft_config = LoraConfig(r=lora_rank,\n",
+ " lora_alpha=lora_alpha,\n",
+ " lora_dropout=lora_dropout, # A regularization technique used during training to prevent overfitting of the small, trainable LoRA matrices.\n",
+ " bias='none',\n",
+ " task_type='CAUSAL_LM', # CAUSAL_LM are those model that generates text by predicting the next word (or token) in a sequence based only on the words that have come before it\n",
+ " target_modules=Target_modules)"
+ ]
+ },
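+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "***Optional sanity check (a sketch, not part of the original flow): estimate how many parameters LoRA will add for this rank and these target modules, without modifying the model.***"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# For each targeted Linear layer, LoRA adds A (r x in_features) and B (out_features x r)\n",
+ "import torch.nn as nn\n",
+ "\n",
+ "added = 0\n",
+ "for name, module in model.named_modules():\n",
+ "    if isinstance(module, nn.Linear) and name.split('.')[-1] in target_modules:\n",
+ "        added += lora_rank * (module.in_features + module.out_features)\n",
+ "\n",
+ "total = sum(p.numel() for p in model.parameters())\n",
+ "print(f'LoRA adds ~{added:,} trainable params on top of {total:,} ({100 * added / total:.3f}%)')"
+ ]
+ },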
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "IPTZ3jKIKUQ6"
+ },
+ "source": [
+ "***Data Preparation***"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import pandas as pd\n",
+ "\n",
+ "splits = {'train': 'data/train-00000-of-00001-36a24700f19484dc.parquet', 'validation': 'data/validation-00000-of-00001-fa01d04c056ac579.parquet'}\n",
+ "df_train = pd.read_parquet(\"hf://datasets/lamini/spider_text_to_sql/\" + splits[\"train\"])\n",
+ "df_test = pd.read_parquet(\"hf://datasets/lamini/spider_text_to_sql/\" + splits[\"validation\"])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df = pd.merge(df_train,df_test,how ='outer')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def remove(row):\n",
+ " return row.split('\\n\\n')[-1].replace('[/INST]','')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df['input'] = df['input'].apply(remove)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "data = []\n",
+ "for txt,query in zip(df['input'],df['output']):\n",
+ " template = f\" {txt.split(':')[-1]} , {query}\"\n",
+ " data.append(template)"
+ ]
+ },
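+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# quick look at one formatted training string: the question, then ' , ', then the SQL query\n",
+ "print(data[0])"
+ ]
+ },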
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "(8034, 2)"
+ ]
+ },
+ "execution_count": 19,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df.shape"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "8034"
+ ]
+ },
+ "execution_count": 20,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "len(data)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 21,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# we are only training on 2000 for quick training\n",
+ "\n",
+ "data_for_training = data[:2000]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "data_for_training"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 23,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from datasets import Dataset\n",
+ "import pandas as pd\n",
+ "\n",
+ "pd_data = pd.DataFrame(data_for_training,columns=['text'])\n",
+ "hf_dataset = Dataset.from_pandas(pd_data)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 24,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "Dataset({\n",
+ " features: ['text'],\n",
+ " num_rows: 2000\n",
+ "})"
+ ]
+ },
+ "execution_count": 24,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "hf_dataset"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "-z1aVME3KPPU"
+ },
+ "source": [
+ "***Training Phase***"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 26,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "### training configuration ###\n",
+ "\n",
+ "output_dir = \"./results\"\n",
+ "\n",
+ "# Number of training epochs\n",
+ "num_train_epochs = 1\n",
+ "\n",
+ "# Batch size per GPU for training\n",
+ "train_batch_size_perGPU = 1\n",
+ "\n",
+ "# Batch size per GPU for evaluation\n",
+ "eval_batch_size_perGPU = 1\n",
+ "\n",
+ "# Number of update steps to accumulate the gradients for if our setup can manage it, keeping it simple with 1 works fine\n",
+ "gradient_accumulation_steps = 1\n",
+ "\n",
+ "# Enable gradient checkpointing\n",
+ "gradient_checkpointing = True\n",
+ "\n",
+ "# Maximum gradient normal (gradient clipping)\n",
+ "max_grad_norm = 0.3\n",
+ "\n",
+ "# Optimizer to use\n",
+ "optimizer_ = \"paged_adamw_32bit\"\n",
+ "\n",
+ "# learning rate (AdamW optimizer), lower learning rates tend to provide more stable and gradual learning.\n",
+ "learning_rate = 2e-4\n",
+ "\n",
+ "# Weight decay to apply to all layers except bias/LayerNorm weights\n",
+ "weight_decay = 0.001\n",
+ "\n",
+ "# Learning rate schedule\n",
+ "lr_scheduler_type = \"cosine\"\n",
+ "\n",
+ "# Number of training steps (overrides num_train_epochs)\n",
+ "max_steps = -1\n",
+ "\n",
+ "# Ratio of steps for a linear warmup (from 0 to learning rate) (optional)\n",
+ "warmup_ratio = 0.03"
+ ]
+ },
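+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "***A quick sketch (not part of the original flow) of how the cosine schedule decays the learning rate over the roughly 2000 optimizer steps of this run, driven by a dummy optimizer.***"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import torch\n",
+ "from transformers import get_cosine_schedule_with_warmup\n",
+ "\n",
+ "# dummy one-parameter optimizer, just to drive the scheduler\n",
+ "dummy_opt = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=learning_rate)\n",
+ "schedule = get_cosine_schedule_with_warmup(dummy_opt, num_warmup_steps=0, num_training_steps=2000)\n",
+ "\n",
+ "lrs = []\n",
+ "for _ in range(2000):\n",
+ "    dummy_opt.step()\n",
+ "    schedule.step()\n",
+ "    lrs.append(schedule.get_last_lr()[0])\n",
+ "\n",
+ "print(f'start: {lrs[0]:.2e}, midpoint: {lrs[1000]:.2e}, end: {lrs[-1]:.2e}')"
+ ]
+ },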
+ {
+ "cell_type": "code",
+ "execution_count": 27,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "training_args = TrainingArguments(output_dir=output_dir,\n",
+ " num_train_epochs=num_train_epochs,\n",
+ " per_device_train_batch_size=train_batch_size_perGPU,\n",
+ " per_device_eval_batch_size=eval_batch_size_perGPU,\n",
+ " gradient_accumulation_steps=gradient_accumulation_steps,\n",
+ " optim=optimizer_,\n",
+ " save_steps=0,\n",
+ " logging_steps=25,\n",
+ " learning_rate=learning_rate,\n",
+ " weight_decay=weight_decay,\n",
+ " fp16=False,\n",
+ " bf16=True,\n",
+ " max_grad_norm=max_grad_norm,\n",
+ " max_steps=max_steps,\n",
+ " # warmup_ratio=warmup_ratio,\n",
+ " group_by_length=True, # Group sequences into batches with same length\n",
+ " lr_scheduler_type=lr_scheduler_type,\n",
+ " report_to=\"tensorboard\"\n",
+ " )"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "trainer = SFTTrainer(model=model,\n",
+ " args=training_args,\n",
+ " peft_config=peft_config,\n",
+ " train_dataset=hf_dataset,\n",
+ " processing_class=tokenizer,\n",
+ " )"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 30,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "torch.cuda.empty_cache()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 31,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "148"
+ ]
+ },
+ "execution_count": 31,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "import gc\n",
+ "\n",
+ "gc.collect()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 32,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "The tokenizer has new PAD/BOS/EOS tokens that differ from the model config and generation config. The model config and generation config were aligned accordingly, being updated with the tokenizer's values. Updated tokens: {'pad_token_id': 1}.\n"
+ ]
+ },
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " [2000/2000 24:59, Epoch 1/1]\n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " Step \n",
+ " Training Loss \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " 25 \n",
+ " 1.581900 \n",
+ " \n",
+ " \n",
+ " 50 \n",
+ " 1.224500 \n",
+ " \n",
+ " \n",
+ " 75 \n",
+ " 0.967200 \n",
+ " \n",
+ " \n",
+ " 100 \n",
+ " 1.154800 \n",
+ " \n",
+ " \n",
+ " 125 \n",
+ " 0.941500 \n",
+ " \n",
+ " \n",
+ " 150 \n",
+ " 1.078100 \n",
+ " \n",
+ " \n",
+ " 175 \n",
+ " 0.981500 \n",
+ " \n",
+ " \n",
+ " 200 \n",
+ " 1.025800 \n",
+ " \n",
+ " \n",
+ " 225 \n",
+ " 0.897000 \n",
+ " \n",
+ " \n",
+ " 250 \n",
+ " 1.017000 \n",
+ " \n",
+ " \n",
+ " 275 \n",
+ " 0.802200 \n",
+ " \n",
+ " \n",
+ " 300 \n",
+ " 0.902100 \n",
+ " \n",
+ " \n",
+ " 325 \n",
+ " 0.936700 \n",
+ " \n",
+ " \n",
+ " 350 \n",
+ " 1.037900 \n",
+ " \n",
+ " \n",
+ " 375 \n",
+ " 0.816500 \n",
+ " \n",
+ " \n",
+ " 400 \n",
+ " 1.083700 \n",
+ " \n",
+ " \n",
+ " 425 \n",
+ " 0.743400 \n",
+ " \n",
+ " \n",
+ " 450 \n",
+ " 0.984600 \n",
+ " \n",
+ " \n",
+ " 475 \n",
+ " 0.811000 \n",
+ " \n",
+ " \n",
+ " 500 \n",
+ " 0.966900 \n",
+ " \n",
+ " \n",
+ " 525 \n",
+ " 0.802600 \n",
+ " \n",
+ " \n",
+ " 550 \n",
+ " 1.042400 \n",
+ " \n",
+ " \n",
+ " 575 \n",
+ " 0.708000 \n",
+ " \n",
+ " \n",
+ " 600 \n",
+ " 0.915200 \n",
+ " \n",
+ " \n",
+ " 625 \n",
+ " 0.666700 \n",
+ " \n",
+ " \n",
+ " 650 \n",
+ " 0.913100 \n",
+ " \n",
+ " \n",
+ " 675 \n",
+ " 0.663700 \n",
+ " \n",
+ " \n",
+ " 700 \n",
+ " 0.863700 \n",
+ " \n",
+ " \n",
+ " 725 \n",
+ " 0.641200 \n",
+ " \n",
+ " \n",
+ " 750 \n",
+ " 0.883200 \n",
+ " \n",
+ " \n",
+ " 775 \n",
+ " 0.694100 \n",
+ " \n",
+ " \n",
+ " 800 \n",
+ " 0.772600 \n",
+ " \n",
+ " \n",
+ " 825 \n",
+ " 0.588200 \n",
+ " \n",
+ " \n",
+ " 850 \n",
+ " 0.949100 \n",
+ " \n",
+ " \n",
+ " 875 \n",
+ " 0.674700 \n",
+ " \n",
+ " \n",
+ " 900 \n",
+ " 0.875200 \n",
+ " \n",
+ " \n",
+ " 925 \n",
+ " 0.635000 \n",
+ " \n",
+ " \n",
+ " 950 \n",
+ " 0.783500 \n",
+ " \n",
+ " \n",
+ " 975 \n",
+ " 0.659000 \n",
+ " \n",
+ " \n",
+ " 1000 \n",
+ " 0.886500 \n",
+ " \n",
+ " \n",
+ " 1025 \n",
+ " 0.688900 \n",
+ " \n",
+ " \n",
+ " 1050 \n",
+ " 0.840300 \n",
+ " \n",
+ " \n",
+ " 1075 \n",
+ " 0.745900 \n",
+ " \n",
+ " \n",
+ " 1100 \n",
+ " 0.714500 \n",
+ " \n",
+ " \n",
+ " 1125 \n",
+ " 0.716300 \n",
+ " \n",
+ " \n",
+ " 1150 \n",
+ " 0.885100 \n",
+ " \n",
+ " \n",
+ " 1175 \n",
+ " 0.694600 \n",
+ " \n",
+ " \n",
+ " 1200 \n",
+ " 0.854000 \n",
+ " \n",
+ " \n",
+ " 1225 \n",
+ " 0.749700 \n",
+ " \n",
+ " \n",
+ " 1250 \n",
+ " 0.850900 \n",
+ " \n",
+ " \n",
+ " 1275 \n",
+ " 0.694900 \n",
+ " \n",
+ " \n",
+ " 1300 \n",
+ " 0.824100 \n",
+ " \n",
+ " \n",
+ " 1325 \n",
+ " 0.651900 \n",
+ " \n",
+ " \n",
+ " 1350 \n",
+ " 0.749800 \n",
+ " \n",
+ " \n",
+ " 1375 \n",
+ " 0.611700 \n",
+ " \n",
+ " \n",
+ " 1400 \n",
+ " 0.906700 \n",
+ " \n",
+ " \n",
+ " 1425 \n",
+ " 0.509800 \n",
+ " \n",
+ " \n",
+ " 1450 \n",
+ " 0.784100 \n",
+ " \n",
+ " \n",
+ " 1475 \n",
+ " 0.634900 \n",
+ " \n",
+ " \n",
+ " 1500 \n",
+ " 0.904100 \n",
+ " \n",
+ " \n",
+ " 1525 \n",
+ " 0.637300 \n",
+ " \n",
+ " \n",
+ " 1550 \n",
+ " 0.861400 \n",
+ " \n",
+ " \n",
+ " 1575 \n",
+ " 0.621300 \n",
+ " \n",
+ " \n",
+ " 1600 \n",
+ " 0.782500 \n",
+ " \n",
+ " \n",
+ " 1625 \n",
+ " 0.556800 \n",
+ " \n",
+ " \n",
+ " 1650 \n",
+ " 0.869100 \n",
+ " \n",
+ " \n",
+ " 1675 \n",
+ " 0.620900 \n",
+ " \n",
+ " \n",
+ " 1700 \n",
+ " 0.778200 \n",
+ " \n",
+ " \n",
+ " 1725 \n",
+ " 0.521900 \n",
+ " \n",
+ " \n",
+ " 1750 \n",
+ " 0.923000 \n",
+ " \n",
+ " \n",
+ " 1775 \n",
+ " 0.616000 \n",
+ " \n",
+ " \n",
+ " 1800 \n",
+ " 0.840300 \n",
+ " \n",
+ " \n",
+ " 1825 \n",
+ " 0.520800 \n",
+ " \n",
+ " \n",
+ " 1850 \n",
+ " 0.806700 \n",
+ " \n",
+ " \n",
+ " 1875 \n",
+ " 0.694200 \n",
+ " \n",
+ " \n",
+ " 1900 \n",
+ " 0.875100 \n",
+ " \n",
+ " \n",
+ " 1925 \n",
+ " 0.623100 \n",
+ " \n",
+ " \n",
+ " 1950 \n",
+ " 0.775000 \n",
+ " \n",
+ " \n",
+ " 1975 \n",
+ " 0.632900 \n",
+ " \n",
+ " \n",
+ " 2000 \n",
+ " 0.787400 \n",
+ " \n",
+ " \n",
+ "
"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/plain": [
+ "TrainOutput(global_step=2000, training_loss=0.8115992393493653, metrics={'train_runtime': 1502.325, 'train_samples_per_second': 1.331, 'train_steps_per_second': 1.331, 'total_flos': 1577734916802048.0, 'train_loss': 0.8115992393493653, 'epoch': 1.0})"
+ ]
+ },
+ "execution_count": 32,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "trainer.train()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 33,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "application/javascript": "\n (async () => {\n const url = new URL(await google.colab.kernel.proxyPort(6006, {'cache': true}));\n url.searchParams.set('tensorboardColab', 'true');\n const iframe = document.createElement('iframe');\n iframe.src = url;\n iframe.setAttribute('width', '100%');\n iframe.setAttribute('height', '800');\n iframe.setAttribute('frameborder', 0);\n document.body.appendChild(iframe);\n })();\n ",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "%load_ext tensorboard\n",
+ "%tensorboard --logdir results/runs"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 34,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# save model to the local folder\n",
+ "\n",
+ "trainer.model.save_pretrained('finetuned_model')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 35,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "0"
+ ]
+ },
+ "execution_count": 35,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "del model\n",
+ "del trainer\n",
+ "gc.collect()\n",
+ "gc.collect()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "eI0I71t5Jfxw"
+ },
+ "source": [
+ "#### ***Merging Weights of Lora Config with Base model and Pushing to huggingfacehub models***"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from peft import PeftModel\n",
+ "\n",
+ "base_model = AutoModelForCausalLM.from_pretrained(\n",
+ " model_name,\n",
+ " low_cpu_mem_usage=True,\n",
+ " return_dict=True,\n",
+ " torch_dtype=torch.float16,\n",
+ " device_map='auto',\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 37,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "model = PeftModel.from_pretrained(base_model,r'/content/finetuned_model') # This path is only for google colab\n",
+ "model = model.merge_and_unload()\n",
+ "\n",
+ "# reloading tokenizer\n",
+ "tokenizer = AutoTokenizer.from_pretrained(model_name,trust_remote_code=True)\n",
+ "tokenizer.pad_token = tokenizer.eos_token\n",
+ "tokenizer.padding_side = 'right'"
+ ]
+ },
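+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "***Optional smoke test (a sketch with a made-up prompt): check that the merged model still completes a question with a SQL query, using the same question-then-query format seen in training.***"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# hypothetical prompt in the training format: the question followed by ' , '\n",
+ "prompt = ' List the name of all employees , '\n",
+ "inputs = tokenizer(prompt, return_tensors='pt').to(model.device)\n",
+ "output_ids = model.generate(**inputs, max_new_tokens=40)\n",
+ "print(tokenizer.decode(output_ids[0], skip_special_tokens=True))"
+ ]
+ },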
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import locale\n",
+ "\n",
+ "locale.preferred_encoding = lambda: \"UTF-8\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "name = \"shiv-am-04/gemma2-2b-SQL\"\n",
+ "\n",
+ "! huggingface-cli login\n",
+ "\n",
+ "model.push_to_hub(name, check_pr=True)\n",
+ "\n",
+ "tokenizer.push_to_hub(name,check_pr=True)"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "name": "python",
+ "version": "3.11.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}