OPENAI-PROMPTING
Simon-Pierre Boucher
2024-09-14
Basic prompt example
In [11]:
import os
import requests
from dotenv import load_dotenv
# Load environment variables from the .env file
load_dotenv()
# Read the OpenAI API key from the environment (never hard-code secrets)
api_key = os.getenv("OPENAI_API_KEY")
def set_open_params(temperature=0.7, max_tokens=2000, top_p=1.0,
                    frequency_penalty=0.0, presence_penalty=0.0):
    """Build the default parameter set for an OpenAI chat-completion call.

    Every argument defaults to the value previously hard-coded, so the
    existing no-argument call ``set_open_params()`` is unchanged; the
    signature now matches the parameterized versions used in later cells.

    Parameters:
        temperature (float): Sampling temperature (creativity).
        max_tokens (int): Maximum number of tokens in the reply.
        top_p (float): Nucleus-sampling probability mass.
        frequency_penalty (float): Penalty for repeated tokens.
        presence_penalty (float): Penalty encouraging new topics.

    Returns:
        dict: Parameters ready to pass to the chat-completions endpoint.
    """
    return {
        "model": "gpt-3.5-turbo",  # default model
        "temperature": temperature,
        "max_tokens": max_tokens,
        "top_p": top_p,
        "frequency_penalty": frequency_penalty,
        "presence_penalty": presence_penalty
    }
def generate_openai_text(api_key, model, messages, temperature=1.0, max_tokens=2000, top_p=1.0,
                         frequency_penalty=0.0, presence_penalty=0.0, timeout=60):
    """Call the OpenAI chat-completions API and return the parsed JSON.

    Parameters:
        api_key (str): OpenAI API key, sent as a Bearer token.
        model (str): Chat model identifier.
        messages (list): Chat messages, each ``{"role": ..., "content": ...}``.
        timeout (int | float): Max seconds to wait for the HTTP response.
            New keyword with a default, so existing callers are unchanged;
            without it the request could hang indefinitely.
        Remaining arguments are sampling parameters forwarded verbatim.

    Returns:
        dict | None: Decoded API response, or None on any request failure
        (the error is printed, preserving the original best-effort style).
    """
    url = "https://api.openai.com/v1/chat/completions"
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}"
    }
    data = {
        "model": model,
        "messages": messages,
        "temperature": temperature,
        "max_tokens": max_tokens,
        "top_p": top_p,
        "frequency_penalty": frequency_penalty,
        "presence_penalty": presence_penalty
    }
    try:
        # timeout prevents the call from blocking forever on network issues
        response = requests.post(url, headers=headers, json=data, timeout=timeout)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        print(f"An error occurred: {e}")
        return None
def format_openai_response(response):
    """Format an OpenAI chat response down to the assistant's message.

    Parameters:
        response (dict | None): Parsed JSON from the chat-completions API,
            or None when the request failed.

    Returns:
        str: ``**Assistant:** ...`` markdown, or a fallback notice when the
        response is missing or has no choices.  The original indexed
        ``choices[0]`` after only checking key presence, raising IndexError
        on an empty ``choices`` list; ``.get("choices")`` truthiness fixes that.
    """
    if response and response.get("choices"):
        assistant_message = response["choices"][0]["message"]["content"]
        formatted_text = f"**Assistant:**\n\n{assistant_message}\n"
        return formatted_text
    else:
        return "No valid response received."
# Example usage
params = set_open_params()
prompt = "The sky is"
# Build the message list for the API
messages = [
    {"role": "user", "content": prompt}
]
# Call the model and collect its response
response = generate_openai_text(
    api_key,
    params["model"],
    messages,
    temperature=params["temperature"],
    max_tokens=params["max_tokens"],
    top_p=params["top_p"],
    frequency_penalty=params["frequency_penalty"],
    presence_penalty=params["presence_penalty"]
)
# Format and display the response
formatted_response = format_openai_response(response)
print(formatted_response)
Text Summarization
In [12]:
import os
import requests
from dotenv import load_dotenv
from IPython.display import Markdown, display
# Load environment variables from the .env file
load_dotenv()
# Read the OpenAI API key from the environment (never hard-code secrets)
api_key = os.getenv("OPENAI_API_KEY")
def set_open_params(temperature=1.0, max_tokens=2000, top_p=1.0, frequency_penalty=0.0, presence_penalty=0.0):
    """Assemble the parameter dict for an OpenAI chat-completion call.

    Parameters:
        temperature (float): Sampling temperature (creativity).
        max_tokens (int): Maximum number of tokens in the reply.
        top_p (float): Nucleus-sampling probability mass.
        frequency_penalty (float): Penalty for repeated tokens.
        presence_penalty (float): Penalty encouraging new topics.

    Returns:
        dict: Model name plus the supplied sampling parameters.
    """
    return dict(
        model="gpt-3.5-turbo",
        temperature=temperature,
        max_tokens=max_tokens,
        top_p=top_p,
        frequency_penalty=frequency_penalty,
        presence_penalty=presence_penalty,
    )
def generate_openai_text(api_key, model, messages, temperature=1.0, max_tokens=2000, top_p=1.0,
                         frequency_penalty=0.0, presence_penalty=0.0, timeout=60):
    """Call the OpenAI chat-completions API; return parsed JSON or None on error.

    ``timeout`` (seconds) is a new, defaulted keyword — existing callers are
    unchanged — that stops the HTTP request from hanging indefinitely.
    All other arguments are forwarded verbatim to the API.
    """
    url = "https://api.openai.com/v1/chat/completions"
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}"
    }
    data = {
        "model": model,
        "messages": messages,
        "temperature": temperature,
        "max_tokens": max_tokens,
        "top_p": top_p,
        "frequency_penalty": frequency_penalty,
        "presence_penalty": presence_penalty
    }
    try:
        # timeout prevents the call from blocking forever on network issues
        response = requests.post(url, headers=headers, json=data, timeout=timeout)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        print(f"An error occurred: {e}")
        return None
# Set the parameters with a temperature of 0.7
params = set_open_params(temperature=0.7)
# Prompt
prompt = """Antibiotics are a type of medication used to treat bacterial infections. They work by either killing the bacteria or preventing them from reproducing, allowing the body's immune system to fight off the infection. Antibiotics are usually taken orally in the form of pills, capsules, or liquid solutions, or sometimes administered intravenously. They are not effective against viral infections, and using them inappropriately can lead to antibiotic resistance.
Explain the above in one sentence:"""
# Build the message list for the API
messages = [
    {
        "role": "user",
        "content": prompt
    }
]
# Call the model and collect its response
response = generate_openai_text(
    api_key,
    params["model"],
    messages,
    temperature=params["temperature"],
    max_tokens=params["max_tokens"],
    top_p=params["top_p"],
    frequency_penalty=params["frequency_penalty"],
    presence_penalty=params["presence_penalty"]
)
# Render the assistant's reply as Markdown in the notebook
if response and "choices" in response:
    display(Markdown(response['choices'][0]['message']['content']))
else:
    print("No valid response received.")
Question Answering
In [13]:
import os
import requests
from dotenv import load_dotenv
from IPython.display import Markdown, display
# Load environment variables from the .env file
load_dotenv()
# Read the OpenAI API key from the environment (never hard-code secrets)
api_key = os.getenv("OPENAI_API_KEY")
def set_open_params(temperature=1.0, max_tokens=2000, top_p=1.0, frequency_penalty=0.0, presence_penalty=0.0):
    """Assemble the parameter dict for an OpenAI chat-completion call."""
    return dict(
        model="gpt-3.5-turbo",
        temperature=temperature,
        max_tokens=max_tokens,
        top_p=top_p,
        frequency_penalty=frequency_penalty,
        presence_penalty=presence_penalty,
    )
def generate_openai_text(api_key, model, messages, temperature=1.0, max_tokens=2000, top_p=1.0,
                         frequency_penalty=0.0, presence_penalty=0.0, timeout=60):
    """Call the OpenAI chat-completions API; return parsed JSON or None on error.

    ``timeout`` (seconds) is a new, defaulted keyword — existing callers are
    unchanged — that stops the HTTP request from hanging indefinitely.
    All other arguments are forwarded verbatim to the API.
    """
    url = "https://api.openai.com/v1/chat/completions"
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}"
    }
    data = {
        "model": model,
        "messages": messages,
        "temperature": temperature,
        "max_tokens": max_tokens,
        "top_p": top_p,
        "frequency_penalty": frequency_penalty,
        "presence_penalty": presence_penalty
    }
    try:
        # timeout prevents the call from blocking forever on network issues
        response = requests.post(url, headers=headers, json=data, timeout=timeout)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        print(f"An error occurred: {e}")
        return None
# Set the parameters
params = set_open_params(temperature=0.7)
# Prompt
prompt = """Answer the question based on the context below. Keep the answer short and concise. Respond "Unsure about answer" if not sure about the answer.
Context: Teplizumab traces its roots to a New Jersey drug company called Ortho Pharmaceutical. There, scientists generated an early version of the antibody, dubbed OKT3. Originally sourced from mice, the molecule was able to bind to the surface of T cells and limit their cell-killing potential. In 1986, it was approved to help prevent organ rejection after kidney transplants, making it the first therapeutic antibody allowed for human use.
Question: What was OKT3 originally sourced from?
Answer:"""
# Build the message list for the API
messages = [
    {
        "role": "user",
        "content": prompt
    }
]
# Call the model and collect its response
response = generate_openai_text(
    api_key,
    params["model"],
    messages,
    temperature=params["temperature"],
    max_tokens=params["max_tokens"],
    top_p=params["top_p"],
    frequency_penalty=params["frequency_penalty"],
    presence_penalty=params["presence_penalty"]
)
# Render the assistant's reply as Markdown in the notebook
if response and "choices" in response:
    display(Markdown(response['choices'][0]['message']['content']))
else:
    print("No valid response received.")
Text Classification
In [14]:
import os
import requests
from dotenv import load_dotenv
from IPython.display import Markdown, display
# Load environment variables from the .env file
load_dotenv()
# Read the OpenAI API key from the environment (never hard-code secrets)
api_key = os.getenv("OPENAI_API_KEY")
def set_open_params(temperature=0.7, max_tokens=2000, top_p=1.0, frequency_penalty=0.0, presence_penalty=0.0):
    """Assemble the parameter dict for an OpenAI chat-completion call."""
    return dict(
        model="gpt-3.5-turbo",
        temperature=temperature,
        max_tokens=max_tokens,
        top_p=top_p,
        frequency_penalty=frequency_penalty,
        presence_penalty=presence_penalty,
    )
def generate_openai_text(api_key, model, messages, temperature=1.0, max_tokens=2000, top_p=1.0,
                         frequency_penalty=0.0, presence_penalty=0.0, timeout=60):
    """Call the OpenAI chat-completions API; return parsed JSON or None on error.

    ``timeout`` (seconds) is a new, defaulted keyword — existing callers are
    unchanged — that stops the HTTP request from hanging indefinitely.
    All other arguments are forwarded verbatim to the API.
    """
    url = "https://api.openai.com/v1/chat/completions"
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}"
    }
    data = {
        "model": model,
        "messages": messages,
        "temperature": temperature,
        "max_tokens": max_tokens,
        "top_p": top_p,
        "frequency_penalty": frequency_penalty,
        "presence_penalty": presence_penalty
    }
    try:
        # timeout prevents the call from blocking forever on network issues
        response = requests.post(url, headers=headers, json=data, timeout=timeout)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        print(f"An error occurred: {e}")
        return None
# Set the parameters
params = set_open_params(temperature=0.7)
# Prompt to classify the text
prompt = """Classify the text into neutral, negative, or positive.
Text: I think the food was okay.
Sentiment:"""
# Build the message list for the API
messages = [
    {
        "role": "user",
        "content": prompt
    }
]
# Call the model and collect its response
response = generate_openai_text(
    api_key,
    params["model"],
    messages,
    temperature=params["temperature"],
    max_tokens=params["max_tokens"],
    top_p=params["top_p"],
    frequency_penalty=params["frequency_penalty"],
    presence_penalty=params["presence_penalty"]
)
# Render the assistant's reply as Markdown in the notebook
if response and "choices" in response:
    display(Markdown(response['choices'][0]['message']['content']))
else:
    print("No valid response received.")
Role Playing
In [15]:
import os
import requests
from dotenv import load_dotenv
from IPython.display import Markdown, display
# Load environment variables from the .env file
load_dotenv()
# Read the OpenAI API key from the environment (never hard-code secrets)
api_key = os.getenv("OPENAI_API_KEY")
def set_open_params(temperature=0.7, max_tokens=2000, top_p=1.0, frequency_penalty=0.0, presence_penalty=0.0):
    """Assemble the parameter dict for an OpenAI chat-completion call."""
    return dict(
        model="gpt-3.5-turbo",
        temperature=temperature,
        max_tokens=max_tokens,
        top_p=top_p,
        frequency_penalty=frequency_penalty,
        presence_penalty=presence_penalty,
    )
def generate_openai_text(api_key, model, messages, temperature=1.0, max_tokens=2000, top_p=1.0,
                         frequency_penalty=0.0, presence_penalty=0.0, timeout=60):
    """Call the OpenAI chat-completions API; return parsed JSON or None on error.

    ``timeout`` (seconds) is a new, defaulted keyword — existing callers are
    unchanged — that stops the HTTP request from hanging indefinitely.
    All other arguments are forwarded verbatim to the API.
    """
    url = "https://api.openai.com/v1/chat/completions"
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}"
    }
    data = {
        "model": model,
        "messages": messages,
        "temperature": temperature,
        "max_tokens": max_tokens,
        "top_p": top_p,
        "frequency_penalty": frequency_penalty,
        "presence_penalty": presence_penalty
    }
    try:
        # timeout prevents the call from blocking forever on network issues
        response = requests.post(url, headers=headers, json=data, timeout=timeout)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        print(f"An error occurred: {e}")
        return None
# Set the parameters
params = set_open_params(temperature=0.7)
# Prompt for the conversation
prompt = """The following is a conversation with an AI research assistant. The assistant's tone is technical and scientific.
Human: Hello, who are you?
AI: Greetings! I am an AI research assistant. How can I help you today?
Human: Can you tell me about the creation of black holes?
AI:"""
# Build the message list for the API
messages = [
    {
        "role": "user",
        "content": prompt
    }
]
# Call the model and collect its response
response = generate_openai_text(
    api_key,
    params["model"],
    messages,
    temperature=params["temperature"],
    max_tokens=params["max_tokens"],
    top_p=params["top_p"],
    frequency_penalty=params["frequency_penalty"],
    presence_penalty=params["presence_penalty"]
)
# Render the assistant's reply as Markdown in the notebook
if response and "choices" in response:
    display(Markdown(response['choices'][0]['message']['content']))
else:
    print("No valid response received.")
Code Generation
In [16]:
import os
import requests
from dotenv import load_dotenv
from IPython.display import Markdown, display
# Load environment variables from the .env file
load_dotenv()
# Read the OpenAI API key from the environment (never hard-code secrets)
api_key = os.getenv("OPENAI_API_KEY")
def set_open_params(temperature=0.7, max_tokens=150, top_p=1.0, frequency_penalty=0.0, presence_penalty=0.0):
    """Assemble the parameter dict for an OpenAI chat-completion call."""
    return dict(
        model="gpt-3.5-turbo",
        temperature=temperature,
        max_tokens=max_tokens,
        top_p=top_p,
        frequency_penalty=frequency_penalty,
        presence_penalty=presence_penalty,
    )
def generate_openai_text(api_key, model, messages, temperature=1.0, max_tokens=150, top_p=1.0,
                         frequency_penalty=0.0, presence_penalty=0.0, timeout=60):
    """Call the OpenAI chat-completions API; return parsed JSON or None on error.

    ``timeout`` (seconds) is a new, defaulted keyword — existing callers are
    unchanged — that stops the HTTP request from hanging indefinitely.
    All other arguments are forwarded verbatim to the API.
    """
    url = "https://api.openai.com/v1/chat/completions"
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}"
    }
    data = {
        "model": model,
        "messages": messages,
        "temperature": temperature,
        "max_tokens": max_tokens,
        "top_p": top_p,
        "frequency_penalty": frequency_penalty,
        "presence_penalty": presence_penalty
    }
    try:
        # timeout prevents the call from blocking forever on network issues
        response = requests.post(url, headers=headers, json=data, timeout=timeout)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        print(f"An error occurred: {e}")
        return None
# Set the parameters
params = set_open_params(temperature=0.7, max_tokens=100)
# Prompt for the MySQL query
prompt = """\"\"\"
Table departments, columns = [DepartmentId, DepartmentName]
Table students, columns = [DepartmentId, StudentId, StudentName]
Create a MySQL query for all students in the Computer Science Department
\"\"\""""
# Build the message list for the API
messages = [
    {
        "role": "user",
        "content": prompt
    }
]
# Call the model and collect its response
response = generate_openai_text(
    api_key,
    params["model"],
    messages,
    temperature=params["temperature"],
    max_tokens=params["max_tokens"],
    top_p=params["top_p"],
    frequency_penalty=params["frequency_penalty"],
    presence_penalty=params["presence_penalty"]
)
# Render the assistant's reply as Markdown in the notebook
if response and "choices" in response:
    display(Markdown(response['choices'][0]['message']['content']))
else:
    print("No valid response received.")
Reasoning
In [17]:
import os
import requests
from dotenv import load_dotenv
from IPython.display import Markdown, display
# Load environment variables from the .env file
load_dotenv()
# Read the OpenAI API key from the environment (never hard-code secrets)
api_key = os.getenv("OPENAI_API_KEY")
def set_open_params(temperature=0.7, max_tokens=150, top_p=1.0, frequency_penalty=0.0, presence_penalty=0.0):
    """Assemble the parameter dict for an OpenAI chat-completion call."""
    return dict(
        model="gpt-3.5-turbo",
        temperature=temperature,
        max_tokens=max_tokens,
        top_p=top_p,
        frequency_penalty=frequency_penalty,
        presence_penalty=presence_penalty,
    )
def generate_openai_text(api_key, model, messages, temperature=1.0, max_tokens=150, top_p=1.0,
                         frequency_penalty=0.0, presence_penalty=0.0, timeout=60):
    """Call the OpenAI chat-completions API; return parsed JSON or None on error.

    ``timeout`` (seconds) is a new, defaulted keyword — existing callers are
    unchanged — that stops the HTTP request from hanging indefinitely.
    All other arguments are forwarded verbatim to the API.
    """
    url = "https://api.openai.com/v1/chat/completions"
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}"
    }
    data = {
        "model": model,
        "messages": messages,
        "temperature": temperature,
        "max_tokens": max_tokens,
        "top_p": top_p,
        "frequency_penalty": frequency_penalty,
        "presence_penalty": presence_penalty
    }
    try:
        # timeout prevents the call from blocking forever on network issues
        response = requests.post(url, headers=headers, json=data, timeout=timeout)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        print(f"An error occurred: {e}")
        return None
# Set the parameters
params = set_open_params(temperature=0.7, max_tokens=150)
# Prompt for the task
prompt = """The odd numbers in this group add up to an even number: 15, 32, 5, 13, 82, 7, 1.
Solve by breaking the problem into steps. First, identify the odd numbers, add them, and indicate whether the result is odd or even."""
# Build the message list for the API
messages = [
    {
        "role": "user",
        "content": prompt
    }
]
# Call the model and collect its response
response = generate_openai_text(
    api_key,
    params["model"],
    messages,
    temperature=params["temperature"],
    max_tokens=params["max_tokens"],
    top_p=params["top_p"],
    frequency_penalty=params["frequency_penalty"],
    presence_penalty=params["presence_penalty"]
)
# Render the assistant's reply as Markdown in the notebook
if response and "choices" in response:
    display(Markdown(response['choices'][0]['message']['content']))
else:
    print("No valid response received.")
Few-shot prompts
In [18]:
import os
import requests
from dotenv import load_dotenv
from IPython.display import Markdown, display
# Load environment variables from the .env file
load_dotenv()
# Read the OpenAI API key from the environment (never hard-code secrets)
api_key = os.getenv("OPENAI_API_KEY")
def set_open_params(temperature=0.7, max_tokens=150, top_p=1.0, frequency_penalty=0.0, presence_penalty=0.0):
    """Assemble the parameter dict for an OpenAI chat-completion call."""
    return dict(
        model="gpt-3.5-turbo",
        temperature=temperature,
        max_tokens=max_tokens,
        top_p=top_p,
        frequency_penalty=frequency_penalty,
        presence_penalty=presence_penalty,
    )
def generate_openai_text(api_key, model, messages, temperature=1.0, max_tokens=150, top_p=1.0,
                         frequency_penalty=0.0, presence_penalty=0.0, timeout=60):
    """Call the OpenAI chat-completions API; return parsed JSON or None on error.

    ``timeout`` (seconds) is a new, defaulted keyword — existing callers are
    unchanged — that stops the HTTP request from hanging indefinitely.
    All other arguments are forwarded verbatim to the API.
    """
    url = "https://api.openai.com/v1/chat/completions"
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}"
    }
    data = {
        "model": model,
        "messages": messages,
        "temperature": temperature,
        "max_tokens": max_tokens,
        "top_p": top_p,
        "frequency_penalty": frequency_penalty,
        "presence_penalty": presence_penalty
    }
    try:
        # timeout prevents the call from blocking forever on network issues
        response = requests.post(url, headers=headers, json=data, timeout=timeout)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        print(f"An error occurred: {e}")
        return None
# Set the parameters
params = set_open_params(temperature=0.7, max_tokens=150)
# Prompt for the odd-number analysis
prompt = """The odd numbers in this group add up to an even number: 4, 8, 9, 15, 12, 2, 1.
A: The answer is False.
The odd numbers in this group add up to an even number: 17, 10, 19, 4, 8, 12, 24.
A: The answer is True.
The odd numbers in this group add up to an even number: 16, 11, 14, 4, 8, 13, 24.
A: The answer is True.
The odd numbers in this group add up to an even number: 17, 9, 10, 12, 13, 4, 2.
A: The answer is False.
The odd numbers in this group add up to an even number: 15, 32, 5, 13, 82, 7, 1.
A:"""
# Build the message list for the API
messages = [
    {
        "role": "user",
        "content": prompt
    }
]
# Call the model and collect its response
response = generate_openai_text(
    api_key,
    params["model"],
    messages,
    temperature=params["temperature"],
    max_tokens=params["max_tokens"],
    top_p=params["top_p"],
    frequency_penalty=params["frequency_penalty"],
    presence_penalty=params["presence_penalty"]
)
# Render the assistant's reply as Markdown in the notebook
if response and "choices" in response:
    display(Markdown(response['choices'][0]['message']['content']))
else:
    print("No valid response received.")
Chain-of-Thought (CoT) Prompting
In [19]:
import os
import requests
from dotenv import load_dotenv
from IPython.display import Markdown, display
# Load environment variables from the .env file
load_dotenv()
# Read the OpenAI API key from the environment (never hard-code secrets)
api_key = os.getenv("OPENAI_API_KEY")
def set_open_params(temperature=0.7, max_tokens=150, top_p=1.0, frequency_penalty=0.0, presence_penalty=0.0):
    """Assemble the parameter dict for an OpenAI chat-completion call."""
    return dict(
        model="gpt-3.5-turbo",
        temperature=temperature,
        max_tokens=max_tokens,
        top_p=top_p,
        frequency_penalty=frequency_penalty,
        presence_penalty=presence_penalty,
    )
def generate_openai_text(api_key, model, messages, temperature=1.0, max_tokens=150, top_p=1.0,
                         frequency_penalty=0.0, presence_penalty=0.0, timeout=60):
    """Call the OpenAI chat-completions API; return parsed JSON or None on error.

    ``timeout`` (seconds) is a new, defaulted keyword — existing callers are
    unchanged — that stops the HTTP request from hanging indefinitely.
    All other arguments are forwarded verbatim to the API.
    """
    url = "https://api.openai.com/v1/chat/completions"
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}"
    }
    data = {
        "model": model,
        "messages": messages,
        "temperature": temperature,
        "max_tokens": max_tokens,
        "top_p": top_p,
        "frequency_penalty": frequency_penalty,
        "presence_penalty": presence_penalty
    }
    try:
        # timeout prevents the call from blocking forever on network issues
        response = requests.post(url, headers=headers, json=data, timeout=timeout)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        print(f"An error occurred: {e}")
        return None
# Set the parameters
params = set_open_params(temperature=0.7, max_tokens=150)
# Prompt for the odd-number analysis
prompt = """The odd numbers in this group add up to an even number: 4, 8, 9, 15, 12, 2, 1.
A: Adding all the odd numbers (9, 15, 1) gives 25. The answer is False.
The odd numbers in this group add up to an even number: 15, 32, 5, 13, 82, 7, 1.
A:"""
# Build the message list for the API
messages = [
    {
        "role": "user",
        "content": prompt
    }
]
# Call the model and collect its response
response = generate_openai_text(
    api_key,
    params["model"],
    messages,
    temperature=params["temperature"],
    max_tokens=params["max_tokens"],
    top_p=params["top_p"],
    frequency_penalty=params["frequency_penalty"],
    presence_penalty=params["presence_penalty"]
)
# Render the assistant's reply as Markdown in the notebook
if response and "choices" in response:
    display(Markdown(response['choices'][0]['message']['content']))
else:
    print("No valid response received.")
Zero-shot CoT
In [20]:
import os
import requests
from dotenv import load_dotenv
from IPython.display import Markdown, display
# Load environment variables from the .env file
load_dotenv()
# Read the OpenAI API key from the environment (never hard-code secrets)
api_key = os.getenv("OPENAI_API_KEY")
def set_open_params(temperature=0.7, max_tokens=150, top_p=1.0, frequency_penalty=0.0, presence_penalty=0.0):
    """Assemble the parameter dict for an OpenAI chat-completion call."""
    return dict(
        model="gpt-3.5-turbo",
        temperature=temperature,
        max_tokens=max_tokens,
        top_p=top_p,
        frequency_penalty=frequency_penalty,
        presence_penalty=presence_penalty,
    )
def generate_openai_text(api_key, model, messages, temperature=1.0, max_tokens=150, top_p=1.0,
                         frequency_penalty=0.0, presence_penalty=0.0, timeout=60):
    """Call the OpenAI chat-completions API; return parsed JSON or None on error.

    ``timeout`` (seconds) is a new, defaulted keyword — existing callers are
    unchanged — that stops the HTTP request from hanging indefinitely.
    All other arguments are forwarded verbatim to the API.
    """
    url = "https://api.openai.com/v1/chat/completions"
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}"
    }
    data = {
        "model": model,
        "messages": messages,
        "temperature": temperature,
        "max_tokens": max_tokens,
        "top_p": top_p,
        "frequency_penalty": frequency_penalty,
        "presence_penalty": presence_penalty
    }
    try:
        # timeout prevents the call from blocking forever on network issues
        response = requests.post(url, headers=headers, json=data, timeout=timeout)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        print(f"An error occurred: {e}")
        return None
# Set the parameters
params = set_open_params(temperature=0.7, max_tokens=150)
# Prompt for the apples problem
prompt = """I went to the market and bought 10 apples. I gave 2 apples to the neighbor and 2 to the repairman. I then went and bought 5 more apples and ate 1. How many apples did I remain with?
Let's think step by step."""
# Build the message list for the API
messages = [
    {
        "role": "user",
        "content": prompt
    }
]
# Call the model and collect its response
response = generate_openai_text(
    api_key,
    params["model"],
    messages,
    temperature=params["temperature"],
    max_tokens=params["max_tokens"],
    top_p=params["top_p"],
    frequency_penalty=params["frequency_penalty"],
    presence_penalty=params["presence_penalty"]
)
# Render the assistant's reply as Markdown in the notebook
if response and "choices" in response:
    display(Markdown(response['choices'][0]['message']['content']))
else:
    print("No valid response received.")