Add multi-language system prompts and BedrockChatAdapter implementation
- Implement system prompts in English and Canadian French for AI interactions in `system_prompts.py` (structure sketched below).
- Enhance `BedrockChatAdapter` with prompt templates for QA, conversation, and follow-up questions in `base.py`.
- Update `__init__.py` to include system prompt imports for easy access.
- Configure the logger in `base.py` to trace key operations for QA and conversational prompts.
michel-heon committed Sep 24, 2024
1 parent 03744fc commit 256279d
Showing 6 changed files with 244 additions and 81 deletions.
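The new `system_prompts.py` module itself is not rendered on this page, so the sketch below shows only the structure the adapters rely on: a `prompts` dictionary keyed by language code plus a module-level `lang` value. The key names are taken from the `prompts[lang][...]` lookups in this diff; the prompt wording, the `PROMPT_LANGUAGE` environment variable, and the exact language codes are illustrative assumptions, not the committed content.

```python
# Hypothetical sketch of system_prompts.py (illustration only, not the committed file).
# Key names come from the prompts[lang][...] lookups in this commit; the wording,
# the PROMPT_LANGUAGE variable, and the "en"/"fr" codes are assumptions.
import os

# The adapters import `lang` directly, so the language is fixed when the module loads.
lang = os.environ.get("PROMPT_LANGUAGE", "en")

prompts = {
    "en": {
        # Full prompts used as system/base templates
        "conversation_prompt": "The following is a friendly conversation between a human and an AI...",
        "condense_question_prompt": "Given the following conversation and a follow-up question...",
        "contextualize_q_system_prompt": "Given the following conversation and a follow-up question, rephrase it...",
        "qa_prompt": "Use the following pieces of context to answer the question at the end...",
        # Single translated labels used inside the templates
        "current_conversation_word": "Current conversation",
        "question_word": "Question",
        "assistant_word": "Assistant",
        "chat_history_word": "Chat History",
        "follow_up_input_word": "Follow Up Input",
        "standalone_question_word": "Standalone question",
        "helpful_answer_word": "Helpful Answer",
    },
    "fr": {
        # Canadian French equivalents of the same keys go here; the actual wording
        # lives in the unrendered system_prompts.py and is not reproduced in this sketch.
    },
}
```

With this shape, every adapter below can build its template for the selected language from a single table, pulling both whole prompts (for example `qa_prompt`) and individual labels (for example `question_word`).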
3 changes: 3 additions & 0 deletions .gitignore
@@ -457,3 +457,6 @@ lib/user-interface/react-app/src/graphql/subscriptions.ts
# js function
!lib/authentication/lambda/updateUserPoolClient/index.js
!lib/authentication/lambda/updateOidcSecret/index.js
/.project
/.pydevproject
/outputs.json
@@ -24,6 +24,8 @@
from langchain_core.messages.ai import AIMessage, AIMessageChunk
from langchain_core.messages.human import HumanMessage
from langchain_aws import ChatBedrockConverse
from adapters.shared.prompts.system_prompts import prompts, lang # Import prompts and language


logger = Logger()

@@ -129,20 +131,62 @@ def get_memory(self, output_key=None, return_messages=False):
)

    def get_prompt(self):
-        template = """The following is a friendly conversation between a human and an AI. If the AI does not know the answer to a question, it truthfully says it does not know.
-Current conversation:
-{chat_history}
-Question: {input}"""  # noqa: E501

+        # Fetch the conversation prompt based on the current language
+        conversation_prompt = prompts[lang]['conversation_prompt']
+        logger.info(f"Generating the conversation prompt for language: {lang}")

+        # Use the fetched prompt for the selected language
+        template = f"""{conversation_prompt}
+{prompts[lang]['current_conversation_word']}:
+{{chat_history}}
+{prompts[lang]['question_word']}: {{input}}"""

+        logger.debug(f"Generated conversation prompt template for language {lang}: {template}")

        return PromptTemplate.from_template(template)

    def get_condense_question_prompt(self):
-        return CONDENSE_QUESTION_PROMPT
+        system_prompt = prompts[lang]['condense_question_prompt']

+        template = f"""{system_prompt}
+<conv>
+{{chat_history}}
+</conv>
+<followup>
+{{question}}
+</followup>
+"""

+        logger.info(f"Language selected for get_condense_question_prompt: {lang}")
+        logger.info(f"Condense Question Prompt Template: {template}")

+        return PromptTemplate(
+            input_variables=["chat_history", "question"],
+            template=template
+        )

    def get_qa_prompt(self):
-        return QA_PROMPT
+        system_prompt = prompts[lang]['qa_prompt']
+        question_word = prompts[lang]['question_word']

+        template = f"""{system_prompt}
+{{context}}
+{question_word}: {{question}}"""

+        # Log the selected language and the template content
+        logger.info(f"Language selected for get_qa_prompt: {lang}")
+        logger.info(f"Base QA Prompt Template: {template}")

+        # Return the PromptTemplate with the "context" and "question" input variables
+        return PromptTemplate(
+            template=template, input_variables=["context", "question"]
+        )
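A note on the doubled braces used in the templates above: because each template is built with an f-string, values such as `{conversation_prompt}` and `{question_word}` are substituted immediately, while `{{chat_history}}`, `{{question}}` and `{{input}}` collapse to single-brace placeholders that `PromptTemplate` fills in later. A small self-contained check of that pattern (the prompt wording here is a placeholder, not the content of `system_prompts.py`):

```python
# Minimal check of the f-string brace-escaping pattern used in the methods above.
from langchain.prompts import PromptTemplate

conversation_prompt = "The following is a friendly conversation between a human and an AI."  # placeholder
question_word = "Question"  # placeholder label

template = f"""{conversation_prompt}
Current conversation:
{{chat_history}}
{question_word}: {{input}}"""

prompt = PromptTemplate.from_template(template)
print(prompt.input_variables)  # ['chat_history', 'input']
print(prompt.format(chat_history="Human: hi\nAI: hello", input="What is Amazon Bedrock?"))
```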

def run_with_chain_v2(self, user_prompt, workspace_id=None):
if not self.llm:
@@ -1,4 +1,5 @@
import os
import logging
from typing import Any, List

from ..base import ModelAdapter
@@ -13,85 +14,99 @@
from langchain_aws import ChatBedrockConverse
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.prompts.prompt import PromptTemplate
from adapters.shared.prompts.system_prompts import prompts, lang # Import prompts and language

logger = Logger()

# Configure logger
logger = logging.getLogger()
logger.setLevel(logging.INFO)

def get_guardrails() -> dict:
if "BEDROCK_GUARDRAILS_ID" in os.environ:
logger.info("Guardrails ID found in environment variables.")
return {
"guardrailIdentifier": os.environ["BEDROCK_GUARDRAILS_ID"],
"guardrailVersion": os.environ.get("BEDROCK_GUARDRAILS_VERSION", "DRAFT"),
}
logger.info("No guardrails ID found.")
return {}
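For reference, a hedged sketch of how the helper above behaves: it reads at most two environment variables, and the version falls back to "DRAFT" when unset. The guardrail ID below is a placeholder, not a real identifier.

```python
# Illustration only: exercising get_guardrails() with placeholder environment values.
import os

os.environ["BEDROCK_GUARDRAILS_ID"] = "gr-exampleid123"  # placeholder, not a real guardrail ID
os.environ["BEDROCK_GUARDRAILS_VERSION"] = "1"           # optional; defaults to "DRAFT" when absent

print(get_guardrails())
# {'guardrailIdentifier': 'gr-exampleid123', 'guardrailVersion': '1'}
```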


class BedrockChatAdapter(ModelAdapter):
def __init__(self, model_id, *args, **kwargs):
self.model_id = model_id

logger.info(f"Initializing BedrockChatAdapter with model_id: {model_id}")
super().__init__(*args, **kwargs)

    def get_qa_prompt(self):
-        system_prompt = (
-            "Use the following pieces of context to answer the question at the end."
-            " If you don't know the answer, just say that you don't know, "
-            "don't try to make up an answer. \n\n{context}"
-        )
-        return ChatPromptTemplate.from_messages(
+        # Fetch the QA prompt based on the current language
+        qa_system_prompt = prompts[lang]['qa_prompt']
+        # Append the context placeholder if needed
+        qa_system_prompt_with_context = qa_system_prompt + "\n\n{context}"
+        logger.info(f"Generating QA prompt template with: {qa_system_prompt_with_context}")

+        # Create the ChatPromptTemplate
+        chat_prompt_template = ChatPromptTemplate.from_messages(
            [
-                ("system", system_prompt),
+                ("system", qa_system_prompt_with_context),
                MessagesPlaceholder("chat_history"),
                ("human", "{input}"),
            ]
        )

+        # Trace the ChatPromptTemplate by logging its content
+        logger.debug(f"ChatPromptTemplate messages: {chat_prompt_template.messages}")

+        return chat_prompt_template
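For reference, a short self-contained sketch of how a chat template shaped like the one above can be rendered with langchain-core types. The system text and history below are placeholders, not the real `prompts[lang]['qa_prompt']` content:

```python
# Illustration only: formatting a QA-style ChatPromptTemplate like the one built above.
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.messages import AIMessage, HumanMessage

qa_system_prompt_with_context = "Use the retrieved context to answer.\n\n{context}"  # placeholder
chat_prompt_template = ChatPromptTemplate.from_messages(
    [
        ("system", qa_system_prompt_with_context),
        MessagesPlaceholder("chat_history"),
        ("human", "{input}"),
    ]
)

messages = chat_prompt_template.format_messages(
    context="Amazon Bedrock is a managed service for foundation models.",
    chat_history=[HumanMessage(content="Hi"), AIMessage(content="Hello!")],
    input="What is Amazon Bedrock?",
)
for message in messages:
    print(type(message).__name__, ":", message.content)
```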

    def get_prompt(self):
-        prompt_template = ChatPromptTemplate(
+        # Fetch the conversation prompt based on the current language
+        conversation_prompt = prompts[lang]['conversation_prompt']
+        logger.info("Generating general conversation prompt template.")
+        chat_prompt_template = ChatPromptTemplate.from_messages(
            [
-                (
-                    "system",
-                    (
-                        "The following is a friendly conversation between "
-                        "a human and an AI."
-                        "If the AI does not know the answer to a question, it "
-                        "truthfully says it does not know."
-                    ),
-                ),
+                ("system", conversation_prompt),
                MessagesPlaceholder(variable_name="chat_history"),
                ("human", "{input}"),
            ]
        )

-        return prompt_template
+        # Trace the ChatPromptTemplate by logging its content
+        logger.debug(f"ChatPromptTemplate messages: {chat_prompt_template.messages}")
+        return chat_prompt_template

    def get_condense_question_prompt(self):
-        contextualize_q_system_prompt = (
-            "Given the following conversation and a follow up"
-            " question, rephrase the follow up question to be a standalone question."
-        )
-        return ChatPromptTemplate.from_messages(
+        # Fetch the prompt based on the current language
+        contextualize_q_system_prompt = prompts[lang]['contextualize_q_system_prompt']
+        logger.info("Generating condense question prompt template.")
+        chat_prompt_template = ChatPromptTemplate.from_messages(
            [
                ("system", contextualize_q_system_prompt),
                MessagesPlaceholder("chat_history"),
                ("human", "{input}"),
            ]
        )
+        # Trace the ChatPromptTemplate by logging its content
+        logger.debug(f"ChatPromptTemplate messages: {chat_prompt_template.messages}")
+        return chat_prompt_template

def get_llm(self, model_kwargs={}, extra={}):
bedrock = genai_core.clients.get_bedrock_client()
params = {}
if "temperature" in model_kwargs:
params["temperature"] = model_kwargs["temperature"]
logger.info(f"Temperature set to: {model_kwargs['temperature']}")
if "topP" in model_kwargs:
params["top_p"] = model_kwargs["topP"]
logger.info(f"topP set to: {model_kwargs['topP']}")
if "maxTokens" in model_kwargs:
params["max_tokens"] = model_kwargs["maxTokens"]
logger.info(f"maxTokens set to: {model_kwargs['maxTokens']}")

guardrails = get_guardrails()
if len(guardrails.keys()) > 0:
params["guardrails"] = guardrails
logger.info(f"Guardrails applied: {guardrails}")

logger.info(f"Fetching LLM model: {self.model_id}")
return ChatBedrockConverse(
client=bedrock,
model=self.model_id,
@@ -107,47 +122,101 @@ class BedrockChatNoStreamingAdapter(BedrockChatAdapter):
"""Some models do not support system streaming using the converse API"""

def __init__(self, *args, **kwargs):
logger.info("Initializing BedrockChatNoStreamingAdapter with disabled streaming.")
super().__init__(disable_streaming=True, *args, **kwargs)


class BedrockChatNoSystemPromptAdapter(BedrockChatAdapter):
"""Some models do not support system and message history in the conversion API"""
"""Some models do not support system and message history in the conversation API"""

    def get_prompt(self):
-        template = """The following is a friendly conversation between a human and an AI. If the AI does not know the answer to a question, it truthfully says it does not know.
+        # Fetch the conversation prompt and translated words based on the current language
+        conversation_prompt = prompts[lang]['conversation_prompt']
+        question_word = prompts[lang]['question_word']
+        assistant_word = prompts[lang]['assistant_word']
+        logger.info("Generating no-system-prompt template for conversation.")

+        # Combine conversation prompt, chat history, and input into the template
+        template = f"""{conversation_prompt}
+{{chat_history}}
-Current conversation:
-{chat_history}
+{question_word}: {{input}}
-Question: {input}
+{assistant_word}:"""

-Assistant:"""  # noqa: E501
-        return PromptTemplateWithHistory(
-            template=template, input_variables=["input", "chat_history"]
+        # Create the PromptTemplateWithHistory instance
+        prompt_template = PromptTemplateWithHistory(
+            input_variables=["input", "chat_history"], template=template
        )

+        # Log the content of PromptTemplateWithHistory before returning
+        logger.debug(f"PromptTemplateWithHistory template: {prompt_template.template}")

+        return prompt_template

    def get_condense_question_prompt(self):
-        template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.
-Chat History:
-{chat_history}
-Follow Up Input: {input}
-Standalone question:"""  # noqa: E501
-        return PromptTemplateWithHistory(
-            template=template, input_variables=["input", "chat_history"]
+        # Set the global log level to DEBUG
+        # Fetch the prompt and translated words based on the current language
+        contextualize_q_system_prompt = prompts[lang]['contextualize_q_system_prompt']
+        logger.info(f"contextualize_q_system_prompt: {contextualize_q_system_prompt}")

+        follow_up_input_word = prompts[lang]['follow_up_input_word']
+        logger.info(f"follow_up_input_word: {follow_up_input_word}")

+        standalone_question_word = prompts[lang]['standalone_question_word']
+        logger.info(f"standalone_question_word: {standalone_question_word}")

+        chat_history_word = prompts[lang]['chat_history_word']
+        logger.info(f"chat_history_word: {chat_history_word}")

+        logger.info("Generating no-system-prompt template for condensing question.")

+        # Combine the prompt with placeholders
+        template = f"""{contextualize_q_system_prompt}
+{chat_history_word}:
+{{chat_history}}
+{follow_up_input_word}: {{input}}
+{standalone_question_word}:"""
+        # Log the content of the template
+        logger.info(f"get_condense_question_prompt: Template content: {template}")
+        # Create the PromptTemplateWithHistory instance
+        prompt_template = PromptTemplateWithHistory(
+            input_variables=["input", "chat_history"], template=template
        )

+        # Log the content of PromptTemplateWithHistory before returning
+        logger.debug(f"PromptTemplateWithHistory template: {prompt_template.template}")

+        return prompt_template

    def get_qa_prompt(self):
-        template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
+        # Fetch the QA prompt and translated words based on the current language
+        qa_system_prompt = prompts[lang]['qa_prompt']
+        question_word = prompts[lang]['question_word']
+        helpful_answer_word = prompts[lang]['helpful_answer_word']
+        logger.info("Generating no-system-prompt QA template.")

+        # Append the context placeholder if needed

-{context}
+        # Combine the prompt with placeholders
+        template = f"""{qa_system_prompt}
-Question: {input}
-Helpful Answer:"""  # noqa: E501
-        return PromptTemplateWithHistory(
-            template=template, input_variables=["input", "content"]
+{{context}}
+{question_word}: {{input}}
+{helpful_answer_word}:"""

+        # Create the PromptTemplateWithHistory instance
+        prompt_template = PromptTemplateWithHistory(
+            input_variables=["input", "context"], template=template
        )

+        # Log the content of PromptTemplateWithHistory before returning
+        logger.debug(f"PromptTemplateWithHistory template: {prompt_template.template}")

+        return prompt_template


class BedrockChatNoStreamingNoSystemPromptAdapter(BedrockChatNoSystemPromptAdapter):
"""Some models do not support system streaming using the converse API"""
@@ -159,31 +228,14 @@ def __init__(self, *args, **kwargs):
# Register the adapters
registry.register(r"^bedrock.ai21.jamba*", BedrockChatAdapter)
registry.register(r"^bedrock.ai21.j2*", BedrockChatNoStreamingNoSystemPromptAdapter)
-registry.register(
-    r"^bedrock\.cohere\.command-(text|light-text).*", BedrockChatNoSystemPromptAdapter
-)
+registry.register(r"^bedrock\.cohere\.command-(text|light-text).*", BedrockChatNoSystemPromptAdapter)
registry.register(r"^bedrock\.cohere\.command-r.*", BedrockChatAdapter)
registry.register(r"^bedrock.anthropic.claude*", BedrockChatAdapter)
-registry.register(
-    r"^bedrock.meta.llama*",
-    BedrockChatAdapter,
-)
-registry.register(
-    r"^bedrock.mistral.mistral-large*",
-    BedrockChatAdapter,
-)
-registry.register(
-    r"^bedrock.mistral.mistral-small*",
-    BedrockChatAdapter,
-)
-registry.register(
-    r"^bedrock.mistral.mistral-7b-*",
-    BedrockChatNoSystemPromptAdapter,
-)
-registry.register(
-    r"^bedrock.mistral.mixtral-*",
-    BedrockChatNoSystemPromptAdapter,
-)
+registry.register(r"^bedrock.meta.llama*", BedrockChatAdapter)
+registry.register(r"^bedrock.mistral.mistral-large*", BedrockChatAdapter)
+registry.register(r"^bedrock.mistral.mistral-small*", BedrockChatAdapter)
+registry.register(r"^bedrock.mistral.mistral-7b-*", BedrockChatNoSystemPromptAdapter)
+registry.register(r"^bedrock.mistral.mixtral-*", BedrockChatNoSystemPromptAdapter)
registry.register(r"^bedrock.amazon.titan-t*", BedrockChatNoSystemPromptAdapter)


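Adapter selection is keyed by regular expressions over the model id. The registry implementation itself is not part of this diff, so the sketch below uses plain `re` matching only to illustrate which adapter a given Bedrock model id would resolve to; the model ids and the first-match rule are assumptions.

```python
# Illustration only: first-match resolution over patterns like those registered above.
# The project's actual registry API is not shown in this diff.
import re

ADAPTER_PATTERNS = [
    (r"^bedrock\.cohere\.command-(text|light-text).*", "BedrockChatNoSystemPromptAdapter"),
    (r"^bedrock\.cohere\.command-r.*", "BedrockChatAdapter"),
    (r"^bedrock.anthropic.claude*", "BedrockChatAdapter"),
    (r"^bedrock.mistral.mistral-7b-*", "BedrockChatNoSystemPromptAdapter"),
    (r"^bedrock.amazon.titan-t*", "BedrockChatNoSystemPromptAdapter"),
]

def resolve_adapter(model_id: str) -> str:
    for pattern, adapter_name in ADAPTER_PATTERNS:
        if re.match(pattern, model_id):
            return adapter_name
    raise ValueError(f"No adapter registered for model id: {model_id}")

print(resolve_adapter("bedrock.anthropic.claude-3-sonnet"))     # BedrockChatAdapter
print(resolve_adapter("bedrock.amazon.titan-text-express-v1"))  # BedrockChatNoSystemPromptAdapter
```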
@@ -1,2 +1,3 @@
# flake8: noqa
from .meta.llama2_chat import *
from .prompts.system_prompts import *
@@ -0,0 +1,2 @@
# flake8: noqa
from .system_prompts import *