This code can be roughly split into 3 major sections:
Every Python program needs libraries to work effectively. These libraries are like add-on installations that you can use to make things easier to code, visualize, or store.
import streamlit as st
from streamlit_chat import message
# Streamlit is the library used to create the basic website that hosts our chatbot.
import openai
# OpenAI is the library used to access the LLM at the core of our chatbot.
import toml
# Toml is used to load API keys from a local secrets file
# (see the commented-out toml.load(...) call further down).
from loguru import logger
# Loguru logs the exchange between the user and the LLM.
This section sets up and initializes the overall process flow that allows the user to remotely talk to the LLM.
# --- Layout / customization of the webpage ---
app_name = "Private ChatBot"
welcome_msg = """Welcome!!! Hey Mavericks. I'm a useful AI assistant, I may help you together with your work on Math, Physics, Chemistry and Programming."""
st.set_page_config(page_title=app_name, layout='wide')  # 'wide' uses the full browser width
st.title(app_name)

# Preferred: load the key from a local TOML secrets file instead of hard-coding it.
# secrets = toml.load('secrets.toml')

# --- Configuration of model and API ---
model = "gpt-4o-mini"
mannequin = model  # legacy alias; kept so older call sites referencing the old name still resolve
# openai.api_key = secrets['api_keys']['openai_api_key']
# SECURITY: never commit a real API key to source control -- the previous
# revision embedded one directly here.
openai.api_key = 'ENTER API KEY HERE'
# Define the "init" (short for initialization) function.
def init(clear=False):
    """
    Retain or reset the state of the Streamlit session.

    As the chat session continues, user questions and assistant responses
    are appended to the "messages" list kept in st.session_state.

    Args:
        clear: when True (the "Clear Conversation" button was pressed),
            the session state is reset to hold only the default system
            context.
    """
    logger.info(f'INIT Summoned with Clear: {clear}')
    # The system message gives the LLM its context, scope and -- most
    # importantly -- boundaries: a "personality" that also fences the
    # responses it can generate.  Defined once to avoid duplicating the
    # literal in both branches.  (Typo "Chemisty" fixed.)
    system_context = [{"role": "system",
                       "content": "You are a helpful assistant, can help teens with their school work on Math, Physics, Chemistry and Programming."}]
    if clear:
        st.session_state['messages'] = system_context
    elif 'messages' not in st.session_state:
        st.session_state['messages'] = system_context
def generate_response(messages, generation_consistency=1, model="gpt-4o-mini"):
    """
    Call the OpenAI chat-completions API and return the assistant's reply.

    Args:
        messages: full chat history in OpenAI message format
            ([{"role": ..., "content": ...}, ...]).
        generation_consistency: sampling temperature -- 0 for deterministic
            ("consistent") answers, 1 for more creative ones.
        model: OpenAI model name; new backward-compatible parameter so the
            function no longer depends on a module-level global.

    Returns:
        The text content of the first completion choice.
    """
    completion = openai.chat.completions.create(
        model=model,  # keyword must be 'model' (previous revision misspelled it)
        messages=messages,
        temperature=generation_consistency
    )
    # Uses OpenAI's API to process user messages and generate a response.
    return completion.choices[0].message.content
Quick note: whenever you see functions prefixed with "st.", like st.spinner, remember that these are Streamlit functions. (We imported Streamlit as "st", so whenever you see a function starting with st, we are using that library.)
with st.sidebar.empty():
    # Let the user pick between consistent (temperature 0) and creative
    # (temperature 1) response modes.
    response_type = st.radio("Response Sort:", ['Consistent', 'Creative'], horizontal=True)
    # BUG FIX: compare against 'Consistent' -- the actual radio option --
    # previously compared with 'Constant', which never matched.
    response_consistency = 0 if response_type == 'Consistent' else 1
    logger.info(f"Chosen Response Consistency: {response_consistency}")
# "Clear Conversation" button: resets the chat to the system context only.
clear_button = st.sidebar.button("Clear Dialog", key="clear")
if clear_button:
    init(True)
st.markdown(welcome_msg)
# Chat-history panel (fixed height, scrollable) with the input panel below it.
response_container = st.container(border=True, height=500)
prompt_container = st.container()
with prompt_container:
    messages = st.session_state['messages']
    # st.form batches the text area and button into one submit action;
    # clear_on_submit empties the text area after sending.
    with st.form(key='dai_prompt', clear_on_submit=True):
        user_input = st.text_area("Publish your query:", key='enter', placeholder="How can I aid you?", height=100)
        submitted = st.form_submit_button(label='Ship')
        if submitted and user_input:
            with st.spinner("Considering..."):
                # Append the user's question, ask the model, and store both
                # turns back into the session state.
                messages += [{"role": "user", "content": f"{user_input}"}]
                assistant_response = generate_response(messages=messages, generation_consistency=response_consistency)
                messages += [{'role': 'assistant', 'content': assistant_response}]
                st.session_state['messages'] = messages
# Users enter their question in a text box; clicking "Send" makes the chatbot
# process the input and generate a response.  Previous messages are stored so
# users can follow the conversation flow.
with response_container:
    messages = st.session_state['messages']
    # Newest messages first; streamlit_chat.message() renders chat bubbles.
    for i, msg in enumerate(reversed(messages)):
        if msg['role'] == 'assistant':
            message(msg['content'], key=str(i), seed=244)
        elif msg['role'] == 'user':
            # BUG FIX: the role stored above is 'user'; the previous
            # comparison with 'person' never matched, so user messages
            # were never displayed.
            message(msg['content'], is_user=True, key=str(i) + '_user', seed=1)
You can customize the look and feel of your website using this link, which will teach you the basics of using Streamlit.
Once you have completed these steps, you should have a fully functional Personal Assistant!
Remember, this is just the beginning. If you found this topic interesting and want to dig a little deeper, I highly recommend exploring a powerful emerging concept called Agentic Programming. It goes beyond using a single LLM and instead focuses on designing systems where multiple LLMs, or "agents", talk to each other, reason, and make decisions in more autonomous and coordinated ways. Agentic programming enables the automation of many tasks, ranging from an AI ordering you pizza based on your previous orders to coordinating your calendar, automatically booking flight tickets, and even conducting research, all with minimal human input. In one of my other articles, which you can access here, I walk you through how to build a real conversational system powered by these agents, opening the door to more dynamic and intelligent interactions.
If you run into any issues or have any questions, please don't hesitate to contact me at [email protected].
import streamlit as st
from streamlit_chat import message
# Streamlit is used to create the basic website that hosts our chatbot.
import openai
# OpenAI is used to access the LLM at the core of our chatbot.
import toml
# Toml is used to load API keys from a local secrets file
# (see the commented-out toml.load(...) call below).
from loguru import logger
# Loguru logs the exchange between the user and the LLM.
# --- Layout / customization of the webpage ---
app_name = "Private ChatBot"
welcome_msg = """Welcome!!! Hey Mavericks. I'm a useful AI assistant, I may help you together with your work on Math, Physics, Chemistry and Programming."""
st.set_page_config(page_title=app_name, layout='wide')  # 'wide' uses the full browser width
st.title(app_name)

# Preferred: load the key from a local TOML secrets file instead of hard-coding it.
# secrets = toml.load('secrets.toml')

# --- Configuration of model and API ---
model = "gpt-4o-mini"
mannequin = model  # legacy alias; kept so older call sites referencing the old name still resolve
# openai.api_key = secrets['api_keys']['openai_api_key']
# SECURITY: load the real key from the secrets file or an environment
# variable; never commit it to source control.
openai.api_key = 'ENTER API KEY HERE'
def init(clear=False):
    """
    Retain or reset the state of the Streamlit session.

    As the chat session continues, user questions and assistant responses
    are appended to the "messages" list kept in st.session_state.

    Args:
        clear: when True (the "Clear Conversation" button was pressed),
            the session state is reset to hold only the default system
            context.
    """
    logger.info(f'INIT Summoned with Clear: {clear}')
    # System message: the LLM's context, scope and boundaries.  Defined
    # once to avoid duplicating the literal.  (Typo "Chemisty" fixed.)
    system_context = [{"role": "system",
                       "content": "You are a helpful assistant, can help teens with their school work on Math, Physics, Chemistry and Programming."}]
    if clear:
        st.session_state['messages'] = system_context
    elif 'messages' not in st.session_state:
        st.session_state['messages'] = system_context
def generate_response(messages, generation_consistency=1, model="gpt-4o-mini"):
    """
    Invoke the OpenAI chat-completions API and return the assistant's reply.

    Args:
        messages: full chat history in OpenAI message format
            ([{"role": ..., "content": ...}, ...]).
        generation_consistency: sampling temperature -- 0 for deterministic
            ("consistent") answers, 1 for more creative ones.
        model: OpenAI model name; new backward-compatible parameter so the
            function no longer depends on a module-level global.

    Returns:
        The text content of the first completion choice.
    """
    completion = openai.chat.completions.create(
        model=model,  # keyword must be 'model' (previous revision misspelled it)
        messages=messages,
        # max_completion_tokens=150
        temperature=generation_consistency
    )
    return completion.choices[0].message.content
init()  # ensure st.session_state['messages'] exists before anything reads it
# NOTE(review): debug dump of the raw history on the page -- consider
# removing for production.
st.write(st.session_state.messages)
with st.sidebar.empty():
    response_type = st.radio("Response Sort:", ['Consistent', 'Creative'], horizontal=True)
    # BUG FIX: compare against 'Consistent' -- the actual radio option --
    # previously compared with 'Constant', which never matched.
    if response_type == 'Consistent':
        response_consistency = 0  # temperature 0 -> deterministic answers
    else:
        response_consistency = 1  # temperature 1 -> creative answers
    logger.info(f"Chosen Response Consistency: {response_consistency}")
# "Clear Conversation" button: resets the chat to the system context only.
clear_button = st.sidebar.button("Clear Dialog", key="clear")
if clear_button:
    init(True)
st.markdown(welcome_msg)
# Chat-history panel (fixed height, scrollable) with the input panel below it.
response_container = st.container(border=True, height=500)
prompt_container = st.container()
with prompt_container:
    messages = st.session_state['messages']
    # st.form batches the text area and button into one submit action;
    # clear_on_submit empties the text area after sending.
    with st.form(key='dai_prompt', clear_on_submit=True):
        user_input = st.text_area("Publish your query:", key='enter', placeholder="How can I aid you?", height=100)
        submitted = st.form_submit_button(label='Ship')
        if submitted and user_input:
            with st.spinner("Considering..."):
                # Append the user's question, ask the model, and store both
                # turns back into the session state.
                messages += [{"role": "user", "content": f"{user_input}"}]
                assistant_response = generate_response(messages=messages, generation_consistency=response_consistency)
                messages += [{'role': 'assistant', 'content': assistant_response}]
                st.session_state['messages'] = messages
with response_container:
    messages = st.session_state['messages']
    # Newest messages first; streamlit_chat.message() renders chat bubbles.
    for i, msg in enumerate(reversed(messages)):
        if msg['role'] == 'assistant':
            message(msg['content'], key=str(i), seed=244)
        elif msg['role'] == 'user':
            # BUG FIX: the role stored above is 'user'; the previous
            # comparison with 'person' never matched, so user messages
            # were never displayed.
            message(msg['content'], is_user=True, key=str(i) + '_user', seed=1)