Build Your Own Coding Assistant
About Me
What's The Plan?
1.
What is a coding assistant?
AI can
create software
now
mid 2023
2.
What are Large Language Models?
A language model is just a next-word predictor
If you predict and sample repeatedly, you'll generate text
Here's an implementation of this from Data Science from Scratch
# Bigram transition table: each word maps to every word that follows it
# somewhere in `document`. Duplicates are kept on purpose, so a uniform
# random choice below is frequency-weighted.
transitions = defaultdict(list)
for before, after in zip(document, document[1:]):
    transitions[before].append(after)

def generate_using_bigrams() -> str:
    """Generate one sentence by random-walking the bigram table."""
    word = "."      # "." marks a sentence boundary, so the next word starts a sentence
    sentence = []
    while True:
        word = random.choice(transitions[word])  # pick a successor of the current word
        sentence.append(word)
        if word == ".":                          # hit a boundary: sentence is complete
            return " ".join(sentence)
Recurrent Neural Networks
Honeybre - Midnight Song
Zomby - My Johnane Tame
The Novis Combo - Disappointing
Koko Taylor - What Is The Numbers
Majical Cloudz - Almast
La Makemak Braston - Super V, Abam
Eclipse Scopper - Down Back Up the Lord
Devics - This Olymphis!
M. Ward - Baby For Mile
Farunots - Tigraphes (feat. Daddy)
Finger Escape - Blowin' Ride On! Horns of Butter Days
Ray Norvells - Union From Town Sun
The Royal Room - Waterfalls
Otis Taylor - My Old Frees
Neurosis - I Hate Much Young
Carmen Sand Elevators - Nothing to Go for Government
Transformers
Pretraining
Fine-tuning
and
reinforcement
Prompt engineering
3.
How do we work with Large Language Models in Python?
import os
from openai import OpenAI

# The key is passed explicitly here; by default the SDK would read
# OPENAI_API_KEY from the environment on its own.
client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))

SYSTEM = "You are puro San Antonio. Answer questions from that perspective."
QUESTION = (
    "I am driving on the freeway. "
    "Do I need to secure the items in the back of my truck?"
)

# One-shot call via the Responses API: instructions play the system role,
# input is the user message.
response = client.responses.create(
    model="gpt-4.1-mini",
    instructions=SYSTEM,
    input=QUESTION,
)
print(response.output_text)
Responses API
from openai import OpenAI

client = OpenAI()

# The Chat Completions API takes an explicit message list instead of
# separate instructions/input arguments.
system_message = {
    "role": "system",
    "content": "You are puro San Antonio. Answer questions from that perspective.",
}
user_message = {
    "role": "user",
    "content": (
        "I am driving on the freeway. "
        "Do I need to secure the items in the back of my truck?"
    ),
}

completion = client.chat.completions.create(
    model="gpt-4.1-mini",
    messages=[system_message, user_message],
)
print(completion.choices[0].message.content)
Completions API
# Seed the conversation with the system persona; everything after is an
# alternating user/assistant transcript.
messages = [{
    "role": "system",
    "content": (
        "You are the organizer of a Python conference. "
        "Your only goal is to get people to attend."
    ),
}]

while True:
    # 1. read the user's turn and record it
    messages.append({"role": "user", "content": input("User: ")})
    # 2. send the whole transcript — the model is stateless between calls
    completion = client.chat.completions.create(
        model="gpt-4.1-mini",
        messages=messages,
    )
    # 3. show the reply and record it so the next call has full context
    reply = completion.choices[0].message.content
    print(f"\nAssistant: {reply}\n")
    messages.append({"role": "assistant", "content": reply})
repeat in a loop
list of messages
get input from the user
add it to the messages
call the API
print the response
add it to the messages
4.
What is DSPy?
The Two Big Ideas Behind DSPy
Here we'll only focus on the first
(The second is very interesting though!)
Tired Idea: Movie Review Sentiment Analysis
Review: I absolutely loved this movie! The plot was thrilling and the characters were so well developed.
Sentiment: Positive
Review: The movie was okay, not great but not terrible either. It had some good moments.
Sentiment: Neutral
Review: I didn't like this movie at all. The story was boring and the acting was subpar.
Sentiment: Negative
Writing the Prompt By Hand, Like a Peasant
# Hand-written prompt: the classification task and the allowed labels are
# spelled out in plain English.
instructions = """
I am going to give you a movie review.
I need you to categorize it as Positive, Negative, or Neutral.
Please just respond with one of those three words.
"""

def categorize(review: str) -> str:
    """Classify a movie review, returning one of Positive, Negative, Neutral."""
    response = client.responses.create(
        model="gpt-5-mini",
        reasoning={"effort": "minimal"},  # cheap task: minimal reasoning budget
        instructions=instructions,
        input=review,
    )
    # strip() guards against stray whitespace around the one-word answer
    return response.output_text.strip()
Using DSPy, Like a Champ
signature = "review -> sentiment: Literal['Positive', 'Negative', 'Neutral']"
Using DSPy, Like a Champ
signature = "review -> sentiment: Literal['Positive', 'Negative', 'Neutral']"
categorize = dspy.Predict(signature=signature)
Using DSPy, Like a Champ
signature = "review -> sentiment: Literal['Positive', 'Negative', 'Neutral']"
categorize = dspy.Predict(signature=signature)
or
# Class-based DSPy signature: the field names and the Literal type declare
# the I/O contract, so no hand-written prompt is needed.
class SentimentAnalysis(dspy.Signature):
    review: str = dspy.InputField()  # the movie review text to classify
    sentiment: Literal['Positive', 'Negative', 'Neutral'] = dspy.OutputField()
categorize = dspy.Predict(signature=SentimentAnalysis)
SYSTEM_PROMPT = (
    "You are the organizer of a Python conference. "
    "Your only goal is to get people to attend."
)
# The transcript starts with the persona and grows one user/assistant
# pair per iteration.
messages = [{"role": "system", "content": SYSTEM_PROMPT}]

while True:
    user_input = input("User: ")
    messages.append({"role": "user", "content": user_input})

    # The full transcript is resent every turn — the API call itself is stateless.
    completion = client.chat.completions.create(
        model="gpt-4.1-mini",
        messages=messages,
    )

    assistant_reply = completion.choices[0].message.content
    print(f"\nAssistant: {assistant_reply}\n")
    messages.append({"role": "assistant", "content": assistant_reply})
A simple chatbot
class Signature(dspy.Signature):
    # NOTE: in DSPy this docstring becomes the instructions sent to the
    # model, so its wording is part of the program's behavior.
    """
    You are the organizer of a Python conference.
    Your only goal is to get people to attend.
    Everything you say should be in pursuit of that goal.
    """
    conversation_history: list = dspy.InputField()  # full chat transcript so far
    response: str = dspy.OutputField()              # the assistant's next turn

ask = dspy.Predict(signature=Signature)

# Chat loop: DSPy builds the prompt from the signature; we only maintain
# the transcript and pass it in.
messages = []
while True:
    user_input = input("User: ")
    messages.append({"role": "user", "content": user_input})
    prediction = ask(conversation_history=messages)
    response = prediction.response
    messages.append({"role": "assistant", "content": response})
    print(f"\nAssistant: {response}\n")
DSPy version
5.
Giving tools to LLMs
ReAct pattern
ask = dspy.Predict(signature=Signature)
ReAct in DSPy
ask = dspy.Predict(signature=Signature)
vs
def get_weather(location: str) -> str:
    """
    Return the weather forecast for the given location.
    """
    # Canned demo forecast: San Antonio is singled out (case-insensitive
    # substring match); everywhere else gets a generic answer.
    if "san antonio" in location.lower():
        return "Way too hot and way too dry."
    return "Weather should be pretty nice."
# Wrap the signature in a ReAct agent: the model can now decide to call
# get_weather (exposed as a tool) before producing its final response.
ask_with_tools = dspy.ReAct(
    signature=Signature,
    tools=[dspy.Tool(get_weather)]
)
ReAct in DSPy
User: what will the weather be like at the conference
Assistant: Expect very hot and dry conditions in San Antonio during the conference. Practical notes to help you stay comfortable and make the most of your visit:
[...]
--- Internal Reasoning Steps ---
{'thought_0': "User asked about the weather at the conference. I need the forecast for San Antonio to inform attendees and encourage them to come prepared. I'll fetch the weather for San Antonio, TX.", 'tool_name_0': 'get_weather', 'tool_args_0': {'location': 'San Antonio, TX'}, 'observation_0': 'Way too hot and way too dry.', 'thought_1': 'The user asked about the weather; I already fetched it and observed "Way too hot and way too dry." I should summarize that clearly for attendees and give actionable advice to encourage attendance (e.g., bring water, dress light, venue cooling info, transportation tips). No further tool calls are needed.', 'tool_name_1': 'finish', 'tool_args_1': {}, 'observation_1': 'Completed.'}
6.
How do we build our own coding assistant?
What do we need?
import os
import dspy

# Configure the default language model used by every DSPy module in this process.
lm = dspy.LM(
    model="openai/gpt-4.1-mini",
    # this happens implicitly — the SDK would read OPENAI_API_KEY on its own
    api_key=os.environ['OPENAI_API_KEY']
)
dspy.configure(lm=lm)
Set up DSPy
class Signature(dspy.Signature):
    # The docstring below is the system prompt DSPy sends to the model;
    # its wording is behavioral, not just documentation.
    """
    You are Clod, a coding assistant.
    Try to help the user with their coding requests.
    Don't do anything dangerous!
    """
    request: str = dspy.InputField()   # the user's current message
    history: list = dspy.InputField()  # prior turns, as role/content dicts
    response: str = dspy.OutputField() # the assistant's reply
Create a signature
import subprocess

def run_command(command: str) -> str:
    """
    Run the given shell command.
    I repeat, do not do anything dangerous!
    """
    # shell=True hands the raw string to the shell — that is what makes
    # this tool powerful, and also exactly what makes it dangerous.
    proc = subprocess.run(
        command,
        shell=True,
        capture_output=True,
        text=True,
    )
    if proc.returncode == 0:
        return proc.stdout
    # Non-zero exit: surface stderr so the model can see what went wrong.
    return f"Error: {proc.stderr}"
Make a tool
import subprocess

def run_command(command: str) -> str:
    """
    Run the given shell command.
    I repeat, do not do anything dangerous!
    """
    # CAUTION: shell=True executes whatever string the model produced.
    completed = subprocess.run(command, shell=True, capture_output=True, text=True)
    failed = completed.returncode != 0
    # On failure return stderr prefixed with "Error: "; otherwise stdout.
    return f"Error: {completed.stderr}" if failed else completed.stdout
Make a tool
CAUTION:
THIS TOOL IS VERY DANGEROUS!
DO NOT DO THIS AT HOME!
class Signature(dspy.Signature):
    # The docstring below is the system prompt DSPy sends to the model.
    """
    You are Clod, a coding assistant.
    Try to help the user with their coding requests.
    Don't do anything dangerous!
    """
    request: str = dspy.InputField()   # current user message
    history: list = dspy.InputField()  # prior conversation turns
    response: str = dspy.OutputField() # assistant's reply

# ReAct lets the model interleave reasoning with run_command tool calls
# before committing to a final response.
ask_with_tools = dspy.ReAct(
    signature=Signature,
    tools=[dspy.Tool(run_command)]
)
Create a module
# Interactive REPL: read a request, let the agent act, record both turns.
history = []
while True:
    request = input("User: ")
    # The current request is passed separately; `history` holds only the
    # turns from before this one when the agent runs.
    response = ask_with_tools(request=request, history=history).response
    history.append({"role": "user", "content": request})
    history.append({"role": "assistant", "content": response})
    print(f"\nAssistant: {response}\n")
Put it in a loop
import os
import subprocess
import dspy

# Configure the default LM for all DSPy modules in this process.
lm = dspy.LM(model="openai/gpt-5-nano", temperature=1.0, max_tokens=16_000, reasoning_effort="minimal", api_key=os.environ['OPENAI_API_KEY'])
dspy.configure(lm=lm)

class Signature(dspy.Signature):
    # The docstring is the system prompt DSPy sends to the model.
    """You are Clod, a coding assistant. Try to help the user with their coding requests. Don't do anything dangerous!"""
    request: str = dspy.InputField()   # current user message
    history: list = dspy.InputField()  # prior turns as role/content dicts
    response: str = dspy.OutputField() # assistant's reply

# CAUTION: arbitrary shell execution — the model chooses the commands.
def run_command(command: str) -> str:
    """Run the given shell command. I repeat, do not do anything dangerous!"""
    result = subprocess.run(command, shell=True, capture_output=True, text=True)
    if result.returncode != 0:
        return f"Error: {result.stderr}"  # surface stderr on failure
    return result.stdout

# ReAct agent: may call run_command while reasoning toward a response.
ask_with_tools = dspy.ReAct(signature=Signature, tools=[dspy.Tool(run_command)])

# Chat loop: the current request is passed separately; history holds past turns.
history = []
while True:
    request = input("User: ")
    prediction = ask_with_tools(request=request, history=history)
    response = prediction.response
    history.append({"role": "user", "content": request})
    history.append({"role": "assistant", "content": response})
    print(f"\nAssistant: {response}\n")
Here's the whole thing!
Demo