Decoder.sh

LangChain Fundamentals: Build your First Chain

Description

LangChain is one of the most popular frameworks for building complex LLM-powered logic. It provides a unified interface for invoking, batching, and streaming calls across different LLM providers, and it integrates with vector databases, third-party APIs, and much more. In this video, we explore the very basics of getting started with LangChain - understanding how to build a rudimentary chain complete with templating and an LLM call. Let's go!

Code

# set up environment
python -m venv .venv
source .venv/bin/activate
python -m pip install langchain notebook
# newer LangChain releases split integrations into a separate package;
# if the imports below fail, also run: python -m pip install langchain-community
jupyter notebook # start it up
from langchain_community.llms import Ollama

# assumes the Ollama app is running locally and the model has been pulled (ollama pull llama3)
llm = Ollama(model="llama3")

print(llm.invoke("Tell me a joke"))
# these all generate the same completion, just with different calling styles
print(llm.invoke("Tell me a joke"))
print(llm.batch(["Tell me a joke"]))
for chunk in llm.stream("Tell me a joke!"):
  print(chunk)

# async versions; these need an event loop, which Jupyter provides (so top-level await works here)
print(await llm.ainvoke("Tell me a joke"))
print(await llm.abatch(["Tell me a joke"]))
async for chunk in llm.astream("Tell me a joke!"):
  print(chunk)
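# outside a notebook there's no running event loop, so wrap the calls yourself.
# a minimal sketch using asyncio (not from the video):
import asyncio

async def main():
    print(await llm.ainvoke("Tell me a joke"))

asyncio.run(main())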
# notice that we're importing from "chat_models" instead of "llms"
from langchain_community.chat_models import ChatOllama

chat_llm = ChatOllama(model="llama3")
print(chat_llm.invoke("tell me a joke."))
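# note the return types: the base LLM returns a plain string, while the chat
# model returns an AIMessage (a quick check, not shown in the video)
response = chat_llm.invoke("tell me a joke.")
print(type(response).__name__)  # AIMessage
print(response.content)         # just the text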
from langchain_core.messages import BaseMessage, HumanMessage

print(BaseMessage(content="Hello there", type="human"))
print(HumanMessage(content="Hello there").type)
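# the other common message types follow the same pattern (shown here for reference):
from langchain_core.messages import AIMessage, SystemMessage

print(SystemMessage(content="You are helpful.").type)  # "system"
print(AIMessage(content="Hi there!").type)             # "ai"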
# working with templates
from langchain_core.prompts import SystemMessagePromptTemplate

# create the MessagePromptTemplate
system_prompt_template = SystemMessagePromptTemplate.from_template(
  "You are an expert in {subject}, and your name is {name}."
)
system_prompt = system_prompt_template.format(
  subject="kite surfing",
  name="Steve"
)
print(system_prompt)
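# format() returns an actual SystemMessage, so you can inspect its fields directly
print(system_prompt.content)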
from langchain_core.prompts import ChatPromptTemplate

system_prompt_template = SystemMessagePromptTemplate.from_template(
  "You are a helpful AI and your name is {name}."
)
template = ChatPromptTemplate.from_messages([
    system_prompt_template,
    HumanMessage("Hello, how are you doing?"),
    ("ai", "I'm doing well, thanks!"),
    "{user_input}"
])

prompt_value = template.format(
  name="Bob",
  user_input="What is your name?"
)
print(prompt_value)

# it's also a runnable!
prompt_value = template.invoke(
    {
        "name": "Steve",
        "user_input": "What is your name?"
    }
)
print(prompt_value)
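# a related helper on ChatPromptTemplate: format_messages returns the list of
# message objects instead of a single string
messages = template.format_messages(name="Bob", user_input="What is your name?")
print(messages)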
from langchain_core.prompts import PromptTemplate

# Instantiation using from_template (recommended)
prompt = PromptTemplate.from_template("Say {foo}")
print(prompt.format(foo="hello")) # or invoke - it's a Runnable!

# The template syntax mirrors Python's native string formatting
prompt = "Say {foo}"
print(prompt.format(foo="hello"))
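# prompt templates also support partially filling in variables ahead of time
# (a sketch of the partial() API, not covered in the video):
partial_prompt = PromptTemplate.from_template("Say {foo} to {name}").partial(foo="hello")
print(partial_prompt.format(name="Steve"))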
from langchain_core.prompts import ChatPromptTemplate, SystemMessagePromptTemplate
from langchain_community.chat_models import ChatOllama

system_prompt_template = SystemMessagePromptTemplate.from_template(
  "You are helpful AI and an expert in {subject}. You give short answers."
)
template = ChatPromptTemplate.from_messages([
    system_prompt_template,
    ("human", "{user_input}"),
])
prompt_value = template.format(
  subject="LLMs",
  user_input="What is LangChain?"
)
print(prompt_value)
chat_llm = ChatOllama(model="llama3")

print(chat_llm.invoke(prompt_value))
# These are all equivalent ways of building the same chain
from langchain_core.runnables import RunnableSequence

chain = template | chat_llm
chain = template.pipe(chat_llm)
chain = template.__or__(chat_llm)
chain = RunnableSequence(first=template, last=chat_llm)
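# at this point invoking the chain returns an AIMessage, since the chat model
# is the last step (quick check, not in the video)
print(chain.invoke({"subject": "LLMs", "user_input": "what is langchain?"}))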
# The final chain
from langchain_core.output_parsers import StrOutputParser

chain = template | chat_llm | StrOutputParser()

print(chain.invoke({
  "subject": "LLMs",
  "user_input": "what is langchain?"
})) # now returns a string!
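# the assembled chain is itself a Runnable, so batching and streaming work
# end-to-end as well (a sketch under the same setup):
for chunk in chain.stream({
  "subject": "LLMs",
  "user_input": "what is langchain?"
}):
  print(chunk, end="", flush=True)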