LangChain 是一个用于构建语言模型应用的强大框架,它支持将多个“链”(Chain)组合起来进行复杂的推理任务。所谓“链式调用”,就是将多个处理步骤像流水线一样依次串联,每个步骤的输出作为下一个步骤的输入,这种模式非常适合需要多个子任务协同完成的复杂场景。
1. 顺序执行
"""Demo of sequential chain execution: RunnableSequence and the pipe operator."""
from dotenv import load_dotenv
from langchain_core.runnables import RunnableLambda, RunnableSequence

# Load API credentials (e.g. the DeepSeek key) before any model client is built.
load_dotenv('llm.env')

from langchain.prompts import PromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_deepseek import ChatDeepSeek


def demo01():
    """Pipe two arithmetic steps: each step's output feeds the next one."""
    add_hundred = RunnableLambda(lambda value: value + 100)
    add_two_hundred = RunnableLambda(lambda value: value + 200)

    # Explicit sequence object.
    pipeline = RunnableSequence(add_hundred, add_two_hundred)
    print(pipeline.invoke(100))

    # Equivalent composition via the pipe operator.
    pipeline = add_hundred | add_two_hundred
    print(pipeline.invoke(100))


def demo02():
    """Sequential LCEL chain: prompt -> chat model -> string output parser."""
    model = ChatDeepSeek(model='deepseek-chat')
    template = '你是一个情感分类专家,请将下面的文本进行情感分类,只能输出"好评"或者"差评"。\n{inputs}'
    prompt = PromptTemplate.from_template(template)
    sentiment_chain = prompt | model | StrOutputParser()
    print(sentiment_chain.invoke('我今天走在路上,被人泼了一身水。'))


if __name__ == '__main__':
    demo01()
    demo02()
2. 并行执行
"""Demo of parallel chain execution with RunnableParallel."""
from dotenv import load_dotenv
from langchain_core.runnables import RunnableLambda, RunnableParallel

# Load API credentials before constructing any model client.
load_dotenv('llm.env')

from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_deepseek import ChatDeepSeek


def demo01():
    """Apply two case transforms to the same input simultaneously."""
    lower_step = RunnableLambda(lambda text: text.lower())
    upper_step = RunnableLambda(lambda text: text.upper())

    # Keyword form; the dict form is equivalent:
    # fan_out = RunnableParallel({'lower': lower_step, 'upper': upper_step})
    fan_out = RunnableParallel(lower=lower_step, upper=upper_step)
    print(fan_out.invoke('heLLo'))


def demo02():
    """Generate a joke and a poem about the same topic in parallel."""
    deepseek = ChatDeepSeek(model='deepseek-chat')
    joke_chain = (
        ChatPromptTemplate.from_template('将一个关于 {topic} 的简短笑话。')
        | deepseek
        | StrOutputParser()
    )
    poem_chain = (
        ChatPromptTemplate.from_template('写一个关于 {topic} 的七言诗歌。')
        | deepseek
        | StrOutputParser()
    )
    map_chain = RunnableParallel(joke=joke_chain, poem=poem_chain)
    result = map_chain.invoke('莲花')
    print(result['joke'])
    print('-' * 50)
    print(result['poem'])


if __name__ == '__main__':
    demo01()
    # demo02()
3. 分支执行
"""Demo of conditional (branching) chain execution."""
from dotenv import load_dotenv

# Load API credentials before constructing the model client below.
load_dotenv('llm.env')

from langchain.prompts import PromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnableLambda
from langchain_deepseek import ChatDeepSeek

model = ChatDeepSeek(model='deepseek-chat')

# Classifier chain: labels the user sentence as `中英翻译` or `英中翻译`.
cls_chain = (
    PromptTemplate.from_template('请将下面用户输入的句子,将其分类为 `中英翻译` 或者 `英中翻译`,不要输出任何其他解释文本。\n{sentence}')
    | model
    | StrOutputParser()
)

# Candidate sub-chains, one per classification outcome.
cn_en = PromptTemplate.from_template('请将下面的句子翻译成英文,不要输出任何其他解释文本。:\n{sentence}') | model
en_cn = PromptTemplate.from_template('请将下面的句子翻译成中文,不要输出任何其他解释文本。:\n{sentence}') | model
default = PromptTemplate.from_template('请回答下面的问题,不要输出任何其他解释文本。:\n{sentence}') | model


def debug_step(label):
    """Return a pass-through runnable that prints the intermediate value."""
    # `print(...) or x` logs the payload, then forwards it unchanged.
    return RunnableLambda(lambda x: print(f"[DEBUG] {label}: {x}") or x)


def demo01():
    """Branch by routing through a plain function wrapped in RunnableLambda."""

    def route(info):
        # Dispatch on the classifier's label; fall back to plain Q&A.
        if '中英翻译' in info['topic']:
            return cn_en
        if '英中翻译' in info['topic']:
            return en_cn
        return default

    chain = (
        {'topic': cls_chain, 'sentence': lambda x: x['sentence']}
        | debug_step('After branch execution')
        | RunnableLambda(route)
        | StrOutputParser()
    )
    for sentence in ('I Love You!', '我爱你!', 'Как тебя зовут?'):
        print(chain.invoke({'sentence': sentence}))


from langchain_core.runnables import RunnableBranch


def demo02():
    """Branch with the built-in RunnableBranch (predicate, runnable) pairs."""
    branch = RunnableBranch(
        (lambda x: '中英翻译' in x['topic'], cn_en),
        (lambda x: '英中翻译' in x['topic'], en_cn),
        default,  # used when no predicate matched
    )
    chain = (
        {'topic': cls_chain, 'sentence': lambda x: x['sentence']}
        | debug_step('After branch execution')
        | branch
        | StrOutputParser()
    )
    for sentence in ('I Love You!', '我爱你!', 'Как тебя зовут?'):
        print(chain.invoke({'sentence': sentence}))


if __name__ == '__main__':
    # demo01()
    demo02()