{ "cells": [ { "cell_type": "code", "execution_count": null, "id": "1ca7b759", "metadata": {}, "outputs": [], "source": [ "!pip install -q git+https://github.com/srush/MiniChain\n", "!git clone https://github.com/srush/MiniChain; cp -fr MiniChain/examples/* . " ] }, { "cell_type": "code", "execution_count": null, "id": "cc3f2a69", "metadata": { "lines_to_next_cell": 0, "tags": [ "hide_inp" ] }, "outputs": [], "source": [ "desc = \"\"\"\n", "### Chat\n", "\n", "A chat-like example for multi-turn chat with state. [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/srush/MiniChain/blob/master/examples/chat.ipynb)\n", "\n", "(Adapted from [LangChain](https://langchain.readthedocs.io/en/latest/modules/memory/examples/chatgpt_clone.html)'s version of this [blog post](https://www.engraved.blog/building-a-virtual-machine-inside/).)\n", "\n", "\"\"\"" ] }, { "cell_type": "code", "execution_count": null, "id": "1ca0a1eb", "metadata": { "lines_to_next_cell": 2 }, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "f4ac1417", "metadata": {}, "outputs": [], "source": [ "from dataclasses import dataclass, replace\n", "from typing import List, Tuple\n", "from minichain import OpenAI, prompt, show" ] }, { "cell_type": "markdown", "id": "56c8ab3d", "metadata": {}, "source": [ "Generic stateful Memory" ] }, { "cell_type": "code", "execution_count": null, "id": "7c3ffeaa", "metadata": { "lines_to_next_cell": 1 }, "outputs": [], "source": [ "MEMORY = 2" ] }, { "cell_type": "code", "execution_count": null, "id": "c309a94f", "metadata": { "lines_to_next_cell": 1 }, "outputs": [], "source": [ "@dataclass\n", "class State:\n", " memory: List[Tuple[str, str]]\n", " human_input: str = \"\"\n", "\n", " def push(self, response: str) -> \"State\":\n", " memory = self.memory if len(self.memory) < MEMORY else self.memory[1:]\n", " return State(memory + [(self.human_input, response)])" ] }, { "cell_type": "markdown", "id": "4c9a82f1", "metadata": {}, "source": [ "Chat prompt with memory" ] }, { "cell_type": "code", "execution_count": null, "id": "279179dd", "metadata": {}, "outputs": [], "source": [ "@prompt(OpenAI(), template_file=\"chat.pmpt.tpl\")\n", "def chat_prompt(model, state: State) -> State:\n", " out = model(state)\n", " result = out.split(\"Assistant:\")[-1]\n", " return state.push(result)" ] }, { "cell_type": "code", "execution_count": null, "id": "2d94b22d", "metadata": {}, "outputs": [], "source": [ "examples = [\n", " \"ls ~\",\n", " \"cd ~\",\n", " \"{Please make a file jokes.txt inside and put some jokes inside}\",\n", " \"\"\"echo -e \"x=lambda y:y*5+3;print('Result:' + str(x(6)))\" > run.py && python3 run.py\"\"\",\n", " \"\"\"echo -e \"print(list(filter(lambda x: all(x%d for d in range(2,x)),range(2,3**10)))[:10])\" > run.py && python3 run.py\"\"\",\n", " \"\"\"echo -e \"echo 'Hello from Docker\" > entrypoint.sh && echo -e \"FROM ubuntu:20.04\\nCOPY entrypoint.sh entrypoint.sh\\nENTRYPOINT [\\\"/bin/sh\\\",\\\"entrypoint.sh\\\"]\">Dockerfile && docker build . 
-t my_docker_image && docker run -t my_docker_image\"\"\",\n", " \"nvidia-smi\"\n", "]" ] }, { "cell_type": "code", "execution_count": null, "id": "d77406cf", "metadata": {}, "outputs": [], "source": [ "gradio = show(lambda command, state: chat_prompt(replace(state, human_input=command)),\n", " initial_state=State([]),\n", " subprompts=[chat_prompt],\n", " examples=examples,\n", " out_type=\"json\",\n", " description=desc,\n", ")\n", "if __name__ == \"__main__\":\n", " gradio.launch()" ] }, { "cell_type": "code", "execution_count": null, "id": "bd255c7b", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "jupytext": { "cell_metadata_filter": "tags,-all", "main_language": "python", "notebook_metadata_filter": "-all" } }, "nbformat": 4, "nbformat_minor": 5 }
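  {
   "cell_type": "markdown",
   "id": "3e8f0a2b",
   "metadata": {},
   "source": [
    "A quick sanity check of the rolling memory window (an illustrative sketch, not part of the original example): calling `State.push` directly, with no model involved, should keep only the last `MEMORY` turns."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9f4d1c6e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Illustration only: exercise the memory window without any model calls.\n",
    "s = State([])\n",
    "for i in range(4):\n",
    "    s = replace(s, human_input=f\"command {i}\")\n",
    "    s = s.push(f\"response {i}\")\n",
    "# With MEMORY = 2, only the two most recent turns survive.\n",
    "print(s.memory)"
   ]
  }
 ],
 "metadata": {
  "jupytext": {
   "cell_metadata_filter": "tags,-all",
   "main_language": "python",
   "notebook_metadata_filter": "-all"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}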