# Copyright (c) Microsoft. All rights reserved.

import asyncio
from typing import Tuple

import semantic_kernel as sk
import semantic_kernel.connectors.ai.google_palm as sk_gp

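# Configure the kernel: a PaLM embedding model backs semantic memory, a PaLM
# chat model generates replies, a volatile (in-memory) store holds the saved
# facts, and TextMemorySkill exposes the `recall` function used in the chat
# prompt template below.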
kernel = sk.Kernel()
apikey = sk.google_palm_settings_from_dot_env()
palm_text_embed = sk_gp.GooglePalmTextEmbedding("models/embedding-gecko-001", apikey)
kernel.add_text_embedding_generation_service("gecko", palm_text_embed)
palm_chat_completion = sk_gp.GooglePalmChatCompletion("models/chat-bison-001", apikey)
kernel.add_chat_service("models/chat-bison-001", palm_chat_completion)
kernel.register_memory_store(memory_store=sk.memory.VolatileMemoryStore())
kernel.import_skill(sk.core_skills.TextMemorySkill())


async def populate_memory(kernel: sk.Kernel) -> None:
    # Add some documents to the semantic memory; each is embedded and stored
    # in the "aboutMe" collection
    await kernel.memory.save_information_async(
        "aboutMe", id="info1", text="My name is Andrea"
    )
    await kernel.memory.save_information_async(
        "aboutMe", id="info2", text="I currently work as a tour guide"
    )
    await kernel.memory.save_information_async(
        "aboutMe", id="info3", text="My favorite hobby is hiking"
    )
    await kernel.memory.save_information_async(
        "aboutMe", id="info4", text="I visited Iceland last year."
    )
    await kernel.memory.save_information_async(
        "aboutMe", id="info5", text="My family is from New York"
    )


async def search_memory_examples(kernel: sk.Kernel) -> None:
    questions = [
        "what's my name",
        "what is my favorite hobby?",
        "where's my family from?",
        "where did I travel last year?",
        "what do I do for work",
    ]

    for question in questions:
        print(f"Question: {question}")
        result = await kernel.memory.search_async("aboutMe", question)
        # search_async returns a list of matches ranked by relevance;
        # print the text of the closest one
        print(f"Answer: {result[0].text}\n")


async def setup_chat_with_memory(
    kernel: sk.Kernel,
) -> Tuple[sk.SKFunctionBase, sk.SKContext]:
    """
    When using Google PaLM to chat with memories, a chat prompt template is
    essential; otherwise, the kernel will send text prompts to the Google PaLM
    chat service. Unfortunately, when a text prompt includes memory, chat
    history, and the user's current message, PaLM often struggles to comprehend
    the user's message. To address this issue, the prompt containing memory is
    incorporated into the chat prompt template as a system message.
    Note that this is only an issue for the chat service; the text service
    does not require a chat prompt template.
    """
    sk_prompt = """
    ChatBot can have a conversation with you about any topic.
    It can give explicit instructions or say 'I don't know' if
    it does not have an answer.

    Information about me, from previous conversations:
    - {{$fact1}} {{recall $fact1}}
    - {{$fact2}} {{recall $fact2}}
    - {{$fact3}} {{recall $fact3}}
    - {{$fact4}} {{recall $fact4}}
    - {{$fact5}} {{recall $fact5}}

    """.strip()
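
    # Each "{{recall $factN}}" token invokes TextMemorySkill's recall function
    # when the prompt is rendered: it searches the memory collection named by
    # COLLECTION_PARAM (set below) for text relevant to the question held in
    # the $factN variable, subject to the RELEVANCE_PARAM threshold.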

    prompt_config = sk.PromptTemplateConfig.from_completion_parameters(
        max_tokens=2000, temperature=0.7, top_p=0.8
    )
    prompt_template = sk.ChatPromptTemplate(  # Create the chat prompt template
        "{{$user_input}}", kernel.prompt_template_engine, prompt_config
    )
    prompt_template.add_system_message(sk_prompt)  # Add the memory as a system message
    function_config = sk.SemanticFunctionConfig(prompt_config, prompt_template)
    chat_func = kernel.register_semantic_function(
        None, "ChatWithMemory", function_config
    )
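    # Skill name None registers the function under the kernel's global
    # (skill-less) collection, so it can be invoked without a skill prefix.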

    context = kernel.create_new_context()
    context["fact1"] = "what is my name?"
    context["fact2"] = "what is my favorite hobby?"
    context["fact3"] = "where's my family from?"
    context["fact4"] = "where did I travel last year?"
    context["fact5"] = "what do I do for work?"

    # Point recall at the "aboutMe" collection and require a minimum
    # relevance score of 0.6 for a memory to be included
    context[sk.core_skills.TextMemorySkill.COLLECTION_PARAM] = "aboutMe"
    context[sk.core_skills.TextMemorySkill.RELEVANCE_PARAM] = 0.6

    context["chat_history"] = ""

    return chat_func, context
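

# A minimal sketch of a single, non-interactive turn (run inside an async
# function; the user input here is hypothetical and assumes populate_memory
# has already been awaited):
#
#     chat_func, context = await setup_chat_with_memory(kernel)
#     context["user_input"] = "What island did I visit?"
#     answer = await kernel.run_async(chat_func, input_vars=context.variables)
#     print(answer)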


async def chat(
    kernel: sk.Kernel, chat_func: sk.SKFunctionBase, context: sk.SKContext
) -> bool:
    try:
        user_input = input("User:> ")
        context["user_input"] = user_input
    except KeyboardInterrupt:
        print("\n\nExiting chat...")
        return False
    except EOFError:
        print("\n\nExiting chat...")
        return False

    if user_input == "exit":
        print("\n\nExiting chat...")
        return False

    answer = await kernel.run_async(chat_func, input_vars=context.variables)
    context["chat_history"] += f"\nUser:> {user_input}\nChatBot:> {answer}\n"

    print(f"ChatBot:> {answer}")
    return True


async def main() -> None:
    await populate_memory(kernel)
    await search_memory_examples(kernel)
    chat_func, context = await setup_chat_with_memory(kernel)
    print("Begin chatting (type 'exit' to exit):\n")
    chatting = True
    while chatting:
        chatting = await chat(kernel, chat_func, context)


if __name__ == "__main__":
    asyncio.run(main())