@@ -13,9 +13,9 @@ class ConversationSummarySkill:
     """
 
     # The max tokens to process in a single semantic function call.
-    MaxTokens = 1024
+    _max_tokens = 1024
 
-    SummarizeConversationPromptTemplate = (
+    _summarize_conversation_prompt_template = (
         "BEGIN CONTENT TO SUMMARIZE:\n "
         "{{" + "$INPUT" + "}}\n "
         "END CONTENT TO SUMMARIZE.\n "
@@ -28,10 +28,10 @@ class ConversationSummarySkill:
 
     def __init__(self, kernel: Kernel):
         self._summarizeConversationFunction = kernel.create_semantic_function(
-            ConversationSummarySkill.SummarizeConversationPromptTemplate,
+            ConversationSummarySkill._summarize_conversation_prompt_template,
             skill_name=ConversationSummarySkill.__name__,
             description="Given a section of a conversation transcript, summarize the part of the conversation.",
-            max_tokens=ConversationSummarySkill.MaxTokens,
+            max_tokens=ConversationSummarySkill._max_tokens,
             temperature=0.1,
             top_p=0.5,
         )
@@ -52,10 +52,10 @@ async def summarize_conversation_async(
         :return: SKContext with the summarized conversation result.
         """
         lines = text_chunker._split_text_lines(
-            input, ConversationSummarySkill.MaxTokens, True
+            input, ConversationSummarySkill._max_tokens, True
         )
         paragraphs = text_chunker._split_text_paragraph(
-            lines, ConversationSummarySkill.MaxTokens
+            lines, ConversationSummarySkill._max_tokens
         )
 
         return await aggregate_chunked_results_async(
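
For context, a minimal usage sketch of the renamed members (not part of this commit). It assumes the skill is importable from semantic_kernel.core_skills, that summarize_conversation_async takes the transcript plus an SKContext, and that a text-completion service has been registered on the kernel; the exact module path and helper names may differ.

# Hypothetical usage sketch -- module path, context helper, and service setup
# are assumptions, not part of this diff.
import asyncio

import semantic_kernel as sk
from semantic_kernel.core_skills import ConversationSummarySkill  # assumed path


async def main() -> None:
    kernel = sk.Kernel()
    # A text-completion service (OpenAI, Azure OpenAI, ...) must be registered
    # on the kernel before the semantic function created in __init__ can run;
    # the connector setup is omitted here.

    skill = ConversationSummarySkill(kernel)

    transcript = (
        "Alice: Can we ship the release on Friday?\n"
        "Bob: Only if QA signs off by Thursday.\n"
    )

    # summarize_conversation_async chunks the transcript with text_chunker so
    # that each semantic-function call stays under _max_tokens, then aggregates
    # the per-chunk summaries into a single result.
    context = await skill.summarize_conversation_async(
        transcript, kernel.create_new_context()  # assumed SKContext factory
    )
    print(context.result)


asyncio.run(main())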