|
| 1 | +name: "hermes-2-pro-mistral" |
| 2 | +icon: https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/ggO2sBDJ8Bhc6w-zwTx5j.png |
| 3 | +license: apache-2.0 |
| 4 | + |
| 5 | +description: | |
| 6 | + Hermes 2 Pro is an upgraded, retrained version of Nous Hermes 2, consisting of an updated and cleaned version of the OpenHermes 2.5 Dataset, as well as a newly introduced Function Calling and JSON Mode dataset developed in-house. |
| 7 | +
|
| 8 | + This new version of Hermes maintains its excellent general task and conversation capabilities - but also excels at Function Calling, JSON Structured Outputs, and has improved on several other metrics as well, scoring a 90% on our function calling evaluation built in partnership with Fireworks.AI, and an 81% on our structured JSON Output evaluation. |
| 9 | +
|
| 10 | + Hermes Pro takes advantage of a special system prompt and multi-turn function calling structure with a new chatml role in order to make function calling reliable and easy to parse. Learn more about prompting below. |
| 11 | +
|
| 12 | + This work was a collaboration between Nous Research, @interstellarninja, and Fireworks.AI |
| 13 | +
|
| 14 | + Learn more about the function calling on our github repo here: https://github.com/NousResearch/Hermes-Function-Calling/tree/main |
| 15 | +
|
| 16 | +urls: |
| 17 | +- https://huggingface.co/NousResearch/Hermes-2-Pro-Mistral-7B-GGUF |
| 18 | + |
| 19 | +tags: |
| 20 | +- llm |
| 21 | +- gguf |
| 22 | +- gpu |
| 23 | +- cpu |
| 24 | + |
| 25 | +config_file: | |
| 26 | + mmap: true |
| 27 | + parameters: |
| 28 | + model: Hermes-2-Pro-Mistral-7B.Q6_K.gguf |
| 29 | +
|
| 30 | + template: |
| 31 | + chat_message: | |
| 32 | + <|im_start|>{{if eq .RoleName "assistant"}}assistant{{else if eq .RoleName "system"}}system{{else if eq .RoleName "tool"}}tool{{else if eq .RoleName "user"}}user{{end}} |
| 33 | + {{- if .FunctionCall }} |
| 34 | + <tool_call> |
| 35 | + {{- else if eq .RoleName "tool" }} |
| 36 | + <tool_response> |
| 37 | + {{- end }} |
| 38 | + {{- if .Content}} |
| 39 | + {{.Content }} |
| 40 | + {{- end }} |
| 41 | + {{- if .FunctionCall}} |
| 42 | + {{toJson .FunctionCall}} |
| 43 | + {{- end }} |
| 44 | + {{- if .FunctionCall }} |
| 45 | + </tool_call> |
| 46 | + {{- else if eq .RoleName "tool" }} |
| 47 | + </tool_response> |
| 48 | + {{- end }} |
| 49 | + <|im_end|> |
| 50 | + # https://huggingface.co/NousResearch/Hermes-2-Pro-Mistral-7B-GGUF#prompt-format-for-function-calling |
| 51 | + function: | |
| 52 | + <|im_start|>system |
| 53 | + You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. Here are the available tools: |
| 54 | + <tools> |
| 55 | + {{range .Functions}} |
| 56 | + {'type': 'function', 'function': {'name': '{{.Name}}', 'description': '{{.Description}}', 'parameters': {{toJson .Parameters}} }} |
| 57 | + {{end}} |
| 58 | + </tools> |
| 59 | + Use the following pydantic model json schema for each tool call you will make: |
| 60 | + {'title': 'FunctionCall', 'type': 'object', 'properties': {'arguments': {'title': 'Arguments', 'type': 'object'}, 'name': {'title': 'Name', 'type': 'string'}}, 'required': ['arguments', 'name']} |
| 61 | + For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows: |
| 62 | + <tool_call> |
| 63 | + {'arguments': <args-dict>, 'name': <function-name>} |
| 64 | + </tool_call> |
| 65 | + <|im_end|> |
| 66 | + {{.Input -}} |
| 67 | + <|im_start|>assistant |
| 68 | + <tool_call> |
| 69 | + chat: | |
| 70 | + {{.Input -}} |
| 71 | + <|im_start|>assistant |
| 72 | + completion: | |
| 73 | + {{.Input}} |
| 74 | + context_size: 4096 |
| 75 | + f16: true |
| 76 | + stopwords: |
| 77 | + - <|im_end|> |
| 78 | + - <dummy32000> |
| 79 | + - "\n</tool_call>" |
| 80 | + - "\n\n\n" |
| 81 | +
|
0 commit comments