tool-call: promote getting chat templates w/ dedicated script rather than rely on test resources

commit 9e502e89a5
parent f3538e755b
@@ -10,7 +10,7 @@
 # Nous Hermes 2 Pro Llama 3 8B
 ./llama-server --jinja -fa --verbose \
     -hfr NousResearch/Hermes-2-Pro-Llama-3-8B-GGUF -hff Hermes-2-Pro-Llama-3-8B-Q8_0.gguf \
-    --chat-template-file tests/chat/templates/NousResearch-Hermes-2-Pro-Llama-3-8B-tool_use.jinja
+    --chat-template "$( python scripts/get_hf_chat_template.py NousResearch/Hermes-2-Pro-Llama-3-8B tool_use )"

 # Llama 3.1 8B
 ./llama-server --jinja -fa --verbose \
@@ -23,25 +23,25 @@
 # functionary-small-v3
 ./llama-server --jinja -fa --verbose \
     -hfr meetkai/functionary-small-v3.2-GGUF -hff functionary-small-v3.2.Q4_0.gguf \
-    --chat-template-file tests/chat/templates/meetkai-functionary-medium-v3.2.jinja
+    --chat-template "$( python scripts/get_hf_chat_template.py meetkai/functionary-medium-v3.2 )"

 ./llama-server --jinja -fa --verbose \
     -m ~/Downloads/functionary-small-v3.2.Q4_0.gguf \
-    --chat-template-file tests/chat/templates/meetkai-functionary-medium-v3.2.jinja
+    --chat-template "$( python scripts/get_hf_chat_template.py meetkai/functionary-medium-v3.2 )"

 # Llama 3.2 3B (poor adherence)
 ./llama-server --jinja -fa --verbose \
     -hfr lmstudio-community/Llama-3.2-3B-Instruct-GGUF -hff Llama-3.2-3B-Instruct-Q6_K_L.gguf \
-    --chat-template-file tests/chat/templates/meta-llama-Llama-3.2-3B-Instruct.jinja
+    --chat-template "$( python scripts/get_hf_chat_template.py meta-llama/Llama-3.2-3B-Instruct )"

 ./llama-server --jinja -fa --verbose \
     -m ~/Downloads/Llama-3.2-3B-Instruct-Q6_K_L.gguf \
-    --chat-template-file tests/chat/templates/meta-llama-Llama-3.2-3B-Instruct.jinja
+    --chat-template "$( python scripts/get_hf_chat_template.py meta-llama/Llama-3.2-3B-Instruct )"

 # Llama 3.2 1B (very poor adherence)
 ./llama-server --jinja -fa --verbose \
     -hfr lmstudio-community/Llama-3.2-1B-Instruct-GGUF -hff Llama-3.2-1B-Instruct-Q4_K_M.gguf \
-    --chat-template-file tests/chat/templates/meta-llama-Llama-3.2-3B-Instruct.jinja
+    --chat-template "$( python scripts/get_hf_chat_template.py meta-llama/Llama-3.2-3B-Instruct )"
 ```

 - Run the tools in [examples/agent/tools](./examples/agent/tools) inside a docker container (check http://localhost:8088/docs once running):
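The change above swaps checked-in test fixtures for a shell command substitution: the chat template is fetched from the Hugging Face Hub when the command runs and passed inline via `--chat-template`. In the common single-template case, the helper script added below (sketched here under the assumption that `huggingface_hub` is installed) boils down to:

```python
# Minimal sketch of the single-template case; the full script below also
# handles gated repos, a known-malformed config, and named template variants.
import json
from huggingface_hub import hf_hub_download

path = hf_hub_download(repo_id="meta-llama/Llama-3.2-3B-Instruct",
                       filename="tokenizer_config.json")
with open(path) as f:
    print(json.load(f)["chat_template"])
```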
scripts/get_hf_chat_template.py (new file, 69 lines)
@@ -0,0 +1,69 @@
+'''
+    Fetches the Jinja chat template of a HuggingFace model.
+    If a model has multiple chat templates, you can specify the variant name.
+
+    Syntax:
+        get_hf_chat_template.py model_id [variant]
+
+    Examples:
+        python ./scripts/get_hf_chat_template.py NousResearch/Meta-Llama-3-8B-Instruct
+        python ./scripts/get_hf_chat_template.py NousResearch/Hermes-3-Llama-3.1-70B tool_use
+        python ./scripts/get_hf_chat_template.py meta-llama/Llama-3.2-3B-Instruct
+'''
+
+import json
+import re
+import sys
+
+
+def main(args):
+    if len(args) < 1:
+        raise ValueError("Please provide a model ID and an optional variant name")
+    model_id = args[0]
+    variant = None if len(args) < 2 else args[1]
+
+    try:
+        # Use huggingface_hub library if available.
+        # Allows access to gated models if the user has access and ran `huggingface-cli login`.
+        from huggingface_hub import hf_hub_download
+        with open(hf_hub_download(repo_id=model_id, filename="tokenizer_config.json")) as f:
+            config_str = f.read()
+    except ImportError:
+        import requests
+        assert re.match(r"^[\w.-]+/[\w.-]+$", model_id), f"Invalid model ID: {model_id}"
+        response = requests.get(f"https://huggingface.co/{model_id}/resolve/main/tokenizer_config.json")
+        if response.status_code == 401:
+            raise Exception('Access to this model is gated, please request access, authenticate with `huggingface-cli login` and make sure to run `pip install huggingface_hub`')
+        response.raise_for_status()
+        config_str = response.text
+
+    try:
+        config = json.loads(config_str)
+    except json.JSONDecodeError:
+        # Fix https://huggingface.co/NousResearch/Meta-Llama-3-8B-Instruct/blob/main/tokenizer_config.json
+        # (Remove extra '}' near the end of the file)
+        config = json.loads(re.sub(r'\}([\n\s]*\}[\n\s]*\],[\n\s]*"clean_up_tokenization_spaces")', r'\1', config_str))
+
+    chat_template = config['chat_template']
+    if isinstance(chat_template, str):
+        print(chat_template, end=None)
+    else:
+        variants = {
+            ct['name']: ct['template']
+            for ct in chat_template
+        }
+        format_variants = lambda: ', '.join(f'"{v}"' for v in variants.keys())
+
+        if variant is None:
+            if 'default' not in variants:
+                raise Exception(f'Please specify a chat template variant (one of {format_variants()})')
+            variant = 'default'
+            print(f'Note: picked "default" chat template variant (out of {format_variants()})', file=sys.stderr)
+        elif variant not in variants:
+            raise Exception(f"Variant {variant} not found in chat template (found {format_variants()})")
+
+        print(variants[variant], end=None)
+
+
+if __name__ == '__main__':
+    main(sys.argv[1:])
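For context on the script's optional variant argument: `chat_template` in `tokenizer_config.json` is either a single Jinja string or a list of named variants. A sketch of the two shapes the script distinguishes (illustrative values, template bodies elided):

```python
# Shape 1: a single template used for everything.
single = {"chat_template": "{% for message in messages %}...{% endfor %}"}

# Shape 2: named variants; the `variant` CLI argument selects one,
# falling back to "default" when no variant is given.
multi = {
    "chat_template": [
        {"name": "default",  "template": "{# ... #}"},
        {"name": "tool_use", "template": "{# ... #}"},
    ]
}
```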
scripts/update_jinja_goldens.py
@@ -10,7 +10,7 @@
 Fetches the Jinja2 templates of a few known models and uses them to generate prompt goldens for a few predefined chat contexts.

 Examples:
-    python ./tests/update_jinja_goldens.py
+    python ./scripts/update_jinja_goldens.py

 https://github.com/huggingface/transformers/blob/main/src/transformers/utils/chat_template_utils.py
 '''