mirror of https://github.com/ggerganov/llama.cpp.git (synced 2025-01-13 04:00:16 +00:00)

commit cd80fce5e8 "eol fix"
parent 69c97bbead
@@ -9,8 +9,8 @@ repos:
     -   id: end-of-file-fixer
     -   id: check-yaml
     -   id: check-added-large-files
--   repo: https://github.com/PyCQA/flake8
-    rev: 7.0.0
-    hooks:
-    -   id: flake8
-        additional_dependencies: [flake8-no-print]
+# - repo: https://github.com/PyCQA/flake8
+#   rev: 7.0.0
+#   hooks:
+#   - id: flake8
+#     additional_dependencies: [flake8-no-print]
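The hunk above (evidently from .pre-commit-config.yaml) swaps the active flake8 entry for a commented-out copy. The additional_dependencies line pulls the flake8-no-print plugin into the flake8 hook; as the name suggests, it rejects bare print() calls. A minimal sketch of the kind of code such a hook is meant to flag (the names below are hypothetical, not from the repository):

    import logging

    logger = logging.getLogger(__name__)

    def report(msg: str) -> None:
        print(msg)          # flagged by flake8-no-print: bare print() call
        logger.info(msg)    # preferred: route output through logging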
@@ -117,4 +117,4 @@ Feature: llama.cpp server
     Given available models
     Then 1 models are supported
     Then model 0 is identified by tinyllama-2
-    Then model 0 is trained on 128 tokens context
\ No newline at end of file
+    Then model 0 is trained on 128 tokens context
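This second hunk is the eol fix itself: the last scenario step is removed and re-added with identical text, which is how a diff renders a file gaining its missing trailing newline. That is the same condition the end-of-file-fixer hook in the first hunk enforces. A minimal sketch of such a check in Python (an illustration of the idea, not pre-commit's implementation):

    import sys
    from pathlib import Path

    def ends_with_newline(path: str) -> bool:
        # An empty file is conventionally considered fine.
        data = Path(path).read_bytes()
        return data.endswith(b"\n") if data else True

    if __name__ == "__main__":
        missing = [p for p in sys.argv[1:] if not ends_with_newline(p)]
        for p in missing:
            sys.stderr.write(p + ": missing newline at end of file\n")
        sys.exit(1 if missing else 0)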