Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-12-27 03:44:35 +00:00)
tts : add header + minor fixes
ggml-ci
parent eabc921355
commit a92f4137fb
@@ -297,7 +297,7 @@ class Model:
             for new_name, data_torch in (self.modify_tensors(data_torch, name, bid)):
                 # TODO: why do we squeeze here?
-                #data = data_torch.squeeze().numpy()
+                # data = data_torch.squeeze().numpy()
                 data = data_torch.numpy()
 
                 # if data ends up empty, it means data_torch was a scalar tensor -> restore
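As a side note on the commented-out squeeze() and the "restore" comment above: here is a small sketch of the corner case the converter guards against. This is my own illustration, not code from the commit; the variable names mirror the hunk.

# Sketch (assumption, not from the commit): squeezing a one-element tensor
# yields a 0-dim array, i.e. "data ends up empty", so the converter falls
# back to the un-squeezed data to keep the original shape.
import torch

data_torch = torch.tensor([3.14])     # scalar-like tensor, shape (1,)

data = data_torch.squeeze().numpy()   # squeeze drops the only axis -> shape ()
print(data.shape)                     # ()
print(len(data.shape) == 0)           # True -> treated as a scalar tensor

data = data_torch.numpy()             # restore: keep the original shape (1,)
print(data.shape)                     # (1,)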
@@ -6,6 +6,7 @@
 
+#include <algorithm>
 #include <cstdio>
 #include <cmath>
 #include <string>
 #include <vector>
 #include <fstream>
@@ -354,9 +355,9 @@ int main(int argc, char ** argv) {
     // TODO: not sure if this is correct
     {
+        std::string prompt_clean;
         std::string prompt_user = params.prompt;
-        std::string prompt_clean = "";
         //string_replace_all(prompt_user, " ", "<|text_sep|>");
 
         for (auto & c : prompt_user) {
             if (c == ' ') {
                 prompt_clean += "<|text_sep|>";
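For context, the loop in this hunk rewrites the user prompt before tokenization: every space becomes the "<|text_sep|>" token. A minimal sketch of that substitution, written in Python for brevity; the clean_prompt helper name and the pass-through of non-space characters are my assumptions, not part of the commit.

# Sketch (not from the commit) of what the C++ loop does: spaces in the user
# prompt are replaced by the "<|text_sep|>" marker, other characters pass through.
def clean_prompt(prompt_user: str) -> str:
    prompt_clean = ""
    for c in prompt_user:
        prompt_clean += "<|text_sep|>" if c == " " else c
    return prompt_clean

print(clean_prompt("hello world"))  # hello<|text_sep|>world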