- #if defined(_MSC_VER)
- #define _SILENCE_CXX17_CODECVT_HEADER_DEPRECATION_WARNING
- #endif
- #include "ggml.h"
- #include "gguf.h"
- #include "common.h"
- #include "log.h"
- #include "llama.h"
- #include <algorithm>
- #include <cinttypes>
- #include <climits>
- #include <cmath>
- #include <codecvt>
- #include <cstdarg>
- #include <cstring>
- #include <ctime>
- #include <filesystem>
- #include <fstream>
- #include <iostream>
- #include <iterator>
- #include <regex>
- #include <sstream>
- #include <string>
- #include <thread>
- #include <unordered_map>
- #include <unordered_set>
- #include <vector>
- #if defined(__APPLE__) && defined(__MACH__)
- #include <sys/types.h>
- #include <sys/sysctl.h>
- #endif
- #if defined(_WIN32)
- #define WIN32_LEAN_AND_MEAN
- #ifndef NOMINMAX
- # define NOMINMAX
- #endif
- #include <locale>
- #include <windows.h>
- #include <fcntl.h>
- #include <io.h>
- #else
- #include <sys/ioctl.h>
- #include <sys/stat.h>
- #include <unistd.h>
- #endif
- #if defined(_MSC_VER)
- #pragma warning(disable: 4244 4267) // possible loss of data
- #endif
- //
- // CPU utils
- //
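- // Returns the number of physical CPU cores on Linux, macOS and Windows,
- // falling back to a heuristic based on std::thread::hardware_concurrency()
- // when the platform-specific query is unavailable or fails.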
- int32_t cpu_get_num_physical_cores() {
- #ifdef __linux__
- // enumerate the set of thread siblings; the number of unique entries is the number of physical cores
- std::unordered_set<std::string> siblings;
- for (uint32_t cpu=0; cpu < UINT32_MAX; ++cpu) {
- std::ifstream thread_siblings("/sys/devices/system/cpu/cpu"
- + std::to_string(cpu) + "/topology/thread_siblings");
- if (!thread_siblings.is_open()) {
- break; // no more cpus
- }
- std::string line;
- if (std::getline(thread_siblings, line)) {
- siblings.insert(line);
- }
- }
- if (!siblings.empty()) {
- return static_cast<int32_t>(siblings.size());
- }
- #elif defined(__APPLE__) && defined(__MACH__)
- int32_t num_physical_cores;
- size_t len = sizeof(num_physical_cores);
- int result = sysctlbyname("hw.perflevel0.physicalcpu", &num_physical_cores, &len, NULL, 0);
- if (result == 0) {
- return num_physical_cores;
- }
- result = sysctlbyname("hw.physicalcpu", &num_physical_cores, &len, NULL, 0);
- if (result == 0) {
- return num_physical_cores;
- }
- #elif defined(_WIN32) && (_WIN32_WINNT >= 0x0601) && !defined(__MINGW64__) // windows 7 and later
- // TODO: windows + arm64 + mingw64
- unsigned int n_threads_win = std::thread::hardware_concurrency();
- unsigned int default_threads = n_threads_win > 0 ? (n_threads_win <= 4 ? n_threads_win : n_threads_win / 2) : 4;
- DWORD buffer_size = 0;
- if (!GetLogicalProcessorInformationEx(RelationProcessorCore, nullptr, &buffer_size)) {
- if (GetLastError() != ERROR_INSUFFICIENT_BUFFER) {
- return default_threads;
- }
- }
- std::vector<char> buffer(buffer_size);
- if (!GetLogicalProcessorInformationEx(RelationProcessorCore, reinterpret_cast<PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX>(buffer.data()), &buffer_size)) {
- return default_threads;
- }
- int32_t num_physical_cores = 0;
- PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX info = reinterpret_cast<PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX>(buffer.data());
- while (buffer_size > 0) {
- if (info->Relationship == RelationProcessorCore) {
- num_physical_cores += info->Processor.GroupCount;
- }
- buffer_size -= info->Size;
- info = reinterpret_cast<PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX>(reinterpret_cast<char*>(info) + info->Size);
- }
- return num_physical_cores > 0 ? num_physical_cores : default_threads;
- #endif
- unsigned int n_threads = std::thread::hardware_concurrency();
- return n_threads > 0 ? (n_threads <= 4 ? n_threads : n_threads / 2) : 4;
- }
- #if defined(__x86_64__) && defined(__linux__) && !defined(__ANDROID__)
- #include <pthread.h>
- static void cpuid(unsigned leaf, unsigned subleaf,
- unsigned *eax, unsigned *ebx, unsigned *ecx, unsigned *edx) {
- __asm__("movq\t%%rbx,%%rsi\n\t"
- "cpuid\n\t"
- "xchgq\t%%rbx,%%rsi"
- : "=a"(*eax), "=S"(*ebx), "=c"(*ecx), "=d"(*edx)
- : "0"(leaf), "2"(subleaf));
- }
- static int pin_cpu(int cpu) {
- cpu_set_t mask;
- CPU_ZERO(&mask);
- CPU_SET(cpu, &mask);
- return pthread_setaffinity_np(pthread_self(), sizeof(mask), &mask);
- }
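- // CPUID leaf 7 (EDX bit 15) reports whether the CPU mixes performance and
- // efficiency cores (Intel hybrid architecture, e.g. Alder Lake and later).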
- static bool is_hybrid_cpu(void) {
- unsigned eax, ebx, ecx, edx;
- cpuid(7, 0, &eax, &ebx, &ecx, &edx);
- return !!(edx & (1u << 15));
- }
- static bool is_running_on_efficiency_core(void) {
- unsigned eax, ebx, ecx, edx;
- cpuid(0x1a, 0, &eax, &ebx, &ecx, &edx);
- int intel_atom = 0x20;
- int core_type = (eax & 0xff000000u) >> 24;
- return core_type == intel_atom;
- }
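- // Counts the CPUs useful for compute-heavy work by pinning the current
- // thread to each CPU in turn: efficiency cores are skipped, and only one CPU
- // per physical core is counted (this assumes hyperthread siblings have
- // adjacent CPU indices). Returns -1 if pinning fails.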
- static int cpu_count_math_cpus(int n_cpu) {
- int result = 0;
- for (int cpu = 0; cpu < n_cpu; ++cpu) {
- if (pin_cpu(cpu)) {
- return -1;
- }
- if (is_running_on_efficiency_core()) {
- continue; // efficiency cores harm lockstep threading
- }
- ++cpu; // hyperthreading isn't useful for linear algebra
- ++result;
- }
- return result;
- }
- #endif // __x86_64__ && __linux__
- /**
- * Returns the number of CPUs on the system that are useful for math.
- */
- int32_t cpu_get_num_math() {
- #if defined(__x86_64__) && defined(__linux__) && !defined(__ANDROID__)
- int n_cpu = sysconf(_SC_NPROCESSORS_ONLN);
- if (n_cpu < 1) {
- return cpu_get_num_physical_cores();
- }
- if (is_hybrid_cpu()) {
- cpu_set_t affinity;
- if (!pthread_getaffinity_np(pthread_self(), sizeof(affinity), &affinity)) {
- int result = cpu_count_math_cpus(n_cpu);
- pthread_setaffinity_np(pthread_self(), sizeof(affinity), &affinity);
- if (result > 0) {
- return result;
- }
- }
- }
- #endif
- return cpu_get_num_physical_cores();
- }
- // Helper for setting process priority
- #if defined(_WIN32)
- bool set_process_priority(enum ggml_sched_priority prio) {
- if (prio == GGML_SCHED_PRIO_NORMAL) {
- return true;
- }
- DWORD p = NORMAL_PRIORITY_CLASS;
- switch (prio) {
- case GGML_SCHED_PRIO_LOW: p = BELOW_NORMAL_PRIORITY_CLASS; break;
- case GGML_SCHED_PRIO_NORMAL: p = NORMAL_PRIORITY_CLASS; break;
- case GGML_SCHED_PRIO_MEDIUM: p = ABOVE_NORMAL_PRIORITY_CLASS; break;
- case GGML_SCHED_PRIO_HIGH: p = HIGH_PRIORITY_CLASS; break;
- case GGML_SCHED_PRIO_REALTIME: p = REALTIME_PRIORITY_CLASS; break;
- }
- if (!SetPriorityClass(GetCurrentProcess(), p)) {
- LOG_WRN("failed to set process priority class %d : (%d)\n", prio, (int) GetLastError());
- return false;
- }
- return true;
- }
- #else // macOS and other POSIX systems
- #include <sys/types.h>
- #include <sys/resource.h>
- bool set_process_priority(enum ggml_sched_priority prio) {
- if (prio == GGML_SCHED_PRIO_NORMAL) {
- return true;
- }
- int p = 0;
- switch (prio) {
- case GGML_SCHED_PRIO_LOW: p = 5; break;
- case GGML_SCHED_PRIO_NORMAL: p = 0; break;
- case GGML_SCHED_PRIO_MEDIUM: p = -5; break;
- case GGML_SCHED_PRIO_HIGH: p = -10; break;
- case GGML_SCHED_PRIO_REALTIME: p = -20; break;
- }
- if (setpriority(PRIO_PROCESS, 0, p) != 0) { // setpriority() returns 0 on success, -1 on error
- LOG_WRN("failed to set process priority %d : %s (%d)\n", prio, strerror(errno), errno);
- return false;
- }
- return true;
- }
- #endif
- //
- // CLI argument parsing
- //
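- // Fills in missing thread settings: copies from role_model when provided,
- // otherwise falls back to cpu_get_num_math(), and warns when the CPU mask
- // has fewer set bits than the requested thread count.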
- void postprocess_cpu_params(cpu_params& cpuparams, const cpu_params* role_model) {
- int32_t n_set = 0;
- if (cpuparams.n_threads < 0) {
- // Assuming everything about cpuparams is invalid
- if (role_model != nullptr) {
- cpuparams = *role_model;
- } else {
- cpuparams.n_threads = cpu_get_num_math();
- }
- }
- for (int32_t i = 0; i < GGML_MAX_N_THREADS; i++) {
- if (cpuparams.cpumask[i]) {
- n_set++;
- }
- }
- if (n_set && n_set < cpuparams.n_threads) {
- // Not enough set bits, may experience performance issues.
- LOG_WRN("Not enough set bits in CPU mask (%d) to satisfy requested thread count: %d\n", n_set, cpuparams.n_threads);
- }
- }
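- // Parses a CPU range of the form [<start>]-[<end>] into a boolean mask,
- // e.g. "2-5" sets boolmask[2..5], "-3" sets boolmask[0..3] and "4-" sets
- // boolmask[4..GGML_MAX_N_THREADS-1].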
- bool parse_cpu_range(const std::string & range, bool (&boolmask)[GGML_MAX_N_THREADS]) {
- size_t dash_loc = range.find('-');
- if (dash_loc == std::string::npos) {
- LOG_ERR("Format of CPU range is invalid! Expected [<start>]-[<end>].\n");
- return false;
- }
- size_t start_i;
- size_t end_i;
- if (dash_loc == 0) {
- start_i = 0;
- } else {
- start_i = std::stoull(range.substr(0, dash_loc));
- if (start_i >= GGML_MAX_N_THREADS) {
- LOG_ERR("Start index out of bounds!\n");
- return false;
- }
- }
- if (dash_loc == range.length() - 1) {
- end_i = GGML_MAX_N_THREADS - 1;
- } else {
- end_i = std::stoull(range.substr(dash_loc + 1));
- if (end_i >= GGML_MAX_N_THREADS) {
- LOG_ERR("End index out of bounds!\n");
- return false;
- }
- }
- for (size_t i = start_i; i <= end_i; i++) {
- boolmask[i] = true;
- }
- return true;
- }
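- // Parses a hexadecimal CPU mask (with optional "0x" prefix) into a boolean
- // mask, where bit i corresponds to CPU i, e.g. "0x5" sets boolmask[0] and
- // boolmask[2]. Bits are OR-ed into any already-set entries.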
- bool parse_cpu_mask(const std::string & mask, bool (&boolmask)[GGML_MAX_N_THREADS]) {
- // Discard potential 0x prefix
- size_t start_i = 0;
- if (mask.length() >= 2 && mask.substr(0, 2) == "0x") {
- start_i = 2;
- }
- size_t num_digits = mask.length() - start_i;
- if (num_digits > 128) num_digits = 128;
- size_t end_i = num_digits + start_i;
- for (size_t i = start_i, n = (num_digits*4 - 1); i < end_i; i++, n-=4) {
- char c = mask.at(i);
- int8_t id = c;
- if ((c >= '0' && c <= '9')) {
- id -= '0';
- } else if (c >= 'a' && c <= 'f') {
- id -= 'a' - 10;
- } else if (c >= 'A' && c <= 'F') {
- id -= 'A' - 10;
- } else {
- LOG_ERR("Invalid hex character '%c' at position %d\n", c, int32_t(i));
- return false;
- }
- boolmask[ n ] = boolmask[ n ] || ((id & 8) != 0);
- boolmask[n - 1] = boolmask[n - 1] || ((id & 4) != 0);
- boolmask[n - 2] = boolmask[n - 2] || ((id & 2) != 0);
- boolmask[n - 3] = boolmask[n - 3] || ((id & 1) != 0);
- }
- return true;
- }
- void common_init() {
- llama_log_set([](ggml_log_level level, const char * text, void * /*user_data*/) {
- if (LOG_DEFAULT_LLAMA <= common_log_verbosity_thold) {
- common_log_add(common_log_main(), level, "%s", text);
- }
- }, NULL);
- #ifdef NDEBUG
- const char * build_type = "";
- #else
- const char * build_type = " (debug)";
- #endif
- LOG_INF("build: %d (%s) with %s for %s%s\n", LLAMA_BUILD_NUMBER, LLAMA_COMMIT, LLAMA_COMPILER, LLAMA_BUILD_TARGET, build_type);
- }
- std::string common_params_get_system_info(const common_params & params) {
- std::ostringstream os;
- os << "system_info: n_threads = " << params.cpuparams.n_threads;
- if (params.cpuparams_batch.n_threads != -1) {
- os << " (n_threads_batch = " << params.cpuparams_batch.n_threads << ")";
- }
- #if defined(_WIN32) && (_WIN32_WINNT >= 0x0601) && !defined(__MINGW64__) // windows 7 and later
- // TODO: windows + arm64 + mingw64
- DWORD logicalProcessorCount = GetActiveProcessorCount(ALL_PROCESSOR_GROUPS);
- os << " / " << logicalProcessorCount << " | " << llama_print_system_info();
- #else
- os << " / " << std::thread::hardware_concurrency() << " | " << llama_print_system_info();
- #endif
- return os.str();
- }
- //
- // String utils
- //
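- // printf-style formatting into a std::string; the format string is rendered
- // twice (once to measure, once to write) using a copied va_list.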
- std::string string_format(const char * fmt, ...) {
- va_list ap;
- va_list ap2;
- va_start(ap, fmt);
- va_copy(ap2, ap);
- int size = vsnprintf(NULL, 0, fmt, ap);
- GGML_ASSERT(size >= 0 && size < INT_MAX); // NOLINT
- std::vector<char> buf(size + 1);
- int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2);
- GGML_ASSERT(size2 == size);
- va_end(ap2);
- va_end(ap);
- return std::string(buf.data(), size);
- }
- std::string string_strip(const std::string & str) {
- size_t start = 0;
- size_t end = str.size();
- while (start < end && std::isspace(static_cast<unsigned char>(str[start]))) {
- start++;
- }
- while (end > start && std::isspace(static_cast<unsigned char>(str[end - 1]))) {
- end--;
- }
- return str.substr(start, end - start);
- }
- std::string string_get_sortable_timestamp() {
- using clock = std::chrono::system_clock;
- const clock::time_point current_time = clock::now();
- const time_t as_time_t = clock::to_time_t(current_time);
- char timestamp_no_ns[100];
- std::strftime(timestamp_no_ns, 100, "%Y_%m_%d-%H_%M_%S", std::localtime(&as_time_t));
- const int64_t ns = std::chrono::duration_cast<std::chrono::nanoseconds>(
- current_time.time_since_epoch() % 1000000000).count();
- char timestamp_ns[11];
- snprintf(timestamp_ns, 11, "%09" PRId64, ns);
- return std::string(timestamp_no_ns) + "." + std::string(timestamp_ns);
- }
- void string_replace_all(std::string & s, const std::string & search, const std::string & replace) {
- if (search.empty()) {
- return;
- }
- std::string builder;
- builder.reserve(s.length());
- size_t pos = 0;
- size_t last_pos = 0;
- while ((pos = s.find(search, last_pos)) != std::string::npos) {
- builder.append(s, last_pos, pos - last_pos);
- builder.append(replace);
- last_pos = pos + search.length();
- }
- builder.append(s, last_pos, std::string::npos);
- s = std::move(builder);
- }
- bool string_ends_with(const std::string_view & str, const std::string_view & suffix) {
- return str.size() >= suffix.size() && str.compare(str.size()-suffix.size(), suffix.size(), suffix) == 0;
- }
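- // Returns the index in str where a partial match of stop begins at the end
- // of str, or std::string::npos if there is none. For example, with
- // str = "hello wor" and stop = "world" the result is 6 (the start of "wor").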
- size_t string_find_partial_stop(const std::string_view & str, const std::string_view & stop) {
- if (!str.empty() && !stop.empty()) {
- const char text_last_char = str.back();
- for (int64_t char_index = stop.size() - 1; char_index >= 0; char_index--) {
- if (stop[char_index] == text_last_char) {
- const auto current_partial = stop.substr(0, char_index + 1);
- if (string_ends_with(str, current_partial)) {
- return str.size() - char_index - 1;
- }
- }
- }
- }
- return std::string::npos;
- }
- std::string regex_escape(const std::string & s) {
- static const std::regex special_chars("[.^$|()*+?\\[\\]{}\\\\]");
- return std::regex_replace(s, special_chars, "\\$&");
- }
- std::string string_join(const std::vector<std::string> & values, const std::string & separator) {
- std::ostringstream result;
- for (size_t i = 0; i < values.size(); ++i) {
- if (i > 0) {
- result << separator;
- }
- result << values[i];
- }
- return result.str();
- }
- std::vector<std::string> string_split(const std::string & str, const std::string & delimiter) {
- std::vector<std::string> parts;
- size_t start = 0;
- size_t end = str.find(delimiter);
- while (end != std::string::npos) {
- parts.push_back(str.substr(start, end - start));
- start = end + delimiter.length();
- end = str.find(delimiter, start);
- }
- parts.push_back(str.substr(start));
- return parts;
- }
- std::string string_repeat(const std::string & str, size_t n) {
- if (n == 0) {
- return "";
- }
- std::string result;
- result.reserve(str.length() * n);
- for (size_t i = 0; i < n; ++i) {
- result += str;
- }
- return result;
- }
- std::string string_from(bool value) {
- return value ? "true" : "false";
- }
- std::string string_from(const std::vector<int> & values) {
- std::stringstream buf;
- buf << "[ ";
- bool first = true;
- for (auto e : values) {
- if (first) {
- first = false;
- } else {
- buf << ", ";
- }
- buf << std::to_string(e);
- }
- buf << " ]";
- return buf.str();
- }
- std::string string_from(const struct llama_context * ctx, const std::vector<llama_token> & tokens) {
- std::stringstream buf;
- buf << "[ ";
- bool first = true;
- for (const auto & token : tokens) {
- if (!first) {
- buf << ", ";
- } else {
- first = false;
- }
- auto detokenized = common_token_to_piece(ctx, token);
- detokenized.erase(
- std::remove_if(
- detokenized.begin(),
- detokenized.end(),
- [](const unsigned char c) { return !std::isprint(c); }),
- detokenized.end());
- buf << "'" << detokenized << "'"
- << ":" << std::to_string(token);
- }
- buf << " ]";
- return buf.str();
- }
- std::string string_from(const struct llama_context * ctx, const struct llama_batch & batch) {
- std::stringstream buf;
- buf << "[ ";
- bool first = true;
- for (int i = 0; i < batch.n_tokens; ++i) {
- if (!first) {
- buf << ", ";
- } else {
- first = false;
- }
- auto detokenized = common_token_to_piece(ctx, batch.token[i]);
- detokenized.erase(
- std::remove_if(
- detokenized.begin(),
- detokenized.end(),
- [](const unsigned char c) { return !std::isprint(c); }),
- detokenized.end());
- buf << "\n" << std::to_string(i)
- << ", token '" << detokenized << "'"
- << ", pos " << std::to_string(batch.pos[i])
- << ", n_seq_id " << std::to_string(batch.n_seq_id[i])
- << ", seq_id " << std::to_string(batch.seq_id[i][0])
- << ", logits " << std::to_string(batch.logits[i]);
- }
- buf << " ]";
- return buf.str();
- }
- void string_process_escapes(std::string & input) {
- std::size_t input_len = input.length();
- std::size_t output_idx = 0;
- for (std::size_t input_idx = 0; input_idx < input_len; ++input_idx) {
- if (input[input_idx] == '\\' && input_idx + 1 < input_len) {
- switch (input[++input_idx]) {
- case 'n': input[output_idx++] = '\n'; break;
- case 'r': input[output_idx++] = '\r'; break;
- case 't': input[output_idx++] = '\t'; break;
- case '\'': input[output_idx++] = '\''; break;
- case '\"': input[output_idx++] = '\"'; break;
- case '\\': input[output_idx++] = '\\'; break;
- case 'x':
- // Handle \x12, etc
- if (input_idx + 2 < input_len) {
- const char x[3] = { input[input_idx + 1], input[input_idx + 2], 0 };
- char *err_p = nullptr;
- const long val = std::strtol(x, &err_p, 16);
- if (err_p == x + 2) {
- input_idx += 2;
- input[output_idx++] = char(val);
- break;
- }
- }
- // fall through
- default: input[output_idx++] = '\\';
- input[output_idx++] = input[input_idx]; break;
- }
- } else {
- input[output_idx++] = input[input_idx];
- }
- }
- input.resize(output_idx);
- }
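- // Parses a KV override of the form <key>=<type>:<value>, where <type> is one
- // of int, float, bool or str, e.g. "tokenizer.ggml.add_bos_token=bool:false".
- // Keys are limited to 127 characters and string values to 127 characters.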
- bool string_parse_kv_override(const char * data, std::vector<llama_model_kv_override> & overrides) {
- const char * sep = strchr(data, '=');
- if (sep == nullptr || sep - data >= 128) {
- LOG_ERR("%s: malformed KV override '%s'\n", __func__, data);
- return false;
- }
- llama_model_kv_override kvo;
- std::strncpy(kvo.key, data, sep - data);
- kvo.key[sep - data] = 0;
- sep++;
- if (strncmp(sep, "int:", 4) == 0) {
- sep += 4;
- kvo.tag = LLAMA_KV_OVERRIDE_TYPE_INT;
- kvo.val_i64 = std::atol(sep);
- } else if (strncmp(sep, "float:", 6) == 0) {
- sep += 6;
- kvo.tag = LLAMA_KV_OVERRIDE_TYPE_FLOAT;
- kvo.val_f64 = std::atof(sep);
- } else if (strncmp(sep, "bool:", 5) == 0) {
- sep += 5;
- kvo.tag = LLAMA_KV_OVERRIDE_TYPE_BOOL;
- if (std::strcmp(sep, "true") == 0) {
- kvo.val_bool = true;
- } else if (std::strcmp(sep, "false") == 0) {
- kvo.val_bool = false;
- } else {
- LOG_ERR("%s: invalid boolean value for KV override '%s'\n", __func__, data);
- return false;
- }
- } else if (strncmp(sep, "str:", 4) == 0) {
- sep += 4;
- kvo.tag = LLAMA_KV_OVERRIDE_TYPE_STR;
- if (strlen(sep) > 127) {
- LOG_ERR("%s: malformed KV override '%s', value cannot exceed 127 chars\n", __func__, data);
- return false;
- }
- strncpy(kvo.val_str, sep, 127);
- kvo.val_str[127] = '\0';
- } else {
- LOG_ERR("%s: invalid type for KV override '%s'\n", __func__, data);
- return false;
- }
- overrides.emplace_back(std::move(kvo));
- return true;
- }
- //
- // Filesystem utils
- //
- // Validate if a filename is safe to use
- // To validate a full path, split the path by the OS-specific path separator, and validate each part with this function
- bool fs_validate_filename(const std::string & filename) {
- if (!filename.length()) {
- // Empty filename invalid
- return false;
- }
- if (filename.length() > 255) {
- // Limit at common largest possible filename on Linux filesystems
- // to avoid unnecessary further validation
- // (On systems with smaller limits it will be caught by the OS)
- return false;
- }
- std::u32string filename_utf32;
- try {
- #if defined(__clang__)
- // disable C++17 deprecation warning for std::codecvt_utf8
- # pragma clang diagnostic push
- # pragma clang diagnostic ignored "-Wdeprecated-declarations"
- #elif defined(__GNUC__)
- # pragma GCC diagnostic push
- # pragma GCC diagnostic ignored "-Wdeprecated-declarations"
- #endif
- std::wstring_convert<std::codecvt_utf8<char32_t>, char32_t> converter;
- #if defined(__clang__)
- # pragma clang diagnostic pop
- #elif defined(__GNUC__)
- # pragma GCC diagnostic pop
- #endif
- filename_utf32 = converter.from_bytes(filename);
- // If the reverse conversion mismatches, it means overlong UTF-8 sequences were used,
- // or invalid encodings were encountered. Reject such attempts
- std::string filename_reencoded = converter.to_bytes(filename_utf32);
- if (filename_reencoded != filename) {
- return false;
- }
- } catch (const std::exception &) {
- return false;
- }
- // Check for forbidden codepoints:
- // - Control characters
- // - Unicode equivalents of illegal characters
- // - UTF-16 surrogate pairs
- // - UTF-8 replacement character
- // - Byte order mark (BOM)
- // - Illegal characters: / \ : * ? " < > |
- for (char32_t c : filename_utf32) {
- if (c <= 0x1F // Control characters (C0)
- || c == 0x7F // Control characters (DEL)
- || (c >= 0x80 && c <= 0x9F) // Control characters (C1)
- || c == 0xFF0E // Fullwidth Full Stop (period equivalent)
- || c == 0x2215 // Division Slash (forward slash equivalent)
- || c == 0x2216 // Set Minus (backslash equivalent)
- || (c >= 0xD800 && c <= 0xDFFF) // UTF-16 surrogate pairs
- || c == 0xFFFD // Replacement Character (UTF-8)
- || c == 0xFEFF // Byte Order Mark (BOM)
- || c == '/' || c == '\\' || c == ':' || c == '*' // Illegal characters
- || c == '?' || c == '"' || c == '<' || c == '>' || c == '|') {
- return false;
- }
- }
- // Reject any leading or trailing ' ', or any trailing '.', these are stripped on Windows and will cause a different filename
- // Unicode and other whitespace is not affected, only 0x20 space
- if (filename.front() == ' ' || filename.back() == ' ' || filename.back() == '.') {
- return false;
- }
- // Reject any ".." (currently stricter than necessary, it should be fine to just check for == ".." instead)
- if (filename.find("..") != std::string::npos) {
- return false;
- }
- // Reject "."
- if (filename == ".") {
- return false;
- }
- return true;
- }
- // returns true if successful, false otherwise
- bool fs_create_directory_with_parents(const std::string & path) {
- #ifdef _WIN32
- std::wstring_convert<std::codecvt_utf8<wchar_t>> converter;
- std::wstring wpath = converter.from_bytes(path);
- // if the path already exists, check whether it's a directory
- const DWORD attributes = GetFileAttributesW(wpath.c_str());
- if ((attributes != INVALID_FILE_ATTRIBUTES) && (attributes & FILE_ATTRIBUTE_DIRECTORY)) {
- return true;
- }
- size_t pos_slash = 0;
- // process path from front to back, procedurally creating directories
- // (search the converted wide string so the index stays valid for non-ASCII paths)
- while ((pos_slash = wpath.find(L'\\', pos_slash)) != std::wstring::npos) {
- const std::wstring subpath = wpath.substr(0, pos_slash);
- pos_slash += 1;
- // skip the drive letter, in some systems it can return an access denied error
- if (subpath.length() == 2 && subpath[1] == ':') {
- continue;
- }
- const bool success = CreateDirectoryW(subpath.c_str(), NULL);
- if (!success) {
- const DWORD error = GetLastError();
- // if the path already exists, ensure that it's a directory
- if (error == ERROR_ALREADY_EXISTS) {
- const DWORD attributes = GetFileAttributesW(subpath.c_str());
- if (attributes == INVALID_FILE_ATTRIBUTES || !(attributes & FILE_ATTRIBUTE_DIRECTORY)) {
- return false;
- }
- } else {
- return false;
- }
- }
- }
- return true;
- #else
- // if the path already exists, check whether it's a directory
- struct stat info;
- if (stat(path.c_str(), &info) == 0) {
- return S_ISDIR(info.st_mode);
- }
- size_t pos_slash = 1; // skip leading slashes for directory creation
- // process path from front to back, procedurally creating directories
- while ((pos_slash = path.find('/', pos_slash)) != std::string::npos) {
- const std::string subpath = path.substr(0, pos_slash);
- struct stat info;
- // if the path already exists, ensure that it's a directory
- if (stat(subpath.c_str(), &info) == 0) {
- if (!S_ISDIR(info.st_mode)) {
- return false;
- }
- } else {
- // create parent directories
- const int ret = mkdir(subpath.c_str(), 0755);
- if (ret != 0) {
- return false;
- }
- }
- pos_slash += 1;
- }
- return true;
- #endif // _WIN32
- }
- std::string fs_get_cache_directory() {
- std::string cache_directory = "";
- auto ensure_trailing_slash = [](std::string p) {
- // Make sure to add trailing slash
- if (p.back() != DIRECTORY_SEPARATOR) {
- p += DIRECTORY_SEPARATOR;
- }
- return p;
- };
- if (getenv("LLAMA_CACHE")) {
- cache_directory = std::getenv("LLAMA_CACHE");
- } else {
- #if defined(__linux__) || defined(__FreeBSD__) || defined(_AIX) || defined(__OpenBSD__)
- if (std::getenv("XDG_CACHE_HOME")) {
- cache_directory = std::getenv("XDG_CACHE_HOME");
- } else {
- cache_directory = std::getenv("HOME") + std::string("/.cache/");
- }
- #elif defined(__APPLE__)
- cache_directory = std::getenv("HOME") + std::string("/Library/Caches/");
- #elif defined(_WIN32)
- cache_directory = std::getenv("LOCALAPPDATA");
- #else
- # error Unknown platform
- #endif
- cache_directory = ensure_trailing_slash(cache_directory);
- cache_directory += "llama.cpp";
- }
- return ensure_trailing_slash(cache_directory);
- }
- std::string fs_get_cache_file(const std::string & filename) {
- GGML_ASSERT(filename.find(DIRECTORY_SEPARATOR) == std::string::npos);
- std::string cache_directory = fs_get_cache_directory();
- const bool success = fs_create_directory_with_parents(cache_directory);
- if (!success) {
- throw std::runtime_error("failed to create cache directory: " + cache_directory);
- }
- return cache_directory + filename;
- }
- //
- // Model utils
- //
- struct common_init_result common_init_from_params(common_params & params) {
- common_init_result iparams;
- auto mparams = common_model_params_to_llama(params);
- llama_model * model = llama_model_load_from_file(params.model.path.c_str(), mparams);
- if (model == NULL) {
- LOG_ERR("%s: failed to load model '%s'\n", __func__, params.model.path.c_str());
- return iparams;
- }
- const llama_vocab * vocab = llama_model_get_vocab(model);
- auto cparams = common_context_params_to_llama(params);
- llama_context * lctx = llama_init_from_model(model, cparams);
- if (lctx == NULL) {
- LOG_ERR("%s: failed to create context with model '%s'\n", __func__, params.model.path.c_str());
- llama_model_free(model);
- return iparams;
- }
- if (params.ctx_shift && !llama_memory_can_shift(llama_get_memory(lctx))) {
- LOG_WRN("%s: KV cache shifting is not supported for this context, disabling KV cache shifting\n", __func__);
- params.ctx_shift = false;
- }
- if (!params.control_vectors.empty()) {
- if (params.control_vector_layer_start <= 0) params.control_vector_layer_start = 1;
- if (params.control_vector_layer_end <= 0) params.control_vector_layer_end = llama_model_n_layer(model);
- const auto cvec = common_control_vector_load(params.control_vectors);
- if (cvec.n_embd == -1) {
- llama_free(lctx);
- llama_model_free(model);
- return iparams;
- }
- int err = llama_apply_adapter_cvec(
- lctx,
- cvec.data.data(),
- cvec.data.size(),
- cvec.n_embd,
- params.control_vector_layer_start,
- params.control_vector_layer_end);
- if (err) {
- llama_free(lctx);
- llama_model_free(model);
- return iparams;
- }
- }
- if (llama_pooling_type(lctx) == LLAMA_POOLING_TYPE_RANK) {
- bool ok = true;
- if (llama_vocab_bos(vocab) == LLAMA_TOKEN_NULL) {
- LOG_WRN("%s: warning: vocab does not have a BOS token, reranking will not work\n", __func__);
- ok = false;
- }
- bool has_eos = llama_vocab_eos(vocab) != LLAMA_TOKEN_NULL;
- bool has_sep = llama_vocab_sep(vocab) != LLAMA_TOKEN_NULL;
- if (!has_eos && !has_sep) {
- LOG_WRN("%s: warning: vocab does not have an EOS token or SEP token, reranking will not work\n", __func__);
- ok = false;
- } else if (!has_eos) {
- LOG_WRN("%s: warning: vocab does not have an EOS token, using SEP token as fallback\n", __func__);
- } else if (!has_sep) {
- LOG_WRN("%s: warning: vocab does not have a SEP token, reranking will not work\n", __func__);
- ok = false;
- }
- if (!ok) {
- llama_free(lctx);
- llama_model_free(model);
- return iparams;
- }
- }
- // load and optionally apply lora adapters
- for (auto & la : params.lora_adapters) {
- llama_adapter_lora_ptr lora;
- lora.reset(llama_adapter_lora_init(model, la.path.c_str()));
- if (lora == nullptr) {
- LOG_ERR("%s: failed to apply lora adapter '%s'\n", __func__, la.path.c_str());
- llama_free(lctx);
- llama_model_free(model);
- return iparams;
- }
- la.ptr = lora.get();
- iparams.lora.emplace_back(std::move(lora)); // copy to list of loaded adapters
- }
- if (!params.lora_init_without_apply) {
- common_set_adapter_lora(lctx, params.lora_adapters);
- }
- if (params.sampling.ignore_eos && llama_vocab_eos(vocab) == LLAMA_TOKEN_NULL) {
- LOG_WRN("%s: warning: vocab does not have an EOS token, ignoring --ignore-eos\n", __func__);
- params.sampling.ignore_eos = false;
- }
- if (params.sampling.ignore_eos) {
- for (llama_token i = 0; i < llama_vocab_n_tokens(vocab); i++) {
- if (llama_vocab_is_eog(vocab, i)) {
- LOG_INF("%s: added %s logit bias = %f\n", __func__, common_token_to_piece(lctx, i).c_str(), -INFINITY);
- params.sampling.logit_bias.push_back({i, -INFINITY});
- }
- }
- }
- if (params.sampling.penalty_last_n == -1) {
- LOG_INF("%s: setting penalty_last_n to ctx_size = %d\n", __func__, llama_n_ctx(lctx));
- params.sampling.penalty_last_n = llama_n_ctx(lctx);
- }
- if (params.sampling.dry_penalty_last_n == -1) {
- LOG_INF("%s: setting dry_penalty_last_n to ctx_size = %d\n", __func__, llama_n_ctx(lctx));
- params.sampling.dry_penalty_last_n = llama_n_ctx(lctx);
- }
- if (params.warmup) {
- LOG_WRN("%s: warming up the model with an empty run - please wait ... (--no-warmup to disable)\n", __func__);
- llama_set_warmup(lctx, true);
- std::vector<llama_token> tmp;
- llama_token bos = llama_vocab_bos(vocab);
- llama_token eos = llama_vocab_eos(vocab);
- // some models (e.g. T5) don't have a BOS token
- if (bos != LLAMA_TOKEN_NULL) {
- tmp.push_back(bos);
- }
- if (eos != LLAMA_TOKEN_NULL) {
- tmp.push_back(eos);
- }
- if (tmp.empty()) {
- tmp.push_back(0);
- }
- if (llama_model_has_encoder(model)) {
- llama_encode(lctx, llama_batch_get_one(tmp.data(), tmp.size()));
- llama_token decoder_start_token_id = llama_model_decoder_start_token(model);
- if (decoder_start_token_id == LLAMA_TOKEN_NULL) {
- decoder_start_token_id = bos;
- }
- tmp.clear();
- tmp.push_back(decoder_start_token_id);
- }
- if (llama_model_has_decoder(model)) {
- llama_decode(lctx, llama_batch_get_one(tmp.data(), std::min(tmp.size(), (size_t) params.n_batch)));
- }
- llama_memory_clear(llama_get_memory(lctx), true);
- llama_synchronize(lctx);
- llama_perf_context_reset(lctx);
- llama_set_warmup(lctx, false);
- }
- iparams.model.reset(model);
- iparams.context.reset(lctx);
- return iparams;
- }
- std::string get_model_endpoint() {
- const char * model_endpoint_env = getenv("MODEL_ENDPOINT");
- // The "HF_ENDPOINT" environment variable is still respected for backward compatibility.
- const char * hf_endpoint_env = getenv("HF_ENDPOINT");
- const char * endpoint_env = model_endpoint_env ? model_endpoint_env : hf_endpoint_env;
- std::string model_endpoint = "https://huggingface.co/";
- if (endpoint_env) {
- model_endpoint = endpoint_env;
- if (model_endpoint.back() != '/') model_endpoint += '/';
- }
- return model_endpoint;
- }
- void common_set_adapter_lora(struct llama_context * ctx, std::vector<common_adapter_lora_info> & lora) {
- llama_clear_adapter_lora(ctx);
- for (auto & la : lora) {
- if (la.scale != 0.0f) {
- llama_set_adapter_lora(ctx, la.ptr, la.scale);
- }
- }
- }
- struct llama_model_params common_model_params_to_llama(common_params & params) {
- auto mparams = llama_model_default_params();
- if (!params.devices.empty()) {
- mparams.devices = params.devices.data();
- }
- if (params.n_gpu_layers != -1) {
- mparams.n_gpu_layers = params.n_gpu_layers;
- }
- mparams.main_gpu = params.main_gpu;
- mparams.split_mode = params.split_mode;
- mparams.tensor_split = params.tensor_split;
- mparams.use_mmap = params.use_mmap;
- mparams.use_mlock = params.use_mlock;
- mparams.check_tensors = params.check_tensors;
- if (params.kv_overrides.empty()) {
- mparams.kv_overrides = NULL;
- } else {
- GGML_ASSERT(params.kv_overrides.back().key[0] == 0 && "KV overrides not terminated with empty key");
- mparams.kv_overrides = params.kv_overrides.data();
- }
- if (params.tensor_buft_overrides.empty()) {
- mparams.tensor_buft_overrides = NULL;
- } else {
- GGML_ASSERT(params.tensor_buft_overrides.back().pattern == nullptr && "Tensor buffer overrides not terminated with empty pattern");
- mparams.tensor_buft_overrides = params.tensor_buft_overrides.data();
- }
- mparams.progress_callback = params.load_progress_callback;
- mparams.progress_callback_user_data = params.load_progress_callback_user_data;
- return mparams;
- }
- struct llama_context_params common_context_params_to_llama(const common_params & params) {
- auto cparams = llama_context_default_params();
- cparams.n_ctx = params.n_ctx;
- cparams.n_seq_max = params.n_parallel;
- cparams.n_batch = params.n_batch;
- cparams.n_ubatch = params.n_ubatch;
- cparams.n_threads = params.cpuparams.n_threads;
- cparams.n_threads_batch = params.cpuparams_batch.n_threads == -1 ?
- params.cpuparams.n_threads : params.cpuparams_batch.n_threads;
- cparams.embeddings = params.embedding;
- cparams.rope_scaling_type = params.rope_scaling_type;
- cparams.rope_freq_base = params.rope_freq_base;
- cparams.rope_freq_scale = params.rope_freq_scale;
- cparams.yarn_ext_factor = params.yarn_ext_factor;
- cparams.yarn_attn_factor = params.yarn_attn_factor;
- cparams.yarn_beta_fast = params.yarn_beta_fast;
- cparams.yarn_beta_slow = params.yarn_beta_slow;
- cparams.yarn_orig_ctx = params.yarn_orig_ctx;
- cparams.pooling_type = params.pooling_type;
- cparams.attention_type = params.attention_type;
- cparams.defrag_thold = params.defrag_thold;
- cparams.cb_eval = params.cb_eval;
- cparams.cb_eval_user_data = params.cb_eval_user_data;
- cparams.offload_kqv = !params.no_kv_offload;
- cparams.flash_attn = params.flash_attn;
- cparams.no_perf = params.no_perf;
- cparams.op_offload = !params.no_op_offload;
- cparams.swa_full = params.swa_full;
- cparams.type_k = params.cache_type_k;
- cparams.type_v = params.cache_type_v;
- return cparams;
- }
- struct ggml_threadpool_params ggml_threadpool_params_from_cpu_params(const cpu_params & params) {
- struct ggml_threadpool_params tpp;
- ggml_threadpool_params_init(&tpp, params.n_threads); // setup the defaults
- if (params.mask_valid) {
- std::memcpy(&tpp.cpumask, ¶ms.cpumask, GGML_MAX_N_THREADS);
- }
- tpp.prio = params.priority;
- tpp.poll = params.poll;
- tpp.strict_cpu = params.strict_cpu;
- return tpp;
- }
- //
- // Batch utils
- //
- void common_batch_clear(struct llama_batch & batch) {
- batch.n_tokens = 0;
- }
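- // Appends one token to the batch with the given position and sequence ids;
- // set logits to true for tokens whose output (logits/embeddings) is needed.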
- void common_batch_add(
- struct llama_batch & batch,
- llama_token id,
- llama_pos pos,
- const std::vector<llama_seq_id> & seq_ids,
- bool logits) {
- GGML_ASSERT(batch.seq_id[batch.n_tokens] && "llama_batch size exceeded");
- batch.token [batch.n_tokens] = id;
- batch.pos [batch.n_tokens] = pos;
- batch.n_seq_id[batch.n_tokens] = seq_ids.size();
- for (size_t i = 0; i < seq_ids.size(); ++i) {
- batch.seq_id[batch.n_tokens][i] = seq_ids[i];
- }
- batch.logits [batch.n_tokens] = logits;
- batch.n_tokens++;
- }
- //
- // Token utils
- //
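- // Returns the length of the longest common prefix of the two token sequences.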
- size_t common_lcp(const llama_tokens & a, const llama_tokens & b) {
- size_t i;
- for (i = 0; i < a.size() && i < b.size() && a[i] == b[i]; i++) {}
- return i;
- }
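- // Returns the length of the longest common contiguous substring of the two
- // token sequences, using a rolling two-row dynamic-programming table
- // (O(a_len * b_len) time, O(b_len) space).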
- size_t common_lcs(const llama_tokens & a, const llama_tokens & b) {
- // check for empty sequences
- if (a.empty() || b.empty()) {
- return 0;
- }
- // get the lengths of the input sequences
- size_t a_len = a.size();
- size_t b_len = b.size();
- // initialize the maximum length of the longest common substring
- size_t max_length = 0;
- // use two rows instead of a 2D matrix to optimize space
- std::vector<size_t> prev_row(b_len + 1, 0);
- std::vector<size_t> curr_row(b_len + 1, 0);
- // iterate through the elements of a
- for (size_t i = 1; i <= a_len; i++) {
- // iterate through the elements of b
- for (size_t j = 1; j <= b_len; j++) {
- // if elements at the current positions match
- if (a[i - 1] == b[j - 1]) {
- // if it's the first element of either sequence, start a new match of length 1
- if (i == 1 || j == 1) {
- curr_row[j] = 1;
- } else {
- // extend the current common substring by one token
- curr_row[j] = prev_row[j - 1] + 1;
- }
- // update max_length if necessary
- if (curr_row[j] > max_length) {
- max_length = curr_row[j];
- }
- } else {
- // reset the match length when elements don't match
- curr_row[j] = 0;
- }
- }
- // update the previous row for the next iteration
- prev_row = curr_row;
- }
- // return the maximum length of the longest common substring
- return max_length;
- }
- //
- // Vocab utils
- //
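- // Tokenizes text, allocating for the worst case (one token per byte, plus
- // BOS/EOS when add_special is set) and shrinking to the actual token count.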
- std::vector<llama_token> common_tokenize(
- const struct llama_context * ctx,
- const std::string & text,
- bool add_special,
- bool parse_special) {
- const llama_model * model = llama_get_model(ctx);
- const llama_vocab * vocab = llama_model_get_vocab(model);
- return common_tokenize(vocab, text, add_special, parse_special);
- }
- std::vector<llama_token> common_tokenize(
- const struct llama_vocab * vocab,
- const std::string & text,
- bool add_special,
- bool parse_special) {
- // upper limit for the number of tokens
- int n_tokens = text.length() + 2 * add_special;
- std::vector<llama_token> result(n_tokens);
- n_tokens = llama_tokenize(vocab, text.data(), text.length(), result.data(), result.size(), add_special, parse_special);
- if (n_tokens < 0) {
- result.resize(-n_tokens);
- int check = llama_tokenize(vocab, text.data(), text.length(), result.data(), result.size(), add_special, parse_special);
- GGML_ASSERT(check == -n_tokens);
- } else {
- result.resize(n_tokens);
- }
- return result;
- }
- std::string common_token_to_piece(const struct llama_context * ctx, llama_token token, bool special) {
- const llama_model * model = llama_get_model(ctx);
- const llama_vocab * vocab = llama_model_get_vocab(model);
- return common_token_to_piece(vocab, token, special);
- }
- std::string common_token_to_piece(const struct llama_vocab * vocab, llama_token token, bool special) {
- std::string piece;
- piece.resize(piece.capacity()); // using string internal cache, 15 bytes + '\0'
- const int n_chars = llama_token_to_piece(vocab, token, &piece[0], piece.size(), 0, special);
- if (n_chars < 0) {
- piece.resize(-n_chars);
- int check = llama_token_to_piece(vocab, token, &piece[0], piece.size(), 0, special);
- GGML_ASSERT(check == -n_chars);
- }
- else {
- piece.resize(n_chars);
- }
- return piece;
- }
- std::string common_detokenize(const struct llama_context * ctx, const std::vector<llama_token> & tokens, bool special) {
- const llama_model * model = llama_get_model(ctx);
- const llama_vocab * vocab = llama_model_get_vocab(model);
- return common_detokenize(vocab, tokens, special);
- }
- std::string common_detokenize(const struct llama_vocab * vocab, const std::vector<llama_token> & tokens, bool special) {
- std::string text;
- text.resize(std::max(text.capacity(), tokens.size()));
- int32_t n_chars = llama_detokenize(vocab, tokens.data(), (int32_t)tokens.size(), &text[0], (int32_t)text.size(), false, special);
- if (n_chars < 0) {
- text.resize(-n_chars);
- n_chars = llama_detokenize(vocab, tokens.data(), (int32_t)tokens.size(), &text[0], (int32_t)text.size(), false, special);
- GGML_ASSERT(n_chars <= (int32_t)text.size()); // whitespace trimming is performed after per-token detokenization
- }
- text.resize(n_chars);
- // NOTE: the original tokenizer decodes bytes after collecting the pieces.
- return text;
- }
- //
- // Embedding utils
- //
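- // Normalizes an embedding of size n into out. embd_norm selects the scheme:
- // -1 = no normalisation, 0 = scale so the max absolute value maps to the
- // int16 range (32760), 2 = euclidean (L2), any other value p = p-norm.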
- void common_embd_normalize(const float * inp, float * out, int n, int embd_norm) {
- double sum = 0.0;
- switch (embd_norm) {
- case -1: // no normalisation
- sum = 1.0;
- break;
- case 0: // max absolute
- for (int i = 0; i < n; i++) {
- if (sum < std::abs(inp[i])) {
- sum = std::abs(inp[i]);
- }
- }
- sum /= 32760.0; // make an int16 range
- break;
- case 2: // euclidean
- for (int i = 0; i < n; i++) {
- sum += inp[i] * inp[i];
- }
- sum = std::sqrt(sum);
- break;
- default: // p-norm (euclidean is p-norm p=2)
- for (int i = 0; i < n; i++) {
- sum += std::pow(std::abs(inp[i]), embd_norm);
- }
- sum = std::pow(sum, 1.0 / embd_norm);
- break;
- }
- const float norm = sum > 0.0 ? 1.0 / sum : 0.0f;
- for (int i = 0; i < n; i++) {
- out[i] = inp[i] * norm;
- }
- }
- float common_embd_similarity_cos(const float * embd1, const float * embd2, int n){
- double sum = 0.0;
- double sum1 = 0.0;
- double sum2 = 0.0;
- for (int i = 0; i < n; i++) {
- sum += embd1[i] * embd2[i];
- sum1 += embd1[i] * embd1[i];
- sum2 += embd2[i] * embd2[i];
- }
- // Handle the case where one or both vectors are zero vectors
- if (sum1 == 0.0 || sum2 == 0.0) {
- if (sum1 == 0.0 && sum2 == 0.0) {
- return 1.0f; // two zero vectors are similar
- }
- return 0.0f;
- }
- return sum / (sqrt(sum1) * sqrt(sum2));
- }
- //
- // Control vector utils
- //
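- // Loads one control vector file: each GGUF tensor named "direction.<L>" is a
- // 1D F32 row for layer L (L >= 1), scaled by load_info.strength and
- // accumulated into result.data with layer 1 stored at offset 0.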
- static common_control_vector_data common_control_vector_load_one(const common_control_vector_load_info & load_info) {
- common_control_vector_data result = { -1, {} };
- ggml_context * ctx = nullptr;
- struct gguf_init_params meta_gguf_params = {
- /* .no_alloc = */ false,
- /* .ctx = */ &ctx,
- };
- struct gguf_context * ctx_gguf = gguf_init_from_file(load_info.fname.c_str(), meta_gguf_params);
- if (!ctx_gguf) {
- LOG_ERR("%s: failed to load control vector file from %s\n", __func__, load_info.fname.c_str());
- return result;
- }
- int32_t n_tensors = gguf_get_n_tensors(ctx_gguf);
- if (n_tensors == 0) {
- LOG_WRN("%s: no direction tensors found in %s\n", __func__, load_info.fname.c_str());
- }
- for (int i = 0; i < n_tensors; i++) {
- std::string name = gguf_get_tensor_name(ctx_gguf, i);
- int layer_idx = -1;
- // split on '.'
- size_t dotpos = name.find('.');
- if (dotpos != std::string::npos && name.substr(0, dotpos) == "direction") {
- try {
- layer_idx = std::stoi(name.substr(dotpos + 1));
- } catch (...) {
- layer_idx = -1;
- }
- }
- if (layer_idx < 0) {
- LOG_ERR("%s: invalid/unparsable direction tensor layer index in %s\n", __func__, load_info.fname.c_str());
- result.n_embd = -1;
- break;
- } else if (layer_idx == 0) {
- LOG_ERR("%s: invalid (zero) direction tensor layer index in %s\n", __func__, load_info.fname.c_str());
- result.n_embd = -1;
- break;
- }
- struct ggml_tensor * tensor = ggml_get_tensor(ctx, name.c_str());
- if (tensor->type != GGML_TYPE_F32) {
- LOG_ERR("%s: invalid (non-F32) direction tensor type in %s\n", __func__, load_info.fname.c_str());
- result.n_embd = -1;
- break;
- }
- if (ggml_n_dims(tensor) != 1) {
- LOG_ERR("%s: invalid (non-1D) direction tensor shape in %s\n", __func__, load_info.fname.c_str());
- result.n_embd = -1;
- break;
- }
- if (result.n_embd == -1) {
- result.n_embd = ggml_nelements(tensor);
- } else if (ggml_nelements(tensor) != result.n_embd) {
- LOG_ERR("%s: direction tensor in %s does not match previous dimensions\n", __func__, load_info.fname.c_str());
- result.n_embd = -1;
- break;
- }
- // extend if necessary - do not store data for layer 0 (it's not used)
- result.data.resize(std::max(result.data.size(), static_cast<size_t>(result.n_embd * layer_idx)), 0.0f);
- const float * src = (const float *) tensor->data;
- float * dst = result.data.data() + result.n_embd * (layer_idx - 1); // layer 1 at [0]
- for (int j = 0; j < result.n_embd; j++) {
- dst[j] += src[j] * load_info.strength; // allows multiple directions for same layer in same file
- }
- }
- if (result.n_embd == -1) {
- LOG_WRN("%s: skipping %s due to invalid direction tensors\n", __func__, load_info.fname.c_str());
- result.data.clear();
- }
- gguf_free(ctx_gguf);
- ggml_free(ctx);
- return result;
- }
- common_control_vector_data common_control_vector_load(const std::vector<common_control_vector_load_info> & load_infos) {
- common_control_vector_data result = { -1, {} };
- for (const auto & info : load_infos) {
- auto cur = common_control_vector_load_one(info);
- if (cur.n_embd == -1) {
- result.n_embd = -1;
- break;
- }
- if (result.n_embd != -1 && result.n_embd != cur.n_embd) {
- LOG_ERR("%s: control vectors in %s does not match previous dimensions\n", __func__, info.fname.c_str());
- result.n_embd = -1;
- break;
- }
- if (result.n_embd == -1) {
- result = std::move(cur);
- } else {
- result.data.resize(std::max(result.data.size(), cur.data.size()), 0.0f); // extend if necessary
- for (size_t i = 0; i < cur.data.size(); i++) {
- result.data[i] += cur.data[i];
- }
- }
- }
- if (result.n_embd == -1) {
- LOG_ERR("%s: no valid control vector files passed\n", __func__);
- result.data.clear();
- }
- return result;
- }
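- // Builds a ggml_opt dataset for next-token prediction: each datapoint is a
- // window of n_ctx tokens starting at idata*stride, with labels shifted one
- // token to the right.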
- ggml_opt_dataset_t common_opt_dataset_init(struct llama_context * ctx, const std::vector<llama_token> & tokens, int64_t stride) {
- const int64_t ne_datapoint = llama_n_ctx(ctx);
- const int64_t ndata = (tokens.size() - ne_datapoint - 1) / stride;
- ggml_opt_dataset_t result = ggml_opt_dataset_init(
- GGML_TYPE_I32, GGML_TYPE_I32, ne_datapoint, ne_datapoint, ndata, /*ndata_shard =*/ 1);
- llama_token * data = (llama_token *) ggml_opt_dataset_data(result)->data;
- llama_token * labels = (llama_token *) ggml_opt_dataset_labels(result)->data;
- for (int64_t idata = 0; idata < ndata; ++idata) {
- memcpy(data + idata*ne_datapoint, tokens.data() + idata*stride + 0, ne_datapoint*sizeof(llama_token));
- memcpy(labels + idata*ne_datapoint, tokens.data() + idata*stride + 1, ne_datapoint*sizeof(llama_token));
- }
- return result;
- }
|