diff --git a/src/llama.cpp b/src/llama.cpp
index e6f303d31..bc88c8487 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -1801,7 +1801,6 @@ struct llama_mmap {
         }
 
         if (prefetch > 0) {
-#if _WIN32_WINNT >= 0x602
             // PrefetchVirtualMemory is only present on Windows 8 and above, so we dynamically load it
             BOOL (WINAPI *pPrefetchVirtualMemory) (HANDLE, ULONG_PTR, PWIN32_MEMORY_RANGE_ENTRY, ULONG);
             HMODULE hKernel32 = GetModuleHandleW(L"kernel32.dll");
@@ -1819,9 +1818,6 @@ struct llama_mmap {
                             llama_format_win_err(GetLastError()).c_str());
                 }
             }
-#else
-            throw std::runtime_error("PrefetchVirtualMemory unavailable");
-#endif
         }
     }
 
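
For context, below is a minimal standalone sketch of the runtime-lookup pattern the remaining code relies on: instead of gating the call behind a compile-time _WIN32_WINNT check, the symbol is resolved with GetProcAddress and the prefetch is simply skipped when it is absent. This is illustrative only, not the llama.cpp implementation; it assumes an SDK recent enough to declare WIN32_MEMORY_RANGE_ENTRY and uses a plain static buffer in place of the memory-mapped model file.

// sketch_prefetch.cpp - resolve PrefetchVirtualMemory at runtime so the same
// binary still starts on pre-Windows 8 systems, where the lookup fails and
// prefetching is skipped rather than aborting.
#include <windows.h>
#include <cstdio>

int main() {
    // Function pointer matching the PrefetchVirtualMemory signature.
    BOOL (WINAPI *pPrefetchVirtualMemory)(HANDLE, ULONG_PTR, PWIN32_MEMORY_RANGE_ENTRY, ULONG) = nullptr;

    // kernel32.dll is always mapped into the process, so GetModuleHandleW suffices.
    HMODULE hKernel32 = GetModuleHandleW(L"kernel32.dll");

    // Look the symbol up at runtime; returns NULL on Windows 7 and earlier.
    pPrefetchVirtualMemory = reinterpret_cast<decltype(pPrefetchVirtualMemory)>(
        GetProcAddress(hKernel32, "PrefetchVirtualMemory"));

    if (!pPrefetchVirtualMemory) {
        std::printf("PrefetchVirtualMemory is not available; skipping prefetch\n");
        return 0;
    }

    // Advise the kernel to preload a committed region. A static buffer stands in
    // for the memory-mapped model file used by llama_mmap.
    static char buffer[1 << 20];
    WIN32_MEMORY_RANGE_ENTRY range;
    range.VirtualAddress = buffer;
    range.NumberOfBytes  = sizeof(buffer);

    if (!pPrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0)) {
        std::printf("PrefetchVirtualMemory failed: error %lu\n", GetLastError());
    } else {
        std::printf("PrefetchVirtualMemory succeeded\n");
    }
    return 0;
}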