llama : support WinXP build with MinGW 8.1.0 (#3419)

Karthik Kumar Viswanathan, 2024-01-14 00:41:44 -08:00, committed by GitHub
parent 147b17ac94
commit ac32902a87
2 changed files with 10 additions and 2 deletions

CMakeLists.txt

@@ -1,4 +1,4 @@
-cmake_minimum_required(VERSION 3.13) # for add_link_options
+cmake_minimum_required(VERSION 3.14) # for add_link_options and implicit target directories.
 project("llama.cpp" C CXX)
 set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
@@ -76,6 +76,10 @@ if (NOT MSVC)
     option(LLAMA_F16C "llama: enable F16C" ${INS_ENB})
 endif()
 
+if (WIN32)
+    option(LLAMA_WIN_VER "llama: Windows Version" 0x602)
+endif()
+
 # 3rd party libs
 option(LLAMA_ACCELERATE "llama: enable Accelerate framework" ON)
 option(LLAMA_BLAS "llama: use BLAS" OFF)
@@ -686,7 +690,7 @@ endif()
 if (MINGW)
     # Target Windows 8 for PrefetchVirtualMemory
-    add_compile_definitions(_WIN32_WINNT=0x602)
+    add_compile_definitions(_WIN32_WINNT=${LLAMA_WIN_VER})
 endif()
 
 #
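
With LLAMA_WIN_VER exposed as a cache option, the targeted Windows API level can now be lowered at configure time instead of being hard-coded to 0x602. As a rough usage sketch (the exact generator and toolchain flags depend on the local MinGW setup), a Windows XP build would be configured along these lines, 0x501 being the conventional _WIN32_WINNT value for XP:

    cmake -B build -G "MinGW Makefiles" -DLLAMA_WIN_VER=0x501
    cmake --build build

Leaving the option at its default of 0x602 keeps the previous Windows 8 behavior.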

llama.cpp

@@ -987,6 +987,7 @@ struct llama_mmap {
         }
 
         if (prefetch > 0) {
+#if _WIN32_WINNT >= 0x602
             // PrefetchVirtualMemory is only present on Windows 8 and above, so we dynamically load it
             BOOL (WINAPI *pPrefetchVirtualMemory) (HANDLE, ULONG_PTR, PWIN32_MEMORY_RANGE_ENTRY, ULONG);
             HMODULE hKernel32 = GetModuleHandleW(L"kernel32.dll");
@@ -1004,6 +1005,9 @@ struct llama_mmap {
                         llama_format_win_err(GetLastError()).c_str());
                 }
             }
+#else
+            throw std::runtime_error("PrefetchVirtualMemory unavailable");
+#endif
         }
     }
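
The added guard makes the Windows 8 dependency explicit: PrefetchVirtualMemory is looked up at run time via GetProcAddress so the binary still loads on older Windows, and builds configured below 0x602 fall back to the runtime error instead. Below is a minimal standalone sketch of that pattern, not the actual llama_mmap code: the helper name prefetch_mapping and its parameters are hypothetical, and the real code logs failures with llama_format_win_err rather than returning a boolean.

#include <windows.h>
#include <stdexcept>

// Hedged sketch of the dynamic-lookup pattern used above. PrefetchVirtualMemory
// exists only on Windows 8 and later, so it is resolved with GetProcAddress
// instead of being linked directly, keeping the executable loadable on older
// Windows versions.
static bool prefetch_mapping(void * addr, size_t len) {
#if _WIN32_WINNT >= 0x602
    BOOL (WINAPI *pPrefetchVirtualMemory)(HANDLE, ULONG_PTR, PWIN32_MEMORY_RANGE_ENTRY, ULONG);
    HMODULE hKernel32 = GetModuleHandleW(L"kernel32.dll");
    if (hKernel32 == NULL) {
        return false; // kernel32 is always loaded, but stay defensive
    }
    pPrefetchVirtualMemory = reinterpret_cast<decltype(pPrefetchVirtualMemory)>(
        GetProcAddress(hKernel32, "PrefetchVirtualMemory"));
    if (pPrefetchVirtualMemory == NULL) {
        return false; // running on Windows 7 or older: silently skip the hint
    }
    // advise the kernel to page in the mapped region ahead of first use
    WIN32_MEMORY_RANGE_ENTRY range;
    range.VirtualAddress = addr;
    range.NumberOfBytes  = (SIZE_T) len;
    return pPrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0) != FALSE;
#else
    // headers/target predate Windows 8: mirror the fallback added in the diff
    (void) addr; (void) len;
    throw std::runtime_error("PrefetchVirtualMemory unavailable");
#endif
}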