xmake.lua

package("llama.cpp")
    set_homepage("https://github.com/ggerganov/llama.cpp")
    set_description("Port of Facebook's LLaMA model in C/C++")

    add_urls("https://github.com/ggerganov/llama.cpp.git")
    add_versions("2023.03.11", "7d9ed7b25fe17db3fc8848b5116d14682864ce8e")

    -- platform-specific link dependencies
    if is_plat("macosx") then
        add_frameworks("Accelerate")
    elseif is_plat("linux") then
        add_syslinks("pthread")
    end

    on_install("linux", "macosx", function (package)
        local configs = {}
        -- upstream ships no xmake build script, so generate a minimal one
        -- that builds the C sources (ggml) before installing
        io.writefile("xmake.lua", [[
            add_rules("mode.release", "mode.debug")
            target("llama")
                set_kind("$(kind)")
                add_files("*.c")
                add_headerfiles("(*.h)")
                set_languages("c11")
                add_cflags("-pthread")
                if is_plat("macosx") then
                    add_defines("GGML_USE_ACCELERATE")
                    add_frameworks("Accelerate")
                end
                if is_arch("x86_64", "x64", "i386", "x86") then
                    add_vectorexts("avx", "avx2", "sse3")
                    add_cflags("-mf16c")
                elseif is_arch("arm.*") then
                    add_vectorexts("neon")
                end
        ]])
        if package:config("shared") then
            configs.kind = "shared"
        end
        import("package.tools.xmake").install(package, configs)
    end)

    on_test(function (package)
        assert(package:has_cfuncs("ggml_time_us", {includes = "ggml.h"}))
    end)
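
-- Usage sketch (not part of this package file; the target and source names
-- below are hypothetical): a consuming project would typically pull the
-- package in from its own xmake.lua roughly like this:
--
--     add_requires("llama.cpp")
--
--     target("demo")
--         set_kind("binary")
--         add_files("src/*.c")
--         add_packages("llama.cpp")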