From 6d8888b26702f5d6cb135acae52f5b534e20301e Mon Sep 17 00:00:00 2001
From: Jared Van Bortel
Date: Wed, 15 May 2024 12:46:12 -0400
Subject: [PATCH] llamamodel: free the batch in embedInternal (#2348)

Signed-off-by: Jared Van Bortel
---
 gpt4all-backend/llamamodel.cpp | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/gpt4all-backend/llamamodel.cpp b/gpt4all-backend/llamamodel.cpp
index 5ab89e2b9d9a..7c66be9fe3be 100644
--- a/gpt4all-backend/llamamodel.cpp
+++ b/gpt4all-backend/llamamodel.cpp
@@ -940,6 +940,8 @@ void LLamaModel::embedInternal(
     }
 
     if (tokenCount) { *tokenCount = totalTokens; }
+
+    llama_batch_free(batch);
 }
 
 #if defined(_WIN32)