Skip to content

Commit

Permalink
renamed misnamed ACCELERATE define, and removed all -march=native and -mtune=native flags
Browse files Browse the repository at this point in the history
  • Loading branch information
LostRuins committed Apr 5, 2023
1 parent 14273fe commit 57e9f92
Show file tree
Hide file tree
Showing 5 changed files with 15 additions and 15 deletions.
2 changes: 1 addition & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,7 @@ endif
# feel free to update the Makefile for your architecture and send a pull request or issue
ifeq ($(UNAME_M),$(filter $(UNAME_M),x86_64 i686))
# Use all CPU extensions that are available:
CFLAGS += -march=native -mtune=native
CFLAGS +=
endif
ifneq ($(filter ppc64%,$(UNAME_M)),)
POWER9_M := $(shell grep "POWER9" /proc/cpuinfo)
Expand Down
Binary file modified koboldcpp.dll
Binary file not shown.
Binary file modified koboldcpp_blas.dll
Binary file not shown.
28 changes: 14 additions & 14 deletions otherarch/ggml_v1.c
Original file line number Diff line number Diff line change
Expand Up @@ -87,7 +87,7 @@ typedef void* thread_ret_t;
#define GGML_V1_SOFT_MAX_UNROLL 4
#define GGML_V1_VEC_DOT_UNROLL 2

#ifdef GGML_V1_USE_ACCELERATE
#ifdef GGML_USE_ACCELERATE
// uncomment to use vDSP for soft max computation
// note: not sure if it is actually faster
//#define GGML_V1_SOFT_MAX_ACCELERATE
Expand All @@ -110,9 +110,9 @@ typedef void* thread_ret_t;
} \
} while (0)

#ifdef GGML_V1_USE_ACCELERATE
#ifdef GGML_USE_ACCELERATE
#include <Accelerate/Accelerate.h>
#elif GGML_V1_USE_OPENBLAS
#elif GGML_USE_OPENBLAS
#include <cblas.h>
#endif

Expand Down Expand Up @@ -1742,7 +1742,7 @@ inline static void ggml_v1_vec_gelu_f32(const int n, float * y, const float * x)
#endif

inline static void ggml_v1_vec_sum_f32(const int n, float * s, const float * x) {
#ifndef GGML_V1_USE_ACCELERATE
#ifndef GGML_USE_ACCELERATE
ggml_v1_float sum = 0.0;
for (int i = 0; i < n; ++i) {
sum += x[i];
Expand All @@ -1754,7 +1754,7 @@ inline static void ggml_v1_vec_sum_f32(const int n, float * s, const float * x)
}

inline static void ggml_v1_vec_max_f32(const int n, float * s, const float * x) {
#ifndef GGML_V1_USE_ACCELERATE
#ifndef GGML_USE_ACCELERATE
ggml_v1_float max = -INFINITY;
for (int i = 0; i < n; ++i) {
max = MAX(max, x[i]);
Expand Down Expand Up @@ -5077,7 +5077,7 @@ static void ggml_v1_compute_forward_norm(

// ggml_v1_compute_forward_mul_mat

#if defined(GGML_V1_USE_ACCELERATE) || defined(GGML_V1_USE_OPENBLAS)
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
// helper function to determine if it is better to use BLAS or not
// for large matrices, BLAS is faster
static bool ggml_v1_compute_forward_mul_mat_use_blas(
Expand Down Expand Up @@ -5169,7 +5169,7 @@ static void ggml_v1_compute_forward_mul_mat_f32(
// nb00 < nb01 - src0 is transposed
// compute by src0 columns

#if defined(GGML_V1_USE_ACCELERATE) || defined(GGML_V1_USE_OPENBLAS)
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
if (ggml_v1_compute_forward_mul_mat_use_blas(src0, src1, dst)) {
GGML_V1_ASSERT(nb10 == sizeof(float));

Expand Down Expand Up @@ -5414,7 +5414,7 @@ static void ggml_v1_compute_forward_mul_mat_f16_f32(
// nb00 < nb01 - src0 is transposed
// compute by src0 columns

#if defined(GGML_V1_USE_ACCELERATE) || defined(GGML_V1_USE_OPENBLAS)
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
if (ggml_v1_compute_forward_mul_mat_use_blas(src0, src1, dst)) {
GGML_V1_ASSERT(nb10 == sizeof(float));

Expand Down Expand Up @@ -5720,7 +5720,7 @@ static void ggml_v1_compute_forward_mul_mat_q4_0_f32(
// nb00 < nb01 - src0 is transposed
// compute by src0 columns

#if defined(GGML_V1_USE_ACCELERATE) || defined(GGML_V1_USE_OPENBLAS)
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
if (ggml_v1_compute_forward_mul_mat_use_blas(src0, src1, dst)) {
GGML_V1_ASSERT(nb10 == sizeof(float));

Expand Down Expand Up @@ -6020,7 +6020,7 @@ static void ggml_v1_compute_forward_mul_mat_q4_1_f32(
// nb00 < nb01 - src0 is transposed
// compute by src0 columns

#if defined(GGML_V1_USE_ACCELERATE) || defined(GGML_V1_USE_OPENBLAS)
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
if (ggml_v1_compute_forward_mul_mat_use_blas(src0, src1, dst)) {
GGML_V1_ASSERT(nb10 == sizeof(float));

Expand Down Expand Up @@ -8870,7 +8870,7 @@ void ggml_v1_graph_compute(struct ggml_v1_context * ctx, struct ggml_v1_cgraph *
} else {
if (node->src0->type == GGML_V1_TYPE_F16 &&
node->src1->type == GGML_V1_TYPE_F32) {
#if defined(GGML_V1_USE_ACCELERATE) || defined(GGML_V1_USE_OPENBLAS)
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
if (ggml_v1_compute_forward_mul_mat_use_blas(node->src0, node->src1, node)) {
node->n_tasks = 1; // TODO: this actually is doing nothing
// the threads are still spinning
Expand All @@ -8889,7 +8889,7 @@ void ggml_v1_graph_compute(struct ggml_v1_context * ctx, struct ggml_v1_cgraph *
cur = 0;
} else if (node->src0->type == GGML_V1_TYPE_Q4_0 &&
node->src1->type == GGML_V1_TYPE_F32) {
#if defined(GGML_V1_USE_ACCELERATE) || defined(GGML_V1_USE_OPENBLAS)
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
if (ggml_v1_compute_forward_mul_mat_use_blas(node->src0, node->src1, node)) {
node->n_tasks = 1;
cur = GGML_V1_TYPE_SIZE[GGML_V1_TYPE_F32]*(node->src0->ne[0]*node->src0->ne[1]);
Expand All @@ -8901,7 +8901,7 @@ void ggml_v1_graph_compute(struct ggml_v1_context * ctx, struct ggml_v1_cgraph *
#endif
} else if (node->src0->type == GGML_V1_TYPE_Q4_1 &&
node->src1->type == GGML_V1_TYPE_F32) {
#if defined(GGML_V1_USE_ACCELERATE) || defined(GGML_V1_USE_OPENBLAS)
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
if (ggml_v1_compute_forward_mul_mat_use_blas(node->src0, node->src1, node)) {
node->n_tasks = 1;
cur = GGML_V1_TYPE_SIZE[GGML_V1_TYPE_F32]*(node->src0->ne[0]*node->src0->ne[1]);
Expand Down Expand Up @@ -10150,7 +10150,7 @@ int ggml_v1_cpu_has_wasm_simd(void) {
}

int ggml_v1_cpu_has_blas(void) {
#if defined(GGML_V1_USE_ACCELERATE) || defined(GGML_V1_USE_OPENBLAS)
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
return 1;
#else
return 0;
Expand Down
Binary file modified quantize.exe
Binary file not shown.

0 comments on commit 57e9f92

Please sign in to comment.