Commit 36c3f41

ggml : fix CPU implementation
ggerganov committed Dec 21, 2023
1 parent 199f6bd commit 36c3f41
Showing 3 changed files with 12 additions and 13 deletions.
ggml.c: 9 changes (3 additions, 6 deletions)

@@ -10337,19 +10337,17 @@ static void ggml_compute_forward_out_prod(
 static void ggml_compute_forward_scale_f32(
         const struct ggml_compute_params * params,
         const struct ggml_tensor * src0,
-        const struct ggml_tensor * src1,
         struct ggml_tensor * dst) {
     GGML_ASSERT(ggml_is_contiguous(src0));
     GGML_ASSERT(ggml_is_contiguous(dst));
     GGML_ASSERT(ggml_are_same_shape(src0, dst));
-    GGML_ASSERT(ggml_is_scalar(src1));
 
     if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
         return;
     }
 
     // scale factor
-    const float v = *(float *) src1->data;
+    const float v = *(float *) dst->op_params;
 
     const int ith = params->ith;
     const int nth = params->nth;
@@ -10380,12 +10378,11 @@ static void ggml_compute_forward_scale_f32(
 static void ggml_compute_forward_scale(
         const struct ggml_compute_params * params,
         const struct ggml_tensor * src0,
-        const struct ggml_tensor * src1,
         struct ggml_tensor * dst) {
     switch (src0->type) {
         case GGML_TYPE_F32:
             {
-                ggml_compute_forward_scale_f32(params, src0, src1, dst);
+                ggml_compute_forward_scale_f32(params, src0, dst);
             } break;
         default:
             {
@@ -14395,7 +14392,7 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor) {
             } break;
         case GGML_OP_SCALE:
             {
-                ggml_compute_forward_scale(params, tensor->src[0], tensor->src[1], tensor);
+                ggml_compute_forward_scale(params, tensor->src[0], tensor);
             } break;
         case GGML_OP_SET:
             {
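
The fix routes the scale factor through the destination tensor's op_params blob instead of a second source tensor. For context, a minimal sketch of the graph-building side this kernel now depends on, assuming ggml's ggml_set_op_params helper and eliding the grad/inplace bookkeeping (not part of this diff):

    // Sketch of the builder side (assumed): stash the float into the
    // result tensor's op_params so the CPU kernel above can recover it
    // with: const float v = *(float *) dst->op_params;
    static struct ggml_tensor * ggml_scale_impl(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            float                 s,
            bool                  inplace) {
        struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a)
                                              : ggml_dup_tensor(ctx, a);

        ggml_set_op_params(result, &s, sizeof(s)); // copies the 4 bytes of s

        result->op     = GGML_OP_SCALE;
        result->src[0] = a; // only one source tensor remains

        return result;
    }
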
tests/test-backend-ops.cpp: 9 changes (5 additions, 4 deletions)

@@ -766,18 +766,19 @@ struct test_bin_bcast : public test_case {
 struct test_scale : public test_case {
     const ggml_type type;
     const std::array<int64_t, 4> ne;
+    float scale;
 
     std::string vars() override {
-        return VARS_TO_STR2(type, ne);
+        return VARS_TO_STR3(type, ne, scale);
     }
 
     test_scale(ggml_type type = GGML_TYPE_F32,
-            std::array<int64_t, 4> ne = {10, 10, 10, 10})
-        : type(type), ne(ne) {}
+            std::array<int64_t, 4> ne = {10, 10, 10, 10},
+            float scale = 2.0f)
+        : type(type), ne(ne), scale(scale) {}
 
     ggml_tensor * build_graph(ggml_context * ctx) override {
         ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
-        ggml_tensor * scale = ggml_new_tensor_1d(ctx, type, 1);
         ggml_tensor * out = ggml_scale(ctx, a, scale);
         return out;
     }
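
With the constructor taking a plain float, the test no longer allocates a 1-element tensor for the scale factor. Calling code follows the same pattern; a minimal usage sketch, assuming a valid ggml_context * ctx:

    // The scale factor is now passed as a float, not as a tensor.
    struct ggml_tensor * a   = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 10);
    struct ggml_tensor * out = ggml_scale(ctx, a, 2.0f); // out[i] = 2 * a[i]
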
tests/test-grad0.cpp: 7 changes (4 additions, 3 deletions)

@@ -887,13 +887,14 @@ int main(int argc, const char ** argv) {
             ne2[0] = 1;
 
             for (int ndims = 1; ndims <= 2; ++ndims) {
                 x[1] = get_random_tensor_f32(ctx0, 1, ne2, -1.0f, 1.0f);
                 x[0] = get_random_tensor_f32(ctx0, ndims, ne, -1.0f, 1.0f);
 
+                const float s = -1.0f + 2.0f*frand();
+
                 ggml_set_param(ctx0, x[0]);
-                ggml_set_param(ctx0, x[1]);
 
-                struct ggml_tensor * f = ggml_sum(ctx0, ggml_scale(ctx0, x[0], x[1]));
+                struct ggml_tensor * f = ggml_sum(ctx0, ggml_scale(ctx0, x[0], s));
 
                 check_gradient("scale", ctx0, x, f, ndims, nargs, 1e-3f, 1e-3f, INFINITY);
             }
@@ -1395,7 +1396,7 @@ int main(int argc, const char ** argv) {
                         ggml_add1(ctx0,
                             ggml_scale(ctx0,
                                 ggml_soft_max(ctx0, x[0]),
-                                ggml_new_f32(ctx0, 1.0f - eps)),
+                                1.0f - eps),
                             ggml_new_f32(ctx0, eps))));
 
                 check_gradient("softmax", ctx0, x, f, ndims, nargs, 1e-3f, 2e-1f, INFINITY);
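
Because the scale factor is now an op parameter rather than a graph input, the gradient check covers only x[0]; there is no longer a gradient path through the factor itself. The backward rule is unchanged (d(s*a)/da = s), and the backward pass presumably reads the factor back out of op_params. A hedged sketch of the corresponding fragment of ggml_compute_backward (assumed, not shown in this commit):

    // Inside ggml_compute_backward's op switch (sketch):
    case GGML_OP_SCALE:
        {
            if (src0->grad) {
                float s;
                memcpy(&s, tensor->op_params, sizeof(float)); // same bytes the forward stored

                // d(s * src0)/d(src0) = s, so propagate s * (dL/dtensor)
                src0->grad = ggml_add(ctx, src0->grad,
                                      ggml_scale(ctx, tensor->grad, s));
            }
        } break;
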
