From dcf8b53abc0638cbe49a0b4b365bf3fffcbfe07c Mon Sep 17 00:00:00 2001 From: Patrick Pfeiffer Date: Wed, 17 Jan 2024 08:02:36 +0100 Subject: [PATCH 01/42] (BIDS-2872) wip --- cmd/explorer/main.go | 4 ++++ local-deployment/docker-compose.yml | 9 +++++++++ local-deployment/provision-explorer-config.sh | 6 ++++++ local-deployment/run.sh | 8 +++++++- metrics/metrics.go | 1 + types/config.go | 1 + 6 files changed, 28 insertions(+), 1 deletion(-) diff --git a/cmd/explorer/main.go b/cmd/explorer/main.go index 3d3751a0de..8e944b7e62 100644 --- a/cmd/explorer/main.go +++ b/cmd/explorer/main.go @@ -11,6 +11,7 @@ import ( "eth2-exporter/handlers" "eth2-exporter/metrics" "eth2-exporter/price" + "eth2-exporter/ratelimit" "eth2-exporter/rpc" "eth2-exporter/services" "eth2-exporter/static" @@ -606,6 +607,9 @@ func main() { router.Use(metrics.HttpMiddleware) } + ratelimit.Init(utils.Config.RedisSessionStoreEndpoint, "/api/") + router.Use(ratelimit.HttpMiddleware) + n := negroni.New(negroni.NewRecovery()) n.Use(gzip.Gzip(gzip.DefaultCompression)) diff --git a/local-deployment/docker-compose.yml b/local-deployment/docker-compose.yml index f579fd80e7..e617c43441 100644 --- a/local-deployment/docker-compose.yml +++ b/local-deployment/docker-compose.yml @@ -35,3 +35,12 @@ services: command: go run ./cmd/explorer -config /app/local-deployment/config.yml environment: - FRONTEND_ENABLED=true + redis-sessions: + image: redis:7 + volumes: + - redis-sessions:/data + ports: + - "$REDIS_SESSIONS_PORT:6379" + +volumes: + redis-sessions: diff --git a/local-deployment/provision-explorer-config.sh b/local-deployment/provision-explorer-config.sh index 57a1ac2235..e478d4bf44 100644 --- a/local-deployment/provision-explorer-config.sh +++ b/local-deployment/provision-explorer-config.sh @@ -1,4 +1,5 @@ #! 
/bin/bash + CL_PORT=$(kurtosis enclave inspect my-testnet | grep 4000/tcp | tr -s ' ' | cut -d " " -f 6 | sed -e 's/http\:\/\/127.0.0.1\://' | head -n 1) echo "CL Node port is $CL_PORT" @@ -8,6 +9,9 @@ echo "EL Node port is $EL_PORT" REDIS_PORT=$(kurtosis enclave inspect my-testnet | grep 6379/tcp | tr -s ' ' | cut -d " " -f 6 | sed -e 's/tcp\:\/\/127.0.0.1\://' | head -n 1) echo "Redis port is $REDIS_PORT" +REDIS_SESSIONS_PORT=$(comm -23 <(seq 49152 65535 | sort) <(ss -Htan | awk '{print $4}' | cut -d':' -f2 | sort -u) | shuf | head -n 1) +echo "Redis sessions port is $REDIS_SESSIONS_PORT" + POSTGRES_PORT=$(kurtosis enclave inspect my-testnet | grep 5432/tcp | tr -s ' ' | cut -d " " -f 6 | sed -e 's/postgresql\:\/\/127.0.0.1\://' | head -n 1) echo "Postgres port is $POSTGRES_PORT" @@ -18,6 +22,7 @@ cat < .env CL_PORT=$CL_PORT EL_PORT=$EL_PORT REDIS_PORT=$REDIS_PORT +REDIS_SESSIONS_PORT=$REDIS_SESSIONS_PORT POSTGRES_PORT=$POSTGRES_PORT LBT_PORT=$LBT_PORT EOF @@ -56,6 +61,7 @@ bigtable: eth1ErigonEndpoint: 'http://127.0.0.1:$EL_PORT' eth1GethEndpoint: 'http://127.0.0.1:$EL_PORT' redisCacheEndpoint: '127.0.0.1:$REDIS_PORT' +redisSessionStoreEndpoint: '127.0.0.1:$REDIS_SESSIONS_PORT' tieredCacheProvider: 'redis' frontend: siteDomain: "localhost:8080" diff --git a/local-deployment/run.sh b/local-deployment/run.sh index 4c76aa56ab..539ea8dcf6 100755 --- a/local-deployment/run.sh +++ b/local-deployment/run.sh @@ -22,6 +22,7 @@ fn_main() { start) shift; fn_start "$@"; exit;; stop) shift; fn_stop "$@"; exit;; sql) shift; fn_sql "$@"; exit;; + redis) shift; fn_redis "$@"; exit;; *) echo "$var_help" esac shift @@ -32,6 +33,11 @@ fn_sql() { PGPASSWORD=pass psql -h localhost -p$POSTGRES_PORT -U postgres -d db } +fn_redis() { + docker compose exec redis-sessions redis-cli + #redis-cli -h localhost -p $REDIS_PORT +} + fn_start() { fn_stop kurtosis run --enclave my-testnet . 
"$(cat network-params.json)" @@ -42,7 +48,7 @@ fn_start() { } fn_stop() { - docker compose down + docker compose down -v kurtosis clean -a } diff --git a/metrics/metrics.go b/metrics/metrics.go index 8577fe18c0..dd4187df71 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -125,6 +125,7 @@ type responseWriterDelegator struct { } func (r *responseWriterDelegator) WriteHeader(code int) { + logrus.Infof("metrics writeheader %v", code) r.status = code r.wroteHeader = true r.ResponseWriter.WriteHeader(code) diff --git a/types/config.go b/types/config.go index e8c4eb497c..2377df388d 100644 --- a/types/config.go +++ b/types/config.go @@ -132,6 +132,7 @@ type Config struct { Sapphire string `yaml:"sapphire" envconfig:"FRONTEND_STRIPE_SAPPHIRE"` Emerald string `yaml:"emerald" envconfig:"FRONTEND_STRIPE_EMERALD"` Diamond string `yaml:"diamond" envconfig:"FRONTEND_STRIPE_DIAMOND"` + Custom string `yaml:"custom" envconfig:"FRONTEND_STRIPE_CUSTOM"` Whale string `yaml:"whale" envconfig:"FRONTEND_STRIPE_WHALE"` Goldfish string `yaml:"goldfish" envconfig:"FRONTEND_STRIPE_GOLDFISH"` Plankton string `yaml:"plankton" envconfig:"FRONTEND_STRIPE_PLANKTON"` From ae8b4e0d01ad1a7780ef4c574ba2d6e7be46c7af Mon Sep 17 00:00:00 2001 From: Patrick Pfeiffer Date: Thu, 18 Jan 2024 10:43:00 +0100 Subject: [PATCH 02/42] (BIDS-2872) wip --- cmd/misc/main.go | 56 +++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 55 insertions(+), 1 deletion(-) diff --git a/cmd/misc/main.go b/cmd/misc/main.go index 4a2f21c290..6abb956f94 100644 --- a/cmd/misc/main.go +++ b/cmd/misc/main.go @@ -66,7 +66,7 @@ func main() { statsPartitionCommand := commands.StatsMigratorCommand{} configPath := flag.String("config", "config/default.config.yml", "Path to the config file") - flag.StringVar(&opts.Command, "command", "", "command to run, available: updateAPIKey, applyDbSchema, initBigtableSchema, epoch-export, debug-rewards, debug-blocks, clear-bigtable, index-old-eth1-blocks, update-aggregation-bits, historic-prices-export, index-missing-blocks, export-epoch-missed-slots, migrate-last-attestation-slot-bigtable, export-genesis-validators, update-block-finalization-sequentially, nameValidatorsByRanges, export-stats-totals, export-sync-committee-periods, export-sync-committee-validator-stats, partition-validator-stats, migrate-app-purchases") + flag.StringVar(&opts.Command, "command", "", "command to run, available: updateAPIKey, applyDbSchema, initBigtableSchema, epoch-export, debug-rewards, debug-blocks, clear-bigtable, index-old-eth1-blocks, update-aggregation-bits, historic-prices-export, index-missing-blocks, export-epoch-missed-slots, migrate-last-attestation-slot-bigtable, export-genesis-validators, update-block-finalization-sequentially, nameValidatorsByRanges, export-stats-totals, export-sync-committee-periods, export-sync-committee-validator-stats, partition-validator-stats, migrate-app-purchases, update-ratelimits") flag.Uint64Var(&opts.StartEpoch, "start-epoch", 0, "start epoch") flag.Uint64Var(&opts.EndEpoch, "end-epoch", 0, "end epoch") flag.Uint64Var(&opts.User, "user", 0, "user id") @@ -391,6 +391,8 @@ func main() { err = fixEns(erigonClient) case "fix-ens-addresses": err = fixEnsAddresses(erigonClient) + case "update-ratelimits": + err = updateRatelimits() default: utils.LogFatal(nil, fmt.Sprintf("unknown command %s", opts.Command), 0) } @@ -1933,3 +1935,55 @@ func reExportSyncCommittee(rpcClient rpc.Client, p uint64, dryRun bool) error { return tx.Commit() } } + +func updateRatelimits() error { + var err error 
+ _, err = db.WriterDb.Exec( + `insert into api_keys (user_id, api_key) + select id, api_key from users where api_key is not null + on conflict (user_id, api_key) do nothing`, + ) + if err != nil { + return err + } + _, err = db.WriterDb.Exec( + `insert into api_ratelimits (user_id, second, hour, month, valid_until, changed_at) + + select + id as user_id, + case + when product = 'free' then 5 + when product = $1 then 10 + when product = $2 then 10 + when product = $3 then 30 + when product = $4 then 50 + when product = 'plankton' then 20 + when product = 'goldfish' then 20 + when product = 'whale' then 25 + else 50 + end as second, + 0 as hour, + case + when product = 'free' then 120000 + when product = $1 then 500000 + when product = $2 then 1000000 + when product = $3 then 6000000 + when product = $4 then 500000000 + when product = 'plankton' then 120000 + when product = 'goldfish' then 200000 + when product = 'whale' then 700000 + else 4000000000 + end as month, + now() + interval '1 month' as valid_until, + now() as changed_at + from ( + select id, price_id as product, api_key as key, coalesce(active,false) as active from users left join (select * from users_stripe_subscriptions where price_id = any('{$1,$2,$3,$4}')) as us on users.stripe_customer_id = us.customer_id where api_key is not null AND (price_id is not null OR id not in (select user_id from app_subs_view where app_subs_view.user_id = users.id AND active = true)) UNION SELECT user_id, product_id as product, api_key as key, active from app_subs_view left join users on users.id = app_subs_view.user_id where active = true AND api_key is not null AND (stripe_customer_id is null OR stripe_customer_id NOT IN (select customer_id from users_stripe_subscriptions where active = true and price_id = any('{$1,$2,$3,$4}')))) t where product is null or active = true + ) x + where active = true`, + utils.Config.Frontend.Stripe.Sapphire, + utils.Config.Frontend.Stripe.Emerald, + utils.Config.Frontend.Stripe.Diamond, + utils.Config.Frontend.Stripe.Custom, + ) + return err +} From 6ccd2eecd9b9d33c1e99d6cc2a8c515533e5fc73 Mon Sep 17 00:00:00 2001 From: Patrick Pfeiffer Date: Mon, 22 Jan 2024 10:24:02 +0100 Subject: [PATCH 03/42] (BIDS-2872) wip --- ratelimit/ratelimit.go | 717 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 717 insertions(+) create mode 100644 ratelimit/ratelimit.go diff --git a/ratelimit/ratelimit.go b/ratelimit/ratelimit.go new file mode 100644 index 0000000000..3127452e15 --- /dev/null +++ b/ratelimit/ratelimit.go @@ -0,0 +1,717 @@ +package RateLimit + +import ( + "context" + "eth2-exporter/db" + "eth2-exporter/metrics" + "fmt" + "net" + "net/http" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/go-redis/redis/v8" + "github.com/gorilla/mux" + "github.com/sirupsen/logrus" + "golang.org/x/time/rate" +) + +type TimeWindow string + +const ( + HeaderRateLimitLimit = "X-RateLimit-Limit" // the rate limit ceiling that is applicable for the current request + HeaderRateLimitRemaining = "X-RateLimit-Remaining" // the number of requests left for the current rate-limit window + HeaderRateLimitReset = "X-RateLimit-Reset" // the number of seconds until the quota resets + HeaderRetryAfter = "Retry-After" // the number of seconds until the quota resets, same as HeaderRateLimitReset, RFC 7231, 7.1.3 + + NokeyRateLimitSecond = 5 // RateLimit for requests without or with invalid apikey + NokeyRateLimitHour = 500 // RateLimit for requests without or with invalid apikey + NokeyRateLimitMonth = 0 // RateLimit 
for requests without or with invalid apikey + + FallbackRateLimitSecond = 20 // RateLimit for when redis is offline + FallbackRateLimitBurst = 20 // RateLimit for when redis is offline + + SecondTimeWindow = "second" + HourTimeWindow = "hour" + MonthTimeWindow = "month" +) + +var NoKeyRateLimit = &RateLimit{ + Second: NokeyRateLimitSecond, + Hour: NokeyRateLimitHour, + Month: NokeyRateLimitMonth, +} + +var redisClient *redis.Client +var redisIsHealthy atomic.Bool + +var fallbackRateLimiter = NewFallbackRateLimiter() // if redis is offline, use this rate limiter + +var initializedWg = &sync.WaitGroup{} // wait for everything to be initialized before serving requests + +var rateLimitsMu = &sync.RWMutex{} +var rateLimits = make(map[string]*RateLimit) // guarded by RateLimitsMu +var rateLimitsByKey = make(map[string]*RateLimit) // guarded by RateLimitsMu + +var weightsMu = &sync.RWMutex{} +var weights = map[string]int64{} // guarded by weightsMu + +var pathPrefix = "" // only requests with this prefix will be RateLimited + +var logger = logrus.StandardLogger().WithField("module", "ratelimit") + +type dbEntry struct { + Date time.Time + Key string + Path string + Count int64 +} + +type RateLimit struct { + Second int64 + Hour int64 + Month int64 +} + +type RateLimitResult struct { + Time time.Time + Weight int64 + Route string + IP string + Key string + IsValidKey bool + RedisKeys []RedisKey + RedisStatsKey string + RateLimit *RateLimit + Limit int64 + Remaining int64 + Reset int64 + Window TimeWindow +} + +type RedisKey struct { + Key string + ExpireAt time.Time +} + +type responseWriterDelegator struct { + http.ResponseWriter + written int64 + status int + wroteHeader bool +} + +func (r *responseWriterDelegator) Write(b []byte) (int, error) { + if !r.wroteHeader { + r.WriteHeader(http.StatusOK) + } + n, err := r.ResponseWriter.Write(b) + r.written += int64(n) + return n, err +} + +func (r *responseWriterDelegator) WriteHeader(code int) { + r.status = code + r.ResponseWriter.WriteHeader(code) +} + +func (r *responseWriterDelegator) Status() int { + return r.status +} + +// Init initializes the RateLimiting middleware, the RateLimiting middleware will not work without calling Init first. 
+func Init(redisAddress, pathPrefixOpt string) { + pathPrefix = pathPrefixOpt + + redisClient = redis.NewClient(&redis.Options{ + Addr: redisAddress, + ReadTimeout: time.Second * 3, + }) + + initializedWg.Add(3) + + go func() { + firstRun := true + for { + err := updateWeights(firstRun) + if err != nil { + logger.WithError(err).Errorf("error updating weights") + time.Sleep(time.Second * 2) + continue + } + if firstRun { + initializedWg.Done() + firstRun = false + } + time.Sleep(time.Second * 60) + } + }() + go func() { + firstRun := true + lastRunTime := time.Unix(0, 0) + for { + t, err := updateRateLimits(lastRunTime) + if err != nil { + logger.WithError(err).Errorf("error updating RateLimits") + time.Sleep(time.Second * 2) + continue + } + lastRunTime = t + if firstRun { + initializedWg.Done() + firstRun = false + } + time.Sleep(time.Second * 60) + } + }() + go func() { + firstRun := true + for { + err := updateRedisStatus() + if err != nil { + logger.WithError(err).Errorf("error checking redis") + time.Sleep(time.Second * 1) + continue + } + if firstRun { + initializedWg.Done() + firstRun = false + } + time.Sleep(time.Second * 1) + } + }() + go func() { + for { + err := updateStats() + if err != nil { + logger.WithError(err).Errorf("error updating stats") + } + time.Sleep(time.Second * 60) + } + }() + + initializedWg.Wait() +} + +// HttpMiddleware returns an http.Handler that can be used as middleware to RateLimit requests. If redis is offline, it will use a fallback rate limiter. +func HttpMiddleware(next http.Handler) http.Handler { + initializedWg.Wait() + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !strings.HasPrefix(r.URL.Path, pathPrefix) { + next.ServeHTTP(w, r) + return + } + + if !redisIsHealthy.Load() { + fallbackRateLimiter.Handle(w, r, next.ServeHTTP) + return + } + + rl, err := rateLimitRequest(r) + if err != nil { + // just serve the request if there is a problem with getting the rate limit + logger.WithFields(logrus.Fields{"error": err}).Errorf("error getting rate limit") + next.ServeHTTP(w, r) + return + } + // logrus.WithFields(logrus.Fields{"route": rl.Route, "key": rl.Key, "limit": rl.Limit, "remaining": rl.Remaining, "reset": rl.Reset, "window": rl.Window}).Infof("RateLimiting") + + w.Header().Set(HeaderRateLimitLimit, strconv.FormatInt(rl.Limit, 10)) + w.Header().Set(HeaderRateLimitRemaining, strconv.FormatInt(rl.Remaining, 10)) + w.Header().Set(HeaderRateLimitReset, strconv.FormatInt(rl.Reset, 10)) + if rl.Weight > rl.Remaining { + w.Header().Set(HeaderRetryAfter, strconv.FormatInt(rl.Reset, 10)) + http.Error(w, http.StatusText(http.StatusTooManyRequests), http.StatusTooManyRequests) + err = postRateLimit(rl, http.StatusTooManyRequests) + if err != nil { + logger.WithFields(logrus.Fields{"error": err}).Errorf("error calling postRateLimit") + } + return + } + d := &responseWriterDelegator{ResponseWriter: w} + next.ServeHTTP(d, r) + err = postRateLimit(rl, d.Status()) + if err != nil { + logger.WithFields(logrus.Fields{"error": err}).Errorf("error calling postRateLimit") + } + }) +} + +// updateWeights gets the weights from postgres and updates the weights map. 
+func updateWeights(firstRun bool) error { + dbWeights := []struct { + Endpoint string `db:"endpoint"` + Weight int64 `db:"weight"` + ValidFrom time.Time `db:"valid_from"` + }{} + err := db.WriterDb.Select(&dbWeights, "SELECT DISTINCT ON (endpoint) endpoint, weight, valid_from FROM api_weights WHERE valid_from <= NOW() ORDER BY endpoint, valid_from DESC") + if err != nil { + return err + } + weightsMu.Lock() + defer weightsMu.Unlock() + oldWeights := weights + weights = make(map[string]int64, len(dbWeights)) + for _, w := range dbWeights { + weights[w.Endpoint] = w.Weight + if !firstRun && oldWeights[w.Endpoint] != w.Weight { + logger.WithFields(logrus.Fields{"endpoint": w.Endpoint, "weight": w.Weight, "oldWeight": oldWeights[w.Endpoint]}).Infof("weight changed") + } + } + return nil +} + +// updateRedisStatus checks if redis is healthy and updates redisIsHealthy accordingly. +func updateRedisStatus() error { + oldStatus := redisIsHealthy.Load() + newStatus := true + ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second*1)) + defer cancel() + err := redisClient.Ping(ctx).Err() + if err != nil { + logger.WithError(err).Errorf("error pinging redis") + newStatus = false + } + if oldStatus != newStatus { + logger.WithFields(logrus.Fields{"oldStatus": oldStatus, "newStatus": newStatus}).Infof("redis status changed") + } + redisIsHealthy.Store(newStatus) + return nil +} + +// updateStats scans redis for ratelimit:stats:* keys and inserts them into postgres, if the key's date is in the past it will also delete the key in redis. +func updateStats() error { + allKeys := []string{} + cursor := uint64(0) + ctx := context.Background() + for { + cmd := redisClient.Scan(ctx, cursor, "ratelimit:stats:*:*:*", 1000) + if cmd.Err() != nil { + return cmd.Err() + } + keys, nextCursor, err := cmd.Result() + if err != nil { + return err + } + cursor = nextCursor + allKeys = append(allKeys, keys...) 
+ if cursor == 0 { + break + } + } + + batchSize := 10000 + for i := 0; i <= len(allKeys); i += batchSize { + start := i + end := i + batchSize + if end > len(allKeys) { + end = len(allKeys) + } + keys := allKeys[start:end] + entries := make([]dbEntry, len(keys)) + values := make([]*redis.StringCmd, len(keys)) + cmds, err := redisClient.Pipelined(ctx, func(pipe redis.Pipeliner) error { + for i, k := range keys { + ks := strings.Split(k, ":") + if len(ks) != 5 { + return fmt.Errorf("error parsing key %s: split-len != 5", k) + } + dateString := ks[2] + date, err := time.Parse("2006-01-02", dateString) + if err != nil { + return fmt.Errorf("error parsing date in key %s: %v", k, err) + } + key := ks[3] + path := ks[4] + values[i] = pipe.Get(ctx, k) + entries[i] = dbEntry{ + Date: date, + Key: key, + Path: path, + } + } + return nil + }) + for i := range cmds { + entries[i].Count, err = values[i].Int64() + if err != nil { + return fmt.Errorf("error parsing count of key %s: %v: %w", entries[i].Key, entries[i].Count, err) + } + } + if err != nil { + return err + } + err = updateStatsEntries(entries) + if err != nil { + return err + } + } + + return nil +} + +func updateStatsEntries(entries []dbEntry) error { + tx, err := db.WriterDb.Beginx() + if err != nil { + return err + } + defer tx.Rollback() + + numArgs := 4 + batchSize := 65535 / numArgs // max 65535 params per batch, since postgres uses int16 for binding input params + valueArgs := make([]interface{}, 0, batchSize*numArgs) + valueStrings := make([]string, 0, batchSize) + valueStringArr := make([]string, numArgs) + batchIdx, allIdx := 0, 0 + for _, entry := range entries { + for u := 0; u < numArgs; u++ { + valueStringArr[u] = fmt.Sprintf("$%d", batchIdx*numArgs+1+u) + } + + valueStrings = append(valueStrings, "("+strings.Join(valueStringArr, ",")+")") + valueArgs = append(valueArgs, entry.Date) + valueArgs = append(valueArgs, entry.Key) + valueArgs = append(valueArgs, entry.Path) + valueArgs = append(valueArgs, entry.Count) + + logger.WithFields(logrus.Fields{"count": entry.Count, "key": entry.Key}).Infof("inserting stats entry %v/%v", allIdx, len(entries)) + + batchIdx++ + allIdx++ + + if batchIdx >= batchSize || allIdx >= len(entries) { + stmt := fmt.Sprintf(`INSERT INTO api_statistics (ts, apikey, call, count) VALUES %s ON CONFLICT (ts, apikey, call) DO UPDATE SET count = excluded.count`, strings.Join(valueStrings, ",")) + _, err := tx.Exec(stmt, valueArgs...) + if err != nil { + return err + } + batchIdx = 0 + valueArgs = valueArgs[:0] + valueStrings = valueStrings[:0] + } + } + + err = tx.Commit() + if err != nil { + return err + } + + return nil +} + +// updateRateLimits gets the ratelimits from postgres and updates the ratelimits map. it will delete expired ratelimits and assumes that no other process deletes entries in the table api_ratelimits. 
+func updateRateLimits(lastUpdate time.Time) (time.Time, error) { + start := time.Now() + defer func() { + metrics.TaskDuration.WithLabelValues("ratelimit_updateRateLimits").Observe(time.Since(start).Seconds()) + }() + dbRateLimits := []struct { + UserID int64 `db:"user_id"` + ApiKey string `db:"apikey"` + Second int64 `db:"second"` + Hour int64 `db:"hour"` + Month int64 `db:"month"` + ValidUntil time.Time `db:"valid_until"` + ChangedAt time.Time `db:"changed_at"` + }{} + err := db.WriterDb.Select(&dbRateLimits, "SELECT ar.user_id, ak.apikey, ar.second, ar.hour, ar.month, ar.valid_until, ar.changed_at FROM api_ratelimits ar LEFT JOIN users u ON u.id = ar.user_id LEFT JOIN api_keys ak ON ak.user_id = u.id WHERE ar.changed_at > $1", lastUpdate) + if err != nil { + return lastUpdate, fmt.Errorf("error getting ratelimits: %w", err) + } + + rateLimitsMu.Lock() + now := time.Now() + newestChange := time.Unix(0, 0) + for _, dbRl := range dbRateLimits { + if dbRl.ChangedAt.After(newestChange) { + newestChange = dbRl.ChangedAt + } + rlStr := fmt.Sprintf("%d/%d/%d", dbRl.Second, dbRl.Hour, dbRl.Month) + rl, exists := rateLimits[rlStr] + if !exists { + rl = &RateLimit{ + Second: dbRl.Second, + Hour: dbRl.Hour, + Month: dbRl.Month, + } + rateLimits[rlStr] = rl + } + _, exists = rateLimitsByKey[dbRl.ApiKey] + if !exists { + rateLimitsByKey[dbRl.ApiKey] = rl + } + if dbRl.ValidUntil.Before(now) { + delete(rateLimitsByKey, dbRl.ApiKey) + } + } + rateLimitsMu.Unlock() + metrics.TaskDuration.WithLabelValues("ratelimit_updateRateLimits_lock").Observe(time.Since(now).Seconds()) + + return newestChange, nil +} + +func postRateLimit(rl *RateLimitResult, status int) error { + if status == 200 { + return nil + } + // logger.WithFields(logrus.Fields{"key": rl.Key, "status": status}).Infof("decreasing key") + // if status is not 200 decrement keys since we do not count unsuccessful requests + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + pipe := redisClient.Pipeline() + for _, k := range rl.RedisKeys { + pipe.DecrBy(ctx, k.Key, rl.Weight) + pipe.ExpireAt(ctx, k.Key, k.ExpireAt) // make sure all keys have a TTL + } + pipe.DecrBy(ctx, rl.RedisStatsKey, 1) + _, err := pipe.Exec(ctx) + if err != nil { + return err + } + return nil +} + +func rateLimitRequest(r *http.Request) (*RateLimitResult, error) { + start := time.Now() + defer func() { + metrics.TaskDuration.WithLabelValues("ratelimit_total").Observe(time.Since(start).Seconds()) + }() + + ctx, cancel := context.WithTimeout(r.Context(), time.Millisecond*1000) + defer cancel() + + res := &RateLimitResult{} + + key, ip := getKey(r) + res.Key = key + res.IP = ip + + rateLimitsMu.RLock() + limit, ok := rateLimits[key] + rateLimitsMu.RUnlock() + if !ok { + res.IsValidKey = false + res.RateLimit = &RateLimit{ + Second: NokeyRateLimitSecond, + Hour: NokeyRateLimitHour, + Month: NokeyRateLimitMonth, + } + } else { + res.IsValidKey = true + res.RateLimit = limit + } + + weight, path := getWeight(r) + res.Weight = weight + res.Route = path + + startUtc := start.UTC() + res.Time = startUtc + t := startUtc.AddDate(0, 1, -startUtc.Day()) + endOfMonthUtc := time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, time.UTC) + timeUntilEndOfMonthUtc := endOfMonthUtc.Sub(startUtc) + endOfHourUtc := time.Now().Truncate(time.Hour).Add(time.Hour) + timeUntilEndOfHourUtc := endOfHourUtc.Sub(startUtc) + + RateLimitSecondKey := "ratelimit:second:" + res.Key + RateLimitMonthKey := fmt.Sprintf("ratelimit:month:%04d-%02d:%s", startUtc.Year(), 
startUtc.Month(), res.Key) + RateLimitHourKey := fmt.Sprintf("ratelimit:hour:%04d-%02d-%02d:%s", startUtc.Year(), startUtc.Month(), startUtc.Hour(), res.Key) + + statsKey := fmt.Sprintf("ratelimit:stats:%04d-%02d-%02d:%s:%s", startUtc.Year(), startUtc.Month(), startUtc.Day(), res.Key, path) + if !res.IsValidKey { + statsKey = fmt.Sprintf("ratelimit:stats:%04d-%02d-%02d:%s:%s", startUtc.Year(), startUtc.Month(), startUtc.Day(), "nokey", path) + } + res.RedisStatsKey = statsKey + + pipe := redisClient.Pipeline() + + var RateLimitSecond, RateLimitHour, RateLimitMonth *redis.IntCmd + + if res.RateLimit.Second > 0 { + RateLimitSecond = pipe.IncrBy(ctx, RateLimitSecondKey, weight) + pipe.ExpireNX(ctx, RateLimitSecondKey, time.Second) + } + + if res.RateLimit.Hour > 0 { + RateLimitHour = pipe.IncrBy(ctx, RateLimitHourKey, weight) + pipe.ExpireAt(ctx, RateLimitHourKey, endOfHourUtc) + res.RedisKeys = append(res.RedisKeys, RedisKey{RateLimitHourKey, endOfHourUtc}) + } + + if res.RateLimit.Month > 0 { + RateLimitMonth = pipe.IncrBy(ctx, RateLimitMonthKey, weight) + pipe.ExpireAt(ctx, RateLimitMonthKey, endOfMonthUtc) + res.RedisKeys = append(res.RedisKeys, RedisKey{RateLimitMonthKey, endOfMonthUtc}) + } + + pipe.Incr(ctx, statsKey) + _, err := pipe.Exec(ctx) + if err != nil { + return nil, err + } + + if res.RateLimit.Second > 0 { + if RateLimitSecond.Val() > res.RateLimit.Second { + res.Limit = res.RateLimit.Second + res.Remaining = 0 + res.Reset = int64(1) + res.Window = SecondTimeWindow + return res, nil + } else if res.RateLimit.Second-RateLimitSecond.Val() > res.Limit { + res.Limit = res.RateLimit.Second - RateLimitSecond.Val() + res.Remaining = res.RateLimit.Second - RateLimitSecond.Val() + res.Reset = int64(1) + res.Window = SecondTimeWindow + } + } + + if res.RateLimit.Hour > 0 { + if RateLimitSecond.Val() > res.RateLimit.Hour { + res.Limit = res.RateLimit.Hour + res.Remaining = 0 + res.Reset = int64(timeUntilEndOfHourUtc.Seconds()) + res.Window = HourTimeWindow + return res, nil + } else if res.RateLimit.Hour-RateLimitHour.Val() > res.Limit { + res.Limit = res.RateLimit.Hour - RateLimitHour.Val() + res.Remaining = res.RateLimit.Hour - RateLimitHour.Val() + res.Reset = int64(timeUntilEndOfHourUtc.Seconds()) + res.Window = HourTimeWindow + } + } + + if res.RateLimit.Month > 0 { + if RateLimitSecond.Val() > res.RateLimit.Month { + res.Limit = res.RateLimit.Month + res.Remaining = 0 + res.Reset = int64(timeUntilEndOfMonthUtc.Seconds()) + res.Window = MonthTimeWindow + return res, nil + } else if res.RateLimit.Month-RateLimitMonth.Val() > res.Limit { + res.Limit = res.RateLimit.Month - RateLimitMonth.Val() + res.Remaining = res.RateLimit.Month - RateLimitMonth.Val() + res.Reset = int64(timeUntilEndOfMonthUtc.Seconds()) + res.Window = MonthTimeWindow + } + } + + return res, nil +} + +// getKey returns the key used for RateLimiting. It first checks the query params, then the header and finally the ip address. +func getKey(r *http.Request) (key, ip string) { + ip = getIP(r) + key = r.URL.Query().Get("apikey") + if key != "" { + return key, ip + } + key = r.Header.Get("X-API-KEY") + if key != "" { + return key, ip + } + return "ip_" + strings.ReplaceAll(ip, ":", "_"), ip +} + +// getWeight returns the weight of an endpoint. if the weight of the endpoint is not defined, it returns 1. 
+func getWeight(r *http.Request) (cost int64, identifier string) { + route := getRoute(r) + weightsMu.RLock() + weight, ok := weights[route] + weightsMu.RUnlock() + if ok { + return weight, route + } + return 1, route +} + +func getRoute(r *http.Request) string { + route := mux.CurrentRoute(r) + path, err := route.GetPathTemplate() + if err != nil { + path = "UNDEFINED" + } + return path +} + +// getIP returns the ip address from the http request +func getIP(r *http.Request) string { + ips := r.Header.Get("CF-Connecting-IP") + if ips == "" { + ips = r.Header.Get("X-Forwarded-For") + } + splitIps := strings.Split(ips, ",") + + if len(splitIps) > 0 { + // get last IP in list since ELB prepends other user defined IPs, meaning the last one is the actual client IP. + netIP := net.ParseIP(splitIps[len(splitIps)-1]) + if netIP != nil { + return netIP.String() + } + } + + ip, _, err := net.SplitHostPort(r.RemoteAddr) + if err != nil { + return "INVALID" + } + + netIP := net.ParseIP(ip) + if netIP != nil { + ip := netIP.String() + if ip == "::1" { + return "127.0.0.1" + } + return ip + } + + return "INVALID" +} + +type FallbackRateLimiterClient struct { + limiter *rate.Limiter + lastSeen time.Time +} + +type FallbackRateLimiter struct { + clients map[string]*FallbackRateLimiterClient + mu sync.Mutex +} + +func NewFallbackRateLimiter() *FallbackRateLimiter { + rl := &FallbackRateLimiter{ + clients: make(map[string]*FallbackRateLimiterClient), + } + go func() { + for { + time.Sleep(time.Minute) + rl.mu.Lock() + for ip, client := range rl.clients { + if time.Since(client.lastSeen) > 3*time.Minute { + delete(rl.clients, ip) + } + } + rl.mu.Unlock() + } + }() + return rl +} + +func (rl *FallbackRateLimiter) Handle(w http.ResponseWriter, r *http.Request, next func(writer http.ResponseWriter, request *http.Request)) { + key, _ := getKey(r) + rl.mu.Lock() + if _, found := rl.clients[key]; !found { + rl.clients[key] = &FallbackRateLimiterClient{limiter: rate.NewLimiter(FallbackRateLimitSecond, FallbackRateLimitBurst)} + } + rl.clients[key].lastSeen = time.Now() + if !rl.clients[key].limiter.Allow() { + rl.mu.Unlock() + w.Header().Set(HeaderRateLimitLimit, strconv.FormatInt(FallbackRateLimitSecond, 10)) + w.Header().Set(HeaderRateLimitReset, strconv.FormatInt(1, 10)) + http.Error(w, http.StatusText(http.StatusTooManyRequests), http.StatusTooManyRequests) + return + } + rl.mu.Unlock() + next(w, r) +} From 66ced50fe88e220e71ccb52feaf1a957e3fbbdc1 Mon Sep 17 00:00:00 2001 From: Patrick Pfeiffer Date: Mon, 22 Jan 2024 12:37:36 +0100 Subject: [PATCH 04/42] (BIDS-2872) wip --- cmd/misc/main.go | 3 +- ratelimit/ratelimit.go | 131 +++++++++++++++++++++++++++-------------- 2 files changed, 88 insertions(+), 46 deletions(-) diff --git a/cmd/misc/main.go b/cmd/misc/main.go index 6abb956f94..876002a648 100644 --- a/cmd/misc/main.go +++ b/cmd/misc/main.go @@ -1983,7 +1983,8 @@ func updateRatelimits() error { utils.Config.Frontend.Stripe.Sapphire, utils.Config.Frontend.Stripe.Emerald, utils.Config.Frontend.Stripe.Diamond, - utils.Config.Frontend.Stripe.Custom, + // utils.Config.Frontend.Stripe.Custom1, + // utils.Config.Frontend.Stripe.Custom2, ) return err } diff --git a/ratelimit/ratelimit.go b/ratelimit/ratelimit.go index 3127452e15..2b6cc22ca3 100644 --- a/ratelimit/ratelimit.go +++ b/ratelimit/ratelimit.go @@ -1,4 +1,4 @@ -package RateLimit +package ratelimit import ( "context" @@ -21,6 +21,12 @@ import ( type TimeWindow string +const ( + SecondTimeWindow = "second" + HourTimeWindow = "hour" + MonthTimeWindow = 
"month" +) + const ( HeaderRateLimitLimit = "X-RateLimit-Limit" // the rate limit ceiling that is applicable for the current request HeaderRateLimitRemaining = "X-RateLimit-Remaining" // the number of requests left for the current rate-limit window @@ -31,12 +37,10 @@ const ( NokeyRateLimitHour = 500 // RateLimit for requests without or with invalid apikey NokeyRateLimitMonth = 0 // RateLimit for requests without or with invalid apikey - FallbackRateLimitSecond = 20 // RateLimit for when redis is offline - FallbackRateLimitBurst = 20 // RateLimit for when redis is offline + FallbackRateLimitSecond = 20 // RateLimit per second for when redis is offline + FallbackRateLimitBurst = 20 // RateLimit burst for when redis is offline - SecondTimeWindow = "second" - HourTimeWindow = "hour" - MonthTimeWindow = "month" + ratelimitStatsTruncateDuration = time.Hour * 1 // ratelimit-stats are truncated to this duration ) var NoKeyRateLimit = &RateLimit{ @@ -64,10 +68,10 @@ var pathPrefix = "" // only requests with this prefix will be RateLimited var logger = logrus.StandardLogger().WithField("module", "ratelimit") type dbEntry struct { - Date time.Time - Key string - Path string - Count int64 + Date time.Time + ApiKey string + Path string + Count int64 } type RateLimit struct { @@ -242,6 +246,11 @@ func HttpMiddleware(next http.Handler) http.Handler { // updateWeights gets the weights from postgres and updates the weights map. func updateWeights(firstRun bool) error { + start := time.Now() + defer func() { + metrics.TaskDuration.WithLabelValues("ratelimit_updateWeights").Observe(time.Since(start).Seconds()) + }() + dbWeights := []struct { Endpoint string `db:"endpoint"` Weight int64 `db:"weight"` @@ -282,11 +291,22 @@ func updateRedisStatus() error { return nil } -// updateStats scans redis for ratelimit:stats:* keys and inserts them into postgres, if the key's date is in the past it will also delete the key in redis. +// updateStats scans redis for ratelimit:stats:* keys and inserts them into postgres, if the key's truncated date is older than specified stats-truncation it will also delete the key in redis. 
func updateStats() error { + start := time.Now() + defer func() { + metrics.TaskDuration.WithLabelValues("ratelimit_updateStats").Observe(time.Since(start).Seconds()) + }() + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*300) + defer cancel() + + var err error + startTruncated := start.Truncate(ratelimitStatsTruncateDuration) + allKeys := []string{} cursor := uint64(0) - ctx := context.Background() + for { cmd := redisClient.Scan(ctx, cursor, "ratelimit:stats:*:*:*", 1000) if cmd.Err() != nil { @@ -310,43 +330,65 @@ func updateStats() error { if end > len(allKeys) { end = len(allKeys) } + keysToDelete := []string{} keys := allKeys[start:end] entries := make([]dbEntry, len(keys)) - values := make([]*redis.StringCmd, len(keys)) - cmds, err := redisClient.Pipelined(ctx, func(pipe redis.Pipeliner) error { - for i, k := range keys { - ks := strings.Split(k, ":") - if len(ks) != 5 { - return fmt.Errorf("error parsing key %s: split-len != 5", k) - } - dateString := ks[2] - date, err := time.Parse("2006-01-02", dateString) - if err != nil { - return fmt.Errorf("error parsing date in key %s: %v", k, err) - } - key := ks[3] - path := ks[4] - values[i] = pipe.Get(ctx, k) - entries[i] = dbEntry{ - Date: date, - Key: key, - Path: path, - } + for i, k := range keys { + ks := strings.Split(k, ":") + if len(ks) != 5 { + return fmt.Errorf("error parsing key %s: split-len != 5", k) } - return nil - }) - for i := range cmds { - entries[i].Count, err = values[i].Int64() + dateString := ks[2] + date, err := time.Parse("2006-01-02", dateString) if err != nil { - return fmt.Errorf("error parsing count of key %s: %v: %w", entries[i].Key, entries[i].Count, err) + return fmt.Errorf("error parsing date in key %s: %v", k, err) + } + dateTruncated := date.Truncate(ratelimitStatsTruncateDuration) + if dateTruncated.Before(startTruncated) { + keysToDelete = append(keysToDelete, k) + } + entries[i] = dbEntry{ + Date: dateTruncated, + ApiKey: ks[3], + Path: ks[4], } } - if err != nil { - return err + + mgetSize := 500 + for j := 0; j <= len(keys); j += mgetSize { + mgetStart := j + mgetEnd := j + mgetSize + if mgetEnd > len(keys) { + mgetEnd = len(keys) + } + mgetRes, err := redisClient.MGet(ctx, keys[mgetStart:mgetEnd]...).Result() + if err != nil { + return fmt.Errorf("error getting stats-count from redis: %w", err) + } + for k, v := range mgetRes { + entries[mgetStart+k].Count, err = v.(*redis.StringCmd).Int64() + if err != nil { + return err + } + } } + err = updateStatsEntries(entries) if err != nil { - return err + return fmt.Errorf("error updating stats entries: %w", err) + } + + delSize := 500 + for j := 0; j <= len(keys); j += delSize { + delStart := j + delEnd := j + delSize + if delEnd > len(keysToDelete) { + delEnd = len(keysToDelete) + } + _, err = redisClient.Del(ctx, keysToDelete[delStart:delEnd]...).Result() + if err != nil { + logger.Errorf("error deleting stats-keys from redis: %v", err) + } } } @@ -373,17 +415,17 @@ func updateStatsEntries(entries []dbEntry) error { valueStrings = append(valueStrings, "("+strings.Join(valueStringArr, ",")+")") valueArgs = append(valueArgs, entry.Date) - valueArgs = append(valueArgs, entry.Key) + valueArgs = append(valueArgs, entry.ApiKey) valueArgs = append(valueArgs, entry.Path) valueArgs = append(valueArgs, entry.Count) - logger.WithFields(logrus.Fields{"count": entry.Count, "key": entry.Key}).Infof("inserting stats entry %v/%v", allIdx, len(entries)) + logger.WithFields(logrus.Fields{"count": entry.Count, "apikey": entry.ApiKey, "path": 
entry.Path, "date": entry.Date}).Infof("inserting stats entry %v/%v", allIdx, len(entries)) batchIdx++ allIdx++ if batchIdx >= batchSize || allIdx >= len(entries) { - stmt := fmt.Sprintf(`INSERT INTO api_statistics (ts, apikey, call, count) VALUES %s ON CONFLICT (ts, apikey, call) DO UPDATE SET count = excluded.count`, strings.Join(valueStrings, ",")) + stmt := fmt.Sprintf(`INSERT INTO api_statistics (ts, apikey, call, count) VALUES %s ON CONFLICT (ts, apikey, call) DO UPDATE SET count = EXCLUDED.count`, strings.Join(valueStrings, ",")) _, err := tx.Exec(stmt, valueArgs...) if err != nil { return err @@ -457,7 +499,6 @@ func postRateLimit(rl *RateLimitResult, status int) error { if status == 200 { return nil } - // logger.WithFields(logrus.Fields{"key": rl.Key, "status": status}).Infof("decreasing key") // if status is not 200 decrement keys since we do not count unsuccessful requests ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() @@ -477,7 +518,7 @@ func postRateLimit(rl *RateLimitResult, status int) error { func rateLimitRequest(r *http.Request) (*RateLimitResult, error) { start := time.Now() defer func() { - metrics.TaskDuration.WithLabelValues("ratelimit_total").Observe(time.Since(start).Seconds()) + metrics.TaskDuration.WithLabelValues("ratelimit_rateLimitRequest").Observe(time.Since(start).Seconds()) }() ctx, cancel := context.WithTimeout(r.Context(), time.Millisecond*1000) From f08aa09ce860ec97e94cdfa8385683ceb49f7a17 Mon Sep 17 00:00:00 2001 From: Patrick Pfeiffer Date: Tue, 23 Jan 2024 09:55:03 +0100 Subject: [PATCH 05/42] (BIDS-2872) wip --- cmd/misc/main.go | 98 ++++++++-- .../20240123120000_add_ratelimits.sql | 42 +++++ ratelimit/ratelimit.go | 177 ++++++++++++------ types/config.go | 3 +- 4 files changed, 243 insertions(+), 77 deletions(-) create mode 100644 db/migrations/20240123120000_add_ratelimits.sql diff --git a/cmd/misc/main.go b/cmd/misc/main.go index 876002a648..2391328c99 100644 --- a/cmd/misc/main.go +++ b/cmd/misc/main.go @@ -392,7 +392,7 @@ func main() { case "fix-ens-addresses": err = fixEnsAddresses(erigonClient) case "update-ratelimits": - err = updateRatelimits() + err = updateRatelimitsLoop() default: utils.LogFatal(nil, fmt.Sprintf("unknown command %s", opts.Command), 0) } @@ -1936,7 +1936,53 @@ func reExportSyncCommittee(rpcClient rpc.Client, p uint64, dryRun bool) error { } } +func addUsers() error { + tx, err := db.WriterDb.Beginx() + if err != nil { + logrus.Fatalf("error starting tx: %v", err) + } + defer tx.Rollback() + for i := 0; i < 100000; i++ { + _, err := tx.Exec(`INSERT INTO users (email, password, api_key) VALUES ($1, 'xxx', 'apikey_'+$3)`, i) + if err != nil { + return err + } + } + err = tx.Commit() + if err != nil { + return err + } + return nil +} + +func updateRatelimitsLoop() error { + for { + err := updateRatelimits() + if err != nil { + logrus.WithError(err).Errorf("error in updateRatelimits") + time.Sleep(time.Second * 10) + continue + } else { + logrus.Infof("updated ratelimits") + } + time.Sleep(time.Minute) + } + return nil +} + func updateRatelimits() error { + for _, k := range []string{ + utils.Config.Frontend.Stripe.Sapphire, + utils.Config.Frontend.Stripe.Emerald, + utils.Config.Frontend.Stripe.Diamond, + utils.Config.Frontend.Stripe.Custom1, + utils.Config.Frontend.Stripe.Custom2, + } { + if k == "" { + logrus.Fatalf("invalid config.frontend.stripe key") + } + } + var err error _, err = db.WriterDb.Exec( `insert into api_keys (user_id, api_key) @@ -1952,39 +1998,53 @@ func 
updateRatelimits() error { select id as user_id, case - when product = 'free' then 5 - when product = $1 then 10 - when product = $2 then 10 - when product = $3 then 30 - when product = $4 then 50 + when product = 'free' then 5 + when product = $1 then 10 + when product = $2 then 10 + when product = $3 then 30 + when product = $4 then 50 when product = 'plankton' then 20 when product = 'goldfish' then 20 - when product = 'whale' then 25 + when product = 'whale' then 25 else 50 end as second, 0 as hour, case - when product = 'free' then 120000 - when product = $1 then 500000 - when product = $2 then 1000000 - when product = $3 then 6000000 - when product = $4 then 500000000 - when product = 'plankton' then 120000 - when product = 'goldfish' then 200000 - when product = 'whale' then 700000 + when product = 'free' then 120000 + when product = $1 then 500000 + when product = $2 then 1000000 + when product = $3 then 6000000 + when product = $4 then 500000000 + when product = 'plankton' then 120000 + when product = 'goldfish' then 200000 + when product = 'whale' then 700000 else 4000000000 end as month, - now() + interval '1 month' as valid_until, + case + when product = 'free' then to_timestamp('3000-01-01', 'YYYY-MM-DD') + when active = false then now() + else now() + interval '1 month' + end as valid_until, now() as changed_at from ( select id, price_id as product, api_key as key, coalesce(active,false) as active from users left join (select * from users_stripe_subscriptions where price_id = any('{$1,$2,$3,$4}')) as us on users.stripe_customer_id = us.customer_id where api_key is not null AND (price_id is not null OR id not in (select user_id from app_subs_view where app_subs_view.user_id = users.id AND active = true)) UNION SELECT user_id, product_id as product, api_key as key, active from app_subs_view left join users on users.id = app_subs_view.user_id where active = true AND api_key is not null AND (stripe_customer_id is null OR stripe_customer_id NOT IN (select customer_id from users_stripe_subscriptions where active = true and price_id = any('{$1,$2,$3,$4}')))) t where product is null or active = true ) x - where active = true`, + on conflict (user_id) do update set + second = excluded.second, + hour = excluded.hour, + month = excluded.month, + valid = excluded.valid_until, + changed_at = excluded.changed_at + where + api_ratelimits.second != excluded.second + or api_ratelimits.hour != excluded.hour + or api_ratelimits.month != excluded.month + `, utils.Config.Frontend.Stripe.Sapphire, utils.Config.Frontend.Stripe.Emerald, utils.Config.Frontend.Stripe.Diamond, - // utils.Config.Frontend.Stripe.Custom1, - // utils.Config.Frontend.Stripe.Custom2, + utils.Config.Frontend.Stripe.Custom1, + utils.Config.Frontend.Stripe.Custom2, ) return err } diff --git a/db/migrations/20240123120000_add_ratelimits.sql b/db/migrations/20240123120000_add_ratelimits.sql new file mode 100644 index 0000000000..9e9059329c --- /dev/null +++ b/db/migrations/20240123120000_add_ratelimits.sql @@ -0,0 +1,42 @@ +-- +goose Up +-- +goose StatementBegin +SELECT 'up SQL query - add table api_ratelimits'; +CREATE TABLE IF NOT EXISTS + api_ratelimits ( + user_id INT NOT NULL, + second INT NOT NULL DEFAULT 0, + hour INT NOT NULL DEFAULT 0, + month INT NOT NULL DEFAULT 0, + valid_until TIMESTAMP WITHOUT TIME ZONE NOT NULL, + changed_at TIMESTAMP WITHOUT TIME ZONE NOT NULL, + PRIMARY KEY (user_id) + ); +SELECT 'up SQL query - add table api_keys'; +CREATE TABLE IF NOT EXISTS + api_keys ( + user_id INT NOT NULL, + api_key 
VARCHAR(256) NOT NULL, + valid_until TIMESTAMP WITHOUT TIME ZONE NOT NULL, + changed_at TIMESTAMP WITHOUT TIME ZONE NOT NULL, + PRIMARY KEY (user_id, api_key) + ); +SELECT 'up SQL query - add table api_weights'; +CREATE TABLE IF NOT EXISTS + api_weights ( + endpoint TEXT NOT NULL, + method TEXT NOT NULL, + weight INT NOT NULL DEFAULT 0, + valid_from TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT TO_TIMESTAMP(0), + PRIMARY KEY (endpoint, valid_from) + ); +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +SELECT 'down SQL query - drop table api_ratelimits'; +DROP TABLE IF EXISTS api_ratelimits; +SELECT 'down SQL query - drop table api_keys'; +DROP TABLE IF EXISTS api_keys; +SELECT 'down SQL query - drop table api_weights'; +DROP TABLE IF EXISTS api_weights; +-- +goose StatementEnd diff --git a/ratelimit/ratelimit.go b/ratelimit/ratelimit.go index 2b6cc22ca3..9d59785e8f 100644 --- a/ratelimit/ratelimit.go +++ b/ratelimit/ratelimit.go @@ -28,19 +28,22 @@ const ( ) const ( - HeaderRateLimitLimit = "X-RateLimit-Limit" // the rate limit ceiling that is applicable for the current request - HeaderRateLimitRemaining = "X-RateLimit-Remaining" // the number of requests left for the current rate-limit window - HeaderRateLimitReset = "X-RateLimit-Reset" // the number of seconds until the quota resets - HeaderRetryAfter = "Retry-After" // the number of seconds until the quota resets, same as HeaderRateLimitReset, RFC 7231, 7.1.3 - - NokeyRateLimitSecond = 5 // RateLimit for requests without or with invalid apikey + HeaderRateLimitLimit = "X-RateLimit-Limit" // the rate limit ceiling that is applicable for the current request + HeaderRateLimitRemaining = "X-RateLimit-Remaining" // the number of requests left for the current rate-limit window + HeaderRateLimitReset = "X-RateLimit-Reset" // the number of seconds until the quota resets + HeaderRetryAfter = "Retry-After" // the number of seconds until the quota resets, same as HeaderRateLimitReset, RFC 7231, 7.1.3 + HeaderRateLimitLimitSecond = "X-RateLimit-Limit-Second" // the rate limit ceiling that is applicable for the current user + HeaderRateLimitLimitHour = "X-RateLimit-Limit-Hour" // the rate limit ceiling that is applicable for the current user + HeaderRateLimitLimitMonth = "X-RateLimit-Limit-Month" // the rate limit ceiling that is applicable for the current user + + NokeyRateLimitSecond = 2 // RateLimit for requests without or with invalid apikey NokeyRateLimitHour = 500 // RateLimit for requests without or with invalid apikey NokeyRateLimitMonth = 0 // RateLimit for requests without or with invalid apikey FallbackRateLimitSecond = 20 // RateLimit per second for when redis is offline FallbackRateLimitBurst = 20 // RateLimit burst for when redis is offline - ratelimitStatsTruncateDuration = time.Hour * 1 // ratelimit-stats are truncated to this duration + statsTruncateDuration = time.Hour * 1 // ratelimit-stats are truncated to this duration ) var NoKeyRateLimit = &RateLimit{ @@ -52,18 +55,23 @@ var NoKeyRateLimit = &RateLimit{ var redisClient *redis.Client var redisIsHealthy atomic.Bool +var lastRateLimitUpdateKeys = time.Unix(0, 0) // guarded by lastRateLimitUpdateMu +var lastRateLimitUpdateRateLimits = time.Unix(0, 0) // guarded by lastRateLimitUpdateMu +var lastRateLimitUpdateMu = &sync.Mutex{} + var fallbackRateLimiter = NewFallbackRateLimiter() // if redis is offline, use this rate limiter var initializedWg = &sync.WaitGroup{} // wait for everything to be initialized before serving requests var rateLimitsMu = &sync.RWMutex{} 
-var rateLimits = make(map[string]*RateLimit) // guarded by RateLimitsMu -var rateLimitsByKey = make(map[string]*RateLimit) // guarded by RateLimitsMu +var rateLimits = map[string]*RateLimit{} // guarded by rateLimitsMu +var rateLimitsByUserId = map[int64]*RateLimit{} // guarded by rateLimitsMu +var userIdByApiKey = map[string]int64{} // guarded by rateLimitsMu var weightsMu = &sync.RWMutex{} var weights = map[string]int64{} // guarded by weightsMu -var pathPrefix = "" // only requests with this prefix will be RateLimited +var pathPrefix = "" // only requests with this prefix will be ratelimited var logger = logrus.StandardLogger().WithField("module", "ratelimit") @@ -87,6 +95,7 @@ type RateLimitResult struct { IP string Key string IsValidKey bool + UserId int64 RedisKeys []RedisKey RedisStatsKey string RateLimit *RateLimit @@ -155,15 +164,14 @@ func Init(redisAddress, pathPrefixOpt string) { }() go func() { firstRun := true - lastRunTime := time.Unix(0, 0) + for { - t, err := updateRateLimits(lastRunTime) + err := updateRateLimits() if err != nil { - logger.WithError(err).Errorf("error updating RateLimits") + logger.WithError(err).Errorf("error updating rateLimits") time.Sleep(time.Second * 2) continue } - lastRunTime = t if firstRun { initializedWg.Done() firstRun = false @@ -226,6 +234,17 @@ func HttpMiddleware(next http.Handler) http.Handler { w.Header().Set(HeaderRateLimitLimit, strconv.FormatInt(rl.Limit, 10)) w.Header().Set(HeaderRateLimitRemaining, strconv.FormatInt(rl.Remaining, 10)) w.Header().Set(HeaderRateLimitReset, strconv.FormatInt(rl.Reset, 10)) + + if rl.RateLimit.Second > 0 { + w.Header().Set(HeaderRateLimitLimitSecond, strconv.FormatInt(rl.RateLimit.Second, 10)) + } + if rl.RateLimit.Hour > 0 { + w.Header().Set(HeaderRateLimitLimitHour, strconv.FormatInt(rl.RateLimit.Hour, 10)) + } + if rl.RateLimit.Month > 0 { + w.Header().Set(HeaderRateLimitLimitMonth, strconv.FormatInt(rl.RateLimit.Month, 10)) + } + if rl.Weight > rl.Remaining { w.Header().Set(HeaderRetryAfter, strconv.FormatInt(rl.Reset, 10)) http.Error(w, http.StatusText(http.StatusTooManyRequests), http.StatusTooManyRequests) @@ -302,7 +321,7 @@ func updateStats() error { defer cancel() var err error - startTruncated := start.Truncate(ratelimitStatsTruncateDuration) + startTruncated := start.Truncate(statsTruncateDuration) allKeys := []string{} cursor := uint64(0) @@ -330,6 +349,11 @@ func updateStats() error { if end > len(allKeys) { end = len(allKeys) } + + if start == end { + break + } + keysToDelete := []string{} keys := allKeys[start:end] entries := make([]dbEntry, len(keys)) @@ -339,11 +363,11 @@ func updateStats() error { return fmt.Errorf("error parsing key %s: split-len != 5", k) } dateString := ks[2] - date, err := time.Parse("2006-01-02", dateString) + date, err := time.Parse("2006-01-02-15", dateString) if err != nil { return fmt.Errorf("error parsing date in key %s: %v", k, err) } - dateTruncated := date.Truncate(ratelimitStatsTruncateDuration) + dateTruncated := date.Truncate(statsTruncateDuration) if dateTruncated.Before(startTruncated) { keysToDelete = append(keysToDelete, k) } @@ -363,12 +387,16 @@ func updateStats() error { } mgetRes, err := redisClient.MGet(ctx, keys[mgetStart:mgetEnd]...).Result() if err != nil { - return fmt.Errorf("error getting stats-count from redis: %w", err) + return fmt.Errorf("error getting stats-count from redis (%v-%v/%v): %w", mgetStart, mgetEnd, len(keys), err) } for k, v := range mgetRes { - entries[mgetStart+k].Count, err = v.(*redis.StringCmd).Int64() + vStr, ok 
:= v.(string) + if !ok { + return fmt.Errorf("error parsing stats-count from redis: value is not string: %v: %v: %w", k, v, err) + } + entries[mgetStart+k].Count, err = strconv.ParseInt(vStr, 10, 64) if err != nil { - return err + return fmt.Errorf("error parsing stats-count from redis: value is not int64: %v: %v: %w", k, v, err) } } } @@ -378,16 +406,18 @@ func updateStats() error { return fmt.Errorf("error updating stats entries: %w", err) } - delSize := 500 - for j := 0; j <= len(keys); j += delSize { - delStart := j - delEnd := j + delSize - if delEnd > len(keysToDelete) { - delEnd = len(keysToDelete) - } - _, err = redisClient.Del(ctx, keysToDelete[delStart:delEnd]...).Result() - if err != nil { - logger.Errorf("error deleting stats-keys from redis: %v", err) + if len(keysToDelete) > 0 { + delSize := 500 + for j := 0; j <= len(keys); j += delSize { + delStart := j + delEnd := j + delSize + if delEnd > len(keysToDelete) { + delEnd = len(keysToDelete) + } + _, err = redisClient.Del(ctx, keysToDelete[delStart:delEnd]...).Result() + if err != nil { + logger.Errorf("error deleting stats-keys from redis: %v", err) + } } } } @@ -419,7 +449,7 @@ func updateStatsEntries(entries []dbEntry) error { valueArgs = append(valueArgs, entry.Path) valueArgs = append(valueArgs, entry.Count) - logger.WithFields(logrus.Fields{"count": entry.Count, "apikey": entry.ApiKey, "path": entry.Path, "date": entry.Date}).Infof("inserting stats entry %v/%v", allIdx, len(entries)) + // logger.WithFields(logrus.Fields{"count": entry.Count, "apikey": entry.ApiKey, "path": entry.Path, "date": entry.Date}).Infof("inserting stats entry %v/%v", allIdx+1, len(entries)) batchIdx++ allIdx++ @@ -444,32 +474,64 @@ func updateStatsEntries(entries []dbEntry) error { return nil } -// updateRateLimits gets the ratelimits from postgres and updates the ratelimits map. it will delete expired ratelimits and assumes that no other process deletes entries in the table api_ratelimits. -func updateRateLimits(lastUpdate time.Time) (time.Time, error) { +// updateRateLimits updates the maps rateLimits, rateLimitsByUserId and userIdByApiKey with data from postgres-tables api_keys and api_ratelimits. 
+func updateRateLimits() error { start := time.Now() defer func() { metrics.TaskDuration.WithLabelValues("ratelimit_updateRateLimits").Observe(time.Since(start).Seconds()) }() + + lastRateLimitUpdateMu.Lock() + lastTKeys := lastRateLimitUpdateKeys + lastTRateLimits := lastRateLimitUpdateRateLimits + lastRateLimitUpdateMu.Unlock() + + var err error + + dbApiKeys := []struct { + UserID int64 `db:"user_id"` + ApiKey string `db:"api_key"` + ValidUntil time.Time `db:"valid_until"` + ChangedAt time.Time `db:"changed_at"` + }{} + + err = db.WriterDb.Select(&dbApiKeys, `SELECT user_id, api_key, valid_until, changed_at FROM api_keys WHERE changed_at > $1 OR valid_until < NOW()`, lastTKeys) + if err != nil { + return err + } + dbRateLimits := []struct { UserID int64 `db:"user_id"` - ApiKey string `db:"apikey"` Second int64 `db:"second"` Hour int64 `db:"hour"` Month int64 `db:"month"` ValidUntil time.Time `db:"valid_until"` ChangedAt time.Time `db:"changed_at"` }{} - err := db.WriterDb.Select(&dbRateLimits, "SELECT ar.user_id, ak.apikey, ar.second, ar.hour, ar.month, ar.valid_until, ar.changed_at FROM api_ratelimits ar LEFT JOIN users u ON u.id = ar.user_id LEFT JOIN api_keys ak ON ak.user_id = u.id WHERE ar.changed_at > $1", lastUpdate) + + err = db.WriterDb.Select(&dbRateLimits, `SELECT user_id, second, hour, month, valid_until, changed_at FROM api_ratelimits WHERE changed_at > $1 OR valid_until < NOW()`, lastTRateLimits) if err != nil { - return lastUpdate, fmt.Errorf("error getting ratelimits: %w", err) + return err } rateLimitsMu.Lock() now := time.Now() - newestChange := time.Unix(0, 0) + for _, dbKey := range dbApiKeys { + if dbKey.ChangedAt.After(lastTKeys) { + lastTKeys = dbKey.ChangedAt + } + if dbKey.ValidUntil.Before(now) { + delete(userIdByApiKey, dbKey.ApiKey) + } + userIdByApiKey[dbKey.ApiKey] = dbKey.UserID + } + for _, dbRl := range dbRateLimits { - if dbRl.ChangedAt.After(newestChange) { - newestChange = dbRl.ChangedAt + if dbRl.ChangedAt.After(lastTRateLimits) { + lastTRateLimits = dbRl.ChangedAt + } + if dbRl.ValidUntil.Before(now) { + delete(rateLimitsByUserId, dbRl.UserID) } rlStr := fmt.Sprintf("%d/%d/%d", dbRl.Second, dbRl.Hour, dbRl.Month) rl, exists := rateLimits[rlStr] @@ -481,18 +543,16 @@ func updateRateLimits(lastUpdate time.Time) (time.Time, error) { } rateLimits[rlStr] = rl } - _, exists = rateLimitsByKey[dbRl.ApiKey] - if !exists { - rateLimitsByKey[dbRl.ApiKey] = rl - } - if dbRl.ValidUntil.Before(now) { - delete(rateLimitsByKey, dbRl.ApiKey) - } } rateLimitsMu.Unlock() metrics.TaskDuration.WithLabelValues("ratelimit_updateRateLimits_lock").Observe(time.Since(now).Seconds()) - return newestChange, nil + lastRateLimitUpdateMu.Lock() + lastRateLimitUpdateKeys = lastTKeys + lastRateLimitUpdateRateLimits = lastTRateLimits + lastRateLimitUpdateMu.Unlock() + + return nil } func postRateLimit(rl *RateLimitResult, status int) error { @@ -531,19 +591,22 @@ func rateLimitRequest(r *http.Request) (*RateLimitResult, error) { res.IP = ip rateLimitsMu.RLock() - limit, ok := rateLimits[key] - rateLimitsMu.RUnlock() + userId, ok := userIdByApiKey[key] if !ok { + res.UserId = -1 res.IsValidKey = false - res.RateLimit = &RateLimit{ - Second: NokeyRateLimitSecond, - Hour: NokeyRateLimitHour, - Month: NokeyRateLimitMonth, - } + res.RateLimit = NoKeyRateLimit } else { + res.UserId = userId res.IsValidKey = true - res.RateLimit = limit + limit, ok := rateLimitsByUserId[userId] + if ok { + res.RateLimit = limit + } else { + res.RateLimit = NoKeyRateLimit + } } + rateLimitsMu.RUnlock() 
weight, path := getWeight(r) res.Weight = weight @@ -561,9 +624,9 @@ func rateLimitRequest(r *http.Request) (*RateLimitResult, error) { RateLimitMonthKey := fmt.Sprintf("ratelimit:month:%04d-%02d:%s", startUtc.Year(), startUtc.Month(), res.Key) RateLimitHourKey := fmt.Sprintf("ratelimit:hour:%04d-%02d-%02d:%s", startUtc.Year(), startUtc.Month(), startUtc.Hour(), res.Key) - statsKey := fmt.Sprintf("ratelimit:stats:%04d-%02d-%02d:%s:%s", startUtc.Year(), startUtc.Month(), startUtc.Day(), res.Key, path) + statsKey := fmt.Sprintf("ratelimit:stats:%04d-%02d-%02d-%02d:%s:%s", startUtc.Year(), startUtc.Month(), startUtc.Day(), startUtc.Hour(), res.Key, path) if !res.IsValidKey { - statsKey = fmt.Sprintf("ratelimit:stats:%04d-%02d-%02d:%s:%s", startUtc.Year(), startUtc.Month(), startUtc.Day(), "nokey", path) + statsKey = fmt.Sprintf("ratelimit:stats:%04d-%02d-%02d-%02d:%s:%s", startUtc.Year(), startUtc.Month(), startUtc.Day(), startUtc.Hour(), "nokey", path) } res.RedisStatsKey = statsKey diff --git a/types/config.go b/types/config.go index 2377df388d..31b70a52b2 100644 --- a/types/config.go +++ b/types/config.go @@ -132,7 +132,8 @@ type Config struct { Sapphire string `yaml:"sapphire" envconfig:"FRONTEND_STRIPE_SAPPHIRE"` Emerald string `yaml:"emerald" envconfig:"FRONTEND_STRIPE_EMERALD"` Diamond string `yaml:"diamond" envconfig:"FRONTEND_STRIPE_DIAMOND"` - Custom string `yaml:"custom" envconfig:"FRONTEND_STRIPE_CUSTOM"` + Custom1 string `yaml:"custom1" envconfig:"FRONTEND_STRIPE_CUSTOM_1"` + Custom2 string `yaml:"custom2" envconfig:"FRONTEND_STRIPE_CUSTOM_2"` Whale string `yaml:"whale" envconfig:"FRONTEND_STRIPE_WHALE"` Goldfish string `yaml:"goldfish" envconfig:"FRONTEND_STRIPE_GOLDFISH"` Plankton string `yaml:"plankton" envconfig:"FRONTEND_STRIPE_PLANKTON"` From a686b872b56d1ed08fd9082de43300ed23f7eade Mon Sep 17 00:00:00 2001 From: Patrick Pfeiffer Date: Wed, 24 Jan 2024 12:44:50 +0100 Subject: [PATCH 06/42] (BIDS-2872) wip --- cmd/explorer/main.go | 2 +- cmd/misc/main.go | 89 +++++++++++---- .../20240123120000_add_ratelimits.sql | 17 +++ local-deployment/docker-compose.yml | 8 +- local-deployment/provision-explorer-config.sh | 6 + local-deployment/run.sh | 11 +- ratelimit/ratelimit.go | 106 ++++++++++++------ 7 files changed, 184 insertions(+), 55 deletions(-) diff --git a/cmd/explorer/main.go b/cmd/explorer/main.go index 8e944b7e62..c4e7b519de 100644 --- a/cmd/explorer/main.go +++ b/cmd/explorer/main.go @@ -607,7 +607,7 @@ func main() { router.Use(metrics.HttpMiddleware) } - ratelimit.Init(utils.Config.RedisSessionStoreEndpoint, "/api/") + ratelimit.Init(utils.Config.RedisSessionStoreEndpoint, ratelimit.DefaultRequestCollector) router.Use(ratelimit.HttpMiddleware) n := negroni.New(negroni.NewRecovery()) diff --git a/cmd/misc/main.go b/cmd/misc/main.go index 2391328c99..ed17985a7f 100644 --- a/cmd/misc/main.go +++ b/cmd/misc/main.go @@ -393,6 +393,8 @@ func main() { err = fixEnsAddresses(erigonClient) case "update-ratelimits": err = updateRatelimitsLoop() + case "add-users": + err = addUsers() default: utils.LogFatal(nil, fmt.Sprintf("unknown command %s", opts.Command), 0) } @@ -1937,13 +1939,21 @@ func reExportSyncCommittee(rpcClient rpc.Client, p uint64, dryRun bool) error { } func addUsers() error { + logrus.Infof("addUsers") tx, err := db.WriterDb.Beginx() if err != nil { logrus.Fatalf("error starting tx: %v", err) } defer tx.Rollback() - for i := 0; i < 100000; i++ { - _, err := tx.Exec(`INSERT INTO users (email, password, api_key) VALUES ($1, 'xxx', 'apikey_'+$3)`, i) + + for i := 0; i 
< 10000; i++ { + _, err = tx.Exec(`INSERT INTO users (email, password, api_key, stripe_customer_id) VALUES ($1, 'xxx', $2, $3)`, fmt.Sprintf("user%d@email.com", i), fmt.Sprintf("apikey_%d", i), fmt.Sprintf("stripe_customer_%d", i)) + if err != nil { + return err + } + } + for i := 0; i < 100; i++ { + _, err = tx.Exec(`INSERT INTO users_stripe_subscriptions (subscription_id, customer_id, price_id, active, payload, purchase_group) VALUES ($1, $2, $3, $4, $5, $6)`, fmt.Sprintf("stripe_sub_%d", i), fmt.Sprintf("stripe_customer_%d", i), "price_diamond", true, "{}", "x") if err != nil { return err } @@ -1983,16 +1993,33 @@ func updateRatelimits() error { } } + now := time.Now() var err error - _, err = db.WriterDb.Exec( - `insert into api_keys (user_id, api_key) - select id, api_key from users where api_key is not null - on conflict (user_id, api_key) do nothing`, + res, err := db.WriterDb.Exec( + `insert into api_keys (user_id, api_key, valid_until, changed_at) + select + id as user_id, + api_key, + to_timestamp('3000-01-01', 'YYYY-MM-DD') as valid_until, + now() as changed_at + from users + where api_key is not null + on conflict (user_id, api_key) do update set + valid_until = excluded.valid_until, + changed_at = excluded.changed_at + where api_keys.valid_until != excluded.valid_until`, ) + if err != nil { + return fmt.Errorf("error updating api_keys: %w", err) + } + ra, err := res.RowsAffected() if err != nil { return err } - _, err = db.WriterDb.Exec( + logrus.Infof("updated %v api_keys in %v", ra, time.Since(now)) + + now = time.Now() + res, err = db.WriterDb.Exec( `insert into api_ratelimits (user_id, second, hour, month, valid_until, changed_at) select @@ -2003,22 +2030,24 @@ func updateRatelimits() error { when product = $2 then 10 when product = $3 then 30 when product = $4 then 50 + when product = $5 then 50 when product = 'plankton' then 20 when product = 'goldfish' then 20 when product = 'whale' then 25 - else 50 + else 50 end as second, 0 as hour, case - when product = 'free' then 120000 - when product = $1 then 500000 - when product = $2 then 1000000 - when product = $3 then 6000000 - when product = $4 then 500000000 - when product = 'plankton' then 120000 - when product = 'goldfish' then 200000 - when product = 'whale' then 700000 - else 4000000000 + when product = 'free' then 120000 + when product = $1 then 500000 + when product = $2 then 1000000 + when product = $3 then 6000000 + when product = $4 then 500000000 + when product = $5 then 13000000 + when product = 'plankton' then 120000 + when product = 'goldfish' then 200000 + when product = 'whale' then 700000 + else 4000000000 end as month, case when product = 'free' then to_timestamp('3000-01-01', 'YYYY-MM-DD') @@ -2027,13 +2056,13 @@ func updateRatelimits() error { end as valid_until, now() as changed_at from ( - select id, price_id as product, api_key as key, coalesce(active,false) as active from users left join (select * from users_stripe_subscriptions where price_id = any('{$1,$2,$3,$4}')) as us on users.stripe_customer_id = us.customer_id where api_key is not null AND (price_id is not null OR id not in (select user_id from app_subs_view where app_subs_view.user_id = users.id AND active = true)) UNION SELECT user_id, product_id as product, api_key as key, active from app_subs_view left join users on users.id = app_subs_view.user_id where active = true AND api_key is not null AND (stripe_customer_id is null OR stripe_customer_id NOT IN (select customer_id from users_stripe_subscriptions where active = true and price_id = 
any('{$1,$2,$3,$4}')))) t where product is null or active = true + select id, coalesce(product,'free') as product, key, active FROM (select id, price_id as product, api_key as key, coalesce(active,'f') as active from users left join (select * from users_stripe_subscriptions where price_id = any('{$1,$2,$3,$4,$5}')) as us on users.stripe_customer_id = us.customer_id where api_key is not null AND (price_id is not null OR id not in (select user_id from app_subs_view where app_subs_view.user_id = users.id AND active = true)) UNION SELECT user_id, product_id as product, api_key as key, active from app_subs_view left join users on users.id = app_subs_view.user_id where active = true AND api_key is not null AND (stripe_customer_id is null OR stripe_customer_id NOT IN (select customer_id from users_stripe_subscriptions where active = true and price_id = any('{$1,$2,$3,$4,$5}')))) t where product is null or active = true ) x on conflict (user_id) do update set second = excluded.second, hour = excluded.hour, month = excluded.month, - valid = excluded.valid_until, + valid_until = excluded.valid_until, changed_at = excluded.changed_at where api_ratelimits.second != excluded.second @@ -2046,5 +2075,27 @@ func updateRatelimits() error { utils.Config.Frontend.Stripe.Custom1, utils.Config.Frontend.Stripe.Custom2, ) + if err != nil { + return fmt.Errorf("error updating api_ratelimits: %w", err) + } + ra, err = res.RowsAffected() + if err != nil { + return err + } + logrus.Infof("updated %v api_ratelimits in %v", ra, time.Since(now)) + + res, err = db.WriterDb.Exec(` + update api_ratelimits + set valid_until = now() + where valid_until > now() and user_id not in (select id from users where api_key is not null)`) + if err != nil { + return fmt.Errorf("error invalidating api_ratelimits: %w", err) + } + ra, err = res.RowsAffected() + if err != nil { + return err + } + logrus.Infof("invalidated %v api_ratelimits in %v", ra, time.Since(now)) + return err } diff --git a/db/migrations/20240123120000_add_ratelimits.sql b/db/migrations/20240123120000_add_ratelimits.sql index 9e9059329c..098caf1538 100644 --- a/db/migrations/20240123120000_add_ratelimits.sql +++ b/db/migrations/20240123120000_add_ratelimits.sql @@ -23,12 +23,29 @@ CREATE TABLE IF NOT EXISTS SELECT 'up SQL query - add table api_weights'; CREATE TABLE IF NOT EXISTS api_weights ( + bucket VARCHAR(20) NOT NULL, endpoint TEXT NOT NULL, method TEXT NOT NULL, + params TEXT NOT NULL, weight INT NOT NULL DEFAULT 0, valid_from TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT TO_TIMESTAMP(0), PRIMARY KEY (endpoint, valid_from) ); + +SELECT 'up SQL query - add view app_subs_view'; +CREATE OR REPLACE VIEW app_subs_view AS + SELECT users_app_subscriptions.id, + users_app_subscriptions.user_id, + users_app_subscriptions.product_id, + users_app_subscriptions.created_at, + users_app_subscriptions.updated_at, + users_app_subscriptions.validate_remotely, + users_app_subscriptions.active, + users_app_subscriptions.store, + users_app_subscriptions.expires_at, + users_app_subscriptions.reject_reason, + users_app_subscriptions.receipt_hash + FROM users_app_subscriptions; -- +goose StatementEnd -- +goose Down diff --git a/local-deployment/docker-compose.yml b/local-deployment/docker-compose.yml index e617c43441..1e40109ea4 100644 --- a/local-deployment/docker-compose.yml +++ b/local-deployment/docker-compose.yml @@ -12,7 +12,7 @@ services: <<: *default-service profiles: - build-once - command: /bin/bash -c "git config --global --add safe.directory '*' && make -B all" + command: 
/bin/bash -c "git config --global --add safe.directory '*' && make -j -B all" indexer: <<: *default-service command: go run ./cmd/explorer -config /app/local-deployment/config.yml @@ -35,6 +35,12 @@ services: command: go run ./cmd/explorer -config /app/local-deployment/config.yml environment: - FRONTEND_ENABLED=true + ratelimits-updater: + <<: *default-service + command: go run ./cmd/misc -config /app/local-deployment/config.yml -command=update-ratelimits + misc: + <<: *default-service + command: /bin/bash -c "while true; do date; sleep 1; done" redis-sessions: image: redis:7 volumes: diff --git a/local-deployment/provision-explorer-config.sh b/local-deployment/provision-explorer-config.sh index e478d4bf44..121e9fa92f 100644 --- a/local-deployment/provision-explorer-config.sh +++ b/local-deployment/provision-explorer-config.sh @@ -97,6 +97,12 @@ frontend: termsOfServiceUrl: "tos.pdf" privacyPolicyUrl: "privacy.pdf" imprintTemplate: '{{ define "js" }}{{ end }}{{ define "css" }}{{ end }}{{ define "content" }}Imprint{{ end }}' + stripe: + sapphire: price_sapphire + emerald: price_emerald + diamond: price_diamond + custom1: price_custom1 + custom2: price_custom2 indexer: # fullIndexOnStartup: false # Perform a one time full db index on startup diff --git a/local-deployment/run.sh b/local-deployment/run.sh index 539ea8dcf6..2d79343b32 100755 --- a/local-deployment/run.sh +++ b/local-deployment/run.sh @@ -23,12 +23,17 @@ fn_main() { stop) shift; fn_stop "$@"; exit;; sql) shift; fn_sql "$@"; exit;; redis) shift; fn_redis "$@"; exit;; + misc) shift; fn_misc "$@"; exit;; *) echo "$var_help" esac shift done } +fn_misc() { + docker compose exec misc go run ./cmd/misc -config /app/local-deployment/config.yml $@ +} + fn_sql() { PGPASSWORD=pass psql -h localhost -p$POSTGRES_PORT -U postgres -d db } @@ -40,9 +45,11 @@ fn_redis() { fn_start() { fn_stop - kurtosis run --enclave my-testnet . "$(cat network-params.json)" + # build once before starting all services to prevent multiple parallel builds + docker compose --profile=build-once run build-once & + kurtosis run --enclave my-testnet . 
"$(cat network-params.json)" & + wait bash provision-explorer-config.sh - docker compose --profile=build-once run build-once # build once before starting all services to prevent multiple parallel builds docker compose up -d echo "Waiting for explorer to start, then browse http://localhost:8080" } diff --git a/ratelimit/ratelimit.go b/ratelimit/ratelimit.go index 9d59785e8f..7d5186a485 100644 --- a/ratelimit/ratelimit.go +++ b/ratelimit/ratelimit.go @@ -43,6 +43,8 @@ const ( FallbackRateLimitSecond = 20 // RateLimit per second for when redis is offline FallbackRateLimitBurst = 20 // RateLimit burst for when redis is offline + defaultBucket = "default" + statsTruncateDuration = time.Hour * 1 // ratelimit-stats are truncated to this duration ) @@ -69,7 +71,8 @@ var rateLimitsByUserId = map[int64]*RateLimit{} // guarded by rateLimitsMu var userIdByApiKey = map[string]int64{} // guarded by rateLimitsMu var weightsMu = &sync.RWMutex{} -var weights = map[string]int64{} // guarded by weightsMu +var weights = map[string]int64{} // guarded by weightsMu +var buckets = map[string]string{} // guarded by weightsMu var pathPrefix = "" // only requests with this prefix will be ratelimited @@ -102,6 +105,7 @@ type RateLimitResult struct { Limit int64 Remaining int64 Reset int64 + Bucket string Window TimeWindow } @@ -135,15 +139,24 @@ func (r *responseWriterDelegator) Status() int { return r.status } -// Init initializes the RateLimiting middleware, the RateLimiting middleware will not work without calling Init first. -func Init(redisAddress, pathPrefixOpt string) { - pathPrefix = pathPrefixOpt +var DefaultRequestCollector = func(req *http.Request) bool { + if req.URL == nil || !strings.HasPrefix(req.URL.Path, "/api") { + return false + } + return true +} + +var requestSelector func(req *http.Request) bool +// Init initializes the RateLimiting middleware, the rateLimiting middleware will not work without calling Init first. The second parameter is a function the will get called on every request, it will only apply ratelimiting to requests when this func returns true. 
+func Init(redisAddress string, requestSelectorOpt func(req *http.Request) bool) { redisClient = redis.NewClient(&redis.Options{ Addr: redisAddress, ReadTimeout: time.Second * 3, }) + requestSelector = requestSelectorOpt + initializedWg.Add(3) go func() { @@ -168,7 +181,7 @@ func Init(redisAddress, pathPrefixOpt string) { for { err := updateRateLimits() if err != nil { - logger.WithError(err).Errorf("error updating rateLimits") + logger.WithError(err).Errorf("error updating ratelimits") time.Sleep(time.Second * 2) continue } @@ -212,7 +225,7 @@ func Init(redisAddress, pathPrefixOpt string) { func HttpMiddleware(next http.Handler) http.Handler { initializedWg.Wait() return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !strings.HasPrefix(r.URL.Path, pathPrefix) { + if !requestSelector(r) { next.ServeHTTP(w, r) return } @@ -229,7 +242,7 @@ func HttpMiddleware(next http.Handler) http.Handler { next.ServeHTTP(w, r) return } - // logrus.WithFields(logrus.Fields{"route": rl.Route, "key": rl.Key, "limit": rl.Limit, "remaining": rl.Remaining, "reset": rl.Reset, "window": rl.Window}).Infof("RateLimiting") + logrus.WithFields(logrus.Fields{"route": rl.Route, "key": rl.Key, "limit": rl.Limit, "remaining": rl.Remaining, "reset": rl.Reset, "window": rl.Window, "validKey": rl.IsValidKey}).Infof("rateLimiting") w.Header().Set(HeaderRateLimitLimit, strconv.FormatInt(rl.Limit, 10)) w.Header().Set(HeaderRateLimitRemaining, strconv.FormatInt(rl.Remaining, 10)) @@ -245,6 +258,7 @@ func HttpMiddleware(next http.Handler) http.Handler { w.Header().Set(HeaderRateLimitLimitMonth, strconv.FormatInt(rl.RateLimit.Month, 10)) } + // note: maybe just look for rl.Remaining > 0 instead of rl.Weight > rl.Remaining if rl.Weight > rl.Remaining { w.Header().Set(HeaderRetryAfter, strconv.FormatInt(rl.Reset, 10)) http.Error(w, http.StatusText(http.StatusTooManyRequests), http.StatusTooManyRequests) @@ -263,7 +277,7 @@ func HttpMiddleware(next http.Handler) http.Handler { }) } -// updateWeights gets the weights from postgres and updates the weights map. +// updateWeights gets the weights and buckets from postgres and updates the weights and buckets maps. 
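// Bucket names are sanitized by replacing ":" with "_" (they are embedded in the
// ":"-separated redis keys) and fall back to defaultBucket when an endpoint has no
// bucket configured.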
func updateWeights(firstRun bool) error { start := time.Now() defer func() { @@ -273,21 +287,30 @@ func updateWeights(firstRun bool) error { dbWeights := []struct { Endpoint string `db:"endpoint"` Weight int64 `db:"weight"` + Bucket string `db:"bucket"` ValidFrom time.Time `db:"valid_from"` }{} - err := db.WriterDb.Select(&dbWeights, "SELECT DISTINCT ON (endpoint) endpoint, weight, valid_from FROM api_weights WHERE valid_from <= NOW() ORDER BY endpoint, valid_from DESC") + err := db.WriterDb.Select(&dbWeights, "SELECT DISTINCT ON (endpoint) endpoint, bucket, weight, valid_from FROM api_weights WHERE valid_from <= NOW() ORDER BY endpoint, valid_from DESC") if err != nil { return err } weightsMu.Lock() defer weightsMu.Unlock() oldWeights := weights + oldBuckets := buckets weights = make(map[string]int64, len(dbWeights)) for _, w := range dbWeights { weights[w.Endpoint] = w.Weight - if !firstRun && oldWeights[w.Endpoint] != w.Weight { + if !firstRun && oldWeights[w.Endpoint] != weights[w.Endpoint] { logger.WithFields(logrus.Fields{"endpoint": w.Endpoint, "weight": w.Weight, "oldWeight": oldWeights[w.Endpoint]}).Infof("weight changed") } + buckets[w.Endpoint] = strings.ReplaceAll(w.Bucket, ":", "_") + if buckets[w.Endpoint] == "" { + buckets[w.Endpoint] = defaultBucket + } + if !firstRun && oldBuckets[w.Endpoint] != buckets[w.Endpoint] { + logger.WithFields(logrus.Fields{"endpoint": w.Endpoint, "bucket": w.Weight, "oldBucket": oldBuckets[w.Endpoint]}).Infof("bucket changed") + } } return nil } @@ -486,7 +509,11 @@ func updateRateLimits() error { lastTRateLimits := lastRateLimitUpdateRateLimits lastRateLimitUpdateMu.Unlock() - var err error + tx, err := db.WriterDb.Beginx() + if err != nil { + return err + } + defer tx.Rollback() dbApiKeys := []struct { UserID int64 `db:"user_id"` @@ -495,9 +522,9 @@ func updateRateLimits() error { ChangedAt time.Time `db:"changed_at"` }{} - err = db.WriterDb.Select(&dbApiKeys, `SELECT user_id, api_key, valid_until, changed_at FROM api_keys WHERE changed_at > $1 OR valid_until < NOW()`, lastTKeys) + err = tx.Select(&dbApiKeys, `SELECT user_id, api_key, valid_until, changed_at FROM api_keys WHERE changed_at > $1 OR valid_until < NOW()`, lastTKeys) if err != nil { - return err + return fmt.Errorf("error getting api_keys: %w", err) } dbRateLimits := []struct { @@ -509,7 +536,12 @@ func updateRateLimits() error { ChangedAt time.Time `db:"changed_at"` }{} - err = db.WriterDb.Select(&dbRateLimits, `SELECT user_id, second, hour, month, valid_until, changed_at FROM api_ratelimits WHERE changed_at > $1 OR valid_until < NOW()`, lastTRateLimits) + err = tx.Select(&dbRateLimits, `SELECT user_id, second, hour, month, valid_until, changed_at FROM api_ratelimits WHERE changed_at > $1 OR valid_until < NOW()`, lastTRateLimits) + if err != nil { + return fmt.Errorf("error getting api_ratelimits: %w", err) + } + + err = tx.Commit() if err != nil { return err } @@ -522,6 +554,7 @@ func updateRateLimits() error { } if dbKey.ValidUntil.Before(now) { delete(userIdByApiKey, dbKey.ApiKey) + continue } userIdByApiKey[dbKey.ApiKey] = dbKey.UserID } @@ -532,6 +565,7 @@ func updateRateLimits() error { } if dbRl.ValidUntil.Before(now) { delete(rateLimitsByUserId, dbRl.UserID) + continue } rlStr := fmt.Sprintf("%d/%d/%d", dbRl.Second, dbRl.Hour, dbRl.Month) rl, exists := rateLimits[rlStr] @@ -543,6 +577,7 @@ func updateRateLimits() error { } rateLimits[rlStr] = rl } + rateLimitsByUserId[dbRl.UserID] = rl } rateLimitsMu.Unlock() 
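// Note that rateLimits de-duplicates identical second/hour/month triples, so users on the
// same plan share a single *RateLimit value, while expired keys and limits have been
// removed from the lookup maps above.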
metrics.TaskDuration.WithLabelValues("ratelimit_updateRateLimits_lock").Observe(time.Since(now).Seconds()) @@ -608,9 +643,10 @@ func rateLimitRequest(r *http.Request) (*RateLimitResult, error) { } rateLimitsMu.RUnlock() - weight, path := getWeight(r) + weight, route, bucket := getWeight(r) res.Weight = weight - res.Route = path + res.Route = route + res.Bucket = bucket startUtc := start.UTC() res.Time = startUtc @@ -620,13 +656,15 @@ func rateLimitRequest(r *http.Request) (*RateLimitResult, error) { endOfHourUtc := time.Now().Truncate(time.Hour).Add(time.Hour) timeUntilEndOfHourUtc := endOfHourUtc.Sub(startUtc) - RateLimitSecondKey := "ratelimit:second:" + res.Key - RateLimitMonthKey := fmt.Sprintf("ratelimit:month:%04d-%02d:%s", startUtc.Year(), startUtc.Month(), res.Key) - RateLimitHourKey := fmt.Sprintf("ratelimit:hour:%04d-%02d-%02d:%s", startUtc.Year(), startUtc.Month(), startUtc.Hour(), res.Key) + fmt.Printf("startUtc: %v, endOfMonthUtc: %v\n, diff: %v", startUtc, endOfMonthUtc, endOfMonthUtc.Sub(startUtc).Seconds()) - statsKey := fmt.Sprintf("ratelimit:stats:%04d-%02d-%02d-%02d:%s:%s", startUtc.Year(), startUtc.Month(), startUtc.Day(), startUtc.Hour(), res.Key, path) + RateLimitSecondKey := fmt.Sprintf("ratelimit:second:%s:%s", res.Bucket, res.Key) + RateLimitHourKey := fmt.Sprintf("ratelimit:hour:%04d-%02d-%02d:%s:%s", startUtc.Year(), startUtc.Month(), startUtc.Hour(), res.Bucket, res.Key) + RateLimitMonthKey := fmt.Sprintf("ratelimit:month:%04d-%02d:%s:%s", startUtc.Year(), startUtc.Month(), res.Bucket, res.Key) + + statsKey := fmt.Sprintf("ratelimit:stats:%04d-%02d-%02d-%02d:%s:%s", startUtc.Year(), startUtc.Month(), startUtc.Day(), startUtc.Hour(), res.Key, res.Route) if !res.IsValidKey { - statsKey = fmt.Sprintf("ratelimit:stats:%04d-%02d-%02d-%02d:%s:%s", startUtc.Year(), startUtc.Month(), startUtc.Day(), startUtc.Hour(), "nokey", path) + statsKey = fmt.Sprintf("ratelimit:stats:%04d-%02d-%02d-%02d:%s:%s", startUtc.Year(), startUtc.Month(), startUtc.Day(), startUtc.Hour(), "nokey", res.Route) } res.RedisStatsKey = statsKey @@ -665,7 +703,7 @@ func rateLimitRequest(r *http.Request) (*RateLimitResult, error) { res.Window = SecondTimeWindow return res, nil } else if res.RateLimit.Second-RateLimitSecond.Val() > res.Limit { - res.Limit = res.RateLimit.Second - RateLimitSecond.Val() + res.Limit = res.RateLimit.Second res.Remaining = res.RateLimit.Second - RateLimitSecond.Val() res.Reset = int64(1) res.Window = SecondTimeWindow @@ -680,7 +718,7 @@ func rateLimitRequest(r *http.Request) (*RateLimitResult, error) { res.Window = HourTimeWindow return res, nil } else if res.RateLimit.Hour-RateLimitHour.Val() > res.Limit { - res.Limit = res.RateLimit.Hour - RateLimitHour.Val() + res.Limit = res.RateLimit.Hour res.Remaining = res.RateLimit.Hour - RateLimitHour.Val() res.Reset = int64(timeUntilEndOfHourUtc.Seconds()) res.Window = HourTimeWindow @@ -695,7 +733,7 @@ func rateLimitRequest(r *http.Request) (*RateLimitResult, error) { res.Window = MonthTimeWindow return res, nil } else if res.RateLimit.Month-RateLimitMonth.Val() > res.Limit { - res.Limit = res.RateLimit.Month - RateLimitMonth.Val() + res.Limit = res.RateLimit.Month res.Remaining = res.RateLimit.Month - RateLimitMonth.Val() res.Reset = int64(timeUntilEndOfMonthUtc.Seconds()) res.Window = MonthTimeWindow @@ -720,24 +758,28 @@ func getKey(r *http.Request) (key, ip string) { } // getWeight returns the weight of an endpoint. if the weight of the endpoint is not defined, it returns 1. 
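// It now also returns the endpoint's bucket, falling back to defaultBucket when no bucket
// is configured for the route.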
-func getWeight(r *http.Request) (cost int64, identifier string) { +func getWeight(r *http.Request) (cost int64, identifier, bucket string) { route := getRoute(r) weightsMu.RLock() - weight, ok := weights[route] + weight, weightOk := weights[route] + bucket, bucketOk := buckets[route] weightsMu.RUnlock() - if ok { - return weight, route + if !weightOk { + weight = 1 + } + if !bucketOk { + bucket = defaultBucket } - return 1, route + return weight, route, bucket } func getRoute(r *http.Request) string { route := mux.CurrentRoute(r) - path, err := route.GetPathTemplate() + pathTpl, err := route.GetPathTemplate() if err != nil { - path = "UNDEFINED" + return "UNDEFINED" } - return path + return pathTpl } // getIP returns the ip address from the http request From 044defbf9d2e659ddaa6887843d690c636fb9cd4 Mon Sep 17 00:00:00 2001 From: Patrick Pfeiffer Date: Thu, 25 Jan 2024 11:15:27 +0100 Subject: [PATCH 07/42] (BIDS-2872) wip --- cmd/misc/main.go | 63 ++++++++++++++++++++++++++++++++++++++--- local-deployment/run.sh | 6 +++- ratelimit/ratelimit.go | 29 +++++++++---------- 3 files changed, 78 insertions(+), 20 deletions(-) diff --git a/cmd/misc/main.go b/cmd/misc/main.go index ed17985a7f..e27b944360 100644 --- a/cmd/misc/main.go +++ b/cmd/misc/main.go @@ -1975,7 +1975,7 @@ func updateRatelimitsLoop() error { } else { logrus.Infof("updated ratelimits") } - time.Sleep(time.Minute) + time.Sleep(time.Second * 10) } return nil } @@ -1995,7 +1995,7 @@ func updateRatelimits() error { now := time.Now() var err error - res, err := db.WriterDb.Exec( + res, err := db.FrontendWriterDB.Exec( `insert into api_keys (user_id, api_key, valid_until, changed_at) select id as user_id, @@ -2018,8 +2018,63 @@ func updateRatelimits() error { } logrus.Infof("updated %v api_keys in %v", ra, time.Since(now)) + if false { + fmt.Printf(`insert into api_ratelimits (user_id, second, hour, month, valid_until, changed_at) + +select + id as user_id, + case + when product = 'free' then 5 + when product = '%[1]v' then 10 + when product = '%[2]v' then 10 + when product = '%[3]v' then 30 + when product = '%[4]v' then 50 + when product = '%[5]v' then 50 + when product = 'plankton' then 20 + when product = 'goldfish' then 20 + when product = 'whale' then 25 + else 50 + end as second, + 0 as hour, + case + when product = 'free' then 120000 + when product = '%[1]v' then 500000 + when product = '%[2]v' then 1000000 + when product = '%[3]v' then 6000000 + when product = '%[4]v' then 500000000 + when product = '%[5]v' then 13000000 + when product = 'plankton' then 120000 + when product = 'goldfish' then 200000 + when product = 'whale' then 700000 + else 4000000000 + end as month, + case + when product = 'free' then to_timestamp('3000-01-01', 'YYYY-MM-DD') + when active = false then now() + else now() + interval '1 month' + end as valid_until, + now() as changed_at +from ( + select id, coalesce(product,'free') as product, key, active FROM (select id, price_id as product, api_key as key, coalesce(active,'f') as active from users left join (select * from users_stripe_subscriptions where price_id = any('{%[1]v,%[2]v,%[3]v,%[4]v,%[5]v}')) as us on users.stripe_customer_id = us.customer_id where api_key is not null AND (price_id is not null OR id not in (select user_id from app_subs_view where app_subs_view.user_id = users.id AND active = true)) UNION SELECT user_id, product_id as product, api_key as key, active from app_subs_view left join users on users.id = app_subs_view.user_id where active = true AND api_key is not null AND 
(stripe_customer_id is null OR stripe_customer_id NOT IN (select customer_id from users_stripe_subscriptions where active = true and price_id = any('{%[1]v,%[2]v,%[3]v,%[4]v,%[5]v}')))) t where product is null or active = true +) x +on conflict (user_id) do update set + second = excluded.second, + hour = excluded.hour, + month = excluded.month, + valid_until = excluded.valid_until, + changed_at = excluded.changed_at +where + api_ratelimits.second != excluded.second + or api_ratelimits.hour != excluded.hour + or api_ratelimits.month != excluded.month +`+"\n", utils.Config.Frontend.Stripe.Sapphire, + utils.Config.Frontend.Stripe.Emerald, + utils.Config.Frontend.Stripe.Diamond, + utils.Config.Frontend.Stripe.Custom1, + utils.Config.Frontend.Stripe.Custom2) + } now = time.Now() - res, err = db.WriterDb.Exec( + res, err = db.FrontendWriterDB.Exec( `insert into api_ratelimits (user_id, second, hour, month, valid_until, changed_at) select @@ -2084,7 +2139,7 @@ func updateRatelimits() error { } logrus.Infof("updated %v api_ratelimits in %v", ra, time.Since(now)) - res, err = db.WriterDb.Exec(` + res, err = db.FrontendWriterDB.Exec(` update api_ratelimits set valid_until = now() where valid_until > now() and user_id not in (select id from users where api_key is not null)`) diff --git a/local-deployment/run.sh b/local-deployment/run.sh index 2d79343b32..06473c9eb3 100755 --- a/local-deployment/run.sh +++ b/local-deployment/run.sh @@ -35,7 +35,11 @@ fn_misc() { } fn_sql() { - PGPASSWORD=pass psql -h localhost -p$POSTGRES_PORT -U postgres -d db + if [ -z "${1}" ]; then + PGPASSWORD=pass psql -h localhost -p$POSTGRES_PORT -U postgres -d db + else + PGPASSWORD=pass psql -h localhost -p$POSTGRES_PORT -U postgres -d db -c "$@" --csv --pset=pager=off + fi } fn_redis() { diff --git a/ratelimit/ratelimit.go b/ratelimit/ratelimit.go index 7d5186a485..2d3cff2b20 100644 --- a/ratelimit/ratelimit.go +++ b/ratelimit/ratelimit.go @@ -242,7 +242,7 @@ func HttpMiddleware(next http.Handler) http.Handler { next.ServeHTTP(w, r) return } - logrus.WithFields(logrus.Fields{"route": rl.Route, "key": rl.Key, "limit": rl.Limit, "remaining": rl.Remaining, "reset": rl.Reset, "window": rl.Window, "validKey": rl.IsValidKey}).Infof("rateLimiting") + // logrus.WithFields(logrus.Fields{"route": rl.Route, "key": rl.Key, "limit": rl.Limit, "remaining": rl.Remaining, "reset": rl.Reset, "window": rl.Window, "validKey": rl.IsValidKey}).Infof("rateLimiting") w.Header().Set(HeaderRateLimitLimit, strconv.FormatInt(rl.Limit, 10)) w.Header().Set(HeaderRateLimitRemaining, strconv.FormatInt(rl.Remaining, 10)) @@ -650,13 +650,12 @@ func rateLimitRequest(r *http.Request) (*RateLimitResult, error) { startUtc := start.UTC() res.Time = startUtc - t := startUtc.AddDate(0, 1, -startUtc.Day()) - endOfMonthUtc := time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, time.UTC) - timeUntilEndOfMonthUtc := endOfMonthUtc.Sub(startUtc) - endOfHourUtc := time.Now().Truncate(time.Hour).Add(time.Hour) - timeUntilEndOfHourUtc := endOfHourUtc.Sub(startUtc) - fmt.Printf("startUtc: %v, endOfMonthUtc: %v\n, diff: %v", startUtc, endOfMonthUtc, endOfMonthUtc.Sub(startUtc).Seconds()) + nextHourUtc := time.Now().Truncate(time.Hour).Add(time.Hour) + nextMonthUtc := time.Date(startUtc.Year(), startUtc.Month()+1, 1, 0, 0, 0, 0, time.UTC) + + timeUntilNextHourUtc := nextHourUtc.Sub(startUtc) + timeUntilNextMonthUtc := nextMonthUtc.Sub(startUtc) RateLimitSecondKey := fmt.Sprintf("ratelimit:second:%s:%s", res.Bucket, res.Key) RateLimitHourKey := 
fmt.Sprintf("ratelimit:hour:%04d-%02d-%02d:%s:%s", startUtc.Year(), startUtc.Month(), startUtc.Hour(), res.Bucket, res.Key) @@ -679,14 +678,14 @@ func rateLimitRequest(r *http.Request) (*RateLimitResult, error) { if res.RateLimit.Hour > 0 { RateLimitHour = pipe.IncrBy(ctx, RateLimitHourKey, weight) - pipe.ExpireAt(ctx, RateLimitHourKey, endOfHourUtc) - res.RedisKeys = append(res.RedisKeys, RedisKey{RateLimitHourKey, endOfHourUtc}) + pipe.ExpireAt(ctx, RateLimitHourKey, nextHourUtc) + res.RedisKeys = append(res.RedisKeys, RedisKey{RateLimitHourKey, nextHourUtc}) } if res.RateLimit.Month > 0 { RateLimitMonth = pipe.IncrBy(ctx, RateLimitMonthKey, weight) - pipe.ExpireAt(ctx, RateLimitMonthKey, endOfMonthUtc) - res.RedisKeys = append(res.RedisKeys, RedisKey{RateLimitMonthKey, endOfMonthUtc}) + pipe.ExpireAt(ctx, RateLimitMonthKey, nextMonthUtc) + res.RedisKeys = append(res.RedisKeys, RedisKey{RateLimitMonthKey, nextMonthUtc}) } pipe.Incr(ctx, statsKey) @@ -714,13 +713,13 @@ func rateLimitRequest(r *http.Request) (*RateLimitResult, error) { if RateLimitSecond.Val() > res.RateLimit.Hour { res.Limit = res.RateLimit.Hour res.Remaining = 0 - res.Reset = int64(timeUntilEndOfHourUtc.Seconds()) + res.Reset = int64(timeUntilNextHourUtc.Seconds()) res.Window = HourTimeWindow return res, nil } else if res.RateLimit.Hour-RateLimitHour.Val() > res.Limit { res.Limit = res.RateLimit.Hour res.Remaining = res.RateLimit.Hour - RateLimitHour.Val() - res.Reset = int64(timeUntilEndOfHourUtc.Seconds()) + res.Reset = int64(timeUntilNextHourUtc.Seconds()) res.Window = HourTimeWindow } } @@ -729,13 +728,13 @@ func rateLimitRequest(r *http.Request) (*RateLimitResult, error) { if RateLimitSecond.Val() > res.RateLimit.Month { res.Limit = res.RateLimit.Month res.Remaining = 0 - res.Reset = int64(timeUntilEndOfMonthUtc.Seconds()) + res.Reset = int64(timeUntilNextMonthUtc.Seconds()) res.Window = MonthTimeWindow return res, nil } else if res.RateLimit.Month-RateLimitMonth.Val() > res.Limit { res.Limit = res.RateLimit.Month res.Remaining = res.RateLimit.Month - RateLimitMonth.Val() - res.Reset = int64(timeUntilEndOfMonthUtc.Seconds()) + res.Reset = int64(timeUntilNextMonthUtc.Seconds()) res.Window = MonthTimeWindow } } From b47dbf05faf2c25269af50f153868d68810e8b90 Mon Sep 17 00:00:00 2001 From: Patrick Pfeiffer Date: Thu, 25 Jan 2024 18:07:02 +0100 Subject: [PATCH 08/42] (BIDS-2872) wip --- cmd/misc/main.go | 184 +----------------- .../20240125110000_add_apps_subs_view.sql | 23 +++ ....sql => 20240125120000_add_ratelimits.sql} | 37 ++-- handlers/user.go | 23 +-- ratelimit/ratelimit.go | 177 ++++++++++++++--- types/config.go | 10 - utils/utils.go | 25 --- 7 files changed, 212 insertions(+), 267 deletions(-) create mode 100644 db/migrations/20240125110000_add_apps_subs_view.sql rename db/migrations/{20240123120000_add_ratelimits.sql => 20240125120000_add_ratelimits.sql} (60%) diff --git a/cmd/misc/main.go b/cmd/misc/main.go index e27b944360..3b8b229d67 100644 --- a/cmd/misc/main.go +++ b/cmd/misc/main.go @@ -10,6 +10,7 @@ import ( "eth2-exporter/cmd/misc/commands" "eth2-exporter/db" "eth2-exporter/exporter" + "eth2-exporter/ratelimit" "eth2-exporter/rpc" "eth2-exporter/services" "eth2-exporter/types" @@ -392,7 +393,7 @@ func main() { case "fix-ens-addresses": err = fixEnsAddresses(erigonClient) case "update-ratelimits": - err = updateRatelimitsLoop() + err = updateRatelimits() case "add-users": err = addUsers() default: @@ -1965,13 +1966,11 @@ func addUsers() error { return nil } -func updateRatelimitsLoop() error { +func 
updateRatelimits() error { for { - err := updateRatelimits() + err := ratelimit.DBUpdate() if err != nil { logrus.WithError(err).Errorf("error in updateRatelimits") - time.Sleep(time.Second * 10) - continue } else { logrus.Infof("updated ratelimits") } @@ -1979,178 +1978,3 @@ func updateRatelimitsLoop() error { } return nil } - -func updateRatelimits() error { - for _, k := range []string{ - utils.Config.Frontend.Stripe.Sapphire, - utils.Config.Frontend.Stripe.Emerald, - utils.Config.Frontend.Stripe.Diamond, - utils.Config.Frontend.Stripe.Custom1, - utils.Config.Frontend.Stripe.Custom2, - } { - if k == "" { - logrus.Fatalf("invalid config.frontend.stripe key") - } - } - - now := time.Now() - var err error - res, err := db.FrontendWriterDB.Exec( - `insert into api_keys (user_id, api_key, valid_until, changed_at) - select - id as user_id, - api_key, - to_timestamp('3000-01-01', 'YYYY-MM-DD') as valid_until, - now() as changed_at - from users - where api_key is not null - on conflict (user_id, api_key) do update set - valid_until = excluded.valid_until, - changed_at = excluded.changed_at - where api_keys.valid_until != excluded.valid_until`, - ) - if err != nil { - return fmt.Errorf("error updating api_keys: %w", err) - } - ra, err := res.RowsAffected() - if err != nil { - return err - } - logrus.Infof("updated %v api_keys in %v", ra, time.Since(now)) - - if false { - fmt.Printf(`insert into api_ratelimits (user_id, second, hour, month, valid_until, changed_at) - -select - id as user_id, - case - when product = 'free' then 5 - when product = '%[1]v' then 10 - when product = '%[2]v' then 10 - when product = '%[3]v' then 30 - when product = '%[4]v' then 50 - when product = '%[5]v' then 50 - when product = 'plankton' then 20 - when product = 'goldfish' then 20 - when product = 'whale' then 25 - else 50 - end as second, - 0 as hour, - case - when product = 'free' then 120000 - when product = '%[1]v' then 500000 - when product = '%[2]v' then 1000000 - when product = '%[3]v' then 6000000 - when product = '%[4]v' then 500000000 - when product = '%[5]v' then 13000000 - when product = 'plankton' then 120000 - when product = 'goldfish' then 200000 - when product = 'whale' then 700000 - else 4000000000 - end as month, - case - when product = 'free' then to_timestamp('3000-01-01', 'YYYY-MM-DD') - when active = false then now() - else now() + interval '1 month' - end as valid_until, - now() as changed_at -from ( - select id, coalesce(product,'free') as product, key, active FROM (select id, price_id as product, api_key as key, coalesce(active,'f') as active from users left join (select * from users_stripe_subscriptions where price_id = any('{%[1]v,%[2]v,%[3]v,%[4]v,%[5]v}')) as us on users.stripe_customer_id = us.customer_id where api_key is not null AND (price_id is not null OR id not in (select user_id from app_subs_view where app_subs_view.user_id = users.id AND active = true)) UNION SELECT user_id, product_id as product, api_key as key, active from app_subs_view left join users on users.id = app_subs_view.user_id where active = true AND api_key is not null AND (stripe_customer_id is null OR stripe_customer_id NOT IN (select customer_id from users_stripe_subscriptions where active = true and price_id = any('{%[1]v,%[2]v,%[3]v,%[4]v,%[5]v}')))) t where product is null or active = true -) x -on conflict (user_id) do update set - second = excluded.second, - hour = excluded.hour, - month = excluded.month, - valid_until = excluded.valid_until, - changed_at = excluded.changed_at -where - api_ratelimits.second 
!= excluded.second - or api_ratelimits.hour != excluded.hour - or api_ratelimits.month != excluded.month -`+"\n", utils.Config.Frontend.Stripe.Sapphire, - utils.Config.Frontend.Stripe.Emerald, - utils.Config.Frontend.Stripe.Diamond, - utils.Config.Frontend.Stripe.Custom1, - utils.Config.Frontend.Stripe.Custom2) - } - now = time.Now() - res, err = db.FrontendWriterDB.Exec( - `insert into api_ratelimits (user_id, second, hour, month, valid_until, changed_at) - - select - id as user_id, - case - when product = 'free' then 5 - when product = $1 then 10 - when product = $2 then 10 - when product = $3 then 30 - when product = $4 then 50 - when product = $5 then 50 - when product = 'plankton' then 20 - when product = 'goldfish' then 20 - when product = 'whale' then 25 - else 50 - end as second, - 0 as hour, - case - when product = 'free' then 120000 - when product = $1 then 500000 - when product = $2 then 1000000 - when product = $3 then 6000000 - when product = $4 then 500000000 - when product = $5 then 13000000 - when product = 'plankton' then 120000 - when product = 'goldfish' then 200000 - when product = 'whale' then 700000 - else 4000000000 - end as month, - case - when product = 'free' then to_timestamp('3000-01-01', 'YYYY-MM-DD') - when active = false then now() - else now() + interval '1 month' - end as valid_until, - now() as changed_at - from ( - select id, coalesce(product,'free') as product, key, active FROM (select id, price_id as product, api_key as key, coalesce(active,'f') as active from users left join (select * from users_stripe_subscriptions where price_id = any('{$1,$2,$3,$4,$5}')) as us on users.stripe_customer_id = us.customer_id where api_key is not null AND (price_id is not null OR id not in (select user_id from app_subs_view where app_subs_view.user_id = users.id AND active = true)) UNION SELECT user_id, product_id as product, api_key as key, active from app_subs_view left join users on users.id = app_subs_view.user_id where active = true AND api_key is not null AND (stripe_customer_id is null OR stripe_customer_id NOT IN (select customer_id from users_stripe_subscriptions where active = true and price_id = any('{$1,$2,$3,$4,$5}')))) t where product is null or active = true - ) x - on conflict (user_id) do update set - second = excluded.second, - hour = excluded.hour, - month = excluded.month, - valid_until = excluded.valid_until, - changed_at = excluded.changed_at - where - api_ratelimits.second != excluded.second - or api_ratelimits.hour != excluded.hour - or api_ratelimits.month != excluded.month - `, - utils.Config.Frontend.Stripe.Sapphire, - utils.Config.Frontend.Stripe.Emerald, - utils.Config.Frontend.Stripe.Diamond, - utils.Config.Frontend.Stripe.Custom1, - utils.Config.Frontend.Stripe.Custom2, - ) - if err != nil { - return fmt.Errorf("error updating api_ratelimits: %w", err) - } - ra, err = res.RowsAffected() - if err != nil { - return err - } - logrus.Infof("updated %v api_ratelimits in %v", ra, time.Since(now)) - - res, err = db.FrontendWriterDB.Exec(` - update api_ratelimits - set valid_until = now() - where valid_until > now() and user_id not in (select id from users where api_key is not null)`) - if err != nil { - return fmt.Errorf("error invalidating api_ratelimits: %w", err) - } - ra, err = res.RowsAffected() - if err != nil { - return err - } - logrus.Infof("invalidated %v api_ratelimits in %v", ra, time.Since(now)) - - return err -} diff --git a/db/migrations/20240125110000_add_apps_subs_view.sql b/db/migrations/20240125110000_add_apps_subs_view.sql new 
file mode 100644 index 0000000000..7253a4434e --- /dev/null +++ b/db/migrations/20240125110000_add_apps_subs_view.sql @@ -0,0 +1,23 @@ +-- +goose Up +-- +goose StatementBegin +SELECT 'up SQL query - add view app_subs_view'; +CREATE OR REPLACE VIEW app_subs_view AS + SELECT users_app_subscriptions.id, + users_app_subscriptions.user_id, + users_app_subscriptions.product_id, + users_app_subscriptions.created_at, + users_app_subscriptions.updated_at, + users_app_subscriptions.validate_remotely, + users_app_subscriptions.active, + users_app_subscriptions.store, + users_app_subscriptions.expires_at, + users_app_subscriptions.reject_reason, + users_app_subscriptions.receipt_hash + FROM users_app_subscriptions; +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +SELECT 'down SQL query - drop view app_subs_view'; +DROP VIEW app_subs_view; +-- +goose StatementEnd diff --git a/db/migrations/20240123120000_add_ratelimits.sql b/db/migrations/20240125120000_add_ratelimits.sql similarity index 60% rename from db/migrations/20240123120000_add_ratelimits.sql rename to db/migrations/20240125120000_add_ratelimits.sql index 098caf1538..fe7a3e3ab6 100644 --- a/db/migrations/20240123120000_add_ratelimits.sql +++ b/db/migrations/20240125120000_add_ratelimits.sql @@ -11,6 +11,7 @@ CREATE TABLE IF NOT EXISTS changed_at TIMESTAMP WITHOUT TIME ZONE NOT NULL, PRIMARY KEY (user_id) ); + SELECT 'up SQL query - add table api_keys'; CREATE TABLE IF NOT EXISTS api_keys ( @@ -20,6 +21,7 @@ CREATE TABLE IF NOT EXISTS changed_at TIMESTAMP WITHOUT TIME ZONE NOT NULL, PRIMARY KEY (user_id, api_key) ); + SELECT 'up SQL query - add table api_weights'; CREATE TABLE IF NOT EXISTS api_weights ( @@ -32,20 +34,27 @@ CREATE TABLE IF NOT EXISTS PRIMARY KEY (endpoint, valid_from) ); -SELECT 'up SQL query - add view app_subs_view'; -CREATE OR REPLACE VIEW app_subs_view AS - SELECT users_app_subscriptions.id, - users_app_subscriptions.user_id, - users_app_subscriptions.product_id, - users_app_subscriptions.created_at, - users_app_subscriptions.updated_at, - users_app_subscriptions.validate_remotely, - users_app_subscriptions.active, - users_app_subscriptions.store, - users_app_subscriptions.expires_at, - users_app_subscriptions.reject_reason, - users_app_subscriptions.receipt_hash - FROM users_app_subscriptions; +SELECT 'up SQL query - add table api_products'; +CREATE TABLE IF NOT EXISTS + api_products ( + name VARCHAR(20) NOT NULL, + stripe_price_id VARCHAR(256) NOT NULL, + second INT NOT NULL DEFAULT 0, + hour INT NOT NULL DEFAULT 0, + month INT NOT NULL DEFAULT 0, + valid_from TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT TO_TIMESTAMP(0), + PRIMARY KEY (name, valid_from) + ); +INSERT INTO api_products (name, stripe_price_id, second, hour, month) VALUES + ('free' , 'price_free' , 5, 0, 30000), + ('sapphire', 'price_sapphire', 10, 0, 500000), + ('emerald' , 'price_emerald' , 10, 0, 1000000), + ('diamond' , 'price_diamond' , 30, 0, 6000000), + ('custom2' , 'price_custom2' , 50, 0, 13000000), + ('custom1' , 'price_custom1' , 50, 0, 500000000), + ('whale' , 'price_whale' , 25, 0, 700000), + ('goldfish', 'price_goldfish', 20, 0, 200000), + ('plankton', 'price_plankton', 20, 0, 120000); -- +goose StatementEnd -- +goose Down diff --git a/handlers/user.go b/handlers/user.go index 2a87ffd5da..f7accc1005 100644 --- a/handlers/user.go +++ b/handlers/user.go @@ -6,6 +6,7 @@ import ( "encoding/json" "eth2-exporter/db" "eth2-exporter/mail" + "eth2-exporter/ratelimit" "eth2-exporter/services" "eth2-exporter/templates" 
"eth2-exporter/types" @@ -87,21 +88,17 @@ func UserSettings(w http.ResponseWriter, r *http.Request) { statsSharing = false } - maxDaily := utils.Config.Frontend.Ratelimits.FreeDay - maxMonthly := utils.Config.Frontend.Ratelimits.FreeMonth - if subscription.PriceID != nil { - if *subscription.PriceID == utils.Config.Frontend.Stripe.Sapphire { - maxDaily = utils.Config.Frontend.Ratelimits.SapphierDay - maxMonthly = utils.Config.Frontend.Ratelimits.SapphierMonth - } else if *subscription.PriceID == utils.Config.Frontend.Stripe.Emerald { - maxDaily = utils.Config.Frontend.Ratelimits.EmeraldDay - maxMonthly = utils.Config.Frontend.Ratelimits.EmeraldMonth - } else if *subscription.PriceID == utils.Config.Frontend.Stripe.Diamond { - maxDaily = utils.Config.Frontend.Ratelimits.DiamondDay - maxMonthly = utils.Config.Frontend.Ratelimits.DiamondMonth - } + rl, err := ratelimit.DBGetUserApiRateLimit(int64(user.UserID)) + if err != nil { + logger.Errorf("Error retrieving the api-ratelimit for user: %v %v", user.UserID, err) + utils.SetFlash(w, r, "", "Error: Something went wrong.") + http.Redirect(w, r, "/user/settings", http.StatusSeeOther) + return } + maxDaily := int(rl.Hour * 24) + maxMonthly := int(rl.Month) + userSettingsData.ApiStatistics = &types.ApiStatistics{} if subscription.ApiKey != nil && len(*subscription.ApiKey) > 0 { diff --git a/ratelimit/ratelimit.go b/ratelimit/ratelimit.go index 2d3cff2b20..d51173a229 100644 --- a/ratelimit/ratelimit.go +++ b/ratelimit/ratelimit.go @@ -2,6 +2,7 @@ package ratelimit import ( "context" + "database/sql" "eth2-exporter/db" "eth2-exporter/metrics" "fmt" @@ -172,7 +173,7 @@ func Init(redisAddress string, requestSelectorOpt func(req *http.Request) bool) initializedWg.Done() firstRun = false } - time.Sleep(time.Second * 60) + time.Sleep(time.Second * 10) } }() go func() { @@ -189,7 +190,7 @@ func Init(redisAddress string, requestSelectorOpt func(req *http.Request) bool) initializedWg.Done() firstRun = false } - time.Sleep(time.Second * 60) + time.Sleep(time.Second * 10) } }() go func() { @@ -214,7 +215,7 @@ func Init(redisAddress string, requestSelectorOpt func(req *http.Request) bool) if err != nil { logger.WithError(err).Errorf("error updating stats") } - time.Sleep(time.Second * 60) + time.Sleep(time.Second * 10) } }() @@ -242,6 +243,7 @@ func HttpMiddleware(next http.Handler) http.Handler { next.ServeHTTP(w, r) return } + // logrus.WithFields(logrus.Fields{"route": rl.Route, "key": rl.Key, "limit": rl.Limit, "remaining": rl.Remaining, "reset": rl.Reset, "window": rl.Window, "validKey": rl.IsValidKey}).Infof("rateLimiting") w.Header().Set(HeaderRateLimitLimit, strconv.FormatInt(rl.Limit, 10)) @@ -258,7 +260,6 @@ func HttpMiddleware(next http.Handler) http.Handler { w.Header().Set(HeaderRateLimitLimitMonth, strconv.FormatInt(rl.RateLimit.Month, 10)) } - // note: maybe just look for rl.Remaining > 0 instead of rl.Weight > rl.Remaining if rl.Weight > rl.Remaining { w.Header().Set(HeaderRetryAfter, strconv.FormatInt(rl.Reset, 10)) http.Error(w, http.StatusText(http.StatusTooManyRequests), http.StatusTooManyRequests) @@ -268,6 +269,7 @@ func HttpMiddleware(next http.Handler) http.Handler { } return } + d := &responseWriterDelegator{ResponseWriter: w} next.ServeHTTP(d, r) err = postRateLimit(rl, d.Status()) @@ -640,6 +642,7 @@ func rateLimitRequest(r *http.Request) (*RateLimitResult, error) { } else { res.RateLimit = NoKeyRateLimit } + logger.Infof("userId: %v, key: %v, ip: %v, ok: %v, limit: %+v", userId, key, ip, ok, res.RateLimit) } rateLimitsMu.RUnlock() 
@@ -657,9 +660,9 @@ func rateLimitRequest(r *http.Request) (*RateLimitResult, error) { timeUntilNextHourUtc := nextHourUtc.Sub(startUtc) timeUntilNextMonthUtc := nextMonthUtc.Sub(startUtc) - RateLimitSecondKey := fmt.Sprintf("ratelimit:second:%s:%s", res.Bucket, res.Key) - RateLimitHourKey := fmt.Sprintf("ratelimit:hour:%04d-%02d-%02d:%s:%s", startUtc.Year(), startUtc.Month(), startUtc.Hour(), res.Bucket, res.Key) - RateLimitMonthKey := fmt.Sprintf("ratelimit:month:%04d-%02d:%s:%s", startUtc.Year(), startUtc.Month(), res.Bucket, res.Key) + rateLimitSecondKey := fmt.Sprintf("ratelimit:second:%s:%s", res.Bucket, res.Key) + rateLimitHourKey := fmt.Sprintf("ratelimit:hour:%04d-%02d-%02d:%s:%s", startUtc.Year(), startUtc.Month(), startUtc.Hour(), res.Bucket, res.Key) + rateLimitMonthKey := fmt.Sprintf("ratelimit:month:%04d-%02d:%s:%s", startUtc.Year(), startUtc.Month(), res.Bucket, res.Key) statsKey := fmt.Sprintf("ratelimit:stats:%04d-%02d-%02d-%02d:%s:%s", startUtc.Year(), startUtc.Month(), startUtc.Day(), startUtc.Hour(), res.Key, res.Route) if !res.IsValidKey { @@ -669,23 +672,23 @@ func rateLimitRequest(r *http.Request) (*RateLimitResult, error) { pipe := redisClient.Pipeline() - var RateLimitSecond, RateLimitHour, RateLimitMonth *redis.IntCmd + var rateLimitSecond, rateLimitHour, rateLimitMonth *redis.IntCmd if res.RateLimit.Second > 0 { - RateLimitSecond = pipe.IncrBy(ctx, RateLimitSecondKey, weight) - pipe.ExpireNX(ctx, RateLimitSecondKey, time.Second) + rateLimitSecond = pipe.IncrBy(ctx, rateLimitSecondKey, weight) + pipe.ExpireNX(ctx, rateLimitSecondKey, time.Second) } if res.RateLimit.Hour > 0 { - RateLimitHour = pipe.IncrBy(ctx, RateLimitHourKey, weight) - pipe.ExpireAt(ctx, RateLimitHourKey, nextHourUtc) - res.RedisKeys = append(res.RedisKeys, RedisKey{RateLimitHourKey, nextHourUtc}) + rateLimitHour = pipe.IncrBy(ctx, rateLimitHourKey, weight) + pipe.ExpireAt(ctx, rateLimitHourKey, nextHourUtc.Add(time.Second*600)) + res.RedisKeys = append(res.RedisKeys, RedisKey{rateLimitHourKey, nextHourUtc}) } if res.RateLimit.Month > 0 { - RateLimitMonth = pipe.IncrBy(ctx, RateLimitMonthKey, weight) - pipe.ExpireAt(ctx, RateLimitMonthKey, nextMonthUtc) - res.RedisKeys = append(res.RedisKeys, RedisKey{RateLimitMonthKey, nextMonthUtc}) + rateLimitMonth = pipe.IncrBy(ctx, rateLimitMonthKey, weight) + pipe.ExpireAt(ctx, rateLimitMonthKey, nextMonthUtc.Add(time.Second*600)) + res.RedisKeys = append(res.RedisKeys, RedisKey{rateLimitMonthKey, nextMonthUtc}) } pipe.Incr(ctx, statsKey) @@ -694,46 +697,48 @@ func rateLimitRequest(r *http.Request) (*RateLimitResult, error) { return nil, err } + logger.Infof("ratelimit: %+v", res.RateLimit) + if res.RateLimit.Second > 0 { - if RateLimitSecond.Val() > res.RateLimit.Second { + if rateLimitSecond.Val() > res.RateLimit.Second { res.Limit = res.RateLimit.Second res.Remaining = 0 res.Reset = int64(1) res.Window = SecondTimeWindow return res, nil - } else if res.RateLimit.Second-RateLimitSecond.Val() > res.Limit { + } else if res.RateLimit.Second-rateLimitSecond.Val() > res.Limit { res.Limit = res.RateLimit.Second - res.Remaining = res.RateLimit.Second - RateLimitSecond.Val() + res.Remaining = res.RateLimit.Second - rateLimitSecond.Val() res.Reset = int64(1) res.Window = SecondTimeWindow } } if res.RateLimit.Hour > 0 { - if RateLimitSecond.Val() > res.RateLimit.Hour { + if rateLimitHour.Val() > res.RateLimit.Hour { res.Limit = res.RateLimit.Hour res.Remaining = 0 res.Reset = int64(timeUntilNextHourUtc.Seconds()) res.Window = HourTimeWindow return res, nil - } 
else if res.RateLimit.Hour-RateLimitHour.Val() > res.Limit { + } else if res.RateLimit.Hour-rateLimitHour.Val() > res.Limit { res.Limit = res.RateLimit.Hour - res.Remaining = res.RateLimit.Hour - RateLimitHour.Val() + res.Remaining = res.RateLimit.Hour - rateLimitHour.Val() res.Reset = int64(timeUntilNextHourUtc.Seconds()) res.Window = HourTimeWindow } } if res.RateLimit.Month > 0 { - if RateLimitSecond.Val() > res.RateLimit.Month { + if rateLimitMonth.Val() > res.RateLimit.Month { res.Limit = res.RateLimit.Month res.Remaining = 0 res.Reset = int64(timeUntilNextMonthUtc.Seconds()) res.Window = MonthTimeWindow return res, nil - } else if res.RateLimit.Month-RateLimitMonth.Val() > res.Limit { + } else if res.RateLimit.Month-rateLimitMonth.Val() > res.Limit { res.Limit = res.RateLimit.Month - res.Remaining = res.RateLimit.Month - RateLimitMonth.Val() + res.Remaining = res.RateLimit.Month - rateLimitMonth.Val() res.Reset = int64(timeUntilNextMonthUtc.Seconds()) res.Window = MonthTimeWindow } @@ -860,3 +865,125 @@ func (rl *FallbackRateLimiter) Handle(w http.ResponseWriter, r *http.Request, ne rl.mu.Unlock() next(w, r) } + +type ApiProduct struct { + Name string + Second int64 + Hour int64 + Month int64 +} + +func DBGetUserApiRateLimit(userId int64) (*RateLimit, error) { + rl := &RateLimit{} + err := db.FrontendWriterDB.Get(rl, ` + select second, hour, month + from api_ratelimits + where user_id = $1`, userId) + return rl, err +} + +func DBGetCurrentApiProducts() ([]*ApiProduct, error) { + apiProducts := []*ApiProduct{} + err := db.FrontendWriterDB.Select(&apiProducts, ` + select distinct on (product) product, second, hour, month, valid_from + from api_products + where valid_from <= now() + order by product, valid_from desc`) + return apiProducts, err +} + +func DBUpdate() error { + var err error + now := time.Now() + res, err := DBUpdateApiKeys() + if err != nil { + return err + } + ra, err := res.RowsAffected() + if err != nil { + return err + } + logrus.Infof("updated %v api_keys in %v", ra, time.Since(now)) + + _, err = DBUpdateApiRatelimits() + if err != nil { + return err + } + ra, err = res.RowsAffected() + if err != nil { + return err + } + logrus.Infof("updated %v api_ratelimits in %v", ra, time.Since(now)) + + _, err = DBInvalidateApiKeys() + if err != nil { + return err + } + ra, err = res.RowsAffected() + if err != nil { + return err + } + logrus.Infof("invalidated %v api_keys in %v", ra, time.Since(now)) + + return nil +} + +func DBInvalidateApiKeys() (sql.Result, error) { + return db.FrontendWriterDB.Exec(` + update api_ratelimits + set changed_at = now(), valid_until = now() + where valid_until > now() + and user_id not in (select user_id from api_keys where api_key is not null)`) +} + +func DBUpdateApiKeys() (sql.Result, error) { + return db.FrontendWriterDB.Exec( + `insert into api_keys (user_id, api_key, valid_until, changed_at) + select + id as user_id, + api_key, + to_timestamp('3000-01-01', 'YYYY-MM-DD') as valid_until, + now() as changed_at + from users + where api_key is not null + on conflict (user_id, api_key) do update set + valid_until = excluded.valid_until, + changed_at = excluded.changed_at + where api_keys.valid_until != excluded.valid_until`, + ) +} + +func DBUpdateApiRatelimits() (sql.Result, error) { + return db.FrontendWriterDB.Exec( + `with + current_api_products as ( + select distinct on (name) name, stripe_price_id, second, hour, month, valid_from + from api_products + where valid_from <= now() + order by product, valid_from desc + ) + insert into 
api_ratelimits (user_id, second, hour, month, valid_until, changed_at) + select + u.id as user_id, + greatest(coalesce(cap1.second,0),coalesce(cap2.second,0)) as second, + greatest(coalesce(cap1.hour ,0),coalesce(cap2.hour ,0)) as hour, + greatest(coalesce(cap1.month ,0),coalesce(cap2.month ,0)) as month, + to_timestamp('3000-01-01', 'YYYY-MM-DD') as valid_until, + now() as changed_at + from users u + left join users_stripe_subscriptions uss on uss.customer_id = u.stripe_customer_id and uss.active = true + left join current_api_products cap on cap.stripe_price_id = uss.price_id + left join current_api_products cap1 on cap1.name = coalesce(cap.name,'free') + left join app_subs_view asv on asv.user_id = u.id and asv.active = true + left join current_api_products cap2 on cap2.name = coalesce(asv.product_id,'free') + on conflict (user_id) do update set + second = excluded.second, + hour = excluded.hour, + month = excluded.month, + valid_until = excluded.valid_until, + changed_at = now() + where + api_ratelimits.second != excluded.second + or api_ratelimits.hour != excluded.hour + or api_ratelimits.month != excluded.month`) +} diff --git a/types/config.go b/types/config.go index 31b70a52b2..222d48ad45 100644 --- a/types/config.go +++ b/types/config.go @@ -139,16 +139,6 @@ type Config struct { Plankton string `yaml:"plankton" envconfig:"FRONTEND_STRIPE_PLANKTON"` Webhook string `yaml:"webhook" envconfig:"FRONTEND_STRIPE_WEBHOOK"` } - Ratelimits struct { - FreeDay int `yaml:"freeDay" envconfig:"FRONTEND_RATELIMITS_FREE_DAY"` - FreeMonth int `yaml:"freeMonth" envconfig:"FRONTEND_RATELIMITS_FREE_MONTH"` - SapphierDay int `yaml:"sapphireDay" envconfig:"FRONTEND_RATELIMITS_SAPPHIRE_DAY"` - SapphierMonth int `yaml:"sapphireDay" envconfig:"FRONTEND_RATELIMITS_SAPPHIRE_MONTH"` - EmeraldDay int `yaml:"emeraldDay" envconfig:"FRONTEND_RATELIMITS_EMERALD_DAY"` - EmeraldMonth int `yaml:"emeraldMonth" envconfig:"FRONTEND_RATELIMITS_EMERALD_MONTH"` - DiamondDay int `yaml:"diamondDay" envconfig:"FRONTEND_RATELIMITS_DIAMOND_DAY"` - DiamondMonth int `yaml:"diamondMonth" envconfig:"FRONTEND_RATELIMITS_DIAMOND_MONTH"` - } `yaml:"ratelimits"` SessionSecret string `yaml:"sessionSecret" envconfig:"FRONTEND_SESSION_SECRET"` JwtSigningSecret string `yaml:"jwtSigningSecret" envconfig:"FRONTEND_JWT_SECRET"` JwtIssuer string `yaml:"jwtIssuer" envconfig:"FRONTEND_JWT_ISSUER"` diff --git a/utils/utils.go b/utils/utils.go index 3f7ebbb215..626dd80bc4 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -744,31 +744,6 @@ func ReadConfig(cfg *types.Config, path string) error { cfg.Frontend.Keywords = "open source ethereum block explorer, ethereum block explorer, beacon chain explorer, ethereum blockchain explorer" } - if cfg.Frontend.Ratelimits.FreeDay == 0 { - cfg.Frontend.Ratelimits.FreeDay = 30000 - } - if cfg.Frontend.Ratelimits.FreeMonth == 0 { - cfg.Frontend.Ratelimits.FreeMonth = 30000 - } - if cfg.Frontend.Ratelimits.SapphierDay == 0 { - cfg.Frontend.Ratelimits.SapphierDay = 100000 - } - if cfg.Frontend.Ratelimits.SapphierMonth == 0 { - cfg.Frontend.Ratelimits.SapphierMonth = 500000 - } - if cfg.Frontend.Ratelimits.EmeraldDay == 0 { - cfg.Frontend.Ratelimits.EmeraldDay = 200000 - } - if cfg.Frontend.Ratelimits.EmeraldMonth == 0 { - cfg.Frontend.Ratelimits.EmeraldMonth = 1000000 - } - if cfg.Frontend.Ratelimits.DiamondDay == 0 { - cfg.Frontend.Ratelimits.DiamondDay = 6000000 - } - if cfg.Frontend.Ratelimits.DiamondMonth == 0 { - cfg.Frontend.Ratelimits.DiamondMonth = 6000000 - } - if cfg.Chain.Id != 0 { switch 
cfg.Chain.Name { case "mainnet", "ethereum": From 4ee41f5d07c7d1cb46689d23abf736495464cd0e Mon Sep 17 00:00:00 2001 From: Patrick Pfeiffer Date: Tue, 30 Jan 2024 16:31:05 +0100 Subject: [PATCH 09/42] (BIDS-2872) wip --- db/ratelimit.go | 64 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) create mode 100644 db/ratelimit.go diff --git a/db/ratelimit.go b/db/ratelimit.go new file mode 100644 index 0000000000..c350867710 --- /dev/null +++ b/db/ratelimit.go @@ -0,0 +1,64 @@ +package db + +import ( + "database/sql" + "eth2-exporter/utils" +) + +func GetUserRatelimitProduct() {} + +func UpdateApiRatelimits() (sql.Result, error) { + return FrontendWriterDB.Exec( + `with + stripe_price_ids as ( + select product, price_id from ( values + ('sapphire', $1), + ('emerald', $2), + ('diamond', $3), + ('custom1', $4), + ('custom2', $5), + ('whale', $6), + ('goldfish', $7), + ('plankton', $8) + ) as x(product, price_id) + ), + current_api_products as ( + select distinct on (product) product, second, hour, month, valid_from + from api_products + where valid_from <= now() + order by product, valid_from desc + ) + insert into api_ratelimits (user_id, second, hour, month, valid_until, changed_at) + select + u.id as user_id, + greatest(coalesce(cap1.second,0),coalesce(cap2.second,0)) as second, + greatest(coalesce(cap1.hour ,0),coalesce(cap2.hour ,0)) as hour, + greatest(coalesce(cap1.month ,0),coalesce(cap2.month ,0)) as month, + to_timestamp('3000-01-01', 'YYYY-MM-DD') as valid_until, + now() as changed_at + from users u + left join users_stripe_subscriptions uss on uss.customer_id = u.stripe_customer_id and uss.active = true + left join stripe_price_ids spi1 on spi1.price_id = uss.price_id + left join current_api_products cap1 on cap1.product = coalesce(spi1.product,'free') + left join app_subs_view asv on asv.user_id = u.id and asv.active = true + left join current_api_products cap2 on cap2.product = coalesce(asv.product_id,'free') + on conflict (user_id) do update set + second = excluded.second, + hour = excluded.hour, + month = excluded.month, + valid_until = excluded.valid_until, + changed_at = now() + where + api_ratelimits.second != excluded.second + or api_ratelimits.hour != excluded.hour + or api_ratelimits.month != excluded.month`, + utils.Config.Frontend.Stripe.Sapphire, + utils.Config.Frontend.Stripe.Emerald, + utils.Config.Frontend.Stripe.Diamond, + utils.Config.Frontend.Stripe.Custom1, + utils.Config.Frontend.Stripe.Custom2, + utils.Config.Frontend.Stripe.Whale, + utils.Config.Frontend.Stripe.Goldfish, + utils.Config.Frontend.Stripe.Plankton, + ) +} From 8f2cbcb5bb859d9280e4a17e1ec13c4fb3336331 Mon Sep 17 00:00:00 2001 From: Patrick Pfeiffer Date: Thu, 8 Feb 2024 15:35:39 +0100 Subject: [PATCH 10/42] (BIDS-2872) wip --- cmd/misc/main.go | 29 -------- .../20240125120000_add_ratelimits.sql | 12 +-- ratelimit/ratelimit.go | 73 ++++++++++++------- 3 files changed, 50 insertions(+), 64 deletions(-) diff --git a/cmd/misc/main.go b/cmd/misc/main.go index 3b8b229d67..ef201dbb64 100644 --- a/cmd/misc/main.go +++ b/cmd/misc/main.go @@ -394,8 +394,6 @@ func main() { err = fixEnsAddresses(erigonClient) case "update-ratelimits": err = updateRatelimits() - case "add-users": - err = addUsers() default: utils.LogFatal(nil, fmt.Sprintf("unknown command %s", opts.Command), 0) } @@ -1939,33 +1937,6 @@ func reExportSyncCommittee(rpcClient rpc.Client, p uint64, dryRun bool) error { } } -func addUsers() error { - logrus.Infof("addUsers") - tx, err := db.WriterDb.Beginx() - if err != 
nil { - logrus.Fatalf("error starting tx: %v", err) - } - defer tx.Rollback() - - for i := 0; i < 10000; i++ { - _, err = tx.Exec(`INSERT INTO users (email, password, api_key, stripe_customer_id) VALUES ($1, 'xxx', $2, $3)`, fmt.Sprintf("user%d@email.com", i), fmt.Sprintf("apikey_%d", i), fmt.Sprintf("stripe_customer_%d", i)) - if err != nil { - return err - } - } - for i := 0; i < 100; i++ { - _, err = tx.Exec(`INSERT INTO users_stripe_subscriptions (subscription_id, customer_id, price_id, active, payload, purchase_group) VALUES ($1, $2, $3, $4, $5, $6)`, fmt.Sprintf("stripe_sub_%d", i), fmt.Sprintf("stripe_customer_%d", i), "price_diamond", true, "{}", "x") - if err != nil { - return err - } - } - err = tx.Commit() - if err != nil { - return err - } - return nil -} - func updateRatelimits() error { for { err := ratelimit.DBUpdate() diff --git a/db/migrations/20240125120000_add_ratelimits.sql b/db/migrations/20240125120000_add_ratelimits.sql index fe7a3e3ab6..37086e5516 100644 --- a/db/migrations/20240125120000_add_ratelimits.sql +++ b/db/migrations/20240125120000_add_ratelimits.sql @@ -45,16 +45,6 @@ CREATE TABLE IF NOT EXISTS valid_from TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT TO_TIMESTAMP(0), PRIMARY KEY (name, valid_from) ); -INSERT INTO api_products (name, stripe_price_id, second, hour, month) VALUES - ('free' , 'price_free' , 5, 0, 30000), - ('sapphire', 'price_sapphire', 10, 0, 500000), - ('emerald' , 'price_emerald' , 10, 0, 1000000), - ('diamond' , 'price_diamond' , 30, 0, 6000000), - ('custom2' , 'price_custom2' , 50, 0, 13000000), - ('custom1' , 'price_custom1' , 50, 0, 500000000), - ('whale' , 'price_whale' , 25, 0, 700000), - ('goldfish', 'price_goldfish', 20, 0, 200000), - ('plankton', 'price_plankton', 20, 0, 120000); -- +goose StatementEnd -- +goose Down @@ -65,4 +55,6 @@ SELECT 'down SQL query - drop table api_keys'; DROP TABLE IF EXISTS api_keys; SELECT 'down SQL query - drop table api_weights'; DROP TABLE IF EXISTS api_weights; +SELECT 'down SQL query - drop table api_products'; +DROP TABLE IF EXISTS api_products; -- +goose StatementEnd diff --git a/ratelimit/ratelimit.go b/ratelimit/ratelimit.go index d51173a229..7f916afd66 100644 --- a/ratelimit/ratelimit.go +++ b/ratelimit/ratelimit.go @@ -37,9 +37,9 @@ const ( HeaderRateLimitLimitHour = "X-RateLimit-Limit-Hour" // the rate limit ceiling that is applicable for the current user HeaderRateLimitLimitMonth = "X-RateLimit-Limit-Month" // the rate limit ceiling that is applicable for the current user - NokeyRateLimitSecond = 2 // RateLimit for requests without or with invalid apikey - NokeyRateLimitHour = 500 // RateLimit for requests without or with invalid apikey - NokeyRateLimitMonth = 0 // RateLimit for requests without or with invalid apikey + DefaultRateLimitSecond = 2 // RateLimit per second if no ratelimits are set in database + DefaultRateLimitHour = 500 // RateLimit per second if no ratelimits are set in database + DefaultRateLimitMonth = 0 // RateLimit per second if no ratelimits are set in database FallbackRateLimitSecond = 20 // RateLimit per second for when redis is offline FallbackRateLimitBurst = 20 // RateLimit burst for when redis is offline @@ -50,11 +50,13 @@ const ( ) var NoKeyRateLimit = &RateLimit{ - Second: NokeyRateLimitSecond, - Hour: NokeyRateLimitHour, - Month: NokeyRateLimitMonth, + Second: DefaultRateLimitSecond, + Hour: DefaultRateLimitHour, + Month: DefaultRateLimitMonth, } +var FreeRatelimit = NoKeyRateLimit + var redisClient *redis.Client var redisIsHealthy atomic.Bool @@ -548,8 
+550,27 @@ func updateRateLimits() error { return err } + dbApiProducts, err := DBGetCurrentApiProducts() + if err != nil { + return err + } + rateLimitsMu.Lock() now := time.Now() + + for _, dbApiProduct := range dbApiProducts { + if dbApiProduct.Name == "nokey" { + NoKeyRateLimit.Second = dbApiProduct.Second + NoKeyRateLimit.Hour = dbApiProduct.Hour + NoKeyRateLimit.Month = dbApiProduct.Month + } + if dbApiProduct.Name == "free" { + FreeRatelimit.Second = dbApiProduct.Second + FreeRatelimit.Hour = dbApiProduct.Hour + FreeRatelimit.Month = dbApiProduct.Month + } + } + for _, dbKey := range dbApiKeys { if dbKey.ChangedAt.After(lastTKeys) { lastTKeys = dbKey.ChangedAt @@ -592,6 +613,7 @@ func updateRateLimits() error { return nil } +// postRateLimit decrements the rate limit keys in redis if the status is not 200. func postRateLimit(rl *RateLimitResult, status int) error { if status == 200 { return nil @@ -612,6 +634,7 @@ func postRateLimit(rl *RateLimitResult, status int) error { return nil } +// rateLimitRequest is the main function for rate limiting, it will check the rate limits for the request and update the rate limits in redis. func rateLimitRequest(r *http.Request) (*RateLimitResult, error) { start := time.Now() defer func() { @@ -622,6 +645,7 @@ func rateLimitRequest(r *http.Request) (*RateLimitResult, error) { defer cancel() res := &RateLimitResult{} + // defer func() { logger.Infof("rateLimitRequest: %+v", *res) }() key, ip := getKey(r) res.Key = key @@ -640,9 +664,8 @@ func rateLimitRequest(r *http.Request) (*RateLimitResult, error) { if ok { res.RateLimit = limit } else { - res.RateLimit = NoKeyRateLimit + res.RateLimit = FreeRatelimit } - logger.Infof("userId: %v, key: %v, ip: %v, ok: %v, limit: %+v", userId, key, ip, ok, res.RateLimit) } rateLimitsMu.RUnlock() @@ -660,11 +683,11 @@ func rateLimitRequest(r *http.Request) (*RateLimitResult, error) { timeUntilNextHourUtc := nextHourUtc.Sub(startUtc) timeUntilNextMonthUtc := nextMonthUtc.Sub(startUtc) - rateLimitSecondKey := fmt.Sprintf("ratelimit:second:%s:%s", res.Bucket, res.Key) - rateLimitHourKey := fmt.Sprintf("ratelimit:hour:%04d-%02d-%02d:%s:%s", startUtc.Year(), startUtc.Month(), startUtc.Hour(), res.Bucket, res.Key) - rateLimitMonthKey := fmt.Sprintf("ratelimit:month:%04d-%02d:%s:%s", startUtc.Year(), startUtc.Month(), res.Bucket, res.Key) + rateLimitSecondKey := fmt.Sprintf("ratelimit:second:%s:%s", res.Bucket, res.UserId) + rateLimitHourKey := fmt.Sprintf("ratelimit:hour:%04d-%02d-%02d-%02d:%s:%d", startUtc.Year(), startUtc.Month(), startUtc.Day(), startUtc.Hour(), res.Bucket, res.UserId) + rateLimitMonthKey := fmt.Sprintf("ratelimit:month:%04d-%02d:%s:%d", startUtc.Year(), startUtc.Month(), res.Bucket, res.UserId) - statsKey := fmt.Sprintf("ratelimit:stats:%04d-%02d-%02d-%02d:%s:%s", startUtc.Year(), startUtc.Month(), startUtc.Day(), startUtc.Hour(), res.Key, res.Route) + statsKey := fmt.Sprintf("ratelimit:stats:%04d-%02d-%02d-%02d:%d:%s", startUtc.Year(), startUtc.Month(), startUtc.Day(), startUtc.Hour(), res.UserId, res.Route) if !res.IsValidKey { statsKey = fmt.Sprintf("ratelimit:stats:%04d-%02d-%02d-%02d:%s:%s", startUtc.Year(), startUtc.Month(), startUtc.Day(), startUtc.Hour(), "nokey", res.Route) } @@ -681,14 +704,14 @@ func rateLimitRequest(r *http.Request) (*RateLimitResult, error) { if res.RateLimit.Hour > 0 { rateLimitHour = pipe.IncrBy(ctx, rateLimitHourKey, weight) - pipe.ExpireAt(ctx, rateLimitHourKey, nextHourUtc.Add(time.Second*600)) - res.RedisKeys = append(res.RedisKeys, RedisKey{rateLimitHourKey, 
nextHourUtc}) + pipe.ExpireAt(ctx, rateLimitHourKey, nextHourUtc.Add(time.Second*60)) // expire 1 minute after the window to make sure we do not miss any requests due to time-sync + res.RedisKeys = append(res.RedisKeys, RedisKey{rateLimitHourKey, nextHourUtc.Add(time.Second * 60)}) } if res.RateLimit.Month > 0 { rateLimitMonth = pipe.IncrBy(ctx, rateLimitMonthKey, weight) - pipe.ExpireAt(ctx, rateLimitMonthKey, nextMonthUtc.Add(time.Second*600)) - res.RedisKeys = append(res.RedisKeys, RedisKey{rateLimitMonthKey, nextMonthUtc}) + pipe.ExpireAt(ctx, rateLimitMonthKey, nextMonthUtc.Add(time.Second*60)) // expire 1 minute after the window to make sure we do not miss any requests due to time-sync + res.RedisKeys = append(res.RedisKeys, RedisKey{rateLimitMonthKey, nextMonthUtc.Add(time.Second * 60)}) } pipe.Incr(ctx, statsKey) @@ -697,8 +720,6 @@ func rateLimitRequest(r *http.Request) (*RateLimitResult, error) { return nil, err } - logger.Infof("ratelimit: %+v", res.RateLimit) - if res.RateLimit.Second > 0 { if rateLimitSecond.Val() > res.RateLimit.Second { res.Limit = res.RateLimit.Second @@ -867,10 +888,12 @@ func (rl *FallbackRateLimiter) Handle(w http.ResponseWriter, r *http.Request, ne } type ApiProduct struct { - Name string - Second int64 - Hour int64 - Month int64 + Name string `db:"name"` + StripePriceID string `db:"stripe_price_id"` + Second int64 `db:"second"` + Hour int64 `db:"hour"` + Month int64 `db:"month"` + ValidFrom time.Time `db:"valid_from"` } func DBGetUserApiRateLimit(userId int64) (*RateLimit, error) { @@ -885,10 +908,10 @@ func DBGetUserApiRateLimit(userId int64) (*RateLimit, error) { func DBGetCurrentApiProducts() ([]*ApiProduct, error) { apiProducts := []*ApiProduct{} err := db.FrontendWriterDB.Select(&apiProducts, ` - select distinct on (product) product, second, hour, month, valid_from + select distinct on (name) name, stripe_price_id, second, hour, month, valid_from from api_products where valid_from <= now() - order by product, valid_from desc`) + order by name, valid_from desc`) return apiProducts, err } @@ -960,7 +983,7 @@ func DBUpdateApiRatelimits() (sql.Result, error) { select distinct on (name) name, stripe_price_id, second, hour, month, valid_from from api_products where valid_from <= now() - order by product, valid_from desc + order by name, valid_from desc ) insert into api_ratelimits (user_id, second, hour, month, valid_until, changed_at) select From 49de9a7db8c254427ae4aaa62677e5a4615968cd Mon Sep 17 00:00:00 2001 From: Patrick Pfeiffer Date: Fri, 9 Feb 2024 13:52:54 +0100 Subject: [PATCH 11/42] (BIDS-2872) wip --- cmd/misc/main.go | 2 - .../20240125120000_add_ratelimits.sql | 8 +++ db/ratelimit.go | 64 ------------------- ratelimit/ratelimit.go | 30 +++++---- 4 files changed, 26 insertions(+), 78 deletions(-) delete mode 100644 db/ratelimit.go diff --git a/cmd/misc/main.go b/cmd/misc/main.go index ef201dbb64..cf3201198f 100644 --- a/cmd/misc/main.go +++ b/cmd/misc/main.go @@ -1942,8 +1942,6 @@ func updateRatelimits() error { err := ratelimit.DBUpdate() if err != nil { logrus.WithError(err).Errorf("error in updateRatelimits") - } else { - logrus.Infof("updated ratelimits") } time.Sleep(time.Second * 10) } diff --git a/db/migrations/20240125120000_add_ratelimits.sql b/db/migrations/20240125120000_add_ratelimits.sql index 37086e5516..fd05e1fe2d 100644 --- a/db/migrations/20240125120000_add_ratelimits.sql +++ b/db/migrations/20240125120000_add_ratelimits.sql @@ -12,6 +12,8 @@ CREATE TABLE IF NOT EXISTS PRIMARY KEY (user_id) ); +CREATE INDEX IF NOT EXISTS 
idx_api_ratelimits_changed_at ON api_ratelimits (changed_at); + SELECT 'up SQL query - add table api_keys'; CREATE TABLE IF NOT EXISTS api_keys ( @@ -22,6 +24,8 @@ CREATE TABLE IF NOT EXISTS PRIMARY KEY (user_id, api_key) ); +CREATE INDEX IF NOT EXISTS idx_api_keys_changed_at ON api_keys (changed_at); + SELECT 'up SQL query - add table api_weights'; CREATE TABLE IF NOT EXISTS api_weights ( @@ -51,8 +55,12 @@ CREATE TABLE IF NOT EXISTS -- +goose StatementBegin SELECT 'down SQL query - drop table api_ratelimits'; DROP TABLE IF EXISTS api_ratelimits; +SELECT 'down SQL query - drop index idx_api_ratelimits_changed_at'; +DROP INDEX IF EXISTS idx_api_ratelimits_changed_at; SELECT 'down SQL query - drop table api_keys'; DROP TABLE IF EXISTS api_keys; +SELECT 'down SQL query - drop index idx_api_keys_changed_at'; +DROP INDEX IF EXISTS idx_api_keys_changed_at; SELECT 'down SQL query - drop table api_weights'; DROP TABLE IF EXISTS api_weights; SELECT 'down SQL query - drop table api_products'; diff --git a/db/ratelimit.go b/db/ratelimit.go deleted file mode 100644 index c350867710..0000000000 --- a/db/ratelimit.go +++ /dev/null @@ -1,64 +0,0 @@ -package db - -import ( - "database/sql" - "eth2-exporter/utils" -) - -func GetUserRatelimitProduct() {} - -func UpdateApiRatelimits() (sql.Result, error) { - return FrontendWriterDB.Exec( - `with - stripe_price_ids as ( - select product, price_id from ( values - ('sapphire', $1), - ('emerald', $2), - ('diamond', $3), - ('custom1', $4), - ('custom2', $5), - ('whale', $6), - ('goldfish', $7), - ('plankton', $8) - ) as x(product, price_id) - ), - current_api_products as ( - select distinct on (product) product, second, hour, month, valid_from - from api_products - where valid_from <= now() - order by product, valid_from desc - ) - insert into api_ratelimits (user_id, second, hour, month, valid_until, changed_at) - select - u.id as user_id, - greatest(coalesce(cap1.second,0),coalesce(cap2.second,0)) as second, - greatest(coalesce(cap1.hour ,0),coalesce(cap2.hour ,0)) as hour, - greatest(coalesce(cap1.month ,0),coalesce(cap2.month ,0)) as month, - to_timestamp('3000-01-01', 'YYYY-MM-DD') as valid_until, - now() as changed_at - from users u - left join users_stripe_subscriptions uss on uss.customer_id = u.stripe_customer_id and uss.active = true - left join stripe_price_ids spi1 on spi1.price_id = uss.price_id - left join current_api_products cap1 on cap1.product = coalesce(spi1.product,'free') - left join app_subs_view asv on asv.user_id = u.id and asv.active = true - left join current_api_products cap2 on cap2.product = coalesce(asv.product_id,'free') - on conflict (user_id) do update set - second = excluded.second, - hour = excluded.hour, - month = excluded.month, - valid_until = excluded.valid_until, - changed_at = now() - where - api_ratelimits.second != excluded.second - or api_ratelimits.hour != excluded.hour - or api_ratelimits.month != excluded.month`, - utils.Config.Frontend.Stripe.Sapphire, - utils.Config.Frontend.Stripe.Emerald, - utils.Config.Frontend.Stripe.Diamond, - utils.Config.Frontend.Stripe.Custom1, - utils.Config.Frontend.Stripe.Custom2, - utils.Config.Frontend.Stripe.Whale, - utils.Config.Frontend.Stripe.Goldfish, - utils.Config.Frontend.Stripe.Plankton, - ) -} diff --git a/ratelimit/ratelimit.go b/ratelimit/ratelimit.go index 7f916afd66..ca504ce390 100644 --- a/ratelimit/ratelimit.go +++ b/ratelimit/ratelimit.go @@ -285,6 +285,7 @@ func HttpMiddleware(next http.Handler) http.Handler { func updateWeights(firstRun bool) error { start := 
time.Now() defer func() { + logger.Infof("updateWeights took %v", time.Since(start).Seconds()) metrics.TaskDuration.WithLabelValues("ratelimit_updateWeights").Observe(time.Since(start).Seconds()) }() @@ -341,6 +342,7 @@ func updateRedisStatus() error { func updateStats() error { start := time.Now() defer func() { + logger.Infof("updateStats took %v", time.Since(start).Seconds()) metrics.TaskDuration.WithLabelValues("ratelimit_updateStats").Observe(time.Since(start).Seconds()) }() @@ -505,6 +507,7 @@ func updateStatsEntries(entries []dbEntry) error { func updateRateLimits() error { start := time.Now() defer func() { + logger.Infof("updateRateLimits took %v", time.Since(start).Seconds()) metrics.TaskDuration.WithLabelValues("ratelimit_updateRateLimits").Observe(time.Since(start).Seconds()) }() @@ -928,6 +931,7 @@ func DBUpdate() error { } logrus.Infof("updated %v api_keys in %v", ra, time.Since(now)) + now = time.Now() _, err = DBUpdateApiRatelimits() if err != nil { return err @@ -938,15 +942,15 @@ func DBUpdate() error { } logrus.Infof("updated %v api_ratelimits in %v", ra, time.Since(now)) - _, err = DBInvalidateApiKeys() - if err != nil { - return err - } - ra, err = res.RowsAffected() - if err != nil { - return err - } - logrus.Infof("invalidated %v api_keys in %v", ra, time.Since(now)) + // _, err = DBInvalidateApiKeys() + // if err != nil { + // return err + // } + // ra, err = res.RowsAffected() + // if err != nil { + // return err + // } + // logrus.Infof("invalidated %v api_keys in %v", ra, time.Since(now)) return nil } @@ -955,8 +959,7 @@ func DBInvalidateApiKeys() (sql.Result, error) { return db.FrontendWriterDB.Exec(` update api_ratelimits set changed_at = now(), valid_until = now() - where valid_until > now() - and user_id not in (select user_id from api_keys where api_key is not null)`) + where valid_until > now() and not exists (select id from api_keys where api_keys.user_id = api_ratelimits.user_id)`) } func DBUpdateApiKeys() (sql.Result, error) { @@ -968,7 +971,7 @@ func DBUpdateApiKeys() (sql.Result, error) { to_timestamp('3000-01-01', 'YYYY-MM-DD') as valid_until, now() as changed_at from users - where api_key is not null + where api_key is not null and not exists (select user_id from api_keys where api_keys.user_id = users.id) on conflict (user_id, api_key) do update set valid_until = excluded.valid_until, changed_at = excluded.changed_at @@ -999,6 +1002,9 @@ func DBUpdateApiRatelimits() (sql.Result, error) { left join current_api_products cap1 on cap1.name = coalesce(cap.name,'free') left join app_subs_view asv on asv.user_id = u.id and asv.active = true left join current_api_products cap2 on cap2.name = coalesce(asv.product_id,'free') + left join api_ratelimits ar on ar.user_id = u.id + where + cap1.name != 'free' or cap2.name != 'free' or ar.user_id is not null on conflict (user_id) do update set second = excluded.second, hour = excluded.hour, From 4b52d57ad63da78add089085a574716e3bee3ed5 Mon Sep 17 00:00:00 2001 From: Patrick Pfeiffer Date: Mon, 12 Feb 2024 09:05:38 +0100 Subject: [PATCH 12/42] (BIDS-2872) wip --- db/migrations/20240125120000_add_ratelimits.sql | 4 ++-- ratelimit/ratelimit.go | 16 ++++++++-------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/db/migrations/20240125120000_add_ratelimits.sql b/db/migrations/20240125120000_add_ratelimits.sql index fd05e1fe2d..8f2a6a239b 100644 --- a/db/migrations/20240125120000_add_ratelimits.sql +++ b/db/migrations/20240125120000_add_ratelimits.sql @@ -12,7 +12,7 @@ CREATE TABLE IF NOT EXISTS PRIMARY 
KEY (user_id) ); -CREATE INDEX IF NOT EXISTS idx_api_ratelimits_changed_at ON api_ratelimits (changed_at); +CREATE INDEX IF NOT EXISTS idx_api_ratelimits_changed_at_valid_until ON api_ratelimits (changed_at, valid_until); SELECT 'up SQL query - add table api_keys'; CREATE TABLE IF NOT EXISTS @@ -24,7 +24,7 @@ CREATE TABLE IF NOT EXISTS PRIMARY KEY (user_id, api_key) ); -CREATE INDEX IF NOT EXISTS idx_api_keys_changed_at ON api_keys (changed_at); +CREATE INDEX IF NOT EXISTS idx_api_keys_changed_at_valid_until ON api_keys (changed_at, valid_until); SELECT 'up SQL query - add table api_weights'; CREATE TABLE IF NOT EXISTS diff --git a/ratelimit/ratelimit.go b/ratelimit/ratelimit.go index ca504ce390..ae2668f697 100644 --- a/ratelimit/ratelimit.go +++ b/ratelimit/ratelimit.go @@ -285,7 +285,7 @@ func HttpMiddleware(next http.Handler) http.Handler { func updateWeights(firstRun bool) error { start := time.Now() defer func() { - logger.Infof("updateWeights took %v", time.Since(start).Seconds()) + logger.WithField("duration", time.Since(start)).Infof("updateWeights") metrics.TaskDuration.WithLabelValues("ratelimit_updateWeights").Observe(time.Since(start).Seconds()) }() @@ -342,7 +342,7 @@ func updateRedisStatus() error { func updateStats() error { start := time.Now() defer func() { - logger.Infof("updateStats took %v", time.Since(start).Seconds()) + logger.WithField("duration", time.Since(start)).Infof("updateStats") metrics.TaskDuration.WithLabelValues("ratelimit_updateStats").Observe(time.Since(start).Seconds()) }() @@ -507,7 +507,7 @@ func updateStatsEntries(entries []dbEntry) error { func updateRateLimits() error { start := time.Now() defer func() { - logger.Infof("updateRateLimits took %v", time.Since(start).Seconds()) + logger.WithField("duration", time.Since(start)).Infof("updateRateLimits") metrics.TaskDuration.WithLabelValues("ratelimit_updateRateLimits").Observe(time.Since(start).Seconds()) }() @@ -690,7 +690,7 @@ func rateLimitRequest(r *http.Request) (*RateLimitResult, error) { rateLimitHourKey := fmt.Sprintf("ratelimit:hour:%04d-%02d-%02d-%02d:%s:%d", startUtc.Year(), startUtc.Month(), startUtc.Day(), startUtc.Hour(), res.Bucket, res.UserId) rateLimitMonthKey := fmt.Sprintf("ratelimit:month:%04d-%02d:%s:%d", startUtc.Year(), startUtc.Month(), res.Bucket, res.UserId) - statsKey := fmt.Sprintf("ratelimit:stats:%04d-%02d-%02d-%02d:%d:%s", startUtc.Year(), startUtc.Month(), startUtc.Day(), startUtc.Hour(), res.UserId, res.Route) + statsKey := fmt.Sprintf("ratelimit:stats:%04d-%02d-%02d-%02d:%s:%s", startUtc.Year(), startUtc.Month(), startUtc.Day(), startUtc.Hour(), res.Key, res.Route) if !res.IsValidKey { statsKey = fmt.Sprintf("ratelimit:stats:%04d-%02d-%02d-%02d:%s:%s", startUtc.Year(), startUtc.Month(), startUtc.Day(), startUtc.Hour(), "nokey", res.Route) } @@ -920,7 +920,7 @@ func DBGetCurrentApiProducts() ([]*ApiProduct, error) { func DBUpdate() error { var err error - now := time.Now() + start := time.Now() res, err := DBUpdateApiKeys() if err != nil { return err @@ -929,9 +929,9 @@ func DBUpdate() error { if err != nil { return err } - logrus.Infof("updated %v api_keys in %v", ra, time.Since(now)) + logrus.WithField("duration", time.Since(start)).WithField("updates", ra).Infof("updated api_keys") - now = time.Now() + start = time.Now() _, err = DBUpdateApiRatelimits() if err != nil { return err @@ -940,7 +940,7 @@ func DBUpdate() error { if err != nil { return err } - logrus.Infof("updated %v api_ratelimits in %v", ra, time.Since(now)) + logrus.WithField("duration", 
time.Since(start)).WithField("updates", ra).Infof("updated api_ratelimits") // _, err = DBInvalidateApiKeys() // if err != nil { From c6cfbef0a05207d22335d44db79cf2c3494bb491 Mon Sep 17 00:00:00 2001 From: Patrick Pfeiffer Date: Mon, 12 Feb 2024 12:26:08 +0100 Subject: [PATCH 13/42] (BIDS-2872) wip --- ratelimit/ratelimit.go | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/ratelimit/ratelimit.go b/ratelimit/ratelimit.go index ae2668f697..178635a5c3 100644 --- a/ratelimit/ratelimit.go +++ b/ratelimit/ratelimit.go @@ -942,24 +942,26 @@ func DBUpdate() error { } logrus.WithField("duration", time.Since(start)).WithField("updates", ra).Infof("updated api_ratelimits") - // _, err = DBInvalidateApiKeys() - // if err != nil { - // return err - // } - // ra, err = res.RowsAffected() - // if err != nil { - // return err - // } - // logrus.Infof("invalidated %v api_keys in %v", ra, time.Since(now)) + start = time.Now() + _, err = DBInvalidateApiKeys() + if err != nil { + return err + } + ra, err = res.RowsAffected() + if err != nil { + return err + } + logrus.WithField("duration", time.Since(start)).WithField("updates", ra).Infof("invalidated api_keys") return nil } +// DBInvalidateApiKeys invalidates api-keys where there is no corresponding user func DBInvalidateApiKeys() (sql.Result, error) { return db.FrontendWriterDB.Exec(` - update api_ratelimits + update api_keys set changed_at = now(), valid_until = now() - where valid_until > now() and not exists (select id from api_keys where api_keys.user_id = api_ratelimits.user_id)`) + where valid_until > now() and not exists (select apikey from users where aid = api_keys.user_id)`) } func DBUpdateApiKeys() (sql.Result, error) { From 15eb4be46538eb404be981aceff00c6b73c7415d Mon Sep 17 00:00:00 2001 From: Patrick Pfeiffer Date: Tue, 13 Feb 2024 09:24:42 +0100 Subject: [PATCH 14/42] (BIDS-2872) wip --- cmd/explorer/main.go | 2 +- cmd/misc/main.go | 13 +- .../20240125120000_add_ratelimits.sql | 19 +- local-deployment/network-params.json | 2 +- local-deployment/provision-explorer-config.sh | 2 - local-deployment/run.sh | 6 +- ratelimit/ratelimit.go | 171 ++++++++++++------ types/config.go | 2 - 8 files changed, 138 insertions(+), 79 deletions(-) diff --git a/cmd/explorer/main.go b/cmd/explorer/main.go index c4e7b519de..2ecf59860c 100644 --- a/cmd/explorer/main.go +++ b/cmd/explorer/main.go @@ -607,7 +607,7 @@ func main() { router.Use(metrics.HttpMiddleware) } - ratelimit.Init(utils.Config.RedisSessionStoreEndpoint, ratelimit.DefaultRequestCollector) + ratelimit.Init() router.Use(ratelimit.HttpMiddleware) n := negroni.New(negroni.NewRecovery()) diff --git a/cmd/misc/main.go b/cmd/misc/main.go index cf3201198f..a98962860a 100644 --- a/cmd/misc/main.go +++ b/cmd/misc/main.go @@ -393,7 +393,7 @@ func main() { case "fix-ens-addresses": err = fixEnsAddresses(erigonClient) case "update-ratelimits": - err = updateRatelimits() + ratelimit.DBUpdater() default: utils.LogFatal(nil, fmt.Sprintf("unknown command %s", opts.Command), 0) } @@ -1936,14 +1936,3 @@ func reExportSyncCommittee(rpcClient rpc.Client, p uint64, dryRun bool) error { return tx.Commit() } } - -func updateRatelimits() error { - for { - err := ratelimit.DBUpdate() - if err != nil { - logrus.WithError(err).Errorf("error in updateRatelimits") - } - time.Sleep(time.Second * 10) - } - return nil -} diff --git a/db/migrations/20240125120000_add_ratelimits.sql b/db/migrations/20240125120000_add_ratelimits.sql index 8f2a6a239b..62df738c4c 100644 --- 
a/db/migrations/20240125120000_add_ratelimits.sql +++ b/db/migrations/20240125120000_add_ratelimits.sql @@ -33,7 +33,7 @@ CREATE TABLE IF NOT EXISTS endpoint TEXT NOT NULL, method TEXT NOT NULL, params TEXT NOT NULL, - weight INT NOT NULL DEFAULT 0, + weight INT NOT NULL DEFAULT 1, valid_from TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT TO_TIMESTAMP(0), PRIMARY KEY (endpoint, valid_from) ); @@ -49,6 +49,19 @@ CREATE TABLE IF NOT EXISTS valid_from TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT TO_TIMESTAMP(0), PRIMARY KEY (name, valid_from) ); + +SELECT 'up SQL query - add table api_products'; +CREATE TABLE IF NOT EXISTS + api_stats ( + ts TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT NOW(), + user_id INT NOT NULL, + api_key VARCHAR(256) NOT NULL, + endpoint TEXT NOT NULL, + count INT NOT NULL, + PRIMARY KEY (ts, user_id, api_key, endpoint) + ); + +CREATE INDEX IF NOT EXISTS idx_api_stats_ts_user_id ON api_stats (ts, user_id); -- +goose StatementEnd -- +goose Down @@ -65,4 +78,8 @@ SELECT 'down SQL query - drop table api_weights'; DROP TABLE IF EXISTS api_weights; SELECT 'down SQL query - drop table api_products'; DROP TABLE IF EXISTS api_products; +SELECT 'down SQL query - drop table api_stats'; +DROP TABLE IF EXISTS api_stats; +SELECT 'down SQL query - drop index idx_api_stats_ts_user_id'; +DROP INDEX IF EXISTS idx_api_stats_ts_user_id; -- +goose StatementEnd diff --git a/local-deployment/network-params.json b/local-deployment/network-params.json index 90e419ad9c..6a9739401f 100644 --- a/local-deployment/network-params.json +++ b/local-deployment/network-params.json @@ -26,7 +26,7 @@ "electra_fork_epoch": 999999999 }, "global_client_log_level": "info", - "start_tx_spammer": true, + "start_tx_spammer": false, "start_blob_spammer": false } \ No newline at end of file diff --git a/local-deployment/provision-explorer-config.sh b/local-deployment/provision-explorer-config.sh index 121e9fa92f..38d7b15dc8 100644 --- a/local-deployment/provision-explorer-config.sh +++ b/local-deployment/provision-explorer-config.sh @@ -101,8 +101,6 @@ frontend: sapphire: price_sapphire emerald: price_emerald diamond: price_diamond - custom1: price_custom1 - custom2: price_custom2 indexer: # fullIndexOnStartup: false # Perform a one time full db index on startup diff --git a/local-deployment/run.sh b/local-deployment/run.sh index 06473c9eb3..6c4b93c110 100755 --- a/local-deployment/run.sh +++ b/local-deployment/run.sh @@ -43,7 +43,11 @@ fn_sql() { } fn_redis() { - docker compose exec redis-sessions redis-cli + if [ -z "${1}" ]; then + docker compose exec redis-sessions redis-cli + else + docker compose exec redis-sessions redis-cli "$@" + fi #redis-cli -h localhost -p $REDIS_PORT } diff --git a/ratelimit/ratelimit.go b/ratelimit/ratelimit.go index 178635a5c3..d33a570043 100644 --- a/ratelimit/ratelimit.go +++ b/ratelimit/ratelimit.go @@ -5,6 +5,7 @@ import ( "database/sql" "eth2-exporter/db" "eth2-exporter/metrics" + "eth2-exporter/utils" "fmt" "net" "net/http" @@ -44,7 +45,7 @@ const ( FallbackRateLimitSecond = 20 // RateLimit per second for when redis is offline FallbackRateLimitBurst = 20 // RateLimit burst for when redis is offline - defaultBucket = "default" + defaultBucket = "default" // if no bucket is set for a route, use this one statsTruncateDuration = time.Hour * 1 // ratelimit-stats are truncated to this duration ) @@ -81,11 +82,12 @@ var pathPrefix = "" // only requests with this prefix will be ratelimited var logger = logrus.StandardLogger().WithField("module", "ratelimit") -type dbEntry struct { - Date 
time.Time - ApiKey string - Path string - Count int64 +type DbEntry struct { + Date time.Time + UserId int64 + ApiKey string + Endpoint string + Count int64 } type RateLimit struct { @@ -117,6 +119,15 @@ type RedisKey struct { ExpireAt time.Time } +type ApiProduct struct { + Name string `db:"name"` + StripePriceID string `db:"stripe_price_id"` + Second int64 `db:"second"` + Hour int64 `db:"hour"` + Month int64 `db:"month"` + ValidFrom time.Time `db:"valid_from"` +} + type responseWriterDelegator struct { http.ResponseWriter written int64 @@ -136,30 +147,52 @@ func (r *responseWriterDelegator) Write(b []byte) (int, error) { func (r *responseWriterDelegator) WriteHeader(code int) { r.status = code r.ResponseWriter.WriteHeader(code) + r.wroteHeader = true } func (r *responseWriterDelegator) Status() int { return r.status } -var DefaultRequestCollector = func(req *http.Request) bool { +var DefaultRequestFilter = func(req *http.Request) bool { if req.URL == nil || !strings.HasPrefix(req.URL.Path, "/api") { return false } return true } -var requestSelector func(req *http.Request) bool +var requestFilter = DefaultRequestFilter +var requestFilterMu = &sync.RWMutex{} + +func SetRequestFilter(filter func(req *http.Request) bool) { + requestFilterMu.Lock() + defer requestFilterMu.Unlock() + requestFilter = filter +} + +func GetRequestFilter() func(req *http.Request) bool { + requestFilterMu.RLock() + defer requestFilterMu.RUnlock() + return requestFilter +} + +var maxBadRequestWeight int64 = 1 + +func SetMaxBadRquestWeight(weight int64) { + atomic.StoreInt64(&maxBadRequestWeight, weight) +} + +func GetMaxBadRquestWeight() int64 { + return atomic.LoadInt64(&maxBadRequestWeight) +} // Init initializes the RateLimiting middleware, the rateLimiting middleware will not work without calling Init first. The second parameter is a function the will get called on every request, it will only apply ratelimiting to requests when this func returns true. -func Init(redisAddress string, requestSelectorOpt func(req *http.Request) bool) { +func Init() { redisClient = redis.NewClient(&redis.Options{ - Addr: redisAddress, + Addr: utils.Config.RedisSessionStoreEndpoint, ReadTimeout: time.Second * 3, }) - requestSelector = requestSelectorOpt - initializedWg.Add(3) go func() { @@ -180,7 +213,6 @@ func Init(redisAddress string, requestSelectorOpt func(req *http.Request) bool) }() go func() { firstRun := true - for { err := updateRateLimits() if err != nil { @@ -211,15 +243,6 @@ func Init(redisAddress string, requestSelectorOpt func(req *http.Request) bool) time.Sleep(time.Second * 1) } }() - go func() { - for { - err := updateStats() - if err != nil { - logger.WithError(err).Errorf("error updating stats") - } - time.Sleep(time.Second * 10) - } - }() initializedWg.Wait() } @@ -228,7 +251,8 @@ func Init(redisAddress string, requestSelectorOpt func(req *http.Request) bool) func HttpMiddleware(next http.Handler) http.Handler { initializedWg.Wait() return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !requestSelector(r) { + f := GetRequestFilter() + if !f(r) { next.ServeHTTP(w, r) return } @@ -339,10 +363,9 @@ func updateRedisStatus() error { } // updateStats scans redis for ratelimit:stats:* keys and inserts them into postgres, if the key's truncated date is older than specified stats-truncation it will also delete the key in redis. 
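// For illustration (example values made up, not part of the original change): with the
// key format used in this change, a stats key written by rateLimitRequest has six
// colon-separated parts,
//
//	ratelimit:stats:2024-02-13-14:42:someApiKey:/api/v1/epoch/latest
//
// i.e. ratelimit:stats:<YYYY-MM-DD-HH>:<userId>:<apiKey>:<route>. The date part is
// truncated to statsTruncateDuration before the counts are upserted into postgres, and
// keys whose truncated date lies before the current truncated window are deleted once
// they have been persisted.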
-func updateStats() error { +func updateStats(redisClient *redis.Client) error { start := time.Now() defer func() { - logger.WithField("duration", time.Since(start)).Infof("updateStats") metrics.TaskDuration.WithLabelValues("ratelimit_updateStats").Observe(time.Since(start).Seconds()) }() @@ -356,7 +379,8 @@ func updateStats() error { cursor := uint64(0) for { - cmd := redisClient.Scan(ctx, cursor, "ratelimit:stats:*:*:*", 1000) + // ratelimit:stats:---::: + cmd := redisClient.Scan(ctx, cursor, "ratelimit:stats:*:*:*:*", 1000) if cmd.Err() != nil { return cmd.Err() } @@ -385,10 +409,10 @@ func updateStats() error { keysToDelete := []string{} keys := allKeys[start:end] - entries := make([]dbEntry, len(keys)) + entries := make([]DbEntry, len(keys)) for i, k := range keys { ks := strings.Split(k, ":") - if len(ks) != 5 { + if len(ks) != 6 { return fmt.Errorf("error parsing key %s: split-len != 5", k) } dateString := ks[2] @@ -400,10 +424,16 @@ func updateStats() error { if dateTruncated.Before(startTruncated) { keysToDelete = append(keysToDelete, k) } - entries[i] = dbEntry{ - Date: dateTruncated, - ApiKey: ks[3], - Path: ks[4], + userIdStr := ks[3] + userId, err := strconv.ParseInt(userIdStr, 10, 64) + if err != nil { + return fmt.Errorf("error parsing userId in key %s: %v", k, err) + } + entries[i] = DbEntry{ + Date: dateTruncated, + UserId: userId, + ApiKey: ks[4], + Endpoint: ks[5], } } @@ -454,14 +484,14 @@ func updateStats() error { return nil } -func updateStatsEntries(entries []dbEntry) error { +func updateStatsEntries(entries []DbEntry) error { tx, err := db.WriterDb.Beginx() if err != nil { return err } defer tx.Rollback() - numArgs := 4 + numArgs := 5 batchSize := 65535 / numArgs // max 65535 params per batch, since postgres uses int16 for binding input params valueArgs := make([]interface{}, 0, batchSize*numArgs) valueStrings := make([]string, 0, batchSize) @@ -474,8 +504,9 @@ func updateStatsEntries(entries []dbEntry) error { valueStrings = append(valueStrings, "("+strings.Join(valueStringArr, ",")+")") valueArgs = append(valueArgs, entry.Date) + valueArgs = append(valueArgs, entry.UserId) valueArgs = append(valueArgs, entry.ApiKey) - valueArgs = append(valueArgs, entry.Path) + valueArgs = append(valueArgs, entry.Endpoint) valueArgs = append(valueArgs, entry.Count) // logger.WithFields(logrus.Fields{"count": entry.Count, "apikey": entry.ApiKey, "path": entry.Path, "date": entry.Date}).Infof("inserting stats entry %v/%v", allIdx+1, len(entries)) @@ -484,7 +515,7 @@ func updateStatsEntries(entries []dbEntry) error { allIdx++ if batchIdx >= batchSize || allIdx >= len(entries) { - stmt := fmt.Sprintf(`INSERT INTO api_statistics (ts, apikey, call, count) VALUES %s ON CONFLICT (ts, apikey, call) DO UPDATE SET count = EXCLUDED.count`, strings.Join(valueStrings, ",")) + stmt := fmt.Sprintf(`INSERT INTO api_stats (ts, user_id, api_key, endpoint, count) VALUES %s ON CONFLICT (ts, user_id, api_key, endpoint) DO UPDATE SET count = EXCLUDED.count`, strings.Join(valueStrings, ",")) _, err := tx.Exec(stmt, valueArgs...) if err != nil { return err @@ -618,15 +649,23 @@ func updateRateLimits() error { // postRateLimit decrements the rate limit keys in redis if the status is not 200. 
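// For illustration (weights assumed): with the revised check below, only 5xx responses
// are refunded. A request that was charged weight 5 and then fails with a 503 gets at
// most maxBadRequestWeight (1 by default) subtracted again from its second/hour/month
// counters, while e.g. a 400 or 404 keeps its full charge and still counts towards the
// limits.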
func postRateLimit(rl *RateLimitResult, status int) error { - if status == 200 { + // if status == http.StatusOK { + if !(status >= 500 && status <= 599) { + // anything other than 5xx is considered successful and counts towards the rate limit return nil } - // if status is not 200 decrement keys since we do not count unsuccessful requests ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() pipe := redisClient.Pipeline() + + decrByWeight := rl.Weight + mbrw := GetMaxBadRquestWeight() + if decrByWeight > mbrw { + decrByWeight = mbrw + } + for _, k := range rl.RedisKeys { - pipe.DecrBy(ctx, k.Key, rl.Weight) + pipe.DecrBy(ctx, k.Key, decrByWeight) pipe.ExpireAt(ctx, k.Key, k.ExpireAt) // make sure all keys have a TTL } pipe.DecrBy(ctx, rl.RedisStatsKey, 1) @@ -686,13 +725,15 @@ func rateLimitRequest(r *http.Request) (*RateLimitResult, error) { timeUntilNextHourUtc := nextHourUtc.Sub(startUtc) timeUntilNextMonthUtc := nextMonthUtc.Sub(startUtc) - rateLimitSecondKey := fmt.Sprintf("ratelimit:second:%s:%s", res.Bucket, res.UserId) - rateLimitHourKey := fmt.Sprintf("ratelimit:hour:%04d-%02d-%02d-%02d:%s:%d", startUtc.Year(), startUtc.Month(), startUtc.Day(), startUtc.Hour(), res.Bucket, res.UserId) - rateLimitMonthKey := fmt.Sprintf("ratelimit:month:%04d-%02d:%s:%d", startUtc.Year(), startUtc.Month(), res.Bucket, res.UserId) - - statsKey := fmt.Sprintf("ratelimit:stats:%04d-%02d-%02d-%02d:%s:%s", startUtc.Year(), startUtc.Month(), startUtc.Day(), startUtc.Hour(), res.Key, res.Route) + rateLimitSecondKey := fmt.Sprintf("ratelimit:current:second:%s:%d", res.Bucket, res.UserId) + rateLimitHourKey := fmt.Sprintf("ratelimit:current:hour:%04d-%02d-%02d-%02d:%s:%d", startUtc.Year(), startUtc.Month(), startUtc.Day(), startUtc.Hour(), res.Bucket, res.UserId) + rateLimitMonthKey := fmt.Sprintf("ratelimit:current:month:%04d-%02d:%s:%d", startUtc.Year(), startUtc.Month(), res.Bucket, res.UserId) + statsKey := fmt.Sprintf("ratelimit:stats:%04d-%02d-%02d-%02d:%d:%s:%s", startUtc.Year(), startUtc.Month(), startUtc.Day(), startUtc.Hour(), res.UserId, res.Key, res.Route) if !res.IsValidKey { - statsKey = fmt.Sprintf("ratelimit:stats:%04d-%02d-%02d-%02d:%s:%s", startUtc.Year(), startUtc.Month(), startUtc.Day(), startUtc.Hour(), "nokey", res.Route) + rateLimitSecondKey = fmt.Sprintf("ratelimit:current:second:%s:%s", res.Bucket, res.IP) + rateLimitHourKey = fmt.Sprintf("ratelimit:current:hour:%04d-%02d-%02d-%02d:%s:%s", startUtc.Year(), startUtc.Month(), startUtc.Day(), startUtc.Hour(), res.Bucket, res.IP) + rateLimitMonthKey = fmt.Sprintf("ratelimit:current:month:%04d-%02d:%s:%s", startUtc.Year(), startUtc.Month(), res.Bucket, res.IP) + statsKey = fmt.Sprintf("ratelimit:stats:%04d-%02d-%02d-%02d:%d:%s:%s", startUtc.Year(), startUtc.Month(), startUtc.Day(), startUtc.Hour(), res.UserId, "nokey", res.Route) } res.RedisStatsKey = statsKey @@ -890,13 +931,18 @@ func (rl *FallbackRateLimiter) Handle(w http.ResponseWriter, r *http.Request, ne next(w, r) } -type ApiProduct struct { - Name string `db:"name"` - StripePriceID string `db:"stripe_price_id"` - Second int64 `db:"second"` - Hour int64 `db:"hour"` - Month int64 `db:"month"` - ValidFrom time.Time `db:"valid_from"` +func DBUpdater() { + redisClient = redis.NewClient(&redis.Options{ + Addr: utils.Config.RedisSessionStoreEndpoint, + ReadTimeout: time.Second * 3, + }) + for { + err := DBUpdate(redisClient) + if err != nil { + logger.WithError(err).Errorf("error updating ratelimits") + } + time.Sleep(time.Second * 10) + } } func 
DBGetUserApiRateLimit(userId int64) (*RateLimit, error) { @@ -918,12 +964,12 @@ func DBGetCurrentApiProducts() ([]*ApiProduct, error) { return apiProducts, err } -func DBUpdate() error { +func DBUpdate(redisClient *redis.Client) error { var err error start := time.Now() res, err := DBUpdateApiKeys() if err != nil { - return err + return fmt.Errorf("error updating api_keys: %w", err) } ra, err := res.RowsAffected() if err != nil { @@ -932,9 +978,9 @@ func DBUpdate() error { logrus.WithField("duration", time.Since(start)).WithField("updates", ra).Infof("updated api_keys") start = time.Now() - _, err = DBUpdateApiRatelimits() + res, err = DBUpdateApiRatelimits() if err != nil { - return err + return fmt.Errorf("error updating api_ratelimit: %w", err) } ra, err = res.RowsAffected() if err != nil { @@ -943,9 +989,9 @@ func DBUpdate() error { logrus.WithField("duration", time.Since(start)).WithField("updates", ra).Infof("updated api_ratelimits") start = time.Now() - _, err = DBInvalidateApiKeys() + res, err = DBInvalidateApiKeys() if err != nil { - return err + return fmt.Errorf("error invalidating api_keys: %w", err) } ra, err = res.RowsAffected() if err != nil { @@ -953,6 +999,13 @@ func DBUpdate() error { } logrus.WithField("duration", time.Since(start)).WithField("updates", ra).Infof("invalidated api_keys") + start = time.Now() + err = updateStats(redisClient) + if err != nil { + return fmt.Errorf("error updating stats: %w", err) + } + logrus.WithField("duration", time.Since(start)).Infof("updated stats") + return nil } @@ -961,7 +1014,7 @@ func DBInvalidateApiKeys() (sql.Result, error) { return db.FrontendWriterDB.Exec(` update api_keys set changed_at = now(), valid_until = now() - where valid_until > now() and not exists (select apikey from users where aid = api_keys.user_id)`) + where valid_until > now() and not exists (select api_key from users where id = api_keys.user_id)`) } func DBUpdateApiKeys() (sql.Result, error) { diff --git a/types/config.go b/types/config.go index 222d48ad45..919f65088d 100644 --- a/types/config.go +++ b/types/config.go @@ -132,8 +132,6 @@ type Config struct { Sapphire string `yaml:"sapphire" envconfig:"FRONTEND_STRIPE_SAPPHIRE"` Emerald string `yaml:"emerald" envconfig:"FRONTEND_STRIPE_EMERALD"` Diamond string `yaml:"diamond" envconfig:"FRONTEND_STRIPE_DIAMOND"` - Custom1 string `yaml:"custom1" envconfig:"FRONTEND_STRIPE_CUSTOM_1"` - Custom2 string `yaml:"custom2" envconfig:"FRONTEND_STRIPE_CUSTOM_2"` Whale string `yaml:"whale" envconfig:"FRONTEND_STRIPE_WHALE"` Goldfish string `yaml:"goldfish" envconfig:"FRONTEND_STRIPE_GOLDFISH"` Plankton string `yaml:"plankton" envconfig:"FRONTEND_STRIPE_PLANKTON"` From fb5d67973317b94715601a416e9dd3456a982ef0 Mon Sep 17 00:00:00 2001 From: Patrick Pfeiffer Date: Tue, 13 Feb 2024 13:24:53 +0100 Subject: [PATCH 15/42] (BIDS-2872) wip --- ratelimit/ratelimit.go | 152 ++++++++++++++++++++++++++--------------- 1 file changed, 97 insertions(+), 55 deletions(-) diff --git a/ratelimit/ratelimit.go b/ratelimit/ratelimit.go index d33a570043..11bc571f77 100644 --- a/ratelimit/ratelimit.go +++ b/ratelimit/ratelimit.go @@ -948,19 +948,19 @@ func DBUpdater() { func DBGetUserApiRateLimit(userId int64) (*RateLimit, error) { rl := &RateLimit{} err := db.FrontendWriterDB.Get(rl, ` - select second, hour, month - from api_ratelimits - where user_id = $1`, userId) + select second, hour, month + from api_ratelimits + where user_id = $1`, userId) return rl, err } func DBGetCurrentApiProducts() ([]*ApiProduct, error) { apiProducts := 
[]*ApiProduct{} err := db.FrontendWriterDB.Select(&apiProducts, ` - select distinct on (name) name, stripe_price_id, second, hour, month, valid_from - from api_products - where valid_from <= now() - order by name, valid_from desc`) + select distinct on (name) name, stripe_price_id, second, hour, month, valid_from + from api_products + where valid_from <= now() + order by name, valid_from desc`) return apiProducts, err } @@ -988,6 +988,17 @@ func DBUpdate(redisClient *redis.Client) error { } logrus.WithField("duration", time.Since(start)).WithField("updates", ra).Infof("updated api_ratelimits") + start = time.Now() + res, err = DBUpdateUnlimitedRatelimits() + if err != nil { + return fmt.Errorf("error updating unlikmited api_ratelimit: %w", err) + } + ra, err = res.RowsAffected() + if err != nil { + return err + } + logrus.WithField("duration", time.Since(start)).WithField("updates", ra).Infof("updated unlimited api_ratelimits") + start = time.Now() res, err = DBInvalidateApiKeys() if err != nil { @@ -1009,65 +1020,96 @@ func DBUpdate(redisClient *redis.Client) error { return nil } -// DBInvalidateApiKeys invalidates api-keys where there is no corresponding user func DBInvalidateApiKeys() (sql.Result, error) { return db.FrontendWriterDB.Exec(` - update api_keys - set changed_at = now(), valid_until = now() - where valid_until > now() and not exists (select api_key from users where id = api_keys.user_id)`) + update api_keys + set changed_at = now(), valid_until = now() + where valid_until > now() and not exists (select api_key from users where id = api_keys.user_id)`) } func DBUpdateApiKeys() (sql.Result, error) { return db.FrontendWriterDB.Exec( `insert into api_keys (user_id, api_key, valid_until, changed_at) - select - id as user_id, - api_key, - to_timestamp('3000-01-01', 'YYYY-MM-DD') as valid_until, - now() as changed_at - from users - where api_key is not null and not exists (select user_id from api_keys where api_keys.user_id = users.id) - on conflict (user_id, api_key) do update set - valid_until = excluded.valid_until, - changed_at = excluded.changed_at - where api_keys.valid_until != excluded.valid_until`, + select + id as user_id, + api_key, + to_timestamp('3000-01-01', 'YYYY-MM-DD') as valid_until, + now() as changed_at + from users + where api_key is not null and not exists (select user_id from api_keys where api_keys.user_id = users.id) + on conflict (user_id, api_key) do update set + valid_until = excluded.valid_until, + changed_at = excluded.changed_at + where api_keys.valid_until != excluded.valid_until`, ) } func DBUpdateApiRatelimits() (sql.Result, error) { return db.FrontendWriterDB.Exec( `with - current_api_products as ( - select distinct on (name) name, stripe_price_id, second, hour, month, valid_from - from api_products - where valid_from <= now() - order by name, valid_from desc - ) - insert into api_ratelimits (user_id, second, hour, month, valid_until, changed_at) - select - u.id as user_id, - greatest(coalesce(cap1.second,0),coalesce(cap2.second,0)) as second, - greatest(coalesce(cap1.hour ,0),coalesce(cap2.hour ,0)) as hour, - greatest(coalesce(cap1.month ,0),coalesce(cap2.month ,0)) as month, - to_timestamp('3000-01-01', 'YYYY-MM-DD') as valid_until, - now() as changed_at - from users u - left join users_stripe_subscriptions uss on uss.customer_id = u.stripe_customer_id and uss.active = true - left join current_api_products cap on cap.stripe_price_id = uss.price_id - left join current_api_products cap1 on cap1.name = coalesce(cap.name,'free') - left join 
app_subs_view asv on asv.user_id = u.id and asv.active = true - left join current_api_products cap2 on cap2.name = coalesce(asv.product_id,'free') - left join api_ratelimits ar on ar.user_id = u.id - where - cap1.name != 'free' or cap2.name != 'free' or ar.user_id is not null - on conflict (user_id) do update set - second = excluded.second, - hour = excluded.hour, - month = excluded.month, - valid_until = excluded.valid_until, - changed_at = now() - where - api_ratelimits.second != excluded.second - or api_ratelimits.hour != excluded.hour - or api_ratelimits.month != excluded.month`) + current_api_products as ( + select distinct on (name) name, stripe_price_id, second, hour, month, valid_from + from api_products + where valid_from <= now() + order by name, valid_from desc + ) + insert into api_ratelimits (user_id, second, hour, month, valid_until, changed_at) + select + u.id as user_id, + greatest(coalesce(cap1.second,0),coalesce(cap2.second,0)) as second, + greatest(coalesce(cap1.hour ,0),coalesce(cap2.hour ,0)) as hour, + greatest(coalesce(cap1.month ,0),coalesce(cap2.month ,0)) as month, + to_timestamp('3000-01-01', 'YYYY-MM-DD') as valid_until, + now() as changed_at + from users u + left join users_stripe_subscriptions uss on uss.customer_id = u.stripe_customer_id and uss.active = true + left join current_api_products cap on cap.stripe_price_id = uss.price_id + left join current_api_products cap1 on cap1.name = coalesce(cap.name,'free') + left join app_subs_view asv on asv.user_id = u.id and asv.active = true + left join current_api_products cap2 on cap2.name = coalesce(asv.product_id,'free') + left join api_ratelimits ar on ar.user_id = u.id + where + cap1.name != 'free' or cap2.name != 'free' or ar.user_id is not null + on conflict (user_id) do update set + second = excluded.second, + hour = excluded.hour, + month = excluded.month, + valid_until = excluded.valid_until, + changed_at = now() + where + api_ratelimits.second != excluded.second + or api_ratelimits.hour != excluded.hour + or api_ratelimits.month != excluded.month`) +} + +func DBUpdateUnlimitedRatelimits() (sql.Result, error) { + return db.FrontendWriterDB.Exec( + `with + unlimited_ratelimit as ( + select second, hour, month + from api_products + where name = 'unlimited' and valid_from <= now() + order by valid_from desc + limit 1 + ) + insert into api_ratelimits (user_id, second, hour, month, valid_until, changed_at) + select + id as user_id, + unlimited_ratelimit.second, + unlimited_ratelimit.hour, + unlimited_ratelimit.month, + to_timestamp('3000-01-01', 'YYYY-MM-DD') as valid_until, + now() as changed_at + from unlimited_ratelimit, users + where user_group = 'ADMIN' + on conflict (user_id) do update set + second = excluded.second, + hour = excluded.hour, + month = excluded.month, + valid_until = excluded.valid_until, + changed_at = now() + where + api_ratelimits.second != excluded.second + or api_ratelimits.hour != excluded.hour + or api_ratelimits.month != excluded.month`) } From cde7119fd7616ef5f68f8ed4590abbee36f07e66 Mon Sep 17 00:00:00 2001 From: Patrick Pfeiffer Date: Tue, 13 Feb 2024 13:34:36 +0100 Subject: [PATCH 16/42] (BIDS-2872) wip --- ratelimit/ratelimit.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ratelimit/ratelimit.go b/ratelimit/ratelimit.go index 11bc571f77..427c1d0fc4 100644 --- a/ratelimit/ratelimit.go +++ b/ratelimit/ratelimit.go @@ -1024,7 +1024,7 @@ func DBInvalidateApiKeys() (sql.Result, error) { return db.FrontendWriterDB.Exec(` update api_keys set changed_at = now(), 
valid_until = now() - where valid_until > now() and not exists (select api_key from users where id = api_keys.user_id)`) + where valid_until > now() and not exists (select id from users where id = api_keys.user_id)`) } func DBUpdateApiKeys() (sql.Result, error) { From c2c313e64edceb54be1754b1b17a17f7483ea7ad Mon Sep 17 00:00:00 2001 From: Patrick Pfeiffer Date: Tue, 13 Feb 2024 14:04:47 +0100 Subject: [PATCH 17/42] (BIDS-2872) wip --- ratelimit/ratelimit.go | 143 ++++++++++++++++++++++------------------- 1 file changed, 78 insertions(+), 65 deletions(-) diff --git a/ratelimit/ratelimit.go b/ratelimit/ratelimit.go index 427c1d0fc4..e91cea0be5 100644 --- a/ratelimit/ratelimit.go +++ b/ratelimit/ratelimit.go @@ -48,6 +48,8 @@ const ( defaultBucket = "default" // if no bucket is set for a route, use this one statsTruncateDuration = time.Hour * 1 // ratelimit-stats are truncated to this duration + + updateInterval = time.Second * 2 ) var NoKeyRateLimit = &RateLimit{ @@ -208,7 +210,7 @@ func Init() { initializedWg.Done() firstRun = false } - time.Sleep(time.Second * 10) + time.Sleep(updateInterval) } }() go func() { @@ -224,7 +226,7 @@ func Init() { initializedWg.Done() firstRun = false } - time.Sleep(time.Second * 10) + time.Sleep(updateInterval) } }() go func() { @@ -931,20 +933,6 @@ func (rl *FallbackRateLimiter) Handle(w http.ResponseWriter, r *http.Request, ne next(w, r) } -func DBUpdater() { - redisClient = redis.NewClient(&redis.Options{ - Addr: utils.Config.RedisSessionStoreEndpoint, - ReadTimeout: time.Second * 3, - }) - for { - err := DBUpdate(redisClient) - if err != nil { - logger.WithError(err).Errorf("error updating ratelimits") - } - time.Sleep(time.Second * 10) - } -} - func DBGetUserApiRateLimit(userId int64) (*RateLimit, error) { rl := &RateLimit{} err := db.FrontendWriterDB.Get(rl, ` @@ -964,60 +952,85 @@ func DBGetCurrentApiProducts() ([]*ApiProduct, error) { return apiProducts, err } -func DBUpdate(redisClient *redis.Client) error { - var err error - start := time.Now() - res, err := DBUpdateApiKeys() - if err != nil { - return fmt.Errorf("error updating api_keys: %w", err) - } - ra, err := res.RowsAffected() - if err != nil { - return err - } - logrus.WithField("duration", time.Since(start)).WithField("updates", ra).Infof("updated api_keys") - - start = time.Now() - res, err = DBUpdateApiRatelimits() - if err != nil { - return fmt.Errorf("error updating api_ratelimit: %w", err) - } - ra, err = res.RowsAffected() - if err != nil { - return err +func DBUpdater() { + redisClient = redis.NewClient(&redis.Options{ + Addr: utils.Config.RedisSessionStoreEndpoint, + ReadTimeout: time.Second * 3, + }) + for { + DBUpdate(redisClient) + time.Sleep(updateInterval) } - logrus.WithField("duration", time.Since(start)).WithField("updates", ra).Infof("updated api_ratelimits") +} - start = time.Now() - res, err = DBUpdateUnlimitedRatelimits() - if err != nil { - return fmt.Errorf("error updating unlikmited api_ratelimit: %w", err) - } - ra, err = res.RowsAffected() - if err != nil { - return err - } - logrus.WithField("duration", time.Since(start)).WithField("updates", ra).Infof("updated unlimited api_ratelimits") +func DBUpdate(redisClient *redis.Client) { + wg := sync.WaitGroup{} + wg.Add(2) + go func() { + defer wg.Done() + start := time.Now() + err := updateStats(redisClient) + if err != nil { + logger.WithError(err).Errorf("error updating stats") + return + } + logrus.WithField("duration", time.Since(start)).Infof("updated stats") + }() + go func() { + defer wg.Done() + start := 
time.Now() + res, err := DBUpdateApiKeys() + if err != nil { + logger.WithError(err).Errorf("error updating api_keys") + return + } + ra, err := res.RowsAffected() + if err != nil { + logger.WithError(err).Errorf("error getting rows affected") + return + } + logrus.WithField("duration", time.Since(start)).WithField("updates", ra).Infof("updated api_keys") - start = time.Now() - res, err = DBInvalidateApiKeys() - if err != nil { - return fmt.Errorf("error invalidating api_keys: %w", err) - } - ra, err = res.RowsAffected() - if err != nil { - return err - } - logrus.WithField("duration", time.Since(start)).WithField("updates", ra).Infof("invalidated api_keys") + start = time.Now() + res, err = DBUpdateApiRatelimits() + if err != nil { + logger.WithError(err).Errorf("error updating api_ratelimit") + return + } + ra, err = res.RowsAffected() + if err != nil { + logger.WithError(err).Errorf("error getting rows affected") + return + } + logrus.WithField("duration", time.Since(start)).WithField("updates", ra).Infof("updated api_ratelimits") - start = time.Now() - err = updateStats(redisClient) - if err != nil { - return fmt.Errorf("error updating stats: %w", err) - } - logrus.WithField("duration", time.Since(start)).Infof("updated stats") + start = time.Now() + res, err = DBUpdateUnlimitedRatelimits() + if err != nil { + logger.WithError(err).Errorf("error updating unlikmited api_ratelimit") + return + } + ra, err = res.RowsAffected() + if err != nil { + logger.WithError(err).Errorf("error getting rows affected") + return + } + logrus.WithField("duration", time.Since(start)).WithField("updates", ra).Infof("updated unlimited api_ratelimits") - return nil + start = time.Now() + res, err = DBInvalidateApiKeys() + if err != nil { + logger.WithError(err).Errorf("error invalidating api_keys") + return + } + ra, err = res.RowsAffected() + if err != nil { + logger.WithError(err).Errorf("error getting rows affected") + return + } + logrus.WithField("duration", time.Since(start)).WithField("updates", ra).Infof("invalidated api_keys") + }() + wg.Wait() } func DBInvalidateApiKeys() (sql.Result, error) { From da5307f300dc0af53041d7b48b0ca2aa4160756c Mon Sep 17 00:00:00 2001 From: Patrick Pfeiffer Date: Tue, 13 Feb 2024 14:17:43 +0100 Subject: [PATCH 18/42] (BIDS-2872) wip --- db/migrations/20240125120000_add_ratelimits.sql | 2 +- ratelimit/ratelimit.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/db/migrations/20240125120000_add_ratelimits.sql b/db/migrations/20240125120000_add_ratelimits.sql index 62df738c4c..b76ac767d9 100644 --- a/db/migrations/20240125120000_add_ratelimits.sql +++ b/db/migrations/20240125120000_add_ratelimits.sql @@ -50,7 +50,7 @@ CREATE TABLE IF NOT EXISTS PRIMARY KEY (name, valid_from) ); -SELECT 'up SQL query - add table api_products'; +SELECT 'up SQL query - add table api_stats'; CREATE TABLE IF NOT EXISTS api_stats ( ts TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT NOW(), diff --git a/ratelimit/ratelimit.go b/ratelimit/ratelimit.go index e91cea0be5..d437f62a81 100644 --- a/ratelimit/ratelimit.go +++ b/ratelimit/ratelimit.go @@ -49,7 +49,7 @@ const ( statsTruncateDuration = time.Hour * 1 // ratelimit-stats are truncated to this duration - updateInterval = time.Second * 2 + updateInterval = time.Second * 60 // how often to update ratelimits, weights and stats ) var NoKeyRateLimit = &RateLimit{ From 4119e07addaea2a37d913af28ca3c6f7fbbceaa3 Mon Sep 17 00:00:00 2001 From: Patrick Pfeiffer Date: Tue, 13 Feb 2024 14:56:03 +0100 Subject: [PATCH 19/42] (BIDS-2872) wip 
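Moves the stats sink back to the pre-existing api_statistics table (ts, apikey, call,
count), drops the api_stats table from the migration again and shortens the update
interval to 1s.

A minimal sketch of reading the persisted rows back; the helper name is hypothetical and
only the columns used by updateStatsEntries are assumed:

    // illustrative only, not part of this change
    func dumpRecentApiStats() error {
        rows, err := db.WriterDb.Query(
            `SELECT ts, apikey, call, count FROM api_statistics WHERE ts >= now() - interval '24 hours'`)
        if err != nil {
            return err
        }
        defer rows.Close()
        for rows.Next() {
            var ts time.Time
            var apikey, call string
            var count int64
            if err := rows.Scan(&ts, &apikey, &call, &count); err != nil {
                return err
            }
            logger.WithFields(logrus.Fields{"ts": ts, "apikey": apikey, "call": call, "count": count}).Info("api stat")
        }
        return rows.Err()
    }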
--- db/migrations/20240125120000_add_ratelimits.sql | 17 ----------------- ratelimit/ratelimit.go | 7 +++---- 2 files changed, 3 insertions(+), 21 deletions(-) diff --git a/db/migrations/20240125120000_add_ratelimits.sql b/db/migrations/20240125120000_add_ratelimits.sql index b76ac767d9..bef9671681 100644 --- a/db/migrations/20240125120000_add_ratelimits.sql +++ b/db/migrations/20240125120000_add_ratelimits.sql @@ -49,19 +49,6 @@ CREATE TABLE IF NOT EXISTS valid_from TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT TO_TIMESTAMP(0), PRIMARY KEY (name, valid_from) ); - -SELECT 'up SQL query - add table api_stats'; -CREATE TABLE IF NOT EXISTS - api_stats ( - ts TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT NOW(), - user_id INT NOT NULL, - api_key VARCHAR(256) NOT NULL, - endpoint TEXT NOT NULL, - count INT NOT NULL, - PRIMARY KEY (ts, user_id, api_key, endpoint) - ); - -CREATE INDEX IF NOT EXISTS idx_api_stats_ts_user_id ON api_stats (ts, user_id); -- +goose StatementEnd -- +goose Down @@ -78,8 +65,4 @@ SELECT 'down SQL query - drop table api_weights'; DROP TABLE IF EXISTS api_weights; SELECT 'down SQL query - drop table api_products'; DROP TABLE IF EXISTS api_products; -SELECT 'down SQL query - drop table api_stats'; -DROP TABLE IF EXISTS api_stats; -SELECT 'down SQL query - drop index idx_api_stats_ts_user_id'; -DROP INDEX IF EXISTS idx_api_stats_ts_user_id; -- +goose StatementEnd diff --git a/ratelimit/ratelimit.go b/ratelimit/ratelimit.go index d437f62a81..9debbb7376 100644 --- a/ratelimit/ratelimit.go +++ b/ratelimit/ratelimit.go @@ -49,7 +49,7 @@ const ( statsTruncateDuration = time.Hour * 1 // ratelimit-stats are truncated to this duration - updateInterval = time.Second * 60 // how often to update ratelimits, weights and stats + updateInterval = time.Second * 1 // how often to update ratelimits, weights and stats ) var NoKeyRateLimit = &RateLimit{ @@ -493,7 +493,7 @@ func updateStatsEntries(entries []DbEntry) error { } defer tx.Rollback() - numArgs := 5 + numArgs := 4 batchSize := 65535 / numArgs // max 65535 params per batch, since postgres uses int16 for binding input params valueArgs := make([]interface{}, 0, batchSize*numArgs) valueStrings := make([]string, 0, batchSize) @@ -506,7 +506,6 @@ func updateStatsEntries(entries []DbEntry) error { valueStrings = append(valueStrings, "("+strings.Join(valueStringArr, ",")+")") valueArgs = append(valueArgs, entry.Date) - valueArgs = append(valueArgs, entry.UserId) valueArgs = append(valueArgs, entry.ApiKey) valueArgs = append(valueArgs, entry.Endpoint) valueArgs = append(valueArgs, entry.Count) @@ -517,7 +516,7 @@ func updateStatsEntries(entries []DbEntry) error { allIdx++ if batchIdx >= batchSize || allIdx >= len(entries) { - stmt := fmt.Sprintf(`INSERT INTO api_stats (ts, user_id, api_key, endpoint, count) VALUES %s ON CONFLICT (ts, user_id, api_key, endpoint) DO UPDATE SET count = EXCLUDED.count`, strings.Join(valueStrings, ",")) + stmt := fmt.Sprintf(`INSERT INTO api_statistics (ts, apikey, call, count) VALUES %s ON CONFLICT (ts, apikey, call) DO UPDATE SET count = EXCLUDED.count`, strings.Join(valueStrings, ",")) _, err := tx.Exec(stmt, valueArgs...) 
if err != nil { return err From 1581cce9932bef033ef72687491b8ac43dd651c8 Mon Sep 17 00:00:00 2001 From: Patrick Pfeiffer Date: Wed, 14 Feb 2024 14:25:40 +0100 Subject: [PATCH 20/42] (BIDS-2872) wip --- local-deployment/provision-explorer-config.sh | 1 + ratelimit/ratelimit.go | 10 ++++++++-- types/config.go | 13 +++++++------ 3 files changed, 16 insertions(+), 8 deletions(-) diff --git a/local-deployment/provision-explorer-config.sh b/local-deployment/provision-explorer-config.sh index 38d7b15dc8..a2c72d3a2c 100644 --- a/local-deployment/provision-explorer-config.sh +++ b/local-deployment/provision-explorer-config.sh @@ -101,6 +101,7 @@ frontend: sapphire: price_sapphire emerald: price_emerald diamond: price_diamond + # rateLimitUpdateInterval: 1s indexer: # fullIndexOnStartup: false # Perform a one time full db index on startup diff --git a/ratelimit/ratelimit.go b/ratelimit/ratelimit.go index 9debbb7376..c264126fec 100644 --- a/ratelimit/ratelimit.go +++ b/ratelimit/ratelimit.go @@ -48,8 +48,6 @@ const ( defaultBucket = "default" // if no bucket is set for a route, use this one statsTruncateDuration = time.Hour * 1 // ratelimit-stats are truncated to this duration - - updateInterval = time.Second * 1 // how often to update ratelimits, weights and stats ) var NoKeyRateLimit = &RateLimit{ @@ -58,6 +56,8 @@ var NoKeyRateLimit = &RateLimit{ Month: DefaultRateLimitMonth, } +var updateInterval = time.Second * 60 // how often to update ratelimits, weights and stats + var FreeRatelimit = NoKeyRateLimit var redisClient *redis.Client @@ -195,6 +195,12 @@ func Init() { ReadTimeout: time.Second * 3, }) + updateInterval = utils.Config.Frontend.RatelimitUpdateInterval + if updateInterval == 0 { + logger.Warnf("updateInterval is not set, setting to 60s") + updateInterval = time.Second * 60 + } + initializedWg.Add(3) go func() { diff --git a/types/config.go b/types/config.go index 919f65088d..34b92e228f 100644 --- a/types/config.go +++ b/types/config.go @@ -137,12 +137,13 @@ type Config struct { Plankton string `yaml:"plankton" envconfig:"FRONTEND_STRIPE_PLANKTON"` Webhook string `yaml:"webhook" envconfig:"FRONTEND_STRIPE_WEBHOOK"` } - SessionSecret string `yaml:"sessionSecret" envconfig:"FRONTEND_SESSION_SECRET"` - JwtSigningSecret string `yaml:"jwtSigningSecret" envconfig:"FRONTEND_JWT_SECRET"` - JwtIssuer string `yaml:"jwtIssuer" envconfig:"FRONTEND_JWT_ISSUER"` - JwtValidityInMinutes int `yaml:"jwtValidityInMinutes" envconfig:"FRONTEND_JWT_VALIDITY_INMINUTES"` - MaxMailsPerEmailPerDay int `yaml:"maxMailsPerEmailPerDay" envconfig:"FRONTEND_MAX_MAIL_PER_EMAIL_PER_DAY"` - Mail struct { + RatelimitUpdateInterval time.Duration `yaml:"ratelimitUpdateInterval" envconfig:"FRONTEND_RATELIMIT_UPDATE_INTERVAL"` + SessionSecret string `yaml:"sessionSecret" envconfig:"FRONTEND_SESSION_SECRET"` + JwtSigningSecret string `yaml:"jwtSigningSecret" envconfig:"FRONTEND_JWT_SECRET"` + JwtIssuer string `yaml:"jwtIssuer" envconfig:"FRONTEND_JWT_ISSUER"` + JwtValidityInMinutes int `yaml:"jwtValidityInMinutes" envconfig:"FRONTEND_JWT_VALIDITY_INMINUTES"` + MaxMailsPerEmailPerDay int `yaml:"maxMailsPerEmailPerDay" envconfig:"FRONTEND_MAX_MAIL_PER_EMAIL_PER_DAY"` + Mail struct { SMTP struct { Server string `yaml:"server" envconfig:"FRONTEND_MAIL_SMTP_SERVER"` Host string `yaml:"host" envconfig:"FRONTEND_MAIL_SMTP_HOST"` From 495042a6c212b3e10fcb0f5e0f88223671e332bf Mon Sep 17 00:00:00 2001 From: Patrick Pfeiffer Date: Wed, 14 Feb 2024 14:42:00 +0100 Subject: [PATCH 21/42] (BIDS-2872) wip --- 
local-deployment/provision-explorer-config.sh | 2 +- ratelimit/ratelimit.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/local-deployment/provision-explorer-config.sh b/local-deployment/provision-explorer-config.sh index a2c72d3a2c..c2f70c12c5 100644 --- a/local-deployment/provision-explorer-config.sh +++ b/local-deployment/provision-explorer-config.sh @@ -101,7 +101,7 @@ frontend: sapphire: price_sapphire emerald: price_emerald diamond: price_diamond - # rateLimitUpdateInterval: 1s + rateLimitUpdateInterval: 1s indexer: # fullIndexOnStartup: false # Perform a one time full db index on startup diff --git a/ratelimit/ratelimit.go b/ratelimit/ratelimit.go index c264126fec..65e5a387fe 100644 --- a/ratelimit/ratelimit.go +++ b/ratelimit/ratelimit.go @@ -196,8 +196,8 @@ func Init() { }) updateInterval = utils.Config.Frontend.RatelimitUpdateInterval - if updateInterval == 0 { - logger.Warnf("updateInterval is not set, setting to 60s") + if updateInterval < time.Second { + logger.Warnf("updateInterval is below 1s, setting to 60s") updateInterval = time.Second * 60 } From 3c657cfa8520dbbd734d0b7a3b4fcab631e50528 Mon Sep 17 00:00:00 2001 From: Patrick Pfeiffer Date: Wed, 14 Feb 2024 15:42:11 +0100 Subject: [PATCH 22/42] (BIDS-2872) wip --- ratelimit/ratelimit.go | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/ratelimit/ratelimit.go b/ratelimit/ratelimit.go index 65e5a387fe..b6037a18d6 100644 --- a/ratelimit/ratelimit.go +++ b/ratelimit/ratelimit.go @@ -516,7 +516,7 @@ func updateStatsEntries(entries []DbEntry) error { valueArgs = append(valueArgs, entry.Endpoint) valueArgs = append(valueArgs, entry.Count) - // logger.WithFields(logrus.Fields{"count": entry.Count, "apikey": entry.ApiKey, "path": entry.Path, "date": entry.Date}).Infof("inserting stats entry %v/%v", allIdx+1, len(entries)) + // logger.WithFields(logger.Fields{"count": entry.Count, "apikey": entry.ApiKey, "path": entry.Path, "date": entry.Date}).Infof("inserting stats entry %v/%v", allIdx+1, len(entries)) batchIdx++ allIdx++ @@ -958,6 +958,7 @@ func DBGetCurrentApiProducts() ([]*ApiProduct, error) { } func DBUpdater() { + logger.WithField("redis", utils.Config.RedisSessionStoreEndpoint).Infof("starting db updater") redisClient = redis.NewClient(&redis.Options{ Addr: utils.Config.RedisSessionStoreEndpoint, ReadTimeout: time.Second * 3, @@ -979,7 +980,7 @@ func DBUpdate(redisClient *redis.Client) { logger.WithError(err).Errorf("error updating stats") return } - logrus.WithField("duration", time.Since(start)).Infof("updated stats") + logger.WithField("duration", time.Since(start)).Infof("updated stats") }() go func() { defer wg.Done() @@ -994,7 +995,7 @@ func DBUpdate(redisClient *redis.Client) { logger.WithError(err).Errorf("error getting rows affected") return } - logrus.WithField("duration", time.Since(start)).WithField("updates", ra).Infof("updated api_keys") + logger.WithField("duration", time.Since(start)).WithField("updates", ra).Infof("updated api_keys") start = time.Now() res, err = DBUpdateApiRatelimits() @@ -1007,7 +1008,7 @@ func DBUpdate(redisClient *redis.Client) { logger.WithError(err).Errorf("error getting rows affected") return } - logrus.WithField("duration", time.Since(start)).WithField("updates", ra).Infof("updated api_ratelimits") + logger.WithField("duration", time.Since(start)).WithField("updates", ra).Infof("updated api_ratelimits") start = time.Now() res, err = DBUpdateUnlimitedRatelimits() @@ -1020,7 +1021,7 @@ func DBUpdate(redisClient *redis.Client) { 
logger.WithError(err).Errorf("error getting rows affected") return } - logrus.WithField("duration", time.Since(start)).WithField("updates", ra).Infof("updated unlimited api_ratelimits") + logger.WithField("duration", time.Since(start)).WithField("updates", ra).Infof("updated unlimited api_ratelimits") start = time.Now() res, err = DBInvalidateApiKeys() @@ -1033,7 +1034,7 @@ func DBUpdate(redisClient *redis.Client) { logger.WithError(err).Errorf("error getting rows affected") return } - logrus.WithField("duration", time.Since(start)).WithField("updates", ra).Infof("invalidated api_keys") + logger.WithField("duration", time.Since(start)).WithField("updates", ra).Infof("invalidated api_keys") }() wg.Wait() } From 41ce3b454ba503e26d14e7c9b0697f7859beb51d Mon Sep 17 00:00:00 2001 From: Patrick Pfeiffer Date: Wed, 14 Feb 2024 16:29:12 +0100 Subject: [PATCH 23/42] (BIDS-2872) wip --- ratelimit/ratelimit.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/ratelimit/ratelimit.go b/ratelimit/ratelimit.go index b6037a18d6..b431e70a52 100644 --- a/ratelimit/ratelimit.go +++ b/ratelimit/ratelimit.go @@ -1075,9 +1075,9 @@ func DBUpdateApiRatelimits() (sql.Result, error) { insert into api_ratelimits (user_id, second, hour, month, valid_until, changed_at) select u.id as user_id, - greatest(coalesce(cap1.second,0),coalesce(cap2.second,0)) as second, - greatest(coalesce(cap1.hour ,0),coalesce(cap2.hour ,0)) as hour, - greatest(coalesce(cap1.month ,0),coalesce(cap2.month ,0)) as month, + max(greatest(coalesce(cap1.second,0),coalesce(cap2.second,0))) as second, + max(greatest(coalesce(cap1.hour ,0),coalesce(cap2.hour ,0))) as hour, + max(greatest(coalesce(cap1.month ,0),coalesce(cap2.month ,0))) as month, to_timestamp('3000-01-01', 'YYYY-MM-DD') as valid_until, now() as changed_at from users u @@ -1089,6 +1089,7 @@ func DBUpdateApiRatelimits() (sql.Result, error) { left join api_ratelimits ar on ar.user_id = u.id where cap1.name != 'free' or cap2.name != 'free' or ar.user_id is not null + group by u.id, valid_until, changed_at on conflict (user_id) do update set second = excluded.second, hour = excluded.hour, From 2acfcca3ffeb1bf31fbd0ca4c1cf11e9a724756f Mon Sep 17 00:00:00 2001 From: Patrick Pfeiffer Date: Wed, 14 Feb 2024 16:41:51 +0100 Subject: [PATCH 24/42] (BIDS-2872) wip --- .../workflows/publish-ratelimit-images.yml | 21 +++++++++++++++++++ ratelimit/ratelimit.go | 7 ++++++- 2 files changed, 27 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/publish-ratelimit-images.yml diff --git a/.github/workflows/publish-ratelimit-images.yml b/.github/workflows/publish-ratelimit-images.yml new file mode 100644 index 0000000000..668ba7f1b2 --- /dev/null +++ b/.github/workflows/publish-ratelimit-images.yml @@ -0,0 +1,21 @@ +name: Publish Docker dencun images + +on: + # Trigger the workflow on push or pull request, + # but only for the staging branch + push: + branches: + - dencun + +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@master + - name: Publish to Dockerhub Registry + uses: elgohr/Publish-Docker-Github-Action@master + with: + name: gobitfly/eth2-beaconchain-explorer + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + tags: "dencun" diff --git a/ratelimit/ratelimit.go b/ratelimit/ratelimit.go index b431e70a52..22f743a32e 100644 --- a/ratelimit/ratelimit.go +++ b/ratelimit/ratelimit.go @@ -958,6 +958,11 @@ func DBGetCurrentApiProducts() ([]*ApiProduct, error) { } func DBUpdater() { + iv := 
utils.Config.Frontend.RatelimitUpdateInterval + if iv < time.Second { + logger.Warnf("updateInterval is below 1s, setting to 60s") + iv = time.Second * 60 + } logger.WithField("redis", utils.Config.RedisSessionStoreEndpoint).Infof("starting db updater") redisClient = redis.NewClient(&redis.Options{ Addr: utils.Config.RedisSessionStoreEndpoint, @@ -965,7 +970,7 @@ func DBUpdater() { }) for { DBUpdate(redisClient) - time.Sleep(updateInterval) + time.Sleep(iv) } } From d27e6858b0cc32489888bf819750ff09857488c2 Mon Sep 17 00:00:00 2001 From: Patrick Pfeiffer Date: Wed, 14 Feb 2024 16:42:43 +0100 Subject: [PATCH 25/42] (BIDS-2872) wip --- .github/workflows/publish-ratelimit-images.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/publish-ratelimit-images.yml b/.github/workflows/publish-ratelimit-images.yml index 668ba7f1b2..af83ac8d71 100644 --- a/.github/workflows/publish-ratelimit-images.yml +++ b/.github/workflows/publish-ratelimit-images.yml @@ -5,7 +5,7 @@ on: # but only for the staging branch push: branches: - - dencun + - ratelimit jobs: build: @@ -18,4 +18,4 @@ jobs: name: gobitfly/eth2-beaconchain-explorer username: ${{ secrets.DOCKER_USERNAME }} password: ${{ secrets.DOCKER_PASSWORD }} - tags: "dencun" + tags: "ratelimit" From fc3daeb01e7b65b7ecde1ad308a88d63f81c1f1a Mon Sep 17 00:00:00 2001 From: Patrick Pfeiffer Date: Thu, 15 Feb 2024 08:19:33 +0100 Subject: [PATCH 26/42] (BIDS-2872) wip --- .../20240125120000_add_ratelimits.sql | 4 +- local-deployment/provision-explorer-config.sh | 2 +- ratelimit/ratelimit.go | 133 +++++++----------- 3 files changed, 56 insertions(+), 83 deletions(-) diff --git a/db/migrations/20240125120000_add_ratelimits.sql b/db/migrations/20240125120000_add_ratelimits.sql index bef9671681..db934e90ac 100644 --- a/db/migrations/20240125120000_add_ratelimits.sql +++ b/db/migrations/20240125120000_add_ratelimits.sql @@ -19,8 +19,8 @@ CREATE TABLE IF NOT EXISTS api_keys ( user_id INT NOT NULL, api_key VARCHAR(256) NOT NULL, - valid_until TIMESTAMP WITHOUT TIME ZONE NOT NULL, - changed_at TIMESTAMP WITHOUT TIME ZONE NOT NULL, + valid_until TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT '9999-12-31 23:59:59', + changed_at TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP, PRIMARY KEY (user_id, api_key) ); diff --git a/local-deployment/provision-explorer-config.sh b/local-deployment/provision-explorer-config.sh index c2f70c12c5..4d38fb35ac 100644 --- a/local-deployment/provision-explorer-config.sh +++ b/local-deployment/provision-explorer-config.sh @@ -101,7 +101,7 @@ frontend: sapphire: price_sapphire emerald: price_emerald diamond: price_diamond - rateLimitUpdateInterval: 1s + ratelimitUpdateInterval: 1s indexer: # fullIndexOnStartup: false # Perform a one time full db index on startup diff --git a/ratelimit/ratelimit.go b/ratelimit/ratelimit.go index 22f743a32e..2a8001c83d 100644 --- a/ratelimit/ratelimit.go +++ b/ratelimit/ratelimit.go @@ -1015,19 +1015,6 @@ func DBUpdate(redisClient *redis.Client) { } logger.WithField("duration", time.Since(start)).WithField("updates", ra).Infof("updated api_ratelimits") - start = time.Now() - res, err = DBUpdateUnlimitedRatelimits() - if err != nil { - logger.WithError(err).Errorf("error updating unlikmited api_ratelimit") - return - } - ra, err = res.RowsAffected() - if err != nil { - logger.WithError(err).Errorf("error getting rows affected") - return - } - logger.WithField("duration", time.Since(start)).WithField("updates", ra).Infof("updated unlimited api_ratelimits") - 
start = time.Now() res, err = DBInvalidateApiKeys() if err != nil { @@ -1044,6 +1031,7 @@ func DBUpdate(redisClient *redis.Client) { wg.Wait() } +// DBInvalidateApiKeys invalidates api_keys that are not associated with a user. This func is only needed until api-key-mgmt is fully implemented - where users.apikey column is not used anymore. func DBInvalidateApiKeys() (sql.Result, error) { return db.FrontendWriterDB.Exec(` update api_keys @@ -1051,13 +1039,14 @@ func DBInvalidateApiKeys() (sql.Result, error) { where valid_until > now() and not exists (select id from users where id = api_keys.user_id)`) } +// DBUpdateApiKeys updates the api_keys table with the api_keys from the users table. This func is only needed until api-key-mgmt is fully implemented - where users.apikey column is not used anymore. func DBUpdateApiKeys() (sql.Result, error) { return db.FrontendWriterDB.Exec( `insert into api_keys (user_id, api_key, valid_until, changed_at) select id as user_id, api_key, - to_timestamp('3000-01-01', 'YYYY-MM-DD') as valid_until, + to_timestamp('9999-12-31 23:59:59', 'YYYY-MM-DD HH:MI:SS') as valid_until, now() as changed_at from users where api_key is not null and not exists (select user_id from api_keys where api_keys.user_id = users.id) @@ -1071,70 +1060,54 @@ func DBUpdateApiKeys() (sql.Result, error) { func DBUpdateApiRatelimits() (sql.Result, error) { return db.FrontendWriterDB.Exec( `with - current_api_products as ( - select distinct on (name) name, stripe_price_id, second, hour, month, valid_from - from api_products - where valid_from <= now() - order by name, valid_from desc - ) - insert into api_ratelimits (user_id, second, hour, month, valid_until, changed_at) - select - u.id as user_id, - max(greatest(coalesce(cap1.second,0),coalesce(cap2.second,0))) as second, - max(greatest(coalesce(cap1.hour ,0),coalesce(cap2.hour ,0))) as hour, - max(greatest(coalesce(cap1.month ,0),coalesce(cap2.month ,0))) as month, - to_timestamp('3000-01-01', 'YYYY-MM-DD') as valid_until, - now() as changed_at - from users u - left join users_stripe_subscriptions uss on uss.customer_id = u.stripe_customer_id and uss.active = true - left join current_api_products cap on cap.stripe_price_id = uss.price_id - left join current_api_products cap1 on cap1.name = coalesce(cap.name,'free') - left join app_subs_view asv on asv.user_id = u.id and asv.active = true - left join current_api_products cap2 on cap2.name = coalesce(asv.product_id,'free') - left join api_ratelimits ar on ar.user_id = u.id - where - cap1.name != 'free' or cap2.name != 'free' or ar.user_id is not null - group by u.id, valid_until, changed_at - on conflict (user_id) do update set - second = excluded.second, - hour = excluded.hour, - month = excluded.month, - valid_until = excluded.valid_until, - changed_at = now() - where - api_ratelimits.second != excluded.second - or api_ratelimits.hour != excluded.hour - or api_ratelimits.month != excluded.month`) -} - -func DBUpdateUnlimitedRatelimits() (sql.Result, error) { - return db.FrontendWriterDB.Exec( - `with - unlimited_ratelimit as ( - select second, hour, month - from api_products - where name = 'unlimited' and valid_from <= now() - order by valid_from desc - limit 1 - ) - insert into api_ratelimits (user_id, second, hour, month, valid_until, changed_at) - select - id as user_id, - unlimited_ratelimit.second, - unlimited_ratelimit.hour, - unlimited_ratelimit.month, - to_timestamp('3000-01-01', 'YYYY-MM-DD') as valid_until, - now() as changed_at - from unlimited_ratelimit, users - where 
user_group = 'ADMIN' - on conflict (user_id) do update set - second = excluded.second, - hour = excluded.hour, - month = excluded.month, - valid_until = excluded.valid_until, - changed_at = now() - where - api_ratelimits.second != excluded.second - or api_ratelimits.hour != excluded.hour - or api_ratelimits.month != excluded.month`) + current_api_products as ( + select distinct on (name) name, stripe_price_id, second, hour, month, valid_from + from api_products + where valid_from <= now() + order by name, valid_from desc + ) + insert into api_ratelimits (user_id, second, hour, month, valid_until, changed_at) + select + user_id, + max(second) as second, + max(hour) as hour, + max(month) as month, + to_timestamp('3000-01-01', 'YYYY-MM-DD') as valid_until, + now() as changed_at + from ( + -- set all current ratelimits to free + select user_id, cap.second, cap.hour, cap.month + from api_ratelimits + left join current_api_products cap on cap.name = 'free' + union + -- set ratelimits for stripe subscriptions + select u.id as user_id, cap.second, cap.hour, cap.month + from users_stripe_subscriptions uss + left join users u on u.stripe_customer_id = uss.customer_id + left join current_api_products cap on cap.stripe_price_id = uss.price_id + where uss.active = true and u.id is not null + union + -- set ratelimits for app subscriptions + select asv.user_id, cap.second, cap.hour, cap.month + from app_subs_view asv + left join current_api_products cap on cap.name = asv.product_id + where asv.active = true + union + -- set ratelimits for admins to unlimited + select u.id as user_id, cap.second, cap.hour, cap.month + from users u + left join current_api_products cap on cap.name = 'unlimited' + where u.user_group = 'ADMIN' and cap.second is not null + ) a + group by user_id + on conflict (user_id) do update set + second = excluded.second, + hour = excluded.hour, + month = excluded.month, + valid_until = excluded.valid_until, + changed_at = now() + where + api_ratelimits.second != excluded.second + or api_ratelimits.hour != excluded.hour + or api_ratelimits.month != excluded.month`) } From 3bba31e7010e9cb37ee862e961ab48d78c64831d Mon Sep 17 00:00:00 2001 From: Patrick Pfeiffer Date: Thu, 15 Feb 2024 08:30:21 +0100 Subject: [PATCH 27/42] (BIDS-2872) wip --- ratelimit/ratelimit.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ratelimit/ratelimit.go b/ratelimit/ratelimit.go index 2a8001c83d..1c72f48d19 100644 --- a/ratelimit/ratelimit.go +++ b/ratelimit/ratelimit.go @@ -1046,7 +1046,7 @@ func DBUpdateApiKeys() (sql.Result, error) { select id as user_id, api_key, - to_timestamp('9999-12-31 23:59:59', 'YYYY-MM-DD HH:MI:SS') as valid_until, + to_timestamp('9999-12-31 23:59:59', 'YYYY-MM-DD HH24:MI:SS') as valid_until, now() as changed_at from users where api_key is not null and not exists (select user_id from api_keys where api_keys.user_id = users.id) From 9dd2062e5e7bf0b4b5c968bbe847c8cdbf77f7ac Mon Sep 17 00:00:00 2001 From: Patrick Pfeiffer Date: Thu, 15 Feb 2024 09:00:35 +0100 Subject: [PATCH 28/42] (BIDS-2872) wip --- db/migrations/20240125120000_add_ratelimits.sql | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/db/migrations/20240125120000_add_ratelimits.sql b/db/migrations/20240125120000_add_ratelimits.sql index db934e90ac..29b5de6573 100644 --- a/db/migrations/20240125120000_add_ratelimits.sql +++ b/db/migrations/20240125120000_add_ratelimits.sql @@ -17,11 +17,11 @@ CREATE INDEX IF NOT EXISTS idx_api_ratelimits_changed_at_valid_until ON api_rate SELECT 'up SQL 
query - add table api_keys'; CREATE TABLE IF NOT EXISTS api_keys ( + api_key VARCHAR(256) NOT NULL UNIQUE, user_id INT NOT NULL, - api_key VARCHAR(256) NOT NULL, valid_until TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT '9999-12-31 23:59:59', changed_at TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP, - PRIMARY KEY (user_id, api_key) + PRIMARY KEY (api_key) ); CREATE INDEX IF NOT EXISTS idx_api_keys_changed_at_valid_until ON api_keys (changed_at, valid_until); From fe08b2fe6ac37ee908f20dbf4ec3a67bc399c130 Mon Sep 17 00:00:00 2001 From: Patrick Pfeiffer Date: Thu, 15 Feb 2024 09:06:48 +0100 Subject: [PATCH 29/42] (BIDS-2872) wip --- ratelimit/ratelimit.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ratelimit/ratelimit.go b/ratelimit/ratelimit.go index 1c72f48d19..b5dc205c0e 100644 --- a/ratelimit/ratelimit.go +++ b/ratelimit/ratelimit.go @@ -1050,7 +1050,8 @@ func DBUpdateApiKeys() (sql.Result, error) { now() as changed_at from users where api_key is not null and not exists (select user_id from api_keys where api_keys.user_id = users.id) - on conflict (user_id, api_key) do update set + on conflict (api_key) do update set + user_id = excluded.user_id, valid_until = excluded.valid_until, changed_at = excluded.changed_at where api_keys.valid_until != excluded.valid_until`, From f11aaa5c0c663e06c85a31ad5e23034af62de711 Mon Sep 17 00:00:00 2001 From: Patrick Pfeiffer Date: Thu, 15 Feb 2024 09:37:27 +0100 Subject: [PATCH 30/42] (BIDS-2872) wip --- ratelimit/ratelimit.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ratelimit/ratelimit.go b/ratelimit/ratelimit.go index b5dc205c0e..8350d7cb9d 100644 --- a/ratelimit/ratelimit.go +++ b/ratelimit/ratelimit.go @@ -327,7 +327,7 @@ func updateWeights(firstRun bool) error { Bucket string `db:"bucket"` ValidFrom time.Time `db:"valid_from"` }{} - err := db.WriterDb.Select(&dbWeights, "SELECT DISTINCT ON (endpoint) endpoint, bucket, weight, valid_from FROM api_weights WHERE valid_from <= NOW() ORDER BY endpoint, valid_from DESC") + err := db.FrontendWriterDB.Select(&dbWeights, "SELECT DISTINCT ON (endpoint) endpoint, bucket, weight, valid_from FROM api_weights WHERE valid_from <= NOW() ORDER BY endpoint, valid_from DESC") if err != nil { return err } @@ -493,7 +493,7 @@ func updateStats(redisClient *redis.Client) error { } func updateStatsEntries(entries []DbEntry) error { - tx, err := db.WriterDb.Beginx() + tx, err := db.FrontendWriterDB.Beginx() if err != nil { return err } @@ -554,7 +554,7 @@ func updateRateLimits() error { lastTRateLimits := lastRateLimitUpdateRateLimits lastRateLimitUpdateMu.Unlock() - tx, err := db.WriterDb.Beginx() + tx, err := db.FrontendWriterDB.Beginx() if err != nil { return err } From 777456261e2e26096db14f50ed20f96d14ef006f Mon Sep 17 00:00:00 2001 From: Patrick Pfeiffer Date: Thu, 15 Feb 2024 09:43:05 +0100 Subject: [PATCH 31/42] (BIDS-2872) wip --- ratelimit/ratelimit.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ratelimit/ratelimit.go b/ratelimit/ratelimit.go index 8350d7cb9d..7a7558c406 100644 --- a/ratelimit/ratelimit.go +++ b/ratelimit/ratelimit.go @@ -1073,7 +1073,7 @@ func DBUpdateApiRatelimits() (sql.Result, error) { max(second) as second, max(hour) as hour, max(month) as month, - to_timestamp('3000-01-01', 'YYYY-MM-DD') as valid_until, + to_timestamp('9999-12-31 23:59:59', 'YYYY-MM-DD HH24:MI:SS') as valid_until, now() as changed_at from ( -- set all current ratelimits to free From ca4a629e508c8b8cf9f11ca478d1b4d0655c5db5 Mon Sep 17 
00:00:00 2001 From: Patrick Pfeiffer Date: Thu, 15 Feb 2024 13:45:10 +0100 Subject: [PATCH 32/42] (BIDS-2872) wip --- ratelimit/ratelimit.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ratelimit/ratelimit.go b/ratelimit/ratelimit.go index 7a7558c406..7b6a53f651 100644 --- a/ratelimit/ratelimit.go +++ b/ratelimit/ratelimit.go @@ -1070,9 +1070,9 @@ func DBUpdateApiRatelimits() (sql.Result, error) { insert into api_ratelimits (user_id, second, hour, month, valid_until, changed_at) select user_id, - max(second) as second, - max(hour) as hour, - max(month) as month, + case when min(second) = 0 then 0 else max(second) end as second, + case when min(hour) = 0 then 0 else max(hour) end as hour, + case when min(month) = 0 then 0 else max(month) end as month, to_timestamp('9999-12-31 23:59:59', 'YYYY-MM-DD HH24:MI:SS') as valid_until, now() as changed_at from ( From 8b34b6035baa294c148a542c9cca34bbff84da45 Mon Sep 17 00:00:00 2001 From: Patrick Pfeiffer Date: Thu, 15 Feb 2024 14:18:37 +0100 Subject: [PATCH 33/42] (BIDS-2872) wip --- ratelimit/ratelimit.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ratelimit/ratelimit.go b/ratelimit/ratelimit.go index 7b6a53f651..e279189f79 100644 --- a/ratelimit/ratelimit.go +++ b/ratelimit/ratelimit.go @@ -1071,7 +1071,7 @@ func DBUpdateApiRatelimits() (sql.Result, error) { select user_id, case when min(second) = 0 then 0 else max(second) end as second, - case when min(hour) = 0 then 0 else max(hour) end as hour, + case when min(hour) = 0 then 0 else max(hour) end as hour, case when min(month) = 0 then 0 else max(month) end as month, to_timestamp('9999-12-31 23:59:59', 'YYYY-MM-DD HH24:MI:SS') as valid_until, now() as changed_at From 5f76f305738203a1f27f85c393ed1115a3ac019a Mon Sep 17 00:00:00 2001 From: Patrick Pfeiffer Date: Thu, 15 Feb 2024 14:41:21 +0100 Subject: [PATCH 34/42] (BIDS-2872) wip --- .../workflows/publish-ratelimit-images.yml | 21 ------------------- 1 file changed, 21 deletions(-) delete mode 100644 .github/workflows/publish-ratelimit-images.yml diff --git a/.github/workflows/publish-ratelimit-images.yml b/.github/workflows/publish-ratelimit-images.yml deleted file mode 100644 index af83ac8d71..0000000000 --- a/.github/workflows/publish-ratelimit-images.yml +++ /dev/null @@ -1,21 +0,0 @@ -name: Publish Docker dencun images - -on: - # Trigger the workflow on push or pull request, - # but only for the staging branch - push: - branches: - - ratelimit - -jobs: - build: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@master - - name: Publish to Dockerhub Registry - uses: elgohr/Publish-Docker-Github-Action@master - with: - name: gobitfly/eth2-beaconchain-explorer - username: ${{ secrets.DOCKER_USERNAME }} - password: ${{ secrets.DOCKER_PASSWORD }} - tags: "ratelimit" From e7fafcd02120733d43aefe98a04dfbf59aea4bd6 Mon Sep 17 00:00:00 2001 From: Patrick Pfeiffer Date: Thu, 15 Feb 2024 14:44:17 +0100 Subject: [PATCH 35/42] (BIDS-2872) wip --- metrics/metrics.go | 1 - 1 file changed, 1 deletion(-) diff --git a/metrics/metrics.go b/metrics/metrics.go index dd4187df71..8577fe18c0 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -125,7 +125,6 @@ type responseWriterDelegator struct { } func (r *responseWriterDelegator) WriteHeader(code int) { - logrus.Infof("metrics writeheader %v", code) r.status = code r.wroteHeader = true r.ResponseWriter.WriteHeader(code) From 59d5c59903e69a7249931fd566e8b087258fb41e Mon Sep 17 00:00:00 2001 From: Patrick Pfeiffer Date: Thu, 15 Feb 2024 15:24:16 
+0100 Subject: [PATCH 36/42] (BIDS-2872) wip --- ratelimit/ratelimit.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/ratelimit/ratelimit.go b/ratelimit/ratelimit.go index e279189f79..ea5c4eaf4e 100644 --- a/ratelimit/ratelimit.go +++ b/ratelimit/ratelimit.go @@ -80,8 +80,6 @@ var weightsMu = &sync.RWMutex{} var weights = map[string]int64{} // guarded by weightsMu var buckets = map[string]string{} // guarded by weightsMu -var pathPrefix = "" // only requests with this prefix will be ratelimited - var logger = logrus.StandardLogger().WithField("module", "ratelimit") type DbEntry struct { From f79524cb233e41280429505c9d36bcfd04933013 Mon Sep 17 00:00:00 2001 From: Patrick Pfeiffer Date: Fri, 16 Feb 2024 12:16:08 +0100 Subject: [PATCH 37/42] (BIDS-2872) fix ratelimit.DBGetUserApiRateLimit --- handlers/user.go | 5 ++++- ratelimit/ratelimit.go | 19 ++++++++++++++++--- 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/handlers/user.go b/handlers/user.go index f7accc1005..d0363dcc1b 100644 --- a/handlers/user.go +++ b/handlers/user.go @@ -96,8 +96,11 @@ func UserSettings(w http.ResponseWriter, r *http.Request) { return } - maxDaily := int(rl.Hour * 24) + maxDaily := int(rl.Second * 24 * 3600) maxMonthly := int(rl.Month) + if maxDaily > maxMonthly { + maxDaily = maxMonthly + } userSettingsData.ApiStatistics = &types.ApiStatistics{} diff --git a/ratelimit/ratelimit.go b/ratelimit/ratelimit.go index ea5c4eaf4e..8c21408480 100644 --- a/ratelimit/ratelimit.go +++ b/ratelimit/ratelimit.go @@ -58,7 +58,11 @@ var NoKeyRateLimit = &RateLimit{ var updateInterval = time.Second * 60 // how often to update ratelimits, weights and stats -var FreeRatelimit = NoKeyRateLimit +var FreeRatelimit = &RateLimit{ + Second: DefaultRateLimitSecond, + Hour: DefaultRateLimitHour, + Month: DefaultRateLimitMonth, +} var redisClient *redis.Client var redisIsHealthy atomic.Bool @@ -604,6 +608,9 @@ func updateRateLimits() error { NoKeyRateLimit.Month = dbApiProduct.Month } if dbApiProduct.Name == "free" { + if FreeRatelimit.Second != dbApiProduct.Second || FreeRatelimit.Hour != dbApiProduct.Hour || FreeRatelimit.Month != dbApiProduct.Month { + logger.WithFields(logrus.Fields{"second": dbApiProduct.Second, "hour": dbApiProduct.Hour, "month": dbApiProduct.Month}).Infof("free ratelimit changed") + } FreeRatelimit.Second = dbApiProduct.Second FreeRatelimit.Hour = dbApiProduct.Hour FreeRatelimit.Month = dbApiProduct.Month @@ -939,9 +946,15 @@ func (rl *FallbackRateLimiter) Handle(w http.ResponseWriter, r *http.Request, ne func DBGetUserApiRateLimit(userId int64) (*RateLimit, error) { rl := &RateLimit{} err := db.FrontendWriterDB.Get(rl, ` - select second, hour, month - from api_ratelimits + select second, hour, month + from api_ratelimits where user_id = $1`, userId) + if err != nil && err == sql.ErrNoRows { + rl.Second = FreeRatelimit.Second + rl.Hour = FreeRatelimit.Hour + rl.Month = FreeRatelimit.Month + return rl, nil + } return rl, err } From ad5e31a4e75c8b32c2beb10d294550563ec1fdcf Mon Sep 17 00:00:00 2001 From: Patrick Pfeiffer Date: Mon, 19 Feb 2024 09:30:23 +0100 Subject: [PATCH 38/42] (BIDS-2872) improve sql-migration --- db/migrations/20240125120000_add_ratelimits.sql | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/db/migrations/20240125120000_add_ratelimits.sql b/db/migrations/20240125120000_add_ratelimits.sql index 29b5de6573..c18bd9d1d5 100644 --- a/db/migrations/20240125120000_add_ratelimits.sql +++ b/db/migrations/20240125120000_add_ratelimits.sql @@ -42,13 +42,18 @@ 
SELECT 'up SQL query - add table api_products'; CREATE TABLE IF NOT EXISTS api_products ( name VARCHAR(20) NOT NULL, - stripe_price_id VARCHAR(256) NOT NULL, + stripe_price_id VARCHAR(256) NOT NULL DEFAULT '', second INT NOT NULL DEFAULT 0, hour INT NOT NULL DEFAULT 0, month INT NOT NULL DEFAULT 0, valid_from TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT TO_TIMESTAMP(0), PRIMARY KEY (name, valid_from) - ); + ); +INSERT INTO api_products (name, second, hour, month) VALUES + ('nokey', 2, 1000, 0), + ('free', 10, 0, 0), + ('unlimited', 100, 0, 0) +ON CONFLICT DO NOTHING; -- +goose StatementEnd -- +goose Down From d0c66918a5a166129d1d98b46b5e103a9ff5aaf0 Mon Sep 17 00:00:00 2001 From: Patrick Pfeiffer Date: Mon, 19 Feb 2024 09:46:04 +0100 Subject: [PATCH 39/42] (BIDS-2872) improve logging --- ratelimit/ratelimit.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/ratelimit/ratelimit.go b/ratelimit/ratelimit.go index 8c21408480..a1089115aa 100644 --- a/ratelimit/ratelimit.go +++ b/ratelimit/ratelimit.go @@ -608,9 +608,6 @@ func updateRateLimits() error { NoKeyRateLimit.Month = dbApiProduct.Month } if dbApiProduct.Name == "free" { - if FreeRatelimit.Second != dbApiProduct.Second || FreeRatelimit.Hour != dbApiProduct.Hour || FreeRatelimit.Month != dbApiProduct.Month { - logger.WithFields(logrus.Fields{"second": dbApiProduct.Second, "hour": dbApiProduct.Hour, "month": dbApiProduct.Month}).Infof("free ratelimit changed") - } FreeRatelimit.Second = dbApiProduct.Second FreeRatelimit.Hour = dbApiProduct.Hour FreeRatelimit.Month = dbApiProduct.Month From 97e06f87781f0541c4e99707183aea09b507dce3 Mon Sep 17 00:00:00 2001 From: Patrick Pfeiffer Date: Wed, 21 Feb 2024 12:02:48 +0100 Subject: [PATCH 40/42] (BIDS-2872) fix updateStats --- ratelimit/ratelimit.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ratelimit/ratelimit.go b/ratelimit/ratelimit.go index a1089115aa..b47c1027db 100644 --- a/ratelimit/ratelimit.go +++ b/ratelimit/ratelimit.go @@ -448,7 +448,7 @@ func updateStats(redisClient *redis.Client) error { } mgetSize := 500 - for j := 0; j <= len(keys); j += mgetSize { + for j := 0; j < len(keys); j += mgetSize { mgetStart := j mgetEnd := j + mgetSize if mgetEnd > len(keys) { @@ -477,7 +477,7 @@ func updateStats(redisClient *redis.Client) error { if len(keysToDelete) > 0 { delSize := 500 - for j := 0; j <= len(keys); j += delSize { + for j := 0; j < len(keys); j += delSize { delStart := j delEnd := j + delSize if delEnd > len(keysToDelete) { From e26215b94cdcfdcaefd3bb144cf08cc680c32bcd Mon Sep 17 00:00:00 2001 From: Patrick Pfeiffer Date: Wed, 21 Feb 2024 12:20:20 +0100 Subject: [PATCH 41/42] (BIDS-2872) use api_statistics.endpoint instead of api_statistics.call, shorter keys for redis-keys --- .../20240125120000_add_ratelimits.sql | 9 ++++++++ ratelimit/ratelimit.go | 22 +++++++++---------- 2 files changed, 20 insertions(+), 11 deletions(-) diff --git a/db/migrations/20240125120000_add_ratelimits.sql b/db/migrations/20240125120000_add_ratelimits.sql index c18bd9d1d5..6410da9721 100644 --- a/db/migrations/20240125120000_add_ratelimits.sql +++ b/db/migrations/20240125120000_add_ratelimits.sql @@ -54,6 +54,11 @@ INSERT INTO api_products (name, second, hour, month) VALUES ('free', 10, 0, 0), ('unlimited', 100, 0, 0) ON CONFLICT DO NOTHING; + +ALTER TABLE api_statistics ADD COLUMN IF NOT EXISTS endpoint TEXT NOT NULL DEFAULT ''; +ALTER TABLE api_statistics DROP CONSTRAINT IF EXISTS api_statistics_pkey; +ALTER TABLE api_statistics ADD PRIMARY KEY (ts, apikey, endpoint); +ALTER 
TABLE api_statistics ALTER COLUMN call SET DEFAULT ''; -- +goose StatementEnd -- +goose Down @@ -70,4 +75,8 @@ SELECT 'down SQL query - drop table api_weights'; DROP TABLE IF EXISTS api_weights; SELECT 'down SQL query - drop table api_products'; DROP TABLE IF EXISTS api_products; +SELECT 'down SQL query - drop column api_statistics.endpoint'; +ALTER TABLE api_statistics DROP COLUMN IF EXISTS endpoint; +ALTER TABLE api_statistics DROP CONSTRAINT IF EXISTS api_statistics_pkey; +ALTER TABLE api_statistics ADD PRIMARY KEY (ts, apikey, call); -- +goose StatementEnd diff --git a/ratelimit/ratelimit.go b/ratelimit/ratelimit.go index b47c1027db..efeaad50e0 100644 --- a/ratelimit/ratelimit.go +++ b/ratelimit/ratelimit.go @@ -389,8 +389,8 @@ func updateStats(redisClient *redis.Client) error { cursor := uint64(0) for { - // ratelimit:stats:---::: - cmd := redisClient.Scan(ctx, cursor, "ratelimit:stats:*:*:*:*", 1000) + // rl:s:---::: + cmd := redisClient.Scan(ctx, cursor, "rl:s:*:*:*:*", 1000) if cmd.Err() != nil { return cmd.Err() } @@ -524,7 +524,7 @@ func updateStatsEntries(entries []DbEntry) error { allIdx++ if batchIdx >= batchSize || allIdx >= len(entries) { - stmt := fmt.Sprintf(`INSERT INTO api_statistics (ts, apikey, call, count) VALUES %s ON CONFLICT (ts, apikey, call) DO UPDATE SET count = EXCLUDED.count`, strings.Join(valueStrings, ",")) + stmt := fmt.Sprintf(`INSERT INTO api_statistics (ts, apikey, endpoint, count) VALUES %s ON CONFLICT (ts, apikey, endpoint) DO UPDATE SET count = EXCLUDED.count`, strings.Join(valueStrings, ",")) _, err := tx.Exec(stmt, valueArgs...) if err != nil { return err @@ -734,15 +734,15 @@ func rateLimitRequest(r *http.Request) (*RateLimitResult, error) { timeUntilNextHourUtc := nextHourUtc.Sub(startUtc) timeUntilNextMonthUtc := nextMonthUtc.Sub(startUtc) - rateLimitSecondKey := fmt.Sprintf("ratelimit:current:second:%s:%d", res.Bucket, res.UserId) - rateLimitHourKey := fmt.Sprintf("ratelimit:current:hour:%04d-%02d-%02d-%02d:%s:%d", startUtc.Year(), startUtc.Month(), startUtc.Day(), startUtc.Hour(), res.Bucket, res.UserId) - rateLimitMonthKey := fmt.Sprintf("ratelimit:current:month:%04d-%02d:%s:%d", startUtc.Year(), startUtc.Month(), res.Bucket, res.UserId) - statsKey := fmt.Sprintf("ratelimit:stats:%04d-%02d-%02d-%02d:%d:%s:%s", startUtc.Year(), startUtc.Month(), startUtc.Day(), startUtc.Hour(), res.UserId, res.Key, res.Route) + rateLimitSecondKey := fmt.Sprintf("rl:c:s:%s:%d", res.Bucket, res.UserId) + rateLimitHourKey := fmt.Sprintf("rl:c:h:%04d-%02d-%02d-%02d:%s:%d", startUtc.Year(), startUtc.Month(), startUtc.Day(), startUtc.Hour(), res.Bucket, res.UserId) + rateLimitMonthKey := fmt.Sprintf("rl:c:m:%04d-%02d:%s:%d", startUtc.Year(), startUtc.Month(), res.Bucket, res.UserId) + statsKey := fmt.Sprintf("rl:s:%04d-%02d-%02d-%02d:%d:%s:%s", startUtc.Year(), startUtc.Month(), startUtc.Day(), startUtc.Hour(), res.UserId, res.Key, res.Route) if !res.IsValidKey { - rateLimitSecondKey = fmt.Sprintf("ratelimit:current:second:%s:%s", res.Bucket, res.IP) - rateLimitHourKey = fmt.Sprintf("ratelimit:current:hour:%04d-%02d-%02d-%02d:%s:%s", startUtc.Year(), startUtc.Month(), startUtc.Day(), startUtc.Hour(), res.Bucket, res.IP) - rateLimitMonthKey = fmt.Sprintf("ratelimit:current:month:%04d-%02d:%s:%s", startUtc.Year(), startUtc.Month(), res.Bucket, res.IP) - statsKey = fmt.Sprintf("ratelimit:stats:%04d-%02d-%02d-%02d:%d:%s:%s", startUtc.Year(), startUtc.Month(), startUtc.Day(), startUtc.Hour(), res.UserId, "nokey", res.Route) + rateLimitSecondKey = 
fmt.Sprintf("rl:c:s:%s:%s", res.Bucket, res.IP) + rateLimitHourKey = fmt.Sprintf("rl:c:h:%04d-%02d-%02d-%02d:%s:%s", startUtc.Year(), startUtc.Month(), startUtc.Day(), startUtc.Hour(), res.Bucket, res.IP) + rateLimitMonthKey = fmt.Sprintf("rl:c:m:%04d-%02d:%s:%s", startUtc.Year(), startUtc.Month(), res.Bucket, res.IP) + statsKey = fmt.Sprintf("rl:s:%04d-%02d-%02d-%02d:%d:%s:%s", startUtc.Year(), startUtc.Month(), startUtc.Day(), startUtc.Hour(), res.UserId, "nokey", res.Route) } res.RedisStatsKey = statsKey From ee9cf42e593c4ffd9c78b9eb171208ac414f4a95 Mon Sep 17 00:00:00 2001 From: Patrick Pfeiffer Date: Wed, 21 Feb 2024 13:36:23 +0100 Subject: [PATCH 42/42] (BIDS-2872) fix updateStats --- ratelimit/ratelimit.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ratelimit/ratelimit.go b/ratelimit/ratelimit.go index efeaad50e0..70a316bdc2 100644 --- a/ratelimit/ratelimit.go +++ b/ratelimit/ratelimit.go @@ -477,7 +477,7 @@ func updateStats(redisClient *redis.Client) error { if len(keysToDelete) > 0 { delSize := 500 - for j := 0; j < len(keys); j += delSize { + for j := 0; j < len(keysToDelete); j += delSize { delStart := j delEnd := j + delSize if delEnd > len(keysToDelete) {