# uwsgi-config.yml
# You will see this in the wsgi error log whenever a new worker process is created,
# interleaved with startup lines such as "spawned uWSGI http 1 (pid: 14102)":
#   Exception ignored in: <function _after_at_fork_child_reinit_locks at 0x7f1729f97820>
#   Traceback (most recent call last):
#     File "/usr/lib64/python3.8/logging/__init__.py", line 264, in _after_at_fork_child_reinit_locks
#       _releaseLock()  # Acquired by os.register_at_fork(before=.
#     File "/usr/lib64/python3.8/logging/__init__.py", line 232, in _releaseLock
#       _lock.release()
#   RuntimeError: cannot release un-acquired lock
#
# This error appears to have no effect.
uwsgi:
http: :8000
wsgi-file: wsgi.py
master: true
# threads: 10 does not appear to override the cheaper-busyness settings; retain it so that
# everything still works if that section is commented out by someone.
threads: 10
# the logs state this should be on; multiple threads were confirmed to already be in use without it, which I cannot explain.
enable-threads: true
# poorly documented; I think this directs uwsgi to use a different mutex type to solve a
# many-processes-plus-threads concurrency issue.
thunder-lock: true
# py-call-uwsgi-fork-hooks - undocumented, but recommended in logs.
py-call-uwsgi-fork-hooks: true
# It is possible to hit connection count limit issues on heavily loaded servers with more than 2
# cpu cores. uwsgi logs a message like "uWSGI listen queue of socket ... (fd: 3) full !!!" when
# this occurs. The default value is 100; values greater than 128 require running
# sysctl -w net.core.somaxconn=1024 in our deployment commands.
listen: 1024
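# A minimal sketch of how that sysctl could be applied at deploy time, assuming an Elastic
# Beanstalk .ebextensions commands file (the file name and command label below are illustrative
# assumptions, not copied from this repo):
#   # .ebextensions/01-somaxconn.config
#   commands:
#     01_raise_somaxconn:
#       command: sysctl -w net.core.somaxconn=1024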
# enabled as an attempt to improve 502 timeouts; in practice there appears to be no effect.
http-keepalive: true
# the default timeout is 60 or 30 seconds depending on the option; the better solution is to
# stream data on a case-by-case basis, but in general we want to support long timeouts. There
# are a lot of timeout options...
http-timeout: 120
# ah, "harakiri" is seppuku; this is the thread recycle timeout.
harakiri: 120
# log us when we commit harakiri
harakiri-verbose: true
# wsgi logging is only useful for specific debugging scenarios, we already have request logging
# from apache and print statements from python.
disable-logging: true
reload-mercy: 5
worker-reload-mercy: 5
# this setting should speed up deploys; EB expects SIGTERM to kill the Procfile process
die-on-term: true
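# For context, the Procfile entry that launches uwsgi with this config would look roughly like
# the line below (an illustrative assumption, not copied from this repo; uwsgi accepts a YAML
# config file via --yaml):
#   web: uwsgi --yaml uwsgi-config.yml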
# allows worker threads to receive signals - actually it looks like this is bugged in a way
# that will be incredibly difficult to identify in the future, so let's not enable it.
# https://github.com/unbit/uwsgi/issues/1978
# py-call-osafterfork: true
# makes threads easier to identify
auto-procname: true
# This article (since when did Bloomberg have server documentation?) has a lot of good options
# https://www.bloomberg.com/company/stories/configuring-uwsgi-production-deployment/
# After a lot of testing, max-requests at least killed the entire process (terminating its open
# http sessions), not just the single thread that finished the request. Do not enable.
# max-requests: 1000 # Restart workers after this many requests
# max-worker-lifetime: 3600 # Restart workers after this many seconds
# reload-on-rss: 2048 # Restart workers after this much resident memory
# worker-reload-mercy: 60 # How long to wait before forcefully killing workers
# From the article, this is a more robust scaling mechanism than our old cpu count * 2:
# %k automatically detects the number of cpu cores
# processes: %( %k + %k )
# these processes and threads are pretty cheap, this SHOULD solve certain classes of site sluggishness
cheaper-algo: busyness
processes: 500 # Maximum number of workers allowed
cheaper: 8 # Minimum number of workers allowed
cheaper-initial: 16 # Workers created at startup
cheaper-overload: 1 # Length of a cycle in seconds
cheaper-step: 16 # How many workers to spawn at a time
cheaper-busyness-multiplier: 30 # How many cycles to wait before killing workers
cheaper-busyness-min: 20 # Below this threshold, kill workers (if stable for multiplier cycles)
cheaper-busyness-max: 70 # Above this threshold, spawn new workers
cheaper-busyness-backlog-alert: 16 # Spawn emergency workers if more than this many requests are waiting in the queue
cheaper-busyness-backlog-step: 2 # How many emergency workers to create if there are too many requests in the queue
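# A worked reading of the settings above (my interpretation based on the comments here, not a
# verbatim description of the busyness plugin): each cycle lasts 1 second (cheaper-overload).
# If worker busyness in a cycle rises above 70%, another batch of 16 workers (cheaper-step) is
# spawned, capped at 500 total. If busyness stays below 20% for 30 consecutive cycles
# (cheaper-busyness-multiplier, roughly 30 seconds), workers are reaped back toward the minimum
# of 8. Independently, if more than 16 requests are waiting in the queue, 2 emergency workers
# are spawned immediately.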