-
Notifications
You must be signed in to change notification settings - Fork 17
/
docker-compose.yml
110 lines (100 loc) · 2.86 KB
/
docker-compose.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
---
# Docker Compose stack for a dialogue / NLP system: Stanford CoreNLP, four
# OpenNMT translation servers (question generation, chit-chat, summarisation,
# FB-post chit-chat), BiDAF question answering, ALICE chat, BigARTM topic
# modelling, an intent classifier, and a spellchecker.
#
# NOTE(review): indentation reconstructed to the standard Compose schema —
# the source copy had all leading whitespace stripped.
version: "3"

services:
  # Stanford CoreNLP 3.6.0; container port 9000 published on host port 5554.
  corenlp:
    image: 'sld3/corenlp:3.6.0'
    ports:
      - '5554:9000'
    restart: 'unless-stopped'

  # OpenNMT server for the question-generation model. The folded scalar (>)
  # joins the lines into one shell command at parse time.
  opennmt:
    image: 'sld3/opennmt:780979ab_distro20e52377'
    volumes:
      - './question_generation/data/:/root/data'
    command: >
      bash -c "cd /root/opennmt && th tools/translation_server.lua -host 0.0.0.0
      -port 5556 -model /root/data/model.t7 -beam_size 12 -replace_unk true"
    depends_on:
      - corenlp
    ports:
      - '5556:5556'
    restart: 'unless-stopped'

  # OpenNMT server for the chit-chat model; same image, different model file.
  # All OpenNMT services listen on 5556 inside the container and are
  # distinguished by their host-side port.
  opennmtchitchat:
    image: 'sld3/opennmt:780979ab_distro20e52377'
    volumes:
      - './opennmt_chitchat/data/:/root/data'
    command: >
      bash -c "cd /root/opennmt && th tools/translation_server.lua -host 0.0.0.0
      -port 5556 -model /root/data/CPU_epoch5_14.62.t7
      -beam_size 1 -replace_unk false"
    ports:
      - '5557:5556'
    restart: 'unless-stopped'

  # OpenNMT server for the text-summarisation model.
  opennmtsummary:
    image: 'sld3/opennmt:780979ab_distro20e52377'
    volumes:
      - './opennmt_summarization/models:/root/data'
    command: >
      bash -c "cd /root/opennmt && th tools/translation_server.lua -host 0.0.0.0
      -port 5556 -model /root/data/textsum_epoch7_14.69_release.t7
      -beam_size 12 -replace_unk false"
    ports:
      - '5558:5556'
    restart: 'unless-stopped'

  # OpenNMT server for the FB-news chit-chat model (capped reply length).
  opennmtfbpost:
    image: 'sld3/opennmt:780979ab_distro20e52377'
    volumes:
      - './fbnews_chitchat/data/:/root/data'
    command: >
      bash -c "cd /root/opennmt && th tools/translation_server.lua -host 0.0.0.0
      -port 5556 -model /root/data/fbchichat_ver2_epoch9.t7
      -beam_size 5 -replace_unk false -max_sent_length 20"
    ports:
      - '5559:5556'
    restart: 'unless-stopped'

  # BiDAF (bi-directional attention flow) QA service.
  bidaf:
    image: 'sld3/bi-att-flow:0.1.0'
    ports:
      - '1995:1995'
    restart: 'unless-stopped'

  # ALICE chat API, built from the local ./ALICEChatAPI directory; source is
  # bind-mounted into the container. No published ports — presumably reached
  # over the Compose network only.
  alice:
    build:
      context: './ALICEChatAPI/'
    image: 'sld3/alice_chat:0.1.0'
    volumes:
      - './ALICEChatAPI:/src'
    command: python -u server.py
    restart: 'unless-stopped'

  # BigARTM topic-modelling service behind gunicorn; port 3000 is exposed to
  # linked containers only (not published on the host).
  bigartm:
    build:
      context: './topic-modelling/'
    image: 'sld3/bigartm:0.1.0'
    volumes:
      - './topic-modelling:/src'
    command: gunicorn -w 4 -b 0.0.0.0:3000 server:app
    expose:
      - '3000'
    restart: 'unless-stopped'
    environment:
      - PYTHONUNBUFFERED=1

  # Intent classifier behind gunicorn.
  intent_classifier:
    build:
      context: './intent_classifier/'
    image: 'sld3/intent_classifier:0.1.0'
    volumes:
      - './intent_classifier:/src'
    command: gunicorn -w 1 -t 120 -b 0.0.0.0:3000 server:app  # Only 1 worker, because a lot of RAM is required
    expose:
      - '3000'
    restart: 'unless-stopped'
    environment:
      - PYTHONUNBUFFERED=1

  # Spellchecker service; model directory is mounted separately from the code.
  spellchecker:
    image: 'spellchecker:0.1'
    build:
      context: './spellchecker'
    volumes:
      - './spellchecker:/src'
      - './spellchecker_model:/model'
    command: python3.6 -u server.py
    ports:
      - '3050:3050'
    restart: 'unless-stopped'