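# Nanobox boxfile for running Mastodon: build/runtime configuration
# (run.config), deploy-time hooks (deploy.config), the web, streaming, and
# Sidekiq worker components, a cron-only worker for scheduled rake tasks, and
# the PostgreSQL, Redis, and shared-storage data components, each with a
# nightly backup job.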
run.config:
  engine: ruby
  engine.config:
    runtime: ruby-2.4

  extra_packages:
    # basic servers:
    - nginx
    - nodejs

    # for images:
    - ImageMagick

    # for videos:
    - ffmpeg3

    # to prep the .env file:
    - gettext-tools

    # for node-gyp, used in the asset compilation process:
    - python-2

    # i18n:
    - libidn

  cache_dirs:
    - node_modules

  extra_path_dirs:
    - node_modules/.bin

  build_triggers:
    - .ruby-version
    - Gemfile
    - Gemfile.lock
    - package.json
    - yarn.lock

  extra_steps:
    - envsubst < .env.nanobox > .env
    - yarn

  fs_watch: true

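# Deploy-time hooks: precompile assets for production, render .env.production
# and the nginx configs from their templates (forcing LOCAL_HTTPS=true), and
# run 'rake db:migrate:setup' on the web.web component before the new code
# goes live.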
deploy.config:
  extra_steps:
    - NODE_ENV=production bundle exec rake assets:precompile

  transform:
    - "sed 's/LOCAL_HTTPS=.*/LOCAL_HTTPS=true/i' /app/.env.nanobox | envsubst > /app/.env.production"
    - |-
      if [ -z "$LOCAL_DOMAIN" ]
      then
        . /app/.env.production
        export LOCAL_DOMAIN
      fi
      erb /app/nanobox/nginx-web.conf.erb > /app/nanobox/nginx-web.conf
      erb /app/nanobox/nginx-stream.conf.erb > /app/nanobox/nginx-stream.conf
    - touch /app/log/production.log

  before_live:
    web.web:
      - bundle exec rake db:migrate:setup

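# Main web component: nginx in front of the Puma Rails server, handling
# routes under '/'. Uploaded media is stored on the shared data.storage
# component, mounted at public/system.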
web.web:
  start:
    nginx: nginx -c /app/nanobox/nginx-web.conf
    rails: bundle exec puma -C /app/config/puma.rb

  routes:
    - '/'

  writable_dirs:
    - tmp

  log_watch:
    rails: 'log/production.log'

  network_dirs:
    data.storage:
      - public/system

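# Streaming API component: nginx in front of the Node streaming server
# ('yarn run start'), matching the /api/v1/streaming* routes (including the
# double-slash variant noted below).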
web.stream:
  start:
    nginx: nginx -c /app/nanobox/nginx-stream.conf
    node: yarn run start

  routes:
    - '/api/v1/streaming*'
    # Somehow we're getting requests for scheme://domain//api/v1/streaming* - match those, too
    - '//api/v1/streaming*'

  writable_dirs:
    - tmp

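# Background jobs: one Sidekiq process per queue (default, mailers, pull,
# push), all logging to log/sidekiq.log and sharing the same media storage
# as web.web.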
worker.sidekiq:
  start:
    default: bundle exec sidekiq -c 5 -q default -L /app/log/sidekiq.log
    mailers: bundle exec sidekiq -c 5 -q mailers -L /app/log/sidekiq.log
    pull: bundle exec sidekiq -c 5 -q pull -L /app/log/sidekiq.log
    push: bundle exec sidekiq -c 5 -q push -L /app/log/sidekiq.log

  writable_dirs:
    - tmp

  log_watch:
    rails: 'log/production.log'
    sidekiq: 'log/sidekiq.log'

  network_dirs:
    data.storage:
      - public/system

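# Cron-only worker: 'sleep 365d' just keeps the container alive; its real
# purpose is running the scheduled rake tasks defined under cron below.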
worker.cron_only:
  start: sleep 365d

  writable_dirs:
    - tmp

  log_watch:
    rake: 'log/production.log'

  network_dirs:
    data.storage:
      - public/system

  cron:
    # 20:00 (8 pm), server time: send out the daily digest emails to everyone
    # who opted to receive one
    - id: send_digest_emails
      schedule: '00 20 * * *'
      command: 'bundle exec rake mastodon:emails:digest'

    # 00:10 (ten past midnight), server time: remove local copies of remote
    # users' media once they are older than a certain age (set the NUM_DAYS
    # environment variable to change this from the default of 7 days)
    - id: clear_remote_media
      schedule: '10 00 * * *'
      command: 'bundle exec rake mastodon:media:remove_remote'

    # 00:20 (twenty past midnight), server time: remove subscriptions to remote
    # users that nobody follows locally (anymore)
    - id: clear_unfollowed_subs
      schedule: '20 00 * * *'
      command: 'bundle exec rake mastodon:push:clear'

    # 00:30 (half past midnight), server time: update local copies of remote
    # users' avatars to match whatever they currently have set on their profile
    - id: update_remote_avatars
      schedule: '30 00 * * *'
      command: 'bundle exec rake mastodon:media:redownload_avatars'

    ############################################################################
    # This task is one you might or might not want to enable. It keeps disk
    # usage low, but it makes "shadow bans" (scenarios where a user is silenced
    # without being made aware of it) much harder to maintain, since users
    # would notice their media vanishing on a regular basis. Enable it if you
    # aren't worried about users knowing they've been silenced (at the instance
    # level) and want to save disk space. Leave it disabled otherwise.
    ############################################################################
    # # 00:00 (midnight), server time: remove media posted by silenced users
    # - id: clear_silenced_media
    #   schedule: '00 00 * * *'
    #   command: 'bundle exec rake mastodon:media:remove_silenced'

    ############################################################################
    # The following two tasks can be uncommented to automatically open and
    # close registrations on a schedule. The format of 'schedule' is a standard
    # cron time expression: minute hour day month day-of-week; search for "cron
    # time expressions" for more info on how to set these up. The examples here
    # open registrations only from 8 am to 4 pm, server time.
    ############################################################################
    # # 08:00 (8 am), server time: open registrations so new users can join
    # - id: open_registrations
    #   schedule: '00 08 * * *'
    #   command: 'bundle exec rake mastodon:settings:open_registrations'
    #
    # # 16:00 (4 pm), server time: close registrations so new users *can't* join
    # - id: close_registrations
    #   schedule: '00 16 * * *'
    #   command: 'bundle exec rake mastodon:settings:close_registrations'

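# PostgreSQL. The nightly backup dumps the 'gonano' database, gzips it,
# uploads it to the warehouse blob store, then deletes all but the newest
# BACKUP_COUNT (default 1) backups for this host.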
data.db:
  image: nanobox/postgresql:9.5

  cron:
    - id: backup
      schedule: '0 3 * * *'
      command: |
        PGPASSWORD=${DATA_POSTGRES_PASS} pg_dump -U ${DATA_POSTGRES_USER} -w -Fc -O gonano |
        gzip |
        curl -k -H "X-AUTH-TOKEN: ${WAREHOUSE_DATA_HOARDER_TOKEN}" https://${WAREHOUSE_DATA_HOARDER_HOST}:7410/blobs/backup-${HOSTNAME}-$(date -u +%Y-%m-%d.%H-%M-%S).sql.gz --data-binary @- &&
        curl -k -s -H "X-AUTH-TOKEN: ${WAREHOUSE_DATA_HOARDER_TOKEN}" https://${WAREHOUSE_DATA_HOARDER_HOST}:7410/blobs/ |
        json_pp |
        grep ${HOSTNAME} |
        sort |
        head -n-${BACKUP_COUNT:-1} |
        sed 's/.*: "\(.*\)".*/\1/' |
        while read file
        do
          curl -k -H "X-AUTH-TOKEN: ${WAREHOUSE_DATA_HOARDER_TOKEN}" https://${WAREHOUSE_DATA_HOARDER_HOST}:7410/blobs/${file} -X DELETE
        done

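# Redis. The nightly backup uploads the dump.rdb snapshot to the warehouse
# and prunes old backups the same way.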
data.redis:
  image: nanobox/redis:3.0

  cron:
    - id: backup
      schedule: '0 3 * * *'
      command: |
        curl -k -H "X-AUTH-TOKEN: ${WAREHOUSE_DATA_HOARDER_TOKEN}" https://${WAREHOUSE_DATA_HOARDER_HOST}:7410/blobs/backup-${HOSTNAME}-$(date -u +%Y-%m-%d.%H-%M-%S).rdb --data-binary @/data/var/db/redis/dump.rdb &&
        curl -k -s -H "X-AUTH-TOKEN: ${WAREHOUSE_DATA_HOARDER_TOKEN}" https://${WAREHOUSE_DATA_HOARDER_HOST}:7410/blobs/ |
        json_pp |
        grep ${HOSTNAME} |
        sort |
        head -n-${BACKUP_COUNT:-1} |
        sed 's/.*: "\(.*\)".*/\1/' |
        while read file
        do
          curl -k -H "X-AUTH-TOKEN: ${WAREHOUSE_DATA_HOARDER_TOKEN}" https://${WAREHOUSE_DATA_HOARDER_HOST}:7410/blobs/${file} -X DELETE
        done

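# Shared network storage (unfs) backing public/system for the web and worker
# components. The nightly backup tars the data directory, uploads it to the
# warehouse, and prunes old backups the same way.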
data.storage:
  image: nanobox/unfs:0.9

  cron:
    - id: backup
      schedule: '0 3 * * *'
      command: |
        tar cz -C /data/var/db/unfs/ . |
        curl -k -H "X-AUTH-TOKEN: ${WAREHOUSE_DATA_HOARDER_TOKEN}" https://${WAREHOUSE_DATA_HOARDER_HOST}:7410/blobs/backup-${HOSTNAME}-$(date -u +%Y-%m-%d.%H-%M-%S).tgz --data-binary @- &&
        curl -k -s -H "X-AUTH-TOKEN: ${WAREHOUSE_DATA_HOARDER_TOKEN}" https://${WAREHOUSE_DATA_HOARDER_HOST}:7410/blobs/ |
        json_pp |
        grep ${HOSTNAME} |
        sort |
        head -n-${BACKUP_COUNT:-1} |
        sed 's/.*: "\(.*\)".*/\1/' |
        while read file
        do
          curl -k -H "X-AUTH-TOKEN: ${WAREHOUSE_DATA_HOARDER_TOKEN}" https://${WAREHOUSE_DATA_HOARDER_HOST}:7410/blobs/${file} -X DELETE
        done