author    Claire <claire.github-309c@sitedethib.com>  2023-02-25 14:00:40 +0100
committer Claire <claire.github-309c@sitedethib.com>  2023-02-25 14:00:40 +0100
commit    4ed09276d5267181061dff438a0b10770db9f226 (patch)
tree      cb8f358d58669626332ea01bcf0186d08b5eac90 /app/workers
parent    45087c1092143e95dfcc85b6c9abc5c6c0a0a5c2 (diff)
parent    730bb3e211a84a2f30e3e2bbeae3f77149824a68 (diff)
Merge branch 'main' into glitch-soc/merge-upstream
Conflicts:
- `.prettierignore`:
  Upstream added a line at the end of the file, while glitch-soc had its own
  extra lines.
  Took upstream's change.
- `CONTRIBUTING.md`:
  We have our custom CONTRIBUTING.md quoting upstream. Upstream made changes.
  Ported upstream changes.
- `app/controllers/application_controller.rb`:
  Upstream made code style changes in a method that is entirely replaced
  in glitch-soc.
  Ignored the change.
- `app/models/account.rb`:
  Code style changes textually close to glitch-soc-specific changes.
  Ported upstream changes.
- `lib/sanitize_ext/sanitize_config.rb`:
  Upstream code style changes.
  Ignored them.
Diffstat (limited to 'app/workers')
-rw-r--r--  app/workers/scheduler/accounts_statuses_cleanup_scheduler.rb   26
-rw-r--r--  app/workers/web/push_notification_worker.rb                    12
2 files changed, 16 insertions, 22 deletions
diff --git a/app/workers/scheduler/accounts_statuses_cleanup_scheduler.rb b/app/workers/scheduler/accounts_statuses_cleanup_scheduler.rb
index bd92fe32c..f237f1dc9 100644
--- a/app/workers/scheduler/accounts_statuses_cleanup_scheduler.rb
+++ b/app/workers/scheduler/accounts_statuses_cleanup_scheduler.rb
@@ -7,7 +7,7 @@ class Scheduler::AccountsStatusesCleanupScheduler
   # This limit is mostly to be nice to the fediverse at large and not
   # generate too much traffic.
   # This also helps limiting the running time of the scheduler itself.
-  MAX_BUDGET         = 50
+  MAX_BUDGET         = 150
 
   # This is an attempt to spread the load across instances, as various
   # accounts are likely to have various followers.
@@ -15,28 +15,22 @@ class Scheduler::AccountsStatusesCleanupScheduler
 
   # This is an attempt to limit the workload generated by status removal
   # jobs to something the particular instance can handle.
-  PER_THREAD_BUDGET  = 5
+  PER_THREAD_BUDGET  = 6
 
   # Those avoid loading an instance that is already under load
-  MAX_DEFAULT_SIZE    = 2
+  MAX_DEFAULT_SIZE    = 200
   MAX_DEFAULT_LATENCY = 5
-  MAX_PUSH_SIZE       = 5
+  MAX_PUSH_SIZE       = 500
   MAX_PUSH_LATENCY    = 10
+
   # 'pull' queue has lower priority jobs, and it's unlikely that pushing
   # deletes would cause much issues with this queue if it didn't cause issues
   # with default and push. Yet, do not enqueue deletes if the instance is
   # lagging behind too much.
-  MAX_PULL_SIZE       = 500
-  MAX_PULL_LATENCY    = 300
-
-  # This is less of an issue in general, but deleting old statuses is likely
-  # to cause delivery errors, and thus increase the number of jobs to be retried.
-  # This doesn't directly translate to load, but connection errors and a high
-  # number of dead instances may lead to this spiraling out of control if
-  # unchecked.
-  MAX_RETRY_SIZE = 50_000
+  MAX_PULL_SIZE       = 10_000
+  MAX_PULL_LATENCY    = 5.minutes.to_i
 
-  sidekiq_options retry: 0, lock: :until_executed
+  sidekiq_options retry: 0, lock: :until_executed, lock_ttl: 1.day.to_i
 
   def perform
     return if under_load?
@@ -62,17 +56,17 @@ class Scheduler::AccountsStatusesCleanupScheduler
       # The idea here is to loop through all policies at least once until the budget is exhausted
       # and start back after the last processed account otherwise
       break if budget.zero? || (num_processed_accounts.zero? && first_policy_id.nil?)
+
       first_policy_id = nil
     end
   end
 
   def compute_budget
-    threads = Sidekiq::ProcessSet.new.select { |x| x['queues'].include?('push') }.map { |x| x['concurrency'] }.sum
+    threads = Sidekiq::ProcessSet.new.select { |x| x['queues'].include?('push') }.pluck('concurrency').sum
     [PER_THREAD_BUDGET * threads, MAX_BUDGET].min
   end
 
   def under_load?
-    return true if Sidekiq::Stats.new.retry_size > MAX_RETRY_SIZE
     queue_under_load?('default', MAX_DEFAULT_SIZE, MAX_DEFAULT_LATENCY) || queue_under_load?('push', MAX_PUSH_SIZE, MAX_PUSH_LATENCY) || queue_under_load?('pull', MAX_PULL_SIZE, MAX_PULL_LATENCY)
   end
 
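
The new constants above feed into compute_budget, which caps each scheduler run at the smaller of the summed per-thread budgets and MAX_BUDGET. A minimal sketch of that arithmetic, with the thread count passed in directly (the worker itself sums the 'concurrency' of every Sidekiq process serving the 'push' queue):

    # Sketch only: constants copied from the hunk above; push_threads is a
    # stand-in for the Sidekiq::ProcessSet concurrency sum.
    MAX_BUDGET        = 150
    PER_THREAD_BUDGET = 6

    def compute_budget(push_threads)
      [PER_THREAD_BUDGET * push_threads, MAX_BUDGET].min
    end

    compute_budget(10) # => 60,  ten push threads stay well under the cap
    compute_budget(40) # => 150, large deployments are clamped to MAX_BUDGET
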
diff --git a/app/workers/web/push_notification_worker.rb b/app/workers/web/push_notification_worker.rb
index 1ed5bb9e0..7e9691aab 100644
--- a/app/workers/web/push_notification_worker.rb
+++ b/app/workers/web/push_notification_worker.rb
@@ -22,13 +22,13 @@ class Web::PushNotificationWorker
       request = Request.new(:post, @subscription.endpoint, body: payload.fetch(:ciphertext), http_client: http_client)
 
       request.add_headers(
-        'Content-Type'     => 'application/octet-stream',
-        'Ttl'              => TTL,
-        'Urgency'          => URGENCY,
+        'Content-Type' => 'application/octet-stream',
+        'Ttl' => TTL,
+        'Urgency' => URGENCY,
         'Content-Encoding' => 'aesgcm',
-        'Encryption'       => "salt=#{Webpush.encode64(payload.fetch(:salt)).delete('=')}",
-        'Crypto-Key'       => "dh=#{Webpush.encode64(payload.fetch(:server_public_key)).delete('=')};#{@subscription.crypto_key_header}",
-        'Authorization'    => @subscription.authorization_header
+        'Encryption' => "salt=#{Webpush.encode64(payload.fetch(:salt)).delete('=')}",
+        'Crypto-Key' => "dh=#{Webpush.encode64(payload.fetch(:server_public_key)).delete('=')};#{@subscription.crypto_key_header}",
+        'Authorization' => @subscription.authorization_header
       )
 
       request.perform do |response|
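
This hunk only reflows the header hash to the new code style, but the values themselves are worth a note: Encryption and Crypto-Key carry the payload salt and the server's public key as unpadded URL-safe Base64, which is what the delete('=') calls achieve. A rough standalone sketch of that encoding, assuming Webpush.encode64 behaves like Base64.urlsafe_encode64 (the salt and key bytes below are made up for illustration):

    require 'base64'
    require 'openssl'

    # Stand-ins for values the worker reads from the encrypted payload
    # and the Web::PushSubscription record.
    salt              = OpenSSL::Random.random_bytes(16)
    server_public_key = OpenSSL::Random.random_bytes(65)

    # Unpadded URL-safe Base64, as the aesgcm content encoding expects.
    encode = ->(bytes) { Base64.urlsafe_encode64(bytes).delete('=') }

    headers = {
      'Content-Encoding' => 'aesgcm',
      'Encryption' => "salt=#{encode.call(salt)}",
      'Crypto-Key' => "dh=#{encode.call(server_public_key)}"
    }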