Add button to load new replies in web UI (#35210)

This commit is contained in:
Eugen Rochko 2025-07-23 15:42:07 +02:00 committed by GitHub
parent cec26d58c8
commit 14a781fa24
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
18 changed files with 411 additions and 26 deletions

View file

@@ -3,9 +3,6 @@
module Status::FetchRepliesConcern
extend ActiveSupport::Concern
# enable/disable fetching all replies
FETCH_REPLIES_ENABLED = ENV['FETCH_REPLIES_ENABLED'] == 'true'
# debounce fetching all replies to minimize DoS
FETCH_REPLIES_COOLDOWN_MINUTES = (ENV['FETCH_REPLIES_COOLDOWN_MINUTES'] || 15).to_i.minutes
FETCH_REPLIES_INITIAL_WAIT_MINUTES = (ENV['FETCH_REPLIES_INITIAL_WAIT_MINUTES'] || 5).to_i.minutes
@@ -36,7 +33,7 @@ module Status::FetchRepliesConcern
def should_fetch_replies?
# we aren't brand new, and we haven't fetched replies since the debounce window
-    FETCH_REPLIES_ENABLED && !local? && created_at <= FETCH_REPLIES_INITIAL_WAIT_MINUTES.ago && (
+    !local? && created_at <= FETCH_REPLIES_INITIAL_WAIT_MINUTES.ago && (
fetched_replies_at.nil? || fetched_replies_at <= FETCH_REPLIES_COOLDOWN_MINUTES.ago
)
end

View file

@@ -0,0 +1,79 @@
# frozen_string_literal: true
# Groups a set of Sidekiq job IDs into a single unit of work tracked in
# Redis, optionally driving a connected AsyncRefresh to completion once
# enough of the batch has been processed.
class WorkerBatch
  include Redisable

  # All Redis keys belonging to a batch expire after one hour.
  TTL = 3600

  attr_reader :id

  # @param [String, nil] id reuse an existing batch id, or generate a fresh one
  def initialize(id = nil)
    @id = id || SecureRandom.hex(12)
  end

  # Connect the batch with an async refresh. When the number of processed jobs
  # passes the given threshold, the async refresh will be marked as finished.
  # @param [String] async_refresh_key
  # @param [Float] threshold
  def connect(async_refresh_key, threshold: 1.0)
    redis.hset(key, { 'async_refresh_key' => async_refresh_key, 'threshold' => threshold })
  end

  # Add jobs to the batch. Usually when the batch is created.
  # An empty batch immediately finishes any connected async refresh.
  # @param [Array<String>] jids
  def add_jobs(jids)
    if jids.blank?
      # Nothing to wait for: mark a connected refresh as done right away.
      refresh_key = redis.hget(key, 'async_refresh_key')
      AsyncRefresh.new(refresh_key).finish! if refresh_key.present?
      return
    end

    redis.multi do |transaction|
      transaction.sadd(key('jobs'), jids)
      transaction.expire(key('jobs'), TTL)
      transaction.hincrby(key, 'pending', jids.size)
      transaction.expire(key, TTL)
    end
  end

  # Remove a job from the batch, such as when it's been processed or it has failed.
  # @param [String] jid
  def remove_job(jid)
    # Update counters and read refresh state atomically so concurrent
    # workers see a consistent pending/processed snapshot.
    _, pending, processed, refresh_key, threshold = redis.multi do |transaction|
      transaction.srem(key('jobs'), jid)
      transaction.hincrby(key, 'pending', -1)
      transaction.hincrby(key, 'processed', 1)
      transaction.hget(key, 'async_refresh_key')
      transaction.hget(key, 'threshold')
    end

    return if refresh_key.blank?

    async_refresh = AsyncRefresh.new(refresh_key)
    async_refresh.increment_result_count(by: 1)
    # Finish when the batch drains or the processed share crosses the threshold.
    async_refresh.finish! if pending.zero? || processed >= threshold.to_f * (processed + pending)
  end

  # Get pending jobs.
  # @returns [Array<String>]
  def jobs
    redis.smembers(key('jobs'))
  end

  # Inspect the batch.
  # @returns [Hash]
  def info
    redis.hgetall(key)
  end

  private

  # Namespaced Redis key for this batch, optionally with a suffix.
  def key(suffix = nil)
    suffix ? "worker_batch:#{@id}:#{suffix}" : "worker_batch:#{@id}"
  end
end