From 5f4c3d8219c344e583d70c6f6f5cf43fefd5bc50 Mon Sep 17 00:00:00 2001 From: Nisanth Chunduru Date: Sun, 26 Mar 2017 13:28:41 +0530 Subject: [PATCH] Add sidekiq --- Gemfile | 7 +- Gemfile.lock | 24 +- config.ru | 4 +- config/eye/staging.eye | 23 ++ config/load.rb | 22 +- config/sba_config.yml.example | 12 +- config/sidekiq/staging.yml | 2 + lib/base.rb | 4 + run_app.rb | 10 +- vendor/gems/sidekiq-pro-3.4.5/README.md | 54 +++ .../gems/sidekiq-pro-3.4.5/lib/sidekiq-pro.rb | 82 +++++ .../sidekiq-pro-3.4.5/lib/sidekiq/batch.rb | 305 ++++++++++++++++ .../lib/sidekiq/batch/callback.rb | 171 +++++++++ .../lib/sidekiq/batch/client.rb | 49 +++ .../lib/sidekiq/batch/middleware.rb | 139 ++++++++ .../lib/sidekiq/batch/status.rb | 194 ++++++++++ .../sidekiq-pro-3.4.5/lib/sidekiq/intro.ans | 18 + .../lib/sidekiq/middleware/server/statsd.rb | 42 +++ .../sidekiq-pro-3.4.5/lib/sidekiq/pro/api.rb | 147 ++++++++ .../lib/sidekiq/pro/basic_fetch.rb | 45 +++ .../lib/sidekiq/pro/config.rb | 92 +++++ .../lib/sidekiq/pro/expiry.rb | 46 +++ .../lib/sidekiq/pro/fetch.rb | 262 ++++++++++++++ .../sidekiq-pro-3.4.5/lib/sidekiq/pro/push.rb | 86 +++++ .../lib/sidekiq/pro/scheduler.rb | 25 ++ .../lib/sidekiq/pro/scripting.rb | 113 ++++++ .../lib/sidekiq/pro/super_fetch.rb | 334 ++++++++++++++++++ .../lib/sidekiq/pro/timed_fetch.rb | 235 ++++++++++++ .../sidekiq-pro-3.4.5/lib/sidekiq/pro/util.rb | 14 + .../lib/sidekiq/pro/version.rb | 5 + .../sidekiq-pro-3.4.5/lib/sidekiq/pro/web.rb | 131 +++++++ .../lib/sidekiq/pro/worker.rb | 20 ++ .../lib/sidekiq/rack/batch_status.rb | 45 +++ .../lib/sidekiq/shard_set.rb | 75 ++++ .../sidekiq-pro-3.4.5/sidekiq-pro.gemspec | 32 ++ .../gems/sidekiq-pro-3.4.5/web/locales/en.yml | 17 + .../sidekiq-pro-3.4.5/web/views/batch.erb | 98 +++++ .../sidekiq-pro-3.4.5/web/views/batches.erb | 49 +++ .../sidekiq-pro-3.4.5/web/views/filtering.erb | 7 + workers/trigger_app_event.rb | 10 + 40 files changed, 3030 insertions(+), 20 deletions(-) create mode 100644 config/eye/staging.eye create mode 100644 config/sidekiq/staging.yml create mode 100644 vendor/gems/sidekiq-pro-3.4.5/README.md create mode 100644 vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq-pro.rb create mode 100644 vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/batch.rb create mode 100644 vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/batch/callback.rb create mode 100644 vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/batch/client.rb create mode 100644 vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/batch/middleware.rb create mode 100644 vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/batch/status.rb create mode 100644 vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/intro.ans create mode 100644 vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/middleware/server/statsd.rb create mode 100644 vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/api.rb create mode 100644 vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/basic_fetch.rb create mode 100644 vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/config.rb create mode 100644 vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/expiry.rb create mode 100644 vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/fetch.rb create mode 100644 vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/push.rb create mode 100644 vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/scheduler.rb create mode 100644 vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/scripting.rb create mode 100644 vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/super_fetch.rb create mode 100644 vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/timed_fetch.rb create mode 100644 
vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/util.rb create mode 100644 vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/version.rb create mode 100644 vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/web.rb create mode 100644 vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/worker.rb create mode 100644 vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/rack/batch_status.rb create mode 100644 vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/shard_set.rb create mode 100644 vendor/gems/sidekiq-pro-3.4.5/sidekiq-pro.gemspec create mode 100644 vendor/gems/sidekiq-pro-3.4.5/web/locales/en.yml create mode 100644 vendor/gems/sidekiq-pro-3.4.5/web/views/batch.erb create mode 100644 vendor/gems/sidekiq-pro-3.4.5/web/views/batches.erb create mode 100644 vendor/gems/sidekiq-pro-3.4.5/web/views/filtering.erb create mode 100644 workers/trigger_app_event.rb diff --git a/Gemfile b/Gemfile index 86df3bd..f61ed4c 100644 --- a/Gemfile +++ b/Gemfile @@ -18,13 +18,14 @@ gem 'redis' gem 'redis-namespace' gem 'savon', "~> 2.1.0" gem 'coffee-script' +# Error reporting +gem 'error-reporter', :git => 'https://github.com/SupportBee/ErrorReporter.git' # unicorn 4.1.0 fixes a bug that broke unicorn restarts # @see http://mongrel-unicorn.rubyforge.narkive.com/QM9xHegx/ruby-2-0-bad-file-descriptor-errno-ebadf gem 'unicorn', '>= 4.1.1' +gem "sidekiq-pro", "3.4.5", :path => "vendor/gems/sidekiq-pro-3.4.5" # Monitoring gem 'newrelic_rpm', :require => false -# Error reporting -gem 'error-reporter', :git => 'https://github.com/SupportBee/ErrorReporter.git' # App gems gem 'tinder' @@ -51,7 +52,7 @@ group :development do # Deploy gems gem 'capistrano', "= 2.15.5" gem 'capistrano-ext' - gem 'rvm-capistrano' + gem 'rvm-capistrano', :require => false end group :test do diff --git a/Gemfile.lock b/Gemfile.lock index 9fa109c..10c14be 100644 --- a/Gemfile.lock +++ b/Gemfile.lock @@ -5,6 +5,12 @@ GIT error-reporter (0.1.0) honeybadger (= 2.2.0) +PATH + remote: vendor/gems/sidekiq-pro-3.4.5 + specs: + sidekiq-pro (3.4.5) + sidekiq (>= 4.1.5) + GEM remote: https://rubygems.org/ specs: @@ -40,6 +46,8 @@ GEM coffee-script-source execjs coffee-script-source (1.3.3) + concurrent-ruby (1.0.5) + connection_pool (2.2.1) crack (0.4.3) safe_yaml (~> 1.0.0) diff-lcs (1.2.5) @@ -108,16 +116,16 @@ GEM coderay (~> 1.1.0) method_source (~> 0.8.1) slop (~> 3.4) - rack (1.4.1) - rack-protection (1.2.0) + rack (1.6.5) + rack-protection (1.5.3) rack rack-test (0.6.1) rack (>= 1.0) raindrops (0.17.0) rake (0.9.2.2) - redis (3.0.2) - redis-namespace (1.2.1) - redis (~> 3.0.0) + redis (3.3.3) + redis-namespace (1.5.3) + redis (~> 3.0, >= 3.0.4) rest-client (1.6.7) mime-types (>= 1.16) restforce (1.4.2) @@ -161,6 +169,11 @@ GEM nokogiri (>= 1.4.0) nori (~> 2.0.3) wasabi (~> 3.0.0) + sidekiq (4.2.10) + concurrent-ruby (~> 1.0) + connection_pool (~> 2.2, >= 2.2.0) + rack-protection (>= 1.5.0) + redis (~> 3.2, >= 3.2.1) simple_oauth (0.1.9) sinatra (1.3.2) rack (~> 1.3, >= 1.3.6) @@ -241,6 +254,7 @@ DEPENDENCIES rubyzoho (= 0.1.7) rvm-capistrano savon (~> 2.1.0) + sidekiq-pro (= 3.4.5)! sinatra sinatra-initializers thor diff --git a/config.ru b/config.ru index b874a37..0768574 100644 --- a/config.ru +++ b/config.ru @@ -3,4 +3,6 @@ require './config/load' puts "Preparing Assets..." 
SupportBeeApp::Build.build if PLATFORM_ENV == 'development' -run RunApp +# run RunApp +require "sidekiq/web" +run Rack::URLMap.new('/' => RunApp, '/sidekiq' => Sidekiq::Web) diff --git a/config/eye/staging.eye b/config/eye/staging.eye new file mode 100644 index 0000000..89e7489 --- /dev/null +++ b/config/eye/staging.eye @@ -0,0 +1,23 @@ +RAILS_ENV = ENV['RAILS_ENV'] || 'staging' +WORKING_DIR = ENV['RAILS_ROOT'] || '/home/rails/apps/supportbee_app_platform/current' +PID_DIR = "#{WORKING_DIR}/tmp/pids" + +Eye.config do + logger File.join WORKING_DIR, 'log', 'eye.log' +end + +Eye.application 'AppPlatform' do + working_dir WORKING_DIR + + process 'sidekiq' do + start_command "rvm-exec ruby-2.2.3 bundle exec sidekiq -C #{WORKING_DIR}/config/sidekiq/staging.yml -r #{WORKING_DIR}/config/load.rb" + stop_signals [:USR1, 25.seconds, :TERM, 15.seconds] # See https://github.com/mperham/sidekiq/wiki/Signals + + env 'RAILS_ENV' => RAILS_ENV + daemonize true + pid_file "#{PID_DIR}/sidekiq.pid" + + stdall "#{WORKING_DIR}/log/sidekiq.log" + end +end + diff --git a/config/load.rb b/config/load.rb index 11cea59..f74ead8 100644 --- a/config/load.rb +++ b/config/load.rb @@ -19,6 +19,7 @@ Dir["#{PLATFORM_ROOT}/lib/helpers/**/*.rb"].each { |f| require f } Dir["#{PLATFORM_ROOT}/lib/*.rb"].each { |f| require f } Dir["#{PLATFORM_ROOT}/apps/*/*.rb"].each { |f| require f } +Dir["#{PLATFORM_ROOT}/workers/*.rb"].each { |f| require f } app_config = YAML.load_file("#{PLATFORM_ROOT}/config/sba_config.yml")[PLATFORM_ENV]['app_platform'] SECRET_CONFIG = YAML.load_file("#{PLATFORM_ROOT}/config/secret_config.yml")[PLATFORM_ENV] @@ -39,13 +40,13 @@ FileUtils.mkdir(log_dir) unless File.exists?(log_dir) LOGGER = Logger.new(log_url) -redis_options = APP_CONFIG['redis'] +REDIS_CONFIG = APP_CONFIG['redis'] redis = nil if PLATFORM_ENV == 'test' require 'mock_redis' - redis = MockRedis.new(db: redis_options['db']) + redis = MockRedis.new(db: REDIS_CONFIG['db']) else - redis = Redis.new(host: redis_options['host'], db: redis_options['db']) + redis = Redis.new(host: REDIS_CONFIG['host'], db: REDIS_CONFIG['db']) redis = Redis::Namespace.new(:ap, redis: redis) end REDIS = redis @@ -56,3 +57,18 @@ require 'error-reporter' require "#{PLATFORM_ROOT}/run_app" + +# +# Sidekiq +# + +SIDEKIQ_REDIS_CONFIG = APP_CONFIG['sidekiq_redis'] +default_redis_port = 6379 +port = SIDEKIQ_REDIS_CONFIG['port'] || default_redis_port +redis_url = URI::Generic.build(scheme: "redis", host: REDIS_CONFIG['host'], port: port, path: "/#{REDIS_CONFIG['db']}").to_s # redis://127.0.0.1:6379/2 +Sidekiq.configure_server do |config| + config.redis = { url: redis_url } +end +Sidekiq.configure_client do |config| + config.redis = { url: redis_url } +end diff --git a/config/sba_config.yml.example b/config/sba_config.yml.example index fbf3c9c..6563132 100644 --- a/config/sba_config.yml.example +++ b/config/sba_config.yml.example @@ -6,7 +6,11 @@ development: cloudfront_base_url: http://localhost:9292 redis: host: '127.0.0.1' - db: 1 + db: 6 + sidekiq_redis: + host: '127.0.0.1' + db: 6 + # port: 6479 test: app_platform: @@ -16,4 +20,8 @@ test: cloudfront_base_url: http://localhost:9292 redis: host: '127.0.0.1' - db: 2 + db: 9 + sidekiq_redis: + host: '127.0.0.1' + db: 9 + # port: 6479 diff --git a/config/sidekiq/staging.yml b/config/sidekiq/staging.yml new file mode 100644 index 0000000..dbd2ab6 --- /dev/null +++ b/config/sidekiq/staging.yml @@ -0,0 +1,2 @@ +:queues: + - app_events \ No newline at end of file diff --git a/lib/base.rb b/lib/base.rb index a64f0ba..a4ceadb 
100644 --- a/lib/base.rb +++ b/lib/base.rb @@ -234,6 +234,10 @@ def inherited(app) def setup_for(sinatra_app) sinatra_app.setup(self) end + + def find_from_slug(app_slug) + SupportBeeApp::Base.apps.detect { |app_class| app_class.slug == app_slug } + end end self.env ||= PLATFORM_ENV diff --git a/run_app.rb b/run_app.rb index ca49457..7f2f141 100644 --- a/run_app.rb +++ b/run_app.rb @@ -58,13 +58,11 @@ def self.setup(app_class) end post "/#{app_class.slug}/event/:event" do - data, payload = parse_request event = params[:event] - if app_class.trigger_event(event, data, payload) - status 204 - else - status 500 - end + data, payload = parse_request + Sidekiq::Client.enqueue(TriggerAppEvent, app_class.slug, event, data, payload) + + status 204 end post "/#{app_class.slug}/action/:action" do diff --git a/vendor/gems/sidekiq-pro-3.4.5/README.md b/vendor/gems/sidekiq-pro-3.4.5/README.md new file mode 100644 index 0000000..ec6025d --- /dev/null +++ b/vendor/gems/sidekiq-pro-3.4.5/README.md @@ -0,0 +1,54 @@ +# Sidekiq Pro + +This gem adds advanced functionality and a commercial license for the Sidekiq +background job framework. + +## What's Provided + +* **Batch** - adds the notion of a set of jobs, so you can track progress + of the batch and receive notification when the batch is complete. The + Sidekiq Web UI provides a convenient overview of all Batches being processed + and their status. + +* **Reliability** - adds reliability upgrades to the client push to Redis + and the server fetch from Redis, to better withstand network outages + and process crashes. + +* **Much, much more** - Statsd, pause queues, API extensions, expire + jobs, etc. + + +## Download + +When you purchase Sidekiq Pro, you will receive an email within 24 hours +with your own personalized download URL. This URL can be used with a +Gemfile: + + source 'https://rubygems.org' + source 'https://YOUR:CODE@gems.contribsys.com/' + + gem 'sidekiq-pro' + +Please keep this URL private; I do reserve the right to revoke access if +the URL is made public and/or is being used to illegally download Sidekiq Pro. + + +## Usage + +Please see the Sidekiq wiki for in-depth documentation on each Sidekiq +Pro feature and how to use it. + + +## Licensing + +This library is sold commercially to provide support for the development of Sidekiq. +**The gem file or any of its contents may not be publicly distributed.** + +See COMM-LICENSE for the license terms. + + +## Support + +Please open an issue in the Sidekiq issue tracker or send an email to +the sidekiq mailing list. If you note that you are a Pro user, I will +make an effort to reply within 24 hours. diff --git a/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq-pro.rb b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq-pro.rb new file mode 100644 index 0000000..965340b --- /dev/null +++ b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq-pro.rb @@ -0,0 +1,82 @@ +require 'sidekiq' +require 'sidekiq/pro/version' +require 'sidekiq/pro/worker' +require 'sidekiq/pro/api' +require 'sidekiq/pro/push' +require 'sidekiq/pro/util' +require 'sidekiq/batch' + +Sidekiq.send(:remove_const, :LICENSE) +Sidekiq.send(:remove_const, :NAME) +Sidekiq::NAME = "Sidekiq Pro" +Sidekiq::LICENSE = "Sidekiq Pro #{Sidekiq::Pro::VERSION}, commercially licensed. Thanks for your support!" 
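The run_app.rb hunk above now returns 204 immediately and defers the event to Sidekiq. The worker it enqueues, workers/trigger_app_event.rb, appears in the diffstat (10 lines) but its body is not shown in this excerpt; a plausible sketch, assuming it uses the find_from_slug helper added to lib/base.rb and the app_events queue from config/sidekiq/staging.yml:

    # Hypothetical reconstruction of workers/trigger_app_event.rb (file not shown above)
    class TriggerAppEvent
      include Sidekiq::Worker
      sidekiq_options queue: 'app_events'

      def perform(app_slug, event, data, payload)
        app_class = SupportBeeApp::Base.find_from_slug(app_slug)
        app_class.trigger_event(event, data, payload)
      end
    end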
+ +Sidekiq.configure_server do + class Sidekiq::CLI + def self.banner + File.read(File.expand_path(File.join(__FILE__, '../sidekiq/intro.ans'))) + end + end + require 'sidekiq/pro/basic_fetch' + Sidekiq.options[:fetch] = Sidekiq::Pro::BasicFetch +end + + +# Enable various reliability add-ons: +# +# Sidekiq.configure_server do |config| +# config.reliable_fetch! +# config.reliable_scheduler! +# config.timed_fetch! +# # enable both +# config.reliable! +# end +# +module Sidekiq + def self.reliable_fetch! + require 'sidekiq/pro/fetch' + Sidekiq.options[:fetch] = Sidekiq::Pro::ReliableFetch + Sidekiq.options[:ephemeral_hostname] ||= !!ENV['DYNO'] + env = Sidekiq.options[:environment] + Sidekiq.options[:index] ||= 0 if !env || env == 'development' + Array(Sidekiq.options[:labels]) << 'reliable' + nil + end + + def self.super_fetch! + require 'sidekiq/pro/super_fetch' + Sidekiq.options[:fetch] = Sidekiq::Pro::SuperFetch + Array(Sidekiq.options[:labels]) << 'reliable' + nil + end + + def self.timed_fetch!(timeout = 3600) + require 'sidekiq/pro/timed_fetch' + Sidekiq.options[:fetch] = Sidekiq::Pro::TimedFetch + Array(Sidekiq.options[:labels]) << 'reliable' + + Sidekiq.configure_server do |config| + config.on(:startup) do + klass = Sidekiq::Pro::TimedFetch::Manager + klass.instance = klass.new(Sidekiq.options) + klass.instance.timeout = timeout + end + end + nil + end + + def self.reliable_scheduler! + require 'sidekiq/pro/scheduler' + Sidekiq.options[:scheduled_enq] = Sidekiq::Scheduled::FastEnq + end + + def self.reliable! + reliable_fetch! + reliable_scheduler! + end + + def self.redis_pool + # Slight tweak to allow sharding support + Thread.current[:sidekiq_redis_pool] || (@redis ||= Sidekiq::RedisConnection.create) + end +end diff --git a/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/batch.rb b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/batch.rb new file mode 100644 index 0000000..c92dfbb --- /dev/null +++ b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/batch.rb @@ -0,0 +1,305 @@ +require 'securerandom' +require 'sidekiq/shard_set' +require 'sidekiq/batch/callback' +require 'sidekiq/batch/client' +require 'sidekiq/batch/middleware' +require 'sidekiq/batch/status' + +module Sidekiq + ## + # Provide a higher-level Batch abstraction for units of work. + # Given a set of work, we want to break the set down to individual jobs + # for Sidekiq to process in parallel but then have an overall + # notification when the entire set is complete. + # + # batch = Sidekiq::Batch.new + # batch.on(:complete, self.class, :to => current_user.email) + # batch.jobs do + # # push messages to sidekiq + # end + # + # Sidekiq generates a unique Batch ID, along with the number of jobs pushed + # in the batch. + # + # Batches may be nested by creating a new Batch within another batch's +jobs+ + # method. When the child batch runs an event callback, it checks to see if + # it needs to fire the parent batch's event callback too. + + class Batch + + def self.redis(bid, &block) + idx = 0 + m = bid.match(/@@(\d+)/) + idx = m[1].to_i if m + Sidekiq::Shards.on(idx) do + Sidekiq.redis(&block) + end + end + + def redis(bid, &block) + self.class.redis(bid, &block) + end + + class NoSuchBatch < StandardError; end + + ONE_DAY = 60 * 60 * 24 + EXPIRY = ONE_DAY * 30 + + # Controls how long a batch record "lingers" in Redis before expiring. + # This allows APIs like Status#poll to check batch status even after + # the batch succeeds and is no longer needed. 
You can lower this + # constant if you create lots of batches, want to reclaim the memory + # and don't use polling. + LINGER = ONE_DAY + VALID_EVENTS = %w(complete success) + + attr_reader :created_at + attr_reader :bid + attr_reader :callbacks + attr_reader :parent_bid + attr_accessor :description + attr_accessor :callback_queue + + def initialize(bid=nil) + @expiry = EXPIRY + if bid + @bid = bid + @key = "b-#{bid}" + + props = redis(bid) do |conn| + conn.hgetall(@key) + end + raise NoSuchBatch, "Couldn't find Batch #{@bid} in redis" unless props['callbacks'] + raise "Batch #{@bid} has finished, you cannot modify it anymore" if props['deleted'] + @created_at = props['created_at'.freeze].to_f + @description = props['description'.freeze] + @parent_bid = props['parent'] + @callbacks = Sidekiq.load_json(props['callbacks']) + @mutable = false + @new = false + else + @bid = SecureRandom.urlsafe_base64(10) + + ss = Sidekiq::Shards + if ss.enabled? + @shard = ss.random_index + @bid += "@@#{@shard}" + else + @shard = 0 + end + + @key = "b-#{@bid}" + @created_at = Time.now.utc.to_f + @callbacks = {} + @mutable = true + @new = true + end + end + + def parent + Batch.new(parent_bid) if parent_bid + end + + def expiry + @expiry || EXPIRY + end + + def expires_at + Time.at(@created_at + expiry) + end + + # Retrieve the current set of JIDs associated with this batch. + def jids + redis(bid) do |conn| + conn.smembers("b-#{bid}-jids") + end + end + + def include?(jid) + redis(bid) do |conn| + conn.sismember("b-#{bid}-jids", jid) + end + end + alias_method :valid?, :include? + + def invalidate_all + result, _ = redis(bid) do |conn| + conn.multi do + conn.del("b-#{bid}-jids") + conn.hset(@key, "invalid", -1) + end + end + result + end + + def invalidate_jids(*jids) + count, _ = redis(bid) do |conn| + conn.multi do + conn.srem("b-#{bid}-jids", jids) + conn.hincrby(@key, "invalid", jids.size) + end + end + count + end + + def invalidated? + count = redis(bid) do |conn| + conn.hget(@key, "invalid").to_i + end + count != 0 + end + + def status + Status.new(@bid) + end + + def mutable? + !!@mutable + end + + ## + # Call a method upon completion or success of a batch. You + # may pass a bare Class, which will call "on_#{event}", or a + # String with the exact 'Class#method' to call. + # + # batch.on(:complete, MyClass) + # batch.on(:success, 'MyClass#foo') + # batch.on(:complete, MyClass, :email => current_user.email) + # + # The Class should implement a method signature like this: + # + # def on_complete(status, options) + # end + # + def on(event, call, options={}) + raise "Batch cannot be modified, jobs have already been defined" unless @mutable + e = event.to_s + raise ArgumentError, "Invalid event name: #{e}" unless VALID_EVENTS.include?(e) + + @callbacks ||= {} + @callbacks[e] ||= [] + @callbacks[e] << { call => options } + end + + ## + # Pass in a block which pushes all the work associated + # with this batch to Sidekiq. + # + # Returns the set of JIDs added to the batch. + # + # Note: all jobs defined within the block are pushed to Redis atomically + # so either the entire set of jobs are defined successfully or none at all. + def jobs(&block) + raise ArgumentError, "Must specify a block" if !block + parent_payloads, Thread.current[:sidekiq_batch_payloads] = Thread.current[:sidekiq_batch_payloads], [] + begin + myparent = nil + if mutable? + # Brand new batch, persist data to Redis. 
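+          # Persisted fields: 'created_at', 'callbacks' (as JSON) and 'description',
+          # plus 'cbq' when a callback queue is set and 'parent' when this batch
+          # is nested inside another batch's jobs block.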
+ data = ['created_at'.freeze, created_at, + 'callbacks'.freeze, Sidekiq.dump_json(callbacks), + 'description'.freeze, description] + if self.callback_queue + data << 'cbq'.freeze + data << self.callback_queue + end + if Thread.current[:sidekiq_batch] + @parent_bid = myparent = Thread.current[:sidekiq_batch].bid + data << 'parent'.freeze + data << myparent + end + + redis(bid) do |conn| + conn.multi do + conn.hmset(@key, *data) + # Default expiry is one day for newly created batches. + # If jobs are added to the batch, it is extended to 30 days. + conn.expire(@key, ONE_DAY) + end + end + end + + @mutable = false + @added = [] + + begin + parent, Thread.current[:sidekiq_batch] = Thread.current[:sidekiq_batch], self + block.call + ensure + Thread.current[:sidekiq_batch] = parent + end + + # If the jobs block produces no jobs, exit early. + return [] unless @added.size > 0 + + # Here's what we've been waiting for. We delay all + # batch chatter with Redis so we can send everything + # in one big atomic MULTI push. + redis(bid) do |conn| + conn.multi do + # As an optimization, don't continually zadd and zremrangebyscore when we're adding jobs + # to an existing batch + if @new + conn.zremrangebyscore('batches'.freeze, '-inf'.freeze, Time.now.to_f) + conn.zadd('batches'.freeze, @created_at + expiry, bid) + end + # only want to incr kids when the batch is first created, + # not when reopened to add more jobs. + if myparent + conn.hincrby("b-#{myparent}", "kids".freeze, 1) + conn.expire("b-#{myparent}", EXPIRY) + end + conn.expire @key, EXPIRY + + if !immediate_registration? + increment_batch_jobs_to_redis(conn, @added) + end + + if !Sidekiq::Shards.enabled? + Sidekiq::Client.new.flush(conn) + end + end + end + @new = false + + if Sidekiq::Shards.enabled? + client = Sidekiq::Client.new + client.redis_pool.with do |conn| + client.flush(conn) + end + end + @added + ensure + Thread.current[:sidekiq_batch_payloads] = parent_payloads + end + end + + # Not a public API + def register(jid) # :nodoc: + if immediate_registration? + redis(bid) do |conn| + conn.multi do + increment_batch_jobs_to_redis(conn, [jid]) + end + end + end + @added << jid + end + + private + + def immediate_registration? + defined?(Sidekiq::Testing) && Sidekiq::Testing.inline? 
+ end + + def increment_batch_jobs_to_redis(conn, jids) + jids_key = "b-#{bid}-jids" + + Sidekiq.logger.debug { "Adding #{jids.size} jobs to batch #{bid}, JIDs #{jids}" } + conn.hincrby(@key, "pending".freeze, jids.size) + conn.hincrby(@key, "total".freeze, jids.size) + conn.sadd(jids_key, jids) + conn.expire(jids_key, expiry) + end + end +end diff --git a/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/batch/callback.rb b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/batch/callback.rb new file mode 100644 index 0000000..41c15b9 --- /dev/null +++ b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/batch/callback.rb @@ -0,0 +1,171 @@ +module Sidekiq + class Batch + class Callback + include Sidekiq::Worker + + SUCCESS = 'success' + COMPLETE = 'complete' + + def perform(event, bid, queue='default') + logger.debug { "BID-#{bid} #{event}" } + status = Status.new(bid) + send(event.to_sym, status, queue) if status + end + + private + + def execute_callback(status, event, target, options) + klass_name, method = target.split('#') + klass = klass_name.constantize + meth = method || "on_#{event}" + inst = klass.new + inst.jid = jid if inst.respond_to?(:jid) + inst.send(meth, status, options) + end + + def success(status, queue) + # Run any success callbacks + if status.callbacks[SUCCESS] + status.callbacks[SUCCESS].each do |hash| + hash.each_pair do |target, options| + execute_callback(status, SUCCESS, target, options) + end + end + end + + bid = status.bid + pb = status.parent_bid + if pb + # Check to see if our success means the parent is now successful + key = "b-#{pb}" + success = "b-#{pb}-success" + cbcomp, cbsucc, _, _, pending, successes, kids, q = Sidekiq::Batch.redis(pb) do |conn| + conn.multi do + conn.get("#{key}-notify") + conn.get("#{key}-cbsucc") + conn.sadd(success, status.bid) + conn.expire(success, Sidekiq::Batch::EXPIRY) + conn.hincrby(key, "pending".freeze, 0) + conn.scard(success) + conn.hincrby(key, "kids".freeze, 0) + conn.hget(key, "cbq".freeze) + end + end + + if pending == 0 && successes == kids && cbcomp && cbcomp.to_i > 1 && !cbsucc && needs_success?(pb) + enqueue_callback(queue, ['success'.freeze, pb, q || queue]) + end + end + + Sidekiq::Batch.redis(bid) do |conn| + conn.multi do + conn.hsetnx("b-#{bid}", "deleted", "1") + conn.del "b-#{bid}-failinfo", "b-#{bid}-success", "b-#{bid}-complete", "b-#{bid}-jids" + conn.zrem('batches'.freeze, bid) + conn.expire "b-#{bid}", Sidekiq::Batch::LINGER + # we can't delete these two or running callbacks inline will recurse forever + conn.expire "b-#{bid}-notify", 60 + conn.expire "b-#{bid}-cbsucc", 60 + conn.publish("batch-#{bid}", '$') + end + end + + end + + def complete(status, queue) + # Run the complete callbacks for this batch + if status.callbacks[COMPLETE] + status.callbacks[COMPLETE].each do |hash| + hash.each_pair do |target, options| + execute_callback(status, COMPLETE, target, options) + end + end + end + + # if we have a parent batch, check to see if our + # completion means that it is complete now and we need to + # fire the complete callback for it. 
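+        # (Callback targets are plain classes: execute_callback above instantiates
+        # the target and sends on_complete/on_success with (status, options).
+        # A hypothetical target, for illustration only:
+        #   class ImportReporter
+        #     def on_complete(status, options)
+        #       AlertMailer.notify(options['to'], status.failures) if status.failures > 0
+        #     end
+        #   end
+        # )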
+ pb = status.parent_bid + if pb + cbcomp, _, _, complete, children, pending, fails, q = Sidekiq::Batch.redis(pb) do |conn| + conn.multi do + conn.get("b-#{pb}-notify") + + key = "b-#{pb}-complete" + conn.sadd(key, status.bid) + conn.expire(key, Sidekiq::Batch::EXPIRY) + conn.scard(key) + + key = "b-#{pb}" + conn.hincrby(key, "kids".freeze, 0) + conn.hincrby(key, "pending".freeze, 0) + conn.hlen("#{key}-failinfo") + conn.hget(key, "cbq".freeze) + end + end + + if complete == children && pending == fails && !cbcomp && needs_complete?(pb) + enqueue_callback(queue, ['complete'.freeze, pb, q || queue]) + end + end + + # Mark ourselves as complete now so that our success callback can + # be fired. + bid = status.bid + _, _, pending, children, bsucc, q = Sidekiq::Batch.redis(bid) do |conn| + conn.multi do + # 1 means the complete callback has been created + # 2 means the complete callback has run successfully + conn.incrby("b-#{bid}-notify", 1) + conn.publish("batch-#{bid}", '!') + if bid.length == 16 + conn.get("b-#{bid}-pending") + else + key = "b-#{bid}" + conn.hincrby(key, "pending".freeze, 0) + conn.hincrby(key, "kids".freeze, 0) + conn.scard("#{key}-success") + conn.hget(key, "cbq".freeze) + end + end + end + + if pending.to_i == 0 && children == bsucc && needs_success?(bid) + enqueue_callback(queue, ['success'.freeze, bid, q || queue]) + end + end + + def needs_success?(bid) + lock, _ = Sidekiq::Batch.redis(bid) do |conn| + notify_key = "b-#{bid}-cbsucc" + conn.pipelined do + conn.setnx(notify_key, 1) + conn.expire(notify_key, Sidekiq::Batch::EXPIRY) + end + end + lock + end + + def needs_complete?(bid) + lock, _ = Sidekiq::Batch.redis(bid) do |conn| + notify_key = "b-#{bid}-notify" + conn.pipelined do + conn.setnx(notify_key, 1) + conn.expire(notify_key, Sidekiq::Batch::EXPIRY) + end + end + lock + end + + def enqueue_callback(queue, args) + Sidekiq::Client.push('class' => Sidekiq::Batch::Callback, + 'queue' => queue, + 'args' => args) + end + end + + # Backwards compat for any Lifecycle jobs which are sitting in + # Redis during the 2.0 upgrade. + Lifecycle = Callback + end +end diff --git a/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/batch/client.rb b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/batch/client.rb new file mode 100644 index 0000000..79b6f36 --- /dev/null +++ b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/batch/client.rb @@ -0,0 +1,49 @@ +require 'sidekiq/client' + +module Sidekiq + class Client + + # + # The Sidekiq Batch client adds atomicity to batch definition: + # all jobs created within the +define+ block are pushed into a + # temporary array and then all flushed at once to Redis in a single + # transaction. This solves two problems: + # + # 1. We don't "half-create" a batch due to a networking issue + # 2. We don't have a "completed" race condition when creating the jobs slower + # than we can process them. + # + + def flush(conn) + return if collected_payloads.nil? + + collected_payloads.each do |payloads| + atomic_push(conn, payloads) + end + end + + private + + def collected_payloads + Thread.current[:sidekiq_batch_payloads] + end + + def raw_push_with_batch(payloads) + if defining_batch? + collected_payloads << payloads + true + else + raw_push_without_batch(payloads) + end + end + + # FIXME: I tried using Module#prepend but couldn't get it to work. + # Nothing but stack overflows for me. + alias_method :raw_push_without_batch, :raw_push + alias_method :raw_push, :raw_push_with_batch + + def defining_batch? 
+ Thread.current[:sidekiq_batch_payloads] + end + end +end diff --git a/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/batch/middleware.rb b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/batch/middleware.rb new file mode 100644 index 0000000..4ac23a9 --- /dev/null +++ b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/batch/middleware.rb @@ -0,0 +1,139 @@ +require 'sidekiq/batch/callback' + +module Sidekiq + class Batch + class Client + + def call(worker_class, msg, queue, redis_pool) + batch = Thread.current[:sidekiq_batch] + if batch + msg['bid'.freeze] = batch.bid + result = yield + batch.register(msg['jid'.freeze]) if result + result + else + yield + end + end + + end + + class Server + def call(worker, msg, queue) + worker.bid = bid = msg['bid'.freeze] + if bid + begin + yield + add_success(bid, msg['jid'.freeze], queue) + rescue Exception => e + add_failure(bid, msg, queue, e) + raise + end + else + yield + end + end + + private + + def add_success(bid, jid, queue) + _, cbsucc, cbcomp, pending, _, failures, children, bcomp, bsucc, q = Sidekiq::Batch.redis(bid) do |conn| + key = "b-#{bid}" + conn.multi do + conn.publish("batch-#{bid}", '+'.freeze) + conn.get("#{key}-cbsucc") + conn.get("#{key}-notify") + conn.hincrby(key, "pending".freeze, -1) + conn.hdel("#{key}-failinfo", jid) + conn.hlen("#{key}-failinfo") + conn.hincrby(key, "kids", 0) + conn.scard("#{key}-complete") + conn.scard("#{key}-success") + conn.hget(key, "cbq".freeze) + conn.srem("#{key}-jids", jid) + end + end + + # A batch is complete iff: + # 1. Its pending job count == failed job count + # 2. All child batches are complete. + if pending.to_i == failures.to_i && children == bcomp && !cbcomp && needs_complete?(bid) + enqueue_callback(q || queue, ['complete'.freeze, bid, q || queue]) + end + + # A batch is successful iff: + # 1. Its pending job count == 0 + # 2. Its complete callbacks have run. + # 3. All child batches are successful. 
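+        # (In the check below, cbcomp.to_i > 1 means the complete callback has
+        # already run, per the convention in callback.rb: the "-notify" key is
+        # set to 1 when the callback is created and incremented past 1 once it
+        # has run successfully.)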
+ if pending.to_i == 0 && children == bsucc && cbcomp && cbcomp.to_i > 1 && !cbsucc && needs_success?(bid) + enqueue_callback(q || queue, ['success'.freeze, bid, q || queue]) + end + end + + def needs_success?(bid) + lock, _ = Sidekiq::Batch.redis(bid) do |conn| + notify_key = "b-#{bid}-cbsucc" + conn.pipelined do + conn.setnx(notify_key, 1) + conn.expire(notify_key, Sidekiq::Batch::EXPIRY) + end + end + lock + end + + def needs_complete?(bid) + lock, _ = Sidekiq::Batch.redis(bid) do |conn| + notify_key = "b-#{bid}-notify" + conn.pipelined do + conn.setnx(notify_key, 1) + conn.expire(notify_key, Sidekiq::Batch::EXPIRY) + end + end + lock + end + + def add_failure(bid, msg, queue, ex) + jid = msg['jid'.freeze] + info = Sidekiq.dump_json([ex.class.name, ex.message]) + + cbcomp, _, _, pending, failures, children, bcomp, q = Sidekiq::Batch.redis(bid) do |conn| + conn.multi do + key = "b-#{bid}" + conn.get("#{key}-notify") + conn.hset("#{key}-failinfo", jid, info) + conn.expire("#{key}-failinfo", Batch::EXPIRY) + conn.hincrby(key, "pending".freeze, 0) + conn.hlen("#{key}-failinfo") + conn.hincrby(key, "kids".freeze, 0) + conn.scard("#{key}-complete") + conn.hget(key, "cbq".freeze) + conn.publish("batch-#{bid}", '-'.freeze) + end + end + if pending.to_i == failures && children == bcomp && !cbcomp && needs_complete?(bid) + enqueue_callback(q || queue, ['complete'.freeze, bid, q || queue]) + end + end + + def enqueue_callback(queue, args) + Sidekiq::Client.push('class'.freeze => Sidekiq::Batch::Callback, + 'queue'.freeze => queue, + 'args'.freeze => args) + end + end + end +end + +Sidekiq.configure_client do |config| + config.client_middleware do |chain| + chain.add Sidekiq::Batch::Client + end +end +Sidekiq.configure_server do |config| + config.client_middleware do |chain| + chain.add Sidekiq::Batch::Client + end + config.server_middleware do |chain| + chain.add Sidekiq::Batch::Server + end +end diff --git a/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/batch/status.rb b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/batch/status.rb new file mode 100644 index 0000000..ed7d5d1 --- /dev/null +++ b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/batch/status.rb @@ -0,0 +1,194 @@ +module Sidekiq + class Batch + ## + # A snapshot in time of the current Batch status. + # + # * total - number of jobs in this batch. + # * pending - number of jobs which have not reported success yet. + # * failures - number of jobs which have failed. + # + # Batch job(s) can fail and be retried through Sidekiq's retry feature. + # For this reason, a batch is considered complete once all jobs have + # been executed, even if one or more executions was a failure. + class Status + attr_reader :bid + attr_reader :failures + + def initialize(bid) + @bid = bid + load_data(bid) + end + + def load_data(bid) + @props, @failures, @completed = Sidekiq::Batch.redis(bid) do |conn| + conn.pipelined do + conn.hgetall("b-#{bid}") + conn.hlen("b-#{bid}-failinfo") + conn.scard("b-#{bid}-complete") + end + end + @pending = @props['pending'].to_i + @total = @props['total'].to_i + raise NoSuchBatch, "Couldn't find Batch #{bid} in redis" if @props.empty? 
+ end + + def parent_bid + @props['parent'] + end + + def parent + if parent_bid + @parent ||= Status.new(parent_bid) + end + end + + def child_count + @props['kids'].to_i + end + + def pending + @pending + end + + def total + @total + end + + def expiry + (@props['expiry'] || Batch::EXPIRY).to_i + end + + def description + @props['description'] + end + + def callbacks + @callbacks ||= Sidekiq.load_json(@props['callbacks']) + end + + def created_at + Time.at(@props['created_at'].to_f) + end + + def expires_at + created_at + expiry + end + + # Remove all info about this batch from Redis. The main batch + # data hash is kept around for 24 hours so it can be queried for status + # after success. + # + # Returns the bid if anything was deleted, nil if nothing was deleted. + def delete + result, _ = Sidekiq::Batch.redis(bid) do |conn| + conn.pipelined do + conn.hsetnx("b-#{bid}", "deleted", "1") + conn.del "b-#{bid}-failinfo", + "b-#{bid}-notify", + "b-#{bid}-cbsucc", + "b-#{bid}-success", + "b-#{bid}-complete", + "b-#{bid}-jids" + conn.zrem('batches'.freeze, bid) + conn.expire "b-#{bid}", Sidekiq::Batch::LINGER + end + end + result ? bid : nil + end + + def deleted? + !!@props['deleted'] + end + + def jids + Sidekiq::Batch.redis(bid) do |conn| + conn.smembers("b-#{bid}-jids") + end + end + + def include?(jid) + Sidekiq::Batch.redis(bid) do |conn| + conn.sismember("b-#{bid}-jids", jid) + end + end + + # returns true if any or all jids in the batch have been invalidated. + def invalidated? + count = @props["invalid"].to_i + count != 0 + end + + def success_pct + return 0 if total == 0 + ((total - pending) / Float(total)) * 100 + end + + def pending_pct + return 0 if total == 0 + ((pending - failures) / Float(total)) * 100 + end + + def failure_pct + return 0 if total == 0 + (failures / Float(total)) * 100 + end + + # A Batch is considered complete when no jobs are pending or + # the only pending jobs have already failed. Any child batches + # must have also completed. + def complete? + pending == failures && (child_count == 0 || child_count == @completed) + end + + def join + poll + end + + def poll(polling_sleep = 1) + while true + begin + deleted, @pending, @failures = Sidekiq::Batch.redis(bid) do |conn| + conn.pipelined do + conn.hget("b-#{bid}", "deleted".freeze) + conn.hincrby("b-#{bid}", "pending".freeze, 0) + conn.hlen("b-#{bid}-failinfo") + end + end + break if deleted || complete? + sleep polling_sleep + rescue Sidekiq::Batch::NoSuchBatch + break + end + end + end + + Failure = Struct.new(:jid, :error_class, :error_message, :backtrace) + + # Batches store job failure info in a Hash, keyed off the bid. 
+ # The Hash contains { jid => [class name, error message] } + def failure_info + failures = Sidekiq::Batch.redis(bid) {|conn| conn.hgetall("b-#{bid}-failinfo") } + failures.map {|jid, json| Failure.new(jid, *Sidekiq.load_json(json)) } + end + + def data + { + :is_complete => complete?, + :bid => bid, + :total => total, + :pending => pending, + :description => description, + :failures => failures, + :created_at => created_at.to_f, + :fail_info => failure_info.map do |err| + { :jid => err.jid, :error_class => err.error_class, :error_message => err.error_message, :backtrace => nil } + end + } + end + + def to_json + Sidekiq.dump_json data + end + end + end +end diff --git a/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/intro.ans b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/intro.ans new file mode 100644 index 0000000..9646556 --- /dev/null +++ b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/intro.ans @@ -0,0 +1,18 @@ +/\./\ +L ---- -- ---,,-. ./.: 8. .-----8p--. .---.s,-'.:`--+ + ,8b 8i,F.oSSS"Sbo.o.s8 .\/.:'.oD \ /.:::.dS8b .\/ .: o8o ::::::| + `4P,oP ,o8;(88(`.:.4Y8 `"'::'.:."8DD:`\/ `:::.`888 :.db.::`P' `:' `: + o8P, .o8P 48bo.`:."`.dS`:'.oSSbDD8.`.oSSbo.`.888::oOOo oooo `.oSSbo o/ + d888Ld8P' `:."88888o.``888 d88',`488 d8('.`88b:88( .8P'.:`888 d88P `88P + 88 8P8P.`.:.`"Y88b 888 888 O 84P 888bod8P'.888o88 .:::888 88( O 888 + dP'`Y8Pdb .:::.)8P 888 888 " 888b888'....: 888`.88b.::888 88b " 88( +T ;88i4P"888888P'o888o.Y8bod88P"`Y8bod8P d888b..488od888b V8bod888o +88'48P::`odb.dSo,, ."P'.roy<sac>` ""'.`.o.`.."S"888. +`8  ".:'d888P'."S8b.`.::... ...' `.:::.d8b ::' dP' +.8` '"888 ::.)88\odbo d8b`.odSso.::.888.::."488" +.8 `:888bod88P'.`888P"8Pd88'`488b::888::::.`8\ +P.:888 "P'..:::88(.:."88( O )88::48P::'/'/\ \ +/\ 888`::./\`::888:::,888.".888::.o./\<:< > > + - - -- --- ------. .---' d888bo./ `-d888b.`:`Y8SbS8P'--48P' \ \/ / +\/`8P':/" \/ """"\/\/ +P'\//<>\ diff --git a/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/middleware/server/statsd.rb b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/middleware/server/statsd.rb new file mode 100644 index 0000000..a6967f7 --- /dev/null +++ b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/middleware/server/statsd.rb @@ -0,0 +1,42 @@ +module Sidekiq + module Middleware + module Server + + ## + # Send Sidekiq job metrics to a statsd server. 
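+      # (Registration sketch; assumes a statsd-ruby style client object
+      # passed via the :client option:
+      #   Sidekiq.configure_server do |config|
+      #     config.server_middleware do |chain|
+      #       chain.add Sidekiq::Middleware::Server::Statsd,
+      #                 client: Statsd.new('127.0.0.1', 8125)
+      #     end
+      #   end
+      # )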
+ # + # Stats are namespaced by Worker class name: + # + # jobs.WorkerClassName.count (counter) + # jobs.WorkerClassName.success (counter) + # jobs.WorkerClassName.failure (counter) + # jobs.WorkerClassName.perform (time gauge) + # + # Also sets global counters for tracking total job counts: + # + # jobs.count + # jobs.success + # jobs.failure + class Statsd + def initialize(options={}) + @statsd = options[:client] || raise("statsd support requires a :client option") + end + + def call(worker, msg, queue, &block) + w = msg['wrapped'.freeze] || worker.class.to_s + begin + @statsd.increment("jobs.count") + @statsd.increment("jobs.#{w}.count") + @statsd.time("jobs.#{w}.perform", &block) + @statsd.increment("jobs.success") + @statsd.increment("jobs.#{w}.success") + rescue Exception + @statsd.increment("jobs.failure") + @statsd.increment("jobs.#{w}.failure") + raise + end + end + end + end + end +end diff --git a/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/api.rb b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/api.rb new file mode 100644 index 0000000..5a02a4a --- /dev/null +++ b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/api.rb @@ -0,0 +1,147 @@ +require 'sidekiq/api' +require 'sidekiq/pro/scripting' +require 'sidekiq/pro/config' + +module Sidekiq + def self.redis_version + @redis_version ||= Sidekiq.redis {|c| c.info["redis_version"] } + end + + # Allows enumeration of all Batches in Redis. + # Example: + # + # Sidekiq::BatchSet.new.each do |status| + # puts status.bid + # end + class BatchSet + include Enumerable + + def size + @_size ||= Sidekiq.redis do |conn| + conn.zcard("batches".freeze) + end + end + + def each + initial_size = size + offset_size = 0 + page = -1 + page_size = 50 + + loop do + range_start = page * page_size + offset_size + range_end = page * page_size + offset_size + (page_size - 1) + elements = Sidekiq.redis do |conn| + conn.zrange "batches".freeze, range_start, range_end, with_scores: true + end + break if elements.empty? + page -= 1 + elements.each do |element, score| + begin + yield Sidekiq::Batch::Status.new(element) + rescue Sidekiq::Batch::NoSuchBatch + end + end + offset_size = initial_size - size + end + end + + end + + class Queue + # Delete a job from the given queue. + def delete_job(jid) + raise ArgumentError, "No JID provided" unless jid + Sidekiq::Pro::Scripting.call(:queue_delete_by_jid, ["queue:#{name}"], [jid]) + end + + # Remove all jobs in the queue with the given class. + # Accepts a String or Class but make sure to pass the fully + # qualified Class name if you use a String. + def delete_by_class(klass) + raise ArgumentError, "No class name provided" unless klass + size = self.size + page_size = 50 + result = 0 + r = size - 1 + q = "queue:#{name}" + klss = klass.to_s + + Sidekiq.redis do |conn| + while r >= 0 do + l = r - page_size + if l < 0 then + l = 0 + end + jobs = conn.lrange(q, l, r) + jobs.each do |jobstr| + if jobstr.index(klss) then + job = Sidekiq.load_json(jobstr) + if job['class'] == klss then + conn.lrem(q, -1, jobstr) + result = result + 1 + end + end + end + r = r - page_size + end + end + return result + end + + def unpause! + result, _ = Sidekiq.redis do |conn| + conn.multi do + conn.srem('paused', name) + Sidekiq::Pro::Config.publish(conn, :unpause, name) + end + end + result + end + + def pause! + result, _ = Sidekiq.redis do |conn| + conn.multi do + conn.sadd('paused', name) + Sidekiq::Pro::Config.publish(conn, :pause, name) + end + end + result + end + + def paused? 
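+      # Reads the 'paused' set written by pause!/unpause! above; those methods
+      # also broadcast the change via Sidekiq::Pro::Config so running fetchers
+      # react immediately instead of re-reading this set.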
+ Sidekiq.redis { |conn| conn.sismember('paused', name) } + end + end + + class JobSet + def find_job(jid) + Sidekiq.redis do |conn| + conn.zscan_each(name, :match => "*#{jid}*") do |entry, score| + job = JSON.parse(entry) + matched = job["jid"] == jid + return SortedEntry.new(self, score, entry) if matched + end + end + nil + end + + # Efficiently scan through a job set, returning any + # jobs which contain the given substring. + def scan(match, &block) + regexp = "*#{match}*" + Sidekiq.redis do |conn| + if block_given? + conn.zscan_each(name, :match => regexp) do |entry, score| + yield SortedEntry.new(self, score, entry) + end + else + conn.zscan_each(name, :match => regexp).map do |entry, score| + SortedEntry.new(self, score, entry) + end + end + end + end + + end +end diff --git a/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/basic_fetch.rb b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/basic_fetch.rb new file mode 100644 index 0000000..035e6e5 --- /dev/null +++ b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/basic_fetch.rb @@ -0,0 +1,45 @@ +require 'sidekiq/fetch' +require 'sidekiq/pro/config' + +module Sidekiq::Pro + + # Adds pause queue support to Sidekiq's basic fetch strategy. + class BasicFetch < ::Sidekiq::BasicFetch + def initialize(options, events=Sidekiq::Pro::Config) + super(options) + + members = Sidekiq.redis do |conn| + conn.smembers("paused") + end + @paused = Set.new(Array(members)) + @changed = true + @original = options[:queues].dup + + events.register(self) + end + + def notify(verb, payload) + if verb == :pause + @paused << payload + @changed = true + elsif verb == :unpause + @paused.delete payload + @changed = true + end + end + + def queues_cmd + if @changed + queues = (@original - @paused.to_a).map {|q| "queue:#{q}" } + if @strictly_ordered_queues + queues = queues.uniq + queues << TIMEOUT + end + @queues = queues + @changed = nil + end + super + end + + end +end diff --git a/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/config.rb b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/config.rb new file mode 100644 index 0000000..4119ba2 --- /dev/null +++ b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/config.rb @@ -0,0 +1,92 @@ +require 'sidekiq/util' + +module Sidekiq::Pro + + Message = Struct.new(:verb, :payload) + + ## + # Allows for real-time configuration updates to be published to all + # Sidekiq processes cluster-wise. For example, pausing and unpausing + # queues is now instantaneous via this mechanism. + # + # Event listeners register their interest via #register and must + # supply a `notify(verb, payload)` method. + # + # Sidekiq::Pro::Config.register(self) + # + # You can broadcast a config event via `publish`: + # + # Sidekiq::Pro::Config.publish(:boom, { 'some' => 'info' }) + # + # The `notify` method on all registered listeners on all Sidekiq processes + # will be called. + # + # NOTE: pubsub is not persistent so you need to ensure that your listeners + # can pull the current state of the system from Redis. + # + class ConfigListener + include Sidekiq::Util + + CHANNEL = "sidekiq:config" + + def initialize + @thread = nil + @done = false + @handlers = [] + end + + def register(handler) + @handlers << handler + end + + # Takes a connection because it should be called as part of a larger + # `multi` block to update Redis. 
+ def publish(conn, verb, payload) + conn.publish(CHANNEL, Marshal.dump(Message.new(verb, payload))) + end + + def start + @thread ||= safe_thread("config", &method(:listen)) + end + + def terminate + @done = true + @thread.raise Sidekiq::Shutdown + end + + private + + def listen + while !@done + begin + Sidekiq.redis do |conn| + conn.psubscribe(CHANNEL) do |on| + on.pmessage do |pattern, channel, msg| + message = Marshal.load(msg) + @handlers.each do |watcher| + watcher.notify(message.verb, message.payload) + end + end + end + end + rescue Sidekiq::Shutdown + rescue => ex + handle_exception(ex) + sleep 1 + end + end + end + + end + + Config = ConfigListener.new +end + +Sidekiq.configure_server do |config| + config.on(:startup) do + Sidekiq::Pro::Config.start + end + config.on(:quiet) do + Sidekiq::Pro::Config.terminate + end +end diff --git a/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/expiry.rb b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/expiry.rb new file mode 100644 index 0000000..d389dfe --- /dev/null +++ b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/expiry.rb @@ -0,0 +1,46 @@ +# Require in your initializer: +# +# require 'sidekiq/pro/expiry' +# +# Use like: +# +# class MyWorker +# sidekiq_options expires_in: 30.minutes +# +module Sidekiq::Middleware::Expiry + + class Client + def call(worker, msg, queue, redis_pool) + if msg['expires_in'] && !msg['expires_at'] + ein = msg['expires_in'].to_f + raise ArgumentError, "expires_in must be a relative time, not absolute time" if ein > 1_000_000_000 + msg['expires_at'] = (msg['at'] || Time.now.to_f) + ein + end + yield + end + end + + class Server + def call(worker, msg, queue) + if msg['expires_at'] && Time.now > Time.at(msg['expires_at']) + return worker.logger.info("Expired job #{worker.jid}") + end + + yield + end + end +end + +Sidekiq.configure_client do |config| + config.client_middleware do |chain| + chain.add Sidekiq::Middleware::Expiry::Client + end +end +Sidekiq.configure_server do |config| + config.client_middleware do |chain| + chain.add Sidekiq::Middleware::Expiry::Client + end + config.server_middleware do |chain| + chain.add Sidekiq::Middleware::Expiry::Server + end +end diff --git a/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/fetch.rb b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/fetch.rb new file mode 100644 index 0000000..8c3e796 --- /dev/null +++ b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/fetch.rb @@ -0,0 +1,262 @@ +require 'concurrent' +require 'sidekiq/pro/config' + +module Sidekiq::Pro + ## + # Provides reliable queue processing via Redis' rpoplpush command. + # + # 1. retrieve the work while pushing it to our private queue for this process. + # 2. process the work + # 3. acknowledge the work by removing it from our private queue + # + # If we crash during this process, upon restart we'll pull any existing work from + # the private queue and work on that first, effectively recovering the jobs that + # were processing during the crash. 
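Activation matches the hooks defined in sidekiq-pro.rb earlier in this patch; a minimal server-side sketch (ReliableFetch#initialize below raises unless a per-process :index is configured):

    Sidekiq.configure_server do |config|
      config.reliable_fetch!   # requires options[:index] to be an Integer
    end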
+ class ReliableFetch + def initialize(retriever=Retriever.instance, options) + raise ArgumentError, "reliable fetch requires a process index option" if !options[:index].is_a?(Integer) + @retriever = retriever + end + + def retrieve_work + @retriever.retrieve_work + end + + def self.bulk_requeue(in_progress, options) + # Ignore the in_progress arg passed in; rpoplpush lets us know everything in process + Sidekiq.redis do |conn| + get_queues(options).each do |(queue, working_queue)| + while conn.rpoplpush(working_queue, queue) + Sidekiq.logger.info {"Moving job from #{working_queue} back to #{queue}"} + end + end + end + rescue => ex + # best effort, ignore Redis network errors + Sidekiq.logger.info { "Failed to requeue: #{ex.message}" } + end + + def self.private_queue(q, options) + if options[:ephemeral_hostname] + # Running on Heroku, hostnames are not predictable or stable. + "queue:#{q}_#{options[:index]}" + else + "queue:#{q}_#{Socket.gethostname}_#{options[:index]}" + end + end + + def self.get_queues(options) + options[:queues].map {|q| ["queue:#{q}", private_queue(q, options)] } + end + + # Each Processor thread calls #retrieve_work concurrently. Since our + # reliable queue check is pretty heavyweight, we map all calls to #retrieve_work + # onto a single thread using a C::TPE. This singleton encapsulates the + # single thread and call to Redis. + class Retriever + include Sidekiq::Util + attr_accessor :options + attr_accessor :paused + class << self + attr_accessor :instance + end + + def initialize + @paused = Set.new + @internal = [] + @done = false + @changed = true + end + + def listen_for_pauses(events=Sidekiq::Pro::Config) + members = Sidekiq.redis do |conn| + conn.smembers("paused") + end + @paused = Set.new(Array(members)) + @changed = true + events.register(self) + end + + def notify(verb, payload) + if verb == :pause + @paused << payload + @changed = true + elsif verb == :unpause + @paused.delete payload + @changed = true + end + end + + def start(options) + @options = options + @pool ||= Concurrent::ThreadPoolExecutor.new( + min_threads: 1, + max_threads: 1, + max_queue: options[:concurrency], + ) + @queues = ReliableFetch.get_queues(@options) + @algo = (options[:strict] && @queues.length == @queues.uniq.length) ? Strict : Weighted + + Sidekiq.configure_server do |config| + config.on(:startup) do + @pool.post(&method(:startup)) + end + config.on(:shutdown) do + self.terminate + end + end + end + + def terminate + @done = true + @pool.shutdown + end + + def startup + watchdog("ReliableFetch#startup") do + Sidekiq.logger.info("ReliableFetch activated") + Sidekiq.logger.info("ReliableFetch is deprecated, super_fetch will become the new reliable_fetch in Pro 4.0.") + Sidekiq.logger.info("See https://github.com/mperham/sidekiq/wiki/Pro-Reliability-Server") + + # Heroku can get into a situation where the old and new process + # are running concurrently. Sleep 15 sec to ensure the old + # process is dead before we take jobs from the internal queue. + sleep(15) if ENV['DYNO'] + + # Need to unique here or we duplicate jobs! 
+ # https://github.com/mperham/sidekiq/issues/2120 + queues = @queues.uniq + bulk_reply = Sidekiq.redis do |conn| + conn.pipelined do + queues.each do |(_, working_queue)| + conn.lrange(working_queue, 0, -1) + end + end + end + internals = [] + bulk_reply.each_with_index do |vals, i| + queue = queues[i][0] + working_queue = queues[i][1] + xform = vals.map do |msg| + [queue, working_queue, msg] + end + internals.unshift(*xform) + end + @internal = internals + Sidekiq.logger.warn("ReliableFetch: recovering work on #{@internal.size} jobs") if @internal.size > 0 + end + end + + def retrieve_work + return nil if @done + begin + future = Concurrent::Future.execute(:executor => @pool, &method(:get_job)) + val = future.value(nil) + return val if val + raise future.reason if future.rejected? + rescue Concurrent::RejectedExecutionError + # shutting down race condition, #2827, nbd + end + end + + def get_job + return nil if @done + + if @internal.size > 0 + (queue, working_queue, msg) = @internal.pop + Sidekiq.logger.warn("Processing recovered job from queue #{queue} (#{working_queue}): #{msg.inspect}") + UnitOfWork.new(queue, msg, working_queue) + else + @algo.call(active_queues) + end + end + + private unless $TESTING + + def active_queues + if @changed + @queues = (@options[:queues] - @paused.to_a).map {|q| ["queue:#{q}", ReliableFetch.private_queue(q, @options)] } + @changed = nil + end + @queues + end + + # In a weighted ordering, treat the queues like we're drawing + # a queue out of a hat: draw a queue, attempt to fetch work. + # Draw another queue, attempt to fetch work. + Weighted = lambda {|queues| + queues = queues.shuffle + Strict.call(queues) + } + + Strict = lambda {|queues| + work = nil + Sidekiq.redis do |conn| + if queues.length > 1 + queues.each do |(queue, working_queue)| + result = conn.rpoplpush(queue, working_queue) + if result + work = UnitOfWork.new(queue, result, working_queue) + break + end + end + end + if work.nil? + queue, working_queue = queues.first + if queue + # On the last queue, block to avoid spinning 100% of the CPU checking for jobs thousands of times per + # second when no jobs are enqueued at all. The above shuffle will randomize the queue blocked on each time. + # Queues with higher weights should still get blocked on more frequently since they should end up as the + # last queue in queues more frequently. + result = conn.brpoplpush(queue, working_queue, Sidekiq.options[:fetch_timeout] || 1) + if result + work = UnitOfWork.new(queue, result, working_queue) + end + end + end + end + if work.nil? + # If we get here, it's because there are no queues to process. + # We can wind up with no queues to process if all queues + # have been paused. In that case, we don't want to enter into an infinite + # busy loop so we'll sleep. + sleep(1) + end + + # Do not explicitly return, or will indicate to the ConnectionPool that the connection was interrupted and + # disconnect you from Redis + work + } + + UnitOfWork = Struct.new(:queue, :job, :local_queue) do + def acknowledge + result = Sidekiq.redis {|conn| conn.lrem(local_queue, -1, job) } + if result != 1 + Sidekiq.logger.error("Unable to remove job from private queue!") + end + result + end + + def queue_name + queue.sub(/.*queue:/, '') + end + + def requeue + # no worries, mate, rpoplpush got our back! 
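+          # (At fetch time rpoplpush already copied the job into this process's
+          # private working queue; bulk_requeue pushes it back on shutdown and
+          # #startup recovers it after a crash, so there is nothing to do here.)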
+ end + end + + end + end +end + +Sidekiq.configure_server do |config| + config.on(:startup) do + if Sidekiq.options[:fetch] == Sidekiq::Pro::ReliableFetch + s = Sidekiq::Pro::ReliableFetch::Retriever.new + s.listen_for_pauses + s.start(Sidekiq.options) + Sidekiq::Pro::ReliableFetch::Retriever.instance = s + end + end +end diff --git a/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/push.rb b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/push.rb new file mode 100644 index 0000000..bf63645 --- /dev/null +++ b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/push.rb @@ -0,0 +1,86 @@ +require 'thread' + +module Sidekiq + # backup_limit controls the total number of pushes which will be enqueued + # before the client will start throwing away jobs. Note this limit is per-process. + # Note also that a bulk push is considered one push but might contain 100s or 1000s + # of jobs. + options[:backup_limit] = 1_000 + + # Reliable push is designed to handle transient network failures, + # which cause the client to fail to deliver jobs to Redis. It is not + # designed to be a perfectly reliable client but rather an incremental + # improvement over the existing client which will just fail in the face + # of any networking error. + # + # Each client process has a local queue, used for storage if a network problem + # is detected. Jobs are pushed to that queue if normal delivery fails. If + # normal delivery succeeds, the local queue is drained of any stored jobs. + # + # The standard `Sidekiq::Client.push` API returns a JID if the push to redis succeeded + # or raises an error if there was a problem. With reliable push activated, + # no Redis networking errors will be raised. + # + class Client + @@local_queue = ::Queue.new + + def local_queue + @@local_queue + end + + def raw_push_with_backup(payloads) + push_initial(payloads) do + drain if !local_queue.empty? + end + end + + # Return true if we saved the payloads without error. + # Yield if there was no exception. + def push_initial(payloads) + begin + raw_push_without_backup(payloads) + yield + rescue Redis::BaseError => ex + save_locally(payloads, ex) + end + true + end + + def drain + begin + ::Sidekiq.logger.info("[ReliablePush] Connectivity restored, draining local queue") + while !local_queue.empty? + (pool, payloads) = local_queue.pop(true) + oldpool, @redis_pool = @redis_pool, pool + raw_push_without_backup(payloads) + payloads = nil + end + rescue Redis::BaseError => ex + save_locally(payloads, ex) if payloads + ensure + @redis_pool = oldpool + end + end + + def save_locally(payloads, ex) + sz = local_queue.size + if sz > ::Sidekiq.options[:backup_limit] + ::Sidekiq.logger.error("[ReliablePush] Reached job backup limit, discarding job due to #{ex.class}: #{ex.message}") + false + else + ::Sidekiq.logger.warn("[ReliablePush] Enqueueing job locally due to #{ex.class}: #{ex.message}") if sz == 0 + local_queue << [@redis_pool, payloads] + payloads + end + end + + def self.reliable_push! 
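+      # Idempotent: the guard below aliases raw_push only once. Activation is
+      # opt-in from app code, e.g. (a sketch for this codebase):
+      #   Sidekiq::Client.reliable_push! unless PLATFORM_ENV == 'test'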
+ return false if self.private_instance_methods.include?(:raw_push_without_backup) + + ::Sidekiq.logger.debug("ReliablePush activated") + alias_method :raw_push_without_backup, :raw_push + alias_method :raw_push, :raw_push_with_backup + true + end + end +end diff --git a/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/scheduler.rb b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/scheduler.rb new file mode 100644 index 0000000..e8011a9 --- /dev/null +++ b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/scheduler.rb @@ -0,0 +1,25 @@ +require 'sidekiq/scheduled' +require 'sidekiq/pro/scripting' + +# Implements Lua-based schedule enqueuer +module Sidekiq + module Scheduled + class FastEnq < Sidekiq::Scheduled::Enq + def initialize + prefix = Sidekiq.redis { |conn| conn.respond_to?(:namespace) ? conn.namespace : nil } + @prefix = prefix.to_s == '' ? '' : "#{prefix}:" + end + + def enqueue_jobs(now=Time.now.to_f.to_s, sorted_sets=Sidekiq::Scheduled::SETS) + sorted_sets.map do |sset| + total = 0 + loop do + count = Sidekiq::Pro::Scripting.call(:fast_enqueue, [sset, "queues"], [now, @prefix]) + break total + count if count != 100 + total += count + end + end + end + end + end +end diff --git a/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/scripting.rb b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/scripting.rb new file mode 100644 index 0000000..5f76db6 --- /dev/null +++ b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/scripting.rb @@ -0,0 +1,113 @@ +module Sidekiq + module Pro + module Scripting + + LUA_SCRIPTS = { + :timed_requeue => <<-LUA, + local job = redis.call('zrem', KEYS[1], ARGV[1]) + if job == 1 then + redis.call('sadd', KEYS[2], ARGV[2]) + redis.call('rpush', KEYS[3], ARGV[1]) + return true + end + + return false + LUA + :super_requeue => <<-LUA, + -- foo + local val = redis.call('lrem', KEYS[1], -1, ARGV[1]) + if val == 1 then + redis.call('lpush', KEYS[2], ARGV[1]) + end + LUA + :queue_delete_by_jid => <<-LUA, + local window = 50 + local size = redis.call('llen', KEYS[1]) + local idx = 0 + local result = nil + while (idx < size) do + local jobs = redis.call('lrange', KEYS[1], idx, idx+window-1) + for _,jobstr in pairs(jobs) do + if string.find(jobstr, ARGV[1]) then + local job = cjson.decode(jobstr) + if job.jid == ARGV[1] then + redis.call('lrem', KEYS[1], 1, jobstr) + result = jobstr + break + end + end + end + idx = idx + window + end + return result + LUA + :fast_enqueue => <<-LUA, + local queue = ARGV[2].."queue:" + local jobs = redis.call('zrangebyscore', KEYS[1], '-inf', ARGV[1], "LIMIT", "0", "100") + local count = 0 + local now = ARGV[1] + for _,jobstr in pairs(jobs) do + local job = cjson.decode(jobstr) + + -- Hideous hack to work around cjson's braindead large number handling + -- https://github.com/mperham/sidekiq/issues/2478 + if job.enqueued_at == nil then + jobstr = string.sub(jobstr, 1, string.len(jobstr)-1) .. ',"enqueued_at":' .. now .. '}' + else + jobstr = string.gsub(jobstr, '(\"enqueued_at\":)[1-9][0-9.]+', '%1' .. now) + end + + redis.call('sadd', KEYS[2], job.queue) + + -- WARNING + -- We don't know which queues we'll be pushing jobs to until + -- we're actually executing so this script technically violates + -- the Redis Cluster requirements for Lua since we can't pass in + -- the full list of keys we'll be mutating. 
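+          -- In practice that means every queue must live on the same
+          -- Redis instance; this script is not safe under Redis Cluster.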
+ redis.call('lpush', queue..job.queue, jobstr) + + count = count + 1 + end + if count > 0 then + if count == 100 then + redis.call('zrem', KEYS[1], unpack(jobs)) + else + redis.call('zremrangebyscore', KEYS[1], '-inf', ARGV[1]) + end + end + return count + LUA + } + + SHAs = {} + + def self.bootstrap + Sidekiq.logger.debug { "Loading Sidekiq Pro Lua extensions" } + + Sidekiq.redis do |conn| + LUA_SCRIPTS.each_with_object(SHAs) do |(name, lua), memo| + memo[name] = conn.script(:load, lua) + end + end + end + + def self.call(name, keys, args) + bootstrap if SHAs.length != LUA_SCRIPTS.length + + Sidekiq.redis do |conn| + conn.evalsha(SHAs[name], keys, args) + end + + rescue Redis::CommandError => ex + if ex.message =~ /NOSCRIPT/ + # scripts got flushed somehow? + bootstrap + retry + else + raise + end + end + + end + end +end diff --git a/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/super_fetch.rb b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/super_fetch.rb new file mode 100644 index 0000000..9bd5d96 --- /dev/null +++ b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/super_fetch.rb @@ -0,0 +1,334 @@ +require 'securerandom' +require 'concurrent' +require 'set' +require 'sidekiq/pro/config' + +module Sidekiq::Pro + + ## + # Provides reliable queue processing via Redis' rpoplpush command. + # + # 1. retrieve the work while pushing it to our private queue for this process. + # 2. process the work + # 3. acknowledge the work by removing it from our private queue + # + class SuperFetch + include Sidekiq::Util + + def initialize(retriever=Retriever.instance, options) + @retriever = retriever + self.class.check_for_orphans if self.class.orphan_check?(options) + end + + def retrieve_work + @retriever.retrieve_work + end + + def self.bulk_requeue(in_progress, options) + # Ignore the in_progress arg passed in; rpoplpush lets us know everything in process + Sidekiq.redis do |conn| + get_queues(options).each do |(queue, working_queue)| + while conn.rpoplpush(working_queue, queue) + Sidekiq.logger.info {"Moving job from #{working_queue} back to #{queue}"} + end + end + id = options[:identity] + Sidekiq.logger.debug { "Unregistering super process #{id}" } + conn.multi do + conn.srem("super_processes", id) + conn.del("#{id}:super_queues") + end + end + rescue => ex + # best effort, ignore Redis network errors + Sidekiq.logger.warn { "Failed to requeue: #{ex.message}" } + end + + def self.private_queue(q, options) + "queue:sq_#{options[:identity]}_#{q}" + end + + def self.get_queues(options) + options[:queues].map {|q| ["queue:#{q}", private_queue(q, options)] } + end + + def self.orphan_check?(options) + delay = options.fetch(:super_fetch_orphan_check, 3600).to_i + return false if delay == 0 + + Sidekiq.redis do |conn| + conn.set("super_fetch_orphan_check", Time.now.to_f, ex: delay, nx: true) + end + end + + # This method is extra paranoid verification to check Redis for any possible + # orphaned queues with jobs. + def self.check_for_orphans + orphans = 0 + qcount = 0 + qs = Set.new + Sidekiq.redis do |conn| + ids = conn.smembers("super_processes") + Sidekiq.logger.debug("SuperFetch found #{ids.size} super processes") + + conn.scan_each(:match => "queue:sq_*", :count => 100) do |que| + qcount += 1 + _, id, *name = que.split("_") + next if ids.include?(id) + + # Race condition in pulling super_processes and checking queue liveness. + # Need to verify in Redis. 
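+          # (a process may have registered itself between the SMEMBERS
+          # call above and this scan)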
+ if !conn.sismember("super_processes", id) + tq = name.join("_") + qs << tq + while conn.rpoplpush(que, "queue:#{tq}") + orphans += 1 + end + end + + end + end + + if orphans > 0 + Sidekiq.logger.warn { "SuperFetch recovered #{orphans} orphaned jobs in queues: #{qs.to_a.inspect}" } + else + Sidekiq.logger.info { "SuperFetch found #{qcount} working queues with no orphaned jobs" } if qcount > 0 + end + orphans + end + + # Each Processor thread calls #retrieve_work concurrently. Since our + # reliable queue check is pretty heavyweight, we map all calls to #retrieve_work + # onto a single thread using a C::TPE. This singleton encapsulates the + # single thread and call to Redis. + class Retriever + include Sidekiq::Util + attr_accessor :options + attr_accessor :paused + class << self + attr_accessor :instance + end + + def initialize + @paused = Set.new + @internal = [] + @done = false + @changed = true + end + + def listen_for_pauses(events=Sidekiq::Pro::Config) + members = Sidekiq.redis do |conn| + conn.smembers("paused") + end + @paused = Set.new(Array(members)) + @changed = true + events.register(self) + end + + def notify(verb, payload) + if verb == :pause + @paused << payload + @changed = true + elsif verb == :unpause + @paused.delete payload + @changed = true + end + end + + def start(options) + @options = options + @pool ||= Concurrent::ThreadPoolExecutor.new( + min_threads: 1, + max_threads: 1, + max_queue: options[:concurrency], + ) + @queues = SuperFetch.get_queues(@options) + @algo = (options[:strict] && @queues.length == @queues.uniq.length) ? Strict : Weighted + + Sidekiq.configure_server do |config| + config.on(:startup) do + @pool.post(&method(:startup)) + end + config.on(:shutdown) do + self.terminate + end + end + end + + def terminate + @done = true + @pool.shutdown + end + + def cleanup_the_dead + count = 0 + Sidekiq.redis do |conn| + conn.sscan_each("super_processes") do |x| + next if conn.exists(x) + + Sidekiq.logger.debug { "Cleaning up old super process #{x}" } + + # heartbeat has expired, push back any leftover jobs in private queues + qs = conn.smembers("#{x}:super_queues") + qs.each do |priv| + (_, _, *q) = priv.split("_") + tq = q.join("_") + while conn.rpoplpush(priv, "queue:#{tq}") + count += 1 + end + end + + conn.del("#{x}:super_queues") + conn.srem("super_processes", x) + end + end + Sidekiq.logger.warn("SuperFetch: recovered #{count} jobs") if count > 0 + count + end + + def wait_for_heartbeat + beats = 0 + while !Sidekiq.redis {|conn| conn.exists(identity) } + # We want our own heartbeat to register before we + # can register ourself + sleep 0.1 + beats += 1 + raise "Did not find our own heartbeat within 10 seconds, that's bad" if beats > 100 + end unless $TESTING + beats + end + + def register_myself + Sidekiq.redis do |conn| + # We're officially alive in Redis so we can safely + # register this process as a super process! 
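+          # Illustrative layout for a (hypothetical) identity "box1:4242:0ab1":
+          #   SADD super_processes "box1:4242:0ab1"
+          #   SADD "box1:4242:0ab1:super_queues" "queue:sq_box1:4242:0ab1_default"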
+ qs = @queues.map{|x, priv| priv } + Sidekiq.logger.debug { "Registering super process #{identity} with #{qs}" } + conn.multi do + conn.sadd("super_processes", identity) + conn.sadd("#{identity}:super_queues", qs) + end + end + end + + def startup + watchdog("SuperFetch#startup") do + Sidekiq.logger.info("SuperFetch activated") + + Sidekiq.on(:heartbeat) do + register_myself + end + + cleanup_the_dead + wait_for_heartbeat + register_myself + end + end + + def retrieve_work + return nil if @done + begin + future = Concurrent::Future.execute(:executor => @pool, &method(:get_job)) + val = future.value(nil) + return val if val + raise future.reason if future.rejected? + rescue Concurrent::RejectedExecutionError + # shutting down race condition, #2827, nbd + end + end + + def get_job + return nil if @done + @algo.call(active_queues) + end + + private unless $TESTING + + def active_queues + if @changed + @queues = (@options[:queues] - @paused.to_a).map {|q| ["queue:#{q}", SuperFetch.private_queue(q, @options)] } + @changed = nil + end + @queues + end + + # In a weighted ordering, treat the queues like we're drawing + # a queue out of a hat: draw a queue, attempt to fetch work. + # Draw another queue, attempt to fetch work. + Weighted = lambda {|queues| + queues = queues.shuffle + Strict.call(queues) + } + + Strict = lambda {|queues| + work = nil + Sidekiq.redis do |conn| + if queues.length > 1 + queues.each do |(queue, working_queue)| + result = conn.rpoplpush(queue, working_queue) + if result + work = UnitOfWork.new(queue, result, working_queue) + break + end + end + end + if work.nil? + queue, working_queue = queues.first + if queue + # On the last queue, block to avoid spinning 100% of the CPU checking for jobs thousands of times per + # second when no jobs are enqueued at all. The above shuffle will randomize the queue blocked on each time. + # Queues with higher weights should still get blocked on more frequently since they should end up as the + # last queue in queues more frequently. + result = conn.brpoplpush(queue, working_queue, Sidekiq.options[:fetch_timeout] || 1) + if result + work = UnitOfWork.new(queue, result, working_queue) + end + end + end + end + if work.nil? + # If we get here, it's because there are no queues to process. + # We can wind up with no queues to process if all queues + # have been paused. In that case, we don't want to enter into an infinite + # busy loop so we'll sleep. 
+ sleep(1) + end + + # Do not explicitly return, or will indicate to the ConnectionPool that the connection was interrupted and + # disconnect you from Redis + work + } + + UnitOfWork = Struct.new(:queue, :job, :local_queue) do + def acknowledge + result = Sidekiq.redis {|conn| conn.lrem(local_queue, -1, job) } + if result != 1 + Sidekiq.logger.error("Unable to remove job from private queue!") + end + result + end + + def queue_name + queue.sub(/.*queue:/, '') + end + + def requeue + Sidekiq::Pro::Scripting.call(:super_requeue, [local_queue, queue], [job]) + end + end + + end + end +end + +Sidekiq.configure_server do |config| + config.on(:startup) do + opts = Sidekiq.options + if opts[:fetch] == Sidekiq::Pro::SuperFetch + s = Sidekiq::Pro::SuperFetch::Retriever.new + s.listen_for_pauses + s.start(opts) + Sidekiq::Pro::SuperFetch::Retriever.instance = s + end + end +end diff --git a/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/timed_fetch.rb b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/timed_fetch.rb new file mode 100644 index 0000000..891e0e3 --- /dev/null +++ b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/timed_fetch.rb @@ -0,0 +1,235 @@ +require 'sidekiq/pro/config' + +module Sidekiq + # Hack necessary to support Sidekiq < 4.1.2 + begin + Job.instance_method(:value) + rescue NameError + class Job + attr_reader :value + end + end + + ## + # The Pending set is where Sidekiq puts jobs that are being processed + # at the moment. They stay in the zset until they are acknowledged as + # finished or they time out. + class PendingSet < JobSet + def initialize + super 'pending' + end + + # Iterate through the pending set, pushing any jobs which have timed out + # back to their original queue. This should iterate oldest first so it won't + # pull over every single element, just ones which have timed out. + def pushback + @last = Time.now.to_f + prefix = Sidekiq.redis { |conn| conn.respond_to?(:namespace) ? conn.namespace : nil } + prefix = prefix.to_s == '' ? '' : "#{prefix}:" + + count = 0 + now = Time.now.to_f + each do |job| + if job.score < now + res = Sidekiq::Pro::Scripting.call(:timed_requeue, [Sidekiq::Pro::TimedFetch::PENDING, "queues", "queue:#{job.queue}"], [job.value, job.queue]) + count += 1 if res + else + return count + end + end + count + end + + # This will call pushback every TIMEOUT seconds, ensuring that lingering + # jobs are pushed back and rerun. + # + # Returns [int] number of jobs pushed back or [nil] if not time yet + def limited_pushback + return if Time.now.to_f < @last + Sidekiq::Pro::TimedFetch::TIMEOUT + pushback + end + + # This method allows you to destroy a pending job which is constantly failing + # and/or crashing the process. All you need is the JID. + # + # Sidekiq::PendingSet.new.destroy(jid) + # + def destroy(jid) + entry = find_job(jid) + delete_by_jid(entry.score, entry.jid) if entry + end + end + + module Pro + + ## + # Provides reliable queue processing within Redis using Lua. + # + # 1. Pull a job from a queue and push it onto a zset scored on job timeout. + # 2. Process the job + # 3. Acknowledge the work by removing it from the zset + # + # If we crash during this process, upon restart we'll move any jobs which have timed out + # back onto their respective queues, effectively recovering the jobs that + # were processing during the crash. 
This also means if a job is crashing Sidekiq, it won't + # be reprocessed for an hour, avoiding the dreaded "poison pill" wherein a job could + # crash all Sidekiq processes if we try to re-process it immediately. + # + # NB: this reliable algorithm does not require stable hostnames or unique indexes, unlike + # ReliableFetch, so it will work on Heroku, in Docker or ECS. It will autoscale with EBS. + # It does not use private queues so you won't orphan jobs. + # + # The drawback is that it has to use O(log N) Redis operations so it will get slower as + # more and more jobs are processed simultaneously. As always, monitor your load and Redis + # latency, please. + # + class TimedFetch + + # Jobs have 1 hour to complete or they can be + # pushed back onto the queue for re-processing. + TIMEOUT = 3600 + PENDING = 'pending'.freeze + + def initialize(options) + end + + def retrieve_work + Manager.instance.retrieve_work + end + + def self.bulk_requeue(in_progress, _=nil) + in_progress.each(&:requeue) + rescue => ex + # best effort, ignore Redis network errors + Sidekiq.logger.error { "Failed to bulk_requeue: #{ex.message}" } + end + + class Manager + attr_accessor :paused + attr_accessor :sleeptime + attr_accessor :timeout + class << self + attr_accessor :instance + end + + def initialize(sleeptime=1, options) + @options = options + @sleeptime = sleeptime + @paused = Set.new + @timeout = TIMEOUT + @queues = options[:queues].map {|q| "queue:#{q}" } + @shuffle = !(options[:strict] && @queues.length == @queues.uniq.length) + @ps = Sidekiq::PendingSet.new + + Sidekiq.logger.info("TimedFetch activated") + + count = @ps.pushback + Sidekiq.logger.warn { "TimedFetch pushed back #{count} timed out jobs" } if count > 0 + + listen_for_pauses + end + + def listen_for_pauses(events=Sidekiq::Pro::Config) + members = Sidekiq.redis do |conn| + conn.smembers("paused") + end + @paused = Set.new(Array(members)) + @changed = true + events.register(self) + end + + def notify(verb, payload) + if verb == :pause + @paused << payload + @changed = true + elsif verb == :unpause + @paused.delete payload + @changed = true + end + end + + def retrieve_work + count = @ps.limited_pushback + Sidekiq.logger.warn { "TimedFetch pushed back #{count} timed out jobs" } if count && count > 0 + + pull(active_queues) + end + + private unless $TESTING + + def active_queues + if @changed + @queues = (@options[:queues] - @paused.to_a).map {|q| "queue:#{q}" } + @changed = nil + end + @queues + end + + Sidekiq::Pro::Scripting::LUA_SCRIPTS[:timed_fetch] = <<-LUA + local timeout = ARGV[1] + local idx = 2 + local size = #KEYS + while idx <= size do + local queue = KEYS[idx] + local jobstr = redis.call('rpop', queue) + if jobstr then + redis.call('zadd', KEYS[1], timeout, jobstr) + return {queue, jobstr} + end + idx = idx + 1 + end + return nil + LUA + + def pull(queues) + # In a weighted ordering, treat the queues like we're drawing + # a queue out of a hat: draw a queue, attempt to fetch work. + # Draw another queue, attempt to fetch work. + queues = queues.shuffle if @shuffle + + limit = Time.now.to_f + timeout + queue, job = nil + + if queues.size > 0 + keys = [PENDING] + keys.concat(queues) + args = [limit] + + queue, job = Sidekiq.redis do |conn| + Sidekiq::Pro::Scripting.call(:timed_fetch, keys, args) + end + end + + if queue + UnitOfWork.new(queue, job) + else + # If we get here, it's because there are no queues to process + # or the queues are empty. We can wind up with no queues to + # process if all queues have been paused. 
In that case,
+          # we don't want to enter into an infinite busy loop so we'll sleep.
+          sleep(@sleeptime)
+          nil
+        end
+      end
+
+    end
+
+    UnitOfWork = Struct.new(:queue, :job) do
+      def acknowledge
+        result = Sidekiq.redis {|conn| conn.zrem(Sidekiq::Pro::TimedFetch::PENDING, job) }
+        Sidekiq.logger.error("Unable to remove job from pending set!") unless result
+        result
+      end
+
+      def queue_name
+        queue.sub(/.*queue:/, ''.freeze)
+      end
+
+      def requeue
+        Sidekiq::Pro::Scripting.call(:timed_requeue, [Sidekiq::Pro::TimedFetch::PENDING, "queues".freeze, queue], [job, queue_name])
+      end
+    end
+
+  end
+  end
+end
diff --git a/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/util.rb b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/util.rb
new file mode 100644
index 0000000..68cc182
--- /dev/null
+++ b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/util.rb
@@ -0,0 +1,14 @@
+module Sidekiq
+  module Pro
+    class << self
+      attr_accessor :logger
+    end
+    self.logger = Sidekiq.logger
+
+    def self.deprecated(msg)
+      logger.warn "#{msg} is deprecated and will be removed in a future release."
+      logger.warn "Please switch to Batch callbacks: https://github.com/mperham/sidekiq/wiki/Batches#callbacks"
+      logger.warn caller[1]
+    end
+  end
+end
diff --git a/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/version.rb b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/version.rb
new file mode 100644
index 0000000..629680d
--- /dev/null
+++ b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/version.rb
@@ -0,0 +1,5 @@
+module Sidekiq
+  module Pro
+    VERSION = "3.4.5"
+  end
+end
diff --git a/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/web.rb b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/web.rb
new file mode 100644
index 0000000..14dad72
--- /dev/null
+++ b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/web.rb
@@ -0,0 +1,131 @@
+require 'sidekiq/web'
+require 'sidekiq/batch'
+
+module Sidekiq::Pro
+  module Web
+    ROOT = File.expand_path('../../../../web', __FILE__)
+
+    module Helpers
+      def filtering(which)
+        render(:erb, File.read("#{ROOT}/views/filtering.erb"), :locals => { :which => which })
+      end
+
+      def product_version
+        "Sidekiq v#{Sidekiq::VERSION} / Sidekiq Pro v#{Sidekiq::Pro::VERSION}"
+      end
+
+      def search(jobset, substr)
+        resultset = jobset.scan(substr)
+        @current_page = 1
+        @count = @total_size = resultset.size
+        resultset
+      end
+
+      def filter_link(jid)
+        "<a href='#{root_path}filter/retries?substr=#{jid}'>#{jid}</a>"
+      end
+    end
+
+    # Sinatra only supports class-level configuration so if we want different
+    # app instances with different config, we need to create subclasses.
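+    # A usage sketch (the pool value is hypothetical):
+    #
+    #   POOL = ConnectionPool.new { Redis.new(:url => "redis://localhost:6379/0") }
+    #   run Sidekiq::Pro::Web.with(:redis_pool => POOL)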
+    def self.with(options)
+      Class.new(Sidekiq::Web) do
+        options.each_pair do |k, v|
+          self.settings.send("#{k}=", v)
+        end
+      end
+    end
+
+    def self.registered(app)
+      app.helpers ::Sidekiq::Pro::Web::Helpers
+      app.set :redis_pool, nil
+
+      app.before do |env=nil, a=nil|
+        if a
+          # rack
+          Thread.current[:sidekiq_redis_pool] = a.settings.redis_pool
+        else
+          # sinatra
+          Thread.current[:sidekiq_redis_pool] = self.settings.redis_pool
+        end
+      end
+
+      app.after do
+        Thread.current[:sidekiq_redis_pool] = nil
+      end
+
+      ####
+      # Batches
+      app.get "/batches/:bid" do
+        begin
+          @batch = Sidekiq::Batch::Status.new(params[:bid])
+          render(:erb, File.read("#{ROOT}/views/batch.erb"))
+        rescue Sidekiq::Batch::NoSuchBatch
+          redirect "#{root_path}batches"
+        end
+      end
+
+      app.get "/batches" do
+        @count = (params[:count] || 25).to_i
+        Sidekiq.redis {|conn| conn.zremrangebyscore('batches'.freeze, '-inf', Time.now.to_f) }
+        (@current_page, @total_size, @batches) = page("batches".freeze, params[:page], @count, :reverse => true)
+        render(:erb, File.read("#{ROOT}/views/batches.erb"))
+      end
+      app.tabs['Batches'] = 'batches'.freeze
+
+      ########
+      # Filtering
+      app.get '/filter/retries' do
+        x = params[:substr]
+        return redirect "#{root_path}retries" unless x && x != ''
+
+        @retries = search(Sidekiq::RetrySet.new, params[:substr])
+        erb :retries
+      end
+
+      app.post '/filter/retries' do
+        x = params[:substr]
+        return redirect "#{root_path}retries" unless x && x != ''
+
+        @retries = search(Sidekiq::RetrySet.new, params[:substr])
+        erb :retries
+      end
+
+      app.get '/filter/scheduled' do
+        x = params[:substr]
+        return redirect "#{root_path}scheduled" unless x && x != ''
+
+        @scheduled = search(Sidekiq::ScheduledSet.new, params[:substr])
+        erb :scheduled
+      end
+
+      app.post '/filter/scheduled' do
+        x = params[:substr]
+        return redirect "#{root_path}scheduled" unless x && x != ''
+
+        @scheduled = search(Sidekiq::ScheduledSet.new, params[:substr])
+        erb :scheduled
+      end
+
+      app.get '/filter/dead' do
+        x = params[:substr]
+        return redirect "#{root_path}morgue" unless x && x != ''
+
+        @dead = search(Sidekiq::DeadSet.new, params[:substr])
+        erb :morgue
+      end
+
+      app.post '/filter/dead' do
+        x = params[:substr]
+        return redirect "#{root_path}morgue" unless x && x != ''
+
+        @dead = search(Sidekiq::DeadSet.new, params[:substr])
+        erb :morgue
+      end
+
+      app.settings.locales << File.expand_path('locales', ROOT)
+    end
+  end
+end
+
+::Sidekiq::Web.register Sidekiq::Pro::Web
diff --git a/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/worker.rb b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/worker.rb
new file mode 100644
index 0000000..35e03d0
--- /dev/null
+++ b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/pro/worker.rb
@@ -0,0 +1,20 @@
+require 'sidekiq/worker'
+
+module Sidekiq
+  module Worker
+    attr_accessor :bid
+
+    def batch
+      @sbatch ||= Sidekiq::Batch.new(bid) if bid
+    end
+
+    # Verify the job is still considered part of the batch.
+    def valid_within_batch?
+      raise RuntimeError, "Not a member of a batch" unless bid
+
+      Sidekiq::Batch.redis(bid) do |conn|
+        conn.sismember("b-#{bid}-jids", jid)
+      end
+    end
+  end
+end
diff --git a/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/rack/batch_status.rb b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/rack/batch_status.rb
new file mode 100644
index 0000000..bc45480
--- /dev/null
+++ b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/rack/batch_status.rb
@@ -0,0 +1,45 @@
+=begin
+Using websockets is efficient but requires thin or another event-driven server so
+it's possible you'll need to deploy a new server.
Notably it will not work with Unicorn or +Passenger. Instead we'll add a really simple Rack middleware which allows reasonably efficient +polling. This will work reasonably well with single-threaded app servers and very well with +multi-threaded app servers like Puma. + +Use the middleware via `config.ru`: + + require 'sidekiq/rack/batch_status' + use Sidekiq::Rack::BatchStatus + run Rails::Application + +Then you can query the server to get a JSON blob of data about a batch +by passing the BID. + + http://server.example.org/batch_status/abcdef1234567890.json + +=end + +module Sidekiq + module Rack + + class BatchStatus + + def initialize(app, options={}) + @app = app + @mount = /\A#{Regexp.escape(options[:mount] || '/batch_status')}\/([0-9a-zA-Z_\-]{14,16}).json\z/ + end + + def call(env) + return @app.call(env) if env['PATH_INFO'] !~ @mount + + begin + batch = Sidekiq::Batch::Status.new($1) + [200, {'Content-Type' => 'application/json'}, [batch.to_json]] + rescue => ex + return [401, {'Content-Type' => 'text/plain'}, [ex.message]] + end + end + + end + + end +end diff --git a/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/shard_set.rb b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/shard_set.rb new file mode 100644 index 0000000..5505e85 --- /dev/null +++ b/vendor/gems/sidekiq-pro-3.4.5/lib/sidekiq/shard_set.rb @@ -0,0 +1,75 @@ +module Sidekiq + + ## + # The ShardSet is the set of Redis servers which Sidekiq can + # use to store data and perform operations. + # + # In the ShardSet, the index of the shard is **critical** and + # should never be changed. For that reason, you pass the + # complete set of shards at once, rather than adding them + # one at a time. You can safely add a new shard to the end + # of the list but you CANNOT remove or move shards in the list. + # + # Sidekiq::Shards.set [POOL1, POOL2, POOL3, POOL4] + # + # Since the shard set is Enumerable, you can do nice things like: + # + # total_enqueued_default_jobs = Sidekiq::Shards.map { Sidekiq::Queue.new.size }.inject(:+) + # + class ShardSet + include Enumerable + + def shards + @shards ||= [Sidekiq.redis_pool] + end + + def each(&block) + prev = Thread.current[:sidekiq_redis_pool] + shards.each do |x| + Thread.current[:sidekiq_redis_pool] = x + block.call(x) + end + ensure + Thread.current[:sidekiq_redis_pool] = prev + end + + def [](idx) + shards[idx] + end + + def set(*args) + @shards = args.flatten + end + + def enabled? + shards.size > 1 + end + + ## + # Return a random shard index + def random_index + return 0 if shards.size == 1 + + rand(shards.size) + end + + def on(idx, &block) + prev = Thread.current[:sidekiq_redis_pool] + Thread.current[:sidekiq_redis_pool] = shards[idx] + block.call + ensure + Thread.current[:sidekiq_redis_pool] = prev + end + + end + + Shards = ShardSet.new +end + +Sidekiq.configure_server do |config| + config.on(:startup) do + # touch attribute while on main thread to avoid any + # initialization race conditions. + Sidekiq::Shards.shards + end +end diff --git a/vendor/gems/sidekiq-pro-3.4.5/sidekiq-pro.gemspec b/vendor/gems/sidekiq-pro-3.4.5/sidekiq-pro.gemspec new file mode 100644 index 0000000..7d32b8b --- /dev/null +++ b/vendor/gems/sidekiq-pro-3.4.5/sidekiq-pro.gemspec @@ -0,0 +1,32 @@ +# -*- encoding: utf-8 -*- +# stub: sidekiq-pro 3.4.5 ruby lib + +Gem::Specification.new do |s| + s.name = "sidekiq-pro" + s.version = "3.4.5" + + s.required_rubygems_version = Gem::Requirement.new(">= 0") if s.respond_to? 
:required_rubygems_version=
+  s.metadata = { "allowed_push_host" => "https://gems.contribsys.com" } if s.respond_to? :metadata=
+  s.require_paths = ["lib"]
+  s.authors = ["Mike Perham"]
+  s.date = "2017-03-07"
+  s.description = "Loads of additional functionality for Sidekiq"
+  s.email = ["mike@contribsys.com"]
+  s.homepage = "http://sidekiq.org"
+  s.rubygems_version = "2.4.5.1"
+  s.summary = "Black belt functionality for Sidekiq"
+  s.has_rdoc = true
+
+  if s.respond_to? :specification_version then
+    s.specification_version = 4
+
+    if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then
+      s.add_runtime_dependency(%q<sidekiq>, [">= 4.1.5"])
+    else
+      s.add_dependency(%q<sidekiq>, [">= 4.1.5"])
+    end
+  else
+    s.add_dependency(%q<sidekiq>, [">= 4.1.5"])
+  end
+end
+
diff --git a/vendor/gems/sidekiq-pro-3.4.5/web/locales/en.yml b/vendor/gems/sidekiq-pro-3.4.5/web/locales/en.yml
new file mode 100644
index 0000000..9274077
--- /dev/null
+++ b/vendor/gems/sidekiq-pro-3.4.5/web/locales/en.yml
@@ -0,0 +1,17 @@
+en:
+  Batch: Batch %{bid}
+  Batches: Batches
+  NoBatchesFound: No batches found
+  Description: Description
+  Size: Size
+  Pending: Pending
+  Job: Job
+  Class: Class
+  Failures: Failures
+  ErrorMessage: Error Message
+  ErrorBacktrace: Error Backtrace
+  Started: Started
+  JobCount: Job Count
+  Failed: Failed
+  Status: Status
+  Parent: Parent
diff --git a/vendor/gems/sidekiq-pro-3.4.5/web/views/batch.erb b/vendor/gems/sidekiq-pro-3.4.5/web/views/batch.erb
new file mode 100644
index 0000000..565eeaf
--- /dev/null
+++ b/vendor/gems/sidekiq-pro-3.4.5/web/views/batch.erb
@@ -0,0 +1,98 @@
+<h3><%= t('Batch', :bid => @batch.bid) %></h3>
+
+<table class="table table-bordered table-striped">
+  <tbody>
+    <tr>
+      <th><%= t('Status') %></th>
+      <td></td>
+    </tr>
+    <% if @batch.description && @batch.description.size > 0 %>
+      <tr>
+        <th><%= t('Description') %></th>
+        <td><%=h @batch.description %></td>
+      </tr>
+    <% end %>
+    <% if @batch.parent_bid %>
+      <tr>
+        <th><%= t('Parent') %></th>
+        <td><a href="<%= root_path %>batches/<%= @batch.parent_bid %>"><%= @batch.parent_bid %></a></td>
+      </tr>
+    <% end %>
+    <% if @batch.callbacks['complete'] %>
+      <tr>
+        <th>Complete</th>
+        <td>
+          <% @batch.callbacks['complete'].each do |hash| %>
+            <%= hash.keys.first %>: <%= h hash.values.first.inspect %><br/>
+          <% end %>
+        </td>
+      </tr>
+    <% end %>
+    <% if @batch.callbacks['success'] %>
+      <tr>
+        <th>Success</th>
+        <td>
+          <% @batch.callbacks['success'].each do |hash| %>
+            <%= hash.keys.first %>: <%= h hash.values.first.inspect %><br/>
+          <% end %>
+        </td>
+      </tr>
+    <% end %>
+    <tr>
+      <th><%= t('Created') %></th>
+      <td><%= relative_time(@batch.created_at) %></td>
+    </tr>
+    <tr>
+      <th><%= t('Expires') %></th>
+      <td><%= relative_time(@batch.expires_at) %></td>
+    </tr>
+    <tr>
+      <th><%= t('Size') %></th>
+      <td><%= @batch.total %></td>
+    </tr>
+    <tr>
+      <th><%= t('Pending') %></th>
+      <td><%= @batch.pending %></td>
+    </tr>
+    <tr>
+      <th><%= t('Failures') %></th>
+      <td><%= @batch.failures %></td>
+    </tr>
+  </tbody>
+</table>
+
+<% fails = @batch.failure_info %>
+<% if fails.size > 0 %>
+  <h3><%= t('Failures') %></h3>
+  <table class="table table-bordered table-striped">
+    <thead>
+      <tr>
+        <th><%= t('Job') %></th>
+        <th><%= t('Class') %></th>
+        <th><%= t('ErrorMessage') %></th>
+      </tr>
+    </thead>
+    <tbody>
+      <% fails.each do |failure| %>
+        <tr>
+          <td><%= filter_link(failure.jid) %></td>
+          <td><%= h failure.error_class %></td>
+          <td><%= h failure.error_message %></td>
+        </tr>
+      <% end %>
+    </tbody>
+  </table>
+<% end %>
diff --git a/vendor/gems/sidekiq-pro-3.4.5/web/views/batches.erb b/vendor/gems/sidekiq-pro-3.4.5/web/views/batches.erb
new file mode 100644
index 0000000..455b39b
--- /dev/null
+++ b/vendor/gems/sidekiq-pro-3.4.5/web/views/batches.erb
@@ -0,0 +1,49 @@
+<h3><%= t('Batches') %></h3>
+
+<% if @batches.size > 0 %>
+  <%= erb :_paging, :locals => { :url => "#{root_path}batches" } %>
+<% end %>
+
+<% if @batches.size > 0 %>
+  <table class="table table-bordered table-striped">
+    <thead>
+      <tr>
+        <th><%= t('Started') %></th>
+        <th><%= t('Description') %></th>
+        <th><%= t('JobCount') %></th>
+        <th><%= t('Pending') %></th>
+        <th><%= t('Failed') %></th>
+        <th><%= t('Status') %></th>
+      </tr>
+    </thead>
+    <tbody>
+      <% @batches.each do |bid, _| %>
+        <%
+          begin
+            status = Sidekiq::Batch::Status.new(bid)
+          rescue Sidekiq::Batch::NoSuchBatch
+            next
+          end
+        %>
+        <tr>
+          <td><a href="<%= root_path %>batches/<%= bid %>"><%= relative_time(status.created_at.utc) %></a></td>
+          <td><%=h status.description %></td>
+          <td><%= status.total %></td>
+          <td><%= status.pending %></td>
+          <td><%= status.failures %></td>
+          <td></td>
+        </tr>
+      <% end %>
+    </tbody>
+  </table>
+<% else %>
+  <div class="alert alert-success"><%= t('NoBatchesFound') %></div>
+<% end %>
diff --git a/vendor/gems/sidekiq-pro-3.4.5/web/views/filtering.erb b/vendor/gems/sidekiq-pro-3.4.5/web/views/filtering.erb
new file mode 100644
index 0000000..cc70ded
--- /dev/null
+++ b/vendor/gems/sidekiq-pro-3.4.5/web/views/filtering.erb
@@ -0,0 +1,7 @@
+<form action="<%= root_path %>filter/<%= which %>" method="post">
+  Filter:
+  <%= csrf_tag %>
+  <input type="text" name="substr" />
+  <input type="submit" value="Filter" />
+</form>
diff --git a/workers/trigger_app_event.rb b/workers/trigger_app_event.rb
new file mode 100644
index 0000000..cb98f2c
--- /dev/null
+++ b/workers/trigger_app_event.rb
@@ -0,0 +1,10 @@
+class TriggerAppEvent
+  include Sidekiq::Worker
+
+  sidekiq_options queue: "app_events", retry: 3
+
+  def perform(app_slug, event, data, payload)
+    app_class = SupportBeeApp::Base.find_from_slug(app_slug)
+    app_class.trigger_event(event, data, payload)
+  end
+end