  1. # frozen_string_literal: true
  2. class Importer::BaseImporter
  3. # @param [Integer] batch_size
  4. # @param [Concurrent::ThreadPoolExecutor] executor
  5. def initialize(batch_size:, executor:)
  6. @batch_size = batch_size
  7. @executor = executor
  8. @wait_for = Concurrent::Set.new
  9. end
  10. # Callback to run when a concurrent work unit completes
  11. # @param [Proc]
  12. def on_progress(&block)
  13. @on_progress = block
  14. end
  15. # Callback to run when a concurrent work unit fails
  16. # @param [Proc]
  17. def on_failure(&block)
  18. @on_failure = block
  19. end
  20. # Reduce resource usage during and improve speed of indexing
  21. def optimize_for_import!
  22. Chewy.client.indices.put_settings index: index.index_name, body: { index: { refresh_interval: -1 } }
  23. end
  24. # Restore original index settings
  25. def optimize_for_search!
  26. Chewy.client.indices.put_settings index: index.index_name, body: { index: { refresh_interval: index.settings_hash[:settings][:index][:refresh_interval] } }
  27. end
  28. # Estimate the amount of documents that would be indexed. Not exact!
  29. # @returns [Integer]
  30. def estimate!
  31. reltuples = ActiveRecord::Base.connection_pool.with_connection { |connection| connection.select_one("SELECT reltuples FROM pg_class WHERE relname = '#{index.adapter.target.table_name}'")['reltuples'].to_i }
  32. # If the table has never yet been vacuumed or analyzed, reltuples contains -1
  33. [reltuples, 0].max
  34. end
  35. # Import data from the database into the index
  36. def import!
  37. raise NotImplementedError
  38. end
  39. # Remove documents from the index that no longer exist in the database
  40. def clean_up!
  41. index.scroll_batches do |documents|
  42. primary_key = index.adapter.target.primary_key
  43. raise ActiveRecord::UnknownPrimaryKey, index.adapter.target if primary_key.nil?
  44. ids = documents.pluck('_id')
  45. existence_map = index.adapter.target.where(primary_key => ids).pluck(primary_key).each_with_object({}) { |id, map| map[id.to_s] = true }
  46. tmp = ids.reject { |id| existence_map[id] }
  47. next if tmp.empty?
  48. in_work_unit(tmp) do |deleted_ids|
  49. bulk = Chewy::Index::Import::BulkBuilder.new(index, delete: deleted_ids).bulk_body
  50. Chewy::Index::Import::BulkRequest.new(index).perform(bulk)
  51. [0, bulk.size]
  52. end
  53. end
  54. wait!
  55. end
  56. protected
  57. def build_bulk_body(to_import)
  58. # Specialize `Chewy::Index::Import::BulkBuilder#bulk_body` to avoid a few
  59. # inefficiencies, as none of our fields or join fields and we do not need
  60. # `BulkBuilder`'s versatility.
  61. crutches = Chewy::Index::Crutch::Crutches.new index, to_import
  62. to_import.map { |object| { index: { _id: object.id, data: index.compose(object, crutches, fields: []) } } }
  63. end
  64. def in_work_unit(...)
  65. work_unit = Concurrent::Promises.future_on(@executor, ...)
  66. work_unit.on_fulfillment!(&@on_progress)
  67. work_unit.on_rejection!(&@on_failure)
  68. work_unit.on_resolution! { @wait_for.delete(work_unit) }
  69. @wait_for << work_unit
  70. rescue Concurrent::RejectedExecutionError
  71. sleep(0.1) && retry # Backpressure
  72. end
  73. def wait!
  74. Concurrent::Promises.zip(*@wait_for).wait
  75. end
  76. def index
  77. raise NotImplementedError
  78. end
  79. end