From bd1dba6bf5a3e637459a55bdb8559dfecc7cf4a8 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Mon, 14 Mar 2016 21:00:03 +0100 Subject: [PATCH 01/68] Rename FutureShortcuts to FutureFactoryMethods - do not polute Concurrent space - remove unused post methods --- examples/edge_futures.in.rb | 76 ++++----- examples/edge_futures.out.rb | 143 +++++++++-------- lib/concurrent/actor.rb | 6 +- lib/concurrent/actor/behaviour/termination.rb | 6 +- lib/concurrent/actor/errors.rb | 2 +- lib/concurrent/actor/reference.rb | 4 +- lib/concurrent/actor/utils/pool.rb | 2 +- lib/concurrent/edge/future.rb | 44 ++---- spec/concurrent/edge/future_spec.rb | 148 +++++++++--------- 9 files changed, 210 insertions(+), 221 deletions(-) diff --git a/examples/edge_futures.in.rb b/examples/edge_futures.in.rb index bbca7b2fa..b8737e679 100644 --- a/examples/edge_futures.in.rb +++ b/examples/edge_futures.in.rb @@ -1,6 +1,9 @@ +# adds factory methods like: future, event, delay, schedule, zip, +include Concurrent::Edge::FutureFactoryMethods + ### Simple asynchronous task -future = Concurrent.future { sleep 0.1; 1 + 1 } # evaluation starts immediately +future = future { sleep 0.1; 1 + 1 } # evaluation starts immediately future.completed? # block until evaluated future.value @@ -9,7 +12,7 @@ ### Failing asynchronous task -future = Concurrent.future { raise 'Boom' } +future = future { raise 'Boom' } future.value future.value! rescue $! future.reason @@ -19,23 +22,23 @@ ### Chaining -head = Concurrent.succeeded_future 1 # +head = succeeded_future 1 # branch1 = head.then(&:succ) # branch2 = head.then(&:succ).then(&:succ) # branch1.zip(branch2).value! (branch1 & branch2).then { |a, b| a + b }.value! (branch1 & branch2).then(&:+).value! -Concurrent.zip(branch1, branch2, branch1).then { |*values| values.reduce &:+ }.value! +zip(branch1, branch2, branch1).then { |*values| values.reduce &:+ }.value! # pick only first completed (branch1 | branch2).value! 
### Error handling -Concurrent.future { Object.new }.then(&:succ).then(&:succ).rescue { |e| e.class }.value # error propagates -Concurrent.future { Object.new }.then(&:succ).rescue { 1 }.then(&:succ).value # rescued and replaced with 1 -Concurrent.future { 1 }.then(&:succ).rescue { |e| e.message }.then(&:succ).value # no error, rescue not applied +future { Object.new }.then(&:succ).then(&:succ).rescue { |e| e.class }.value # error propagates +future { Object.new }.then(&:succ).rescue { 1 }.then(&:succ).value # rescued and replaced with 1 +future { 1 }.then(&:succ).rescue { |e| e.message }.then(&:succ).value # no error, rescue not applied -failing_zip = Concurrent.succeeded_future(1) & Concurrent.failed_future(StandardError.new('boom')) +failing_zip = succeeded_future(1) & failed_future(StandardError.new('boom')) failing_zip.result failing_zip.then { |v| 'never happens' }.result failing_zip.rescue { |a, b| (a || b).message }.value @@ -44,14 +47,14 @@ ### Delay # will not evaluate until asked by #value or other method requiring completion -future = Concurrent.delay { 'lazy' } +future = delay { 'lazy' } sleep 0.1 # future.completed? future.value # propagates trough chain allowing whole or partial lazy chains -head = Concurrent.delay { 1 } +head = delay { 1 } branch1 = head.then(&:succ) branch2 = head.delay.then(&:succ) join = branch1 & branch2 @@ -68,10 +71,10 @@ ### Flatting -Concurrent.future { Concurrent.future { 1+1 } }.flat.value # waits for inner future +future { future { 1+1 } }.flat.value # waits for inner future # more complicated example -Concurrent.future { Concurrent.future { Concurrent.future { 1 + 1 } } }. +future { future { future { 1 + 1 } } }. flat(1). then { |f| f.then(&:succ) }. flat(1).value @@ -79,13 +82,13 @@ ### Schedule -scheduled = Concurrent.schedule(0.1) { 1 } +scheduled = schedule(0.1) { 1 } scheduled.completed? 
scheduled.value # available after 0.1sec # and in chain -scheduled = Concurrent.delay { 1 }.schedule(0.1).then(&:succ) +scheduled = delay { 1 }.schedule(0.1).then(&:succ) # will not be scheduled until value is requested sleep 0.1 # scheduled.value # returns after another 0.1sec @@ -93,9 +96,9 @@ ### Completable Future and Event -future = Concurrent.future -event = Concurrent.event -# Don't forget to keep the reference, `Concurrent.future.then { |v| v }` is incompletable +future = future() +event = event() +# Don't forget to keep the reference, `future.then { |v| v }` is incompletable # will be blocked until completed t1 = Thread.new { future.value } # @@ -112,7 +115,7 @@ ### Callbacks queue = Queue.new -future = Concurrent.delay { 1 + 1 } +future = delay { 1 + 1 } future.on_success { queue << 1 } # evaluated asynchronously future.on_success! { queue << 2 } # evaluated on completing thread @@ -125,7 +128,7 @@ ### Thread-pools -Concurrent.future(:fast) { 2 }.then(:io) { File.read __FILE__ }.wait +future(:fast) { 2 }.then(:io) { File.read __FILE__ }.wait ### Interoperability with actors @@ -134,8 +137,8 @@ -> v { v ** 2 } end -Concurrent. - future { 2 }. + +future { 2 }. then_ask(actor). then { |v| v + 2 }. value @@ -148,15 +151,14 @@ ch1 = Concurrent::Channel.new ch2 = Concurrent::Channel.new -result = Concurrent.select(ch1, ch2) +result = select(ch1, ch2) ch1.put 1 result.value! -Concurrent. - future { 1+1 }. - then_push(ch1) -result = Concurrent. - future { '%02d' }. + +future { 1+1 }. + then_put(ch1) +result = future { '%02d' }. then_select(ch1, ch2). then { |format, (value, channel)| format format, value } result.value! 
@@ -165,18 +167,18 @@ ### Common use-cases Examples # simple background processing -Concurrent.future { do_stuff } +future { do_stuff } # parallel background processing -jobs = 10.times.map { |i| Concurrent.future { i } } # -Concurrent.zip(*jobs).value +jobs = 10.times.map { |i| future { i } } # +zip(*jobs).value # periodic task @end = false def schedule_job - Concurrent.schedule(1) { do_stuff }. + schedule(1) { do_stuff }. rescue { |e| StandardError === e ? report_error(e) : raise(e) }. then { schedule_job unless @end } end @@ -196,8 +198,8 @@ def schedule_job end concurrent_jobs = 11.times.map do |v| - Concurrent. - future { v }. + + future { v }. # ask the DB with the `v`, only one at the time, rest is parallel then_ask(DB). # get size of the string, fails for 11 @@ -205,7 +207,7 @@ def schedule_job rescue { |reason| reason.message } # translate error to value (exception, message) end # -Concurrent.zip(*concurrent_jobs).value! +zip(*concurrent_jobs).value! # In reality there is often a pool though: @@ -223,12 +225,12 @@ def schedule_job end concurrent_jobs = 11.times.map do |v| - Concurrent. - future { v }. + + future { v }. # ask the DB_POOL with the `v`, only 5 at the time, rest is parallel then_ask(DB_POOL). then(&:size). rescue { |reason| reason.message } end # -Concurrent.zip(*concurrent_jobs).value! +zip(*concurrent_jobs).value! 
diff --git a/examples/edge_futures.out.rb b/examples/edge_futures.out.rb index e92e548e4..2ff1ae5b3 100644 --- a/examples/edge_futures.out.rb +++ b/examples/edge_futures.out.rb @@ -1,7 +1,11 @@ + +# adds factory methods like: future, event, delay, schedule, zip, +include Concurrent::Edge::FutureFactoryMethods + ### Simple asynchronous task -future = Concurrent.future { sleep 0.1; 1 + 1 } # evaluation starts immediately - # => <#Concurrent::Edge::Future:0x7fcc73208180 pending blocks:[]> +future = future { sleep 0.1; 1 + 1 } # evaluation starts immediately + # => <#Concurrent::Edge::Future:0x7fedf3042458 pending blocks:[]> future.completed? # => false # block until evaluated future.value # => 2 @@ -10,8 +14,8 @@ ### Failing asynchronous task -future = Concurrent.future { raise 'Boom' } - # => <#Concurrent::Edge::Future:0x7fcc731fa0a8 pending blocks:[]> +future = future { raise 'Boom' } + # => <#Concurrent::Edge::Future:0x7fedf30397e0 pending blocks:[]> future.value # => nil future.value! rescue $! # => # future.reason # => # @@ -21,28 +25,28 @@ ### Chaining -head = Concurrent.succeeded_future 1 +head = succeeded_future 1 branch1 = head.then(&:succ) branch2 = head.then(&:succ).then(&:succ) branch1.zip(branch2).value! # => [2, 3] (branch1 & branch2).then { |a, b| a + b }.value! # => 5 (branch1 & branch2).then(&:+).value! # => 5 -Concurrent.zip(branch1, branch2, branch1).then { |*values| values.reduce &:+ }.value! +zip(branch1, branch2, branch1).then { |*values| values.reduce &:+ }.value! # => 7 # pick only first completed (branch1 | branch2).value! 
# => 2 ### Error handling -Concurrent.future { Object.new }.then(&:succ).then(&:succ).rescue { |e| e.class }.value # error propagates +future { Object.new }.then(&:succ).then(&:succ).rescue { |e| e.class }.value # error propagates # => NoMethodError -Concurrent.future { Object.new }.then(&:succ).rescue { 1 }.then(&:succ).value # rescued and replaced with 1 +future { Object.new }.then(&:succ).rescue { 1 }.then(&:succ).value # rescued and replaced with 1 # => 2 -Concurrent.future { 1 }.then(&:succ).rescue { |e| e.message }.then(&:succ).value # no error, rescue not applied +future { 1 }.then(&:succ).rescue { |e| e.message }.then(&:succ).value # no error, rescue not applied # => 3 -failing_zip = Concurrent.succeeded_future(1) & Concurrent.failed_future(StandardError.new('boom')) - # => <#Concurrent::Edge::Future:0x7ffcc19ac2a0 failed blocks:[]> +failing_zip = succeeded_future(1) & failed_future(StandardError.new('boom')) + # => <#Concurrent::Edge::Future:0x7fedf38e3378 failed blocks:[]> failing_zip.result # => [false, [1, nil], [nil, #]] failing_zip.then { |v| 'never happens' }.result # => [false, [1, nil], [nil, #]] failing_zip.rescue { |a, b| (a || b).message }.value @@ -53,22 +57,22 @@ ### Delay # will not evaluate until asked by #value or other method requiring completion -future = Concurrent.delay { 'lazy' } - # => <#Concurrent::Edge::Future:0x7fcc731a1840 pending blocks:[]> +future = delay { 'lazy' } + # => <#Concurrent::Edge::Future:0x7fedf38c0b20 pending blocks:[]> sleep 0.1 future.completed? 
# => false future.value # => "lazy" # propagates trough chain allowing whole or partial lazy chains -head = Concurrent.delay { 1 } - # => <#Concurrent::Edge::Future:0x7fcc73193b28 pending blocks:[]> +head = delay { 1 } + # => <#Concurrent::Edge::Future:0x7fedf480dc90 pending blocks:[]> branch1 = head.then(&:succ) - # => <#Concurrent::Edge::Future:0x7fcc73190900 pending blocks:[]> + # => <#Concurrent::Edge::Future:0x7fedf480cd40 pending blocks:[]> branch2 = head.delay.then(&:succ) - # => <#Concurrent::Edge::Future:0x7fcc7318b400 pending blocks:[]> + # => <#Concurrent::Edge::Future:0x7fedf1163690 pending blocks:[]> join = branch1 & branch2 - # => <#Concurrent::Edge::Future:0x7fcc73180af0 pending blocks:[]> + # => <#Concurrent::Edge::Future:0x7fedf11624c0 pending blocks:[]> sleep 0.1 # nothing will complete # => 0 [head, branch1, branch2, join].map(&:completed?) # => [false, false, false, false] @@ -83,11 +87,11 @@ ### Flatting -Concurrent.future { Concurrent.future { 1+1 } }.flat.value # waits for inner future +future { future { 1+1 } }.flat.value # waits for inner future # => 2 # more complicated example -Concurrent.future { Concurrent.future { Concurrent.future { 1 + 1 } } }. +future { future { future { 1 + 1 } } }. flat(1). then { |f| f.then(&:succ) }. flat(1).value # => 3 @@ -95,15 +99,15 @@ ### Schedule -scheduled = Concurrent.schedule(0.1) { 1 } - # => <#Concurrent::Edge::Future:0x7fcc73143e48 pending blocks:[]> +scheduled = schedule(0.1) { 1 } + # => <#Concurrent::Edge::Future:0x7fedf387ad28 pending blocks:[]> scheduled.completed? 
# => false scheduled.value # available after 0.1sec # => 1 # and in chain -scheduled = Concurrent.delay { 1 }.schedule(0.1).then(&:succ) - # => <#Concurrent::Edge::Future:0x7fcc7313a758 pending blocks:[]> +scheduled = delay { 1 }.schedule(0.1).then(&:succ) + # => <#Concurrent::Edge::Future:0x7fedf383b448 pending blocks:[]> # will not be scheduled until value is requested sleep 0.1 scheduled.value # returns after another 0.1sec # => 2 @@ -111,37 +115,37 @@ ### Completable Future and Event -future = Concurrent.future - # => <#Concurrent::Edge::CompletableFuture:0x7fcc731286e8 pending blocks:[]> -event = Concurrent.event - # => <#Concurrent::Edge::CompletableEvent:0x7fcc73123058 pending blocks:[]> -# Don't forget to keep the reference, `Concurrent.future.then { |v| v }` is incompletable +future = future() + # => <#Concurrent::Edge::CompletableFuture:0x7fedf3820828 pending blocks:[]> +event = event() + # => <#Concurrent::Edge::CompletableEvent:0x7fedf1112f88 pending blocks:[]> +# Don't forget to keep the reference, `future.then { |v| v }` is incompletable # will be blocked until completed t1 = Thread.new { future.value } t2 = Thread.new { event.wait } future.success 1 - # => <#Concurrent::Edge::CompletableFuture:0x7fcc731286e8 success blocks:[]> + # => <#Concurrent::Edge::CompletableFuture:0x7fedf3820828 success blocks:[]> future.success 1 rescue $! 
# => # future.try_success 2 # => false event.complete - # => <#Concurrent::Edge::CompletableEvent:0x7fcc73123058 completed blocks:[]> + # => <#Concurrent::Edge::CompletableEvent:0x7fedf1112f88 completed blocks:[]> [t1, t2].each &:join ### Callbacks -queue = Queue.new # => # -future = Concurrent.delay { 1 + 1 } - # => <#Concurrent::Edge::Future:0x7fcc7310ab98 pending blocks:[]> +queue = Queue.new # => # +future = delay { 1 + 1 } + # => <#Concurrent::Edge::Future:0x7fedf1141950 pending blocks:[]> future.on_success { queue << 1 } # evaluated asynchronously - # => <#Concurrent::Edge::Future:0x7fcc7310ab98 pending blocks:[]> + # => <#Concurrent::Edge::Future:0x7fedf1141950 pending blocks:[]> future.on_success! { queue << 2 } # evaluated on completing thread - # => <#Concurrent::Edge::Future:0x7fcc7310ab98 pending blocks:[]> + # => <#Concurrent::Edge::Future:0x7fedf1141950 pending blocks:[]> queue.empty? # => true future.value # => 2 @@ -151,8 +155,8 @@ ### Thread-pools -Concurrent.future(:fast) { 2 }.then(:io) { File.read __FILE__ }.wait - # => <#Concurrent::Edge::Future:0x7fcc730f98e8 success blocks:[]> +future(:fast) { 2 }.then(:io) { File.read __FILE__ }.wait + # => <#Concurrent::Edge::Future:0x7fedf1121a10 success blocks:[]> ### Interoperability with actors @@ -160,10 +164,10 @@ actor = Concurrent::Actor::Utils::AdHoc.spawn :square do -> v { v ** 2 } end - # => # + # => # -Concurrent. - future { 2 }. + +future { 2 }. then_ask(actor). then { |v| v + 2 }. 
value # => 6 @@ -173,49 +177,50 @@ ### Interoperability with channels -ch1 = Concurrent::Channel.new # => # -ch2 = Concurrent::Channel.new # => # +ch1 = Concurrent::Channel.new + # => #, @__condition__=#, @closed=false, @size=0, @capacity=1, @buffer=nil, @putting=[], @taking=[]>, @validator=#> +ch2 = Concurrent::Channel.new + # => #, @__condition__=#, @closed=false, @size=0, @capacity=1, @buffer=nil, @putting=[], @taking=[]>, @validator=#> -result = Concurrent.select(ch1, ch2) - # => <#Concurrent::Edge::CompletableFuture:0x7fcc730411a8 pending blocks:[]> -ch1.push 1 # => nil +result = select(ch1, ch2) + # => <#Concurrent::Edge::Future:0x7fedf10a1400 pending blocks:[]> +ch1.put 1 # => true result.value! - # => [1, #] - -Concurrent. - future { 1+1 }. - then_push(ch1) - # => <#Concurrent::Edge::Future:0x7fcc73032c98 pending blocks:[]> -result = Concurrent. - future { '%02d' }. + # => [1, #, @__condition__=#, @closed=false, @size=0, @capacity=1, @buffer=nil, @putting=[], @taking=[]>, @validator=#>] + + +future { 1+1 }. + then_put(ch1) + # => <#Concurrent::Edge::Future:0x7f8df49b4f90 pending blocks:[]> +result = future { '%02d' }. then_select(ch1, ch2). then { |format, (value, channel)| format format, value } - # => <#Concurrent::Edge::Future:0x7fcc7302a4f8 pending blocks:[]> + # => <#Concurrent::Edge::Future:0x7fedf0a1f7d0 pending blocks:[]> result.value! # => "02" ### Common use-cases Examples # simple background processing -Concurrent.future { do_stuff } - # => <#Concurrent::Edge::Future:0x7fcc72123c48 pending blocks:[]> +future { do_stuff } + # => <#Concurrent::Edge::Future:0x7fedf0a15cf8 pending blocks:[]> # parallel background processing -jobs = 10.times.map { |i| Concurrent.future { i } } -Concurrent.zip(*jobs).value # => [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] +jobs = 10.times.map { |i| future { i } } +zip(*jobs).value # => [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] # periodic task @end = false # => false def schedule_job - Concurrent.schedule(1) { do_stuff }. 
+ schedule(1) { do_stuff }. rescue { |e| StandardError === e ? report_error(e) : raise(e) }. then { schedule_job unless @end } end # => :schedule_job schedule_job - # => <#Concurrent::Edge::Future:0x7fcc75011370 pending blocks:[]> + # => <#Concurrent::Edge::Future:0x7fedf09c6720 pending blocks:[]> @end = true # => true @@ -228,11 +233,11 @@ def schedule_job data[message] end end - # => # + # => # concurrent_jobs = 11.times.map do |v| - Concurrent. - future { v }. + + future { v }. # ask the DB with the `v`, only one at the time, rest is parallel then_ask(DB). # get size of the string, fails for 11 @@ -240,7 +245,7 @@ def schedule_job rescue { |reason| reason.message } # translate error to value (exception, message) end -Concurrent.zip(*concurrent_jobs).value! +zip(*concurrent_jobs).value! # => [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, "undefined method `size' for nil:NilClass"] @@ -258,16 +263,16 @@ def schedule_job end end end - # => # + # => # concurrent_jobs = 11.times.map do |v| - Concurrent. - future { v }. + + future { v }. # ask the DB_POOL with the `v`, only 5 at the time, rest is parallel then_ask(DB_POOL). then(&:size). rescue { |reason| reason.message } end -Concurrent.zip(*concurrent_jobs).value! +zip(*concurrent_jobs).value! # => [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, "undefined method `size' for nil:NilClass"] diff --git a/lib/concurrent/actor.rb b/lib/concurrent/actor.rb index 626a8762a..6cad3258c 100644 --- a/lib/concurrent/actor.rb +++ b/lib/concurrent/actor.rb @@ -34,8 +34,8 @@ def self.current Thread.current[:__current_actor__] end - @root = Concurrent.delay do - Core.new(parent: nil, name: '/', class: Root, initialized: future = Concurrent.future).reference.tap do + @root = Concurrent::Edge.delay do + Core.new(parent: nil, name: '/', class: Root, initialized: future = Concurrent::Edge.future).reference.tap do future.wait! 
end end @@ -74,7 +74,7 @@ def self.spawn(*args, &block) # as {.spawn} but it'll block until actor is initialized or it'll raise exception on error def self.spawn!(*args, &block) - spawn(to_spawn_options(*args).merge(initialized: future = Concurrent.future), &block).tap { future.wait! } + spawn(to_spawn_options(*args).merge(initialized: future = Concurrent::Edge.future), &block).tap { future.wait! } end # @overload to_spawn_options(context_class, name, *args) diff --git a/lib/concurrent/actor/behaviour/termination.rb b/lib/concurrent/actor/behaviour/termination.rb index c27b65bf3..431c83557 100644 --- a/lib/concurrent/actor/behaviour/termination.rb +++ b/lib/concurrent/actor/behaviour/termination.rb @@ -14,7 +14,7 @@ class Termination < Abstract def initialize(core, subsequent, core_options, trapping = false, terminate_children = true) super core, subsequent, core_options - @terminated = Concurrent.future + @terminated = Concurrent::Edge.future @public_terminated = @terminated.hide_completable @trapping = trapping @terminate_children = terminate_children @@ -62,9 +62,9 @@ def on_envelope(envelope) def terminate!(reason = nil, envelope = nil) return true if terminated? - self_termination = Concurrent.completed_future(reason.nil?, reason.nil? || nil, reason) + self_termination = Concurrent::Edge.completed_future(reason.nil?, reason.nil? || nil, reason) all_terminations = if @terminate_children - Concurrent.zip(*children.map { |ch| ch.ask(:terminate!) }, self_termination) + Concurrent::Edge.zip(*children.map { |ch| ch.ask(:terminate!) }, self_termination) else self_termination end diff --git a/lib/concurrent/actor/errors.rb b/lib/concurrent/actor/errors.rb index 9e736ba67..8ffb17f43 100644 --- a/lib/concurrent/actor/errors.rb +++ b/lib/concurrent/actor/errors.rb @@ -20,7 +20,7 @@ class UnknownMessage < Error def initialize(envelope) @envelope = Type! 
envelope, Envelope - super envelope.message.inspect + super "#{envelope.message.inspect} from #{envelope.sender_path}" end end end diff --git a/lib/concurrent/actor/reference.rb b/lib/concurrent/actor/reference.rb index 857c8ef04..2b17fd43d 100644 --- a/lib/concurrent/actor/reference.rb +++ b/lib/concurrent/actor/reference.rb @@ -51,7 +51,7 @@ def tell(message) # adder = AdHoc.spawn('adder') { -> message { message + 1 } } # adder.ask(1).value # => 2 # adder.ask(nil).wait.reason # => # - def ask(message, future = Concurrent.future) + def ask(message, future = Concurrent::Edge.future) message message, future end @@ -69,7 +69,7 @@ def ask(message, future = Concurrent.future) # @example # adder = AdHoc.spawn('adder') { -> message { message + 1 } } # adder.ask!(1) # => 2 - def ask!(message, future = Concurrent.future) + def ask!(message, future = Concurrent::Edge.future) ask(message, future).value! end diff --git a/lib/concurrent/actor/utils/pool.rb b/lib/concurrent/actor/utils/pool.rb index a5ced2b57..69c07b90c 100644 --- a/lib/concurrent/actor/utils/pool.rb +++ b/lib/concurrent/actor/utils/pool.rb @@ -43,7 +43,7 @@ def on_message(message) envelope_to_redirect = if envelope.future envelope else - Envelope.new(envelope.message, Concurrent.future, envelope.sender, envelope.address) + Envelope.new(envelope.message, Concurrent::Edge.future, envelope.sender, envelope.address) end envelope_to_redirect.future.on_completion! 
{ @balancer << :subscribe } # TODO check safety of @balancer reading redirect @balancer, envelope_to_redirect diff --git a/lib/concurrent/edge/future.rb b/lib/concurrent/edge/future.rb index a63a881df..efe70dc89 100644 --- a/lib/concurrent/edge/future.rb +++ b/lib/concurrent/edge/future.rb @@ -19,7 +19,7 @@ module Edge # {include:file:examples/edge_futures.out.rb} # # @!macro edge_warning - module FutureShortcuts + module FutureFactoryMethods # User is responsible for completing the event once by {Edge::CompletableEvent#complete} # @return [CompletableEvent] def event(default_executor = :io) @@ -115,6 +115,7 @@ def any_successful(*futures) # only proof of concept # @return [Future] def select(*channels) + # TODO has to be redone, since it's blocking, resp. moved to edge future do # noinspection RubyArgCount Channel.select do |s| @@ -125,25 +126,7 @@ def select(*channels) end end - # post job on :fast executor - # @return [true, false] - def post!(*args, &job) - post_on(:fast, *args, &job) - end - - # post job on :io executor - # @return [true, false] - def post(*args, &job) - post_on(:io, *args, &job) - end - - # post job on executor - # @return [true, false] - def post_on(executor, *args, &job) - Concurrent.executor(executor).post(*args, &job) - end - - # TODO add first(futures, count=count) + # TODO add first(count, *futures) # TODO allow to to have a zip point for many futures and process them in batches by 10 end @@ -412,7 +395,7 @@ def wait_until_complete(timeout) end def with_async(executor, *args, &block) - Concurrent.post_on(executor, *args, &block) + Concurrent.executor(executor).post(*args, &block) end def async_callback_on_completion(executor, callback) @@ -692,7 +675,8 @@ def schedule(intended_time) # Zips with selected value form the suplied channels # @return [Future] def then_select(*channels) - ZipFuturesPromise.new([self, Concurrent.select(*channels)], @DefaultExecutor).future + # TODO (pitr-ch 14-Mar-2016): has to go to edge + 
ZipFuturesPromise.new([self, Concurrent::Edge.select(*channels)], @DefaultExecutor).future end # Changes default executor for rest of the chain @@ -1100,7 +1084,7 @@ def initialize(blocked_by_future, default_executor, executor, &task) def on_completable(done_future) if done_future.success? - Concurrent.post_on(@Executor, done_future, @Task) do |future, task| + Concurrent.executor(@Executor).post(done_future, @Task) do |future, task| evaluate_to lambda { future.apply task } end else @@ -1119,7 +1103,7 @@ def initialize(blocked_by_future, default_executor, executor, &task) def on_completable(done_future) if done_future.failed? - Concurrent.post_on(@Executor, done_future, @Task) do |future, task| + Concurrent.executor(@Executor).post(done_future, @Task) do |future, task| evaluate_to lambda { future.apply task } end else @@ -1134,9 +1118,9 @@ class ChainPromise < BlockedTaskPromise def on_completable(done_future) if Future === done_future - Concurrent.post_on(@Executor, done_future, @Task) { |future, task| evaluate_to(*future.result, task) } + Concurrent.executor(@Executor).post(done_future, @Task) { |future, task| evaluate_to(*future.result, task) } else - Concurrent.post_on(@Executor, @Task) { |task| evaluate_to task } + Concurrent.executor(@Executor).post(@Task) { |task| evaluate_to task } end end end @@ -1417,11 +1401,7 @@ def initialize(default_executor, intended_time) end end end + + extend FutureFactoryMethods end end - -Concurrent::Edge.send :extend, Concurrent::Edge::FutureShortcuts -Concurrent::Edge.send :include, Concurrent::Edge::FutureShortcuts - -Concurrent.send :extend, Concurrent::Edge::FutureShortcuts -Concurrent.send :include, Concurrent::Edge::FutureShortcuts diff --git a/spec/concurrent/edge/future_spec.rb b/spec/concurrent/edge/future_spec.rb index d9ce397a5..09093e33a 100644 --- a/spec/concurrent/edge/future_spec.rb +++ b/spec/concurrent/edge/future_spec.rb @@ -1,19 +1,25 @@ require 'concurrent-edge' require 'thread' +require 'pry' +# require 
'pry-stack_explorer' + +Concurrent.use_stdlib_logger Logger::DEBUG describe 'Concurrent::Edge futures', edge: true do + include Concurrent::Edge::FutureFactoryMethods + describe 'chain_completable' do it 'event' do - b = Concurrent.event - a = Concurrent.event.chain_completable(b) + b = event + a = event.chain_completable(b) a.complete expect(b).to be_completed end it 'future' do - b = Concurrent.future - a = Concurrent.future.chain_completable(b) + b = future + a = future.chain_completable(b) a.success :val expect(b).to be_completed expect(b.value).to eq :val @@ -24,8 +30,8 @@ it 'executes tasks asynchronously' do queue = Queue.new value = 12 - Concurrent.post { queue.push(value) } - Concurrent.post(:io) { queue.push(value) } + Concurrent.executor(:fast).post { queue.push(value) } + Concurrent.executor(:io).post { queue.push(value) } expect(queue.pop).to eq value expect(queue.pop).to eq value end @@ -33,21 +39,21 @@ describe '.future' do it 'executes' do - future = Concurrent.future { 1 + 1 } + future = future { 1 + 1 } expect(future.value!).to eq 2 - future = Concurrent.succeeded_future(1).then { |v| v + 1 } + future = succeeded_future(1).then { |v| v + 1 } expect(future.value!).to eq 2 end end describe '.delay' do it 'delays execution' do - delay = Concurrent.delay { 1 + 1 } + delay = delay { 1 + 1 } expect(delay.completed?).to eq false expect(delay.value!).to eq 2 - delay = Concurrent.succeeded_future(1).delay.then { |v| v + 1 } + delay = succeeded_future(1).delay.then { |v| v + 1 } expect(delay.completed?).to eq false expect(delay.value!).to eq 2 end @@ -57,7 +63,7 @@ it 'scheduled execution' do start = Time.now.to_f queue = Queue.new - future = Concurrent.schedule(0.1) { 1 + 1 }.then { |v| queue.push(v); queue.push(Time.now.to_f - start); queue } + future = schedule(0.1) { 1 + 1 }.then { |v| queue.push(v); queue.push(Time.now.to_f - start); queue } expect(future.value!).to eq queue expect(queue.pop).to eq 2 @@ -65,8 +71,7 @@ start = Time.now.to_f queue = 
Queue.new - future = Concurrent. - succeeded_future(1). + future = succeeded_future(1). schedule(0.1). then { |v| v + 1 }. then { |v| queue.push(v); queue.push(Time.now.to_f - start); queue } @@ -79,8 +84,7 @@ it 'scheduled execution in graph' do start = Time.now.to_f queue = Queue.new - future = Concurrent. - future { sleep 0.1; 1 }. + future = future { sleep 0.1; 1 }. schedule(0.1). then { |v| v + 1 }. then { |v| queue.push(v); queue.push(Time.now.to_f - start); queue } @@ -95,9 +99,9 @@ describe '.event' do specify do - completable_event = Concurrent.event + completable_event = event one = completable_event.chain { 1 } - join = Concurrent.zip(completable_event).chain { 1 } + join = zip(completable_event).chain { 1 } expect(one.completed?).to be false completable_event.complete expect(one.value!).to eq 1 @@ -107,9 +111,9 @@ describe '.future without block' do specify do - completable_future = Concurrent.future + completable_future = future one = completable_future.then(&:succ) - join = Concurrent.zip_futures(completable_future).then { |v| v } + join = zip_futures(completable_future).then { |v| v } expect(one.completed?).to be false completable_future.success 0 expect(one.value!).to eq 1 @@ -120,11 +124,11 @@ describe '.any_complete' do it 'continues on first result' do - f1 = Concurrent.future - f2 = Concurrent.future - f3 = Concurrent.future + f1 = future + f2 = future + f3 = future - any1 = Concurrent.any_complete(f1, f2) + any1 = any_complete(f1, f2) any2 = f2 | f3 f1.success 1 @@ -137,10 +141,10 @@ describe '.any_successful' do it 'continues on first result' do - f1 = Concurrent.future - f2 = Concurrent.future + f1 = future + f2 = future - any = Concurrent.any_successful(f1, f2) + any = any_successful(f1, f2) f1.fail f2.success :value @@ -151,14 +155,14 @@ describe '.zip' do it 'waits for all results' do - a = Concurrent.future { 1 } - b = Concurrent.future { 2 } - c = Concurrent.future { 3 } + a = future { 1 } + b = future { 2 } + c = future { 3 } z1 = a & b 
- z2 = Concurrent.zip a, b, c - z3 = Concurrent.zip a - z4 = Concurrent.zip + z2 = zip a, b, c + z3 = zip a + z4 = zip expect(z1.value!).to eq [1, 2] expect(z2.value!).to eq [1, 2, 3] @@ -192,29 +196,29 @@ expect(z1.then(&:+).value!).to eq 3 expect(z2.then { |a, b, c| a+b+c }.value!).to eq 6 - expect(Concurrent.future { 1 }.delay).to be_a_kind_of Concurrent::Edge::Future - expect(Concurrent.future { 1 }.delay.wait!).to be_completed - expect(Concurrent.event.complete.delay).to be_a_kind_of Concurrent::Edge::Event - expect(Concurrent.event.complete.delay.wait).to be_completed + expect(future { 1 }.delay).to be_a_kind_of Concurrent::Edge::Future + expect(future { 1 }.delay.wait!).to be_completed + expect(event.complete.delay).to be_a_kind_of Concurrent::Edge::Event + expect(event.complete.delay.wait).to be_completed - a = Concurrent.future { 1 } - b = Concurrent.future { raise 'b' } - c = Concurrent.future { raise 'c' } + a = future { 1 } + b = future { raise 'b' } + c = future { raise 'c' } - Concurrent.zip(a, b, c).chain { |*args| q << args } + zip(a, b, c).chain { |*args| q << args } expect(q.pop.flatten.map(&:class)).to eq [FalseClass, 0.class, NilClass, NilClass, NilClass, RuntimeError, RuntimeError] - Concurrent.zip(a, b, c).rescue { |*args| q << args } + zip(a, b, c).rescue { |*args| q << args } expect(q.pop.map(&:class)).to eq [NilClass, RuntimeError, RuntimeError] - expect(Concurrent.zip.wait(0.1)).to eq true + expect(zip.wait(0.1)).to eq true end context 'when a future raises an error' do - let(:future) { Concurrent.future { raise 'error' } } + let(:a_future) { future { raise 'error' } } it 'raises a concurrent error' do - expect { Concurrent.zip(future).value! }.to raise_error(Concurrent::Error) + expect { zip(a_future).value! 
}.to raise_error(Concurrent::Error) end end @@ -222,13 +226,13 @@ describe '.zip_events' do it 'waits for all and returns event' do - a = Concurrent.succeeded_future 1 - b = Concurrent.failed_future :any - c = Concurrent.event.complete + a = succeeded_future 1 + b = failed_future :any + c = event.complete - z2 = Concurrent.zip_events a, b, c - z3 = Concurrent.zip_events a - z4 = Concurrent.zip_events + z2 = zip_events a, b, c + z3 = zip_events a + z4 = zip_events expect(z2.completed?).to be_truthy expect(z3.completed?).to be_truthy @@ -249,13 +253,13 @@ future.wait [queue.pop, queue.pop, queue.pop, queue.pop].sort end - callback_results = callbacks_tester.call(Concurrent.future { :value }) + callback_results = callbacks_tester.call(future { :value }) expect(callback_results).to eq ["async on_completion [true, :value, nil]", "async on_success :value", "sync on_completion [true, :value, nil]", "sync on_success :value"] - callback_results = callbacks_tester.call(Concurrent.future { raise 'error' }) + callback_results = callbacks_tester.call(future { raise 'error' }) expect(callback_results).to eq ["async on_completion [false, nil, #]", "async on_failure #", "sync on_completion [false, nil, #]", @@ -267,7 +271,7 @@ start_latch = Concurrent::CountDownLatch.new end_latch = Concurrent::CountDownLatch.new - future = Concurrent.future do + future = future do start_latch.count_down end_latch.wait(1) end @@ -282,7 +286,7 @@ it 'chains' do - future0 = Concurrent.future { 1 }.then { |v| v + 2 } # both executed on default FAST_EXECUTOR + future0 = future { 1 }.then { |v| v + 2 } # both executed on default FAST_EXECUTOR future1 = future0.then(:fast) { raise 'boo' } # executed on IO_EXECUTOR future2 = future1.then { |v| v + 1 } # will fail with 'boo' error, executed on default FAST_EXECUTOR future3 = future1.rescue { |err| err.message } # executed on default FAST_EXECUTOR @@ -318,7 +322,7 @@ it 'constructs promise like tree' do # if head of the tree is not constructed with #future 
but with #delay it does not start execute, # it's triggered later by calling wait or value on any of the dependent futures or the delay itself - three = (head = Concurrent.delay { 1 }).then { |v| v.succ }.then(&:succ) + three = (head = delay { 1 }).then { |v| v.succ }.then(&:succ) four = three.delay.then(&:succ) # meaningful to_s and inspect defined for Future and Promise @@ -333,16 +337,16 @@ expect(four.value!).to eq 4 # futures hidden behind two delays trigger evaluation of both - double_delay = Concurrent.delay { 1 }.delay.then(&:succ) + double_delay = delay { 1 }.delay.then(&:succ) expect(double_delay.value!).to eq 2 end it 'allows graphs' do - head = Concurrent.future { 1 } + head = future { 1 } branch1 = head.then(&:succ) branch2 = head.then(&:succ).delay.then(&:succ) results = [ - Concurrent.zip(branch1, branch2).then { |b1, b2| b1 + b2 }, + zip(branch1, branch2).then { |b1, b2| b1 + b2 }, branch1.zip(branch2).then { |b1, b2| b1 + b2 }, (branch1 & branch2).then { |b1, b2| b1 + b2 }] @@ -351,31 +355,31 @@ expect(branch2).not_to be_completed expect(results.map(&:value)).to eq [5, 5, 5] - expect(Concurrent.zip(branch1, branch2).value!).to eq [2, 3] + expect(zip(branch1, branch2).value!).to eq [2, 3] end describe '#flat' do it 'returns value of inner future' do - f = Concurrent.future { Concurrent.future { 1 } }.flat.then(&:succ) + f = future { future { 1 } }.flat.then(&:succ) expect(f.value!).to eq 2 end it 'propagates failure of inner future' do err = StandardError.new('boo') - f = Concurrent.future { Concurrent.failed_future(err) }.flat + f = future { failed_future(err) }.flat expect(f.reason).to eq err end it 'it propagates failure of the future which was suppose to provide inner future' do - f = Concurrent.future { raise 'boo' }.flat + f = future { raise 'boo' }.flat expect(f.reason.message).to eq 'boo' end it 'fails if inner value is not a future' do - f = Concurrent.future { 'boo' }.flat + f = future { 'boo' }.flat expect(f.reason).to be_an_instance_of 
TypeError - f = Concurrent.future { Concurrent.completed_event }.flat + f = future { completed_event }.flat expect(f.reason).to be_an_instance_of TypeError end @@ -385,7 +389,7 @@ end it 'completes future when Exception raised' do - f = Concurrent.future { raise Exception, 'fail' } + f = future { raise Exception, 'fail' } f.wait 1 expect(f).to be_completed expect(f).to be_failed @@ -399,8 +403,7 @@ -> v { v * 2 } end - expect(Concurrent. - future { 2 }. + expect(future { 2 }. then_ask(actor). then { |v| v + 2 }. value!).to eq 6 @@ -410,15 +413,14 @@ ch1 = Concurrent::Channel.new ch2 = Concurrent::Channel.new - result = Concurrent.select(ch1, ch2) + result = select(ch1, ch2) ch1.put 1 expect(result.value!).to eq [1, ch1] - Concurrent. - future { 1+1 }. + + future { 1+1 }. then_put(ch1) - result = Concurrent. - future { '%02d' }. + result = future { '%02d' }. then_select(ch1, ch2). then { |format, (value, channel)| format format, value } expect(result.value!).to eq '02' @@ -426,7 +428,7 @@ end specify do - expect(Concurrent.future { :v }.value!).to eq :v + expect(future { :v }.value!).to eq :v end end From 7b755ad60769fd06d9d8e65159b083fa93d4d83f Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Mon, 14 Mar 2016 21:12:49 +0100 Subject: [PATCH 02/68] split completable future construction from future to its own method completable_future --- examples/edge_futures.in.rb | 2 +- examples/edge_futures.out.rb | 4 ++-- lib/concurrent/actor.rb | 4 ++-- lib/concurrent/actor/behaviour/termination.rb | 2 +- lib/concurrent/actor/reference.rb | 4 ++-- lib/concurrent/edge/future.rb | 20 +++++++++---------- spec/concurrent/edge/future_spec.rb | 16 +++++++-------- 7 files changed, 25 insertions(+), 27 deletions(-) diff --git a/examples/edge_futures.in.rb b/examples/edge_futures.in.rb index b8737e679..befd04f43 100644 --- a/examples/edge_futures.in.rb +++ b/examples/edge_futures.in.rb @@ -96,7 +96,7 @@ ### Completable Future and Event -future = future() +future = completable_future 
event = event() # Don't forget to keep the reference, `future.then { |v| v }` is incompletable diff --git a/examples/edge_futures.out.rb b/examples/edge_futures.out.rb index 2ff1ae5b3..a12ac1049 100644 --- a/examples/edge_futures.out.rb +++ b/examples/edge_futures.out.rb @@ -115,8 +115,8 @@ ### Completable Future and Event -future = future() - # => <#Concurrent::Edge::CompletableFuture:0x7fedf3820828 pending blocks:[]> +future = completable_future + # => <#Concurrent::Edge::CompletableFuture:0x7fe8d312f9d0 pending blocks:[]> event = event() # => <#Concurrent::Edge::CompletableEvent:0x7fedf1112f88 pending blocks:[]> # Don't forget to keep the reference, `future.then { |v| v }` is incompletable diff --git a/lib/concurrent/actor.rb b/lib/concurrent/actor.rb index 6cad3258c..02c404475 100644 --- a/lib/concurrent/actor.rb +++ b/lib/concurrent/actor.rb @@ -35,7 +35,7 @@ def self.current end @root = Concurrent::Edge.delay do - Core.new(parent: nil, name: '/', class: Root, initialized: future = Concurrent::Edge.future).reference.tap do + Core.new(parent: nil, name: '/', class: Root, initialized: future = Concurrent::Edge.completable_future).reference.tap do future.wait! end end @@ -74,7 +74,7 @@ def self.spawn(*args, &block) # as {.spawn} but it'll block until actor is initialized or it'll raise exception on error def self.spawn!(*args, &block) - spawn(to_spawn_options(*args).merge(initialized: future = Concurrent::Edge.future), &block).tap { future.wait! } + spawn(to_spawn_options(*args).merge(initialized: future = Concurrent::Edge.completable_future), &block).tap { future.wait! 
} end # @overload to_spawn_options(context_class, name, *args) diff --git a/lib/concurrent/actor/behaviour/termination.rb b/lib/concurrent/actor/behaviour/termination.rb index 431c83557..477bd2ced 100644 --- a/lib/concurrent/actor/behaviour/termination.rb +++ b/lib/concurrent/actor/behaviour/termination.rb @@ -14,7 +14,7 @@ class Termination < Abstract def initialize(core, subsequent, core_options, trapping = false, terminate_children = true) super core, subsequent, core_options - @terminated = Concurrent::Edge.future + @terminated = Concurrent::Edge.completable_future @public_terminated = @terminated.hide_completable @trapping = trapping @terminate_children = terminate_children diff --git a/lib/concurrent/actor/reference.rb b/lib/concurrent/actor/reference.rb index 2b17fd43d..47fc0febb 100644 --- a/lib/concurrent/actor/reference.rb +++ b/lib/concurrent/actor/reference.rb @@ -51,7 +51,7 @@ def tell(message) # adder = AdHoc.spawn('adder') { -> message { message + 1 } } # adder.ask(1).value # => 2 # adder.ask(nil).wait.reason # => # - def ask(message, future = Concurrent::Edge.future) + def ask(message, future = Concurrent::Edge.completable_future) message message, future end @@ -69,7 +69,7 @@ def ask(message, future = Concurrent::Edge.future) # @example # adder = AdHoc.spawn('adder') { -> message { message + 1 } } # adder.ask!(1) # => 2 - def ask!(message, future = Concurrent::Edge.future) + def ask!(message, future = Concurrent::Edge.completable_future) ask(message, future).value! end diff --git a/lib/concurrent/edge/future.rb b/lib/concurrent/edge/future.rb index efe70dc89..ef12428d9 100644 --- a/lib/concurrent/edge/future.rb +++ b/lib/concurrent/edge/future.rb @@ -26,18 +26,16 @@ def event(default_executor = :io) CompletableEventPromise.new(default_executor).future end - # @overload future(default_executor = :io, &task) - # Constructs new Future which will be completed after block is evaluated on executor. Evaluation begins immediately. 
- # @return [Future] - # @overload future(default_executor = :io) - # User is responsible for completing the future once by {Edge::CompletableFuture#success} or {Edge::CompletableFuture#fail} - # @return [CompletableFuture] + # Constructs new Future which will be completed after block is evaluated on executor. Evaluation begins immediately. + # @return [Future] def future(default_executor = :io, &task) - if task - ImmediateEventPromise.new(default_executor).future.then(&task) - else - CompletableFuturePromise.new(default_executor).future - end + ImmediateEventPromise.new(default_executor).future.then(&task) + end + + # User is responsible for completing the future once by {Edge::CompletableFuture#success} or {Edge::CompletableFuture#fail} + # @return [CompletableFuture] + def completable_future(default_executor = :io) + CompletableFuturePromise.new(default_executor).future end # @return [Future] which is already completed diff --git a/spec/concurrent/edge/future_spec.rb b/spec/concurrent/edge/future_spec.rb index 09093e33a..ee9ef1bd8 100644 --- a/spec/concurrent/edge/future_spec.rb +++ b/spec/concurrent/edge/future_spec.rb @@ -18,8 +18,8 @@ end it 'future' do - b = future - a = future.chain_completable(b) + b = completable_future + a = completable_future.chain_completable(b) a.success :val expect(b).to be_completed expect(b.value).to eq :val @@ -111,7 +111,7 @@ describe '.future without block' do specify do - completable_future = future + completable_future = completable_future() one = completable_future.then(&:succ) join = zip_futures(completable_future).then { |v| v } expect(one.completed?).to be false @@ -124,9 +124,9 @@ describe '.any_complete' do it 'continues on first result' do - f1 = future - f2 = future - f3 = future + f1 = completable_future + f2 = completable_future + f3 = completable_future any1 = any_complete(f1, f2) any2 = f2 | f3 @@ -141,8 +141,8 @@ describe '.any_successful' do it 'continues on first result' do - f1 = future - f2 = future + f1 = 
completable_future + f2 = completable_future any = any_successful(f1, f2) From a3643a97ac6ade9827679d53ad6a3297aa8e496c Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Mon, 14 Mar 2016 22:23:06 +0100 Subject: [PATCH 03/68] move new Futures to core --- README.md | 16 +-- examples/benchmark_new_futures.rb | 15 ++- examples/edge_futures.in.rb | 2 +- examples/edge_futures.out.rb | 2 +- lib/concurrent-edge.rb | 4 +- lib/concurrent.rb | 2 + lib/concurrent/actor.rb | 8 +- lib/concurrent/actor/behaviour/termination.rb | 6 +- lib/concurrent/actor/core.rb | 2 +- lib/concurrent/actor/envelope.rb | 2 +- lib/concurrent/actor/reference.rb | 10 +- lib/concurrent/actor/utils/pool.rb | 2 +- lib/concurrent/edge/lock_free_stack.rb | 100 ------------------ lib/concurrent/edge/promises.rb | 57 ++++++++++ lib/concurrent/lock_free_stack.rb | 97 +++++++++++++++++ .../{edge/future.rb => promises.rb} | 93 +++++----------- spec/concurrent/edge/future_spec.rb | 20 ++-- 17 files changed, 232 insertions(+), 206 deletions(-) delete mode 100644 lib/concurrent/edge/lock_free_stack.rb create mode 100644 lib/concurrent/edge/promises.rb create mode 100644 lib/concurrent/lock_free_stack.rb rename lib/concurrent/{edge/future.rb => promises.rb} (94%) diff --git a/README.md b/README.md index a74795513..50602ed05 100644 --- a/README.md +++ b/README.md @@ -66,6 +66,11 @@ We also have a [mailing list](http://groups.google.com/group/concurrent-ruby) an #### General-purpose Concurrency Abstractions * [Async](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/Async.html): A mixin module that provides simple asynchronous behavior to a class. Loosely based on Erlang's [gen_server](http://www.erlang.org/doc/man/gen_server.html). 
+* [Promises Framework](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/Promises/FutureFactoryMethods.html): + Unified implementation of futures and promises which combines features of previous `Future`, + `Promise`, `IVar`, `Event`, `dataflow`, `Delay`, and `TimerTask` into a single framework. It extensively uses the + new synchronization layer to make all the features **non-blocking** and **lock-free**, with the exception of obviously blocking + operations like `#wait`, `#value`. It also offers better performance. * [Future](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/Future.html): An asynchronous operation that produces a value. * [Dataflow](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent.html#dataflow-class_method): Built on Futures, Dataflow allows you to create a task that will be scheduled when all of its data dependencies are available. * [Promise](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/Promise.html): Similar to Futures, with more features. @@ -81,6 +86,10 @@ Collection classes that were originally part of the (deprecated) `thread_safe` g * [Map](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/Map.html) A hash-like object that should have much better performance characteristics, especially under high concurrency, than `Concurrent::Hash`. * [Tuple](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/Tuple.html) A fixed size array with volatile (synchronized, thread safe) getters/setters. +and other collections: + +* [LockFreeStack](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/LockFreeStack.html) + Value objects inspired by other languages: * [Maybe](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/Maybe.html) A thread-safe, immutable object representing an optional value, based on @@ -129,11 +138,6 @@ be obeyed though. 
Features developed in `concurrent-ruby-edge` are expected to m * [Actor](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/Actor.html): Implements the Actor Model, where concurrent actors exchange messages. -* [New Future Framework](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/Edge/FutureShortcuts.html): - Unified implementation of futures and promises which combines features of previous `Future`, - `Promise`, `IVar`, `Event`, `dataflow`, `Delay`, and `TimerTask` into a single framework. It extensively uses the - new synchronization layer to make all the features **non-blocking** and **lock-free**, with the exception of obviously blocking - operations like `#wait`, `#value`. It also offers better performance. * [Channel](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/Edge/Channel.html): Communicating Sequential Processes ([CSP](https://en.wikipedia.org/wiki/Communicating_sequential_processes)). Functionally equivalent to Go [channels](https://tour.golang.org/concurrency/2) with additional @@ -141,7 +145,6 @@ be obeyed though. Features developed in `concurrent-ruby-edge` are expected to m * [LazyRegister](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/LazyRegister.html) * [AtomicMarkableReference](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/Edge/AtomicMarkableReference.html) * [LockFreeLinkedSet](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/Edge/LockFreeLinkedSet.html) -* [LockFreeStack](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/Edge/LockFreeStack.html) #### Statuses: @@ -149,7 +152,6 @@ be obeyed though. Features developed in `concurrent-ruby-edge` are expected to m - **Actor** - Partial documentation and tests; depends on new future/promise framework; stability is good. - **Channel** - Brand new implementation; partial documentation and tests; stability is good. -- **Future/Promise Framework** - API changes; partial documentation and tests; stability is good. 
- **LazyRegister** - Missing documentation and tests. - **AtomicMarkableReference, LockFreeLinkedSet, LockFreeStack** - Need real world battle testing. diff --git a/examples/benchmark_new_futures.rb b/examples/benchmark_new_futures.rb index f8b04934c..76f36571c 100755 --- a/examples/benchmark_new_futures.rb +++ b/examples/benchmark_new_futures.rb @@ -2,7 +2,6 @@ require 'benchmark/ips' require 'concurrent' -require 'concurrent-edge' scale = 1 @@ -12,19 +11,19 @@ Benchmark.ips(time, warmup) do |x| x.report('flat-old') { Concurrent::Promise.execute { 1 }.flat_map { |v| Concurrent::Promise.execute { v + 2 } }.value! } - x.report('flat-new') { Concurrent.future(:fast) { 1 }.then { |v| Concurrent.future(:fast) { v + 2 } }.flat.value! } + x.report('flat-new') { Concurrent::Promises.future(:fast) { 1 }.then { |v| Concurrent::Promises.future(:fast) { v + 2 } }.flat.value! } x.compare! end Benchmark.ips(time, warmup) do |x| x.report('status-old') { f = Concurrent::Promise.execute { nil }; 100.times { f.complete? } } - x.report('status-new') { f = Concurrent.future(:fast) { nil }; 100.times { f.completed? } } + x.report('status-new') { f = Concurrent::Promises.future(:fast) { nil }; 100.times { f.completed? } } x.compare! end Benchmark.ips(time, warmup) do |x| of = Concurrent::Promise.execute { 1 } - nf = Concurrent.succeeded_future(1, :fast) + nf = Concurrent::Promises.succeeded_future(1, :fast) x.report('value-old') { of.value! } x.report('value-new') { nf.value! } x.compare! @@ -41,7 +40,7 @@ head.value! end x.report('graph-new') do - head = Concurrent.succeeded_future(1, :fast) + head = Concurrent::Promises.succeeded_future(1, :fast) 10.times do branch1 = head.then(&:succ) branch2 = head.then(&:succ).then(&:succ) @@ -54,14 +53,14 @@ Benchmark.ips(time, warmup) do |x| x.report('immediate-old') { Concurrent::Promise.execute { nil }.value! } - x.report('immediate-new') { Concurrent.succeeded_future(nil, :fast).value! 
} + x.report('immediate-new') { Concurrent::Promises.succeeded_future(nil, :fast).value! } x.compare! end Benchmark.ips(time, warmup) do |x| of = Concurrent::Promise.execute { 1 } - nf = Concurrent.succeeded_future(1, :fast) - x.report('then-old') { 100.times.reduce(nf) { |nf, _| nf.then(&:succ) }.value! } + nf = Concurrent::Promises.succeeded_future(1, :fast) + x.report('then-old') { 100.times.reduce(of) { |nf, _| nf.then(&:succ) }.value! } x.report('then-new') { 100.times.reduce(nf) { |nf, _| nf.then(&:succ) }.value! } x.compare! end diff --git a/examples/edge_futures.in.rb b/examples/edge_futures.in.rb index befd04f43..aa21ba4d1 100644 --- a/examples/edge_futures.in.rb +++ b/examples/edge_futures.in.rb @@ -1,5 +1,5 @@ # adds factory methods like: future, event, delay, schedule, zip, -include Concurrent::Edge::FutureFactoryMethods +include Concurrent::Promises::FutureFactoryMethods ### Simple asynchronous task diff --git a/examples/edge_futures.out.rb b/examples/edge_futures.out.rb index a12ac1049..142ecacb0 100644 --- a/examples/edge_futures.out.rb +++ b/examples/edge_futures.out.rb @@ -1,6 +1,6 @@ # adds factory methods like: future, event, delay, schedule, zip, -include Concurrent::Edge::FutureFactoryMethods +include Concurrent::Promises::FutureFactoryMethods ### Simple asynchronous task diff --git a/lib/concurrent-edge.rb b/lib/concurrent-edge.rb index f791f9164..5928efb87 100644 --- a/lib/concurrent-edge.rb +++ b/lib/concurrent-edge.rb @@ -6,7 +6,7 @@ require 'concurrent/exchanger' require 'concurrent/lazy_register' -require 'concurrent/edge/future' -require 'concurrent/edge/lock_free_stack' require 'concurrent/edge/atomic_markable_reference' require 'concurrent/edge/lock_free_linked_set' + +require 'concurrent/edge/promises' diff --git a/lib/concurrent.rb b/lib/concurrent.rb index a6f4e5898..8f9017183 100644 --- a/lib/concurrent.rb +++ b/lib/concurrent.rb @@ -29,6 +29,8 @@ require 'concurrent/settable_struct' require 'concurrent/timer_task' require 
'concurrent/tvar' +require 'concurrent/lock_free_stack' +require 'concurrent/promises' require 'concurrent/thread_safe/synchronized_delegator' require 'concurrent/thread_safe/util' diff --git a/lib/concurrent/actor.rb b/lib/concurrent/actor.rb index 02c404475..81b98c608 100644 --- a/lib/concurrent/actor.rb +++ b/lib/concurrent/actor.rb @@ -1,7 +1,7 @@ require 'concurrent/configuration' require 'concurrent/executor/serialized_execution' require 'concurrent/synchronization' -require 'concurrent/edge/future' +require 'concurrent/promises' module Concurrent # TODO https://github.com/celluloid/celluloid/wiki/Supervision-Groups ? @@ -34,8 +34,8 @@ def self.current Thread.current[:__current_actor__] end - @root = Concurrent::Edge.delay do - Core.new(parent: nil, name: '/', class: Root, initialized: future = Concurrent::Edge.completable_future).reference.tap do + @root = Concurrent::Promises.delay do + Core.new(parent: nil, name: '/', class: Root, initialized: future = Concurrent::Promises.completable_future).reference.tap do future.wait! end end @@ -74,7 +74,7 @@ def self.spawn(*args, &block) # as {.spawn} but it'll block until actor is initialized or it'll raise exception on error def self.spawn!(*args, &block) - spawn(to_spawn_options(*args).merge(initialized: future = Concurrent::Edge.completable_future), &block).tap { future.wait! } + spawn(to_spawn_options(*args).merge(initialized: future = Concurrent::Promises.completable_future), &block).tap { future.wait! 
} end # @overload to_spawn_options(context_class, name, *args) diff --git a/lib/concurrent/actor/behaviour/termination.rb b/lib/concurrent/actor/behaviour/termination.rb index 477bd2ced..7f84cdb4e 100644 --- a/lib/concurrent/actor/behaviour/termination.rb +++ b/lib/concurrent/actor/behaviour/termination.rb @@ -14,7 +14,7 @@ class Termination < Abstract def initialize(core, subsequent, core_options, trapping = false, terminate_children = true) super core, subsequent, core_options - @terminated = Concurrent::Edge.completable_future + @terminated = Concurrent::Promises.completable_future @public_terminated = @terminated.hide_completable @trapping = trapping @terminate_children = terminate_children @@ -62,9 +62,9 @@ def on_envelope(envelope) def terminate!(reason = nil, envelope = nil) return true if terminated? - self_termination = Concurrent::Edge.completed_future(reason.nil?, reason.nil? || nil, reason) + self_termination = Concurrent::Promises.completed_future(reason.nil?, reason.nil? || nil, reason) all_terminations = if @terminate_children - Concurrent::Edge.zip(*children.map { |ch| ch.ask(:terminate!) }, self_termination) + Concurrent::Promises.zip(*children.map { |ch| ch.ask(:terminate!) }, self_termination) else self_termination end diff --git a/lib/concurrent/actor/core.rb b/lib/concurrent/actor/core.rb index c29173ac8..1d4679f61 100644 --- a/lib/concurrent/actor/core.rb +++ b/lib/concurrent/actor/core.rb @@ -192,7 +192,7 @@ def ns_initialize(opts, &block) @args = opts.fetch(:args, []) @block = block - initialized = Type! opts[:initialized], Edge::CompletableFuture, NilClass + initialized = Type! 
opts[:initialized], Promises::CompletableFuture, NilClass schedule_execution do begin diff --git a/lib/concurrent/actor/envelope.rb b/lib/concurrent/actor/envelope.rb index fa80f111b..120de6e7f 100644 --- a/lib/concurrent/actor/envelope.rb +++ b/lib/concurrent/actor/envelope.rb @@ -16,7 +16,7 @@ class Envelope def initialize(message, future, sender, address) @message = message - @future = Type! future, Edge::CompletableFuture, NilClass + @future = Type! future, Promises::CompletableFuture, NilClass @sender = Type! sender, Reference, Thread @address = Type! address, Reference end diff --git a/lib/concurrent/actor/reference.rb b/lib/concurrent/actor/reference.rb index 47fc0febb..ffbe22272 100644 --- a/lib/concurrent/actor/reference.rb +++ b/lib/concurrent/actor/reference.rb @@ -45,13 +45,13 @@ def tell(message) # global_io_executor will block on while asking. It's fine to use it form outside of actors and # global_io_executor. # @param [Object] message - # @param [Edge::Future] future to be fulfilled be message's processing result - # @return [Edge::Future] supplied future + # @param [Promises::Future] future to be fulfilled be message's processing result + # @return [Promises::Future] supplied future # @example # adder = AdHoc.spawn('adder') { -> message { message + 1 } } # adder.ask(1).value # => 2 # adder.ask(nil).wait.reason # => # - def ask(message, future = Concurrent::Edge.completable_future) + def ask(message, future = Concurrent::Promises.completable_future) message message, future end @@ -63,13 +63,13 @@ def ask(message, future = Concurrent::Edge.completable_future) # global_io_executor will block on while asking. It's fine to use it form outside of actors and # global_io_executor. 
# @param [Object] message - # @param [Edge::Future] future to be fulfilled be message's processing result + # @param [Promises::Future] future to be fulfilled be message's processing result # @return [Object] message's processing result # @raise [Exception] future.reason if future is #failed? # @example # adder = AdHoc.spawn('adder') { -> message { message + 1 } } # adder.ask!(1) # => 2 - def ask!(message, future = Concurrent::Edge.completable_future) + def ask!(message, future = Concurrent::Promises.completable_future) ask(message, future).value! end diff --git a/lib/concurrent/actor/utils/pool.rb b/lib/concurrent/actor/utils/pool.rb index 69c07b90c..da9a544ef 100644 --- a/lib/concurrent/actor/utils/pool.rb +++ b/lib/concurrent/actor/utils/pool.rb @@ -43,7 +43,7 @@ def on_message(message) envelope_to_redirect = if envelope.future envelope else - Envelope.new(envelope.message, Concurrent::Edge.future, envelope.sender, envelope.address) + Envelope.new(envelope.message, Concurrent::Promises.future, envelope.sender, envelope.address) end envelope_to_redirect.future.on_completion! { @balancer << :subscribe } # TODO check safety of @balancer reading redirect @balancer, envelope_to_redirect diff --git a/lib/concurrent/edge/lock_free_stack.rb b/lib/concurrent/edge/lock_free_stack.rb deleted file mode 100644 index 1749a0d16..000000000 --- a/lib/concurrent/edge/lock_free_stack.rb +++ /dev/null @@ -1,100 +0,0 @@ -module Concurrent - module Edge - class LockFreeStack < Synchronization::Object - - safe_initialization! - - class Node - attr_reader :value, :next_node - - def initialize(value, next_node) - @value = value - @next_node = next_node - end - - singleton_class.send :alias_method, :[], :new - end - - class Empty < Node - def next_node - self - end - end - - EMPTY = Empty[nil, nil] - - private(*attr_atomic(:head)) - - def initialize - super() - self.head = EMPTY - end - - def empty? - head.equal? 
EMPTY - end - - def compare_and_push(head, value) - compare_and_set_head head, Node[value, head] - end - - def push(value) - while true - current_head = head - return self if compare_and_set_head current_head, Node[value, current_head] - end - end - - def peek - head - end - - def compare_and_pop(head) - compare_and_set_head head, head.next_node - end - - def pop - while true - current_head = head - return current_head.value if compare_and_set_head current_head, current_head.next_node - end - end - - def compare_and_clear(head) - compare_and_set_head head, EMPTY - end - - include Enumerable - - def each(head = nil) - return to_enum(:each, head) unless block_given? - it = head || peek - until it.equal?(EMPTY) - yield it.value - it = it.next_node - end - self - end - - def clear - while true - current_head = head - return false if current_head == EMPTY - return true if compare_and_set_head current_head, EMPTY - end - end - - def clear_each(&block) - while true - current_head = head - return self if current_head == EMPTY - if compare_and_set_head current_head, EMPTY - each current_head, &block - return self - end - end - end - - end - end -end diff --git a/lib/concurrent/edge/promises.rb b/lib/concurrent/edge/promises.rb new file mode 100644 index 000000000..1e25d9a71 --- /dev/null +++ b/lib/concurrent/edge/promises.rb @@ -0,0 +1,57 @@ +require 'concurrent/promises' + +module Concurrent + module Promises + module FutureFactoryMethods + # only proof of concept + # @return [Future] + def select(*channels) + future do + # noinspection RubyArgCount + Channel.select do |s| + channels.each do |ch| + s.take(ch) { |value| [value, ch] } + end + end + end + end + end + + class Future < Event + # Zips with selected value form the suplied channels + # @return [Future] + def then_select(*channels) + ZipFuturesPromise.new([self, Concurrent::Promises.select(*channels)], @DefaultExecutor).future + end + + # @note may block + # @note only proof of concept + def then_put(channel) + 
on_success(:io) { |value| channel.put value } + end + + # Asks the actor with its value. + # @return [Future] new future with the response form the actor + def then_ask(actor) + self.then { |v| actor.ask(v) }.flat + end + + include Enumerable + + def each(&block) + each_body self.value, &block + end + + def each!(&block) + each_body self.value!, &block + end + + private + + def each_body(value, &block) + (value.nil? ? [nil] : Array(value)).each(&block) + end + + end + end +end diff --git a/lib/concurrent/lock_free_stack.rb b/lib/concurrent/lock_free_stack.rb new file mode 100644 index 000000000..90550a648 --- /dev/null +++ b/lib/concurrent/lock_free_stack.rb @@ -0,0 +1,97 @@ +module Concurrent + class LockFreeStack < Synchronization::Object + + safe_initialization! + + class Node + attr_reader :value, :next_node + + def initialize(value, next_node) + @value = value + @next_node = next_node + end + + singleton_class.send :alias_method, :[], :new + end + + class Empty < Node + def next_node + self + end + end + + EMPTY = Empty[nil, nil] + + private(*attr_atomic(:head)) + + def initialize + super() + self.head = EMPTY + end + + def empty? + head.equal? EMPTY + end + + def compare_and_push(head, value) + compare_and_set_head head, Node[value, head] + end + + def push(value) + while true + current_head = head + return self if compare_and_set_head current_head, Node[value, current_head] + end + end + + def peek + head + end + + def compare_and_pop(head) + compare_and_set_head head, head.next_node + end + + def pop + while true + current_head = head + return current_head.value if compare_and_set_head current_head, current_head.next_node + end + end + + def compare_and_clear(head) + compare_and_set_head head, EMPTY + end + + include Enumerable + + def each(head = nil) + return to_enum(:each, head) unless block_given? 
+ it = head || peek + until it.equal?(EMPTY) + yield it.value + it = it.next_node + end + self + end + + def clear + while true + current_head = head + return false if current_head == EMPTY + return true if compare_and_set_head current_head, EMPTY + end + end + + def clear_each(&block) + while true + current_head = head + return self if current_head == EMPTY + if compare_and_set_head current_head, EMPTY + each current_head, &block + return self + end + end + end + end +end diff --git a/lib/concurrent/edge/future.rb b/lib/concurrent/promises.rb similarity index 94% rename from lib/concurrent/edge/future.rb rename to lib/concurrent/promises.rb index ef12428d9..766d2d4cf 100644 --- a/lib/concurrent/edge/future.rb +++ b/lib/concurrent/promises.rb @@ -1,24 +1,24 @@ -require 'concurrent' # TODO do not require whole concurrent gem -require 'concurrent/concern/deprecation' -require 'concurrent/edge/lock_free_stack' +# TODO do not require whole concurrent gem +require 'concurrent' +require 'concurrent/lock_free_stack' - -# @note different name just not to collide for now module Concurrent - module Edge - - # Provides edge features, which will be added to or replace features in main gem. - # - # Contains new unified implementation of Futures and Promises which combines Features of previous `Future`, - # `Promise`, `IVar`, `Event`, `Probe`, `dataflow`, `Delay`, `TimerTask` into single framework. It uses extensively - # new synchronization layer to make all the paths lock-free with exception of blocking threads on `#wait`. - # It offers better performance and does not block threads (exception being #wait and similar methods where it's - # intended). - # - # ## Examples - # {include:file:examples/edge_futures.out.rb} - # - # @!macro edge_warning + + # # Futures and Promises + # + # New implementation added in version 0.8 differs from previous versions and has little in common. 
+ # {Future} represents a value which will become {#completed?} in future, it'll contain {#value} if {#success?} or a {#reason} if {#failed?}. It cannot be directly completed, there are implementations of abstract {Promise} class for that, so {Promise}'s only purpose is to complete a given {Future} object. They are always constructed as a Pair even in chaining methods like {#then}, {#rescue}, {#then_delay}, etc. + # + # There is few {Promise} implementations: + # + # - OuterPromise - only Promise used by users, can be completed by outer code. Constructed with {Concurrent::Next.promise} helper method. + # - Immediate - internal implementation of Promise used to represent immediate evaluation of a block. Constructed with {Concurrent::Next.future} helper method. + # - Delay - internal implementation of Promise used to represent delayed evaluation of a block. Constructed with {Concurrent::Next.delay} helper method. + # - ConnectedPromise - used internally to support {Future#with_default_executor} + # + # TODO documentation + module Promises + module FutureFactoryMethods # User is responsible for completing the event once by {Edge::CompletableEvent#complete} # @return [CompletableEvent] @@ -29,6 +29,7 @@ def event(default_executor = :io) # Constructs new Future which will be completed after block is evaluated on executor. Evaluation begins immediately. # @return [Future] def future(default_executor = :io, &task) + # TODO (pitr-ch 14-Mar-2016): arguments for the block ImmediateEventPromise.new(default_executor).future.then(&task) end @@ -110,22 +111,8 @@ def any_successful(*futures) AnySuccessfulPromise.new(futures, :io).future end - # only proof of concept - # @return [Future] - def select(*channels) - # TODO has to be redone, since it's blocking, resp. 
moved to edge - future do - # noinspection RubyArgCount - Channel.select do |s| - channels.each do |ch| - s.take(ch) { |value| [value, ch] } - end - end - end - end - - # TODO add first(count, *futures) - # TODO allow to to have a zip point for many futures and process them in batches by 10 + # TODO consider adding first(count, *futures) + # TODO consider adding zip_by(slice, *futures) processing futures in slices end # Represents an event which will happen in future (will be completed). It has to always happen. @@ -134,7 +121,6 @@ class Event < Synchronization::Object private(*attr_atomic(:internal_state)) # @!visibility private public :internal_state - include Concern::Deprecation include Concern::Logging # @!visibility private @@ -541,22 +527,12 @@ def success?(state = internal_state) state.completed? && state.success? end - def fulfilled? - deprecated_method 'fulfilled?', 'success?' - success? - end - # Has Future been failed? # @return [Boolean] def failed?(state = internal_state) state.completed? && !state.success? end - def rejected? - deprecated_method 'rejected?', 'failed?' - failed? - end - # @return [Object, nil] the value of the Future when success, nil on timeout # @!macro [attach] edge.timeout_nil # @note If the Future can have value `nil` then it cannot be distinquished from `nil` returned on timeout. @@ -624,12 +600,6 @@ def then(executor = nil, &callback) ThenPromise.new(self, @DefaultExecutor, executor || @DefaultExecutor, &callback).future end - # Asks the actor with its value. - # @return [Future] new future with the response form the actor - def then_ask(actor) - self.then { |v| actor.ask(v) }.flat - end - def chain_completable(completable_future) on_completion! 
{ completable_future.complete_with internal_state } end @@ -670,13 +640,6 @@ def schedule(intended_time) end.flat end - # Zips with selected value form the suplied channels - # @return [Future] - def then_select(*channels) - # TODO (pitr-ch 14-Mar-2016): has to go to edge - ZipFuturesPromise.new([self, Concurrent::Edge.select(*channels)], @DefaultExecutor).future - end - # Changes default executor for rest of the chain # @return [Future] def with_default_executor(executor) @@ -697,12 +660,6 @@ def zip(other) alias_method :|, :any - # @note may block - # @note only proof of concept - def then_put(channel) - on_success(:io) { |value| channel.put value } - end - # @yield [value] executed async on `executor` when success # @return self def on_success(executor = nil, &callback) @@ -817,7 +774,6 @@ def async_callback_on_completion(state, executor, callback) callback_on_completion st, cb end end - end # A Event which can be completed by user. @@ -1403,3 +1359,8 @@ def initialize(default_executor, intended_time) extend FutureFactoryMethods end end + +# TODO cancelable Futures, will cancel the future but the task will finish anyway +# TODO task interrupts, how to support? 
+# TODO when value is requested the current thread may evaluate the tasks to get the value for performance reasons it may not evaluate :io though +# TODO try work stealing pool, each thread has it's own queue diff --git a/spec/concurrent/edge/future_spec.rb b/spec/concurrent/edge/future_spec.rb index ee9ef1bd8..b3119097b 100644 --- a/spec/concurrent/edge/future_spec.rb +++ b/spec/concurrent/edge/future_spec.rb @@ -1,4 +1,4 @@ -require 'concurrent-edge' +require 'concurrent/edge/promises' require 'thread' require 'pry' # require 'pry-stack_explorer' @@ -7,7 +7,7 @@ describe 'Concurrent::Edge futures', edge: true do - include Concurrent::Edge::FutureFactoryMethods + include Concurrent::Promises::FutureFactoryMethods describe 'chain_completable' do it 'event' do @@ -196,9 +196,9 @@ expect(z1.then(&:+).value!).to eq 3 expect(z2.then { |a, b, c| a+b+c }.value!).to eq 6 - expect(future { 1 }.delay).to be_a_kind_of Concurrent::Edge::Future + expect(future { 1 }.delay).to be_a_kind_of Concurrent::Promises::Future expect(future { 1 }.delay.wait!).to be_completed - expect(event.complete.delay).to be_a_kind_of Concurrent::Edge::Event + expect(event.complete.delay).to be_a_kind_of Concurrent::Promises::Event expect(event.complete.delay.wait).to be_completed a = future { 1 } @@ -224,6 +224,14 @@ end end + describe '.each' do + specify do + expect(succeeded_future(nil).each.map(&:inspect)).to eq ['nil'] + expect(succeeded_future(1).each.map(&:inspect)).to eq ['1'] + expect(succeeded_future([1, 2]).each.map(&:inspect)).to eq ['1', '2'] + end + end + describe '.zip_events' do it 'waits for all and returns event' do a = succeeded_future 1 @@ -326,9 +334,9 @@ four = three.delay.then(&:succ) # meaningful to_s and inspect defined for Future and Promise - expect(head.to_s).to match /<#Concurrent::Edge::Future:0x[\da-f]+ pending>/ + expect(head.to_s).to match /<#Concurrent::Promises::Future:0x[\da-f]+ pending>/ expect(head.inspect).to( - match(/<#Concurrent::Edge::Future:0x[\da-f]+ 
pending blocks:\[<#Concurrent::Edge::ThenPromise:0x[\da-f]+ pending>\]>/)) + match(/<#Concurrent::Promises::Future:0x[\da-f]+ pending blocks:\[<#Concurrent::Promises::ThenPromise:0x[\da-f]+ pending>\]>/)) # evaluates only up to three, four is left unevaluated expect(three.value!).to eq 3 From 03512a88c2bc0ae0476d83cb85fb1e16d3c1e8f0 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Mon, 14 Mar 2016 23:18:06 +0100 Subject: [PATCH 04/68] update documentation --- doc/future-promise.md | 12 -- .../{edge_futures.in.rb => promises.in.rb} | 40 +++++-- .../{edge_futures.out.rb => promises.out.rb} | 104 +++++++++++------- lib/concurrent/edge/promises.rb | 15 +++ lib/concurrent/promises.rb | 28 ++--- 5 files changed, 119 insertions(+), 80 deletions(-) delete mode 100644 doc/future-promise.md rename examples/{edge_futures.in.rb => promises.in.rb} (77%) rename examples/{edge_futures.out.rb => promises.out.rb} (59%) diff --git a/doc/future-promise.md b/doc/future-promise.md deleted file mode 100644 index 5be5a6983..000000000 --- a/doc/future-promise.md +++ /dev/null @@ -1,12 +0,0 @@ -# Futures and Promises - -New implementation added in version 0.8 differs from previous versions and has little in common. -{Future} represents a value which will become {#completed?} in future, it'll contain {#value} if {#success?} or a {#reason} if {#failed?}. It cannot be directly completed, there are implementations of abstract {Promise} class for that, so {Promise}'s only purpose is to complete a given {Future} object. They are always constructed as a Pair even in chaining methods like {#then}, {#rescue}, {#then_delay}, etc. - -There is few {Promise} implementations: - -- OuterPromise - only Promise used by users, can be completed by outer code. Constructed with {Concurrent::Next.promise} helper method. -- Immediate - internal implementation of Promise used to represent immediate evaluation of a block. Constructed with {Concurrent::Next.future} helper method. 
-- Delay - internal implementation of Promise used to represent delayed evaluation of a block. Constructed with {Concurrent::Next.delay} helper method. -- ConnectedPromise - used internally to support {Future#with_default_executor} - diff --git a/examples/edge_futures.in.rb b/examples/promises.in.rb similarity index 77% rename from examples/edge_futures.in.rb rename to examples/promises.in.rb index aa21ba4d1..96ef01a33 100644 --- a/examples/edge_futures.in.rb +++ b/examples/promises.in.rb @@ -1,5 +1,7 @@ -# adds factory methods like: future, event, delay, schedule, zip, -include Concurrent::Promises::FutureFactoryMethods +# Adds factory methods like: future, event, delay, schedule, zip, ... +# otherwise they can be called on Promises module +include Concurrent::Promises::FutureFactoryMethods # + ### Simple asynchronous task @@ -19,24 +21,32 @@ # re-raising raise future rescue $! +### Direct creation of completed futures + +succeeded_future(Object.new) +failed_future(StandardError.new("boom")) -### Chaining +### Chaining of futures head = succeeded_future 1 # branch1 = head.then(&:succ) # branch2 = head.then(&:succ).then(&:succ) # branch1.zip(branch2).value! +# zip is aliased as & (branch1 & branch2).then { |a, b| a + b }.value! (branch1 & branch2).then(&:+).value! +# or a class method zip from FutureFactoryMethods can be used to zip multiple futures zip(branch1, branch2, branch1).then { |*values| values.reduce &:+ }.value! # pick only first completed +any(branch1, branch2).value! (branch1 | branch2).value! 
+ ### Error handling -future { Object.new }.then(&:succ).then(&:succ).rescue { |e| e.class }.value # error propagates -future { Object.new }.then(&:succ).rescue { 1 }.then(&:succ).value # rescued and replaced with 1 -future { 1 }.then(&:succ).rescue { |e| e.message }.then(&:succ).value # no error, rescue not applied +succeeded_future(Object.new).then(&:succ).then(&:succ).rescue { |e| e.class }.value # error propagates +succeeded_future(Object.new).then(&:succ).rescue { 1 }.then(&:succ).value # rescued and replaced with 1 +succeeded_future(1).then(&:succ).rescue { |e| e.message }.then(&:succ).value # no error, rescue not applied failing_zip = succeeded_future(1) & failed_future(StandardError.new('boom')) failing_zip.result @@ -44,6 +54,7 @@ failing_zip.rescue { |a, b| (a || b).message }.value failing_zip.chain { |success, values, reasons| [success, values.compact, reasons.compactß] }.value + ### Delay # will not evaluate until asked by #value or other method requiring completion @@ -67,11 +78,13 @@ [head, branch1, branch2, join].map(&:completed?) join.value +[head, branch1, branch2, join].map(&:completed?) ### Flatting -future { future { 1+1 } }.flat.value # waits for inner future +# waits for inner future, only the last call to value blocks thread +future { future { 1+1 } }.flat.value # more complicated example future { future { future { 1 + 1 } } }. @@ -82,6 +95,7 @@ ### Schedule +# it'll be executed after 0.1 seconds scheduled = schedule(0.1) { 1 } scheduled.completed? 
@@ -98,9 +112,8 @@ future = completable_future event = event() -# Don't forget to keep the reference, `future.then { |v| v }` is incompletable -# will be blocked until completed +# These threads will be blocked until the future and event is completed t1 = Thread.new { future.value } # t2 = Thread.new { event.wait } # @@ -109,6 +122,7 @@ future.try_success 2 event.complete +# The threads can be joined now [t1, t2].each &:join # @@ -128,7 +142,12 @@ ### Thread-pools -future(:fast) { 2 }.then(:io) { File.read __FILE__ }.wait +# Factory methods are taking names of the global executors +# (ot instances of custom executors) + +future(:fast) { 2 }. # executed on :fast executor only short and non-blocking tasks can go there + then(:io) { File.read __FILE__ }. # executed on executor for blocking and long operations + wait ### Interoperability with actors @@ -175,6 +194,7 @@ # periodic task +# TODO (pitr-ch 14-Mar-2016): fix to be volatile @end = false def schedule_job diff --git a/examples/edge_futures.out.rb b/examples/promises.out.rb similarity index 59% rename from examples/edge_futures.out.rb rename to examples/promises.out.rb index 142ecacb0..fe25b665a 100644 --- a/examples/edge_futures.out.rb +++ b/examples/promises.out.rb @@ -1,11 +1,12 @@ +# Adds factory methods like: future, event, delay, schedule, zip, ... +# otherwise they can be called on Promises module +include Concurrent::Promises::FutureFactoryMethods -# adds factory methods like: future, event, delay, schedule, zip, -include Concurrent::Promises::FutureFactoryMethods ### Simple asynchronous task future = future { sleep 0.1; 1 + 1 } # evaluation starts immediately - # => <#Concurrent::Edge::Future:0x7fedf3042458 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fae41b36630 pending blocks:[]> future.completed? 
# => false # block until evaluated future.value # => 2 @@ -15,38 +16,48 @@ ### Failing asynchronous task future = future { raise 'Boom' } - # => <#Concurrent::Edge::Future:0x7fedf30397e0 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fae41b2d8f0 pending blocks:[]> future.value # => nil future.value! rescue $! # => # future.reason # => # # re-raising raise future rescue $! # => # +### Direct creation of completed futures -### Chaining +succeeded_future(Object.new) + # => <#Concurrent::Promises::Future:0x7fae41b25178 success blocks:[]> +failed_future(StandardError.new("boom")) + # => <#Concurrent::Promises::Future:0x7fae41b24188 failed blocks:[]> + +### Chaining of futures head = succeeded_future 1 branch1 = head.then(&:succ) branch2 = head.then(&:succ).then(&:succ) branch1.zip(branch2).value! # => [2, 3] +# zip is aliased as & (branch1 & branch2).then { |a, b| a + b }.value! # => 5 (branch1 & branch2).then(&:+).value! # => 5 +# or a class method zip from FutureFactoryMethods can be used to zip multiple futures zip(branch1, branch2, branch1).then { |*values| values.reduce &:+ }.value! # => 7 # pick only first completed +any(branch1, branch2).value! # => 2 (branch1 | branch2).value! 
# => 2 + ### Error handling -future { Object.new }.then(&:succ).then(&:succ).rescue { |e| e.class }.value # error propagates +succeeded_future(Object.new).then(&:succ).then(&:succ).rescue { |e| e.class }.value # error propagates # => NoMethodError -future { Object.new }.then(&:succ).rescue { 1 }.then(&:succ).value # rescued and replaced with 1 +succeeded_future(Object.new).then(&:succ).rescue { 1 }.then(&:succ).value # rescued and replaced with 1 # => 2 -future { 1 }.then(&:succ).rescue { |e| e.message }.then(&:succ).value # no error, rescue not applied +succeeded_future(1).then(&:succ).rescue { |e| e.message }.then(&:succ).value # no error, rescue not applied # => 3 failing_zip = succeeded_future(1) & failed_future(StandardError.new('boom')) - # => <#Concurrent::Edge::Future:0x7fedf38e3378 failed blocks:[]> + # => <#Concurrent::Promises::Future:0x7fae41abe748 failed blocks:[]> failing_zip.result # => [false, [1, nil], [nil, #]] failing_zip.then { |v| 'never happens' }.result # => [false, [1, nil], [nil, #]] failing_zip.rescue { |a, b| (a || b).message }.value @@ -54,11 +65,12 @@ failing_zip.chain { |success, values, reasons| [success, values.compact, reasons.compactß] }.value # => nil + ### Delay # will not evaluate until asked by #value or other method requiring completion future = delay { 'lazy' } - # => <#Concurrent::Edge::Future:0x7fedf38c0b20 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fae41aa4938 pending blocks:[]> sleep 0.1 future.completed? 
# => false future.value # => "lazy" @@ -66,29 +78,30 @@ # propagates trough chain allowing whole or partial lazy chains head = delay { 1 } - # => <#Concurrent::Edge::Future:0x7fedf480dc90 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fae41a94f38 pending blocks:[]> branch1 = head.then(&:succ) - # => <#Concurrent::Edge::Future:0x7fedf480cd40 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fae41a94060 pending blocks:[]> branch2 = head.delay.then(&:succ) - # => <#Concurrent::Edge::Future:0x7fedf1163690 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fae41a76948 pending blocks:[]> join = branch1 & branch2 - # => <#Concurrent::Edge::Future:0x7fedf11624c0 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fae41a75a48 pending blocks:[]> sleep 0.1 # nothing will complete # => 0 [head, branch1, branch2, join].map(&:completed?) # => [false, false, false, false] branch1.value # => 2 sleep 0.1 # forces only head to complete, branch 2 stays incomplete - # => 0 + # => 1 [head, branch1, branch2, join].map(&:completed?) # => [true, true, false, false] join.value # => [2, 2] +[head, branch1, branch2, join].map(&:completed?) # => [true, true, true, true] ### Flatting -future { future { 1+1 } }.flat.value # waits for inner future - # => 2 +# waits for inner future, only the last call to value blocks thread +future { future { 1+1 } }.flat.value # => 2 # more complicated example future { future { future { 1 + 1 } } }. @@ -99,15 +112,16 @@ ### Schedule +# it'll be executed after 0.1 seconds scheduled = schedule(0.1) { 1 } - # => <#Concurrent::Edge::Future:0x7fedf387ad28 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fae419fcf80 pending blocks:[]> scheduled.completed? 
# => false scheduled.value # available after 0.1sec # => 1 # and in chain scheduled = delay { 1 }.schedule(0.1).then(&:succ) - # => <#Concurrent::Edge::Future:0x7fedf383b448 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fae4197ef68 pending blocks:[]> # will not be scheduled until value is requested sleep 0.1 scheduled.value # returns after another 0.1sec # => 2 @@ -116,36 +130,36 @@ ### Completable Future and Event future = completable_future - # => <#Concurrent::Edge::CompletableFuture:0x7fe8d312f9d0 pending blocks:[]> + # => <#Concurrent::Promises::CompletableFuture:0x7fae4196c368 pending blocks:[]> event = event() - # => <#Concurrent::Edge::CompletableEvent:0x7fedf1112f88 pending blocks:[]> -# Don't forget to keep the reference, `future.then { |v| v }` is incompletable + # => <#Concurrent::Promises::CompletableEvent:0x7fae41965db0 pending blocks:[]> -# will be blocked until completed +# These threads will be blocked until the future and event is completed t1 = Thread.new { future.value } t2 = Thread.new { event.wait } future.success 1 - # => <#Concurrent::Edge::CompletableFuture:0x7fedf3820828 success blocks:[]> + # => <#Concurrent::Promises::CompletableFuture:0x7fae4196c368 success blocks:[]> future.success 1 rescue $! # => # future.try_success 2 # => false event.complete - # => <#Concurrent::Edge::CompletableEvent:0x7fedf1112f88 completed blocks:[]> + # => <#Concurrent::Promises::CompletableEvent:0x7fae41965db0 completed blocks:[]> +# The threads can be joined now [t1, t2].each &:join ### Callbacks -queue = Queue.new # => # +queue = Queue.new # => # future = delay { 1 + 1 } - # => <#Concurrent::Edge::Future:0x7fedf1141950 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fae4194ee58 pending blocks:[]> future.on_success { queue << 1 } # evaluated asynchronously - # => <#Concurrent::Edge::Future:0x7fedf1141950 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fae4194ee58 pending blocks:[]> future.on_success! 
{ queue << 2 } # evaluated on completing thread - # => <#Concurrent::Edge::Future:0x7fedf1141950 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fae4194ee58 pending blocks:[]> queue.empty? # => true future.value # => 2 @@ -155,8 +169,13 @@ ### Thread-pools -future(:fast) { 2 }.then(:io) { File.read __FILE__ }.wait - # => <#Concurrent::Edge::Future:0x7fedf1121a10 success blocks:[]> +# Factory methods are taking names of the global executors +# (ot instances of custom executors) + +future(:fast) { 2 }. # executed on :fast executor only short and non-blocking tasks can go there + then(:io) { File.read __FILE__ }. # executed on executor for blocking and long operations + wait + # => <#Concurrent::Promises::Future:0x7fae4192eb08 success blocks:[]> ### Interoperability with actors @@ -164,7 +183,7 @@ actor = Concurrent::Actor::Utils::AdHoc.spawn :square do -> v { v ** 2 } end - # => # + # => # future { 2 }. @@ -178,24 +197,24 @@ ### Interoperability with channels ch1 = Concurrent::Channel.new - # => #, @__condition__=#, @closed=false, @size=0, @capacity=1, @buffer=nil, @putting=[], @taking=[]>, @validator=#> + # => #, @__condition__=#, @closed=false, @size=0, @capacity=1, @buffer=nil, @putting=[], @taking=[]>, @validator=#> ch2 = Concurrent::Channel.new - # => #, @__condition__=#, @closed=false, @size=0, @capacity=1, @buffer=nil, @putting=[], @taking=[]>, @validator=#> + # => #, @__condition__=#, @closed=false, @size=0, @capacity=1, @buffer=nil, @putting=[], @taking=[]>, @validator=#> result = select(ch1, ch2) - # => <#Concurrent::Edge::Future:0x7fedf10a1400 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fae418a7338 pending blocks:[]> ch1.put 1 # => true result.value! - # => [1, #, @__condition__=#, @closed=false, @size=0, @capacity=1, @buffer=nil, @putting=[], @taking=[]>, @validator=#>] + # => [1, #, @__condition__=#, @closed=false, @size=0, @capacity=1, @buffer=nil, @putting=[], @taking=[]>, @validator=#>] future { 1+1 }. 
then_put(ch1) - # => <#Concurrent::Edge::Future:0x7f8df49b4f90 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fae4189f098 pending blocks:[]> result = future { '%02d' }. then_select(ch1, ch2). then { |format, (value, channel)| format format, value } - # => <#Concurrent::Edge::Future:0x7fedf0a1f7d0 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fae411371c0 pending blocks:[]> result.value! # => "02" @@ -203,7 +222,7 @@ # simple background processing future { do_stuff } - # => <#Concurrent::Edge::Future:0x7fedf0a15cf8 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fae43069070 pending blocks:[]> # parallel background processing jobs = 10.times.map { |i| future { i } } @@ -211,6 +230,7 @@ # periodic task +# TODO (pitr-ch 14-Mar-2016): fix to be volatile @end = false # => false def schedule_job @@ -220,7 +240,7 @@ def schedule_job end # => :schedule_job schedule_job - # => <#Concurrent::Edge::Future:0x7fedf09c6720 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fae42900860 pending blocks:[]> @end = true # => true @@ -233,7 +253,7 @@ def schedule_job data[message] end end - # => # + # => # concurrent_jobs = 11.times.map do |v| @@ -263,7 +283,7 @@ def schedule_job end end end - # => # + # => # concurrent_jobs = 11.times.map do |v| diff --git a/lib/concurrent/edge/promises.rb b/lib/concurrent/edge/promises.rb index 1e25d9a71..21423655e 100644 --- a/lib/concurrent/edge/promises.rb +++ b/lib/concurrent/edge/promises.rb @@ -36,6 +36,21 @@ def then_ask(actor) self.then { |v| actor.ask(v) }.flat end + # TODO (pitr-ch 14-Mar-2016): document, and move to core + def run(terminated = Promises.future) + on_completion do |success, value, reason| + if success + if value.is_a?(Future) + value.run terminated + else + terminated.success value + end + else + terminated.fail reason + end + end + end + include Enumerable def each(&block) diff --git a/lib/concurrent/promises.rb b/lib/concurrent/promises.rb index 766d2d4cf..3e86b5bf4 100644 --- 
a/lib/concurrent/promises.rb +++ b/lib/concurrent/promises.rb @@ -4,23 +4,19 @@ module Concurrent - # # Futures and Promises + # # Promises Framework # - # New implementation added in version 0.8 differs from previous versions and has little in common. - # {Future} represents a value which will become {#completed?} in future, it'll contain {#value} if {#success?} or a {#reason} if {#failed?}. It cannot be directly completed, there are implementations of abstract {Promise} class for that, so {Promise}'s only purpose is to complete a given {Future} object. They are always constructed as a Pair even in chaining methods like {#then}, {#rescue}, {#then_delay}, etc. + # Unified implementation of futures and promises which combines features of previous `Future`, + # `Promise`, `IVar`, `Event`, `dataflow`, `Delay`, and `TimerTask` into a single framework. It extensively uses the + # new synchronization layer to make all the features **non-blocking** and **lock-free**, with the exception of obviously blocking + # operations like `#wait`, `#value`. It also offers better performance. # - # There is few {Promise} implementations: - # - # - OuterPromise - only Promise used by users, can be completed by outer code. Constructed with {Concurrent::Next.promise} helper method. - # - Immediate - internal implementation of Promise used to represent immediate evaluation of a block. Constructed with {Concurrent::Next.future} helper method. - # - Delay - internal implementation of Promise used to represent delayed evaluation of a block. Constructed with {Concurrent::Next.delay} helper method. 
- # - ConnectedPromise - used internally to support {Future#with_default_executor} - # - # TODO documentation + # ## Examples + # {include:file:examples/promises.out.rb} module Promises module FutureFactoryMethods - # User is responsible for completing the event once by {Edge::CompletableEvent#complete} + # User is responsible for completing the event once by {Promises::CompletableEvent#complete} # @return [CompletableEvent] def event(default_executor = :io) CompletableEventPromise.new(default_executor).future @@ -33,7 +29,7 @@ def future(default_executor = :io, &task) ImmediateEventPromise.new(default_executor).future.then(&task) end - # User is responsible for completing the future once by {Edge::CompletableFuture#success} or {Edge::CompletableFuture#fail} + # User is responsible for completing the future once by {Promises::CompletableFuture#success} or {Promises::CompletableFuture#fail} # @return [CompletableFuture] def completable_future(default_executor = :io) CompletableFuturePromise.new(default_executor).future @@ -587,7 +583,7 @@ def exception(*args) raise 'obligation is not failed' unless failed? reason = internal_state.reason if reason.is_a?(::Array) - reason.each { |e| log ERROR, 'Edge::Future', e } + reason.each { |e| log ERROR, 'Promises::Future', e } Concurrent::Error.new 'multiple exceptions, inspect log' else reason.exception(*args) @@ -692,7 +688,7 @@ def complete_with(state, raise_on_reassign = true) call_callbacks state else if raise_on_reassign - log ERROR, 'Edge::Future', reason if reason # print otherwise hidden error + log ERROR, 'Promises::Future', reason if reason # print otherwise hidden error raise(Concurrent::MultipleAssignmentError.new( "Future can be completed only once. 
Current result is #{result}, " + "trying to set #{state.result}")) @@ -887,7 +883,7 @@ def evaluate_to(*args, block) rescue StandardError => error complete_with Future::Failed.new(error) rescue Exception => error - log(ERROR, 'Edge::Future', error) + log(ERROR, 'Promises::Future', error) complete_with Future::Failed.new(error) end end From 95fb67cb006b874eb1101fdd641bdbd05954064b Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Mon, 14 Mar 2016 23:29:03 +0100 Subject: [PATCH 05/68] FutureFactoryMethods can be just FactoryMethods --- examples/promises.in.rb | 4 ++-- examples/promises.out.rb | 26 +++++++++++++------------- lib/concurrent/promises.rb | 4 ++-- spec/concurrent/edge/future_spec.rb | 2 +- 4 files changed, 18 insertions(+), 18 deletions(-) diff --git a/examples/promises.in.rb b/examples/promises.in.rb index 96ef01a33..f7011b332 100644 --- a/examples/promises.in.rb +++ b/examples/promises.in.rb @@ -1,6 +1,6 @@ # Adds factory methods like: future, event, delay, schedule, zip, ... # otherwise they can be called on Promises module -include Concurrent::Promises::FutureFactoryMethods # +include Concurrent::Promises::FactoryMethods # ### Simple asynchronous task @@ -35,7 +35,7 @@ # zip is aliased as & (branch1 & branch2).then { |a, b| a + b }.value! (branch1 & branch2).then(&:+).value! -# or a class method zip from FutureFactoryMethods can be used to zip multiple futures +# or a class method zip from FactoryMethods can be used to zip multiple futures zip(branch1, branch2, branch1).then { |*values| values.reduce &:+ }.value! # pick only first completed any(branch1, branch2).value! diff --git a/examples/promises.out.rb b/examples/promises.out.rb index fe25b665a..03fc6d5a0 100644 --- a/examples/promises.out.rb +++ b/examples/promises.out.rb @@ -1,6 +1,6 @@ # Adds factory methods like: future, event, delay, schedule, zip, ... 
# otherwise they can be called on Promises module -include Concurrent::Promises::FutureFactoryMethods +include Concurrent::Promises::FactoryMethods ### Simple asynchronous task @@ -32,14 +32,14 @@ ### Chaining of futures -head = succeeded_future 1 -branch1 = head.then(&:succ) -branch2 = head.then(&:succ).then(&:succ) +head = succeeded_future 1 +branch1 = head.then(&:succ) +branch2 = head.then(&:succ).then(&:succ) branch1.zip(branch2).value! # => [2, 3] # zip is aliased as & (branch1 & branch2).then { |a, b| a + b }.value! # => 5 (branch1 & branch2).then(&:+).value! # => 5 -# or a class method zip from FutureFactoryMethods can be used to zip multiple futures +# or a class method zip from FactoryMethods can be used to zip multiple futures zip(branch1, branch2, branch1).then { |*values| values.reduce &:+ }.value! # => 7 # pick only first completed @@ -71,7 +71,7 @@ # will not evaluate until asked by #value or other method requiring completion future = delay { 'lazy' } # => <#Concurrent::Promises::Future:0x7fae41aa4938 pending blocks:[]> -sleep 0.1 +sleep 0.1 future.completed? 
# => false future.value # => "lazy" @@ -123,7 +123,7 @@ scheduled = delay { 1 }.schedule(0.1).then(&:succ) # => <#Concurrent::Promises::Future:0x7fae4197ef68 pending blocks:[]> # will not be scheduled until value is requested -sleep 0.1 +sleep 0.1 scheduled.value # returns after another 0.1sec # => 2 @@ -135,8 +135,8 @@ # => <#Concurrent::Promises::CompletableEvent:0x7fae41965db0 pending blocks:[]> # These threads will be blocked until the future and event is completed -t1 = Thread.new { future.value } -t2 = Thread.new { event.wait } +t1 = Thread.new { future.value } +t2 = Thread.new { event.wait } future.success 1 # => <#Concurrent::Promises::CompletableFuture:0x7fae4196c368 success blocks:[]> @@ -147,7 +147,7 @@ # => <#Concurrent::Promises::CompletableEvent:0x7fae41965db0 completed blocks:[]> # The threads can be joined now -[t1, t2].each &:join +[t1, t2].each &:join ### Callbacks @@ -225,7 +225,7 @@ # => <#Concurrent::Promises::Future:0x7fae43069070 pending blocks:[]> # parallel background processing -jobs = 10.times.map { |i| future { i } } +jobs = 10.times.map { |i| future { i } } zip(*jobs).value # => [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] @@ -263,7 +263,7 @@ def schedule_job # get size of the string, fails for 11 then(&:size). rescue { |reason| reason.message } # translate error to value (exception, message) -end +end zip(*concurrent_jobs).value! # => [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, "undefined method `size' for nil:NilClass"] @@ -292,7 +292,7 @@ def schedule_job then_ask(DB_POOL). then(&:size). rescue { |reason| reason.message } -end +end zip(*concurrent_jobs).value! 
# => [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, "undefined method `size' for nil:NilClass"] diff --git a/lib/concurrent/promises.rb b/lib/concurrent/promises.rb index 3e86b5bf4..51fdb3591 100644 --- a/lib/concurrent/promises.rb +++ b/lib/concurrent/promises.rb @@ -15,7 +15,7 @@ module Concurrent # {include:file:examples/promises.out.rb} module Promises - module FutureFactoryMethods + module FactoryMethods # User is responsible for completing the event once by {Promises::CompletableEvent#complete} # @return [CompletableEvent] def event(default_executor = :io) @@ -1352,7 +1352,7 @@ def initialize(default_executor, intended_time) end end - extend FutureFactoryMethods + extend FactoryMethods end end diff --git a/spec/concurrent/edge/future_spec.rb b/spec/concurrent/edge/future_spec.rb index b3119097b..f13441286 100644 --- a/spec/concurrent/edge/future_spec.rb +++ b/spec/concurrent/edge/future_spec.rb @@ -7,7 +7,7 @@ describe 'Concurrent::Edge futures', edge: true do - include Concurrent::Promises::FutureFactoryMethods + include Concurrent::Promises::FactoryMethods describe 'chain_completable' do it 'event' do From f1d20580f3c96551ad67040fc54bc363adf82910 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Mon, 21 Mar 2016 23:26:48 +0100 Subject: [PATCH 06/68] methods with block can take arguments which are safly published and passed to the block when executed --- examples/promises.in.rb | 28 ++-- examples/promises.out.rb | 120 +++++++++-------- lib/concurrent/edge/promises.rb | 2 +- lib/concurrent/promises.rb | 195 +++++++++++++++++----------- spec/concurrent/edge/future_spec.rb | 40 +++--- 5 files changed, 226 insertions(+), 159 deletions(-) diff --git a/examples/promises.in.rb b/examples/promises.in.rb index f7011b332..8c2104bb1 100644 --- a/examples/promises.in.rb +++ b/examples/promises.in.rb @@ -42,6 +42,15 @@ (branch1 | branch2).value! 
+### Arguments + +# any supplied arguments are passed to the block, promises ensure that they are visible to the block + +future('3') { |s| s.to_i }.then(2) { |a, b| a + b }.value +succeeded_future(1).then(2, &:+).value +succeeded_future(1).chain(2) { |success, value, reason, arg| value + arg }.value + + ### Error handling succeeded_future(Object.new).then(&:succ).then(&:succ).rescue { |e| e.class }.value # error propagates @@ -145,8 +154,10 @@ # Factory methods are taking names of the global executors # (ot instances of custom executors) -future(:fast) { 2 }. # executed on :fast executor only short and non-blocking tasks can go there - then(:io) { File.read __FILE__ }. # executed on executor for blocking and long operations +# executed on :fast executor, only short and non-blocking tasks can go there +future_on(:fast) { 2 }. + # executed on executor for blocking and long operations + then_on(:io) { File.read __FILE__ }. wait @@ -194,22 +205,21 @@ # periodic task -# TODO (pitr-ch 14-Mar-2016): fix to be volatile -@end = false +DONE = Concurrent::AtomicBoolean.new false def schedule_job schedule(1) { do_stuff }. rescue { |e| StandardError === e ? report_error(e) : raise(e) }. - then { schedule_job unless @end } + then { schedule_job unless DONE.true? } end schedule_job -@end = true +DONE.make_true # How to limit processing where there are limited resources? # By creating an actor managing the resource -DB = Concurrent::Actor::Utils::AdHoc.spawn :db do +DB = Concurrent::Actor::Utils::AdHoc.spawn :db do data = Array.new(10) { |i| '*' * i } lambda do |message| # pretending that this queries a DB @@ -219,7 +229,7 @@ def schedule_job concurrent_jobs = 11.times.map do |v| - future { v }. + succeeded_future(v). # ask the DB with the `v`, only one at the time, rest is parallel then_ask(DB). # get size of the string, fails for 11 @@ -246,7 +256,7 @@ def schedule_job concurrent_jobs = 11.times.map do |v| - future { v }. + succeeded_future(v). 
# ask the DB_POOL with the `v`, only 5 at the time, rest is parallel then_ask(DB_POOL). then(&:size). diff --git a/examples/promises.out.rb b/examples/promises.out.rb index 03fc6d5a0..c6fd6d062 100644 --- a/examples/promises.out.rb +++ b/examples/promises.out.rb @@ -1,12 +1,12 @@ # Adds factory methods like: future, event, delay, schedule, zip, ... # otherwise they can be called on Promises module -include Concurrent::Promises::FactoryMethods +include Concurrent::Promises::FactoryMethods ### Simple asynchronous task future = future { sleep 0.1; 1 + 1 } # evaluation starts immediately - # => <#Concurrent::Promises::Future:0x7fae41b36630 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fc5cc1e5340 pending blocks:[]> future.completed? # => false # block until evaluated future.value # => 2 @@ -16,7 +16,7 @@ ### Failing asynchronous task future = future { raise 'Boom' } - # => <#Concurrent::Promises::Future:0x7fae41b2d8f0 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fc5cc1dc808 pending blocks:[]> future.value # => nil future.value! rescue $! # => # future.reason # => # @@ -26,15 +26,15 @@ ### Direct creation of completed futures succeeded_future(Object.new) - # => <#Concurrent::Promises::Future:0x7fae41b25178 success blocks:[]> + # => <#Concurrent::Promises::Future:0x7fc5cc1c6030 success blocks:[]> failed_future(StandardError.new("boom")) - # => <#Concurrent::Promises::Future:0x7fae41b24188 failed blocks:[]> + # => <#Concurrent::Promises::Future:0x7fc5cc1c50b8 failed blocks:[]> ### Chaining of futures -head = succeeded_future 1 -branch1 = head.then(&:succ) -branch2 = head.then(&:succ).then(&:succ) +head = succeeded_future 1 +branch1 = head.then(&:succ) +branch2 = head.then(&:succ).then(&:succ) branch1.zip(branch2).value! # => [2, 3] # zip is aliased as & (branch1 & branch2).then { |a, b| a + b }.value! # => 5 @@ -47,6 +47,17 @@ (branch1 | branch2).value! 
# => 2 +### Arguments + +# any supplied arguments are passed to the block, promises ensure that they are visible to the block + +future('3') { |s| s.to_i }.then(2) { |a, b| a + b }.value + # => 5 +succeeded_future(1).then(2, &:+).value # => 3 +succeeded_future(1).chain(2) { |success, value, reason, arg| value + arg }.value + # => 3 + + ### Error handling succeeded_future(Object.new).then(&:succ).then(&:succ).rescue { |e| e.class }.value # error propagates @@ -57,7 +68,7 @@ # => 3 failing_zip = succeeded_future(1) & failed_future(StandardError.new('boom')) - # => <#Concurrent::Promises::Future:0x7fae41abe748 failed blocks:[]> + # => <#Concurrent::Promises::Future:0x7fc5cc11ec90 failed blocks:[]> failing_zip.result # => [false, [1, nil], [nil, #]] failing_zip.then { |v| 'never happens' }.result # => [false, [1, nil], [nil, #]] failing_zip.rescue { |a, b| (a || b).message }.value @@ -70,28 +81,28 @@ # will not evaluate until asked by #value or other method requiring completion future = delay { 'lazy' } - # => <#Concurrent::Promises::Future:0x7fae41aa4938 pending blocks:[]> -sleep 0.1 + # => <#Concurrent::Promises::Future:0x7fc5cc0ff660 pending blocks:[]> +sleep 0.1 future.completed? 
# => false future.value # => "lazy" # propagates trough chain allowing whole or partial lazy chains head = delay { 1 } - # => <#Concurrent::Promises::Future:0x7fae41a94f38 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fc5cc0fc938 pending blocks:[]> branch1 = head.then(&:succ) - # => <#Concurrent::Promises::Future:0x7fae41a94060 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fc5cc0df068 pending blocks:[]> branch2 = head.delay.then(&:succ) - # => <#Concurrent::Promises::Future:0x7fae41a76948 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fc5cc0dd178 pending blocks:[]> join = branch1 & branch2 - # => <#Concurrent::Promises::Future:0x7fae41a75a48 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fc5cc0dc430 pending blocks:[]> sleep 0.1 # nothing will complete # => 0 [head, branch1, branch2, join].map(&:completed?) # => [false, false, false, false] branch1.value # => 2 sleep 0.1 # forces only head to complete, branch 2 stays incomplete - # => 1 + # => 0 [head, branch1, branch2, join].map(&:completed?) # => [true, true, false, false] join.value # => [2, 2] @@ -114,52 +125,52 @@ # it'll be executed after 0.1 seconds scheduled = schedule(0.1) { 1 } - # => <#Concurrent::Promises::Future:0x7fae419fcf80 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fc5caaae028 pending blocks:[]> scheduled.completed? 
# => false scheduled.value # available after 0.1sec # => 1 # and in chain scheduled = delay { 1 }.schedule(0.1).then(&:succ) - # => <#Concurrent::Promises::Future:0x7fae4197ef68 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fc5caa9f2d0 pending blocks:[]> # will not be scheduled until value is requested -sleep 0.1 +sleep 0.1 scheduled.value # returns after another 0.1sec # => 2 ### Completable Future and Event future = completable_future - # => <#Concurrent::Promises::CompletableFuture:0x7fae4196c368 pending blocks:[]> + # => <#Concurrent::Promises::CompletableFuture:0x7fc5caa8eae8 pending blocks:[]> event = event() - # => <#Concurrent::Promises::CompletableEvent:0x7fae41965db0 pending blocks:[]> + # => <#Concurrent::Promises::CompletableEvent:0x7fc5caa8d648 pending blocks:[]> # These threads will be blocked until the future and event is completed -t1 = Thread.new { future.value } -t2 = Thread.new { event.wait } +t1 = Thread.new { future.value } +t2 = Thread.new { event.wait } future.success 1 - # => <#Concurrent::Promises::CompletableFuture:0x7fae4196c368 success blocks:[]> + # => <#Concurrent::Promises::CompletableFuture:0x7fc5caa8eae8 success blocks:[]> future.success 1 rescue $! # => # future.try_success 2 # => false event.complete - # => <#Concurrent::Promises::CompletableEvent:0x7fae41965db0 completed blocks:[]> + # => <#Concurrent::Promises::CompletableEvent:0x7fc5caa8d648 completed blocks:[]> # The threads can be joined now -[t1, t2].each &:join +[t1, t2].each &:join ### Callbacks -queue = Queue.new # => # +queue = Queue.new # => # future = delay { 1 + 1 } - # => <#Concurrent::Promises::Future:0x7fae4194ee58 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fc5caa754d0 pending blocks:[]> future.on_success { queue << 1 } # evaluated asynchronously - # => <#Concurrent::Promises::Future:0x7fae4194ee58 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fc5caa754d0 pending blocks:[]> future.on_success! 
{ queue << 2 } # evaluated on completing thread - # => <#Concurrent::Promises::Future:0x7fae4194ee58 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fc5caa754d0 pending blocks:[]> queue.empty? # => true future.value # => 2 @@ -172,10 +183,12 @@ # Factory methods are taking names of the global executors # (ot instances of custom executors) -future(:fast) { 2 }. # executed on :fast executor only short and non-blocking tasks can go there - then(:io) { File.read __FILE__ }. # executed on executor for blocking and long operations +# executed on :fast executor, only short and non-blocking tasks can go there +future_on(:fast) { 2 }. + # executed on executor for blocking and long operations + then_on(:io) { File.read __FILE__ }. wait - # => <#Concurrent::Promises::Future:0x7fae4192eb08 success blocks:[]> + # => <#Concurrent::Promises::Future:0x7fc5cb010b10 success blocks:[]> ### Interoperability with actors @@ -183,7 +196,7 @@ actor = Concurrent::Actor::Utils::AdHoc.spawn :square do -> v { v ** 2 } end - # => # + # => # future { 2 }. @@ -197,24 +210,24 @@ ### Interoperability with channels ch1 = Concurrent::Channel.new - # => #, @__condition__=#, @closed=false, @size=0, @capacity=1, @buffer=nil, @putting=[], @taking=[]>, @validator=#> + # => #, @__condition__=#, @closed=false, @size=0, @capacity=1, @buffer=nil, @putting=[], @taking=[]>, @validator=#> ch2 = Concurrent::Channel.new - # => #, @__condition__=#, @closed=false, @size=0, @capacity=1, @buffer=nil, @putting=[], @taking=[]>, @validator=#> + # => #, @__condition__=#, @closed=false, @size=0, @capacity=1, @buffer=nil, @putting=[], @taking=[]>, @validator=#> result = select(ch1, ch2) - # => <#Concurrent::Promises::Future:0x7fae418a7338 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fc5cc892e40 pending blocks:[]> ch1.put 1 # => true result.value! 
- # => [1, #, @__condition__=#, @closed=false, @size=0, @capacity=1, @buffer=nil, @putting=[], @taking=[]>, @validator=#>] + # => [1, #, @__condition__=#, @closed=false, @size=0, @capacity=1, @buffer=nil, @putting=[], @taking=[]>, @validator=#>] future { 1+1 }. then_put(ch1) - # => <#Concurrent::Promises::Future:0x7fae4189f098 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fc5cc87b920 pending blocks:[]> result = future { '%02d' }. then_select(ch1, ch2). then { |format, (value, channel)| format format, value } - # => <#Concurrent::Promises::Future:0x7fae411371c0 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fc5cc862f60 pending blocks:[]> result.value! # => "02" @@ -222,48 +235,47 @@ # simple background processing future { do_stuff } - # => <#Concurrent::Promises::Future:0x7fae43069070 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fc5cc080518 pending blocks:[]> # parallel background processing -jobs = 10.times.map { |i| future { i } } +jobs = 10.times.map { |i| future { i } } zip(*jobs).value # => [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] # periodic task -# TODO (pitr-ch 14-Mar-2016): fix to be volatile -@end = false # => false +DONE = Concurrent::AtomicBoolean.new false # => # def schedule_job schedule(1) { do_stuff }. rescue { |e| StandardError === e ? report_error(e) : raise(e) }. - then { schedule_job unless @end } + then { schedule_job unless DONE.true? } end # => :schedule_job schedule_job - # => <#Concurrent::Promises::Future:0x7fae42900860 pending blocks:[]> -@end = true # => true + # => <#Concurrent::Promises::Future:0x7fc5ca9949d0 pending blocks:[]> +DONE.make_true # => true # How to limit processing where there are limited resources? 
# By creating an actor managing the resource -DB = Concurrent::Actor::Utils::AdHoc.spawn :db do +DB = Concurrent::Actor::Utils::AdHoc.spawn :db do data = Array.new(10) { |i| '*' * i } lambda do |message| # pretending that this queries a DB data[message] end end - # => # + # => # concurrent_jobs = 11.times.map do |v| - future { v }. + succeeded_future(v). # ask the DB with the `v`, only one at the time, rest is parallel then_ask(DB). # get size of the string, fails for 11 then(&:size). rescue { |reason| reason.message } # translate error to value (exception, message) -end +end zip(*concurrent_jobs).value! # => [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, "undefined method `size' for nil:NilClass"] @@ -283,16 +295,16 @@ def schedule_job end end end - # => # + # => # concurrent_jobs = 11.times.map do |v| - future { v }. + succeeded_future(v). # ask the DB_POOL with the `v`, only 5 at the time, rest is parallel then_ask(DB_POOL). then(&:size). rescue { |reason| reason.message } -end +end zip(*concurrent_jobs).value! # => [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, "undefined method `size' for nil:NilClass"] diff --git a/lib/concurrent/edge/promises.rb b/lib/concurrent/edge/promises.rb index 21423655e..f45d7e129 100644 --- a/lib/concurrent/edge/promises.rb +++ b/lib/concurrent/edge/promises.rb @@ -2,7 +2,7 @@ module Concurrent module Promises - module FutureFactoryMethods + module FactoryMethods # only proof of concept # @return [Future] def select(*channels) diff --git a/lib/concurrent/promises.rb b/lib/concurrent/promises.rb index 51fdb3591..b4c9f6aa7 100644 --- a/lib/concurrent/promises.rb +++ b/lib/concurrent/promises.rb @@ -24,9 +24,12 @@ def event(default_executor = :io) # Constructs new Future which will be completed after block is evaluated on executor. Evaluation begins immediately. 
# @return [Future] - def future(default_executor = :io, &task) - # TODO (pitr-ch 14-Mar-2016): arguments for the block - ImmediateEventPromise.new(default_executor).future.then(&task) + def future(*args, &task) + future_on(:io, *args, &task) + end + + def future_on(executor, *args, &task) + ImmediateEventPromise.new(executor).future.then(*args, &task) end # User is responsible for completing the future once by {Promises::CompletableFuture#success} or {Promises::CompletableFuture#fail} @@ -60,15 +63,23 @@ def completed_event(default_executor = :io) # Constructs new Future which will evaluate to the block after # requested by calling `#wait`, `#value`, `#value!`, etc. on it or on any of the chained futures. # @return [Future] - def delay(default_executor = :io, &task) - DelayPromise.new(default_executor).future.then(&task) + def delay(*args, &task) + delay_on :io, *args, &task + end + + def delay_on(executor, *args, &task) + DelayPromise.new(executor).future.then(*args, &task) end # Schedules the block to be executed on executor in given intended_time. # @param [Numeric, Time] intended_time Numeric => run in `intended_time` seconds. Time => eun on time. # @return [Future] - def schedule(intended_time, default_executor = :io, &task) - ScheduledPromise.new(default_executor, intended_time).future.then(&task) + def schedule(intended_time, *args, &task) + schedule_on :io, intended_time, *args, &task + end + + def schedule_on(executor, intended_time, *args, &task) + ScheduledPromise.new(executor, intended_time).future.then(*args, &task) end # Constructs new {Future} which is completed after all futures_and_or_events are complete. 
Its value is array @@ -163,8 +174,10 @@ def initialize(promise, default_executor) @Condition = ConditionVariable.new @Promise = promise @DefaultExecutor = default_executor + # noinspection RubyArgCount @Touched = AtomicBoolean.new false @Callbacks = LockFreeStack.new + # noinspection RubyArgCount @Waiters = AtomicFixnum.new 0 self.internal_state = PENDING end @@ -220,8 +233,12 @@ def default_executor end # @yield [success, value, reason] of the parent - def chain(executor = nil, &callback) - ChainPromise.new(self, @DefaultExecutor, executor || @DefaultExecutor, &callback).future + def chain(*args, &callback) + chain_on @DefaultExecutor, *args, &callback + end + + def chain_on(executor, *args, &callback) + ChainPromise.new(self, @DefaultExecutor, executor, args, &callback).future end alias_method :then, :chain @@ -250,6 +267,7 @@ def delay ZipEventEventPromise.new(self, DelayPromise.new(@DefaultExecutor).event, @DefaultExecutor).event end + # TODO (pitr-ch 20-Mar-2016): fix schedule on event # # Schedules rest of the chain for execution with specified time or on specified time # # @return [Event] # def schedule(intended_time) @@ -260,22 +278,21 @@ def delay # end.flat # end - # Zips with selected value form the suplied channels - # @return [Future] - def then_select(*channels) - ZipFutureEventPromise(Concurrent.select(*channels), self, @DefaultExecutor).future + # @yield [success, value, reason, *args] executed async on `executor` when completed + # @return self + def on_completion(*args, &callback) + on_completion_use @DefaultExecutor, *args, &callback end - # @yield [success, value, reason] executed async on `executor` when completed - # @return self - def on_completion(executor = nil, &callback) - add_callback :async_callback_on_completion, executor || @DefaultExecutor, callback + def on_completion_use(executor, *args, &callback) + # TODO (pitr-ch 21-Mar-2016): maybe remove all async callbacks?, `then` does the same thing + add_callback 
:async_callback_on_completion, executor, args, callback end - # @yield [success, value, reason] executed sync when completed + # @yield [success, value, reason, *args] executed sync when completed # @return self - def on_completion!(&callback) - add_callback :callback_on_completion, callback + def on_completion!(*args, &callback) + add_callback :callback_on_completion, args, callback end # Changes default executor for rest of the chain @@ -378,12 +395,12 @@ def with_async(executor, *args, &block) Concurrent.executor(executor).post(*args, &block) end - def async_callback_on_completion(executor, callback) - with_async(executor) { callback_on_completion callback } + def async_callback_on_completion(executor, args, callback) + with_async(executor) { callback_on_completion args, callback } end - def callback_on_completion(callback) - callback.call + def callback_on_completion(args, callback) + callback.call *args end def callback_notify_blocked(promise) @@ -422,6 +439,10 @@ def value def reason raise NotImplementedError end + + def apply + raise NotImplementedError + end end # @!visibility private @@ -434,8 +455,8 @@ def success? 
true end - def apply(block) - block.call value + def apply(args, block) + block.call value, *args end def value @@ -453,8 +474,8 @@ def to_sym # @!visibility private class SuccessArray < Success - def apply(block) - block.call(*value) + def apply(args, block) + block.call(*value, *args) end end @@ -480,8 +501,8 @@ def to_sym :failed end - def apply(block) - block.call reason + def apply(args, block) + block.call reason, *args end end @@ -509,8 +530,8 @@ def reason @Reason end - def apply(block) - block.call(*reason) + def apply(args, block) + block.call(*reason, *args) end end @@ -590,10 +611,14 @@ def exception(*args) end end - # @yield [value] executed only on parent success - # @return [Future] - def then(executor = nil, &callback) - ThenPromise.new(self, @DefaultExecutor, executor || @DefaultExecutor, &callback).future + # @yield [value, *args] executed only on parent success + # @return [Future] new + def then(*args, &callback) + then_on @DefaultExecutor, *args, &callback + end + + def then_on(executor, *args, &callback) + ThenPromise.new(self, @DefaultExecutor, executor, args, &callback).future end def chain_completable(completable_future) @@ -604,8 +629,12 @@ def chain_completable(completable_future) # @yield [reason] executed only on parent failure # @return [Future] - def rescue(executor = nil, &callback) - RescuePromise.new(self, @DefaultExecutor, executor || @DefaultExecutor, &callback).future + def rescue(*args, &callback) + rescue_on @DefaultExecutor, *args, &callback + end + + def rescue_on(executor, *args, &callback) + RescuePromise.new(self, @DefaultExecutor, executor, args, &callback).future end # zips with the Future in the value @@ -658,26 +687,34 @@ def zip(other) # @yield [value] executed async on `executor` when success # @return self - def on_success(executor = nil, &callback) - add_callback :async_callback_on_success, executor || @DefaultExecutor, callback + def on_success(*args, &callback) + on_success_use @DefaultExecutor, *args, &callback 
+ end + + def on_success_use(executor, *args, &callback) + add_callback :async_callback_on_success, executor, args, callback end # @yield [reason] executed async on `executor` when failed? # @return self - def on_failure(executor = nil, &callback) - add_callback :async_callback_on_failure, executor || @DefaultExecutor, callback + def on_failure(*args, &callback) + on_failure_use @DefaultExecutor, *args, &callback + end + + def on_failure_use(executor, *args, &callback) + add_callback :async_callback_on_failure, executor, args, callback end # @yield [value] executed sync when success # @return self - def on_success!(&callback) - add_callback :callback_on_success, callback + def on_success!(*args, &callback) + add_callback :callback_on_success, args, callback end # @yield [reason] executed sync when failed? # @return self - def on_failure!(&callback) - add_callback :callback_on_failure, callback + def on_failure!(*args, &callback) + add_callback :callback_on_failure, args, callback end # @!visibility private @@ -688,7 +725,10 @@ def complete_with(state, raise_on_reassign = true) call_callbacks state else if raise_on_reassign - log ERROR, 'Promises::Future', reason if reason # print otherwise hidden error + # print otherwise hidden error + log ERROR, 'Promises::Future', reason if reason + log ERROR, 'Promises::Future', state.reason if state.reason + raise(Concurrent::MultipleAssignmentError.new( "Future can be completed only once. 
Current result is #{result}, " + "trying to set #{state.result}")) @@ -713,8 +753,8 @@ def add_callback(method, *args) end # @!visibility private - def apply(block) - internal_state.apply block + def apply(args, block) + internal_state.apply args, block end private @@ -737,37 +777,37 @@ def call_callback(method, state, *args) self.send method, state, *args end - def async_callback_on_success(state, executor, callback) - with_async(executor, state, callback) do |st, cb| - callback_on_success st, cb + def async_callback_on_success(state, executor, args, callback) + with_async(executor, state, args, callback) do |st, ar, cb| + callback_on_success st, ar, cb end end - def async_callback_on_failure(state, executor, callback) - with_async(executor, state, callback) do |st, cb| - callback_on_failure st, cb + def async_callback_on_failure(state, executor, args, callback) + with_async(executor, state, args, callback) do |st, ar, cb| + callback_on_failure st, ar, cb end end - def callback_on_success(state, callback) - state.apply callback if state.success? + def callback_on_success(state, args, callback) + state.apply args, callback if state.success? end - def callback_on_failure(state, callback) - state.apply callback unless state.success? + def callback_on_failure(state, args, callback) + state.apply args, callback unless state.success? 
end - def callback_on_completion(state, callback) - callback.call state.result + def callback_on_completion(state, args, callback) + callback.call state.result, *args end def callback_notify_blocked(state, promise) super(promise) end - def async_callback_on_completion(state, executor, callback) - with_async(executor, state, callback) do |st, cb| - callback_on_completion st, cb + def async_callback_on_completion(state, executor, args, callback) + with_async(executor, state, args, callback) do |st, ar, cb| + callback_on_completion st, ar, cb end end end @@ -1011,11 +1051,12 @@ def on_completable(done_future) # @abstract # @!visibility private class BlockedTaskPromise < BlockedPromise - def initialize(blocked_by_future, default_executor, executor, &task) + def initialize(blocked_by_future, default_executor, executor, args, &task) raise ArgumentError, 'no block given' unless block_given? super Future.new(self, default_executor), blocked_by_future, 1 @Executor = executor @Task = task + @Args = args end def executor @@ -1027,15 +1068,15 @@ def executor class ThenPromise < BlockedTaskPromise private - def initialize(blocked_by_future, default_executor, executor, &task) + def initialize(blocked_by_future, default_executor, executor, args, &task) raise ArgumentError, 'only Future can be appended with then' unless blocked_by_future.is_a? Future - super blocked_by_future, default_executor, executor, &task + super blocked_by_future, default_executor, executor, args, &task end def on_completable(done_future) if done_future.success? 
- Concurrent.executor(@Executor).post(done_future, @Task) do |future, task| - evaluate_to lambda { future.apply task } + Concurrent.executor(@Executor).post(done_future, @Args, @Task) do |future, args, task| + evaluate_to lambda { future.apply args, task } end else complete_with done_future.internal_state @@ -1047,14 +1088,14 @@ def on_completable(done_future) class RescuePromise < BlockedTaskPromise private - def initialize(blocked_by_future, default_executor, executor, &task) - super blocked_by_future, default_executor, executor, &task + def initialize(blocked_by_future, default_executor, executor, args, &task) + super blocked_by_future, default_executor, executor, args, &task end def on_completable(done_future) if done_future.failed? - Concurrent.executor(@Executor).post(done_future, @Task) do |future, task| - evaluate_to lambda { future.apply task } + Concurrent.executor(@Executor).post(done_future, @Args, @Task) do |future, args, task| + evaluate_to lambda { future.apply args, task } end else complete_with done_future.internal_state @@ -1068,9 +1109,13 @@ class ChainPromise < BlockedTaskPromise def on_completable(done_future) if Future === done_future - Concurrent.executor(@Executor).post(done_future, @Task) { |future, task| evaluate_to(*future.result, task) } + Concurrent.executor(@Executor).post(done_future, @Args, @Task) do |future, args, task| + evaluate_to(*future.result, *args, task) + end else - Concurrent.executor(@Executor).post(@Task) { |task| evaluate_to task } + Concurrent.executor(@Executor).post(@Args, @Task) do |args, task| + evaluate_to *args, task + end end end end diff --git a/spec/concurrent/edge/future_spec.rb b/spec/concurrent/edge/future_spec.rb index f13441286..beafcd204 100644 --- a/spec/concurrent/edge/future_spec.rb +++ b/spec/concurrent/edge/future_spec.rb @@ -5,7 +5,7 @@ Concurrent.use_stdlib_logger Logger::DEBUG -describe 'Concurrent::Edge futures', edge: true do +describe 'Concurrent::Promises' do include 
Concurrent::Promises::FactoryMethods @@ -26,17 +26,6 @@ end end - describe '.post' do - it 'executes tasks asynchronously' do - queue = Queue.new - value = 12 - Concurrent.executor(:fast).post { queue.push(value) } - Concurrent.executor(:io).post { queue.push(value) } - expect(queue.pop).to eq value - expect(queue.pop).to eq value - end - end - describe '.future' do it 'executes' do future = future { 1 + 1 } @@ -45,17 +34,28 @@ future = succeeded_future(1).then { |v| v + 1 } expect(future.value!).to eq 2 end + + it 'executes with args' do + future = future(1, 2, &:+) + expect(future.value!).to eq 3 + + future = succeeded_future(1).then(1) { |v, a| v + 1 } + expect(future.value!).to eq 2 + end end describe '.delay' do - it 'delays execution' do - delay = delay { 1 + 1 } - expect(delay.completed?).to eq false - expect(delay.value!).to eq 2 - delay = succeeded_future(1).delay.then { |v| v + 1 } + def behaves_as_delay(delay, value) expect(delay.completed?).to eq false - expect(delay.value!).to eq 2 + expect(delay.value!).to eq value + end + + specify do + behaves_as_delay delay { 1 + 1 }, 2 + behaves_as_delay succeeded_future(1).delay.then { |v| v + 1 }, 2 + behaves_as_delay delay(1) { |a| a + 1 }, 2 + behaves_as_delay succeeded_future(1).delay.then { |v| v + 1 }, 2 end end @@ -295,7 +295,7 @@ it 'chains' do future0 = future { 1 }.then { |v| v + 2 } # both executed on default FAST_EXECUTOR - future1 = future0.then(:fast) { raise 'boo' } # executed on IO_EXECUTOR + future1 = future0.then_on(:fast) { raise 'boo' } # executed on IO_EXECUTOR future2 = future1.then { |v| v + 1 } # will fail with 'boo' error, executed on default FAST_EXECUTOR future3 = future1.rescue { |err| err.message } # executed on default FAST_EXECUTOR future4 = future0.chain { |success, value, reason| success } # executed on default FAST_EXECUTOR @@ -421,7 +421,7 @@ ch1 = Concurrent::Channel.new ch2 = Concurrent::Channel.new - result = select(ch1, ch2) + result = Concurrent::Promises.select(ch1, ch2) 
ch1.put 1 expect(result.value!).to eq [1, ch1] From 561a57bfe5c7937c7e2404e656814c1bcd9a5951 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Tue, 22 Mar 2016 13:56:47 +0100 Subject: [PATCH 07/68] promises_spec cleanup --- spec/concurrent/edge/future_spec.rb | 48 ----------------------------- 1 file changed, 48 deletions(-) diff --git a/spec/concurrent/edge/future_spec.rb b/spec/concurrent/edge/future_spec.rb index beafcd204..df575ec82 100644 --- a/spec/concurrent/edge/future_spec.rb +++ b/spec/concurrent/edge/future_spec.rb @@ -1,7 +1,5 @@ require 'concurrent/edge/promises' require 'thread' -require 'pry' -# require 'pry-stack_explorer' Concurrent.use_stdlib_logger Logger::DEBUG @@ -440,49 +438,3 @@ def behaves_as_delay(delay, value) end end - -# def synchronize -# if @__mutex__do_not_use_directly.owned? -# yield -# else -# @__mutex__do_not_use_directly.synchronize { yield } -# # @__mutex__do_not_use_directly.synchronize do -# # locking = (Thread.current[:locking] ||= []) -# # locking.push self -# # puts "locking #{locking.size}" # : #{locking}" -# # begin -# # yield -# # ensure -# # if locking.size > 2 -# # # binding.pry -# # end -# # locking.pop -# # end -# # end -# end -# end - -__END__ - -puts '-- connecting existing promises' - -source = Concurrent.delay { 1 } -promise = Concurrent.promise -promise.connect_to source -p promise.future.value # 1 -# or just -p Concurrent.promise.connect_to(source).value - - -puts '-- using shortcuts' - -include Concurrent # includes Future::Shortcuts - -# now methods on Concurrent are accessible directly - -p delay { 1 }.value, future { 1 }.value # => 1\n1 - -promise = promise() -promise.connect_to(future { 3 }) -p promise.future.value # 3 - From 960c9a15e9023ac3d0f4e98ea2823f75ac8db365 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Sun, 27 Mar 2016 21:57:34 +0200 Subject: [PATCH 08/68] Promises improvements - require just what is needed - add *_on methods for future agrregators (zip, any) - make private classes private 
constants - add fixed Event#schedule - rename callback _use suffix to _using --- lib/concurrent/actor/behaviour/termination.rb | 2 +- lib/concurrent/actor/reference.rb | 2 +- lib/concurrent/edge/promises.rb | 1 + lib/concurrent/promises.rb | 167 ++++++++++-------- .../{edge/future_spec.rb => promises_spec.rb} | 19 +- 5 files changed, 111 insertions(+), 80 deletions(-) rename spec/concurrent/{edge/future_spec.rb => promises_spec.rb} (97%) diff --git a/lib/concurrent/actor/behaviour/termination.rb b/lib/concurrent/actor/behaviour/termination.rb index 7f84cdb4e..a58cd7b10 100644 --- a/lib/concurrent/actor/behaviour/termination.rb +++ b/lib/concurrent/actor/behaviour/termination.rb @@ -15,7 +15,7 @@ class Termination < Abstract def initialize(core, subsequent, core_options, trapping = false, terminate_children = true) super core, subsequent, core_options @terminated = Concurrent::Promises.completable_future - @public_terminated = @terminated.hide_completable + @public_terminated = @terminated.with_hidden_completable @trapping = trapping @terminate_children = terminate_children end diff --git a/lib/concurrent/actor/reference.rb b/lib/concurrent/actor/reference.rb index ffbe22272..909c64646 100644 --- a/lib/concurrent/actor/reference.rb +++ b/lib/concurrent/actor/reference.rb @@ -80,7 +80,7 @@ def map(messages) # behaves as {#tell} when no future and as {#ask} when future def message(message, future = nil) core.on_envelope Envelope.new(message, future, Actor.current || Thread.current, self) - return future ? future.hide_completable : self + return future ? 
future.with_hidden_completable : self end # @see AbstractContext#dead_letter_routing diff --git a/lib/concurrent/edge/promises.rb b/lib/concurrent/edge/promises.rb index f45d7e129..5957a9fea 100644 --- a/lib/concurrent/edge/promises.rb +++ b/lib/concurrent/edge/promises.rb @@ -6,6 +6,7 @@ module FactoryMethods # only proof of concept # @return [Future] def select(*channels) + # TODO (pitr-ch 26-Mar-2016): redo, has to be non-blocking future do # noinspection RubyArgCount Channel.select do |s| diff --git a/lib/concurrent/promises.rb b/lib/concurrent/promises.rb index b4c9f6aa7..f5fc81a0a 100644 --- a/lib/concurrent/promises.rb +++ b/lib/concurrent/promises.rb @@ -1,6 +1,9 @@ -# TODO do not require whole concurrent gem -require 'concurrent' +require 'concurrent/synchronization' +require 'concurrent/atomic/atomic_boolean' +require 'concurrent/atomic/atomic_fixnum' require 'concurrent/lock_free_stack' +require 'concurrent/concern/logging' +require 'concurrent/errors' module Concurrent @@ -18,18 +21,21 @@ module Promises module FactoryMethods # User is responsible for completing the event once by {Promises::CompletableEvent#complete} # @return [CompletableEvent] - def event(default_executor = :io) + def completable_event(default_executor = :io) CompletableEventPromise.new(default_executor).future end + # TODO (pitr-ch 26-Mar-2016): remove event?, it does not match completable_future + alias_method :event, :completable_event + # Constructs new Future which will be completed after block is evaluated on executor. Evaluation begins immediately. 
# @return [Future] def future(*args, &task) future_on(:io, *args, &task) end - def future_on(executor, *args, &task) - ImmediateEventPromise.new(executor).future.then(*args, &task) + def future_on(default_executor, *args, &task) + ImmediateEventPromise.new(default_executor).future.then(*args, &task) end # User is responsible for completing the future once by {Promises::CompletableFuture#success} or {Promises::CompletableFuture#fail} @@ -67,8 +73,8 @@ def delay(*args, &task) delay_on :io, *args, &task end - def delay_on(executor, *args, &task) - DelayPromise.new(executor).future.then(*args, &task) + def delay_on(default_executor, *args, &task) + DelayPromise.new(default_executor).future.then(*args, &task) end # Schedules the block to be executed on executor in given intended_time. @@ -78,8 +84,8 @@ def schedule(intended_time, *args, &task) schedule_on :io, intended_time, *args, &task end - def schedule_on(executor, intended_time, *args, &task) - ScheduledPromise.new(executor, intended_time).future.then(*args, &task) + def schedule_on(default_executor, intended_time, *args, &task) + ScheduledPromise.new(default_executor, intended_time).future.then(*args, &task) end # Constructs new {Future} which is completed after all futures_and_or_events are complete. 
Its value is array @@ -88,7 +94,11 @@ def schedule_on(executor, intended_time, *args, &task) # @param [Event] futures_and_or_events # @return [Future] def zip_futures(*futures_and_or_events) - ZipFuturesPromise.new(futures_and_or_events, :io).future + zip_futures_on :io, *futures_and_or_events + end + + def zip_futures_on(default_executor, *futures_and_or_events) + ZipFuturesPromise.new(futures_and_or_events, default_executor).future end alias_method :zip, :zip_futures @@ -98,24 +108,44 @@ def zip_futures(*futures_and_or_events) # @param [Event] futures_and_or_events # @return [Event] def zip_events(*futures_and_or_events) - ZipEventsPromise.new(futures_and_or_events, :io).future + zip_events_on :io, *futures_and_or_events + end + + def zip_events_on(default_executor, *futures_and_or_events) + ZipEventsPromise.new(futures_and_or_events, default_executor).future end # Constructs new {Future} which is completed after first of the futures is complete. # @param [Event] futures # @return [Future] - def any_complete(*futures) - AnyCompletePromise.new(futures, :io).future + def any_complete_future(*futures) + any_complete_future_on :io, *futures end - alias_method :any, :any_complete + def any_complete_future_on(default_executor, *futures) + AnyCompleteFuturePromise.new(futures, default_executor).future + end + + alias_method :any, :any_complete_future # Constructs new {Future} which becomes succeeded after first of the futures succeedes or # failed if all futures fail (reason is last error). 
# @param [Event] futures # @return [Future] - def any_successful(*futures) - AnySuccessfulPromise.new(futures, :io).future + def any_successful_future(*futures) + any_successful_future_on :io, *futures + end + + def any_successful_future_on(default_executor, *futures) + AnySuccessfulFuturePromise.new(futures, default_executor).future + end + + def any_event(*events) + any_event_on :io, *events + end + + def any_event_on(default_executor, *events) + AnyCompletedEventPromise.new(events, default_executor).event end # TODO consider adding first(count, *futures) @@ -130,7 +160,6 @@ class Event < Synchronization::Object public :internal_state include Concern::Logging - # @!visibility private class State def completed? raise NotImplementedError @@ -141,7 +170,6 @@ def to_sym end end - # @!visibility private class Pending < State def completed? false @@ -152,7 +180,6 @@ def to_sym end end - # @!visibility private class Completed < State def completed? true @@ -163,6 +190,8 @@ def to_sym end end + private_constant :State, :Pending, :Completed + # @!visibility private PENDING = Pending.new # @!visibility private @@ -267,25 +296,21 @@ def delay ZipEventEventPromise.new(self, DelayPromise.new(@DefaultExecutor).event, @DefaultExecutor).event end - # TODO (pitr-ch 20-Mar-2016): fix schedule on event - # # Schedules rest of the chain for execution with specified time or on specified time - # # @return [Event] - # def schedule(intended_time) - # chain do - # ZipEventEventPromise.new(self, - # ScheduledPromise.new(@DefaultExecutor, intended_time).event, - # @DefaultExecutor).event - # end.flat - # end + # Schedules rest of the chain for execution with specified time or on specified time + # @return [Event] + def schedule(intended_time) + ZipEventEventPromise.new(self, + ScheduledPromise.new(@DefaultExecutor, intended_time).event, + @DefaultExecutor).event + end # @yield [success, value, reason, *args] executed async on `executor` when completed # @return self def 
on_completion(*args, &callback) - on_completion_use @DefaultExecutor, *args, &callback + on_completion_using @DefaultExecutor, *args, &callback end - def on_completion_use(executor, *args, &callback) - # TODO (pitr-ch 21-Mar-2016): maybe remove all async callbacks?, `then` does the same thing + def on_completion_using(executor, *args, &callback) add_callback :async_callback_on_completion, executor, args, callback end @@ -322,7 +347,7 @@ def complete_with(state, raise_on_reassign = true) call_callbacks else Concurrent::MultipleAssignmentError.new('Event can be completed only once') if raise_on_reassign - return false + return nil end self end @@ -422,7 +447,6 @@ def call_callbacks # Represents a value which will become available in future. May fail with a reason instead. class Future < Event - # @!visibility private class CompletedWithResult < Completed def result [success?, value, reason] @@ -535,6 +559,8 @@ def apply(args, block) end end + private_constant :CompletedWithResult + # @!method state # @return [:pending, :success, :failed] @@ -646,7 +672,7 @@ def flat(level = 1) # @return [Future] which has first completed value from futures def any(*futures) - AnyCompletePromise.new([self, *futures], @DefaultExecutor).future + AnyCompleteFuturePromise.new([self, *futures], @DefaultExecutor).future end # Inserts delay into the chain of Futures making rest of it lazy evaluated. @@ -688,20 +714,20 @@ def zip(other) # @yield [value] executed async on `executor` when success # @return self def on_success(*args, &callback) - on_success_use @DefaultExecutor, *args, &callback + on_success_using @DefaultExecutor, *args, &callback end - def on_success_use(executor, *args, &callback) + def on_success_using(executor, *args, &callback) add_callback :async_callback_on_success, executor, args, callback end # @yield [reason] executed async on `executor` when failed? 
# @return self def on_failure(*args, &callback) - on_failure_use @DefaultExecutor, *args, &callback + on_failure_using @DefaultExecutor, *args, &callback end - def on_failure_use(executor, *args, &callback) + def on_failure_using(executor, *args, &callback) add_callback :async_callback_on_failure, executor, args, callback end @@ -730,8 +756,8 @@ def complete_with(state, raise_on_reassign = true) log ERROR, 'Promises::Future', state.reason if state.reason raise(Concurrent::MultipleAssignmentError.new( - "Future can be completed only once. Current result is #{result}, " + - "trying to set #{state.result}")) + "Future can be completed only once. Current result is #{result}, " + + "trying to set #{state.result}")) end return false end @@ -819,7 +845,7 @@ def complete(raise_on_reassign = true) complete_with COMPLETED, raise_on_reassign end - def hide_completable + def with_hidden_completable EventWrapperPromise.new(self, @DefaultExecutor).event end end @@ -870,13 +896,12 @@ def evaluate_to!(*args, &block) promise.evaluate_to!(*args, block) end - def hide_completable + def with_hidden_completable FutureWrapperPromise.new(self, @DefaultExecutor).future end end # @abstract - # @!visibility private class AbstractPromise < Synchronization::Object safe_initialization! 
include Concern::Logging @@ -928,14 +953,12 @@ def evaluate_to(*args, block) end end - # @!visibility private class CompletableEventPromise < AbstractPromise def initialize(default_executor) super CompletableEvent.new(self, default_executor) end end - # @!visibility private class CompletableFuturePromise < AbstractPromise def initialize(default_executor) super CompletableFuture.new(self, default_executor) @@ -976,12 +999,10 @@ def evaluate_to!(*args, block) end # @abstract - # @!visibility private class InnerPromise < AbstractPromise end # @abstract - # @!visibility private class BlockedPromise < InnerPromise def self.new(*args, &block) promise = super(*args, &block) @@ -1049,7 +1070,6 @@ def on_completable(done_future) end # @abstract - # @!visibility private class BlockedTaskPromise < BlockedPromise def initialize(blocked_by_future, default_executor, executor, args, &task) raise ArgumentError, 'no block given' unless block_given? @@ -1064,7 +1084,6 @@ def executor end end - # @!visibility private class ThenPromise < BlockedTaskPromise private @@ -1084,7 +1103,6 @@ def on_completable(done_future) end end - # @!visibility private class RescuePromise < BlockedTaskPromise private @@ -1103,7 +1121,6 @@ def on_completable(done_future) end end - # @!visibility private class ChainPromise < BlockedTaskPromise private @@ -1121,22 +1138,19 @@ def on_completable(done_future) end # will be immediately completed - # @!visibility private class ImmediateEventPromise < InnerPromise def initialize(default_executor) super Event.new(self, default_executor).complete_with(Event::COMPLETED) end end - # @!visibility private class ImmediateFuturePromise < InnerPromise def initialize(default_executor, success, value, reason) super Future.new(self, default_executor). - complete_with(success ? Future::Success.new(value) : Future::Failed.new(reason)) + complete_with(success ? 
Future::Success.new(value) : Future::Failed.new(reason)) end end - # @!visibility private class FlatPromise < BlockedPromise # !visibility private @@ -1195,7 +1209,6 @@ def completable?(countdown, future) end end - # @!visibility private class ZipEventEventPromise < BlockedPromise def initialize(event1, event2, default_executor) super Event.new(self, default_executor), [event1, event2], 2 @@ -1206,7 +1219,6 @@ def on_completable(done_future) end end - # @!visibility private class ZipFutureEventPromise < BlockedPromise def initialize(future, event, default_executor) super Future.new(self, default_executor), [future, event], 2 @@ -1218,7 +1230,6 @@ def on_completable(done_future) end end - # @!visibility private class ZipFutureFuturePromise < BlockedPromise def initialize(future1, future2, default_executor) super Future.new(self, default_executor), [future1, future2], 2 @@ -1239,7 +1250,6 @@ def on_completable(done_future) end end - # @!visibility private class EventWrapperPromise < BlockedPromise def initialize(event, default_executor) super Event.new(self, default_executor), event, 1 @@ -1250,7 +1260,6 @@ def on_completable(done_future) end end - # @!visibility private class FutureWrapperPromise < BlockedPromise def initialize(future, default_executor) super Future.new(self, default_executor), future, 1 @@ -1261,7 +1270,6 @@ def on_completable(done_future) end end - # @!visibility private class ZipFuturesPromise < BlockedPromise private @@ -1294,7 +1302,6 @@ def on_completable(done_future) end end - # @!visibility private class ZipEventsPromise < BlockedPromise private @@ -1310,8 +1317,7 @@ def on_completable(done_future) end end - # @!visibility private - class AnyCompletePromise < BlockedPromise + class AnyCompleteFuturePromise < BlockedPromise private @@ -1330,8 +1336,24 @@ def on_completable(done_future) end end - # @!visibility private - class AnySuccessfulPromise < BlockedPromise + class AnyCompleteEventPromise < BlockedPromise + + private + + def 
initialize(blocked_by_futures, default_executor) + super(Event.new(self, default_executor), blocked_by_futures, blocked_by_futures.size) + end + + def completable?(countdown, future) + true + end + + def on_completable(done_future) + complete_with Event::COMPLETED, false + end + end + + class AnySuccessfulFuturePromise < BlockedPromise private @@ -1350,7 +1372,6 @@ def on_completable(done_future) end end - # @!visibility private class DelayPromise < InnerPromise def touch @Future.complete_with Event::COMPLETED @@ -1364,7 +1385,6 @@ def initialize(default_executor) end # will be evaluated to task in intended_time - # @!visibility private class ScheduledPromise < InnerPromise def intended_time @IntendedTime @@ -1398,6 +1418,15 @@ def initialize(default_executor, intended_time) end extend FactoryMethods + + private_constant :AbstractPromise, :CompletableEventPromise, :CompletableFuturePromise, + :InnerPromise, :BlockedPromise, :BlockedTaskPromise, :ThenPromise, + :RescuePromise, :ChainPromise, :ImmediateEventPromise, + :ImmediateFuturePromise, :FlatPromise, :ZipEventEventPromise, + :ZipFutureEventPromise, :ZipFutureFuturePromise, :EventWrapperPromise, + :FutureWrapperPromise, :ZipFuturesPromise, :ZipEventsPromise, + :AnyCompleteFuturePromise, :AnySuccessfulFuturePromise, :DelayPromise, :ScheduledPromise + end end diff --git a/spec/concurrent/edge/future_spec.rb b/spec/concurrent/promises_spec.rb similarity index 97% rename from spec/concurrent/edge/future_spec.rb rename to spec/concurrent/promises_spec.rb index df575ec82..768b4e983 100644 --- a/spec/concurrent/edge/future_spec.rb +++ b/spec/concurrent/promises_spec.rb @@ -91,6 +91,11 @@ def behaves_as_delay(delay, value) expect(future.value!).to eq queue expect(queue.pop).to eq 2 expect(queue.pop).to be >= 0.09 + + scheduled = completed_event.schedule(0.1) + expect(scheduled.completed?).to be_falsey + scheduled.wait + expect(scheduled.completed?).to be_truthy end end @@ -126,7 +131,7 @@ def behaves_as_delay(delay, 
value) f2 = completable_future f3 = completable_future - any1 = any_complete(f1, f2) + any1 = any_complete_future(f1, f2) any2 = f2 | f3 f1.success 1 @@ -142,7 +147,7 @@ def behaves_as_delay(delay, value) f1 = completable_future f2 = completable_future - any = any_successful(f1, f2) + any = any_successful_future(f1, f2) f1.fail f2.success :value @@ -410,9 +415,9 @@ def behaves_as_delay(delay, value) end expect(future { 2 }. - then_ask(actor). - then { |v| v + 2 }. - value!).to eq 6 + then_ask(actor). + then { |v| v + 2 }. + value!).to eq 6 end it 'with channel' do @@ -433,8 +438,4 @@ def behaves_as_delay(delay, value) end end - specify do - expect(future { :v }.value!).to eq :v - end - end From 3bcd872ee92481f5b7c3af23b880a416ef5d8604 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Sun, 27 Mar 2016 22:09:34 +0200 Subject: [PATCH 09/68] Cooperative cancellation for promises and any other async processing --- lib/concurrent/edge/promises.rb | 78 ++++++++++++++++++++++++++++++++ lib/concurrent/promises.rb | 2 - spec/concurrent/promises_spec.rb | 13 ++++++ 3 files changed, 91 insertions(+), 2 deletions(-) diff --git a/lib/concurrent/edge/promises.rb b/lib/concurrent/edge/promises.rb index 5957a9fea..b3e051007 100644 --- a/lib/concurrent/edge/promises.rb +++ b/lib/concurrent/edge/promises.rb @@ -70,4 +70,82 @@ def each_body(value, &block) end end + + # inspired by https://msdn.microsoft.com/en-us/library/dd537607(v=vs.110).aspx + class Cancellation < Synchronization::Object + safe_initialization! + + def self.create + [(i = new), i.token] + end + + private_class_method :new + + def initialize + @Cancel = Promises.completable_event + @Token = Token.new @Cancel.with_hidden_completable + end + + def token + @Token + end + + def cancel + try_cancel or raise MultipleAssignmentError, 'cannot cancel twice' + end + + def try_cancel + !!@Cancel.complete(false) + end + + def canceled? + @Cancel.complete? + end + + class Token < Synchronization::Object + safe_initialization! 
+ + def initialize(cancel) + @Cancel = cancel + end + + def event + @Cancel + end + + def on_cancellation(*args, &block) + @Cancel.on_completion *args, &block + end + + def then(*args, &block) + @Cancel.chain *args, &block + end + + def canceled? + @Cancel.complete? + end + + def loop_until_canceled(&block) + until canceled? + result = block.call + end + result + end + + def raise_if_canceled + raise CancelledOperationError if canceled? + self + end + + def join(*tokens) + Token.new Promises.any_event(@Cancel, *tokens.map(&:event)) + end + + end + + private_constant :Token + + # TODO (pitr-ch 27-Mar-2016): cooperation with mutex, select etc? + # TODO (pitr-ch 27-Mar-2016): examples (scheduled to be cancelled in 10 sec) + end end diff --git a/lib/concurrent/promises.rb b/lib/concurrent/promises.rb index f5fc81a0a..54a5c74f5 100644 --- a/lib/concurrent/promises.rb +++ b/lib/concurrent/promises.rb @@ -1430,7 +1430,5 @@ def initialize(default_executor, intended_time) end end -# TODO cancelable Futures, will cancel the future but the task will finish anyway -# TODO task interrupts, how to support? 
# TODO when value is requested the current thread may evaluate the tasks to get the value for performance reasons it may not evaluate :io though # TODO try work stealing pool, each thread has it's own queue diff --git a/spec/concurrent/promises_spec.rb b/spec/concurrent/promises_spec.rb index 768b4e983..3f1c234dd 100644 --- a/spec/concurrent/promises_spec.rb +++ b/spec/concurrent/promises_spec.rb @@ -438,4 +438,17 @@ def behaves_as_delay(delay, value) end end + describe 'Cancellation', edge: true do + specify do + source, token = Concurrent::Cancellation.create + + futures = Array.new(2) { future(token) { |t| t.loop_until_canceled { Thread.pass }; :done } } + + source.cancel + futures.each do |future| + expect(future.value!).to eq :done + end + end + end + end From c1acd200eed524bb42851b22b549a8105ef4203c Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Sat, 11 Jun 2016 14:46:41 +0200 Subject: [PATCH 10/68] Add empty? and clear_if concurrent methods to LockFreeStack --- lib/concurrent/lock_free_stack.rb | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/concurrent/lock_free_stack.rb b/lib/concurrent/lock_free_stack.rb index 90550a648..66d63df8c 100644 --- a/lib/concurrent/lock_free_stack.rb +++ b/lib/concurrent/lock_free_stack.rb @@ -29,7 +29,7 @@ def initialize self.head = EMPTY end - def empty? + def empty?(head = self.head) head.equal? 
EMPTY end @@ -83,6 +83,10 @@ def clear end end + def clear_if(head) + compare_and_set_head head, EMPTY + end + def clear_each(&block) while true current_head = head From 0bb92038f95910bfae4ebe67081178306fecb2ef Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Sat, 11 Jun 2016 14:48:21 +0200 Subject: [PATCH 11/68] Cancellation: allow to configure the cancellable event/future --- lib/concurrent/edge/promises.rb | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/lib/concurrent/edge/promises.rb b/lib/concurrent/edge/promises.rb index b3e051007..7617403bd 100644 --- a/lib/concurrent/edge/promises.rb +++ b/lib/concurrent/edge/promises.rb @@ -75,15 +75,17 @@ def each_body(value, &block) class Cancellation < Synchronization::Object safe_initialization! - def self.create - [(i = new), i.token] + def self.create(future_or_event = Promises.completable_event, *complete_args) + [(i = new(future_or_event, *complete_args)), i.token] end private_class_method :new - def initialize - @Cancel = Promises.completable_event - @Token = Token.new @Cancel.with_hidden_completable + def initialize(future, *complete_args) + raise ArgumentError, 'future is not Completable' unless future.is_a?(Promises::Completable) + @Cancel = future + @Token = Token.new @Cancel.with_hidden_completable + @CompleteArgs = complete_args end def token @@ -113,6 +115,8 @@ def event @Cancel end + alias_method :future, :event + def on_cancellation(*args, &block) @Cancel.on_completion *args, &block end From 76e956c22df4ef14ec6ed53009b35f746b2bbd55 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Sat, 11 Jun 2016 14:51:13 +0200 Subject: [PATCH 12/68] Cancellation unify cancel methods --- lib/concurrent/edge/promises.rb | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/lib/concurrent/edge/promises.rb b/lib/concurrent/edge/promises.rb index 7617403bd..5da7cfea2 100644 --- a/lib/concurrent/edge/promises.rb +++ b/lib/concurrent/edge/promises.rb @@ -92,12 +92,8 @@ def 
token @Token end - def cancel - try_cancel or raise MultipleAssignmentError, 'cannot cancel twice' - end - - def try_cancel - !!@Cancel.complete(false) + def cancel(raise_on_repeated_call = true) + !!@Cancel.complete(*@CompleteArgs, raise_on_repeated_call) end def canceled? From 53128c3047e5133c88fa8b06b9de4837cce3e205 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Sat, 11 Jun 2016 15:26:50 +0200 Subject: [PATCH 13/68] Fix any methods - add missing Event#any - allow mix of futures and events, completed event has value nil and is always successful - any does not propagate touch if already complete --- lib/concurrent/promises.rb | 272 +++++++++++++++++++------------------ 1 file changed, 137 insertions(+), 135 deletions(-) diff --git a/lib/concurrent/promises.rb b/lib/concurrent/promises.rb index 54a5c74f5..06b9d6be0 100644 --- a/lib/concurrent/promises.rb +++ b/lib/concurrent/promises.rb @@ -145,7 +145,7 @@ def any_event(*events) end def any_event_on(default_executor, *events) - AnyCompletedEventPromise.new(events, default_executor).event + AnyCompleteEventPromise.new(events, default_executor).event end # TODO consider adding first(count, *futures) @@ -170,6 +170,8 @@ def to_sym end end + private_constant :State + class Pending < State def completed? false @@ -180,7 +182,9 @@ def to_sym end end - class Completed < State + private_constant :Pending + + class CompletedWithResult < State def completed? true end @@ -188,14 +192,125 @@ def completed? def to_sym :completed end + + def result + [success?, value, reason] + end + + def success? + raise NotImplementedError + end + + def value + raise NotImplementedError + end + + def reason + raise NotImplementedError + end + + def apply + raise NotImplementedError + end + end + + private_constant :CompletedWithResult + + # @!visibility private + class Success < CompletedWithResult + def initialize(value) + @Value = value + end + + def success? 
+ true + end + + def apply(args, block) + block.call value, *args + end + + def value + @Value + end + + def reason + nil + end + + def to_sym + :success + end + end + + # @!visibility private + class SuccessArray < Success + def apply(args, block) + block.call(*value, *args) + end + end + + # @!visibility private + class Failed < CompletedWithResult + def initialize(reason) + @Reason = reason + end + + def success? + false + end + + def value + nil + end + + def reason + @Reason + end + + def to_sym + :failed + end + + def apply(args, block) + block.call reason, *args + end + end + + # @!visibility private + class PartiallyFailed < CompletedWithResult + def initialize(value, reason) + super() + @Value = value + @Reason = reason + end + + def success? + false + end + + def to_sym + :failed + end + + def value + @Value + end + + def reason + @Reason + end + + def apply(args, block) + block.call(*reason, *args) + end end - private_constant :State, :Pending, :Completed # @!visibility private PENDING = Pending.new # @!visibility private - COMPLETED = Completed.new + COMPLETED = Success.new(nil) def initialize(promise, default_executor) super() @@ -284,12 +399,18 @@ def zip(other) if other.is?(Future) ZipFutureEventPromise.new(other, self, @DefaultExecutor).future else - ZipEventEventPromise.new(self, other, @DefaultExecutor).future + ZipEventEventPromise.new(self, other, @DefaultExecutor).event end end alias_method :&, :zip + def any(future) + AnyCompleteEventPromise.new([self, future], @DefaultExecutor).event + end + + alias_method :|, :any + # Inserts delay into the chain of Futures making rest of it lazy evaluated. # @return [Event] def delay @@ -447,119 +568,6 @@ def call_callbacks # Represents a value which will become available in future. May fail with a reason instead. class Future < Event - class CompletedWithResult < Completed - def result - [success?, value, reason] - end - - def success? 
- raise NotImplementedError - end - - def value - raise NotImplementedError - end - - def reason - raise NotImplementedError - end - - def apply - raise NotImplementedError - end - end - - # @!visibility private - class Success < CompletedWithResult - def initialize(value) - @Value = value - end - - def success? - true - end - - def apply(args, block) - block.call value, *args - end - - def value - @Value - end - - def reason - nil - end - - def to_sym - :success - end - end - - # @!visibility private - class SuccessArray < Success - def apply(args, block) - block.call(*value, *args) - end - end - - # @!visibility private - class Failed < CompletedWithResult - def initialize(reason) - @Reason = reason - end - - def success? - false - end - - def value - nil - end - - def reason - @Reason - end - - def to_sym - :failed - end - - def apply(args, block) - block.call reason, *args - end - end - - # @!visibility private - class PartiallyFailed < CompletedWithResult - def initialize(value, reason) - super() - @Value = value - @Reason = reason - end - - def success? - false - end - - def to_sym - :failed - end - - def value - @Value - end - - def reason - @Reason - end - - def apply(args, block) - block.call(*reason, *args) - end - end - - private_constant :CompletedWithResult # @!method state # @return [:pending, :success, :failed] @@ -671,8 +679,8 @@ def flat(level = 1) end # @return [Future] which has first completed value from futures - def any(*futures) - AnyCompleteFuturePromise.new([self, *futures], @DefaultExecutor).future + def any(future) + AnyCompleteFuturePromise.new([self, future], @DefaultExecutor).future end # Inserts delay into the chain of Futures making rest of it lazy evaluated. @@ -1317,13 +1325,17 @@ def on_completable(done_future) end end - class AnyCompleteFuturePromise < BlockedPromise + class AbstractAnyPromise < BlockedPromise + def touch + blocked_by.each(&:touch) unless @Future.completed? 
+ end + end + + class AnyCompleteFuturePromise < AbstractAnyPromise private def initialize(blocked_by_futures, default_executor) - blocked_by_futures.all? { |f| f.is_a? Future } or - raise ArgumentError, 'accepts only Futures not Events' super(Future.new(self, default_executor), blocked_by_futures, blocked_by_futures.size) end @@ -1336,7 +1348,7 @@ def on_completable(done_future) end end - class AnyCompleteEventPromise < BlockedPromise + class AnyCompleteEventPromise < AbstractAnyPromise private @@ -1353,23 +1365,13 @@ def on_completable(done_future) end end - class AnySuccessfulFuturePromise < BlockedPromise + class AnySuccessfulFuturePromise < AnyCompleteFuturePromise private - def initialize(blocked_by_futures, default_executor) - blocked_by_futures.all? { |f| f.is_a? Future } or - raise ArgumentError, 'accepts only Futures not Events' - super(Future.new(self, default_executor), blocked_by_futures, blocked_by_futures.size) - end - def completable?(countdown, future) future.success? || super(countdown, future) end - - def on_completable(done_future) - complete_with done_future.internal_state, false - end end class DelayPromise < InnerPromise From 278cdae83cac90df320b0394bdfc0cdf036615a0 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Sat, 11 Jun 2016 15:27:12 +0200 Subject: [PATCH 14/68] Remove bad aliases --- lib/concurrent/promises.rb | 5 ----- 1 file changed, 5 deletions(-) diff --git a/lib/concurrent/promises.rb b/lib/concurrent/promises.rb index 06b9d6be0..b01554e9f 100644 --- a/lib/concurrent/promises.rb +++ b/lib/concurrent/promises.rb @@ -25,9 +25,6 @@ def completable_event(default_executor = :io) CompletableEventPromise.new(default_executor).future end - # TODO (pitr-ch 26-Mar-2016): remove event?, it does not match completable_future - alias_method :event, :completable_event - # Constructs new Future which will be completed after block is evaluated on executor. Evaluation begins immediately. 
# @return [Future] def future(*args, &task) @@ -64,8 +61,6 @@ def completed_event(default_executor = :io) ImmediateEventPromise.new(default_executor).event end - alias_method :async, :future - # Constructs new Future which will evaluate to the block after # requested by calling `#wait`, `#value`, `#value!`, etc. on it or on any of the chained futures. # @return [Future] From f1cea8b5b7492848f2f5a7346aaab2e13cbac372 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Sat, 11 Jun 2016 15:27:56 +0200 Subject: [PATCH 15/68] BlockedPromise always takes an array of futures --- lib/concurrent/promises.rb | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/lib/concurrent/promises.rb b/lib/concurrent/promises.rb index b01554e9f..4f6274f06 100644 --- a/lib/concurrent/promises.rb +++ b/lib/concurrent/promises.rb @@ -1049,7 +1049,10 @@ def inspect private def initialize_blocked_by(blocked_by_futures) - @BlockedBy = [blocked_by_futures].flatten + unless blocked_by_futures.is_a?(::Array) + raise ArgumentError, "has to be array of events/futures: #{blocked_by_futures.inspect}" + end + @BlockedBy = blocked_by_futures end def clear_blocked_by! @@ -1076,7 +1079,7 @@ def on_completable(done_future) class BlockedTaskPromise < BlockedPromise def initialize(blocked_by_future, default_executor, executor, args, &task) raise ArgumentError, 'no block given' unless block_given? 
- super Future.new(self, default_executor), blocked_by_future, 1 + super Future.new(self, default_executor), [blocked_by_future], 1 @Executor = executor @Task = task @Args = args @@ -1255,7 +1258,7 @@ def on_completable(done_future) class EventWrapperPromise < BlockedPromise def initialize(event, default_executor) - super Event.new(self, default_executor), event, 1 + super Event.new(self, default_executor), [event], 1 end def on_completable(done_future) @@ -1265,7 +1268,7 @@ def on_completable(done_future) class FutureWrapperPromise < BlockedPromise def initialize(future, default_executor) - super Future.new(self, default_executor), future, 1 + super Future.new(self, default_executor), [future], 1 end def on_completable(done_future) From c110154dcad3770fce1e17fe17cbff789e17ff36 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Sat, 11 Jun 2016 15:28:14 +0200 Subject: [PATCH 16/68] Add marking Completable module --- lib/concurrent/promises.rb | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lib/concurrent/promises.rb b/lib/concurrent/promises.rb index 4f6274f06..7f70b3363 100644 --- a/lib/concurrent/promises.rb +++ b/lib/concurrent/promises.rb @@ -841,8 +841,13 @@ def async_callback_on_completion(state, executor, args, callback) end end + module Completable + end + # A Event which can be completed by user. class CompletableEvent < Event + include Completable + # Complete the Event, `raise` if already completed def complete(raise_on_reassign = true) complete_with COMPLETED, raise_on_reassign @@ -855,6 +860,8 @@ def with_hidden_completable # A Future which can be completed by user. 
class CompletableFuture < Future + include Completable + # Complete the future with triplet od `success`, `value`, `reason` # `raise` if already completed # return [self] From 8340b14552cb5e1cebcf1069ce053695f83b5230 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Sat, 11 Jun 2016 15:30:10 +0200 Subject: [PATCH 17/68] Memoize wrappers of Completables --- lib/concurrent/promises.rb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/concurrent/promises.rb b/lib/concurrent/promises.rb index 7f70b3363..674f3f192 100644 --- a/lib/concurrent/promises.rb +++ b/lib/concurrent/promises.rb @@ -854,7 +854,7 @@ def complete(raise_on_reassign = true) end def with_hidden_completable - EventWrapperPromise.new(self, @DefaultExecutor).event + @with_hidden_completable ||= EventWrapperPromise.new(self, @DefaultExecutor).event end end @@ -907,7 +907,7 @@ def evaluate_to!(*args, &block) end def with_hidden_completable - FutureWrapperPromise.new(self, @DefaultExecutor).future + @with_hidden_completable ||= FutureWrapperPromise.new(self, @DefaultExecutor).future end end From 4b00a78140ed0ed1e485c27d726f03d597546249 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Sat, 11 Jun 2016 15:30:54 +0200 Subject: [PATCH 18/68] Update promises tests --- spec/concurrent/promises_spec.rb | 44 +++++++++++++++++++++++++++----- 1 file changed, 38 insertions(+), 6 deletions(-) diff --git a/spec/concurrent/promises_spec.rb b/spec/concurrent/promises_spec.rb index 3f1c234dd..805fcb5bd 100644 --- a/spec/concurrent/promises_spec.rb +++ b/spec/concurrent/promises_spec.rb @@ -9,8 +9,8 @@ describe 'chain_completable' do it 'event' do - b = event - a = event.chain_completable(b) + b = completable_event + a = completable_event.chain_completable(b) a.complete expect(b).to be_completed end @@ -102,7 +102,7 @@ def behaves_as_delay(delay, value) describe '.event' do specify do - completable_event = event + completable_event = completable_event() one = completable_event.chain { 1 } join = 
zip(completable_event).chain { 1 } expect(one.completed?).to be false @@ -201,8 +201,8 @@ def behaves_as_delay(delay, value) expect(future { 1 }.delay).to be_a_kind_of Concurrent::Promises::Future expect(future { 1 }.delay.wait!).to be_completed - expect(event.complete.delay).to be_a_kind_of Concurrent::Promises::Event - expect(event.complete.delay.wait).to be_completed + expect(completable_event.complete.delay).to be_a_kind_of Concurrent::Promises::Event + expect(completable_event.complete.delay.wait).to be_completed a = future { 1 } b = future { raise 'b' } @@ -239,7 +239,7 @@ def behaves_as_delay(delay, value) it 'waits for all and returns event' do a = succeeded_future 1 b = failed_future :any - c = event.complete + c = completable_event.complete z2 = zip_events a, b, c z3 = zip_events a @@ -449,6 +449,38 @@ def behaves_as_delay(delay, value) expect(future.value!).to eq :done end end + + specify do + source, token = Concurrent::Cancellation.create + source.cancel + expect(token.event.complete?).to be_truthy + + cancellable_branch = Concurrent::Promises.delay { 1 } + expect((cancellable_branch | token.event).value).to be_nil + expect(cancellable_branch.complete?).to be_falsey + end + + specify do + source, token = Concurrent::Cancellation.create( + Concurrent::Promises.completable_future, false, nil, err = StandardError.new('Cancelled')) + source.cancel + expect(token.future.complete?).to be_truthy + + cancellable_branch = Concurrent::Promises.delay { 1 } + expect((cancellable_branch | token.event).reason).to eq err + expect(cancellable_branch.complete?).to be_falsey + end + + + specify do + source, token = Concurrent::Cancellation.create + + cancellable_branch = Concurrent::Promises.delay { 1 } + expect((cancellable_branch | token.event).value).to eq 1 + expect(cancellable_branch.complete?).to be_truthy + end + end + end end From 97e74ad850b96435e384e71c723241b5e1744922 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Sat, 11 Jun 2016 15:43:19 +0200 Subject: 
[PATCH 19/68] Add experimental Throttle --- lib/concurrent/edge/promises.rb | 49 ++++++++++++++++++++++++++++++++ spec/concurrent/promises_spec.rb | 15 ++++++++++ 2 files changed, 64 insertions(+) diff --git a/lib/concurrent/edge/promises.rb b/lib/concurrent/edge/promises.rb index 5da7cfea2..4ae34edf4 100644 --- a/lib/concurrent/edge/promises.rb +++ b/lib/concurrent/edge/promises.rb @@ -148,4 +148,53 @@ def join(*tokens) # TODO (pitr-ch 27-Mar-2016): cooperation with mutex, select etc? # TODO (pitr-ch 27-Mar-2016): examples (scheduled to be cancelled in 10 sec) end + + class Throttle < Synchronization::Object + + safe_initialization! + private *attr_atomic(:can_run) + + def initialize(max) + super() + self.can_run = max + # TODO (pitr-ch 10-Jun-2016): lockfree gueue is needed + @Queue = Queue.new + end + + def limit(ready = nil, &block) + # TODO (pitr-ch 11-Jun-2016): triggers should allocate resources when they are to be required + if block_given? + block.call(get_event).on_completion! 
{ done } + else + get_event + end + end + + def done + while true + current_can_run = can_run + if compare_and_set_can_run current_can_run, current_can_run + 1 + @Queue.pop.complete if current_can_run < 0 + return self + end + end + end + + private + + def get_event + while true + current_can_run = can_run + if compare_and_set_can_run current_can_run, current_can_run - 1 + if current_can_run > 0 + return Promises.completed_event + else + e = Promises.completable_event + @Queue.push e + return e + end + end + end + end + end end diff --git a/spec/concurrent/promises_spec.rb b/spec/concurrent/promises_spec.rb index 805fcb5bd..d40627fac 100644 --- a/spec/concurrent/promises_spec.rb +++ b/spec/concurrent/promises_spec.rb @@ -481,6 +481,21 @@ def behaves_as_delay(delay, value) end end + describe 'Throttling' do + specify do + throttle = Concurrent::Throttle.new 3 + counter = Concurrent::AtomicFixnum.new + expect(Concurrent::Promises.zip( + *12.times.map do |i| + throttle.limit do |trigger| + trigger.then do + counter.increment + sleep 0.01 + counter.decrement + end + end + end).value.all? 
{ |v| v < 3 }).to be_truthy + end end end From be0ed75334c1df3d873dd9b051ba1769231e5cae Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Sat, 11 Jun 2016 15:48:39 +0200 Subject: [PATCH 20/68] Keep promisses in edge for now --- lib/concurrent.rb | 1 - lib/concurrent/actor.rb | 2 +- lib/concurrent/edge/promises.rb | 1442 ++++++++++++++++++++++++++++++- lib/concurrent/promises.rb | 1441 ------------------------------ 4 files changed, 1442 insertions(+), 1444 deletions(-) delete mode 100644 lib/concurrent/promises.rb diff --git a/lib/concurrent.rb b/lib/concurrent.rb index 8f9017183..6fc240318 100644 --- a/lib/concurrent.rb +++ b/lib/concurrent.rb @@ -30,7 +30,6 @@ require 'concurrent/timer_task' require 'concurrent/tvar' require 'concurrent/lock_free_stack' -require 'concurrent/promises' require 'concurrent/thread_safe/synchronized_delegator' require 'concurrent/thread_safe/util' diff --git a/lib/concurrent/actor.rb b/lib/concurrent/actor.rb index 81b98c608..8d1822966 100644 --- a/lib/concurrent/actor.rb +++ b/lib/concurrent/actor.rb @@ -1,7 +1,7 @@ require 'concurrent/configuration' require 'concurrent/executor/serialized_execution' require 'concurrent/synchronization' -require 'concurrent/promises' +require 'concurrent/edge/promises' module Concurrent # TODO https://github.com/celluloid/celluloid/wiki/Supervision-Groups ? 
diff --git a/lib/concurrent/edge/promises.rb b/lib/concurrent/edge/promises.rb index 4ae34edf4..c832791aa 100644 --- a/lib/concurrent/edge/promises.rb +++ b/lib/concurrent/edge/promises.rb @@ -1,4 +1,1444 @@ -require 'concurrent/promises' +require 'concurrent/synchronization' +require 'concurrent/atomic/atomic_boolean' +require 'concurrent/atomic/atomic_fixnum' +require 'concurrent/lock_free_stack' +require 'concurrent/concern/logging' +require 'concurrent/errors' + +module Concurrent + + # # Promises Framework + # + # Unified implementation of futures and promises which combines features of previous `Future`, + # `Promise`, `IVar`, `Event`, `dataflow`, `Delay`, and `TimerTask` into a single framework. It extensively uses the + # new synchronization layer to make all the features **non-blocking** and **lock-free**, with the exception of obviously blocking + # operations like `#wait`, `#value`. It also offers better performance. + # + # ## Examples + # {include:file:examples/promises.out.rb} + module Promises + + module FactoryMethods + # User is responsible for completing the event once by {Promises::CompletableEvent#complete} + # @return [CompletableEvent] + def completable_event(default_executor = :io) + CompletableEventPromise.new(default_executor).future + end + + # Constructs new Future which will be completed after block is evaluated on executor. Evaluation begins immediately. 
+ # @return [Future] + def future(*args, &task) + future_on(:io, *args, &task) + end + + def future_on(default_executor, *args, &task) + ImmediateEventPromise.new(default_executor).future.then(*args, &task) + end + + # User is responsible for completing the future once by {Promises::CompletableFuture#success} or {Promises::CompletableFuture#fail} + # @return [CompletableFuture] + def completable_future(default_executor = :io) + CompletableFuturePromise.new(default_executor).future + end + + # @return [Future] which is already completed + def completed_future(success, value, reason, default_executor = :io) + ImmediateFuturePromise.new(default_executor, success, value, reason).future + end + + # @return [Future] which is already completed in success state with value + def succeeded_future(value, default_executor = :io) + completed_future true, value, nil, default_executor + end + + # @return [Future] which is already completed in failed state with reason + def failed_future(reason, default_executor = :io) + completed_future false, nil, reason, default_executor + end + + # @return [Event] which is already completed + def completed_event(default_executor = :io) + ImmediateEventPromise.new(default_executor).event + end + + # Constructs new Future which will evaluate to the block after + # requested by calling `#wait`, `#value`, `#value!`, etc. on it or on any of the chained futures. + # @return [Future] + def delay(*args, &task) + delay_on :io, *args, &task + end + + def delay_on(default_executor, *args, &task) + DelayPromise.new(default_executor).future.then(*args, &task) + end + + # Schedules the block to be executed on executor in given intended_time. + # @param [Numeric, Time] intended_time Numeric => run in `intended_time` seconds. Time => run on time.
+ # @return [Future] + def schedule(intended_time, *args, &task) + schedule_on :io, intended_time, *args, &task + end + + def schedule_on(default_executor, intended_time, *args, &task) + ScheduledPromise.new(default_executor, intended_time).future.then(*args, &task) + end + + # Constructs new {Future} which is completed after all futures_and_or_events are complete. Its value is array + # of dependent future values. If there is an error it fails with the first one. Event does not + # have a value so it's represented by nil in the array of values. + # @param [Event] futures_and_or_events + # @return [Future] + def zip_futures(*futures_and_or_events) + zip_futures_on :io, *futures_and_or_events + end + + def zip_futures_on(default_executor, *futures_and_or_events) + ZipFuturesPromise.new(futures_and_or_events, default_executor).future + end + + alias_method :zip, :zip_futures + + # Constructs new {Event} which is completed after all futures_and_or_events are complete + # (Future is completed when Success or Failed). + # @param [Event] futures_and_or_events + # @return [Event] + def zip_events(*futures_and_or_events) + zip_events_on :io, *futures_and_or_events + end + + def zip_events_on(default_executor, *futures_and_or_events) + ZipEventsPromise.new(futures_and_or_events, default_executor).future + end + + # Constructs new {Future} which is completed after first of the futures is complete. + # @param [Event] futures + # @return [Future] + def any_complete_future(*futures) + any_complete_future_on :io, *futures + end + + def any_complete_future_on(default_executor, *futures) + AnyCompleteFuturePromise.new(futures, default_executor).future + end + + alias_method :any, :any_complete_future + + # Constructs new {Future} which becomes succeeded after first of the futures succeeds or + # failed if all futures fail (reason is last error).
+ # @param [Event] futures + # @return [Future] + def any_successful_future(*futures) + any_successful_future_on :io, *futures + end + + def any_successful_future_on(default_executor, *futures) + AnySuccessfulFuturePromise.new(futures, default_executor).future + end + + def any_event(*events) + any_event_on :io, *events + end + + def any_event_on(default_executor, *events) + AnyCompleteEventPromise.new(events, default_executor).event + end + + # TODO consider adding first(count, *futures) + # TODO consider adding zip_by(slice, *futures) processing futures in slices + end + + # Represents an event which will happen in future (will be completed). It has to always happen. + class Event < Synchronization::Object + safe_initialization! + private(*attr_atomic(:internal_state)) + # @!visibility private + public :internal_state + include Concern::Logging + + class State + def completed? + raise NotImplementedError + end + + def to_sym + raise NotImplementedError + end + end + + private_constant :State + + class Pending < State + def completed? + false + end + + def to_sym + :pending + end + end + + private_constant :Pending + + class CompletedWithResult < State + def completed? + true + end + + def to_sym + :completed + end + + def result + [success?, value, reason] + end + + def success? + raise NotImplementedError + end + + def value + raise NotImplementedError + end + + def reason + raise NotImplementedError + end + + def apply + raise NotImplementedError + end + end + + private_constant :CompletedWithResult + + # @!visibility private + class Success < CompletedWithResult + def initialize(value) + @Value = value + end + + def success? 
+ true + end + + def apply(args, block) + block.call value, *args + end + + def value + @Value + end + + def reason + nil + end + + def to_sym + :success + end + end + + # @!visibility private + class SuccessArray < Success + def apply(args, block) + block.call(*value, *args) + end + end + + # @!visibility private + class Failed < CompletedWithResult + def initialize(reason) + @Reason = reason + end + + def success? + false + end + + def value + nil + end + + def reason + @Reason + end + + def to_sym + :failed + end + + def apply(args, block) + block.call reason, *args + end + end + + # @!visibility private + class PartiallyFailed < CompletedWithResult + def initialize(value, reason) + super() + @Value = value + @Reason = reason + end + + def success? + false + end + + def to_sym + :failed + end + + def value + @Value + end + + def reason + @Reason + end + + def apply(args, block) + block.call(*reason, *args) + end + end + + + # @!visibility private + PENDING = Pending.new + # @!visibility private + COMPLETED = Success.new(nil) + + def initialize(promise, default_executor) + super() + @Lock = Mutex.new + @Condition = ConditionVariable.new + @Promise = promise + @DefaultExecutor = default_executor + # noinspection RubyArgCount + @Touched = AtomicBoolean.new false + @Callbacks = LockFreeStack.new + # noinspection RubyArgCount + @Waiters = AtomicFixnum.new 0 + self.internal_state = PENDING + end + + # @return [:pending, :completed] + def state + internal_state.to_sym + end + + # Is Event/Future pending? + # @return [Boolean] + def pending?(state = internal_state) + !state.completed? + end + + def unscheduled? + raise 'unsupported' + end + + alias_method :incomplete?, :pending? + + # Has the Event been completed? + # @return [Boolean] + def completed?(state = internal_state) + state.completed? + end + + alias_method :complete?, :completed? + + # Wait until Event is #complete? + # @param [Numeric] timeout the maximum time in second to wait. 
+ # @return [Event, true, false] self or true/false if timeout is used + # @!macro [attach] edge.periodical_wait + # @note a thread should wait only once! For repeated checking use faster `completed?` check. + # If thread waits periodically it will dangerously grow the waiters stack. + def wait(timeout = nil) + touch + result = wait_until_complete(timeout) + timeout ? result : self + end + + # @!visibility private + def touch + # distribute touch to promise only once + @Promise.touch if @Touched.make_true + self + end + + # @return [Executor] current default executor + # @see #with_default_executor + def default_executor + @DefaultExecutor + end + + # @yield [success, value, reason] of the parent + def chain(*args, &callback) + chain_on @DefaultExecutor, *args, &callback + end + + def chain_on(executor, *args, &callback) + ChainPromise.new(self, @DefaultExecutor, executor, args, &callback).future + end + + alias_method :then, :chain + + def chain_completable(completable_event) + on_completion! { completable_event.complete_with COMPLETED } + end + + alias_method :tangle, :chain_completable + + # Zip with future producing new Future + # @return [Event] + def zip(other) + if other.is?(Future) + ZipFutureEventPromise.new(other, self, @DefaultExecutor).future + else + ZipEventEventPromise.new(self, other, @DefaultExecutor).event + end + end + + alias_method :&, :zip + + def any(future) + AnyCompleteEventPromise.new([self, future], @DefaultExecutor).event + end + + alias_method :|, :any + + # Inserts delay into the chain of Futures making rest of it lazy evaluated. 
+ # @return [Event] + def delay + ZipEventEventPromise.new(self, DelayPromise.new(@DefaultExecutor).event, @DefaultExecutor).event + end + + # Schedules rest of the chain for execution with specified time or on specified time + # @return [Event] + def schedule(intended_time) + ZipEventEventPromise.new(self, + ScheduledPromise.new(@DefaultExecutor, intended_time).event, + @DefaultExecutor).event + end + + # @yield [success, value, reason, *args] executed async on `executor` when completed + # @return self + def on_completion(*args, &callback) + on_completion_using @DefaultExecutor, *args, &callback + end + + def on_completion_using(executor, *args, &callback) + add_callback :async_callback_on_completion, executor, args, callback + end + + # @yield [success, value, reason, *args] executed sync when completed + # @return self + def on_completion!(*args, &callback) + add_callback :callback_on_completion, args, callback + end + + # Changes default executor for rest of the chain + # @return [Event] + def with_default_executor(executor) + EventWrapperPromise.new(self, executor).future + end + + def to_s + "<##{self.class}:0x#{'%x' % (object_id << 1)} #{state.to_sym}>" + end + + def inspect + "#{to_s[0..-2]} blocks:[#{blocks.map(&:to_s).join(', ')}]>" + end + + def set(*args, &block) + raise 'Use CompletableEvent#complete or CompletableFuture#complete instead, ' + + 'constructed by Concurrent.event or Concurrent.future respectively.' 
+ end + + # @!visibility private + def complete_with(state, raise_on_reassign = true) + if compare_and_set_internal_state(PENDING, state) + # go to synchronized block only if there were waiting threads + @Lock.synchronize { @Condition.broadcast } unless @Waiters.value == 0 + call_callbacks + else + Concurrent::MultipleAssignmentError.new('Event can be completed only once') if raise_on_reassign + return nil + end + self + end + + # @!visibility private + # just for inspection + # @return [Array] + def blocks + @Callbacks.each_with_object([]) do |callback, promises| + promises.push(*(callback.select { |v| v.is_a? AbstractPromise })) + end + end + + # @!visibility private + # just for inspection + def callbacks + @Callbacks.each.to_a + end + + # @!visibility private + def add_callback(method, *args) + if completed? + call_callback method, *args + else + @Callbacks.push [method, *args] + call_callbacks if completed? + end + self + end + + # @!visibility private + # only for inspection + def promise + @Promise + end + + # @!visibility private + # only for inspection + def touched + @Touched.value + end + + # @!visibility private + # only for debugging inspection + def waiting_threads + @Waiters.each.to_a + end + + private + + # @return [true, false] + def wait_until_complete(timeout) + return true if completed? + + @Lock.synchronize do + begin + unless completed? + @Condition.wait @Lock, timeout + end + ensure + # JRuby may raise ConcurrencyError + @Waiters.decrement + end + end + completed? 
+ end + + def with_async(executor, *args, &block) + Concurrent.executor(executor).post(*args, &block) + end + + def async_callback_on_completion(executor, args, callback) + with_async(executor) { callback_on_completion args, callback } + end + + def callback_on_completion(args, callback) + callback.call *args + end + + def callback_notify_blocked(promise) + promise.on_done self + end + + def call_callback(method, *args) + self.send method, *args + end + + def call_callbacks + method, *args = @Callbacks.pop + while method + call_callback method, *args + method, *args = @Callbacks.pop + end + end + end + + # Represents a value which will become available in future. May fail with a reason instead. + class Future < Event + + # @!method state + # @return [:pending, :success, :failed] + + # Has Future been success? + # @return [Boolean] + def success?(state = internal_state) + state.completed? && state.success? + end + + # Has Future been failed? + # @return [Boolean] + def failed?(state = internal_state) + state.completed? && !state.success? + end + + # @return [Object, nil] the value of the Future when success, nil on timeout + # @!macro [attach] edge.timeout_nil + # @note If the Future can have value `nil` then it cannot be distinguished from `nil` returned on timeout. + # In this case it is better to use first `wait` then `value` (or similar). + # @!macro edge.periodical_wait + def value(timeout = nil) + touch + internal_state.value if wait_until_complete timeout + end + + # @return [Exception, nil] the reason of the Future's failure + # @!macro edge.timeout_nil + # @!macro edge.periodical_wait + def reason(timeout = nil) + touch + internal_state.reason if wait_until_complete timeout + end + + # @return [Array(Boolean, Object, Exception), nil] triplet of success, value, reason + # @!macro edge.timeout_nil + # @!macro edge.periodical_wait + def result(timeout = nil) + touch + internal_state.result if wait_until_complete timeout + end + + # Wait until Future is #complete?
+ # @param [Numeric] timeout the maximum time in second to wait. + # @raise reason on failure + # @return [Event, true, false] self or true/false if timeout is used + # @!macro edge.periodical_wait + def wait!(timeout = nil) + touch + result = wait_until_complete!(timeout) + timeout ? result : self + end + + # Wait until Future is #complete? + # @param [Numeric] timeout the maximum time in second to wait. + # @raise reason on failure + # @return [Object, nil] + # @!macro edge.timeout_nil + # @!macro edge.periodical_wait + def value!(timeout = nil) + touch + internal_state.value if wait_until_complete! timeout + end + + # @example allows failed Future to be risen + # raise Concurrent.future.fail + def exception(*args) + raise 'obligation is not failed' unless failed? + reason = internal_state.reason + if reason.is_a?(::Array) + reason.each { |e| log ERROR, 'Promises::Future', e } + Concurrent::Error.new 'multiple exceptions, inspect log' + else + reason.exception(*args) + end + end + + # @yield [value, *args] executed only on parent success + # @return [Future] new + def then(*args, &callback) + then_on @DefaultExecutor, *args, &callback + end + + def then_on(executor, *args, &callback) + ThenPromise.new(self, @DefaultExecutor, executor, args, &callback).future + end + + def chain_completable(completable_future) + on_completion! 
{ completable_future.complete_with internal_state } + end + + alias_method :tangle, :chain_completable + + # @yield [reason] executed only on parent failure + # @return [Future] + def rescue(*args, &callback) + rescue_on @DefaultExecutor, *args, &callback + end + + def rescue_on(executor, *args, &callback) + RescuePromise.new(self, @DefaultExecutor, executor, args, &callback).future + end + + # zips with the Future in the value + # @example + # Concurrent.future { Concurrent.future { 1 } }.flat.value # => 1 + def flat(level = 1) + FlatPromise.new(self, level, @DefaultExecutor).future + end + + # @return [Future] which has first completed value from futures + def any(future) + AnyCompleteFuturePromise.new([self, future], @DefaultExecutor).future + end + + # Inserts delay into the chain of Futures making rest of it lazy evaluated. + # @return [Future] + def delay + ZipFutureEventPromise.new(self, DelayPromise.new(@DefaultExecutor).future, @DefaultExecutor).future + end + + # Schedules rest of the chain for execution with specified time or on specified time + # @return [Future] + def schedule(intended_time) + chain do + ZipFutureEventPromise.new(self, + ScheduledPromise.new(@DefaultExecutor, intended_time).event, + @DefaultExecutor).future + end.flat + end + + # Changes default executor for rest of the chain + # @return [Future] + def with_default_executor(executor) + FutureWrapperPromise.new(self, executor).future + end + + # Zip with future producing new Future + # @return [Future] + def zip(other) + if other.is_a?(Future) + ZipFutureFuturePromise.new(self, other, @DefaultExecutor).future + else + ZipFutureEventPromise.new(self, other, @DefaultExecutor).future + end + end + + alias_method :&, :zip + + alias_method :|, :any + + # @yield [value] executed async on `executor` when success + # @return self + def on_success(*args, &callback) + on_success_using @DefaultExecutor, *args, &callback + end + + def on_success_using(executor, *args, &callback) + add_callback
:async_callback_on_success, executor, args, callback + end + + # @yield [reason] executed async on `executor` when failed? + # @return self + def on_failure(*args, &callback) + on_failure_using @DefaultExecutor, *args, &callback + end + + def on_failure_using(executor, *args, &callback) + add_callback :async_callback_on_failure, executor, args, callback + end + + # @yield [value] executed sync when success + # @return self + def on_success!(*args, &callback) + add_callback :callback_on_success, args, callback + end + + # @yield [reason] executed sync when failed? + # @return self + def on_failure!(*args, &callback) + add_callback :callback_on_failure, args, callback + end + + # @!visibility private + def complete_with(state, raise_on_reassign = true) + if compare_and_set_internal_state(PENDING, state) + # go to synchronized block only if there were waiting threads + @Lock.synchronize { @Condition.broadcast } unless @Waiters.value == 0 + call_callbacks state + else + if raise_on_reassign + # print otherwise hidden error + log ERROR, 'Promises::Future', reason if reason + log ERROR, 'Promises::Future', state.reason if state.reason + + raise(Concurrent::MultipleAssignmentError.new( + "Future can be completed only once. Current result is #{result}, " + + "trying to set #{state.result}")) + end + return false + end + self + end + + # @!visibility private + def add_callback(method, *args) + state = internal_state + if completed?(state) + call_callback method, state, *args + else + @Callbacks.push [method, *args] + state = internal_state + # take back if it was completed in the meanwhile + call_callbacks state if completed?(state) + end + self + end + + # @!visibility private + def apply(args, block) + internal_state.apply args, block + end + + private + + def wait_until_complete!(timeout = nil) + result = wait_until_complete(timeout) + raise self if failed? 
+ result + end + + def call_callbacks(state) + method, *args = @Callbacks.pop + while method + call_callback method, state, *args + method, *args = @Callbacks.pop + end + end + + def call_callback(method, state, *args) + self.send method, state, *args + end + + def async_callback_on_success(state, executor, args, callback) + with_async(executor, state, args, callback) do |st, ar, cb| + callback_on_success st, ar, cb + end + end + + def async_callback_on_failure(state, executor, args, callback) + with_async(executor, state, args, callback) do |st, ar, cb| + callback_on_failure st, ar, cb + end + end + + def callback_on_success(state, args, callback) + state.apply args, callback if state.success? + end + + def callback_on_failure(state, args, callback) + state.apply args, callback unless state.success? + end + + def callback_on_completion(state, args, callback) + callback.call state.result, *args + end + + def callback_notify_blocked(state, promise) + super(promise) + end + + def async_callback_on_completion(state, executor, args, callback) + with_async(executor, state, args, callback) do |st, ar, cb| + callback_on_completion st, ar, cb + end + end + end + + module Completable + end + + # An Event which can be completed by user. + class CompletableEvent < Event + include Completable + + # Complete the Event, `raise` if already completed + def complete(raise_on_reassign = true) + complete_with COMPLETED, raise_on_reassign + end + + def with_hidden_completable + @with_hidden_completable ||= EventWrapperPromise.new(self, @DefaultExecutor).event + end + end + + # A Future which can be completed by user. + class CompletableFuture < Future + include Completable + + # Complete the future with triplet of `success`, `value`, `reason` + # `raise` if already completed + # return [self] + def complete(success, value, reason, raise_on_reassign = true) + complete_with(success ?
Success.new(value) : Failed.new(reason), raise_on_reassign) + end + + # Complete the future with value + # return [self] + def success(value) + promise.success(value) + end + + # Try to complete the future with value + # return [self] + def try_success(value) + promise.try_success(value) + end + + # Fail the future with reason + # return [self] + def fail(reason = StandardError.new) + promise.fail(reason) + end + + # Try to fail the future with reason + # return [self] + def try_fail(reason = StandardError.new) + promise.try_fail(reason) + end + + # Evaluate the future to value if there is an exception the future fails with it + # return [self] + def evaluate_to(*args, &block) + promise.evaluate_to(*args, block) + end + + # Evaluate the future to value if there is an exception the future fails with it + # @raise the exception + # return [self] + def evaluate_to!(*args, &block) + promise.evaluate_to!(*args, block) + end + + def with_hidden_completable + @with_hidden_completable ||= FutureWrapperPromise.new(self, @DefaultExecutor).future + end + end + + # @abstract + class AbstractPromise < Synchronization::Object + safe_initialization! 
+ include Concern::Logging + + def initialize(future) + super() + @Future = future + end + + def future + @Future + end + + alias_method :event, :future + + def default_executor + future.default_executor + end + + def state + future.state + end + + def touch + end + + def to_s + "<##{self.class}:0x#{'%x' % (object_id << 1)} #{state}>" + end + + def inspect + to_s + end + + private + + def complete_with(new_state, raise_on_reassign = true) + @Future.complete_with(new_state, raise_on_reassign) + end + + # @return [Future] + def evaluate_to(*args, block) + complete_with Future::Success.new(block.call(*args)) + rescue StandardError => error + complete_with Future::Failed.new(error) + rescue Exception => error + log(ERROR, 'Promises::Future', error) + complete_with Future::Failed.new(error) + end + end + + class CompletableEventPromise < AbstractPromise + def initialize(default_executor) + super CompletableEvent.new(self, default_executor) + end + end + + class CompletableFuturePromise < AbstractPromise + def initialize(default_executor) + super CompletableFuture.new(self, default_executor) + end + + # Set the `Future` to a value and wake or notify all threads waiting on it. + # + # @param [Object] value the value to store in the `Future` + # @raise [Concurrent::MultipleAssignmentError] if the `Future` has already been set or otherwise completed + # @return [Future] + def success(value) + complete_with Future::Success.new(value) + end + + def try_success(value) + !!complete_with(Future::Success.new(value), false) + end + + # Set the `Future` to failed due to some error and wake or notify all threads waiting on it. 
+ # + # @param [Object] reason for the failure + # @raise [Concurrent::MultipleAssignmentError] if the `Future` has already been set or otherwise completed + # @return [Future] + def fail(reason = StandardError.new) + complete_with Future::Failed.new(reason) + end + + def try_fail(reason = StandardError.new) + !!complete_with(Future::Failed.new(reason), false) + end + + public :evaluate_to + + # @return [Future] + def evaluate_to!(*args, block) + evaluate_to(*args, block).wait! + end + end + + # @abstract + class InnerPromise < AbstractPromise + end + + # @abstract + class BlockedPromise < InnerPromise + def self.new(*args, &block) + promise = super(*args, &block) + promise.blocked_by.each { |f| f.add_callback :callback_notify_blocked, promise } + promise + end + + def initialize(future, blocked_by_futures, countdown) + super(future) + initialize_blocked_by(blocked_by_futures) + @Countdown = AtomicFixnum.new countdown + end + + # @api private + def on_done(future) + countdown = process_on_done(future) + completable = completable?(countdown, future) + + if completable + on_completable(future) + # futures could be deleted from blocked_by one by one here, but that would be too expensive, + # it's done once when all are done to free the reference + clear_blocked_by! + end + end + + def touch + blocked_by.each(&:touch) + end + + # !visibility private + # for inspection only + def blocked_by + @BlockedBy + end + + def inspect + "#{to_s[0..-2]} blocked_by:[#{ blocked_by.map(&:to_s).join(', ')}]>" + end + + private + + def initialize_blocked_by(blocked_by_futures) + unless blocked_by_futures.is_a?(::Array) + raise ArgumentError, "has to be array of events/futures: #{blocked_by_futures.inspect}" + end + @BlockedBy = blocked_by_futures + end + + def clear_blocked_by! + # not synchronized because we do not care when this change propagates + @BlockedBy = [] + nil + end + + # @return [true,false] if completable + def completable?(countdown, future) + countdown.zero? 
+ end + + def process_on_done(future) + @Countdown.decrement + end + + def on_completable(done_future) + raise NotImplementedError + end + end + + # @abstract + class BlockedTaskPromise < BlockedPromise + def initialize(blocked_by_future, default_executor, executor, args, &task) + raise ArgumentError, 'no block given' unless block_given? + super Future.new(self, default_executor), [blocked_by_future], 1 + @Executor = executor + @Task = task + @Args = args + end + + def executor + @Executor + end + end + + class ThenPromise < BlockedTaskPromise + private + + def initialize(blocked_by_future, default_executor, executor, args, &task) + raise ArgumentError, 'only Future can be appended with then' unless blocked_by_future.is_a? Future + super blocked_by_future, default_executor, executor, args, &task + end + + def on_completable(done_future) + if done_future.success? + Concurrent.executor(@Executor).post(done_future, @Args, @Task) do |future, args, task| + evaluate_to lambda { future.apply args, task } + end + else + complete_with done_future.internal_state + end + end + end + + class RescuePromise < BlockedTaskPromise + private + + def initialize(blocked_by_future, default_executor, executor, args, &task) + super blocked_by_future, default_executor, executor, args, &task + end + + def on_completable(done_future) + if done_future.failed? 
+ Concurrent.executor(@Executor).post(done_future, @Args, @Task) do |future, args, task| + evaluate_to lambda { future.apply args, task } + end + else + complete_with done_future.internal_state + end + end + end + + class ChainPromise < BlockedTaskPromise + private + + def on_completable(done_future) + if Future === done_future + Concurrent.executor(@Executor).post(done_future, @Args, @Task) do |future, args, task| + evaluate_to(*future.result, *args, task) + end + else + Concurrent.executor(@Executor).post(@Args, @Task) do |args, task| + evaluate_to *args, task + end + end + end + end + + # will be immediately completed + class ImmediateEventPromise < InnerPromise + def initialize(default_executor) + super Event.new(self, default_executor).complete_with(Event::COMPLETED) + end + end + + class ImmediateFuturePromise < InnerPromise + def initialize(default_executor, success, value, reason) + super Future.new(self, default_executor). + complete_with(success ? Future::Success.new(value) : Future::Failed.new(reason)) + end + end + + class FlatPromise < BlockedPromise + + # !visibility private + def blocked_by + @BlockedBy.each.to_a + end + + private + + def process_on_done(future) + countdown = super(future) + if countdown.nonzero? + internal_state = future.internal_state + + unless internal_state.success? 
+ complete_with internal_state + return countdown + end + + value = internal_state.value + case value + when Future + @BlockedBy.push value + value.add_callback :callback_notify_blocked, self + @Countdown.value + when Event + evaluate_to(lambda { raise TypeError, 'cannot flatten to Event' }) + else + evaluate_to(lambda { raise TypeError, "returned value #{value.inspect} is not a Future" }) + end + end + countdown + end + + def initialize(blocked_by_future, levels, default_executor) + raise ArgumentError, 'levels has to be higher than 0' if levels < 1 + super Future.new(self, default_executor), blocked_by_future, 1 + levels + end + + def initialize_blocked_by(blocked_by_future) + @BlockedBy = LockFreeStack.new.push(blocked_by_future) + end + + def on_completable(done_future) + complete_with done_future.internal_state + end + + def clear_blocked_by! + @BlockedBy.clear + nil + end + + def completable?(countdown, future) + !@Future.internal_state.completed? && super(countdown, future) + end + end + + class ZipEventEventPromise < BlockedPromise + def initialize(event1, event2, default_executor) + super Event.new(self, default_executor), [event1, event2], 2 + end + + def on_completable(done_future) + complete_with Event::COMPLETED + end + end + + class ZipFutureEventPromise < BlockedPromise + def initialize(future, event, default_executor) + super Future.new(self, default_executor), [future, event], 2 + @FutureResult = future + end + + def on_completable(done_future) + complete_with @FutureResult.internal_state + end + end + + class ZipFutureFuturePromise < BlockedPromise + def initialize(future1, future2, default_executor) + super Future.new(self, default_executor), [future1, future2], 2 + @Future1Result = future1 + @Future2Result = future2 + end + + def on_completable(done_future) + success1, value1, reason1 = @Future1Result.result + success2, value2, reason2 = @Future2Result.result + success = success1 && success2 + new_state = if success + 
Future::SuccessArray.new([value1, value2]) + else + Future::PartiallyFailed.new([value1, value2], [reason1, reason2]) + end + complete_with new_state + end + end + + class EventWrapperPromise < BlockedPromise + def initialize(event, default_executor) + super Event.new(self, default_executor), [event], 1 + end + + def on_completable(done_future) + complete_with Event::COMPLETED + end + end + + class FutureWrapperPromise < BlockedPromise + def initialize(future, default_executor) + super Future.new(self, default_executor), [future], 1 + end + + def on_completable(done_future) + complete_with done_future.internal_state + end + end + + class ZipFuturesPromise < BlockedPromise + + private + + def initialize(blocked_by_futures, default_executor) + super(Future.new(self, default_executor), blocked_by_futures, blocked_by_futures.size) + + on_completable nil if blocked_by_futures.empty? + end + + def on_completable(done_future) + all_success = true + values = Array.new(blocked_by.size) + reasons = Array.new(blocked_by.size) + + blocked_by.each_with_index do |future, i| + if future.is_a?(Future) + success, values[i], reasons[i] = future.result + all_success &&= success + else + values[i] = reasons[i] = nil + end + end + + if all_success + complete_with Future::SuccessArray.new(values) + else + complete_with Future::PartiallyFailed.new(values, reasons) + end + end + end + + class ZipEventsPromise < BlockedPromise + + private + + def initialize(blocked_by_futures, default_executor) + super(Event.new(self, default_executor), blocked_by_futures, blocked_by_futures.size) + + on_completable nil if blocked_by_futures.empty? + end + + def on_completable(done_future) + complete_with Event::COMPLETED + end + end + + class AbstractAnyPromise < BlockedPromise + def touch + blocked_by.each(&:touch) unless @Future.completed? 
+ end + end + + class AnyCompleteFuturePromise < AbstractAnyPromise + + private + + def initialize(blocked_by_futures, default_executor) + super(Future.new(self, default_executor), blocked_by_futures, blocked_by_futures.size) + end + + def completable?(countdown, future) + true + end + + def on_completable(done_future) + complete_with done_future.internal_state, false + end + end + + class AnyCompleteEventPromise < AbstractAnyPromise + + private + + def initialize(blocked_by_futures, default_executor) + super(Event.new(self, default_executor), blocked_by_futures, blocked_by_futures.size) + end + + def completable?(countdown, future) + true + end + + def on_completable(done_future) + complete_with Event::COMPLETED, false + end + end + + class AnySuccessfulFuturePromise < AnyCompleteFuturePromise + + private + + def completable?(countdown, future) + future.success? || super(countdown, future) + end + end + + class DelayPromise < InnerPromise + def touch + @Future.complete_with Event::COMPLETED + end + + private + + def initialize(default_executor) + super Event.new(self, default_executor) + end + end + + # will be evaluated to task in intended_time + class ScheduledPromise < InnerPromise + def intended_time + @IntendedTime + end + + def inspect + "#{to_s[0..-2]} intended_time:[#{@IntendedTime}}>" + end + + private + + def initialize(default_executor, intended_time) + super Event.new(self, default_executor) + + @IntendedTime = intended_time + + in_seconds = begin + now = Time.now + schedule_time = if @IntendedTime.is_a? 
Time + @IntendedTime + else + now + @IntendedTime + end + [0, schedule_time.to_f - now.to_f].max + end + + Concurrent.global_timer_set.post(in_seconds) do + @Future.complete_with Event::COMPLETED + end + end + end + + extend FactoryMethods + + private_constant :AbstractPromise, :CompletableEventPromise, :CompletableFuturePromise, + :InnerPromise, :BlockedPromise, :BlockedTaskPromise, :ThenPromise, + :RescuePromise, :ChainPromise, :ImmediateEventPromise, + :ImmediateFuturePromise, :FlatPromise, :ZipEventEventPromise, + :ZipFutureEventPromise, :ZipFutureFuturePromise, :EventWrapperPromise, + :FutureWrapperPromise, :ZipFuturesPromise, :ZipEventsPromise, + :AnyCompleteFuturePromise, :AnySuccessfulFuturePromise, :DelayPromise, :ScheduledPromise + + end +end + +# TODO when value is requested the current thread may evaluate the tasks to get the value for performance reasons it may not evaluate :io though +# TODO try work stealing pool, each thread has it's own queue + +# Experimental features follow module Concurrent module Promises diff --git a/lib/concurrent/promises.rb b/lib/concurrent/promises.rb deleted file mode 100644 index 674f3f192..000000000 --- a/lib/concurrent/promises.rb +++ /dev/null @@ -1,1441 +0,0 @@ -require 'concurrent/synchronization' -require 'concurrent/atomic/atomic_boolean' -require 'concurrent/atomic/atomic_fixnum' -require 'concurrent/lock_free_stack' -require 'concurrent/concern/logging' -require 'concurrent/errors' - -module Concurrent - - # # Promises Framework - # - # Unified implementation of futures and promises which combines features of previous `Future`, - # `Promise`, `IVar`, `Event`, `dataflow`, `Delay`, and `TimerTask` into a single framework. It extensively uses the - # new synchronization layer to make all the features **non-blocking** and **lock-free**, with the exception of obviously blocking - # operations like `#wait`, `#value`. It also offers better performance. 
- # - # ## Examples - # {include:file:examples/promises.out.rb} - module Promises - - module FactoryMethods - # User is responsible for completing the event once by {Promises::CompletableEvent#complete} - # @return [CompletableEvent] - def completable_event(default_executor = :io) - CompletableEventPromise.new(default_executor).future - end - - # Constructs new Future which will be completed after block is evaluated on executor. Evaluation begins immediately. - # @return [Future] - def future(*args, &task) - future_on(:io, *args, &task) - end - - def future_on(default_executor, *args, &task) - ImmediateEventPromise.new(default_executor).future.then(*args, &task) - end - - # User is responsible for completing the future once by {Promises::CompletableFuture#success} or {Promises::CompletableFuture#fail} - # @return [CompletableFuture] - def completable_future(default_executor = :io) - CompletableFuturePromise.new(default_executor).future - end - - # @return [Future] which is already completed - def completed_future(success, value, reason, default_executor = :io) - ImmediateFuturePromise.new(default_executor, success, value, reason).future - end - - # @return [Future] which is already completed in success state with value - def succeeded_future(value, default_executor = :io) - completed_future true, value, nil, default_executor - end - - # @return [Future] which is already completed in failed state with reason - def failed_future(reason, default_executor = :io) - completed_future false, nil, reason, default_executor - end - - # @return [Event] which is already completed - def completed_event(default_executor = :io) - ImmediateEventPromise.new(default_executor).event - end - - # Constructs new Future which will evaluate to the block after - # requested by calling `#wait`, `#value`, `#value!`, etc. on it or on any of the chained futures. 
- # @return [Future] - def delay(*args, &task) - delay_on :io, *args, &task - end - - def delay_on(default_executor, *args, &task) - DelayPromise.new(default_executor).future.then(*args, &task) - end - - # Schedules the block to be executed on executor in given intended_time. - # @param [Numeric, Time] intended_time Numeric => run in `intended_time` seconds. Time => eun on time. - # @return [Future] - def schedule(intended_time, *args, &task) - schedule_on :io, intended_time, *args, &task - end - - def schedule_on(default_executor, intended_time, *args, &task) - ScheduledPromise.new(default_executor, intended_time).future.then(*args, &task) - end - - # Constructs new {Future} which is completed after all futures_and_or_events are complete. Its value is array - # of dependent future values. If there is an error it fails with the first one. Event does not - # have a value so it's represented by nil in the array of values. - # @param [Event] futures_and_or_events - # @return [Future] - def zip_futures(*futures_and_or_events) - zip_futures_on :io, *futures_and_or_events - end - - def zip_futures_on(default_executor, *futures_and_or_events) - ZipFuturesPromise.new(futures_and_or_events, default_executor).future - end - - alias_method :zip, :zip_futures - - # Constructs new {Event} which is completed after all futures_and_or_events are complete - # (Future is completed when Success or Failed). - # @param [Event] futures_and_or_events - # @return [Event] - def zip_events(*futures_and_or_events) - zip_events_on :io, *futures_and_or_events - end - - def zip_events_on(default_executor, *futures_and_or_events) - ZipEventsPromise.new(futures_and_or_events, default_executor).future - end - - # Constructs new {Future} which is completed after first of the futures is complete. 
- # @param [Event] futures - # @return [Future] - def any_complete_future(*futures) - any_complete_future_on :io, *futures - end - - def any_complete_future_on(default_executor, *futures) - AnyCompleteFuturePromise.new(futures, default_executor).future - end - - alias_method :any, :any_complete_future - - # Constructs new {Future} which becomes succeeded after first of the futures succeedes or - # failed if all futures fail (reason is last error). - # @param [Event] futures - # @return [Future] - def any_successful_future(*futures) - any_successful_future_on :io, *futures - end - - def any_successful_future_on(default_executor, *futures) - AnySuccessfulFuturePromise.new(futures, default_executor).future - end - - def any_event(*events) - any_event_on :io, *events - end - - def any_event_on(default_executor, *events) - AnyCompleteEventPromise.new(events, default_executor).event - end - - # TODO consider adding first(count, *futures) - # TODO consider adding zip_by(slice, *futures) processing futures in slices - end - - # Represents an event which will happen in future (will be completed). It has to always happen. - class Event < Synchronization::Object - safe_initialization! - private(*attr_atomic(:internal_state)) - # @!visibility private - public :internal_state - include Concern::Logging - - class State - def completed? - raise NotImplementedError - end - - def to_sym - raise NotImplementedError - end - end - - private_constant :State - - class Pending < State - def completed? - false - end - - def to_sym - :pending - end - end - - private_constant :Pending - - class CompletedWithResult < State - def completed? - true - end - - def to_sym - :completed - end - - def result - [success?, value, reason] - end - - def success? 
- raise NotImplementedError - end - - def value - raise NotImplementedError - end - - def reason - raise NotImplementedError - end - - def apply - raise NotImplementedError - end - end - - private_constant :CompletedWithResult - - # @!visibility private - class Success < CompletedWithResult - def initialize(value) - @Value = value - end - - def success? - true - end - - def apply(args, block) - block.call value, *args - end - - def value - @Value - end - - def reason - nil - end - - def to_sym - :success - end - end - - # @!visibility private - class SuccessArray < Success - def apply(args, block) - block.call(*value, *args) - end - end - - # @!visibility private - class Failed < CompletedWithResult - def initialize(reason) - @Reason = reason - end - - def success? - false - end - - def value - nil - end - - def reason - @Reason - end - - def to_sym - :failed - end - - def apply(args, block) - block.call reason, *args - end - end - - # @!visibility private - class PartiallyFailed < CompletedWithResult - def initialize(value, reason) - super() - @Value = value - @Reason = reason - end - - def success? - false - end - - def to_sym - :failed - end - - def value - @Value - end - - def reason - @Reason - end - - def apply(args, block) - block.call(*reason, *args) - end - end - - - # @!visibility private - PENDING = Pending.new - # @!visibility private - COMPLETED = Success.new(nil) - - def initialize(promise, default_executor) - super() - @Lock = Mutex.new - @Condition = ConditionVariable.new - @Promise = promise - @DefaultExecutor = default_executor - # noinspection RubyArgCount - @Touched = AtomicBoolean.new false - @Callbacks = LockFreeStack.new - # noinspection RubyArgCount - @Waiters = AtomicFixnum.new 0 - self.internal_state = PENDING - end - - # @return [:pending, :completed] - def state - internal_state.to_sym - end - - # Is Event/Future pending? - # @return [Boolean] - def pending?(state = internal_state) - !state.completed? - end - - def unscheduled? 
- raise 'unsupported' - end - - alias_method :incomplete?, :pending? - - # Has the Event been completed? - # @return [Boolean] - def completed?(state = internal_state) - state.completed? - end - - alias_method :complete?, :completed? - - # Wait until Event is #complete? - # @param [Numeric] timeout the maximum time in second to wait. - # @return [Event, true, false] self or true/false if timeout is used - # @!macro [attach] edge.periodical_wait - # @note a thread should wait only once! For repeated checking use faster `completed?` check. - # If thread waits periodically it will dangerously grow the waiters stack. - def wait(timeout = nil) - touch - result = wait_until_complete(timeout) - timeout ? result : self - end - - # @!visibility private - def touch - # distribute touch to promise only once - @Promise.touch if @Touched.make_true - self - end - - # @return [Executor] current default executor - # @see #with_default_executor - def default_executor - @DefaultExecutor - end - - # @yield [success, value, reason] of the parent - def chain(*args, &callback) - chain_on @DefaultExecutor, *args, &callback - end - - def chain_on(executor, *args, &callback) - ChainPromise.new(self, @DefaultExecutor, executor, args, &callback).future - end - - alias_method :then, :chain - - def chain_completable(completable_event) - on_completion! { completable_event.complete_with COMPLETED } - end - - alias_method :tangle, :chain_completable - - # Zip with future producing new Future - # @return [Event] - def zip(other) - if other.is?(Future) - ZipFutureEventPromise.new(other, self, @DefaultExecutor).future - else - ZipEventEventPromise.new(self, other, @DefaultExecutor).event - end - end - - alias_method :&, :zip - - def any(future) - AnyCompleteEventPromise.new([self, future], @DefaultExecutor).event - end - - alias_method :|, :any - - # Inserts delay into the chain of Futures making rest of it lazy evaluated. 
- # @return [Event] - def delay - ZipEventEventPromise.new(self, DelayPromise.new(@DefaultExecutor).event, @DefaultExecutor).event - end - - # Schedules rest of the chain for execution with specified time or on specified time - # @return [Event] - def schedule(intended_time) - ZipEventEventPromise.new(self, - ScheduledPromise.new(@DefaultExecutor, intended_time).event, - @DefaultExecutor).event - end - - # @yield [success, value, reason, *args] executed async on `executor` when completed - # @return self - def on_completion(*args, &callback) - on_completion_using @DefaultExecutor, *args, &callback - end - - def on_completion_using(executor, *args, &callback) - add_callback :async_callback_on_completion, executor, args, callback - end - - # @yield [success, value, reason, *args] executed sync when completed - # @return self - def on_completion!(*args, &callback) - add_callback :callback_on_completion, args, callback - end - - # Changes default executor for rest of the chain - # @return [Event] - def with_default_executor(executor) - EventWrapperPromise.new(self, executor).future - end - - def to_s - "<##{self.class}:0x#{'%x' % (object_id << 1)} #{state.to_sym}>" - end - - def inspect - "#{to_s[0..-2]} blocks:[#{blocks.map(&:to_s).join(', ')}]>" - end - - def set(*args, &block) - raise 'Use CompletableEvent#complete or CompletableFuture#complete instead, ' + - 'constructed by Concurrent.event or Concurrent.future respectively.' 
- end - - # @!visibility private - def complete_with(state, raise_on_reassign = true) - if compare_and_set_internal_state(PENDING, state) - # go to synchronized block only if there were waiting threads - @Lock.synchronize { @Condition.broadcast } unless @Waiters.value == 0 - call_callbacks - else - Concurrent::MultipleAssignmentError.new('Event can be completed only once') if raise_on_reassign - return nil - end - self - end - - # @!visibility private - # just for inspection - # @return [Array] - def blocks - @Callbacks.each_with_object([]) do |callback, promises| - promises.push(*(callback.select { |v| v.is_a? AbstractPromise })) - end - end - - # @!visibility private - # just for inspection - def callbacks - @Callbacks.each.to_a - end - - # @!visibility private - def add_callback(method, *args) - if completed? - call_callback method, *args - else - @Callbacks.push [method, *args] - call_callbacks if completed? - end - self - end - - # @!visibility private - # only for inspection - def promise - @Promise - end - - # @!visibility private - # only for inspection - def touched - @Touched.value - end - - # @!visibility private - # only for debugging inspection - def waiting_threads - @Waiters.each.to_a - end - - private - - # @return [true, false] - def wait_until_complete(timeout) - return true if completed? - - @Lock.synchronize do - @Waiters.increment - begin - unless completed? - @Condition.wait @Lock, timeout - end - ensure - # JRuby may raise ConcurrencyError - @Waiters.decrement - end - end - completed? 
- end - - def with_async(executor, *args, &block) - Concurrent.executor(executor).post(*args, &block) - end - - def async_callback_on_completion(executor, args, callback) - with_async(executor) { callback_on_completion args, callback } - end - - def callback_on_completion(args, callback) - callback.call *args - end - - def callback_notify_blocked(promise) - promise.on_done self - end - - def call_callback(method, *args) - self.send method, *args - end - - def call_callbacks - method, *args = @Callbacks.pop - while method - call_callback method, *args - method, *args = @Callbacks.pop - end - end - end - - # Represents a value which will become available in future. May fail with a reason instead. - class Future < Event - - # @!method state - # @return [:pending, :success, :failed] - - # Has Future been success? - # @return [Boolean] - def success?(state = internal_state) - state.completed? && state.success? - end - - # Has Future been failed? - # @return [Boolean] - def failed?(state = internal_state) - state.completed? && !state.success? - end - - # @return [Object, nil] the value of the Future when success, nil on timeout - # @!macro [attach] edge.timeout_nil - # @note If the Future can have value `nil` then it cannot be distinquished from `nil` returned on timeout. - # In this case is better to use first `wait` then `value` (or similar). - # @!macro edge.periodical_wait - def value(timeout = nil) - touch - internal_state.value if wait_until_complete timeout - end - - # @return [Exception, nil] the reason of the Future's failure - # @!macro edge.timeout_nil - # @!macro edge.periodical_wait - def reason(timeout = nil) - touch - internal_state.reason if wait_until_complete timeout - end - - # @return [Array(Boolean, Object, Exception), nil] triplet of success, value, reason - # @!macro edge.timeout_nil - # @!macro edge.periodical_wait - def result(timeout = nil) - touch - internal_state.result if wait_until_complete timeout - end - - # Wait until Future is #complete? 
- # @param [Numeric] timeout the maximum time in second to wait. - # @raise reason on failure - # @return [Event, true, false] self or true/false if timeout is used - # @!macro edge.periodical_wait - def wait!(timeout = nil) - touch - result = wait_until_complete!(timeout) - timeout ? result : self - end - - # Wait until Future is #complete? - # @param [Numeric] timeout the maximum time in second to wait. - # @raise reason on failure - # @return [Object, nil] - # @!macro edge.timeout_nil - # @!macro edge.periodical_wait - def value!(timeout = nil) - touch - internal_state.value if wait_until_complete! timeout - end - - # @example allows failed Future to be risen - # raise Concurrent.future.fail - def exception(*args) - raise 'obligation is not failed' unless failed? - reason = internal_state.reason - if reason.is_a?(::Array) - reason.each { |e| log ERROR, 'Promises::Future', e } - Concurrent::Error.new 'multiple exceptions, inspect log' - else - reason.exception(*args) - end - end - - # @yield [value, *args] executed only on parent success - # @return [Future] new - def then(*args, &callback) - then_on @DefaultExecutor, *args, &callback - end - - def then_on(executor, *args, &callback) - ThenPromise.new(self, @DefaultExecutor, executor, args, &callback).future - end - - def chain_completable(completable_future) - on_completion! 
{ completable_future.complete_with internal_state } - end - - alias_method :tangle, :chain_completable - - # @yield [reason] executed only on parent failure - # @return [Future] - def rescue(*args, &callback) - rescue_on @DefaultExecutor, *args, &callback - end - - def rescue_on(executor, *args, &callback) - RescuePromise.new(self, @DefaultExecutor, executor, args, &callback).future - end - - # zips with the Future in the value - # @example - # Concurrent.future { Concurrent.future { 1 } }.flat.value # => 1 - def flat(level = 1) - FlatPromise.new(self, level, @DefaultExecutor).future - end - - # @return [Future] which has first completed value from futures - def any(future) - AnyCompleteFuturePromise.new([self, future], @DefaultExecutor).future - end - - # Inserts delay into the chain of Futures making rest of it lazy evaluated. - # @return [Future] - def delay - ZipFutureEventPromise.new(self, DelayPromise.new(@DefaultExecutor).future, @DefaultExecutor).future - end - - # Schedules rest of the chain for execution with specified time or on specified time - # @return [Future] - def schedule(intended_time) - chain do - ZipFutureEventPromise.new(self, - ScheduledPromise.new(@DefaultExecutor, intended_time).event, - @DefaultExecutor).future - end.flat - end - - # Changes default executor for rest of the chain - # @return [Future] - def with_default_executor(executor) - FutureWrapperPromise.new(self, executor).future - end - - # Zip with future producing new Future - # @return [Future] - def zip(other) - if other.is_a?(Future) - ZipFutureFuturePromise.new(self, other, @DefaultExecutor).future - else - ZipFutureEventPromise.new(self, other, @DefaultExecutor).future - end - end - - alias_method :&, :zip - - alias_method :|, :any - - # @yield [value] executed async on `executor` when success - # @return self - def on_success(*args, &callback) - on_success_using @DefaultExecutor, *args, &callback - end - - def on_success_using(executor, *args, &callback) - add_callback 
:async_callback_on_success, executor, args, callback - end - - # @yield [reason] executed async on `executor` when failed? - # @return self - def on_failure(*args, &callback) - on_failure_using @DefaultExecutor, *args, &callback - end - - def on_failure_using(executor, *args, &callback) - add_callback :async_callback_on_failure, executor, args, callback - end - - # @yield [value] executed sync when success - # @return self - def on_success!(*args, &callback) - add_callback :callback_on_success, args, callback - end - - # @yield [reason] executed sync when failed? - # @return self - def on_failure!(*args, &callback) - add_callback :callback_on_failure, args, callback - end - - # @!visibility private - def complete_with(state, raise_on_reassign = true) - if compare_and_set_internal_state(PENDING, state) - # go to synchronized block only if there were waiting threads - @Lock.synchronize { @Condition.broadcast } unless @Waiters.value == 0 - call_callbacks state - else - if raise_on_reassign - # print otherwise hidden error - log ERROR, 'Promises::Future', reason if reason - log ERROR, 'Promises::Future', state.reason if state.reason - - raise(Concurrent::MultipleAssignmentError.new( - "Future can be completed only once. Current result is #{result}, " + - "trying to set #{state.result}")) - end - return false - end - self - end - - # @!visibility private - def add_callback(method, *args) - state = internal_state - if completed?(state) - call_callback method, state, *args - else - @Callbacks.push [method, *args] - state = internal_state - # take back if it was completed in the meanwhile - call_callbacks state if completed?(state) - end - self - end - - # @!visibility private - def apply(args, block) - internal_state.apply args, block - end - - private - - def wait_until_complete!(timeout = nil) - result = wait_until_complete(timeout) - raise self if failed? 
- result - end - - def call_callbacks(state) - method, *args = @Callbacks.pop - while method - call_callback method, state, *args - method, *args = @Callbacks.pop - end - end - - def call_callback(method, state, *args) - self.send method, state, *args - end - - def async_callback_on_success(state, executor, args, callback) - with_async(executor, state, args, callback) do |st, ar, cb| - callback_on_success st, ar, cb - end - end - - def async_callback_on_failure(state, executor, args, callback) - with_async(executor, state, args, callback) do |st, ar, cb| - callback_on_failure st, ar, cb - end - end - - def callback_on_success(state, args, callback) - state.apply args, callback if state.success? - end - - def callback_on_failure(state, args, callback) - state.apply args, callback unless state.success? - end - - def callback_on_completion(state, args, callback) - callback.call state.result, *args - end - - def callback_notify_blocked(state, promise) - super(promise) - end - - def async_callback_on_completion(state, executor, args, callback) - with_async(executor, state, args, callback) do |st, ar, cb| - callback_on_completion st, ar, cb - end - end - end - - module Completable - end - - # A Event which can be completed by user. - class CompletableEvent < Event - include Completable - - # Complete the Event, `raise` if already completed - def complete(raise_on_reassign = true) - complete_with COMPLETED, raise_on_reassign - end - - def with_hidden_completable - @with_hidden_completable ||= EventWrapperPromise.new(self, @DefaultExecutor).event - end - end - - # A Future which can be completed by user. - class CompletableFuture < Future - include Completable - - # Complete the future with triplet od `success`, `value`, `reason` - # `raise` if already completed - # return [self] - def complete(success, value, reason, raise_on_reassign = true) - complete_with(success ? 
Success.new(value) : Failed.new(reason), raise_on_reassign) - end - - # Complete the future with value - # return [self] - def success(value) - promise.success(value) - end - - # Try to complete the future with value - # return [self] - def try_success(value) - promise.try_success(value) - end - - # Fail the future with reason - # return [self] - def fail(reason = StandardError.new) - promise.fail(reason) - end - - # Try to fail the future with reason - # return [self] - def try_fail(reason = StandardError.new) - promise.try_fail(reason) - end - - # Evaluate the future to value if there is an exception the future fails with it - # return [self] - def evaluate_to(*args, &block) - promise.evaluate_to(*args, block) - end - - # Evaluate the future to value if there is an exception the future fails with it - # @raise the exception - # return [self] - def evaluate_to!(*args, &block) - promise.evaluate_to!(*args, block) - end - - def with_hidden_completable - @with_hidden_completable ||= FutureWrapperPromise.new(self, @DefaultExecutor).future - end - end - - # @abstract - class AbstractPromise < Synchronization::Object - safe_initialization! 
- include Concern::Logging - - def initialize(future) - super() - @Future = future - end - - def future - @Future - end - - alias_method :event, :future - - def default_executor - future.default_executor - end - - def state - future.state - end - - def touch - end - - def to_s - "<##{self.class}:0x#{'%x' % (object_id << 1)} #{state}>" - end - - def inspect - to_s - end - - private - - def complete_with(new_state, raise_on_reassign = true) - @Future.complete_with(new_state, raise_on_reassign) - end - - # @return [Future] - def evaluate_to(*args, block) - complete_with Future::Success.new(block.call(*args)) - rescue StandardError => error - complete_with Future::Failed.new(error) - rescue Exception => error - log(ERROR, 'Promises::Future', error) - complete_with Future::Failed.new(error) - end - end - - class CompletableEventPromise < AbstractPromise - def initialize(default_executor) - super CompletableEvent.new(self, default_executor) - end - end - - class CompletableFuturePromise < AbstractPromise - def initialize(default_executor) - super CompletableFuture.new(self, default_executor) - end - - # Set the `Future` to a value and wake or notify all threads waiting on it. - # - # @param [Object] value the value to store in the `Future` - # @raise [Concurrent::MultipleAssignmentError] if the `Future` has already been set or otherwise completed - # @return [Future] - def success(value) - complete_with Future::Success.new(value) - end - - def try_success(value) - !!complete_with(Future::Success.new(value), false) - end - - # Set the `Future` to failed due to some error and wake or notify all threads waiting on it. 
- # - # @param [Object] reason for the failure - # @raise [Concurrent::MultipleAssignmentError] if the `Future` has already been set or otherwise completed - # @return [Future] - def fail(reason = StandardError.new) - complete_with Future::Failed.new(reason) - end - - def try_fail(reason = StandardError.new) - !!complete_with(Future::Failed.new(reason), false) - end - - public :evaluate_to - - # @return [Future] - def evaluate_to!(*args, block) - evaluate_to(*args, block).wait! - end - end - - # @abstract - class InnerPromise < AbstractPromise - end - - # @abstract - class BlockedPromise < InnerPromise - def self.new(*args, &block) - promise = super(*args, &block) - promise.blocked_by.each { |f| f.add_callback :callback_notify_blocked, promise } - promise - end - - def initialize(future, blocked_by_futures, countdown) - super(future) - initialize_blocked_by(blocked_by_futures) - @Countdown = AtomicFixnum.new countdown - end - - # @api private - def on_done(future) - countdown = process_on_done(future) - completable = completable?(countdown, future) - - if completable - on_completable(future) - # futures could be deleted from blocked_by one by one here, but that would be too expensive, - # it's done once when all are done to free the reference - clear_blocked_by! - end - end - - def touch - blocked_by.each(&:touch) - end - - # !visibility private - # for inspection only - def blocked_by - @BlockedBy - end - - def inspect - "#{to_s[0..-2]} blocked_by:[#{ blocked_by.map(&:to_s).join(', ')}]>" - end - - private - - def initialize_blocked_by(blocked_by_futures) - unless blocked_by_futures.is_a?(::Array) - raise ArgumentError, "has to be array of events/futures: #{blocked_by_futures.inspect}" - end - @BlockedBy = blocked_by_futures - end - - def clear_blocked_by! - # not synchronized because we do not care when this change propagates - @BlockedBy = [] - nil - end - - # @return [true,false] if completable - def completable?(countdown, future) - countdown.zero? 
- end - - def process_on_done(future) - @Countdown.decrement - end - - def on_completable(done_future) - raise NotImplementedError - end - end - - # @abstract - class BlockedTaskPromise < BlockedPromise - def initialize(blocked_by_future, default_executor, executor, args, &task) - raise ArgumentError, 'no block given' unless block_given? - super Future.new(self, default_executor), [blocked_by_future], 1 - @Executor = executor - @Task = task - @Args = args - end - - def executor - @Executor - end - end - - class ThenPromise < BlockedTaskPromise - private - - def initialize(blocked_by_future, default_executor, executor, args, &task) - raise ArgumentError, 'only Future can be appended with then' unless blocked_by_future.is_a? Future - super blocked_by_future, default_executor, executor, args, &task - end - - def on_completable(done_future) - if done_future.success? - Concurrent.executor(@Executor).post(done_future, @Args, @Task) do |future, args, task| - evaluate_to lambda { future.apply args, task } - end - else - complete_with done_future.internal_state - end - end - end - - class RescuePromise < BlockedTaskPromise - private - - def initialize(blocked_by_future, default_executor, executor, args, &task) - super blocked_by_future, default_executor, executor, args, &task - end - - def on_completable(done_future) - if done_future.failed? 
- Concurrent.executor(@Executor).post(done_future, @Args, @Task) do |future, args, task| - evaluate_to lambda { future.apply args, task } - end - else - complete_with done_future.internal_state - end - end - end - - class ChainPromise < BlockedTaskPromise - private - - def on_completable(done_future) - if Future === done_future - Concurrent.executor(@Executor).post(done_future, @Args, @Task) do |future, args, task| - evaluate_to(*future.result, *args, task) - end - else - Concurrent.executor(@Executor).post(@Args, @Task) do |args, task| - evaluate_to *args, task - end - end - end - end - - # will be immediately completed - class ImmediateEventPromise < InnerPromise - def initialize(default_executor) - super Event.new(self, default_executor).complete_with(Event::COMPLETED) - end - end - - class ImmediateFuturePromise < InnerPromise - def initialize(default_executor, success, value, reason) - super Future.new(self, default_executor). - complete_with(success ? Future::Success.new(value) : Future::Failed.new(reason)) - end - end - - class FlatPromise < BlockedPromise - - # !visibility private - def blocked_by - @BlockedBy.each.to_a - end - - private - - def process_on_done(future) - countdown = super(future) - if countdown.nonzero? - internal_state = future.internal_state - - unless internal_state.success? 
- complete_with internal_state - return countdown - end - - value = internal_state.value - case value - when Future - value.touch if self.future.touched - @BlockedBy.push value - value.add_callback :callback_notify_blocked, self - @Countdown.value - when Event - evaluate_to(lambda { raise TypeError, 'cannot flatten to Event' }) - else - evaluate_to(lambda { raise TypeError, "returned value #{value.inspect} is not a Future" }) - end - end - countdown - end - - def initialize(blocked_by_future, levels, default_executor) - raise ArgumentError, 'levels has to be higher than 0' if levels < 1 - super Future.new(self, default_executor), blocked_by_future, 1 + levels - end - - def initialize_blocked_by(blocked_by_future) - @BlockedBy = LockFreeStack.new.push(blocked_by_future) - end - - def on_completable(done_future) - complete_with done_future.internal_state - end - - def clear_blocked_by! - @BlockedBy.clear - nil - end - - def completable?(countdown, future) - !@Future.internal_state.completed? 
&& super(countdown, future) - end - end - - class ZipEventEventPromise < BlockedPromise - def initialize(event1, event2, default_executor) - super Event.new(self, default_executor), [event1, event2], 2 - end - - def on_completable(done_future) - complete_with Event::COMPLETED - end - end - - class ZipFutureEventPromise < BlockedPromise - def initialize(future, event, default_executor) - super Future.new(self, default_executor), [future, event], 2 - @FutureResult = future - end - - def on_completable(done_future) - complete_with @FutureResult.internal_state - end - end - - class ZipFutureFuturePromise < BlockedPromise - def initialize(future1, future2, default_executor) - super Future.new(self, default_executor), [future1, future2], 2 - @Future1Result = future1 - @Future2Result = future2 - end - - def on_completable(done_future) - success1, value1, reason1 = @Future1Result.result - success2, value2, reason2 = @Future2Result.result - success = success1 && success2 - new_state = if success - Future::SuccessArray.new([value1, value2]) - else - Future::PartiallyFailed.new([value1, value2], [reason1, reason2]) - end - complete_with new_state - end - end - - class EventWrapperPromise < BlockedPromise - def initialize(event, default_executor) - super Event.new(self, default_executor), [event], 1 - end - - def on_completable(done_future) - complete_with Event::COMPLETED - end - end - - class FutureWrapperPromise < BlockedPromise - def initialize(future, default_executor) - super Future.new(self, default_executor), [future], 1 - end - - def on_completable(done_future) - complete_with done_future.internal_state - end - end - - class ZipFuturesPromise < BlockedPromise - - private - - def initialize(blocked_by_futures, default_executor) - super(Future.new(self, default_executor), blocked_by_futures, blocked_by_futures.size) - - on_completable nil if blocked_by_futures.empty? 
- end - - def on_completable(done_future) - all_success = true - values = Array.new(blocked_by.size) - reasons = Array.new(blocked_by.size) - - blocked_by.each_with_index do |future, i| - if future.is_a?(Future) - success, values[i], reasons[i] = future.result - all_success &&= success - else - values[i] = reasons[i] = nil - end - end - - if all_success - complete_with Future::SuccessArray.new(values) - else - complete_with Future::PartiallyFailed.new(values, reasons) - end - end - end - - class ZipEventsPromise < BlockedPromise - - private - - def initialize(blocked_by_futures, default_executor) - super(Event.new(self, default_executor), blocked_by_futures, blocked_by_futures.size) - - on_completable nil if blocked_by_futures.empty? - end - - def on_completable(done_future) - complete_with Event::COMPLETED - end - end - - class AbstractAnyPromise < BlockedPromise - def touch - blocked_by.each(&:touch) unless @Future.completed? - end - end - - class AnyCompleteFuturePromise < AbstractAnyPromise - - private - - def initialize(blocked_by_futures, default_executor) - super(Future.new(self, default_executor), blocked_by_futures, blocked_by_futures.size) - end - - def completable?(countdown, future) - true - end - - def on_completable(done_future) - complete_with done_future.internal_state, false - end - end - - class AnyCompleteEventPromise < AbstractAnyPromise - - private - - def initialize(blocked_by_futures, default_executor) - super(Event.new(self, default_executor), blocked_by_futures, blocked_by_futures.size) - end - - def completable?(countdown, future) - true - end - - def on_completable(done_future) - complete_with Event::COMPLETED, false - end - end - - class AnySuccessfulFuturePromise < AnyCompleteFuturePromise - - private - - def completable?(countdown, future) - future.success? 
|| super(countdown, future) - end - end - - class DelayPromise < InnerPromise - def touch - @Future.complete_with Event::COMPLETED - end - - private - - def initialize(default_executor) - super Event.new(self, default_executor) - end - end - - # will be evaluated to task in intended_time - class ScheduledPromise < InnerPromise - def intended_time - @IntendedTime - end - - def inspect - "#{to_s[0..-2]} intended_time:[#{@IntendedTime}}>" - end - - private - - def initialize(default_executor, intended_time) - super Event.new(self, default_executor) - - @IntendedTime = intended_time - - in_seconds = begin - now = Time.now - schedule_time = if @IntendedTime.is_a? Time - @IntendedTime - else - now + @IntendedTime - end - [0, schedule_time.to_f - now.to_f].max - end - - Concurrent.global_timer_set.post(in_seconds) do - @Future.complete_with Event::COMPLETED - end - end - end - - extend FactoryMethods - - private_constant :AbstractPromise, :CompletableEventPromise, :CompletableFuturePromise, - :InnerPromise, :BlockedPromise, :BlockedTaskPromise, :ThenPromise, - :RescuePromise, :ChainPromise, :ImmediateEventPromise, - :ImmediateFuturePromise, :FlatPromise, :ZipEventEventPromise, - :ZipFutureEventPromise, :ZipFutureFuturePromise, :EventWrapperPromise, - :FutureWrapperPromise, :ZipFuturesPromise, :ZipEventsPromise, - :AnyCompleteFuturePromise, :AnySuccessfulFuturePromise, :DelayPromise, :ScheduledPromise - - end -end - -# TODO when value is requested the current thread may evaluate the tasks to get the value for performance reasons it may not evaluate :io though -# TODO try work stealing pool, each thread has it's own queue From 334e5267faf564e07d7bd61ae72af9ad8fe8488f Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Sat, 11 Jun 2016 15:52:21 +0200 Subject: [PATCH 21/68] Fix AnySuccessfulFuturePromise --- lib/concurrent/edge/promises.rb | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/concurrent/edge/promises.rb b/lib/concurrent/edge/promises.rb index 
c832791aa..544c93b70 100644 --- a/lib/concurrent/edge/promises.rb +++ b/lib/concurrent/edge/promises.rb @@ -1374,6 +1374,7 @@ class AnySuccessfulFuturePromise < AnyCompleteFuturePromise def completable?(countdown, future) future.success? || super(countdown, future) + countdown.zero? end end From 75b8186fda2e1aefb3d7384d3f3c935bfe3b24cd Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Sat, 11 Jun 2016 16:21:13 +0200 Subject: [PATCH 22/68] Update examples --- examples/init.rb | 2 + examples/promises.in.rb | 44 +++++++++++---- examples/promises.out.rb | 112 ++++++++++++++++++++++++--------------- 3 files changed, 104 insertions(+), 54 deletions(-) diff --git a/examples/init.rb b/examples/init.rb index c3ed8aafb..4fdb8550e 100644 --- a/examples/init.rb +++ b/examples/init.rb @@ -3,3 +3,5 @@ def do_stuff :stuff end + +Concurrent.use_stdlib_logger Logger::DEBUG diff --git a/examples/promises.in.rb b/examples/promises.in.rb index 8c2104bb1..5d09f4a72 100644 --- a/examples/promises.in.rb +++ b/examples/promises.in.rb @@ -5,7 +5,7 @@ ### Simple asynchronous task -future = future { sleep 0.1; 1 + 1 } # evaluation starts immediately +future = future(0.1) { |duration| sleep duration; :result } # evaluation starts immediately future.completed? # block until evaluated future.value @@ -21,6 +21,7 @@ # re-raising raise future rescue $! + ### Direct creation of completed futures succeeded_future(Object.new) @@ -120,7 +121,7 @@ ### Completable Future and Event future = completable_future -event = event() +event = completable_event() # These threads will be blocked until the future and event is completed t1 = Thread.new { future.value } # @@ -205,17 +206,40 @@ # periodic task -DONE = Concurrent::AtomicBoolean.new false - -def schedule_job - schedule(1) { do_stuff }. - rescue { |e| StandardError === e ? report_error(e) : raise(e) }. - then { schedule_job unless DONE.true? 
} +def schedule_job(interval, &job) + # schedule the first execution and chain restart og the job + Concurrent.schedule(interval, &job).chain do |success, continue, reason| + if success + schedule_job(interval, &job) if continue + else + # handle error + p reason + # retry + schedule_job(interval, &job) + end + end end -schedule_job -DONE.make_true +queue = Queue.new +count = 0 + +schedule_job 0.05 do + queue.push count + count += 1 + # to continue scheduling return true, false will end the task + if count < 4 + # to continue scheduling return true + true + else + queue.push nil + # to end the task return false + false + end +end +# read the queue +arr, v = [], nil; arr << v while (v = queue.pop) # +arr # How to limit processing where there are limited resources? # By creating an actor managing the resource diff --git a/examples/promises.out.rb b/examples/promises.out.rb index c6fd6d062..896fd6694 100644 --- a/examples/promises.out.rb +++ b/examples/promises.out.rb @@ -5,30 +5,31 @@ ### Simple asynchronous task -future = future { sleep 0.1; 1 + 1 } # evaluation starts immediately - # => <#Concurrent::Promises::Future:0x7fc5cc1e5340 pending blocks:[]> +future = future(0.1) { |duration| sleep duration; :result } # evaluation starts immediately + # => <#Concurrent::Promises::Future:0x7fa602198e30 pending blocks:[]> future.completed? # => false # block until evaluated -future.value # => 2 +future.value # => :result future.completed? # => true ### Failing asynchronous task future = future { raise 'Boom' } - # => <#Concurrent::Promises::Future:0x7fc5cc1dc808 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fa602188a58 pending blocks:[]> future.value # => nil future.value! rescue $! # => # future.reason # => # # re-raising raise future rescue $! 
# => # + ### Direct creation of completed futures succeeded_future(Object.new) - # => <#Concurrent::Promises::Future:0x7fc5cc1c6030 success blocks:[]> + # => <#Concurrent::Promises::Future:0x7fa60217bf10 success blocks:[]> failed_future(StandardError.new("boom")) - # => <#Concurrent::Promises::Future:0x7fc5cc1c50b8 failed blocks:[]> + # => <#Concurrent::Promises::Future:0x7fa60217aa48 failed blocks:[]> ### Chaining of futures @@ -68,7 +69,7 @@ # => 3 failing_zip = succeeded_future(1) & failed_future(StandardError.new('boom')) - # => <#Concurrent::Promises::Future:0x7fc5cc11ec90 failed blocks:[]> + # => <#Concurrent::Promises::Future:0x7fa602129300 failed blocks:[]> failing_zip.result # => [false, [1, nil], [nil, #]] failing_zip.then { |v| 'never happens' }.result # => [false, [1, nil], [nil, #]] failing_zip.rescue { |a, b| (a || b).message }.value @@ -81,7 +82,7 @@ # will not evaluate until asked by #value or other method requiring completion future = delay { 'lazy' } - # => <#Concurrent::Promises::Future:0x7fc5cc0ff660 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fa60211a490 pending blocks:[]> sleep 0.1 future.completed? 
# => false future.value # => "lazy" @@ -89,20 +90,20 @@ # propagates trough chain allowing whole or partial lazy chains head = delay { 1 } - # => <#Concurrent::Promises::Future:0x7fc5cc0fc938 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fa602113898 pending blocks:[]> branch1 = head.then(&:succ) - # => <#Concurrent::Promises::Future:0x7fc5cc0df068 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fa602112a60 pending blocks:[]> branch2 = head.delay.then(&:succ) - # => <#Concurrent::Promises::Future:0x7fc5cc0dd178 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fa602111188 pending blocks:[]> join = branch1 & branch2 - # => <#Concurrent::Promises::Future:0x7fc5cc0dc430 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fa6021102b0 pending blocks:[]> sleep 0.1 # nothing will complete # => 0 [head, branch1, branch2, join].map(&:completed?) # => [false, false, false, false] branch1.value # => 2 sleep 0.1 # forces only head to complete, branch 2 stays incomplete - # => 0 + # => 1 [head, branch1, branch2, join].map(&:completed?) # => [true, true, false, false] join.value # => [2, 2] @@ -125,14 +126,14 @@ # it'll be executed after 0.1 seconds scheduled = schedule(0.1) { 1 } - # => <#Concurrent::Promises::Future:0x7fc5caaae028 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fa60288a810 pending blocks:[]> scheduled.completed? 
# => false scheduled.value # available after 0.1sec # => 1 # and in chain scheduled = delay { 1 }.schedule(0.1).then(&:succ) - # => <#Concurrent::Promises::Future:0x7fc5caa9f2d0 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fa603101638 pending blocks:[]> # will not be scheduled until value is requested sleep 0.1 scheduled.value # returns after another 0.1sec # => 2 @@ -141,21 +142,21 @@ ### Completable Future and Event future = completable_future - # => <#Concurrent::Promises::CompletableFuture:0x7fc5caa8eae8 pending blocks:[]> -event = event() - # => <#Concurrent::Promises::CompletableEvent:0x7fc5caa8d648 pending blocks:[]> + # => <#Concurrent::Promises::CompletableFuture:0x7fa6030e0ac8 pending blocks:[]> +event = completable_event() + # => <#Concurrent::Promises::CompletableEvent:0x7fa6030e0a78 pending blocks:[]> # These threads will be blocked until the future and event is completed t1 = Thread.new { future.value } t2 = Thread.new { event.wait } future.success 1 - # => <#Concurrent::Promises::CompletableFuture:0x7fc5caa8eae8 success blocks:[]> + # => <#Concurrent::Promises::CompletableFuture:0x7fa6030e0ac8 success blocks:[]> future.success 1 rescue $! # => # future.try_success 2 # => false event.complete - # => <#Concurrent::Promises::CompletableEvent:0x7fc5caa8d648 completed blocks:[]> + # => <#Concurrent::Promises::CompletableEvent:0x7fa6030e0a78 success blocks:[]> # The threads can be joined now [t1, t2].each &:join @@ -163,14 +164,14 @@ ### Callbacks -queue = Queue.new # => # +queue = Queue.new # => # future = delay { 1 + 1 } - # => <#Concurrent::Promises::Future:0x7fc5caa754d0 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fa6030aa888 pending blocks:[]> future.on_success { queue << 1 } # evaluated asynchronously - # => <#Concurrent::Promises::Future:0x7fc5caa754d0 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fa6030aa888 pending blocks:[]> future.on_success! 
{ queue << 2 } # evaluated on completing thread - # => <#Concurrent::Promises::Future:0x7fc5caa754d0 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fa6030aa888 pending blocks:[]> queue.empty? # => true future.value # => 2 @@ -188,7 +189,7 @@ # executed on executor for blocking and long operations then_on(:io) { File.read __FILE__ }. wait - # => <#Concurrent::Promises::Future:0x7fc5cb010b10 success blocks:[]> + # => <#Concurrent::Promises::Future:0x7fa60307abb0 success blocks:[]> ### Interoperability with actors @@ -196,7 +197,7 @@ actor = Concurrent::Actor::Utils::AdHoc.spawn :square do -> v { v ** 2 } end - # => # + # => # future { 2 }. @@ -210,24 +211,24 @@ ### Interoperability with channels ch1 = Concurrent::Channel.new - # => #, @__condition__=#, @closed=false, @size=0, @capacity=1, @buffer=nil, @putting=[], @taking=[]>, @validator=#> + # => #, @__condition__=#, @closed=false, @size=0, @capacity=1, @buffer=nil, @putting=[], @taking=[]>, @validator=#> ch2 = Concurrent::Channel.new - # => #, @__condition__=#, @closed=false, @size=0, @capacity=1, @buffer=nil, @putting=[], @taking=[]>, @validator=#> + # => #, @__condition__=#, @closed=false, @size=0, @capacity=1, @buffer=nil, @putting=[], @taking=[]>, @validator=#> result = select(ch1, ch2) - # => <#Concurrent::Promises::Future:0x7fc5cc892e40 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fa601a62d50 pending blocks:[]> ch1.put 1 # => true result.value! - # => [1, #, @__condition__=#, @closed=false, @size=0, @capacity=1, @buffer=nil, @putting=[], @taking=[]>, @validator=#>] + # => [1, #, @__condition__=#, @closed=false, @size=0, @capacity=1, @buffer=nil, @putting=[], @taking=[]>, @validator=#>] future { 1+1 }. then_put(ch1) - # => <#Concurrent::Promises::Future:0x7fc5cc87b920 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fa601a51910 pending blocks:[]> result = future { '%02d' }. then_select(ch1, ch2). 
then { |format, (value, channel)| format format, value } - # => <#Concurrent::Promises::Future:0x7fc5cc862f60 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fa601a2b008 pending blocks:[]> result.value! # => "02" @@ -235,7 +236,7 @@ # simple background processing future { do_stuff } - # => <#Concurrent::Promises::Future:0x7fc5cc080518 pending blocks:[]> + # => <#Concurrent::Promises::Future:0x7fa601a1b478 pending blocks:[]> # parallel background processing jobs = 10.times.map { |i| future { i } } @@ -243,18 +244,41 @@ # periodic task -DONE = Concurrent::AtomicBoolean.new false # => # - -def schedule_job - schedule(1) { do_stuff }. - rescue { |e| StandardError === e ? report_error(e) : raise(e) }. - then { schedule_job unless DONE.true? } +def schedule_job(interval, &job) + # schedule the first execution and chain restart og the job + Concurrent.schedule(interval, &job).chain do |success, continue, reason| + if success + schedule_job(interval, &job) if continue + else + # handle error + p reason + # retry + schedule_job(interval, &job) + end + end end # => :schedule_job -schedule_job - # => <#Concurrent::Promises::Future:0x7fc5ca9949d0 pending blocks:[]> -DONE.make_true # => true +queue = Queue.new # => # +count = 0 # => 0 + +schedule_job 0.05 do + queue.push count + count += 1 + # to continue scheduling return true, false will end the task + if count < 4 + # to continue scheduling return true + true + else + queue.push nil + # to end the task return false + false + end +end + # => <#Concurrent::Promises::Future:0x7fa6020c23d0 pending blocks:[]> +# read the queue +arr, v = [], nil; arr << v while (v = queue.pop) +arr # => [0, 1, 2, 3] # How to limit processing where there are limited resources? 
# By creating an actor managing the resource @@ -265,7 +289,7 @@ def schedule_job data[message] end end - # => # + # => # concurrent_jobs = 11.times.map do |v| @@ -295,7 +319,7 @@ def schedule_job end end end - # => # + # => # concurrent_jobs = 11.times.map do |v| From 59f9e467a3098a759e84503f9654dfe5d15079e6 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Sat, 11 Jun 2016 16:35:46 +0200 Subject: [PATCH 23/68] Documentation and hiding constants --- lib/concurrent/edge/promises.rb | 97 +++++++++++++++++++-------------- 1 file changed, 55 insertions(+), 42 deletions(-) diff --git a/lib/concurrent/edge/promises.rb b/lib/concurrent/edge/promises.rb index 544c93b70..69b19874a 100644 --- a/lib/concurrent/edge/promises.rb +++ b/lib/concurrent/edge/promises.rb @@ -31,6 +31,7 @@ def future(*args, &task) future_on(:io, *args, &task) end + # As {#future} but takes default_executor as first argument def future_on(default_executor, *args, &task) ImmediateEventPromise.new(default_executor).future.then(*args, &task) end @@ -68,6 +69,7 @@ def delay(*args, &task) delay_on :io, *args, &task end + # As {#delay} but takes default_executor as first argument def delay_on(default_executor, *args, &task) DelayPromise.new(default_executor).future.then(*args, &task) end @@ -79,6 +81,7 @@ def schedule(intended_time, *args, &task) schedule_on :io, intended_time, *args, &task end + # As {#schedule} but takes default_executor as first argument def schedule_on(default_executor, intended_time, *args, &task) ScheduledPromise.new(default_executor, intended_time).future.then(*args, &task) end @@ -92,6 +95,7 @@ def zip_futures(*futures_and_or_events) zip_futures_on :io, *futures_and_or_events end + # As {#zip_futures} but takes default_executor as first argument def zip_futures_on(default_executor, *futures_and_or_events) ZipFuturesPromise.new(futures_and_or_events, default_executor).future end @@ -106,6 +110,7 @@ def zip_events(*futures_and_or_events) zip_events_on :io, *futures_and_or_events end 
+ # As {#zip_events} but takes default_executor as first argument def zip_events_on(default_executor, *futures_and_or_events) ZipEventsPromise.new(futures_and_or_events, default_executor).future end @@ -117,6 +122,7 @@ def any_complete_future(*futures) any_complete_future_on :io, *futures end + # As {#any_complete_future} but takes default_executor as first argument def any_complete_future_on(default_executor, *futures) AnyCompleteFuturePromise.new(futures, default_executor).future end @@ -131,14 +137,17 @@ def any_successful_future(*futures) any_successful_future_on :io, *futures end + # As {#any_succesful_future} but takes default_executor as first argument def any_successful_future_on(default_executor, *futures) AnySuccessfulFuturePromise.new(futures, default_executor).future end + # Constructs new {Event} which becomes complete after first if the events completes. def any_event(*events) any_event_on :io, *events end + # As {#any_event} but takes default_executor as first argument def any_event_on(default_executor, *events) AnyCompleteEventPromise.new(events, default_executor).event end @@ -147,14 +156,7 @@ def any_event_on(default_executor, *events) # TODO consider adding zip_by(slice, *futures) processing futures in slices end - # Represents an event which will happen in future (will be completed). It has to always happen. - class Event < Synchronization::Object - safe_initialization! - private(*attr_atomic(:internal_state)) - # @!visibility private - public :internal_state - include Concern::Logging - + module InternalStates class State def completed? 
raise NotImplementedError @@ -238,6 +240,8 @@ def to_sym end end + private_constant :Success + # @!visibility private class SuccessArray < Success def apply(args, block) @@ -245,6 +249,8 @@ def apply(args, block) end end + private_constant :SuccessArray + # @!visibility private class Failed < CompletedWithResult def initialize(reason) @@ -272,6 +278,8 @@ def apply(args, block) end end + private_constant :Failed + # @!visibility private class PartiallyFailed < CompletedWithResult def initialize(value, reason) @@ -301,12 +309,26 @@ def apply(args, block) end end + private_constant :PartiallyFailed - # @!visibility private PENDING = Pending.new - # @!visibility private COMPLETED = Success.new(nil) + private_constant :PENDING, :COMPLETED + end + + private_constant :InternalStates + + # Represents an event which will happen in future (will be completed). It has to always happen. + class Event < Synchronization::Object + safe_initialization! + private(*attr_atomic(:internal_state)) + # @!visibility private + public :internal_state + + include Concern::Logging + include InternalStates + def initialize(promise, default_executor) super() @Lock = Mutex.new @@ -913,6 +935,7 @@ def with_hidden_completable # @abstract class AbstractPromise < Synchronization::Object safe_initialization! 
+ include InternalStates include Concern::Logging def initialize(future) @@ -953,12 +976,12 @@ def complete_with(new_state, raise_on_reassign = true) # @return [Future] def evaluate_to(*args, block) - complete_with Future::Success.new(block.call(*args)) + complete_with Success.new(block.call(*args)) rescue StandardError => error - complete_with Future::Failed.new(error) + complete_with Failed.new(error) rescue Exception => error log(ERROR, 'Promises::Future', error) - complete_with Future::Failed.new(error) + complete_with Failed.new(error) end end @@ -973,35 +996,24 @@ def initialize(default_executor) super CompletableFuture.new(self, default_executor) end - # Set the `Future` to a value and wake or notify all threads waiting on it. - # - # @param [Object] value the value to store in the `Future` - # @raise [Concurrent::MultipleAssignmentError] if the `Future` has already been set or otherwise completed - # @return [Future] def success(value) - complete_with Future::Success.new(value) + complete_with Success.new(value) end def try_success(value) - !!complete_with(Future::Success.new(value), false) + !!complete_with(Success.new(value), false) end - # Set the `Future` to failed due to some error and wake or notify all threads waiting on it. - # - # @param [Object] reason for the failure - # @raise [Concurrent::MultipleAssignmentError] if the `Future` has already been set or otherwise completed - # @return [Future] def fail(reason = StandardError.new) - complete_with Future::Failed.new(reason) + complete_with Failed.new(reason) end def try_fail(reason = StandardError.new) - !!complete_with(Future::Failed.new(reason), false) + !!complete_with(Failed.new(reason), false) end public :evaluate_to - # @return [Future] def evaluate_to!(*args, block) evaluate_to(*args, block).wait! 
end @@ -1152,14 +1164,14 @@ def on_completable(done_future) # will be immediately completed class ImmediateEventPromise < InnerPromise def initialize(default_executor) - super Event.new(self, default_executor).complete_with(Event::COMPLETED) + super Event.new(self, default_executor).complete_with(COMPLETED) end end class ImmediateFuturePromise < InnerPromise def initialize(default_executor, success, value, reason) super Future.new(self, default_executor). - complete_with(success ? Future::Success.new(value) : Future::Failed.new(reason)) + complete_with(success ? Success.new(value) : Failed.new(reason)) end end @@ -1226,7 +1238,7 @@ def initialize(event1, event2, default_executor) end def on_completable(done_future) - complete_with Event::COMPLETED + complete_with COMPLETED end end @@ -1253,9 +1265,9 @@ def on_completable(done_future) success2, value2, reason2 = @Future2Result.result success = success1 && success2 new_state = if success - Future::SuccessArray.new([value1, value2]) + SuccessArray.new([value1, value2]) else - Future::PartiallyFailed.new([value1, value2], [reason1, reason2]) + PartiallyFailed.new([value1, value2], [reason1, reason2]) end complete_with new_state end @@ -1267,7 +1279,7 @@ def initialize(event, default_executor) end def on_completable(done_future) - complete_with Event::COMPLETED + complete_with COMPLETED end end @@ -1306,9 +1318,9 @@ def on_completable(done_future) end if all_success - complete_with Future::SuccessArray.new(values) + complete_with SuccessArray.new(values) else - complete_with Future::PartiallyFailed.new(values, reasons) + complete_with PartiallyFailed.new(values, reasons) end end end @@ -1324,10 +1336,11 @@ def initialize(blocked_by_futures, default_executor) end def on_completable(done_future) - complete_with Event::COMPLETED + complete_with COMPLETED end end + # @abstract class AbstractAnyPromise < BlockedPromise def touch blocked_by.each(&:touch) unless @Future.completed? 
@@ -1364,7 +1377,7 @@ def completable?(countdown, future) end def on_completable(done_future) - complete_with Event::COMPLETED, false + complete_with COMPLETED, false end end @@ -1373,14 +1386,15 @@ class AnySuccessfulFuturePromise < AnyCompleteFuturePromise private def completable?(countdown, future) - future.success? || super(countdown, future) + future.success? || + # inlined super from BlockedPromise countdown.zero? end end class DelayPromise < InnerPromise def touch - @Future.complete_with Event::COMPLETED + @Future.complete_with COMPLETED end private @@ -1390,7 +1404,6 @@ def initialize(default_executor) end end - # will be evaluated to task in intended_time class ScheduledPromise < InnerPromise def intended_time @IntendedTime @@ -1418,7 +1431,7 @@ def initialize(default_executor, intended_time) end Concurrent.global_timer_set.post(in_seconds) do - @Future.complete_with Event::COMPLETED + @Future.complete_with COMPLETED end end end From 3d2d4c260179c6423b443d8eb6a348fe73331ad2 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Mon, 13 Jun 2016 23:57:30 +0200 Subject: [PATCH 24/68] New Promises documentation and updates - evaluator of ruby code in markdown files added - document all public methods with YARD (and macros) - hide properly everything private - better OO structure, Future is no longer an Event's child, they share common parent instead, private callback methods simplified - flat_future and flat_event added - schedule fixed on event - CompletableFuture API simplified - removed bad aliases: :complete?, :async - run method added to support green-threads like usage - rewrite new MD guide remains --- README.md | 2 +- doc/format-md.rb | 122 ++ doc/init.rb | 7 + examples/promises.in.rb => doc/promises.in.md | 49 +- doc/promises.out.md | 431 ++++++ examples/promises.out.rb | 334 ----- lib/concurrent/edge/promises.rb | 1169 +++++++++++------ spec/concurrent/promises_spec.rb | 51 +- 8 files changed, 1393 insertions(+), 772 deletions(-) create mode 100644 
doc/format-md.rb create mode 100644 doc/init.rb rename examples/promises.in.rb => doc/promises.in.md (86%) create mode 100644 doc/promises.out.md delete mode 100644 examples/promises.out.rb diff --git a/README.md b/README.md index 50602ed05..210fea45d 100644 --- a/README.md +++ b/README.md @@ -66,7 +66,7 @@ We also have a [mailing list](http://groups.google.com/group/concurrent-ruby) an #### General-purpose Concurrency Abstractions * [Async](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/Async.html): A mixin module that provides simple asynchronous behavior to a class. Loosely based on Erlang's [gen_server](http://www.erlang.org/doc/man/gen_server.html). -* [Promises Framework](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/Promises/FutureFactoryMethods.html): +* [Promises Framework](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/Promises.html): Unified implementation of futures and promises which combines features of previous `Future`, `Promise`, `IVar`, `Event`, `dataflow`, `Delay`, and `TimerTask` into a single framework. It extensively uses the new synchronization layer to make all the features **non-blocking** and **lock-free**, with the exception of obviously blocking diff --git a/doc/format-md.rb b/doc/format-md.rb new file mode 100644 index 000000000..36edd3de3 --- /dev/null +++ b/doc/format-md.rb @@ -0,0 +1,122 @@ +require 'rubygems' +require 'bundler/setup' +require 'pry' +require 'pp' + +class MDFormatter + + def initialize(input_file, environment) + @input_path = input_file + @environment = environment + @output = '' + + process_file input_file + end + + def evaluate (code, line) + eval(code, @environment, @input_path, line) + end + + def process_ruby(part, start_line) + lines = part.lines + chunks = [] + line = '' + + while !lines.empty? + line += lines.shift + if Pry::Code.complete_expression? line + chunks << line + line = '' + end + end + + raise unless line.empty? 
+ + chunk_lines = chunks.map { |chunk| [chunk, [chunk.split($/).size, 1].max] } + indent = 40 + + line_count = start_line + output = '' + chunk_lines.each do |chunk, lines| + result = evaluate(chunk, line_count) + if chunk.strip.empty? || chunk.include?('#') + output << chunk + else + pre_lines = chunk.lines.to_a + last_line = pre_lines.pop + output << pre_lines.join + + if last_line =~ /\#$/ + output << last_line.gsub(/\#$/, '') + else + if last_line.size < indent && result.inspect.size < indent + output << "%-#{indent}s %s" % [last_line.chomp, "# => #{result.inspect}\n"] + else + inspect_lines = result.pretty_inspect.lines + output << last_line << "# => #{inspect_lines[0]}" << inspect_lines[1..-1].map { |l| format '# %s', l }.join + end + end + end + line_count += lines + end + output + end + + def process_file(input_path) + output_path = input_path.gsub /\.in\.md$/, '.out.md' + input = File.read(input_path) + parts = input.split(/^(```\w*\n)/) + + # pp parts.map(&:lines) + + code_block = nil + line_count = 1 + + parts.each do |part| + if part =~ /^```(\w+)$/ + code_block = $1 + @output << part + line_count += 1 + next + end + + if part =~ /^```$/ + code_block = nil + @output << part + line_count += 1 + next + end + + if code_block == 'ruby' + @output << process_ruby(part, line_count) + line_count += part.lines.size + next + end + + @output << part + line_count += part.lines.size + end + + puts "#{input_path}\n -> #{output_path}" + File.write(output_path, @output) + rescue => ex + puts "#{ex} (#{ex.class})\n#{ex.backtrace * "\n"}" + + end +end + +input_paths = if ARGV.empty? 
+ Dir.glob("#{File.dirname(__FILE__)}/*.in.md") + else + ARGV + end.map { |p| File.expand_path p } + +input_paths.each_with_index do |input_path, i| + + pid = fork do + require_relative 'init.rb' + MDFormatter.new input_path, binding + end + + Process.wait pid +end diff --git a/doc/init.rb b/doc/init.rb new file mode 100644 index 000000000..4fdb8550e --- /dev/null +++ b/doc/init.rb @@ -0,0 +1,7 @@ +require 'concurrent-edge' + +def do_stuff + :stuff +end + +Concurrent.use_stdlib_logger Logger::DEBUG diff --git a/examples/promises.in.rb b/doc/promises.in.md similarity index 86% rename from examples/promises.in.rb rename to doc/promises.in.md index 5d09f4a72..801598378 100644 --- a/examples/promises.in.rb +++ b/doc/promises.in.md @@ -1,29 +1,57 @@ -# Adds factory methods like: future, event, delay, schedule, zip, ... -# otherwise they can be called on Promises module -include Concurrent::Promises::FactoryMethods # -# +# Promises Framework + +Promises is a new framework unifying former `Concurrent::Future`, `Concurrent::Promise`, `Concurrent::IVar`, +`Concurrent::Event`, `Concurrent.dataflow`, `Delay`, and `TimerTask`. It extensively uses the new +synchronization layer to make all the features **non-blocking** and +**lock-free**, with the exception of obviously blocking operations like +`#wait`, `#value`. As a result it lowers a danger of deadlocking and offers +better performance. + +## Overview + +There are two central classes ... TODO + +## Where does it execute? + +- TODO Explain `_on` `_using` suffixes. + +## Old examples follow +*TODO rewrite into md with examples* -### Simple asynchronous task +Adds factory methods like: future, event, delay, schedule, zip, etc. Otherwise +they can be called on Promises module. 
+```ruby +Concurrent::Promises::FactoryMethods.instance_methods false + +include Concurrent::Promises::FactoryMethods # +``` + +Simple asynchronous task: + +```ruby future = future(0.1) { |duration| sleep duration; :result } # evaluation starts immediately future.completed? # block until evaluated future.value future.completed? +``` +Failing asynchronous task -### Failing asynchronous task - +```ruby future = future { raise 'Boom' } future.value future.value! rescue $! future.reason # re-raising raise future rescue $! +``` +Direct creation of completed futures -### Direct creation of completed futures - +```ruby succeeded_future(Object.new) failed_future(StandardError.new("boom")) @@ -129,7 +157,7 @@ future.success 1 future.success 1 rescue $! -future.try_success 2 +future.success 2, false event.complete # The threads can be joined now @@ -158,7 +186,7 @@ # executed on :fast executor, only short and non-blocking tasks can go there future_on(:fast) { 2 }. # executed on executor for blocking and long operations - then_on(:io) { File.read __FILE__ }. + then_using(:io) { File.read __FILE__ }. wait @@ -288,3 +316,4 @@ def schedule_job(interval, &job) end # zip(*concurrent_jobs).value! +``` diff --git a/doc/promises.out.md b/doc/promises.out.md new file mode 100644 index 000000000..52642350a --- /dev/null +++ b/doc/promises.out.md @@ -0,0 +1,431 @@ +# Promises Framework + +Promises is a new framework unifying former `Concurrent::Future`, `Concurrent::Promise`, `Concurrent::IVar`, +`Concurrent::Event`, `Concurrent.dataflow`, `Delay`, and `TimerTask`. It extensively uses the new +synchronization layer to make all the features **non-blocking** and +**lock-free**, with the exception of obviously blocking operations like +`#wait`, `#value`. As a result it lowers a danger of deadlocking and offers +better performance. + +## Overview + +There are two central classes ... TODO + +## Where does it execute? + +- TODO Explain `_on` `_using` suffixes. 
+ +## Old examples follow + +*TODO rewrite into md with examples* + +Adds factory methods like: future, event, delay, schedule, zip, etc. Otherwise +they can be called on Promises module. + +```ruby +Concurrent::Promises::FactoryMethods.instance_methods false +# => [:completable_event, +# :completable_event_on, +# :completable_future, +# :completable_future_on, +# :future, +# :future_on, +# :completed_future, +# :succeeded_future, +# :failed_future, +# :completed_event, +# :delay, +# :delay_on, +# :schedule, +# :schedule_on, +# :zip_futures, +# :zip_futures_on, +# :zip, +# :zip_events, +# :zip_events_on, +# :any_complete_future, +# :any, +# :any_complete_future_on, +# :any_successful_future, +# :any_successful_future_on, +# :any_event, +# :any_event_on, +# :select] + +include Concurrent::Promises::FactoryMethods # +``` + +Simple asynchronous task: + +```ruby +future = future(0.1) { |duration| sleep duration; :result } # evaluation starts immediately +future.completed? # => false +# block until evaluated +future.value # => :result +future.completed? # => true +``` + +Failing asynchronous task + +```ruby +future = future { raise 'Boom' } +# => <#Concurrent::Promises::Future:0x7f90a7886578 pending blocks:[]> +future.value # => nil +future.value! rescue $! # => # +future.reason # => # +# re-raising +raise future rescue $! # => # +``` + +Direct creation of completed futures + +```ruby +succeeded_future(Object.new) +# => <#Concurrent::Promises::Future:0x7f90a699edd0 success blocks:[]> +failed_future(StandardError.new("boom")) +# => <#Concurrent::Promises::Future:0x7f90a699d408 failed blocks:[]> + +### Chaining of futures + +head = succeeded_future 1 # +branch1 = head.then(&:succ) # +branch2 = head.then(&:succ).then(&:succ) # +branch1.zip(branch2).value! # => [2, 3] +# zip is aliased as & +(branch1 & branch2).then { |a, b| a + b }.value! +# => 5 +(branch1 & branch2).then(&:+).value! 
# => 5 +# or a class method zip from FactoryMethods can be used to zip multiple futures +zip(branch1, branch2, branch1).then { |*values| values.reduce &:+ }.value! +# => 7 +# pick only first completed +any(branch1, branch2).value! # => 2 +(branch1 | branch2).value! # => 2 + + +### Arguments + +# any supplied arguments are passed to the block, promises ensure that they are visible to the block + +future('3') { |s| s.to_i }.then(2) { |a, b| a + b }.value +# => 5 +succeeded_future(1).then(2, &:+).value # => 3 +succeeded_future(1).chain(2) { |success, value, reason, arg| value + arg }.value +# => 3 + + +### Error handling + +succeeded_future(Object.new).then(&:succ).then(&:succ).rescue { |e| e.class }.value # error propagates +succeeded_future(Object.new).then(&:succ).rescue { 1 }.then(&:succ).value # rescued and replaced with 1 +succeeded_future(1).then(&:succ).rescue { |e| e.message }.then(&:succ).value # no error, rescue not applied + +failing_zip = succeeded_future(1) & failed_future(StandardError.new('boom')) +# => <#Concurrent::Promises::Future:0x7f90a6947918 failed blocks:[]> +failing_zip.result +# => [false, [1, nil], [nil, #]] +failing_zip.then { |v| 'never happens' }.result +# => [false, [1, nil], [nil, #]] +failing_zip.rescue { |a, b| (a || b).message }.value +# => "boom" +failing_zip.chain { |success, values, reasons| [success, values.compact, reasons.compactß] }.value +# => nil + + +### Delay + +# will not evaluate until asked by #value or other method requiring completion +future = delay { 'lazy' } +# => <#Concurrent::Promises::Future:0x7f90a690d718 pending blocks:[]> +sleep 0.1 # +future.completed? 
# => false +future.value # => "lazy" + +# propagates trough chain allowing whole or partial lazy chains + +head = delay { 1 } +# => <#Concurrent::Promises::Future:0x7f90a68edcb0 pending blocks:[]> +branch1 = head.then(&:succ) +# => <#Concurrent::Promises::Future:0x7f90a68d7460 pending blocks:[]> +branch2 = head.delay.then(&:succ) +# => <#Concurrent::Promises::Future:0x7f90a68d5368 pending blocks:[]> +join = branch1 & branch2 +# => <#Concurrent::Promises::Future:0x7f90a68b7e30 pending blocks:[]> + +sleep 0.1 # nothing will complete +[head, branch1, branch2, join].map(&:completed?) +# => [false, false, false, false] + +branch1.value # => 2 +sleep 0.1 # forces only head to complete, branch 2 stays incomplete +[head, branch1, branch2, join].map(&:completed?) +# => [true, true, false, false] + +join.value # => [2, 2] +[head, branch1, branch2, join].map(&:completed?) +# => [true, true, true, true] + + +### Flatting + +# waits for inner future, only the last call to value blocks thread +future { future { 1+1 } }.flat.value # => 2 + +# more complicated example +future { future { future { 1 + 1 } } }. + flat(1). + then { |f| f.then(&:succ) }. + flat(1).value # => 3 + + +### Schedule + +# it'll be executed after 0.1 seconds +scheduled = schedule(0.1) { 1 } +# => <#Concurrent::Promises::Future:0x7f90a4243ab0 pending blocks:[]> + +scheduled.completed? 
# => false +scheduled.value # available after 0.1sec + +# and in chain +scheduled = delay { 1 }.schedule(0.1).then(&:succ) +# => <#Concurrent::Promises::Future:0x7f90a4228d00 pending blocks:[]> +# will not be scheduled until value is requested +sleep 0.1 # +scheduled.value # returns after another 0.1sec + + +### Completable Future and Event + +future = completable_future +# => <#Concurrent::Promises::CompletableFuture:0x7f90a6075dd0 pending blocks:[]> +event = completable_event() +# => <#Concurrent::Promises::CompletableEvent:0x7f90a60741d8 pending blocks:[]> + +# These threads will be blocked until the future and event is completed +t1 = Thread.new { future.value } # +t2 = Thread.new { event.wait } # + +future.success 1 +# => <#Concurrent::Promises::CompletableFuture:0x7f90a6075dd0 success blocks:[]> +future.success 1 rescue $! +# => # +future.success 2, false # => false +event.complete +# => <#Concurrent::Promises::CompletableEvent:0x7f90a60741d8 success blocks:[]> + +# The threads can be joined now +[t1, t2].each &:join # + + +### Callbacks + +queue = Queue.new # => # +future = delay { 1 + 1 } +# => <#Concurrent::Promises::Future:0x7f90a4954f70 pending blocks:[]> + +future.on_success { queue << 1 } # evaluated asynchronously +future.on_success! { queue << 2 } # evaluated on completing thread + +queue.empty? # => true +future.value # => 2 +queue.pop # => 2 +queue.pop # => 1 + + +### Thread-pools + +# Factory methods are taking names of the global executors +# (ot instances of custom executors) + +# executed on :fast executor, only short and non-blocking tasks can go there +future_on(:fast) { 2 }. + # executed on executor for blocking and long operations + then_using(:io) { File.read __FILE__ }. + wait + + +### Interoperability with actors + +actor = Concurrent::Actor::Utils::AdHoc.spawn :square do + -> v { v ** 2 } +end +# => # + + +future { 2 }. + then_ask(actor). + then { |v| v + 2 }. 
+ value # => 6 + +actor.ask(2).then(&:succ).value # => 5 + + +### Interoperability with channels + +ch1 = Concurrent::Channel.new +# => #, +# @__lock__=#, +# @buffer=nil, +# @capacity=1, +# @closed=false, +# @putting=[], +# @size=0, +# @taking=[]>, +# @validator= +# #> +ch2 = Concurrent::Channel.new +# => #, +# @__lock__=#, +# @buffer=nil, +# @capacity=1, +# @closed=false, +# @putting=[], +# @size=0, +# @taking=[]>, +# @validator= +# #> + +result = select(ch1, ch2) +# => <#Concurrent::Promises::Future:0x7f90a4180a60 pending blocks:[]> +ch1.put 1 # => true +result.value! +# => [1, +# #, +# @__lock__=#, +# @buffer=nil, +# @capacity=1, +# @closed=false, +# @putting=[], +# @size=0, +# @taking=[]>, +# @validator= +# #>] + + +future { 1+1 }. + then_put(ch1) +# => <#Concurrent::Promises::Future:0x7f90a6064918 pending blocks:[]> +result = future { '%02d' }. + then_select(ch1, ch2). + then { |format, (value, channel)| format format, value } +# => <#Concurrent::Promises::Future:0x7f90a4142cb0 pending blocks:[]> +result.value! 
# => "02" + + +### Common use-cases Examples + +# simple background processing +future { do_stuff } +# => <#Concurrent::Promises::Future:0x7f90a4129a08 pending blocks:[]> + +# parallel background processing +jobs = 10.times.map { |i| future { i } } # +zip(*jobs).value # => [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + + +# periodic task +def schedule_job(interval, &job) + # schedule the first execution and chain restart og the job + Concurrent.schedule(interval, &job).chain do |success, continue, reason| + if success + schedule_job(interval, &job) if continue + else + # handle error + p reason + # retry + schedule_job(interval, &job) + end + end +end + +queue = Queue.new # => # +count = 0 # => 0 + +schedule_job 0.05 do + queue.push count + count += 1 + # to continue scheduling return true, false will end the task + if count < 4 + # to continue scheduling return true + true + else + queue.push nil + # to end the task return false + false + end +end + +# read the queue +arr, v = [], nil; arr << v while (v = queue.pop) # +arr # => [0, 1, 2, 3] + +# How to limit processing where there are limited resources? +# By creating an actor managing the resource +DB = Concurrent::Actor::Utils::AdHoc.spawn :db do + data = Array.new(10) { |i| '*' * i } + lambda do |message| + # pretending that this queries a DB + data[message] + end +end + +concurrent_jobs = 11.times.map do |v| + + succeeded_future(v). + # ask the DB with the `v`, only one at the time, rest is parallel + then_ask(DB). + # get size of the string, fails for 11 + then(&:size). + rescue { |reason| reason.message } # translate error to value (exception, message) +end # + +zip(*concurrent_jobs).value! 
+# => [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, "undefined method `size' for nil:NilClass"] + + +# In reality there is often a pool though: +data = Array.new(10) { |i| '*' * i } +# => ["", +# "*", +# "**", +# "***", +# "****", +# "*****", +# "******", +# "*******", +# "********", +# "*********"] +pool_size = 5 # => 5 + +DB_POOL = Concurrent::Actor::Utils::Pool.spawn!('DB-pool', pool_size) do |index| + # DB connection constructor + Concurrent::Actor::Utils::AdHoc.spawn(name: "worker-#{index}", args: [data]) do |data| + lambda do |message| + # pretending that this queries a DB + data[message] + end + end +end + +concurrent_jobs = 11.times.map do |v| + + succeeded_future(v). + # ask the DB_POOL with the `v`, only 5 at the time, rest is parallel + then_ask(DB_POOL). + then(&:size). + rescue { |reason| reason.message } +end # + +zip(*concurrent_jobs).value! +# => [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, "undefined method `size' for nil:NilClass"] +``` diff --git a/examples/promises.out.rb b/examples/promises.out.rb deleted file mode 100644 index 896fd6694..000000000 --- a/examples/promises.out.rb +++ /dev/null @@ -1,334 +0,0 @@ -# Adds factory methods like: future, event, delay, schedule, zip, ... -# otherwise they can be called on Promises module -include Concurrent::Promises::FactoryMethods - - -### Simple asynchronous task - -future = future(0.1) { |duration| sleep duration; :result } # evaluation starts immediately - # => <#Concurrent::Promises::Future:0x7fa602198e30 pending blocks:[]> -future.completed? # => false -# block until evaluated -future.value # => :result -future.completed? # => true - - -### Failing asynchronous task - -future = future { raise 'Boom' } - # => <#Concurrent::Promises::Future:0x7fa602188a58 pending blocks:[]> -future.value # => nil -future.value! rescue $! # => # -future.reason # => # -# re-raising -raise future rescue $! 
# => # - - -### Direct creation of completed futures - -succeeded_future(Object.new) - # => <#Concurrent::Promises::Future:0x7fa60217bf10 success blocks:[]> -failed_future(StandardError.new("boom")) - # => <#Concurrent::Promises::Future:0x7fa60217aa48 failed blocks:[]> - -### Chaining of futures - -head = succeeded_future 1 -branch1 = head.then(&:succ) -branch2 = head.then(&:succ).then(&:succ) -branch1.zip(branch2).value! # => [2, 3] -# zip is aliased as & -(branch1 & branch2).then { |a, b| a + b }.value! # => 5 -(branch1 & branch2).then(&:+).value! # => 5 -# or a class method zip from FactoryMethods can be used to zip multiple futures -zip(branch1, branch2, branch1).then { |*values| values.reduce &:+ }.value! - # => 7 -# pick only first completed -any(branch1, branch2).value! # => 2 -(branch1 | branch2).value! # => 2 - - -### Arguments - -# any supplied arguments are passed to the block, promises ensure that they are visible to the block - -future('3') { |s| s.to_i }.then(2) { |a, b| a + b }.value - # => 5 -succeeded_future(1).then(2, &:+).value # => 3 -succeeded_future(1).chain(2) { |success, value, reason, arg| value + arg }.value - # => 3 - - -### Error handling - -succeeded_future(Object.new).then(&:succ).then(&:succ).rescue { |e| e.class }.value # error propagates - # => NoMethodError -succeeded_future(Object.new).then(&:succ).rescue { 1 }.then(&:succ).value # rescued and replaced with 1 - # => 2 -succeeded_future(1).then(&:succ).rescue { |e| e.message }.then(&:succ).value # no error, rescue not applied - # => 3 - -failing_zip = succeeded_future(1) & failed_future(StandardError.new('boom')) - # => <#Concurrent::Promises::Future:0x7fa602129300 failed blocks:[]> -failing_zip.result # => [false, [1, nil], [nil, #]] -failing_zip.then { |v| 'never happens' }.result # => [false, [1, nil], [nil, #]] -failing_zip.rescue { |a, b| (a || b).message }.value - # => "boom" -failing_zip.chain { |success, values, reasons| [success, values.compact, reasons.compactß] }.value - 
# => nil - - -### Delay - -# will not evaluate until asked by #value or other method requiring completion -future = delay { 'lazy' } - # => <#Concurrent::Promises::Future:0x7fa60211a490 pending blocks:[]> -sleep 0.1 -future.completed? # => false -future.value # => "lazy" - -# propagates trough chain allowing whole or partial lazy chains - -head = delay { 1 } - # => <#Concurrent::Promises::Future:0x7fa602113898 pending blocks:[]> -branch1 = head.then(&:succ) - # => <#Concurrent::Promises::Future:0x7fa602112a60 pending blocks:[]> -branch2 = head.delay.then(&:succ) - # => <#Concurrent::Promises::Future:0x7fa602111188 pending blocks:[]> -join = branch1 & branch2 - # => <#Concurrent::Promises::Future:0x7fa6021102b0 pending blocks:[]> - -sleep 0.1 # nothing will complete # => 0 -[head, branch1, branch2, join].map(&:completed?) # => [false, false, false, false] - -branch1.value # => 2 -sleep 0.1 # forces only head to complete, branch 2 stays incomplete - # => 1 -[head, branch1, branch2, join].map(&:completed?) # => [true, true, false, false] - -join.value # => [2, 2] -[head, branch1, branch2, join].map(&:completed?) # => [true, true, true, true] - - -### Flatting - -# waits for inner future, only the last call to value blocks thread -future { future { 1+1 } }.flat.value # => 2 - -# more complicated example -future { future { future { 1 + 1 } } }. - flat(1). - then { |f| f.then(&:succ) }. - flat(1).value # => 3 - - -### Schedule - -# it'll be executed after 0.1 seconds -scheduled = schedule(0.1) { 1 } - # => <#Concurrent::Promises::Future:0x7fa60288a810 pending blocks:[]> - -scheduled.completed? 
# => false -scheduled.value # available after 0.1sec # => 1 - -# and in chain -scheduled = delay { 1 }.schedule(0.1).then(&:succ) - # => <#Concurrent::Promises::Future:0x7fa603101638 pending blocks:[]> -# will not be scheduled until value is requested -sleep 0.1 -scheduled.value # returns after another 0.1sec # => 2 - - -### Completable Future and Event - -future = completable_future - # => <#Concurrent::Promises::CompletableFuture:0x7fa6030e0ac8 pending blocks:[]> -event = completable_event() - # => <#Concurrent::Promises::CompletableEvent:0x7fa6030e0a78 pending blocks:[]> - -# These threads will be blocked until the future and event is completed -t1 = Thread.new { future.value } -t2 = Thread.new { event.wait } - -future.success 1 - # => <#Concurrent::Promises::CompletableFuture:0x7fa6030e0ac8 success blocks:[]> -future.success 1 rescue $! - # => # -future.try_success 2 # => false -event.complete - # => <#Concurrent::Promises::CompletableEvent:0x7fa6030e0a78 success blocks:[]> - -# The threads can be joined now -[t1, t2].each &:join - - -### Callbacks - -queue = Queue.new # => # -future = delay { 1 + 1 } - # => <#Concurrent::Promises::Future:0x7fa6030aa888 pending blocks:[]> - -future.on_success { queue << 1 } # evaluated asynchronously - # => <#Concurrent::Promises::Future:0x7fa6030aa888 pending blocks:[]> -future.on_success! { queue << 2 } # evaluated on completing thread - # => <#Concurrent::Promises::Future:0x7fa6030aa888 pending blocks:[]> - -queue.empty? # => true -future.value # => 2 -queue.pop # => 2 -queue.pop # => 1 - - -### Thread-pools - -# Factory methods are taking names of the global executors -# (ot instances of custom executors) - -# executed on :fast executor, only short and non-blocking tasks can go there -future_on(:fast) { 2 }. - # executed on executor for blocking and long operations - then_on(:io) { File.read __FILE__ }. 
- wait - # => <#Concurrent::Promises::Future:0x7fa60307abb0 success blocks:[]> - - -### Interoperability with actors - -actor = Concurrent::Actor::Utils::AdHoc.spawn :square do - -> v { v ** 2 } -end - # => # - - -future { 2 }. - then_ask(actor). - then { |v| v + 2 }. - value # => 6 - -actor.ask(2).then(&:succ).value # => 5 - - -### Interoperability with channels - -ch1 = Concurrent::Channel.new - # => #, @__condition__=#, @closed=false, @size=0, @capacity=1, @buffer=nil, @putting=[], @taking=[]>, @validator=#> -ch2 = Concurrent::Channel.new - # => #, @__condition__=#, @closed=false, @size=0, @capacity=1, @buffer=nil, @putting=[], @taking=[]>, @validator=#> - -result = select(ch1, ch2) - # => <#Concurrent::Promises::Future:0x7fa601a62d50 pending blocks:[]> -ch1.put 1 # => true -result.value! - # => [1, #, @__condition__=#, @closed=false, @size=0, @capacity=1, @buffer=nil, @putting=[], @taking=[]>, @validator=#>] - - -future { 1+1 }. - then_put(ch1) - # => <#Concurrent::Promises::Future:0x7fa601a51910 pending blocks:[]> -result = future { '%02d' }. - then_select(ch1, ch2). - then { |format, (value, channel)| format format, value } - # => <#Concurrent::Promises::Future:0x7fa601a2b008 pending blocks:[]> -result.value! 
# => "02" - - -### Common use-cases Examples - -# simple background processing -future { do_stuff } - # => <#Concurrent::Promises::Future:0x7fa601a1b478 pending blocks:[]> - -# parallel background processing -jobs = 10.times.map { |i| future { i } } -zip(*jobs).value # => [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] - - -# periodic task -def schedule_job(interval, &job) - # schedule the first execution and chain restart og the job - Concurrent.schedule(interval, &job).chain do |success, continue, reason| - if success - schedule_job(interval, &job) if continue - else - # handle error - p reason - # retry - schedule_job(interval, &job) - end - end -end # => :schedule_job - -queue = Queue.new # => # -count = 0 # => 0 - -schedule_job 0.05 do - queue.push count - count += 1 - # to continue scheduling return true, false will end the task - if count < 4 - # to continue scheduling return true - true - else - queue.push nil - # to end the task return false - false - end -end - # => <#Concurrent::Promises::Future:0x7fa6020c23d0 pending blocks:[]> - -# read the queue -arr, v = [], nil; arr << v while (v = queue.pop) -arr # => [0, 1, 2, 3] - -# How to limit processing where there are limited resources? -# By creating an actor managing the resource -DB = Concurrent::Actor::Utils::AdHoc.spawn :db do - data = Array.new(10) { |i| '*' * i } - lambda do |message| - # pretending that this queries a DB - data[message] - end -end - # => # - -concurrent_jobs = 11.times.map do |v| - - succeeded_future(v). - # ask the DB with the `v`, only one at the time, rest is parallel - then_ask(DB). - # get size of the string, fails for 11 - then(&:size). - rescue { |reason| reason.message } # translate error to value (exception, message) -end - -zip(*concurrent_jobs).value! 
- # => [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, "undefined method `size' for nil:NilClass"] - - -# In reality there is often a pool though: -data = Array.new(10) { |i| '*' * i } - # => ["", "*", "**", "***", "****", "*****", "******", "*******", "********", "*********"] -pool_size = 5 # => 5 - -DB_POOL = Concurrent::Actor::Utils::Pool.spawn!('DB-pool', pool_size) do |index| - # DB connection constructor - Concurrent::Actor::Utils::AdHoc.spawn(name: "worker-#{index}", args: [data]) do |data| - lambda do |message| - # pretending that this queries a DB - data[message] - end - end -end - # => # - -concurrent_jobs = 11.times.map do |v| - - succeeded_future(v). - # ask the DB_POOL with the `v`, only 5 at the time, rest is parallel - then_ask(DB_POOL). - then(&:size). - rescue { |reason| reason.message } -end - -zip(*concurrent_jobs).value! - # => [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, "undefined method `size' for nil:NilClass"] diff --git a/lib/concurrent/edge/promises.rb b/lib/concurrent/edge/promises.rb index 69b19874a..0b280c7c0 100644 --- a/lib/concurrent/edge/promises.rb +++ b/lib/concurrent/edge/promises.rb @@ -7,149 +7,251 @@ module Concurrent - # # Promises Framework - # - # Unified implementation of futures and promises which combines features of previous `Future`, - # `Promise`, `IVar`, `Event`, `dataflow`, `Delay`, and `TimerTask` into a single framework. It extensively uses the - # new synchronization layer to make all the features **non-blocking** and **lock-free**, with the exception of obviously blocking - # operations like `#wait`, `#value`. It also offers better performance. - # - # ## Examples - # {include:file:examples/promises.out.rb} + # {include:file:doc/promises.out.md} module Promises + # @!macro [new] promises.param.default_executor + # @param [Executor, :io, :fast] default_executor Instance of an executor or a name of the + # global executor. 
Default executor propagates to chained futures unless overridden with + # executor parameter or changed with {AbstractEventFuture#with_default_executor}. + # + # @!macro [new] promises.param.executor + # @param [Executor, :io, :fast] executor Instance of an executor or a name of the + # global executor. The task is executed on it, default executor remains unchanged. + # + # @!macro [new] promises.param.args + # @param [Object] args arguments which are passed to the task when it's executed. + # (It might be prepended with other arguments, see the @yield section). + # + # @!macro [new] promises.shortcut.on + # Shortcut of {#$0_on} with default `:io` executor supplied. + # @see #$0_on + # + # @!macro [new] promises.shortcut.using + # Shortcut of {#$0_using} with default `:io` executor supplied. + # @see #$0_using + # + # @!macro [new] promise.param.task-future + # @yieldreturn will become result of the returned Future. + # Its returned value becomes {Future#value} succeeding, + # raised exception becomes {Future#reason} failing. + # + # @!macro [new] promise.param.callback + # @yieldreturn is forgotten. + + # Container of all {Future}, {Event} factory methods. They are never constructed directly with + # new. module FactoryMethods - # User is responsible for completing the event once by {Promises::CompletableEvent#complete} + + + # @!macro promises.shortcut.on # @return [CompletableEvent] - def completable_event(default_executor = :io) + def completable_event + completable_event_on :io + end + + # Creates completable event, user is responsible for completing the event once by + # {Promises::CompletableEvent#complete}. + # + # @!macro promises.param.default_executor + # @return [CompletableEvent] + def completable_event_on(default_executor = :io) CompletableEventPromise.new(default_executor).future end - # Constructs new Future which will be completed after block is evaluated on executor. Evaluation begins immediately. 
+ # @!macro promises.shortcut.on + # @return [CompletableFuture] + def completable_future + completable_future_on :io + end + + # Creates completable future, user is responsible for completing the future once by + # {Promises::CompletableFuture#complete}, {Promises::CompletableFuture#success}, + # or {Promises::CompletableFuture#fail} + # + # @!macro promises.param.default_executor + # @return [CompletableFuture] + def completable_future_on(default_executor = :io) + CompletableFuturePromise.new(default_executor).future + end + + # @!macro promises.shortcut.on # @return [Future] def future(*args, &task) future_on(:io, *args, &task) end - # As {#future} but takes default_executor as first argument + # @!macro [new] promises.future-on1 + # Constructs new Future which will be completed after block is evaluated on default executor. + # Evaluation begins immediately. + # + # @!macro [new] promises.future-on2 + # @!macro promises.param.default_executor + # @!macro promises.param.args + # @yield [*args] to the task. + # @!macro promise.param.task-future + # @return [Future] def future_on(default_executor, *args, &task) ImmediateEventPromise.new(default_executor).future.then(*args, &task) end - # User is responsible for completing the future once by {Promises::CompletableFuture#success} or {Promises::CompletableFuture#fail} - # @return [CompletableFuture] - def completable_future(default_executor = :io) - CompletableFuturePromise.new(default_executor).future - end - - # @return [Future] which is already completed + # Creates completed future which will be either success with the given value or failed with + # the given reason. 
+ # + # @!macro promises.param.default_executor + # @return [Future] def completed_future(success, value, reason, default_executor = :io) ImmediateFuturePromise.new(default_executor, success, value, reason).future end - # @return [Future] which is already completed in success state with value + # Creates completed future which will be success with the given value. + # + # @!macro promises.param.default_executor + # @return [Future] def succeeded_future(value, default_executor = :io) completed_future true, value, nil, default_executor end - # @return [Future] which is already completed in failed state with reason + # Creates completed future which will be failed with the given reason. + # + # @!macro promises.param.default_executor + # @return [Future] def failed_future(reason, default_executor = :io) completed_future false, nil, reason, default_executor end - # @return [Event] which is already completed + # Creates completed event. + # + # @!macro promises.param.default_executor + # @return [Event] def completed_event(default_executor = :io) ImmediateEventPromise.new(default_executor).event end - # Constructs new Future which will evaluate to the block after - # requested by calling `#wait`, `#value`, `#value!`, etc. on it or on any of the chained futures. + # @!macro promises.shortcut.on # @return [Future] def delay(*args, &task) delay_on :io, *args, &task end - # As {#delay} but takes default_executor as first argument + # @!macro promises.future-on1 + # The task will be evaluated only after the future is touched, see {AbstractEventFuture#touch} + # + # @!macro promises.future-on2 def delay_on(default_executor, *args, &task) DelayPromise.new(default_executor).future.then(*args, &task) end - # Schedules the block to be executed on executor in given intended_time. - # @param [Numeric, Time] intended_time Numeric => run in `intended_time` seconds. Time => eun on time. 
+ # @!macro promises.shortcut.on # @return [Future] def schedule(intended_time, *args, &task) schedule_on :io, intended_time, *args, &task end - # As {#schedule} but takes default_executor as first argument + # @!macro promises.future-on1 + # The task is planned for execution in intended_time. + # + # @!macro promises.future-on2 + # @!macro [new] promises.param.intended_time + # @param [Numeric, Time] intended_time `Numeric` means to run in `intended_time` seconds. + # `Time` means to run on `intended_time`. def schedule_on(default_executor, intended_time, *args, &task) ScheduledPromise.new(default_executor, intended_time).future.then(*args, &task) end - # Constructs new {Future} which is completed after all futures_and_or_events are complete. Its value is array - # of dependent future values. If there is an error it fails with the first one. Event does not - # have a value so it's represented by nil in the array of values. - # @param [Event] futures_and_or_events + # @!macro promises.shortcut.on # @return [Future] def zip_futures(*futures_and_or_events) zip_futures_on :io, *futures_and_or_events end - # As {#zip_futures} but takes default_executor as first argument + # Creates new future which is completed after all futures_and_or_events are complete. + # Its value is array of zipped future values. Its reason is array of reasons for failure. + # If there is an error it fails. + # @!macro [new] promises.event-conversion + # If event is supplied, which does not have value and can be only completed, it's + # represented as `:success` with value `nil`. 
+ # + # @!macro promises.param.default_executor + # @param [AbstractEventFuture] futures_and_or_events + # @return [Future] def zip_futures_on(default_executor, *futures_and_or_events) ZipFuturesPromise.new(futures_and_or_events, default_executor).future end alias_method :zip, :zip_futures - # Constructs new {Event} which is completed after all futures_and_or_events are complete - # (Future is completed when Success or Failed). - # @param [Event] futures_and_or_events + # @!macro promises.shortcut.on # @return [Event] def zip_events(*futures_and_or_events) zip_events_on :io, *futures_and_or_events end - # As {#zip_events} but takes default_executor as first argument + # Creates new event which is completed after all futures_and_or_events are complete. + # (Future is complete when successful or failed.) + # + # @!macro promises.param.default_executor + # @param [AbstractEventFuture] futures_and_or_events + # @return [Event] def zip_events_on(default_executor, *futures_and_or_events) ZipEventsPromise.new(futures_and_or_events, default_executor).future end - # Constructs new {Future} which is completed after first of the futures is complete. - # @param [Event] futures + # @!macro promises.shortcut.on # @return [Future] - def any_complete_future(*futures) - any_complete_future_on :io, *futures - end - - # As {#any_complete_future} but takes default_executor as first argument - def any_complete_future_on(default_executor, *futures) - AnyCompleteFuturePromise.new(futures, default_executor).future + def any_complete_future(*futures_and_or_events) + any_complete_future_on :io, *futures_and_or_events end alias_method :any, :any_complete_future - # Constructs new {Future} which becomes succeeded after first of the futures succeedes or - # failed if all futures fail (reason is last error). - # @param [Event] futures + # Creates new future which is completed after first futures_and_or_events is complete. + # Its result equals result of the first complete future. 
+ # @!macro [new] promises.any-touch + # If complete it does not propagate {AbstractEventFuture#touch}, leaving delayed + # futures un-executed if they are not required any more. + # @!macro promises.event-conversion + # + # @!macro promises.param.default_executor + # @param [AbstractEventFuture] futures_and_or_events # @return [Future] - def any_successful_future(*futures) - any_successful_future_on :io, *futures + def any_complete_future_on(default_executor, *futures_and_or_events) + AnyCompleteFuturePromise.new(futures_and_or_events, default_executor).future end - # As {#any_succesful_future} but takes default_executor as first argument - def any_successful_future_on(default_executor, *futures) - AnySuccessfulFuturePromise.new(futures, default_executor).future + # @!macro promises.shortcut.on + # @return [Future] + def any_successful_future(*futures_and_or_events) + any_successful_future_on :io, *futures_and_or_events + end + + # Creates new future which is completed after first of futures_and_or_events is successful. + # Its result equals result of the first complete future or if all futures_and_or_events fail, + # it has reason of the last completed future. + # @!macro promises.any-touch + # @!macro promises.event-conversion + # + # @!macro promises.param.default_executor + # @param [AbstractEventFuture] futures_and_or_events + # @return [Future] + def any_successful_future_on(default_executor, *futures_and_or_events) + AnySuccessfulFuturePromise.new(futures_and_or_events, default_executor).future end - # Constructs new {Event} which becomes complete after first if the events completes. 
- def any_event(*events) - any_event_on :io, *events + # @!macro promises.shortcut.on + # @return [Future] + def any_event(*futures_and_or_events) + any_event_on :io, *futures_and_or_events end - # As {#any_event} but takes default_executor as first argument - def any_event_on(default_executor, *events) - AnyCompleteEventPromise.new(events, default_executor).event + # Creates new event which becomes complete after first of the futures_and_or_events completes. + # @!macro promises.any-touch + # + # @!macro promises.param.default_executor + # @param [AbstractEventFuture] futures_and_or_events + # @return [Event] + def any_event_on(default_executor, *futures_and_or_events) + AnyCompleteEventPromise.new(futures_and_or_events, default_executor).event end # TODO consider adding first(count, *futures) @@ -319,12 +421,9 @@ def apply(args, block) private_constant :InternalStates - # Represents an event which will happen in future (will be completed). It has to always happen. - class Event < Synchronization::Object + class AbstractEventFuture < Synchronization::Object safe_initialization! - private(*attr_atomic(:internal_state)) - # @!visibility private - public :internal_state + private(*attr_atomic(:internal_state) - [:internal_state]) include Concern::Logging include InternalStates @@ -343,138 +442,179 @@ def initialize(promise, default_executor) self.internal_state = PENDING end - # @return [:pending, :completed] + private :initialize + + # @!macro [new] promises.shortcut.event-future + # @see Event#$0 + # @see Future#$0 + + # @!macro [new] promises.param.timeout + # @param [Numeric] timeout the maximum time in second to wait. + + # @!macro [new] promises.warn.blocks + # @note This function potentially blocks current thread until the Future is complete. + # Be careful it can deadlock. Try to chain instead. + + # Returns its state. 
+ # @return [Symbol] + # + # @overload an_event.state + # @return [:pending, :completed] + # @overload a_future.state + # Both :success, :failed implies :completed. + # @return [:pending, :success, :failed] def state internal_state.to_sym end - # Is Event/Future pending? + # Is it in pending state? # @return [Boolean] def pending?(state = internal_state) !state.completed? end - def unscheduled? - raise 'unsupported' - end - - alias_method :incomplete?, :pending? - - # Has the Event been completed? + # Is it in completed state? # @return [Boolean] def completed?(state = internal_state) state.completed? end - alias_method :complete?, :completed? - - # Wait until Event is #complete? - # @param [Numeric] timeout the maximum time in second to wait. - # @return [Event, true, false] self or true/false if timeout is used - # @!macro [attach] edge.periodical_wait - # @note a thread should wait only once! For repeated checking use faster `completed?` check. - # If thread waits periodically it will dangerously grow the waiters stack. - def wait(timeout = nil) - touch - result = wait_until_complete(timeout) - timeout ? result : self + # @deprecated + def unscheduled? + raise 'unsupported' end - # @!visibility private + # Propagates touch. Requests all the delayed futures, which it depends on, to be + # executed. This method is called by any other method requiring completeness, like {#wait}. + # @return [self] def touch # distribute touch to promise only once @Promise.touch if @Touched.make_true self end - # @return [Executor] current default executor + alias_method :needed, :touch + + # @!macro [new] promises.touches + # Calls {AbstractEventFuture#touch}. + + # @!macro [new] promises.method.wait + # Wait (block the Thread) until receiver is {#completed?}. 
+ # @!macro promises.touches + # + # @!macro promises.warn.blocks + # @!macro promises.param.timeout + # @return [Future, true, false] self implies timeout was not used, true implies timeout was used + # and it was completed, false implies it was not completed within timeout. + def wait(timeout = nil) + touch + result = wait_until_complete(timeout) + timeout ? result : self + end + + # Returns default executor. + # @return [Executor] default executor # @see #with_default_executor + # @see FactoryMethods#future_on + # @see FactoryMethods#completable_future + # @see FactoryMethods#any_successful_future_on + # @see similar def default_executor @DefaultExecutor end - # @yield [success, value, reason] of the parent - def chain(*args, &callback) - chain_on @DefaultExecutor, *args, &callback - end - - def chain_on(executor, *args, &callback) - ChainPromise.new(self, @DefaultExecutor, executor, args, &callback).future + # @!macro promises.shortcut.using + # @return [Future] + def chain(*args, &task) + chain_using @DefaultExecutor, *args, &task end - alias_method :then, :chain - - def chain_completable(completable_event) - on_completion! { completable_event.complete_with COMPLETED } + # Chains the task to be executed asynchronously on executor after it is completed. + # + # @!macro promises.param.executor + # @!macro promises.param.args + # @return [Future] + # @!macro promise.param.task-future + # + # @overload an_event.chain_using(executor, *args, &task) + # @yield [*args] to the task. + # @overload a_future.chain_using(executor, *args, &task) + # @yield [success, value, reason, *args] to the task. + def chain_using(executor, *args, &task) + ChainPromise.new(self, @DefaultExecutor, executor, args, &task).future + end + + # Short string representation. 
+ # @return [String] + def to_s + "<##{self.class}:0x#{'%x' % (object_id << 1)} #{state.to_sym}>" end - alias_method :tangle, :chain_completable - - # Zip with future producing new Future - # @return [Event] - def zip(other) - if other.is?(Future) - ZipFutureEventPromise.new(other, self, @DefaultExecutor).future - else - ZipEventEventPromise.new(self, other, @DefaultExecutor).event - end + # Longer string representation. + # @return [String] + def inspect + "#{to_s[0..-2]} blocks:[#{blocks.map(&:to_s).join(', ')}]>" end - alias_method :&, :zip - - def any(future) - AnyCompleteEventPromise.new([self, future], @DefaultExecutor).event + # @deprecated + def set(*args, &block) + raise 'Use CompletableEvent#complete or CompletableFuture#complete instead, ' + + 'constructed by Promises.completable_event or Promises.completable_future respectively.' end - alias_method :|, :any - - # Inserts delay into the chain of Futures making rest of it lazy evaluated. - # @return [Event] - def delay - ZipEventEventPromise.new(self, DelayPromise.new(@DefaultExecutor).event, @DefaultExecutor).event + # Completes the completable when receiver is completed. + # + # @param [Completable] completable + # @return [self] + def chain_completable(completable) + on_completion! 
{ completable.complete_with internal_state } end - # Schedules rest of the chain for execution with specified time or on specified time - # @return [Event] - def schedule(intended_time) - ZipEventEventPromise.new(self, - ScheduledPromise.new(@DefaultExecutor, intended_time).event, - @DefaultExecutor).event - end + alias_method :tangle, :chain_completable - # @yield [success, value, reason, *args] executed async on `executor` when completed - # @return self + # @!macro promises.shortcut.using + # @return [self] def on_completion(*args, &callback) on_completion_using @DefaultExecutor, *args, &callback end - def on_completion_using(executor, *args, &callback) - add_callback :async_callback_on_completion, executor, args, callback - end - - # @yield [success, value, reason, *args] executed sync when completed - # @return self + # Stores the callback to be executed synchronously on completing thread after it is + # completed. + # + # @!macro promises.param.args + # @!macro promise.param.callback + # @return [self] + # + # @overload an_event.on_completion!(*args, &callback) + # @yield [*args] to the callback. + # @overload a_future.on_completion!(*args, &callback) + # @yield [success, value, reason, *args] to the callback. def on_completion!(*args, &callback) add_callback :callback_on_completion, args, callback end - # Changes default executor for rest of the chain - # @return [Event] - def with_default_executor(executor) - EventWrapperPromise.new(self, executor).future - end - - def to_s - "<##{self.class}:0x#{'%x' % (object_id << 1)} #{state.to_sym}>" - end - - def inspect - "#{to_s[0..-2]} blocks:[#{blocks.map(&:to_s).join(', ')}]>" + # Stores the callback to be executed asynchronously on executor after it is completed. + # + # @!macro promises.param.executor + # @!macro promises.param.args + # @!macro promise.param.callback + # @return [self] + # + # @overload an_event.on_completion_using(executor, *args, &callback) + # @yield [*args] to the callback. 
+ # @overload a_future.on_completion_using(executor, *args, &callback) + # @yield [success, value, reason, *args] to the callback. + def on_completion_using(executor, *args, &callback) + add_callback :async_callback_on_completion, executor, args, callback end - def set(*args, &block) - raise 'Use CompletableEvent#complete or CompletableFuture#complete instead, ' + - 'constructed by Concurrent.event or Concurrent.future respectively.' + # @!macro [new] promises.method.with_default_executor + # Creates new object of the same class with the executor set as its new default executor. + # Any futures depending on it will use the new default executor. + # @!macro promises.shortcut.event-future + # @abstract + def with_default_executor(executor) + raise NotImplementedError end # @!visibility private @@ -482,16 +622,15 @@ def complete_with(state, raise_on_reassign = true) if compare_and_set_internal_state(PENDING, state) # go to synchronized block only if there were waiting threads @Lock.synchronize { @Condition.broadcast } unless @Waiters.value == 0 - call_callbacks + call_callbacks state else - Concurrent::MultipleAssignmentError.new('Event can be completed only once') if raise_on_reassign - return nil + return failed_complete(raise_on_reassign, state) end self end + # For inspection. # @!visibility private - # just for inspection # @return [Array] def blocks @Callbacks.each_with_object([]) do |callback, promises| @@ -499,44 +638,47 @@ def blocks end end + # For inspection. # @!visibility private - # just for inspection def callbacks @Callbacks.each.to_a end + # For inspection. # @!visibility private - def add_callback(method, *args) - if completed? - call_callback method, *args - else - @Callbacks.push [method, *args] - call_callbacks if completed? - end - self - end - - # @!visibility private - # only for inspection def promise @Promise end + # For inspection. # @!visibility private - # only for inspection def touched @Touched.value end + # For inspection. 
# @!visibility private - # only for debugging inspection def waiting_threads @Waiters.each.to_a end + # @!visibility private + def add_callback(method, *args) + state = internal_state + if completed?(state) + call_callback method, state, *args + else + @Callbacks.push [method, *args] + state = internal_state + # take back if it was completed in the meanwhile + call_callbacks state if completed?(state) + end + self + end + private - # @return [true, false] + # @return [Boolean] def wait_until_complete(timeout) return true if completed? @@ -553,107 +695,197 @@ def wait_until_complete(timeout) completed? end + def call_callback(method, state, *args) + self.send method, state, *args + end + + def call_callbacks(state) + method, *args = @Callbacks.pop + while method + call_callback method, state, *args + method, *args = @Callbacks.pop + end + end + def with_async(executor, *args, &block) Concurrent.executor(executor).post(*args, &block) end - def async_callback_on_completion(executor, args, callback) - with_async(executor) { callback_on_completion args, callback } + def async_callback_on_completion(state, executor, args, callback) + with_async(executor, state, args, callback) do |st, ar, cb| + callback_on_completion st, ar, cb + end end - def callback_on_completion(args, callback) - callback.call *args + def callback_notify_blocked(state, promise) + promise.on_done self end + end - def callback_notify_blocked(promise) - promise.on_done self + # Represents an event which will happen in future (will be completed). The event is either + # pending or completed. It should be always completed. Use {Future} to communicate failures and + # cancellation. + class Event < AbstractEventFuture + + alias_method :then, :chain + + + # @!macro [new] promises.method.zip + # Creates a new event or a future which will be completed when receiver and other are. + # Returns an event if receiver and other are events, otherwise returns a future. 
+ # If just one of the parties is Future then the result + # of the returned future is equal to the result of the supplied future. If both are futures + # then the result is as described in {FactoryMethods#zip_futures_on}. + # + # @return [Future, Event] + def zip(other) + if other.is?(Future) + ZipFutureEventPromise.new(other, self, @DefaultExecutor).future + else + ZipEventEventPromise.new(self, other, @DefaultExecutor).event + end end - def call_callback(method, *args) - self.send method, *args + alias_method :&, :zip + + # Creates a new event which will be completed when the first of receiver, `event_or_future` + # completes. + # + # @return [Event] + def any(event_or_future) + AnyCompleteEventPromise.new([self, event_or_future], @DefaultExecutor).event end - def call_callbacks - method, *args = @Callbacks.pop - while method - call_callback method, *args - method, *args = @Callbacks.pop - end + alias_method :|, :any + + # Creates new event dependent on receiver which will not evaluate until touched, see {#touch}. + # In other words, it inserts delay into the chain of Futures making rest of it lazy evaluated. + # + # @return [Event] + def delay + ZipEventEventPromise.new(self, + DelayPromise.new(@DefaultExecutor).event, + @DefaultExecutor).event + end + + # @!macro [new] promise.method.schedule + # Creates new event dependent on receiver scheduled to execute on/in intended_time. + # In time is interpreted from the moment the receiver is completed, therefore it inserts + # delay into the chain. + # + # @!macro promises.param.intended_time + # @return [Event] + def schedule(intended_time) + chain do + ZipEventEventPromise.new(self, + ScheduledPromise.new(@DefaultExecutor, intended_time).event, + @DefaultExecutor).event + end.flat_event end - end - # Represents a value which will become available in future. May fail with a reason instead. 
- class Future < Event + # TODO (pitr-ch 12-Jun-2016): add to_event, to_future - # @!method state - # @return [:pending, :success, :failed] + # @!macro promises.method.with_default_executor + # @return [Event] + def with_default_executor(executor) + EventWrapperPromise.new(self, executor).future + end + + private + + def failed_complete(raise_on_reassign, state) + Concurrent::MultipleAssignmentError.new('Event can be completed only once') if raise_on_reassign + return false + end + + def callback_on_completion(state, args, callback) + callback.call *args + end + end + + # Represents a value which will become available in future. May fail with a reason instead, + # e.g. when the tasks raises an exception. + class Future < AbstractEventFuture - # Has Future been success? + # Is it in success state? # @return [Boolean] def success?(state = internal_state) state.completed? && state.success? end - # Has Future been failed? + # Is it in failed state? # @return [Boolean] def failed?(state = internal_state) state.completed? && !state.success? end - # @return [Object, nil] the value of the Future when success, nil on timeout - # @!macro [attach] edge.timeout_nil - # @note If the Future can have value `nil` then it cannot be distinquished from `nil` returned on timeout. - # In this case is better to use first `wait` then `value` (or similar). - # @!macro edge.periodical_wait + # @!macro [new] promises.warn.nil + # @note Make sure returned `nil` is not confused with timeout, no value when failed, + # no reason when success, etc. + # Use more exact methods if needed, like {#wait}, {#value!}, {#result}, etc. + + # @!macro [new] promises.method.value + # Return value of the future. + # @!macro promises.touches + # + # @!macro promises.warn.blocks + # @!macro promises.warn.nil + # @!macro promises.param.timeout + # @return [Object, nil] the value of the Future when success, nil on timeout or failure. 
def value(timeout = nil) touch internal_state.value if wait_until_complete timeout end - # @return [Exception, nil] the reason of the Future's failure - # @!macro edge.timeout_nil - # @!macro edge.periodical_wait + # Returns reason of future's failure. + # @!macro promises.touches + # + # @!macro promises.warn.blocks + # @!macro promises.warn.nil + # @!macro promises.param.timeout + # @return [Exception, nil] nil on timeout or success. def reason(timeout = nil) touch internal_state.reason if wait_until_complete timeout end - # @return [Array(Boolean, Object, Exception), nil] triplet of success, value, reason - # @!macro edge.timeout_nil - # @!macro edge.periodical_wait + # Returns triplet success?, value, reason. + # @!macro promises.touches + # + # @!macro promises.warn.blocks + # @!macro promises.param.timeout + # @return [Array(Boolean, Object, Exception), nil] triplet of success, value, reason, or nil + # on timeout. def result(timeout = nil) touch internal_state.result if wait_until_complete timeout end - # Wait until Future is #complete? - # @param [Numeric] timeout the maximum time in second to wait. - # @raise reason on failure - # @return [Event, true, false] self or true/false if timeout is used - # @!macro edge.periodical_wait + # @!macro promises.method.wait + # @raise [Exception] {#reason} on failure def wait!(timeout = nil) touch result = wait_until_complete!(timeout) timeout ? result : self end - # Wait until Future is #complete? - # @param [Numeric] timeout the maximum time in second to wait. - # @raise reason on failure - # @return [Object, nil] - # @!macro edge.timeout_nil - # @!macro edge.periodical_wait + # @!macro promises.method.value + # @return [Object, nil] the value of the Future when success, nil on timeout. + # @raise [Exception] {#reason} on failure def value!(timeout = nil) touch internal_state.value if wait_until_complete! 
timeout end - # @example allows failed Future to be risen - # raise Concurrent.future.fail + # Allows failed Future to be risen with `raise` method. + # @example + # raise Promises.failed_future(StandardError.new("boom")) + # @raise [StandardError] when raising not failed future def exception(*args) - raise 'obligation is not failed' unless failed? + raise Concurrent::Error, 'it is not failed' unless failed? reason = internal_state.reason if reason.is_a?(::Array) + # TODO (pitr-ch 12-Jun-2016): remove logging!, how? reason.each { |e| log ERROR, 'Promises::Future', e } Concurrent::Error.new 'multiple exceptions, inspect log' else @@ -661,51 +893,76 @@ def exception(*args) end end - # @yield [value, *args] executed only on parent success - # @return [Future] new - def then(*args, &callback) - then_on @DefaultExecutor, *args, &callback + # @!macro promises.shortcut.using + # @return [Future] + def then(*args, &task) + then_using @DefaultExecutor, *args, &task end - def then_on(executor, *args, &callback) - ThenPromise.new(self, @DefaultExecutor, executor, args, &callback).future + # Chains the task to be executed asynchronously on executor after it succeeds. Does not run + # the task if it fails. It will complete though, triggering any dependent futures. + # + # @!macro promises.param.executor + # @!macro promises.param.args + # @!macro promise.param.task-future + # @return [Future] + # @yield [value, *args] to the task. + def then_using(executor, *args, &task) + ThenPromise.new(self, @DefaultExecutor, executor, args, &task).future end - def chain_completable(completable_future) - on_completion! { completable_future.complete_with internal_state } + # @!macro promises.shortcut.using + # @return [Future] + def rescue(*args, &task) + rescue_using @DefaultExecutor, *args, &task end - alias_method :tangle, :chain_completable - - # @yield [reason] executed only on parent failure + # Chains the task to be executed asynchronously on executor after it fails. 
Does not run + # the task if it succeeds. It will complete though, triggering any dependent futures. + # + # @!macro promises.param.executor + # @!macro promises.param.args + # @!macro promise.param.task-future # @return [Future] - def rescue(*args, &callback) - rescue_on @DefaultExecutor, *args, &callback + # @yield [reason, *args] to the task. + def rescue_using(executor, *args, &task) + RescuePromise.new(self, @DefaultExecutor, executor, args, &task).future end - def rescue_on(executor, *args, &callback) - RescuePromise.new(self, @DefaultExecutor, executor, args, &callback).future + # @!macro promises.method.zip + # @return [Future] + def zip(other) + if other.is_a?(Future) + ZipFutureFuturePromise.new(self, other, @DefaultExecutor).future + else + ZipFutureEventPromise.new(self, other, @DefaultExecutor).future + end end - # zips with the Future in the value - # @example - # Concurrent.future { Concurrent.future { 1 } }.flat.vale # => 1 - def flat(level = 1) - FlatPromise.new(self, level, @DefaultExecutor).future - end + alias_method :&, :zip - # @return [Future] which has first completed value from futures - def any(future) - AnyCompleteFuturePromise.new([self, future], @DefaultExecutor).future + # Creates a new event which will be completed when the first of receiver, `event_or_future` + # completes. Returning future will have value nil if event_or_future is event and completes + # first. + # + # @return [Future] + def any(event_or_future) + AnyCompleteFuturePromise.new([self, event_or_future], @DefaultExecutor).future end - # Inserts delay into the chain of Futures making rest of it lazy evaluated. + alias_method :|, :any + + # Creates new future dependent on receiver which will not evaluate until touched, see {#touch}. + # In other words, it inserts delay into the chain of Futures making rest of it lazy evaluated. 
+ # # @return [Future] def delay - ZipFutureEventPromise.new(self, DelayPromise.new(@DefaultExecutor).future, @DefaultExecutor).future + ZipFutureEventPromise.new(self, + DelayPromise.new(@DefaultExecutor).future, + @DefaultExecutor).future end - # Schedules rest of the chain for execution with specified time or on specified time + # @!macro promise.method.schedule # @return [Future] def schedule(intended_time) chain do @@ -715,91 +972,103 @@ def schedule(intended_time) end.flat end - # Changes default executor for rest of the chain + # @!macro promises.method.with_default_executor # @return [Future] def with_default_executor(executor) FutureWrapperPromise.new(self, executor).future end - # Zip with future producing new Future + # Creates new future which will have result of the future returned by receiver. If receiver + # fails it will have its failure. + # + # @param [Integer] level how many levels of futures should flatten # @return [Future] - def zip(other) - if other.is_a?(Future) - ZipFutureFuturePromise.new(self, other, @DefaultExecutor).future - else - ZipFutureEventPromise.new(self, other, @DefaultExecutor).future - end + def flat_future(level = 1) + FlatFuturePromise.new(self, level, @DefaultExecutor).future end - alias_method :&, :zip + alias_method :flat, :flat_future - alias_method :|, :any + # Creates new event which will be completed when the returned event by receiver is. + # Be careful if the receiver fails it will just complete since Event does not hold reason. + # + # @return [Event] + def flat_event + FlatEventPromise.new(self, @DefaultExecutor).event + end - # @yield [value] executed async on `executor` when success - # @return self + # @!macro promises.shortcut.using + # @return [self] def on_success(*args, &callback) on_success_using @DefaultExecutor, *args, &callback end + # Stores the callback to be executed synchronously on completing thread after it is + # successful. Does nothing on failure. 
+ # + # @!macro promises.param.args + # @!macro promise.param.callback + # @return [self] + # @yield [value *args] to the callback. + def on_success!(*args, &callback) + add_callback :callback_on_success, args, callback + end + + # Stores the callback to be executed asynchronously on executor after it is + # successful. Does nothing on failure. + # + # @!macro promises.param.executor + # @!macro promises.param.args + # @!macro promise.param.callback + # @return [self] + # @yield [value *args] to the callback. def on_success_using(executor, *args, &callback) add_callback :async_callback_on_success, executor, args, callback end - # @yield [reason] executed async on `executor` when failed? - # @return self + # @!macro promises.shortcut.using + # @return [self] def on_failure(*args, &callback) on_failure_using @DefaultExecutor, *args, &callback end - def on_failure_using(executor, *args, &callback) - add_callback :async_callback_on_failure, executor, args, callback - end - - # @yield [value] executed sync when success - # @return self - def on_success!(*args, &callback) - add_callback :callback_on_success, args, callback - end - - # @yield [reason] executed sync when failed? - # @return self + # Stores the callback to be executed synchronously on completing thread after it is + # failed. Does nothing on success. + # + # @!macro promises.param.args + # @!macro promise.param.callback + # @return [self] + # @yield [reason *args] to the callback. 
def on_failure!(*args, &callback) add_callback :callback_on_failure, args, callback end - # @!visibility private - def complete_with(state, raise_on_reassign = true) - if compare_and_set_internal_state(PENDING, state) - # go to synchronized block only if there were waiting threads - @Lock.synchronize { @Condition.broadcast } unless @Waiters.value == 0 - call_callbacks state - else - if raise_on_reassign - # print otherwise hidden error - log ERROR, 'Promises::Future', reason if reason - log ERROR, 'Promises::Future', state.reason if state.reason - - raise(Concurrent::MultipleAssignmentError.new( - "Future can be completed only once. Current result is #{result}, " + - "trying to set #{state.result}")) - end - return false - end - self + # Stores the callback to be executed asynchronously on executor after it is + # failed. Does nothing on success. + # + # @!macro promises.param.executor + # @!macro promises.param.args + # @!macro promise.param.callback + # @return [self] + # @yield [reason *args] to the callback. + def on_failure_using(executor, *args, &callback) + add_callback :async_callback_on_failure, executor, args, callback end - # @!visibility private - def add_callback(method, *args) - state = internal_state - if completed?(state) - call_callback method, state, *args - else - @Callbacks.push [method, *args] - state = internal_state - # take back if it was completed in the meanwhile - call_callbacks state if completed?(state) - end - self + # Allows to use futures as green threads. The receiver has to evaluate to a future which + # represents what should be done next. It basically flattens indefinitely until non Future + # values is returned which becomes result of the returned future. Any ancountered exception + # will become reason of the returned future. + # + # @return [Future] + # @example + # body = lambda do |v| + # v += 1 + # v < 5 ? future(v, &body) : v + # end + # future(0, &body).run.value! 
# => 5 + def run + RunFuturePromise.new(self, @DefaultExecutor).future end # @!visibility private @@ -809,22 +1078,24 @@ def apply(args, block) private - def wait_until_complete!(timeout = nil) - result = wait_until_complete(timeout) - raise self if failed? - result - end + def failed_complete(raise_on_reassign, state) + if raise_on_reassign + # TODO (pitr-ch 12-Jun-2016): remove logging?! + # print otherwise hidden error + log ERROR, 'Promises::Future', reason if reason + log ERROR, 'Promises::Future', state.reason if state.reason - def call_callbacks(state) - method, *args = @Callbacks.pop - while method - call_callback method, state, *args - method, *args = @Callbacks.pop + raise(Concurrent::MultipleAssignmentError.new( + "Future can be completed only once. Current result is #{result}, " + + "trying to set #{state.result}")) end + return false end - def call_callback(method, state, *args) - self.send method, state, *args + def wait_until_complete!(timeout = nil) + result = wait_until_complete(timeout) + raise self if failed? + result end def async_callback_on_success(state, executor, args, callback) @@ -851,17 +1122,9 @@ def callback_on_completion(state, args, callback) callback.call state.result, *args end - def callback_notify_blocked(state, promise) - super(promise) - end - - def async_callback_on_completion(state, executor, args, callback) - with_async(executor, state, args, callback) do |st, ar, cb| - callback_on_completion st, ar, cb - end - end end + # Marker module of Future, Event completed manually by user. module Completable end @@ -869,11 +1132,26 @@ module Completable class CompletableEvent < Event include Completable - # Complete the Event, `raise` if already completed + + # @!macro [new] raise_on_reassign + # @raise [MultipleAssignmentError] when already completed and raise_on_reassign is true. 
+ + # @!macro [new] promise.param.raise_on_reassign + # @param [Boolean] raise_on_reassign should method raise exception if already completed + # @return [self, false] false is returner when raise_on_reassign is false and the receiver + # is already completed. + # + + # Makes the event complete, which triggers all dependent futures. + # + # @!macro promise.param.raise_on_reassign def complete(raise_on_reassign = true) complete_with COMPLETED, raise_on_reassign end + # Creates new event wrapping receiver, effectively hiding the complete method. + # + # @return [Event] def with_hidden_completable @with_hidden_completable ||= EventWrapperPromise.new(self, @DefaultExecutor).event end @@ -883,50 +1161,53 @@ def with_hidden_completable class CompletableFuture < Future include Completable - # Complete the future with triplet od `success`, `value`, `reason` - # `raise` if already completed - # return [self] + # Makes the future complete with result of triplet `success`, `value`, `reason`, + # which triggers all dependent futures. + # + # @!macro promise.param.raise_on_reassign def complete(success, value, reason, raise_on_reassign = true) complete_with(success ? Success.new(value) : Failed.new(reason), raise_on_reassign) end - # Complete the future with value - # return [self] - def success(value) - promise.success(value) + # Makes the future successful with `value`, + # which triggers all dependent futures. + # + # @!macro promise.param.raise_on_reassign + def success(value, raise_on_reassign = true) + promise.success(value, raise_on_reassign) end - # Try to complete the future with value - # return [self] - def try_success(value) - promise.try_success(value) + # Makes the future failed with `reason`, + # which triggers all dependent futures. 
+ # + # @!macro promise.param.raise_on_reassign + def fail(reason, raise_on_reassign = true) + promise.fail(reason, raise_on_reassign) end - # Fail the future with reason - # return [self] - def fail(reason = StandardError.new) - promise.fail(reason) - end - - # Try to fail the future with reason - # return [self] - def try_fail(reason = StandardError.new) - promise.try_fail(reason) - end - - # Evaluate the future to value if there is an exception the future fails with it - # return [self] + # Evaluates the block and sets its result as future's value succeeding, if the block raises + # an exception the future fails with it. + # @yield [*args] to the block. + # @yieldreturn [Object] value + # @return [self] def evaluate_to(*args, &block) + # TODO (pitr-ch 13-Jun-2016): add raise_on_reassign promise.evaluate_to(*args, block) end - # Evaluate the future to value if there is an exception the future fails with it - # @raise the exception - # return [self] + # Evaluates the block and sets its result as future's value succeeding, if the block raises + # an exception the future fails with it. + # @yield [*args] to the block. + # @yieldreturn [Object] value + # @return [self] + # @raise [Exception] also raise reason on failure. def evaluate_to!(*args, &block) promise.evaluate_to!(*args, block) end + # Creates new future wrapping receiver, effectively hiding the complete method and similar. + # + # @return [Future] def with_hidden_completable @with_hidden_completable ||= FutureWrapperPromise.new(self, @DefaultExecutor).future end @@ -980,6 +1261,7 @@ def evaluate_to(*args, block) rescue StandardError => error complete_with Failed.new(error) rescue Exception => error + # TODO (pitr-ch 12-Jun-2016): remove logging? 
log(ERROR, 'Promises::Future', error) complete_with Failed.new(error) end @@ -996,20 +1278,12 @@ def initialize(default_executor) super CompletableFuture.new(self, default_executor) end - def success(value) - complete_with Success.new(value) - end - - def try_success(value) - !!complete_with(Success.new(value), false) + def success(value, raise_on_reassign) + complete_with Success.new(value), raise_on_reassign end - def fail(reason = StandardError.new) - complete_with Failed.new(reason) - end - - def try_fail(reason = StandardError.new) - !!complete_with(Failed.new(reason), false) + def fail(reason, raise_on_reassign) + complete_with Failed.new(reason), raise_on_reassign end public :evaluate_to @@ -1051,6 +1325,7 @@ def on_done(future) end def touch + # TODO (pitr-ch 13-Jun-2016): track if it has lazy parent if it's needed avoids CASes! blocked_by.each(&:touch) end @@ -1175,8 +1450,7 @@ def initialize(default_executor, success, value, reason) end end - class FlatPromise < BlockedPromise - + class AbstractFlatPromise < BlockedPromise # !visibility private def blocked_by @BlockedBy.each.to_a @@ -1184,51 +1458,119 @@ def blocked_by private + def initialize_blocked_by(blocked_by_future) + @BlockedBy = LockFreeStack.new.push(blocked_by_future) + end + + def on_completable(done_future) + complete_with done_future.internal_state + end + + def clear_blocked_by! + @BlockedBy.clear + nil + end + + def completable?(countdown, future) + !@Future.internal_state.completed? && super(countdown, future) + end + end + + class FlatEventPromise < AbstractFlatPromise + + private + + def initialize(blocked_by_future, default_executor) + super Event.new(self, default_executor), blocked_by_future, 2 + end + def process_on_done(future) countdown = super(future) if countdown.nonzero? internal_state = future.internal_state unless internal_state.success? 
- complete_with internal_state + complete_with COMPLETED return countdown end value = internal_state.value case value - when Future + when Future, Event @BlockedBy.push value value.add_callback :callback_notify_blocked, self @Countdown.value - when Event - evaluate_to(lambda { raise TypeError, 'cannot flatten to Event' }) else - evaluate_to(lambda { raise TypeError, "returned value #{value.inspect} is not a Future" }) + complete_with COMPLETED end end countdown end + end + + class FlatFuturePromise < AbstractFlatPromise + + private + def initialize(blocked_by_future, levels, default_executor) raise ArgumentError, 'levels has to be higher than 0' if levels < 1 super Future.new(self, default_executor), blocked_by_future, 1 + levels end - def initialize_blocked_by(blocked_by_future) - @BlockedBy = LockFreeStack.new.push(blocked_by_future) - end + def process_on_done(future) + countdown = super(future) + if countdown.nonzero? + internal_state = future.internal_state - def on_completable(done_future) - complete_with done_future.internal_state + unless internal_state.success? + complete_with internal_state + return countdown + end + + value = internal_state.value + case value + when Future + @BlockedBy.push value + value.add_callback :callback_notify_blocked, self + @Countdown.value + when Event + evaluate_to(lambda { raise TypeError, 'cannot flatten to Event' }) + else + evaluate_to(lambda { raise TypeError, "returned value #{value.inspect} is not a Future" }) + end + end + countdown end - def clear_blocked_by! - @BlockedBy.clear - nil + end + + class RunFuturePromise < AbstractFlatPromise + + private + + def initialize(blocked_by_future, default_executor) + super Future.new(self, default_executor), blocked_by_future, 1 end - def completable?(countdown, future) - !@Future.internal_state.completed? && super(countdown, future) + def process_on_done(future) + internal_state = future.internal_state + + unless internal_state.success? 
+ complete_with internal_state + return 0 + end + + value = internal_state.value + case value + when Future + # @BlockedBy.push value + value.add_callback :callback_notify_blocked, self + else + complete_with internal_state + end + + 1 end end @@ -1438,25 +1780,48 @@ def initialize(default_executor, intended_time) extend FactoryMethods - private_constant :AbstractPromise, :CompletableEventPromise, :CompletableFuturePromise, - :InnerPromise, :BlockedPromise, :BlockedTaskPromise, :ThenPromise, - :RescuePromise, :ChainPromise, :ImmediateEventPromise, - :ImmediateFuturePromise, :FlatPromise, :ZipEventEventPromise, - :ZipFutureEventPromise, :ZipFutureFuturePromise, :EventWrapperPromise, - :FutureWrapperPromise, :ZipFuturesPromise, :ZipEventsPromise, - :AnyCompleteFuturePromise, :AnySuccessfulFuturePromise, :DelayPromise, :ScheduledPromise + private_constant :AbstractPromise, + :CompletableEventPromise, + :CompletableFuturePromise, + :InnerPromise, + :BlockedPromise, + :BlockedTaskPromise, + :ThenPromise, + :RescuePromise, + :ChainPromise, + :ImmediateEventPromise, + :ImmediateFuturePromise, + :AbstractFlatPromise, + :FlatFuturePromise, + :FlatEventPromise, + :RunFuturePromise, + :ZipEventEventPromise, + :ZipFutureEventPromise, + :ZipFutureFuturePromise, + :EventWrapperPromise, + :FutureWrapperPromise, + :ZipFuturesPromise, + :ZipEventsPromise, + :AbstractAnyPromise, + :AnyCompleteFuturePromise, + :AnySuccessfulFuturePromise, + :AnyCompleteEventPromise, + :DelayPromise, + :ScheduledPromise + end end -# TODO when value is requested the current thread may evaluate the tasks to get the value for performance reasons it may not evaluate :io though -# TODO try work stealing pool, each thread has it's own queue - -# Experimental features follow +# TODO try stealing pool, each thread has it's own queue +### Experimental features follow module Concurrent module Promises module FactoryMethods + + # @!visibility private + # only proof of concept # @return [Future] def 
select(*channels) @@ -1472,7 +1837,10 @@ def select(*channels) end end - class Future < Event + class Future < AbstractEventFuture + + # @!visibility private + # Zips with selected value form the suplied channels # @return [Future] def then_select(*channels) @@ -1491,21 +1859,6 @@ def then_ask(actor) self.then { |v| actor.ask(v) }.flat end - # TODO (pitr-ch 14-Mar-2016): document, and move to core - def run(terminated = Promises.future) - on_completion do |success, value, reason| - if success - if value.is_a?(Future) - value.run terminated - else - terminated.success value - end - else - terminated.fail reason - end - end - end - include Enumerable def each(&block) @@ -1551,7 +1904,7 @@ def cancel(raise_on_repeated_call = true) end def canceled? - @Cancel.complete? + @Cancel.completed? end class Token < Synchronization::Object @@ -1576,7 +1929,7 @@ def then(*args, &block) end def canceled? - @Cancel.complete? + @Cancel.completed? end def loop_until_canceled(&block) diff --git a/spec/concurrent/promises_spec.rb b/spec/concurrent/promises_spec.rb index d40627fac..a54a78dd7 100644 --- a/spec/concurrent/promises_spec.rb +++ b/spec/concurrent/promises_spec.rb @@ -69,13 +69,13 @@ def behaves_as_delay(delay, value) start = Time.now.to_f queue = Queue.new - future = succeeded_future(1). + future = completed_event. schedule(0.1). - then { |v| v + 1 }. + then { 1 }. 
then { |v| queue.push(v); queue.push(Time.now.to_f - start); queue } expect(future.value!).to eq queue - expect(queue.pop).to eq 2 + expect(queue.pop).to eq 1 expect(queue.pop).to be >= 0.09 end @@ -103,7 +103,7 @@ def behaves_as_delay(delay, value) describe '.event' do specify do completable_event = completable_event() - one = completable_event.chain { 1 } + one = completable_event.chain(1) { |arg| arg } join = zip(completable_event).chain { 1 } expect(one.completed?).to be false completable_event.complete @@ -135,7 +135,7 @@ def behaves_as_delay(delay, value) any2 = f2 | f3 f1.success 1 - f2.fail + f2.fail StandardError.new expect(any1.value!).to eq 1 expect(any2.reason).to be_a_kind_of StandardError @@ -149,7 +149,7 @@ def behaves_as_delay(delay, value) any = any_successful_future(f1, f2) - f1.fail + f1.fail StandardError.new f2.success :value expect(any.value!).to eq :value @@ -298,7 +298,7 @@ def behaves_as_delay(delay, value) it 'chains' do future0 = future { 1 }.then { |v| v + 2 } # both executed on default FAST_EXECUTOR - future1 = future0.then_on(:fast) { raise 'boo' } # executed on IO_EXECUTOR + future1 = future0.then_using(:fast) { raise 'boo' } # executed on IO_EXECUTOR future2 = future1.then { |v| v + 1 } # will fail with 'boo' error, executed on default FAST_EXECUTOR future3 = future1.rescue { |err| err.message } # executed on default FAST_EXECUTOR future4 = future0.chain { |success, value, reason| success } # executed on default FAST_EXECUTOR @@ -406,6 +406,20 @@ def behaves_as_delay(delay, value) expect(f).to be_failed expect { f.value! }.to raise_error(Exception, 'fail') end + + it 'runs' do + body = lambda do |v| + v += 1 + v < 5 ? future(v, &body) : v + end + expect(future(0, &body).run.value!).to eq 5 + + body = lambda do |v| + v += 1 + v < 5 ? 
future(v, &body) : raise(v.to_s) + end + expect(future(0, &body).run.reason.message).to eq '5' + end end describe 'interoperability' do @@ -453,31 +467,30 @@ def behaves_as_delay(delay, value) specify do source, token = Concurrent::Cancellation.create source.cancel - expect(token.event.complete?).to be_truthy + expect(token.event.completed?).to be_truthy cancellable_branch = Concurrent::Promises.delay { 1 } expect((cancellable_branch | token.event).value).to be_nil - expect(cancellable_branch.complete?).to be_falsey + expect(cancellable_branch.completed?).to be_falsey end specify do - source, token = Concurrent::Cancellation.create( - Concurrent::Promises.completable_future, false, nil, err = StandardError.new('Cancelled')) - source.cancel - expect(token.future.complete?).to be_truthy + source, token = Concurrent::Cancellation.create cancellable_branch = Concurrent::Promises.delay { 1 } - expect((cancellable_branch | token.event).reason).to eq err - expect(cancellable_branch.complete?).to be_falsey + expect(any_complete_future(cancellable_branch, token.event).value).to eq 1 + expect(cancellable_branch.completed?).to be_truthy end - specify do - source, token = Concurrent::Cancellation.create + source, token = Concurrent::Cancellation.create( + Concurrent::Promises.completable_future, false, nil, err = StandardError.new('Cancelled')) + source.cancel + expect(token.future.completed?).to be_truthy cancellable_branch = Concurrent::Promises.delay { 1 } - expect((cancellable_branch | token.event).value).to eq 1 - expect(cancellable_branch.complete?).to be_truthy + expect((cancellable_branch | token.future).reason).to eq err + expect(cancellable_branch.completed?).to be_falsey end end From 0210f7a02e93f0ac34567edc5226ac609eb9c5d2 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Fri, 29 Jul 2016 21:13:05 +0200 Subject: [PATCH 25/68] fix success vs succeed naming --- .../actor/behaviour/sets_results.rb | 2 +- lib/concurrent/actor/core.rb | 2 +- 
lib/concurrent/edge/promises.rb | 24 +++++++++---------- spec/concurrent/promises_spec.rb | 24 +++++++++---------- 4 files changed, 26 insertions(+), 26 deletions(-) diff --git a/lib/concurrent/actor/behaviour/sets_results.rb b/lib/concurrent/actor/behaviour/sets_results.rb index 8ec50e23c..afc1181fa 100644 --- a/lib/concurrent/actor/behaviour/sets_results.rb +++ b/lib/concurrent/actor/behaviour/sets_results.rb @@ -13,7 +13,7 @@ def initialize(core, subsequent, core_options, error_strategy) def on_envelope(envelope) result = pass envelope if result != MESSAGE_PROCESSED && !envelope.future.nil? - envelope.future.success result + envelope.future.succeed result log(DEBUG) { "finished processing of #{envelope.message.inspect}"} end nil diff --git a/lib/concurrent/actor/core.rb b/lib/concurrent/actor/core.rb index 1d4679f61..c17e23138 100644 --- a/lib/concurrent/actor/core.rb +++ b/lib/concurrent/actor/core.rb @@ -197,7 +197,7 @@ def ns_initialize(opts, &block) schedule_execution do begin build_context - initialized.success reference if initialized + initialized.succeed reference if initialized log DEBUG, 'spawned' rescue => ex log ERROR, ex diff --git a/lib/concurrent/edge/promises.rb b/lib/concurrent/edge/promises.rb index 0b280c7c0..1738f6371 100644 --- a/lib/concurrent/edge/promises.rb +++ b/lib/concurrent/edge/promises.rb @@ -66,7 +66,7 @@ def completable_future end # Creates completable future, user is responsible for completing the future once by - # {Promises::CompletableFuture#complete}, {Promises::CompletableFuture#success}, + # {Promises::CompletableFuture#complete}, {Promises::CompletableFuture#succeed}, # or {Promises::CompletableFuture#fail} # # @!macro promises.param.default_executor @@ -95,7 +95,7 @@ def future_on(default_executor, *args, &task) ImmediateEventPromise.new(default_executor).future.then(*args, &task) end - # Creates completed future with will be either success with the given value or failed with + # Creates completed future with will be 
either success with the given value or failure with # the given reason. # # @!macro promises.param.default_executor @@ -108,7 +108,7 @@ def completed_future(success, value, reason, default_executor = :io) # # @!macro promises.param.default_executor # @return [Future] - def succeeded_future(value, default_executor = :io) + def successful_future(value, default_executor = :io) completed_future true, value, nil, default_executor end @@ -821,7 +821,7 @@ def failed?(state = internal_state) # @!macro [new] promises.warn.nil # @note Make sure returned `nil` is not confused with timeout, no value when failed, - # no reason when success, etc. + # no reason when successful, etc. # Use more exact methods if needed, like {#wait}, {#value!}, {#result}, etc. # @!macro [new] promises.method.value @@ -831,7 +831,7 @@ def failed?(state = internal_state) # @!macro promises.warn.blocks # @!macro promises.warn.nil # @!macro promises.param.timeout - # @return [Object, nil] the value of the Future when success, nil on timeout or failure. + # @return [Object, nil] the value of the Future when successful, nil on timeout or failure. def value(timeout = nil) touch internal_state.value if wait_until_complete timeout @@ -854,7 +854,7 @@ def reason(timeout = nil) # # @!macro promises.warn.blocks # @!macro promises.param.timeout - # @return [Array(Boolean, Object, Exception), nil] triplet of success, value, reason, or nil + # @return [Array(Boolean, Object, Exception), nil] triplet of success?, value, reason, or nil # on timeout. def result(timeout = nil) touch @@ -870,7 +870,7 @@ def wait!(timeout = nil) end # @!macro promises.method.value - # @return [Object, nil] the value of the Future when success, nil on timeout. + # @return [Object, nil] the value of the Future when successful, nil on timeout. # @raise [Exception] {#reason} on failure def value!(timeout = nil) touch @@ -1173,8 +1173,8 @@ def complete(success, value, reason, raise_on_reassign = true) # which triggers all dependent futures. 
# # @!macro promise.param.raise_on_reassign - def success(value, raise_on_reassign = true) - promise.success(value, raise_on_reassign) + def succeed(value, raise_on_reassign = true) + promise.succeed(value, raise_on_reassign) end # Makes the future failed with `reason`, @@ -1278,7 +1278,7 @@ def initialize(default_executor) super CompletableFuture.new(self, default_executor) end - def success(value, raise_on_reassign) + def succeed(value, raise_on_reassign) complete_with Success.new(value), raise_on_reassign end @@ -1325,7 +1325,7 @@ def on_done(future) end def touch - # TODO (pitr-ch 13-Jun-2016): track if it has lazy parent if it's needed avoids CASes! + # TODO (pitr-ch 13-Jun-2016): on construction pass down references of delays to be touched, avoids extra casses blocked_by.each(&:touch) end @@ -1964,7 +1964,7 @@ class Throttle < Synchronization::Object def initialize(max) super() self.can_run = max - # TODO (pitr-ch 10-Jun-2016): lockfree gueue is needed + # TODO (pitr-ch 10-Jun-2016): lock-free queue is needed @Queue = Queue.new end diff --git a/spec/concurrent/promises_spec.rb b/spec/concurrent/promises_spec.rb index a54a78dd7..073c17e44 100644 --- a/spec/concurrent/promises_spec.rb +++ b/spec/concurrent/promises_spec.rb @@ -18,7 +18,7 @@ it 'future' do b = completable_future a = completable_future.chain_completable(b) - a.success :val + a.succeed :val expect(b).to be_completed expect(b.value).to eq :val end @@ -29,7 +29,7 @@ future = future { 1 + 1 } expect(future.value!).to eq 2 - future = succeeded_future(1).then { |v| v + 1 } + future = successful_future(1).then { |v| v + 1 } expect(future.value!).to eq 2 end @@ -37,7 +37,7 @@ future = future(1, 2, &:+) expect(future.value!).to eq 3 - future = succeeded_future(1).then(1) { |v, a| v + 1 } + future = successful_future(1).then(1) { |v, a| v + 1 } expect(future.value!).to eq 2 end end @@ -51,9 +51,9 @@ def behaves_as_delay(delay, value) specify do behaves_as_delay delay { 1 + 1 }, 2 - behaves_as_delay 
succeeded_future(1).delay.then { |v| v + 1 }, 2 + behaves_as_delay successful_future(1).delay.then { |v| v + 1 }, 2 behaves_as_delay delay(1) { |a| a + 1 }, 2 - behaves_as_delay succeeded_future(1).delay.then { |v| v + 1 }, 2 + behaves_as_delay successful_future(1).delay.then { |v| v + 1 }, 2 end end @@ -118,7 +118,7 @@ def behaves_as_delay(delay, value) one = completable_future.then(&:succ) join = zip_futures(completable_future).then { |v| v } expect(one.completed?).to be false - completable_future.success 0 + completable_future.succeed 0 expect(one.value!).to eq 1 expect(join.wait!.completed?).to be true expect(join.value!).to eq 0 @@ -134,7 +134,7 @@ def behaves_as_delay(delay, value) any1 = any_complete_future(f1, f2) any2 = f2 | f3 - f1.success 1 + f1.succeed 1 f2.fail StandardError.new expect(any1.value!).to eq 1 @@ -150,7 +150,7 @@ def behaves_as_delay(delay, value) any = any_successful_future(f1, f2) f1.fail StandardError.new - f2.success :value + f2.succeed :value expect(any.value!).to eq :value end @@ -229,15 +229,15 @@ def behaves_as_delay(delay, value) describe '.each' do specify do - expect(succeeded_future(nil).each.map(&:inspect)).to eq ['nil'] - expect(succeeded_future(1).each.map(&:inspect)).to eq ['1'] - expect(succeeded_future([1, 2]).each.map(&:inspect)).to eq ['1', '2'] + expect(successful_future(nil).each.map(&:inspect)).to eq ['nil'] + expect(successful_future(1).each.map(&:inspect)).to eq ['1'] + expect(successful_future([1, 2]).each.map(&:inspect)).to eq ['1', '2'] end end describe '.zip_events' do it 'waits for all and returns event' do - a = succeeded_future 1 + a = successful_future 1 b = failed_future :any c = completable_event.complete From da530a83d1c9164d0ab09b8fdba18de226576fb3 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Fri, 29 Jul 2016 21:14:33 +0200 Subject: [PATCH 26/68] fix visibility of methods in documentation --- lib/concurrent/edge/promises.rb | 39 ++++++++++++++++++++++++++++----- 1 file changed, 34 insertions(+), 5 
deletions(-) diff --git a/lib/concurrent/edge/promises.rb b/lib/concurrent/edge/promises.rb index 1738f6371..577622a0a 100644 --- a/lib/concurrent/edge/promises.rb +++ b/lib/concurrent/edge/promises.rb @@ -259,6 +259,7 @@ def any_event_on(default_executor, *futures_and_or_events) end module InternalStates + # @private class State def completed? raise NotImplementedError @@ -271,6 +272,7 @@ def to_sym private_constant :State + # @private class Pending < State def completed? false @@ -283,6 +285,7 @@ def to_sym private_constant :Pending + # @private class CompletedWithResult < State def completed? true @@ -315,8 +318,9 @@ def apply private_constant :CompletedWithResult - # @!visibility private + # @private class Success < CompletedWithResult + def initialize(value) @Value = value end @@ -344,7 +348,7 @@ def to_sym private_constant :Success - # @!visibility private + # @private class SuccessArray < Success def apply(args, block) block.call(*value, *args) @@ -353,7 +357,7 @@ def apply(args, block) private_constant :SuccessArray - # @!visibility private + # @private class Failed < CompletedWithResult def initialize(reason) @Reason = reason @@ -382,7 +386,7 @@ def apply(args, block) private_constant :Failed - # @!visibility private + # @private class PartiallyFailed < CompletedWithResult def initialize(value, reason) super() @@ -421,6 +425,7 @@ def apply(args, block) private_constant :InternalStates + # Common ancestor of {Event} and {Future} classes class AbstractEventFuture < Synchronization::Object safe_initialization! private(*attr_atomic(:internal_state) - [:internal_state]) @@ -1214,6 +1219,7 @@ def with_hidden_completable end # @abstract + # @private class AbstractPromise < Synchronization::Object safe_initialization! 
include InternalStates @@ -1278,16 +1284,20 @@ def initialize(default_executor) super CompletableFuture.new(self, default_executor) end + # @!visibility private def succeed(value, raise_on_reassign) complete_with Success.new(value), raise_on_reassign end + # @!visibility private def fail(reason, raise_on_reassign) complete_with Failed.new(reason), raise_on_reassign end + # @!visibility private public :evaluate_to + # @!visibility private def evaluate_to!(*args, block) evaluate_to(*args, block).wait! end @@ -1299,6 +1309,7 @@ class InnerPromise < AbstractPromise # @abstract class BlockedPromise < InnerPromise + # @!visibility private def self.new(*args, &block) promise = super(*args, &block) promise.blocked_by.each { |f| f.add_callback :callback_notify_blocked, promise } @@ -1311,7 +1322,7 @@ def initialize(future, blocked_by_futures, countdown) @Countdown = AtomicFixnum.new countdown end - # @api private + # @!visibility private def on_done(future) countdown = process_on_done(future) completable = completable?(countdown, future) @@ -1324,6 +1335,7 @@ def on_done(future) end end + # @!visibility private def touch # TODO (pitr-ch 13-Jun-2016): on construction pass down references of delays to be touched, avoids extra casses blocked_by.each(&:touch) @@ -1335,6 +1347,7 @@ def blocked_by @BlockedBy end + # @!visibility private def inspect "#{to_s[0..-2]} blocked_by:[#{ blocked_by.map(&:to_s).join(', ')}]>" end @@ -1378,6 +1391,7 @@ def initialize(blocked_by_future, default_executor, executor, args, &task) @Args = args end + # @!visibility private def executor @Executor end @@ -1579,6 +1593,8 @@ def initialize(event1, event2, default_executor) super Event.new(self, default_executor), [event1, event2], 2 end + private + def on_completable(done_future) complete_with COMPLETED end @@ -1590,6 +1606,8 @@ def initialize(future, event, default_executor) @FutureResult = future end + private + def on_completable(done_future) complete_with @FutureResult.internal_state end @@ 
-1602,6 +1620,8 @@ def initialize(future1, future2, default_executor) @Future2Result = future2 end + private + def on_completable(done_future) success1, value1, reason1 = @Future1Result.result success2, value2, reason2 = @Future2Result.result @@ -1620,6 +1640,8 @@ def initialize(event, default_executor) super Event.new(self, default_executor), [event], 1 end + private + def on_completable(done_future) complete_with COMPLETED end @@ -1630,6 +1652,8 @@ def initialize(future, default_executor) super Future.new(self, default_executor), [future], 1 end + private + def on_completable(done_future) complete_with done_future.internal_state end @@ -1684,6 +1708,7 @@ def on_completable(done_future) # @abstract class AbstractAnyPromise < BlockedPromise + # @!visibility private def touch blocked_by.each(&:touch) unless @Future.completed? end @@ -1735,6 +1760,7 @@ def completable?(countdown, future) end class DelayPromise < InnerPromise + # @!visibility private def touch @Future.complete_with COMPLETED end @@ -1747,10 +1773,12 @@ def initialize(default_executor) end class ScheduledPromise < InnerPromise + # @!visibility private def intended_time @IntendedTime end + # @!visibility private def inspect "#{to_s[0..-2]} intended_time:[#{@IntendedTime}}>" end @@ -1816,6 +1844,7 @@ def initialize(default_executor, intended_time) # TODO try stealing pool, each thread has it's own queue ### Experimental features follow + module Concurrent module Promises module FactoryMethods From 86fd08e9ee4469e4e25924b8ce2f487aade3d2b5 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Fri, 29 Jul 2016 21:15:40 +0200 Subject: [PATCH 27/68] do not call touch when completed, saves CAS --- lib/concurrent/edge/promises.rb | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/lib/concurrent/edge/promises.rb b/lib/concurrent/edge/promises.rb index 577622a0a..a0776f781 100644 --- a/lib/concurrent/edge/promises.rb +++ b/lib/concurrent/edge/promises.rb @@ -498,8 +498,6 @@ def touch self 
end - alias_method :needed, :touch - # @!macro [new] promises.touches # Calls {AbstractEventFuture#touch}. @@ -512,7 +510,6 @@ def touch # @return [Future, true, false] self implies timeout was not used, true implies timeout was used # and it was completed, false implies it was not completed within timeout. def wait(timeout = nil) - touch result = wait_until_complete(timeout) timeout ? result : self end @@ -657,7 +654,7 @@ def promise # For inspection. # @!visibility private - def touched + def touched? @Touched.value end @@ -687,6 +684,8 @@ def add_callback(method, *args) def wait_until_complete(timeout) return true if completed? + touch + @Lock.synchronize do begin unless completed? @@ -838,7 +837,6 @@ def failed?(state = internal_state) # @!macro promises.param.timeout # @return [Object, nil] the value of the Future when successful, nil on timeout or failure. def value(timeout = nil) - touch internal_state.value if wait_until_complete timeout end @@ -850,7 +848,6 @@ def value(timeout = nil) # @!macro promises.param.timeout # @return [Exception, nil] nil on timeout or success. def reason(timeout = nil) - touch internal_state.reason if wait_until_complete timeout end @@ -862,14 +859,12 @@ def reason(timeout = nil) # @return [Array(Boolean, Object, Exception), nil] triplet of success?, value, reason, or nil # on timeout. def result(timeout = nil) - touch internal_state.result if wait_until_complete timeout end # @!macro promises.method.wait # @raise [Exception] {#reason} on failure def wait!(timeout = nil) - touch result = wait_until_complete!(timeout) timeout ? result : self end @@ -878,7 +873,6 @@ def wait!(timeout = nil) # @return [Object, nil] the value of the Future when successful, nil on timeout. # @raise [Exception] {#reason} on failure def value!(timeout = nil) - touch internal_state.value if wait_until_complete! 
timeout end From de57badd28093a22e0dcdf3fc7c12968109d4bab Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Sat, 30 Jul 2016 10:07:51 +0200 Subject: [PATCH 28/68] Rename new promises states pending > pending completed > resolved success > fulfilled failed > rejected --- lib/concurrent/actor.rb | 4 +- .../actor/behaviour/sets_results.rb | 6 +- lib/concurrent/actor/behaviour/termination.rb | 12 +- lib/concurrent/actor/core.rb | 8 +- lib/concurrent/actor/envelope.rb | 4 +- lib/concurrent/actor/reference.rb | 8 +- lib/concurrent/actor/utils/pool.rb | 2 +- lib/concurrent/edge/promises.rb | 728 +++++++++--------- spec/concurrent/actor_spec.rb | 6 +- spec/concurrent/promises_spec.rb | 188 ++--- 10 files changed, 484 insertions(+), 482 deletions(-) diff --git a/lib/concurrent/actor.rb b/lib/concurrent/actor.rb index 8d1822966..4f47c5527 100644 --- a/lib/concurrent/actor.rb +++ b/lib/concurrent/actor.rb @@ -35,7 +35,7 @@ def self.current end @root = Concurrent::Promises.delay do - Core.new(parent: nil, name: '/', class: Root, initialized: future = Concurrent::Promises.completable_future).reference.tap do + Core.new(parent: nil, name: '/', class: Root, initialized: future = Concurrent::Promises.resolvable_future).reference.tap do future.wait! end end @@ -74,7 +74,7 @@ def self.spawn(*args, &block) # as {.spawn} but it'll block until actor is initialized or it'll raise exception on error def self.spawn!(*args, &block) - spawn(to_spawn_options(*args).merge(initialized: future = Concurrent::Promises.completable_future), &block).tap { future.wait! } + spawn(to_spawn_options(*args).merge(initialized: future = Concurrent::Promises.resolvable_future), &block).tap { future.wait! 
} end # @overload to_spawn_options(context_class, name, *args) diff --git a/lib/concurrent/actor/behaviour/sets_results.rb b/lib/concurrent/actor/behaviour/sets_results.rb index afc1181fa..97f0b47d7 100644 --- a/lib/concurrent/actor/behaviour/sets_results.rb +++ b/lib/concurrent/actor/behaviour/sets_results.rb @@ -1,7 +1,7 @@ module Concurrent module Actor module Behaviour - # Collects returning value and sets the CompletableFuture in the {Envelope} or error on failure. + # Collects returning value and sets the ResolvableFuture in the {Envelope} or error on failure. class SetResults < Abstract attr_reader :error_strategy @@ -13,7 +13,7 @@ def initialize(core, subsequent, core_options, error_strategy) def on_envelope(envelope) result = pass envelope if result != MESSAGE_PROCESSED && !envelope.future.nil? - envelope.future.succeed result + envelope.future.fulfill result log(DEBUG) { "finished processing of #{envelope.message.inspect}"} end nil @@ -29,7 +29,7 @@ def on_envelope(envelope) else raise end - envelope.future.fail error unless envelope.future.nil? + envelope.future.reject error unless envelope.future.nil? 
end end end diff --git a/lib/concurrent/actor/behaviour/termination.rb b/lib/concurrent/actor/behaviour/termination.rb index a58cd7b10..355c45901 100644 --- a/lib/concurrent/actor/behaviour/termination.rb +++ b/lib/concurrent/actor/behaviour/termination.rb @@ -14,8 +14,8 @@ class Termination < Abstract def initialize(core, subsequent, core_options, trapping = false, terminate_children = true) super core, subsequent, core_options - @terminated = Concurrent::Promises.completable_future - @public_terminated = @terminated.with_hidden_completable + @terminated = Concurrent::Promises.resolvable_future + @public_terminated = @terminated.with_hidden_resolvable @trapping = trapping @terminate_children = terminate_children end @@ -23,7 +23,7 @@ def initialize(core, subsequent, core_options, trapping = false, terminate_child # @note Actor rejects envelopes when terminated. # @return [true, false] if actor is terminated def terminated? - @terminated.completed? + @terminated.resolved? end def trapping? @@ -62,15 +62,15 @@ def on_envelope(envelope) def terminate!(reason = nil, envelope = nil) return true if terminated? - self_termination = Concurrent::Promises.completed_future(reason.nil?, reason.nil? || nil, reason) + self_termination = Concurrent::Promises.resolved_future(reason.nil?, reason.nil? || nil, reason) all_terminations = if @terminate_children Concurrent::Promises.zip(*children.map { |ch| ch.ask(:terminate!) 
}, self_termination) else self_termination end - all_terminations.chain_completable(@terminated) - all_terminations.chain_completable(envelope.future) if envelope && envelope.future + all_terminations.chain_resolvable(@terminated) + all_terminations.chain_resolvable(envelope.future) if envelope && envelope.future broadcast(true, [:terminated, reason]) # TODO do not end up in Dead Letter Router parent << :remove_child if parent diff --git a/lib/concurrent/actor/core.rb b/lib/concurrent/actor/core.rb index c17e23138..e531fccfa 100644 --- a/lib/concurrent/actor/core.rb +++ b/lib/concurrent/actor/core.rb @@ -42,7 +42,7 @@ class Core < Synchronization::LockableObject # @option opts [Class] reference a custom descendant of {Reference} to use # @option opts [Array)>] behaviour_definition, array of pairs # where each pair is behaviour class and its args, see {Behaviour.basic_behaviour_definition} - # @option opts [CompletableFuture, nil] initialized, if present it'll be set or failed after {Context} initialization + # @option opts [ResolvableFuture, nil] initialized, if present it'll be set or failed after {Context} initialization # @option opts [Reference, nil] parent **private api** parent of the actor (the one spawning ) # @option opts [Proc, nil] logger a proc accepting (level, progname, message = nil, &block) params, # can be used to hook actor instance to any logging system, see {Concurrent::Concern::Logging} @@ -192,17 +192,17 @@ def ns_initialize(opts, &block) @args = opts.fetch(:args, []) @block = block - initialized = Type! opts[:initialized], Promises::CompletableFuture, NilClass + initialized = Type! opts[:initialized], Promises::ResolvableFuture, NilClass schedule_execution do begin build_context - initialized.succeed reference if initialized + initialized.fulfill reference if initialized log DEBUG, 'spawned' rescue => ex log ERROR, ex @first_behaviour.terminate! 
- initialized.fail ex if initialized + initialized.reject ex if initialized end end end diff --git a/lib/concurrent/actor/envelope.rb b/lib/concurrent/actor/envelope.rb index 120de6e7f..118234f57 100644 --- a/lib/concurrent/actor/envelope.rb +++ b/lib/concurrent/actor/envelope.rb @@ -16,7 +16,7 @@ class Envelope def initialize(message, future, sender, address) @message = message - @future = Type! future, Promises::CompletableFuture, NilClass + @future = Type! future, Promises::ResolvableFuture, NilClass @sender = Type! sender, Reference, Thread @address = Type! address, Reference end @@ -34,7 +34,7 @@ def address_path end def reject!(error) - future.fail error unless future.nil? + future.reject error unless future.nil? end end end diff --git a/lib/concurrent/actor/reference.rb b/lib/concurrent/actor/reference.rb index 909c64646..64c86939c 100644 --- a/lib/concurrent/actor/reference.rb +++ b/lib/concurrent/actor/reference.rb @@ -51,7 +51,7 @@ def tell(message) # adder = AdHoc.spawn('adder') { -> message { message + 1 } } # adder.ask(1).value # => 2 # adder.ask(nil).wait.reason # => # - def ask(message, future = Concurrent::Promises.completable_future) + def ask(message, future = Concurrent::Promises.resolvable_future) message message, future end @@ -65,11 +65,11 @@ def ask(message, future = Concurrent::Promises.completable_future) # @param [Object] message # @param [Promises::Future] future to be fulfilled be message's processing result # @return [Object] message's processing result - # @raise [Exception] future.reason if future is #failed? + # @raise [Exception] future.reason if future is #rejected? # @example # adder = AdHoc.spawn('adder') { -> message { message + 1 } } # adder.ask!(1) # => 2 - def ask!(message, future = Concurrent::Promises.completable_future) + def ask!(message, future = Concurrent::Promises.resolvable_future) ask(message, future).value! 
end @@ -80,7 +80,7 @@ def map(messages) # behaves as {#tell} when no future and as {#ask} when future def message(message, future = nil) core.on_envelope Envelope.new(message, future, Actor.current || Thread.current, self) - return future ? future.with_hidden_completable : self + return future ? future.with_hidden_resolvable : self end # @see AbstractContext#dead_letter_routing diff --git a/lib/concurrent/actor/utils/pool.rb b/lib/concurrent/actor/utils/pool.rb index da9a544ef..b78e37223 100644 --- a/lib/concurrent/actor/utils/pool.rb +++ b/lib/concurrent/actor/utils/pool.rb @@ -45,7 +45,7 @@ def on_message(message) else Envelope.new(envelope.message, Concurrent::Promises.future, envelope.sender, envelope.address) end - envelope_to_redirect.future.on_completion! { @balancer << :subscribe } # TODO check safety of @balancer reading + envelope_to_redirect.future.on_fulfillment! { @balancer << :subscribe } # TODO check safety of @balancer reading redirect @balancer, envelope_to_redirect end end diff --git a/lib/concurrent/edge/promises.rb b/lib/concurrent/edge/promises.rb index a0776f781..3ff83c85f 100644 --- a/lib/concurrent/edge/promises.rb +++ b/lib/concurrent/edge/promises.rb @@ -33,8 +33,8 @@ module Promises # # @!macro [new] promise.param.task-future # @yieldreturn will become result of the returned Future. - # Its returned value becomes {Future#value} succeeding, - # raised exception becomes {Future#reason} failing. + # Its returned value becomes {Future#value} fulfilling it, + # raised exception becomes {Future#reason} rejecting it. # # @!macro [new] promise.param.callback # @yieldreturn is forgotten. @@ -43,36 +43,35 @@ module Promises # new. 
module FactoryMethods - # @!macro promises.shortcut.on - # @return [CompletableEvent] - def completable_event - completable_event_on :io + # @return [ResolvableEvent] + def resolvable_event + resolvable_event_on :io end - # Created completable event, user is responsible for completing the event once by - # {Promises::CompletableEvent#complete}. + # Created resolvable event, user is responsible for resolving the event once by + # {Promises::ResolvableEvent#resolve}. # # @!macro promises.param.default_executor - # @return [CompletableEvent] - def completable_event_on(default_executor = :io) - CompletableEventPromise.new(default_executor).future + # @return [ResolvableEvent] + def resolvable_event_on(default_executor = :io) + ResolvableEventPromise.new(default_executor).future end # @!macro promises.shortcut.on - # @return [CompletableFuture] - def completable_future - completable_future_on :io + # @return [ResolvableFuture] + def resolvable_future + resolvable_future_on :io end - # Creates completable future, user is responsible for completing the future once by - # {Promises::CompletableFuture#complete}, {Promises::CompletableFuture#succeed}, - # or {Promises::CompletableFuture#fail} + # Creates resolvable future, user is responsible for resolving the future once by + # {Promises::ResolvableFuture#resolve}, {Promises::ResolvableFuture#fulfill}, + # or {Promises::ResolvableFuture#reject} # # @!macro promises.param.default_executor - # @return [CompletableFuture] - def completable_future_on(default_executor = :io) - CompletableFuturePromise.new(default_executor).future + # @return [ResolvableFuture] + def resolvable_future_on(default_executor = :io) + ResolvableFuturePromise.new(default_executor).future end # @!macro promises.shortcut.on @@ -82,7 +81,7 @@ def future(*args, &task) end # @!macro [new] promises.future-on1 - # Constructs new Future which will be completed after block is evaluated on default executor. 
+ # Constructs new Future which will be resolved after block is evaluated on default executor. # Evaluation begins immediately. # # @!macro [new] promises.future-on2 @@ -95,36 +94,36 @@ def future_on(default_executor, *args, &task) ImmediateEventPromise.new(default_executor).future.then(*args, &task) end - # Creates completed future with will be either success with the given value or failure with + # Creates resolved future with will be either fulfilled with the given value or rejection with # the given reason. # # @!macro promises.param.default_executor # @return [Future] - def completed_future(success, value, reason, default_executor = :io) - ImmediateFuturePromise.new(default_executor, success, value, reason).future + def resolved_future(fulfilled, value, reason, default_executor = :io) + ImmediateFuturePromise.new(default_executor, fulfilled, value, reason).future end - # Creates completed future with will be success with the given value. + # Creates resolved future with will be fulfilled with the given value. # # @!macro promises.param.default_executor # @return [Future] - def successful_future(value, default_executor = :io) - completed_future true, value, nil, default_executor + def fulfilled_future(value, default_executor = :io) + resolved_future true, value, nil, default_executor end - # Creates completed future with will be failed with the given reason. + # Creates resolved future with will be rejected with the given reason. # # @!macro promises.param.default_executor # @return [Future] - def failed_future(reason, default_executor = :io) - completed_future false, nil, reason, default_executor + def rejected_future(reason, default_executor = :io) + resolved_future false, nil, reason, default_executor end - # Creates completed event. + # Creates resolved event. 
# # @!macro promises.param.default_executor # @return [Event] - def completed_event(default_executor = :io) + def resolved_event(default_executor = :io) ImmediateEventPromise.new(default_executor).event end @@ -165,12 +164,12 @@ def zip_futures(*futures_and_or_events) zip_futures_on :io, *futures_and_or_events end - # Creates new future which is completed after all futures_and_or_events are complete. - # Its value is array of zipped future values. Its reason is array of reasons for failure. - # If there is an error it fails. + # Creates new future which is resolved after all futures_and_or_events are resolved. + # Its value is array of zipped future values. Its reason is array of reasons for rejection. + # If there is an error it rejects. # @!macro [new] promises.event-conversion - # If event is supplied, which does not have value and can be only completed, it's - # represented as `:success` with value `nil`. + # If event is supplied, which does not have value and can be only resolved, it's + # represented as `:fulfilled` with value `nil`. # # @!macro promises.param.default_executor # @param [AbstractEventFuture] futures_and_or_events @@ -187,8 +186,8 @@ def zip_events(*futures_and_or_events) zip_events_on :io, *futures_and_or_events end - # Creates new event which is completed after all futures_and_or_events are complete. - # (Future is complete when successful or failed.) + # Creates new event which is resolved after all futures_and_or_events are resolved. + # (Future is resolved when fulfilled or rejected.) 
# # @!macro promises.param.default_executor # @param [AbstractEventFuture] futures_and_or_events @@ -199,43 +198,43 @@ def zip_events_on(default_executor, *futures_and_or_events) # @!macro promises.shortcut.on # @return [Future] - def any_complete_future(*futures_and_or_events) - any_complete_future_on :io, *futures_and_or_events + def any_resolved_future(*futures_and_or_events) + any_resolved_future_on :io, *futures_and_or_events end - alias_method :any, :any_complete_future + alias_method :any, :any_resolved_future - # Creates new future which is completed after first futures_and_or_events is complete. - # Its result equals result of the first complete future. + # Creates new future which is resolved after first futures_and_or_events is resolved. + # Its result equals result of the first resolved future. # @!macro [new] promises.any-touch - # If complete it does not propagate {AbstractEventFuture#touch}, leaving delayed + # If resolved it does not propagate {AbstractEventFuture#touch}, leaving delayed # futures un-executed if they are not required any more. # @!macro promises.event-conversion # # @!macro promises.param.default_executor # @param [AbstractEventFuture] futures_and_or_events # @return [Future] - def any_complete_future_on(default_executor, *futures_and_or_events) - AnyCompleteFuturePromise.new(futures_and_or_events, default_executor).future + def any_resolved_future_on(default_executor, *futures_and_or_events) + AnyResolvedFuturePromise.new(futures_and_or_events, default_executor).future end # @!macro promises.shortcut.on # @return [Future] - def any_successful_future(*futures_and_or_events) - any_successful_future_on :io, *futures_and_or_events + def any_fulfilled_future(*futures_and_or_events) + any_fulfilled_future_on :io, *futures_and_or_events end - # Creates new future which is completed after first of futures_and_or_events is successful. 
- # Its result equals result of the first complete future or if all futures_and_or_events fail, - # it has reason of the last completed future. + # Creates new future which is resolved after first of futures_and_or_events is fulfilled. + # Its result equals result of the first resolved future or if all futures_and_or_events reject, + # it has reason of the last resolved future. # @!macro promises.any-touch # @!macro promises.event-conversion # # @!macro promises.param.default_executor # @param [AbstractEventFuture] futures_and_or_events # @return [Future] - def any_successful_future_on(default_executor, *futures_and_or_events) - AnySuccessfulFuturePromise.new(futures_and_or_events, default_executor).future + def any_fulfilled_future_on(default_executor, *futures_and_or_events) + AnyFulfilledFuturePromise.new(futures_and_or_events, default_executor).future end # @!macro promises.shortcut.on @@ -244,16 +243,18 @@ def any_event(*futures_and_or_events) any_event_on :io, *futures_and_or_events end - # Creates new event which becomes complete after first of the futures_and_or_events completes. + # Creates new event which becomes resolved after first of the futures_and_or_events resolves. # @!macro promises.any-touch # # @!macro promises.param.default_executor # @param [AbstractEventFuture] futures_and_or_events # @return [Event] def any_event_on(default_executor, *futures_and_or_events) - AnyCompleteEventPromise.new(futures_and_or_events, default_executor).event + AnyResolvedEventPromise.new(futures_and_or_events, default_executor).event end + # TODO (pitr-ch 30-Jul-2016): add general constructor behaving based on argument type + # TODO consider adding first(count, *futures) # TODO consider adding zip_by(slice, *futures) processing futures in slices end @@ -261,7 +262,7 @@ def any_event_on(default_executor, *futures_and_or_events) module InternalStates # @private class State - def completed? + def resolved? 
raise NotImplementedError end @@ -274,7 +275,7 @@ def to_sym # @private class Pending < State - def completed? + def resolved? false end @@ -286,20 +287,20 @@ def to_sym private_constant :Pending # @private - class CompletedWithResult < State - def completed? + class ResolvedWithResult < State + def resolved? true end def to_sym - :completed + :resolved end def result - [success?, value, reason] + [fulfilled?, value, reason] end - def success? + def fulfilled? raise NotImplementedError end @@ -316,16 +317,16 @@ def apply end end - private_constant :CompletedWithResult + private_constant :ResolvedWithResult # @private - class Success < CompletedWithResult + class Fulfilled < ResolvedWithResult def initialize(value) @Value = value end - def success? + def fulfilled? true end @@ -342,28 +343,28 @@ def reason end def to_sym - :success + :fulfilled end end - private_constant :Success + private_constant :Fulfilled # @private - class SuccessArray < Success + class FulfilledArray < Fulfilled def apply(args, block) block.call(*value, *args) end end - private_constant :SuccessArray + private_constant :FulfilledArray # @private - class Failed < CompletedWithResult + class Rejected < ResolvedWithResult def initialize(reason) @Reason = reason end - def success? + def fulfilled? false end @@ -376,7 +377,7 @@ def reason end def to_sym - :failed + :rejected end def apply(args, block) @@ -384,22 +385,22 @@ def apply(args, block) end end - private_constant :Failed + private_constant :Rejected # @private - class PartiallyFailed < CompletedWithResult + class PartiallyRejected < ResolvedWithResult def initialize(value, reason) super() @Value = value @Reason = reason end - def success? + def fulfilled? 
false end def to_sym - :failed + :rejected end def value @@ -415,12 +416,12 @@ def apply(args, block) end end - private_constant :PartiallyFailed + private_constant :PartiallyRejected - PENDING = Pending.new - COMPLETED = Success.new(nil) + PENDING = Pending.new + RESOLVED = Fulfilled.new(nil) - private_constant :PENDING, :COMPLETED + private_constant :PENDING, :RESOLVED end private_constant :InternalStates @@ -457,17 +458,17 @@ def initialize(promise, default_executor) # @param [Numeric] timeout the maximum time in second to wait. # @!macro [new] promises.warn.blocks - # @note This function potentially blocks current thread until the Future is complete. + # @note This function potentially blocks current thread until the Future is resolved. # Be careful it can deadlock. Try to chain instead. # Returns its state. # @return [Symbol] # # @overload an_event.state - # @return [:pending, :completed] + # @return [:pending, :resolved] # @overload a_future.state - # Both :success, :failed implies :completed. - # @return [:pending, :success, :failed] + # Both :fulfilled, :rejected implies :resolved. + # @return [:pending, :fulfilled, :rejected] def state internal_state.to_sym end @@ -475,13 +476,13 @@ def state # Is it in pending state? # @return [Boolean] def pending?(state = internal_state) - !state.completed? + !state.resolved? end - # Is it in completed state? + # Is it in resolved state? # @return [Boolean] - def completed?(state = internal_state) - state.completed? + def resolved?(state = internal_state) + state.resolved? end # @deprecated @@ -490,7 +491,7 @@ def unscheduled? end # Propagates touch. Requests all the delayed futures, which it depends on, to be - # executed. This method is called by any other method requiring completeness, like {#wait}. + # executed. This method is called by any other method requiring resolved state, like {#wait}. 
# @return [self] def touch # distribute touch to promise only once @@ -502,15 +503,15 @@ def touch # Calls {AbstractEventFuture#touch}. # @!macro [new] promises.method.wait - # Wait (block the Thread) until receiver is {#completed?}. + # Wait (block the Thread) until receiver is {#resolved?}. # @!macro promises.touches # # @!macro promises.warn.blocks # @!macro promises.param.timeout # @return [Future, true, false] self implies timeout was not used, true implies timeout was used - # and it was completed, false implies it was not completed within timeout. + # and it was resolved, false implies it was not resolved within timeout. def wait(timeout = nil) - result = wait_until_complete(timeout) + result = wait_until_resolved(timeout) timeout ? result : self end @@ -518,8 +519,8 @@ def wait(timeout = nil) # @return [Executor] default executor # @see #with_default_executor # @see FactoryMethods#future_on - # @see FactoryMethods#completable_future - # @see FactoryMethods#any_successful_future_on + # @see FactoryMethods#resolvable_future + # @see FactoryMethods#any_fulfilled_future_on # @see similar def default_executor @DefaultExecutor @@ -531,7 +532,7 @@ def chain(*args, &task) chain_using @DefaultExecutor, *args, &task end - # Chains the task to be executed asynchronously on executor after it is completed. + # Chains the task to be executed asynchronously on executor after it is resolved. # # @!macro promises.param.executor # @!macro promises.param.args @@ -541,7 +542,7 @@ def chain(*args, &task) # @overload an_event.chain_using(executor, *args, &task) # @yield [*args] to the task. # @overload a_future.chain_using(executor, *args, &task) - # @yield [success, value, reason, *args] to the task. + # @yield [fulfilled?, value, reason, *args] to the task. 
def chain_using(executor, *args, &task) ChainPromise.new(self, @DefaultExecutor, executor, args, &task).future end @@ -560,54 +561,54 @@ def inspect # @deprecated def set(*args, &block) - raise 'Use CompletableEvent#complete or CompletableFuture#complete instead, ' + - 'constructed by Promises.completable_event or Promises.completable_future respectively.' + raise 'Use ResolvableEvent#resolve or ResolvableFuture#resolve instead, ' + + 'constructed by Promises.resolvable_event or Promises.resolvable_future respectively.' end - # Completes the completable when receiver is completed. + # Resolves the resolvable when receiver is resolved. # - # @param [Completable] completable + # @param [Resolvable] resolvable # @return [self] - def chain_completable(completable) - on_completion! { completable.complete_with internal_state } + def chain_resolvable(resolvable) + on_resolution! { resolvable.resolve_with internal_state } end - alias_method :tangle, :chain_completable + alias_method :tangle, :chain_resolvable # @!macro promises.shortcut.using # @return [self] - def on_completion(*args, &callback) - on_completion_using @DefaultExecutor, *args, &callback + def on_resolution(*args, &callback) + on_resolution_using @DefaultExecutor, *args, &callback end - # Stores the callback to be executed synchronously on completing thread after it is - # completed. + # Stores the callback to be executed synchronously on resolving thread after it is + # resolved. # # @!macro promises.param.args # @!macro promise.param.callback # @return [self] # - # @overload an_event.on_completion!(*args, &callback) + # @overload an_event.on_resolution!(*args, &callback) # @yield [*args] to the callback. - # @overload a_future.on_completion!(*args, &callback) - # @yield [success, value, reason, *args] to the callback. 
- def on_completion!(*args, &callback) - add_callback :callback_on_completion, args, callback + # @overload a_future.on_resolution!(*args, &callback) + # @yield [fulfilled?, value, reason, *args] to the callback. + def on_resolution!(*args, &callback) + add_callback :callback_on_resolution, args, callback end - # Stores the callback to be executed asynchronously on executor after it is completed. + # Stores the callback to be executed asynchronously on executor after it is resolved. # # @!macro promises.param.executor # @!macro promises.param.args # @!macro promise.param.callback # @return [self] # - # @overload an_event.on_completion_using(executor, *args, &callback) + # @overload an_event.on_resolution_using(executor, *args, &callback) # @yield [*args] to the callback. - # @overload a_future.on_completion_using(executor, *args, &callback) - # @yield [success, value, reason, *args] to the callback. - def on_completion_using(executor, *args, &callback) - add_callback :async_callback_on_completion, executor, args, callback + # @overload a_future.on_resolution_using(executor, *args, &callback) + # @yield [fulfilled?, value, reason, *args] to the callback. 
+ def on_resolution_using(executor, *args, &callback) + add_callback :async_callback_on_resolution, executor, args, callback end # @!macro [new] promises.method.with_default_executor @@ -620,13 +621,13 @@ def with_default_executor(executor) end # @!visibility private - def complete_with(state, raise_on_reassign = true) + def resolve_with(state, raise_on_reassign = true) if compare_and_set_internal_state(PENDING, state) # go to synchronized block only if there were waiting threads @Lock.synchronize { @Condition.broadcast } unless @Waiters.value == 0 call_callbacks state else - return failed_complete(raise_on_reassign, state) + return rejected_resolution(raise_on_reassign, state) end self end @@ -667,13 +668,13 @@ def waiting_threads # @!visibility private def add_callback(method, *args) state = internal_state - if completed?(state) + if resolved?(state) call_callback method, state, *args else @Callbacks.push [method, *args] state = internal_state - # take back if it was completed in the meanwhile - call_callbacks state if completed?(state) + # take back if it was resolved in the meanwhile + call_callbacks state if resolved?(state) end self end @@ -681,14 +682,15 @@ def add_callback(method, *args) private # @return [Boolean] - def wait_until_complete(timeout) - return true if completed? + def wait_until_resolved(timeout) + return true if resolved? touch @Lock.synchronize do + @Waiters.increment begin - unless completed? + unless resolved? @Condition.wait @Lock, timeout end ensure @@ -696,7 +698,7 @@ def wait_until_complete(timeout) @Waiters.decrement end end - completed? + resolved? 
end def call_callback(method, state, *args) @@ -715,19 +717,19 @@ def with_async(executor, *args, &block) Concurrent.executor(executor).post(*args, &block) end - def async_callback_on_completion(state, executor, args, callback) + def async_callback_on_resolution(state, executor, args, callback) with_async(executor, state, args, callback) do |st, ar, cb| - callback_on_completion st, ar, cb + callback_on_resolution st, ar, cb end end def callback_notify_blocked(state, promise) - promise.on_done self + promise.on_resolution self end end - # Represents an event which will happen in future (will be completed). The event is either - # pending or completed. It should be always completed. Use {Future} to communicate failures and + # Represents an event which will happen in future (will be resolved). The event is either + # pending or resolved. It should be always resolved. Use {Future} to communicate rejections and # cancellation. class Event < AbstractEventFuture @@ -735,7 +737,7 @@ class Event < AbstractEventFuture # @!macro [new] promises.method.zip - # Creates a new event or a future which will be completed when receiver and other are. + # Creates a new event or a future which will be resolved when receiver and other are. # Returns an event if receiver and other are events, otherwise returns a future. # If just one of the parties is Future then the result # of the returned future is equal to the result of the supplied future. If both are futures @@ -752,12 +754,12 @@ def zip(other) alias_method :&, :zip - # Creates a new event which will be completed when the first of receiver, `event_or_future` - # completes. + # Creates a new event which will be resolved when the first of receiver, `event_or_future` + # resolves. 
# # @return [Event] def any(event_or_future) - AnyCompleteEventPromise.new([self, event_or_future], @DefaultExecutor).event + AnyResolvedEventPromise.new([self, event_or_future], @DefaultExecutor).event end alias_method :|, :any @@ -774,7 +776,7 @@ def delay # @!macro [new] promise.method.schedule # Creates new event dependent on receiver scheduled to execute on/in intended_time. - # In time is interpreted from the moment the receiver is completed, therefore it inserts + # In time is interpreted from the moment the receiver is resolved, therefore it inserts # delay into the chain. # # @!macro promises.param.intended_time @@ -797,35 +799,35 @@ def with_default_executor(executor) private - def failed_complete(raise_on_reassign, state) - Concurrent::MultipleAssignmentError.new('Event can be completed only once') if raise_on_reassign + def rejected_resolution(raise_on_reassign, state) + Concurrent::MultipleAssignmentError.new('Event can be resolved only once') if raise_on_reassign return false end - def callback_on_completion(state, args, callback) + def callback_on_resolution(state, args, callback) callback.call *args end end - # Represents a value which will become available in future. May fail with a reason instead, + # Represents a value which will become available in future. May reject with a reason instead, # e.g. when the tasks raises an exception. class Future < AbstractEventFuture - # Is it in success state? + # Is it in fulfilled state? # @return [Boolean] - def success?(state = internal_state) - state.completed? && state.success? + def fulfilled?(state = internal_state) + state.resolved? && state.fulfilled? end - # Is it in failed state? + # Is it in rejected state? # @return [Boolean] - def failed?(state = internal_state) - state.completed? && !state.success? + def rejected?(state = internal_state) + state.resolved? && !state.fulfilled? 
end # @!macro [new] promises.warn.nil - # @note Make sure returned `nil` is not confused with timeout, no value when failed, - # no reason when successful, etc. + # @note Make sure returned `nil` is not confused with timeout, no value when rejected, + # no reason when fulfilled, etc. # Use more exact methods if needed, like {#wait}, {#value!}, {#result}, etc. # @!macro [new] promises.method.value @@ -835,53 +837,53 @@ def failed?(state = internal_state) # @!macro promises.warn.blocks # @!macro promises.warn.nil # @!macro promises.param.timeout - # @return [Object, nil] the value of the Future when successful, nil on timeout or failure. + # @return [Object, nil] the value of the Future when fulfilled, nil on timeout or rejection. def value(timeout = nil) - internal_state.value if wait_until_complete timeout + internal_state.value if wait_until_resolved timeout end - # Returns reason of future's failure. + # Returns reason of future's rejection. # @!macro promises.touches # # @!macro promises.warn.blocks # @!macro promises.warn.nil # @!macro promises.param.timeout - # @return [Exception, nil] nil on timeout or success. + # @return [Exception, nil] nil on timeout or fulfillment. def reason(timeout = nil) - internal_state.reason if wait_until_complete timeout + internal_state.reason if wait_until_resolved timeout end - # Returns triplet success?, value, reason. + # Returns triplet fulfilled?, value, reason. # @!macro promises.touches # # @!macro promises.warn.blocks # @!macro promises.param.timeout - # @return [Array(Boolean, Object, Exception), nil] triplet of success?, value, reason, or nil + # @return [Array(Boolean, Object, Exception), nil] triplet of fulfilled?, value, reason, or nil # on timeout. 
def result(timeout = nil) - internal_state.result if wait_until_complete timeout + internal_state.result if wait_until_resolved timeout end # @!macro promises.method.wait - # @raise [Exception] {#reason} on failure + # @raise [Exception] {#reason} on rejection def wait!(timeout = nil) - result = wait_until_complete!(timeout) + result = wait_until_resolved!(timeout) timeout ? result : self end # @!macro promises.method.value - # @return [Object, nil] the value of the Future when successful, nil on timeout. - # @raise [Exception] {#reason} on failure + # @return [Object, nil] the value of the Future when fulfilled, nil on timeout. + # @raise [Exception] {#reason} on rejection def value!(timeout = nil) - internal_state.value if wait_until_complete! timeout + internal_state.value if wait_until_resolved! timeout end - # Allows failed Future to be risen with `raise` method. + # Allows rejected Future to be risen with `raise` method. # @example - # raise Promises.failed_future(StandardError.new("boom")) - # @raise [StandardError] when raising not failed future + # raise Promises.rejected_future(StandardError.new("boom")) + # @raise [StandardError] when raising not rejected future def exception(*args) - raise Concurrent::Error, 'it is not failed' unless failed? + raise Concurrent::Error, 'it is not rejected' unless rejected? reason = internal_state.reason if reason.is_a?(::Array) # TODO (pitr-ch 12-Jun-2016): remove logging!, how? @@ -898,8 +900,8 @@ def then(*args, &task) then_using @DefaultExecutor, *args, &task end - # Chains the task to be executed asynchronously on executor after it succeeds. Does not run - # the task if it fails. It will complete though, triggering any dependent futures. + # Chains the task to be executed asynchronously on executor after it fulfills. Does not run + # the task if it rejects. It will resolve though, triggering any dependent futures. 
# # @!macro promises.param.executor # @!macro promises.param.args @@ -916,8 +918,8 @@ def rescue(*args, &task) rescue_using @DefaultExecutor, *args, &task end - # Chains the task to be executed asynchronously on executor after it fails. Does not run - # the task if it succeeds. It will complete though, triggering any dependent futures. + # Chains the task to be executed asynchronously on executor after it rejects. Does not run + # the task if it fulfills. It will resolve though, triggering any dependent futures. # # @!macro promises.param.executor # @!macro promises.param.args @@ -940,13 +942,13 @@ def zip(other) alias_method :&, :zip - # Creates a new event which will be completed when the first of receiver, `event_or_future` - # completes. Returning future will have value nil if event_or_future is event and completes + # Creates a new event which will be resolved when the first of receiver, `event_or_future` + # resolves. Returning future will have value nil if event_or_future is event and resolves # first. # # @return [Future] def any(event_or_future) - AnyCompleteFuturePromise.new([self, event_or_future], @DefaultExecutor).future + AnyResolvedFuturePromise.new([self, event_or_future], @DefaultExecutor).future end alias_method :|, :any @@ -978,7 +980,7 @@ def with_default_executor(executor) end # Creates new future which will have result of the future returned by receiver. If receiver - # fails it will have its failure. + # rejects it will have its rejection. # # @param [Integer] level how many levels of futures should flatten # @return [Future] @@ -988,8 +990,8 @@ def flat_future(level = 1) alias_method :flat, :flat_future - # Creates new event which will be completed when the returned event by receiver is. - # Be careful if the receiver fails it will just complete since Event does not hold reason. + # Creates new event which will be resolved when the returned event by receiver is. 
+ # Be careful if the receiver rejects it will just resolve since Event does not hold reason. # # @return [Event] def flat_event @@ -998,65 +1000,65 @@ def flat_event # @!macro promises.shortcut.using # @return [self] - def on_success(*args, &callback) - on_success_using @DefaultExecutor, *args, &callback + def on_fulfillment(*args, &callback) + on_fulfillment_using @DefaultExecutor, *args, &callback end - # Stores the callback to be executed synchronously on completing thread after it is - # successful. Does nothing on failure. + # Stores the callback to be executed synchronously on resolving thread after it is + # fulfilled. Does nothing on rejection. # # @!macro promises.param.args # @!macro promise.param.callback # @return [self] # @yield [value *args] to the callback. - def on_success!(*args, &callback) - add_callback :callback_on_success, args, callback + def on_fulfillment!(*args, &callback) + add_callback :callback_on_fulfillment, args, callback end # Stores the callback to be executed asynchronously on executor after it is - # successful. Does nothing on failure. + # fulfilled. Does nothing on rejection. # # @!macro promises.param.executor # @!macro promises.param.args # @!macro promise.param.callback # @return [self] # @yield [value *args] to the callback. - def on_success_using(executor, *args, &callback) - add_callback :async_callback_on_success, executor, args, callback + def on_fulfillment_using(executor, *args, &callback) + add_callback :async_callback_on_fulfillment, executor, args, callback end # @!macro promises.shortcut.using # @return [self] - def on_failure(*args, &callback) - on_failure_using @DefaultExecutor, *args, &callback + def on_rejection(*args, &callback) + on_rejection_using @DefaultExecutor, *args, &callback end - # Stores the callback to be executed synchronously on completing thread after it is - # failed. Does nothing on success. + # Stores the callback to be executed synchronously on resolving thread after it is + # rejected. 
Does nothing on fulfillment. # # @!macro promises.param.args # @!macro promise.param.callback # @return [self] # @yield [reason *args] to the callback. - def on_failure!(*args, &callback) - add_callback :callback_on_failure, args, callback + def on_rejection!(*args, &callback) + add_callback :callback_on_rejection, args, callback end # Stores the callback to be executed asynchronously on executor after it is - # failed. Does nothing on success. + # rejected. Does nothing on fulfillment. # # @!macro promises.param.executor # @!macro promises.param.args # @!macro promise.param.callback # @return [self] # @yield [reason *args] to the callback. - def on_failure_using(executor, *args, &callback) - add_callback :async_callback_on_failure, executor, args, callback + def on_rejection_using(executor, *args, &callback) + add_callback :async_callback_on_rejection, executor, args, callback end # Allows to use futures as green threads. The receiver has to evaluate to a future which # represents what should be done next. It basically flattens indefinitely until non Future - # values is returned which becomes result of the returned future. Any ancountered exception + # values is returned which becomes result of the returned future. Any encountered exception # will become reason of the returned future. # # @return [Future] @@ -1077,7 +1079,7 @@ def apply(args, block) private - def failed_complete(raise_on_reassign, state) + def rejected_resolution(raise_on_reassign, state) if raise_on_reassign # TODO (pitr-ch 12-Jun-2016): remove logging?! # print otherwise hidden error @@ -1085,107 +1087,107 @@ def failed_complete(raise_on_reassign, state) log ERROR, 'Promises::Future', state.reason if state.reason raise(Concurrent::MultipleAssignmentError.new( - "Future can be completed only once. Current result is #{result}, " + + "Future can be resolved only once. 
Current result is #{result}, " + "trying to set #{state.result}")) end return false end - def wait_until_complete!(timeout = nil) - result = wait_until_complete(timeout) - raise self if failed? + def wait_until_resolved!(timeout = nil) + result = wait_until_resolved(timeout) + raise self if rejected? result end - def async_callback_on_success(state, executor, args, callback) + def async_callback_on_fulfillment(state, executor, args, callback) with_async(executor, state, args, callback) do |st, ar, cb| - callback_on_success st, ar, cb + callback_on_fulfillment st, ar, cb end end - def async_callback_on_failure(state, executor, args, callback) + def async_callback_on_rejection(state, executor, args, callback) with_async(executor, state, args, callback) do |st, ar, cb| - callback_on_failure st, ar, cb + callback_on_rejection st, ar, cb end end - def callback_on_success(state, args, callback) - state.apply args, callback if state.success? + def callback_on_fulfillment(state, args, callback) + state.apply args, callback if state.fulfilled? end - def callback_on_failure(state, args, callback) - state.apply args, callback unless state.success? + def callback_on_rejection(state, args, callback) + state.apply args, callback unless state.fulfilled? end - def callback_on_completion(state, args, callback) + def callback_on_resolution(state, args, callback) callback.call state.result, *args end end - # Marker module of Future, Event completed manually by user. - module Completable + # Marker module of Future, Event resolved manually by user. + module Resolvable end - # A Event which can be completed by user. - class CompletableEvent < Event - include Completable + # A Event which can be resolved by user. + class ResolvableEvent < Event + include Resolvable # @!macro [new] raise_on_reassign - # @raise [MultipleAssignmentError] when already completed and raise_on_reassign is true. + # @raise [MultipleAssignmentError] when already resolved and raise_on_reassign is true. 
# @!macro [new] promise.param.raise_on_reassign - # @param [Boolean] raise_on_reassign should method raise exception if already completed + # @param [Boolean] raise_on_reassign should method raise exception if already resolved # @return [self, false] false is returner when raise_on_reassign is false and the receiver - # is already completed. + # is already resolved. # - # Makes the event complete, which triggers all dependent futures. + # Makes the event resolved, which triggers all dependent futures. # # @!macro promise.param.raise_on_reassign - def complete(raise_on_reassign = true) - complete_with COMPLETED, raise_on_reassign + def resolve(raise_on_reassign = true) + resolve_with RESOLVED, raise_on_reassign end - # Creates new event wrapping receiver, effectively hiding the complete method. + # Creates new event wrapping receiver, effectively hiding the resolve method. # # @return [Event] - def with_hidden_completable - @with_hidden_completable ||= EventWrapperPromise.new(self, @DefaultExecutor).event + def with_hidden_resolvable + @with_hidden_resolvable ||= EventWrapperPromise.new(self, @DefaultExecutor).event end end - # A Future which can be completed by user. - class CompletableFuture < Future - include Completable + # A Future which can be resolved by user. + class ResolvableFuture < Future + include Resolvable - # Makes the future complete with result of triplet `success`, `value`, `reason`, + # Makes the future resolved with result of triplet `fulfilled?`, `value`, `reason`, # which triggers all dependent futures. # # @!macro promise.param.raise_on_reassign - def complete(success, value, reason, raise_on_reassign = true) - complete_with(success ? Success.new(value) : Failed.new(reason), raise_on_reassign) + def resolve(fulfilled, value, reason, raise_on_reassign = true) + resolve_with(fulfilled ? 
Fulfilled.new(value) : Rejected.new(reason), raise_on_reassign) end - # Makes the future successful with `value`, + # Makes the future fulfilled with `value`, # which triggers all dependent futures. # # @!macro promise.param.raise_on_reassign - def succeed(value, raise_on_reassign = true) - promise.succeed(value, raise_on_reassign) + def fulfill(value, raise_on_reassign = true) + promise.fulfill(value, raise_on_reassign) end - # Makes the future failed with `reason`, + # Makes the future rejected with `reason`, # which triggers all dependent futures. # # @!macro promise.param.raise_on_reassign - def fail(reason, raise_on_reassign = true) - promise.fail(reason, raise_on_reassign) + def reject(reason, raise_on_reassign = true) + promise.reject(reason, raise_on_reassign) end - # Evaluates the block and sets its result as future's value succeeding, if the block raises - # an exception the future fails with it. + # Evaluates the block and sets its result as future's value fulfilling, if the block raises + # an exception the future rejects with it. # @yield [*args] to the block. # @yieldreturn [Object] value # @return [self] @@ -1194,21 +1196,21 @@ def evaluate_to(*args, &block) promise.evaluate_to(*args, block) end - # Evaluates the block and sets its result as future's value succeeding, if the block raises - # an exception the future fails with it. + # Evaluates the block and sets its result as future's value fulfilling, if the block raises + # an exception the future rejects with it. # @yield [*args] to the block. # @yieldreturn [Object] value # @return [self] - # @raise [Exception] also raise reason on failure. + # @raise [Exception] also raise reason on rejection. def evaluate_to!(*args, &block) promise.evaluate_to!(*args, block) end - # Creates new future wrapping receiver, effectively hiding the complete method and similar. + # Creates new future wrapping receiver, effectively hiding the resolve method and similar. 
# # @return [Future] - def with_hidden_completable - @with_hidden_completable ||= FutureWrapperPromise.new(self, @DefaultExecutor).future + def with_hidden_resolvable + @with_hidden_resolvable ||= FutureWrapperPromise.new(self, @DefaultExecutor).future end end @@ -1251,41 +1253,41 @@ def inspect private - def complete_with(new_state, raise_on_reassign = true) - @Future.complete_with(new_state, raise_on_reassign) + def resolve_with(new_state, raise_on_reassign = true) + @Future.resolve_with(new_state, raise_on_reassign) end # @return [Future] def evaluate_to(*args, block) - complete_with Success.new(block.call(*args)) + resolve_with Fulfilled.new(block.call(*args)) rescue StandardError => error - complete_with Failed.new(error) + resolve_with Rejected.new(error) rescue Exception => error # TODO (pitr-ch 12-Jun-2016): remove logging? log(ERROR, 'Promises::Future', error) - complete_with Failed.new(error) + resolve_with Rejected.new(error) end end - class CompletableEventPromise < AbstractPromise + class ResolvableEventPromise < AbstractPromise def initialize(default_executor) - super CompletableEvent.new(self, default_executor) + super ResolvableEvent.new(self, default_executor) end end - class CompletableFuturePromise < AbstractPromise + class ResolvableFuturePromise < AbstractPromise def initialize(default_executor) - super CompletableFuture.new(self, default_executor) + super ResolvableFuture.new(self, default_executor) end # @!visibility private - def succeed(value, raise_on_reassign) - complete_with Success.new(value), raise_on_reassign + def fulfill(value, raise_on_reassign) + resolve_with Fulfilled.new(value), raise_on_reassign end # @!visibility private - def fail(reason, raise_on_reassign) - complete_with Failed.new(reason), raise_on_reassign + def reject(reason, raise_on_reassign) + resolve_with Rejected.new(reason), raise_on_reassign end # @!visibility private @@ -1317,14 +1319,14 @@ def initialize(future, blocked_by_futures, countdown) end # @!visibility 
private - def on_done(future) - countdown = process_on_done(future) - completable = completable?(countdown, future) + def on_resolution(future) + countdown = process_on_resolution(future) + resolvable = resolvable?(countdown, future) - if completable - on_completable(future) + if resolvable + on_resolvable(future) # futures could be deleted from blocked_by one by one here, but that would be too expensive, - # it's done once when all are done to free the reference + # it's done once when all are resolved to free their references clear_blocked_by! end end @@ -1361,16 +1363,16 @@ def clear_blocked_by! nil end - # @return [true,false] if completable - def completable?(countdown, future) + # @return [true,false] if resolvable + def resolvable?(countdown, future) countdown.zero? end - def process_on_done(future) + def process_on_resolution(future) @Countdown.decrement end - def on_completable(done_future) + def on_resolvable(resolved_future) raise NotImplementedError end end @@ -1399,13 +1401,13 @@ def initialize(blocked_by_future, default_executor, executor, args, &task) super blocked_by_future, default_executor, executor, args, &task end - def on_completable(done_future) - if done_future.success? - Concurrent.executor(@Executor).post(done_future, @Args, @Task) do |future, args, task| + def on_resolvable(resolved_future) + if resolved_future.fulfilled? + Concurrent.executor(@Executor).post(resolved_future, @Args, @Task) do |future, args, task| evaluate_to lambda { future.apply args, task } end else - complete_with done_future.internal_state + resolve_with resolved_future.internal_state end end end @@ -1417,13 +1419,13 @@ def initialize(blocked_by_future, default_executor, executor, args, &task) super blocked_by_future, default_executor, executor, args, &task end - def on_completable(done_future) - if done_future.failed? 
- Concurrent.executor(@Executor).post(done_future, @Args, @Task) do |future, args, task| + def on_resolvable(resolved_future) + if resolved_future.rejected? + Concurrent.executor(@Executor).post(resolved_future, @Args, @Task) do |future, args, task| evaluate_to lambda { future.apply args, task } end else - complete_with done_future.internal_state + resolve_with resolved_future.internal_state end end end @@ -1431,9 +1433,9 @@ def on_completable(done_future) class ChainPromise < BlockedTaskPromise private - def on_completable(done_future) - if Future === done_future - Concurrent.executor(@Executor).post(done_future, @Args, @Task) do |future, args, task| + def on_resolvable(resolved_future) + if Future === resolved_future + Concurrent.executor(@Executor).post(resolved_future, @Args, @Task) do |future, args, task| evaluate_to(*future.result, *args, task) end else @@ -1444,17 +1446,17 @@ def on_completable(done_future) end end - # will be immediately completed + # will be immediately resolved class ImmediateEventPromise < InnerPromise def initialize(default_executor) - super Event.new(self, default_executor).complete_with(COMPLETED) + super Event.new(self, default_executor).resolve_with(RESOLVED) end end class ImmediateFuturePromise < InnerPromise - def initialize(default_executor, success, value, reason) + def initialize(default_executor, fulfilled, value, reason) super Future.new(self, default_executor). - complete_with(success ? Success.new(value) : Failed.new(reason)) + resolve_with(fulfilled ? Fulfilled.new(value) : Rejected.new(reason)) end end @@ -1470,8 +1472,8 @@ def initialize_blocked_by(blocked_by_future) @BlockedBy = LockFreeStack.new.push(blocked_by_future) end - def on_completable(done_future) - complete_with done_future.internal_state + def on_resolvable(resolved_future) + resolve_with resolved_future.internal_state end def clear_blocked_by! @@ -1479,8 +1481,8 @@ def clear_blocked_by! 
nil end - def completable?(countdown, future) - !@Future.internal_state.completed? && super(countdown, future) + def resolvable?(countdown, future) + !@Future.internal_state.resolved? && super(countdown, future) end end @@ -1492,13 +1494,13 @@ def initialize(blocked_by_future, default_executor) super Event.new(self, default_executor), blocked_by_future, 2 end - def process_on_done(future) + def process_on_resolution(future) countdown = super(future) if countdown.nonzero? internal_state = future.internal_state - unless internal_state.success? - complete_with COMPLETED + unless internal_state.fulfilled? + resolve_with RESOLVED return countdown end @@ -1509,7 +1511,7 @@ def process_on_done(future) value.add_callback :callback_notify_blocked, self @Countdown.value else - complete_with COMPLETED + resolve_with RESOLVED end end countdown @@ -1526,13 +1528,13 @@ def initialize(blocked_by_future, levels, default_executor) super Future.new(self, default_executor), blocked_by_future, 1 + levels end - def process_on_done(future) + def process_on_resolution(future) countdown = super(future) if countdown.nonzero? internal_state = future.internal_state - unless internal_state.success? - complete_with internal_state + unless internal_state.fulfilled? + resolve_with internal_state return countdown end @@ -1561,11 +1563,11 @@ def initialize(blocked_by_future, default_executor) super Future.new(self, default_executor), blocked_by_future, 1 end - def process_on_done(future) + def process_on_resolution(future) internal_state = future.internal_state - unless internal_state.success? - complete_with internal_state + unless internal_state.fulfilled? 
+ resolve_with internal_state return 0 end @@ -1575,7 +1577,7 @@ def process_on_done(future) # @BlockedBy.push value value.add_callback :callback_notify_blocked, self else - complete_with internal_state + resolve_with internal_state end 1 @@ -1589,8 +1591,8 @@ def initialize(event1, event2, default_executor) private - def on_completable(done_future) - complete_with COMPLETED + def on_resolvable(resolved_future) + resolve_with RESOLVED end end @@ -1602,8 +1604,8 @@ def initialize(future, event, default_executor) private - def on_completable(done_future) - complete_with @FutureResult.internal_state + def on_resolvable(resolved_future) + resolve_with @FutureResult.internal_state end end @@ -1616,16 +1618,16 @@ def initialize(future1, future2, default_executor) private - def on_completable(done_future) - success1, value1, reason1 = @Future1Result.result - success2, value2, reason2 = @Future2Result.result - success = success1 && success2 - new_state = if success - SuccessArray.new([value1, value2]) - else - PartiallyFailed.new([value1, value2], [reason1, reason2]) - end - complete_with new_state + def on_resolvable(resolved_future) + fulfilled1, value1, reason1 = @Future1Result.result + fulfilled2, value2, reason2 = @Future2Result.result + fulfilled = fulfilled1 && fulfilled2 + new_state = if fulfilled + FulfilledArray.new([value1, value2]) + else + PartiallyRejected.new([value1, value2], [reason1, reason2]) + end + resolve_with new_state end end @@ -1636,8 +1638,8 @@ def initialize(event, default_executor) private - def on_completable(done_future) - complete_with COMPLETED + def on_resolvable(resolved_future) + resolve_with RESOLVED end end @@ -1648,8 +1650,8 @@ def initialize(future, default_executor) private - def on_completable(done_future) - complete_with done_future.internal_state + def on_resolvable(resolved_future) + resolve_with resolved_future.internal_state end end @@ -1660,27 +1662,27 @@ class ZipFuturesPromise < BlockedPromise def 
initialize(blocked_by_futures, default_executor) super(Future.new(self, default_executor), blocked_by_futures, blocked_by_futures.size) - on_completable nil if blocked_by_futures.empty? + on_resolvable nil if blocked_by_futures.empty? end - def on_completable(done_future) - all_success = true - values = Array.new(blocked_by.size) - reasons = Array.new(blocked_by.size) + def on_resolvable(resolved_future) + all_fulfilled = true + values = Array.new(blocked_by.size) + reasons = Array.new(blocked_by.size) blocked_by.each_with_index do |future, i| if future.is_a?(Future) - success, values[i], reasons[i] = future.result - all_success &&= success + fulfilled, values[i], reasons[i] = future.result + all_fulfilled &&= fulfilled else values[i] = reasons[i] = nil end end - if all_success - complete_with SuccessArray.new(values) + if all_fulfilled + resolve_with FulfilledArray.new(values) else - complete_with PartiallyFailed.new(values, reasons) + resolve_with PartiallyRejected.new(values, reasons) end end end @@ -1692,11 +1694,11 @@ class ZipEventsPromise < BlockedPromise def initialize(blocked_by_futures, default_executor) super(Event.new(self, default_executor), blocked_by_futures, blocked_by_futures.size) - on_completable nil if blocked_by_futures.empty? + on_resolvable nil if blocked_by_futures.empty? end - def on_completable(done_future) - complete_with COMPLETED + def on_resolvable(resolved_future) + resolve_with RESOLVED end end @@ -1704,11 +1706,11 @@ def on_completable(done_future) class AbstractAnyPromise < BlockedPromise # @!visibility private def touch - blocked_by.each(&:touch) unless @Future.completed? + blocked_by.each(&:touch) unless @Future.resolved? 
end end - class AnyCompleteFuturePromise < AbstractAnyPromise + class AnyResolvedFuturePromise < AbstractAnyPromise private @@ -1716,16 +1718,16 @@ def initialize(blocked_by_futures, default_executor) super(Future.new(self, default_executor), blocked_by_futures, blocked_by_futures.size) end - def completable?(countdown, future) + def resolvable?(countdown, future) true end - def on_completable(done_future) - complete_with done_future.internal_state, false + def on_resolvable(resolved_future) + resolve_with resolved_future.internal_state, false end end - class AnyCompleteEventPromise < AbstractAnyPromise + class AnyResolvedEventPromise < AbstractAnyPromise private @@ -1733,21 +1735,21 @@ def initialize(blocked_by_futures, default_executor) super(Event.new(self, default_executor), blocked_by_futures, blocked_by_futures.size) end - def completable?(countdown, future) + def resolvable?(countdown, future) true end - def on_completable(done_future) - complete_with COMPLETED, false + def on_resolvable(resolved_future) + resolve_with RESOLVED, false end end - class AnySuccessfulFuturePromise < AnyCompleteFuturePromise + class AnyFulfilledFuturePromise < AnyResolvedFuturePromise private - def completable?(countdown, future) - future.success? || + def resolvable?(countdown, future) + future.fulfilled? || # inlined super from BlockedPromise countdown.zero? 
end @@ -1756,7 +1758,7 @@ def completable?(countdown, future) class DelayPromise < InnerPromise # @!visibility private def touch - @Future.complete_with COMPLETED + @Future.resolve_with RESOLVED end private @@ -1795,7 +1797,7 @@ def initialize(default_executor, intended_time) end Concurrent.global_timer_set.post(in_seconds) do - @Future.complete_with COMPLETED + @Future.resolve_with RESOLVED end end end @@ -1803,8 +1805,8 @@ def initialize(default_executor, intended_time) extend FactoryMethods private_constant :AbstractPromise, - :CompletableEventPromise, - :CompletableFuturePromise, + :ResolvableEventPromise, + :ResolvableFuturePromise, :InnerPromise, :BlockedPromise, :BlockedTaskPromise, @@ -1825,9 +1827,9 @@ def initialize(default_executor, intended_time) :ZipFuturesPromise, :ZipEventsPromise, :AbstractAnyPromise, - :AnyCompleteFuturePromise, - :AnySuccessfulFuturePromise, - :AnyCompleteEventPromise, + :AnyResolvedFuturePromise, + :AnyFulfilledFuturePromise, + :AnyResolvedEventPromise, :DelayPromise, :ScheduledPromise @@ -1873,7 +1875,7 @@ def then_select(*channels) # @note may block # @note only proof of concept def then_put(channel) - on_success(:io) { |value| channel.put value } + on_fulfillment(:io) { |value| channel.put value } end # Asks the actor with its value. @@ -1905,17 +1907,17 @@ def each_body(value, &block) class Cancellation < Synchronization::Object safe_initialization! 
- def self.create(future_or_event = Promises.completable_event, *complete_args) - [(i = new(future_or_event, *complete_args)), i.token] + def self.create(future_or_event = Promises.resolvable_event, *resolve_args) + [(i = new(future_or_event, *resolve_args)), i.token] end private_class_method :new - def initialize(future, *complete_args) - raise ArgumentError, 'future is not Completable' unless future.is_a?(Promises::Completable) - @Cancel = future - @Token = Token.new @Cancel.with_hidden_completable - @CompleteArgs = complete_args + def initialize(future, *resolve_args) + raise ArgumentError, 'future is not Resolvable' unless future.is_a?(Promises::Resolvable) + @Cancel = future + @Token = Token.new @Cancel.with_hidden_resolvable + @ResolveArgs = resolve_args end def token @@ -1923,11 +1925,11 @@ def token end def cancel(raise_on_repeated_call = true) - !!@Cancel.complete(*@CompleteArgs, raise_on_repeated_call) + !!@Cancel.resolve(*@ResolveArgs, raise_on_repeated_call) end def canceled? - @Cancel.completed? + @Cancel.resolved? end class Token < Synchronization::Object @@ -1944,7 +1946,7 @@ def event alias_method :future, :event def on_cancellation(*args, &block) - @Cancel.on_completion *args, &block + @Cancel.on_resolution *args, &block end def then(*args, &block) @@ -1952,7 +1954,7 @@ def then(*args, &block) end def canceled? - @Cancel.completed? + @Cancel.resolved? end def loop_until_canceled(&block) @@ -1994,7 +1996,7 @@ def initialize(max) def limit(ready = nil, &block) # TODO (pitr-ch 11-Jun-2016): triggers should allocate resources when they are to be required if block_given? - block.call(get_event).on_completion! { done } + block.call(get_event).on_resolution! 
{ done } else get_event end @@ -2004,7 +2006,7 @@ def done while true current_can_run = can_run if compare_and_set_can_run current_can_run, current_can_run + 1 - @Queue.pop.complete if current_can_run < 0 + @Queue.pop.resolve if current_can_run < 0 return self end end @@ -2017,9 +2019,9 @@ def get_event current_can_run = can_run if compare_and_set_can_run current_can_run, current_can_run - 1 if current_can_run > 0 - return Promises.completed_event + return Promises.resolved_event else - e = Promises.completable_event + e = Promises.resolvable_event @Queue.push e return e end diff --git a/spec/concurrent/actor_spec.rb b/spec/concurrent/actor_spec.rb index be3b74dc5..2603a38d3 100644 --- a/spec/concurrent/actor_spec.rb +++ b/spec/concurrent/actor_spec.rb @@ -78,7 +78,7 @@ def on_message(message) it 'terminates on failed initialization' do a = AdHoc.spawn(name: :fail, logger: Concurrent::NULL_LOGGER) { raise } - expect(a.ask(nil).wait.failed?).to be_truthy + expect(a.ask(nil).wait.rejected?).to be_truthy expect(a.ask!(:terminated?)).to be_truthy end @@ -90,7 +90,7 @@ def on_message(message) it 'terminates on failed message processing' do a = AdHoc.spawn(name: :fail, logger: Concurrent::NULL_LOGGER) { -> _ { raise } } - expect(a.ask(nil).wait.failed?).to be_truthy + expect(a.ask(nil).wait.rejected?).to be_truthy expect(a.ask!(:terminated?)).to be_truthy end end @@ -133,7 +133,7 @@ def on_message(message) envelope = subject.ask!('a') expect(envelope).to be_a_kind_of Envelope expect(envelope.message).to eq 'a' - expect(envelope.future).to be_completed + expect(envelope.future).to be_resolved expect(envelope.future.value).to eq envelope expect(envelope.sender).to eq Thread.current terminate_actors subject diff --git a/spec/concurrent/promises_spec.rb b/spec/concurrent/promises_spec.rb index 073c17e44..40c5075ad 100644 --- a/spec/concurrent/promises_spec.rb +++ b/spec/concurrent/promises_spec.rb @@ -7,19 +7,19 @@ include Concurrent::Promises::FactoryMethods - describe 
'chain_completable' do + describe 'chain_resolvable' do it 'event' do - b = completable_event - a = completable_event.chain_completable(b) - a.complete - expect(b).to be_completed + b = resolvable_event + a = resolvable_event.chain_resolvable(b) + a.resolve + expect(b).to be_resolved end it 'future' do - b = completable_future - a = completable_future.chain_completable(b) - a.succeed :val - expect(b).to be_completed + b = resolvable_future + a = resolvable_future.chain_resolvable(b) + a.fulfill :val + expect(b).to be_resolved expect(b.value).to eq :val end end @@ -29,7 +29,7 @@ future = future { 1 + 1 } expect(future.value!).to eq 2 - future = successful_future(1).then { |v| v + 1 } + future = fulfilled_future(1).then { |v| v + 1 } expect(future.value!).to eq 2 end @@ -37,7 +37,7 @@ future = future(1, 2, &:+) expect(future.value!).to eq 3 - future = successful_future(1).then(1) { |v, a| v + 1 } + future = fulfilled_future(1).then(1) { |v, a| v + 1 } expect(future.value!).to eq 2 end end @@ -45,15 +45,15 @@ describe '.delay' do def behaves_as_delay(delay, value) - expect(delay.completed?).to eq false + expect(delay.resolved?).to eq false expect(delay.value!).to eq value end specify do behaves_as_delay delay { 1 + 1 }, 2 - behaves_as_delay successful_future(1).delay.then { |v| v + 1 }, 2 + behaves_as_delay fulfilled_future(1).delay.then { |v| v + 1 }, 2 behaves_as_delay delay(1) { |a| a + 1 }, 2 - behaves_as_delay successful_future(1).delay.then { |v| v + 1 }, 2 + behaves_as_delay fulfilled_future(1).delay.then { |v| v + 1 }, 2 end end @@ -69,7 +69,7 @@ def behaves_as_delay(delay, value) start = Time.now.to_f queue = Queue.new - future = completed_event. + future = resolved_event. schedule(0.1). then { 1 }. 
then { |v| queue.push(v); queue.push(Time.now.to_f - start); queue } @@ -92,65 +92,65 @@ def behaves_as_delay(delay, value) expect(queue.pop).to eq 2 expect(queue.pop).to be >= 0.09 - scheduled = completed_event.schedule(0.1) - expect(scheduled.completed?).to be_falsey + scheduled = resolved_event.schedule(0.1) + expect(scheduled.resolved?).to be_falsey scheduled.wait - expect(scheduled.completed?).to be_truthy + expect(scheduled.resolved?).to be_truthy end end describe '.event' do specify do - completable_event = completable_event() - one = completable_event.chain(1) { |arg| arg } - join = zip(completable_event).chain { 1 } - expect(one.completed?).to be false - completable_event.complete + resolvable_event = resolvable_event() + one = resolvable_event.chain(1) { |arg| arg } + join = zip(resolvable_event).chain { 1 } + expect(one.resolved?).to be false + resolvable_event.resolve expect(one.value!).to eq 1 - expect(join.wait.completed?).to be true + expect(join.wait.resolved?).to be true end end describe '.future without block' do specify do - completable_future = completable_future() - one = completable_future.then(&:succ) - join = zip_futures(completable_future).then { |v| v } - expect(one.completed?).to be false - completable_future.succeed 0 + resolvable_future = resolvable_future() + one = resolvable_future.then(&:succ) + join = zip_futures(resolvable_future).then { |v| v } + expect(one.resolved?).to be false + resolvable_future.fulfill 0 expect(one.value!).to eq 1 - expect(join.wait!.completed?).to be true + expect(join.wait!.resolved?).to be true expect(join.value!).to eq 0 end end - describe '.any_complete' do + describe '.any_resolved' do it 'continues on first result' do - f1 = completable_future - f2 = completable_future - f3 = completable_future + f1 = resolvable_future + f2 = resolvable_future + f3 = resolvable_future - any1 = any_complete_future(f1, f2) + any1 = any_resolved_future(f1, f2) any2 = f2 | f3 - f1.succeed 1 - f2.fail StandardError.new + 
f1.fulfill 1 + f2.reject StandardError.new expect(any1.value!).to eq 1 expect(any2.reason).to be_a_kind_of StandardError end end - describe '.any_successful' do + describe '.any_fulfilled' do it 'continues on first result' do - f1 = completable_future - f2 = completable_future + f1 = resolvable_future + f2 = resolvable_future - any = any_successful_future(f1, f2) + any = any_fulfilled_future(f1, f2) - f1.fail StandardError.new - f2.succeed :value + f1.reject StandardError.new + f2.fulfill :value expect(any.value!).to eq :value end @@ -200,9 +200,9 @@ def behaves_as_delay(delay, value) expect(z2.then { |a, b, c| a+b+c }.value!).to eq 6 expect(future { 1 }.delay).to be_a_kind_of Concurrent::Promises::Future - expect(future { 1 }.delay.wait!).to be_completed - expect(completable_event.complete.delay).to be_a_kind_of Concurrent::Promises::Event - expect(completable_event.complete.delay.wait).to be_completed + expect(future { 1 }.delay.wait!).to be_resolved + expect(resolvable_event.resolve.delay).to be_a_kind_of Concurrent::Promises::Event + expect(resolvable_event.resolve.delay.wait).to be_resolved a = future { 1 } b = future { raise 'b' } @@ -229,25 +229,25 @@ def behaves_as_delay(delay, value) describe '.each' do specify do - expect(successful_future(nil).each.map(&:inspect)).to eq ['nil'] - expect(successful_future(1).each.map(&:inspect)).to eq ['1'] - expect(successful_future([1, 2]).each.map(&:inspect)).to eq ['1', '2'] + expect(fulfilled_future(nil).each.map(&:inspect)).to eq ['nil'] + expect(fulfilled_future(1).each.map(&:inspect)).to eq ['1'] + expect(fulfilled_future([1, 2]).each.map(&:inspect)).to eq ['1', '2'] end end describe '.zip_events' do it 'waits for all and returns event' do - a = successful_future 1 - b = failed_future :any - c = completable_event.complete + a = fulfilled_future 1 + b = rejected_future :any + c = resolvable_event.resolve z2 = zip_events a, b, c z3 = zip_events a z4 = zip_events - expect(z2.completed?).to be_truthy - 
expect(z3.completed?).to be_truthy - expect(z4.completed?).to be_truthy + expect(z2.resolved?).to be_truthy + expect(z3.resolved?).to be_truthy + expect(z4.resolved?).to be_truthy end end @@ -255,26 +255,26 @@ def behaves_as_delay(delay, value) it 'has sync and async callbacks' do callbacks_tester = ->(future) do queue = Queue.new - future.on_completion(:io) { |result| queue.push("async on_completion #{ result.inspect }") } - future.on_completion! { |result| queue.push("sync on_completion #{ result.inspect }") } - future.on_success(:io) { |value| queue.push("async on_success #{ value.inspect }") } - future.on_success! { |value| queue.push("sync on_success #{ value.inspect }") } - future.on_failure(:io) { |reason| queue.push("async on_failure #{ reason.inspect }") } - future.on_failure! { |reason| queue.push("sync on_failure #{ reason.inspect }") } + future.on_resolution_using(:io) { |result| queue.push("async on_resolution #{ result.inspect }") } + future.on_resolution! { |result| queue.push("sync on_resolution #{ result.inspect }") } + future.on_fulfillment_using(:io) { |value| queue.push("async on_fulfillment #{ value.inspect }") } + future.on_fulfillment! { |value| queue.push("sync on_fulfillment #{ value.inspect }") } + future.on_rejection_using(:io) { |reason| queue.push("async on_rejection #{ reason.inspect }") } + future.on_rejection! 
{ |reason| queue.push("sync on_rejection #{ reason.inspect }") } future.wait [queue.pop, queue.pop, queue.pop, queue.pop].sort end callback_results = callbacks_tester.call(future { :value }) - expect(callback_results).to eq ["async on_completion [true, :value, nil]", - "async on_success :value", - "sync on_completion [true, :value, nil]", - "sync on_success :value"] + expect(callback_results).to eq ["async on_fulfillment :value", + "async on_resolution [true, :value, nil]", + "sync on_fulfillment :value", + "sync on_resolution [true, :value, nil]"] callback_results = callbacks_tester.call(future { raise 'error' }) - expect(callback_results).to eq ["async on_completion [false, nil, #]", - "async on_failure #", - "sync on_completion [false, nil, #]", - "sync on_failure #"] + expect(callback_results).to eq ["async on_rejection #", + "async on_resolution [false, nil, #]", + "sync on_rejection #", + "sync on_resolution [false, nil, #]"] end [:wait, :wait!, :value, :value!, :reason, :result].each do |method_with_timeout| @@ -289,7 +289,7 @@ def behaves_as_delay(delay, value) start_latch.wait(1) future.send(method_with_timeout, 0.1) - expect(future).not_to be_completed + expect(future).not_to be_resolved end_latch.count_down future.wait end @@ -299,19 +299,19 @@ def behaves_as_delay(delay, value) it 'chains' do future0 = future { 1 }.then { |v| v + 2 } # both executed on default FAST_EXECUTOR future1 = future0.then_using(:fast) { raise 'boo' } # executed on IO_EXECUTOR - future2 = future1.then { |v| v + 1 } # will fail with 'boo' error, executed on default FAST_EXECUTOR + future2 = future1.then { |v| v + 1 } # will reject with 'boo' error, executed on default FAST_EXECUTOR future3 = future1.rescue { |err| err.message } # executed on default FAST_EXECUTOR future4 = future0.chain { |success, value, reason| success } # executed on default FAST_EXECUTOR - future5 = future3.with_default_executor(:fast) # connects new future with different executor, the new future is completed 
when future3 is + future5 = future3.with_default_executor(:fast) # connects new future with different executor, the new future is resolved when future3 is future6 = future5.then(&:capitalize) # executes on IO_EXECUTOR because default was set to :io on future5 future7 = future0 & future3 - future8 = future0.rescue { raise 'never happens' } # future0 succeeds so future8'll have same value as future 0 + future8 = future0.rescue { raise 'never happens' } # future0 fulfills so future8'll have same value as future 0 futures = [future0, future1, future2, future3, future4, future5, future6, future7, future8] futures.each &:wait table = futures.each_with_index.map do |f, i| - '%5i %7s %10s %6s %4s %6s' % [i, f.success?, f.value, f.reason, + '%5i %7s %10s %6s %4s %6s' % [i, f.fulfilled?, f.value, f.reason, (f.promise.executor if f.promise.respond_to?(:executor)), f.default_executor] end.unshift('index success value reason pool d.pool') @@ -343,7 +343,7 @@ def behaves_as_delay(delay, value) # evaluates only up to three, four is left unevaluated expect(three.value!).to eq 3 - expect(four).not_to be_completed + expect(four).not_to be_resolved expect(four.value!).to eq 4 @@ -362,8 +362,8 @@ def behaves_as_delay(delay, value) (branch1 & branch2).then { |b1, b2| b1 + b2 }] sleep 0.1 - expect(branch1).to be_completed - expect(branch2).not_to be_completed + expect(branch1).to be_resolved + expect(branch2).not_to be_resolved expect(results.map(&:value)).to eq [5, 5, 5] expect(zip(branch1, branch2).value!).to eq [2, 3] @@ -375,22 +375,22 @@ def behaves_as_delay(delay, value) expect(f.value!).to eq 2 end - it 'propagates failure of inner future' do + it 'propagates rejection of inner future' do err = StandardError.new('boo') - f = future { failed_future(err) }.flat + f = future { rejected_future(err) }.flat expect(f.reason).to eq err end - it 'it propagates failure of the future which was suppose to provide inner future' do + it 'it propagates rejection of the future which was suppose 
to provide inner future' do f = future { raise 'boo' }.flat expect(f.reason.message).to eq 'boo' end - it 'fails if inner value is not a future' do + it 'rejects if inner value is not a future' do f = future { 'boo' }.flat expect(f.reason).to be_an_instance_of TypeError - f = future { completed_event }.flat + f = future { resolved_event }.flat expect(f.reason).to be_an_instance_of TypeError end @@ -399,12 +399,12 @@ def behaves_as_delay(delay, value) end end - it 'completes future when Exception raised' do - f = future { raise Exception, 'fail' } + it 'resolves future when Exception raised' do + f = future { raise Exception, 'reject' } f.wait 1 - expect(f).to be_completed - expect(f).to be_failed - expect { f.value! }.to raise_error(Exception, 'fail') + expect(f).to be_resolved + expect(f).to be_rejected + expect { f.value! }.to raise_error(Exception, 'reject') end it 'runs' do @@ -467,30 +467,30 @@ def behaves_as_delay(delay, value) specify do source, token = Concurrent::Cancellation.create source.cancel - expect(token.event.completed?).to be_truthy + expect(token.event.resolved?).to be_truthy cancellable_branch = Concurrent::Promises.delay { 1 } expect((cancellable_branch | token.event).value).to be_nil - expect(cancellable_branch.completed?).to be_falsey + expect(cancellable_branch.resolved?).to be_falsey end specify do source, token = Concurrent::Cancellation.create cancellable_branch = Concurrent::Promises.delay { 1 } - expect(any_complete_future(cancellable_branch, token.event).value).to eq 1 - expect(cancellable_branch.completed?).to be_truthy + expect(any_resolved_future(cancellable_branch, token.event).value).to eq 1 + expect(cancellable_branch.resolved?).to be_truthy end specify do source, token = Concurrent::Cancellation.create( - Concurrent::Promises.completable_future, false, nil, err = StandardError.new('Cancelled')) + Concurrent::Promises.resolvable_future, false, nil, err = StandardError.new('Cancelled')) source.cancel - 
expect(token.future.completed?).to be_truthy + expect(token.future.resolved?).to be_truthy cancellable_branch = Concurrent::Promises.delay { 1 } expect((cancellable_branch | token.future).reason).to eq err - expect(cancellable_branch.completed?).to be_falsey + expect(cancellable_branch.resolved?).to be_falsey end end From 248762b5dd07a02d4bb7a3ef20b345403712296e Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Sat, 30 Jul 2016 10:45:45 +0200 Subject: [PATCH 29/68] update primises md documentation --- doc/format-md.rb | 122 -------------- doc/promises.in.md | 131 +++++++++------ doc/{init.rb => promises.init.rb} | 0 doc/promises.out.md | 264 +++++++++++++++++------------- 4 files changed, 227 insertions(+), 290 deletions(-) delete mode 100644 doc/format-md.rb rename doc/{init.rb => promises.init.rb} (100%) diff --git a/doc/format-md.rb b/doc/format-md.rb deleted file mode 100644 index 36edd3de3..000000000 --- a/doc/format-md.rb +++ /dev/null @@ -1,122 +0,0 @@ -require 'rubygems' -require 'bundler/setup' -require 'pry' -require 'pp' - -class MDFormatter - - def initialize(input_file, environment) - @input_path = input_file - @environment = environment - @output = '' - - process_file input_file - end - - def evaluate (code, line) - eval(code, @environment, @input_path, line) - end - - def process_ruby(part, start_line) - lines = part.lines - chunks = [] - line = '' - - while !lines.empty? - line += lines.shift - if Pry::Code.complete_expression? line - chunks << line - line = '' - end - end - - raise unless line.empty? - - chunk_lines = chunks.map { |chunk| [chunk, [chunk.split($/).size, 1].max] } - indent = 40 - - line_count = start_line - output = '' - chunk_lines.each do |chunk, lines| - result = evaluate(chunk, line_count) - if chunk.strip.empty? 
|| chunk.include?('#') - output << chunk - else - pre_lines = chunk.lines.to_a - last_line = pre_lines.pop - output << pre_lines.join - - if last_line =~ /\#$/ - output << last_line.gsub(/\#$/, '') - else - if last_line.size < indent && result.inspect.size < indent - output << "%-#{indent}s %s" % [last_line.chomp, "# => #{result.inspect}\n"] - else - inspect_lines = result.pretty_inspect.lines - output << last_line << "# => #{inspect_lines[0]}" << inspect_lines[1..-1].map { |l| format '# %s', l }.join - end - end - end - line_count += lines - end - output - end - - def process_file(input_path) - output_path = input_path.gsub /\.in\.md$/, '.out.md' - input = File.read(input_path) - parts = input.split(/^(```\w*\n)/) - - # pp parts.map(&:lines) - - code_block = nil - line_count = 1 - - parts.each do |part| - if part =~ /^```(\w+)$/ - code_block = $1 - @output << part - line_count += 1 - next - end - - if part =~ /^```$/ - code_block = nil - @output << part - line_count += 1 - next - end - - if code_block == 'ruby' - @output << process_ruby(part, line_count) - line_count += part.lines.size - next - end - - @output << part - line_count += part.lines.size - end - - puts "#{input_path}\n -> #{output_path}" - File.write(output_path, @output) - rescue => ex - puts "#{ex} (#{ex.class})\n#{ex.backtrace * "\n"}" - - end -end - -input_paths = if ARGV.empty? - Dir.glob("#{File.dirname(__FILE__)}/*.in.md") - else - ARGV - end.map { |p| File.expand_path p } - -input_paths.each_with_index do |input_path, i| - - pid = fork do - require_relative 'init.rb' - MDFormatter.new input_path, binding - end - - Process.wait pid -end diff --git a/doc/promises.in.md b/doc/promises.in.md index 801598378..a46fa5cf1 100644 --- a/doc/promises.in.md +++ b/doc/promises.in.md @@ -1,44 +1,71 @@ # Promises Framework -Promises is a new framework unifying former `Concurrent::Future`, `Concurrent::Promise`, `Concurrent::IVar`, -`Concurrent::Event`, `Concurrent.dataflow`, `Delay`, and `TimerTask`. 
It extensively uses the new -synchronization layer to make all the features **non-blocking** and -**lock-free**, with the exception of obviously blocking operations like -`#wait`, `#value`. As a result it lowers a danger of deadlocking and offers +Promises is a new framework unifying former `Concurrent::Future`, +`Concurrent::Promise`, `Concurrent::IVar`, `Concurrent::Event`, +`Concurrent.dataflow`, `Delay`, and `TimerTask`. It extensively uses the new +synchronization layer to make all the features *non-blocking* and +*lock-free*, with the exception of obviously blocking operations like +`#wait`, `#value`, etc. As a result it lowers a danger of deadlocking and offers better performance. ## Overview -There are two central classes ... TODO +*TODO* -## Where does it executes? - -- TODO Explain `_on` `_using` sufixes. +- What is it? +- What is it for? +- Main classes {Future}, {Event} +- Explain `_on` `_using` suffixes. ## Old examples follow *TODO rewrite into md with examples* -Adds factory methods like: future, event, delay, schedule, zip, etc. Otherwise -they can be called on Promises module. +Constructors are not accessible, instead there are many constructor methods in +FactoryMethods. ```ruby Concurrent::Promises::FactoryMethods.instance_methods false +``` + +The module can be included or extended where needed. + +```ruby +Class.new do + include Concurrent::Promises::FactoryMethods + + def a_method + resolvable_event + end +end.new.a_method +Module.new { extend Concurrent::Promises::FactoryMethods }.resolvable_event +``` +The module is already extended into {Promises} for convenience. + +```ruby +Concurrent::Promises.resolvable_event +``` + +For this guide we include the module into `main` so we can call the factory +methods in following examples directly. 
+ +```ruby include Concurrent::Promises::FactoryMethods # +resolvable_event ``` Simple asynchronous task: ```ruby future = future(0.1) { |duration| sleep duration; :result } # evaluation starts immediately -future.completed? +future.resolved? # block until evaluated future.value -future.completed? +future.resolved? ``` -Failing asynchronous task +Rejecting asynchronous task ```ruby future = future { raise 'Boom' } @@ -49,15 +76,15 @@ future.reason raise future rescue $! ``` -Direct creation of completed futures +Direct creation of resolved futures ```ruby -succeeded_future(Object.new) -failed_future(StandardError.new("boom")) +fulfilled_future(Object.new) +rejected_future(StandardError.new("boom")) ### Chaining of futures -head = succeeded_future 1 # +head = fulfilled_future 1 # branch1 = head.then(&:succ) # branch2 = head.then(&:succ).then(&:succ) # branch1.zip(branch2).value! @@ -66,7 +93,7 @@ branch1.zip(branch2).value! (branch1 & branch2).then(&:+).value! # or a class method zip from FactoryMethods can be used to zip multiple futures zip(branch1, branch2, branch1).then { |*values| values.reduce &:+ }.value! -# pick only first completed +# pick only first resolved any(branch1, branch2).value! (branch1 | branch2).value! @@ -76,29 +103,29 @@ any(branch1, branch2).value! 
# any supplied arguments are passed to the block, promises ensure that they are visible to the block future('3') { |s| s.to_i }.then(2) { |a, b| a + b }.value -succeeded_future(1).then(2, &:+).value -succeeded_future(1).chain(2) { |success, value, reason, arg| value + arg }.value +fulfilled_future(1).then(2, &:+).value +fulfilled_future(1).chain(2) { |fulfilled, value, reason, arg| value + arg }.value ### Error handling -succeeded_future(Object.new).then(&:succ).then(&:succ).rescue { |e| e.class }.value # error propagates -succeeded_future(Object.new).then(&:succ).rescue { 1 }.then(&:succ).value # rescued and replaced with 1 -succeeded_future(1).then(&:succ).rescue { |e| e.message }.then(&:succ).value # no error, rescue not applied +fulfilled_future(Object.new).then(&:succ).then(&:succ).rescue { |e| e.class }.value # error propagates +fulfilled_future(Object.new).then(&:succ).rescue { 1 }.then(&:succ).value # rescued and replaced with 1 +fulfilled_future(1).then(&:succ).rescue { |e| e.message }.then(&:succ).value # no error, rescue not applied -failing_zip = succeeded_future(1) & failed_future(StandardError.new('boom')) -failing_zip.result -failing_zip.then { |v| 'never happens' }.result -failing_zip.rescue { |a, b| (a || b).message }.value -failing_zip.chain { |success, values, reasons| [success, values.compact, reasons.compactß] }.value +rejected_zip = fulfilled_future(1) & rejected_future(StandardError.new('boom')) +rejected_zip.result +rejected_zip.then { |v| 'never happens' }.result +rejected_zip.rescue { |a, b| (a || b).message }.value +rejected_zip.chain { |fulfilled, values, reasons| [fulfilled, values.compact, reasons.compact] }.value ### Delay -# will not evaluate until asked by #value or other method requiring completion +# will not evaluate until asked by #value or other method requiring resolution future = delay { 'lazy' } sleep 0.1 # -future.completed? +future.resolved? 
future.value # propagates trough chain allowing whole or partial lazy chains @@ -108,15 +135,15 @@ branch1 = head.then(&:succ) branch2 = head.delay.then(&:succ) join = branch1 & branch2 -sleep 0.1 # nothing will complete -[head, branch1, branch2, join].map(&:completed?) +sleep 0.1 # nothing will resolve +[head, branch1, branch2, join].map(&:resolved?) branch1.value -sleep 0.1 # forces only head to complete, branch 2 stays incomplete -[head, branch1, branch2, join].map(&:completed?) +sleep 0.1 # forces only head to resolve, branch 2 stays pending +[head, branch1, branch2, join].map(&:resolved?) join.value -[head, branch1, branch2, join].map(&:completed?) +[head, branch1, branch2, join].map(&:resolved?) ### Flatting @@ -136,7 +163,7 @@ future { future { future { 1 + 1 } } }. # it'll be executed after 0.1 seconds scheduled = schedule(0.1) { 1 } -scheduled.completed? +scheduled.resolved? scheduled.value # available after 0.1sec # and in chain @@ -146,19 +173,19 @@ sleep 0.1 # scheduled.value # returns after another 0.1sec -### Completable Future and Event +### Resolvable Future and Event -future = completable_future -event = completable_event() +future = resolvable_future +event = resolvable_event() -# These threads will be blocked until the future and event is completed +# These threads will be blocked until the future and event is resolved t1 = Thread.new { future.value } # t2 = Thread.new { event.wait } # -future.success 1 -future.success 1 rescue $! -future.success 2, false -event.complete +future.fulfill 1 +future.fulfill 1 rescue $! +future.fulfill 2, false +event.resolve # The threads can be joined now [t1, t2].each &:join # @@ -169,8 +196,8 @@ event.complete queue = Queue.new future = delay { 1 + 1 } -future.on_success { queue << 1 } # evaluated asynchronously -future.on_success! { queue << 2 } # evaluated on completing thread +future.on_fulfillment { queue << 1 } # evaluated asynchronously +future.on_fulfillment! 
{ queue << 2 } # evaluated on resolving thread queue.empty? future.value @@ -236,8 +263,8 @@ zip(*jobs).value # periodic task def schedule_job(interval, &job) # schedule the first execution and chain restart og the job - Concurrent.schedule(interval, &job).chain do |success, continue, reason| - if success + Concurrent.schedule(interval, &job).chain do |fulfilled, continue, reason| + if fulfilled schedule_job(interval, &job) if continue else # handle error @@ -281,10 +308,10 @@ end concurrent_jobs = 11.times.map do |v| - succeeded_future(v). + fulfilled_future(v). # ask the DB with the `v`, only one at the time, rest is parallel then_ask(DB). - # get size of the string, fails for 11 + # get size of the string, rejects for 11 then(&:size). rescue { |reason| reason.message } # translate error to value (exception, message) end # @@ -308,7 +335,7 @@ end concurrent_jobs = 11.times.map do |v| - succeeded_future(v). + fulfilled_future(v). # ask the DB_POOL with the `v`, only 5 at the time, rest is parallel then_ask(DB_POOL). then(&:size). diff --git a/doc/init.rb b/doc/promises.init.rb similarity index 100% rename from doc/init.rb rename to doc/promises.init.rb diff --git a/doc/promises.out.md b/doc/promises.out.md index 52642350a..e46ca5915 100644 --- a/doc/promises.out.md +++ b/doc/promises.out.md @@ -1,39 +1,42 @@ # Promises Framework -Promises is a new framework unifying former `Concurrent::Future`, `Concurrent::Promise`, `Concurrent::IVar`, -`Concurrent::Event`, `Concurrent.dataflow`, `Delay`, and `TimerTask`. It extensively uses the new -synchronization layer to make all the features **non-blocking** and -**lock-free**, with the exception of obviously blocking operations like -`#wait`, `#value`. As a result it lowers a danger of deadlocking and offers +Promises is a new framework unifying former `Concurrent::Future`, +`Concurrent::Promise`, `Concurrent::IVar`, `Concurrent::Event`, +`Concurrent.dataflow`, `Delay`, and `TimerTask`. 
It extensively uses the new +synchronization layer to make all the features *non-blocking* and +*lock-free*, with the exception of obviously blocking operations like +`#wait`, `#value`, etc. As a result it lowers a danger of deadlocking and offers better performance. ## Overview -There are two central classes ... TODO +*TODO* -## Where does it executes? - -- TODO Explain `_on` `_using` sufixes. +- What is it? +- What is it for? +- Main classes {Future}, {Event} +- Explain `_on` `_using` suffixes. ## Old examples follow *TODO rewrite into md with examples* -Adds factory methods like: future, event, delay, schedule, zip, etc. Otherwise -they can be called on Promises module. +Constructors are not accessible, instead there are many constructor methods in +FactoryMethods. ```ruby Concurrent::Promises::FactoryMethods.instance_methods false -# => [:completable_event, -# :completable_event_on, -# :completable_future, -# :completable_future_on, +# => [:resolvable_event, +# :resolvable_event_on, +# :resolvable_future, +# :resolvable_future_on, # :future, # :future_on, -# :completed_future, -# :succeeded_future, -# :failed_future, -# :completed_event, +# :resolved_future, +# :fulfilled_future, +# :rejected_future, +# :resolved_event, +# :create, # :delay, # :delay_on, # :schedule, @@ -43,33 +46,62 @@ Concurrent::Promises::FactoryMethods.instance_methods false # :zip, # :zip_events, # :zip_events_on, -# :any_complete_future, +# :any_resolved_future, # :any, -# :any_complete_future_on, -# :any_successful_future, -# :any_successful_future_on, +# :any_resolved_future_on, +# :any_fulfilled_future, +# :any_fulfilled_future_on, # :any_event, # :any_event_on, # :select] +``` + +The module can be included or extended where needed. 
+ +```ruby +Class.new do + include Concurrent::Promises::FactoryMethods + + def a_method + resolvable_event + end +end.new.a_method +# => <#Concurrent::Promises::ResolvableEvent:0x7ff23c2ece18 pending blocks:[]> -include Concurrent::Promises::FactoryMethods # +Module.new { extend Concurrent::Promises::FactoryMethods }.resolvable_event +# => <#Concurrent::Promises::ResolvableEvent:0x7ff23c2e6ea0 pending blocks:[]> +``` +The module is already extended into {Promises} for convenience. + +```ruby +Concurrent::Promises.resolvable_event +# => <#Concurrent::Promises::ResolvableEvent:0x7ff23c2d7cc0 pending blocks:[]> +``` + +For this guide we include the module into `main` so we can call the factory +methods in following examples directly. + +```ruby +include Concurrent::Promises::FactoryMethods +resolvable_event +# => <#Concurrent::Promises::ResolvableEvent:0x7ff23c2d4e08 pending blocks:[]> ``` Simple asynchronous task: ```ruby future = future(0.1) { |duration| sleep duration; :result } # evaluation starts immediately -future.completed? # => false +future.resolved? # => false # block until evaluated future.value # => :result -future.completed? # => true +future.resolved? # => true ``` -Failing asynchronous task +Rejecting asynchronous task ```ruby future = future { raise 'Boom' } -# => <#Concurrent::Promises::Future:0x7f90a7886578 pending blocks:[]> +# => <#Concurrent::Promises::Future:0x7ff23c2be428 pending blocks:[]> future.value # => nil future.value! rescue $! # => # future.reason # => # @@ -77,19 +109,19 @@ future.reason # => # raise future rescue $! 
# => # ``` -Direct creation of completed futures +Direct creation of resolved futures ```ruby -succeeded_future(Object.new) -# => <#Concurrent::Promises::Future:0x7f90a699edd0 success blocks:[]> -failed_future(StandardError.new("boom")) -# => <#Concurrent::Promises::Future:0x7f90a699d408 failed blocks:[]> +fulfilled_future(Object.new) +# => <#Concurrent::Promises::Future:0x7ff23c10e920 fulfilled blocks:[]> +rejected_future(StandardError.new("boom")) +# => <#Concurrent::Promises::Future:0x7ff23c106090 rejected blocks:[]> ### Chaining of futures -head = succeeded_future 1 # -branch1 = head.then(&:succ) # -branch2 = head.then(&:succ).then(&:succ) # +head = fulfilled_future 1 +branch1 = head.then(&:succ) +branch2 = head.then(&:succ).then(&:succ) branch1.zip(branch2).value! # => [2, 3] # zip is aliased as & (branch1 & branch2).then { |a, b| a + b }.value! @@ -98,7 +130,7 @@ branch1.zip(branch2).value! # => [2, 3] # or a class method zip from FactoryMethods can be used to zip multiple futures zip(branch1, branch2, branch1).then { |*values| values.reduce &:+ }.value! # => 7 -# pick only first completed +# pick only first resolved any(branch1, branch2).value! # => 2 (branch1 | branch2).value! # => 2 @@ -109,60 +141,60 @@ any(branch1, branch2).value! 
# => 2 future('3') { |s| s.to_i }.then(2) { |a, b| a + b }.value # => 5 -succeeded_future(1).then(2, &:+).value # => 3 -succeeded_future(1).chain(2) { |success, value, reason, arg| value + arg }.value +fulfilled_future(1).then(2, &:+).value # => 3 +fulfilled_future(1).chain(2) { |fulfilled, value, reason, arg| value + arg }.value # => 3 ### Error handling -succeeded_future(Object.new).then(&:succ).then(&:succ).rescue { |e| e.class }.value # error propagates -succeeded_future(Object.new).then(&:succ).rescue { 1 }.then(&:succ).value # rescued and replaced with 1 -succeeded_future(1).then(&:succ).rescue { |e| e.message }.then(&:succ).value # no error, rescue not applied +fulfilled_future(Object.new).then(&:succ).then(&:succ).rescue { |e| e.class }.value # error propagates +fulfilled_future(Object.new).then(&:succ).rescue { 1 }.then(&:succ).value # rescued and replaced with 1 +fulfilled_future(1).then(&:succ).rescue { |e| e.message }.then(&:succ).value # no error, rescue not applied -failing_zip = succeeded_future(1) & failed_future(StandardError.new('boom')) -# => <#Concurrent::Promises::Future:0x7f90a6947918 failed blocks:[]> -failing_zip.result +rejected_zip = fulfilled_future(1) & rejected_future(StandardError.new('boom')) +# => <#Concurrent::Promises::Future:0x7ff23c08f350 rejected blocks:[]> +rejected_zip.result # => [false, [1, nil], [nil, #]] -failing_zip.then { |v| 'never happens' }.result +rejected_zip.then { |v| 'never happens' }.result # => [false, [1, nil], [nil, #]] -failing_zip.rescue { |a, b| (a || b).message }.value +rejected_zip.rescue { |a, b| (a || b).message }.value # => "boom" -failing_zip.chain { |success, values, reasons| [success, values.compact, reasons.compactß] }.value -# => nil +rejected_zip.chain { |fulfilled, values, reasons| [fulfilled, values.compact, reasons.compact] }.value +# => [false, [1], [#]] ### Delay -# will not evaluate until asked by #value or other method requiring completion +# will not evaluate until asked by #value or 
other method requiring resolution future = delay { 'lazy' } -# => <#Concurrent::Promises::Future:0x7f90a690d718 pending blocks:[]> -sleep 0.1 # -future.completed? # => false +# => <#Concurrent::Promises::Future:0x7ff23c064e70 pending blocks:[]> +sleep 0.1 +future.resolved? # => false future.value # => "lazy" # propagates trough chain allowing whole or partial lazy chains head = delay { 1 } -# => <#Concurrent::Promises::Future:0x7f90a68edcb0 pending blocks:[]> +# => <#Concurrent::Promises::Future:0x7ff23c054408 pending blocks:[]> branch1 = head.then(&:succ) -# => <#Concurrent::Promises::Future:0x7f90a68d7460 pending blocks:[]> +# => <#Concurrent::Promises::Future:0x7ff23c044a30 pending blocks:[]> branch2 = head.delay.then(&:succ) -# => <#Concurrent::Promises::Future:0x7f90a68d5368 pending blocks:[]> +# => <#Concurrent::Promises::Future:0x7ff23c036840 pending blocks:[]> join = branch1 & branch2 -# => <#Concurrent::Promises::Future:0x7f90a68b7e30 pending blocks:[]> +# => <#Concurrent::Promises::Future:0x7ff23c034e78 pending blocks:[]> -sleep 0.1 # nothing will complete -[head, branch1, branch2, join].map(&:completed?) +sleep 0.1 # nothing will resolve +[head, branch1, branch2, join].map(&:resolved?) # => [false, false, false, false] branch1.value # => 2 -sleep 0.1 # forces only head to complete, branch 2 stays incomplete -[head, branch1, branch2, join].map(&:completed?) +sleep 0.1 # forces only head to resolve, branch 2 stays pending +[head, branch1, branch2, join].map(&:resolved?) # => [true, true, false, false] join.value # => [2, 2] -[head, branch1, branch2, join].map(&:completed?) +[head, branch1, branch2, join].map(&:resolved?) # => [true, true, true, true] @@ -182,50 +214,50 @@ future { future { future { 1 + 1 } } }. # it'll be executed after 0.1 seconds scheduled = schedule(0.1) { 1 } -# => <#Concurrent::Promises::Future:0x7f90a4243ab0 pending blocks:[]> +# => <#Concurrent::Promises::Future:0x7ff23d005aa0 pending blocks:[]> -scheduled.completed? 
# => false +scheduled.resolved? # => false scheduled.value # available after 0.1sec # and in chain scheduled = delay { 1 }.schedule(0.1).then(&:succ) -# => <#Concurrent::Promises::Future:0x7f90a4228d00 pending blocks:[]> +# => <#Concurrent::Promises::Future:0x7ff23b990b58 pending blocks:[]> # will not be scheduled until value is requested -sleep 0.1 # +sleep 0.1 scheduled.value # returns after another 0.1sec -### Completable Future and Event +### Resolvable Future and Event -future = completable_future -# => <#Concurrent::Promises::CompletableFuture:0x7f90a6075dd0 pending blocks:[]> -event = completable_event() -# => <#Concurrent::Promises::CompletableEvent:0x7f90a60741d8 pending blocks:[]> +future = resolvable_future +# => <#Concurrent::Promises::ResolvableFuture:0x7ff23b95a2b0 pending blocks:[]> +event = resolvable_event() +# => <#Concurrent::Promises::ResolvableEvent:0x7ff23b9528f8 pending blocks:[]> -# These threads will be blocked until the future and event is completed -t1 = Thread.new { future.value } # -t2 = Thread.new { event.wait } # +# These threads will be blocked until the future and event is resolved +t1 = Thread.new { future.value } +t2 = Thread.new { event.wait } -future.success 1 -# => <#Concurrent::Promises::CompletableFuture:0x7f90a6075dd0 success blocks:[]> -future.success 1 rescue $! -# => # -future.success 2, false # => false -event.complete -# => <#Concurrent::Promises::CompletableEvent:0x7f90a60741d8 success blocks:[]> +future.fulfill 1 +# => <#Concurrent::Promises::ResolvableFuture:0x7ff23b95a2b0 fulfilled blocks:[]> +future.fulfill 1 rescue $! 
+# => # +future.fulfill 2, false # => false +event.resolve +# => <#Concurrent::Promises::ResolvableEvent:0x7ff23b9528f8 fulfilled blocks:[]> # The threads can be joined now -[t1, t2].each &:join # +[t1, t2].each &:join ### Callbacks -queue = Queue.new # => # +queue = Queue.new # => # future = delay { 1 + 1 } -# => <#Concurrent::Promises::Future:0x7f90a4954f70 pending blocks:[]> +# => <#Concurrent::Promises::Future:0x7ff23b9203f8 pending blocks:[]> -future.on_success { queue << 1 } # evaluated asynchronously -future.on_success! { queue << 2 } # evaluated on completing thread +future.on_fulfillment { queue << 1 } # evaluated asynchronously +future.on_fulfillment! { queue << 2 } # evaluated on resolving thread queue.empty? # => true future.value # => 2 @@ -250,7 +282,7 @@ future_on(:fast) { 2 }. actor = Concurrent::Actor::Utils::AdHoc.spawn :square do -> v { v ** 2 } end -# => # +# => # future { 2 }. @@ -264,11 +296,11 @@ actor.ask(2).then(&:succ).value # => 5 ### Interoperability with channels ch1 = Concurrent::Channel.new -# => # #, -# @__lock__=#, +# #, +# @__lock__=#, # @buffer=nil, # @capacity=1, # @closed=false, @@ -276,13 +308,13 @@ ch1 = Concurrent::Channel.new # @size=0, # @taking=[]>, # @validator= -# #> +# #> ch2 = Concurrent::Channel.new -# => # #, -# @__lock__=#, +# #, +# @__lock__=#, # @buffer=nil, # @capacity=1, # @closed=false, @@ -290,18 +322,18 @@ ch2 = Concurrent::Channel.new # @size=0, # @taking=[]>, # @validator= -# #> +# #> result = select(ch1, ch2) -# => <#Concurrent::Promises::Future:0x7f90a4180a60 pending blocks:[]> +# => <#Concurrent::Promises::Future:0x7ff23b05a980 pending blocks:[]> ch1.put 1 # => true result.value! # => [1, -# #, -# @__lock__=#, +# #, +# @__lock__=#, # @buffer=nil, # @capacity=1, # @closed=false, @@ -309,16 +341,16 @@ result.value! # @size=0, # @taking=[]>, # @validator= -# #>] +# #>] future { 1+1 }. 
then_put(ch1) -# => <#Concurrent::Promises::Future:0x7f90a6064918 pending blocks:[]> +# => <#Concurrent::Promises::Future:0x7ff23ba19250 pending blocks:[]> result = future { '%02d' }. then_select(ch1, ch2). then { |format, (value, channel)| format format, value } -# => <#Concurrent::Promises::Future:0x7f90a4142cb0 pending blocks:[]> +# => <#Concurrent::Promises::Future:0x7ff23c3569f8 pending blocks:[]> result.value! # => "02" @@ -326,18 +358,18 @@ result.value! # => "02" # simple background processing future { do_stuff } -# => <#Concurrent::Promises::Future:0x7f90a4129a08 pending blocks:[]> +# => <#Concurrent::Promises::Future:0x7ff23c336928 pending blocks:[]> # parallel background processing -jobs = 10.times.map { |i| future { i } } # +jobs = 10.times.map { |i| future { i } } zip(*jobs).value # => [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] # periodic task def schedule_job(interval, &job) # schedule the first execution and chain restart og the job - Concurrent.schedule(interval, &job).chain do |success, continue, reason| - if success + Concurrent.schedule(interval, &job).chain do |fulfilled, continue, reason| + if fulfilled schedule_job(interval, &job) if continue else # handle error @@ -348,7 +380,7 @@ def schedule_job(interval, &job) end end -queue = Queue.new # => # +queue = Queue.new # => # count = 0 # => 0 schedule_job 0.05 do @@ -366,7 +398,7 @@ schedule_job 0.05 do end # read the queue -arr, v = [], nil; arr << v while (v = queue.pop) # +arr, v = [], nil; arr << v while (v = queue.pop) arr # => [0, 1, 2, 3] # How to limit processing where there are limited resources? @@ -381,13 +413,13 @@ end concurrent_jobs = 11.times.map do |v| - succeeded_future(v). + fulfilled_future(v). # ask the DB with the `v`, only one at the time, rest is parallel then_ask(DB). - # get size of the string, fails for 11 + # get size of the string, rejects for 11 then(&:size). 
rescue { |reason| reason.message } # translate error to value (exception, message) -end # +end zip(*concurrent_jobs).value! # => [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, "undefined method `size' for nil:NilClass"] @@ -419,12 +451,12 @@ end concurrent_jobs = 11.times.map do |v| - succeeded_future(v). + fulfilled_future(v). # ask the DB_POOL with the `v`, only 5 at the time, rest is parallel then_ask(DB_POOL). then(&:size). rescue { |reason| reason.message } -end # +end zip(*concurrent_jobs).value! # => [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, "undefined method `size' for nil:NilClass"] From 07510fcd9762becdce448001173ea708111633f3 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Sat, 30 Jul 2016 10:46:01 +0200 Subject: [PATCH 30/68] add general constructor --- lib/concurrent/edge/promises.rb | 42 +++++++++++++++++++++++++++++++-- 1 file changed, 40 insertions(+), 2 deletions(-) diff --git a/lib/concurrent/edge/promises.rb b/lib/concurrent/edge/promises.rb index 3ff83c85f..3254cb9dd 100644 --- a/lib/concurrent/edge/promises.rb +++ b/lib/concurrent/edge/promises.rb @@ -127,6 +127,46 @@ def resolved_event(default_executor = :io) ImmediateEventPromise.new(default_executor).event end + # General constructor. Behaves differently based on the argument's type. It's provided for convenience + # but it's better to be explicit. + # + # @see rejected_future, resolved_event, fulfilled_future + # @!macro promises.param.default_executor + # @return [Event, Future] + # + # @overload create(nil, default_executor = :io) + # @param [nil] nil + # @return [Event] resolved event. + # + # @overload create(a_future = nil, default_executor = :io) + # @param [Future] a_future + # @return [Future] a future which will be resolved when a_future is. + # + # @overload create(an_event = nil, default_executor = :io) + # @param [Event] an_event + # @return [Event] an event which will be resolved when an_event is. 
+ # + # @overload create(exception = nil, default_executor = :io) + # @param [Exception] exception + # @return [Future] a rejected future with the exception as its reason. + # + # @overload create(value = nil, default_executor = :io) + # @param [Object] value when none of the above overloads fits + # @return [Future] a fulfilled future with the value. + def create(argument = nil, default_executor = :io) + case argument + when AbstractEventFuture + # returning wrapper would change nothing + argument + when Exception + rejected_future argument, default_executor + when nil + resolved_event default_executor + else + fulfilled_future argument, default_executor + end + end + # @!macro promises.shortcut.on # @return [Future] def delay(*args, &task) @@ -253,8 +293,6 @@ def any_event_on(default_executor, *futures_and_or_events) AnyResolvedEventPromise.new(futures_and_or_events, default_executor).event end - # TODO (pitr-ch 30-Jul-2016): add general constructor behaving based on argument type - # TODO consider adding first(count, *futures) # TODO consider adding zip_by(slice, *futures) processing futures in slices end From d6d1dd21070df5580d69884c94bf407bd918bec5 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Sat, 30 Jul 2016 11:04:47 +0200 Subject: [PATCH 31/68] convert rest of the examples to evaluated md --- .yardopts | 1 + Gemfile | 1 + doc/promises.in.md | 126 +++++++++++++------------ doc/promises.out.md | 222 ++++++++++++++++++-------------------------- 4 files changed, 158 insertions(+), 192 deletions(-) diff --git a/.yardopts b/.yardopts index 77e7d3bb4..262c698f9 100644 --- a/.yardopts +++ b/.yardopts @@ -5,6 +5,7 @@ --title=Concurrent Ruby --template default --template-path ./yard-template +--default-return undocumented ./lib/**/*.rb ./ext/concurrent_ruby_ext/**/*.c diff --git a/Gemfile b/Gemfile index 3ac928d82..6dde8818b 100644 --- a/Gemfile +++ b/Gemfile @@ -16,6 +16,7 @@ group :development do # TODO (pitr-ch 15-Oct-2016): does not work on 1.9.3 anymore gem 
'inch', '~> 0.6.3', :platforms => :mri, :require => false gem 'redcarpet', '~> 3.3.2', platforms: :mri # understands github markdown + gem 'md-ruby-eval' end group :testing do diff --git a/doc/promises.in.md b/doc/promises.in.md index a46fa5cf1..ee9671a1c 100644 --- a/doc/promises.in.md +++ b/doc/promises.in.md @@ -3,8 +3,8 @@ Promises is a new framework unifying former `Concurrent::Future`, `Concurrent::Promise`, `Concurrent::IVar`, `Concurrent::Event`, `Concurrent.dataflow`, `Delay`, and `TimerTask`. It extensively uses the new -synchronization layer to make all the features *non-blocking* and -*lock-free*, with the exception of obviously blocking operations like +synchronization layer to make all the features *lock-free*, +with the exception of obviously blocking operations like `#wait`, `#value`, etc. As a result it lowers a danger of deadlocking and offers better performance. @@ -15,11 +15,11 @@ better performance. - What is it? - What is it for? - Main classes {Future}, {Event} -- Explain `_on` `_using` suffixes. +- Explain pool usage :io vs :fast, and `_on` `_using` suffixes. ## Old examples follow -*TODO rewrite into md with examples* +*TODO review pending* Constructors are not accessible, instead there are many constructor methods in FactoryMethods. @@ -65,7 +65,7 @@ future.value future.resolved? ``` -Rejecting asynchronous task +Rejecting asynchronous task: ```ruby future = future { raise 'Boom' } @@ -76,14 +76,16 @@ future.reason raise future rescue $! ``` -Direct creation of resolved futures +Direct creation of resolved futures: ```ruby fulfilled_future(Object.new) rejected_future(StandardError.new("boom")) +``` -### Chaining of futures +Chaining of futures: +```ruby head = fulfilled_future 1 # branch1 = head.then(&:succ) # branch2 = head.then(&:succ).then(&:succ) # @@ -96,19 +98,19 @@ zip(branch1, branch2, branch1).then { |*values| values.reduce &:+ }.value! # pick only first resolved any(branch1, branch2).value! (branch1 | branch2).value! 
+``` +Any supplied arguments are passed to the block, promises ensure that they are visible to the block: -### Arguments - -# any supplied arguments are passed to the block, promises ensure that they are visible to the block - +```ruby future('3') { |s| s.to_i }.then(2) { |a, b| a + b }.value fulfilled_future(1).then(2, &:+).value fulfilled_future(1).chain(2) { |fulfilled, value, reason, arg| value + arg }.value +``` +Error handling: -### Error handling - +```ruby fulfilled_future(Object.new).then(&:succ).then(&:succ).rescue { |e| e.class }.value # error propagates fulfilled_future(Object.new).then(&:succ).rescue { 1 }.then(&:succ).value # rescued and replaced with 1 fulfilled_future(1).then(&:succ).rescue { |e| e.message }.then(&:succ).value # no error, rescue not applied @@ -118,18 +120,18 @@ rejected_zip.result rejected_zip.then { |v| 'never happens' }.result rejected_zip.rescue { |a, b| (a || b).message }.value rejected_zip.chain { |fulfilled, values, reasons| [fulfilled, values.compact, reasons.compact] }.value +``` +Delay will not evaluate until asked by #value or other method requiring resolution. -### Delay - -# will not evaluate until asked by #value or other method requiring resolution +``` ruby future = delay { 'lazy' } sleep 0.1 # future.resolved? future.value - -# propagates trough chain allowing whole or partial lazy chains - +``` +It propagates trough chain allowing whole or partial lazy chains. +```ruby head = delay { 1 } branch1 = head.then(&:succ) branch2 = head.delay.then(&:succ) @@ -144,11 +146,11 @@ sleep 0.1 # forces only head to resolve, branch 2 stays pending join.value [head, branch1, branch2, join].map(&:resolved?) +``` +When flatting, it waits for inner future. Only the last call to value blocks thread. -### Flatting - -# waits for inner future, only the last call to value blocks thread +```ruby future { future { 1+1 } }.flat.value # more complicated example @@ -156,9 +158,11 @@ future { future { future { 1 + 1 } } }. flat(1). 
then { |f| f.then(&:succ) }. flat(1).value +``` +Scheduling of asynchronous tasks: -### Schedule +```ruby # it'll be executed after 0.1 seconds scheduled = schedule(0.1) { 1 } @@ -171,9 +175,11 @@ scheduled = delay { 1 }.schedule(0.1).then(&:succ) # will not be scheduled until value is requested sleep 0.1 # scheduled.value # returns after another 0.1sec +``` +Resolvable Future and Event: -### Resolvable Future and Event +```ruby future = resolvable_future event = resolvable_event() @@ -189,10 +195,11 @@ event.resolve # The threads can be joined now [t1, t2].each &:join # +``` +Callbacks: -### Callbacks - +```ruby queue = Queue.new future = delay { 1 + 1 } @@ -203,22 +210,22 @@ queue.empty? future.value queue.pop queue.pop +``` +Factory methods are taking names of the global executors +(or instances of custom executors). -### Thread-pools - -# Factory methods are taking names of the global executors -# (ot instances of custom executors) - +```ruby # executed on :fast executor, only short and non-blocking tasks can go there future_on(:fast) { 2 }. # executed on executor for blocking and long operations then_using(:io) { File.read __FILE__ }. wait +``` +Interoperability with actors: -### Interoperability with actors - +```ruby actor = Concurrent::Actor::Utils::AdHoc.spawn :square do -> v { v ** 2 } end @@ -230,37 +237,26 @@ future { 2 }. value actor.ask(2).then(&:succ).value - - -### Interoperability with channels - -ch1 = Concurrent::Channel.new -ch2 = Concurrent::Channel.new - -result = select(ch1, ch2) -ch1.put 1 -result.value! - - -future { 1+1 }. - then_put(ch1) -result = future { '%02d' }. - then_select(ch1, ch2). - then { |format, (value, channel)| format format, value } -result.value! 
- +``` ### Common use-cases Examples -# simple background processing +#### simple background processing + +```ruby future { do_stuff } +``` -# parallel background processing +#### parallel background processing + +```ruby jobs = 10.times.map { |i| future { i } } # zip(*jobs).value +``` +#### periodic task -# periodic task +```ruby def schedule_job(interval, &job) # schedule the first execution and chain restart og the job Concurrent.schedule(interval, &job).chain do |fulfilled, continue, reason| @@ -269,16 +265,17 @@ def schedule_job(interval, &job) else # handle error p reason - # retry - schedule_job(interval, &job) + # retry sooner + schedule_job(interval / 10, &job) end end end queue = Queue.new count = 0 +interval = 0.05 # small just not to delay execution of this example -schedule_job 0.05 do +schedule_job interval do queue.push count count += 1 # to continue scheduling return true, false will end the task @@ -286,6 +283,7 @@ schedule_job 0.05 do # to continue scheduling return true true else + # close the queue with nil to simplify reading it queue.push nil # to end the task return false false @@ -294,10 +292,14 @@ end # read the queue arr, v = [], nil; arr << v while (v = queue.pop) # +# arr has the results from the executed scheduled tasks arr +``` +#### How to limit processing where there are limited resources? + +By creating an actor managing the resource -# How to limit processing where there are limited resources? -# By creating an actor managing the resource +```ruby DB = Concurrent::Actor::Utils::AdHoc.spawn :db do data = Array.new(10) { |i| '*' * i } lambda do |message| @@ -317,9 +319,11 @@ concurrent_jobs = 11.times.map do |v| end # zip(*concurrent_jobs).value! 
+``` +In reality there is often a pool though: -# In reality there is often a pool though: +```ruby data = Array.new(10) { |i| '*' * i } pool_size = 5 diff --git a/doc/promises.out.md b/doc/promises.out.md index e46ca5915..6c61accc2 100644 --- a/doc/promises.out.md +++ b/doc/promises.out.md @@ -3,8 +3,8 @@ Promises is a new framework unifying former `Concurrent::Future`, `Concurrent::Promise`, `Concurrent::IVar`, `Concurrent::Event`, `Concurrent.dataflow`, `Delay`, and `TimerTask`. It extensively uses the new -synchronization layer to make all the features *non-blocking* and -*lock-free*, with the exception of obviously blocking operations like +synchronization layer to make all the features *lock-free*, +with the exception of obviously blocking operations like `#wait`, `#value`, etc. As a result it lowers a danger of deadlocking and offers better performance. @@ -15,11 +15,11 @@ better performance. - What is it? - What is it for? - Main classes {Future}, {Event} -- Explain `_on` `_using` suffixes. +- Explain pool usage :io vs :fast, and `_on` `_using` suffixes. ## Old examples follow -*TODO rewrite into md with examples* +*TODO review pending* Constructors are not accessible, instead there are many constructor methods in FactoryMethods. @@ -66,16 +66,16 @@ Class.new do resolvable_event end end.new.a_method -# => <#Concurrent::Promises::ResolvableEvent:0x7ff23c2ece18 pending blocks:[]> +# => <#Concurrent::Promises::ResolvableEvent:0x7fbb023df1e8 pending blocks:[]> Module.new { extend Concurrent::Promises::FactoryMethods }.resolvable_event -# => <#Concurrent::Promises::ResolvableEvent:0x7ff23c2e6ea0 pending blocks:[]> +# => <#Concurrent::Promises::ResolvableEvent:0x7fbb023d7600 pending blocks:[]> ``` The module is already extended into {Promises} for convenience. 
```ruby Concurrent::Promises.resolvable_event -# => <#Concurrent::Promises::ResolvableEvent:0x7ff23c2d7cc0 pending blocks:[]> +# => <#Concurrent::Promises::ResolvableEvent:0x7fbb023d5ad0 pending blocks:[]> ``` For this guide we include the module into `main` so we can call the factory @@ -84,7 +84,7 @@ methods in following examples directly. ```ruby include Concurrent::Promises::FactoryMethods resolvable_event -# => <#Concurrent::Promises::ResolvableEvent:0x7ff23c2d4e08 pending blocks:[]> +# => <#Concurrent::Promises::ResolvableEvent:0x7fbb023cf608 pending blocks:[]> ``` Simple asynchronous task: @@ -97,11 +97,11 @@ future.value # => :result future.resolved? # => true ``` -Rejecting asynchronous task +Rejecting asynchronous task: ```ruby future = future { raise 'Boom' } -# => <#Concurrent::Promises::Future:0x7ff23c2be428 pending blocks:[]> +# => <#Concurrent::Promises::Future:0x7fbb023b4308 pending blocks:[]> future.value # => nil future.value! rescue $! # => # future.reason # => # @@ -109,16 +109,18 @@ future.reason # => # raise future rescue $! # => # ``` -Direct creation of resolved futures +Direct creation of resolved futures: ```ruby fulfilled_future(Object.new) -# => <#Concurrent::Promises::Future:0x7ff23c10e920 fulfilled blocks:[]> +# => <#Concurrent::Promises::Future:0x7fbb023a58a8 fulfilled blocks:[]> rejected_future(StandardError.new("boom")) -# => <#Concurrent::Promises::Future:0x7ff23c106090 rejected blocks:[]> +# => <#Concurrent::Promises::Future:0x7fbb023a79a0 rejected blocks:[]> +``` -### Chaining of futures +Chaining of futures: +```ruby head = fulfilled_future 1 branch1 = head.then(&:succ) branch2 = head.then(&:succ).then(&:succ) @@ -133,27 +135,27 @@ zip(branch1, branch2, branch1).then { |*values| values.reduce &:+ }.value! # pick only first resolved any(branch1, branch2).value! # => 2 (branch1 | branch2).value! 
# => 2 +``` +Any supplied arguments are passed to the block, promises ensure that they are visible to the block: -### Arguments - -# any supplied arguments are passed to the block, promises ensure that they are visible to the block - +```ruby future('3') { |s| s.to_i }.then(2) { |a, b| a + b }.value # => 5 fulfilled_future(1).then(2, &:+).value # => 3 fulfilled_future(1).chain(2) { |fulfilled, value, reason, arg| value + arg }.value # => 3 +``` +Error handling: -### Error handling - +```ruby fulfilled_future(Object.new).then(&:succ).then(&:succ).rescue { |e| e.class }.value # error propagates fulfilled_future(Object.new).then(&:succ).rescue { 1 }.then(&:succ).value # rescued and replaced with 1 fulfilled_future(1).then(&:succ).rescue { |e| e.message }.then(&:succ).value # no error, rescue not applied rejected_zip = fulfilled_future(1) & rejected_future(StandardError.new('boom')) -# => <#Concurrent::Promises::Future:0x7ff23c08f350 rejected blocks:[]> +# => <#Concurrent::Promises::Future:0x7fbb023343b0 rejected blocks:[]> rejected_zip.result # => [false, [1, nil], [nil, #]] rejected_zip.then { |v| 'never happens' }.result @@ -162,27 +164,26 @@ rejected_zip.rescue { |a, b| (a || b).message }.value # => "boom" rejected_zip.chain { |fulfilled, values, reasons| [fulfilled, values.compact, reasons.compact] }.value # => [false, [1], [#]] +``` +Delay will not evaluate until asked by #value or other method requiring resolution. -### Delay - -# will not evaluate until asked by #value or other method requiring resolution +``` ruby future = delay { 'lazy' } -# => <#Concurrent::Promises::Future:0x7ff23c064e70 pending blocks:[]> -sleep 0.1 -future.resolved? # => false -future.value # => "lazy" - -# propagates trough chain allowing whole or partial lazy chains - +sleep 0.1 # +future.resolved? +future.value +``` +It propagates trough chain allowing whole or partial lazy chains. 
+```ruby head = delay { 1 } -# => <#Concurrent::Promises::Future:0x7ff23c054408 pending blocks:[]> +# => <#Concurrent::Promises::Future:0x7fbb02304b38 pending blocks:[]> branch1 = head.then(&:succ) -# => <#Concurrent::Promises::Future:0x7ff23c044a30 pending blocks:[]> +# => <#Concurrent::Promises::Future:0x7fbb022fe328 pending blocks:[]> branch2 = head.delay.then(&:succ) -# => <#Concurrent::Promises::Future:0x7ff23c036840 pending blocks:[]> +# => <#Concurrent::Promises::Future:0x7fbb03867d68 pending blocks:[]> join = branch1 & branch2 -# => <#Concurrent::Promises::Future:0x7ff23c034e78 pending blocks:[]> +# => <#Concurrent::Promises::Future:0x7fbb03865fe0 pending blocks:[]> sleep 0.1 # nothing will resolve [head, branch1, branch2, join].map(&:resolved?) @@ -196,11 +197,11 @@ sleep 0.1 # forces only head to resolve, branch 2 stays pending join.value # => [2, 2] [head, branch1, branch2, join].map(&:resolved?) # => [true, true, true, true] +``` +When flatting, it waits for inner future. Only the last call to value blocks thread. -### Flatting - -# waits for inner future, only the last call to value blocks thread +```ruby future { future { 1+1 } }.flat.value # => 2 # more complicated example @@ -208,53 +209,58 @@ future { future { future { 1 + 1 } } }. flat(1). then { |f| f.then(&:succ) }. flat(1).value # => 3 +``` +Scheduling of asynchronous tasks: -### Schedule +```ruby # it'll be executed after 0.1 seconds scheduled = schedule(0.1) { 1 } -# => <#Concurrent::Promises::Future:0x7ff23d005aa0 pending blocks:[]> +# => <#Concurrent::Promises::Future:0x7fbb022a5570 pending blocks:[]> scheduled.resolved? 
# => false scheduled.value # available after 0.1sec # and in chain scheduled = delay { 1 }.schedule(0.1).then(&:succ) -# => <#Concurrent::Promises::Future:0x7ff23b990b58 pending blocks:[]> +# => <#Concurrent::Promises::Future:0x7fbb022948b0 pending blocks:[]> # will not be scheduled until value is requested sleep 0.1 scheduled.value # returns after another 0.1sec +``` +Resolvable Future and Event: -### Resolvable Future and Event +```ruby future = resolvable_future -# => <#Concurrent::Promises::ResolvableFuture:0x7ff23b95a2b0 pending blocks:[]> +# => <#Concurrent::Promises::ResolvableFuture:0x7fbb0223c1b0 pending blocks:[]> event = resolvable_event() -# => <#Concurrent::Promises::ResolvableEvent:0x7ff23b9528f8 pending blocks:[]> +# => <#Concurrent::Promises::ResolvableEvent:0x7fbb021df0c8 pending blocks:[]> # These threads will be blocked until the future and event is resolved t1 = Thread.new { future.value } t2 = Thread.new { event.wait } future.fulfill 1 -# => <#Concurrent::Promises::ResolvableFuture:0x7ff23b95a2b0 fulfilled blocks:[]> +# => <#Concurrent::Promises::ResolvableFuture:0x7fbb0223c1b0 fulfilled blocks:[]> future.fulfill 1 rescue $! # => # future.fulfill 2, false # => false event.resolve -# => <#Concurrent::Promises::ResolvableEvent:0x7ff23b9528f8 fulfilled blocks:[]> +# => <#Concurrent::Promises::ResolvableEvent:0x7fbb021df0c8 fulfilled blocks:[]> # The threads can be joined now [t1, t2].each &:join +``` +Callbacks: -### Callbacks - -queue = Queue.new # => # +```ruby +queue = Queue.new # => # future = delay { 1 + 1 } -# => <#Concurrent::Promises::Future:0x7ff23b9203f8 pending blocks:[]> +# => <#Concurrent::Promises::Future:0x7fbb021b4fd0 pending blocks:[]> future.on_fulfillment { queue << 1 } # evaluated asynchronously future.on_fulfillment! { queue << 2 } # evaluated on resolving thread @@ -263,26 +269,26 @@ queue.empty? 
# => true future.value # => 2 queue.pop # => 2 queue.pop # => 1 +``` +Factory methods are taking names of the global executors +(or instances of custom executors). -### Thread-pools - -# Factory methods are taking names of the global executors -# (ot instances of custom executors) - +```ruby # executed on :fast executor, only short and non-blocking tasks can go there future_on(:fast) { 2 }. # executed on executor for blocking and long operations then_using(:io) { File.read __FILE__ }. wait +``` +Interoperability with actors: -### Interoperability with actors - +```ruby actor = Concurrent::Actor::Utils::AdHoc.spawn :square do -> v { v ** 2 } end -# => # +# => # future { 2 }. @@ -291,81 +297,27 @@ future { 2 }. value # => 6 actor.ask(2).then(&:succ).value # => 5 - - -### Interoperability with channels - -ch1 = Concurrent::Channel.new -# => #, -# @__lock__=#, -# @buffer=nil, -# @capacity=1, -# @closed=false, -# @putting=[], -# @size=0, -# @taking=[]>, -# @validator= -# #> -ch2 = Concurrent::Channel.new -# => #, -# @__lock__=#, -# @buffer=nil, -# @capacity=1, -# @closed=false, -# @putting=[], -# @size=0, -# @taking=[]>, -# @validator= -# #> - -result = select(ch1, ch2) -# => <#Concurrent::Promises::Future:0x7ff23b05a980 pending blocks:[]> -ch1.put 1 # => true -result.value! -# => [1, -# #, -# @__lock__=#, -# @buffer=nil, -# @capacity=1, -# @closed=false, -# @putting=[], -# @size=0, -# @taking=[]>, -# @validator= -# #>] - - -future { 1+1 }. - then_put(ch1) -# => <#Concurrent::Promises::Future:0x7ff23ba19250 pending blocks:[]> -result = future { '%02d' }. - then_select(ch1, ch2). - then { |format, (value, channel)| format format, value } -# => <#Concurrent::Promises::Future:0x7ff23c3569f8 pending blocks:[]> -result.value! 
# => "02" - +``` ### Common use-cases Examples -# simple background processing +#### simple background processing + +```ruby future { do_stuff } -# => <#Concurrent::Promises::Future:0x7ff23c336928 pending blocks:[]> +# => <#Concurrent::Promises::Future:0x7fbb020bfe40 pending blocks:[]> +``` -# parallel background processing +#### parallel background processing + +```ruby jobs = 10.times.map { |i| future { i } } zip(*jobs).value # => [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] +``` +#### periodic task -# periodic task +```ruby def schedule_job(interval, &job) # schedule the first execution and chain restart og the job Concurrent.schedule(interval, &job).chain do |fulfilled, continue, reason| @@ -374,16 +326,17 @@ def schedule_job(interval, &job) else # handle error p reason - # retry - schedule_job(interval, &job) + # retry sooner + schedule_job(interval / 10, &job) end end end -queue = Queue.new # => # +queue = Queue.new # => # count = 0 # => 0 +interval = 0.05 # small just not to delay execution of this example -schedule_job 0.05 do +schedule_job interval do queue.push count count += 1 # to continue scheduling return true, false will end the task @@ -391,6 +344,7 @@ schedule_job 0.05 do # to continue scheduling return true true else + # close the queue with nil to simplify reading it queue.push nil # to end the task return false false @@ -399,10 +353,14 @@ end # read the queue arr, v = [], nil; arr << v while (v = queue.pop) +# arr has the results from the executed scheduled tasks arr # => [0, 1, 2, 3] +``` +#### How to limit processing where there are limited resources? + +By creating an actor managing the resource -# How to limit processing where there are limited resources? -# By creating an actor managing the resource +```ruby DB = Concurrent::Actor::Utils::AdHoc.spawn :db do data = Array.new(10) { |i| '*' * i } lambda do |message| @@ -423,9 +381,11 @@ end zip(*concurrent_jobs).value! 
# => [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, "undefined method `size' for nil:NilClass"] +``` +In reality there is often a pool though: -# In reality there is often a pool though: +```ruby data = Array.new(10) { |i| '*' * i } # => ["", # "*", From c1d0dd9d0cb7a1513d33a15a0a93ef32d8f0592c Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Sat, 30 Jul 2016 11:09:36 +0200 Subject: [PATCH 32/68] leave _using suffix just for callbacks --- lib/concurrent/edge/promises.rb | 16 ++++++++-------- spec/concurrent/promises_spec.rb | 2 +- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/lib/concurrent/edge/promises.rb b/lib/concurrent/edge/promises.rb index 3254cb9dd..1cbf1f252 100644 --- a/lib/concurrent/edge/promises.rb +++ b/lib/concurrent/edge/promises.rb @@ -567,7 +567,7 @@ def default_executor # @!macro promises.shortcut.using # @return [Future] def chain(*args, &task) - chain_using @DefaultExecutor, *args, &task + chain_on @DefaultExecutor, *args, &task end # Chains the task to be executed asynchronously on executor after it is resolved. @@ -577,11 +577,11 @@ def chain(*args, &task) # @return [Future] # @!macro promise.param.task-future # - # @overload an_event.chain_using(executor, *args, &task) + # @overload an_event.chain_on(executor, *args, &task) # @yield [*args] to the task. - # @overload a_future.chain_using(executor, *args, &task) + # @overload a_future.chain_on(executor, *args, &task) # @yield [fulfilled?, value, reason, *args] to the task. - def chain_using(executor, *args, &task) + def chain_on(executor, *args, &task) ChainPromise.new(self, @DefaultExecutor, executor, args, &task).future end @@ -935,7 +935,7 @@ def exception(*args) # @!macro promises.shortcut.using # @return [Future] def then(*args, &task) - then_using @DefaultExecutor, *args, &task + then_on @DefaultExecutor, *args, &task end # Chains the task to be executed asynchronously on executor after it fulfills. 
Does not run @@ -946,14 +946,14 @@ def then(*args, &task) # @!macro promise.param.task-future # @return [Future] # @yield [value, *args] to the task. - def then_using(executor, *args, &task) + def then_on(executor, *args, &task) ThenPromise.new(self, @DefaultExecutor, executor, args, &task).future end # @!macro promises.shortcut.using # @return [Future] def rescue(*args, &task) - rescue_using @DefaultExecutor, *args, &task + rescue_on @DefaultExecutor, *args, &task end # Chains the task to be executed asynchronously on executor after it rejects. Does not run @@ -964,7 +964,7 @@ def rescue(*args, &task) # @!macro promise.param.task-future # @return [Future] # @yield [reason, *args] to the task. - def rescue_using(executor, *args, &task) + def rescue_on(executor, *args, &task) RescuePromise.new(self, @DefaultExecutor, executor, args, &task).future end diff --git a/spec/concurrent/promises_spec.rb b/spec/concurrent/promises_spec.rb index 40c5075ad..8c88e6333 100644 --- a/spec/concurrent/promises_spec.rb +++ b/spec/concurrent/promises_spec.rb @@ -298,7 +298,7 @@ def behaves_as_delay(delay, value) it 'chains' do future0 = future { 1 }.then { |v| v + 2 } # both executed on default FAST_EXECUTOR - future1 = future0.then_using(:fast) { raise 'boo' } # executed on IO_EXECUTOR + future1 = future0.then_on(:fast) { raise 'boo' } # executed on IO_EXECUTOR future2 = future1.then { |v| v + 1 } # will reject with 'boo' error, executed on default FAST_EXECUTOR future3 = future1.rescue { |err| err.message } # executed on default FAST_EXECUTOR future4 = future0.chain { |success, value, reason| success } # executed on default FAST_EXECUTOR From 40a4cc3251b9df4422fd90189b6423e87810c728 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Sat, 30 Jul 2016 11:14:18 +0200 Subject: [PATCH 33/68] Allow to insert throttle into chain --- lib/concurrent/edge/promises.rb | 14 ++++++++++++-- spec/concurrent/promises_spec.rb | 25 ++++++++++++++++--------- 2 files changed, 28 insertions(+), 11 
deletions(-) diff --git a/lib/concurrent/edge/promises.rb b/lib/concurrent/edge/promises.rb index 1cbf1f252..70e02a8c5 100644 --- a/lib/concurrent/edge/promises.rb +++ b/lib/concurrent/edge/promises.rb @@ -2031,10 +2031,12 @@ def initialize(max) @Queue = Queue.new end - def limit(ready = nil, &block) + def limit(future = nil, &block) # TODO (pitr-ch 11-Jun-2016): triggers should allocate resources when they are to be required + trigger = future ? future & get_event : get_event + if block_given? - block.call(get_event).on_resolution! { done } + block.call(trigger).on_resolution! { done } else get_event end @@ -2067,4 +2069,12 @@ def get_event end end end + + class Promises::AbstractEventFuture < Synchronization::Object + + def throttle(throttle, &throttled_future) + throttle.limit(self, &throttled_future) + end + + end end diff --git a/spec/concurrent/promises_spec.rb b/spec/concurrent/promises_spec.rb index 8c88e6333..c23a834e4 100644 --- a/spec/concurrent/promises_spec.rb +++ b/spec/concurrent/promises_spec.rb @@ -496,19 +496,26 @@ def behaves_as_delay(delay, value) describe 'Throttling' do specify do - throttle = Concurrent::Throttle.new 3 + max_tree = Concurrent::Throttle.new 3 counter = Concurrent::AtomicFixnum.new + testing = -> do + counter.increment + sleep 0.01 + # returns less then 3 since it's throttled + counter.decrement + end + expect(Concurrent::Promises.zip( *12.times.map do |i| - throttle.limit do |trigger| - trigger.then do - counter.increment - sleep 0.01 - counter.decrement - end - end + max_tree.limit { |trigger| trigger.then &testing } + end).value.all? { |v| v < 3 }).to be_truthy + + expect(Concurrent::Promises.zip( + *12.times.map do |i| + Concurrent::Promises. + fulfilled_future(i). + throttle(max_tree) { |trigger| trigger.then &testing } end).value.all? 
{ |v| v < 3 }).to be_truthy end end - end From 773ad598fc24ec4c2fd0bfec94b4161102c254d9 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Sat, 30 Jul 2016 11:40:02 +0200 Subject: [PATCH 34/68] fix bad macro usage --- lib/concurrent/edge/promises.rb | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/concurrent/edge/promises.rb b/lib/concurrent/edge/promises.rb index 70e02a8c5..ee1c0516a 100644 --- a/lib/concurrent/edge/promises.rb +++ b/lib/concurrent/edge/promises.rb @@ -564,7 +564,7 @@ def default_executor @DefaultExecutor end - # @!macro promises.shortcut.using + # @!macro promises.shortcut.on # @return [Future] def chain(*args, &task) chain_on @DefaultExecutor, *args, &task @@ -932,7 +932,7 @@ def exception(*args) end end - # @!macro promises.shortcut.using + # @!macro promises.shortcut.on # @return [Future] def then(*args, &task) then_on @DefaultExecutor, *args, &task @@ -950,7 +950,7 @@ def then_on(executor, *args, &task) ThenPromise.new(self, @DefaultExecutor, executor, args, &task).future end - # @!macro promises.shortcut.using + # @!macro promises.shortcut.on # @return [Future] def rescue(*args, &task) rescue_on @DefaultExecutor, *args, &task From ee511f23c5b0ceeb5042dd81a0a93bffa863e92b Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Sat, 30 Jul 2016 12:29:37 +0200 Subject: [PATCH 35/68] fix headers, add cancellation and throttling to documentation --- doc/promises.in.md | 56 ++++++++++++++++----- doc/promises.out.md | 117 +++++++++++++++++++++++++++++++------------- 2 files changed, 128 insertions(+), 45 deletions(-) diff --git a/doc/promises.in.md b/doc/promises.in.md index ee9671a1c..54bb6c39f 100644 --- a/doc/promises.in.md +++ b/doc/promises.in.md @@ -1,5 +1,3 @@ -# Promises Framework - Promises is a new framework unifying former `Concurrent::Future`, `Concurrent::Promise`, `Concurrent::IVar`, `Concurrent::Event`, `Concurrent.dataflow`, `Delay`, and `TimerTask`.
It extensively uses the new @@ -8,8 +6,6 @@ with the exception of obviously blocking operations like `#wait`, `#value`, etc. As a result it lowers a danger of deadlocking and offers better performance. -## Overview - *TODO* - What is it? @@ -17,7 +13,7 @@ better performance. - Main classes {Future}, {Event} - Explain pool usage :io vs :fast, and `_on` `_using` suffixes. -## Old examples follow +# Old examples *TODO review pending* @@ -219,7 +215,7 @@ Factory methods are taking names of the global executors # executed on :fast executor, only short and non-blocking tasks can go there future_on(:fast) { 2 }. # executed on executor for blocking and long operations - then_using(:io) { File.read __FILE__ }. + then_on(:io) { File.read __FILE__ }. wait ``` @@ -239,22 +235,22 @@ future { 2 }. actor.ask(2).then(&:succ).value ``` -### Common use-cases Examples +# Common use-cases Examples -#### simple background processing +## simple background processing ```ruby future { do_stuff } ``` -#### parallel background processing +## parallel background processing ```ruby jobs = 10.times.map { |i| future { i } } # zip(*jobs).value ``` -#### periodic task +## periodic task ```ruby def schedule_job(interval, &job) @@ -295,7 +291,7 @@ arr, v = [], nil; arr << v while (v = queue.pop) # # arr has the results from the executed scheduled tasks arr ``` -#### How to limit processing where there are limited resources? +## How to limit processing where there are limited resources? By creating an actor managing the resource @@ -348,3 +344,41 @@ end # zip(*concurrent_jobs).value! ``` + +# Experimental + +## Cancellation + +```ruby +source, token = Concurrent::Cancellation.create + +futures = Array.new(2) do + future(token) do |token| + token.loop_until_canceled { Thread.pass } + :done + end +end + +sleep 0.05 +source.cancel +futures.map(&:value!) 
+``` + +## Throttling + +```ruby +data = (0..10).to_a +max_tree = Concurrent::Throttle.new 3 + +futures = data.map do |data| + future(data) do |data| + # un-throttled + data + 1 + end.throttle(max_tree) do |trigger| + # throttled, imagine it uses DB connections or other limited resource + trigger.then { |v| v * 2 * 2 } + end +end # + +futures.map(&:value!) +``` diff --git a/doc/promises.out.md b/doc/promises.out.md index 6c61accc2..95987cfa1 100644 --- a/doc/promises.out.md +++ b/doc/promises.out.md @@ -1,5 +1,3 @@ -# Promises Framework - Promises is a new framework unifying former `Concurrent::Future`, `Concurrent::Promise`, `Concurrent::IVar`, `Concurrent::Event`, `Concurrent.dataflow`, `Delay`, and `TimerTask`. It extensively uses the new @@ -8,8 +6,6 @@ with the exception of obviously blocking operations like `#wait`, `#value`, etc. As a result it lowers a danger of deadlocking and offers better performance. -## Overview - *TODO* - What is it? @@ -17,7 +13,7 @@ better performance. - Main classes {Future}, {Event} - Explain pool usage :io vs :fast, and `_on` `_using` suffixes. -## Old examples follow +# Old examples *TODO review pending* @@ -66,16 +62,16 @@ Class.new do resolvable_event end end.new.a_method -# => <#Concurrent::Promises::ResolvableEvent:0x7fbb023df1e8 pending blocks:[]> +# => <#Concurrent::Promises::ResolvableEvent:0x7fc5b1b085c8 pending blocks:[]> Module.new { extend Concurrent::Promises::FactoryMethods }.resolvable_event -# => <#Concurrent::Promises::ResolvableEvent:0x7fbb023d7600 pending blocks:[]> +# => <#Concurrent::Promises::ResolvableEvent:0x7fc5b1b02088 pending blocks:[]> ``` The module is already extended into {Promises} for convenience. 
```ruby Concurrent::Promises.resolvable_event -# => <#Concurrent::Promises::ResolvableEvent:0x7fbb023d5ad0 pending blocks:[]> +# => <#Concurrent::Promises::ResolvableEvent:0x7fc5b1afac48 pending blocks:[]> ``` For this guide we include the module into `main` so we can call the factory @@ -84,7 +80,7 @@ methods in following examples directly. ```ruby include Concurrent::Promises::FactoryMethods resolvable_event -# => <#Concurrent::Promises::ResolvableEvent:0x7fbb023cf608 pending blocks:[]> +# => <#Concurrent::Promises::ResolvableEvent:0x7fc5b1af8830 pending blocks:[]> ``` Simple asynchronous task: @@ -101,7 +97,7 @@ Rejecting asynchronous task: ```ruby future = future { raise 'Boom' } -# => <#Concurrent::Promises::Future:0x7fbb023b4308 pending blocks:[]> +# => <#Concurrent::Promises::Future:0x7fc5b1ad9700 pending blocks:[]> future.value # => nil future.value! rescue $! # => # future.reason # => # @@ -113,9 +109,9 @@ Direct creation of resolved futures: ```ruby fulfilled_future(Object.new) -# => <#Concurrent::Promises::Future:0x7fbb023a58a8 fulfilled blocks:[]> +# => <#Concurrent::Promises::Future:0x7fc5b1acaa70 fulfilled blocks:[]> rejected_future(StandardError.new("boom")) -# => <#Concurrent::Promises::Future:0x7fbb023a79a0 rejected blocks:[]> +# => <#Concurrent::Promises::Future:0x7fc5b1ac97b0 rejected blocks:[]> ``` Chaining of futures: @@ -155,7 +151,7 @@ fulfilled_future(Object.new).then(&:succ).rescue { 1 }.then(&:succ).value # resc fulfilled_future(1).then(&:succ).rescue { |e| e.message }.then(&:succ).value # no error, rescue not applied rejected_zip = fulfilled_future(1) & rejected_future(StandardError.new('boom')) -# => <#Concurrent::Promises::Future:0x7fbb023343b0 rejected blocks:[]> +# => <#Concurrent::Promises::Future:0x7fc5b3051380 rejected blocks:[]> rejected_zip.result # => [false, [1, nil], [nil, #]] rejected_zip.then { |v| 'never happens' }.result @@ -177,13 +173,13 @@ future.value It propagates trough chain allowing whole or partial lazy chains. 
```ruby head = delay { 1 } -# => <#Concurrent::Promises::Future:0x7fbb02304b38 pending blocks:[]> +# => <#Concurrent::Promises::Future:0x7fc5b3021450 pending blocks:[]> branch1 = head.then(&:succ) -# => <#Concurrent::Promises::Future:0x7fbb022fe328 pending blocks:[]> +# => <#Concurrent::Promises::Future:0x7fc5b301b398 pending blocks:[]> branch2 = head.delay.then(&:succ) -# => <#Concurrent::Promises::Future:0x7fbb03867d68 pending blocks:[]> +# => <#Concurrent::Promises::Future:0x7fc5b30190c0 pending blocks:[]> join = branch1 & branch2 -# => <#Concurrent::Promises::Future:0x7fbb03865fe0 pending blocks:[]> +# => <#Concurrent::Promises::Future:0x7fc5b30138f0 pending blocks:[]> sleep 0.1 # nothing will resolve [head, branch1, branch2, join].map(&:resolved?) @@ -217,14 +213,14 @@ Scheduling of asynchronous tasks: # it'll be executed after 0.1 seconds scheduled = schedule(0.1) { 1 } -# => <#Concurrent::Promises::Future:0x7fbb022a5570 pending blocks:[]> +# => <#Concurrent::Promises::Future:0x7fc5b1a2a7f0 pending blocks:[]> scheduled.resolved? 
# => false scheduled.value # available after 0.1sec # and in chain scheduled = delay { 1 }.schedule(0.1).then(&:succ) -# => <#Concurrent::Promises::Future:0x7fbb022948b0 pending blocks:[]> +# => <#Concurrent::Promises::Future:0x7fc5b1a19a18 pending blocks:[]> # will not be scheduled until value is requested sleep 0.1 scheduled.value # returns after another 0.1sec @@ -235,21 +231,21 @@ Resolvable Future and Event: ```ruby future = resolvable_future -# => <#Concurrent::Promises::ResolvableFuture:0x7fbb0223c1b0 pending blocks:[]> +# => <#Concurrent::Promises::ResolvableFuture:0x7fc5b19c17a0 pending blocks:[]> event = resolvable_event() -# => <#Concurrent::Promises::ResolvableEvent:0x7fbb021df0c8 pending blocks:[]> +# => <#Concurrent::Promises::ResolvableEvent:0x7fc5b19c0468 pending blocks:[]> # These threads will be blocked until the future and event is resolved t1 = Thread.new { future.value } t2 = Thread.new { event.wait } future.fulfill 1 -# => <#Concurrent::Promises::ResolvableFuture:0x7fbb0223c1b0 fulfilled blocks:[]> +# => <#Concurrent::Promises::ResolvableFuture:0x7fc5b19c17a0 fulfilled blocks:[]> future.fulfill 1 rescue $! # => # future.fulfill 2, false # => false event.resolve -# => <#Concurrent::Promises::ResolvableEvent:0x7fbb021df0c8 fulfilled blocks:[]> +# => <#Concurrent::Promises::ResolvableEvent:0x7fc5b19c0468 fulfilled blocks:[]> # The threads can be joined now [t1, t2].each &:join @@ -258,9 +254,9 @@ event.resolve Callbacks: ```ruby -queue = Queue.new # => # +queue = Queue.new # => # future = delay { 1 + 1 } -# => <#Concurrent::Promises::Future:0x7fbb021b4fd0 pending blocks:[]> +# => <#Concurrent::Promises::Future:0x7fc5b193a9a8 pending blocks:[]> future.on_fulfillment { queue << 1 } # evaluated asynchronously future.on_fulfillment! 
{ queue << 2 } # evaluated on resolving thread @@ -278,7 +274,7 @@ Factory methods are taking names of the global executors # executed on :fast executor, only short and non-blocking tasks can go there future_on(:fast) { 2 }. # executed on executor for blocking and long operations - then_using(:io) { File.read __FILE__ }. + then_on(:io) { File.read __FILE__ }. wait ``` @@ -288,7 +284,7 @@ Interoperability with actors: actor = Concurrent::Actor::Utils::AdHoc.spawn :square do -> v { v ** 2 } end -# => # +# => # future { 2 }. @@ -299,23 +295,23 @@ future { 2 }. actor.ask(2).then(&:succ).value # => 5 ``` -### Common use-cases Examples +# Common use-cases Examples -#### simple background processing +## simple background processing ```ruby future { do_stuff } -# => <#Concurrent::Promises::Future:0x7fbb020bfe40 pending blocks:[]> +# => <#Concurrent::Promises::Future:0x7fc5b186b4f0 pending blocks:[]> ``` -#### parallel background processing +## parallel background processing ```ruby jobs = 10.times.map { |i| future { i } } zip(*jobs).value # => [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] ``` -#### periodic task +## periodic task ```ruby def schedule_job(interval, &job) @@ -332,7 +328,7 @@ def schedule_job(interval, &job) end end -queue = Queue.new # => # +queue = Queue.new # => # count = 0 # => 0 interval = 0.05 # small just not to delay execution of this example @@ -356,7 +352,7 @@ arr, v = [], nil; arr << v while (v = queue.pop) # arr has the results from the executed scheduled tasks arr # => [0, 1, 2, 3] ``` -#### How to limit processing where there are limited resources? +## How to limit processing where there are limited resources? By creating an actor managing the resource @@ -421,3 +417,56 @@ end zip(*concurrent_jobs).value! 
# => [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, "undefined method `size' for nil:NilClass"] ``` + +# Experimental + +## Cancellation + +```ruby +source, token = Concurrent::Cancellation.create +# => [#]>, +# @ResolveArgs=[], +# @Token= +# #>>, +# #>] + +futures = Array.new(2) do + future(token) do |token| + token.loop_until_canceled { Thread.pass } + :done + end +end +# => [<#Concurrent::Promises::Future:0x7fc5b1938ef0 pending blocks:[]>, +# <#Concurrent::Promises::Future:0x7fc5b0a1f860 pending blocks:[]>] + +sleep 0.05 # => 0 +source.cancel # => true +futures.map(&:value!) # => [:done, :done] +``` + +## Throttling + +```ruby +data = (0..10).to_a # => [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] +max_tree = Concurrent::Throttle.new 3 +# => #, +# @Queue=#> + +futures = data.map do |data| + future(data) do |data| + # un-throttled + data + 1 + end.throttle(max_tree) do |trigger| + # throttled, imagine it uses DB connections or other limited resource + trigger.then { |v| v * 2 * 2 } + end +end + +futures.map(&:value!) 
+# => [4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44] +``` From efa9c6e2e27f4eaac94b3424d0eb479b874d1c34 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Sat, 30 Jul 2016 13:04:00 +0200 Subject: [PATCH 36/68] fix spec --- spec/concurrent/promises_spec.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spec/concurrent/promises_spec.rb b/spec/concurrent/promises_spec.rb index c23a834e4..709edc57d 100644 --- a/spec/concurrent/promises_spec.rb +++ b/spec/concurrent/promises_spec.rb @@ -498,7 +498,7 @@ def behaves_as_delay(delay, value) specify do max_tree = Concurrent::Throttle.new 3 counter = Concurrent::AtomicFixnum.new - testing = -> do + testing = -> *args do counter.increment sleep 0.01 # returns less then 3 since it's throttled From 08b9aad77affab6885cf549d005e2b4c3a5d8ccf Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Sat, 30 Jul 2016 15:48:37 +0200 Subject: [PATCH 37/68] Remove dependency on logging --- lib/concurrent/edge/promises.rb | 25 ++++++------------------- lib/concurrent/errors.rb | 25 ++++++++++++++++++++++++- 2 files changed, 30 insertions(+), 20 deletions(-) diff --git a/lib/concurrent/edge/promises.rb b/lib/concurrent/edge/promises.rb index ee1c0516a..faefe4975 100644 --- a/lib/concurrent/edge/promises.rb +++ b/lib/concurrent/edge/promises.rb @@ -2,7 +2,6 @@ require 'concurrent/atomic/atomic_boolean' require 'concurrent/atomic/atomic_fixnum' require 'concurrent/lock_free_stack' -require 'concurrent/concern/logging' require 'concurrent/errors' module Concurrent @@ -469,7 +468,6 @@ class AbstractEventFuture < Synchronization::Object safe_initialization! private(*attr_atomic(:internal_state) - [:internal_state]) - include Concern::Logging include InternalStates def initialize(promise, default_executor) @@ -924,9 +922,7 @@ def exception(*args) raise Concurrent::Error, 'it is not rejected' unless rejected? reason = internal_state.reason if reason.is_a?(::Array) - # TODO (pitr-ch 12-Jun-2016): remove logging!, how? 
- reason.each { |e| log ERROR, 'Promises::Future', e } - Concurrent::Error.new 'multiple exceptions, inspect log' + Concurrent::MultipleErrors.new reason else reason.exception(*args) end @@ -1119,14 +1115,9 @@ def apply(args, block) def rejected_resolution(raise_on_reassign, state) if raise_on_reassign - # TODO (pitr-ch 12-Jun-2016): remove logging?! - # print otherwise hidden error - log ERROR, 'Promises::Future', reason if reason - log ERROR, 'Promises::Future', state.reason if state.reason - - raise(Concurrent::MultipleAssignmentError.new( - "Future can be resolved only once. Current result is #{result}, " + - "trying to set #{state.result}")) + raise Concurrent::MultipleAssignmentError.new( + "Future can be resolved only once. It's #{result}, trying to set #{state.result}.", + current_result: result, new_result: state.result) end return false end @@ -1257,7 +1248,6 @@ def with_hidden_resolvable class AbstractPromise < Synchronization::Object safe_initialization! include InternalStates - include Concern::Logging def initialize(future) super() @@ -1282,7 +1272,7 @@ def touch end def to_s - "<##{self.class}:0x#{'%x' % (object_id << 1)} #{state}>" + format '<#%s:0x%x %s>', self.class, object_id << 1, state end def inspect @@ -1298,11 +1288,8 @@ def resolve_with(new_state, raise_on_reassign = true) # @return [Future] def evaluate_to(*args, block) resolve_with Fulfilled.new(block.call(*args)) - rescue StandardError => error - resolve_with Rejected.new(error) + # TODO (pitr-ch 30-Jul-2016): figure out what should be rescued, there is an issue about it rescue Exception => error - # TODO (pitr-ch 12-Jun-2016): remove logging? 
- log(ERROR, 'Promises::Future', error) resolve_with Rejected.new(error) end end diff --git a/lib/concurrent/errors.rb b/lib/concurrent/errors.rb index 7d1553638..b69fec01f 100644 --- a/lib/concurrent/errors.rb +++ b/lib/concurrent/errors.rb @@ -30,7 +30,18 @@ module Concurrent # Raised when an attempt is made to modify an immutable object # (such as an `IVar`) after its final state has been set. - MultipleAssignmentError = Class.new(Error) + class MultipleAssignmentError < Error + attr_reader :inspection_data + + def initialize(message = nil, inspection_data = nil) + @inspection_data = inspection_data + super message + end + + def inspect + format '%s %s>', super[0..-2], @inspection_data.inspect + end + end # Raised by an `Executor` when it is unable to process a given task, # possibly because of a reject policy or other internal error. @@ -43,4 +54,16 @@ module Concurrent # Raised when an operation times out. TimeoutError = Class.new(Error) + # Aggregates multiple exceptions. + class MultipleErrors < Error + attr_reader :errors + + def initialize(errors, message = "#{errors.size} errors") + @errors = errors + super [*message, + *errors.map { |e| [format('%s (%s)', e.message, e.class), *e.backtrace] }.flatten(1) + ].join("\n") + end + end + end From 7247700d0bed10c39360d5c6cf08705ba3f0de01 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Sat, 30 Jul 2016 15:49:00 +0200 Subject: [PATCH 38/68] Remove unnecessary method --- lib/concurrent/edge/promises.rb | 4 ---- 1 file changed, 4 deletions(-) diff --git a/lib/concurrent/edge/promises.rb b/lib/concurrent/edge/promises.rb index faefe4975..f4e5d1429 100644 --- a/lib/concurrent/edge/promises.rb +++ b/lib/concurrent/edge/promises.rb @@ -1729,10 +1729,6 @@ def on_resolvable(resolved_future) # @abstract class AbstractAnyPromise < BlockedPromise - # @!visibility private - def touch - blocked_by.each(&:touch) unless @Future.resolved? 
- end end class AnyResolvedFuturePromise < AbstractAnyPromise From 292cc933f89965207a69b3c66008ea6dcc405696 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Sat, 5 Nov 2016 22:22:25 +0100 Subject: [PATCH 39/68] Readme update --- README.md | 70 ++++++++++++++++++++++--------------------------------- 1 file changed, 28 insertions(+), 42 deletions(-) diff --git a/README.md b/README.md index 210fea45d..2f5f1de75 100644 --- a/README.md +++ b/README.md @@ -9,39 +9,28 @@ [![License](https://img.shields.io/badge/license-MIT-green.svg)](http://opensource.org/licenses/MIT) [![Gitter chat](https://img.shields.io/badge/IRC%20(gitter)-devs%20%26%20users-brightgreen.svg)](https://gitter.im/ruby-concurrency/concurrent-ruby) - - - - - -
-

- Modern concurrency tools for Ruby. Inspired by - Erlang, - Clojure, - Scala, - Haskell, - F#, - C#, - Java, - and classic concurrency patterns. -

-

- The design goals of this gem are: -

    -
  • Be an 'unopinionated' toolbox that provides useful utilities without debating which is better or why
  • -
  • Remain free of external gem dependencies
  • -
  • Stay true to the spirit of the languages providing inspiration
  • -
  • But implement in a way that makes sense for Ruby
  • -
  • Keep the semantics as idiomatic Ruby as possible
  • -
  • Support features that make sense in Ruby
  • -
  • Exclude features that don't make sense in Ruby
  • -
  • Be small, lean, and loosely coupled
  • -
-

-
- -
+Modern concurrency tools for Ruby. Inspired by +[Erlang](http://www.erlang.org/doc/reference_manual/processes.html), +[Clojure](http://clojure.org/concurrent_programming), +[Scala](http://akka.io/), +[Haskell](http://www.haskell.org/haskellwiki/Applications_and_libraries/Concurrency_and_parallelism#Concurrent_Haskell), +[F#](http://blogs.msdn.com/b/dsyme/archive/2010/02/15/async-and-parallel-design-patterns-in-f-part-3-agents.aspx), +[C#](http://msdn.microsoft.com/en-us/library/vstudio/hh191443.aspx), +[Java](http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/package-summary.html), +and classic concurrency patterns. + + + +The design goals of this gem are: + +* Be an 'unopinionated' toolbox that provides useful utilities without debating which is better or why +* Remain free of external gem dependencies +* Stay true to the spirit of the languages providing inspiration +* But implement in a way that makes sense for Ruby +* Keep the semantics as idiomatic Ruby as possible +* Support features that make sense in Ruby +* Exclude features that don't make sense in Ruby +* Be small, lean, and loosely coupled ### Supported Ruby versions @@ -66,11 +55,6 @@ We also have a [mailing list](http://groups.google.com/group/concurrent-ruby) an #### General-purpose Concurrency Abstractions * [Async](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/Async.html): A mixin module that provides simple asynchronous behavior to a class. Loosely based on Erlang's [gen_server](http://www.erlang.org/doc/man/gen_server.html). -* [Promises Framework](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/Promises.html): - Unified implementation of futures and promises which combines features of previous `Future`, - `Promise`, `IVar`, `Event`, `dataflow`, `Delay`, and `TimerTask` into a single framework. 
It extensively uses the - new synchronization layer to make all the features **non-blocking** and **lock-free**, with the exception of obviously blocking - operations like `#wait`, `#value`. It also offers better performance. * [Future](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/Future.html): An asynchronous operation that produces a value. * [Dataflow](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent.html#dataflow-class_method): Built on Futures, Dataflow allows you to create a task that will be scheduled when all of its data dependencies are available. * [Promise](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/Promise.html): Similar to Futures, with more features. @@ -86,10 +70,6 @@ Collection classes that were originally part of the (deprecated) `thread_safe` g * [Map](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/Map.html) A hash-like object that should have much better performance characteristics, especially under high concurrency, than `Concurrent::Hash`. * [Tuple](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/Tuple.html) A fixed size array with volatile (synchronized, thread safe) getters/setters. -and other collections: - -* [LockFreeStack](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/LockFreeStack.html) - Value objects inspired by other languages: * [Maybe](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/Maybe.html) A thread-safe, immutable object representing an optional value, based on @@ -136,6 +116,11 @@ These features are under active development and may change frequently. They are keep backward compatibility (there may also lack tests and documentation). Semantic versions will be obeyed though. Features developed in `concurrent-ruby-edge` are expected to move to `concurrent-ruby` when final. 
+* [Promises Framework](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/Promises.html): + Unified implementation of futures and promises which combines features of previous `Future`, + `Promise`, `IVar`, `Event`, `dataflow`, `Delay`, and `TimerTask` into a single framework. It extensively uses the + new synchronization layer to make all the features **non-blocking** and **lock-free**, with the exception of obviously blocking + operations like `#wait`, `#value`. It also offers better performance. * [Actor](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/Actor.html): Implements the Actor Model, where concurrent actors exchange messages. * [Channel](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/Edge/Channel.html): @@ -145,6 +130,7 @@ be obeyed though. Features developed in `concurrent-ruby-edge` are expected to m * [LazyRegister](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/LazyRegister.html) * [AtomicMarkableReference](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/Edge/AtomicMarkableReference.html) * [LockFreeLinkedSet](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/Edge/LockFreeLinkedSet.html) +* [LockFreeStack](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/LockFreeStack.html) #### Statuses: From ef570a7736ac524080d21131fff19902eec77c27 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Sat, 5 Nov 2016 22:46:48 +0100 Subject: [PATCH 40/68] Move files where they belong --- lib/concurrent.rb | 1 - lib/concurrent/{ => edge}/lock_free_stack.rb | 0 spec/concurrent/{ => edge}/promises_spec.rb | 37 +++++++++++++++++--- 3 files changed, 33 insertions(+), 5 deletions(-) rename lib/concurrent/{ => edge}/lock_free_stack.rb (100%) rename spec/concurrent/{ => edge}/promises_spec.rb (93%) diff --git a/lib/concurrent.rb b/lib/concurrent.rb index 6fc240318..a6f4e5898 100644 --- a/lib/concurrent.rb +++ b/lib/concurrent.rb @@ -29,7 +29,6 @@ require 'concurrent/settable_struct' require 
'concurrent/timer_task' require 'concurrent/tvar' -require 'concurrent/lock_free_stack' require 'concurrent/thread_safe/synchronized_delegator' require 'concurrent/thread_safe/util' diff --git a/lib/concurrent/lock_free_stack.rb b/lib/concurrent/edge/lock_free_stack.rb similarity index 100% rename from lib/concurrent/lock_free_stack.rb rename to lib/concurrent/edge/lock_free_stack.rb diff --git a/spec/concurrent/promises_spec.rb b/spec/concurrent/edge/promises_spec.rb similarity index 93% rename from spec/concurrent/promises_spec.rb rename to spec/concurrent/edge/promises_spec.rb index 709edc57d..d2d533f67 100644 --- a/spec/concurrent/promises_spec.rb +++ b/spec/concurrent/edge/promises_spec.rb @@ -361,7 +361,7 @@ def behaves_as_delay(delay, value) branch1.zip(branch2).then { |b1, b2| b1 + b2 }, (branch1 & branch2).then { |b1, b2| b1 + b2 }] - sleep 0.1 + Thread.pass until branch1.resolved? expect(branch1).to be_resolved expect(branch2).not_to be_resolved @@ -496,7 +496,7 @@ def behaves_as_delay(delay, value) describe 'Throttling' do specify do - max_tree = Concurrent::Throttle.new 3 + max_tree = Concurrent::Promises::Throttle.new 3 counter = Concurrent::AtomicFixnum.new testing = -> *args do counter.increment @@ -508,14 +508,43 @@ def behaves_as_delay(delay, value) expect(Concurrent::Promises.zip( *12.times.map do |i| max_tree.limit { |trigger| trigger.then &testing } - end).value.all? { |v| v < 3 }).to be_truthy + end).value!.all? { |v| v < 3 }).to be_truthy expect(Concurrent::Promises.zip( *12.times.map do |i| Concurrent::Promises. fulfilled_future(i). throttle(max_tree) { |trigger| trigger.then &testing } - end).value.all? { |v| v < 3 }).to be_truthy + end).value!.all? 
{ |v| v < 3 }).to be_truthy + end + + specify do + max_five = Concurrent::Promises::Throttle.new 5 + jobs = 20.times.map do |i| + max_five.limit do |trigger| + # trigger is an event, has same chain-able capabilities as current promise + trigger.then do + # at any given time there max 5 simultaneous executions of this block + the_work = i * 2 + end + end + end + result = Concurrent::Promises.zip_futures(*jobs) + p result.value! + # => [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38] + end + + specify do + max_five = Concurrent::Promises::Throttle.new 5 + jobs = 20.times.map do |i| + max_five.then_limit do + # at any given time there max 5 simultaneous executions of this block + the_work = i * 2 + end # returns promise + end + result = Concurrent::Promises.zip_futures(*jobs) + p result.value! + # => [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38] end end end From 38ab1329f780a2eb3fd89c7f425f0e617cc847f7 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Mon, 12 Dec 2016 11:34:58 +0100 Subject: [PATCH 41/68] Update throttle, add conversion methods --- lib/concurrent/edge/promises.rb | 116 +++++++++++++++++------ lib/concurrent/synchronization/object.rb | 4 +- spec/concurrent/edge/promises_spec.rb | 12 +-- 3 files changed, 94 insertions(+), 38 deletions(-) diff --git a/lib/concurrent/edge/promises.rb b/lib/concurrent/edge/promises.rb index f4e5d1429..001b49e6e 100644 --- a/lib/concurrent/edge/promises.rb +++ b/lib/concurrent/edge/promises.rb @@ -1,7 +1,7 @@ require 'concurrent/synchronization' require 'concurrent/atomic/atomic_boolean' require 'concurrent/atomic/atomic_fixnum' -require 'concurrent/lock_free_stack' +require 'concurrent/edge/lock_free_stack' require 'concurrent/errors' module Concurrent @@ -781,7 +781,7 @@ class Event < AbstractEventFuture # # @return [Future, Event] def zip(other) - if other.is?(Future) + if other.is_a?(Future) ZipFutureEventPromise.new(other, self, @DefaultExecutor).future else 
ZipEventEventPromise.new(self, other, @DefaultExecutor).event @@ -825,7 +825,20 @@ def schedule(intended_time) end.flat_event end - # TODO (pitr-ch 12-Jun-2016): add to_event, to_future + # Converts event to a future. The future is fulfilled when the event is resolved, the future may never fail. + # + # @return [Future] + def to_future + future = Promises.resolvable_future + ensure + chain_resolvable(future) + end + + # Returns self, since this is event + # @return [Event] + def to_event + self + end # @!macro promises.method.with_default_executor # @return [Event] @@ -1111,6 +1124,21 @@ def apply(args, block) internal_state.apply args, block end + # Converts future to event which is resolved when future is resolved by fulfillment or rejection. + # + # @return [Event] + def to_event + event = Promises.resolvable_event + ensure + chain_resolvable(event) + end + + # Returns self, since this is a future + # @return [Future] + def to_future + self + end + private def rejected_resolution(raise_on_reassign, state) @@ -1195,7 +1223,8 @@ class ResolvableFuture < Future # which triggers all dependent futures. # # @!macro promise.param.raise_on_reassign - def resolve(fulfilled, value, reason, raise_on_reassign = true) + def resolve(fulfilled = true, value = nil, reason = nil, raise_on_reassign = true) + # TODO (pitr-ch 25-Sep-2016): should the defaults be kept to match event resolve api? resolve_with(fulfilled ? 
Fulfilled.new(value) : Rejected.new(reason), raise_on_reassign) end @@ -1288,8 +1317,8 @@ def resolve_with(new_state, raise_on_reassign = true) # @return [Future] def evaluate_to(*args, block) resolve_with Fulfilled.new(block.call(*args)) - # TODO (pitr-ch 30-Jul-2016): figure out what should be rescued, there is an issue about it rescue Exception => error + # TODO (pitr-ch 30-Jul-2016): figure out what should be rescued, there is an issue about it resolve_with Rejected.new(error) end end @@ -1358,7 +1387,7 @@ def on_resolution(future) # @!visibility private def touch - # TODO (pitr-ch 13-Jun-2016): on construction pass down references of delays to be touched, avoids extra casses + # TODO (pitr-ch 13-Jun-2016): on construction pass down references of delays to be touched, avoids extra CASses blocked_by.each(&:touch) end @@ -1506,6 +1535,11 @@ def clear_blocked_by! nil end + def blocked_by_add(future) + @BlockedBy.push future + future.touch if self.future.touched? + end + def resolvable?(countdown, future) !@Future.internal_state.resolved? 
&& super(countdown, future) end @@ -1532,7 +1566,7 @@ def process_on_resolution(future) value = internal_state.value case value when Future, Event - @BlockedBy.push value + blocked_by_add value value.add_callback :callback_notify_blocked, self @Countdown.value else @@ -1566,7 +1600,7 @@ def process_on_resolution(future) value = internal_state.value case value when Future - @BlockedBy.push value + blocked_by_add value value.add_callback :callback_notify_blocked, self @Countdown.value when Event @@ -1599,7 +1633,8 @@ def process_on_resolution(future) value = internal_state.value case value when Future - # @BlockedBy.push value + # FIXME (pitr-ch 08-Dec-2016): will accumulate the completed futures + blocked_by_add value value.add_callback :callback_notify_blocked, self else resolve_with internal_state @@ -1871,7 +1906,7 @@ module FactoryMethods # only proof of concept # @return [Future] def select(*channels) - # TODO (pitr-ch 26-Mar-2016): redo, has to be non-blocking + # TODO (pitr-ch 26-Mar-2016): re-do, has to be non-blocking future do # noinspection RubyArgCount Channel.select do |s| @@ -1924,12 +1959,14 @@ def each_body(value, &block) end end + # TODO example: parallel jobs, cancell them all when one fails, clean-up in zip # inspired by https://msdn.microsoft.com/en-us/library/dd537607(v=vs.110).aspx class Cancellation < Synchronization::Object safe_initialization! def self.create(future_or_event = Promises.resolvable_event, *resolve_args) - [(i = new(future_or_event, *resolve_args)), i.token] + cancellation = new(future_or_event, *resolve_args) + [cancellation, cancellation.token] end private_class_method :new @@ -1960,20 +1997,18 @@ def initialize(cancel) @Cancel = cancel end - def event - @Cancel + def to_event + @Cancel.to_event end - alias_method :future, :event + def to_future + @Cancel.to_future + end def on_cancellation(*args, &block) @Cancel.on_resolution *args, &block end - def then(*args, &block) - @Cancel.chain *args, &block - end - def canceled? 
@Cancel.resolved? end @@ -1985,13 +2020,14 @@ def loop_until_canceled(&block) result end - def raise_if_canceled - raise CancelledOperationError if canceled? + def raise_if_canceled(error = CancelledOperationError) + raise error if canceled? self end - def join(*tokens) - Token.new Promises.any_event(@Cancel, *tokens.map(&:event)) + def join(*tokens, &block) + block ||= -> tokens { Promises.any_event(*tokens.map(&:to_event)) } + self.class.new block.call([@Cancel, *tokens]) end end @@ -2002,7 +2038,7 @@ def join(*tokens) # TODO (pitr-ch 27-Mar-2016): examples (scheduled to be cancelled in 10 sec) end - class Throttle < Synchronization::Object + class Promises::Throttle < Synchronization::Object safe_initialization! private *attr_atomic(:can_run) @@ -2015,16 +2051,23 @@ def initialize(max) end def limit(future = nil, &block) - # TODO (pitr-ch 11-Jun-2016): triggers should allocate resources when they are to be required - trigger = future ? future & get_event : get_event - - if block_given? - block.call(trigger).on_resolution! { done } + if future + # future.chain { block.call(new_trigger & future).on_resolution! { done } }.flat + block.call(new_trigger & future).on_resolution! { done } else - get_event + if block_given? + block.call(new_trigger).on_resolution! { done } + else + new_trigger + end end end + # TODO (pitr-ch 10-Oct-2016): maybe just then? 
+ def then_limit(&block) + limit { |trigger| trigger.then &block } + end + def done while true current_can_run = can_run @@ -2037,7 +2080,7 @@ def done private - def get_event + def new_trigger while true current_can_run = can_run if compare_and_set_can_run current_can_run, current_can_run - 1 @@ -2059,5 +2102,18 @@ def throttle(throttle, &throttled_future) throttle.limit(self, &throttled_future) end + def then_throttle(throttle, &block) + throttle(throttle) { |trigger| trigger.then &block } + end + + end + + module Promises::FactoryMethods + + # @!visibility private + + def throttle(count) + Promises::Throttle.new count + end end end diff --git a/lib/concurrent/synchronization/object.rb b/lib/concurrent/synchronization/object.rb index e777b5546..1b8a3c296 100644 --- a/lib/concurrent/synchronization/object.rb +++ b/lib/concurrent/synchronization/object.rb @@ -70,8 +70,8 @@ def self.safe_initialization? # any instance variables with CamelCase names and isn't {.safe_initialization?}. def self.ensure_safe_initialization_when_final_fields_are_present Object.class_eval do - def self.new(*) - object = super + def self.new(*args, &block) + object = super(*args, &block) ensure has_final_field = object.instance_variables.any? { |v| v.to_s =~ /^@[A-Z]/ } if has_final_field && !safe_initialization? 
diff --git a/spec/concurrent/edge/promises_spec.rb b/spec/concurrent/edge/promises_spec.rb index d2d533f67..7a9cf15ff 100644 --- a/spec/concurrent/edge/promises_spec.rb +++ b/spec/concurrent/edge/promises_spec.rb @@ -395,7 +395,7 @@ def behaves_as_delay(delay, value) end it 'propagates requests for values to delayed futures' do - expect(Concurrent.future { Concurrent.delay { 1 } }.flat.value!(0.1)).to eq 1 + expect(future { delay { 1 } }.flat.value!(0.1)).to eq 1 end end @@ -467,10 +467,10 @@ def behaves_as_delay(delay, value) specify do source, token = Concurrent::Cancellation.create source.cancel - expect(token.event.resolved?).to be_truthy + expect(token.canceled?).to be_truthy cancellable_branch = Concurrent::Promises.delay { 1 } - expect((cancellable_branch | token.event).value).to be_nil + expect((cancellable_branch | token.to_event).value).to be_nil expect(cancellable_branch.resolved?).to be_falsey end @@ -478,7 +478,7 @@ def behaves_as_delay(delay, value) source, token = Concurrent::Cancellation.create cancellable_branch = Concurrent::Promises.delay { 1 } - expect(any_resolved_future(cancellable_branch, token.event).value).to eq 1 + expect(any_resolved_future(cancellable_branch, token.to_event).value).to eq 1 expect(cancellable_branch.resolved?).to be_truthy end @@ -486,10 +486,10 @@ def behaves_as_delay(delay, value) source, token = Concurrent::Cancellation.create( Concurrent::Promises.resolvable_future, false, nil, err = StandardError.new('Cancelled')) source.cancel - expect(token.future.resolved?).to be_truthy + expect(token.canceled?).to be_truthy cancellable_branch = Concurrent::Promises.delay { 1 } - expect((cancellable_branch | token.future).reason).to eq err + expect((cancellable_branch | token.to_future).reason).to eq err expect(cancellable_branch.resolved?).to be_falsey end end From bd38e3396afc6195111082b7c580df687eb88cf6 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Sat, 17 Dec 2016 21:39:04 +0100 Subject: [PATCH 42/68] Move cancellation out 
--- lib/concurrent-edge.rb | 1 + lib/concurrent/edge/cancellation.rb | 81 +++++++++++++++++++++++++++++ lib/concurrent/edge/promises.rb | 79 ---------------------------- 3 files changed, 82 insertions(+), 79 deletions(-) create mode 100644 lib/concurrent/edge/cancellation.rb diff --git a/lib/concurrent-edge.rb b/lib/concurrent-edge.rb index 5928efb87..22b7d2fae 100644 --- a/lib/concurrent-edge.rb +++ b/lib/concurrent-edge.rb @@ -10,3 +10,4 @@ require 'concurrent/edge/lock_free_linked_set' require 'concurrent/edge/promises' +require 'concurrent/edge/cancellation' diff --git a/lib/concurrent/edge/cancellation.rb b/lib/concurrent/edge/cancellation.rb new file mode 100644 index 000000000..ccf803c89 --- /dev/null +++ b/lib/concurrent/edge/cancellation.rb @@ -0,0 +1,81 @@ +module Concurrent + + # TODO example: parallel jobs, cancel them all when one fails, clean-up in zip + # inspired by https://msdn.microsoft.com/en-us/library/dd537607(v=vs.110).aspx + class Cancellation < Synchronization::Object + safe_initialization! + + def self.create(future_or_event = Promises.resolvable_event, *resolve_args) + cancellation = new(future_or_event, *resolve_args) + [cancellation, cancellation.token] + end + + private_class_method :new + + def initialize(future, *resolve_args) + raise ArgumentError, 'future is not Resolvable' unless future.is_a?(Promises::Resolvable) + @Cancel = future + @Token = Token.new @Cancel.with_hidden_resolvable + @ResolveArgs = resolve_args + end + + def token + @Token + end + + def cancel(raise_on_repeated_call = true) + !!@Cancel.resolve(*@ResolveArgs, raise_on_repeated_call) + end + + def canceled? + @Cancel.resolved? + end + + class Token < Synchronization::Object + safe_initialization! + + def initialize(cancel) + @Cancel = cancel + end + + def to_event + @Cancel.to_event + end + + def to_future + @Cancel.to_future + end + + def on_cancellation(*args, &block) + @Cancel.on_resolution *args, &block + end + + def canceled? + @Cancel.resolved? 
+ end + + def loop_until_canceled(&block) + until canceled? + result = block.call + end + result + end + + def raise_if_canceled(error = CancelledOperationError) + raise error if canceled? + self + end + + def join(*tokens, &block) + block ||= -> tokens { Promises.any_event(*tokens.map(&:to_event)) } + self.class.new block.call([@Cancel, *tokens]) + end + + end + + private_constant :Token + + # FIXME (pitr-ch 27-Mar-2016): cooperation with mutex, condition, select etc? + # TODO (pitr-ch 27-Mar-2016): examples (scheduled to be cancelled in 10 sec) + end +end diff --git a/lib/concurrent/edge/promises.rb b/lib/concurrent/edge/promises.rb index 001b49e6e..261eafed3 100644 --- a/lib/concurrent/edge/promises.rb +++ b/lib/concurrent/edge/promises.rb @@ -1959,85 +1959,6 @@ def each_body(value, &block) end end - # TODO example: parallel jobs, cancell them all when one fails, clean-up in zip - # inspired by https://msdn.microsoft.com/en-us/library/dd537607(v=vs.110).aspx - class Cancellation < Synchronization::Object - safe_initialization! - - def self.create(future_or_event = Promises.resolvable_event, *resolve_args) - cancellation = new(future_or_event, *resolve_args) - [cancellation, cancellation.token] - end - - private_class_method :new - - def initialize(future, *resolve_args) - raise ArgumentError, 'future is not Resolvable' unless future.is_a?(Promises::Resolvable) - @Cancel = future - @Token = Token.new @Cancel.with_hidden_resolvable - @ResolveArgs = resolve_args - end - - def token - @Token - end - - def cancel(raise_on_repeated_call = true) - !!@Cancel.resolve(*@ResolveArgs, raise_on_repeated_call) - end - - def canceled? - @Cancel.resolved? - end - - class Token < Synchronization::Object - safe_initialization! - - def initialize(cancel) - @Cancel = cancel - end - - def to_event - @Cancel.to_event - end - - def to_future - @Cancel.to_future - end - - def on_cancellation(*args, &block) - @Cancel.on_resolution *args, &block - end - - def canceled? 
- @Cancel.resolved? - end - - def loop_until_canceled(&block) - until canceled? - result = block.call - end - result - end - - def raise_if_canceled(error = CancelledOperationError) - raise error if canceled? - self - end - - def join(*tokens, &block) - block ||= -> tokens { Promises.any_event(*tokens.map(&:to_event)) } - self.class.new block.call([@Cancel, *tokens]) - end - - end - - private_constant :Token - - # TODO (pitr-ch 27-Mar-2016): cooperation with mutex, select etc? - # TODO (pitr-ch 27-Mar-2016): examples (scheduled to be cancelled in 10 sec) - end - class Promises::Throttle < Synchronization::Object safe_initialization! From b96e0484b75df449c54951690415f37e31ad219d Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Sat, 17 Dec 2016 21:40:56 +0100 Subject: [PATCH 43/68] Use lock-free-queue in throttling and improve API --- lib/concurrent-edge.rb | 1 + lib/concurrent/edge/lock_free_queue.rb | 116 +++++++++++++++++++++++++ lib/concurrent/edge/promises.rb | 39 ++++----- spec/concurrent/edge/promises_spec.rb | 10 +-- 4 files changed, 140 insertions(+), 26 deletions(-) create mode 100644 lib/concurrent/edge/lock_free_queue.rb diff --git a/lib/concurrent-edge.rb b/lib/concurrent-edge.rb index 22b7d2fae..7721430a3 100644 --- a/lib/concurrent-edge.rb +++ b/lib/concurrent-edge.rb @@ -8,6 +8,7 @@ require 'concurrent/edge/atomic_markable_reference' require 'concurrent/edge/lock_free_linked_set' +require 'concurrent/edge/lock_free_queue' require 'concurrent/edge/promises' require 'concurrent/edge/cancellation' diff --git a/lib/concurrent/edge/lock_free_queue.rb b/lib/concurrent/edge/lock_free_queue.rb new file mode 100644 index 000000000..8ef216e54 --- /dev/null +++ b/lib/concurrent/edge/lock_free_queue.rb @@ -0,0 +1,116 @@ +module Concurrent + + class LockFreeQueue < Synchronization::Object + + class Node < Synchronization::Object + attr_atomic :successor + + def initialize(item, successor) + super() + # published through queue, no need to be volatile or final + 
@Item = item + self.successor = successor + end + + def item + @Item + end + end + + safe_initialization! + + attr_atomic :head, :tail + + def initialize + super() + dummy_node = Node.new(:dummy, nil) + + self.head = dummy_node + self.tail = dummy_node + end + + def push(item) + # allocate a new node with the item embedded + new_node = Node.new(item, nil) + + # keep trying until the operation succeeds + while true + current_tail_node = tail + current_tail_successor = current_tail_node.successor + + # if our stored tail is still the current tail + if current_tail_node == tail + # if that tail was really the last node + if current_tail_successor.nil? + # if we can update the previous successor of tail to point to this new node + if current_tail_node.compare_and_set_successor(nil, new_node) + # then update tail to point to this node as well + compare_and_set_tail(current_tail_node, new_node) + # and return + return true + # else, start the loop over + end + else + # in this case, the tail ref we had wasn't the real tail + # so we try to set its successor as the real tail, then start the loop again + compare_and_set_tail(current_tail_node, current_tail_successor) + end + end + end + end + + def pop + # retry until some value can be returned + while true + # the value in @head is just a dummy node that always sits in that position, + # the real 'head' is in its successor + current_dummy_node = head + current_tail_node = tail + + current_head_node = current_dummy_node.successor + + # if our local head is still consistent with the head node, continue + # otherwise, start over + if current_dummy_node == head + # if either the queue is empty, or falling behind + if current_dummy_node == current_tail_node + # if there's nothing after the 'dummy' head node + if current_head_node.nil? 
+ # just return nil + return nil + else + # here the head element succeeding head is not nil, but the head and tail are equal + # so tail is falling behind, update it, then start over + compare_and_set_tail(current_tail_node, current_head_node) + end + + # the queue isn't empty + # if we can set the dummy head to the 'real' head, we're free to return the value in that real head, success + elsif compare_and_set_head(current_dummy_node, current_head_node) + # grab the item from the popped node + item = current_head_node.item + + # return it, success! + return item + end + end + end + end + + # approximate + def size + successor = head.successor + count = 0 + + while true + break if successor.nil? + + current_node = successor + successor = current_node.successor + count += 1 + end + + count + end + end +end diff --git a/lib/concurrent/edge/promises.rb b/lib/concurrent/edge/promises.rb index 261eafed3..d28e2d05d 100644 --- a/lib/concurrent/edge/promises.rb +++ b/lib/concurrent/edge/promises.rb @@ -1967,40 +1967,37 @@ class Promises::Throttle < Synchronization::Object def initialize(max) super() self.can_run = max - # TODO (pitr-ch 10-Jun-2016): lock-free queue is needed - @Queue = Queue.new + @Queue = LockFreeQueue.new end - def limit(future = nil, &block) - if future - # future.chain { block.call(new_trigger & future).on_resolution! { done } }.flat - block.call(new_trigger & future).on_resolution! { done } + def throttle(future = nil, &throttled_future) + if block_given? + trigger = future ? (new_trigger & future) : new_trigger + throttled_future.call(trigger).on_resolution! { done } else - if block_given? - block.call(new_trigger).on_resolution! { done } - else - new_trigger - end + new_trigger end end - # TODO (pitr-ch 10-Oct-2016): maybe just then? 
- def then_limit(&block) - limit { |trigger| trigger.then &block } + def then_throttle(&task) + throttle { |trigger| trigger.then &task } end + private + def done while true current_can_run = can_run if compare_and_set_can_run current_can_run, current_can_run + 1 - @Queue.pop.resolve if current_can_run < 0 + if current_can_run <= 0 + Thread.pass until (trigger = @Queue.pop) + trigger.resolve + end return self end end end - private - def new_trigger while true current_can_run = can_run @@ -2008,9 +2005,9 @@ def new_trigger if current_can_run > 0 return Promises.resolved_event else - e = Promises.resolvable_event - @Queue.push e - return e + event = Promises.resolvable_event + @Queue.push event + return event end end end @@ -2020,7 +2017,7 @@ def new_trigger class Promises::AbstractEventFuture < Synchronization::Object def throttle(throttle, &throttled_future) - throttle.limit(self, &throttled_future) + throttle.throttle(self, &throttled_future) end def then_throttle(throttle, &block) diff --git a/spec/concurrent/edge/promises_spec.rb b/spec/concurrent/edge/promises_spec.rb index 7a9cf15ff..76312118d 100644 --- a/spec/concurrent/edge/promises_spec.rb +++ b/spec/concurrent/edge/promises_spec.rb @@ -507,21 +507,21 @@ def behaves_as_delay(delay, value) expect(Concurrent::Promises.zip( *12.times.map do |i| - max_tree.limit { |trigger| trigger.then &testing } - end).value!.all? { |v| v < 3 }).to be_truthy + max_tree.throttle { |trigger| trigger.then &testing } + end).value!.all? { |v| v <= 3 }).to be_truthy expect(Concurrent::Promises.zip( *12.times.map do |i| Concurrent::Promises. fulfilled_future(i). throttle(max_tree) { |trigger| trigger.then &testing } - end).value!.all? { |v| v < 3 }).to be_truthy + end).value!.all? 
{ |v| v <= 3 }).to be_truthy end specify do max_five = Concurrent::Promises::Throttle.new 5 jobs = 20.times.map do |i| - max_five.limit do |trigger| + max_five.throttle do |trigger| # trigger is an event, has same chain-able capabilities as current promise trigger.then do # at any given time there max 5 simultaneous executions of this block @@ -537,7 +537,7 @@ def behaves_as_delay(delay, value) specify do max_five = Concurrent::Promises::Throttle.new 5 jobs = 20.times.map do |i| - max_five.then_limit do + max_five.then_throttle do # at any given time there max 5 simultaneous executions of this block the_work = i * 2 end # returns promise From afe15b6d4e2d21c785422afdde8267bbe0e5f370 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Sat, 17 Dec 2016 22:11:50 +0100 Subject: [PATCH 44/68] Make resolving of dependent futures independent on blocked_by internal list --- lib/concurrent/edge/promises.rb | 85 +++++++++++++++++++-------------- 1 file changed, 48 insertions(+), 37 deletions(-) diff --git a/lib/concurrent/edge/promises.rb b/lib/concurrent/edge/promises.rb index d28e2d05d..55a3a2285 100644 --- a/lib/concurrent/edge/promises.rb +++ b/lib/concurrent/edge/promises.rb @@ -214,7 +214,7 @@ def zip_futures(*futures_and_or_events) # @param [AbstractEventFuture] futures_and_or_events # @return [Future] def zip_futures_on(default_executor, *futures_and_or_events) - ZipFuturesPromise.new(futures_and_or_events, default_executor).future + ZipFuturesPromise.new_blocked(futures_and_or_events, futures_and_or_events, default_executor).future end alias_method :zip, :zip_futures @@ -232,7 +232,7 @@ def zip_events(*futures_and_or_events) # @param [AbstractEventFuture] futures_and_or_events # @return [Event] def zip_events_on(default_executor, *futures_and_or_events) - ZipEventsPromise.new(futures_and_or_events, default_executor).future + ZipEventsPromise.new_blocked(futures_and_or_events, futures_and_or_events, default_executor).future end # @!macro promises.shortcut.on @@ -254,7 
+254,7 @@ def any_resolved_future(*futures_and_or_events) # @param [AbstractEventFuture] futures_and_or_events # @return [Future] def any_resolved_future_on(default_executor, *futures_and_or_events) - AnyResolvedFuturePromise.new(futures_and_or_events, default_executor).future + AnyResolvedFuturePromise.new_blocked(futures_and_or_events, futures_and_or_events, default_executor).future end # @!macro promises.shortcut.on @@ -273,7 +273,7 @@ def any_fulfilled_future(*futures_and_or_events) # @param [AbstractEventFuture] futures_and_or_events # @return [Future] def any_fulfilled_future_on(default_executor, *futures_and_or_events) - AnyFulfilledFuturePromise.new(futures_and_or_events, default_executor).future + AnyFulfilledFuturePromise.new_blocked(futures_and_or_events, futures_and_or_events, default_executor).future end # @!macro promises.shortcut.on @@ -580,7 +580,7 @@ def chain(*args, &task) # @overload a_future.chain_on(executor, *args, &task) # @yield [fulfilled?, value, reason, *args] to the task. def chain_on(executor, *args, &task) - ChainPromise.new(self, @DefaultExecutor, executor, args, &task).future + ChainPromise.new_blocked([self], self, @DefaultExecutor, executor, args, &task).future end # Short string representation. 
@@ -782,9 +782,9 @@ class Event < AbstractEventFuture # @return [Future, Event] def zip(other) if other.is_a?(Future) - ZipFutureEventPromise.new(other, self, @DefaultExecutor).future + ZipFutureEventPromise.new_blocked([other, self], other, self, @DefaultExecutor).future else - ZipEventEventPromise.new(self, other, @DefaultExecutor).event + ZipEventEventPromise.new_blocked([self, other], self, other, @DefaultExecutor).event end end @@ -795,7 +795,7 @@ def zip(other) # # @return [Event] def any(event_or_future) - AnyResolvedEventPromise.new([self, event_or_future], @DefaultExecutor).event + AnyResolvedEventPromise.new_blocked([self, event_or_future], [self, event_or_future], @DefaultExecutor).event end alias_method :|, :any @@ -805,9 +805,11 @@ def any(event_or_future) # # @return [Event] def delay - ZipEventEventPromise.new(self, - DelayPromise.new(@DefaultExecutor).event, - @DefaultExecutor).event + event = DelayPromise.new(@DefaultExecutor).event + ZipEventEventPromise.new_blocked([self, event], + self, + event, + @DefaultExecutor).event end # @!macro [new] promise.method.schedule @@ -819,9 +821,11 @@ def delay # @return [Event] def schedule(intended_time) chain do - ZipEventEventPromise.new(self, - ScheduledPromise.new(@DefaultExecutor, intended_time).event, - @DefaultExecutor).event + event = ScheduledPromise.new(@DefaultExecutor, intended_time).event + ZipEventEventPromise.new_blocked([self, event], + self, + event, + @DefaultExecutor).event end.flat_event end @@ -956,7 +960,7 @@ def then(*args, &task) # @return [Future] # @yield [value, *args] to the task. def then_on(executor, *args, &task) - ThenPromise.new(self, @DefaultExecutor, executor, args, &task).future + ThenPromise.new_blocked([self], self, @DefaultExecutor, executor, args, &task).future end # @!macro promises.shortcut.on @@ -974,16 +978,16 @@ def rescue(*args, &task) # @return [Future] # @yield [reason, *args] to the task. 
def rescue_on(executor, *args, &task) - RescuePromise.new(self, @DefaultExecutor, executor, args, &task).future + RescuePromise.new_blocked([self], self, @DefaultExecutor, executor, args, &task).future end # @!macro promises.method.zip # @return [Future] def zip(other) if other.is_a?(Future) - ZipFutureFuturePromise.new(self, other, @DefaultExecutor).future + ZipFutureFuturePromise.new_blocked([self, other], self, other, @DefaultExecutor).future else - ZipFutureEventPromise.new(self, other, @DefaultExecutor).future + ZipFutureEventPromise.new_blocked([self, other], self, other, @DefaultExecutor).future end end @@ -995,7 +999,7 @@ def zip(other) # # @return [Future] def any(event_or_future) - AnyResolvedFuturePromise.new([self, event_or_future], @DefaultExecutor).future + AnyResolvedFuturePromise.new_blocked([self, event_or_future], [self, event_or_future], @DefaultExecutor).future end alias_method :|, :any @@ -1005,25 +1009,29 @@ def any(event_or_future) # # @return [Future] def delay - ZipFutureEventPromise.new(self, - DelayPromise.new(@DefaultExecutor).future, - @DefaultExecutor).future + future = DelayPromise.new(@DefaultExecutor).future + ZipFutureEventPromise.new_blocked([self, future], + self, + future, + @DefaultExecutor).future end # @!macro promise.method.schedule # @return [Future] def schedule(intended_time) chain do - ZipFutureEventPromise.new(self, - ScheduledPromise.new(@DefaultExecutor, intended_time).event, - @DefaultExecutor).future + event = ScheduledPromise.new(@DefaultExecutor, intended_time).event + ZipFutureEventPromise.new_blocked([self, event], + self, + event, + @DefaultExecutor).future end.flat end # @!macro promises.method.with_default_executor # @return [Future] def with_default_executor(executor) - FutureWrapperPromise.new(self, executor).future + FutureWrapperPromise.new_blocked([self], self, executor).future end # Creates new future which will have result of the future returned by receiver. 
If receiver @@ -1032,7 +1040,7 @@ def with_default_executor(executor) # @param [Integer] level how many levels of futures should flatten # @return [Future] def flat_future(level = 1) - FlatFuturePromise.new(self, level, @DefaultExecutor).future + FlatFuturePromise.new_blocked([self], self, level, @DefaultExecutor).future end alias_method :flat, :flat_future @@ -1042,7 +1050,7 @@ def flat_future(level = 1) # # @return [Event] def flat_event - FlatEventPromise.new(self, @DefaultExecutor).event + FlatEventPromise.new_blocked([self], self, @DefaultExecutor).event end # @!macro promises.shortcut.using @@ -1116,7 +1124,7 @@ def on_rejection_using(executor, *args, &callback) # end # future(0, &body).run.value! # => 5 def run - RunFuturePromise.new(self, @DefaultExecutor).future + RunFuturePromise.new_blocked([self], self, @DefaultExecutor).future end # @!visibility private @@ -1211,7 +1219,7 @@ def resolve(raise_on_reassign = true) # # @return [Event] def with_hidden_resolvable - @with_hidden_resolvable ||= EventWrapperPromise.new(self, @DefaultExecutor).event + @with_hidden_resolvable ||= EventWrapperPromise.new_blocked([self], self, @DefaultExecutor).event end end @@ -1224,7 +1232,6 @@ class ResolvableFuture < Future # # @!macro promise.param.raise_on_reassign def resolve(fulfilled = true, value = nil, reason = nil, raise_on_reassign = true) - # TODO (pitr-ch 25-Sep-2016): should the defaults be kept to match event resolve api? resolve_with(fulfilled ? 
Fulfilled.new(value) : Rejected.new(reason), raise_on_reassign) end @@ -1268,7 +1275,7 @@ def evaluate_to!(*args, &block) # # @return [Future] def with_hidden_resolvable - @with_hidden_resolvable ||= FutureWrapperPromise.new(self, @DefaultExecutor).future + @with_hidden_resolvable ||= FutureWrapperPromise.new_blocked([self], self, @DefaultExecutor).future end end @@ -1360,10 +1367,13 @@ class InnerPromise < AbstractPromise # @abstract class BlockedPromise < InnerPromise # @!visibility private - def self.new(*args, &block) - promise = super(*args, &block) - promise.blocked_by.each { |f| f.add_callback :callback_notify_blocked, promise } - promise + + private_class_method :new + + def self.new_blocked(blockers, *args, &block) + promise = new(*args, &block) + ensure + blockers.each { |f| f.add_callback :callback_notify_blocked, promise } end def initialize(future, blocked_by_futures, countdown) @@ -1925,7 +1935,8 @@ class Future < AbstractEventFuture # Zips with selected value form the suplied channels # @return [Future] def then_select(*channels) - ZipFuturesPromise.new([self, Concurrent::Promises.select(*channels)], @DefaultExecutor).future + future = Concurrent::Promises.select(*channels) + ZipFuturesPromise.new_blocked([self, future], [self, future], @DefaultExecutor).future end # @note may block From 0a85d02fa94992bbab304a6f4938ef18fd654e58 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Sun, 18 Dec 2016 20:48:58 +0100 Subject: [PATCH 45/68] Remove dependency in zip promise on blocked_by --- lib/concurrent/edge/promises.rb | 147 +++++++++++++++----------------- 1 file changed, 69 insertions(+), 78 deletions(-) diff --git a/lib/concurrent/edge/promises.rb b/lib/concurrent/edge/promises.rb index 55a3a2285..d75dc9d05 100644 --- a/lib/concurrent/edge/promises.rb +++ b/lib/concurrent/edge/promises.rb @@ -672,8 +672,9 @@ def resolve_with(state, raise_on_reassign = true) # @!visibility private # @return [Array] def blocks - @Callbacks.each_with_object([]) do 
|callback, promises| - promises.push(*(callback.select { |v| v.is_a? AbstractPromise })) + # TODO (pitr-ch 18-Dec-2016): add macro noting that debug methods may change api without warning + @Callbacks.each_with_object([]) do |(method, *args), promises| + promises.push(args[0]) if method == :callback_notify_blocked end end @@ -759,8 +760,8 @@ def async_callback_on_resolution(state, executor, args, callback) end end - def callback_notify_blocked(state, promise) - promise.on_resolution self + def callback_notify_blocked(state, promise, index) + promise.on_resolution self, index end end @@ -985,7 +986,7 @@ def rescue_on(executor, *args, &task) # @return [Future] def zip(other) if other.is_a?(Future) - ZipFutureFuturePromise.new_blocked([self, other], self, other, @DefaultExecutor).future + ZipFuturesPromise.new_blocked([self, other], [self, other], @DefaultExecutor).future else ZipFutureEventPromise.new_blocked([self, other], self, other, @DefaultExecutor).future end @@ -1009,10 +1010,10 @@ def any(event_or_future) # # @return [Future] def delay - future = DelayPromise.new(@DefaultExecutor).future - ZipFutureEventPromise.new_blocked([self, future], + event = DelayPromise.new(@DefaultExecutor).event + ZipFutureEventPromise.new_blocked([self, event], self, - future, + event, @DefaultExecutor).future end @@ -1373,7 +1374,7 @@ class BlockedPromise < InnerPromise def self.new_blocked(blockers, *args, &block) promise = new(*args, &block) ensure - blockers.each { |f| f.add_callback :callback_notify_blocked, promise } + blockers.each_with_index { |f, i| f.add_callback :callback_notify_blocked, promise, i } end def initialize(future, blocked_by_futures, countdown) @@ -1383,12 +1384,13 @@ def initialize(future, blocked_by_futures, countdown) end # @!visibility private - def on_resolution(future) - countdown = process_on_resolution(future) - resolvable = resolvable?(countdown, future) + def on_resolution(future, index) + # TODO (pitr-ch 18-Dec-2016): rename to 
on_blocker_resolution + countdown = process_on_resolution(future, index) + resolvable = resolvable?(countdown, future, index) if resolvable - on_resolvable(future) + on_resolvable(future, index) # futures could be deleted from blocked_by one by one here, but that would be too expensive, # it's done once when all are resolved to free their references clear_blocked_by! @@ -1428,15 +1430,15 @@ def clear_blocked_by! end # @return [true,false] if resolvable - def resolvable?(countdown, future) + def resolvable?(countdown, future, index) countdown.zero? end - def process_on_resolution(future) + def process_on_resolution(future, index) @Countdown.decrement end - def on_resolvable(resolved_future) + def on_resolvable(resolved_future, index) raise NotImplementedError end end @@ -1465,7 +1467,7 @@ def initialize(blocked_by_future, default_executor, executor, args, &task) super blocked_by_future, default_executor, executor, args, &task end - def on_resolvable(resolved_future) + def on_resolvable(resolved_future, index) if resolved_future.fulfilled? Concurrent.executor(@Executor).post(resolved_future, @Args, @Task) do |future, args, task| evaluate_to lambda { future.apply args, task } @@ -1483,7 +1485,7 @@ def initialize(blocked_by_future, default_executor, executor, args, &task) super blocked_by_future, default_executor, executor, args, &task end - def on_resolvable(resolved_future) + def on_resolvable(resolved_future, index) if resolved_future.rejected? 
Concurrent.executor(@Executor).post(resolved_future, @Args, @Task) do |future, args, task| evaluate_to lambda { future.apply args, task } @@ -1497,7 +1499,7 @@ def on_resolvable(resolved_future) class ChainPromise < BlockedTaskPromise private - def on_resolvable(resolved_future) + def on_resolvable(resolved_future, index) if Future === resolved_future Concurrent.executor(@Executor).post(resolved_future, @Args, @Task) do |future, args, task| evaluate_to(*future.result, *args, task) @@ -1536,7 +1538,7 @@ def initialize_blocked_by(blocked_by_future) @BlockedBy = LockFreeStack.new.push(blocked_by_future) end - def on_resolvable(resolved_future) + def on_resolvable(resolved_future, index) resolve_with resolved_future.internal_state end @@ -1550,8 +1552,8 @@ def blocked_by_add(future) future.touch if self.future.touched? end - def resolvable?(countdown, future) - !@Future.internal_state.resolved? && super(countdown, future) + def resolvable?(countdown, future, index) + !@Future.internal_state.resolved? && super(countdown, future, index) end end @@ -1563,8 +1565,8 @@ def initialize(blocked_by_future, default_executor) super Event.new(self, default_executor), blocked_by_future, 2 end - def process_on_resolution(future) - countdown = super(future) + def process_on_resolution(future, index) + countdown = super(future, index) if countdown.nonzero? internal_state = future.internal_state @@ -1577,8 +1579,8 @@ def process_on_resolution(future) case value when Future, Event blocked_by_add value - value.add_callback :callback_notify_blocked, self - @Countdown.value + value.add_callback :callback_notify_blocked, self, nil + countdown else resolve_with RESOLVED end @@ -1597,8 +1599,8 @@ def initialize(blocked_by_future, levels, default_executor) super Future.new(self, default_executor), blocked_by_future, 1 + levels end - def process_on_resolution(future) - countdown = super(future) + def process_on_resolution(future, index) + countdown = super(future, index) if countdown.nonzero? 
internal_state = future.internal_state @@ -1611,8 +1613,8 @@ def process_on_resolution(future) case value when Future blocked_by_add value - value.add_callback :callback_notify_blocked, self - @Countdown.value + value.add_callback :callback_notify_blocked, self, nil + countdown when Event evaluate_to(lambda { raise TypeError, 'cannot flatten to Event' }) else @@ -1632,7 +1634,7 @@ def initialize(blocked_by_future, default_executor) super Future.new(self, default_executor), blocked_by_future, 1 end - def process_on_resolution(future) + def process_on_resolution(future, index) internal_state = future.internal_state unless internal_state.fulfilled? @@ -1645,7 +1647,7 @@ def process_on_resolution(future) when Future # FIXME (pitr-ch 08-Dec-2016): will accumulate the completed futures blocked_by_add value - value.add_callback :callback_notify_blocked, self + value.add_callback :callback_notify_blocked, self, nil else resolve_with internal_state end @@ -1661,7 +1663,7 @@ def initialize(event1, event2, default_executor) private - def on_resolvable(resolved_future) + def on_resolvable(resolved_future, index) resolve_with RESOLVED end end @@ -1669,35 +1671,20 @@ def on_resolvable(resolved_future) class ZipFutureEventPromise < BlockedPromise def initialize(future, event, default_executor) super Future.new(self, default_executor), [future, event], 2 - @FutureResult = future + @result = nil end private - def on_resolvable(resolved_future) - resolve_with @FutureResult.internal_state - end - end - - class ZipFutureFuturePromise < BlockedPromise - def initialize(future1, future2, default_executor) - super Future.new(self, default_executor), [future1, future2], 2 - @Future1Result = future1 - @Future2Result = future2 + def process_on_resolution(future, index) + # first blocking is future, take its result + @result = future.internal_state if index == 0 + # super has to be called after above to piggyback on volatile @Countdown + super future, index end - private - - def 
on_resolvable(resolved_future) - fulfilled1, value1, reason1 = @Future1Result.result - fulfilled2, value2, reason2 = @Future2Result.result - fulfilled = fulfilled1 && fulfilled2 - new_state = if fulfilled - FulfilledArray.new([value1, value2]) - else - PartiallyRejected.new([value1, value2], [reason1, reason2]) - end - resolve_with new_state + def on_resolvable(resolved_future, index) + resolve_with @result end end @@ -1708,7 +1695,7 @@ def initialize(event, default_executor) private - def on_resolvable(resolved_future) + def on_resolvable(resolved_future, index) resolve_with RESOLVED end end @@ -1720,7 +1707,7 @@ def initialize(future, default_executor) private - def on_resolvable(resolved_future) + def on_resolvable(resolved_future, index) resolve_with resolved_future.internal_state end end @@ -1730,23 +1717,28 @@ class ZipFuturesPromise < BlockedPromise private def initialize(blocked_by_futures, default_executor) - super(Future.new(self, default_executor), blocked_by_futures, blocked_by_futures.size) + size = blocked_by_futures.size + super(Future.new(self, default_executor), blocked_by_futures, size) + @Resolutions = ::Array.new(size) + + on_resolvable nil, -1 if blocked_by_futures.empty? + end - on_resolvable nil if blocked_by_futures.empty? + def process_on_resolution(future, index) + countdown = super future, index + # TODO (pitr-ch 18-Dec-2016): Can we assume that array will never break under parallel access when never resized? 
+ @Resolutions[index] = future.internal_state + countdown end - def on_resolvable(resolved_future) + def on_resolvable(resolved_future, index) all_fulfilled = true - values = Array.new(blocked_by.size) - reasons = Array.new(blocked_by.size) + values = Array.new(@Resolutions.size) + reasons = Array.new(@Resolutions.size) - blocked_by.each_with_index do |future, i| - if future.is_a?(Future) - fulfilled, values[i], reasons[i] = future.result - all_fulfilled &&= fulfilled - else - values[i] = reasons[i] = nil - end + @Resolutions.each_with_index do |internal_state, i| + fulfilled, values[i], reasons[i] = internal_state.result + all_fulfilled &&= fulfilled end if all_fulfilled @@ -1764,10 +1756,10 @@ class ZipEventsPromise < BlockedPromise def initialize(blocked_by_futures, default_executor) super(Event.new(self, default_executor), blocked_by_futures, blocked_by_futures.size) - on_resolvable nil if blocked_by_futures.empty? + on_resolvable nil, -1 if blocked_by_futures.empty? end - def on_resolvable(resolved_future) + def on_resolvable(resolved_future, index) resolve_with RESOLVED end end @@ -1784,11 +1776,11 @@ def initialize(blocked_by_futures, default_executor) super(Future.new(self, default_executor), blocked_by_futures, blocked_by_futures.size) end - def resolvable?(countdown, future) + def resolvable?(countdown, future, index) true end - def on_resolvable(resolved_future) + def on_resolvable(resolved_future, index) resolve_with resolved_future.internal_state, false end end @@ -1801,11 +1793,11 @@ def initialize(blocked_by_futures, default_executor) super(Event.new(self, default_executor), blocked_by_futures, blocked_by_futures.size) end - def resolvable?(countdown, future) + def resolvable?(countdown, future, index) true end - def on_resolvable(resolved_future) + def on_resolvable(resolved_future, index) resolve_with RESOLVED, false end end @@ -1814,7 +1806,7 @@ class AnyFulfilledFuturePromise < AnyResolvedFuturePromise private - def resolvable?(countdown, future) 
+ def resolvable?(countdown, future, index) future.fulfilled? || # inlined super from BlockedPromise countdown.zero? @@ -1887,7 +1879,6 @@ def initialize(default_executor, intended_time) :RunFuturePromise, :ZipEventEventPromise, :ZipFutureEventPromise, - :ZipFutureFuturePromise, :EventWrapperPromise, :FutureWrapperPromise, :ZipFuturesPromise, From 6bfd19aec860794ad7ac2021bd5da6327c9a4d13 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Tue, 20 Dec 2016 16:45:47 +0100 Subject: [PATCH 46/68] Remove blocked_by completely --- lib/concurrent/edge/promises.rb | 422 +++++++++++++------------- spec/concurrent/edge/promises_spec.rb | 7 - 2 files changed, 205 insertions(+), 224 deletions(-) diff --git a/lib/concurrent/edge/promises.rb b/lib/concurrent/edge/promises.rb index d75dc9d05..3a0194aae 100644 --- a/lib/concurrent/edge/promises.rb +++ b/lib/concurrent/edge/promises.rb @@ -177,7 +177,7 @@ def delay(*args, &task) # # @!macro promises.future-on2 def delay_on(default_executor, *args, &task) - DelayPromise.new(default_executor).future.then(*args, &task) + DelayPromise.new(default_executor).event.chain(*args, &task) end # @!macro promises.shortcut.on @@ -214,7 +214,7 @@ def zip_futures(*futures_and_or_events) # @param [AbstractEventFuture] futures_and_or_events # @return [Future] def zip_futures_on(default_executor, *futures_and_or_events) - ZipFuturesPromise.new_blocked(futures_and_or_events, futures_and_or_events, default_executor).future + ZipFuturesPromise.new_blocked(futures_and_or_events, default_executor).future end alias_method :zip, :zip_futures @@ -232,7 +232,7 @@ def zip_events(*futures_and_or_events) # @param [AbstractEventFuture] futures_and_or_events # @return [Event] def zip_events_on(default_executor, *futures_and_or_events) - ZipEventsPromise.new_blocked(futures_and_or_events, futures_and_or_events, default_executor).future + ZipEventsPromise.new_blocked(futures_and_or_events, default_executor).event end # @!macro promises.shortcut.on @@ -254,7 +254,7 @@ 
def any_resolved_future(*futures_and_or_events) # @param [AbstractEventFuture] futures_and_or_events # @return [Future] def any_resolved_future_on(default_executor, *futures_and_or_events) - AnyResolvedFuturePromise.new_blocked(futures_and_or_events, futures_and_or_events, default_executor).future + AnyResolvedFuturePromise.new_blocked(futures_and_or_events, default_executor).future end # @!macro promises.shortcut.on @@ -273,7 +273,7 @@ def any_fulfilled_future(*futures_and_or_events) # @param [AbstractEventFuture] futures_and_or_events # @return [Future] def any_fulfilled_future_on(default_executor, *futures_and_or_events) - AnyFulfilledFuturePromise.new_blocked(futures_and_or_events, futures_and_or_events, default_executor).future + AnyFulfilledFuturePromise.new_blocked(futures_and_or_events, default_executor).future end # @!macro promises.shortcut.on @@ -289,7 +289,7 @@ def any_event(*futures_and_or_events) # @param [AbstractEventFuture] futures_and_or_events # @return [Event] def any_event_on(default_executor, *futures_and_or_events) - AnyResolvedEventPromise.new(futures_and_or_events, default_executor).event + AnyResolvedEventPromise.new_blocked(futures_and_or_events, default_executor).event end # TODO consider adding first(count, *futures) @@ -476,8 +476,6 @@ def initialize(promise, default_executor) @Condition = ConditionVariable.new @Promise = promise @DefaultExecutor = default_executor - # noinspection RubyArgCount - @Touched = AtomicBoolean.new false @Callbacks = LockFreeStack.new # noinspection RubyArgCount @Waiters = AtomicFixnum.new 0 @@ -530,8 +528,7 @@ def unscheduled? # executed. This method is called by any other method requiring resolved state, like {#wait}. # @return [self] def touch - # distribute touch to promise only once - @Promise.touch if @Touched.make_true + @Promise.touch self end @@ -580,7 +577,7 @@ def chain(*args, &task) # @overload a_future.chain_on(executor, *args, &task) # @yield [fulfilled?, value, reason, *args] to the task. 
def chain_on(executor, *args, &task) - ChainPromise.new_blocked([self], self, @DefaultExecutor, executor, args, &task).future + ChainPromise.new_blocked([self], @DefaultExecutor, executor, args, &task).future end # Short string representation. @@ -595,12 +592,6 @@ def inspect "#{to_s[0..-2]} blocks:[#{blocks.map(&:to_s).join(', ')}]>" end - # @deprecated - def set(*args, &block) - raise 'Use ResolvableEvent#resolve or ResolvableFuture#resolve instead, ' + - 'constructed by Promises.resolvable_event or Promises.resolvable_future respectively.' - end - # Resolves the resolvable when receiver is resolved. # # @param [Resolvable] resolvable @@ -693,7 +684,7 @@ def promise # For inspection. # @!visibility private def touched? - @Touched.value + promise.touched? end # For inspection. @@ -761,7 +752,7 @@ def async_callback_on_resolution(state, executor, args, callback) end def callback_notify_blocked(state, promise, index) - promise.on_resolution self, index + promise.on_blocker_resolution self, index end end @@ -783,9 +774,9 @@ class Event < AbstractEventFuture # @return [Future, Event] def zip(other) if other.is_a?(Future) - ZipFutureEventPromise.new_blocked([other, self], other, self, @DefaultExecutor).future + ZipFutureEventPromise.new_blocked([other, self], @DefaultExecutor).future else - ZipEventEventPromise.new_blocked([self, other], self, other, @DefaultExecutor).event + ZipEventEventPromise.new_blocked([self, other], @DefaultExecutor).event end end @@ -796,7 +787,7 @@ def zip(other) # # @return [Event] def any(event_or_future) - AnyResolvedEventPromise.new_blocked([self, event_or_future], [self, event_or_future], @DefaultExecutor).event + AnyResolvedEventPromise.new_blocked([self, event_or_future], @DefaultExecutor).event end alias_method :|, :any @@ -807,10 +798,7 @@ def any(event_or_future) # @return [Event] def delay event = DelayPromise.new(@DefaultExecutor).event - ZipEventEventPromise.new_blocked([self, event], - self, - event, - @DefaultExecutor).event + 
ZipEventEventPromise.new_blocked([self, event], @DefaultExecutor).event end # @!macro [new] promise.method.schedule @@ -823,10 +811,7 @@ def delay def schedule(intended_time) chain do event = ScheduledPromise.new(@DefaultExecutor, intended_time).event - ZipEventEventPromise.new_blocked([self, event], - self, - event, - @DefaultExecutor).event + ZipEventEventPromise.new_blocked([self, event], @DefaultExecutor).event end.flat_event end @@ -848,7 +833,7 @@ def to_event # @!macro promises.method.with_default_executor # @return [Event] def with_default_executor(executor) - EventWrapperPromise.new(self, executor).future + EventWrapperPromise.new_blocked([self], executor).event end private @@ -961,7 +946,7 @@ def then(*args, &task) # @return [Future] # @yield [value, *args] to the task. def then_on(executor, *args, &task) - ThenPromise.new_blocked([self], self, @DefaultExecutor, executor, args, &task).future + ThenPromise.new_blocked([self], @DefaultExecutor, executor, args, &task).future end # @!macro promises.shortcut.on @@ -979,16 +964,16 @@ def rescue(*args, &task) # @return [Future] # @yield [reason, *args] to the task. 
def rescue_on(executor, *args, &task) - RescuePromise.new_blocked([self], self, @DefaultExecutor, executor, args, &task).future + RescuePromise.new_blocked([self], @DefaultExecutor, executor, args, &task).future end # @!macro promises.method.zip # @return [Future] def zip(other) if other.is_a?(Future) - ZipFuturesPromise.new_blocked([self, other], [self, other], @DefaultExecutor).future + ZipFuturesPromise.new_blocked([self, other], @DefaultExecutor).future else - ZipFutureEventPromise.new_blocked([self, other], self, other, @DefaultExecutor).future + ZipFutureEventPromise.new_blocked([self, other], @DefaultExecutor).future end end @@ -1000,7 +985,7 @@ def zip(other) # # @return [Future] def any(event_or_future) - AnyResolvedFuturePromise.new_blocked([self, event_or_future], [self, event_or_future], @DefaultExecutor).future + AnyResolvedFuturePromise.new_blocked([self, event_or_future], @DefaultExecutor).future end alias_method :|, :any @@ -1011,10 +996,7 @@ def any(event_or_future) # @return [Future] def delay event = DelayPromise.new(@DefaultExecutor).event - ZipFutureEventPromise.new_blocked([self, event], - self, - event, - @DefaultExecutor).future + ZipFutureEventPromise.new_blocked([self, event], @DefaultExecutor).future end # @!macro promise.method.schedule @@ -1022,17 +1004,14 @@ def delay def schedule(intended_time) chain do event = ScheduledPromise.new(@DefaultExecutor, intended_time).event - ZipFutureEventPromise.new_blocked([self, event], - self, - event, - @DefaultExecutor).future + ZipFutureEventPromise.new_blocked([self, event], @DefaultExecutor).future end.flat end # @!macro promises.method.with_default_executor # @return [Future] def with_default_executor(executor) - FutureWrapperPromise.new_blocked([self], self, executor).future + FutureWrapperPromise.new_blocked([self], executor).future end # Creates new future which will have result of the future returned by receiver. 
If receiver @@ -1041,7 +1020,7 @@ def with_default_executor(executor) # @param [Integer] level how many levels of futures should flatten # @return [Future] def flat_future(level = 1) - FlatFuturePromise.new_blocked([self], self, level, @DefaultExecutor).future + FlatFuturePromise.new_blocked([self], level, @DefaultExecutor).future end alias_method :flat, :flat_future @@ -1051,7 +1030,7 @@ def flat_future(level = 1) # # @return [Event] def flat_event - FlatEventPromise.new_blocked([self], self, @DefaultExecutor).event + FlatEventPromise.new_blocked([self], @DefaultExecutor).event end # @!macro promises.shortcut.using @@ -1125,7 +1104,7 @@ def on_rejection_using(executor, *args, &callback) # end # future(0, &body).run.value! # => 5 def run - RunFuturePromise.new_blocked([self], self, @DefaultExecutor).future + RunFuturePromise.new_blocked([self], @DefaultExecutor).future end # @!visibility private @@ -1220,7 +1199,7 @@ def resolve(raise_on_reassign = true) # # @return [Event] def with_hidden_resolvable - @with_hidden_resolvable ||= EventWrapperPromise.new_blocked([self], self, @DefaultExecutor).event + @with_hidden_resolvable ||= EventWrapperPromise.new_blocked([self], @DefaultExecutor).event end end @@ -1276,7 +1255,7 @@ def evaluate_to!(*args, &block) # # @return [Future] def with_hidden_resolvable - @with_hidden_resolvable ||= FutureWrapperPromise.new_blocked([self], self, @DefaultExecutor).future + @with_hidden_resolvable ||= FutureWrapperPromise.new_blocked([self], @DefaultExecutor).future end end @@ -1316,6 +1295,10 @@ def inspect to_s end + def delayed + nil + end + private def resolve_with(new_state, raise_on_reassign = true) @@ -1372,41 +1355,65 @@ class BlockedPromise < InnerPromise private_class_method :new def self.new_blocked(blockers, *args, &block) - promise = new(*args, &block) + delayed = blockers.each_with_object(LockFreeStack.new, &method(:add_delayed)) + promise = new(delayed, blockers.size, *args, &block) ensure blockers.each_with_index { |f, i| 
f.add_callback :callback_notify_blocked, promise, i } end - def initialize(future, blocked_by_futures, countdown) + def self.add_delayed(blocker, delayed) + d = blocker.promise.delayed + delayed.push(d) if d + end + + def initialize(delayed, blockers_count, future) super(future) - initialize_blocked_by(blocked_by_futures) - @Countdown = AtomicFixnum.new countdown + @Touched = AtomicBoolean.new false + @Delayed = delayed + @Countdown = AtomicFixnum.new blockers_count end # @!visibility private - def on_resolution(future, index) - # TODO (pitr-ch 18-Dec-2016): rename to on_blocker_resolution - countdown = process_on_resolution(future, index) + def on_blocker_resolution(future, index) + countdown = process_on_blocker_resolution(future, index) resolvable = resolvable?(countdown, future, index) - if resolvable - on_resolvable(future, index) - # futures could be deleted from blocked_by one by one here, but that would be too expensive, - # it's done once when all are resolved to free their references - clear_blocked_by! - end + on_resolvable(future, index) if resolvable + end + + def delayed + @Delayed end - # @!visibility private def touch - # TODO (pitr-ch 13-Jun-2016): on construction pass down references of delays to be touched, avoids extra CASses - blocked_by.each(&:touch) + clear_propagate_touch if @Touched.make_true + end + + def clear_propagate_touch + @Delayed.clear_each { |o| propagate_touch o } + end + + # @!visibility private + def propagate_touch(stack_or_element = @Delayed) + if stack_or_element.is_a? LockFreeStack + stack_or_element.each { |element| propagate_touch element } + else + stack_or_element.touch unless stack_or_element.nil? # if still present + end end - # !visibility private + def touched? + @Touched.value + end + + # !visibility private # TODO (pitr-ch 20-Dec-2016): does it have to be at promise methods? 
# for inspection only def blocked_by - @BlockedBy + # TODO (pitr-ch 18-Dec-2016): doc macro debug method + + blocked_by = [] + ObjectSpace.each_object(AbstractEventFuture) { |o| blocked_by.push o if o.blocks.include? self } + blocked_by end # @!visibility private @@ -1416,25 +1423,12 @@ def inspect private - def initialize_blocked_by(blocked_by_futures) - unless blocked_by_futures.is_a?(::Array) - raise ArgumentError, "has to be array of events/futures: #{blocked_by_futures.inspect}" - end - @BlockedBy = blocked_by_futures - end - - def clear_blocked_by! - # not synchronized because we do not care when this change propagates - @BlockedBy = [] - nil - end - # @return [true,false] if resolvable def resolvable?(countdown, future, index) countdown.zero? end - def process_on_resolution(future, index) + def process_on_blocker_resolution(future, index) @Countdown.decrement end @@ -1445,9 +1439,9 @@ def on_resolvable(resolved_future, index) # @abstract class BlockedTaskPromise < BlockedPromise - def initialize(blocked_by_future, default_executor, executor, args, &task) + def initialize(delayed, blockers_count, default_executor, executor, args, &task) raise ArgumentError, 'no block given' unless block_given? - super Future.new(self, default_executor), [blocked_by_future], 1 + super delayed, 1, Future.new(self, default_executor) @Executor = executor @Task = task @Args = args @@ -1462,9 +1456,8 @@ def executor class ThenPromise < BlockedTaskPromise private - def initialize(blocked_by_future, default_executor, executor, args, &task) - raise ArgumentError, 'only Future can be appended with then' unless blocked_by_future.is_a? 
Future - super blocked_by_future, default_executor, executor, args, &task + def initialize(delayed, blockers_count, default_executor, executor, args, &task) + super delayed, blockers_count, default_executor, executor, args, &task end def on_resolvable(resolved_future, index) @@ -1481,8 +1474,8 @@ def on_resolvable(resolved_future, index) class RescuePromise < BlockedTaskPromise private - def initialize(blocked_by_future, default_executor, executor, args, &task) - super blocked_by_future, default_executor, executor, args, &task + def initialize(delayed, blockers_count, default_executor, executor, args, &task) + super delayed, blockers_count, default_executor, executor, args, &task end def on_resolvable(resolved_future, index) @@ -1527,45 +1520,37 @@ def initialize(default_executor, fulfilled, value, reason) end class AbstractFlatPromise < BlockedPromise - # !visibility private - def blocked_by - @BlockedBy.each.to_a - end private - def initialize_blocked_by(blocked_by_future) - @BlockedBy = LockFreeStack.new.push(blocked_by_future) - end - def on_resolvable(resolved_future, index) resolve_with resolved_future.internal_state end - def clear_blocked_by! - @BlockedBy.clear - nil + def resolvable?(countdown, future, index) + !@Future.internal_state.resolved? && super(countdown, future, index) end - def blocked_by_add(future) - @BlockedBy.push future - future.touch if self.future.touched? + def add_delayed_of(future) + if touched? + propagate_touch future.promise.delayed + else + BlockedPromise.add_delayed future, @Delayed + clear_propagate_touch if touched? + end end - def resolvable?(countdown, future, index) - !@Future.internal_state.resolved? 
&& super(countdown, future, index) - end end class FlatEventPromise < AbstractFlatPromise private - def initialize(blocked_by_future, default_executor) - super Event.new(self, default_executor), blocked_by_future, 2 + def initialize(delayed, blockers_count, default_executor) + super delayed, 2, Event.new(self, default_executor) end - def process_on_resolution(future, index) + def process_on_blocker_resolution(future, index) countdown = super(future, index) if countdown.nonzero? internal_state = future.internal_state @@ -1578,7 +1563,7 @@ def process_on_resolution(future, index) value = internal_state.value case value when Future, Event - blocked_by_add value + add_delayed_of value value.add_callback :callback_notify_blocked, self, nil countdown else @@ -1594,12 +1579,12 @@ class FlatFuturePromise < AbstractFlatPromise private - def initialize(blocked_by_future, levels, default_executor) + def initialize(delayed, blockers_count, levels, default_executor) raise ArgumentError, 'levels has to be higher than 0' if levels < 1 - super Future.new(self, default_executor), blocked_by_future, 1 + levels + super delayed, 1 + levels, Future.new(self, default_executor) end - def process_on_resolution(future, index) + def process_on_blocker_resolution(future, index) countdown = super(future, index) if countdown.nonzero? 
internal_state = future.internal_state @@ -1612,7 +1597,7 @@ def process_on_resolution(future, index) value = internal_state.value case value when Future - blocked_by_add value + add_delayed_of value value.add_callback :callback_notify_blocked, self, nil countdown when Event @@ -1630,11 +1615,11 @@ class RunFuturePromise < AbstractFlatPromise private - def initialize(blocked_by_future, default_executor) - super Future.new(self, default_executor), blocked_by_future, 1 + def initialize(delayed, blockers_count, default_executor) + super delayed, 1, Future.new(self, default_executor) end - def process_on_resolution(future, index) + def process_on_blocker_resolution(future, index) internal_state = future.internal_state unless internal_state.fulfilled? @@ -1645,8 +1630,7 @@ def process_on_resolution(future, index) value = internal_state.value case value when Future - # FIXME (pitr-ch 08-Dec-2016): will accumulate the completed futures - blocked_by_add value + add_delayed_of value value.add_callback :callback_notify_blocked, self, nil else resolve_with internal_state @@ -1657,8 +1641,8 @@ def process_on_resolution(future, index) end class ZipEventEventPromise < BlockedPromise - def initialize(event1, event2, default_executor) - super Event.new(self, default_executor), [event1, event2], 2 + def initialize(delayed, blockers_count, default_executor) + super delayed, 2, Event.new(self, default_executor) end private @@ -1669,14 +1653,14 @@ def on_resolvable(resolved_future, index) end class ZipFutureEventPromise < BlockedPromise - def initialize(future, event, default_executor) - super Future.new(self, default_executor), [future, event], 2 + def initialize(delayed, blockers_count, default_executor) + super delayed, 2, Future.new(self, default_executor) @result = nil end private - def process_on_resolution(future, index) + def process_on_blocker_resolution(future, index) # first blocking is future, take its result @result = future.internal_state if index == 0 # super has to be 
called after above to piggyback on volatile @Countdown @@ -1689,8 +1673,8 @@ def on_resolvable(resolved_future, index) end class EventWrapperPromise < BlockedPromise - def initialize(event, default_executor) - super Event.new(self, default_executor), [event], 1 + def initialize(delayed, blockers_count, default_executor) + super delayed, 1, Event.new(self, default_executor) end private @@ -1701,8 +1685,8 @@ def on_resolvable(resolved_future, index) end class FutureWrapperPromise < BlockedPromise - def initialize(future, default_executor) - super Future.new(self, default_executor), [future], 1 + def initialize(delayed, blockers_count, default_executor) + super delayed, 1, Future.new(self, default_executor) end private @@ -1716,19 +1700,17 @@ class ZipFuturesPromise < BlockedPromise private - def initialize(blocked_by_futures, default_executor) - size = blocked_by_futures.size - super(Future.new(self, default_executor), blocked_by_futures, size) - @Resolutions = ::Array.new(size) + def initialize(delayed, blockers_count, default_executor) + super(delayed, blockers_count, Future.new(self, default_executor)) + @Resolutions = ::Array.new(blockers_count) - on_resolvable nil, -1 if blocked_by_futures.empty? + on_resolvable nil, nil if blockers_count == 0 end - def process_on_resolution(future, index) - countdown = super future, index + def process_on_blocker_resolution(future, index) # TODO (pitr-ch 18-Dec-2016): Can we assume that array will never break under parallel access when never resized? 
- @Resolutions[index] = future.internal_state - countdown + @Resolutions[index] = future.internal_state # has to be set before countdown in super + super future, index end def on_resolvable(resolved_future, index) @@ -1753,10 +1735,10 @@ class ZipEventsPromise < BlockedPromise private - def initialize(blocked_by_futures, default_executor) - super(Event.new(self, default_executor), blocked_by_futures, blocked_by_futures.size) + def initialize(delayed, blockers_count, default_executor) + super delayed, blockers_count, Event.new(self, default_executor) - on_resolvable nil, -1 if blocked_by_futures.empty? + on_resolvable nil, nil if blockers_count == 0 end def on_resolvable(resolved_future, index) @@ -1772,8 +1754,8 @@ class AnyResolvedFuturePromise < AbstractAnyPromise private - def initialize(blocked_by_futures, default_executor) - super(Future.new(self, default_executor), blocked_by_futures, blocked_by_futures.size) + def initialize(delayed, blockers_count, default_executor) + super delayed, blockers_count, Future.new(self, default_executor) end def resolvable?(countdown, future, index) @@ -1789,8 +1771,8 @@ class AnyResolvedEventPromise < AbstractAnyPromise private - def initialize(blocked_by_futures, default_executor) - super(Event.new(self, default_executor), blocked_by_futures, blocked_by_futures.size) + def initialize(delayed, blockers_count, default_executor) + super delayed, blockers_count, Event.new(self, default_executor) end def resolvable?(countdown, future, index) @@ -1814,16 +1796,22 @@ def resolvable?(countdown, future, index) end class DelayPromise < InnerPromise - # @!visibility private + + def initialize(default_executor) + super event = Event.new(self, default_executor) + @Delayed = LockFreeStack.new.push self + # TODO (pitr-ch 20-Dec-2016): implement directly without callback? 
+ event.on_resolution!(@Delayed.peek) { |stack_node| stack_node.value = nil } + end + def touch @Future.resolve_with RESOLVED end - private - - def initialize(default_executor) - super Event.new(self, default_executor) + def delayed + @Delayed end + end class ScheduledPromise < InnerPromise @@ -1927,7 +1915,7 @@ class Future < AbstractEventFuture # @return [Future] def then_select(*channels) future = Concurrent::Promises.select(*channels) - ZipFuturesPromise.new_blocked([self, future], [self, future], @DefaultExecutor).future + ZipFuturesPromise.new_blocked([self, future], @DefaultExecutor).future end # @note may block @@ -1942,98 +1930,98 @@ def then_ask(actor) self.then { |v| actor.ask(v) }.flat end - include Enumerable - - def each(&block) - each_body self.value, &block - end - - def each!(&block) - each_body self.value!, &block - end - - private - - def each_body(value, &block) - (value.nil? ? [nil] : Array(value)).each(&block) - end + # include Enumerable + # + # def each(&block) + # each_body self.value, &block + # end + # + # def each!(&block) + # each_body self.value!, &block + # end + # + # private + # + # def each_body(value, &block) + # (value.nil? ? [nil] : Array(value)).each(&block) + # end end - end - class Promises::Throttle < Synchronization::Object + class Throttle < Synchronization::Object - safe_initialization! - private *attr_atomic(:can_run) + safe_initialization! + private *attr_atomic(:can_run) - def initialize(max) - super() - self.can_run = max - @Queue = LockFreeQueue.new - end + def initialize(max) + super() + self.can_run = max + @Queue = LockFreeQueue.new + end - def throttle(future = nil, &throttled_future) - if block_given? - trigger = future ? (new_trigger & future) : new_trigger - throttled_future.call(trigger).on_resolution! { done } - else - new_trigger + def throttle(future = nil, &throttled_future) + if block_given? + trigger = future ? (new_trigger & future) : new_trigger + throttled_future.call(trigger).on_resolution! 
{ done } + else + new_trigger + end end - end - def then_throttle(&task) - throttle { |trigger| trigger.then &task } - end + def then_throttle(&task) + throttle { |trigger| trigger.then &task } + end - private + private - def done - while true - current_can_run = can_run - if compare_and_set_can_run current_can_run, current_can_run + 1 - if current_can_run <= 0 - Thread.pass until (trigger = @Queue.pop) - trigger.resolve + def done + while true + current_can_run = can_run + if compare_and_set_can_run current_can_run, current_can_run + 1 + if current_can_run <= 0 + Thread.pass until (trigger = @Queue.pop) + trigger.resolve + end + return self end - return self end end - end - def new_trigger - while true - current_can_run = can_run - if compare_and_set_can_run current_can_run, current_can_run - 1 - if current_can_run > 0 - return Promises.resolved_event - else - event = Promises.resolvable_event - @Queue.push event - return event + def new_trigger + while true + current_can_run = can_run + if compare_and_set_can_run current_can_run, current_can_run - 1 + if current_can_run > 0 + return Promises.resolved_event + else + event = Promises.resolvable_event + @Queue.push event + return event + end end end end end - end - class Promises::AbstractEventFuture < Synchronization::Object + class AbstractEventFuture < Synchronization::Object - def throttle(throttle, &throttled_future) - throttle.throttle(self, &throttled_future) - end + def throttle(throttle, &throttled_future) + throttle.throttle(self, &throttled_future) + end - def then_throttle(throttle, &block) - throttle(throttle) { |trigger| trigger.then &block } - end + def then_throttle(throttle, &block) + throttle(throttle) { |trigger| trigger.then &block } + end - end + end - module Promises::FactoryMethods + module FactoryMethods - # @!visibility private + # @!visibility private - def throttle(count) - Promises::Throttle.new count + def throttle(count) + Promises::Throttle.new count + end end end end diff --git 
a/spec/concurrent/edge/promises_spec.rb b/spec/concurrent/edge/promises_spec.rb index 76312118d..47839aea6 100644 --- a/spec/concurrent/edge/promises_spec.rb +++ b/spec/concurrent/edge/promises_spec.rb @@ -227,13 +227,6 @@ def behaves_as_delay(delay, value) end end - describe '.each' do - specify do - expect(fulfilled_future(nil).each.map(&:inspect)).to eq ['nil'] - expect(fulfilled_future(1).each.map(&:inspect)).to eq ['1'] - expect(fulfilled_future([1, 2]).each.map(&:inspect)).to eq ['1', '2'] - end - end describe '.zip_events' do it 'waits for all and returns event' do From 6b11145455c1901294cf0fe97dfa2cbd32a84283 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Tue, 20 Dec 2016 17:30:42 +0100 Subject: [PATCH 47/68] Eliminate some array allocations --- lib/concurrent/edge/promises.rb | 105 +++++++++++++++++++++----------- 1 file changed, 70 insertions(+), 35 deletions(-) diff --git a/lib/concurrent/edge/promises.rb b/lib/concurrent/edge/promises.rb index 3a0194aae..dbeaf7625 100644 --- a/lib/concurrent/edge/promises.rb +++ b/lib/concurrent/edge/promises.rb @@ -577,7 +577,7 @@ def chain(*args, &task) # @overload a_future.chain_on(executor, *args, &task) # @yield [fulfilled?, value, reason, *args] to the task. def chain_on(executor, *args, &task) - ChainPromise.new_blocked([self], @DefaultExecutor, executor, args, &task).future + ChainPromise.new_blocked1(self, @DefaultExecutor, executor, args, &task).future end # Short string representation. 
@@ -664,7 +664,7 @@ def resolve_with(state, raise_on_reassign = true) # @return [Array] def blocks # TODO (pitr-ch 18-Dec-2016): add macro noting that debug methods may change api without warning - @Callbacks.each_with_object([]) do |(method, *args), promises| + @Callbacks.each_with_object([]) do |(method, args), promises| promises.push(args[0]) if method == :callback_notify_blocked end end @@ -697,9 +697,9 @@ def waiting_threads def add_callback(method, *args) state = internal_state if resolved?(state) - call_callback method, state, *args + call_callback method, state, args else - @Callbacks.push [method, *args] + @Callbacks.push [method, args] state = internal_state # take back if it was resolved in the meanwhile call_callbacks state if resolved?(state) @@ -729,15 +729,15 @@ def wait_until_resolved(timeout) resolved? end - def call_callback(method, state, *args) + def call_callback(method, state, args) self.send method, state, *args end def call_callbacks(state) - method, *args = @Callbacks.pop + method, args = @Callbacks.pop while method - call_callback method, state, *args - method, *args = @Callbacks.pop + call_callback method, state, args + method, args = @Callbacks.pop end end @@ -774,9 +774,9 @@ class Event < AbstractEventFuture # @return [Future, Event] def zip(other) if other.is_a?(Future) - ZipFutureEventPromise.new_blocked([other, self], @DefaultExecutor).future + ZipFutureEventPromise.new_blocked2(other, self, @DefaultExecutor).future else - ZipEventEventPromise.new_blocked([self, other], @DefaultExecutor).event + ZipEventEventPromise.new_blocked2(self, other, @DefaultExecutor).event end end @@ -787,7 +787,7 @@ def zip(other) # # @return [Event] def any(event_or_future) - AnyResolvedEventPromise.new_blocked([self, event_or_future], @DefaultExecutor).event + AnyResolvedEventPromise.new_blocked2(self, event_or_future, @DefaultExecutor).event end alias_method :|, :any @@ -798,7 +798,7 @@ def any(event_or_future) # @return [Event] def delay event = 
DelayPromise.new(@DefaultExecutor).event - ZipEventEventPromise.new_blocked([self, event], @DefaultExecutor).event + ZipEventEventPromise.new_blocked2(self, event, @DefaultExecutor).event end # @!macro [new] promise.method.schedule @@ -811,7 +811,7 @@ def delay def schedule(intended_time) chain do event = ScheduledPromise.new(@DefaultExecutor, intended_time).event - ZipEventEventPromise.new_blocked([self, event], @DefaultExecutor).event + ZipEventEventPromise.new_blocked2(self, event, @DefaultExecutor).event end.flat_event end @@ -833,7 +833,7 @@ def to_event # @!macro promises.method.with_default_executor # @return [Event] def with_default_executor(executor) - EventWrapperPromise.new_blocked([self], executor).event + EventWrapperPromise.new_blocked1(self, executor).event end private @@ -946,7 +946,7 @@ def then(*args, &task) # @return [Future] # @yield [value, *args] to the task. def then_on(executor, *args, &task) - ThenPromise.new_blocked([self], @DefaultExecutor, executor, args, &task).future + ThenPromise.new_blocked1(self, @DefaultExecutor, executor, args, &task).future end # @!macro promises.shortcut.on @@ -964,16 +964,16 @@ def rescue(*args, &task) # @return [Future] # @yield [reason, *args] to the task. 
def rescue_on(executor, *args, &task) - RescuePromise.new_blocked([self], @DefaultExecutor, executor, args, &task).future + RescuePromise.new_blocked1(self, @DefaultExecutor, executor, args, &task).future end # @!macro promises.method.zip # @return [Future] def zip(other) if other.is_a?(Future) - ZipFuturesPromise.new_blocked([self, other], @DefaultExecutor).future + ZipFuturesPromise.new_blocked2(self, other, @DefaultExecutor).future else - ZipFutureEventPromise.new_blocked([self, other], @DefaultExecutor).future + ZipFutureEventPromise.new_blocked2(self, other, @DefaultExecutor).future end end @@ -985,7 +985,7 @@ def zip(other) # # @return [Future] def any(event_or_future) - AnyResolvedFuturePromise.new_blocked([self, event_or_future], @DefaultExecutor).future + AnyResolvedFuturePromise.new_blocked2(self, event_or_future, @DefaultExecutor).future end alias_method :|, :any @@ -996,7 +996,7 @@ def any(event_or_future) # @return [Future] def delay event = DelayPromise.new(@DefaultExecutor).event - ZipFutureEventPromise.new_blocked([self, event], @DefaultExecutor).future + ZipFutureEventPromise.new_blocked2(self, event, @DefaultExecutor).future end # @!macro promise.method.schedule @@ -1004,14 +1004,14 @@ def delay def schedule(intended_time) chain do event = ScheduledPromise.new(@DefaultExecutor, intended_time).event - ZipFutureEventPromise.new_blocked([self, event], @DefaultExecutor).future + ZipFutureEventPromise.new_blocked2(self, event, @DefaultExecutor).future end.flat end # @!macro promises.method.with_default_executor # @return [Future] def with_default_executor(executor) - FutureWrapperPromise.new_blocked([self], executor).future + FutureWrapperPromise.new_blocked1(self, executor).future end # Creates new future which will have result of the future returned by receiver. 
If receiver @@ -1020,7 +1020,7 @@ def with_default_executor(executor) # @param [Integer] level how many levels of futures should flatten # @return [Future] def flat_future(level = 1) - FlatFuturePromise.new_blocked([self], level, @DefaultExecutor).future + FlatFuturePromise.new_blocked1(self, level, @DefaultExecutor).future end alias_method :flat, :flat_future @@ -1030,7 +1030,7 @@ def flat_future(level = 1) # # @return [Event] def flat_event - FlatEventPromise.new_blocked([self], @DefaultExecutor).event + FlatEventPromise.new_blocked1(self, @DefaultExecutor).event end # @!macro promises.shortcut.using @@ -1104,7 +1104,7 @@ def on_rejection_using(executor, *args, &callback) # end # future(0, &body).run.value! # => 5 def run - RunFuturePromise.new_blocked([self], @DefaultExecutor).future + RunFuturePromise.new_blocked1(self, @DefaultExecutor).future end # @!visibility private @@ -1199,7 +1199,7 @@ def resolve(raise_on_reassign = true) # # @return [Event] def with_hidden_resolvable - @with_hidden_resolvable ||= EventWrapperPromise.new_blocked([self], @DefaultExecutor).event + @with_hidden_resolvable ||= EventWrapperPromise.new_blocked1(self, @DefaultExecutor).event end end @@ -1255,7 +1255,7 @@ def evaluate_to!(*args, &block) # # @return [Future] def with_hidden_resolvable - @with_hidden_resolvable ||= FutureWrapperPromise.new_blocked([self], @DefaultExecutor).future + @with_hidden_resolvable ||= FutureWrapperPromise.new_blocked1(self, @DefaultExecutor).future end end @@ -1354,16 +1354,49 @@ class BlockedPromise < InnerPromise private_class_method :new + def self.new_blocked1(blocker, *args, &block) + blocker_delayed = blocker.promise.delayed + delayed = blocker_delayed ? 
LockFreeStack.new.push(blocker_delayed) : nil + promise = new(delayed, 1, *args, &block) + ensure + blocker.add_callback :callback_notify_blocked, promise, 0 + end + + def self.new_blocked2(blocker1, blocker2, *args, &block) + blocker_delayed1 = blocker1.promise.delayed + blocker_delayed2 = blocker2.promise.delayed + delayed = if blocker_delayed1 + if blocker_delayed2 + LockFreeStack.new2(blocker_delayed1, blocker_delayed2) + else + LockFreeStack.new1(blocker_delayed1) + end + else + blocker_delayed2 ? LockFreeStack.new1(blocker_delayed2) : nil + end + promise = new(delayed, 2, *args, &block) + ensure + blocker1.add_callback :callback_notify_blocked, promise, 0 + blocker2.add_callback :callback_notify_blocked, promise, 1 + end + def self.new_blocked(blockers, *args, &block) - delayed = blockers.each_with_object(LockFreeStack.new, &method(:add_delayed)) + delayed = blockers.reduce(nil, &method(:add_delayed)) promise = new(delayed, blockers.size, *args, &block) ensure blockers.each_with_index { |f, i| f.add_callback :callback_notify_blocked, promise, i } end - def self.add_delayed(blocker, delayed) - d = blocker.promise.delayed - delayed.push(d) if d + def self.add_delayed(delayed, blocker) + blocker_delayed = blocker.promise.delayed + if blocker_delayed + delayed = unless delayed + LockFreeStack.new1(blocker_delayed) + else + delayed.push(blocker_delayed) + end + end + delayed end def initialize(delayed, blockers_count, future) @@ -1390,7 +1423,7 @@ def touch end def clear_propagate_touch - @Delayed.clear_each { |o| propagate_touch o } + @Delayed.clear_each { |o| propagate_touch o } if @Delayed end # @!visibility private @@ -1535,7 +1568,7 @@ def add_delayed_of(future) if touched? propagate_touch future.promise.delayed else - BlockedPromise.add_delayed future, @Delayed + BlockedPromise.add_delayed @Delayed, future clear_propagate_touch if touched? 
end end @@ -1581,7 +1614,9 @@ class FlatFuturePromise < AbstractFlatPromise def initialize(delayed, blockers_count, levels, default_executor) raise ArgumentError, 'levels has to be higher than 0' if levels < 1 - super delayed, 1 + levels, Future.new(self, default_executor) + # flat promise may result to a future having delayed futures, therefore we have to have empty stack + # to be able to add new delayed futures + super delayed || LockFreeStack.new, 1 + levels, Future.new(self, default_executor) end def process_on_blocker_resolution(future, index) @@ -1915,7 +1950,7 @@ class Future < AbstractEventFuture # @return [Future] def then_select(*channels) future = Concurrent::Promises.select(*channels) - ZipFuturesPromise.new_blocked([self, future], @DefaultExecutor).future + ZipFuturesPromise.new_blocked2(self, future, @DefaultExecutor).future end # @note may block From 07c968c944d13d70a4dd787ccbc15e56bf927367 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Fri, 23 Dec 2016 16:01:17 +0100 Subject: [PATCH 48/68] Simplify to_s and inspect --- lib/concurrent/edge/promises.rb | 17 +++++------------ spec/concurrent/edge/promises_spec.rb | 2 +- 2 files changed, 6 insertions(+), 13 deletions(-) diff --git a/lib/concurrent/edge/promises.rb b/lib/concurrent/edge/promises.rb index dbeaf7625..1e321e311 100644 --- a/lib/concurrent/edge/promises.rb +++ b/lib/concurrent/edge/promises.rb @@ -580,17 +580,12 @@ def chain_on(executor, *args, &task) ChainPromise.new_blocked1(self, @DefaultExecutor, executor, args, &task).future end - # Short string representation. - # @return [String] + # @return [String] Short string representation. def to_s - "<##{self.class}:0x#{'%x' % (object_id << 1)} #{state.to_sym}>" + format '<#%s:0x%x %s>', self.class, object_id << 1, state end - # Longer string representation. 
- # @return [String] - def inspect - "#{to_s[0..-2]} blocks:[#{blocks.map(&:to_s).join(', ')}]>" - end + alias_method :inspect, :to_s # Resolves the resolvable when receiver is resolved. # @@ -1288,12 +1283,10 @@ def touch end def to_s - format '<#%s:0x%x %s>', self.class, object_id << 1, state + format '<#%s:0x%x>', self.class, object_id << 1 end - def inspect - to_s - end + alias_method :inspect, :to_s def delayed nil diff --git a/spec/concurrent/edge/promises_spec.rb b/spec/concurrent/edge/promises_spec.rb index 47839aea6..2ac568f34 100644 --- a/spec/concurrent/edge/promises_spec.rb +++ b/spec/concurrent/edge/promises_spec.rb @@ -332,7 +332,7 @@ def behaves_as_delay(delay, value) # meaningful to_s and inspect defined for Future and Promise expect(head.to_s).to match /<#Concurrent::Promises::Future:0x[\da-f]+ pending>/ expect(head.inspect).to( - match(/<#Concurrent::Promises::Future:0x[\da-f]+ pending blocks:\[<#Concurrent::Promises::ThenPromise:0x[\da-f]+ pending>\]>/)) + match(/<#Concurrent::Promises::Future:0x[\da-f]+ pending>/)) # evaluates only up to three, four is left unevaluated expect(three.value!).to eq 3 From 6d8ad2ddde3042f4521e12d52fc9efc934f548c3 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Fri, 23 Dec 2016 16:01:45 +0100 Subject: [PATCH 49/68] Fix spec error class --- spec/concurrent/edge/promises_spec.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spec/concurrent/edge/promises_spec.rb b/spec/concurrent/edge/promises_spec.rb index 2ac568f34..895bceca4 100644 --- a/spec/concurrent/edge/promises_spec.rb +++ b/spec/concurrent/edge/promises_spec.rb @@ -221,7 +221,7 @@ def behaves_as_delay(delay, value) let(:a_future) { future { raise 'error' } } it 'raises a concurrent error' do - expect { zip(a_future).value! }.to raise_error(Concurrent::Error) + expect { zip(a_future).value! 
}.to raise_error(StandardError) end end From 9509164c66d31670901c9251fcbe0c6ac30e2b51 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Fri, 23 Dec 2016 16:03:20 +0100 Subject: [PATCH 50/68] LockFreeStack fast constructors for 1,2 elements --- lib/concurrent/edge/lock_free_stack.rb | 20 ++++++++++++++++++-- lib/concurrent/edge/promises.rb | 9 +++++---- 2 files changed, 23 insertions(+), 6 deletions(-) diff --git a/lib/concurrent/edge/lock_free_stack.rb b/lib/concurrent/edge/lock_free_stack.rb index 66d63df8c..64929271b 100644 --- a/lib/concurrent/edge/lock_free_stack.rb +++ b/lib/concurrent/edge/lock_free_stack.rb @@ -4,7 +4,11 @@ class LockFreeStack < Synchronization::Object safe_initialization! class Node + # TODO (pitr-ch 20-Dec-2016): Could be unified with Stack class? + attr_reader :value, :next_node + # allow to nil-ify to free GC when the entry is no longer relevant, not synchronised + attr_writer :value def initialize(value, next_node) @value = value @@ -24,9 +28,17 @@ def next_node private(*attr_atomic(:head)) - def initialize + def self.of1(value) + new Node[value, EMPTY] + end + + def self.of2(value1, value2) + new Node[value1, Node[value2, EMPTY]] + end + + def initialize(head = EMPTY) super() - self.head = EMPTY + self.head = head end def empty?(head = self.head) @@ -87,6 +99,10 @@ def clear_if(head) compare_and_set_head head, EMPTY end + def replace_if(head, new_head) + compare_and_set_head head, new_head + end + def clear_each(&block) while true current_head = head diff --git a/lib/concurrent/edge/promises.rb b/lib/concurrent/edge/promises.rb index 1e321e311..e6f420922 100644 --- a/lib/concurrent/edge/promises.rb +++ b/lib/concurrent/edge/promises.rb @@ -1358,14 +1358,15 @@ def self.new_blocked1(blocker, *args, &block) def self.new_blocked2(blocker1, blocker2, *args, &block) blocker_delayed1 = blocker1.promise.delayed blocker_delayed2 = blocker2.promise.delayed + # TODO (pitr-ch 23-Dec-2016): use arrays when we know it will not grow (only flat adds 
delay) delayed = if blocker_delayed1 if blocker_delayed2 - LockFreeStack.new2(blocker_delayed1, blocker_delayed2) + LockFreeStack.of2(blocker_delayed1, blocker_delayed2) else - LockFreeStack.new1(blocker_delayed1) + LockFreeStack.of1(blocker_delayed1) end else - blocker_delayed2 ? LockFreeStack.new1(blocker_delayed2) : nil + blocker_delayed2 ? LockFreeStack.of1(blocker_delayed2) : nil end promise = new(delayed, 2, *args, &block) ensure @@ -1384,7 +1385,7 @@ def self.add_delayed(delayed, blocker) blocker_delayed = blocker.promise.delayed if blocker_delayed delayed = unless delayed - LockFreeStack.new1(blocker_delayed) + LockFreeStack.of1(blocker_delayed) else delayed.push(blocker_delayed) end From 001691a8c4213a6606fe2cb44f0f2028020b783c Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Fri, 23 Dec 2016 16:03:49 +0100 Subject: [PATCH 51/68] Fix event's state symbol representation --- lib/concurrent/edge/promises.rb | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/concurrent/edge/promises.rb b/lib/concurrent/edge/promises.rb index e6f420922..749d2ca8c 100644 --- a/lib/concurrent/edge/promises.rb +++ b/lib/concurrent/edge/promises.rb @@ -458,6 +458,10 @@ def apply(args, block) PENDING = Pending.new RESOLVED = Fulfilled.new(nil) + def RESOLVED.to_sym + :resolved + end + private_constant :PENDING, :RESOLVED end From 183a4fc1d0446704b0e3276c65e997cb9a535354 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Fri, 23 Dec 2016 16:04:53 +0100 Subject: [PATCH 52/68] Deal correctly with multiple exceptions in #exception method --- lib/concurrent/edge/promises.rb | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/lib/concurrent/edge/promises.rb b/lib/concurrent/edge/promises.rb index 749d2ca8c..21469ee8c 100644 --- a/lib/concurrent/edge/promises.rb +++ b/lib/concurrent/edge/promises.rb @@ -920,13 +920,14 @@ def value!(timeout = nil) # @example # raise Promises.rejected_future(StandardError.new("boom")) # @raise [StandardError] when raising not 
rejected future + # @return [Exception] def exception(*args) raise Concurrent::Error, 'it is not rejected' unless rejected? - reason = internal_state.reason - if reason.is_a?(::Array) + reason = Array(internal_state.reason).compact + if reason.size > 1 Concurrent::MultipleErrors.new reason else - reason.exception(*args) + reason[0].exception(*args) end end From 7e17458a74164afe7c8f03ce5b2872595a6824aa Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Fri, 23 Dec 2016 16:48:43 +0100 Subject: [PATCH 53/68] More documentation --- .yardopts | 1 + doc/promises-main.md | 44 + doc/promises.in.md | 801 ++++++++++---- doc/promises.init.rb | 2 +- doc/promises.out.md | 990 +++++++++++++----- lib/concurrent/edge/cancellation.rb | 98 +- lib/concurrent/edge/promises.rb | 70 +- tasks/update_doc.rake | 47 +- .../default/fulldoc/html/css/common.css | 10 + 9 files changed, 1566 insertions(+), 497 deletions(-) create mode 100644 doc/promises-main.md diff --git a/.yardopts b/.yardopts index 262c698f9..a778ea15d 100644 --- a/.yardopts +++ b/.yardopts @@ -11,5 +11,6 @@ ./ext/concurrent_ruby_ext/**/*.c - doc/thread_pools.md +doc/promises.out.md README.md LICENSE.txt diff --git a/doc/promises-main.md b/doc/promises-main.md new file mode 100644 index 000000000..dde704dba --- /dev/null +++ b/doc/promises-main.md @@ -0,0 +1,44 @@ +# Description + +Promises is a new framework unifying former `Concurrent::Future`, +`Concurrent::Promise`, `Concurrent::IVar`, `Concurrent::Event`, +`Concurrent.dataflow`, `Delay`, `TimerTask` . It extensively uses the new +synchronization layer to make all the methods *lock-free* (with the exception +of obviously blocking operations like `#wait`, `#value`, etc.). As a result it +lowers a danger of deadlocking and offers better performance. + +It provides tools as other promise libraries, users coming from other languages +and other promise libraries will find the same tools here (probably named +differently though). 
The naming convention borrows heavily from JS promises. + +This framework however is not just a re-implementation of other promise +library, it takes inspiration from many other promise libraries, adds new +ideas, and integrates with other abstractions like actors and channels. +Therefore it is much more likely that user fill find a suitable solution for +his problem in this library, or if needed he will be able to combine parts +which were designed to work together well (rather than having to combine +fragilely independent tools). + +> *Note:* The channel and actor integration is younger and will stay in edge for +> a little longer than core promises. + +> *TODO* +> +> - What is it? +> - What is it for? +> - Main classes {Future}, {Event} +> - Explain pool usage :io vs :fast, and `_on` `_using` suffixes. +> - Why is this better than other solutions, integration actors and channels + +# Main classes + +The main public user-facing classes are {Concurrent::Promises::Event} and +{Concurrent::Promises::Future} which share common ancestor +{Concurrent::Promises::AbstractEventFuture}. + +**Event:** +> {include:Concurrent::Promises::Event} + +**Future:** +> {include:Concurrent::Promises::Future} + diff --git a/doc/promises.in.md b/doc/promises.in.md index 54bb6c39f..2a02149d0 100644 --- a/doc/promises.in.md +++ b/doc/promises.in.md @@ -1,24 +1,10 @@ -Promises is a new framework unifying former `Concurrent::Future`, -`Concurrent::Promise`, `Concurrent::IVar`, `Concurrent::Event`, -`Concurrent.dataflow`, `Delay`, and `TimerTask`. It extensively uses the new -synchronization layer to make all the features *lock-free*, -with the exception of obviously blocking operations like -`#wait`, `#value`, etc. As a result it lowers a danger of deadlocking and offers -better performance. +# Basics -*TODO* +## Factory methods -- What is it? -- What is it for? -- Main classes {Future}, {Event} -- Explain pool usage :io vs :fast, and `_on` `_using` suffixes. 
- -# Old examples - -*TODO review pending* - -Constructors are not accessible, instead there are many constructor methods in -FactoryMethods. +Future and Event are created indirectly with constructor methods in +FactoryMethods. They are not designed for inheritance but rather for +composition. ```ruby Concurrent::Promises::FactoryMethods.instance_methods false @@ -37,167 +23,428 @@ end.new.a_method Module.new { extend Concurrent::Promises::FactoryMethods }.resolvable_event ``` -The module is already extended into {Promises} for convenience. + +The module is already extended into {Concurrent::Promises} for convenience. ```ruby Concurrent::Promises.resolvable_event ``` -For this guide we include the module into `main` so we can call the factory -methods in following examples directly. +For this guide we introduce a shortcut in `main` so we can call the factory +methods in following examples by using `Promisses` directly. + +```ruby +Promises = Concurrent::Promises # +Promises.resolvable_event +``` + +## Asynchronous task + +The most basic use-case of the framework is asynchronous processing. A task can +be processed asynchronously by using a `future` factory method. The block will +be executed on an internal thread pool. + +Arguments of `future` are passed to the block and evaluation starts immediately. ```ruby -include Concurrent::Promises::FactoryMethods # -resolvable_event +future = Promises.future(0.1) do |duration| + sleep duration + :result +end ``` -Simple asynchronous task: +Asks if the future is resolved, here it will be still in the middle of the +sleep call. ```ruby -future = future(0.1) { |duration| sleep duration; :result } # evaluation starts immediately future.resolved? -# block until evaluated +``` + +Retrieving the value will block until the future is resolved. + +```ruby future.value future.resolved? ``` -Rejecting asynchronous task: +If the task fails we talk about the future being rejected. 
+ +```ruby +future = Promises.future { raise 'Boom' } +``` + +There is no result, the future was rejected with a reason. ```ruby -future = future { raise 'Boom' } future.value -future.value! rescue $! future.reason -# re-raising +``` + +It can be forced to raise the reason for rejection when retrieving the value. + +```ruby +begin + future.value! +rescue => e + e +end +``` + +Which is the same as `future.value! rescue $!` which will be used hereafter. + +Or it can be used directly as argument for raise, since it implements exception +method. + +```ruby raise future rescue $! ``` -Direct creation of resolved futures: +## States + +Lets define a inspection helper for methods. ```ruby -fulfilled_future(Object.new) -rejected_future(StandardError.new("boom")) +def inspect_methods(*methods, of:) + methods.reduce({}) { |h, m| h.update m => of.send(m) } +end # ``` -Chaining of futures: +Event has `pending` and `resolved` state. ```ruby -head = fulfilled_future 1 # -branch1 = head.then(&:succ) # +event = Promises.resolvable_event # +inspect_methods(:state, :pending?, :resolved?, of: event) + +event.resolve # +inspect_methods(:state, :pending?, :resolved?, of: event) +``` + +Future's `resolved` state is further specified to be `fulfilled` or `rejected`. + +```ruby +future = Promises.resolvable_future # +inspect_methods(:state, :pending?, :resolved?, :fulfilled?, :rejected?, + of: future) + +future.fulfill :value # +inspect_methods(:state, :pending?, :resolved?, :fulfilled?, :rejected?, + :result, :value, :reason, of: future) + +future = Promises.rejected_future StandardError.new # +inspect_methods(:state, :pending?, :resolved?, :fulfilled?, :rejected?, + :result, :value, :reason, of: future) +``` + +## Direct creation of resolved futures + +When an existing value has to wrapped in a future it does not have to go +through evaluation as follows. + +```ruby +Promises.future { :value } +``` + +Instead it can be created directly. 
+ +```ruby +Promises.fulfilled_future(:value) +Promises.rejected_future(StandardError.new('Ups')) +Promises.resolved_future(true, :value, nil) +Promises.resolved_future(false, nil, StandardError.new('Ups')) +``` + +## Chaining + +Big advantage of promises is ability to chain tasks together without blocking +current thread. + +```ruby +Promises. + future(2) { |v| v.succ }. + then(&:succ). + value! +``` + +As `future` factory method takes argument, `then` method takes as well. Any +supplied arguments are passed to the block, and the library ensures that they +are visible to the block. + +```ruby +Promises. + future('3') { |s| s.to_i }. + then(2) { |v, arg| v + arg }. + value +Promises. + fulfilled_future('3'). + then(&:to_i). + then(2, &:+). + value +Promises. + fulfilled_future(1). + chain(2) { |fulfilled, value, reason, arg| value + arg }. + value +``` + +Passing the arguments in (similarly as for a thread `Thread.new(arg) { |arg| +do_stuff arg }`) is **required**, both following examples may break. + +```ruby +arg = 1 +Thread.new { do_stuff arg } +Promises.future { do_stuff arg } +``` + +## Branching, and zipping + +Besides chaining it can also be branched. + +```ruby +head = Promises.fulfilled_future -1 # +branch1 = head.then(&:abs) # branch2 = head.then(&:succ).then(&:succ) # + +branch1.value! +branch2.value! +``` + +It can be combined back to one future by zipping (`zip`, `&`). + +```ruby branch1.zip(branch2).value! -# zip is aliased as & -(branch1 & branch2).then { |a, b| a + b }.value! -(branch1 & branch2).then(&:+).value! -# or a class method zip from FactoryMethods can be used to zip multiple futures -zip(branch1, branch2, branch1).then { |*values| values.reduce &:+ }.value! -# pick only first resolved -any(branch1, branch2).value! +(branch1 & branch2). + then { |a, b| a + b }. + value! +(branch1 & branch2). + then(&:+). + value! +Promises. + zip(branch1, branch2, branch1). + then { |*values| values.reduce(&:+) }. + value! 
+``` + +Instead of zipping only the first one can be taken if needed. + +```ruby +Promises.any(branch1, branch2).value! (branch1 | branch2).value! ``` -Any supplied arguments are passed to the block, promises ensure that they are visible to the block: +## Blocking methods + +In these examples we have used blocking methods like `value` extensively for +their convenience, however in practice is better to avoid them and continue +chaining. + +If they need to be used (e.g. when integrating with threads), `value!` is a +better option over `value` when rejections are not dealt with differently. +Otherwise the rejection are not handled and probably silently forgotten. + +## Error handling + +When one of the tasks in the chain fails, the rejection propagates down the +chain without executing the tasks created with `then`. + +```ruby +Promises. + fulfilled_future(Object.new). + then(&:succ). + then(&:succ). + result +``` + +As `then` chained tasks execute only on fulfilled futures, there is a `rescue` +method which chains a task which is executed only when the future is rejected. +It can be used to recover from rejection. + +Using rescue to fulfill to 0 instead of the error. + +```ruby +Promises. + fulfilled_future(Object.new). + then(&:succ). + then(&:succ). + rescue { |err| 0 }. + result +``` + +Rescue not executed when there is no rejection. ```ruby -future('3') { |s| s.to_i }.then(2) { |a, b| a + b }.value -fulfilled_future(1).then(2, &:+).value -fulfilled_future(1).chain(2) { |fulfilled, value, reason, arg| value + arg }.value +Promises. + fulfilled_future(1). + then(&:succ). + then(&:succ). + rescue { |e| 0 }. + result ``` -Error handling: +Tasks added with `chain` are evaluated always. 
```ruby -fulfilled_future(Object.new).then(&:succ).then(&:succ).rescue { |e| e.class }.value # error propagates -fulfilled_future(Object.new).then(&:succ).rescue { 1 }.then(&:succ).value # rescued and replaced with 1 -fulfilled_future(1).then(&:succ).rescue { |e| e.message }.then(&:succ).value # no error, rescue not applied +Promises. + fulfilled_future(1). + chain { |fulfilled, value, reason| fulfilled ? value : reason }. + value! +Promises. + rejected_future(StandardError.new('Ups')). + chain { |fulfilled, value, reason| fulfilled ? value : reason }. + value! +``` -rejected_zip = fulfilled_future(1) & rejected_future(StandardError.new('boom')) +Zip is rejected if any of the zipped futures is. + +```ruby +rejected_zip = Promises.zip( + Promises.fulfilled_future(1), + Promises.rejected_future(StandardError.new('Ups'))) rejected_zip.result -rejected_zip.then { |v| 'never happens' }.result -rejected_zip.rescue { |a, b| (a || b).message }.value -rejected_zip.chain { |fulfilled, values, reasons| [fulfilled, values.compact, reasons.compact] }.value +rejected_zip. + rescue { |reason1, reason2| (reason1 || reason2).message }. + value ``` -Delay will not evaluate until asked by #value or other method requiring resolution. +## Delayed futures + +Delayed futures will not evaluate until asked by `touch` or other method +requiring resolution. -``` ruby -future = delay { 'lazy' } +```ruby +future = Promises.delay { sleep 0.1; 'lazy' } sleep 0.1 # future.resolved? -future.value +future.touch +sleep 0.2 # +future.resolved? ``` -It propagates trough chain allowing whole or partial lazy chains. + +All blocking methods like `wait`, `value` call `touch` and trigger evaluation. + ```ruby -head = delay { 1 } -branch1 = head.then(&:succ) -branch2 = head.delay.then(&:succ) -join = branch1 & branch2 +Promises.delay { :value }.value +``` + +It propagates trough chain up allowing whole or partial lazy chains. 
-sleep 0.1 # nothing will resolve +```ruby +head = Promises.delay { 1 } # +branch1 = head.then(&:succ) # +branch2 = head.delay.then(&:succ) # +join = branch1 & branch2 # + +sleep 0.1 # +``` + +Nothing resolves. + +```ruby [head, branch1, branch2, join].map(&:resolved?) +``` +Force `branch1` evaluation. + +```ruby branch1.value -sleep 0.1 # forces only head to resolve, branch 2 stays pending +sleep 0.1 # [head, branch1, branch2, join].map(&:resolved?) +``` +Force evaluation of both by calling `value` on `join`. + +```ruby join.value [head, branch1, branch2, join].map(&:resolved?) ``` -When flatting, it waits for inner future. Only the last call to value blocks thread. +## Flatting + +Sometimes it is needed to wait for a inner future. Apparent solution is to wait +inside the future `Promises.future { Promises.future { 1+1 }.value }.value` +however as mentioned before, `value` calls should be **avoided** to avoid +blocking threads. Therefore there is a flat method which is a correct solution +in this situation and does not block any thread. ```ruby -future { future { 1+1 } }.flat.value +Promises.future { Promises.future { 1+1 } }.flat.value! +``` -# more complicated example -future { future { future { 1 + 1 } } }. +A more complicated example. +```ruby +Promises. + future { Promises.future { Promises.future { 1 + 1 } } }. flat(1). - then { |f| f.then(&:succ) }. - flat(1).value + then { |future| future.then(&:succ) }. + flat(1). + value! ``` -Scheduling of asynchronous tasks: +## Scheduling -```ruby +Tasks can be planned to be executed with a time delay. -# it'll be executed after 0.1 seconds -scheduled = schedule(0.1) { 1 } +Schedule task to be executed in 0.1 seconds. +```ruby +scheduled = Promises.schedule(0.1) { 1 } scheduled.resolved? 
-scheduled.value # available after 0.1sec +``` -# and in chain -scheduled = delay { 1 }.schedule(0.1).then(&:succ) -# will not be scheduled until value is requested -sleep 0.1 # -scheduled.value # returns after another 0.1sec +Value will become available after 0.1 seconds. + +```ruby +scheduled.value ``` -Resolvable Future and Event: +It can be used in the chain as well, where the delay is counted form a moment +its parent resolves. Therefore following future will be resolved in 0.2 seconds. ```ruby +future = Promises. + future { sleep 0.1; :result }. + schedule(0.1). + then(&:to_s). + value! +``` -future = resolvable_future -event = resolvable_event() +Time can be used as well. -# These threads will be blocked until the future and event is resolved -t1 = Thread.new { future.value } # -t2 = Thread.new { event.wait } # +```ruby +Promises.schedule(Time.now + 10) { :val } +``` + +## Resolvable Future and Event: +Sometimes it is required to resolve a future externally, in these cases +`resolvable_future` and `resolvable_event` factory methods can be uses. See +{Concurrent::Promises::ResolvableFuture} and +{Concurrent::Promises::ResolvableEvent}. + +```ruby +future = Promises.resolvable_future +``` + +The thread will be blocked until the future is resolved + +```ruby +thread = Thread.new { future.value } # future.fulfill 1 +thread.value +``` + +Future can be resolved only once. + +```ruby future.fulfill 1 rescue $! future.fulfill 2, false -event.resolve - -# The threads can be joined now -[t1, t2].each &:join # ``` -Callbacks: +# Advanced + +## Callbacks ```ruby queue = Queue.new -future = delay { 1 + 1 } +future = Promises.delay { 1 + 1 } future.on_fulfillment { queue << 1 } # evaluated asynchronously future.on_fulfillment! { queue << 2 } # evaluated on resolving thread @@ -208,96 +455,249 @@ queue.pop queue.pop ``` -Factory methods are taking names of the global executors -(or instances of custom executors). 
+## Using executors + +Factory methods, chain, and callback methods have all other version of them +which takes executor argument. + +It takes an instance of an executor or a symbol which is a shortcuts for the +two global pools in concurrent-ruby. `fast` for short and non-blocking tasks +and `:io` for blocking and long tasks. ```ruby -# executed on :fast executor, only short and non-blocking tasks can go there -future_on(:fast) { 2 }. - # executed on executor for blocking and long operations +Promises.future_on(:fast) { 2 }. then_on(:io) { File.read __FILE__ }. - wait + value.size ``` -Interoperability with actors: +# Interoperability + +## Actors + +Create an actor which takes received numbers and returns the number squared. ```ruby actor = Concurrent::Actor::Utils::AdHoc.spawn :square do -> v { v ** 2 } end +``` +Send result of `1+1` to the actor, and add 2 to the result send back from the +actor. -future { 2 }. +```ruby +Promises. + future { 1 + 1 }. then_ask(actor). then { |v| v + 2 }. - value + value! +``` + +So `(1 + 1)**2 + 2 = 6`. + +The `ask` method returns future. -actor.ask(2).then(&:succ).value +```ruby +actor.ask(2).then(&:succ).value! ``` -# Common use-cases Examples +## Channels + +> *TODO: To be added* -## simple background processing +# Use-cases + +## Simple background processing ```ruby -future { do_stuff } +Promises.future { do_stuff } ``` -## parallel background processing +## Parallel background processing ```ruby -jobs = 10.times.map { |i| future { i } } # -zip(*jobs).value +tasks = 4.times.map { |i| Promises.future(i) { |i| i*2 } } +Promises.zip(*tasks).value! ``` -## periodic task +## Actor background processing + +Actors are mainly keep and isolate state, they should stay responsive not being +blocked by a longer running computations. It desirable to offload the work to +stateless promises. + +Lets define an actor which will process jobs, while staying responsive, and +tracking the number of tasks being processed. 
```ruby -def schedule_job(interval, &job) - # schedule the first execution and chain restart og the job - Concurrent.schedule(interval, &job).chain do |fulfilled, continue, reason| - if fulfilled - schedule_job(interval, &job) if continue +class Computer < Concurrent::Actor::RestartingContext + def initialize + super() + @jobs = {} + end + + def on_message(msg) + command, *args = msg + case command + # new job to process + when :run + job = args[0] + @jobs[job] = envelope.future + # Process asynchronously and send message back when done. + Concurrent::Promises.future(&job).chain(job) do |fulfilled, value, reason, job| + self.tell [:done, job, fulfilled, value, reason] + end + # Do not make return value of this method to be answer of this message. + # We are answering later in :done by resolving the future kept in @jobs. + Concurrent::Actor::Behaviour::MESSAGE_PROCESSED + when :done + job, fulfilled, value, reason = *args + future = @jobs.delete job + # Answer the job's result. + future.resolve fulfilled, value, reason + when :status + { running_jobs: @jobs.size } else - # handle error - p reason - # retry sooner - schedule_job(interval / 10, &job) + # Continue to fail with unknown message. + pass end end end +``` -queue = Queue.new -count = 0 -interval = 0.05 # small just not to delay execution of this example +Create the computer actor and send it 3 jobs. -schedule_job interval do - queue.push count - count += 1 - # to continue scheduling return true, false will end the task - if count < 4 - # to continue scheduling return true - true +```ruby +computer = Concurrent::Actor.spawn Computer, :computer +results = 3.times.map { computer.ask [:run, -> { sleep 0.1; :result }] } +computer.ask(:status).value! +results.map(&:value!) +``` +## Too many threads / fibers + +Sometimes an application requires to process a lot of tasks concurrently. If +the number of concurrent tasks is high enough than it is not possible to create +a Thread for each of them. 
A partially satisfactory solution could be to use
+Fibers, but that solution locks the application on MRI since other Ruby
+implementations are using threads for each Fiber.
+
+This library provides a {Concurrent::Promises::Future#run} method on a future
+to simulate threads without actually occupying one all the time. The run method
+is similar to {Concurrent::Promises::Future#flat} but it will keep flattening
+until it's fulfilled with a non-future value, then the value is taken as a result
+of the process simulated by `run`.
+
+```ruby
+body = lambda do |v|
+  # Some computation step of the process
+  new_v = v + 1
+  # Is the process finished?
+  if new_v < 5
+    # Continue computing with new value, does not have to be recursive.
+    # It just has to return a future.
+    Promises.future(new_v, &body)
  else
-    # close the queue with nil to simplify reading it
-    queue.push nil
-    # to end the task return false
-    false
+    # The process is finished, fulfill the final value with `new_v`.
+    new_v
  end
end
+Promises.future(0, &body).run.value! # => 5
+```

-# read the queue
-arr, v = [], nil; arr << v while (v = queue.pop) # 
-# arr has the results from the executed scheduled tasks
-arr
+This solution works well on any Ruby implementation.
+
+> TODO add more complete example
+
+## Cancellation
+
+### Simple
+
+Let's have two processes which will count until cancelled.
+
+```ruby
+source, token = Concurrent::Cancellation.create
+
+count_until_cancelled = -> token, count do
+  if token.canceled?
+    count
+  else
+    Promises.future token, count+1, &count_until_cancelled
+  end
+end # 
+
+futures = Array.new(2) do
+  Promises.future(token, 0, &count_until_cancelled).run
+end
+
+sleep 0.01 # 
+source.cancel
+futures.map(&:value!)
```

Cancellation can also be used as event or future to log or plan re-execution. 
```ruby -DB = Concurrent::Actor::Utils::AdHoc.spawn :db do - data = Array.new(10) { |i| '*' * i } +token.to_event.chain do + # log cancellation + # plane re-execution +end +``` + +### Parallel background processing with cancellation + +Each task tries to count to 1000 but there is a randomly failing test. The +tasks share a cancellation, when one of them fails it cancels the others. + +```ruby +source, token = Concurrent::Cancellation.create +tasks = 4.times.map do |i| + Promises.future(source, token, i) do |source, token, i| + count = 0 + 1000.times do + break count = :cancelled if token.canceled? + count += 1 + sleep 0.01 + if rand > 0.95 + source.cancel + raise 'random error' + end + count + end + end +end +Promises.zip(*tasks).result +``` + +Without the randomly failing part it produces following. + +```ruby +source, token = Concurrent::Cancellation.create +tasks = 4.times.map do |i| + Promises.future(source, token, i) do |source, token, i| + count = 0 + 1000.times do + break count = :cancelled if token.canceled? + count += 1 + # sleep 0.01 + # if rand > 0.95 + # source.cancel + # raise 'random error' + # end + end + count + end +end +Promises.zip(*tasks).result +``` + +## Throttling concurrency + +By creating an actor managing the resource we can control how many threads is +accessing the resource. In this case one at the time. + +```ruby +data = Array.new(10) { |i| '*' * i } +DB = Concurrent::Actor::Utils::AdHoc.spawn :db, data do |data| lambda do |message| # pretending that this queries a DB data[message] @@ -305,27 +705,28 @@ DB = Concurrent::Actor::Utils::AdHoc.spawn :db do end concurrent_jobs = 11.times.map do |v| - - fulfilled_future(v). + DB. # ask the DB with the `v`, only one at the time, rest is parallel - then_ask(DB). + ask(v). # get size of the string, rejects for 11 then(&:size). 
-  rescue { |reason| reason.message } # translate error to value (exception, message)
+      # translate error to a value (message of the exception)
+      rescue { |reason| reason.message }
 end #
 
-zip(*concurrent_jobs).value!
+Promises.zip(*concurrent_jobs).value!
 ```
 
-In reality there is often a pool though:
+Often there is more than one DB connection, in which case a pool can be used.
 
 ```ruby
-data = Array.new(10) { |i| '*' * i }
 pool_size = 5
 
 DB_POOL = Concurrent::Actor::Utils::Pool.spawn!('DB-pool', pool_size) do |index|
   # DB connection constructor
-  Concurrent::Actor::Utils::AdHoc.spawn(name: "worker-#{index}", args: [data]) do |data|
+  Concurrent::Actor::Utils::AdHoc.spawn(
+      name: "connection-#{index}",
+      args: [data]) do |data|
     lambda do |message|
       # pretending that this queries a DB
       data[message]
@@ -334,51 +735,91 @@ DB_POOL = Concurrent::Actor::Utils::Pool.spawn!('DB-pool', pool_size) do |index|
 end
 
 concurrent_jobs = 11.times.map do |v|
-
-  fulfilled_future(v).
-      # ask the DB_POOL with the `v`, only 5 at the time, rest is parallel
-      then_ask(DB_POOL).
+  DB_POOL.
+      # ask the DB pool with the `v`, only 5 at a time, rest is parallel
+      ask(v).
+      # get size of the string, rejects for 11
       then(&:size).
-      rescue { |reason| reason.message }
+      # translate error to a value (message of the exception)
+      rescue { |reason| reason.message }
 end #
 
-zip(*concurrent_jobs).value!
+Promises.zip(*concurrent_jobs).value!
 ```
 
-# Experimental
+In other cases the DB adapter maintains its internal connection pool and we
+just need to limit concurrent access to the DB's API to avoid the calls being
+blocked.
 
-## Cancellation
+Let's pretend that the `#[]` method on `DB_INTERNAL_POOL` is using the internal
+pool of size 3. 
We create throttle with the same size ```ruby -source, token = Concurrent::Cancellation.create +DB_INTERNAL_POOL = Concurrent::Array.new data -futures = Array.new(2) do - future(token) do |token| - token.loop_until_canceled { Thread.pass } - :done - end -end +max_tree = Promises.throttle 3 + +futures = 11.times.map do |i| + max_tree. + # throttled tasks, at most 3 simultaneous calls of [] on the database + then_throttle { DB_INTERNAL_POOL[i] }. + # un-throttled tasks, unlimited concurrency + then { |starts| starts.size }. + rescue { |reason| reason.message } +end # -sleep 0.05 -source.cancel futures.map(&:value!) ``` -## Throttling +## Long stream of tasks + +> TODO Channel + +## Parallel enumerable ? + +> TODO + +## Periodic task + +> TODO revisit, use cancellation, add to library ```ruby -data = (0..10).to_a -max_tree = Concurrent::Throttle.new 3 +def schedule_job(interval, &job) + # schedule the first execution and chain restart of the job + Promises.schedule(interval, &job).chain do |fulfilled, continue, reason| + if fulfilled + schedule_job(interval, &job) if continue + else + # handle error + reason + # retry sooner + schedule_job(interval, &job) + end + end +end -futures = data.map do |data| - future(data) do |data| - # un-throttled - data + 1 - end.throttle(max_tree) do |trigger| - # throttled, imagine it uses DB connections or other limited resource - trigger.then { |v| v * 2 * 2 } +queue = Queue.new +count = 0 +interval = 0.05 # small just not to delay execution of this example + +schedule_job interval do + queue.push count + count += 1 + # to continue scheduling return true, false will end the task + if count < 4 + # to continue scheduling return true + true + else + # close the queue with nil to simplify reading it + queue.push nil + # to end the task return false + false end -end # +end -futures.map(&:value!) 
+ # read the queue +arr, v = [], nil; arr << v while (v = queue.pop) # + # arr has the results from the executed scheduled tasks +arr ``` + diff --git a/doc/promises.init.rb b/doc/promises.init.rb index 4fdb8550e..a84fa2d11 100644 --- a/doc/promises.init.rb +++ b/doc/promises.init.rb @@ -4,4 +4,4 @@ def do_stuff :stuff end -Concurrent.use_stdlib_logger Logger::DEBUG +# Concurrent.use_stdlib_logger Logger::DEBUG diff --git a/doc/promises.out.md b/doc/promises.out.md index 95987cfa1..57aec0026 100644 --- a/doc/promises.out.md +++ b/doc/promises.out.md @@ -1,55 +1,42 @@ -Promises is a new framework unifying former `Concurrent::Future`, -`Concurrent::Promise`, `Concurrent::IVar`, `Concurrent::Event`, -`Concurrent.dataflow`, `Delay`, and `TimerTask`. It extensively uses the new -synchronization layer to make all the features *lock-free*, -with the exception of obviously blocking operations like -`#wait`, `#value`, etc. As a result it lowers a danger of deadlocking and offers -better performance. +# Basics -*TODO* +## Factory methods -- What is it? -- What is it for? -- Main classes {Future}, {Event} -- Explain pool usage :io vs :fast, and `_on` `_using` suffixes. - -# Old examples - -*TODO review pending* - -Constructors are not accessible, instead there are many constructor methods in -FactoryMethods. +Future and Event are created indirectly with constructor methods in +FactoryMethods. They are not designed for inheritance but rather for +composition. 
```ruby Concurrent::Promises::FactoryMethods.instance_methods false -# => [:resolvable_event, -# :resolvable_event_on, +# => [:select, +# :zip, +# :create, +# :delay, +# :future, # :resolvable_future, +# :resolvable_event, +# :resolvable_event_on, # :resolvable_future_on, -# :future, # :future_on, # :resolved_future, # :fulfilled_future, # :rejected_future, # :resolved_event, -# :create, -# :delay, # :delay_on, # :schedule, # :schedule_on, # :zip_futures, # :zip_futures_on, -# :zip, # :zip_events, # :zip_events_on, # :any_resolved_future, -# :any, # :any_resolved_future_on, +# :any, # :any_fulfilled_future, # :any_fulfilled_future_on, # :any_event, # :any_event_on, -# :select] +# :throttle] ``` The module can be included or extended where needed. @@ -62,201 +49,484 @@ Class.new do resolvable_event end end.new.a_method -# => <#Concurrent::Promises::ResolvableEvent:0x7fc5b1b085c8 pending blocks:[]> +# => <#Concurrent::Promises::ResolvableEvent:0x7fbfd2d2a890 pending> Module.new { extend Concurrent::Promises::FactoryMethods }.resolvable_event -# => <#Concurrent::Promises::ResolvableEvent:0x7fc5b1b02088 pending blocks:[]> +# => <#Concurrent::Promises::ResolvableEvent:0x7fbfd2d28978 pending> ``` -The module is already extended into {Promises} for convenience. + +The module is already extended into {Concurrent::Promises} for convenience. ```ruby Concurrent::Promises.resolvable_event -# => <#Concurrent::Promises::ResolvableEvent:0x7fc5b1afac48 pending blocks:[]> +# => <#Concurrent::Promises::ResolvableEvent:0x7fbfd2d197c0 pending> +``` + +For this guide we introduce a shortcut in `main` so we can call the factory +methods in following examples by using `Promisses` directly. + +```ruby +Promises = Concurrent::Promises +Promises.resolvable_event +# => <#Concurrent::Promises::ResolvableEvent:0x7fbfd2d12628 pending> ``` -For this guide we include the module into `main` so we can call the factory -methods in following examples directly. 
+## Asynchronous task + +The most basic use-case of the framework is asynchronous processing. A task can +be processed asynchronously by using a `future` factory method. The block will +be executed on an internal thread pool. + +Arguments of `future` are passed to the block and evaluation starts immediately. ```ruby -include Concurrent::Promises::FactoryMethods -resolvable_event -# => <#Concurrent::Promises::ResolvableEvent:0x7fc5b1af8830 pending blocks:[]> +future = Promises.future(0.1) do |duration| + sleep duration + :result +end +# => <#Concurrent::Promises::Future:0x7fbfd2d027c8 pending> ``` -Simple asynchronous task: +Asks if the future is resolved, here it will be still in the middle of the +sleep call. ```ruby -future = future(0.1) { |duration| sleep duration; :result } # evaluation starts immediately future.resolved? # => false -# block until evaluated +``` + +Retrieving the value will block until the future is resolved. + +```ruby future.value # => :result future.resolved? # => true ``` -Rejecting asynchronous task: +If the task fails we talk about the future being rejected. + +```ruby +future = Promises.future { raise 'Boom' } +# => <#Concurrent::Promises::Future:0x7fbfd2cf1c70 pending> +``` + +There is no result, the future was rejected with a reason. ```ruby -future = future { raise 'Boom' } -# => <#Concurrent::Promises::Future:0x7fc5b1ad9700 pending blocks:[]> future.value # => nil -future.value! rescue $! # => # future.reason # => # -# re-raising +``` + +It can be forced to raise the reason for rejection when retrieving the value. + +```ruby +begin + future.value! +rescue => e + e +end # => # +``` + +Which is the same as `future.value! rescue $!` which will be used hereafter. + +Or it can be used directly as argument for raise, since it implements exception +method. + +```ruby raise future rescue $! # => # ``` -Direct creation of resolved futures: +## States + +Lets define a inspection helper for methods. 
```ruby -fulfilled_future(Object.new) -# => <#Concurrent::Promises::Future:0x7fc5b1acaa70 fulfilled blocks:[]> -rejected_future(StandardError.new("boom")) -# => <#Concurrent::Promises::Future:0x7fc5b1ac97b0 rejected blocks:[]> +def inspect_methods(*methods, of:) + methods.reduce({}) { |h, m| h.update m => of.send(m) } +end ``` -Chaining of futures: +Event has `pending` and `resolved` state. ```ruby -head = fulfilled_future 1 -branch1 = head.then(&:succ) +event = Promises.resolvable_event +inspect_methods(:state, :pending?, :resolved?, of: event) +# => {:state=>:pending, :pending?=>true, :resolved?=>false} + +event.resolve +inspect_methods(:state, :pending?, :resolved?, of: event) +# => {:state=>:resolved, :pending?=>false, :resolved?=>true} +``` + +Future's `resolved` state is further specified to be `fulfilled` or `rejected`. + +```ruby +future = Promises.resolvable_future +inspect_methods(:state, :pending?, :resolved?, :fulfilled?, :rejected?, + of: future) +# => {:state=>:pending, +# :pending?=>true, +# :resolved?=>false, +# :fulfilled?=>false, +# :rejected?=>false} + +future.fulfill :value +inspect_methods(:state, :pending?, :resolved?, :fulfilled?, :rejected?, + :result, :value, :reason, of: future) +# => {:state=>:fulfilled, +# :pending?=>false, +# :resolved?=>true, +# :fulfilled?=>true, +# :rejected?=>false, +# :result=>[true, :value, nil], +# :value=>:value, +# :reason=>nil} + +future = Promises.rejected_future StandardError.new +inspect_methods(:state, :pending?, :resolved?, :fulfilled?, :rejected?, + :result, :value, :reason, of: future) +# => {:state=>:rejected, +# :pending?=>false, +# :resolved?=>true, +# :fulfilled?=>false, +# :rejected?=>true, +# :result=>[false, nil, #], +# :value=>nil, +# :reason=>#} +``` + +## Direct creation of resolved futures + +When an existing value has to wrapped in a future it does not have to go +through evaluation as follows. 
+ +```ruby +Promises.future { :value } +# => <#Concurrent::Promises::Future:0x7fbfd2c69ca8 pending> +``` + +Instead it can be created directly. + +```ruby +Promises.fulfilled_future(:value) +# => <#Concurrent::Promises::Future:0x7fbfd2c61a30 fulfilled> +Promises.rejected_future(StandardError.new('Ups')) +# => <#Concurrent::Promises::Future:0x7fbfd2c606f8 rejected> +Promises.resolved_future(true, :value, nil) +# => <#Concurrent::Promises::Future:0x7fbfd2c5b1a8 fulfilled> +Promises.resolved_future(false, nil, StandardError.new('Ups')) +# => <#Concurrent::Promises::Future:0x7fbfd2c591f0 rejected> +``` + +## Chaining + +Big advantage of promises is ability to chain tasks together without blocking +current thread. + +```ruby +Promises. + future(2) { |v| v.succ }. + then(&:succ). + value! # => 4 +``` + +As `future` factory method takes argument, `then` method takes as well. Any +supplied arguments are passed to the block, and the library ensures that they +are visible to the block. + +```ruby +Promises. + future('3') { |s| s.to_i }. + then(2) { |v, arg| v + arg }. + value # => 5 +Promises. + fulfilled_future('3'). + then(&:to_i). + then(2, &:+). + value # => 5 +Promises. + fulfilled_future(1). + chain(2) { |fulfilled, value, reason, arg| value + arg }. + value # => 3 +``` + +Passing the arguments in (similarly as for a thread `Thread.new(arg) { |arg| +do_stuff arg }`) is **required**, both following examples may break. + +```ruby +arg = 1 # => 1 +Thread.new { do_stuff arg } +# => # +Promises.future { do_stuff arg } +# => <#Concurrent::Promises::Future:0x7fbfd2bebf10 pending> +``` + +## Branching, and zipping + +Besides chaining it can also be branched. + +```ruby +head = Promises.fulfilled_future -1 +branch1 = head.then(&:abs) branch2 = head.then(&:succ).then(&:succ) -branch1.zip(branch2).value! # => [2, 3] -# zip is aliased as & -(branch1 & branch2).then { |a, b| a + b }.value! -# => 5 -(branch1 & branch2).then(&:+).value! 
# => 5 -# or a class method zip from FactoryMethods can be used to zip multiple futures -zip(branch1, branch2, branch1).then { |*values| values.reduce &:+ }.value! -# => 7 -# pick only first resolved -any(branch1, branch2).value! # => 2 -(branch1 | branch2).value! # => 2 + +branch1.value! # => 1 +branch2.value! # => 1 +``` + +It can be combined back to one future by zipping (`zip`, `&`). + +```ruby +branch1.zip(branch2).value! # => [1, 1] +(branch1 & branch2). + then { |a, b| a + b }. + value! # => 2 +(branch1 & branch2). + then(&:+). + value! # => 2 +Promises. + zip(branch1, branch2, branch1). + then { |*values| values.reduce(&:+) }. + value! # => 3 ``` -Any supplied arguments are passed to the block, promises ensure that they are visible to the block: +Instead of zipping only the first one can be taken if needed. ```ruby -future('3') { |s| s.to_i }.then(2) { |a, b| a + b }.value -# => 5 -fulfilled_future(1).then(2, &:+).value # => 3 -fulfilled_future(1).chain(2) { |fulfilled, value, reason, arg| value + arg }.value -# => 3 +Promises.any(branch1, branch2).value! # => 1 +(branch1 | branch2).value! # => 1 ``` -Error handling: +## Blocking methods + +In these examples we have used blocking methods like `value` extensively for +their convenience, however in practice is better to avoid them and continue +chaining. + +If they need to be used (e.g. when integrating with threads), `value!` is a +better option over `value` when rejections are not dealt with differently. +Otherwise the rejection are not handled and probably silently forgotten. + +## Error handling + +When one of the tasks in the chain fails, the rejection propagates down the +chain without executing the tasks created with `then`. + +```ruby +Promises. + fulfilled_future(Object.new). + then(&:succ). + then(&:succ). 
+ result +# => [false, +# nil, +# #>] +``` + +As `then` chained tasks execute only on fulfilled futures, there is a `rescue` +method which chains a task which is executed only when the future is rejected. +It can be used to recover from rejection. + +Using rescue to fulfill to 0 instead of the error. + +```ruby +Promises. + fulfilled_future(Object.new). + then(&:succ). + then(&:succ). + rescue { |err| 0 }. + result # => [true, 0, nil] +``` + +Rescue not executed when there is no rejection. + +```ruby +Promises. + fulfilled_future(1). + then(&:succ). + then(&:succ). + rescue { |e| 0 }. + result # => [true, 3, nil] +``` + +Tasks added with `chain` are evaluated always. ```ruby -fulfilled_future(Object.new).then(&:succ).then(&:succ).rescue { |e| e.class }.value # error propagates -fulfilled_future(Object.new).then(&:succ).rescue { 1 }.then(&:succ).value # rescued and replaced with 1 -fulfilled_future(1).then(&:succ).rescue { |e| e.message }.then(&:succ).value # no error, rescue not applied +Promises. + fulfilled_future(1). + chain { |fulfilled, value, reason| fulfilled ? value : reason }. + value! # => 1 +Promises. + rejected_future(StandardError.new('Ups')). + chain { |fulfilled, value, reason| fulfilled ? value : reason }. + value! # => # +``` -rejected_zip = fulfilled_future(1) & rejected_future(StandardError.new('boom')) -# => <#Concurrent::Promises::Future:0x7fc5b3051380 rejected blocks:[]> +Zip is rejected if any of the zipped futures is. 
+ +```ruby +rejected_zip = Promises.zip( + Promises.fulfilled_future(1), + Promises.rejected_future(StandardError.new('Ups'))) +# => <#Concurrent::Promises::Future:0x7fbfd2b12dc8 rejected> rejected_zip.result -# => [false, [1, nil], [nil, #]] -rejected_zip.then { |v| 'never happens' }.result -# => [false, [1, nil], [nil, #]] -rejected_zip.rescue { |a, b| (a || b).message }.value -# => "boom" -rejected_zip.chain { |fulfilled, values, reasons| [fulfilled, values.compact, reasons.compact] }.value -# => [false, [1], [#]] -``` - -Delay will not evaluate until asked by #value or other method requiring resolution. - -``` ruby -future = delay { 'lazy' } -sleep 0.1 # -future.resolved? -future.value -``` -It propagates trough chain allowing whole or partial lazy chains. -```ruby -head = delay { 1 } -# => <#Concurrent::Promises::Future:0x7fc5b3021450 pending blocks:[]> -branch1 = head.then(&:succ) -# => <#Concurrent::Promises::Future:0x7fc5b301b398 pending blocks:[]> -branch2 = head.delay.then(&:succ) -# => <#Concurrent::Promises::Future:0x7fc5b30190c0 pending blocks:[]> -join = branch1 & branch2 -# => <#Concurrent::Promises::Future:0x7fc5b30138f0 pending blocks:[]> - -sleep 0.1 # nothing will resolve +# => [false, [1, nil], [nil, #]] +rejected_zip. + rescue { |reason1, reason2| (reason1 || reason2).message }. + value # => "Ups" +``` + +## Delayed futures + +Delayed futures will not evaluate until asked by `touch` or other method +requiring resolution. + +```ruby +future = Promises.delay { sleep 0.1; 'lazy' } +# => <#Concurrent::Promises::Future:0x7fbfd2af8f68 pending> +sleep 0.1 +future.resolved? # => false +future.touch +# => <#Concurrent::Promises::Future:0x7fbfd2af8f68 pending> +sleep 0.2 +future.resolved? # => true +``` + +All blocking methods like `wait`, `value` call `touch` and trigger evaluation. + +```ruby +Promises.delay { :value }.value # => :value +``` + +It propagates trough chain up allowing whole or partial lazy chains. 
+ +```ruby +head = Promises.delay { 1 } +branch1 = head.then(&:succ) +branch2 = head.delay.then(&:succ) +join = branch1 & branch2 + +sleep 0.1 +``` + +Nothing resolves. + +```ruby [head, branch1, branch2, join].map(&:resolved?) # => [false, false, false, false] +``` + +Force `branch1` evaluation. +```ruby branch1.value # => 2 -sleep 0.1 # forces only head to resolve, branch 2 stays pending +sleep 0.1 [head, branch1, branch2, join].map(&:resolved?) # => [true, true, false, false] +``` +Force evaluation of both by calling `value` on `join`. + +```ruby join.value # => [2, 2] [head, branch1, branch2, join].map(&:resolved?) # => [true, true, true, true] ``` -When flatting, it waits for inner future. Only the last call to value blocks thread. +## Flatting + +Sometimes it is needed to wait for a inner future. Apparent solution is to wait +inside the future `Promises.future { Promises.future { 1+1 }.value }.value` +however as mentioned before, `value` calls should be **avoided** to avoid +blocking threads. Therefore there is a flat method which is a correct solution +in this situation and does not block any thread. ```ruby -future { future { 1+1 } }.flat.value # => 2 +Promises.future { Promises.future { 1+1 } }.flat.value! +# => 2 +``` -# more complicated example -future { future { future { 1 + 1 } } }. +A more complicated example. +```ruby +Promises. + future { Promises.future { Promises.future { 1 + 1 } } }. flat(1). - then { |f| f.then(&:succ) }. - flat(1).value # => 3 + then { |future| future.then(&:succ) }. + flat(1). + value! # => 3 ``` -Scheduling of asynchronous tasks: +## Scheduling -```ruby +Tasks can be planned to be executed with a time delay. -# it'll be executed after 0.1 seconds -scheduled = schedule(0.1) { 1 } -# => <#Concurrent::Promises::Future:0x7fc5b1a2a7f0 pending blocks:[]> +Schedule task to be executed in 0.1 seconds. +```ruby +scheduled = Promises.schedule(0.1) { 1 } +# => <#Concurrent::Promises::Future:0x7fbfd2a72490 pending> scheduled.resolved? 
# => false -scheduled.value # available after 0.1sec +``` -# and in chain -scheduled = delay { 1 }.schedule(0.1).then(&:succ) -# => <#Concurrent::Promises::Future:0x7fc5b1a19a18 pending blocks:[]> -# will not be scheduled until value is requested -sleep 0.1 -scheduled.value # returns after another 0.1sec +Value will become available after 0.1 seconds. + +```ruby +scheduled.value # => 1 ``` -Resolvable Future and Event: +It can be used in the chain as well, where the delay is counted form a moment +its parent resolves. Therefore following future will be resolved in 0.2 seconds. ```ruby +future = Promises. + future { sleep 0.1; :result }. + schedule(0.1). + then(&:to_s). + value! # => "result" +``` + +Time can be used as well. + +```ruby +Promises.schedule(Time.now + 10) { :val } +# => <#Concurrent::Promises::Future:0x7fbfd41b0698 pending> +``` -future = resolvable_future -# => <#Concurrent::Promises::ResolvableFuture:0x7fc5b19c17a0 pending blocks:[]> -event = resolvable_event() -# => <#Concurrent::Promises::ResolvableEvent:0x7fc5b19c0468 pending blocks:[]> +## Resolvable Future and Event: -# These threads will be blocked until the future and event is resolved -t1 = Thread.new { future.value } -t2 = Thread.new { event.wait } +Sometimes it is required to resolve a future externally, in these cases +`resolvable_future` and `resolvable_event` factory methods can be uses. See +{Concurrent::Promises::ResolvableFuture} and +{Concurrent::Promises::ResolvableEvent}. +```ruby +future = Promises.resolvable_future +# => <#Concurrent::Promises::ResolvableFuture:0x7fbfd2a5a480 pending> +``` + +The thread will be blocked until the future is resolved + +```ruby +thread = Thread.new { future.value } future.fulfill 1 -# => <#Concurrent::Promises::ResolvableFuture:0x7fc5b19c17a0 fulfilled blocks:[]> +# => <#Concurrent::Promises::ResolvableFuture:0x7fbfd2a5a480 fulfilled> +thread.value # => 1 +``` + +Future can be resolved only once. + +```ruby future.fulfill 1 rescue $! 
-# => # +# => #[true, 1, nil], :new_result=>[true, 1, nil]}> future.fulfill 2, false # => false -event.resolve -# => <#Concurrent::Promises::ResolvableEvent:0x7fc5b19c0468 fulfilled blocks:[]> - -# The threads can be joined now -[t1, t2].each &:join ``` -Callbacks: +# Advanced + +## Callbacks ```ruby -queue = Queue.new # => # -future = delay { 1 + 1 } -# => <#Concurrent::Promises::Future:0x7fc5b193a9a8 pending blocks:[]> +queue = Queue.new # => # +future = Promises.delay { 1 + 1 } +# => <#Concurrent::Promises::Future:0x7fbfd2a30f90 pending> future.on_fulfillment { queue << 1 } # evaluated asynchronously future.on_fulfillment! { queue << 2 } # evaluated on resolving thread @@ -267,119 +537,270 @@ queue.pop # => 2 queue.pop # => 1 ``` -Factory methods are taking names of the global executors -(or instances of custom executors). +## Using executors + +Factory methods, chain, and callback methods have all other version of them +which takes executor argument. + +It takes an instance of an executor or a symbol which is a shortcuts for the +two global pools in concurrent-ruby. `fast` for short and non-blocking tasks +and `:io` for blocking and long tasks. ```ruby -# executed on :fast executor, only short and non-blocking tasks can go there -future_on(:fast) { 2 }. - # executed on executor for blocking and long operations +Promises.future_on(:fast) { 2 }. then_on(:io) { File.read __FILE__ }. - wait + value.size # => 18754 ``` -Interoperability with actors: +# Interoperability + +## Actors + +Create an actor which takes received numbers and returns the number squared. ```ruby actor = Concurrent::Actor::Utils::AdHoc.spawn :square do -> v { v ** 2 } end -# => # +# => # +``` +Send result of `1+1` to the actor, and add 2 to the result send back from the +actor. -future { 2 }. +```ruby +Promises. + future { 1 + 1 }. then_ask(actor). then { |v| v + 2 }. - value # => 6 + value! # => 6 +``` + +So `(1 + 1)**2 + 2 = 6`. + +The `ask` method returns future. 
-actor.ask(2).then(&:succ).value # => 5 +```ruby +actor.ask(2).then(&:succ).value! # => 5 ``` -# Common use-cases Examples +## Channels + +> *TODO: To be added* + +# Use-cases -## simple background processing +## Simple background processing ```ruby -future { do_stuff } -# => <#Concurrent::Promises::Future:0x7fc5b186b4f0 pending blocks:[]> +Promises.future { do_stuff } +# => <#Concurrent::Promises::Future:0x7fbfd298a758 pending> ``` -## parallel background processing +## Parallel background processing ```ruby -jobs = 10.times.map { |i| future { i } } -zip(*jobs).value # => [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] +tasks = 4.times.map { |i| Promises.future(i) { |i| i*2 } } +# => [<#Concurrent::Promises::Future:0x7fbfd2982738 pending>, +# <#Concurrent::Promises::Future:0x7fbfd29813b0 pending>, +# <#Concurrent::Promises::Future:0x7fbfd297bf78 pending>, +# <#Concurrent::Promises::Future:0x7fbfd297b0f0 pending>] +Promises.zip(*tasks).value! # => [0, 2, 4, 6] ``` -## periodic task +## Actor background processing + +Actors are mainly keep and isolate state, they should stay responsive not being +blocked by a longer running computations. It desirable to offload the work to +stateless promises. + +Lets define an actor which will process jobs, while staying responsive, and +tracking the number of tasks being processed. ```ruby -def schedule_job(interval, &job) - # schedule the first execution and chain restart og the job - Concurrent.schedule(interval, &job).chain do |fulfilled, continue, reason| - if fulfilled - schedule_job(interval, &job) if continue +class Computer < Concurrent::Actor::RestartingContext + def initialize + super() + @jobs = {} + end + + def on_message(msg) + command, *args = msg + case command + # new job to process + when :run + job = args[0] + @jobs[job] = envelope.future + # Process asynchronously and send message back when done. 
+ Concurrent::Promises.future(&job).chain(job) do |fulfilled, value, reason, job| + self.tell [:done, job, fulfilled, value, reason] + end + # Do not make return value of this method to be answer of this message. + # We are answering later in :done by resolving the future kept in @jobs. + Concurrent::Actor::Behaviour::MESSAGE_PROCESSED + when :done + job, fulfilled, value, reason = *args + future = @jobs.delete job + # Answer the job's result. + future.resolve fulfilled, value, reason + when :status + { running_jobs: @jobs.size } else - # handle error - p reason - # retry sooner - schedule_job(interval / 10, &job) + # Continue to fail with unknown message. + pass end end end +``` -queue = Queue.new # => # -count = 0 # => 0 -interval = 0.05 # small just not to delay execution of this example +Create the computer actor and send it 3 jobs. -schedule_job interval do - queue.push count - count += 1 - # to continue scheduling return true, false will end the task - if count < 4 - # to continue scheduling return true - true +```ruby +computer = Concurrent::Actor.spawn Computer, :computer +# => # +results = 3.times.map { computer.ask [:run, -> { sleep 0.1; :result }] } +# => [<#Concurrent::Promises::Future:0x7fbfd3130d68 pending>, +# <#Concurrent::Promises::Future:0x7fbfd312b2a0 pending>, +# <#Concurrent::Promises::Future:0x7fbfd3129d60 pending>] +computer.ask(:status).value! # => {:running_jobs=>3} +results.map(&:value!) # => [:result, :result, :result] +``` +## Too many threads / fibers + +Sometimes an application requires to process a lot of tasks concurrently. If +the number of concurrent tasks is high enough than it is not possible to create +a Thread for each of them. A partially satisfactory solution could be to use +Fibers, but that solution locks the application on MRI since other Ruby +implementations are using threads for each Fiber. 
+ +This library provides a {Concurrent::Promises::Future#run} method on a future +to simulate threads without actually accepting one all the time. The run method +is similar to {Concurrent::Promises::Future#flat} but it will keep flattening +until it's fulfilled with non future value, then the value is taken as a result +of the process simulated by `run`. + +```ruby +body = lambda do |v| + # Some computation step of the process + new_v = v + 1 + # Is the process finished? + if new_v < 5 + # Continue computing with new value, does not have to be recursive. + # It just has to return a future. + Promises.future(new_v, &body) else - # close the queue with nil to simplify reading it - queue.push nil - # to end the task return false - false + # The process is finished, fulfill the final value with `new_v`. + new_v end end - -# read the queue -arr, v = [], nil; arr << v while (v = queue.pop) -# arr has the results from the executed scheduled tasks -arr # => [0, 1, 2, 3] +Promises.future(0, &body).run.value! # => 5 ``` -## How to limit processing where there are limited resources? -By creating an actor managing the resource +This solution works well an any Ruby implementation. + +> TODO add more complete example + +## Cancellation + +### Simple + +Lets have two processes which will count until cancelled. ```ruby -DB = Concurrent::Actor::Utils::AdHoc.spawn :db do - data = Array.new(10) { |i| '*' * i } - lambda do |message| - # pretending that this queries a DB - data[message] +source, token = Concurrent::Cancellation.create +# => [<#Concurrent::Cancellation:0x7fbfd3b596c8 canceled:false>, +# <#Concurrent::Cancellation::Token:0x7fbfd3b58a70 canceled:false>] + +count_until_cancelled = -> token, count do + if token.canceled? 
+ count + else + Promises.future token, count+1, &count_until_cancelled end +end + +futures = Array.new(2) do + Promises.future(token, 0, &count_until_cancelled).run end +# => [<#Concurrent::Promises::Future:0x7fbfd3b38310 pending>, +# <#Concurrent::Promises::Future:0x7fbfd3b31628 pending>] -concurrent_jobs = 11.times.map do |v| +sleep 0.01 +source.cancel # => true +futures.map(&:value!) # => [50, 52] +``` - fulfilled_future(v). - # ask the DB with the `v`, only one at the time, rest is parallel - then_ask(DB). - # get size of the string, rejects for 11 - then(&:size). - rescue { |reason| reason.message } # translate error to value (exception, message) -end +Cancellation can also be used as event or future to log or plan re-execution. -zip(*concurrent_jobs).value! -# => [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, "undefined method `size' for nil:NilClass"] +```ruby +token.to_event.chain do + # log cancellation + # plane re-execution +end +``` + +### Parallel background processing with cancellation + +Each task tries to count to 1000 but there is a randomly failing test. The +tasks share a cancellation, when one of them fails it cancels the others. + +```ruby +source, token = Concurrent::Cancellation.create +# => [<#Concurrent::Cancellation:0x7fbfd3862c30 canceled:false>, +# <#Concurrent::Cancellation::Token:0x7fbfd38622d0 canceled:false>] +tasks = 4.times.map do |i| + Promises.future(source, token, i) do |source, token, i| + count = 0 + 1000.times do + break count = :cancelled if token.canceled? 
+ count += 1 + sleep 0.01 + if rand > 0.95 + source.cancel + raise 'random error' + end + count + end + end +end +# => [<#Concurrent::Promises::Future:0x7fbfd3852358 pending>, +# <#Concurrent::Promises::Future:0x7fbfd384b8c8 pending>, +# <#Concurrent::Promises::Future:0x7fbfd3033ed8 pending>, +# <#Concurrent::Promises::Future:0x7fbfd302bee0 pending>] +Promises.zip(*tasks).result +# => [false, +# [nil, :cancelled, :cancelled, :cancelled], +# [#, nil, nil, nil]] ``` -In reality there is often a pool though: +Without the randomly failing part it produces following. + +```ruby +source, token = Concurrent::Cancellation.create +# => [<#Concurrent::Cancellation:0x7fbfd29aa990 canceled:false>, +# <#Concurrent::Cancellation::Token:0x7fbfd29aa2b0 canceled:false>] +tasks = 4.times.map do |i| + Promises.future(source, token, i) do |source, token, i| + count = 0 + 1000.times do + break count = :cancelled if token.canceled? + count += 1 + # sleep 0.01 + # if rand > 0.95 + # source.cancel + # raise 'random error' + # end + end + count + end +end +Promises.zip(*tasks).result # => [true, [1000, 1000, 1000, 1000], nil] +``` + +## Throttling concurrency + +By creating an actor managing the resource we can control how many threads is +accessing the resource. In this case one at the time. ```ruby data = Array.new(10) { |i| '*' * i } @@ -393,11 +814,37 @@ data = Array.new(10) { |i| '*' * i } # "*******", # "********", # "*********"] +DB = Concurrent::Actor::Utils::AdHoc.spawn :db, data do |data| + lambda do |message| + # pretending that this queries a DB + data[message] + end +end + +concurrent_jobs = 11.times.map do |v| + DB. + # ask the DB with the `v`, only one at the time, rest is parallel + ask(v). + # get size of the string, rejects for 11 + then(&:size). + # translate error to a value (message of the exception) + rescue { |reason| reason.message } +end + +Promises.zip(*concurrent_jobs).value! 
+# => [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, "undefined method `size' for nil:NilClass"]
+```
+
+Often there is more than one DB connection, then the pool can be used.
+
+```ruby
 pool_size = 5                            # => 5
 
 DB_POOL = Concurrent::Actor::Utils::Pool.spawn!('DB-pool', pool_size) do |index|
   # DB connection constructor
-  Concurrent::Actor::Utils::AdHoc.spawn(name: "worker-#{index}", args: [data]) do |data|
+  Concurrent::Actor::Utils::AdHoc.spawn(
+      name: "connection-#{index}",
+      args: [data]) do |data|
     lambda do |message|
       # pretending that this queries a DB
       data[message]
@@ -406,67 +853,104 @@ DB_POOL = Concurrent::Actor::Utils::Pool.spawn!('DB-pool', pool_size) do |index|
 end
 
 concurrent_jobs = 11.times.map do |v|
-
-  fulfilled_future(v).
-      # ask the DB_POOL with the `v`, only 5 at the time, rest is parallel
-      then_ask(DB_POOL).
+  DB_POOL.
+      # ask the DB with the `v`, only one at the time, rest is parallel
+      ask(v).
+      # get size of the string, rejects for 11
       then(&:size).
-      rescue { |reason| reason.message }
+      # translate error to a value (message of the exception)
+      rescue { |reason| reason.message }
 end
 
-zip(*concurrent_jobs).value!
+Promises.zip(*concurrent_jobs).value!
 # => [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, "undefined method `size' for nil:NilClass"]
 ```
 
-# Experimental
+In other cases the DB adapter maintains its internal connection pool and we
+just need to limit concurrent access to the DB's API to avoid the calls being
+blocked.
 
-## Cancellation
+Let's pretend that the `#[]` method on `DB_INTERNAL_POOL` is using the internal
+pool of size 3.
We create throttle with the same size ```ruby -source, token = Concurrent::Cancellation.create -# => [#]>, -# @ResolveArgs=[], -# @Token= -# #>>, -# #>] +DB_INTERNAL_POOL = Concurrent::Array.new data +# => ["", +# "*", +# "**", +# "***", +# "****", +# "*****", +# "******", +# "*******", +# "********", +# "*********"] -futures = Array.new(2) do - future(token) do |token| - token.loop_until_canceled { Thread.pass } - :done - end -end -# => [<#Concurrent::Promises::Future:0x7fc5b1938ef0 pending blocks:[]>, -# <#Concurrent::Promises::Future:0x7fc5b0a1f860 pending blocks:[]>] +max_tree = Promises.throttle 3 +# => <#Concurrent::Promises::Throttle:0x7fbfd294a018 limit:3> -sleep 0.05 # => 0 -source.cancel # => true -futures.map(&:value!) # => [:done, :done] +futures = 11.times.map do |i| + max_tree. + # throttled tasks, at most 3 simultaneous calls of [] on the database + then_throttle { DB_INTERNAL_POOL[i] }. + # un-throttled tasks, unlimited concurrency + then { |starts| starts.size }. + rescue { |reason| reason.message } +end + +futures.map(&:value!) +# => [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, "undefined method `size' for nil:NilClass"] ``` -## Throttling +## Long stream of tasks + +> TODO Channel + +## Parallel enumerable ? 
+ +> TODO + +## Periodic task + +> TODO revisit, use cancellation, add to library ```ruby -data = (0..10).to_a # => [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] -max_tree = Concurrent::Throttle.new 3 -# => #, -# @Queue=#> +def schedule_job(interval, &job) + # schedule the first execution and chain restart of the job + Promises.schedule(interval, &job).chain do |fulfilled, continue, reason| + if fulfilled + schedule_job(interval, &job) if continue + else + # handle error + reason + # retry sooner + schedule_job(interval, &job) + end + end +end -futures = data.map do |data| - future(data) do |data| - # un-throttled - data + 1 - end.throttle(max_tree) do |trigger| - # throttled, imagine it uses DB connections or other limited resource - trigger.then { |v| v * 2 * 2 } +queue = Queue.new # => # +count = 0 # => 0 +interval = 0.05 # small just not to delay execution of this example + +schedule_job interval do + queue.push count + count += 1 + # to continue scheduling return true, false will end the task + if count < 4 + # to continue scheduling return true + true + else + # close the queue with nil to simplify reading it + queue.push nil + # to end the task return false + false end -end +end -futures.map(&:value!) -# => [4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44] + # read the queue +arr, v = [], nil; arr << v while (v = queue.pop) + # arr has the results from the executed scheduled tasks +arr # => [0, 1, 2, 3] ``` + diff --git a/lib/concurrent/edge/cancellation.rb b/lib/concurrent/edge/cancellation.rb index ccf803c89..30ebf9b33 100644 --- a/lib/concurrent/edge/cancellation.rb +++ b/lib/concurrent/edge/cancellation.rb @@ -1,59 +1,97 @@ module Concurrent - # TODO example: parallel jobs, cancel them all when one fails, clean-up in zip - # inspired by https://msdn.microsoft.com/en-us/library/dd537607(v=vs.110).aspx + # Provides tools for cooperative cancellation. + # Inspired by https://msdn.microsoft.com/en-us/library/dd537607(v=vs.110).aspx + # @example + # # Create new cancellation. 
`cancellation` is used for cancelling, `token` is passed down to
+  #   #   tasks for cooperative cancellation
+  #   cancellation, token = Concurrent::Cancellation.create
+  #   Thread.new(token) do |token|
+  #     # Count 1+1 (simulating some other meaningful work) repeatedly until the token is cancelled through
+  #     # cancellation.
+  #     token.loop_until_canceled { 1+1 }
+  #   end
+  #   sleep 0.1
+  #   cancellation.cancel # Stop the thread by cancelling
   class Cancellation < Synchronization::Object
     safe_initialization!
 
-    def self.create(future_or_event = Promises.resolvable_event, *resolve_args)
-      cancellation = new(future_or_event, *resolve_args)
+    # Creates the cancellation object. Returns both the cancellation and the token for convenience.
+    # @param [Object] resolve_args Arguments which are used when resolve method is called on
+    #   resolvable_future_or_event
+    # @param [Promises::Resolvable] resolvable_future_or_event resolvable used to track cancellation.
+    #   Can be retrieved by `token.to_future` or `token.to_event`.
+    # @example
+    #   cancellation, token = Concurrent::Cancellation.create
+    # @return [Array(Cancellation, Cancellation::Token)]
+    def self.create(resolvable_future_or_event = Promises.resolvable_event, *resolve_args)
+      cancellation = new(resolvable_future_or_event, *resolve_args)
       [cancellation, cancellation.token]
     end
 
     private_class_method :new
 
-    def initialize(future, *resolve_args)
-      raise ArgumentError, 'future is not Resolvable' unless future.is_a?(Promises::Resolvable)
-      @Cancel = future
-      @Token = Token.new @Cancel.with_hidden_resolvable
-      @ResolveArgs = resolve_args
-    end
-
+    # Returns the token associated with the cancellation.
+    # @return [Token]
     def token
       @Token
     end
 
+    # Cancel this cancellation. All executions depending on the token will cooperatively stop.
+    # @return [true, false]
+    # @raise when cancelling for the second time
     def cancel(raise_on_repeated_call = true)
       !!@Cancel.resolve(*@ResolveArgs, raise_on_repeated_call)
     end
 
+    # Is the cancellation cancelled?
+    # @return [true, false]
     def canceled?
       @Cancel.resolved?
     end
 
+    # Short string representation.
+    # @return [String]
+    def to_s
+      format '<#%s:0x%x canceled:%s>', self.class, object_id << 1, canceled?
+    end
+
+    alias_method :inspect, :to_s
+
+    private
+
+    def initialize(future, *resolve_args)
+      raise ArgumentError, 'future is not Resolvable' unless future.is_a?(Promises::Resolvable)
+      @Cancel = future
+      @Token = Token.new @Cancel.with_hidden_resolvable
+      @ResolveArgs = resolve_args
+    end
+
+    # Created through {Cancellation.create}, passed down to tasks to be able to check if canceled.
     class Token < Synchronization::Object
       safe_initialization!
 
-      def initialize(cancel)
-        @Cancel = cancel
-      end
-
+      # @return [Event] Event which will be resolved when the token is cancelled.
       def to_event
         @Cancel.to_event
       end
 
+      # @return [Future] Future which will be resolved when the token is cancelled with arguments passed in
+      #   {Cancellation.create} .
       def to_future
         @Cancel.to_future
       end
 
-      def on_cancellation(*args, &block)
-        @Cancel.on_resolution *args, &block
-      end
-
+      # Is the token cancelled?
+      # @return [true, false]
       def canceled?
         @Cancel.resolved?
       end
 
+      # Repeatedly evaluates block until the token is {#canceled?}.
+      # @yield to the block repeatedly.
+      # @yieldreturn [Object]
+      # @return [Object] last result of the block
       def loop_until_canceled(&block)
         until canceled?
           result = block.call
@@ -61,19 +99,37 @@ def loop_until_canceled(&block)
         result
       end
 
+      # Raise error when cancelled
+      # @param [#exception] error to be raised
+      # @raise the error
+      # @return [self]
       def raise_if_canceled(error = CancelledOperationError)
         raise error if canceled?
         self
       end
 
+      # Creates a new token which is cancelled when any of the tokens is.
+ # @param [Token] tokens to combine + # @return [Token] new token def join(*tokens, &block) block ||= -> tokens { Promises.any_event(*tokens.map(&:to_event)) } self.class.new block.call([@Cancel, *tokens]) end - end + # Short string representation. + # @return [String] + def to_s + format '<#%s:0x%x canceled:%s>', self.class, object_id << 1, canceled? + end + + alias_method :inspect, :to_s + + private - private_constant :Token + def initialize(cancel) + @Cancel = cancel + end + end # FIXME (pitr-ch 27-Mar-2016): cooperation with mutex, condition, select etc? # TODO (pitr-ch 27-Mar-2016): examples (scheduled to be cancelled in 10 sec) diff --git a/lib/concurrent/edge/promises.rb b/lib/concurrent/edge/promises.rb index 21469ee8c..45bca0ca7 100644 --- a/lib/concurrent/edge/promises.rb +++ b/lib/concurrent/edge/promises.rb @@ -6,7 +6,12 @@ module Concurrent - # {include:file:doc/promises.out.md} + + # # Guide + # + # The guide is best place to start with promises, see {file:doc/promises.out.md}. + # + # {include:file:doc/promises-main.md} module Promises # @!macro [new] promises.param.default_executor @@ -137,19 +142,19 @@ def resolved_event(default_executor = :io) # @param [nil] nil # @return [Event] resolved event. # - # @overload create(a_future = nil, default_executor = :io) + # @overload create(a_future, default_executor = :io) # @param [Future] a_future # @return [Future] a future which will be resolved when a_future is. # - # @overload create(an_event = nil, default_executor = :io) + # @overload create(an_event, default_executor = :io) # @param [Event] an_event # @return [Event] an event which will be resolved when an_event is. # - # @overload create(exception = nil, default_executor = :io) + # @overload create(exception, default_executor = :io) # @param [Exception] exception # @return [Future] a rejected future with the exception as its reason. 
# - # @overload create(value = nil, default_executor = :io) + # @overload create(value, default_executor = :io) # @param [Object] value when none of the above overloads fits # @return [Future] a fulfilled future with the value. def create(argument = nil, default_executor = :io) @@ -662,7 +667,6 @@ def resolve_with(state, raise_on_reassign = true) # @!visibility private # @return [Array] def blocks - # TODO (pitr-ch 18-Dec-2016): add macro noting that debug methods may change api without warning @Callbacks.each_with_object([]) do |(method, args), promises| promises.push(args[0]) if method == :callback_notify_blocked end @@ -1099,10 +1103,10 @@ def on_rejection_using(executor, *args, &callback) # @return [Future] # @example # body = lambda do |v| - # v += 1 - # v < 5 ? future(v, &body) : v + # v += 1 + # v < 5 ? Promises.future(v, &body) : v # end - # future(0, &body).run.value! # => 5 + # Promises.future(0, &body).run.value! # => 5 def run RunFuturePromise.new_blocked1(self, @DefaultExecutor).future end @@ -1237,7 +1241,7 @@ def reject(reason, raise_on_reassign = true) # @yieldreturn [Object] value # @return [self] def evaluate_to(*args, &block) - # TODO (pitr-ch 13-Jun-2016): add raise_on_reassign + # FIXME (pitr-ch 13-Jun-2016): add raise_on_reassign promise.evaluate_to(*args, block) end @@ -1323,20 +1327,16 @@ def initialize(default_executor) super ResolvableFuture.new(self, default_executor) end - # @!visibility private def fulfill(value, raise_on_reassign) resolve_with Fulfilled.new(value), raise_on_reassign end - # @!visibility private def reject(reason, raise_on_reassign) resolve_with Rejected.new(reason), raise_on_reassign end - # @!visibility private public :evaluate_to - # @!visibility private def evaluate_to!(*args, block) evaluate_to(*args, block).wait! 
end @@ -1348,7 +1348,6 @@ class InnerPromise < AbstractPromise # @abstract class BlockedPromise < InnerPromise - # @!visibility private private_class_method :new @@ -1405,7 +1404,6 @@ def initialize(delayed, blockers_count, future) @Countdown = AtomicFixnum.new blockers_count end - # @!visibility private def on_blocker_resolution(future, index) countdown = process_on_blocker_resolution(future, index) resolvable = resolvable?(countdown, future, index) @@ -1421,39 +1419,30 @@ def touch clear_propagate_touch if @Touched.make_true end - def clear_propagate_touch - @Delayed.clear_each { |o| propagate_touch o } if @Delayed - end - - # @!visibility private - def propagate_touch(stack_or_element = @Delayed) - if stack_or_element.is_a? LockFreeStack - stack_or_element.each { |element| propagate_touch element } - else - stack_or_element.touch unless stack_or_element.nil? # if still present - end - end - def touched? @Touched.value end - # !visibility private # TODO (pitr-ch 20-Dec-2016): does it have to be at promise methods? # for inspection only def blocked_by - # TODO (pitr-ch 18-Dec-2016): doc macro debug method - blocked_by = [] ObjectSpace.each_object(AbstractEventFuture) { |o| blocked_by.push o if o.blocks.include? self } blocked_by end - # @!visibility private - def inspect - "#{to_s[0..-2]} blocked_by:[#{ blocked_by.map(&:to_s).join(', ')}]>" + private + + def clear_propagate_touch + @Delayed.clear_each { |o| propagate_touch o } if @Delayed end - private + def propagate_touch(stack_or_element = @Delayed) + if stack_or_element.is_a? LockFreeStack + stack_or_element.each { |element| propagate_touch element } + else + stack_or_element.touch unless stack_or_element.nil? 
# if still present + end + end # @return [true,false] if resolvable def resolvable?(countdown, future, index) @@ -1479,7 +1468,6 @@ def initialize(delayed, blockers_count, default_executor, executor, args, &task) @Args = args end - # @!visibility private def executor @Executor end @@ -1742,7 +1730,7 @@ def initialize(delayed, blockers_count, default_executor) end def process_on_blocker_resolution(future, index) - # TODO (pitr-ch 18-Dec-2016): Can we assume that array will never break under parallel access when never resized? + # TODO (pitr-ch 18-Dec-2016): Can we assume that array will never break under parallel access when never re-sized? @Resolutions[index] = future.internal_state # has to be set before countdown in super super future, index end @@ -1849,14 +1837,12 @@ def delayed end class ScheduledPromise < InnerPromise - # @!visibility private def intended_time @IntendedTime end - # @!visibility private def inspect - "#{to_s[0..-2]} intended_time:[#{@IntendedTime}}>" + "#{to_s[0..-2]} intended_time: #{@IntendedTime}>" end private @@ -1917,6 +1903,8 @@ def initialize(default_executor, intended_time) end # TODO try stealing pool, each thread has it's own queue +# TODO (pitr-ch 18-Dec-2016): doc macro debug method +# TODO (pitr-ch 18-Dec-2016): add macro noting that debug methods may change api without warning ### Experimental features follow diff --git a/tasks/update_doc.rake b/tasks/update_doc.rake index a37af832f..f8dc71ad7 100644 --- a/tasks/update_doc.rake +++ b/tasks/update_doc.rake @@ -1,15 +1,60 @@ require 'yard' -YARD::Rake::YardocTask.new +require 'md_ruby_eval' + +module YARD + module Templates::Helpers + # The helper module for HTML templates. 
+ module HtmlHelper + def signature_types(meth, link = true) + meth = convert_method_to_overload(meth) + if meth.respond_to?(:object) && !meth.has_tag?(:return) + meth = meth.object + end + + type = options.default_return || "" + if meth.tag(:return) && meth.tag(:return).types + types = meth.tags(:return).map { |t| t.types ? t.types : [] }.flatten.uniq + first = link ? h(types.first) : format_types([types.first], false) + # if types.size == 2 && types.last == 'nil' + # type = first + '?' + # elsif types.size == 2 && types.last =~ /^(Array)?<#{Regexp.quote types.first}>$/ + # type = first + '+' + # elsif types.size > 2 + # type = [first, '...'].join(', ') + if types == ['void'] && options.hide_void_return + type = "" + else + type = link ? h(types.join(", ")) : format_types(types, false) + end + elsif !type.empty? + type = link ? h(type) : format_types([type], false) + end + type = "(#{type}) " unless type.empty? + type + end + end + end +end root = File.expand_path File.join(File.dirname(__FILE__), '..') +task yard: %w(yard:preprocess yard:doc) + namespace :yard do + YARD::Rake::YardocTask.new(:doc) + cmd = lambda do |command| puts ">> executing: #{command}" system command or raise "#{command} failed" end + task :preprocess do + Dir.chdir File.join(__dir__, '..', 'doc') do + cmd.call 'bundle exec md-ruby-eval --auto' or raise + end + end + desc 'Pushes generated documentation to github pages: http://ruby-concurrency.github.io/concurrent-ruby/' task :push => [:setup, :yard] do diff --git a/yard-template/default/fulldoc/html/css/common.css b/yard-template/default/fulldoc/html/css/common.css index dfd9d858a..f7f7f98b8 100644 --- a/yard-template/default/fulldoc/html/css/common.css +++ b/yard-template/default/fulldoc/html/css/common.css @@ -4,6 +4,16 @@ body { line-height: 18px; } +.docstring h1:before { + content: '# '; + color: silver; +} + +.docstring h2:before { + content: '## '; + color: silver; +} + .docstring code, .docstring .object_link a, #filecontents code { 
padding: 0px 3px 1px 3px; border: 1px solid #eef; From fa5417e185707519f7885e9ce98ac5a77c395c02 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Fri, 23 Dec 2016 19:25:15 +0100 Subject: [PATCH 54/68] Polishing throttling --- doc/promises.in.md | 4 +- doc/promises.out.md | 112 +++++++------ lib/concurrent/edge/promises.rb | 217 +++++++++++++++----------- spec/concurrent/edge/promises_spec.rb | 64 +++----- 4 files changed, 209 insertions(+), 188 deletions(-) diff --git a/doc/promises.in.md b/doc/promises.in.md index 2a02149d0..c5bd87007 100644 --- a/doc/promises.in.md +++ b/doc/promises.in.md @@ -757,12 +757,12 @@ pool of size 3. We create throttle with the same size ```ruby DB_INTERNAL_POOL = Concurrent::Array.new data -max_tree = Promises.throttle 3 +max_tree = Promises::Throttle.new 3 futures = 11.times.map do |i| max_tree. # throttled tasks, at most 3 simultaneous calls of [] on the database - then_throttle { DB_INTERNAL_POOL[i] }. + then_throttled { DB_INTERNAL_POOL[i] }. # un-throttled tasks, unlimited concurrency then { |starts| starts.size }. rescue { |reason| reason.message } diff --git a/doc/promises.out.md b/doc/promises.out.md index 57aec0026..7307dad73 100644 --- a/doc/promises.out.md +++ b/doc/promises.out.md @@ -8,8 +8,7 @@ composition. ```ruby Concurrent::Promises::FactoryMethods.instance_methods false -# => [:select, -# :zip, +# => [:zip, # :create, # :delay, # :future, @@ -35,8 +34,7 @@ Concurrent::Promises::FactoryMethods.instance_methods false # :any_fulfilled_future, # :any_fulfilled_future_on, # :any_event, -# :any_event_on, -# :throttle] +# :any_event_on] ``` The module can be included or extended where needed. 
@@ -49,17 +47,17 @@ Class.new do resolvable_event end end.new.a_method -# => <#Concurrent::Promises::ResolvableEvent:0x7fbfd2d2a890 pending> +# => <#Concurrent::Promises::ResolvableEvent:0x7fa95ca52488 pending> Module.new { extend Concurrent::Promises::FactoryMethods }.resolvable_event -# => <#Concurrent::Promises::ResolvableEvent:0x7fbfd2d28978 pending> +# => <#Concurrent::Promises::ResolvableEvent:0x7fa95ca4a9e0 pending> ``` The module is already extended into {Concurrent::Promises} for convenience. ```ruby Concurrent::Promises.resolvable_event -# => <#Concurrent::Promises::ResolvableEvent:0x7fbfd2d197c0 pending> +# => <#Concurrent::Promises::ResolvableEvent:0x7fa95ca48708 pending> ``` For this guide we introduce a shortcut in `main` so we can call the factory @@ -68,7 +66,7 @@ methods in following examples by using `Promisses` directly. ```ruby Promises = Concurrent::Promises Promises.resolvable_event -# => <#Concurrent::Promises::ResolvableEvent:0x7fbfd2d12628 pending> +# => <#Concurrent::Promises::ResolvableEvent:0x7fa95ca41d18 pending> ``` ## Asynchronous task @@ -84,7 +82,7 @@ future = Promises.future(0.1) do |duration| sleep duration :result end -# => <#Concurrent::Promises::Future:0x7fbfd2d027c8 pending> +# => <#Concurrent::Promises::Future:0x7fa95b3eb410 pending> ``` Asks if the future is resolved, here it will be still in the middle of the @@ -105,7 +103,7 @@ If the task fails we talk about the future being rejected. ```ruby future = Promises.future { raise 'Boom' } -# => <#Concurrent::Promises::Future:0x7fbfd2cf1c70 pending> +# => <#Concurrent::Promises::Future:0x7fa95b3dae58 pending> ``` There is no result, the future was rejected with a reason. @@ -200,20 +198,20 @@ through evaluation as follows. ```ruby Promises.future { :value } -# => <#Concurrent::Promises::Future:0x7fbfd2c69ca8 pending> +# => <#Concurrent::Promises::Future:0x7fa95b89f510 pending> ``` Instead it can be created directly. 
```ruby Promises.fulfilled_future(:value) -# => <#Concurrent::Promises::Future:0x7fbfd2c61a30 fulfilled> +# => <#Concurrent::Promises::Future:0x7fa95b390bc8 fulfilled> Promises.rejected_future(StandardError.new('Ups')) -# => <#Concurrent::Promises::Future:0x7fbfd2c606f8 rejected> +# => <#Concurrent::Promises::Future:0x7fa95b38b628 rejected> Promises.resolved_future(true, :value, nil) -# => <#Concurrent::Promises::Future:0x7fbfd2c5b1a8 fulfilled> +# => <#Concurrent::Promises::Future:0x7fa95b38a688 fulfilled> Promises.resolved_future(false, nil, StandardError.new('Ups')) -# => <#Concurrent::Promises::Future:0x7fbfd2c591f0 rejected> +# => <#Concurrent::Promises::Future:0x7fa95b3892b0 rejected> ``` ## Chaining @@ -254,9 +252,9 @@ do_stuff arg }`) is **required**, both following examples may break. ```ruby arg = 1 # => 1 Thread.new { do_stuff arg } -# => # +# => # Promises.future { do_stuff arg } -# => <#Concurrent::Promises::Future:0x7fbfd2bebf10 pending> +# => <#Concurrent::Promises::Future:0x7fa95b321020 pending> ``` ## Branching, and zipping @@ -318,7 +316,7 @@ Promises. result # => [false, # nil, -# #>] +# #>] ``` As `then` chained tasks execute only on fulfilled futures, there is a `rescue` @@ -366,7 +364,7 @@ Zip is rejected if any of the zipped futures is. rejected_zip = Promises.zip( Promises.fulfilled_future(1), Promises.rejected_future(StandardError.new('Ups'))) -# => <#Concurrent::Promises::Future:0x7fbfd2b12dc8 rejected> +# => <#Concurrent::Promises::Future:0x7fa95b2428e8 rejected> rejected_zip.result # => [false, [1, nil], [nil, #]] rejected_zip. @@ -381,11 +379,11 @@ requiring resolution. ```ruby future = Promises.delay { sleep 0.1; 'lazy' } -# => <#Concurrent::Promises::Future:0x7fbfd2af8f68 pending> +# => <#Concurrent::Promises::Future:0x7fa95b229eb0 pending> sleep 0.1 future.resolved? 
# => false future.touch -# => <#Concurrent::Promises::Future:0x7fbfd2af8f68 pending> +# => <#Concurrent::Promises::Future:0x7fa95b229eb0 pending> sleep 0.2 future.resolved? # => true ``` @@ -462,7 +460,7 @@ Schedule task to be executed in 0.1 seconds. ```ruby scheduled = Promises.schedule(0.1) { 1 } -# => <#Concurrent::Promises::Future:0x7fbfd2a72490 pending> +# => <#Concurrent::Promises::Future:0x7fa95c9b1ce0 pending> scheduled.resolved? # => false ``` @@ -487,7 +485,7 @@ Time can be used as well. ```ruby Promises.schedule(Time.now + 10) { :val } -# => <#Concurrent::Promises::Future:0x7fbfd41b0698 pending> +# => <#Concurrent::Promises::Future:0x7fa95c972ae0 pending> ``` ## Resolvable Future and Event: @@ -499,7 +497,7 @@ Sometimes it is required to resolve a future externally, in these cases ```ruby future = Promises.resolvable_future -# => <#Concurrent::Promises::ResolvableFuture:0x7fbfd2a5a480 pending> +# => <#Concurrent::Promises::ResolvableFuture:0x7fa95c970e98 pending> ``` The thread will be blocked until the future is resolved @@ -507,7 +505,7 @@ The thread will be blocked until the future is resolved ```ruby thread = Thread.new { future.value } future.fulfill 1 -# => <#Concurrent::Promises::ResolvableFuture:0x7fbfd2a5a480 fulfilled> +# => <#Concurrent::Promises::ResolvableFuture:0x7fa95c970e98 fulfilled> thread.value # => 1 ``` @@ -524,9 +522,9 @@ future.fulfill 2, false # => false ## Callbacks ```ruby -queue = Queue.new # => # +queue = Queue.new # => # future = Promises.delay { 1 + 1 } -# => <#Concurrent::Promises::Future:0x7fbfd2a30f90 pending> +# => <#Concurrent::Promises::Future:0x7fa95e0547b8 pending> future.on_fulfillment { queue << 1 } # evaluated asynchronously future.on_fulfillment! { queue << 2 } # evaluated on resolving thread @@ -549,7 +547,7 @@ and `:io` for blocking and long tasks. ```ruby Promises.future_on(:fast) { 2 }. then_on(:io) { File.read __FILE__ }. 
- value.size # => 18754 + value.size # => 18760 ``` # Interoperability @@ -562,7 +560,7 @@ Create an actor which takes received numbers and returns the number squared. actor = Concurrent::Actor::Utils::AdHoc.spawn :square do -> v { v ** 2 } end -# => # +# => # ``` Send result of `1+1` to the actor, and add 2 to the result send back from the @@ -594,17 +592,17 @@ actor.ask(2).then(&:succ).value! # => 5 ```ruby Promises.future { do_stuff } -# => <#Concurrent::Promises::Future:0x7fbfd298a758 pending> +# => <#Concurrent::Promises::Future:0x7fa95b1eb8e0 pending> ``` ## Parallel background processing ```ruby tasks = 4.times.map { |i| Promises.future(i) { |i| i*2 } } -# => [<#Concurrent::Promises::Future:0x7fbfd2982738 pending>, -# <#Concurrent::Promises::Future:0x7fbfd29813b0 pending>, -# <#Concurrent::Promises::Future:0x7fbfd297bf78 pending>, -# <#Concurrent::Promises::Future:0x7fbfd297b0f0 pending>] +# => [<#Concurrent::Promises::Future:0x7fa95b1e2e48 pending>, +# <#Concurrent::Promises::Future:0x7fa95b1e1f70 pending>, +# <#Concurrent::Promises::Future:0x7fa95b1e1188 pending>, +# <#Concurrent::Promises::Future:0x7fa95b1e0198 pending>] Promises.zip(*tasks).value! # => [0, 2, 4, 6] ``` @@ -657,11 +655,11 @@ Create the computer actor and send it 3 jobs. ```ruby computer = Concurrent::Actor.spawn Computer, :computer -# => # +# => # results = 3.times.map { computer.ask [:run, -> { sleep 0.1; :result }] } -# => [<#Concurrent::Promises::Future:0x7fbfd3130d68 pending>, -# <#Concurrent::Promises::Future:0x7fbfd312b2a0 pending>, -# <#Concurrent::Promises::Future:0x7fbfd3129d60 pending>] +# => [<#Concurrent::Promises::Future:0x7fa95a3d30c8 pending>, +# <#Concurrent::Promises::Future:0x7fa95a3d0e40 pending>, +# <#Concurrent::Promises::Future:0x7fa95a3cb760 pending>] computer.ask(:status).value! # => {:running_jobs=>3} results.map(&:value!) # => [:result, :result, :result] ``` @@ -708,8 +706,8 @@ Lets have two processes which will count until cancelled. 
```ruby source, token = Concurrent::Cancellation.create -# => [<#Concurrent::Cancellation:0x7fbfd3b596c8 canceled:false>, -# <#Concurrent::Cancellation::Token:0x7fbfd3b58a70 canceled:false>] +# => [<#Concurrent::Cancellation:0x7fa95a223840 canceled:false>, +# <#Concurrent::Cancellation::Token:0x7fa95a2222b0 canceled:false>] count_until_cancelled = -> token, count do if token.canceled? @@ -722,12 +720,12 @@ end futures = Array.new(2) do Promises.future(token, 0, &count_until_cancelled).run end -# => [<#Concurrent::Promises::Future:0x7fbfd3b38310 pending>, -# <#Concurrent::Promises::Future:0x7fbfd3b31628 pending>] +# => [<#Concurrent::Promises::Future:0x7fa95b1b8a08 pending>, +# <#Concurrent::Promises::Future:0x7fa95b1aa110 pending>] sleep 0.01 source.cancel # => true -futures.map(&:value!) # => [50, 52] +futures.map(&:value!) # => [65, 66] ``` Cancellation can also be used as event or future to log or plan re-execution. @@ -746,8 +744,8 @@ tasks share a cancellation, when one of them fails it cancels the others. 
```ruby source, token = Concurrent::Cancellation.create -# => [<#Concurrent::Cancellation:0x7fbfd3862c30 canceled:false>, -# <#Concurrent::Cancellation::Token:0x7fbfd38622d0 canceled:false>] +# => [<#Concurrent::Cancellation:0x7fa95c9c8c60 canceled:false>, +# <#Concurrent::Cancellation::Token:0x7fa95c9c8710 canceled:false>] tasks = 4.times.map do |i| Promises.future(source, token, i) do |source, token, i| count = 0 @@ -763,22 +761,22 @@ tasks = 4.times.map do |i| end end end -# => [<#Concurrent::Promises::Future:0x7fbfd3852358 pending>, -# <#Concurrent::Promises::Future:0x7fbfd384b8c8 pending>, -# <#Concurrent::Promises::Future:0x7fbfd3033ed8 pending>, -# <#Concurrent::Promises::Future:0x7fbfd302bee0 pending>] +# => [<#Concurrent::Promises::Future:0x7fa95c9a1818 pending>, +# <#Concurrent::Promises::Future:0x7fa95c9a0aa8 pending>, +# <#Concurrent::Promises::Future:0x7fa95c98bb30 pending>, +# <#Concurrent::Promises::Future:0x7fa95c98aed8 pending>] Promises.zip(*tasks).result # => [false, -# [nil, :cancelled, :cancelled, :cancelled], -# [#, nil, nil, nil]] +# [nil, :cancelled, :cancelled, nil], +# [#, nil, nil, #]] ``` Without the randomly failing part it produces following. ```ruby source, token = Concurrent::Cancellation.create -# => [<#Concurrent::Cancellation:0x7fbfd29aa990 canceled:false>, -# <#Concurrent::Cancellation::Token:0x7fbfd29aa2b0 canceled:false>] +# => [<#Concurrent::Cancellation:0x7fa95ca897d0 canceled:false>, +# <#Concurrent::Cancellation::Token:0x7fa95ca83c68 canceled:false>] tasks = 4.times.map do |i| Promises.future(source, token, i) do |source, token, i| count = 0 @@ -886,13 +884,13 @@ DB_INTERNAL_POOL = Concurrent::Array.new data # "********", # "*********"] -max_tree = Promises.throttle 3 -# => <#Concurrent::Promises::Throttle:0x7fbfd294a018 limit:3> +max_tree = Promises::Throttle.new 3 +# => <#Concurrent::Promises::Throttle:0x7fa95b2d0cb0 limit:3 can_run:3> futures = 11.times.map do |i| max_tree. 
# throttled tasks, at most 3 simultaneous calls of [] on the database - then_throttle { DB_INTERNAL_POOL[i] }. + then_throttled { DB_INTERNAL_POOL[i] }. # un-throttled tasks, unlimited concurrency then { |starts| starts.size }. rescue { |reason| reason.message } @@ -929,7 +927,7 @@ def schedule_job(interval, &job) end end -queue = Queue.new # => # +queue = Queue.new # => # count = 0 # => 0 interval = 0.05 # small just not to delay execution of this example diff --git a/lib/concurrent/edge/promises.rb b/lib/concurrent/edge/promises.rb index 45bca0ca7..2df173ed4 100644 --- a/lib/concurrent/edge/promises.rb +++ b/lib/concurrent/edge/promises.rb @@ -1906,101 +1906,86 @@ def initialize(default_executor, intended_time) # TODO (pitr-ch 18-Dec-2016): doc macro debug method # TODO (pitr-ch 18-Dec-2016): add macro noting that debug methods may change api without warning -### Experimental features follow - module Concurrent module Promises - module FactoryMethods - - # @!visibility private - - # only proof of concept - # @return [Future] - def select(*channels) - # TODO (pitr-ch 26-Mar-2016): re-do, has to be non-blocking - future do - # noinspection RubyArgCount - Channel.select do |s| - channels.each do |ch| - s.take(ch) { |value| [value, ch] } - end - end - end - end - end class Future < AbstractEventFuture - # @!visibility private - - # Zips with selected value form the suplied channels - # @return [Future] - def then_select(*channels) - future = Concurrent::Promises.select(*channels) - ZipFuturesPromise.new_blocked2(self, future, @DefaultExecutor).future - end - - # @note may block - # @note only proof of concept - def then_put(channel) - on_fulfillment(:io) { |value| channel.put value } - end - - # Asks the actor with its value. - # @return [Future] new future with the response form the actor - def then_ask(actor) - self.then { |v| actor.ask(v) }.flat + module ActorIntegration + # Asks the actor with its value. 
+ # @return [Future] new future with the response form the actor + def then_ask(actor) + self.then { |v| actor.ask(v) }.flat + end end - # include Enumerable - # - # def each(&block) - # each_body self.value, &block - # end - # - # def each!(&block) - # each_body self.value!, &block - # end - # - # private - # - # def each_body(value, &block) - # (value.nil? ? [nil] : Array(value)).each(&block) - # end - + include ActorIntegration end + # A tool manage concurrency level of future tasks. + # @example With futures + # data = (1..5).to_a + # db = data.reduce({}) { |h, v| h.update v => v.to_s } + # max_two = Promises.throttle 2 + # + # futures = data.map do |data| + # Promises.future(data) { |data| + # # un-throttled, concurrency level equal data.size + # data + 1 + # }.then_throttle(max_two, db) { |v, db| + # # throttled, only 2 tasks executed at the same time + # # e.g. limiting access to db + # db[v] + # } + # end + # + # futures.map(&:value!) # => [2, 3, 4, 5, nil] + # + # @example With Threads + # max_two = Concurrent::Throttle.new 2 + # 5.timse class Throttle < Synchronization::Object + # TODO (pitr-ch 23-Dec-2016): move into different file + # TODO (pitr-ch 23-Dec-2016): move to Concurrent space + # TODO (pitr-ch 21-Dec-2016): consider using sized channel for implementation instead when available safe_initialization! private *attr_atomic(:can_run) - def initialize(max) + # New throttle. + # @param [Integer] limit + def initialize(limit) super() - self.can_run = max + @Limit = limit + self.can_run = limit @Queue = LockFreeQueue.new end - def throttle(future = nil, &throttled_future) - if block_given? - trigger = future ? (new_trigger & future) : new_trigger - throttled_future.call(trigger).on_resolution! { done } - else - new_trigger - end + # @return [Integer] The limit. 
+ def limit + @Limit end - def then_throttle(&task) - throttle { |trigger| trigger.then &task } + def trigger + while true + current_can_run = can_run + if compare_and_set_can_run current_can_run, current_can_run - 1 + if current_can_run > 0 + return Promises.resolved_event + else + event = Promises.resolvable_event + @Queue.push event + return event + end + end + end end - private - - def done + def release while true current_can_run = can_run if compare_and_set_can_run current_can_run, current_can_run + 1 - if current_can_run <= 0 + if current_can_run < 0 Thread.pass until (trigger = @Queue.pop) trigger.resolve end @@ -2009,41 +1994,95 @@ def done end end - def new_trigger - while true - current_can_run = can_run - if compare_and_set_can_run current_can_run, current_can_run - 1 - if current_can_run > 0 - return Promises.resolved_event - else - event = Promises.resolvable_event - @Queue.push event - return event - end - end + # @return [String] Short string representation. + def to_s + format '<#%s:0x%x limit:%s can_run:%d>', self.class, object_id << 1, @Limit, can_run + end + + alias_method :inspect, :to_s + + module PromisesIntegration + # TODO (pitr-ch 23-Dec-2016): apply similar pattern elsewhere + + def throttled(&throttled_futures) + throttled_futures.call(trigger).on_resolution! { release } + end + + def then_throttled(*args, &task) + trigger.then(*args, &task).on_resolution! { release } end end + + include PromisesIntegration end class AbstractEventFuture < Synchronization::Object + module ThrottleIntegration + def throttled_by(throttle, &throttled_futures) + a_trigger = throttle.trigger & self + throttled_futures.call(a_trigger).on_resolution! 
{ throttle.release } + end - def throttle(throttle, &throttled_future) - throttle.throttle(self, &throttled_future) + def then_throttled_by(throttle, *args, &block) + throttled_by(throttle) { |trigger| trigger.then(*args, &block) } + end end - def then_throttle(throttle, &block) - throttle(throttle) { |trigger| trigger.then &block } + include ThrottleIntegration + end + + ### Experimental features follow + + module FactoryMethods + + # @!visibility private + + module ChannelIntegration + + # @!visibility private + + # only proof of concept + # @return [Future] + def select(*channels) + # TODO (pitr-ch 26-Mar-2016): re-do, has to be non-blocking + future do + # noinspection RubyArgCount + Channel.select do |s| + channels.each do |ch| + s.take(ch) { |value| [value, ch] } + end + end + end + end end + include ChannelIntegration end - module FactoryMethods + class Future < AbstractEventFuture # @!visibility private - def throttle(count) - Promises::Throttle.new count + module ChannelIntegration + + # @!visibility private + + # Zips with selected value form the suplied channels + # @return [Future] + def then_select(*channels) + future = Concurrent::Promises.select(*channels) + ZipFuturesPromise.new_blocked_by2(self, future, @DefaultExecutor).future + end + + # @note may block + # @note only proof of concept + def then_put(channel) + on_fulfillment_using(:io, channel) { |value, channel| channel.put value } + end end + + include ChannelIntegration end + end end diff --git a/spec/concurrent/edge/promises_spec.rb b/spec/concurrent/edge/promises_spec.rb index 895bceca4..7494c48b2 100644 --- a/spec/concurrent/edge/promises_spec.rb +++ b/spec/concurrent/edge/promises_spec.rb @@ -227,7 +227,6 @@ def behaves_as_delay(delay, value) end end - describe '.zip_events' do it 'waits for all and returns event' do a = fulfilled_future 1 @@ -489,55 +488,40 @@ def behaves_as_delay(delay, value) describe 'Throttling' do specify do - max_tree = Concurrent::Promises::Throttle.new 3 + limit = 
4 + throttle = Concurrent::Promises::Throttle.new limit counter = Concurrent::AtomicFixnum.new testing = -> *args do counter.increment - sleep 0.01 + sleep rand * 0.1 + 0.1 # returns less then 3 since it's throttled - counter.decrement + v = counter.decrement + 1 + v end - expect(Concurrent::Promises.zip( - *12.times.map do |i| - max_tree.throttle { |trigger| trigger.then &testing } - end).value!.all? { |v| v <= 3 }).to be_truthy + expect(p(Concurrent::Promises.zip( + *20.times.map do |i| + throttle.throttled { |trigger| trigger.then(throttle, &testing) } + end).value!).all? { |v| v <= limit }).to be_truthy + + expect(p(Concurrent::Promises.zip( + *20.times.map do |i| + throttle.then_throttled(throttle, &testing) + end).value!).all? { |v| v <= limit }).to be_truthy - expect(Concurrent::Promises.zip( - *12.times.map do |i| + expect(p(Concurrent::Promises.zip( + *20.times.map do |i| Concurrent::Promises. fulfilled_future(i). - throttle(max_tree) { |trigger| trigger.then &testing } - end).value!.all? { |v| v <= 3 }).to be_truthy - end - - specify do - max_five = Concurrent::Promises::Throttle.new 5 - jobs = 20.times.map do |i| - max_five.throttle do |trigger| - # trigger is an event, has same chain-able capabilities as current promise - trigger.then do - # at any given time there max 5 simultaneous executions of this block - the_work = i * 2 - end - end - end - result = Concurrent::Promises.zip_futures(*jobs) - p result.value! - # => [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38] - end + throttled_by(throttle) { |trigger| trigger.then(throttle, &testing) } + end).value!).all? { |v| v <= limit }).to be_truthy - specify do - max_five = Concurrent::Promises::Throttle.new 5 - jobs = 20.times.map do |i| - max_five.then_throttle do - # at any given time there max 5 simultaneous executions of this block - the_work = i * 2 - end # returns promise - end - result = Concurrent::Promises.zip_futures(*jobs) - p result.value! 
- # => [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38] + expect(p(Concurrent::Promises.zip( + *20.times.map do |i| + Concurrent::Promises. + fulfilled_future(i). + then_throttled_by(throttle, throttle, &testing) + end).value!).all? { |v| v <= limit }).to be_truthy end end end From 79c835c77eb421222d0305c4ab322b607cc33dd1 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Fri, 23 Dec 2016 21:18:09 +0100 Subject: [PATCH 55/68] Move throttle to its own file --- lib/concurrent/edge/promises.rb | 139 +++++++------------------------- lib/concurrent/edge/throttle.rb | 110 +++++++++++++++++++++++++ 2 files changed, 140 insertions(+), 109 deletions(-) create mode 100644 lib/concurrent/edge/throttle.rb diff --git a/lib/concurrent/edge/promises.rb b/lib/concurrent/edge/promises.rb index 2df173ed4..bbdd005fb 100644 --- a/lib/concurrent/edge/promises.rb +++ b/lib/concurrent/edge/promises.rb @@ -14,6 +14,35 @@ module Concurrent # {include:file:doc/promises-main.md} module Promises + # TODO (pitr-ch 23-Dec-2016): move out + # @!visibility private + module ReInclude + def included(base) + included_into << [:include, base] + super(base) + end + + def extended(base) + included_into << [:extend, base] + super(base) + end + + def include(*modules) + super(*modules) + modules.reverse.each do |module_being_included| + included_into.each do |method, mod| + mod.send method, module_being_included + end + end + end + + private + + def included_into + @included_into ||= [] + end + end + # @!macro [new] promises.param.default_executor # @param [Executor, :io, :fast] default_executor Instance of an executor or a name of the # global executor. Default executor propagates to chained futures unless overridden with @@ -46,6 +75,7 @@ module Promises # Container of all {Future}, {Event} factory methods. They are never constructed directly with # new. 
module FactoryMethods + extend ReInclude # @!macro promises.shortcut.on # @return [ResolvableEvent] @@ -1922,115 +1952,6 @@ def then_ask(actor) include ActorIntegration end - # A tool manage concurrency level of future tasks. - # @example With futures - # data = (1..5).to_a - # db = data.reduce({}) { |h, v| h.update v => v.to_s } - # max_two = Promises.throttle 2 - # - # futures = data.map do |data| - # Promises.future(data) { |data| - # # un-throttled, concurrency level equal data.size - # data + 1 - # }.then_throttle(max_two, db) { |v, db| - # # throttled, only 2 tasks executed at the same time - # # e.g. limiting access to db - # db[v] - # } - # end - # - # futures.map(&:value!) # => [2, 3, 4, 5, nil] - # - # @example With Threads - # max_two = Concurrent::Throttle.new 2 - # 5.timse - class Throttle < Synchronization::Object - # TODO (pitr-ch 23-Dec-2016): move into different file - # TODO (pitr-ch 23-Dec-2016): move to Concurrent space - # TODO (pitr-ch 21-Dec-2016): consider using sized channel for implementation instead when available - - safe_initialization! - private *attr_atomic(:can_run) - - # New throttle. - # @param [Integer] limit - def initialize(limit) - super() - @Limit = limit - self.can_run = limit - @Queue = LockFreeQueue.new - end - - # @return [Integer] The limit. - def limit - @Limit - end - - def trigger - while true - current_can_run = can_run - if compare_and_set_can_run current_can_run, current_can_run - 1 - if current_can_run > 0 - return Promises.resolved_event - else - event = Promises.resolvable_event - @Queue.push event - return event - end - end - end - end - - def release - while true - current_can_run = can_run - if compare_and_set_can_run current_can_run, current_can_run + 1 - if current_can_run < 0 - Thread.pass until (trigger = @Queue.pop) - trigger.resolve - end - return self - end - end - end - - # @return [String] Short string representation. 
- def to_s - format '<#%s:0x%x limit:%s can_run:%d>', self.class, object_id << 1, @Limit, can_run - end - - alias_method :inspect, :to_s - - module PromisesIntegration - # TODO (pitr-ch 23-Dec-2016): apply similar pattern elsewhere - - def throttled(&throttled_futures) - throttled_futures.call(trigger).on_resolution! { release } - end - - def then_throttled(*args, &task) - trigger.then(*args, &task).on_resolution! { release } - end - end - - include PromisesIntegration - end - - class AbstractEventFuture < Synchronization::Object - module ThrottleIntegration - def throttled_by(throttle, &throttled_futures) - a_trigger = throttle.trigger & self - throttled_futures.call(a_trigger).on_resolution! { throttle.release } - end - - def then_throttled_by(throttle, *args, &block) - throttled_by(throttle) { |trigger| trigger.then(*args, &block) } - end - end - - include ThrottleIntegration - end - ### Experimental features follow module FactoryMethods diff --git a/lib/concurrent/edge/throttle.rb b/lib/concurrent/edge/throttle.rb new file mode 100644 index 000000000..66ecaa1c4 --- /dev/null +++ b/lib/concurrent/edge/throttle.rb @@ -0,0 +1,110 @@ +module Concurrent + + # A tool manage concurrency level of future tasks. + # @example With futures + # data = (1..5).to_a + # db = data.reduce({}) { |h, v| h.update v => v.to_s } + # max_two = Promises.throttle 2 + # + # futures = data.map do |data| + # Promises.future(data) { |data| + # # un-throttled, concurrency level equal data.size + # data + 1 + # }.then_throttle(max_two, db) { |v, db| + # # throttled, only 2 tasks executed at the same time + # # e.g. limiting access to db + # db[v] + # } + # end + # + # futures.map(&:value!) 
# => [2, 3, 4, 5, nil] + # + # @example With Threads + # # TODO (pitr-ch 23-Dec-2016): thread example, add blocking block method for threads + class Throttle < Synchronization::Object + # TODO (pitr-ch 21-Dec-2016): consider using sized channel for implementation instead when available + + safe_initialization! + private *attr_atomic(:can_run) + + # New throttle. + # @param [Integer] limit + def initialize(limit) + super() + @Limit = limit + self.can_run = limit + @Queue = LockFreeQueue.new + end + + # @return [Integer] The limit. + def limit + @Limit + end + + def trigger + while true + current_can_run = can_run + if compare_and_set_can_run current_can_run, current_can_run - 1 + if current_can_run > 0 + return Promises.resolved_event + else + event = Promises.resolvable_event + @Queue.push event + return event + end + end + end + end + + def release + while true + current_can_run = can_run + if compare_and_set_can_run current_can_run, current_can_run + 1 + if current_can_run < 0 + Thread.pass until (trigger = @Queue.pop) + trigger.resolve + end + return self + end + end + end + + # @return [String] Short string representation. + def to_s + format '<#%s:0x%x limit:%s can_run:%d>', self.class, object_id << 1, @Limit, can_run + end + + alias_method :inspect, :to_s + + module PromisesIntegration + + def throttled(&throttled_futures) + throttled_futures.call(trigger).on_resolution! { release } + end + + def then_throttled(*args, &task) + trigger.then(*args, &task).on_resolution! { release } + end + end + + include PromisesIntegration + end + + module Promises + + class AbstractEventFuture < Synchronization::Object + module ThrottleIntegration + def throttled_by(throttle, &throttled_futures) + a_trigger = throttle.trigger & self + throttled_futures.call(a_trigger).on_resolution! 
{ throttle.release } + end + + def then_throttled_by(throttle, *args, &block) + throttled_by(throttle) { |trigger| trigger.then(*args, &block) } + end + end + + include ThrottleIntegration + end + end +end From c3c9a47c3139c21e605ccbf627031db2dbe7b456 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Fri, 23 Dec 2016 21:19:44 +0100 Subject: [PATCH 56/68] Better names for new_blocked methods --- lib/concurrent/edge/promises.rb | 61 +++++++++++++++++---------------- 1 file changed, 31 insertions(+), 30 deletions(-) diff --git a/lib/concurrent/edge/promises.rb b/lib/concurrent/edge/promises.rb index bbdd005fb..3abbce306 100644 --- a/lib/concurrent/edge/promises.rb +++ b/lib/concurrent/edge/promises.rb @@ -9,7 +9,7 @@ module Concurrent # # Guide # - # The guide is best place to start with promises, see {file:doc/promises.out.md}. + # The guide is **best place** to start with promises, see {file:doc/promises.out.md}. # # {include:file:doc/promises-main.md} module Promises @@ -249,7 +249,7 @@ def zip_futures(*futures_and_or_events) # @param [AbstractEventFuture] futures_and_or_events # @return [Future] def zip_futures_on(default_executor, *futures_and_or_events) - ZipFuturesPromise.new_blocked(futures_and_or_events, default_executor).future + ZipFuturesPromise.new_blocked_by(futures_and_or_events, default_executor).future end alias_method :zip, :zip_futures @@ -267,7 +267,7 @@ def zip_events(*futures_and_or_events) # @param [AbstractEventFuture] futures_and_or_events # @return [Event] def zip_events_on(default_executor, *futures_and_or_events) - ZipEventsPromise.new_blocked(futures_and_or_events, default_executor).event + ZipEventsPromise.new_blocked_by(futures_and_or_events, default_executor).event end # @!macro promises.shortcut.on @@ -289,7 +289,7 @@ def any_resolved_future(*futures_and_or_events) # @param [AbstractEventFuture] futures_and_or_events # @return [Future] def any_resolved_future_on(default_executor, *futures_and_or_events) - 
AnyResolvedFuturePromise.new_blocked(futures_and_or_events, default_executor).future + AnyResolvedFuturePromise.new_blocked_by(futures_and_or_events, default_executor).future end # @!macro promises.shortcut.on @@ -308,7 +308,7 @@ def any_fulfilled_future(*futures_and_or_events) # @param [AbstractEventFuture] futures_and_or_events # @return [Future] def any_fulfilled_future_on(default_executor, *futures_and_or_events) - AnyFulfilledFuturePromise.new_blocked(futures_and_or_events, default_executor).future + AnyFulfilledFuturePromise.new_blocked_by(futures_and_or_events, default_executor).future end # @!macro promises.shortcut.on @@ -324,7 +324,7 @@ def any_event(*futures_and_or_events) # @param [AbstractEventFuture] futures_and_or_events # @return [Event] def any_event_on(default_executor, *futures_and_or_events) - AnyResolvedEventPromise.new_blocked(futures_and_or_events, default_executor).event + AnyResolvedEventPromise.new_blocked_by(futures_and_or_events, default_executor).event end # TODO consider adding first(count, *futures) @@ -616,7 +616,7 @@ def chain(*args, &task) # @overload a_future.chain_on(executor, *args, &task) # @yield [fulfilled?, value, reason, *args] to the task. def chain_on(executor, *args, &task) - ChainPromise.new_blocked1(self, @DefaultExecutor, executor, args, &task).future + ChainPromise.new_blocked_by1(self, @DefaultExecutor, executor, args, &task).future end # @return [String] Short string representation. @@ -677,6 +677,7 @@ def on_resolution_using(executor, *args, &callback) # Any futures depending on it will use the new default executor. 
# @!macro promises.shortcut.event-future # @abstract + # @return [AbstractEventFuture] def with_default_executor(executor) raise NotImplementedError end @@ -807,9 +808,9 @@ class Event < AbstractEventFuture # @return [Future, Event] def zip(other) if other.is_a?(Future) - ZipFutureEventPromise.new_blocked2(other, self, @DefaultExecutor).future + ZipFutureEventPromise.new_blocked_by2(other, self, @DefaultExecutor).future else - ZipEventEventPromise.new_blocked2(self, other, @DefaultExecutor).event + ZipEventEventPromise.new_blocked_by2(self, other, @DefaultExecutor).event end end @@ -820,7 +821,7 @@ def zip(other) # # @return [Event] def any(event_or_future) - AnyResolvedEventPromise.new_blocked2(self, event_or_future, @DefaultExecutor).event + AnyResolvedEventPromise.new_blocked_by2(self, event_or_future, @DefaultExecutor).event end alias_method :|, :any @@ -831,7 +832,7 @@ def any(event_or_future) # @return [Event] def delay event = DelayPromise.new(@DefaultExecutor).event - ZipEventEventPromise.new_blocked2(self, event, @DefaultExecutor).event + ZipEventEventPromise.new_blocked_by2(self, event, @DefaultExecutor).event end # @!macro [new] promise.method.schedule @@ -844,7 +845,7 @@ def delay def schedule(intended_time) chain do event = ScheduledPromise.new(@DefaultExecutor, intended_time).event - ZipEventEventPromise.new_blocked2(self, event, @DefaultExecutor).event + ZipEventEventPromise.new_blocked_by2(self, event, @DefaultExecutor).event end.flat_event end @@ -866,7 +867,7 @@ def to_event # @!macro promises.method.with_default_executor # @return [Event] def with_default_executor(executor) - EventWrapperPromise.new_blocked1(self, executor).event + EventWrapperPromise.new_blocked_by1(self, executor).event end private @@ -980,7 +981,7 @@ def then(*args, &task) # @return [Future] # @yield [value, *args] to the task. 
def then_on(executor, *args, &task) - ThenPromise.new_blocked1(self, @DefaultExecutor, executor, args, &task).future + ThenPromise.new_blocked_by1(self, @DefaultExecutor, executor, args, &task).future end # @!macro promises.shortcut.on @@ -998,16 +999,16 @@ def rescue(*args, &task) # @return [Future] # @yield [reason, *args] to the task. def rescue_on(executor, *args, &task) - RescuePromise.new_blocked1(self, @DefaultExecutor, executor, args, &task).future + RescuePromise.new_blocked_by1(self, @DefaultExecutor, executor, args, &task).future end # @!macro promises.method.zip # @return [Future] def zip(other) if other.is_a?(Future) - ZipFuturesPromise.new_blocked2(self, other, @DefaultExecutor).future + ZipFuturesPromise.new_blocked_by2(self, other, @DefaultExecutor).future else - ZipFutureEventPromise.new_blocked2(self, other, @DefaultExecutor).future + ZipFutureEventPromise.new_blocked_by2(self, other, @DefaultExecutor).future end end @@ -1019,7 +1020,7 @@ def zip(other) # # @return [Future] def any(event_or_future) - AnyResolvedFuturePromise.new_blocked2(self, event_or_future, @DefaultExecutor).future + AnyResolvedFuturePromise.new_blocked_by2(self, event_or_future, @DefaultExecutor).future end alias_method :|, :any @@ -1030,7 +1031,7 @@ def any(event_or_future) # @return [Future] def delay event = DelayPromise.new(@DefaultExecutor).event - ZipFutureEventPromise.new_blocked2(self, event, @DefaultExecutor).future + ZipFutureEventPromise.new_blocked_by2(self, event, @DefaultExecutor).future end # @!macro promise.method.schedule @@ -1038,14 +1039,14 @@ def delay def schedule(intended_time) chain do event = ScheduledPromise.new(@DefaultExecutor, intended_time).event - ZipFutureEventPromise.new_blocked2(self, event, @DefaultExecutor).future + ZipFutureEventPromise.new_blocked_by2(self, event, @DefaultExecutor).future end.flat end # @!macro promises.method.with_default_executor # @return [Future] def with_default_executor(executor) - 
FutureWrapperPromise.new_blocked1(self, executor).future + FutureWrapperPromise.new_blocked_by1(self, executor).future end # Creates new future which will have result of the future returned by receiver. If receiver @@ -1054,7 +1055,7 @@ def with_default_executor(executor) # @param [Integer] level how many levels of futures should flatten # @return [Future] def flat_future(level = 1) - FlatFuturePromise.new_blocked1(self, level, @DefaultExecutor).future + FlatFuturePromise.new_blocked_by1(self, level, @DefaultExecutor).future end alias_method :flat, :flat_future @@ -1064,7 +1065,7 @@ def flat_future(level = 1) # # @return [Event] def flat_event - FlatEventPromise.new_blocked1(self, @DefaultExecutor).event + FlatEventPromise.new_blocked_by1(self, @DefaultExecutor).event end # @!macro promises.shortcut.using @@ -1091,7 +1092,7 @@ def on_fulfillment!(*args, &callback) # @!macro promises.param.args # @!macro promise.param.callback # @return [self] - # @yield [value *args] to the callback. + # @yield [value, *args] to the callback. def on_fulfillment_using(executor, *args, &callback) add_callback :async_callback_on_fulfillment, executor, args, callback end @@ -1138,7 +1139,7 @@ def on_rejection_using(executor, *args, &callback) # end # Promises.future(0, &body).run.value! 
# => 5 def run - RunFuturePromise.new_blocked1(self, @DefaultExecutor).future + RunFuturePromise.new_blocked_by1(self, @DefaultExecutor).future end # @!visibility private @@ -1233,7 +1234,7 @@ def resolve(raise_on_reassign = true) # # @return [Event] def with_hidden_resolvable - @with_hidden_resolvable ||= EventWrapperPromise.new_blocked1(self, @DefaultExecutor).event + @with_hidden_resolvable ||= EventWrapperPromise.new_blocked_by1(self, @DefaultExecutor).event end end @@ -1289,7 +1290,7 @@ def evaluate_to!(*args, &block) # # @return [Future] def with_hidden_resolvable - @with_hidden_resolvable ||= FutureWrapperPromise.new_blocked1(self, @DefaultExecutor).future + @with_hidden_resolvable ||= FutureWrapperPromise.new_blocked_by1(self, @DefaultExecutor).future end end @@ -1381,7 +1382,7 @@ class BlockedPromise < InnerPromise private_class_method :new - def self.new_blocked1(blocker, *args, &block) + def self.new_blocked_by1(blocker, *args, &block) blocker_delayed = blocker.promise.delayed delayed = blocker_delayed ? 
LockFreeStack.new.push(blocker_delayed) : nil promise = new(delayed, 1, *args, &block) @@ -1389,7 +1390,7 @@ def self.new_blocked1(blocker, *args, &block) blocker.add_callback :callback_notify_blocked, promise, 0 end - def self.new_blocked2(blocker1, blocker2, *args, &block) + def self.new_blocked_by2(blocker1, blocker2, *args, &block) blocker_delayed1 = blocker1.promise.delayed blocker_delayed2 = blocker2.promise.delayed # TODO (pitr-ch 23-Dec-2016): use arrays when we know it will not grow (only flat adds delay) @@ -1408,7 +1409,7 @@ def self.new_blocked2(blocker1, blocker2, *args, &block) blocker2.add_callback :callback_notify_blocked, promise, 1 end - def self.new_blocked(blockers, *args, &block) + def self.new_blocked_by(blockers, *args, &block) delayed = blockers.reduce(nil, &method(:add_delayed)) promise = new(delayed, blockers.size, *args, &block) ensure From 160a45786ff75edd110c54b6b8fb28a96bfea1cd Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Fri, 23 Dec 2016 21:20:36 +0100 Subject: [PATCH 57/68] Few to_s improvements --- lib/concurrent/atomic/atomic_fixnum.rb | 4 ++++ lib/concurrent/atomic/atomic_reference.rb | 7 +++++++ lib/concurrent/edge/lock_free_stack.rb | 5 +++++ 3 files changed, 16 insertions(+) diff --git a/lib/concurrent/atomic/atomic_fixnum.rb b/lib/concurrent/atomic/atomic_fixnum.rb index 1c2b0726a..cf93f7c06 100644 --- a/lib/concurrent/atomic/atomic_fixnum.rb +++ b/lib/concurrent/atomic/atomic_fixnum.rb @@ -129,5 +129,9 @@ module Concurrent # # @!macro atomic_fixnum_public_api class AtomicFixnum < AtomicFixnumImplementation + # @return [String] Short string representation. 
+ def to_s + format '<#%s:0x%x value:%s>', self.class, object_id << 1, get + end end end diff --git a/lib/concurrent/atomic/atomic_reference.rb b/lib/concurrent/atomic/atomic_reference.rb index 46dbbdf1b..ba935a2d8 100644 --- a/lib/concurrent/atomic/atomic_reference.rb +++ b/lib/concurrent/atomic/atomic_reference.rb @@ -40,3 +40,10 @@ class Concurrent::AtomicReference < Concurrent::CAtomicReference class Concurrent::AtomicReference < Concurrent::MutexAtomicReference end end + +class Concurrent::AtomicReference + # @return [String] Short string representation. + def to_s + format '<#%s:0x%x value:%s>', self.class, object_id << 1, get + end +end diff --git a/lib/concurrent/edge/lock_free_stack.rb b/lib/concurrent/edge/lock_free_stack.rb index 64929271b..211580a99 100644 --- a/lib/concurrent/edge/lock_free_stack.rb +++ b/lib/concurrent/edge/lock_free_stack.rb @@ -113,5 +113,10 @@ def clear_each(&block) end end end + + # @return [String] Short string representation. + def to_s + format '<#%s:0x%x %s>', self.class, object_id << 1, to_a.to_s + end end end From 6d0bb3447c19f56f893590e8e48de931c12f15be Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Fri, 23 Dec 2016 21:21:25 +0100 Subject: [PATCH 58/68] Concurrent::Map#each_pair should return enumerable when no block given --- lib/concurrent/collection/map/non_concurrent_map_backend.rb | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/concurrent/collection/map/non_concurrent_map_backend.rb b/lib/concurrent/collection/map/non_concurrent_map_backend.rb index 1c9aa8984..ba86d7c0f 100644 --- a/lib/concurrent/collection/map/non_concurrent_map_backend.rb +++ b/lib/concurrent/collection/map/non_concurrent_map_backend.rb @@ -95,6 +95,7 @@ def clear end def each_pair + return enum_for :each_pair unless block_given? 
dupped_backend.each_pair do |k, v| yield k, v end From 036b0c4647413858bf18a7851c1071afdcc962e2 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Fri, 23 Dec 2016 21:22:09 +0100 Subject: [PATCH 59/68] Update spec to moved throttle --- spec/concurrent/edge/promises_spec.rb | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/spec/concurrent/edge/promises_spec.rb b/spec/concurrent/edge/promises_spec.rb index 7494c48b2..49df8da63 100644 --- a/spec/concurrent/edge/promises_spec.rb +++ b/spec/concurrent/edge/promises_spec.rb @@ -489,39 +489,39 @@ def behaves_as_delay(delay, value) describe 'Throttling' do specify do limit = 4 - throttle = Concurrent::Promises::Throttle.new limit + throttle = Concurrent::Throttle.new limit counter = Concurrent::AtomicFixnum.new testing = -> *args do counter.increment - sleep rand * 0.1 + 0.1 + sleep rand * 0.02 + 0.02 # returns less then 3 since it's throttled v = counter.decrement + 1 v end - expect(p(Concurrent::Promises.zip( + expect(Concurrent::Promises.zip( *20.times.map do |i| throttle.throttled { |trigger| trigger.then(throttle, &testing) } - end).value!).all? { |v| v <= limit }).to be_truthy + end).value!.all? { |v| v <= limit }).to be_truthy - expect(p(Concurrent::Promises.zip( + expect(Concurrent::Promises.zip( *20.times.map do |i| throttle.then_throttled(throttle, &testing) - end).value!).all? { |v| v <= limit }).to be_truthy + end).value!.all? { |v| v <= limit }).to be_truthy - expect(p(Concurrent::Promises.zip( + expect(Concurrent::Promises.zip( *20.times.map do |i| Concurrent::Promises. fulfilled_future(i). throttled_by(throttle) { |trigger| trigger.then(throttle, &testing) } - end).value!).all? { |v| v <= limit }).to be_truthy + end).value!.all? { |v| v <= limit }).to be_truthy - expect(p(Concurrent::Promises.zip( + expect(Concurrent::Promises.zip( *20.times.map do |i| Concurrent::Promises. fulfilled_future(i). then_throttled_by(throttle, throttle, &testing) - end).value!).all? 
{ |v| v <= limit }).to be_truthy + end).value!.all? { |v| v <= limit }).to be_truthy end end end From 9d7b3cb041bb6f0014656cf7ad5d1f681e406938 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Fri, 23 Dec 2016 23:19:46 +0100 Subject: [PATCH 60/68] Update dependencies --- Gemfile | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/Gemfile b/Gemfile index 6dde8818b..20032a19d 100644 --- a/Gemfile +++ b/Gemfile @@ -4,18 +4,19 @@ gemspec name: 'concurrent-ruby' gemspec name: 'concurrent-ruby-edge' group :development do - gem 'rake', '~> 10.4.2' + gem 'rake', '~> 10.0' gem 'rake-compiler', '~> 0.9.5' gem 'rake-compiler-dock', '~> 0.4.3' gem 'gem-compiler', '~> 0.3.0' - gem 'benchmark-ips', '~> 2.2.0' + gem 'benchmark-ips', '~> 2.7' # documentation gem 'countloc', '~> 0.4.0', :platforms => :mri, :require => false - gem 'yard', '~> 0.8.7.6', :require => false + gem 'yard', '~> 0.8.0', :require => false # TODO (pitr-ch 15-Oct-2016): does not work on 1.9.3 anymore - gem 'inch', '~> 0.6.3', :platforms => :mri, :require => false - gem 'redcarpet', '~> 3.3.2', platforms: :mri # understands github markdown + # TODO remove, reports private classes as undocumented + gem 'inch', '~> 0.7.0', :platforms => :mri, :require => false + gem 'redcarpet', '~> 3.3', platforms: :mri # understands github markdown gem 'md-ruby-eval' end From 41cf14df36d66f3b9dc82f59dd2fd8645daa224f Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Fri, 23 Dec 2016 23:21:59 +0100 Subject: [PATCH 61/68] More documentation for Throttle and other minor improvmenets --- doc/promises.in.md | 4 +- doc/promises.out.md | 106 ++++++++--------- lib/concurrent-edge.rb | 1 + lib/concurrent/edge/lock_free_linked_set.rb | 2 +- lib/concurrent/edge/promises.rb | 21 +++- lib/concurrent/edge/throttle.rb | 121 ++++++++++++++++---- spec/concurrent/edge/promises_spec.rb | 4 +- 7 files changed, 172 insertions(+), 87 deletions(-) diff --git a/doc/promises.in.md b/doc/promises.in.md index c5bd87007..4c77ba3f3 
100644 --- a/doc/promises.in.md +++ b/doc/promises.in.md @@ -757,12 +757,12 @@ pool of size 3. We create throttle with the same size ```ruby DB_INTERNAL_POOL = Concurrent::Array.new data -max_tree = Promises::Throttle.new 3 +max_tree = Concurrent::Throttle.new 3 futures = 11.times.map do |i| max_tree. # throttled tasks, at most 3 simultaneous calls of [] on the database - then_throttled { DB_INTERNAL_POOL[i] }. + throttled_future { DB_INTERNAL_POOL[i] }. # un-throttled tasks, unlimited concurrency then { |starts| starts.size }. rescue { |reason| reason.message } diff --git a/doc/promises.out.md b/doc/promises.out.md index 7307dad73..cd9b3c05a 100644 --- a/doc/promises.out.md +++ b/doc/promises.out.md @@ -47,17 +47,17 @@ Class.new do resolvable_event end end.new.a_method -# => <#Concurrent::Promises::ResolvableEvent:0x7fa95ca52488 pending> +# => <#Concurrent::Promises::ResolvableEvent:0x7fb4ba3e8c78 pending> Module.new { extend Concurrent::Promises::FactoryMethods }.resolvable_event -# => <#Concurrent::Promises::ResolvableEvent:0x7fa95ca4a9e0 pending> +# => <#Concurrent::Promises::ResolvableEvent:0x7fb4ba3e2850 pending> ``` The module is already extended into {Concurrent::Promises} for convenience. ```ruby Concurrent::Promises.resolvable_event -# => <#Concurrent::Promises::ResolvableEvent:0x7fa95ca48708 pending> +# => <#Concurrent::Promises::ResolvableEvent:0x7fb4ba3e08e8 pending> ``` For this guide we introduce a shortcut in `main` so we can call the factory @@ -66,7 +66,7 @@ methods in following examples by using `Promisses` directly. 
```ruby Promises = Concurrent::Promises Promises.resolvable_event -# => <#Concurrent::Promises::ResolvableEvent:0x7fa95ca41d18 pending> +# => <#Concurrent::Promises::ResolvableEvent:0x7fb4ba3d8be8 pending> ``` ## Asynchronous task @@ -82,7 +82,7 @@ future = Promises.future(0.1) do |duration| sleep duration :result end -# => <#Concurrent::Promises::Future:0x7fa95b3eb410 pending> +# => <#Concurrent::Promises::Future:0x7fb4ba3c8248 pending> ``` Asks if the future is resolved, here it will be still in the middle of the @@ -103,7 +103,7 @@ If the task fails we talk about the future being rejected. ```ruby future = Promises.future { raise 'Boom' } -# => <#Concurrent::Promises::Future:0x7fa95b3dae58 pending> +# => <#Concurrent::Promises::Future:0x7fb4ba3b1bd8 pending> ``` There is no result, the future was rejected with a reason. @@ -198,20 +198,20 @@ through evaluation as follows. ```ruby Promises.future { :value } -# => <#Concurrent::Promises::Future:0x7fa95b89f510 pending> +# => <#Concurrent::Promises::Future:0x7fb4ba322230 pending> ``` Instead it can be created directly. ```ruby Promises.fulfilled_future(:value) -# => <#Concurrent::Promises::Future:0x7fa95b390bc8 fulfilled> +# => <#Concurrent::Promises::Future:0x7fb4ba31a648 fulfilled> Promises.rejected_future(StandardError.new('Ups')) -# => <#Concurrent::Promises::Future:0x7fa95b38b628 rejected> +# => <#Concurrent::Promises::Future:0x7fb4ba319298 rejected> Promises.resolved_future(true, :value, nil) -# => <#Concurrent::Promises::Future:0x7fa95b38a688 fulfilled> +# => <#Concurrent::Promises::Future:0x7fb4ba3133e8 fulfilled> Promises.resolved_future(false, nil, StandardError.new('Ups')) -# => <#Concurrent::Promises::Future:0x7fa95b3892b0 rejected> +# => <#Concurrent::Promises::Future:0x7fb4ba311700 rejected> ``` ## Chaining @@ -252,9 +252,9 @@ do_stuff arg }`) is **required**, both following examples may break. 
```ruby arg = 1 # => 1 Thread.new { do_stuff arg } -# => # +# => # Promises.future { do_stuff arg } -# => <#Concurrent::Promises::Future:0x7fa95b321020 pending> +# => <#Concurrent::Promises::Future:0x7fb4ba2a0dc0 pending> ``` ## Branching, and zipping @@ -316,7 +316,7 @@ Promises. result # => [false, # nil, -# #>] +# #>] ``` As `then` chained tasks execute only on fulfilled futures, there is a `rescue` @@ -364,7 +364,7 @@ Zip is rejected if any of the zipped futures is. rejected_zip = Promises.zip( Promises.fulfilled_future(1), Promises.rejected_future(StandardError.new('Ups'))) -# => <#Concurrent::Promises::Future:0x7fa95b2428e8 rejected> +# => <#Concurrent::Promises::Future:0x7fb4bc332390 rejected> rejected_zip.result # => [false, [1, nil], [nil, #]] rejected_zip. @@ -379,11 +379,11 @@ requiring resolution. ```ruby future = Promises.delay { sleep 0.1; 'lazy' } -# => <#Concurrent::Promises::Future:0x7fa95b229eb0 pending> +# => <#Concurrent::Promises::Future:0x7fb4bc3188f0 pending> sleep 0.1 future.resolved? # => false future.touch -# => <#Concurrent::Promises::Future:0x7fa95b229eb0 pending> +# => <#Concurrent::Promises::Future:0x7fb4bc3188f0 pending> sleep 0.2 future.resolved? # => true ``` @@ -460,7 +460,7 @@ Schedule task to be executed in 0.1 seconds. ```ruby scheduled = Promises.schedule(0.1) { 1 } -# => <#Concurrent::Promises::Future:0x7fa95c9b1ce0 pending> +# => <#Concurrent::Promises::Future:0x7fb4bc288958 pending> scheduled.resolved? # => false ``` @@ -485,7 +485,7 @@ Time can be used as well. 
```ruby Promises.schedule(Time.now + 10) { :val } -# => <#Concurrent::Promises::Future:0x7fa95c972ae0 pending> +# => <#Concurrent::Promises::Future:0x7fb4bc252330 pending> ``` ## Resolvable Future and Event: @@ -497,7 +497,7 @@ Sometimes it is required to resolve a future externally, in these cases ```ruby future = Promises.resolvable_future -# => <#Concurrent::Promises::ResolvableFuture:0x7fa95c970e98 pending> +# => <#Concurrent::Promises::ResolvableFuture:0x7fb4bc250620 pending> ``` The thread will be blocked until the future is resolved @@ -505,7 +505,7 @@ The thread will be blocked until the future is resolved ```ruby thread = Thread.new { future.value } future.fulfill 1 -# => <#Concurrent::Promises::ResolvableFuture:0x7fa95c970e98 fulfilled> +# => <#Concurrent::Promises::ResolvableFuture:0x7fb4bc250620 fulfilled> thread.value # => 1 ``` @@ -522,9 +522,9 @@ future.fulfill 2, false # => false ## Callbacks ```ruby -queue = Queue.new # => # +queue = Queue.new # => # future = Promises.delay { 1 + 1 } -# => <#Concurrent::Promises::Future:0x7fa95e0547b8 pending> +# => <#Concurrent::Promises::Future:0x7fb4bb9d4730 pending> future.on_fulfillment { queue << 1 } # evaluated asynchronously future.on_fulfillment! { queue << 2 } # evaluated on resolving thread @@ -547,7 +547,7 @@ and `:io` for blocking and long tasks. ```ruby Promises.future_on(:fast) { 2 }. then_on(:io) { File.read __FILE__ }. - value.size # => 18760 + value.size # => 18764 ``` # Interoperability @@ -560,7 +560,7 @@ Create an actor which takes received numbers and returns the number squared. actor = Concurrent::Actor::Utils::AdHoc.spawn :square do -> v { v ** 2 } end -# => # +# => # ``` Send result of `1+1` to the actor, and add 2 to the result send back from the @@ -592,17 +592,17 @@ actor.ask(2).then(&:succ).value! 
# => 5 ```ruby Promises.future { do_stuff } -# => <#Concurrent::Promises::Future:0x7fa95b1eb8e0 pending> +# => <#Concurrent::Promises::Future:0x7fb4bb947740 pending> ``` ## Parallel background processing ```ruby tasks = 4.times.map { |i| Promises.future(i) { |i| i*2 } } -# => [<#Concurrent::Promises::Future:0x7fa95b1e2e48 pending>, -# <#Concurrent::Promises::Future:0x7fa95b1e1f70 pending>, -# <#Concurrent::Promises::Future:0x7fa95b1e1188 pending>, -# <#Concurrent::Promises::Future:0x7fa95b1e0198 pending>] +# => [<#Concurrent::Promises::Future:0x7fb4bb93f090 pending>, +# <#Concurrent::Promises::Future:0x7fb4bb93e488 pending>, +# <#Concurrent::Promises::Future:0x7fb4bb93d6f0 pending>, +# <#Concurrent::Promises::Future:0x7fb4bb93c778 pending>] Promises.zip(*tasks).value! # => [0, 2, 4, 6] ``` @@ -655,11 +655,11 @@ Create the computer actor and send it 3 jobs. ```ruby computer = Concurrent::Actor.spawn Computer, :computer -# => # +# => # results = 3.times.map { computer.ask [:run, -> { sleep 0.1; :result }] } -# => [<#Concurrent::Promises::Future:0x7fa95a3d30c8 pending>, -# <#Concurrent::Promises::Future:0x7fa95a3d0e40 pending>, -# <#Concurrent::Promises::Future:0x7fa95a3cb760 pending>] +# => [<#Concurrent::Promises::Future:0x7fb4ba990450 pending>, +# <#Concurrent::Promises::Future:0x7fb4ba9897e0 pending>, +# <#Concurrent::Promises::Future:0x7fb4ba988318 pending>] computer.ask(:status).value! # => {:running_jobs=>3} results.map(&:value!) # => [:result, :result, :result] ``` @@ -706,8 +706,8 @@ Lets have two processes which will count until cancelled. ```ruby source, token = Concurrent::Cancellation.create -# => [<#Concurrent::Cancellation:0x7fa95a223840 canceled:false>, -# <#Concurrent::Cancellation::Token:0x7fa95a2222b0 canceled:false>] +# => [<#Concurrent::Cancellation:0x7fb4ba1bc300 canceled:false>, +# <#Concurrent::Cancellation::Token:0x7fb4ba1b7670 canceled:false>] count_until_cancelled = -> token, count do if token.canceled? 
@@ -720,12 +720,12 @@ end futures = Array.new(2) do Promises.future(token, 0, &count_until_cancelled).run end -# => [<#Concurrent::Promises::Future:0x7fa95b1b8a08 pending>, -# <#Concurrent::Promises::Future:0x7fa95b1aa110 pending>] +# => [<#Concurrent::Promises::Future:0x7fb4ba13d578 pending>, +# <#Concurrent::Promises::Future:0x7fb4ba13c308 pending>] sleep 0.01 source.cancel # => true -futures.map(&:value!) # => [65, 66] +futures.map(&:value!) # => [35, 34] ``` Cancellation can also be used as event or future to log or plan re-execution. @@ -744,8 +744,8 @@ tasks share a cancellation, when one of them fails it cancels the others. ```ruby source, token = Concurrent::Cancellation.create -# => [<#Concurrent::Cancellation:0x7fa95c9c8c60 canceled:false>, -# <#Concurrent::Cancellation::Token:0x7fa95c9c8710 canceled:false>] +# => [<#Concurrent::Cancellation:0x7fb4ba053130 canceled:false>, +# <#Concurrent::Cancellation::Token:0x7fb4ba051fb0 canceled:false>] tasks = 4.times.map do |i| Promises.future(source, token, i) do |source, token, i| count = 0 @@ -761,22 +761,22 @@ tasks = 4.times.map do |i| end end end -# => [<#Concurrent::Promises::Future:0x7fa95c9a1818 pending>, -# <#Concurrent::Promises::Future:0x7fa95c9a0aa8 pending>, -# <#Concurrent::Promises::Future:0x7fa95c98bb30 pending>, -# <#Concurrent::Promises::Future:0x7fa95c98aed8 pending>] +# => [<#Concurrent::Promises::Future:0x7fb4ba03b0f8 pending>, +# <#Concurrent::Promises::Future:0x7fb4ba0397a8 pending>, +# <#Concurrent::Promises::Future:0x7fb4ba038308 pending>, +# <#Concurrent::Promises::Future:0x7fb4ba0332b8 pending>] Promises.zip(*tasks).result # => [false, -# [nil, :cancelled, :cancelled, nil], -# [#, nil, nil, #]] +# [:cancelled, :cancelled, nil, :cancelled], +# [nil, nil, #, nil]] ``` Without the randomly failing part it produces following. 
```ruby source, token = Concurrent::Cancellation.create -# => [<#Concurrent::Cancellation:0x7fa95ca897d0 canceled:false>, -# <#Concurrent::Cancellation::Token:0x7fa95ca83c68 canceled:false>] +# => [<#Concurrent::Cancellation:0x7fb4bb9ee0b8 canceled:false>, +# <#Concurrent::Cancellation::Token:0x7fb4bb9ed9d8 canceled:false>] tasks = 4.times.map do |i| Promises.future(source, token, i) do |source, token, i| count = 0 @@ -884,13 +884,13 @@ DB_INTERNAL_POOL = Concurrent::Array.new data # "********", # "*********"] -max_tree = Promises::Throttle.new 3 -# => <#Concurrent::Promises::Throttle:0x7fa95b2d0cb0 limit:3 can_run:3> +max_tree = Concurrent::Throttle.new 3 +# => <#Concurrent::Throttle:0x7fb4ba2f22d8 limit:3 can_run:3> futures = 11.times.map do |i| max_tree. # throttled tasks, at most 3 simultaneous calls of [] on the database - then_throttled { DB_INTERNAL_POOL[i] }. + throttled_future { DB_INTERNAL_POOL[i] }. # un-throttled tasks, unlimited concurrency then { |starts| starts.size }. rescue { |reason| reason.message } @@ -927,7 +927,7 @@ def schedule_job(interval, &job) end end -queue = Queue.new # => # +queue = Queue.new # => # count = 0 # => 0 interval = 0.05 # small just not to delay execution of this example diff --git a/lib/concurrent-edge.rb b/lib/concurrent-edge.rb index 7721430a3..ad818acef 100644 --- a/lib/concurrent-edge.rb +++ b/lib/concurrent-edge.rb @@ -12,3 +12,4 @@ require 'concurrent/edge/promises' require 'concurrent/edge/cancellation' +require 'concurrent/edge/throttle' diff --git a/lib/concurrent/edge/lock_free_linked_set.rb b/lib/concurrent/edge/lock_free_linked_set.rb index 62964037d..42c3a9f8f 100644 --- a/lib/concurrent/edge/lock_free_linked_set.rb +++ b/lib/concurrent/edge/lock_free_linked_set.rb @@ -124,7 +124,7 @@ def remove(item) # # An iterator to loop through the set. 
# - # @yield [Object] each item in the set + # @yield [item] each item in the set # @yieldparam [Object] item the item you to remove from the set # # @return [Object] self: the linked set on which each was called diff --git a/lib/concurrent/edge/promises.rb b/lib/concurrent/edge/promises.rb index 3abbce306..86b58037b 100644 --- a/lib/concurrent/edge/promises.rb +++ b/lib/concurrent/edge/promises.rb @@ -614,7 +614,10 @@ def chain(*args, &task) # @overload an_event.chain_on(executor, *args, &task) # @yield [*args] to the task. # @overload a_future.chain_on(executor, *args, &task) - # @yield [fulfilled?, value, reason, *args] to the task. + # @yield [fulfilled, value, reason, *args] to the task. + # @yieldparam [true, false] fulfilled + # @yieldparam [Object] value + # @yieldparam [Exception] reason def chain_on(executor, *args, &task) ChainPromise.new_blocked_by1(self, @DefaultExecutor, executor, args, &task).future end @@ -652,7 +655,10 @@ def on_resolution(*args, &callback) # @overload an_event.on_resolution!(*args, &callback) # @yield [*args] to the callback. # @overload a_future.on_resolution!(*args, &callback) - # @yield [fulfilled?, value, reason, *args] to the callback. + # @yield [fulfilled, value, reason, *args] to the callback. + # @yieldparam [true, false] fulfilled + # @yieldparam [Object] value + # @yieldparam [Exception] reason def on_resolution!(*args, &callback) add_callback :callback_on_resolution, args, callback end @@ -667,7 +673,10 @@ def on_resolution!(*args, &callback) # @overload an_event.on_resolution_using(executor, *args, &callback) # @yield [*args] to the callback. # @overload a_future.on_resolution_using(executor, *args, &callback) - # @yield [fulfilled?, value, reason, *args] to the callback. + # @yield [fulfilled, value, reason, *args] to the callback. 
+ # @yieldparam [true, false] fulfilled + # @yieldparam [Object] value + # @yieldparam [Exception] reason def on_resolution_using(executor, *args, &callback) add_callback :async_callback_on_resolution, executor, args, callback end @@ -1080,7 +1089,7 @@ def on_fulfillment(*args, &callback) # @!macro promises.param.args # @!macro promise.param.callback # @return [self] - # @yield [value *args] to the callback. + # @yield [value, *args] to the callback. def on_fulfillment!(*args, &callback) add_callback :callback_on_fulfillment, args, callback end @@ -1109,7 +1118,7 @@ def on_rejection(*args, &callback) # @!macro promises.param.args # @!macro promise.param.callback # @return [self] - # @yield [reason *args] to the callback. + # @yield [reason, *args] to the callback. def on_rejection!(*args, &callback) add_callback :callback_on_rejection, args, callback end @@ -1121,7 +1130,7 @@ def on_rejection!(*args, &callback) # @!macro promises.param.args # @!macro promise.param.callback # @return [self] - # @yield [reason *args] to the callback. + # @yield [reason, *args] to the callback. def on_rejection_using(executor, *args, &callback) add_callback :async_callback_on_rejection, executor, args, callback end diff --git a/lib/concurrent/edge/throttle.rb b/lib/concurrent/edge/throttle.rb index 66ecaa1c4..f1cd060d7 100644 --- a/lib/concurrent/edge/throttle.rb +++ b/lib/concurrent/edge/throttle.rb @@ -1,26 +1,47 @@ module Concurrent - - # A tool manage concurrency level of future tasks. - # @example With futures - # data = (1..5).to_a - # db = data.reduce({}) { |h, v| h.update v => v.to_s } - # max_two = Promises.throttle 2 + # @!macro [new] throttle.example.throttled_block + # @example + # max_two = Throttle.new 2 + # 10.times.map do + # Thread.new do + # max_two.throttled_block do + # # Only 2 at the same time + # do_stuff + # end + # end + # end + # @!macro [new] throttle.example.throttled_future_chain + # @example + # throttle.throttled_future_chain do |trigger| + # trigger. 
+ # # 2 throttled promises + # chain { 1 }. + # then(&:succ) + # end + # @!macro [new] throttle.example.then_throttled_by + # @example + # data = (1..5).to_a + # db = data.reduce({}) { |h, v| h.update v => v.to_s } + # max_two = Throttle.new 2 # - # futures = data.map do |data| - # Promises.future(data) { |data| - # # un-throttled, concurrency level equal data.size - # data + 1 - # }.then_throttle(max_two, db) { |v, db| - # # throttled, only 2 tasks executed at the same time - # # e.g. limiting access to db - # db[v] - # } - # end + # futures = data.map do |data| + # Promises.future(data) do |data| + # # un-throttled, concurrency level equal data.size + # data + 1 + # end.then_throttled_by(max_two, db) do |v, db| + # # throttled, only 2 tasks executed at the same time + # # e.g. limiting access to db + # db[v] + # end + # end # - # futures.map(&:value!) # => [2, 3, 4, 5, nil] + # futures.map(&:value!) # => [2, 3, 4, 5, nil] + + # A tool manage concurrency level of future tasks. # - # @example With Threads - # # TODO (pitr-ch 23-Dec-2016): thread example, add blocking block method for threads + # @!macro throttle.example.then_throttled_by + # @!macro throttle.example.throttled_future_chain + # @!macro throttle.example.throttled_block class Throttle < Synchronization::Object # TODO (pitr-ch 21-Dec-2016): consider using sized channel for implementation instead when available @@ -41,6 +62,10 @@ def limit @Limit end + # New event which will be resolved when depending tasks can execute. + # Has to be used and after the critical work is done {#release} must be called exactly once. + # @return [Promises::Event] + # @see #release def trigger while true current_can_run = can_run @@ -56,6 +81,9 @@ def trigger end end + # Has to be called once for each trigger after it is ok to execute another throttled task. 
+ # @return [self] + # @see #trigger def release while true current_can_run = can_run @@ -69,6 +97,18 @@ def release end end + # Blocks current thread until the block can be executed. + # @yield to throttled block + # @yieldreturn [Object] is used as a result of the method + # @return [Object] the result of the block + # @!macro throttle.example.throttled_block + def throttled_block(&block) + trigger.wait + block.call + ensure + release + end + # @return [String] Short string representation. def to_s format '<#%s:0x%x limit:%s can_run:%d>', self.class, object_id << 1, @Limit, can_run @@ -78,12 +118,22 @@ def to_s module PromisesIntegration - def throttled(&throttled_futures) + # Allows to throttle a chain of promises. + # @yield [trigger] a trigger which has to be used to build up a chain of promises, the last one is result + # of the block. When the last one resolves, {Throttle#release} is called on the throttle. + # @yieldparam [Promises::Event, Promises::Future] trigger + # @yieldreturn [Promises::Event, Promises::Future] The final future of the throttled chain. + # @return [Promises::Event, Promises::Future] The final future of the throttled chain. + # @!macro throttle.example.throttled_future_chain + def throttled_future_chain(&throttled_futures) throttled_futures.call(trigger).on_resolution! { release } end - def then_throttled(*args, &task) - trigger.then(*args, &task).on_resolution! { release } + # Behaves as {Promises::FactoryMethods#future} but the future is throttled. + # @return [Promises::Future] + # @see Promises::FactoryMethods#future + def throttled_future(*args, &task) + trigger.chain(*args, &task).on_resolution! { release } end end @@ -95,13 +145,38 @@ module Promises class AbstractEventFuture < Synchronization::Object module ThrottleIntegration def throttled_by(throttle, &throttled_futures) - a_trigger = throttle.trigger & self + a_trigger = self & self.chain { throttle.trigger }.flat_event throttled_futures.call(a_trigger).on_resolution! 
{ throttle.release } end + # Behaves as {Promises::AbstractEventFuture#chain} but the it is throttled. + # @return [Promises::Future, Promises::Event] + # @see Promises::AbstractEventFuture#chain + def chain_throttled_by(throttle, *args, &block) + throttled_by(throttle) { |trigger| trigger.chain(*args, &block) } + end + end + + include ThrottleIntegration + end + + class Future < AbstractEventFuture + module ThrottleIntegration + + # Behaves as {Promises::Future#then} but the it is throttled. + # @return [Promises::Future] + # @see Promises::Future#then + # @!macro throttle.example.then_throttled_by def then_throttled_by(throttle, *args, &block) throttled_by(throttle) { |trigger| trigger.then(*args, &block) } end + + # Behaves as {Promises::Future#rescue} but the it is throttled. + # @return [Promises::Future] + # @see Promises::Future#rescue + def rescue_throttled_by(throttle, *args, &block) + throttled_by(throttle) { |trigger| trigger.rescue(*args, &block) } + end end include ThrottleIntegration diff --git a/spec/concurrent/edge/promises_spec.rb b/spec/concurrent/edge/promises_spec.rb index 49df8da63..48c933f97 100644 --- a/spec/concurrent/edge/promises_spec.rb +++ b/spec/concurrent/edge/promises_spec.rb @@ -501,12 +501,12 @@ def behaves_as_delay(delay, value) expect(Concurrent::Promises.zip( *20.times.map do |i| - throttle.throttled { |trigger| trigger.then(throttle, &testing) } + throttle.throttled_future_chain { |trigger| trigger.then(throttle, &testing) } end).value!.all? { |v| v <= limit }).to be_truthy expect(Concurrent::Promises.zip( *20.times.map do |i| - throttle.then_throttled(throttle, &testing) + throttle.throttled_future(throttle, &testing) end).value!.all? 
{ |v| v <= limit }).to be_truthy expect(Concurrent::Promises.zip( From 399d276e0b2c4ace317dfedefc62275d440ba9c1 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Sat, 24 Dec 2016 00:09:01 +0100 Subject: [PATCH 62/68] Deal with bad executor early --- lib/concurrent/actor.rb | 8 ++++++-- lib/concurrent/actor/core.rb | 1 - 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/lib/concurrent/actor.rb b/lib/concurrent/actor.rb index 4f47c5527..9d0d40694 100644 --- a/lib/concurrent/actor.rb +++ b/lib/concurrent/actor.rb @@ -65,10 +65,14 @@ def self.root # @param args see {.to_spawn_options} # @return [Reference] never the actual actor def self.spawn(*args, &block) + options = to_spawn_options(*args) + if options[:executor] && options[:executor].is_a?(ImmediateExecutor) + raise ArgumentError, 'ImmediateExecutor is not supported' + end if Actor.current - Core.new(to_spawn_options(*args).merge(parent: Actor.current), &block).reference + Core.new(options.merge(parent: Actor.current), &block).reference else - root.ask([:spawn, to_spawn_options(*args), block]).value! + root.ask([:spawn, options, block]).value! end end diff --git a/lib/concurrent/actor/core.rb b/lib/concurrent/actor/core.rb index e531fccfa..1546dfe0c 100644 --- a/lib/concurrent/actor/core.rb +++ b/lib/concurrent/actor/core.rb @@ -172,7 +172,6 @@ def ns_initialize(opts, &block) allocate_context @executor = Type! opts.fetch(:executor, @context.default_executor), Concurrent::AbstractExecutorService - raise ArgumentError, 'ImmediateExecutor is not supported' if @executor.is_a? ImmediateExecutor @reference = (Child! opts[:reference_class] || @context.default_reference_class, Reference).new self @name = (Type! 
opts.fetch(:name), String, Symbol).to_s From a0a7702580e240de9ddb5d6358e02fd81cf1c23e Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Sat, 24 Dec 2016 16:30:35 +0100 Subject: [PATCH 63/68] Remove deadlocking std logger --- examples/init.rb | 2 +- lib/concurrent/configuration.rb | 68 +++++++++++++-------------- spec/concurrent/edge/promises_spec.rb | 1 - spec/spec_helper.rb | 2 +- 4 files changed, 36 insertions(+), 37 deletions(-) diff --git a/examples/init.rb b/examples/init.rb index 4fdb8550e..7166a4a76 100644 --- a/examples/init.rb +++ b/examples/init.rb @@ -4,4 +4,4 @@ def do_stuff :stuff end -Concurrent.use_stdlib_logger Logger::DEBUG +Concurrent.use_simple_logger Logger::DEBUG diff --git a/lib/concurrent/configuration.rb b/lib/concurrent/configuration.rb index 9b3953757..b4774bfce 100644 --- a/lib/concurrent/configuration.rb +++ b/lib/concurrent/configuration.rb @@ -10,46 +10,46 @@ module Concurrent extend Concern::Logging - autoload :Options, 'concurrent/options' - autoload :TimerSet, 'concurrent/executor/timer_set' + autoload :Options, 'concurrent/options' + autoload :TimerSet, 'concurrent/executor/timer_set' autoload :ThreadPoolExecutor, 'concurrent/executor/thread_pool_executor' # @return [Logger] Logger with provided level and output. - def self.create_stdlib_logger(level = Logger::FATAL, output = $stderr) - logger = Logger.new(output) - logger.level = level - logger.formatter = lambda do |severity, datetime, progname, msg| - formatted_message = case msg + def self.create_simple_logger(level = Logger::FATAL, output = $stderr) + # TODO (pitr-ch 24-Dec-2016): figure out why it had to be replaced, stdlogger was deadlocking + lambda do |severity, progname, message = nil, &block| + return false if severity < level + + message = block ? 
block.call : message + formatted_message = case message when String - msg + message when Exception format "%s (%s)\n%s", - msg.message, msg.class, (msg.backtrace || []).join("\n") + message.message, message.class, (message.backtrace || []).join("\n") else - msg.inspect + message.inspect end - format "[%s] %5s -- %s: %s\n", - datetime.strftime('%Y-%m-%d %H:%M:%S.%L'), - severity, - progname, - formatted_message - end - lambda do |loglevel, progname, message = nil, &block| - logger.add loglevel, message, progname, &block + output.print format "[%s] %5s -- %s: %s\n", + Time.now.strftime('%Y-%m-%d %H:%M:%S.%L'), + Logger::SEV_LABEL[severity], + progname, + formatted_message + true end end - # Use logger created by #create_stdlib_logger to log concurrent-ruby messages. - def self.use_stdlib_logger(level = Logger::FATAL, output = $stderr) - Concurrent.global_logger = create_stdlib_logger level, output + # Use logger created by #create_simple_logger to log concurrent-ruby messages. + def self.use_simple_logger(level = Logger::FATAL, output = $stderr) + Concurrent.global_logger = create_simple_logger level, output end # Suppresses all output when used for logging. 
NULL_LOGGER = lambda { |level, progname, message = nil, &block| } # @!visibility private - GLOBAL_LOGGER = AtomicReference.new(create_stdlib_logger(Logger::WARN)) + GLOBAL_LOGGER = AtomicReference.new(create_simple_logger(Logger::WARN)) private_constant :GLOBAL_LOGGER def self.global_logger @@ -131,23 +131,23 @@ def self.executor(executor_identifier) def self.new_fast_executor(opts = {}) FixedThreadPool.new( - [2, Concurrent.processor_count].max, - auto_terminate: opts.fetch(:auto_terminate, true), - idletime: 60, # 1 minute - max_queue: 0, # unlimited - fallback_policy: :abort # shouldn't matter -- 0 max queue + [2, Concurrent.processor_count].max, + auto_terminate: opts.fetch(:auto_terminate, true), + idletime: 60, # 1 minute + max_queue: 0, # unlimited + fallback_policy: :abort # shouldn't matter -- 0 max queue ) end def self.new_io_executor(opts = {}) ThreadPoolExecutor.new( - min_threads: [2, Concurrent.processor_count].max, - max_threads: ThreadPoolExecutor::DEFAULT_MAX_POOL_SIZE, - # max_threads: 1000, - auto_terminate: opts.fetch(:auto_terminate, true), - idletime: 60, # 1 minute - max_queue: 0, # unlimited - fallback_policy: :abort # shouldn't matter -- 0 max queue + min_threads: [2, Concurrent.processor_count].max, + max_threads: ThreadPoolExecutor::DEFAULT_MAX_POOL_SIZE, + # max_threads: 1000, + auto_terminate: opts.fetch(:auto_terminate, true), + idletime: 60, # 1 minute + max_queue: 0, # unlimited + fallback_policy: :abort # shouldn't matter -- 0 max queue ) end end diff --git a/spec/concurrent/edge/promises_spec.rb b/spec/concurrent/edge/promises_spec.rb index 48c933f97..0970d7c00 100644 --- a/spec/concurrent/edge/promises_spec.rb +++ b/spec/concurrent/edge/promises_spec.rb @@ -1,7 +1,6 @@ require 'concurrent/edge/promises' require 'thread' -Concurrent.use_stdlib_logger Logger::DEBUG describe 'Concurrent::Promises' do diff --git a/spec/spec_helper.rb b/spec/spec_helper.rb index ef3bbe748..30205f522 100644 --- a/spec/spec_helper.rb +++ 
b/spec/spec_helper.rb @@ -24,7 +24,7 @@ require 'concurrent' require 'concurrent-edge' -Concurrent.use_stdlib_logger Logger::FATAL +Concurrent.use_simple_logger Logger::FATAL # import all the support files Dir[File.join(File.dirname(__FILE__), 'support/**/*.rb')].each { |f| require File.expand_path(f) } From b8ec2e33a9a84b258acc1c33a94484f0ef111030 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Sat, 24 Dec 2016 16:47:17 +0100 Subject: [PATCH 64/68] Prefer spawn! over spawn in actor specs --- spec/concurrent/actor_spec.rb | 52 +++++++++++++++++------------------ 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/spec/concurrent/actor_spec.rb b/spec/concurrent/actor_spec.rb index 2603a38d3..ee77bd21e 100644 --- a/spec/concurrent/actor_spec.rb +++ b/spec/concurrent/actor_spec.rb @@ -24,7 +24,7 @@ def initialize(queue) def on_message(message) case message when :child - AdHoc.spawn(:pong, @queue) { |queue| -> m { queue << m } } + AdHoc.spawn!(:pong, @queue) { |queue| -> m { queue << m } } else @queue << message message @@ -33,16 +33,16 @@ def on_message(message) end it 'forbids Immediate executor' do - expect { Utils::AdHoc.spawn name: 'test', executor: ImmediateExecutor.new }.to raise_error + expect { Utils::AdHoc.spawn! name: 'test', executor: ImmediateExecutor.new }.to raise_error end describe 'spawning' do - describe 'Actor#spawn' do + describe 'Actor#spawn!' 
do behaviour = -> v { -> _ { v } } - subjects = { spawn: -> { Actor.spawn(AdHoc, :ping, 'arg', &behaviour) }, - context_spawn: -> { AdHoc.spawn(:ping, 'arg', &behaviour) }, - spawn_by_hash: -> { Actor.spawn(class: AdHoc, name: :ping, args: ['arg'], &behaviour) }, - context_spawn_by_hash: -> { AdHoc.spawn(name: :ping, args: ['arg'], &behaviour) } } + subjects = { spawn: -> { Actor.spawn!(AdHoc, :ping, 'arg', &behaviour) }, + context_spawn: -> { AdHoc.spawn!(:ping, 'arg', &behaviour) }, + spawn_by_hash: -> { Actor.spawn!(class: AdHoc, name: :ping, args: ['arg'], &behaviour) }, + context_spawn_by_hash: -> { AdHoc.spawn!(name: :ping, args: ['arg'], &behaviour) } } subjects.each do |desc, subject_definition| describe desc do @@ -89,14 +89,14 @@ def on_message(message) end it 'terminates on failed message processing' do - a = AdHoc.spawn(name: :fail, logger: Concurrent::NULL_LOGGER) { -> _ { raise } } + a = AdHoc.spawn!(name: :fail, logger: Concurrent::NULL_LOGGER) { -> _ { raise } } expect(a.ask(nil).wait.rejected?).to be_truthy expect(a.ask!(:terminated?)).to be_truthy end end describe 'messaging' do - subject { AdHoc.spawn(:add) { c = 0; -> v { c = c + v } } } + subject { AdHoc.spawn!(:add) { c = 0; -> v { c = c + v } } } specify do subject.tell(1).tell(1) subject << 1 << 1 @@ -107,10 +107,10 @@ def on_message(message) describe 'children' do let(:parent) do - AdHoc.spawn(:parent) do + AdHoc.spawn!(:parent) do -> message do if message == :child - AdHoc.spawn(:child) { -> _ { parent } } + AdHoc.spawn!(:child) { -> _ { parent } } else children end @@ -128,7 +128,7 @@ def on_message(message) end describe 'envelope' do - subject { AdHoc.spawn(:subject) { -> _ { envelope } } } + subject { AdHoc.spawn!(:subject) { -> _ { envelope } } } specify do envelope = subject.ask!('a') expect(envelope).to be_a_kind_of Envelope @@ -142,8 +142,8 @@ def on_message(message) describe 'termination' do subject do - AdHoc.spawn(:parent) do - child = AdHoc.spawn(:child) { -> v { v } } + 
AdHoc.spawn!(:parent) do + child = AdHoc.spawn!(:child) { -> v { v } } -> v { child } end end @@ -171,8 +171,8 @@ def on_message(message) describe 'message redirecting' do let(:parent) do - AdHoc.spawn(:parent) do - child = AdHoc.spawn(:child) { -> m { m+1 } } + AdHoc.spawn!(:parent) do + child = AdHoc.spawn!(:child) { -> m { m+1 } } -> message do if message == :child child @@ -192,9 +192,9 @@ def on_message(message) queue = Queue.new failure = nil # FIXME this leads to weird message processing ordering - # failure = AdHoc.spawn(:failure) { -> m { terminate! } } + # failure = AdHoc.spawn!(:failure) { -> m { terminate! } } monitor = AdHoc.spawn!(:monitor) do - failure = AdHoc.spawn(:failure) { -> m { m } } + failure = AdHoc.spawn!(:failure) { -> m { m } } failure << :link -> m { queue << [m, envelope.sender] } end @@ -209,7 +209,7 @@ def on_message(message) queue = Queue.new failure = nil monitor = AdHoc.spawn!(:monitor) do - failure = AdHoc.spawn(name: :failure, link: true) { -> m { m } } + failure = AdHoc.spawn!(name: :failure, link: true) { -> m { m } } -> m { queue << [m, envelope.sender] } end @@ -225,8 +225,8 @@ def on_message(message) queue = Queue.new resuming_behaviour = Behaviour.restarting_behaviour_definition(:resume!) - test = AdHoc.spawn name: :tester, behaviour_definition: resuming_behaviour do - actor = AdHoc.spawn name: :pausing, behaviour_definition: Behaviour.restarting_behaviour_definition do + test = AdHoc.spawn! name: :tester, behaviour_definition: resuming_behaviour do + actor = AdHoc.spawn! name: :pausing, behaviour_definition: Behaviour.restarting_behaviour_definition do queue << :init -> m { m == :add ? 1 : pass } end @@ -248,8 +248,8 @@ def on_message(message) it 'pauses on error and resets' do queue = Queue.new - test = AdHoc.spawn name: :tester, behaviour_definition: Behaviour.restarting_behaviour_definition do - actor = AdHoc.spawn name: :pausing, behaviour_definition: Behaviour.restarting_behaviour_definition do + test = AdHoc.spawn! 
name: :tester, behaviour_definition: Behaviour.restarting_behaviour_definition do + actor = AdHoc.spawn! name: :pausing, behaviour_definition: Behaviour.restarting_behaviour_definition do queue << :init -> m { m == :object_id ? self.object_id : pass } end @@ -284,9 +284,9 @@ def on_message(message) end end - test = AdHoc.spawn name: :tester, behaviour_definition: resuming_behaviour do + test = AdHoc.spawn! name: :tester, behaviour_definition: resuming_behaviour do - actor = AdHoc.spawn name: :pausing, + actor = AdHoc.spawn! name: :pausing, behaviour_definition: Behaviour.restarting_behaviour_definition do queue << :init -> m { m == :add ? 1 : pass } @@ -316,7 +316,7 @@ def on_message(message) it 'supports asks', buggy: true do children = Queue.new pool = Concurrent::Actor::Utils::Pool.spawn! 'pool', 5 do |index| - worker = Concurrent::Actor::Utils::AdHoc.spawn name: "worker-#{index}", supervised: true do + worker = Concurrent::Actor::Utils::AdHoc.spawn! name: "worker-#{index}", supervised: true do lambda do |message| fail if message == :fail 5 + message From eb81fffed0be650bdb2aaaf3d65fc7e6ea6e3b2d Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Sat, 24 Dec 2016 16:47:57 +0100 Subject: [PATCH 65/68] avoid "can't be called from trap context" errors in ThreadLocalVar --- .../atomic/ruby_thread_local_var.rb | 36 ++++++++++--------- 1 file changed, 20 insertions(+), 16 deletions(-) diff --git a/lib/concurrent/atomic/ruby_thread_local_var.rb b/lib/concurrent/atomic/ruby_thread_local_var.rb index 4ec041e27..06afae731 100644 --- a/lib/concurrent/atomic/ruby_thread_local_var.rb +++ b/lib/concurrent/atomic/ruby_thread_local_var.rb @@ -29,8 +29,8 @@ class RubyThreadLocalVar < AbstractThreadLocalVar # array, so we don't leak memory # @!visibility private - FREE = [] - LOCK = Mutex.new + FREE = [] + LOCK = Mutex.new ARRAYS = {} # used as a hash set @@next = 0 private_constant :FREE, :LOCK, :ARRAYS @@ -72,9 +72,9 @@ def value=(value) def allocate_storage @index = 
LOCK.synchronize do FREE.pop || begin - result = @@next - @@next += 1 - result + result = @@next + @@next += 1 + result end end ObjectSpace.define_finalizer(self, self.class.threadlocal_finalizer(@index)) @@ -83,13 +83,15 @@ def allocate_storage # @!visibility private def self.threadlocal_finalizer(index) proc do - LOCK.synchronize do - FREE.push(index) - # The cost of GC'ing a TLV is linear in the number of threads using TLVs - # But that is natural! More threads means more storage is used per TLV - # So naturally more CPU time is required to free more storage - ARRAYS.each_value do |array| - array[index] = nil + Thread.new do # avoid error: can't be called from trap context + LOCK.synchronize do + FREE.push(index) + # The cost of GC'ing a TLV is linear in the number of threads using TLVs + # But that is natural! More threads means more storage is used per TLV + # So naturally more CPU time is required to free more storage + ARRAYS.each_value do |array| + array[index] = nil + end end end end @@ -98,10 +100,12 @@ def self.threadlocal_finalizer(index) # @!visibility private def self.thread_finalizer(array) proc do - LOCK.synchronize do - # The thread which used this thread-local array is now gone - # So don't hold onto a reference to the array (thus blocking GC) - ARRAYS.delete(array.object_id) + Thread.new do # avoid error: can't be called from trap context + LOCK.synchronize do + # The thread which used this thread-local array is now gone + # So don't hold onto a reference to the array (thus blocking GC) + ARRAYS.delete(array.object_id) + end end end end From 456980eef4815ccc1c0161644fc23469cb92a505 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Sat, 24 Dec 2016 17:54:40 +0100 Subject: [PATCH 66/68] Minor fixes --- README.md | 2 ++ lib/concurrent-edge.rb | 1 + lib/concurrent/atomic/thread_local_var.rb | 12 ++++++------ lib/concurrent/atomic_reference/jruby+truffle.rb | 3 ++- lib/concurrent/edge/promises.rb | 2 +- lib/concurrent/synchronization/rbx_object.rb | 1 
+ 6 files changed, 13 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 2f5f1de75..d74a84b1a 100644 --- a/README.md +++ b/README.md @@ -136,6 +136,8 @@ be obeyed though. Features developed in `concurrent-ruby-edge` are expected to m *Why are these not in core?* +- **Promises Framework** - They are being finalized to be able to be moved to core. They'll deprecate old + implementation. - **Actor** - Partial documentation and tests; depends on new future/promise framework; stability is good. - **Channel** - Brand new implementation; partial documentation and tests; stability is good. - **LazyRegister** - Missing documentation and tests. diff --git a/lib/concurrent-edge.rb b/lib/concurrent-edge.rb index ad818acef..59c5fa7d8 100644 --- a/lib/concurrent-edge.rb +++ b/lib/concurrent-edge.rb @@ -9,6 +9,7 @@ require 'concurrent/edge/atomic_markable_reference' require 'concurrent/edge/lock_free_linked_set' require 'concurrent/edge/lock_free_queue' +require 'concurrent/edge/lock_free_stack' require 'concurrent/edge/promises' require 'concurrent/edge/cancellation' diff --git a/lib/concurrent/atomic/thread_local_var.rb b/lib/concurrent/atomic/thread_local_var.rb index e64a2d268..f86de15cc 100644 --- a/lib/concurrent/atomic/thread_local_var.rb +++ b/lib/concurrent/atomic/thread_local_var.rb @@ -11,7 +11,7 @@ module Concurrent # Creates a thread local variable. # # @param [Object] default the default value when otherwise unset - # @param [Proc] block Optional block that gets called to obtain the + # @param [Proc] default_block Optional block that gets called to obtain the # default value for each thread # @!macro [new] thread_local_var_method_get @@ -72,28 +72,28 @@ module Concurrent # the current thread will ever see that change. 
# # @!macro thread_safe_variable_comparison - # + # # @example # v = ThreadLocalVar.new(14) # v.value #=> 14 # v.value = 2 # v.value #=> 2 - # + # # @example # v = ThreadLocalVar.new(14) - # + # # t1 = Thread.new do # v.value #=> 14 # v.value = 1 # v.value #=> 1 # end - # + # # t2 = Thread.new do # v.value #=> 14 # v.value = 2 # v.value #=> 2 # end - # + # # v.value #=> 14 # # @see https://docs.oracle.com/javase/7/docs/api/java/lang/ThreadLocal.html Java ThreadLocal diff --git a/lib/concurrent/atomic_reference/jruby+truffle.rb b/lib/concurrent/atomic_reference/jruby+truffle.rb index 75c675078..ebb52dc38 100644 --- a/lib/concurrent/atomic_reference/jruby+truffle.rb +++ b/lib/concurrent/atomic_reference/jruby+truffle.rb @@ -1 +1,2 @@ -require 'concurrent/atomic_reference/mutex_atomic' +require 'atomic' +require 'concurrent/atomic_reference/rbx' diff --git a/lib/concurrent/edge/promises.rb b/lib/concurrent/edge/promises.rb index 86b58037b..6bfac5725 100644 --- a/lib/concurrent/edge/promises.rb +++ b/lib/concurrent/edge/promises.rb @@ -229,7 +229,7 @@ def schedule(intended_time, *args, &task) # @param [Numeric, Time] intended_time `Numeric` means to run in `intended_time` seconds. # `Time` means to run on `intended_time`. 
def schedule_on(default_executor, intended_time, *args, &task) - ScheduledPromise.new(default_executor, intended_time).future.then(*args, &task) + ScheduledPromise.new(default_executor, intended_time).event.chain(*args, &task) end # @!macro promises.shortcut.on diff --git a/lib/concurrent/synchronization/rbx_object.rb b/lib/concurrent/synchronization/rbx_object.rb index 302535992..b9a0e3f90 100644 --- a/lib/concurrent/synchronization/rbx_object.rb +++ b/lib/concurrent/synchronization/rbx_object.rb @@ -30,6 +30,7 @@ def #{name}=(value) def full_memory_barrier # Rubinius instance variables are not volatile so we need to insert barrier + # TODO (pitr 26-Nov-2015): check comments like ^ Rubinius.memory_barrier end end From a756a70119d38b21802cafab7bc5f8b70a4365da Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Tue, 27 Dec 2016 01:11:18 +0100 Subject: [PATCH 67/68] Documentation updates --- doc/promises-main.md | 79 +++++--- doc/promises.in.md | 222 ++++++++++++++++---- doc/promises.init.rb | 2 - doc/promises.out.md | 349 +++++++++++++++++++++++--------- lib/concurrent/edge/promises.rb | 17 +- 5 files changed, 489 insertions(+), 180 deletions(-) diff --git a/doc/promises-main.md b/doc/promises-main.md index dde704dba..526b2405c 100644 --- a/doc/promises-main.md +++ b/doc/promises-main.md @@ -1,34 +1,48 @@ -# Description - -Promises is a new framework unifying former `Concurrent::Future`, +Promises is a new framework unifying former tools `Concurrent::Future`, `Concurrent::Promise`, `Concurrent::IVar`, `Concurrent::Event`, -`Concurrent.dataflow`, `Delay`, `TimerTask` . It extensively uses the new -synchronization layer to make all the methods *lock-free* (with the exception -of obviously blocking operations like `#wait`, `#value`, etc.). As a result it -lowers a danger of deadlocking and offers better performance. 
- -It provides tools as other promise libraries, users coming from other languages -and other promise libraries will find the same tools here (probably named -differently though). The naming convention borrows heavily from JS promises. +`Concurrent.dataflow`, `Delay`, and `TimerTask` of concurrent-ruby. It +extensively uses the new synchronization layer to make all the methods +*lock-free* (with the exception of obviously blocking operations like `#wait`, +`#value`, etc.). As a result it lowers danger of deadlocking and offers +better performance. + +It provides similar tools as other promise libraries do, users coming from +other languages and other promise libraries will find the same tools here +(probably named differently though). The naming conventions were borrowed +heavily from JS promises. -This framework however is not just a re-implementation of other promise -library, it takes inspiration from many other promise libraries, adds new -ideas, and integrates with other abstractions like actors and channels. -Therefore it is much more likely that user fill find a suitable solution for -his problem in this library, or if needed he will be able to combine parts -which were designed to work together well (rather than having to combine -fragilely independent tools). - -> *Note:* The channel and actor integration is younger and will stay in edge for -> a little longer than core promises. - -> *TODO* -> -> - What is it? -> - What is it for? -> - Main classes {Future}, {Event} -> - Explain pool usage :io vs :fast, and `_on` `_using` suffixes. -> - Why is this better than other solutions, integration actors and channels +This framework, however, is not just a re-implementation of other promise +library, it draws inspiration from many other promise libraries, adds new +ideas, and is integrated with other abstractions like actors and channels. + +Therefore it is likely that user will find a suitable solution for a problem in +this framework. 
If the problem is simple user can pick one suitable +abstraction, e.g. just promises or actors. If the problem is complex user can +combine parts (promises, channels, actors) which were designed to work together +well to a solution. Rather than having to combine fragilely independent tools. + +This framework allows its users to: + +- Process tasks asynchronously +- Chain, branch, and zip the asynchronous tasks together + - Therefore, to create directed acyclic graph (hereafter DAG) of tasks +- Create delayed tasks (or delayed DAG of tasks) +- Create scheduled tasks (or delayed DAG of tasks) +- Deal with errors through rejections +- Reduce danger of deadlocking +- Control the concurrency level of tasks +- Simulate thread-like processing without occupying threads + - It allows to create tens of thousands simulations on one thread + pool + - It works well on all Ruby implementations +- Use actors to maintain isolated states and to seamlessly combine + it with promises +- Build parallel processing stream system with back + pressure (parts, which are not keeping up, signal to the other parts of the + system to slow down). + +**The guide is best place to start with promises, see** +**{file:doc/promises.out.md}.** # Main classes @@ -36,9 +50,12 @@ The main public user-facing classes are {Concurrent::Promises::Event} and {Concurrent::Promises::Future} which share common ancestor {Concurrent::Promises::AbstractEventFuture}. -**Event:** +**{Concurrent::Promises::AbstractEventFuture}:** +> {include:Concurrent::Promises::AbstractEventFuture} + +**{Concurrent::Promises::Event}:** > {include:Concurrent::Promises::Event} -**Future:** +**{Concurrent::Promises::Future}:** > {include:Concurrent::Promises::Future} diff --git a/doc/promises.in.md b/doc/promises.in.md index 4c77ba3f3..0d1d06614 100644 --- a/doc/promises.in.md +++ b/doc/promises.in.md @@ -7,7 +7,7 @@ FactoryMethods. They are not designed for inheritance but rather for composition. 
```ruby -Concurrent::Promises::FactoryMethods.instance_methods false +Concurrent::Promises::FactoryMethods.instance_methods ``` The module can be included or extended where needed. @@ -438,6 +438,27 @@ future.fulfill 1 rescue $! future.fulfill 2, false ``` +## How are promises executed? + +Promises use global pools to execute the tasks. Therefore each task may run on +different thread which implies that users have to be careful not to depend on +Thread local variables (or they have to set at the begging of the task and +cleaned up at the end of the task). + +Since the tasks are running on may different threads of the thread pool, it's +better to follow following rules: + +- Use only data passed in through arguments or values of parent futures, to + have better control over what are futures accessing. +- The data passed in and out of futures are easier to deal with if they are + immutable or at least treated as such. +- Any mutable and mutated object accessed by more than one threads or futures + must be thread safe, see {Concurrent::Array}, {Concurrent::Hash}, and + {Concurrent::Map}. (Value of a future may be consumed by many futures.) +- Futures can access outside objects, but they has to be thread-safe. + +> *TODO: This part to be extended* + # Advanced ## Callbacks @@ -470,6 +491,25 @@ Promises.future_on(:fast) { 2 }. value.size ``` +## Run (simulated process) + +Similar to flatting is running. When `run` is called on a future it will flat +indefinitely as long the future fulfils into a `Future` value. It can be used +to simulate a thread like processing without actually occupying the thread. + +```ruby +count = lambda do |v| + v += 1 + v < 5 ? Promises.future_on(:fast, v, &count) : v +end +400.times. + map { Promises.future_on(:fast, 0, &count).run.value! }. + all? { |v| v == 5 } +``` + +Therefore the above example finished fine on the the `:fast` thread pool even +though it has much less threads than there is the simulated process. 
+ # Interoperability ## Actors @@ -500,10 +540,47 @@ The `ask` method returns future. ```ruby actor.ask(2).then(&:succ).value! ``` +## ProcessingActor + +> *TODO: Documentation to be added in few days* + +## Channel + +There is an implementation of channel as well. Lets start by creating a +channel with capacity 2 messages. + +```ruby +ch1 = Concurrent::Promises::Channel.new 2 +``` -## Channels +We push 3 messages, it can be observed that the last future representing the +push is not fulfilled since the capacity prevents it. When the work which fills +the channel depends on the futures created by push it can be used to create +back pressure – the filling work is delayed until the channel has space for +more messages. -> *TODO: To be added* +```ruby +pushes = 3.times.map { |i| ch1.push i } +ch1.pop.value! +pushes +``` + +A selection over channels can be created with select_channel factory method. It +will be fulfilled with a first message available in any of the channels. It +returns a pair to be able to find out which channel had the message available. + +```ruby +ch2 = Concurrent::Promises::Channel.new 2 +result = Concurrent::Promises.select_channel(ch1, ch2) +result.value! + +Promises.future { 1+1 }.then_push_channel(ch1) +result = ( + Concurrent::Promises.fulfilled_future('%02d') & + Concurrent::Promises.select_channel(ch1, ch2)). + then { |format, (channel, value)| format format, value } +result.value! +``` # Use-cases @@ -573,7 +650,7 @@ results = 3.times.map { computer.ask [:run, -> { sleep 0.1; :result }] } computer.ask(:status).value! results.map(&:value!) ``` -## Too many threads / fibers +## Solving the Thread count limit by thread simulation Sometimes an application requires to process a lot of tasks concurrently. If the number of concurrent tasks is high enough than it is not possible to create @@ -606,7 +683,7 @@ Promises.future(0, &body).run.value! # => 5 This solution works well an any Ruby implementation. 
-> TODO add more complete example +> *TODO: More examples to be added.* ## Cancellation @@ -771,55 +848,116 @@ end # futures.map(&:value!) ``` -## Long stream of tasks +## Long stream of tasks, applying back pressure + +Lets assume that we queuing an API for a data and the queries can be faster +than we are able to process them. This example shows how to use channel as a +buffer and how to apply back pressure to slow down the queries. + +```ruby +require 'json' # + +channel = Promises::Channel.new 6 +source, token = Concurrent::Cancellation.create + +def query_random_text(token, channel) + Promises.future do + # for simplicity the query is omitted + # url = 'some api' + # Net::HTTP.get(URI(url)) + sleep 0.1 + { 'message' => + 'Lorem ipsum rhoncus scelerisque vulputate diam inceptos' + }.to_json + end.then(token) do |value, token| + # The push to channel is fulfilled only after the message is successfully + # published to the channel, therefore it will not continue querying until + # current message is pushed. + channel.push(value) | + # It could wait on the push indefinitely if the token is not checked + # here with `or` (the pipe). + token.to_future + end.flat_future.then(token) do |_, token| + # query again after the message is pushed to buffer + query_random_text(token, channel) unless token.canceled? + end +end + +words = [] +words_throttle = Concurrent::Throttle.new 1 + +def count_words_in_random_text(token, channel, words, words_throttle) + channel.pop.then do |response| + string = JSON.load(response)['message'] + # processing is slower than querying + sleep 0.2 + words_count = string.scan(/\w+/).size + end.then_throttled_by(words_throttle, words) do |words_count, words| + # safe since throttled to only 1 task at a time + words << words_count + end.then(token) do |_, token| + # count words in next message + unless token.canceled? 
+ count_words_in_random_text(token, channel, words, words_throttle) + end + end +end + +query_processes = 3.times.map do + Promises.future(token, channel, &method(:query_random_text)).run +end + +word_counter_processes = 2.times.map do + Promises.future(token, channel, words, words_throttle, + &method(:count_words_in_random_text)).run +end + +sleep 0.5 +``` + +Let it run for a while then cancel it and ensure that the runs all fulfilled +(therefore ended) after the cancellation. Finally print the result. -> TODO Channel +```ruby +source.cancel +query_processes.map(&:wait!) +word_counter_processes.map(&:wait!) +words +``` -## Parallel enumerable ? +Compared to using threads directly this is highly configurable and compostable +solution. -> TODO ## Periodic task -> TODO revisit, use cancellation, add to library +By combining `schedule`, `run` and `Cancellation` periodically executed task +can be easily created. ```ruby -def schedule_job(interval, &job) - # schedule the first execution and chain restart of the job - Promises.schedule(interval, &job).chain do |fulfilled, continue, reason| - if fulfilled - schedule_job(interval, &job) if continue - else - # handle error - reason - # retry sooner - schedule_job(interval, &job) - end - end +repeating_scheduled_task = -> interval, token, task do + Promises. + # Schedule the task. + schedule(interval, token, &task). + # If successful schedule again. + # Alternatively use chain to schedule always. 
+ then { repeating_scheduled_task.call(interval, token, task) } end -queue = Queue.new -count = 0 -interval = 0.05 # small just not to delay execution of this example - -schedule_job interval do - queue.push count - count += 1 - # to continue scheduling return true, false will end the task - if count < 4 - # to continue scheduling return true - true - else - # close the queue with nil to simplify reading it - queue.push nil - # to end the task return false - false +cancellation, token = Concurrent::Cancellation.create + +task = -> token do + 5.times do + token.raise_if_canceled + # do stuff + print '.' + sleep 0.01 end end - # read the queue -arr, v = [], nil; arr << v while (v = queue.pop) # - # arr has the results from the executed scheduled tasks -arr +result = Promises.future(0.1, token, task, &repeating_scheduled_task).run +sleep 0.2 +cancellation.cancel +result.result ``` diff --git a/doc/promises.init.rb b/doc/promises.init.rb index a84fa2d11..c3ed8aafb 100644 --- a/doc/promises.init.rb +++ b/doc/promises.init.rb @@ -3,5 +3,3 @@ def do_stuff :stuff end - -# Concurrent.use_stdlib_logger Logger::DEBUG diff --git a/doc/promises.out.md b/doc/promises.out.md index cd9b3c05a..0155468ad 100644 --- a/doc/promises.out.md +++ b/doc/promises.out.md @@ -7,7 +7,7 @@ FactoryMethods. They are not designed for inheritance but rather for composition. ```ruby -Concurrent::Promises::FactoryMethods.instance_methods false +Concurrent::Promises::FactoryMethods.instance_methods # => [:zip, # :create, # :delay, @@ -34,7 +34,8 @@ Concurrent::Promises::FactoryMethods.instance_methods false # :any_fulfilled_future, # :any_fulfilled_future_on, # :any_event, -# :any_event_on] +# :any_event_on, +# :select_channel] ``` The module can be included or extended where needed. 
@@ -47,17 +48,17 @@ Class.new do resolvable_event end end.new.a_method -# => <#Concurrent::Promises::ResolvableEvent:0x7fb4ba3e8c78 pending> +# => <#Concurrent::Promises::ResolvableEvent:0x7fece40e07d0 pending> Module.new { extend Concurrent::Promises::FactoryMethods }.resolvable_event -# => <#Concurrent::Promises::ResolvableEvent:0x7fb4ba3e2850 pending> +# => <#Concurrent::Promises::ResolvableEvent:0x7fece4178990 pending> ``` The module is already extended into {Concurrent::Promises} for convenience. ```ruby Concurrent::Promises.resolvable_event -# => <#Concurrent::Promises::ResolvableEvent:0x7fb4ba3e08e8 pending> +# => <#Concurrent::Promises::ResolvableEvent:0x7fece40dd198 pending> ``` For this guide we introduce a shortcut in `main` so we can call the factory @@ -66,7 +67,7 @@ methods in following examples by using `Promisses` directly. ```ruby Promises = Concurrent::Promises Promises.resolvable_event -# => <#Concurrent::Promises::ResolvableEvent:0x7fb4ba3d8be8 pending> +# => <#Concurrent::Promises::ResolvableEvent:0x7fece40da740 pending> ``` ## Asynchronous task @@ -82,7 +83,7 @@ future = Promises.future(0.1) do |duration| sleep duration :result end -# => <#Concurrent::Promises::Future:0x7fb4ba3c8248 pending> +# => <#Concurrent::Promises::Future:0x7fece40d2108 pending> ``` Asks if the future is resolved, here it will be still in the middle of the @@ -103,7 +104,7 @@ If the task fails we talk about the future being rejected. ```ruby future = Promises.future { raise 'Boom' } -# => <#Concurrent::Promises::Future:0x7fb4ba3b1bd8 pending> +# => <#Concurrent::Promises::Future:0x7fece4170808 pending> ``` There is no result, the future was rejected with a reason. @@ -198,20 +199,20 @@ through evaluation as follows. ```ruby Promises.future { :value } -# => <#Concurrent::Promises::Future:0x7fb4ba322230 pending> +# => <#Concurrent::Promises::Future:0x7fece092fed0 pending> ``` Instead it can be created directly. 
```ruby Promises.fulfilled_future(:value) -# => <#Concurrent::Promises::Future:0x7fb4ba31a648 fulfilled> +# => <#Concurrent::Promises::Future:0x7fece10db4e0 fulfilled> Promises.rejected_future(StandardError.new('Ups')) -# => <#Concurrent::Promises::Future:0x7fb4ba319298 rejected> +# => <#Concurrent::Promises::Future:0x7fece10d3768 rejected> Promises.resolved_future(true, :value, nil) -# => <#Concurrent::Promises::Future:0x7fb4ba3133e8 fulfilled> +# => <#Concurrent::Promises::Future:0x7fece10cba90 fulfilled> Promises.resolved_future(false, nil, StandardError.new('Ups')) -# => <#Concurrent::Promises::Future:0x7fb4ba311700 rejected> +# => <#Concurrent::Promises::Future:0x7fece10c97e0 rejected> ``` ## Chaining @@ -252,9 +253,9 @@ do_stuff arg }`) is **required**, both following examples may break. ```ruby arg = 1 # => 1 Thread.new { do_stuff arg } -# => # +# => # Promises.future { do_stuff arg } -# => <#Concurrent::Promises::Future:0x7fb4ba2a0dc0 pending> +# => <#Concurrent::Promises::Future:0x7fece4052a48 pending> ``` ## Branching, and zipping @@ -316,7 +317,7 @@ Promises. result # => [false, # nil, -# #>] +# #>] ``` As `then` chained tasks execute only on fulfilled futures, there is a `rescue` @@ -364,7 +365,7 @@ Zip is rejected if any of the zipped futures is. rejected_zip = Promises.zip( Promises.fulfilled_future(1), Promises.rejected_future(StandardError.new('Ups'))) -# => <#Concurrent::Promises::Future:0x7fb4bc332390 rejected> +# => <#Concurrent::Promises::Future:0x7fece2b54538 rejected> rejected_zip.result # => [false, [1, nil], [nil, #]] rejected_zip. @@ -379,11 +380,11 @@ requiring resolution. ```ruby future = Promises.delay { sleep 0.1; 'lazy' } -# => <#Concurrent::Promises::Future:0x7fb4bc3188f0 pending> +# => <#Concurrent::Promises::Future:0x7fece2b3d018 pending> sleep 0.1 future.resolved? 
# => false future.touch -# => <#Concurrent::Promises::Future:0x7fb4bc3188f0 pending> +# => <#Concurrent::Promises::Future:0x7fece2b3d018 pending> sleep 0.2 future.resolved? # => true ``` @@ -460,7 +461,7 @@ Schedule task to be executed in 0.1 seconds. ```ruby scheduled = Promises.schedule(0.1) { 1 } -# => <#Concurrent::Promises::Future:0x7fb4bc288958 pending> +# => <#Concurrent::Promises::Future:0x7fece2acca98 pending> scheduled.resolved? # => false ``` @@ -485,7 +486,7 @@ Time can be used as well. ```ruby Promises.schedule(Time.now + 10) { :val } -# => <#Concurrent::Promises::Future:0x7fb4bc252330 pending> +# => <#Concurrent::Promises::Future:0x7fece2aa7f40 pending> ``` ## Resolvable Future and Event: @@ -497,7 +498,7 @@ Sometimes it is required to resolve a future externally, in these cases ```ruby future = Promises.resolvable_future -# => <#Concurrent::Promises::ResolvableFuture:0x7fb4bc250620 pending> +# => <#Concurrent::Promises::ResolvableFuture:0x7fece2aa5948 pending> ``` The thread will be blocked until the future is resolved @@ -505,7 +506,7 @@ The thread will be blocked until the future is resolved ```ruby thread = Thread.new { future.value } future.fulfill 1 -# => <#Concurrent::Promises::ResolvableFuture:0x7fb4bc250620 fulfilled> +# => <#Concurrent::Promises::ResolvableFuture:0x7fece2aa5948 fulfilled> thread.value # => 1 ``` @@ -517,14 +518,35 @@ future.fulfill 1 rescue $! future.fulfill 2, false # => false ``` +## How are promises executed? + +Promises use global pools to execute the tasks. Therefore each task may run on +different thread which implies that users have to be careful not to depend on +Thread local variables (or they have to set at the begging of the task and +cleaned up at the end of the task). 
+
+Since the tasks are running on many different threads of the thread pool, it's
+better to follow the following rules:
+
+- Use only data passed in through arguments or values of parent futures, to
+  have better control over what the futures are accessing.
+- The data passed in and out of futures is easier to deal with if it is
+  immutable or at least treated as such.
+- Any mutable and mutated object accessed by more than one thread or future
+  must be thread-safe, see {Concurrent::Array}, {Concurrent::Hash}, and
+  {Concurrent::Map}. (Value of a future may be consumed by many futures.)
+- Futures can access outside objects, but they have to be thread-safe.
+
+> *TODO: This part to be extended*
+
 # Advanced
 
 ## Callbacks
 
 ```ruby
-queue = Queue.new # => #
+queue = Queue.new # => #
 future = Promises.delay { 1 + 1 }
-# => <#Concurrent::Promises::Future:0x7fb4bb9d4730 pending>
+# => <#Concurrent::Promises::Future:0x7fece2a85670 pending>
 
 future.on_fulfillment { queue << 1 } # evaluated asynchronously
 future.on_fulfillment! { queue << 2 } # evaluated on resolving thread
@@ -547,9 +569,29 @@ and `:io` for blocking and long tasks.
 ```ruby
 Promises.future_on(:fast) { 2 }.
     then_on(:io) { File.read __FILE__ }.
-    value.size # => 18764
+    value.size # => 23539
 ```
 
+## Run (simulated process)
+
+Similar to flattening is running. When `run` is called on a future it flattens
+indefinitely as long as the future fulfills into a `Future` value. It can be
+used to simulate thread-like processing without actually occupying the thread.
+
+```ruby
+count = lambda do |v|
+  v += 1
+  v < 5 ? Promises.future_on(:fast, v, &count) : v
+end
+# => #
+400.times.
+    map { Promises.future_on(:fast, 0, &count).run.value! }.
+    all? { |v| v == 5 } # => true
+```
+
+Therefore the above example finished fine on the `:fast` thread pool even
+though it has far fewer threads than there are simulated processes.
+
 # Interoperability
 
 ## Actors
 
 Create an actor which takes received numbers and returns the number squared.
 
 ```ruby
 actor = Concurrent::Actor::Utils::AdHoc.spawn :square do
   -> v { v ** 2 }
 end
-# => #
+# => #
 ```
 
 Send result of `1+1` to the actor, and add 2 to the result send back from the
@@ -581,10 +623,59 @@ The `ask` method returns future.
 
 ```ruby
 actor.ask(2).then(&:succ).value! # => 5
 ```
+## ProcessActor
+
+> *TODO: Documentation to be added in a few days*
+
+## Channel
+
+There is an implementation of a channel as well. Let's start by creating a
+channel with a capacity of 2 messages.
+
+```ruby
+ch1 = Concurrent::Promises::Channel.new 2
+# => <#Concurrent::Promises::Channel:0x7fece2a17e18 size:2>
+```
 
-## Channels
+We push 3 messages; it can be observed that the last future representing the
+push is not fulfilled since the capacity prevents it. When the work which fills
+the channel depends on the futures created by push it can be used to create
+back pressure – the filling work is delayed until the channel has space for
+more messages.
 
-> *TODO: To be added*
+```ruby
+pushes = 3.times.map { |i| ch1.push i }
+# => [<#Concurrent::Promises::Future:0x7fece29fe030 fulfilled>,
+#     <#Concurrent::Promises::Future:0x7fece29fd590 fulfilled>,
+#     <#Concurrent::Promises::Future:0x7fece29fc1e0 pending>]
+ch1.pop.value! # => 0
+pushes
+# => [<#Concurrent::Promises::Future:0x7fece29fe030 fulfilled>,
+#     <#Concurrent::Promises::Future:0x7fece29fd590 fulfilled>,
+#     <#Concurrent::Promises::Future:0x7fece29fc1e0 fulfilled>]
+```
+
+A selection over channels can be created with the select_channel factory method. It
+will be fulfilled with the first message available in any of the channels. It
+returns a pair to be able to find out which channel had the message available.
+ +```ruby +ch2 = Concurrent::Promises::Channel.new 2 +# => <#Concurrent::Promises::Channel:0x7fece29d7d40 size:2> +result = Concurrent::Promises.select_channel(ch1, ch2) +# => <#Concurrent::Promises::ResolvableFuture:0x7fece29d6f30 fulfilled> +result.value! +# => [<#Concurrent::Promises::Channel:0x7fece2a17e18 size:2>, 1] + +Promises.future { 1+1 }.then_push_channel(ch1) +# => <#Concurrent::Promises::Future:0x7fece29cdc00 pending> +result = ( + Concurrent::Promises.fulfilled_future('%02d') & + Concurrent::Promises.select_channel(ch1, ch2)). + then { |format, (channel, value)| format format, value } +# => <#Concurrent::Promises::Future:0x7fece29c49c0 pending> +result.value! # => "02" +``` # Use-cases @@ -592,17 +683,17 @@ actor.ask(2).then(&:succ).value! # => 5 ```ruby Promises.future { do_stuff } -# => <#Concurrent::Promises::Future:0x7fb4bb947740 pending> +# => <#Concurrent::Promises::Future:0x7fece29b44f8 pending> ``` ## Parallel background processing ```ruby tasks = 4.times.map { |i| Promises.future(i) { |i| i*2 } } -# => [<#Concurrent::Promises::Future:0x7fb4bb93f090 pending>, -# <#Concurrent::Promises::Future:0x7fb4bb93e488 pending>, -# <#Concurrent::Promises::Future:0x7fb4bb93d6f0 pending>, -# <#Concurrent::Promises::Future:0x7fb4bb93c778 pending>] +# => [<#Concurrent::Promises::Future:0x7fece29a47d8 pending>, +# <#Concurrent::Promises::Future:0x7fece4192fc0 pending>, +# <#Concurrent::Promises::Future:0x7fece4191e68 pending>, +# <#Concurrent::Promises::Future:0x7fece4191148 pending>] Promises.zip(*tasks).value! # => [0, 2, 4, 6] ``` @@ -655,15 +746,15 @@ Create the computer actor and send it 3 jobs. 
```ruby computer = Concurrent::Actor.spawn Computer, :computer -# => # +# => # results = 3.times.map { computer.ask [:run, -> { sleep 0.1; :result }] } -# => [<#Concurrent::Promises::Future:0x7fb4ba990450 pending>, -# <#Concurrent::Promises::Future:0x7fb4ba9897e0 pending>, -# <#Concurrent::Promises::Future:0x7fb4ba988318 pending>] +# => [<#Concurrent::Promises::Future:0x7fece50178c0 pending>, +# <#Concurrent::Promises::Future:0x7fece5015c50 pending>, +# <#Concurrent::Promises::Future:0x7fece5015020 pending>] computer.ask(:status).value! # => {:running_jobs=>3} results.map(&:value!) # => [:result, :result, :result] ``` -## Too many threads / fibers +## Solving the Thread count limit by thread simulation Sometimes an application requires to process a lot of tasks concurrently. If the number of concurrent tasks is high enough than it is not possible to create @@ -696,7 +787,7 @@ Promises.future(0, &body).run.value! # => 5 This solution works well an any Ruby implementation. -> TODO add more complete example +> *TODO: More examples to be added.* ## Cancellation @@ -706,8 +797,8 @@ Lets have two processes which will count until cancelled. ```ruby source, token = Concurrent::Cancellation.create -# => [<#Concurrent::Cancellation:0x7fb4ba1bc300 canceled:false>, -# <#Concurrent::Cancellation::Token:0x7fb4ba1b7670 canceled:false>] +# => [<#Concurrent::Cancellation:0x7fece4071678 canceled:false>, +# <#Concurrent::Cancellation::Token:0x7fece406a508 canceled:false>] count_until_cancelled = -> token, count do if token.canceled? @@ -720,12 +811,12 @@ end futures = Array.new(2) do Promises.future(token, 0, &count_until_cancelled).run end -# => [<#Concurrent::Promises::Future:0x7fb4ba13d578 pending>, -# <#Concurrent::Promises::Future:0x7fb4ba13c308 pending>] +# => [<#Concurrent::Promises::Future:0x7fece4050478 pending>, +# <#Concurrent::Promises::Future:0x7fece403b0a0 pending>] sleep 0.01 source.cancel # => true -futures.map(&:value!) # => [35, 34] +futures.map(&:value!) 
# => [63, 63] ``` Cancellation can also be used as event or future to log or plan re-execution. @@ -744,8 +835,8 @@ tasks share a cancellation, when one of them fails it cancels the others. ```ruby source, token = Concurrent::Cancellation.create -# => [<#Concurrent::Cancellation:0x7fb4ba053130 canceled:false>, -# <#Concurrent::Cancellation::Token:0x7fb4ba051fb0 canceled:false>] +# => [<#Concurrent::Cancellation:0x7fece2a7c638 canceled:false>, +# <#Concurrent::Cancellation::Token:0x7fece2a77a70 canceled:false>] tasks = 4.times.map do |i| Promises.future(source, token, i) do |source, token, i| count = 0 @@ -761,10 +852,10 @@ tasks = 4.times.map do |i| end end end -# => [<#Concurrent::Promises::Future:0x7fb4ba03b0f8 pending>, -# <#Concurrent::Promises::Future:0x7fb4ba0397a8 pending>, -# <#Concurrent::Promises::Future:0x7fb4ba038308 pending>, -# <#Concurrent::Promises::Future:0x7fb4ba0332b8 pending>] +# => [<#Concurrent::Promises::Future:0x7fece2a741b8 pending>, +# <#Concurrent::Promises::Future:0x7fece2a6eee8 pending>, +# <#Concurrent::Promises::Future:0x7fece2a6d520 pending>, +# <#Concurrent::Promises::Future:0x7fece2a6c8a0 pending>] Promises.zip(*tasks).result # => [false, # [:cancelled, :cancelled, nil, :cancelled], @@ -775,8 +866,8 @@ Without the randomly failing part it produces following. 
```ruby source, token = Concurrent::Cancellation.create -# => [<#Concurrent::Cancellation:0x7fb4bb9ee0b8 canceled:false>, -# <#Concurrent::Cancellation::Token:0x7fb4bb9ed9d8 canceled:false>] +# => [<#Concurrent::Cancellation:0x7fece2a26670 canceled:false>, +# <#Concurrent::Cancellation::Token:0x7fece2a26198 canceled:false>] tasks = 4.times.map do |i| Promises.future(source, token, i) do |source, token, i| count = 0 @@ -885,7 +976,7 @@ DB_INTERNAL_POOL = Concurrent::Array.new data # "*********"] max_tree = Concurrent::Throttle.new 3 -# => <#Concurrent::Throttle:0x7fb4ba2f22d8 limit:3 can_run:3> +# => <#Concurrent::Throttle:0x7fece0843be8 limit:3 can_run:3> futures = 11.times.map do |i| max_tree. @@ -900,55 +991,131 @@ futures.map(&:value!) # => [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, "undefined method `size' for nil:NilClass"] ``` -## Long stream of tasks +## Long stream of tasks, applying back pressure + +> *TODO: To be added, parallel IO query > buffer > parallel processing, include a bad example as well + +Lets assume that we queuing an API for a data and the queries can be faster +than we are able to process them. This example shows how to use channel as a +buffer and how to apply back pressure to slow down the queries. 
-> TODO Channel +```ruby +require 'json' + +channel = Promises::Channel.new 6 +# => <#Concurrent::Promises::Channel:0x7fece41918f0 size:6> +source, token = Concurrent::Cancellation.create +# => [<#Concurrent::Cancellation:0x7fece4190428 canceled:false>, +# <#Concurrent::Cancellation::Token:0x7fece418bec8 canceled:false>] + +def query_random_text(token, channel) + Promises.future do + # for simplicity the query is omitted + # url = 'some api' + # Net::HTTP.get(URI(url)) + sleep 0.1 + { 'message' => + 'Lorem ipsum rhoncus scelerisque vulputate diam inceptos' + }.to_json + end.then(token) do |value, token| + # It could wait on the push indefinitely if the token is not checked here + # with or (the pipe) + channel.push(value) | token.to_future + end.flat_future.then(token) do |_, token| + # query again after the message is pushed to buffer + query_random_text(token, channel) unless token.canceled? + end +end + +words = [] # => [] +words_throttle = Concurrent::Throttle.new 1 +# => <#Concurrent::Throttle:0x7fece29973a8 limit:1 can_run:1> + +def count_words_in_random_text(token, channel, words, words_throttle) + channel.pop.then do |response| + string = JSON.load(response)['message'] + # processing is slower than querying + sleep 0.2 + words_count = string.scan(/\w+/).size + end.then_throttled_by(words_throttle, words) do |words_count, words| + # safe since throttled to only 1 task at a time + words << words_count + end.then(token) do |_, token| + # count words in next message + unless token.canceled? 
+ count_words_in_random_text(token, channel, words, words_throttle) + end + end +end + +query_processes = 3.times.map do + Promises.future(token, channel, &method(:query_random_text)).run +end +# => [<#Concurrent::Promises::Future:0x7fece41818d8 pending>, +# <#Concurrent::Promises::Future:0x7fece40e20f8 pending>, +# <#Concurrent::Promises::Future:0x7fece40e0668 pending>] + +word_counter_processes = 2.times.map do + Promises.future(token, channel, words, words_throttle, + &method(:count_words_in_random_text)).run +end +# => [<#Concurrent::Promises::Future:0x7fece40dc860 pending>, +# <#Concurrent::Promises::Future:0x7fece40db230 pending>] -## Parallel enumerable ? +sleep 0.5 # => 1 +``` + +Let it run for a while then cancel it and ensure that the runs all fulfil +(therefore end) after the cancellation. Finally print the result. + +```ruby +source.cancel # => true +query_processes.map(&:wait!) +# => [<#Concurrent::Promises::Future:0x7fece41818d8 fulfilled>, +# <#Concurrent::Promises::Future:0x7fece40e20f8 fulfilled>, +# <#Concurrent::Promises::Future:0x7fece40e0668 fulfilled>] +word_counter_processes.map(&:wait!) +# => [<#Concurrent::Promises::Future:0x7fece40dc860 fulfilled>, +# <#Concurrent::Promises::Future:0x7fece40db230 fulfilled>] +words # => [7, 7, 7, 7] +``` -> TODO ## Periodic task -> TODO revisit, use cancellation, add to library +By combining `schedule`, `run` and `Cancellation` periodically executed task +can be easily created. ```ruby -def schedule_job(interval, &job) - # schedule the first execution and chain restart of the job - Promises.schedule(interval, &job).chain do |fulfilled, continue, reason| - if fulfilled - schedule_job(interval, &job) if continue - else - # handle error - reason - # retry sooner - schedule_job(interval, &job) - end - end +repeating_scheduled_task = -> interval, token, task do + Promises. + # Schedule the task. + schedule(interval, token, &task). + # If successful schedule again. + # Alternatively use chain to schedule always. 
+ then { repeating_scheduled_task.call(interval, token, task) } end -queue = Queue.new # => # -count = 0 # => 0 -interval = 0.05 # small just not to delay execution of this example - -schedule_job interval do - queue.push count - count += 1 - # to continue scheduling return true, false will end the task - if count < 4 - # to continue scheduling return true - true - else - # close the queue with nil to simplify reading it - queue.push nil - # to end the task return false - false +cancellation, token = Concurrent::Cancellation.create +# => [<#Concurrent::Cancellation:0x7fece294db90 canceled:false>, +# <#Concurrent::Cancellation::Token:0x7fece2947ba0 canceled:false>] + +task = -> token do + 5.times do + token.raise_if_canceled + # do stuff + print '.' + sleep 0.01 end end - # read the queue -arr, v = [], nil; arr << v while (v = queue.pop) - # arr has the results from the executed scheduled tasks -arr # => [0, 1, 2, 3] +result = Promises.future(0.1, token, task, &repeating_scheduled_task).run +# => <#Concurrent::Promises::Future:0x7fece28ce750 pending> +sleep 0.2 # => 0 +cancellation.cancel # => true +result.result +# => [false, +# nil, +# #] ``` diff --git a/lib/concurrent/edge/promises.rb b/lib/concurrent/edge/promises.rb index 6bfac5725..c90b06ec5 100644 --- a/lib/concurrent/edge/promises.rb +++ b/lib/concurrent/edge/promises.rb @@ -7,10 +7,6 @@ module Concurrent - # # Guide - # - # The guide is **best place** to start with promises, see {file:doc/promises.out.md}. - # # {include:file:doc/promises-main.md} module Promises @@ -502,7 +498,7 @@ def RESOLVED.to_sym private_constant :InternalStates - # Common ancestor of {Event} and {Future} classes + # Common ancestor of {Event} and {Future} classes, many shared methods are defined here. class AbstractEventFuture < Synchronization::Object safe_initialization! private(*attr_atomic(:internal_state) - [:internal_state]) @@ -558,11 +554,6 @@ def resolved?(state = internal_state) state.resolved? 
end - # @deprecated - def unscheduled? - raise 'unsupported' - end - # Propagates touch. Requests all the delayed futures, which it depends on, to be # executed. This method is called by any other method requiring resolved state, like {#wait}. # @return [self] @@ -1331,10 +1322,6 @@ def state def touch end - def to_s - format '<#%s:0x%x>', self.class, object_id << 1 - end - alias_method :inspect, :to_s def delayed @@ -1439,8 +1426,10 @@ def self.add_delayed(delayed, blocker) def initialize(delayed, blockers_count, future) super(future) + # noinspection RubyArgCount @Touched = AtomicBoolean.new false @Delayed = delayed + # noinspection RubyArgCount @Countdown = AtomicFixnum.new blockers_count end From 0f33d0914be27958df0b359905d3af512e3ffe55 Mon Sep 17 00:00:00 2001 From: Petr Chalupa Date: Tue, 27 Dec 2016 01:13:53 +0100 Subject: [PATCH 68/68] Add Promise::Channel --- .../edge/old_channel_integration.rb | 54 ++++++++ lib/concurrent/edge/promises.rb | 129 ++++++++++++++---- spec/concurrent/edge/promises_spec.rb | 91 ++++++++++-- 3 files changed, 234 insertions(+), 40 deletions(-) create mode 100644 lib/concurrent/edge/old_channel_integration.rb diff --git a/lib/concurrent/edge/old_channel_integration.rb b/lib/concurrent/edge/old_channel_integration.rb new file mode 100644 index 000000000..fbdbba728 --- /dev/null +++ b/lib/concurrent/edge/old_channel_integration.rb @@ -0,0 +1,54 @@ +module Concurrent + module Promises + module FactoryMethods + + # @!visibility private + + module OldChannelIntegration + + # @!visibility private + + # only proof of concept + # @return [Future] + def select(*channels) + # TODO (pitr-ch 26-Mar-2016): re-do, has to be non-blocking + future do + # noinspection RubyArgCount + Channel.select do |s| + channels.each do |ch| + s.take(ch) { |value| [value, ch] } + end + end + end + end + end + + include OldChannelIntegration + end + + class Future < AbstractEventFuture + + # @!visibility private + + module OldChannelIntegration + + # 
@!visibility private + + # Zips with selected value form the suplied channels + # @return [Future] + def then_select(*channels) + future = Concurrent::Promises.select(*channels) + ZipFuturesPromise.new_blocked_by2(self, future, @DefaultExecutor).future + end + + # @note may block + # @note only proof of concept + def then_put(channel) + on_fulfillment_using(:io, channel) { |value, channel| channel.put value } + end + end + + include OldChannelIntegration + end + end +end diff --git a/lib/concurrent/edge/promises.rb b/lib/concurrent/edge/promises.rb index c90b06ec5..36b5ba3a2 100644 --- a/lib/concurrent/edge/promises.rb +++ b/lib/concurrent/edge/promises.rb @@ -1951,57 +1951,128 @@ def then_ask(actor) include ActorIntegration end - ### Experimental features follow + class Channel < Concurrent::Synchronization::Object + safe_initialization! - module FactoryMethods + # Default size of the Channel, makes it accept unlimited number of messages. + UNLIMITED = Object.new + UNLIMITED.singleton_class.class_eval do + include Comparable - # @!visibility private + def <=>(other) + 1 + end - module ChannelIntegration + def to_s + 'unlimited' + end + end + + # A channel to pass messages between promises. The size is limited to support back pressure. + # @param [Integer, UNLIMITED] size the maximum number of messages stored in the channel. + def initialize(size = UNLIMITED) + super() + @Size = size + # TODO (pitr-ch 26-Dec-2016): replace with lock-free implementation + @Mutex = Mutex.new + @Probes = [] + @Messages = [] + @PendingPush = [] + end - # @!visibility private - # only proof of concept - # @return [Future] - def select(*channels) - # TODO (pitr-ch 26-Mar-2016): re-do, has to be non-blocking - future do - # noinspection RubyArgCount - Channel.select do |s| - channels.each do |ch| - s.take(ch) { |value| [value, ch] } + # Returns future which will fulfill when the message is added to the channel. Its value is the message. 
+ # @param [Object] message + # @return [Future] + def push(message) + @Mutex.synchronize do + while true + if @Probes.empty? + if @Size > @Messages.size + @Messages.push message + return Promises.fulfilled_future message + else + pushed = Promises.resolvable_future + @PendingPush.push [message, pushed] + return pushed.with_hidden_resolvable + end + else + probe = @Probes.shift + if probe.fulfill [self, message], false + return Promises.fulfilled_future(message) end end end end end - include ChannelIntegration + # Returns a future witch will become fulfilled with a value from the channel when one is available. + # @param [ResolvableFuture] probe the future which will be fulfilled with a channel value + # @return [Future] the probe, its value will be the message when available. + def pop(probe = Concurrent::Promises.resolvable_future) + # TODO (pitr-ch 26-Dec-2016): improve performance + pop_for_select(probe).then(&:last) + end + + # @!visibility private + def pop_for_select(probe = Concurrent::Promises.resolvable_future) + @Mutex.synchronize do + if @Messages.empty? + @Probes.push probe + else + message = @Messages.shift + probe.fulfill [self, message] + + unless @PendingPush.empty? + message, pushed = @PendingPush.shift + @Messages.push message + pushed.fulfill message + end + end + end + probe + end + + # @return [String] Short string representation. + def to_s + format '<#%s:0x%x size:%s>', self.class, object_id << 1, @Size + end + + alias_method :inspect, :to_s end class Future < AbstractEventFuture + module NewChannelIntegration - # @!visibility private + # @param [Channel] channel to push to. + # @return [Future] a future which is fulfilled after the message is pushed to the channel. + # May take a moment if the channel is full. 
+ def then_push_channel(channel) + self.then { |value| channel.push value }.flat_future + end + + # TODO (pitr-ch 26-Dec-2016): does it make sense to have rescue an chain variants as well, check other integrations as well + end - module ChannelIntegration + include NewChannelIntegration + end - # @!visibility private + module FactoryMethods - # Zips with selected value form the suplied channels - # @return [Future] - def then_select(*channels) - future = Concurrent::Promises.select(*channels) - ZipFuturesPromise.new_blocked_by2(self, future, @DefaultExecutor).future - end + module NewChannelIntegration - # @note may block - # @note only proof of concept - def then_put(channel) - on_fulfillment_using(:io, channel) { |value, channel| channel.put value } + # Selects a channel which is ready to be read from. + # @param [Channel] channels + # @return [Future] a future which is fulfilled with pair [channel, message] when one of the channels is + # available for reading + def select_channel(*channels) + probe = Promises.resolvable_future + channels.each { |ch| ch.pop_for_select probe } + probe end end - include ChannelIntegration + include NewChannelIntegration end end diff --git a/spec/concurrent/edge/promises_spec.rb b/spec/concurrent/edge/promises_spec.rb index 0970d7c00..5635791aa 100644 --- a/spec/concurrent/edge/promises_spec.rb +++ b/spec/concurrent/edge/promises_spec.rb @@ -426,19 +426,17 @@ def behaves_as_delay(delay, value) end it 'with channel' do - ch1 = Concurrent::Channel.new - ch2 = Concurrent::Channel.new + ch1 = Concurrent::Promises::Channel.new + ch2 = Concurrent::Promises::Channel.new - result = Concurrent::Promises.select(ch1, ch2) - ch1.put 1 - expect(result.value!).to eq [1, ch1] + result = Concurrent::Promises.select_channel(ch1, ch2) + ch1.push 1 + expect(result.value!).to eq [ch1, 1] - future { 1+1 }. - then_put(ch1) - result = future { '%02d' }. - then_select(ch1, ch2). 
- then { |format, (value, channel)| format format, value } + future { 1+1 }.then_push_channel(ch1) + result = (Concurrent::Promises.future { '%02d' } & Concurrent::Promises.select_channel(ch1, ch2)). + then { |format, (channel, value)| format format, value } expect(result.value!).to eq '02' end end @@ -487,7 +485,7 @@ def behaves_as_delay(delay, value) describe 'Throttling' do specify do - limit = 4 + limit = 4 throttle = Concurrent::Throttle.new limit counter = Concurrent::AtomicFixnum.new testing = -> *args do @@ -523,4 +521,75 @@ def behaves_as_delay(delay, value) end).value!.all? { |v| v <= limit }).to be_truthy end end + + describe 'Promises::Channel' do + specify do + channel = Concurrent::Promises::Channel.new 1 + + pushed1 = channel.push 1 + expect(pushed1.resolved?).to be_truthy + expect(pushed1.value!).to eq 1 + + pushed2 = channel.push 2 + expect(pushed2.resolved?).to be_falsey + + popped = channel.pop + expect(pushed1.value!).to eq 1 + expect(pushed2.resolved?).to be_truthy + expect(pushed2.value!).to eq 2 + expect(popped.value!).to eq 1 + + popped = channel.pop + expect(popped.value!).to eq 2 + + popped = channel.pop + expect(popped.resolved?).to be_falsey + + pushed3 = channel.push 3 + expect(popped.value!).to eq 3 + expect(pushed3.resolved?).to be_truthy + expect(pushed3.value!).to eq 3 + end + + specify do + ch1 = Concurrent::Promises::Channel.new + ch2 = Concurrent::Promises::Channel.new + ch3 = Concurrent::Promises::Channel.new + + add = -> do + (ch1.pop & ch2.pop).then do |a, b| + if a == :done && b == :done + :done + else + ch3.push a + b + add.call + end + end + end + + ch1.push 1 + ch2.push 2 + ch1.push 'a' + ch2.push 'b' + ch1.push nil + ch2.push true + + result = Concurrent::Promises.future(&add).run.result + expect(result[0..1]).to eq [false, nil] + expect(result[2]).to be_a_kind_of(NoMethodError) + expect(ch3.pop.value!).to eq 3 + expect(ch3.pop.value!).to eq 'ab' + + ch1.push 1 + ch2.push 2 + ch1.push 'a' + ch2.push 'b' + ch1.push :done + 
ch2.push :done + + expect(Concurrent::Promises.future(&add).run.result).to eq [true, :done, nil] + expect(ch3.pop.value!).to eq 3 + expect(ch3.pop.value!).to eq 'ab' + end + end end