From 5d956215a8ca8c245ca610855b142f809fcc7b08 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Juli=C3=A1n=20Espina?= Date: Thu, 9 Jan 2025 17:22:29 -0600 Subject: [PATCH 1/7] Introduce `NativeAsyncJob` --- cli/src/main.rs | 6 +- core/engine/src/builtins/promise/mod.rs | 8 +- core/engine/src/context/mod.rs | 12 +- core/engine/src/job.rs | 171 +++++++++++++++---- core/engine/src/object/builtins/jspromise.rs | 34 ++-- examples/src/bin/module_fetch_async.rs | 4 +- 6 files changed, 173 insertions(+), 62 deletions(-) diff --git a/cli/src/main.rs b/cli/src/main.rs index 45ece5bf58f..b9b75a6ba1a 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -13,7 +13,7 @@ mod helper; use boa_engine::{ builtins::promise::PromiseState, context::ContextBuilder, - job::{FutureJob, JobQueue, NativeJob}, + job::{BoxedFuture, JobQueue, NativeJob}, module::{Module, SimpleModuleLoader}, optimizer::OptimizerOptions, script::Script, @@ -456,7 +456,7 @@ fn add_runtime(context: &mut Context) { struct Jobs(RefCell>); impl JobQueue for Jobs { - fn enqueue_promise_job(&self, job: NativeJob, _: &mut Context) { + fn enqueue_job(&self, job: NativeJob, _: &mut Context) { self.0.borrow_mut().push_back(job); } @@ -474,7 +474,7 @@ impl JobQueue for Jobs { } } - fn enqueue_future_job(&self, future: FutureJob, _: &mut Context) { + fn enqueue_async_job(&self, future: BoxedFuture, _: &mut Context) { let job = pollster::block_on(future); self.0.borrow_mut().push_back(job); } diff --git a/core/engine/src/builtins/promise/mod.rs b/core/engine/src/builtins/promise/mod.rs index 5a580b87a18..fa89985c436 100644 --- a/core/engine/src/builtins/promise/mod.rs +++ b/core/engine/src/builtins/promise/mod.rs @@ -1889,7 +1889,7 @@ impl Promise { // c. Perform HostEnqueuePromiseJob(fulfillJob.[[Job]], fulfillJob.[[Realm]]). context .job_queue() - .enqueue_promise_job(fulfill_job, context); + .enqueue_job(fulfill_job, context); } // 11. Else, @@ -1909,7 +1909,7 @@ impl Promise { let reject_job = new_promise_reaction_job(reject_reaction, reason.clone(), context); // e. Perform HostEnqueuePromiseJob(rejectJob.[[Job]], rejectJob.[[Realm]]). - context.job_queue().enqueue_promise_job(reject_job, context); + context.job_queue().enqueue_job(reject_job, context); // 12. Set promise.[[PromiseIsHandled]] to true. promise @@ -1985,7 +1985,7 @@ impl Promise { let job = new_promise_reaction_job(reaction, argument.clone(), context); // b. Perform HostEnqueuePromiseJob(job.[[Job]], job.[[Realm]]). - context.job_queue().enqueue_promise_job(job, context); + context.job_queue().enqueue_job(job, context); } // 2. Return unused. } @@ -2178,7 +2178,7 @@ impl Promise { ); // 15. Perform HostEnqueuePromiseJob(job.[[Job]], job.[[Realm]]). - context.job_queue().enqueue_promise_job(job, context); + context.job_queue().enqueue_job(job, context); // 16. Return undefined. Ok(JsValue::undefined()) diff --git a/core/engine/src/context/mod.rs b/core/engine/src/context/mod.rs index a5dfd173070..e6635edf8d7 100644 --- a/core/engine/src/context/mod.rs +++ b/core/engine/src/context/mod.rs @@ -1,5 +1,6 @@ //! The ECMAScript context. +use std::cell::RefCell; use std::{cell::Cell, path::Path, rc::Rc}; use boa_ast::StatementList; @@ -13,6 +14,7 @@ use intrinsics::Intrinsics; #[cfg(feature = "temporal")] use temporal_rs::tzdb::FsTzdbProvider; +use crate::job::NativeAsyncJob; use crate::vm::RuntimeLimits; use crate::{ builtins, @@ -470,7 +472,13 @@ impl Context { /// Enqueues a [`NativeJob`] on the [`JobQueue`]. 
#[inline] pub fn enqueue_job(&mut self, job: NativeJob) { - self.job_queue().enqueue_promise_job(job, self); + self.job_queue().enqueue_job(job, self); + } + + /// Enqueues a [`NativeAsyncJob`] on the [`JobQueue`]. + #[inline] + pub fn enqueue_async_job(&mut self, job: NativeAsyncJob) { + self.job_queue().enqueue_async_job(job, self); } /// Runs all the jobs in the job queue. @@ -489,7 +497,7 @@ impl Context { /// provide a custom implementor of `JobQueue` to the context. #[allow(clippy::future_not_send)] pub async fn run_jobs_async(&mut self) { - self.job_queue().run_jobs_async(self).await; + self.job_queue().run_jobs_async(&RefCell::new(self)).await; self.clear_kept_objects(); } diff --git a/core/engine/src/job.rs b/core/engine/src/job.rs index 63cd6b794db..dd3a4f21c31 100644 --- a/core/engine/src/job.rs +++ b/core/engine/src/job.rs @@ -26,9 +26,6 @@ use crate::{ }; use boa_gc::{Finalize, Trace}; -/// The [`Future`] job passed to the [`JobQueue::enqueue_future_job`] operation. -pub type FutureJob = Pin + 'static>>; - /// An ECMAScript [Job] closure. /// /// The specification allows scheduling any [`NativeJob`] closure by the host into the job queue. @@ -133,6 +130,95 @@ impl NativeJob { } } +/// The [`Future`] job passed to the [`JobQueue::enqueue_future_job`] operation. +pub type BoxedFuture<'a> = Pin> + 'a>>; + +/// An ECMAScript [Job] that can be run asynchronously. +/// +/// ## [`Trace`]? +/// +/// `NativeJob` doesn't implement `Trace` because it doesn't need to; all jobs can only be run once +/// and putting a [`JobQueue`] on a garbage collected object is not allowed. +/// +/// Additionally, the garbage collector doesn't need to trace the captured variables because the closures +/// are always stored on the [`JobQueue`], which is always rooted, which means the captured variables +/// are also rooted. +/// +/// [Job]: https://tc39.es/ecma262/#sec-jobs +/// [`NativeFunction`]: crate::native_function::NativeFunction +pub struct NativeAsyncJob { + f: Box FnOnce(&'a RefCell<&mut Context>) -> BoxedFuture<'a>>, + realm: Option, +} + +impl Debug for NativeAsyncJob { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("NativeAsyncJob") + .field("f", &"Closure") + .finish() + } +} + +impl NativeAsyncJob { + /// Creates a new `NativeAsyncJob` from a closure. + pub fn new(f: F) -> Self + where + F: for<'a> FnOnce(&'a RefCell<&mut Context>) -> BoxedFuture<'a> + 'static, + { + Self { + f: Box::new(f), + realm: None, + } + } + + /// Creates a new `NativeJob` from a closure and an execution realm. + pub fn with_realm(f: F, realm: Realm) -> Self + where + F: for<'a> FnOnce(&'a RefCell<&mut Context>) -> BoxedFuture<'a> + 'static, + { + Self { + f: Box::new(f), + realm: Some(realm), + } + } + + /// Gets a reference to the execution realm of the job. + #[must_use] + pub const fn realm(&self) -> Option<&Realm> { + self.realm.as_ref() + } + + /// Calls the native job with the specified [`Context`]. + /// + /// # Note + /// + /// If the native job has an execution realm defined, this sets the running execution + /// context to the realm's before calling the inner closure, and resets it after execution. + pub fn call<'a>( + self, + context: &'a RefCell<&mut Context>, + ) -> impl Future> + use<'a> { + // If realm is not null, each time job is invoked the implementation must perform + // implementation-defined steps such that execution is prepared to evaluate ECMAScript + // code at the time of job's invocation. 
+ if let Some(realm) = self.realm { + let old_realm = context.borrow_mut().enter_realm(realm); + + // Let scriptOrModule be GetActiveScriptOrModule() at the time HostEnqueuePromiseJob is + // invoked. If realm is not null, each time job is invoked the implementation must + // perform implementation-defined steps such that scriptOrModule is the active script or + // module at the time of job's invocation. + let result = (self.f)(context); + + context.borrow_mut().enter_realm(old_realm); + + result + } else { + (self.f)(context) + } + } +} + /// [`JobCallback`][spec] records. /// /// [spec]: https://tc39.es/ecma262/#sec-jobcallback-records @@ -209,7 +295,12 @@ pub trait JobQueue { /// /// [spec]: https://tc39.es/ecma262/#sec-hostenqueuepromisejob /// [Jobs]: https://tc39.es/ecma262/#sec-jobs - fn enqueue_promise_job(&self, job: NativeJob, context: &mut Context); + fn enqueue_job(&self, job: NativeJob, context: &mut Context); + + /// Enqueues a new [`NativeAsyncJob`] job on the job queue. + /// + /// Calling `future` returns a Rust [`Future`] that can be sent to a runtime for concurrent computation. + fn enqueue_async_job(&self, future: NativeAsyncJob, context: &mut Context); /// Runs all jobs in the queue. /// @@ -218,14 +309,6 @@ pub trait JobQueue { /// it should only run one iteration of the queue. fn run_jobs(&self, context: &mut Context); - /// Enqueues a new [`Future`] job on the job queue. - /// - /// On completion, `future` returns a new [`NativeJob`] that needs to be enqueued into the - /// job queue to update the state of the inner `Promise`, which is what ECMAScript sees. Failing - /// to do this will leave the inner `Promise` in the `pending` state, which won't call any `then` - /// or `catch` handlers, even if `future` was already completed. - fn enqueue_future_job(&self, future: FutureJob, context: &mut Context); - /// Asynchronously runs all jobs in the queue. /// /// Running a job could enqueue more jobs in the queue. The implementor of the trait @@ -234,15 +317,15 @@ pub trait JobQueue { /// /// By default forwards to [`JobQueue::run_jobs`]. Implementors using async should override this /// with a proper algorithm to run jobs asynchronously. - fn run_jobs_async<'a, 'ctx, 'fut>( + fn run_jobs_async<'a, 'b, 'fut>( &'a self, - context: &'ctx mut Context, + context: &'b RefCell<&mut Context>, ) -> Pin + 'fut>> where 'a: 'fut, - 'ctx: 'fut, + 'b: 'fut, { - Box::pin(async { self.run_jobs(context) }) + Box::pin(async { self.run_jobs(&mut context.borrow_mut()) }) } } @@ -267,11 +350,11 @@ pub trait JobQueue { pub struct IdleJobQueue; impl JobQueue for IdleJobQueue { - fn enqueue_promise_job(&self, _: NativeJob, _: &mut Context) {} + fn enqueue_job(&self, _: NativeJob, _: &mut Context) {} - fn run_jobs(&self, _: &mut Context) {} + fn enqueue_async_job(&self, _: NativeAsyncJob, _: &mut Context) {} - fn enqueue_future_job(&self, _: FutureJob, _: &mut Context) {} + fn run_jobs(&self, _: &mut Context) {} } /// A simple FIFO job queue that bails on the first error. @@ -281,7 +364,10 @@ impl JobQueue for IdleJobQueue { /// /// To disable running promise jobs on the engine, see [`IdleJobQueue`]. 
#[derive(Default)] -pub struct SimpleJobQueue(RefCell>); +pub struct SimpleJobQueue { + job_queue: RefCell>, + async_job_queue: RefCell>, +} impl Debug for SimpleJobQueue { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { @@ -298,25 +384,40 @@ impl SimpleJobQueue { } impl JobQueue for SimpleJobQueue { - fn enqueue_promise_job(&self, job: NativeJob, _: &mut Context) { - self.0.borrow_mut().push_back(job); + fn enqueue_job(&self, job: NativeJob, _: &mut Context) { + self.job_queue.borrow_mut().push_back(job); + } + + fn enqueue_async_job(&self, job: NativeAsyncJob, _: &mut Context) { + self.async_job_queue.borrow_mut().push_back(job); } fn run_jobs(&self, context: &mut Context) { - // Yeah, I have no idea why Rust extends the lifetime of a `RefCell` that should be immediately - // dropped after calling `pop_front`. - let mut next_job = self.0.borrow_mut().pop_front(); - while let Some(job) = next_job { - if job.call(context).is_err() { - self.0.borrow_mut().clear(); + let context = RefCell::new(context); + loop { + let mut next_job = self.async_job_queue.borrow_mut().pop_front(); + while let Some(job) = next_job { + if pollster::block_on(job.call(&context)).is_err() { + self.async_job_queue.borrow_mut().clear(); + return; + }; + next_job = self.async_job_queue.borrow_mut().pop_front(); + } + + // Yeah, I have no idea why Rust extends the lifetime of a `RefCell` that should be immediately + // dropped after calling `pop_front`. + let mut next_job = self.job_queue.borrow_mut().pop_front(); + while let Some(job) = next_job { + if job.call(&mut context.borrow_mut()).is_err() { + self.job_queue.borrow_mut().clear(); + return; + }; + next_job = self.job_queue.borrow_mut().pop_front(); + } + + if self.async_job_queue.borrow().is_empty() && self.job_queue.borrow().is_empty() { return; - }; - next_job = self.0.borrow_mut().pop_front(); + } } } - - fn enqueue_future_job(&self, future: FutureJob, context: &mut Context) { - let job = pollster::block_on(future); - self.enqueue_promise_job(job, context); - } } diff --git a/core/engine/src/object/builtins/jspromise.rs b/core/engine/src/object/builtins/jspromise.rs index 777113b8cec..e6d24154f40 100644 --- a/core/engine/src/object/builtins/jspromise.rs +++ b/core/engine/src/object/builtins/jspromise.rs @@ -8,7 +8,7 @@ use crate::{ promise::{PromiseState, ResolvingFunctions}, Promise, }, - job::NativeJob, + job::NativeAsyncJob, object::JsObject, value::TryFromJs, Context, JsArgs, JsError, JsNativeError, JsResult, JsValue, NativeFunction, @@ -292,21 +292,23 @@ impl JsPromise { { let (promise, resolvers) = Self::new_pending(context); - let future = async move { - let result = future.await; - - NativeJob::new(move |context| match result { - Ok(v) => resolvers.resolve.call(&JsValue::undefined(), &[v], context), - Err(e) => { - let e = e.to_opaque(context); - resolvers.reject.call(&JsValue::undefined(), &[e], context) - } - }) - }; - - context - .job_queue() - .enqueue_future_job(Box::pin(future), context); + context.job_queue().enqueue_async_job( + NativeAsyncJob::new(move |context| { + Box::pin(async move { + let result = future.await; + + let context = &mut context.borrow_mut(); + match result { + Ok(v) => resolvers.resolve.call(&JsValue::undefined(), &[v], context), + Err(e) => { + let e = e.to_opaque(context); + resolvers.reject.call(&JsValue::undefined(), &[e], context) + } + } + }) + }), + context, + ); promise } diff --git a/examples/src/bin/module_fetch_async.rs b/examples/src/bin/module_fetch_async.rs index 6f9b2a7b09d..1b6b15cb95d 
100644 --- a/examples/src/bin/module_fetch_async.rs +++ b/examples/src/bin/module_fetch_async.rs @@ -2,7 +2,7 @@ use std::{cell::RefCell, collections::VecDeque, future::Future, pin::Pin, rc::Rc use boa_engine::{ builtins::promise::PromiseState, - job::{FutureJob, JobQueue, NativeJob}, + job::{BoxedFuture, JobQueue, NativeJob}, js_string, module::ModuleLoader, Context, JsNativeError, JsResult, JsString, JsValue, Module, @@ -81,7 +81,7 @@ impl ModuleLoader for HttpModuleLoader { // `JobQueue`. context .job_queue() - .enqueue_future_job(Box::pin(fetch), context) + .enqueue_async_job(Box::pin(fetch), context) } } From f1b1d66ba6586318403689fe2262d9f915469cb9 Mon Sep 17 00:00:00 2001 From: jedel1043 Date: Fri, 10 Jan 2025 03:13:22 -0600 Subject: [PATCH 2/7] Finish implementation --- Cargo.lock | 1 + cli/src/main.rs | 34 +++--- core/engine/src/builtins/promise/mod.rs | 4 +- core/engine/src/job.rs | 9 +- examples/Cargo.toml | 1 + examples/src/bin/module_fetch_async.rs | 145 ++++++++++++------------ examples/src/bin/smol_event_loop.rs | 106 ++++++++++++----- examples/src/bin/tokio_event_loop.rs | 108 ++++++++++++------ 8 files changed, 256 insertions(+), 152 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d2cc7e740d9..e9b84e2a181 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -439,6 +439,7 @@ dependencies = [ "boa_parser", "boa_runtime", "futures-concurrency", + "futures-lite 2.5.0", "isahc", "smol", "time", diff --git a/cli/src/main.rs b/cli/src/main.rs index b9b75a6ba1a..f62c47d75c9 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -13,7 +13,7 @@ mod helper; use boa_engine::{ builtins::promise::PromiseState, context::ContextBuilder, - job::{BoxedFuture, JobQueue, NativeJob}, + job::{JobQueue, NativeAsyncJob, NativeJob}, module::{Module, SimpleModuleLoader}, optimizer::OptimizerOptions, script::Script, @@ -453,29 +453,37 @@ fn add_runtime(context: &mut Context) { } #[derive(Default)] -struct Jobs(RefCell>); +struct Jobs { + jobs: RefCell>, + async_jobs: RefCell>, +} impl JobQueue for Jobs { fn enqueue_job(&self, job: NativeJob, _: &mut Context) { - self.0.borrow_mut().push_back(job); + self.jobs.borrow_mut().push_back(job); + } + + fn enqueue_async_job(&self, async_job: NativeAsyncJob, _: &mut Context) { + self.async_jobs.borrow_mut().push_back(async_job); } fn run_jobs(&self, context: &mut Context) { loop { - let jobs = std::mem::take(&mut *self.0.borrow_mut()); - if jobs.is_empty() { + if self.jobs.borrow().is_empty() && self.async_jobs.borrow().is_empty() { return; } - for job in jobs { - if let Err(e) = job.call(context) { - eprintln!("Uncaught {e}"); + let async_jobs = std::mem::take(&mut *self.async_jobs.borrow_mut()); + for async_job in async_jobs { + if let Err(err) = pollster::block_on(async_job.call(&RefCell::new(context))) { + eprintln!("Uncaught {err}"); + } + let jobs = std::mem::take(&mut *self.jobs.borrow_mut()); + for job in jobs { + if let Err(e) = job.call(context) { + eprintln!("Uncaught {e}"); + } } } } } - - fn enqueue_async_job(&self, future: BoxedFuture, _: &mut Context) { - let job = pollster::block_on(future); - self.0.borrow_mut().push_back(job); - } } diff --git a/core/engine/src/builtins/promise/mod.rs b/core/engine/src/builtins/promise/mod.rs index fa89985c436..cda49ce2309 100644 --- a/core/engine/src/builtins/promise/mod.rs +++ b/core/engine/src/builtins/promise/mod.rs @@ -1887,9 +1887,7 @@ impl Promise { new_promise_reaction_job(fulfill_reaction, value.clone(), context); // c. Perform HostEnqueuePromiseJob(fulfillJob.[[Job]], fulfillJob.[[Realm]]). 
- context - .job_queue() - .enqueue_job(fulfill_job, context); + context.job_queue().enqueue_job(fulfill_job, context); } // 11. Else, diff --git a/core/engine/src/job.rs b/core/engine/src/job.rs index dd3a4f21c31..eed3d77a91f 100644 --- a/core/engine/src/job.rs +++ b/core/engine/src/job.rs @@ -146,6 +146,7 @@ pub type BoxedFuture<'a> = Pin> + 'a>> /// /// [Job]: https://tc39.es/ecma262/#sec-jobs /// [`NativeFunction`]: crate::native_function::NativeFunction +#[allow(clippy::type_complexity)] pub struct NativeAsyncJob { f: Box FnOnce(&'a RefCell<&mut Context>) -> BoxedFuture<'a>>, realm: Option, @@ -171,7 +172,7 @@ impl NativeAsyncJob { } } - /// Creates a new `NativeJob` from a closure and an execution realm. + /// Creates a new `NativeAsyncJob` from a closure and an execution realm. pub fn with_realm(f: F, realm: Realm) -> Self where F: for<'a> FnOnce(&'a RefCell<&mut Context>) -> BoxedFuture<'a> + 'static, @@ -188,11 +189,11 @@ impl NativeAsyncJob { self.realm.as_ref() } - /// Calls the native job with the specified [`Context`]. + /// Calls the native async job with the specified [`Context`]. /// /// # Note /// - /// If the native job has an execution realm defined, this sets the running execution + /// If the native async job has an execution realm defined, this sets the running execution /// context to the realm's before calling the inner closure, and resets it after execution. pub fn call<'a>( self, @@ -300,7 +301,7 @@ pub trait JobQueue { /// Enqueues a new [`NativeAsyncJob`] job on the job queue. /// /// Calling `future` returns a Rust [`Future`] that can be sent to a runtime for concurrent computation. - fn enqueue_async_job(&self, future: NativeAsyncJob, context: &mut Context); + fn enqueue_async_job(&self, async_job: NativeAsyncJob, context: &mut Context); /// Runs all jobs in the queue. /// diff --git a/examples/Cargo.toml b/examples/Cargo.toml index f9a3990ce17..de0b103ecea 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -19,6 +19,7 @@ boa_runtime.workspace = true time.workspace = true smol.workspace = true futures-concurrency.workspace = true +futures-lite.workspace = true isahc.workspace = true tokio = { workspace = true, features = ["rt", "rt-multi-thread", "time", "macros"] } diff --git a/examples/src/bin/module_fetch_async.rs b/examples/src/bin/module_fetch_async.rs index 1b6b15cb95d..5925e0900bb 100644 --- a/examples/src/bin/module_fetch_async.rs +++ b/examples/src/bin/module_fetch_async.rs @@ -2,7 +2,7 @@ use std::{cell::RefCell, collections::VecDeque, future::Future, pin::Pin, rc::Rc use boa_engine::{ builtins::promise::PromiseState, - job::{BoxedFuture, JobQueue, NativeJob}, + job::{JobQueue, NativeAsyncJob, NativeJob}, js_string, module::ModuleLoader, Context, JsNativeError, JsResult, JsString, JsValue, Module, @@ -28,60 +28,59 @@ impl ModuleLoader for HttpModuleLoader { ) { let url = specifier.to_std_string_escaped(); - let fetch = async move { - // Adding some prints to show the non-deterministic nature of the async fetches. - // Try to run the example several times to see how sometimes the fetches start in order - // but finish in disorder. - println!("Fetching `{url}`..."); - // This could also retry fetching in case there's an error while requesting the module. - let body: Result<_, isahc::Error> = async { - let mut response = Request::get(&url) - .redirect_policy(RedirectPolicy::Limit(5)) - .body(())? - .send_async() - .await?; - - Ok(response.text().await?) 
- } - .await; - println!("Finished fetching `{url}`"); - - // Since the async context cannot take the `context` by ref, we have to continue - // parsing inside a new `NativeJob` that will be enqueued into the promise job queue. - NativeJob::new(move |context| -> JsResult { - let body = match body { - Ok(body) => body, - Err(err) => { - // On error we always call `finish_load` to notify the load promise about the - // error. - finish_load( - Err(JsNativeError::typ().with_message(err.to_string()).into()), - context, - ); - - // Just returns anything to comply with `NativeJob::new`'s signature. - return Ok(JsValue::undefined()); + // Just enqueue the future for now. We'll advance all the enqueued futures inside our custom + // `JobQueue`. + context.enqueue_async_job(NativeAsyncJob::with_realm( + move |context| { + Box::pin(async move { + // Adding some prints to show the non-deterministic nature of the async fetches. + // Try to run the example several times to see how sometimes the fetches start in order + // but finish in disorder. + println!("Fetching `{url}`..."); + + // This could also retry fetching in case there's an error while requesting the module. + let body: Result<_, isahc::Error> = async { + let mut response = Request::get(&url) + .redirect_policy(RedirectPolicy::Limit(5)) + .body(())? + .send_async() + .await?; + + Ok(response.text().await?) } - }; - - // Could also add a path if needed. - let source = Source::from_bytes(body.as_bytes()); + .await; + + println!("Finished fetching `{url}`"); + + let body = match body { + Ok(body) => body, + Err(err) => { + // On error we always call `finish_load` to notify the load promise about the + // error. + finish_load( + Err(JsNativeError::typ().with_message(err.to_string()).into()), + &mut context.borrow_mut(), + ); + + // Just returns anything to comply with `NativeAsyncJob::new`'s signature. + return Ok(JsValue::undefined()); + } + }; - let module = Module::parse(source, None, context); + // Could also add a path if needed. + let source = Source::from_bytes(body.as_bytes()); - // We don't do any error handling, `finish_load` takes care of that for us. - finish_load(module, context); + let module = Module::parse(source, None, &mut context.borrow_mut()); - // Also needed to match `NativeJob::new`. - Ok(JsValue::undefined()) - }) - }; + // We don't do any error handling, `finish_load` takes care of that for us. + finish_load(module, &mut context.borrow_mut()); - // Just enqueue the future for now. We'll advance all the enqueued futures inside our custom - // `JobQueue`. - context - .job_queue() - .enqueue_async_job(Box::pin(fetch), context) + // Also needed to match `NativeAsyncJob::new`. + Ok(JsValue::undefined()) + }) + }, + context.realm().clone(), + )); } } @@ -170,14 +169,14 @@ fn main() -> JsResult<()> { // Taken from the `smol_event_loop.rs` example. /// An event queue using smol to drive futures to completion. 
struct Queue { - futures: RefCell>, + async_jobs: RefCell>, jobs: RefCell>, } impl Queue { fn new() -> Self { Self { - futures: RefCell::default(), + async_jobs: RefCell::default(), jobs: RefCell::default(), } } @@ -193,66 +192,66 @@ impl Queue { } impl JobQueue for Queue { - fn enqueue_promise_job(&self, job: NativeJob, _context: &mut Context) { + fn enqueue_job(&self, job: NativeJob, _context: &mut Context) { self.jobs.borrow_mut().push_back(job); } - fn enqueue_future_job(&self, future: FutureJob, _context: &mut Context) { - self.futures.borrow_mut().push(future); + fn enqueue_async_job(&self, async_job: NativeAsyncJob, _context: &mut Context) { + self.async_jobs.borrow_mut().push(async_job); } // While the sync flavor of `run_jobs` will block the current thread until all the jobs have finished... fn run_jobs(&self, context: &mut Context) { - smol::block_on(smol::LocalExecutor::new().run(self.run_jobs_async(context))); + smol::block_on(smol::LocalExecutor::new().run(self.run_jobs_async(&RefCell::new(context)))); } // ...the async flavor won't, which allows concurrent execution with external async tasks. - fn run_jobs_async<'a, 'ctx, 'fut>( + fn run_jobs_async<'a, 'b, 'fut>( &'a self, - context: &'ctx mut Context, + context: &'b RefCell<&mut Context>, ) -> Pin + 'fut>> where 'a: 'fut, - 'ctx: 'fut, + 'b: 'fut, { Box::pin(async move { // Early return in case there were no jobs scheduled. - if self.jobs.borrow().is_empty() && self.futures.borrow().is_empty() { + if self.jobs.borrow().is_empty() && self.async_jobs.borrow().is_empty() { return; } let mut group = FutureGroup::new(); loop { - group.extend(std::mem::take(&mut *self.futures.borrow_mut())); + for job in std::mem::take(&mut *self.async_jobs.borrow_mut()) { + group.insert(job.call(context)); + } if self.jobs.borrow().is_empty() { - let Some(job) = group.next().await else { + let Some(result) = group.next().await else { // Both queues are empty. We can exit. return; }; - // Important to schedule the returned `job` into the job queue, since that's - // what allows updating the `Promise` seen by ECMAScript for when the future - // completes. - self.enqueue_promise_job(job, context); + if let Err(err) = result { + eprintln!("Uncaught {err}"); + } continue; } // We have some jobs pending on the microtask queue. Try to poll the pending // tasks once to see if any of them finished, and run the pending microtasks // otherwise. - let Some(job) = future::poll_once(group.next()).await.flatten() else { + let Some(result) = future::poll_once(group.next()).await.flatten() else { // No completed jobs. Run the microtask queue once. - self.drain_jobs(context); + self.drain_jobs(&mut context.borrow_mut()); continue; }; - // Important to schedule the returned `job` into the job queue, since that's - // what allows updating the `Promise` seen by ECMAScript for when the future - // completes. - self.enqueue_promise_job(job, context); + if let Err(err) = result { + eprintln!("Uncaught {err}"); + } // Only one macrotask can be executed before the next drain of the microtask queue. 
- self.drain_jobs(context); + self.drain_jobs(&mut context.borrow_mut()); } }) } diff --git a/examples/src/bin/smol_event_loop.rs b/examples/src/bin/smol_event_loop.rs index 45b9ade8914..f241697e541 100644 --- a/examples/src/bin/smol_event_loop.rs +++ b/examples/src/bin/smol_event_loop.rs @@ -2,17 +2,18 @@ use std::{ cell::RefCell, collections::VecDeque, future::Future, + pin::Pin, rc::Rc, time::{Duration, Instant}, }; use boa_engine::{ context::ContextBuilder, - job::{FutureJob, JobQueue, NativeJob}, + job::{JobQueue, NativeAsyncJob, NativeJob}, js_string, native_function::NativeFunction, property::Attribute, - Context, JsArgs, JsResult, JsValue, Script, Source, + Context, JsArgs, JsNativeError, JsResult, JsValue, Script, Source, }; use boa_runtime::Console; use futures_concurrency::future::FutureGroup; @@ -30,16 +31,17 @@ fn main() { externally_async_event_loop(); } +// Taken from the `smol_event_loop.rs` example. /// An event queue using smol to drive futures to completion. struct Queue { - futures: RefCell>, + async_jobs: RefCell>, jobs: RefCell>, } impl Queue { fn new() -> Self { Self { - futures: RefCell::default(), + async_jobs: RefCell::default(), jobs: RefCell::default(), } } @@ -55,66 +57,66 @@ impl Queue { } impl JobQueue for Queue { - fn enqueue_promise_job(&self, job: NativeJob, _context: &mut Context) { + fn enqueue_job(&self, job: NativeJob, _context: &mut Context) { self.jobs.borrow_mut().push_back(job); } - fn enqueue_future_job(&self, future: FutureJob, _context: &mut Context) { - self.futures.borrow_mut().push(future); + fn enqueue_async_job(&self, async_job: NativeAsyncJob, _context: &mut Context) { + self.async_jobs.borrow_mut().push_back(async_job); } // While the sync flavor of `run_jobs` will block the current thread until all the jobs have finished... fn run_jobs(&self, context: &mut Context) { - smol::block_on(smol::LocalExecutor::new().run(self.run_jobs_async(context))); + smol::block_on(smol::LocalExecutor::new().run(self.run_jobs_async(&RefCell::new(context)))); } // ...the async flavor won't, which allows concurrent execution with external async tasks. - fn run_jobs_async<'a, 'ctx, 'fut>( + fn run_jobs_async<'a, 'b, 'fut>( &'a self, - context: &'ctx mut Context, - ) -> std::pin::Pin + 'fut>> + context: &'b RefCell<&mut Context>, + ) -> Pin + 'fut>> where 'a: 'fut, - 'ctx: 'fut, + 'b: 'fut, { Box::pin(async move { // Early return in case there were no jobs scheduled. - if self.jobs.borrow().is_empty() && self.futures.borrow().is_empty() { + if self.jobs.borrow().is_empty() && self.async_jobs.borrow().is_empty() { return; } let mut group = FutureGroup::new(); loop { - group.extend(std::mem::take(&mut *self.futures.borrow_mut())); + for job in std::mem::take(&mut *self.async_jobs.borrow_mut()) { + group.insert(job.call(context)); + } if self.jobs.borrow().is_empty() { - let Some(job) = group.next().await else { + let Some(result) = group.next().await else { // Both queues are empty. We can exit. return; }; - // Important to schedule the returned `job` into the job queue, since that's - // what allows updating the `Promise` seen by ECMAScript for when the future - // completes. - self.enqueue_promise_job(job, context); + if let Err(err) = result { + eprintln!("Uncaught {err}"); + } continue; } // We have some jobs pending on the microtask queue. Try to poll the pending // tasks once to see if any of them finished, and run the pending microtasks // otherwise. 
- let Some(job) = future::poll_once(group.next()).await.flatten() else { + let Some(result) = future::poll_once(group.next()).await.flatten() else { // No completed jobs. Run the microtask queue once. - self.drain_jobs(context); + self.drain_jobs(&mut context.borrow_mut()); continue; }; - // Important to schedule the returned `job` into the job queue, since that's - // what allows updating the `Promise` seen by ECMAScript for when the future - // completes. - self.enqueue_promise_job(job, context); + if let Err(err) = result { + eprintln!("Uncaught {err}"); + } // Only one macrotask can be executed before the next drain of the microtask queue. - self.drain_jobs(context); + self.drain_jobs(&mut context.borrow_mut()); } }) } @@ -138,6 +140,38 @@ fn delay( } } +// Example interval function. We cannot use a function returning async in this case since it would +// borrow the context for too long, but using a `NativeAsyncJob` we can! +fn interval(this: &JsValue, args: &[JsValue], context: &mut Context) -> JsResult { + let Some(function) = args.get_or_undefined(0).as_callable().cloned() else { + return Err(JsNativeError::typ() + .with_message("arg must be a callable") + .into()); + }; + + let this = this.clone(); + let delay = args.get_or_undefined(1).to_u32(context)?; + let args = args.get(2..).unwrap_or_default().to_vec(); + + context.enqueue_async_job(NativeAsyncJob::with_realm( + move |context| { + Box::pin(async move { + let mut timer = smol::Timer::interval(Duration::from_millis(u64::from(delay))); + for _ in 0..10 { + timer.next().await; + if let Err(err) = function.call(&this, &args, &mut context.borrow_mut()) { + eprintln!("Uncaught {err}"); + } + } + Ok(JsValue::undefined()) + }) + }, + context.realm().clone(), + )); + + Ok(JsValue::undefined()) +} + /// Adds the custom runtime to the context. fn add_runtime(context: &mut Context) { // First add the `console` object, to be able to call `console.log()`. @@ -154,19 +188,37 @@ fn add_runtime(context: &mut Context) { NativeFunction::from_async_fn(delay), ) .expect("the delay builtin shouldn't exist"); + + // Finally, bind the defined async job to the ECMAScript function "interval". + context + .register_global_builtin_callable( + js_string!("timeout"), + 1, + NativeFunction::from_fn_ptr(interval), + ) + .expect("the delay builtin shouldn't exist"); } // Script that does multiple calls to multiple async timers. const SCRIPT: &str = r" function print(elapsed) { - console.log(`Finished delay. Elapsed time: ${elapsed * 1000} ms`) + console.log(`Finished delay. Elapsed time: ${elapsed * 1000} ms`); } + delay(1000).then(print); delay(500).then(print); delay(200).then(print); delay(600).then(print); delay(30).then(print); + let i = 0; + function counter() { + console.log(`Iteration number ${i} for JS interval`); + i += 1; + } + + timeout(counter, 100); + for(let i = 0; i <= 100000; i++) { // Emulate a long-running evaluation of a script. 
} diff --git a/examples/src/bin/tokio_event_loop.rs b/examples/src/bin/tokio_event_loop.rs index c6e0cd248d2..3f033f61338 100644 --- a/examples/src/bin/tokio_event_loop.rs +++ b/examples/src/bin/tokio_event_loop.rs @@ -9,13 +9,15 @@ use std::{ use boa_engine::{ context::ContextBuilder, - job::{FutureJob, JobQueue, NativeJob}, + job::{JobQueue, NativeAsyncJob, NativeJob}, js_string, native_function::NativeFunction, property::Attribute, - Context, JsArgs, JsResult, JsValue, Script, Source, + Context, JsArgs, JsNativeError, JsResult, JsValue, Script, Source, }; use boa_runtime::Console; +use futures_concurrency::future::FutureGroup; +use futures_lite::{future, StreamExt}; use tokio::{task, time}; // This example shows how to create an event loop using the tokio runtime. @@ -32,14 +34,14 @@ fn main() { /// An event queue using tokio to drive futures to completion. struct Queue { - futures: RefCell>, + async_jobs: RefCell>, jobs: RefCell>, } impl Queue { fn new() -> Self { Self { - futures: RefCell::default(), + async_jobs: RefCell::default(), jobs: RefCell::default(), } } @@ -55,12 +57,12 @@ impl Queue { } impl JobQueue for Queue { - fn enqueue_promise_job(&self, job: NativeJob, _context: &mut Context) { + fn enqueue_job(&self, job: NativeJob, _context: &mut Context) { self.jobs.borrow_mut().push_back(job); } - fn enqueue_future_job(&self, future: FutureJob, _context: &mut Context) { - self.futures.borrow_mut().push(future); + fn enqueue_async_job(&self, async_job: NativeAsyncJob, _context: &mut Context) { + self.async_jobs.borrow_mut().push_back(async_job); } // While the sync flavor of `run_jobs` will block the current thread until all the jobs have finished... @@ -70,41 +72,37 @@ impl JobQueue for Queue { .build() .unwrap(); - task::LocalSet::default().block_on(&runtime, self.run_jobs_async(context)); + task::LocalSet::default().block_on(&runtime, self.run_jobs_async(&RefCell::new(context))); } // ...the async flavor won't, which allows concurrent execution with external async tasks. - fn run_jobs_async<'a, 'ctx, 'fut>( + fn run_jobs_async<'a, 'b, 'fut>( &'a self, - context: &'ctx mut Context, + context: &'b RefCell<&mut Context>, ) -> Pin + 'fut>> where 'a: 'fut, - 'ctx: 'fut, + 'b: 'fut, { Box::pin(async move { // Early return in case there were no jobs scheduled. - if self.jobs.borrow().is_empty() && self.futures.borrow().is_empty() { + if self.jobs.borrow().is_empty() && self.async_jobs.borrow().is_empty() { return; } - let mut join_set = task::JoinSet::new(); + let mut group = FutureGroup::new(); loop { - for future in std::mem::take(&mut *self.futures.borrow_mut()) { - join_set.spawn_local(future); + for job in std::mem::take(&mut *self.async_jobs.borrow_mut()) { + group.insert(job.call(context)); } if self.jobs.borrow().is_empty() { - let Some(job) = join_set.join_next().await else { + let Some(result) = group.next().await else { // Both queues are empty. We can exit. return; }; - // Important to schedule the returned `job` into the job queue, since that's - // what allows updating the `Promise` seen by ECMAScript for when the future - // completes. - match job { - Ok(job) => self.enqueue_promise_job(job, context), - Err(e) => eprintln!("{e}"), + if let Err(err) = result { + eprintln!("Uncaught {err}"); } continue; @@ -113,24 +111,20 @@ impl JobQueue for Queue { // We have some jobs pending on the microtask queue. Try to poll the pending // tasks once to see if any of them finished, and run the pending microtasks // otherwise. 
- let Some(job) = join_set.try_join_next() else { + let Some(result) = future::poll_once(group.next()).await.flatten() else { // No completed jobs. Run the microtask queue once. - self.drain_jobs(context); + self.drain_jobs(&mut context.borrow_mut()); task::yield_now().await; continue; }; - // Important to schedule the returned `job` into the job queue, since that's - // what allows updating the `Promise` seen by ECMAScript for when the future - // completes. - match job { - Ok(job) => self.enqueue_promise_job(job, context), - Err(e) => eprintln!("{e}"), + if let Err(err) = result { + eprintln!("Uncaught {err}"); } // Only one macrotask can be executed before the next drain of the microtask queue. - self.drain_jobs(context); + self.drain_jobs(&mut context.borrow_mut()); } }) } @@ -154,6 +148,38 @@ fn delay( } } +// Example interval function. We cannot use a function returning async in this case since it would +// borrow the context for too long, but using a `NativeAsyncJob` we can! +fn interval(this: &JsValue, args: &[JsValue], context: &mut Context) -> JsResult { + let Some(function) = args.get_or_undefined(0).as_callable().cloned() else { + return Err(JsNativeError::typ() + .with_message("arg must be a callable") + .into()); + }; + + let this = this.clone(); + let delay = args.get_or_undefined(1).to_u32(context)?; + let args = args.get(2..).unwrap_or_default().to_vec(); + + context.enqueue_async_job(NativeAsyncJob::with_realm( + move |context| { + Box::pin(async move { + let mut timer = time::interval(Duration::from_millis(u64::from(delay))); + for _ in 0..10 { + timer.tick().await; + if let Err(err) = function.call(&this, &args, &mut context.borrow_mut()) { + eprintln!("Uncaught {err}"); + } + } + Ok(JsValue::undefined()) + }) + }, + context.realm().clone(), + )); + + Ok(JsValue::undefined()) +} + /// Adds the custom runtime to the context. fn add_runtime(context: &mut Context) { // First add the `console` object, to be able to call `console.log()`. @@ -170,19 +196,37 @@ fn add_runtime(context: &mut Context) { NativeFunction::from_async_fn(delay), ) .expect("the delay builtin shouldn't exist"); + + // Finally, bind the defined async job to the ECMAScript function "interval". + context + .register_global_builtin_callable( + js_string!("timeout"), + 1, + NativeFunction::from_fn_ptr(interval), + ) + .expect("the delay builtin shouldn't exist"); } // Script that does multiple calls to multiple async timers. const SCRIPT: &str = r" function print(elapsed) { - console.log(`Finished delay. Elapsed time: ${elapsed * 1000} ms`) + console.log(`Finished delay. Elapsed time: ${elapsed * 1000} ms`); } + delay(1000).then(print); delay(500).then(print); delay(200).then(print); delay(600).then(print); delay(30).then(print); + let i = 0; + function counter() { + console.log(`Iteration number ${i} for JS interval`); + i += 1; + } + + timeout(counter, 100); + for(let i = 0; i <= 100000; i++) { // Emulate a long-running evaluation of a script. 
} From e77907c1de8814cf6d9fa950ecd016c006679da9 Mon Sep 17 00:00:00 2001 From: jedel1043 Date: Fri, 10 Jan 2025 09:08:05 -0600 Subject: [PATCH 3/7] Docs & Reviews --- core/engine/src/job.rs | 2 +- examples/src/bin/smol_event_loop.rs | 4 ++-- examples/src/bin/tokio_event_loop.rs | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/core/engine/src/job.rs b/core/engine/src/job.rs index eed3d77a91f..3539339195d 100644 --- a/core/engine/src/job.rs +++ b/core/engine/src/job.rs @@ -130,7 +130,7 @@ impl NativeJob { } } -/// The [`Future`] job passed to the [`JobQueue::enqueue_future_job`] operation. +/// The [`Future`] job returned by a [`NativeAsyncJob`] operation. pub type BoxedFuture<'a> = Pin> + 'a>>; /// An ECMAScript [Job] that can be run asynchronously. diff --git a/examples/src/bin/smol_event_loop.rs b/examples/src/bin/smol_event_loop.rs index f241697e541..c673b5f96c0 100644 --- a/examples/src/bin/smol_event_loop.rs +++ b/examples/src/bin/smol_event_loop.rs @@ -192,7 +192,7 @@ fn add_runtime(context: &mut Context) { // Finally, bind the defined async job to the ECMAScript function "interval". context .register_global_builtin_callable( - js_string!("timeout"), + js_string!("interval"), 1, NativeFunction::from_fn_ptr(interval), ) @@ -217,7 +217,7 @@ const SCRIPT: &str = r" i += 1; } - timeout(counter, 100); + interval(counter, 100); for(let i = 0; i <= 100000; i++) { // Emulate a long-running evaluation of a script. diff --git a/examples/src/bin/tokio_event_loop.rs b/examples/src/bin/tokio_event_loop.rs index 3f033f61338..cc8b33cede3 100644 --- a/examples/src/bin/tokio_event_loop.rs +++ b/examples/src/bin/tokio_event_loop.rs @@ -200,7 +200,7 @@ fn add_runtime(context: &mut Context) { // Finally, bind the defined async job to the ECMAScript function "interval". context .register_global_builtin_callable( - js_string!("timeout"), + js_string!("interval"), 1, NativeFunction::from_fn_ptr(interval), ) @@ -225,7 +225,7 @@ const SCRIPT: &str = r" i += 1; } - timeout(counter, 100); + interval(counter, 100); for(let i = 0; i <= 100000; i++) { // Emulate a long-running evaluation of a script. From e848ef054697c70e2f98a3151092847c66308ceb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Juli=C3=A1n=20Espina?= Date: Fri, 10 Jan 2025 20:40:10 -0600 Subject: [PATCH 4/7] Fix realm of async job --- core/engine/src/job.rs | 32 +++++++++++++++++++++++++------- 1 file changed, 25 insertions(+), 7 deletions(-) diff --git a/core/engine/src/job.rs b/core/engine/src/job.rs index 3539339195d..574c66445c7 100644 --- a/core/engine/src/job.rs +++ b/core/engine/src/job.rs @@ -195,15 +195,19 @@ impl NativeAsyncJob { /// /// If the native async job has an execution realm defined, this sets the running execution /// context to the realm's before calling the inner closure, and resets it after execution. - pub fn call<'a>( + pub fn call<'a, 'b>( self, - context: &'a RefCell<&mut Context>, - ) -> impl Future> + use<'a> { + context: &'a RefCell<&'b mut Context>, + // We can make our users assume `Unpin` because `self.f` is already boxed, so we shouldn't + // need pin at all. + ) -> impl Future> + Unpin + use<'a, 'b> { // If realm is not null, each time job is invoked the implementation must perform // implementation-defined steps such that execution is prepared to evaluate ECMAScript // code at the time of job's invocation. 
- if let Some(realm) = self.realm { - let old_realm = context.borrow_mut().enter_realm(realm); + let realm = self.realm; + + let mut future = if let Some(realm) = &realm { + let old_realm = context.borrow_mut().enter_realm(realm.clone()); // Let scriptOrModule be GetActiveScriptOrModule() at the time HostEnqueuePromiseJob is // invoked. If realm is not null, each time job is invoked the implementation must @@ -212,11 +216,25 @@ impl NativeAsyncJob { let result = (self.f)(context); context.borrow_mut().enter_realm(old_realm); - result } else { (self.f)(context) - } + }; + + std::future::poll_fn(move |cx| { + // We need to do the same dance again since the inner code could assume we're still + // on the same realm. + if let Some(realm) = &realm { + let old_realm = context.borrow_mut().enter_realm(realm.clone()); + + let poll_result = future.as_mut().poll(cx); + + context.borrow_mut().enter_realm(old_realm); + poll_result + } else { + future.as_mut().poll(cx) + } + }) } } From c5ac389dbc64588f3faaa4f999a0eba71d967a08 Mon Sep 17 00:00:00 2001 From: jedel1043 Date: Sat, 11 Jan 2025 00:59:06 -0600 Subject: [PATCH 5/7] Revamp `JobQueue` into `JobExecutor` --- cli/src/main.rs | 28 +- core/engine/src/builtins/promise/mod.rs | 22 +- core/engine/src/context/mod.rs | 65 ++-- core/engine/src/job.rs | 297 +++++++++++-------- core/engine/src/object/builtins/jspromise.rs | 8 +- core/engine/src/script.rs | 4 +- examples/src/bin/module_fetch_async.rs | 135 ++++----- examples/src/bin/smol_event_loop.rs | 61 ++-- examples/src/bin/tokio_event_loop.rs | 61 ++-- 9 files changed, 373 insertions(+), 308 deletions(-) diff --git a/cli/src/main.rs b/cli/src/main.rs index f62c47d75c9..2ff42930ef1 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -13,7 +13,7 @@ mod helper; use boa_engine::{ builtins::promise::PromiseState, context::ContextBuilder, - job::{JobQueue, NativeAsyncJob, NativeJob}, + job::{Job, JobExecutor, NativeAsyncJob, PromiseJob}, module::{Module, SimpleModuleLoader}, optimizer::OptimizerOptions, script::Script, @@ -336,10 +336,10 @@ fn main() -> Result<()> { let args = Opt::parse(); - let queue = Rc::new(Jobs::default()); + let executor = Rc::new(Executor::default()); let loader = Rc::new(SimpleModuleLoader::new(&args.root).map_err(|e| eyre!(e.to_string()))?); let mut context = ContextBuilder::new() - .job_queue(queue) + .job_executor(executor) .module_loader(loader.clone()) .build() .map_err(|e| eyre!(e.to_string()))?; @@ -453,23 +453,23 @@ fn add_runtime(context: &mut Context) { } #[derive(Default)] -struct Jobs { - jobs: RefCell>, +struct Executor { + promise_jobs: RefCell>, async_jobs: RefCell>, } -impl JobQueue for Jobs { - fn enqueue_job(&self, job: NativeJob, _: &mut Context) { - self.jobs.borrow_mut().push_back(job); - } - - fn enqueue_async_job(&self, async_job: NativeAsyncJob, _: &mut Context) { - self.async_jobs.borrow_mut().push_back(async_job); +impl JobExecutor for Executor { + fn enqueue_job(&self, job: Job, _: &mut Context) { + match job { + Job::PromiseJob(job) => self.promise_jobs.borrow_mut().push_back(job), + Job::AsyncJob(job) => self.async_jobs.borrow_mut().push_back(job), + job => eprintln!("unsupported job type {job:?}"), + } } fn run_jobs(&self, context: &mut Context) { loop { - if self.jobs.borrow().is_empty() && self.async_jobs.borrow().is_empty() { + if self.promise_jobs.borrow().is_empty() && self.async_jobs.borrow().is_empty() { return; } let async_jobs = std::mem::take(&mut *self.async_jobs.borrow_mut()); @@ -477,7 +477,7 @@ impl JobQueue for Jobs { if let Err(err) = 
pollster::block_on(async_job.call(&RefCell::new(context))) { eprintln!("Uncaught {err}"); } - let jobs = std::mem::take(&mut *self.jobs.borrow_mut()); + let jobs = std::mem::take(&mut *self.promise_jobs.borrow_mut()); for job in jobs { if let Err(e) = job.call(context) { eprintln!("Uncaught {e}"); diff --git a/core/engine/src/builtins/promise/mod.rs b/core/engine/src/builtins/promise/mod.rs index cda49ce2309..d5795f294c7 100644 --- a/core/engine/src/builtins/promise/mod.rs +++ b/core/engine/src/builtins/promise/mod.rs @@ -11,7 +11,7 @@ use crate::{ builtins::{Array, BuiltInObject}, context::intrinsics::{Intrinsics, StandardConstructor, StandardConstructors}, error::JsNativeError, - job::{JobCallback, NativeJob}, + job::{JobCallback, PromiseJob}, js_string, native_function::NativeFunction, object::{ @@ -1887,7 +1887,9 @@ impl Promise { new_promise_reaction_job(fulfill_reaction, value.clone(), context); // c. Perform HostEnqueuePromiseJob(fulfillJob.[[Job]], fulfillJob.[[Realm]]). - context.job_queue().enqueue_job(fulfill_job, context); + context + .job_executor() + .enqueue_job(fulfill_job.into(), context); } // 11. Else, @@ -1907,7 +1909,9 @@ impl Promise { let reject_job = new_promise_reaction_job(reject_reaction, reason.clone(), context); // e. Perform HostEnqueuePromiseJob(rejectJob.[[Job]], rejectJob.[[Realm]]). - context.job_queue().enqueue_job(reject_job, context); + context + .job_executor() + .enqueue_job(reject_job.into(), context); // 12. Set promise.[[PromiseIsHandled]] to true. promise @@ -1983,7 +1987,7 @@ impl Promise { let job = new_promise_reaction_job(reaction, argument.clone(), context); // b. Perform HostEnqueuePromiseJob(job.[[Job]], job.[[Realm]]). - context.job_queue().enqueue_job(job, context); + context.job_executor().enqueue_job(job.into(), context); } // 2. Return unused. } @@ -2176,7 +2180,7 @@ impl Promise { ); // 15. Perform HostEnqueuePromiseJob(job.[[Job]], job.[[Realm]]). - context.job_queue().enqueue_job(job, context); + context.job_executor().enqueue_job(job.into(), context); // 16. Return undefined. Ok(JsValue::undefined()) @@ -2237,7 +2241,7 @@ fn new_promise_reaction_job( mut reaction: ReactionRecord, argument: JsValue, context: &mut Context, -) -> NativeJob { +) -> PromiseJob { // Inverting order since `job` captures `reaction` by value. // 2. Let handlerRealm be null. @@ -2318,7 +2322,7 @@ fn new_promise_reaction_job( }; // 4. Return the Record { [[Job]]: job, [[Realm]]: handlerRealm }. - NativeJob::with_realm(job, realm, context) + PromiseJob::with_realm(job, realm, context) } /// More information: @@ -2330,7 +2334,7 @@ fn new_promise_resolve_thenable_job( thenable: JsValue, then: JobCallback, context: &mut Context, -) -> NativeJob { +) -> PromiseJob { // Inverting order since `job` captures variables by value. // 2. Let getThenRealmResult be Completion(GetFunctionRealm(then.[[Callback]])). @@ -2372,5 +2376,5 @@ fn new_promise_resolve_thenable_job( }; // 6. Return the Record { [[Job]]: job, [[Realm]]: thenRealm }. 
- NativeJob::with_realm(job, realm, context) + PromiseJob::with_realm(job, realm, context) } diff --git a/core/engine/src/context/mod.rs b/core/engine/src/context/mod.rs index e6635edf8d7..3d5415e8b48 100644 --- a/core/engine/src/context/mod.rs +++ b/core/engine/src/context/mod.rs @@ -14,12 +14,12 @@ use intrinsics::Intrinsics; #[cfg(feature = "temporal")] use temporal_rs::tzdb::FsTzdbProvider; -use crate::job::NativeAsyncJob; +use crate::job::Job; use crate::vm::RuntimeLimits; use crate::{ builtins, class::{Class, ClassBuilder}, - job::{JobQueue, NativeJob, SimpleJobQueue}, + job::{JobExecutor, SimpleJobExecutor}, js_string, module::{IdleModuleLoader, ModuleLoader, SimpleModuleLoader}, native_function::NativeFunction, @@ -113,7 +113,7 @@ pub struct Context { host_hooks: &'static dyn HostHooks, - job_queue: Rc, + job_executor: Rc, module_loader: Rc, @@ -135,7 +135,7 @@ impl std::fmt::Debug for Context { .field("interner", &self.interner) .field("vm", &self.vm) .field("strict", &self.strict) - .field("promise_job_queue", &"JobQueue") + .field("job_executor", &"JobExecutor") .field("hooks", &"HostHooks") .field("module_loader", &"ModuleLoader") .field("optimizer_options", &self.optimizer_options); @@ -188,7 +188,7 @@ impl Context { /// ``` /// /// Note that this won't run any scheduled promise jobs; you need to call [`Context::run_jobs`] - /// on the context or [`JobQueue::run_jobs`] on the provided queue to run them. + /// on the context or [`JobExecutor::run_jobs`] on the provided queue to run them. #[allow(clippy::unit_arg, dropping_copy_types)] pub fn eval(&mut self, src: Source<'_, R>) -> JsResult { let main_timer = Profiler::global().start_event("Script evaluation", "Main"); @@ -469,35 +469,31 @@ impl Context { self.strict = strict; } - /// Enqueues a [`NativeJob`] on the [`JobQueue`]. + /// Enqueues a [`Job`] on the [`JobExecutor`]. #[inline] - pub fn enqueue_job(&mut self, job: NativeJob) { - self.job_queue().enqueue_job(job, self); + pub fn enqueue_job(&mut self, job: Job) { + self.job_executor().enqueue_job(job, self); } - /// Enqueues a [`NativeAsyncJob`] on the [`JobQueue`]. - #[inline] - pub fn enqueue_async_job(&mut self, job: NativeAsyncJob) { - self.job_queue().enqueue_async_job(job, self); - } - - /// Runs all the jobs in the job queue. + /// Runs all the jobs with the provided job executor. #[inline] pub fn run_jobs(&mut self) { - self.job_queue().run_jobs(self); + self.job_executor().run_jobs(self); self.clear_kept_objects(); } - /// Asynchronously runs all the jobs in the job queue. + /// Asynchronously runs all the jobs with the provided job executor. /// /// # Note /// /// Concurrent job execution cannot be guaranteed by the engine, since this depends on the - /// specific handling of each [`JobQueue`]. If you want to execute jobs concurrently, you must - /// provide a custom implementor of `JobQueue` to the context. + /// specific handling of each [`JobExecutor`]. If you want to execute jobs concurrently, you must + /// provide a custom implementatin of `JobExecutor` to the context. #[allow(clippy::future_not_send)] pub async fn run_jobs_async(&mut self) { - self.job_queue().run_jobs_async(&RefCell::new(self)).await; + self.job_executor() + .run_jobs_async(&RefCell::new(self)) + .await; self.clear_kept_objects(); } @@ -554,11 +550,11 @@ impl Context { self.host_hooks } - /// Gets the job queue. + /// Gets the job executor. 
#[inline] #[must_use] - pub fn job_queue(&self) -> Rc { - self.job_queue.clone() + pub fn job_executor(&self) -> Rc { + self.job_executor.clone() } /// Gets the module loader. @@ -889,7 +885,7 @@ impl Context { pub struct ContextBuilder { interner: Option, host_hooks: Option<&'static dyn HostHooks>, - job_queue: Option>, + job_executor: Option>, module_loader: Option>, can_block: bool, #[cfg(feature = "intl")] @@ -901,7 +897,7 @@ pub struct ContextBuilder { impl std::fmt::Debug for ContextBuilder { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { #[derive(Clone, Copy, Debug)] - struct JobQueue; + struct JobExecutor; #[derive(Clone, Copy, Debug)] struct HostHooks; #[derive(Clone, Copy, Debug)] @@ -911,7 +907,10 @@ impl std::fmt::Debug for ContextBuilder { out.field("interner", &self.interner) .field("host_hooks", &self.host_hooks.as_ref().map(|_| HostHooks)) - .field("job_queue", &self.job_queue.as_ref().map(|_| JobQueue)) + .field( + "job_executor", + &self.job_executor.as_ref().map(|_| JobExecutor), + ) .field( "module_loader", &self.module_loader.as_ref().map(|_| ModuleLoader), @@ -1024,10 +1023,10 @@ impl ContextBuilder { self } - /// Initializes the [`JobQueue`] for the context. + /// Initializes the [`JobExecutor`] for the context. #[must_use] - pub fn job_queue(mut self, job_queue: Rc) -> Self { - self.job_queue = Some(job_queue); + pub fn job_executor(mut self, job_executor: Rc) -> Self { + self.job_executor = Some(job_executor); self } @@ -1098,9 +1097,9 @@ impl ContextBuilder { Rc::new(IdleModuleLoader) }; - let job_queue = self - .job_queue - .unwrap_or_else(|| Rc::new(SimpleJobQueue::new())); + let job_executor = self + .job_executor + .unwrap_or_else(|| Rc::new(SimpleJobExecutor::new())); let mut context = Context { interner: self.interner.unwrap_or_default(), @@ -1127,7 +1126,7 @@ impl ContextBuilder { instructions_remaining: self.instructions_remaining, kept_alive: Vec::new(), host_hooks, - job_queue, + job_executor, module_loader, optimizer_options: OptimizerOptions::OPTIMIZE_ALL, root_shape, diff --git a/core/engine/src/job.rs b/core/engine/src/job.rs index 574c66445c7..30581a001cf 100644 --- a/core/engine/src/job.rs +++ b/core/engine/src/job.rs @@ -1,21 +1,33 @@ //! Boa's API to create and customize `ECMAScript` jobs and job queues. //! -//! [`NativeJob`] is an ECMAScript [Job], or a closure that runs an `ECMAScript` computation when -//! there's no other computation running. +//! [`Job`] is an ECMAScript [Job], or a closure that runs an `ECMAScript` computation when +//! there's no other computation running. The module defines several type of jobs: +//! - [`PromiseJob`] for Promise related jobs. +//! - [`NativeAsyncJob`] for jobs that support [`Future`]. +//! - [`NativeJob`] for generic jobs that aren't related to Promises. //! //! [`JobCallback`] is an ECMAScript [`JobCallback`] record, containing an `ECMAScript` function //! that is executed when a promise is either fulfilled or rejected. //! -//! [`JobQueue`] is a trait encompassing the required functionality for a job queue; this allows +//! [`JobExecutor`] is a trait encompassing the required functionality for a job executor; this allows //! implementing custom event loops, custom handling of Jobs or other fun things. //! This trait is also accompanied by two implementors of the trait: -//! - [`IdleJobQueue`], which is a queue that does nothing, and the default queue if no queue is +//! - [`IdleJobExecutor`], which is an executor that does nothing, and the default executor if no executor is //! 
provided. Useful for hosts that want to disable promises. -//! - [`SimpleJobQueue`], which is a simple FIFO queue that runs all jobs to completion, bailing +//! - [`SimpleJobExecutor`], which is a simple FIFO queue that runs all jobs to completion, bailing //! on the first error encountered. //! +//! ## [`Trace`]? +//! +//! Most of the types defined in this module don't implement `Trace`. This is because most jobs can only +//! be run once, and putting a `JobExecutor` on a garbage collected object is not allowed. +//! +//! In addition to that, not implementing `Trace` makes it so that the garbage collector can consider +//! any captured variables inside jobs as roots, since you cannot store jobs within a [`Gc`]. +//! //! [Job]: https://tc39.es/ecma262/#sec-jobs //! [JobCallback]: https://tc39.es/ecma262/#sec-jobcallback-records +//! [`Gc`]: boa_gc::Gc use std::{cell::RefCell, collections::VecDeque, fmt::Debug, future::Future, pin::Pin}; @@ -26,41 +38,14 @@ use crate::{ }; use boa_gc::{Finalize, Trace}; -/// An ECMAScript [Job] closure. -/// -/// The specification allows scheduling any [`NativeJob`] closure by the host into the job queue. -/// However, host-defined jobs must abide to a set of requirements. -/// -/// ### Requirements -/// -/// - At some future point in time, when there is no running execution context and the execution -/// context stack is empty, the implementation must: -/// - Perform any host-defined preparation steps. -/// - Invoke the Job Abstract Closure. -/// - Perform any host-defined cleanup steps, after which the execution context stack must be empty. -/// - Only one Job may be actively undergoing evaluation at any point in time. -/// - Once evaluation of a Job starts, it must run to completion before evaluation of any other Job starts. -/// - The Abstract Closure must return a normal completion, implementing its own handling of errors. -/// -/// `NativeJob`s API differs slightly on the last requirement, since it allows closures returning -/// [`JsResult`], but it's okay because `NativeJob`s are handled by the host anyways; a host could -/// pass a closure returning `Err` and handle the error on [`JobQueue::run_jobs`], making the closure -/// effectively run as if it never returned `Err`. -/// -/// ## [`Trace`]? -/// -/// `NativeJob` doesn't implement `Trace` because it doesn't need to; all jobs can only be run once -/// and putting a [`JobQueue`] on a garbage collected object is not allowed. +/// An ECMAScript [Job Abstract Closure]. /// -/// On the other hand, it seems like this type breaks all the safety checks of the -/// [`NativeFunction`] API, since you can capture any `Trace` variable into the closure... but it -/// doesn't! -/// The garbage collector doesn't need to trace the captured variables because the closures -/// are always stored on the [`JobQueue`], which is always rooted, which means the captured variables -/// are also rooted, allowing us to capture any variable in the closure for free! +/// This is basically a synchronous task that needs to be run to progress [`Promise`] objects, +/// or unblock threads waiting on [`Atomics.waitAsync`]. 
/// /// [Job]: https://tc39.es/ecma262/#sec-jobs -/// [`NativeFunction`]: crate::native_function::NativeFunction +/// [`Promise`]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Promise +/// [`Atomics.waitAsync`]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Atomics/waitAsync pub struct NativeJob { #[allow(clippy::type_complexity)] f: Box JsResult>, @@ -69,7 +54,7 @@ pub struct NativeJob { impl Debug for NativeJob { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("NativeJob").field("f", &"Closure").finish() + f.debug_struct("NativeJob").finish_non_exhaustive() } } @@ -135,17 +120,8 @@ pub type BoxedFuture<'a> = Pin> + 'a>> /// An ECMAScript [Job] that can be run asynchronously. /// -/// ## [`Trace`]? -/// -/// `NativeJob` doesn't implement `Trace` because it doesn't need to; all jobs can only be run once -/// and putting a [`JobQueue`] on a garbage collected object is not allowed. -/// -/// Additionally, the garbage collector doesn't need to trace the captured variables because the closures -/// are always stored on the [`JobQueue`], which is always rooted, which means the captured variables -/// are also rooted. -/// -/// [Job]: https://tc39.es/ecma262/#sec-jobs -/// [`NativeFunction`]: crate::native_function::NativeFunction +/// This is an additional type of job that is not defined by the specification, enabling running `Future` tasks +/// created by ECMAScript code in an easier way. #[allow(clippy::type_complexity)] pub struct NativeAsyncJob { f: Box FnOnce(&'a RefCell<&mut Context>) -> BoxedFuture<'a>>, @@ -238,6 +214,69 @@ impl NativeAsyncJob { } } +/// An ECMAScript [Job Abstract Closure] executing code related to [`Promise`] objects. +/// +/// This represents the [`HostEnqueuePromiseJob`] operation from the specification. +/// +/// ### [Requirements] +/// +/// - If realm is not null, each time job is invoked the implementation must perform implementation-defined +/// steps such that execution is prepared to evaluate ECMAScript code at the time of job's invocation. +/// - Let `scriptOrModule` be [`GetActiveScriptOrModule()`] at the time `HostEnqueuePromiseJob` is invoked. +/// If realm is not null, each time job is invoked the implementation must perform implementation-defined steps +/// such that `scriptOrModule` is the active script or module at the time of job's invocation. +/// - Jobs must run in the same order as the `HostEnqueuePromiseJob` invocations that scheduled them. +/// +/// Of all the requirements, Boa guarantees the first two by its internal implementation of `NativeJob`, meaning +/// implementations of [`JobExecutor`] must only guarantee that jobs are run in the same order as they're enqueued. 
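+///
+/// ### Example
+///
+/// A minimal, illustrative sketch of scheduling a `PromiseJob` by hand (promise jobs are
+/// normally created internally by the `Promise` builtin). It assumes `Context::enqueue_job`
+/// accepts any job convertible into a `Job`.
+///
+/// ```
+/// use boa_engine::{job::PromiseJob, Context, JsValue};
+///
+/// let mut context = Context::default();
+/// context.enqueue_job(PromiseJob::new(|_context| Ok(JsValue::undefined())).into());
+/// context.run_jobs();
+/// ```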
+/// +/// [`Promise`]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Promise +/// [`HostEnqueuePromiseJob`]: https://tc39.es/ecma262/#sec-hostenqueuepromisejob +/// [Job Abstract Closure]: https://tc39.es/ecma262/#sec-jobs +/// [Requirements]: https://tc39.es/ecma262/multipage/executable-code-and-execution-contexts.html#sec-hostenqueuepromisejob +/// [`GetActiveScriptOrModule()`]: https://tc39.es/ecma262/multipage/executable-code-and-execution-contexts.html#sec-getactivescriptormodule +pub struct PromiseJob(NativeJob); + +impl Debug for PromiseJob { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("PromiseJob").finish_non_exhaustive() + } +} + +impl PromiseJob { + /// Creates a new `PromiseJob` from a closure. + pub fn new(f: F) -> Self + where + F: FnOnce(&mut Context) -> JsResult + 'static, + { + Self(NativeJob::new(f)) + } + + /// Creates a new `PromiseJob` from a closure and an execution realm. + pub fn with_realm(f: F, realm: Realm, context: &mut Context) -> Self + where + F: FnOnce(&mut Context) -> JsResult + 'static, + { + Self(NativeJob::with_realm(f, realm, context)) + } + + /// Gets a reference to the execution realm of the `PromiseJob`. + #[must_use] + pub const fn realm(&self) -> Option<&Realm> { + self.0.realm() + } + + /// Calls the `PromiseJob` with the specified [`Context`]. + /// + /// # Note + /// + /// If the job has an execution realm defined, this sets the running execution + /// context to the realm's before calling the inner closure, and resets it after execution. + pub fn call(self, context: &mut Context) -> JsResult { + self.0.call(context) + } +} + /// [`JobCallback`][spec] records. /// /// [spec]: https://tc39.es/ecma262/#sec-jobcallback-records @@ -287,54 +326,71 @@ impl JobCallback { } } -/// A queue of `ECMAscript` [Jobs]. +/// A job that needs to be handled by a [`JobExecutor`]. /// -/// This is the main API that allows creating custom event loops with custom job queues. +/// # Requirements /// -/// [Jobs]: https://tc39.es/ecma262/#sec-jobs -pub trait JobQueue { - /// [`HostEnqueuePromiseJob ( job, realm )`][spec]. - /// - /// Enqueues a [`NativeJob`] on the job queue. - /// - /// # Requirements - /// - /// Per the [spec]: - /// > An implementation of `HostEnqueuePromiseJob` must conform to the requirements in [9.5][Jobs] as well as the - /// > following: - /// > - If `realm` is not null, each time `job` is invoked the implementation must perform implementation-defined steps - /// > such that execution is prepared to evaluate ECMAScript code at the time of job's invocation. - /// > - Let `scriptOrModule` be `GetActiveScriptOrModule()` at the time `HostEnqueuePromiseJob` is invoked. If realm - /// > is not null, each time job is invoked the implementation must perform implementation-defined steps such that - /// > `scriptOrModule` is the active script or module at the time of job's invocation. - /// > - Jobs must run in the same order as the `HostEnqueuePromiseJob` invocations that scheduled them. +/// The specification defines many types of jobs, but all of them must adhere to a set of requirements: +/// +/// - At some future point in time, when there is no running execution context and the execution +/// context stack is empty, the implementation must: +/// - Perform any host-defined preparation steps. +/// - Invoke the Job Abstract Closure. +/// - Perform any host-defined cleanup steps, after which the execution context stack must be empty. 
+/// - Only one Job may be actively undergoing evaluation at any point in time. +/// - Once evaluation of a Job starts, it must run to completion before evaluation of any other Job starts. +/// - The Abstract Closure must return a normal completion, implementing its own handling of errors. +/// +/// Boa is a little bit flexible on the last requirement, since it allows jobs to return either +/// values or errors, but the rest of the requirements must be followed for all conformant implementations. +/// +/// Additionally, each job type can have additional requirements that must also be followed in addition +/// to the previous ones. +#[non_exhaustive] +#[derive(Debug)] +pub enum Job { + /// A `Promise`-related job. /// - /// Of all the requirements, Boa guarantees the first two by its internal implementation of `NativeJob`, meaning - /// the implementer must only guarantee that jobs are run in the same order as they're enqueued. + /// See [`PromiseJob`] for more information. + PromiseJob(PromiseJob), + /// A [`Future`]-related job. /// - /// [spec]: https://tc39.es/ecma262/#sec-hostenqueuepromisejob - /// [Jobs]: https://tc39.es/ecma262/#sec-jobs - fn enqueue_job(&self, job: NativeJob, context: &mut Context); + /// See [`NativeAsyncJob`] for more information. + AsyncJob(NativeAsyncJob), +} - /// Enqueues a new [`NativeAsyncJob`] job on the job queue. - /// - /// Calling `future` returns a Rust [`Future`] that can be sent to a runtime for concurrent computation. - fn enqueue_async_job(&self, async_job: NativeAsyncJob, context: &mut Context); +impl From for Job { + fn from(native_async_job: NativeAsyncJob) -> Self { + Job::AsyncJob(native_async_job) + } +} + +impl From for Job { + fn from(promise_job: PromiseJob) -> Self { + Job::PromiseJob(promise_job) + } +} - /// Runs all jobs in the queue. +/// An executor of `ECMAscript` [Jobs]. +/// +/// This is the main API that allows creating custom event loops. +/// +/// [Jobs]: https://tc39.es/ecma262/#sec-jobs +pub trait JobExecutor { + /// Enqueues a `Job` on the executor. /// - /// Running a job could enqueue more jobs in the queue. The implementor of the trait - /// determines if the method should loop until there are no more queued jobs or if - /// it should only run one iteration of the queue. + /// This method combines all the host-defined job enqueueing operations into a single method. + /// See the [spec] for more information on the requirements that each operation must follow. + /// + /// [spec]: https://tc39.es/ecma262/#sec-jobs + fn enqueue_job(&self, job: Job, context: &mut Context); + + /// Runs all jobs in the executor. fn run_jobs(&self, context: &mut Context); - /// Asynchronously runs all jobs in the queue. + /// Asynchronously runs all jobs in the executor. /// - /// Running a job could enqueue more jobs in the queue. The implementor of the trait - /// determines if the method should loop until there are no more queued jobs or if - /// it should only run one iteration of the queue. - /// - /// By default forwards to [`JobQueue::run_jobs`]. Implementors using async should override this + /// By default forwards to [`JobExecutor::run_jobs`]. Implementors using async should override this /// with a proper algorithm to run jobs asynchronously. fn run_jobs_async<'a, 'b, 'fut>( &'a self, @@ -348,93 +404,90 @@ pub trait JobQueue { } } -/// A job queue that does nothing. +/// A job executor that does nothing. /// -/// This queue is mostly useful if you want to disable the promise capabilities of the engine. 
This +/// This executor is mostly useful if you want to disable the promise capabilities of the engine. This /// can be done by passing it to the [`ContextBuilder`]: /// /// ``` /// use boa_engine::{ /// context::ContextBuilder, -/// job::{IdleJobQueue, JobQueue}, +/// job::{IdleJobExecutor, JobExecutor}, /// }; /// use std::rc::Rc; /// -/// let queue = Rc::new(IdleJobQueue); -/// let context = ContextBuilder::new().job_queue(queue).build(); +/// let executor = Rc::new(IdleJobExecutor); +/// let context = ContextBuilder::new().job_executor(executor).build(); /// ``` /// /// [`ContextBuilder`]: crate::context::ContextBuilder #[derive(Debug, Clone, Copy)] -pub struct IdleJobQueue; - -impl JobQueue for IdleJobQueue { - fn enqueue_job(&self, _: NativeJob, _: &mut Context) {} +pub struct IdleJobExecutor; - fn enqueue_async_job(&self, _: NativeAsyncJob, _: &mut Context) {} +impl JobExecutor for IdleJobExecutor { + fn enqueue_job(&self, _: Job, _: &mut Context) {} fn run_jobs(&self, _: &mut Context) {} } -/// A simple FIFO job queue that bails on the first error. +/// A simple FIFO executor that bails on the first error. /// -/// This is the default job queue for the [`Context`], but it is mostly pretty limited for -/// custom event queues. +/// This is the default job executor for the [`Context`], but it is mostly pretty limited for +/// custom event loop. /// -/// To disable running promise jobs on the engine, see [`IdleJobQueue`]. +/// To disable running promise jobs on the engine, see [`IdleJobExecutor`]. #[derive(Default)] -pub struct SimpleJobQueue { - job_queue: RefCell>, - async_job_queue: RefCell>, +pub struct SimpleJobExecutor { + jobs: RefCell>, + async_jobs: RefCell>, } -impl Debug for SimpleJobQueue { +impl Debug for SimpleJobExecutor { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_tuple("SimpleQueue").field(&"..").finish() + f.debug_struct("SimpleJobExecutor").finish_non_exhaustive() } } -impl SimpleJobQueue { - /// Creates an empty `SimpleJobQueue`. +impl SimpleJobExecutor { + /// Creates a new `SimpleJobExecutor`. #[must_use] pub fn new() -> Self { Self::default() } } -impl JobQueue for SimpleJobQueue { - fn enqueue_job(&self, job: NativeJob, _: &mut Context) { - self.job_queue.borrow_mut().push_back(job); - } - - fn enqueue_async_job(&self, job: NativeAsyncJob, _: &mut Context) { - self.async_job_queue.borrow_mut().push_back(job); +impl JobExecutor for SimpleJobExecutor { + fn enqueue_job(&self, job: Job, _: &mut Context) { + match job { + Job::PromiseJob(p) => self.jobs.borrow_mut().push_back(p), + Job::AsyncJob(a) => self.async_jobs.borrow_mut().push_back(a), + } } fn run_jobs(&self, context: &mut Context) { let context = RefCell::new(context); loop { - let mut next_job = self.async_job_queue.borrow_mut().pop_front(); + let mut next_job = self.async_jobs.borrow_mut().pop_front(); while let Some(job) = next_job { if pollster::block_on(job.call(&context)).is_err() { - self.async_job_queue.borrow_mut().clear(); + self.async_jobs.borrow_mut().clear(); return; }; - next_job = self.async_job_queue.borrow_mut().pop_front(); + next_job = self.async_jobs.borrow_mut().pop_front(); } // Yeah, I have no idea why Rust extends the lifetime of a `RefCell` that should be immediately // dropped after calling `pop_front`. 
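            // Popping into a local and advancing with `while let` keeps each `RefCell` borrow
            // confined to the `pop_front` call: `job.call` may enqueue follow-up jobs, which
            // borrows the queue again, so no borrow can still be live at that point.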
- let mut next_job = self.job_queue.borrow_mut().pop_front(); + let mut next_job = self.jobs.borrow_mut().pop_front(); while let Some(job) = next_job { if job.call(&mut context.borrow_mut()).is_err() { - self.job_queue.borrow_mut().clear(); + self.jobs.borrow_mut().clear(); return; }; - next_job = self.job_queue.borrow_mut().pop_front(); + next_job = self.jobs.borrow_mut().pop_front(); } - if self.async_job_queue.borrow().is_empty() && self.job_queue.borrow().is_empty() { + if self.async_jobs.borrow().is_empty() && self.jobs.borrow().is_empty() { return; } } diff --git a/core/engine/src/object/builtins/jspromise.rs b/core/engine/src/object/builtins/jspromise.rs index e6d24154f40..695c26cd9e3 100644 --- a/core/engine/src/object/builtins/jspromise.rs +++ b/core/engine/src/object/builtins/jspromise.rs @@ -292,7 +292,7 @@ impl JsPromise { { let (promise, resolvers) = Self::new_pending(context); - context.job_queue().enqueue_async_job( + context.enqueue_job( NativeAsyncJob::new(move |context| { Box::pin(async move { let result = future.await; @@ -306,8 +306,8 @@ impl JsPromise { } } }) - }), - context, + }) + .into(), ); promise @@ -1085,7 +1085,7 @@ impl JsPromise { /// Run jobs until this promise is resolved or rejected. This could /// result in an infinite loop if the promise is never resolved or - /// rejected (e.g. with a [`boa_engine::job::JobQueue`] that does + /// rejected (e.g. with a [`boa_engine::job::JobExecutor`] that does /// not prioritize properly). If you need more control over how /// the promise handles timing out, consider using /// [`Context::run_jobs`] directly. diff --git a/core/engine/src/script.rs b/core/engine/src/script.rs index 52c59110e22..4adf0118d91 100644 --- a/core/engine/src/script.rs +++ b/core/engine/src/script.rs @@ -160,9 +160,9 @@ impl Script { /// Evaluates this script and returns its result. /// /// Note that this won't run any scheduled promise jobs; you need to call [`Context::run_jobs`] - /// on the context or [`JobQueue::run_jobs`] on the provided queue to run them. + /// on the context or [`JobExecutor::run_jobs`] on the provided queue to run them. /// - /// [`JobQueue::run_jobs`]: crate::job::JobQueue::run_jobs + /// [`JobExecutor::run_jobs`]: crate::job::JobExecutor::run_jobs pub fn evaluate(&self, context: &mut Context) -> JsResult { let _timer = Profiler::global().start_event("Execution", "Main"); diff --git a/examples/src/bin/module_fetch_async.rs b/examples/src/bin/module_fetch_async.rs index 5925e0900bb..458d70f6d13 100644 --- a/examples/src/bin/module_fetch_async.rs +++ b/examples/src/bin/module_fetch_async.rs @@ -2,7 +2,7 @@ use std::{cell::RefCell, collections::VecDeque, future::Future, pin::Pin, rc::Rc use boa_engine::{ builtins::promise::PromiseState, - job::{JobQueue, NativeAsyncJob, NativeJob}, + job::{Job, JobExecutor, NativeAsyncJob, PromiseJob}, js_string, module::ModuleLoader, Context, JsNativeError, JsResult, JsString, JsValue, Module, @@ -29,58 +29,61 @@ impl ModuleLoader for HttpModuleLoader { let url = specifier.to_std_string_escaped(); // Just enqueue the future for now. We'll advance all the enqueued futures inside our custom - // `JobQueue`. - context.enqueue_async_job(NativeAsyncJob::with_realm( - move |context| { - Box::pin(async move { - // Adding some prints to show the non-deterministic nature of the async fetches. - // Try to run the example several times to see how sometimes the fetches start in order - // but finish in disorder. 
- println!("Fetching `{url}`..."); - - // This could also retry fetching in case there's an error while requesting the module. - let body: Result<_, isahc::Error> = async { - let mut response = Request::get(&url) - .redirect_policy(RedirectPolicy::Limit(5)) - .body(())? - .send_async() - .await?; - - Ok(response.text().await?) - } - .await; - - println!("Finished fetching `{url}`"); - - let body = match body { - Ok(body) => body, - Err(err) => { - // On error we always call `finish_load` to notify the load promise about the - // error. - finish_load( - Err(JsNativeError::typ().with_message(err.to_string()).into()), - &mut context.borrow_mut(), - ); - - // Just returns anything to comply with `NativeAsyncJob::new`'s signature. - return Ok(JsValue::undefined()); + // `JobExecutor`. + context.enqueue_job( + NativeAsyncJob::with_realm( + move |context| { + Box::pin(async move { + // Adding some prints to show the non-deterministic nature of the async fetches. + // Try to run the example several times to see how sometimes the fetches start in order + // but finish in disorder. + println!("Fetching `{url}`..."); + + // This could also retry fetching in case there's an error while requesting the module. + let body: Result<_, isahc::Error> = async { + let mut response = Request::get(&url) + .redirect_policy(RedirectPolicy::Limit(5)) + .body(())? + .send_async() + .await?; + + Ok(response.text().await?) } - }; - - // Could also add a path if needed. - let source = Source::from_bytes(body.as_bytes()); - - let module = Module::parse(source, None, &mut context.borrow_mut()); - - // We don't do any error handling, `finish_load` takes care of that for us. - finish_load(module, &mut context.borrow_mut()); - - // Also needed to match `NativeAsyncJob::new`. - Ok(JsValue::undefined()) - }) - }, - context.realm().clone(), - )); + .await; + + println!("Finished fetching `{url}`"); + + let body = match body { + Ok(body) => body, + Err(err) => { + // On error we always call `finish_load` to notify the load promise about the + // error. + finish_load( + Err(JsNativeError::typ().with_message(err.to_string()).into()), + &mut context.borrow_mut(), + ); + + // Just returns anything to comply with `NativeAsyncJob::new`'s signature. + return Ok(JsValue::undefined()); + } + }; + + // Could also add a path if needed. + let source = Source::from_bytes(body.as_bytes()); + + let module = Module::parse(source, None, &mut context.borrow_mut()); + + // We don't do any error handling, `finish_load` takes care of that for us. + finish_load(module, &mut context.borrow_mut()); + + // Also needed to match `NativeAsyncJob::new`. + Ok(JsValue::undefined()) + }) + }, + context.realm().clone(), + ) + .into(), + ); } } @@ -108,7 +111,7 @@ fn main() -> JsResult<()> { "#; let context = &mut Context::builder() - .job_queue(Rc::new(Queue::new())) + .job_executor(Rc::new(Queue::new())) // NEW: sets the context module loader to our custom loader .module_loader(Rc::new(HttpModuleLoader)) .build()?; @@ -169,20 +172,20 @@ fn main() -> JsResult<()> { // Taken from the `smol_event_loop.rs` example. /// An event queue using smol to drive futures to completion. 
struct Queue { - async_jobs: RefCell>, - jobs: RefCell>, + async_jobs: RefCell>, + promise_jobs: RefCell>, } impl Queue { fn new() -> Self { Self { async_jobs: RefCell::default(), - jobs: RefCell::default(), + promise_jobs: RefCell::default(), } } fn drain_jobs(&self, context: &mut Context) { - let jobs = std::mem::take(&mut *self.jobs.borrow_mut()); + let jobs = std::mem::take(&mut *self.promise_jobs.borrow_mut()); for job in jobs { if let Err(e) = job.call(context) { eprintln!("Uncaught {e}"); @@ -191,13 +194,13 @@ impl Queue { } } -impl JobQueue for Queue { - fn enqueue_job(&self, job: NativeJob, _context: &mut Context) { - self.jobs.borrow_mut().push_back(job); - } - - fn enqueue_async_job(&self, async_job: NativeAsyncJob, _context: &mut Context) { - self.async_jobs.borrow_mut().push(async_job); +impl JobExecutor for Queue { + fn enqueue_job(&self, job: Job, _context: &mut Context) { + match job { + Job::PromiseJob(job) => self.promise_jobs.borrow_mut().push_back(job), + Job::AsyncJob(job) => self.async_jobs.borrow_mut().push_back(job), + _ => panic!("unsupported job type"), + } } // While the sync flavor of `run_jobs` will block the current thread until all the jobs have finished... @@ -216,7 +219,7 @@ impl JobQueue for Queue { { Box::pin(async move { // Early return in case there were no jobs scheduled. - if self.jobs.borrow().is_empty() && self.async_jobs.borrow().is_empty() { + if self.promise_jobs.borrow().is_empty() && self.async_jobs.borrow().is_empty() { return; } let mut group = FutureGroup::new(); @@ -225,7 +228,7 @@ impl JobQueue for Queue { group.insert(job.call(context)); } - if self.jobs.borrow().is_empty() { + if self.promise_jobs.borrow().is_empty() { let Some(result) = group.next().await else { // Both queues are empty. We can exit. return; diff --git a/examples/src/bin/smol_event_loop.rs b/examples/src/bin/smol_event_loop.rs index c673b5f96c0..5d4998654cb 100644 --- a/examples/src/bin/smol_event_loop.rs +++ b/examples/src/bin/smol_event_loop.rs @@ -9,7 +9,7 @@ use std::{ use boa_engine::{ context::ContextBuilder, - job::{JobQueue, NativeAsyncJob, NativeJob}, + job::{Job, JobExecutor, NativeAsyncJob, PromiseJob}, js_string, native_function::NativeFunction, property::Attribute, @@ -35,19 +35,19 @@ fn main() { /// An event queue using smol to drive futures to completion. struct Queue { async_jobs: RefCell>, - jobs: RefCell>, + promise_jobs: RefCell>, } impl Queue { fn new() -> Self { Self { async_jobs: RefCell::default(), - jobs: RefCell::default(), + promise_jobs: RefCell::default(), } } fn drain_jobs(&self, context: &mut Context) { - let jobs = std::mem::take(&mut *self.jobs.borrow_mut()); + let jobs = std::mem::take(&mut *self.promise_jobs.borrow_mut()); for job in jobs { if let Err(e) = job.call(context) { eprintln!("Uncaught {e}"); @@ -56,13 +56,13 @@ impl Queue { } } -impl JobQueue for Queue { - fn enqueue_job(&self, job: NativeJob, _context: &mut Context) { - self.jobs.borrow_mut().push_back(job); - } - - fn enqueue_async_job(&self, async_job: NativeAsyncJob, _context: &mut Context) { - self.async_jobs.borrow_mut().push_back(async_job); +impl JobExecutor for Queue { + fn enqueue_job(&self, job: Job, _context: &mut Context) { + match job { + Job::PromiseJob(job) => self.promise_jobs.borrow_mut().push_back(job), + Job::AsyncJob(job) => self.async_jobs.borrow_mut().push_back(job), + _ => panic!("unsupported job type"), + } } // While the sync flavor of `run_jobs` will block the current thread until all the jobs have finished... 
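For orientation (an illustrative sketch, not part of the patch): this is roughly how the two `Job` variants reach an executor like the one above. A JavaScript `then` reaction is scheduled as a `Job::PromiseJob`, while wrapping a Rust future with the usual `JsPromise::from_future` constructor schedules a `Job::AsyncJob`; both wait in their queues until the executor is driven. The helper name below is hypothetical.

```rust
use boa_engine::{object::builtins::JsPromise, Context, JsError, JsResult, JsValue, Source};

// Illustrative only: schedule one job of each kind, then drain them.
fn schedule_both_kinds(context: &mut Context) -> JsResult<()> {
    // Wraps the Rust future in a `NativeAsyncJob`, enqueued as `Job::AsyncJob`.
    let _pending = JsPromise::from_future(async { Ok::<_, JsError>(JsValue::from(1)) }, context);

    // The `then` reaction is enqueued as a `Job::PromiseJob`.
    context.eval(Source::from_bytes("Promise.resolve(0).then(v => v + 1)"))?;

    // Whatever executor the context was built with decides how both queues are drained.
    context.run_jobs();
    Ok(())
}
```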
@@ -81,7 +81,7 @@ impl JobQueue for Queue { { Box::pin(async move { // Early return in case there were no jobs scheduled. - if self.jobs.borrow().is_empty() && self.async_jobs.borrow().is_empty() { + if self.promise_jobs.borrow().is_empty() && self.async_jobs.borrow().is_empty() { return; } let mut group = FutureGroup::new(); @@ -90,7 +90,7 @@ impl JobQueue for Queue { group.insert(job.call(context)); } - if self.jobs.borrow().is_empty() { + if self.promise_jobs.borrow().is_empty() { let Some(result) = group.next().await else { // Both queues are empty. We can exit. return; @@ -153,21 +153,24 @@ fn interval(this: &JsValue, args: &[JsValue], context: &mut Context) -> JsResult let delay = args.get_or_undefined(1).to_u32(context)?; let args = args.get(2..).unwrap_or_default().to_vec(); - context.enqueue_async_job(NativeAsyncJob::with_realm( - move |context| { - Box::pin(async move { - let mut timer = smol::Timer::interval(Duration::from_millis(u64::from(delay))); - for _ in 0..10 { - timer.next().await; - if let Err(err) = function.call(&this, &args, &mut context.borrow_mut()) { - eprintln!("Uncaught {err}"); + context.enqueue_job( + NativeAsyncJob::with_realm( + move |context| { + Box::pin(async move { + let mut timer = smol::Timer::interval(Duration::from_millis(u64::from(delay))); + for _ in 0..10 { + timer.next().await; + if let Err(err) = function.call(&this, &args, &mut context.borrow_mut()) { + eprintln!("Uncaught {err}"); + } } - } - Ok(JsValue::undefined()) - }) - }, - context.realm().clone(), - )); + Ok(JsValue::undefined()) + }) + }, + context.realm().clone(), + ) + .into(), + ); Ok(JsValue::undefined()) } @@ -233,7 +236,7 @@ fn internally_async_event_loop() { // Initialize the queue and the context let queue = Queue::new(); let context = &mut ContextBuilder::new() - .job_queue(Rc::new(queue)) + .job_executor(Rc::new(queue)) .build() .unwrap(); @@ -262,7 +265,7 @@ fn externally_async_event_loop() { // Initialize the queue and the context let queue = Queue::new(); let context = &mut ContextBuilder::new() - .job_queue(Rc::new(queue)) + .job_executor(Rc::new(queue)) .build() .unwrap(); diff --git a/examples/src/bin/tokio_event_loop.rs b/examples/src/bin/tokio_event_loop.rs index cc8b33cede3..07b187ce48d 100644 --- a/examples/src/bin/tokio_event_loop.rs +++ b/examples/src/bin/tokio_event_loop.rs @@ -9,7 +9,7 @@ use std::{ use boa_engine::{ context::ContextBuilder, - job::{JobQueue, NativeAsyncJob, NativeJob}, + job::{Job, JobExecutor, NativeAsyncJob, PromiseJob}, js_string, native_function::NativeFunction, property::Attribute, @@ -35,19 +35,19 @@ fn main() { /// An event queue using tokio to drive futures to completion. 
struct Queue { async_jobs: RefCell>, - jobs: RefCell>, + promise_jobs: RefCell>, } impl Queue { fn new() -> Self { Self { async_jobs: RefCell::default(), - jobs: RefCell::default(), + promise_jobs: RefCell::default(), } } fn drain_jobs(&self, context: &mut Context) { - let jobs = std::mem::take(&mut *self.jobs.borrow_mut()); + let jobs = std::mem::take(&mut *self.promise_jobs.borrow_mut()); for job in jobs { if let Err(e) = job.call(context) { eprintln!("Uncaught {e}"); @@ -56,13 +56,13 @@ impl Queue { } } -impl JobQueue for Queue { - fn enqueue_job(&self, job: NativeJob, _context: &mut Context) { - self.jobs.borrow_mut().push_back(job); - } - - fn enqueue_async_job(&self, async_job: NativeAsyncJob, _context: &mut Context) { - self.async_jobs.borrow_mut().push_back(async_job); +impl JobExecutor for Queue { + fn enqueue_job(&self, job: Job, _context: &mut Context) { + match job { + Job::PromiseJob(job) => self.promise_jobs.borrow_mut().push_back(job), + Job::AsyncJob(job) => self.async_jobs.borrow_mut().push_back(job), + _ => panic!("unsupported job type"), + } } // While the sync flavor of `run_jobs` will block the current thread until all the jobs have finished... @@ -86,7 +86,7 @@ impl JobQueue for Queue { { Box::pin(async move { // Early return in case there were no jobs scheduled. - if self.jobs.borrow().is_empty() && self.async_jobs.borrow().is_empty() { + if self.promise_jobs.borrow().is_empty() && self.async_jobs.borrow().is_empty() { return; } let mut group = FutureGroup::new(); @@ -95,7 +95,7 @@ impl JobQueue for Queue { group.insert(job.call(context)); } - if self.jobs.borrow().is_empty() { + if self.promise_jobs.borrow().is_empty() { let Some(result) = group.next().await else { // Both queues are empty. We can exit. return; @@ -161,21 +161,24 @@ fn interval(this: &JsValue, args: &[JsValue], context: &mut Context) -> JsResult let delay = args.get_or_undefined(1).to_u32(context)?; let args = args.get(2..).unwrap_or_default().to_vec(); - context.enqueue_async_job(NativeAsyncJob::with_realm( - move |context| { - Box::pin(async move { - let mut timer = time::interval(Duration::from_millis(u64::from(delay))); - for _ in 0..10 { - timer.tick().await; - if let Err(err) = function.call(&this, &args, &mut context.borrow_mut()) { - eprintln!("Uncaught {err}"); + context.enqueue_job( + NativeAsyncJob::with_realm( + move |context| { + Box::pin(async move { + let mut timer = time::interval(Duration::from_millis(u64::from(delay))); + for _ in 0..10 { + timer.tick().await; + if let Err(err) = function.call(&this, &args, &mut context.borrow_mut()) { + eprintln!("Uncaught {err}"); + } } - } - Ok(JsValue::undefined()) - }) - }, - context.realm().clone(), - )); + Ok(JsValue::undefined()) + }) + }, + context.realm().clone(), + ) + .into(), + ); Ok(JsValue::undefined()) } @@ -241,7 +244,7 @@ fn internally_async_event_loop() { // Initialize the queue and the context let queue = Queue::new(); let context = &mut ContextBuilder::new() - .job_queue(Rc::new(queue)) + .job_executor(Rc::new(queue)) .build() .unwrap(); @@ -268,7 +271,7 @@ async fn externally_async_event_loop() { // Initialize the queue and the context let queue = Queue::new(); let context = &mut ContextBuilder::new() - .job_queue(Rc::new(queue)) + .job_executor(Rc::new(queue)) .build() .unwrap(); From a7c10d393f8ddad09be14aee3c51e5a623da7bc0 Mon Sep 17 00:00:00 2001 From: jedel1043 Date: Sat, 11 Jan 2025 15:57:47 -0600 Subject: [PATCH 6/7] Fix execution bug in cli executor --- cli/src/main.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff 
--git a/cli/src/main.rs b/cli/src/main.rs index 2ff42930ef1..0d02d880894 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -472,6 +472,14 @@ impl JobExecutor for Executor { if self.promise_jobs.borrow().is_empty() && self.async_jobs.borrow().is_empty() { return; } + + let jobs = std::mem::take(&mut *self.promise_jobs.borrow_mut()); + for job in jobs { + if let Err(e) = job.call(context) { + eprintln!("Uncaught {e}"); + } + } + let async_jobs = std::mem::take(&mut *self.async_jobs.borrow_mut()); for async_job in async_jobs { if let Err(err) = pollster::block_on(async_job.call(&RefCell::new(context))) { From 9c35fcd70ef025d58b041fc0ff117a8c819f020f Mon Sep 17 00:00:00 2001 From: jedel1043 Date: Tue, 14 Jan 2025 01:58:15 -0600 Subject: [PATCH 7/7] Propagate jobs into `JobExecutor::run_jobs` --- cli/src/main.rs | 18 ++++--- core/engine/src/builtins/promise/tests.rs | 3 +- core/engine/src/context/mod.rs | 11 ++-- core/engine/src/job.rs | 34 +++++++------ core/engine/src/object/builtins/jspromise.rs | 6 +-- core/engine/src/tests/async_generator.rs | 6 +-- core/engine/src/tests/iterators.rs | 9 ++-- core/engine/src/tests/promise.rs | 2 +- core/engine/tests/imports.rs | 2 +- core/engine/tests/module.rs | 2 +- core/interop/src/lib.rs | 6 +-- core/interop/src/macros.rs | 2 +- core/interop/tests/embedded.rs | 2 +- examples/src/bin/module_fetch_async.rs | 12 ++--- examples/src/bin/modules.rs | 2 +- examples/src/bin/smol_event_loop.rs | 33 ++++++------ examples/src/bin/synthetic.rs | 2 +- examples/src/bin/tokio_event_loop.rs | 53 +++++++++++--------- tests/tester/src/exec/mod.rs | 20 ++++++-- 19 files changed, 125 insertions(+), 100 deletions(-) diff --git a/cli/src/main.rs b/cli/src/main.rs index 0d02d880894..a02527343aa 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -18,7 +18,7 @@ use boa_engine::{ optimizer::OptimizerOptions, script::Script, vm::flowgraph::{Direction, Graph}, - Context, JsError, Source, + Context, JsError, JsResult, Source, }; use boa_parser::source::ReadChar; use clap::{Parser, ValueEnum, ValueHint}; @@ -292,7 +292,7 @@ fn evaluate_file( ); let promise = module.load_link_evaluate(context); - context.run_jobs(); + context.run_jobs().map_err(|err| err.into_erased(context))?; let result = promise.state(); return match result { @@ -308,9 +308,9 @@ fn evaluate_file( Ok(v) => println!("{}", v.display()), Err(v) => eprintln!("Uncaught {v}"), } - context.run_jobs(); - - Ok(()) + context + .run_jobs() + .map_err(|err| err.into_erased(context).into()) } fn evaluate_files(args: &Opt, context: &mut Context, loader: &SimpleModuleLoader) { @@ -425,7 +425,9 @@ fn main() -> Result<()> { eprintln!("{}: {}", "Uncaught".red(), v.to_string().red()); } } - context.run_jobs(); + if let Err(err) = context.run_jobs() { + eprintln!("{err}"); + }; } } @@ -467,10 +469,10 @@ impl JobExecutor for Executor { } } - fn run_jobs(&self, context: &mut Context) { + fn run_jobs(&self, context: &mut Context) -> JsResult<()> { loop { if self.promise_jobs.borrow().is_empty() && self.async_jobs.borrow().is_empty() { - return; + return Ok(()); } let jobs = std::mem::take(&mut *self.promise_jobs.borrow_mut()); diff --git a/core/engine/src/builtins/promise/tests.rs b/core/engine/src/builtins/promise/tests.rs index 7f6c316fb93..e63fc131073 100644 --- a/core/engine/src/builtins/promise/tests.rs +++ b/core/engine/src/builtins/promise/tests.rs @@ -13,8 +13,7 @@ fn promise() { count += 1; "#}), TestAction::assert_eq("count", 2), - #[allow(clippy::redundant_closure_for_method_calls)] - TestAction::inspect_context(|ctx| 
ctx.run_jobs()), + TestAction::inspect_context(|ctx| ctx.run_jobs().unwrap()), TestAction::assert_eq("count", 3), ]); } diff --git a/core/engine/src/context/mod.rs b/core/engine/src/context/mod.rs index 3d5415e8b48..ccd57b4065c 100644 --- a/core/engine/src/context/mod.rs +++ b/core/engine/src/context/mod.rs @@ -477,9 +477,10 @@ impl Context { /// Runs all the jobs with the provided job executor. #[inline] - pub fn run_jobs(&mut self) { - self.job_executor().run_jobs(self); + pub fn run_jobs(&mut self) -> JsResult<()> { + let result = self.job_executor().run_jobs(self); self.clear_kept_objects(); + result } /// Asynchronously runs all the jobs with the provided job executor. @@ -490,11 +491,13 @@ impl Context { /// specific handling of each [`JobExecutor`]. If you want to execute jobs concurrently, you must /// provide a custom implementatin of `JobExecutor` to the context. #[allow(clippy::future_not_send)] - pub async fn run_jobs_async(&mut self) { - self.job_executor() + pub async fn run_jobs_async(&mut self) -> JsResult<()> { + let result = self + .job_executor() .run_jobs_async(&RefCell::new(self)) .await; self.clear_kept_objects(); + result } /// Abstract operation [`ClearKeptObjects`][clear]. diff --git a/core/engine/src/job.rs b/core/engine/src/job.rs index 30581a001cf..8838356b688 100644 --- a/core/engine/src/job.rs +++ b/core/engine/src/job.rs @@ -386,7 +386,7 @@ pub trait JobExecutor { fn enqueue_job(&self, job: Job, context: &mut Context); /// Runs all jobs in the executor. - fn run_jobs(&self, context: &mut Context); + fn run_jobs(&self, context: &mut Context) -> JsResult<()>; /// Asynchronously runs all jobs in the executor. /// @@ -395,7 +395,7 @@ pub trait JobExecutor { fn run_jobs_async<'a, 'b, 'fut>( &'a self, context: &'b RefCell<&mut Context>, - ) -> Pin + 'fut>> + ) -> Pin> + 'fut>> where 'a: 'fut, 'b: 'fut, @@ -427,7 +427,9 @@ pub struct IdleJobExecutor; impl JobExecutor for IdleJobExecutor { fn enqueue_job(&self, _: Job, _: &mut Context) {} - fn run_jobs(&self, _: &mut Context) {} + fn run_jobs(&self, _: &mut Context) -> JsResult<()> { + Ok(()) + } } /// A simple FIFO executor that bails on the first error. @@ -438,7 +440,7 @@ impl JobExecutor for IdleJobExecutor { /// To disable running promise jobs on the engine, see [`IdleJobExecutor`]. #[derive(Default)] pub struct SimpleJobExecutor { - jobs: RefCell>, + promise_jobs: RefCell>, async_jobs: RefCell>, } @@ -459,36 +461,38 @@ impl SimpleJobExecutor { impl JobExecutor for SimpleJobExecutor { fn enqueue_job(&self, job: Job, _: &mut Context) { match job { - Job::PromiseJob(p) => self.jobs.borrow_mut().push_back(p), + Job::PromiseJob(p) => self.promise_jobs.borrow_mut().push_back(p), Job::AsyncJob(a) => self.async_jobs.borrow_mut().push_back(a), } } - fn run_jobs(&self, context: &mut Context) { + fn run_jobs(&self, context: &mut Context) -> JsResult<()> { let context = RefCell::new(context); loop { let mut next_job = self.async_jobs.borrow_mut().pop_front(); while let Some(job) = next_job { - if pollster::block_on(job.call(&context)).is_err() { + if let Err(err) = pollster::block_on(job.call(&context)) { self.async_jobs.borrow_mut().clear(); - return; + self.promise_jobs.borrow_mut().clear(); + return Err(err); }; next_job = self.async_jobs.borrow_mut().pop_front(); } // Yeah, I have no idea why Rust extends the lifetime of a `RefCell` that should be immediately // dropped after calling `pop_front`. 
- let mut next_job = self.jobs.borrow_mut().pop_front(); + let mut next_job = self.promise_jobs.borrow_mut().pop_front(); while let Some(job) = next_job { - if job.call(&mut context.borrow_mut()).is_err() { - self.jobs.borrow_mut().clear(); - return; + if let Err(err) = job.call(&mut context.borrow_mut()) { + self.async_jobs.borrow_mut().clear(); + self.promise_jobs.borrow_mut().clear(); + return Err(err); }; - next_job = self.jobs.borrow_mut().pop_front(); + next_job = self.promise_jobs.borrow_mut().pop_front(); } - if self.async_jobs.borrow().is_empty() && self.jobs.borrow().is_empty() { - return; + if self.async_jobs.borrow().is_empty() && self.promise_jobs.borrow().is_empty() { + return Ok(()); } } } diff --git a/core/engine/src/object/builtins/jspromise.rs b/core/engine/src/object/builtins/jspromise.rs index 695c26cd9e3..a42370033cc 100644 --- a/core/engine/src/object/builtins/jspromise.rs +++ b/core/engine/src/object/builtins/jspromise.rs @@ -1143,14 +1143,14 @@ impl JsPromise { /// // Uncommenting the following line would panic. /// // context.run_jobs(); /// ``` - pub fn await_blocking(&self, context: &mut Context) -> Result { + pub fn await_blocking(&self, context: &mut Context) -> Result { loop { match self.state() { PromiseState::Pending => { - context.run_jobs(); + context.run_jobs()?; } PromiseState::Fulfilled(f) => break Ok(f), - PromiseState::Rejected(r) => break Err(r), + PromiseState::Rejected(r) => break Err(JsError::from_opaque(r)), } } } diff --git a/core/engine/src/tests/async_generator.rs b/core/engine/src/tests/async_generator.rs index 82889b1e264..54898589d13 100644 --- a/core/engine/src/tests/async_generator.rs +++ b/core/engine/src/tests/async_generator.rs @@ -47,7 +47,7 @@ fn return_on_then_infinite_loop() { }); g.return(); "#}), - TestAction::inspect_context(Context::run_jobs), + TestAction::inspect_context(|ctx| ctx.run_jobs().unwrap()), TestAction::assert_eq("count", 100), ]); } @@ -71,7 +71,7 @@ fn return_on_then_single() { }); let ret = g.return() "#}), - TestAction::inspect_context(Context::run_jobs), + TestAction::inspect_context(|ctx| ctx.run_jobs().unwrap()), TestAction::assert_eq("first", false), TestAction::assert_with_op("ret", |ret, context| { assert_promise_iter_value(&ret, &JsValue::undefined(), true, context); @@ -104,7 +104,7 @@ fn return_on_then_queue() { let second = g.next(); let ret = g.return(); "#}), - TestAction::inspect_context(Context::run_jobs), + TestAction::inspect_context(|ctx| ctx.run_jobs().unwrap()), TestAction::assert_with_op("first", |first, context| { assert_promise_iter_value(&first, &JsValue::from(1), false, context); true diff --git a/core/engine/src/tests/iterators.rs b/core/engine/src/tests/iterators.rs index eb341f1f986..2ddddd2fc50 100644 --- a/core/engine/src/tests/iterators.rs +++ b/core/engine/src/tests/iterators.rs @@ -46,8 +46,7 @@ fn iterator_close_in_continue_before_jobs() { actual.push("async fn end"); }(); "#}), - #[allow(clippy::redundant_closure_for_method_calls)] - TestAction::inspect_context(|ctx| ctx.run_jobs()), + TestAction::inspect_context(|ctx| ctx.run_jobs().unwrap()), TestAction::assert(indoc! {r#" arrayEquals( actual, @@ -110,8 +109,7 @@ fn async_iterator_close_in_continue_is_awaited() { actual.push("async fn end"); }(); "#}), - #[allow(clippy::redundant_closure_for_method_calls)] - TestAction::inspect_context(|ctx| ctx.run_jobs()), + TestAction::inspect_context(|ctx| ctx.run_jobs().unwrap()), TestAction::assert(indoc! 
{r#" arrayEquals( actual, @@ -198,8 +196,7 @@ fn mixed_iterators_close_in_continue() { actual.push("async fn end"); }(); "#}), - #[allow(clippy::redundant_closure_for_method_calls)] - TestAction::inspect_context(|ctx| ctx.run_jobs()), + TestAction::inspect_context(|ctx| ctx.run_jobs().unwrap()), TestAction::assert(indoc! {r#" arrayEquals( actual, diff --git a/core/engine/src/tests/promise.rs b/core/engine/src/tests/promise.rs index c8d64b8809e..a5f48f51521 100644 --- a/core/engine/src/tests/promise.rs +++ b/core/engine/src/tests/promise.rs @@ -31,7 +31,7 @@ fn issue_2658() { genTwo.next().then(v => { result2 = v; }); "# }), - TestAction::inspect_context(|ctx| ctx.run_jobs()), + TestAction::inspect_context(|ctx| ctx.run_jobs().unwrap()), TestAction::assert("!result1.done"), TestAction::assert_eq("result1.value", 5), TestAction::assert("!result2.done"), diff --git a/core/engine/tests/imports.rs b/core/engine/tests/imports.rs index 36def275947..a4feca921d4 100644 --- a/core/engine/tests/imports.rs +++ b/core/engine/tests/imports.rs @@ -23,7 +23,7 @@ fn subdirectories() { let module = boa_engine::Module::parse(source, None, &mut context).unwrap(); let result = module.load_link_evaluate(&mut context); - context.run_jobs(); + context.run_jobs().unwrap(); match result.state() { PromiseState::Pending => {} PromiseState::Fulfilled(v) => { diff --git a/core/engine/tests/module.rs b/core/engine/tests/module.rs index 09ec3522588..93db4609812 100644 --- a/core/engine/tests/module.rs +++ b/core/engine/tests/module.rs @@ -41,7 +41,7 @@ fn test_json_module_from_str() { let module = Module::parse(source, None, &mut context).unwrap(); let promise = module.load_link_evaluate(&mut context); - context.run_jobs(); + context.run_jobs().unwrap(); match promise.state() { PromiseState::Pending => {} diff --git a/core/interop/src/lib.rs b/core/interop/src/lib.rs index 436680d29db..e3b4bff3745 100644 --- a/core/interop/src/lib.rs +++ b/core/interop/src/lib.rs @@ -565,7 +565,7 @@ fn into_js_module() { let root_module = Module::parse(source, None, &mut context).unwrap(); let promise_result = root_module.load_link_evaluate(&mut context); - context.run_jobs(); + context.run_jobs().unwrap(); // Checking if the final promise didn't return an error. assert!( @@ -617,7 +617,7 @@ fn can_throw_exception() { let root_module = Module::parse(source, None, &mut context).unwrap(); let promise_result = root_module.load_link_evaluate(&mut context); - context.run_jobs(); + context.run_jobs().unwrap(); // Checking if the final promise didn't return an error. assert_eq!( @@ -721,7 +721,7 @@ fn class() { let root_module = Module::parse(source, None, &mut context).unwrap(); let promise_result = root_module.load_link_evaluate(&mut context); - context.run_jobs(); + context.run_jobs().unwrap(); // Checking if the final promise didn't return an error. assert!( diff --git a/core/interop/src/macros.rs b/core/interop/src/macros.rs index 9be07ef765d..5fe2d585052 100644 --- a/core/interop/src/macros.rs +++ b/core/interop/src/macros.rs @@ -525,7 +525,7 @@ fn js_class_test() { let root_module = Module::parse(source, None, &mut context).unwrap(); let promise_result = root_module.load_link_evaluate(&mut context); - context.run_jobs(); + context.run_jobs().unwrap(); // Checking if the final promise didn't return an error. 
assert!( diff --git a/core/interop/tests/embedded.rs b/core/interop/tests/embedded.rs index 04b35a9f6ad..d0a8501d65e 100644 --- a/core/interop/tests/embedded.rs +++ b/core/interop/tests/embedded.rs @@ -35,7 +35,7 @@ fn simple() { ) .expect("failed to parse module"); let promise = module.load_link_evaluate(&mut context); - context.run_jobs(); + context.run_jobs().unwrap(); match promise.state() { PromiseState::Fulfilled(value) => { diff --git a/examples/src/bin/module_fetch_async.rs b/examples/src/bin/module_fetch_async.rs index 458d70f6d13..abc7c90e982 100644 --- a/examples/src/bin/module_fetch_async.rs +++ b/examples/src/bin/module_fetch_async.rs @@ -124,7 +124,7 @@ fn main() -> JsResult<()> { // Important to call `Context::run_jobs`, or else all the futures and promises won't be // pushed forward by the job queue. - context.run_jobs(); + context.run_jobs()?; match promise.state() { // Our job queue guarantees that all promises and futures are finished after returning @@ -204,15 +204,15 @@ impl JobExecutor for Queue { } // While the sync flavor of `run_jobs` will block the current thread until all the jobs have finished... - fn run_jobs(&self, context: &mut Context) { - smol::block_on(smol::LocalExecutor::new().run(self.run_jobs_async(&RefCell::new(context)))); + fn run_jobs(&self, context: &mut Context) -> JsResult<()> { + smol::block_on(smol::LocalExecutor::new().run(self.run_jobs_async(&RefCell::new(context)))) } // ...the async flavor won't, which allows concurrent execution with external async tasks. fn run_jobs_async<'a, 'b, 'fut>( &'a self, context: &'b RefCell<&mut Context>, - ) -> Pin + 'fut>> + ) -> Pin> + 'fut>> where 'a: 'fut, 'b: 'fut, @@ -220,7 +220,7 @@ impl JobExecutor for Queue { Box::pin(async move { // Early return in case there were no jobs scheduled. if self.promise_jobs.borrow().is_empty() && self.async_jobs.borrow().is_empty() { - return; + return Ok(()); } let mut group = FutureGroup::new(); loop { @@ -231,7 +231,7 @@ impl JobExecutor for Queue { if self.promise_jobs.borrow().is_empty() { let Some(result) = group.next().await else { // Both queues are empty. We can exit. - return; + return Ok(()); }; if let Err(err) = result { diff --git a/examples/src/bin/modules.rs b/examples/src/bin/modules.rs index 68037590f3e..eec64471c1d 100644 --- a/examples/src/bin/modules.rs +++ b/examples/src/bin/modules.rs @@ -87,7 +87,7 @@ fn main() -> Result<(), Box> { ); // Very important to push forward the job queue after queueing promises. - context.run_jobs(); + context.run_jobs()?; // Checking if the final promise didn't return an error. match promise_result.state() { diff --git a/examples/src/bin/smol_event_loop.rs b/examples/src/bin/smol_event_loop.rs index 5d4998654cb..749be95444c 100644 --- a/examples/src/bin/smol_event_loop.rs +++ b/examples/src/bin/smol_event_loop.rs @@ -21,14 +21,14 @@ use smol::{future, stream::StreamExt}; // This example shows how to create an event loop using the smol runtime. // The example contains two "flavors" of event loops: -fn main() { +fn main() -> JsResult<()> { // An internally async event loop. This event loop blocks the execution of the thread // while executing tasks, but internally uses async to run its tasks. - internally_async_event_loop(); + internally_async_event_loop()?; // An externally async event loop. This event loop can yield to the runtime to concurrently // run tasks with it. - externally_async_event_loop(); + externally_async_event_loop() } // Taken from the `smol_event_loop.rs` example. 
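A migration note (illustrative, not part of the patch): now that `run_jobs` returns `JsResult<()>`, hosts receive the first uncaught job error instead of getting no signal back. They can propagate it with `?` as above, or report it at the call site and keep going, mirroring what several call sites in this patch do.

```rust
use boa_engine::Context;

// Minimal sketch: report an uncaught job error without aborting the host.
fn drive_and_report(context: &mut Context) {
    if let Err(err) = context.run_jobs() {
        eprintln!("Uncaught {err}");
    }
}
```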
@@ -66,15 +66,15 @@ impl JobExecutor for Queue { } // While the sync flavor of `run_jobs` will block the current thread until all the jobs have finished... - fn run_jobs(&self, context: &mut Context) { - smol::block_on(smol::LocalExecutor::new().run(self.run_jobs_async(&RefCell::new(context)))); + fn run_jobs(&self, context: &mut Context) -> JsResult<()> { + smol::block_on(smol::LocalExecutor::new().run(self.run_jobs_async(&RefCell::new(context)))) } // ...the async flavor won't, which allows concurrent execution with external async tasks. fn run_jobs_async<'a, 'b, 'fut>( &'a self, context: &'b RefCell<&mut Context>, - ) -> Pin + 'fut>> + ) -> Pin> + 'fut>> where 'a: 'fut, 'b: 'fut, @@ -82,7 +82,7 @@ impl JobExecutor for Queue { Box::pin(async move { // Early return in case there were no jobs scheduled. if self.promise_jobs.borrow().is_empty() && self.async_jobs.borrow().is_empty() { - return; + return Ok(()); } let mut group = FutureGroup::new(); loop { @@ -93,7 +93,7 @@ impl JobExecutor for Queue { if self.promise_jobs.borrow().is_empty() { let Some(result) = group.next().await else { // Both queues are empty. We can exit. - return; + return Ok(()); }; if let Err(err) = result { @@ -230,7 +230,7 @@ const SCRIPT: &str = r" // This flavor is most recommended when you have an application that: // - Needs to wait until the engine finishes executing; depends on the execution result to continue. // - Delegates the execution of the application to the engine's event loop. -fn internally_async_event_loop() { +fn internally_async_event_loop() -> JsResult<()> { println!("====== Internally async event loop. ======"); // Initialize the queue and the context @@ -249,15 +249,16 @@ fn internally_async_event_loop() { // Important to run this after evaluating, since this is what triggers to run the enqueued jobs. println!("Running jobs..."); - context.run_jobs(); + context.run_jobs()?; println!("Total elapsed time: {:?}\n", now.elapsed()); + Ok(()) } // This flavor is most recommended when you have an application that: // - Cannot afford to block until the engine finishes executing. // - Needs to process IO requests between executions that will be consumed by the engine. -fn externally_async_event_loop() { +fn externally_async_event_loop() -> JsResult<()> { println!("====== Externally async event loop. ======"); let executor = smol::Executor::new(); @@ -282,7 +283,7 @@ fn externally_async_event_loop() { interval.next().await; println!("Executed interval tick {i}"); } - println!("Finished smol interval job...") + println!("Finished smol interval job..."); }); let engine = async { @@ -295,11 +296,13 @@ fn externally_async_event_loop() { // Run the jobs asynchronously, which avoids blocking the main thread. println!("Running jobs..."); - context.run_jobs_async().await; + context.run_jobs_async().await }; - future::zip(counter, engine).await; + future::zip(counter, engine).await.1?; println!("Total elapsed time: {:?}\n", now.elapsed()); - })); + + Ok(()) + })) } diff --git a/examples/src/bin/synthetic.rs b/examples/src/bin/synthetic.rs index e38562cde4b..43d4955259c 100644 --- a/examples/src/bin/synthetic.rs +++ b/examples/src/bin/synthetic.rs @@ -62,7 +62,7 @@ fn main() -> Result<(), Box> { let promise_result = module.load_link_evaluate(context); // Very important to push forward the job queue after queueing promises. - context.run_jobs(); + context.run_jobs()?; // Checking if the final promise didn't return an error. 
match promise_result.state() { diff --git a/examples/src/bin/tokio_event_loop.rs b/examples/src/bin/tokio_event_loop.rs index 07b187ce48d..0c83887c229 100644 --- a/examples/src/bin/tokio_event_loop.rs +++ b/examples/src/bin/tokio_event_loop.rs @@ -22,14 +22,14 @@ use tokio::{task, time}; // This example shows how to create an event loop using the tokio runtime. // The example contains two "flavors" of event loops: -fn main() { +fn main() -> JsResult<()> { // An internally async event loop. This event loop blocks the execution of the thread // while executing tasks, but internally uses async to run its tasks. - internally_async_event_loop(); + internally_async_event_loop()?; // An externally async event loop. This event loop can yield to the runtime to concurrently // run tasks with it. - externally_async_event_loop(); + externally_async_event_loop() } /// An event queue using tokio to drive futures to completion. @@ -66,20 +66,20 @@ impl JobExecutor for Queue { } // While the sync flavor of `run_jobs` will block the current thread until all the jobs have finished... - fn run_jobs(&self, context: &mut Context) { + fn run_jobs(&self, context: &mut Context) -> JsResult<()> { let runtime = tokio::runtime::Builder::new_current_thread() .enable_time() .build() .unwrap(); - task::LocalSet::default().block_on(&runtime, self.run_jobs_async(&RefCell::new(context))); + task::LocalSet::default().block_on(&runtime, self.run_jobs_async(&RefCell::new(context))) } // ...the async flavor won't, which allows concurrent execution with external async tasks. fn run_jobs_async<'a, 'b, 'fut>( &'a self, context: &'b RefCell<&mut Context>, - ) -> Pin + 'fut>> + ) -> Pin> + 'fut>> where 'a: 'fut, 'b: 'fut, @@ -87,7 +87,7 @@ impl JobExecutor for Queue { Box::pin(async move { // Early return in case there were no jobs scheduled. if self.promise_jobs.borrow().is_empty() && self.async_jobs.borrow().is_empty() { - return; + return Ok(()); } let mut group = FutureGroup::new(); loop { @@ -98,7 +98,7 @@ impl JobExecutor for Queue { if self.promise_jobs.borrow().is_empty() { let Some(result) = group.next().await else { // Both queues are empty. We can exit. - return; + return Ok(()); }; if let Err(err) = result { @@ -238,7 +238,7 @@ const SCRIPT: &str = r" // This flavor is most recommended when you have an application that: // - Needs to wait until the engine finishes executing; depends on the execution result to continue. // - Delegates the execution of the application to the engine's event loop. -fn internally_async_event_loop() { +fn internally_async_event_loop() -> JsResult<()> { println!("====== Internally async event loop. ======"); // Initialize the queue and the context @@ -257,16 +257,18 @@ fn internally_async_event_loop() { // Important to run this after evaluating, since this is what triggers to run the enqueued jobs. println!("Running jobs..."); - context.run_jobs(); + context.run_jobs()?; println!("Total elapsed time: {:?}\n", now.elapsed()); + + Ok(()) } // This flavor is most recommended when you have an application that: // - Cannot afford to block until the engine finishes executing. // - Needs to process IO requests between executions that will be consumed by the engine. #[tokio::main] -async fn externally_async_event_loop() { +async fn externally_async_event_loop() -> JsResult<()> { println!("====== Externally async event loop. 
======"); // Initialize the queue and the context let queue = Queue::new(); @@ -281,15 +283,19 @@ async fn externally_async_event_loop() { let now = Instant::now(); // Example of an asynchronous workload that must be run alongside the engine. - let counter = tokio::spawn(async { - let mut interval = time::interval(Duration::from_millis(100)); - println!("Starting tokio interval job..."); - for i in 0..10 { - interval.tick().await; - println!("Executed interval tick {i}"); - } - println!("Finished tokio interval job...") - }); + let counter = async { + tokio::spawn(async { + let mut interval = time::interval(Duration::from_millis(100)); + println!("Starting tokio interval job..."); + for i in 0..10 { + interval.tick().await; + println!("Executed interval tick {i}"); + } + println!("Finished tokio interval job...") + }) + .await + .map_err(|err| JsNativeError::typ().with_message(err.to_string()).into()) + }; let local_set = &mut task::LocalSet::default(); let engine = local_set.run_until(async { @@ -302,11 +308,12 @@ async fn externally_async_event_loop() { // Run the jobs asynchronously, which avoids blocking the main thread. println!("Running jobs..."); - context.run_jobs_async().await; - Ok(()) + context.run_jobs_async().await }); - tokio::try_join!(counter, engine).unwrap(); + tokio::try_join!(counter, engine)?; println!("Total elapsed time: {:?}\n", now.elapsed()); + + Ok(()) } diff --git a/tests/tester/src/exec/mod.rs b/tests/tester/src/exec/mod.rs index e5ce0553cfd..6bca5c1f163 100644 --- a/tests/tester/src/exec/mod.rs +++ b/tests/tester/src/exec/mod.rs @@ -289,7 +289,9 @@ impl Test { let promise = module.load_link_evaluate(context); - context.run_jobs(); + if let Err(err) = context.run_jobs() { + return (false, format!("Uncaught {err}")); + }; match promise.state() { PromiseState::Pending => { @@ -322,7 +324,9 @@ impl Test { } }; - context.run_jobs(); + if let Err(err) = context.run_jobs() { + return (false, format!("Uncaught {err}")); + }; match *async_result.inner.borrow() { UninitResult::Err(ref e) => return (false, format!("Uncaught {e}")), @@ -388,7 +392,9 @@ impl Test { let promise = module.load(context); - context.run_jobs(); + if let Err(err) = context.run_jobs() { + return (false, format!("Uncaught {err}")); + }; match promise.state() { PromiseState::Pending => { @@ -433,7 +439,9 @@ impl Test { let promise = module.load(context); - context.run_jobs(); + if let Err(err) = context.run_jobs() { + return (false, format!("Uncaught {err}")); + }; match promise.state() { PromiseState::Pending => { @@ -451,7 +459,9 @@ impl Test { let promise = module.evaluate(context); - context.run_jobs(); + if let Err(err) = context.run_jobs() { + return (false, format!("Uncaught {err}")); + }; match promise.state() { PromiseState::Pending => {