Add configuration for async pipeline creation on RenderPlugin #11847

Merged · merged 6 commits into from Feb 16, 2024
8 changes: 7 additions & 1 deletion crates/bevy_render/src/lib.rs
@@ -85,6 +85,9 @@ use std::{
#[derive(Default)]
pub struct RenderPlugin {
pub render_creation: RenderCreation,
/// If `true`, disables asynchronous pipeline compilation.
/// This has no effect on macOS, Wasm, or without the `multi-threaded` feature.
pub synchronous_pipeline_compilation: bool,
}

/// The labels of the default App rendering sets.
@@ -355,7 +358,10 @@ impl Plugin for RenderPlugin {

render_app
.insert_resource(instance)
.insert_resource(PipelineCache::new(device.clone()))
.insert_resource(PipelineCache::new(
device.clone(),
self.synchronous_pipeline_compilation,
))
.insert_resource(device)
.insert_resource(queue)
.insert_resource(render_adapter)
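With this change, an app opts into synchronous compilation by setting the new field on `RenderPlugin`. A minimal sketch (assuming the usual `DefaultPlugins.set(...)` setup, which is not part of this diff):

```rust
use bevy::prelude::*;
use bevy::render::RenderPlugin;

fn main() {
    App::new()
        .add_plugins(DefaultPlugins.set(RenderPlugin {
            // Compile pipelines on the calling thread instead of the async task pool.
            // Has no effect on macOS, Wasm, or without the `multi-threaded` feature.
            synchronous_pipeline_compilation: true,
            // Leave `render_creation` (and any other fields) at their defaults.
            ..default()
        }))
        .run();
}
```

The field defaults to `false`, so existing apps keep asynchronous pipeline compilation unless they opt out.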
277 changes: 152 additions & 125 deletions crates/bevy_render/src/render_resource/pipeline_cache.rs
@@ -480,6 +483,9 @@ pub struct PipelineCache {
pipelines: Vec<CachedPipeline>,
waiting_pipelines: HashSet<CachedPipelineId>,
new_pipelines: Mutex<Vec<CachedPipeline>>,
/// If `true`, disables asynchronous pipeline compilation.
/// This has no effect on macOS, Wasm, or without the `multi-threaded` feature.
synchronous_pipeline_compilation: bool,
}

impl PipelineCache {
@@ -488,14 +491,15 @@ impl PipelineCache {
}

/// Create a new pipeline cache associated with the given render device.
pub fn new(device: RenderDevice) -> Self {
pub fn new(device: RenderDevice, synchronous_pipeline_compilation: bool) -> Self {
Self {
shader_cache: Arc::new(Mutex::new(ShaderCache::new(&device))),
device,
layout_cache: default(),
waiting_pipelines: default(),
new_pipelines: default(),
pipelines: default(),
synchronous_pipeline_compilation,
}
}

@@ -679,88 +683,95 @@ impl PipelineCache {
let device = self.device.clone();
let shader_cache = self.shader_cache.clone();
let layout_cache = self.layout_cache.clone();
create_pipeline_task(async move {
let mut shader_cache = shader_cache.lock().unwrap();
let mut layout_cache = layout_cache.lock().unwrap();

let vertex_module = match shader_cache.get(
&device,
id,
descriptor.vertex.shader.id(),
&descriptor.vertex.shader_defs,
) {
Ok(module) => module,
Err(err) => return Err(err),
};
create_pipeline_task(
async move {
let mut shader_cache = shader_cache.lock().unwrap();
let mut layout_cache = layout_cache.lock().unwrap();

let vertex_module = match shader_cache.get(
&device,
id,
descriptor.vertex.shader.id(),
&descriptor.vertex.shader_defs,
) {
Ok(module) => module,
Err(err) => return Err(err),
};

let fragment_module = match &descriptor.fragment {
Some(fragment) => {
match shader_cache.get(&device, id, fragment.shader.id(), &fragment.shader_defs)
{
Ok(module) => Some(module),
Err(err) => return Err(err),
let fragment_module = match &descriptor.fragment {
Some(fragment) => {
match shader_cache.get(
&device,
id,
fragment.shader.id(),
&fragment.shader_defs,
) {
Ok(module) => Some(module),
Err(err) => return Err(err),
}
}
}
None => None,
};

let layout =
if descriptor.layout.is_empty() && descriptor.push_constant_ranges.is_empty() {
None
} else {
Some(layout_cache.get(
&device,
&descriptor.layout,
descriptor.push_constant_ranges.to_vec(),
))
None => None,
};

drop((shader_cache, layout_cache));

let vertex_buffer_layouts = descriptor
.vertex
.buffers
.iter()
.map(|layout| RawVertexBufferLayout {
array_stride: layout.array_stride,
attributes: &layout.attributes,
step_mode: layout.step_mode,
})
.collect::<Vec<_>>();

let fragment_data = descriptor.fragment.as_ref().map(|fragment| {
(
fragment_module.unwrap(),
fragment.entry_point.deref(),
fragment.targets.as_slice(),
)
});

let descriptor = RawRenderPipelineDescriptor {
multiview: None,
depth_stencil: descriptor.depth_stencil.clone(),
label: descriptor.label.as_deref(),
layout: layout.as_deref(),
multisample: descriptor.multisample,
primitive: descriptor.primitive,
vertex: RawVertexState {
buffers: &vertex_buffer_layouts,
entry_point: descriptor.vertex.entry_point.deref(),
module: &vertex_module,
},
fragment: fragment_data
.as_ref()
.map(|(module, entry_point, targets)| RawFragmentState {
entry_point,
module,
targets,
}),
};
let layout =
if descriptor.layout.is_empty() && descriptor.push_constant_ranges.is_empty() {
None
} else {
Some(layout_cache.get(
&device,
&descriptor.layout,
descriptor.push_constant_ranges.to_vec(),
))
};

drop((shader_cache, layout_cache));

let vertex_buffer_layouts = descriptor
.vertex
.buffers
.iter()
.map(|layout| RawVertexBufferLayout {
array_stride: layout.array_stride,
attributes: &layout.attributes,
step_mode: layout.step_mode,
})
.collect::<Vec<_>>();

let fragment_data = descriptor.fragment.as_ref().map(|fragment| {
(
fragment_module.unwrap(),
fragment.entry_point.deref(),
fragment.targets.as_slice(),
)
});

let descriptor = RawRenderPipelineDescriptor {
multiview: None,
depth_stencil: descriptor.depth_stencil.clone(),
label: descriptor.label.as_deref(),
layout: layout.as_deref(),
multisample: descriptor.multisample,
primitive: descriptor.primitive,
vertex: RawVertexState {
buffers: &vertex_buffer_layouts,
entry_point: descriptor.vertex.entry_point.deref(),
module: &vertex_module,
},
fragment: fragment_data
.as_ref()
.map(|(module, entry_point, targets)| RawFragmentState {
entry_point,
module,
targets,
}),
};

Ok(Pipeline::RenderPipeline(
device.create_render_pipeline(&descriptor),
))
})
Ok(Pipeline::RenderPipeline(
device.create_render_pipeline(&descriptor),
))
},
self.synchronous_pipeline_compilation,
)
}

fn start_create_compute_pipeline(
@@ -771,44 +782,47 @@ impl PipelineCache {
let device = self.device.clone();
let shader_cache = self.shader_cache.clone();
let layout_cache = self.layout_cache.clone();
create_pipeline_task(async move {
let mut shader_cache = shader_cache.lock().unwrap();
let mut layout_cache = layout_cache.lock().unwrap();

let compute_module = match shader_cache.get(
&device,
id,
descriptor.shader.id(),
&descriptor.shader_defs,
) {
Ok(module) => module,
Err(err) => return Err(err),
};

let layout =
if descriptor.layout.is_empty() && descriptor.push_constant_ranges.is_empty() {
None
} else {
Some(layout_cache.get(
&device,
&descriptor.layout,
descriptor.push_constant_ranges.to_vec(),
))
create_pipeline_task(
async move {
let mut shader_cache = shader_cache.lock().unwrap();
let mut layout_cache = layout_cache.lock().unwrap();

let compute_module = match shader_cache.get(
&device,
id,
descriptor.shader.id(),
&descriptor.shader_defs,
) {
Ok(module) => module,
Err(err) => return Err(err),
};

drop((shader_cache, layout_cache));

let descriptor = RawComputePipelineDescriptor {
label: descriptor.label.as_deref(),
layout: layout.as_deref(),
module: &compute_module,
entry_point: &descriptor.entry_point,
};
let layout =
if descriptor.layout.is_empty() && descriptor.push_constant_ranges.is_empty() {
None
} else {
Some(layout_cache.get(
&device,
&descriptor.layout,
descriptor.push_constant_ranges.to_vec(),
))
};

drop((shader_cache, layout_cache));

let descriptor = RawComputePipelineDescriptor {
label: descriptor.label.as_deref(),
layout: layout.as_deref(),
module: &compute_module,
entry_point: &descriptor.entry_point,
};

Ok(Pipeline::ComputePipeline(
device.create_compute_pipeline(&descriptor),
))
})
Ok(Pipeline::ComputePipeline(
device.create_compute_pipeline(&descriptor),
))
},
self.synchronous_pipeline_compilation,
)
}

/// Process the pipeline queue and create all pending pipelines if possible.
@@ -917,21 +931,34 @@ impl PipelineCache {
}
}

#[cfg(all(
not(target_arch = "wasm32"),
not(target_os = "macos"),
feature = "multi-threaded"
))]
fn create_pipeline_task(
task: impl Future<Output = Result<Pipeline, PipelineCacheError>> + Send + 'static,
sync: bool,
) -> CachedPipelineState {
if !sync {
return CachedPipelineState::Creating(bevy_tasks::AsyncComputeTaskPool::get().spawn(task));
}

match futures_lite::future::block_on(task) {
Ok(pipeline) => CachedPipelineState::Ok(pipeline),
Err(err) => CachedPipelineState::Err(err),
}
}

#[cfg(any(
target_arch = "wasm32",
target_os = "macos",
not(feature = "multi-threaded")
))]
fn create_pipeline_task(
task: impl Future<Output = Result<Pipeline, PipelineCacheError>> + Send + 'static,
_sync: bool,
) -> CachedPipelineState {
#[cfg(all(
not(target_arch = "wasm32"),
not(target_os = "macos"),
feature = "multi-threaded"
))]
return CachedPipelineState::Creating(bevy_tasks::AsyncComputeTaskPool::get().spawn(task));

#[cfg(any(
target_arch = "wasm32",
target_os = "macos",
not(feature = "multi-threaded")
))]
match futures_lite::future::block_on(task) {
Ok(pipeline) => CachedPipelineState::Ok(pipeline),
Err(err) => CachedPipelineState::Err(err),
Expand Down
1 change: 1 addition & 0 deletions examples/3d/wireframe.rs
@@ -27,6 +27,7 @@ fn main() {
features: WgpuFeatures::POLYGON_MODE_LINE,
..default()
}),
..default()
}),
// You need to add this plugin to enable wireframe rendering
WireframePlugin,
1 change: 1 addition & 0 deletions examples/app/no_renderer.rs
@@ -18,6 +18,7 @@ fn main() {
..default()
}
.into(),
..default()
}),
)
.run();
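Both example updates add `..default()` because `RenderPlugin` now has the additional `synchronous_pipeline_compilation` field, so existing struct-literal constructions need the remaining fields filled in from `Default`.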