// camera.rs
use std::marker::PhantomData;
use crate::{
    camera::CameraProjection,
    prelude::Image,
    render_asset::RenderAssets,
    render_resource::TextureView,
    view::{ExtractedView, ExtractedWindows, VisibleEntities},
    RenderApp, RenderStage,
};
use bevy_app::{App, CoreStage, Plugin, StartupStage};
use bevy_asset::{AssetEvent, Assets, Handle};
use bevy_ecs::{
    change_detection::DetectChanges,
    component::Component,
    entity::Entity,
    event::EventReader,
    prelude::With,
    query::Added,
    reflect::ReflectComponent,
    system::{Commands, ParamSet, Query, Res, ResMut},
};
use bevy_math::{Mat4, UVec2, Vec2, Vec3};
use bevy_reflect::{Reflect, ReflectDeserialize};
use bevy_transform::components::GlobalTransform;
use bevy_utils::HashSet;
use bevy_window::{WindowCreated, WindowId, WindowResized, Windows};
use serde::{Deserialize, Serialize};
use wgpu::Extent3d;

#[derive(Component, Default, Debug, Reflect)]
#[reflect(Component)]
pub struct Camera {
    pub projection_matrix: Mat4,
    #[reflect(ignore)]
    pub target: RenderTarget,
    #[reflect(ignore)]
    pub depth_calculation: DepthCalculation,
    pub near: f32,
    pub far: f32,
}

#[derive(Debug, Clone, Reflect, PartialEq, Eq, Hash)]
pub enum RenderTarget {
    /// Window to which the camera's view is rendered.
    Window(WindowId),
    /// Image to which the camera's view is rendered.
    Image(Handle<Image>),
}

impl Default for RenderTarget {
    fn default() -> Self {
        Self::Window(Default::default())
    }
}

impl RenderTarget {
    pub fn get_texture_view<'a>(
        &self,
        windows: &'a ExtractedWindows,
        images: &'a RenderAssets<Image>,
    ) -> Option<&'a TextureView> {
        match self {
            RenderTarget::Window(window_id) => windows
                .get(window_id)
                .and_then(|window| window.swap_chain_texture.as_ref()),
            RenderTarget::Image(image_handle) => {
                images.get(image_handle).map(|image| &image.texture_view)
            }
        }
    }

    pub fn get_physical_size(&self, windows: &Windows, images: &Assets<Image>) -> Option<UVec2> {
        match self {
            RenderTarget::Window(window_id) => windows
                .get(*window_id)
                .map(|window| UVec2::new(window.physical_width(), window.physical_height())),
            RenderTarget::Image(image_handle) => images.get(image_handle).map(|image| {
                let Extent3d { width, height, .. } = image.texture_descriptor.size;
                UVec2::new(width, height)
            }),
        }
    }

    pub fn get_logical_size(&self, windows: &Windows, images: &Assets<Image>) -> Option<Vec2> {
        match self {
            RenderTarget::Window(window_id) => windows
                .get(*window_id)
                .map(|window| Vec2::new(window.width(), window.height())),
            RenderTarget::Image(image_handle) => images.get(image_handle).map(|image| {
                let Extent3d { width, height, .. } = image.texture_descriptor.size;
                Vec2::new(width as f32, height as f32)
            }),
        }
    }

    // Check if this render target is contained in the given changed windows or images.
    fn is_changed(
        &self,
        changed_window_ids: &[WindowId],
        changed_image_handles: &HashSet<&Handle<Image>>,
    ) -> bool {
        match self {
            RenderTarget::Window(window_id) => changed_window_ids.contains(window_id),
            RenderTarget::Image(image_handle) => changed_image_handles.contains(&image_handle),
        }
    }
}
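
// Hedged example (not part of the original file): render-to-texture amounts to
// pointing a camera's `target` at an `Image` handle instead of a window. The handle
// is assumed to reference an image created elsewhere with a usage that allows it to
// serve as a render attachment; the function name is illustrative only.
#[allow(dead_code)]
fn retarget_camera_to_image_example(camera: &mut Camera, image_handle: Handle<Image>) {
    // Subsequent frames will render this camera's view into the image.
    camera.target = RenderTarget::Image(image_handle);
}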

#[derive(Debug, Clone, Copy, Reflect, Serialize, Deserialize)]
#[reflect_value(Serialize, Deserialize)]
pub enum DepthCalculation {
    /// Pythagorean distance; works everywhere, more expensive to compute.
    Distance,
    /// Optimization for 2D; assuming the camera points towards -Z.
    ZDifference,
}

impl Default for DepthCalculation {
    fn default() -> Self {
        DepthCalculation::Distance
    }
}

impl Camera {
    /// Given a position in world space, use the camera to compute the screen space coordinates.
    pub fn world_to_screen(
        &self,
        windows: &Windows,
        images: &Assets<Image>,
        camera_transform: &GlobalTransform,
        world_position: Vec3,
    ) -> Option<Vec2> {
        let window_size = self.target.get_logical_size(windows, images)?;
        // Build a transform to convert from world to NDC using camera data
        let world_to_ndc: Mat4 =
            self.projection_matrix * camera_transform.compute_matrix().inverse();
        let ndc_space_coords: Vec3 = world_to_ndc.project_point3(world_position);
        // NDC z-values outside of 0 < z < 1 are outside the camera frustum and are thus not in screen space
        if ndc_space_coords.z < 0.0 || ndc_space_coords.z > 1.0 {
            return None;
        }
        // Once in NDC space, we can discard the z element and rescale x/y to fit the screen
        let screen_space_coords = (ndc_space_coords.truncate() + Vec2::ONE) / 2.0 * window_size;
        if !screen_space_coords.is_nan() {
            Some(screen_space_coords)
        } else {
            None
        }
    }
}
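
// Hedged example (not part of the original file): a sketch of how a game system
// might call `Camera::world_to_screen`, projecting a world-space point into logical
// screen coordinates for every camera. The queried component set and the projected
// point (`Vec3::ZERO`) are illustrative assumptions only.
#[allow(dead_code)]
fn world_to_screen_example(
    windows: Res<Windows>,
    images: Res<Assets<Image>>,
    cameras: Query<(&Camera, &GlobalTransform)>,
) {
    for (camera, camera_transform) in cameras.iter() {
        // `None` means the point falls outside 0 < NDC z < 1 or the target size is unknown.
        if let Some(screen_position) =
            camera.world_to_screen(&windows, &images, camera_transform, Vec3::ZERO)
        {
            // `screen_position` is in logical pixels of the camera's render target.
            let _ = screen_position;
        }
    }
}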

#[allow(clippy::type_complexity)]
pub fn camera_system<T: CameraProjection + Component>(
    mut window_resized_events: EventReader<WindowResized>,
    mut window_created_events: EventReader<WindowCreated>,
    mut image_asset_events: EventReader<AssetEvent<Image>>,
    windows: Res<Windows>,
    images: Res<Assets<Image>>,
    mut queries: ParamSet<(
        Query<(Entity, &mut Camera, &mut T)>,
        Query<Entity, Added<Camera>>,
    )>,
) {
    let mut changed_window_ids = Vec::new();
    // handle resize events. latest events are handled first because we only want to resize each
    // window once
    for event in window_resized_events.iter().rev() {
        if changed_window_ids.contains(&event.id) {
            continue;
        }
        changed_window_ids.push(event.id);
    }

    // handle window created events. latest events are handled first because we only want to
    // handle each window once
    for event in window_created_events.iter().rev() {
        if changed_window_ids.contains(&event.id) {
            continue;
        }
        changed_window_ids.push(event.id);
    }

    let changed_image_handles: HashSet<&Handle<Image>> = image_asset_events
        .iter()
        .filter_map(|event| {
            if let AssetEvent::Modified { handle } = event {
                Some(handle)
            } else {
                None
            }
        })
        .collect();

    let mut added_cameras = vec![];
    for entity in &mut queries.p1().iter() {
        added_cameras.push(entity);
    }
    for (entity, mut camera, mut camera_projection) in queries.p0().iter_mut() {
        if camera
            .target
            .is_changed(&changed_window_ids, &changed_image_handles)
            || added_cameras.contains(&entity)
            || camera_projection.is_changed()
        {
            if let Some(size) = camera.target.get_logical_size(&windows, &images) {
                camera_projection.update(size.x, size.y);
                camera.projection_matrix = camera_projection.get_projection_matrix();
                camera.depth_calculation = camera_projection.depth_calculation();
            }
        }
    }
}
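
// Hedged sketch (not part of the original file): `camera_system` is generic over the
// projection component, so an app registers one instance per projection type it uses,
// typically in `CoreStage::PostUpdate` so window/image size changes from the current
// frame are picked up. `P` stands in for a concrete projection type.
#[allow(dead_code)]
fn register_camera_system_example<P: CameraProjection + Component>(app: &mut App) {
    app.add_system_to_stage(CoreStage::PostUpdate, camera_system::<P>);
}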

pub struct CameraTypePlugin<T: Component + Default>(PhantomData<T>);

impl<T: Component + Default> Default for CameraTypePlugin<T> {
    fn default() -> Self {
        Self(Default::default())
    }
}

impl<T: Component + Default> Plugin for CameraTypePlugin<T> {
    fn build(&self, app: &mut App) {
        app.init_resource::<ActiveCamera<T>>()
            .add_startup_system_to_stage(StartupStage::PostStartup, set_active_camera::<T>)
            .add_system_to_stage(CoreStage::PostUpdate, set_active_camera::<T>);
        if let Ok(render_app) = app.get_sub_app_mut(RenderApp) {
            render_app.add_system_to_stage(RenderStage::Extract, extract_cameras::<T>);
        }
    }
}
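
// Hedged sketch (not part of the original file): a camera "type" is just a marker
// component, and adding the matching `CameraTypePlugin` wires up its `ActiveCamera`
// resource and extraction system. `MyCameraMarker` is a hypothetical marker name.
#[allow(dead_code)]
fn add_camera_type_plugin_example(app: &mut App) {
    #[derive(Component, Default)]
    struct MyCameraMarker;

    app.add_plugin(CameraTypePlugin::<MyCameraMarker>::default());
}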

/// The canonical source of the "active camera" of the given camera type `T`.
#[derive(Debug)]
pub struct ActiveCamera<T: Component> {
    camera: Option<Entity>,
    marker: PhantomData<T>,
}

impl<T: Component> Default for ActiveCamera<T> {
    fn default() -> Self {
        Self {
            camera: Default::default(),
            marker: Default::default(),
        }
    }
}

impl<T: Component> Clone for ActiveCamera<T> {
    fn clone(&self) -> Self {
        Self {
            camera: self.camera,
            marker: self.marker,
        }
    }
}

impl<T: Component> ActiveCamera<T> {
    /// Sets the active camera to the given `camera` entity.
    pub fn set(&mut self, camera: Entity) {
        self.camera = Some(camera);
    }

    /// Returns the active camera, if it exists.
    pub fn get(&self) -> Option<Entity> {
        self.camera
    }
}

pub fn set_active_camera<T: Component>(
    mut active_camera: ResMut<ActiveCamera<T>>,
    cameras: Query<Entity, With<T>>,
) {
    if active_camera.get().is_some() {
        return;
    }
    if let Some(camera) = cameras.iter().next() {
        active_camera.camera = Some(camera);
    }
}
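
// Hedged sketch (not part of the original file): `set_active_camera` only fills an
// empty slot, so switching cameras at runtime means overwriting the resource through
// `ActiveCamera::set`, e.g. from a game-specific system that holds the resource.
#[allow(dead_code)]
fn switch_active_camera_example<M: Component>(
    active_camera: &mut ActiveCamera<M>,
    new_camera: Entity,
) {
    // Overwrite whatever camera was picked automatically (or set previously).
    active_camera.set(new_camera);
}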

#[derive(Component, Debug)]
pub struct ExtractedCamera {
    pub target: RenderTarget,
    pub physical_size: Option<UVec2>,
}

pub fn extract_cameras<M: Component + Default>(
    mut commands: Commands,
    windows: Res<Windows>,
    images: Res<Assets<Image>>,
    active_camera: Res<ActiveCamera<M>>,
    query: Query<(&Camera, &GlobalTransform, &VisibleEntities), With<M>>,
) {
    if let Some(entity) = active_camera.get() {
        if let Ok((camera, transform, visible_entities)) = query.get(entity) {
            if let Some(size) = camera.target.get_physical_size(&windows, &images) {
                commands.get_or_spawn(entity).insert_bundle((
                    ExtractedCamera {
                        target: camera.target.clone(),
                        physical_size: camera.target.get_physical_size(&windows, &images),
                    },
                    ExtractedView {
                        projection: camera.projection_matrix,
                        transform: *transform,
                        width: size.x.max(1),
                        height: size.y.max(1),
                        near: camera.near,
                        far: camera.far,
                    },
                    visible_entities.clone(),
                    M::default(),
                ));
            }
        }
    }

    commands.insert_resource(active_camera.clone())
}