// bevyengine/bevy: crates/bevy_camera/src/camera.rs

use crate::primitives::Frustum;

use super::{
    visibility::{Visibility, VisibleEntities},
    ClearColorConfig,
};
use bevy_asset::Handle;
use bevy_derive::Deref;
use bevy_ecs::{component::Component, entity::Entity, reflect::ReflectComponent};
use bevy_image::Image;
use bevy_math::{ops, Dir3, FloatOrd, Mat4, Ray3d, Rect, URect, UVec2, Vec2, Vec3, Vec3A};
use bevy_reflect::prelude::*;
use bevy_transform::components::{GlobalTransform, Transform};
use bevy_window::{NormalizedWindowRef, WindowRef};
use core::ops::Range;
use derive_more::derive::From;
use thiserror::Error;
use wgpu_types::{BlendState, TextureUsages};

/// Render viewport configuration for the [`Camera`] component.
///
/// The viewport defines the area on the render target to which the camera renders its image.
/// You can overlay multiple cameras in a single window using viewports to create effects like
/// split screen, minimaps, and character viewers.
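///
/// # Example
///
/// A minimal sketch of two side-by-side viewports for split screen, assuming a
/// 1920x1080 render target (and that `Viewport` is re-exported from the crate
/// root, like [`Camera`]):
/// ```no_run
/// # use bevy_camera::Viewport;
/// # use bevy_math::UVec2;
/// let left = Viewport {
///     physical_position: UVec2::new(0, 0),
///     physical_size: UVec2::new(960, 1080),
///     ..Default::default()
/// };
/// let right = Viewport {
///     physical_position: UVec2::new(960, 0),
///     physical_size: UVec2::new(960, 1080),
///     ..Default::default()
/// };
/// ```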
#[derive(Reflect, Debug, Clone)]
#[reflect(Default, Clone)]
pub struct Viewport {
    /// The physical position to render this viewport to within the [`RenderTarget`] of this [`Camera`].
    /// (0,0) corresponds to the top-left corner.
    pub physical_position: UVec2,
    /// The physical size of the viewport rectangle to render to within the [`RenderTarget`] of this [`Camera`].
    /// The origin of the rectangle is in the top-left corner.
    pub physical_size: UVec2,
    /// The minimum and maximum depth to render (on a scale from 0.0 to 1.0).
    pub depth: Range<f32>,
}

impl Default for Viewport {
    fn default() -> Self {
        Self {
            physical_position: Default::default(),
            physical_size: UVec2::new(1, 1),
            depth: 0.0..1.0,
        }
    }
}

impl Viewport {
    /// Cut the viewport rectangle so that it lies inside a rectangle of the
    /// given size.
    ///
    /// If either of the viewport's position coordinates lies outside the given
    /// dimensions, it will be moved just inside first. If either of the given
    /// dimensions is zero, the position and size of the viewport rectangle will
    /// both be set to zero in that dimension.
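    ///
    /// # Example
    ///
    /// An illustrative sketch of clamping a viewport that overhangs a 1920x1080
    /// target (assuming `Viewport` is re-exported from the crate root):
    /// ```no_run
    /// # use bevy_camera::Viewport;
    /// # use bevy_math::UVec2;
    /// let mut viewport = Viewport {
    ///     physical_position: UVec2::new(1800, 0),
    ///     physical_size: UVec2::new(400, 400),
    ///     ..Default::default()
    /// };
    /// viewport.clamp_to_size(UVec2::new(1920, 1080));
    /// // The rect is cut to the 120 pixels that remain inside the target.
    /// assert_eq!(viewport.physical_size, UVec2::new(120, 400));
    /// ```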
    pub fn clamp_to_size(&mut self, size: UVec2) {
        // If the origin of the viewport rect is outside, then adjust so that
        // it's just barely inside. Then, cut off the part that is outside.
        if self.physical_size.x + self.physical_position.x > size.x {
            if self.physical_position.x < size.x {
                self.physical_size.x = size.x - self.physical_position.x;
            } else if size.x > 0 {
                self.physical_position.x = size.x - 1;
                self.physical_size.x = 1;
            } else {
                self.physical_position.x = 0;
                self.physical_size.x = 0;
            }
        }
        if self.physical_size.y + self.physical_position.y > size.y {
            if self.physical_position.y < size.y {
                self.physical_size.y = size.y - self.physical_position.y;
            } else if size.y > 0 {
                self.physical_position.y = size.y - 1;
                self.physical_size.y = 1;
            } else {
                self.physical_position.y = 0;
                self.physical_size.y = 0;
            }
        }
    }

    /// Returns the effective viewport: `viewport`, with its size replaced by the
    /// [`MainPassResolutionOverride`] if one is present.
    pub fn from_viewport_and_override(
        viewport: Option<&Self>,
        main_pass_resolution_override: Option<&MainPassResolutionOverride>,
    ) -> Option<Self> {
        let mut viewport = viewport.cloned();

        if let Some(override_size) = main_pass_resolution_override {
            if viewport.is_none() {
                viewport = Some(Viewport::default());
            }

            viewport.as_mut().unwrap().physical_size = **override_size;
        }

        viewport
    }
}

/// Override the resolution at which a 3d camera's main pass is rendered.
///
/// Does not affect post processing.
///
/// ## Usage
///
/// * Insert this component on a 3d camera entity in the render world.
/// * The resolution override must be smaller than the camera's viewport size.
/// * The resolution override is specified in physical pixels.
/// * In shaders, use `View::main_pass_viewport` instead of `View::viewport`.
#[derive(Component, Reflect, Deref, Debug)]
#[reflect(Component)]
pub struct MainPassResolutionOverride(pub UVec2);

/// Settings to define a camera sub view.
///
/// When [`Camera::sub_camera_view`] is `Some`, only the sub-section of the
/// image defined by `size` and `offset` (relative to the `full_size` of the
/// whole image) is projected to the camera's viewport.
///
/// Take the example of the following multi-monitor setup:
/// ```css
/// ┌───┬───┐
/// │ A │ B │
/// ├───┼───┤
/// │ C │ D │
/// └───┴───┘
/// ```
/// If each monitor is 1920x1080, the whole image will have a resolution of
/// 3840x2160. For each monitor we can use a single camera with a viewport of
/// the same size as the monitor it corresponds to. To ensure that the image is
/// cohesive, we can use a different sub view on each camera:
/// - Camera A: `full_size` = 3840x2160, `size` = 1920x1080, `offset` = 0,0
/// - Camera B: `full_size` = 3840x2160, `size` = 1920x1080, `offset` = 1920,0
/// - Camera C: `full_size` = 3840x2160, `size` = 1920x1080, `offset` = 0,1080
/// - Camera D: `full_size` = 3840x2160, `size` = 1920x1080, `offset` = 1920,1080
///
/// However, since only the ratio between the values is important, they could all
/// be divided by 120 and still produce the same image. Camera D would, for
/// example, have the following values:
/// `full_size` = 32x18, `size` = 16x9, `offset` = 16,9
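///
/// A sketch of camera D's sub view from the example above (assuming
/// `SubCameraView` is re-exported from the crate root):
/// ```no_run
/// # use bevy_camera::SubCameraView;
/// # use bevy_math::{UVec2, Vec2};
/// let sub_view_d = SubCameraView {
///     full_size: UVec2::new(3840, 2160),
///     size: UVec2::new(1920, 1080),
///     offset: Vec2::new(1920.0, 1080.0),
/// };
/// ```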
#[derive(Debug, Clone, Copy, Reflect, PartialEq)]
#[reflect(Clone, PartialEq, Default)]
pub struct SubCameraView {
    /// Size of the entire camera view
    pub full_size: UVec2,
    /// Offset of the sub camera
    pub offset: Vec2,
    /// Size of the sub camera
    pub size: UVec2,
}

impl Default for SubCameraView {
    fn default() -> Self {
        Self {
            full_size: UVec2::new(1, 1),
            offset: Vec2::new(0., 0.),
            size: UVec2::new(1, 1),
        }
    }
}

/// Information about the current [`RenderTarget`].
#[derive(Debug, Clone)]
pub struct RenderTargetInfo {
    /// The physical size of this render target (in physical pixels, ignoring scale factor).
    pub physical_size: UVec2,
    /// The scale factor of this render target.
    ///
    /// When rendering to a window, this is typically a value greater than or equal to 1.0,
    /// representing the ratio between the size of the window in physical pixels and its logical size.
    pub scale_factor: f32,
}

impl Default for RenderTargetInfo {
    fn default() -> Self {
        Self {
            physical_size: Default::default(),
            scale_factor: 1.,
        }
    }
}

/// Holds internally computed [`Camera`] values.
#[derive(Default, Debug, Clone)]
pub struct ComputedCameraValues {
    pub clip_from_view: Mat4,
    pub target_info: Option<RenderTargetInfo>,
    // Size of the `Viewport`.
    pub old_viewport_size: Option<UVec2>,
    pub old_sub_camera_view: Option<SubCameraView>,
}

/// How much energy a [`Camera3d`](crate::Camera3d) absorbs from incoming light.
///
/// <https://en.wikipedia.org/wiki/Exposure_(photography)>
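///
/// Common presets are provided as associated constants, e.g.
/// [`Exposure::SUNLIGHT`] and [`Exposure::INDOOR`] (a sketch, assuming
/// `Exposure` is re-exported from the crate root):
/// ```no_run
/// # use bevy_camera::Exposure;
/// let exposure = Exposure { ev100: Exposure::EV100_OVERCAST };
/// assert_eq!(exposure.ev100, 12.0);
/// ```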
#[derive(Component, Clone, Copy, Reflect)]
#[reflect(opaque)]
#[reflect(Component, Default, Clone)]
pub struct Exposure {
    /// <https://en.wikipedia.org/wiki/Exposure_value#Tabulated_exposure_values>
    pub ev100: f32,
}

impl Exposure {
    pub const SUNLIGHT: Self = Self {
        ev100: Self::EV100_SUNLIGHT,
    };
    pub const OVERCAST: Self = Self {
        ev100: Self::EV100_OVERCAST,
    };
    pub const INDOOR: Self = Self {
        ev100: Self::EV100_INDOOR,
    };
    /// This value was calibrated to match Blender's implicit/default exposure as closely as possible.
    /// It also happens to be a reasonable default.
    ///
    /// See <https://github.com/bevyengine/bevy/issues/11577> for details.
    pub const BLENDER: Self = Self {
        ev100: Self::EV100_BLENDER,
    };

    pub const EV100_SUNLIGHT: f32 = 15.0;
    pub const EV100_OVERCAST: f32 = 12.0;
    pub const EV100_INDOOR: f32 = 7.0;

    /// This value was calibrated to match Blender's implicit/default exposure as closely as possible.
    /// It also happens to be a reasonable default.
    ///
    /// See <https://github.com/bevyengine/bevy/issues/11577> for details.
    pub const EV100_BLENDER: f32 = 9.7;

    pub fn from_physical_camera(physical_camera_parameters: PhysicalCameraParameters) -> Self {
        Self {
            ev100: physical_camera_parameters.ev100(),
        }
    }

    /// Converts EV100 values to exposure values.
    /// <https://google.github.io/filament/Filament.md.html#imagingpipeline/physicallybasedcamera/exposure>
    #[inline]
    pub fn exposure(&self) -> f32 {
        ops::exp2(-self.ev100) / 1.2
    }
}

impl Default for Exposure {
    fn default() -> Self {
        Self::BLENDER
    }
}

/// Parameters based on physical camera characteristics for calculating EV100
/// values for use with [`Exposure`]. This is also used for depth of field.
#[derive(Clone, Copy)]
pub struct PhysicalCameraParameters {
    /// <https://en.wikipedia.org/wiki/F-number>
    pub aperture_f_stops: f32,
    /// <https://en.wikipedia.org/wiki/Shutter_speed>
    pub shutter_speed_s: f32,
    /// <https://en.wikipedia.org/wiki/Film_speed>
    pub sensitivity_iso: f32,
    /// The height of the [image sensor format] in meters.
    ///
    /// Focal length is derived from the FOV and this value. The default is
    /// 18.66mm, matching the [Super 35] format, which is popular in cinema.
    ///
    /// [image sensor format]: https://en.wikipedia.org/wiki/Image_sensor_format
    ///
    /// [Super 35]: https://en.wikipedia.org/wiki/Super_35
    pub sensor_height: f32,
}

impl PhysicalCameraParameters {
    /// Calculate the [EV100](https://en.wikipedia.org/wiki/Exposure_value).
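    ///
    /// This computes `log2(aperture^2 * 100 / (shutter_speed * ISO))`. For the
    /// default parameters (f/1.0, a 1/125 s shutter, ISO 100) that is
    /// `log2(1.0 * 1.0 * 100.0 / ((1.0 / 125.0) * 100.0)) = log2(125.0)`,
    /// which is approximately 6.97.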
    pub fn ev100(&self) -> f32 {
        ops::log2(
            self.aperture_f_stops * self.aperture_f_stops * 100.0
                / (self.shutter_speed_s * self.sensitivity_iso),
        )
    }
}

impl Default for PhysicalCameraParameters {
    fn default() -> Self {
        Self {
            aperture_f_stops: 1.0,
            shutter_speed_s: 1.0 / 125.0,
            sensitivity_iso: 100.0,
            sensor_height: 0.01866,
        }
    }
}

/// Error returned when a conversion between world-space and viewport-space coordinates fails.
///
/// See [`world_to_viewport`][Camera::world_to_viewport] and [`viewport_to_world`][Camera::viewport_to_world].
#[derive(Debug, Eq, PartialEq, Copy, Clone, Error)]
pub enum ViewportConversionError {
    /// The pre-computed size of the viewport was not available.
    ///
    /// This may be because the `Camera` was just created and `camera_system` has not been executed
    /// yet, or because the [`RenderTarget`] is misconfigured in one of the following ways:
    /// - it references the [`PrimaryWindow`](RenderTarget::Window) when there is none,
    /// - it references a [`Window`](RenderTarget::Window) entity that doesn't exist or doesn't actually have a `Window` component,
    /// - it references an [`Image`](RenderTarget::Image) that doesn't exist (invalid handle),
    /// - it references a [`TextureView`](RenderTarget::TextureView) that doesn't exist (invalid handle).
    #[error("pre-computed size of viewport not available")]
    NoViewportSize,
    /// The computed coordinate was beyond the `Camera`'s near plane.
    ///
    /// Only applicable when converting from world-space to viewport-space.
    #[error("computed coordinate beyond `Camera`'s near plane")]
    PastNearPlane,
    /// The computed coordinate was beyond the `Camera`'s far plane.
    ///
    /// Only applicable when converting from world-space to viewport-space.
    #[error("computed coordinate beyond `Camera`'s far plane")]
    PastFarPlane,
    /// The Normalized Device Coordinates could not be computed because the `camera_transform`, the
    /// `world_position`, or the projection matrix defined by [`Projection`](super::projection::Projection)
    /// contained `NAN` (see [`world_to_ndc`][Camera::world_to_ndc] and [`ndc_to_world`][Camera::ndc_to_world]).
    #[error("found NaN while computing NDC")]
    InvalidData,
}

/// The defining [`Component`] for camera entities,
/// storing information about how and what to render through this camera.
///
/// The [`Camera`] component is added to an entity to define the properties of the viewpoint from
/// which rendering occurs. It defines the position of the view to render, the projection method
/// to transform the 3D objects into a 2D image, as well as the render target into which that image
/// is produced.
///
/// Note that a [`Camera`] needs a `CameraRenderGraph` to render anything.
/// This is typically provided by adding a [`Camera2d`] or [`Camera3d`] component,
/// but custom render graphs can also be defined. Inserting a [`Camera`] with no render
/// graph will emit an error at runtime.
///
/// [`Camera2d`]: crate::Camera2d
/// [`Camera3d`]: crate::Camera3d
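///
/// # Example
///
/// A minimal sketch of spawning a 2d camera that renders to the lower half of a
/// 1920x1080 window (assuming `Camera2d` and `Viewport` are re-exported from
/// the crate root):
/// ```no_run
/// # use bevy_ecs::prelude::Commands;
/// # use bevy_math::UVec2;
/// # use bevy_camera::{Camera, Camera2d, Viewport};
/// fn setup(mut commands: Commands) {
///     commands.spawn((
///         Camera2d,
///         Camera {
///             viewport: Some(Viewport {
///                 physical_position: UVec2::new(0, 540),
///                 physical_size: UVec2::new(1920, 540),
///                 ..Default::default()
///             }),
///             ..Default::default()
///         },
///     ));
/// }
/// ```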
#[derive(Component, Debug, Reflect, Clone)]
#[reflect(Component, Default, Debug, Clone)]
#[require(
    Frustum,
    CameraMainTextureUsages,
    VisibleEntities,
    Transform,
    Visibility
)]
pub struct Camera {
    /// If set, this camera will render to the given [`Viewport`] rectangle within the configured [`RenderTarget`].
    pub viewport: Option<Viewport>,
    /// Cameras with a higher order are rendered later, and thus on top of lower order cameras.
    pub order: isize,
    /// If this is set to `true`, this camera will be rendered to its specified [`RenderTarget`]. If `false`, this
    /// camera will not be rendered.
    pub is_active: bool,
    /// Computed values for this camera, such as the projection matrix and the render target size.
    #[reflect(ignore, clone)]
    pub computed: ComputedCameraValues,
    /// The "target" that this camera will render to.
    pub target: RenderTarget,
    // todo: reflect this when #6042 lands
    /// The [`CameraOutputMode`] for this camera.
    #[reflect(ignore, clone)]
    pub output_mode: CameraOutputMode,
    /// If this is enabled, and a previous camera exists that shares this camera's render target, and this
    /// camera has MSAA enabled, then the previous camera's outputs will be written to the intermediate
    /// multi-sampled render target textures for this camera. This enables cameras with MSAA enabled to
    /// "write their results on top" of previous camera results, and include them as a part of their render
    /// results. This is enabled by default to ensure cameras with MSAA enabled layer their results in the
    /// same way as cameras without MSAA.
    pub msaa_writeback: bool,
    /// The clear color operation to perform on the render target.
    pub clear_color: ClearColorConfig,
    /// If set, this camera will be a sub camera of a large view, defined by a [`SubCameraView`].
    pub sub_camera_view: Option<SubCameraView>,
}

impl Default for Camera {
    fn default() -> Self {
        Self {
            is_active: true,
            order: 0,
            viewport: None,
            computed: Default::default(),
            target: Default::default(),
            output_mode: Default::default(),
            msaa_writeback: true,
            clear_color: Default::default(),
            sub_camera_view: None,
        }
    }
}

impl Camera {
    /// Converts a physical size in this `Camera` to a logical size.
    #[inline]
    pub fn to_logical(&self, physical_size: UVec2) -> Option<Vec2> {
        let scale = self.computed.target_info.as_ref()?.scale_factor;
        Some(physical_size.as_vec2() / scale)
    }

    /// The rendered physical bounds [`URect`] of the camera. If the `viewport` field is
    /// set to [`Some`], this will be the rect of that custom viewport. Otherwise it will default to
    /// the full physical rect of the current [`RenderTarget`].
    #[inline]
    pub fn physical_viewport_rect(&self) -> Option<URect> {
        let min = self
            .viewport
            .as_ref()
            .map(|v| v.physical_position)
            .unwrap_or(UVec2::ZERO);
        let max = min + self.physical_viewport_size()?;
        Some(URect { min, max })
    }

    /// The rendered logical bounds [`Rect`] of the camera. If the `viewport` field is set to
    /// [`Some`], this will be the rect of that custom viewport. Otherwise it will default to the
    /// full logical rect of the current [`RenderTarget`].
    #[inline]
    pub fn logical_viewport_rect(&self) -> Option<Rect> {
        let URect { min, max } = self.physical_viewport_rect()?;
        Some(Rect {
            min: self.to_logical(min)?,
            max: self.to_logical(max)?,
        })
    }

    /// The logical size of this camera's viewport. If the `viewport` field is set to [`Some`], this
    /// will be the size of that custom viewport. Otherwise it will default to the full logical size
    /// of the current [`RenderTarget`].
    /// For logic that requires the full logical size of the
    /// [`RenderTarget`], prefer [`Camera::logical_target_size`].
    ///
    /// Returns `None` if either:
    /// - the function is called just after the `Camera` is created, before `camera_system` is executed,
    /// - the [`RenderTarget`] isn't correctly set:
    ///   - it references the [`PrimaryWindow`](RenderTarget::Window) when there is none,
    ///   - it references a [`Window`](RenderTarget::Window) entity that doesn't exist or doesn't actually have a `Window` component,
    ///   - it references an [`Image`](RenderTarget::Image) that doesn't exist (invalid handle),
    ///   - it references a [`TextureView`](RenderTarget::TextureView) that doesn't exist (invalid handle).
    #[inline]
    pub fn logical_viewport_size(&self) -> Option<Vec2> {
        self.viewport
            .as_ref()
            .and_then(|v| self.to_logical(v.physical_size))
            .or_else(|| self.logical_target_size())
    }

    /// The physical size of this camera's viewport (in physical pixels).
    /// If the `viewport` field is set to [`Some`], this
    /// will be the size of that custom viewport. Otherwise it will default to the full physical size of
    /// the current [`RenderTarget`].
    /// For logic that requires the full physical size of the [`RenderTarget`], prefer [`Camera::physical_target_size`].
    #[inline]
    pub fn physical_viewport_size(&self) -> Option<UVec2> {
        self.viewport
            .as_ref()
            .map(|v| v.physical_size)
            .or_else(|| self.physical_target_size())
    }

    /// The full logical size of this camera's [`RenderTarget`], ignoring custom `viewport` configuration.
    /// Note that if the `viewport` field is [`Some`], this will not represent the size of the rendered area.
    /// For logic that requires the size of the actually rendered area, prefer [`Camera::logical_viewport_size`].
    #[inline]
    pub fn logical_target_size(&self) -> Option<Vec2> {
        self.computed
            .target_info
            .as_ref()
            .and_then(|t| self.to_logical(t.physical_size))
    }

    /// The full physical size of this camera's [`RenderTarget`] (in physical pixels),
    /// ignoring custom `viewport` configuration.
    /// Note that if the `viewport` field is [`Some`], this will not represent the size of the rendered area.
    /// For logic that requires the size of the actually rendered area, prefer [`Camera::physical_viewport_size`].
    #[inline]
    pub fn physical_target_size(&self) -> Option<UVec2> {
        self.computed.target_info.as_ref().map(|t| t.physical_size)
    }

    /// The scale factor of this camera's [`RenderTarget`], if available.
    #[inline]
    pub fn target_scaling_factor(&self) -> Option<f32> {
        self.computed
            .target_info
            .as_ref()
            .map(|t: &RenderTargetInfo| t.scale_factor)
    }

    /// The projection matrix computed using this camera's [`Projection`](super::projection::Projection).
    #[inline]
    pub fn clip_from_view(&self) -> Mat4 {
        self.computed.clip_from_view
    }

    /// Given a position in world space, use the camera to compute the viewport-space coordinates.
    ///
    /// To get the coordinates in Normalized Device Coordinates, you should use
    /// [`world_to_ndc`](Self::world_to_ndc).
    ///
    /// # Panics
    ///
    /// Will panic if `glam_assert` is enabled and the `camera_transform` contains `NAN`
    /// (see [`world_to_ndc`][Self::world_to_ndc]).
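    ///
    /// # Example
    ///
    /// A minimal sketch that projects an entity's world position onto the
    /// viewport (illustrative only, in the style of the `viewport_to_world`
    /// example below):
    /// ```no_run
    /// # use bevy_ecs::prelude::Single;
    /// # use bevy_transform::prelude::GlobalTransform;
    /// # use bevy_camera::Camera;
    /// # use bevy_math::Vec3;
    /// fn system(camera_query: Single<(&Camera, &GlobalTransform)>) {
    ///     let (camera, camera_transform) = *camera_query;
    ///     // Project the world-space origin; fails if it is outside the frustum depth range.
    ///     if let Ok(viewport_position) = camera.world_to_viewport(camera_transform, Vec3::ZERO) {
    ///         println!("{viewport_position:?}");
    ///     }
    /// }
    /// ```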
    #[doc(alias = "world_to_screen")]
    pub fn world_to_viewport(
        &self,
        camera_transform: &GlobalTransform,
        world_position: Vec3,
    ) -> Result<Vec2, ViewportConversionError> {
        let target_rect = self
            .logical_viewport_rect()
            .ok_or(ViewportConversionError::NoViewportSize)?;
        let mut ndc_space_coords = self
            .world_to_ndc(camera_transform, world_position)
            .ok_or(ViewportConversionError::InvalidData)?;
        // NDC z-values outside of 0 < z < 1 are outside the (implicit) camera frustum and are thus not in viewport-space
        if ndc_space_coords.z < 0.0 {
            return Err(ViewportConversionError::PastFarPlane);
        }
        if ndc_space_coords.z > 1.0 {
            return Err(ViewportConversionError::PastNearPlane);
        }

        // Flip the Y co-ordinate origin from the bottom to the top.
        ndc_space_coords.y = -ndc_space_coords.y;

        // Once in NDC space, we can discard the z element and map x/y to the viewport rect
        let viewport_position =
            (ndc_space_coords.truncate() + Vec2::ONE) / 2.0 * target_rect.size() + target_rect.min;
        Ok(viewport_position)
    }

    /// Given a position in world space, use the camera to compute the viewport-space coordinates and depth.
    ///
    /// To get the coordinates in Normalized Device Coordinates, you should use
    /// [`world_to_ndc`](Self::world_to_ndc).
    ///
    /// # Panics
    ///
    /// Will panic if `glam_assert` is enabled and the `camera_transform` contains `NAN`
    /// (see [`world_to_ndc`][Self::world_to_ndc]).
    #[doc(alias = "world_to_screen_with_depth")]
    pub fn world_to_viewport_with_depth(
        &self,
        camera_transform: &GlobalTransform,
        world_position: Vec3,
    ) -> Result<Vec3, ViewportConversionError> {
        let target_rect = self
            .logical_viewport_rect()
            .ok_or(ViewportConversionError::NoViewportSize)?;
        let mut ndc_space_coords = self
            .world_to_ndc(camera_transform, world_position)
            .ok_or(ViewportConversionError::InvalidData)?;
        // NDC z-values outside of 0 < z < 1 are outside the (implicit) camera frustum and are thus not in viewport-space
        if ndc_space_coords.z < 0.0 {
            return Err(ViewportConversionError::PastFarPlane);
        }
        if ndc_space_coords.z > 1.0 {
            return Err(ViewportConversionError::PastNearPlane);
        }

        // Convert the NDC depth to view-space z via the near plane, negating the result so depth is positive.
        let depth = -self.depth_ndc_to_view_z(ndc_space_coords.z);

        // Flip the Y co-ordinate origin from the bottom to the top.
        ndc_space_coords.y = -ndc_space_coords.y;

        // Once in NDC space, we can discard the z element and map x/y to the viewport rect
        let viewport_position =
            (ndc_space_coords.truncate() + Vec2::ONE) / 2.0 * target_rect.size() + target_rect.min;
        Ok(viewport_position.extend(depth))
    }

    /// Returns a ray originating from the camera, that passes through everything beyond `viewport_position`.
    ///
    /// The resulting ray starts on the near plane of the camera.
    ///
    /// If the camera's projection is orthographic the direction of the ray is always equal to `camera_transform.forward()`.
    ///
    /// To get the world space coordinates with Normalized Device Coordinates, you should use
    /// [`ndc_to_world`](Self::ndc_to_world).
    ///
    /// # Example
    /// ```no_run
    /// # use bevy_window::Window;
    /// # use bevy_ecs::prelude::{Single, IntoScheduleConfigs};
    /// # use bevy_transform::prelude::{GlobalTransform, TransformSystems};
    /// # use bevy_camera::Camera;
    /// # use bevy_app::{App, PostUpdate};
    /// #
    /// fn system(camera_query: Single<(&Camera, &GlobalTransform)>, window: Single<&Window>) {
    ///     let (camera, camera_transform) = *camera_query;
    ///
    ///     if let Some(cursor_position) = window.cursor_position()
    ///         // Calculate a ray pointing from the camera into the world based on the cursor's position.
    ///         && let Ok(ray) = camera.viewport_to_world(camera_transform, cursor_position)
    ///     {
    ///         println!("{ray:?}");
    ///     }
    /// }
    ///
    /// # let mut app = App::new();
    /// // Run the system after transform propagation so the camera's global transform is up-to-date.
    /// app.add_systems(PostUpdate, system.after(TransformSystems::Propagate));
    /// ```
    ///
    /// # Panics
    ///
    /// Will panic if the camera's projection matrix is invalid (has a determinant of 0) and
    /// `glam_assert` is enabled (see [`ndc_to_world`](Self::ndc_to_world)).
    pub fn viewport_to_world(
        &self,
        camera_transform: &GlobalTransform,
        viewport_position: Vec2,
    ) -> Result<Ray3d, ViewportConversionError> {
        let target_rect = self
            .logical_viewport_rect()
            .ok_or(ViewportConversionError::NoViewportSize)?;
        let rect_relative = (viewport_position - target_rect.min) / target_rect.size();
        let mut ndc_xy = rect_relative * 2. - Vec2::ONE;
        // Flip the Y co-ordinate from the top to the bottom to enter NDC.
        ndc_xy.y = -ndc_xy.y;

        let ndc_point_near = ndc_xy.extend(1.0).into();
        // Using EPSILON because an ndc with Z = 0 returns NaNs.
        let ndc_point_far = ndc_xy.extend(f32::EPSILON).into();
        let view_from_clip = self.computed.clip_from_view.inverse();
        let world_from_view = camera_transform.affine();
        // We multiply the point by `view_from_clip` and then `world_from_view` in sequence to avoid the precision loss
        // (and performance penalty) incurred by pre-composing an affine transform with a projective transform.
        // Additionally, we avoid adding and subtracting translation to the direction component to maintain precision.
        let view_point_near = view_from_clip.project_point3a(ndc_point_near);
        let view_point_far = view_from_clip.project_point3a(ndc_point_far);
        let view_dir = view_point_far - view_point_near;
        let origin = world_from_view.transform_point3a(view_point_near).into();
        let direction = world_from_view.transform_vector3a(view_dir).into();

        // The fallible direction constructor ensures that direction isn't NaN.
        Dir3::new(direction)
            .map_err(|_| ViewportConversionError::InvalidData)
            .map(|direction| Ray3d { origin, direction })
    }

    /// Returns a 2D world position computed from a position on this [`Camera`]'s viewport.
    ///
    /// Useful for 2D cameras and other cameras with an orthographic projection pointing along the Z axis.
    ///
    /// To get the world space coordinates with Normalized Device Coordinates, you should use
    /// [`ndc_to_world`](Self::ndc_to_world).
    ///
    /// # Example
    /// ```no_run
    /// # use bevy_window::Window;
    /// # use bevy_ecs::prelude::*;
    /// # use bevy_transform::prelude::{GlobalTransform, TransformSystems};
    /// # use bevy_camera::Camera;
    /// # use bevy_app::{App, PostUpdate};
    /// #
    /// fn system(camera_query: Single<(&Camera, &GlobalTransform)>, window: Single<&Window>) {
    ///     let (camera, camera_transform) = *camera_query;
    ///
    ///     if let Some(cursor_position) = window.cursor_position()
    ///         // Calculate a world position based on the cursor's position.
    ///         && let Ok(world_pos) = camera.viewport_to_world_2d(camera_transform, cursor_position)
    ///     {
    ///         println!("World position: {world_pos:.2}");
    ///     }
    /// }
    ///
    /// # let mut app = App::new();
    /// // Run the system after transform propagation so the camera's global transform is up-to-date.
    /// app.add_systems(PostUpdate, system.after(TransformSystems::Propagate));
    /// ```
    ///
    /// # Panics
    ///
    /// Will panic if the camera's projection matrix is invalid (has a determinant of 0) and
    /// `glam_assert` is enabled (see [`ndc_to_world`](Self::ndc_to_world)).
    pub fn viewport_to_world_2d(
        &self,
        camera_transform: &GlobalTransform,
        viewport_position: Vec2,
    ) -> Result<Vec2, ViewportConversionError> {
        let target_rect = self
            .logical_viewport_rect()
            .ok_or(ViewportConversionError::NoViewportSize)?;
        let mut rect_relative = (viewport_position - target_rect.min) / target_rect.size();

        // Flip the Y co-ordinate origin from the top to the bottom.
        rect_relative.y = 1.0 - rect_relative.y;

        let ndc = rect_relative * 2. - Vec2::ONE;

        let world_near_plane = self
            .ndc_to_world(camera_transform, ndc.extend(1.))
            .ok_or(ViewportConversionError::InvalidData)?;

        Ok(world_near_plane.truncate())
    }

    /// Given a point in world space, use the camera's viewport to compute the Normalized Device Coordinates of the point.
    ///
    /// When the point is within the viewport the values returned will be between -1.0 (bottom left) and 1.0 (top right)
    /// on the X and Y axes, and between 0.0 (far) and 1.0 (near) on the Z axis.
    /// To get the coordinates in the render target's viewport dimensions, you should use
    /// [`world_to_viewport`](Self::world_to_viewport).
    ///
    /// Returns `None` if the `camera_transform`, the `world_position`, or the projection matrix defined by
    /// [`Projection`](super::projection::Projection) contain `NAN`.
    ///
    /// # Panics
    ///
    /// Will panic if the `camera_transform` contains `NAN` and the `glam_assert` feature is enabled.
    pub fn world_to_ndc<V: Into<Vec3A> + From<Vec3A>>(
        &self,
        camera_transform: &GlobalTransform,
        world_point: V,
    ) -> Option<V> {
        let view_from_world = camera_transform.affine().inverse();
        let view_point = view_from_world.transform_point3a(world_point.into());
        let ndc_point = self.computed.clip_from_view.project_point3a(view_point);

        (!ndc_point.is_nan()).then_some(ndc_point.into())
    }

    /// Given a position in Normalized Device Coordinates,
    /// use the camera's viewport to compute the world space position.
    ///
    /// When the position is within the viewport the input values lie between -1.0 and 1.0 on the X and Y axes,
    /// and between 0.0 and 1.0 on the Z axis.
    /// To get the world space coordinates with the viewport position, you should use
    /// [`world_to_viewport`](Self::world_to_viewport).
    ///
    /// Returns `None` if the `camera_transform`, the `ndc_point`, or the projection matrix defined by
    /// [`Projection`](super::projection::Projection) contain `NAN`.
    ///
    /// # Panics
    ///
    /// Will panic if the projection matrix is invalid (has a determinant of 0) and `glam_assert` is enabled.
    pub fn ndc_to_world<V: Into<Vec3A> + From<Vec3A>>(
        &self,
        camera_transform: &GlobalTransform,
        ndc_point: V,
    ) -> Option<V> {
        // We multiply the point by `view_from_clip` and then `world_from_view` in sequence to avoid the precision loss
        // (and performance penalty) incurred by pre-composing an affine transform with a projective transform.
        let view_point = self
            .computed
            .clip_from_view
            .inverse()
            .project_point3a(ndc_point.into());
        let world_point = camera_transform.affine().transform_point3a(view_point);

        (!world_point.is_nan()).then_some(world_point.into())
    }

    /// Converts the depth in Normalized Device Coordinates
    /// to linear view z for perspective projections.
    ///
    /// Note: Depth values in front of the camera will be negative as -z is forward.
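    ///
    /// Assuming a reversed-z perspective projection, where `ndc_depth = near / -view_z`,
    /// inverting that relation gives `view_z = -near / ndc_depth`; for example, with
    /// `near = 0.1`, an NDC depth of `0.5` corresponds to a view z of `-0.2`.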
    pub fn depth_ndc_to_view_z(&self, ndc_depth: f32) -> f32 {
        let near = self.clip_from_view().w_axis.z; // [3][2]
        -near / ndc_depth
    }

    /// Converts the depth in Normalized Device Coordinates
    /// to linear view z for orthographic projections.
    ///
    /// Note: Depth values in front of the camera will be negative as -z is forward.
    pub fn depth_ndc_to_view_z_2d(&self, ndc_depth: f32) -> f32 {
        //                       [3][2]                                      [2][2]
        -(self.clip_from_view().w_axis.z - ndc_depth) / self.clip_from_view().z_axis.z
    }
}

/// Control how this [`Camera`] outputs once rendering is completed.
#[derive(Debug, Clone, Copy)]
pub enum CameraOutputMode {
    /// Writes the camera output to the configured render target.
    Write {
        /// The blend state that will be used by the pipeline that writes the intermediate render textures to the final render target texture.
        /// If not set, the output will be written as-is, ignoring `clear_color` and the existing data in the final render target texture.
        blend_state: Option<BlendState>,
        /// The clear color operation to perform on the final render target texture.
        clear_color: ClearColorConfig,
    },
    /// Skips writing the camera output to the configured render target. The output will remain in the
    /// render target's "intermediate" textures, which a camera with a higher order should write to the render target
    /// using [`CameraOutputMode::Write`]. The "skip" mode can easily prevent render results from being displayed, or cause
    /// them to be lost. Only use this if you know what you are doing!
    /// In camera setups with multiple active cameras rendering to the same [`RenderTarget`], the "skip" mode can be used
    /// to avoid unnecessary / redundant writes to the final output texture, eliminating unneeded render passes.
    Skip,
}

impl Default for CameraOutputMode {
    fn default() -> Self {
        CameraOutputMode::Write {
            blend_state: None,
            clear_color: ClearColorConfig::Default,
        }
    }
}

/// The "target" that a [`Camera`] will render to. For example, this could be a `Window`
/// swapchain or an [`Image`].
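///
/// A `Handle<Image>` converts directly into an image render target via the
/// [`From`] impl below (a sketch; the handle would come from an `Assets<Image>`
/// collection):
/// ```no_run
/// # use bevy_asset::Handle;
/// # use bevy_camera::RenderTarget;
/// # use bevy_image::Image;
/// fn target_for(image: Handle<Image>) -> RenderTarget {
///     // Wraps the handle in an `ImageRenderTarget` with a scale factor of 1.0.
///     RenderTarget::from(image)
/// }
/// ```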
#[derive(Debug, Clone, Reflect, From)]
#[reflect(Clone)]
pub enum RenderTarget {
    /// Window to which the camera's view is rendered.
    Window(WindowRef),
    /// Image to which the camera's view is rendered.
    Image(ImageRenderTarget),
    /// Texture View to which the camera's view is rendered.
    /// Useful when the texture view needs to be created outside of Bevy, for example OpenXR.
    TextureView(ManualTextureViewHandle),
    /// The camera won't render to any color target.
    ///
    /// This is useful when you want a camera that *only* renders prepasses, for
    /// example a depth prepass. See the `render_depth_to_texture` example.
    None {
        /// The physical size of the viewport.
        size: UVec2,
    },
}

impl RenderTarget {
    /// Get a handle to the render target's image,
    /// or `None` if the render target is another variant.
    pub fn as_image(&self) -> Option<&Handle<Image>> {
        if let Self::Image(image_target) = self {
            Some(&image_target.handle)
        } else {
            None
        }
    }

    /// Normalize the render target down to a more concrete value, mostly used for equality comparisons.
    pub fn normalize(&self, primary_window: Option<Entity>) -> Option<NormalizedRenderTarget> {
        match self {
            RenderTarget::Window(window_ref) => window_ref
                .normalize(primary_window)
                .map(NormalizedRenderTarget::Window),
            RenderTarget::Image(handle) => Some(NormalizedRenderTarget::Image(handle.clone())),
            RenderTarget::TextureView(id) => Some(NormalizedRenderTarget::TextureView(*id)),
            RenderTarget::None { size } => Some(NormalizedRenderTarget::None {
                width: size.x,
                height: size.y,
            }),
        }
    }
}

/// Normalized version of the render target.
///
/// Once we have this we shouldn't need to resolve it down anymore.
#[derive(Debug, Clone, Reflect, PartialEq, Eq, Hash, PartialOrd, Ord, From)]
#[reflect(Clone, PartialEq, Hash)]
pub enum NormalizedRenderTarget {
    /// Window to which the camera's view is rendered.
    Window(NormalizedWindowRef),
    /// Image to which the camera's view is rendered.
    Image(ImageRenderTarget),
    /// Texture View to which the camera's view is rendered.
    /// Useful when the texture view needs to be created outside of Bevy, for example OpenXR.
    TextureView(ManualTextureViewHandle),
    /// The camera won't render to any color target.
    ///
    /// This is useful when you want a camera that *only* renders prepasses, for
    /// example a depth prepass. See the `render_depth_to_texture` example.
    None {
        /// The physical width of the viewport.
        width: u32,
        /// The physical height of the viewport.
        height: u32,
    },
}

/// A unique id that corresponds to a specific `ManualTextureView` in the `ManualTextureViews` collection.
///
/// See `ManualTextureViews` in `bevy_camera` for more details.
#[derive(Default, Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, Component, Reflect)]
#[reflect(Component, Default, Debug, PartialEq, Hash, Clone)]
pub struct ManualTextureViewHandle(pub u32);

/// A render target that renders to an [`Image`].
#[derive(Debug, Clone, Reflect)]
#[reflect(Clone, PartialEq, Hash)]
pub struct ImageRenderTarget {
    /// The image to render to.
    pub handle: Handle<Image>,
    /// The scale factor of the render target image, corresponding to the scale
    /// factor for a window target. This should almost always be 1.0.
    pub scale_factor: f32,
}

impl Eq for ImageRenderTarget {}

impl PartialEq for ImageRenderTarget {
    fn eq(&self, other: &Self) -> bool {
        self.handle == other.handle && FloatOrd(self.scale_factor) == FloatOrd(other.scale_factor)
    }
}

impl core::hash::Hash for ImageRenderTarget {
    fn hash<H: core::hash::Hasher>(&self, state: &mut H) {
        self.handle.hash(state);
        FloatOrd(self.scale_factor).hash(state);
    }
}

impl PartialOrd for ImageRenderTarget {
    fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for ImageRenderTarget {
    fn cmp(&self, other: &Self) -> core::cmp::Ordering {
        self.handle
            .cmp(&other.handle)
            .then_with(|| FloatOrd(self.scale_factor).cmp(&FloatOrd(other.scale_factor)))
    }
}

impl From<Handle<Image>> for RenderTarget {
    fn from(handle: Handle<Image>) -> Self {
        Self::Image(handle.into())
    }
}

impl From<Handle<Image>> for ImageRenderTarget {
    fn from(handle: Handle<Image>) -> Self {
        Self {
            handle,
            scale_factor: 1.0,
        }
    }
}

impl Default for RenderTarget {
    fn default() -> Self {
        Self::Window(Default::default())
    }
}

/// This component lets you control the [`TextureUsages`] field of the main texture generated for the camera.
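///
/// Additional usages can be appended to the defaults with
/// [`CameraMainTextureUsages::with`] (a sketch; the component would then be
/// inserted on the camera entity):
/// ```no_run
/// # use bevy_camera::CameraMainTextureUsages;
/// # use wgpu_types::TextureUsages;
/// let usages = CameraMainTextureUsages::default().with(TextureUsages::COPY_DST);
/// ```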
#[derive(Component, Clone, Copy, Reflect)]
#[reflect(opaque)]
#[reflect(Component, Default, Clone)]
pub struct CameraMainTextureUsages(pub TextureUsages);

impl Default for CameraMainTextureUsages {
    fn default() -> Self {
        Self(
            TextureUsages::RENDER_ATTACHMENT
                | TextureUsages::TEXTURE_BINDING
                | TextureUsages::COPY_SRC,
        )
    }
}

impl CameraMainTextureUsages {
    /// Returns `self` with the given usages added.
    pub fn with(mut self, usages: TextureUsages) -> Self {
        self.0 |= usages;
        self
    }
}

#[cfg(test)]
mod test {
    use bevy_math::{Vec2, Vec3};
    use bevy_transform::components::GlobalTransform;

    use crate::{
        Camera, OrthographicProjection, PerspectiveProjection, Projection, RenderTargetInfo,
        Viewport,
    };

    fn make_camera(mut projection: Projection, physical_size: Vec2) -> Camera {
        let viewport = Viewport {
            physical_size: physical_size.as_uvec2(),
            ..Default::default()
        };
        let mut camera = Camera {
            viewport: Some(viewport.clone()),
            ..Default::default()
        };
        camera.computed.target_info = Some(RenderTargetInfo {
            physical_size: viewport.physical_size,
            scale_factor: 1.0,
        });
        projection.update(
            viewport.physical_size.x as f32,
            viewport.physical_size.y as f32,
        );
        camera.computed.clip_from_view = projection.get_clip_from_view();
        camera
    }

    #[test]
    fn viewport_to_world_orthographic_3d_returns_forward() {
        let transform = GlobalTransform::default();
        let size = Vec2::new(1600.0, 900.0);
        let camera = make_camera(
            Projection::Orthographic(OrthographicProjection::default_3d()),
            size,
        );
        let ray = camera.viewport_to_world(&transform, Vec2::ZERO).unwrap();
        assert_eq!(ray.direction, transform.forward());
        assert!(ray
            .origin
            .abs_diff_eq(Vec3::new(-size.x * 0.5, size.y * 0.5, 0.0), 1e-4));
        let ray = camera.viewport_to_world(&transform, size).unwrap();
        assert_eq!(ray.direction, transform.forward());
        assert!(ray
            .origin
            .abs_diff_eq(Vec3::new(size.x * 0.5, -size.y * 0.5, 0.0), 1e-4));
    }

    #[test]
    fn viewport_to_world_orthographic_2d_returns_forward() {
        let transform = GlobalTransform::default();
        let size = Vec2::new(1600.0, 900.0);
        let camera = make_camera(
            Projection::Orthographic(OrthographicProjection::default_2d()),
            size,
        );
        let ray = camera.viewport_to_world(&transform, Vec2::ZERO).unwrap();
        assert_eq!(ray.direction, transform.forward());
        assert!(ray
            .origin
            .abs_diff_eq(Vec3::new(-size.x * 0.5, size.y * 0.5, 1000.0), 1e-4));
        let ray = camera.viewport_to_world(&transform, size).unwrap();
        assert_eq!(ray.direction, transform.forward());
        assert!(ray
            .origin
            .abs_diff_eq(Vec3::new(size.x * 0.5, -size.y * 0.5, 1000.0), 1e-4));
    }

    #[test]
    fn viewport_to_world_perspective_center_returns_forward() {
        let transform = GlobalTransform::default();
        let size = Vec2::new(1600.0, 900.0);
        let camera = make_camera(
            Projection::Perspective(PerspectiveProjection::default()),
            size,
        );
        let ray = camera.viewport_to_world(&transform, size * 0.5).unwrap();
        assert_eq!(ray.direction, transform.forward());
        assert_eq!(ray.origin, transform.forward() * 0.1);
    }
}