use crate::primitives::Frustum;

use super::{
    visibility::{Visibility, VisibleEntities},
    ClearColorConfig,
};
use bevy_asset::Handle;
use bevy_derive::Deref;
use bevy_ecs::{component::Component, entity::Entity, reflect::ReflectComponent};
use bevy_image::Image;
use bevy_math::{ops, Dir3, FloatOrd, Mat4, Ray3d, Rect, URect, UVec2, Vec2, Vec3, Vec3A};
use bevy_reflect::prelude::*;
use bevy_transform::components::{GlobalTransform, Transform};
use bevy_window::{NormalizedWindowRef, WindowRef};
use core::ops::Range;
use derive_more::derive::From;
use thiserror::Error;
use wgpu_types::{BlendState, TextureUsages};

/// Render viewport configuration for the [`Camera`] component.
///
/// The viewport defines the area on the render target to which the camera renders its image.
/// You can overlay multiple cameras in a single window using viewports to create effects like
/// split screen, minimaps, and character viewers.
#[derive(Reflect, Debug, Clone)]
#[reflect(Default, Clone)]
pub struct Viewport {
    /// The physical position to render this viewport to within the [`RenderTarget`] of this [`Camera`].
    /// (0,0) corresponds to the top-left corner.
    pub physical_position: UVec2,
    /// The physical size of the viewport rectangle to render to within the [`RenderTarget`] of this [`Camera`].
    /// The origin of the rectangle is in the top-left corner.
    pub physical_size: UVec2,
    /// The minimum and maximum depth to render (on a scale from 0.0 to 1.0).
    pub depth: Range<f32>,
}

impl Default for Viewport {
    fn default() -> Self {
        Self {
            physical_position: Default::default(),
            physical_size: UVec2::new(1, 1),
            depth: 0.0..1.0,
        }
    }
}

impl Viewport {
    /// Cut the viewport rectangle so that it lies inside a rectangle of the
    /// given size.
    ///
    /// If either of the viewport's position coordinates lies outside the given
    /// dimensions, it will be moved just inside first. If either of the given
    /// dimensions is zero, the position and size of the viewport rectangle will
    /// both be set to zero in that dimension.
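    ///
    /// # Example
    ///
    /// A small sketch of the clamping behavior (values chosen for illustration,
    /// assuming `Viewport` is exported at the crate root like [`Camera`]):
    /// ```
    /// # use bevy_camera::Viewport;
    /// # use bevy_math::UVec2;
    /// let mut viewport = Viewport {
    ///     physical_position: UVec2::new(100, 100),
    ///     physical_size: UVec2::new(200, 200),
    ///     ..Default::default()
    /// };
    /// // The rect would reach 300, so it is cut down to fit a 256x256 target.
    /// viewport.clamp_to_size(UVec2::new(256, 256));
    /// assert_eq!(viewport.physical_size, UVec2::new(156, 156));
    /// ```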
    pub fn clamp_to_size(&mut self, size: UVec2) {
        // If the origin of the viewport rect is outside, then adjust so that
        // it's just barely inside. Then, cut off the part that is outside.
        if self.physical_size.x + self.physical_position.x > size.x {
            if self.physical_position.x < size.x {
                self.physical_size.x = size.x - self.physical_position.x;
            } else if size.x > 0 {
                self.physical_position.x = size.x - 1;
                self.physical_size.x = 1;
            } else {
                self.physical_position.x = 0;
                self.physical_size.x = 0;
            }
        }
        if self.physical_size.y + self.physical_position.y > size.y {
            if self.physical_position.y < size.y {
                self.physical_size.y = size.y - self.physical_position.y;
            } else if size.y > 0 {
                self.physical_position.y = size.y - 1;
                self.physical_size.y = 1;
            } else {
                self.physical_position.y = 0;
                self.physical_size.y = 0;
            }
        }
    }

    /// Returns a clone of `viewport`, with its physical size replaced by the
    /// resolution override, if one is set.
    pub fn from_viewport_and_override(
        viewport: Option<&Self>,
        main_pass_resolution_override: Option<&MainPassResolutionOverride>,
    ) -> Option<Self> {
        let mut viewport = viewport.cloned();

        if let Some(override_size) = main_pass_resolution_override {
            if viewport.is_none() {
                viewport = Some(Viewport::default());
            }

            viewport.as_mut().unwrap().physical_size = **override_size;
        }

        viewport
    }
}

/// Override the resolution a 3d camera's main pass is rendered at.
///
/// Does not affect post processing.
///
/// ## Usage
///
/// * Insert this component on a 3d camera entity in the render world.
/// * The resolution override must be smaller than the camera's viewport size.
/// * The resolution override is specified in physical pixels.
/// * In shaders, use `View::main_pass_viewport` instead of `View::viewport`.
#[derive(Component, Reflect, Deref, Debug)]
#[reflect(Component)]
pub struct MainPassResolutionOverride(pub UVec2);

/// Settings to define a camera sub view.
///
/// When [`Camera::sub_camera_view`] is `Some`, only the sub-section of the
/// image defined by `size` and `offset` (relative to the `full_size` of the
/// whole image) is projected to the camera's viewport.
///
/// Take the example of the following multi-monitor setup:
/// ```css
/// ┌───┬───┐
/// │ A │ B │
/// ├───┼───┤
/// │ C │ D │
/// └───┴───┘
/// ```
/// If each monitor is 1920x1080, the whole image will have a resolution of
/// 3840x2160. For each monitor we can use a single camera with a viewport of
/// the same size as the monitor it corresponds to. To ensure that the image is
/// cohesive, we can use a different sub view on each camera:
/// - Camera A: `full_size` = 3840x2160, `size` = 1920x1080, `offset` = 0,0
/// - Camera B: `full_size` = 3840x2160, `size` = 1920x1080, `offset` = 1920,0
/// - Camera C: `full_size` = 3840x2160, `size` = 1920x1080, `offset` = 0,1080
/// - Camera D: `full_size` = 3840x2160, `size` = 1920x1080, `offset` = 1920,1080
///
/// However, since only the ratio between the values matters, they could all be
/// divided by 120 and still produce the same image. Camera D, for example,
/// could instead use `full_size` = 32x18, `size` = 16x9, `offset` = 16,9.
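///
/// A sketch of Camera D's sub view from the scaled-down variant above (assumes
/// `SubCameraView` is exported at the crate root like the other camera types):
/// ```
/// # use bevy_camera::SubCameraView;
/// # use bevy_math::{UVec2, Vec2};
/// let camera_d = SubCameraView {
///     full_size: UVec2::new(32, 18),
///     offset: Vec2::new(16.0, 9.0),
///     size: UVec2::new(16, 9),
/// };
/// // Same layout as the full-resolution 3840x2160 values, at 1/120 scale.
/// assert_eq!(camera_d.full_size * 120, UVec2::new(3840, 2160));
/// ```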
#[derive(Debug, Clone, Copy, Reflect, PartialEq)]
#[reflect(Clone, PartialEq, Default)]
pub struct SubCameraView {
    /// Size of the entire camera view
    pub full_size: UVec2,
    /// Offset of the sub camera
    pub offset: Vec2,
    /// Size of the sub camera
    pub size: UVec2,
}

impl Default for SubCameraView {
    fn default() -> Self {
        Self {
            full_size: UVec2::new(1, 1),
            offset: Vec2::new(0., 0.),
            size: UVec2::new(1, 1),
        }
    }
}

/// Information about the current [`RenderTarget`].
#[derive(Debug, Clone)]
pub struct RenderTargetInfo {
    /// The physical size of this render target (in physical pixels, ignoring scale factor).
    pub physical_size: UVec2,
    /// The scale factor of this render target.
    ///
    /// When rendering to a window, this is typically greater than or equal to 1.0,
    /// representing the ratio between the size of the window in physical pixels and its logical size.
    pub scale_factor: f32,
}

impl Default for RenderTargetInfo {
    fn default() -> Self {
        Self {
            physical_size: Default::default(),
            scale_factor: 1.,
        }
    }
}

/// Holds internally computed [`Camera`] values.
#[derive(Default, Debug, Clone)]
pub struct ComputedCameraValues {
    pub clip_from_view: Mat4,
    pub target_info: Option<RenderTargetInfo>,
    // size of the `Viewport`
    pub old_viewport_size: Option<UVec2>,
    pub old_sub_camera_view: Option<SubCameraView>,
}

/// How much energy a [`Camera3d`](crate::Camera3d) absorbs from incoming light.
///
/// <https://en.wikipedia.org/wiki/Exposure_(photography)>
#[derive(Component, Clone, Copy, Reflect)]
#[reflect(opaque)]
#[reflect(Component, Default, Clone)]
pub struct Exposure {
    /// <https://en.wikipedia.org/wiki/Exposure_value#Tabulated_exposure_values>
    pub ev100: f32,
}

impl Exposure {
    pub const SUNLIGHT: Self = Self {
        ev100: Self::EV100_SUNLIGHT,
    };
    pub const OVERCAST: Self = Self {
        ev100: Self::EV100_OVERCAST,
    };
    pub const INDOOR: Self = Self {
        ev100: Self::EV100_INDOOR,
    };
    /// This value was calibrated to match Blender's implicit/default exposure as closely as possible.
    /// It also happens to be a reasonable default.
    ///
    /// See <https://github.com/bevyengine/bevy/issues/11577> for details.
    pub const BLENDER: Self = Self {
        ev100: Self::EV100_BLENDER,
    };

    pub const EV100_SUNLIGHT: f32 = 15.0;
    pub const EV100_OVERCAST: f32 = 12.0;
    pub const EV100_INDOOR: f32 = 7.0;

    /// This value was calibrated to match Blender's implicit/default exposure as closely as possible.
    /// It also happens to be a reasonable default.
    ///
    /// See <https://github.com/bevyengine/bevy/issues/11577> for details.
    pub const EV100_BLENDER: f32 = 9.7;

    /// Creates an [`Exposure`] from the given [`PhysicalCameraParameters`].
    pub fn from_physical_camera(physical_camera_parameters: PhysicalCameraParameters) -> Self {
        Self {
            ev100: physical_camera_parameters.ev100(),
        }
    }

    /// Converts EV100 values to exposure values.
    /// <https://google.github.io/filament/Filament.md.html#imagingpipeline/physicallybasedcamera/exposure>
    #[inline]
    pub fn exposure(&self) -> f32 {
        ops::exp2(-self.ev100) / 1.2
    }
}

impl Default for Exposure {
    fn default() -> Self {
        Self::BLENDER
    }
}

/// Parameters based on physical camera characteristics for calculating EV100
/// values for use with [`Exposure`]. This is also used for depth of field.
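///
/// With the default parameters (f/1.0, 1/125 s shutter, ISO 100), [`ev100`](Self::ev100)
/// evaluates to `log2(1.0 * 1.0 * 100.0 / ((1.0 / 125.0) * 100.0)) = log2(125) ≈ 6.97`.
/// A quick sketch of that arithmetic (assuming this type is exported at the crate root):
/// ```
/// # use bevy_camera::PhysicalCameraParameters;
/// let ev100 = PhysicalCameraParameters::default().ev100();
/// assert!((ev100 - 125.0_f32.log2()).abs() < 1e-4);
/// ```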
#[derive(Clone, Copy)]
pub struct PhysicalCameraParameters {
    /// <https://en.wikipedia.org/wiki/F-number>
    pub aperture_f_stops: f32,
    /// <https://en.wikipedia.org/wiki/Shutter_speed>
    pub shutter_speed_s: f32,
    /// <https://en.wikipedia.org/wiki/Film_speed>
    pub sensitivity_iso: f32,
    /// The height of the [image sensor format] in meters.
    ///
    /// Focal length is derived from the FOV and this value. The default is
    /// 18.66mm, matching the [Super 35] format, which is popular in cinema.
    ///
    /// [image sensor format]: https://en.wikipedia.org/wiki/Image_sensor_format
    ///
    /// [Super 35]: https://en.wikipedia.org/wiki/Super_35
    pub sensor_height: f32,
}

impl PhysicalCameraParameters {
    /// Calculate the [EV100](https://en.wikipedia.org/wiki/Exposure_value).
    pub fn ev100(&self) -> f32 {
        ops::log2(
            self.aperture_f_stops * self.aperture_f_stops * 100.0
                / (self.shutter_speed_s * self.sensitivity_iso),
        )
    }
}

impl Default for PhysicalCameraParameters {
    fn default() -> Self {
        Self {
            aperture_f_stops: 1.0,
            shutter_speed_s: 1.0 / 125.0,
            sensitivity_iso: 100.0,
            sensor_height: 0.01866,
        }
    }
}

/// Error returned when a conversion between world-space and viewport-space coordinates fails.
///
/// See [`world_to_viewport`][Camera::world_to_viewport] and [`viewport_to_world`][Camera::viewport_to_world].
#[derive(Debug, Eq, PartialEq, Copy, Clone, Error)]
pub enum ViewportConversionError {
    /// The pre-computed size of the viewport was not available.
    ///
    /// This may be because the `Camera` was just created and `camera_system` has not been executed
    /// yet, or because the [`RenderTarget`] is misconfigured in one of the following ways:
    /// - it references the [`PrimaryWindow`](RenderTarget::Window) when there is none,
    /// - it references a [`Window`](RenderTarget::Window) entity that doesn't exist or doesn't actually have a `Window` component,
    /// - it references an [`Image`](RenderTarget::Image) that doesn't exist (invalid handle),
    /// - it references a [`TextureView`](RenderTarget::TextureView) that doesn't exist (invalid handle).
    #[error("pre-computed size of viewport not available")]
    NoViewportSize,
    /// The computed coordinate was beyond the `Camera`'s near plane.
    ///
    /// Only applicable when converting from world-space to viewport-space.
    #[error("computed coordinate beyond `Camera`'s near plane")]
    PastNearPlane,
    /// The computed coordinate was beyond the `Camera`'s far plane.
    ///
    /// Only applicable when converting from world-space to viewport-space.
    #[error("computed coordinate beyond `Camera`'s far plane")]
    PastFarPlane,
    /// The Normalized Device Coordinates could not be computed because the `camera_transform`, the
    /// `world_position`, or the projection matrix defined by [`Projection`](super::projection::Projection)
    /// contained `NAN` (see [`world_to_ndc`][Camera::world_to_ndc] and [`ndc_to_world`][Camera::ndc_to_world]).
    #[error("found NaN while computing NDC")]
    InvalidData,
}

/// The defining [`Component`] for camera entities,
/// storing information about how and what to render through this camera.
///
/// The [`Camera`] component is added to an entity to define the properties of the viewpoint from
/// which rendering occurs. It defines the position of the view to render, the projection method
/// used to transform the 3D objects into a 2D image, and the render target into which that image
/// is produced.
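///
/// A minimal sketch of spawning a camera and overriding one field (a 2d camera
/// here; [`Camera2d`] requires [`Camera`], so the remaining fields use their defaults):
/// ```no_run
/// # use bevy_app::{App, Startup};
/// # use bevy_ecs::prelude::Commands;
/// # use bevy_camera::{Camera, Camera2d};
/// fn setup(mut commands: Commands) {
///     // Render this camera after the default camera (order 0).
///     commands.spawn((Camera2d, Camera { order: 1, ..Default::default() }));
/// }
/// # let mut app = App::new();
/// # app.add_systems(Startup, setup);
/// ```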
///
/// Note that a [`Camera`] needs a `CameraRenderGraph` to render anything.
/// This is typically provided by adding a [`Camera2d`] or [`Camera3d`] component,
/// but custom render graphs can also be defined. Inserting a [`Camera`] with no render
/// graph will emit an error at runtime.
///
/// [`Camera2d`]: crate::Camera2d
/// [`Camera3d`]: crate::Camera3d
#[derive(Component, Debug, Reflect, Clone)]
#[reflect(Component, Default, Debug, Clone)]
#[require(
    Frustum,
    CameraMainTextureUsages,
    VisibleEntities,
    Transform,
    Visibility
)]
pub struct Camera {
    /// If set, this camera will render to the given [`Viewport`] rectangle within the configured [`RenderTarget`].
    pub viewport: Option<Viewport>,
    /// Cameras with a higher order are rendered later, and thus on top of lower order cameras.
    pub order: isize,
    /// If this is set to `true`, this camera will be rendered to its specified [`RenderTarget`]. If `false`, this
    /// camera will not be rendered.
    pub is_active: bool,
    /// Computed values for this camera, such as the projection matrix and the render target size.
    #[reflect(ignore, clone)]
    pub computed: ComputedCameraValues,
    /// The "target" that this camera will render to.
    pub target: RenderTarget,
    // todo: reflect this when #6042 lands
    /// The [`CameraOutputMode`] for this camera.
    #[reflect(ignore, clone)]
    pub output_mode: CameraOutputMode,
    /// If this is enabled, a previous camera exists that shares this camera's render target, and this camera has
    /// MSAA enabled, then the previous camera's outputs will be written to the intermediate multi-sampled render
    /// target textures for this camera. This enables cameras with MSAA enabled to "write their results on top"
    /// of previous camera results, and include them as a part of their render results. This is enabled by default
    /// to ensure cameras with MSAA enabled layer their results in the same way as cameras without MSAA.
    pub msaa_writeback: bool,
    /// The clear color operation to perform on the render target.
    pub clear_color: ClearColorConfig,
    /// If set, this camera will be a sub camera of a large view, defined by a [`SubCameraView`].
    pub sub_camera_view: Option<SubCameraView>,
}

impl Default for Camera {
    fn default() -> Self {
        Self {
            is_active: true,
            order: 0,
            viewport: None,
            computed: Default::default(),
            target: Default::default(),
            output_mode: Default::default(),
            msaa_writeback: true,
            clear_color: Default::default(),
            sub_camera_view: None,
        }
    }
}

impl Camera {
    /// Converts a physical size in this `Camera` to a logical size.
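    ///
    /// A quick sketch; `computed.target_info` is normally filled in by
    /// `camera_system`, but it is set by hand here for illustration:
    /// ```
    /// # use bevy_camera::{Camera, RenderTargetInfo};
    /// # use bevy_math::{UVec2, Vec2};
    /// let mut camera = Camera::default();
    /// camera.computed.target_info = Some(RenderTargetInfo {
    ///     physical_size: UVec2::new(1920, 1080),
    ///     scale_factor: 2.0,
    /// });
    /// assert_eq!(camera.to_logical(UVec2::new(1920, 1080)), Some(Vec2::new(960.0, 540.0)));
    /// ```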
    #[inline]
    pub fn to_logical(&self, physical_size: UVec2) -> Option<Vec2> {
        let scale = self.computed.target_info.as_ref()?.scale_factor;
        Some(physical_size.as_vec2() / scale)
    }

    /// The rendered physical bounds [`URect`] of the camera. If the `viewport` field is
    /// set to [`Some`], this will be the rect of that custom viewport. Otherwise it will default to
    /// the full physical rect of the current [`RenderTarget`].
    #[inline]
    pub fn physical_viewport_rect(&self) -> Option<URect> {
        let min = self
            .viewport
            .as_ref()
            .map(|v| v.physical_position)
            .unwrap_or(UVec2::ZERO);
        let max = min + self.physical_viewport_size()?;
        Some(URect { min, max })
    }

    /// The rendered logical bounds [`Rect`] of the camera. If the `viewport` field is set to
    /// [`Some`], this will be the rect of that custom viewport. Otherwise it will default to the
    /// full logical rect of the current [`RenderTarget`].
    #[inline]
    pub fn logical_viewport_rect(&self) -> Option<Rect> {
        let URect { min, max } = self.physical_viewport_rect()?;
        Some(Rect {
            min: self.to_logical(min)?,
            max: self.to_logical(max)?,
        })
    }

    /// The logical size of this camera's viewport. If the `viewport` field is set to [`Some`], this
    /// will be the size of that custom viewport. Otherwise it will default to the full logical size
    /// of the current [`RenderTarget`].
    /// For logic that requires the full logical size of the
    /// [`RenderTarget`], prefer [`Camera::logical_target_size`].
    ///
    /// Returns `None` if either:
    /// - the function is called just after the `Camera` is created, before `camera_system` is executed,
    /// - the [`RenderTarget`] isn't correctly set:
    ///   - it references the [`PrimaryWindow`](RenderTarget::Window) when there is none,
    ///   - it references a [`Window`](RenderTarget::Window) entity that doesn't exist or doesn't actually have a `Window` component,
    ///   - it references an [`Image`](RenderTarget::Image) that doesn't exist (invalid handle),
    ///   - it references a [`TextureView`](RenderTarget::TextureView) that doesn't exist (invalid handle).
    #[inline]
    pub fn logical_viewport_size(&self) -> Option<Vec2> {
        self.viewport
            .as_ref()
            .and_then(|v| self.to_logical(v.physical_size))
            .or_else(|| self.logical_target_size())
    }

    /// The physical size of this camera's viewport (in physical pixels).
    /// If the `viewport` field is set to [`Some`], this
    /// will be the size of that custom viewport. Otherwise it will default to the full physical size of
    /// the current [`RenderTarget`].
    /// For logic that requires the full physical size of the [`RenderTarget`], prefer [`Camera::physical_target_size`].
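    ///
    /// A sketch with an explicit viewport (values chosen for illustration):
    /// ```
    /// # use bevy_camera::{Camera, Viewport};
    /// # use bevy_math::UVec2;
    /// let camera = Camera {
    ///     viewport: Some(Viewport {
    ///         physical_size: UVec2::new(320, 180),
    ///         ..Default::default()
    ///     }),
    ///     ..Default::default()
    /// };
    /// assert_eq!(camera.physical_viewport_size(), Some(UVec2::new(320, 180)));
    /// ```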
    #[inline]
    pub fn physical_viewport_size(&self) -> Option<UVec2> {
        self.viewport
            .as_ref()
            .map(|v| v.physical_size)
            .or_else(|| self.physical_target_size())
    }

    /// The full logical size of this camera's [`RenderTarget`], ignoring custom `viewport` configuration.
    /// Note that if the `viewport` field is [`Some`], this will not represent the size of the rendered area.
    /// For logic that requires the size of the actually rendered area, prefer [`Camera::logical_viewport_size`].
    #[inline]
    pub fn logical_target_size(&self) -> Option<Vec2> {
        self.computed
            .target_info
            .as_ref()
            .and_then(|t| self.to_logical(t.physical_size))
    }

    /// The full physical size of this camera's [`RenderTarget`] (in physical pixels),
    /// ignoring custom `viewport` configuration.
    /// Note that if the `viewport` field is [`Some`], this will not represent the size of the rendered area.
    /// For logic that requires the size of the actually rendered area, prefer [`Camera::physical_viewport_size`].
    #[inline]
    pub fn physical_target_size(&self) -> Option<UVec2> {
        self.computed.target_info.as_ref().map(|t| t.physical_size)
    }

    /// The scale factor of this camera's [`RenderTarget`].
    #[inline]
    pub fn target_scaling_factor(&self) -> Option<f32> {
        self.computed
            .target_info
            .as_ref()
            .map(|t: &RenderTargetInfo| t.scale_factor)
    }

    /// The projection matrix computed using this camera's [`Projection`](super::projection::Projection).
    #[inline]
    pub fn clip_from_view(&self) -> Mat4 {
        self.computed.clip_from_view
    }

    /// Given a position in world space, use the camera to compute the viewport-space coordinates.
    ///
    /// To get the coordinates in Normalized Device Coordinates, you should use
    /// [`world_to_ndc`](Self::world_to_ndc).
    ///
    /// # Panics
    ///
    /// Will panic if `glam_assert` is enabled and the `camera_transform` contains `NAN`
    /// (see [`world_to_ndc`][Self::world_to_ndc]).
    #[doc(alias = "world_to_screen")]
    pub fn world_to_viewport(
        &self,
        camera_transform: &GlobalTransform,
        world_position: Vec3,
    ) -> Result<Vec2, ViewportConversionError> {
        let target_rect = self
            .logical_viewport_rect()
            .ok_or(ViewportConversionError::NoViewportSize)?;
        let mut ndc_space_coords = self
            .world_to_ndc(camera_transform, world_position)
            .ok_or(ViewportConversionError::InvalidData)?;
        // NDC z-values outside of 0 < z < 1 are outside the (implicit) camera frustum and are thus not in viewport-space
        if ndc_space_coords.z < 0.0 {
            return Err(ViewportConversionError::PastFarPlane);
        }
        if ndc_space_coords.z > 1.0 {
            return Err(ViewportConversionError::PastNearPlane);
        }

        // Flip the Y co-ordinate origin from the bottom to the top.
        ndc_space_coords.y = -ndc_space_coords.y;

        // Once in NDC space, we can discard the z element and map x/y to the viewport rect
        let viewport_position =
            (ndc_space_coords.truncate() + Vec2::ONE) / 2.0 * target_rect.size() + target_rect.min;
        Ok(viewport_position)
    }

    /// Given a position in world space, use the camera to compute the viewport-space coordinates and depth.
    ///
    /// To get the coordinates in Normalized Device Coordinates, you should use
    /// [`world_to_ndc`](Self::world_to_ndc).
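    ///
    /// # Example
    ///
    /// A sketch in the same style as the other system examples in this file:
    /// ```no_run
    /// # use bevy_ecs::prelude::{Single, IntoScheduleConfigs};
    /// # use bevy_transform::prelude::{GlobalTransform, TransformSystems};
    /// # use bevy_camera::Camera;
    /// # use bevy_math::Vec3;
    /// # use bevy_app::{App, PostUpdate};
    /// fn system(camera_query: Single<(&Camera, &GlobalTransform)>) {
    ///     let (camera, camera_transform) = *camera_query;
    ///
    ///     if let Ok(pos) = camera.world_to_viewport_with_depth(camera_transform, Vec3::ZERO) {
    ///         // `pos.x`/`pos.y` are viewport coordinates; `pos.z` is the view-space depth.
    ///         println!("{pos:?}");
    ///     }
    /// }
    /// # let mut app = App::new();
    /// # app.add_systems(PostUpdate, system.after(TransformSystems::Propagate));
    /// ```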
    ///
    /// # Panics
    ///
    /// Will panic if `glam_assert` is enabled and the `camera_transform` contains `NAN`
    /// (see [`world_to_ndc`][Self::world_to_ndc]).
    #[doc(alias = "world_to_screen_with_depth")]
    pub fn world_to_viewport_with_depth(
        &self,
        camera_transform: &GlobalTransform,
        world_position: Vec3,
    ) -> Result<Vec3, ViewportConversionError> {
        let target_rect = self
            .logical_viewport_rect()
            .ok_or(ViewportConversionError::NoViewportSize)?;
        let mut ndc_space_coords = self
            .world_to_ndc(camera_transform, world_position)
            .ok_or(ViewportConversionError::InvalidData)?;
        // NDC z-values outside of 0 < z < 1 are outside the (implicit) camera frustum and are thus not in viewport-space
        if ndc_space_coords.z < 0.0 {
            return Err(ViewportConversionError::PastFarPlane);
        }
        if ndc_space_coords.z > 1.0 {
            return Err(ViewportConversionError::PastNearPlane);
        }

        // Stretch the NDC depth to view-space depth via the near plane, negating the result so it is positive again.
        let depth = -self.depth_ndc_to_view_z(ndc_space_coords.z);

        // Flip the Y co-ordinate origin from the bottom to the top.
        ndc_space_coords.y = -ndc_space_coords.y;

        // Once in NDC space, we can discard the z element and map x/y to the viewport rect
        let viewport_position =
            (ndc_space_coords.truncate() + Vec2::ONE) / 2.0 * target_rect.size() + target_rect.min;
        Ok(viewport_position.extend(depth))
    }

    /// Returns a ray originating from the camera that passes through everything beyond `viewport_position`.
    ///
    /// The resulting ray starts on the near plane of the camera.
    ///
    /// If the camera's projection is orthographic, the direction of the ray is always equal to `camera_transform.forward()`.
    ///
    /// To get the world space coordinates with Normalized Device Coordinates, you should use
    /// [`ndc_to_world`](Self::ndc_to_world).
    ///
    /// # Example
    /// ```no_run
    /// # use bevy_window::Window;
    /// # use bevy_ecs::prelude::{Single, IntoScheduleConfigs};
    /// # use bevy_transform::prelude::{GlobalTransform, TransformSystems};
    /// # use bevy_camera::Camera;
    /// # use bevy_app::{App, PostUpdate};
    /// #
    /// fn system(camera_query: Single<(&Camera, &GlobalTransform)>, window: Single<&Window>) {
    ///     let (camera, camera_transform) = *camera_query;
    ///
    ///     if let Some(cursor_position) = window.cursor_position()
    ///         // Calculate a ray pointing from the camera into the world based on the cursor's position.
    ///         && let Ok(ray) = camera.viewport_to_world(camera_transform, cursor_position)
    ///     {
    ///         println!("{ray:?}");
    ///     }
    /// }
    ///
    /// # let mut app = App::new();
    /// // Run the system after transform propagation so the camera's global transform is up-to-date.
    /// app.add_systems(PostUpdate, system.after(TransformSystems::Propagate));
    /// ```
    ///
    /// # Panics
    ///
    /// Will panic if the camera's projection matrix is invalid (has a determinant of 0) and
    /// `glam_assert` is enabled (see [`ndc_to_world`](Self::ndc_to_world)).
    pub fn viewport_to_world(
        &self,
        camera_transform: &GlobalTransform,
        viewport_position: Vec2,
    ) -> Result<Ray3d, ViewportConversionError> {
        let target_rect = self
            .logical_viewport_rect()
            .ok_or(ViewportConversionError::NoViewportSize)?;
        let rect_relative = (viewport_position - target_rect.min) / target_rect.size();
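        // Map the viewport fraction in [0, 1] to NDC x/y in [-1, 1].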
        let mut ndc_xy = rect_relative * 2. - Vec2::ONE;
        // Flip the Y co-ordinate from the top to the bottom to enter NDC.
        ndc_xy.y = -ndc_xy.y;

        let ndc_point_near = ndc_xy.extend(1.0).into();
        // Using EPSILON because an NDC with Z = 0 returns NaNs.
        let ndc_point_far = ndc_xy.extend(f32::EPSILON).into();
        let view_from_clip = self.computed.clip_from_view.inverse();
        let world_from_view = camera_transform.affine();
        // We multiply the point by `view_from_clip` and then `world_from_view` in sequence to avoid the precision loss
        // (and performance penalty) incurred by pre-composing an affine transform with a projective transform.
        // Additionally, we avoid adding and subtracting translation to the direction component to maintain precision.
        let view_point_near = view_from_clip.project_point3a(ndc_point_near);
        let view_point_far = view_from_clip.project_point3a(ndc_point_far);
        let view_dir = view_point_far - view_point_near;
        let origin = world_from_view.transform_point3a(view_point_near).into();
        let direction = world_from_view.transform_vector3a(view_dir).into();

        // The fallible direction constructor ensures that the direction isn't NaN.
        Dir3::new(direction)
            .map_err(|_| ViewportConversionError::InvalidData)
            .map(|direction| Ray3d { origin, direction })
    }

    /// Returns a 2D world position computed from a position on this [`Camera`]'s viewport.
    ///
    /// Useful for 2D cameras and other cameras with an orthographic projection pointing along the Z axis.
    ///
    /// To get the world space coordinates with Normalized Device Coordinates, you should use
    /// [`ndc_to_world`](Self::ndc_to_world).
    ///
    /// # Example
    /// ```no_run
    /// # use bevy_window::Window;
    /// # use bevy_ecs::prelude::*;
    /// # use bevy_transform::prelude::{GlobalTransform, TransformSystems};
    /// # use bevy_camera::Camera;
    /// # use bevy_app::{App, PostUpdate};
    /// #
    /// fn system(camera_query: Single<(&Camera, &GlobalTransform)>, window: Single<&Window>) {
    ///     let (camera, camera_transform) = *camera_query;
    ///
    ///     if let Some(cursor_position) = window.cursor_position()
    ///         // Calculate a world position based on the cursor's position.
    ///         && let Ok(world_pos) = camera.viewport_to_world_2d(camera_transform, cursor_position)
    ///     {
    ///         println!("World position: {world_pos:.2}");
    ///     }
    /// }
    ///
    /// # let mut app = App::new();
    /// // Run the system after transform propagation so the camera's global transform is up-to-date.
    /// app.add_systems(PostUpdate, system.after(TransformSystems::Propagate));
    /// ```
    ///
    /// # Panics
    ///
    /// Will panic if the camera's projection matrix is invalid (has a determinant of 0) and
    /// `glam_assert` is enabled (see [`ndc_to_world`](Self::ndc_to_world)).
    pub fn viewport_to_world_2d(
        &self,
        camera_transform: &GlobalTransform,
        viewport_position: Vec2,
    ) -> Result<Vec2, ViewportConversionError> {
        let target_rect = self
            .logical_viewport_rect()
            .ok_or(ViewportConversionError::NoViewportSize)?;
        let mut rect_relative = (viewport_position - target_rect.min) / target_rect.size();

        // Flip the Y co-ordinate origin from the top to the bottom.
        rect_relative.y = 1.0 - rect_relative.y;

        let ndc = rect_relative * 2. - Vec2::ONE;

        let world_near_plane = self
            .ndc_to_world(camera_transform, ndc.extend(1.))
            .ok_or(ViewportConversionError::InvalidData)?;

        Ok(world_near_plane.truncate())
    }

    /// Given a point in world space, use the camera's viewport to compute the Normalized Device Coordinates of the point.
    ///
    /// When the point is within the viewport the values returned will be between -1.0 (bottom left) and 1.0 (top right)
    /// on the X and Y axes, and between 0.0 (far) and 1.0 (near) on the Z axis.
    /// To get the coordinates in the render target's viewport dimensions, you should use
    /// [`world_to_viewport`](Self::world_to_viewport).
    ///
    /// Returns `None` if the `camera_transform`, the `world_position`, or the projection matrix defined by
    /// [`Projection`](super::projection::Projection) contain `NAN`.
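    ///
    /// # Example
    ///
    /// A sketch with a hand-built reversed-z projection matrix (stand-in values;
    /// in a real app `camera_system` fills this in from the [`Projection`](super::projection::Projection)):
    /// ```
    /// # use bevy_camera::Camera;
    /// # use bevy_math::{Mat4, Vec3};
    /// # use bevy_transform::components::GlobalTransform;
    /// let mut camera = Camera::default();
    /// camera.computed.clip_from_view = Mat4::perspective_infinite_reverse_rh(1.0, 1.0, 0.1);
    /// // A point straight ahead of the default camera (which looks down -Z) lands at NDC x = y = 0.
    /// let ndc = camera.world_to_ndc(&GlobalTransform::default(), Vec3::new(0.0, 0.0, -1.0)).unwrap();
    /// assert!(ndc.x.abs() < 1e-6 && ndc.y.abs() < 1e-6);
    /// ```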
    ///
    /// # Panics
    ///
    /// Will panic if the `camera_transform` contains `NAN` and the `glam_assert` feature is enabled.
    pub fn world_to_ndc<V: Into<Vec3A> + From<Vec3A>>(
        &self,
        camera_transform: &GlobalTransform,
        world_point: V,
    ) -> Option<V> {
        let view_from_world = camera_transform.affine().inverse();
        let view_point = view_from_world.transform_point3a(world_point.into());
        let ndc_point = self.computed.clip_from_view.project_point3a(view_point);

        (!ndc_point.is_nan()).then_some(ndc_point.into())
    }

    /// Given a position in Normalized Device Coordinates,
    /// use the camera's viewport to compute the world space position.
    ///
    /// When the position is within the viewport, its NDC values will be between -1.0 and 1.0
    /// on the X and Y axes, and between 0.0 and 1.0 on the Z axis.
    /// To compute a world space position from a viewport position instead, you should use
    /// [`viewport_to_world`](Self::viewport_to_world).
    ///
    /// Returns `None` if the `camera_transform`, the `ndc_point`, or the projection matrix defined by
    /// [`Projection`](super::projection::Projection) contain `NAN`.
    ///
    /// # Panics
    ///
    /// Will panic if the projection matrix is invalid (has a determinant of 0) and `glam_assert` is enabled.
    pub fn ndc_to_world<V: Into<Vec3A> + From<Vec3A>>(
        &self,
        camera_transform: &GlobalTransform,
        ndc_point: V,
    ) -> Option<V> {
        // We multiply the point by `view_from_clip` and then `world_from_view` in sequence to avoid the precision loss
        // (and performance penalty) incurred by pre-composing an affine transform with a projective transform.
        let view_point = self
            .computed
            .clip_from_view
            .inverse()
            .project_point3a(ndc_point.into());
        let world_point = camera_transform.affine().transform_point3a(view_point);

        (!world_point.is_nan()).then_some(world_point.into())
    }

    /// Converts the depth in Normalized Device Coordinates
    /// to linear view z for perspective projections.
    ///
    /// Note: Depth values in front of the camera will be negative, as -z is forward.
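    ///
    /// A sketch with a hand-built reversed-z projection (a 0.1 near plane is an
    /// assumed value for illustration):
    /// ```
    /// # use bevy_camera::Camera;
    /// # use bevy_math::Mat4;
    /// let mut camera = Camera::default();
    /// camera.computed.clip_from_view = Mat4::perspective_infinite_reverse_rh(1.0, 1.0, 0.1);
    /// // With reversed z, NDC depth 1.0 is the near plane, so view-space z is -near.
    /// assert!((camera.depth_ndc_to_view_z(1.0) + 0.1).abs() < 1e-6);
    /// ```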
    pub fn depth_ndc_to_view_z(&self, ndc_depth: f32) -> f32 {
        let near = self.clip_from_view().w_axis.z; // [3][2]
        -near / ndc_depth
    }

    /// Converts the depth in Normalized Device Coordinates
    /// to linear view z for orthographic projections.
    ///
    /// Note: Depth values in front of the camera will be negative, as -z is forward.
    pub fn depth_ndc_to_view_z_2d(&self, ndc_depth: f32) -> f32 {
        -(self.clip_from_view().w_axis.z - ndc_depth) / self.clip_from_view().z_axis.z
        //                      [3][2]                                       [2][2]
    }
}

/// Control how this [`Camera`] outputs once rendering is completed.
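///
/// A sketch of layering a second camera's output over the first using alpha
/// blending (the specific blend state is an assumed choice, and `ClearColorConfig`
/// is assumed to be exported at the crate root):
/// ```
/// # use bevy_camera::{Camera, CameraOutputMode, ClearColorConfig};
/// # use wgpu_types::BlendState;
/// let overlay_camera = Camera {
///     order: 1,
///     output_mode: CameraOutputMode::Write {
///         blend_state: Some(BlendState::ALPHA_BLENDING),
///         clear_color: ClearColorConfig::None,
///     },
///     ..Default::default()
/// };
/// # let _ = overlay_camera;
/// ```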
#[derive(Debug, Clone, Copy)]
pub enum CameraOutputMode {
    /// Writes the camera output to the configured render target.
    Write {
        /// The blend state that will be used by the pipeline that writes the intermediate render textures to the final render target texture.
        /// If not set, the output will be written as-is, ignoring `clear_color` and the existing data in the final render target texture.
        blend_state: Option<BlendState>,
        /// The clear color operation to perform on the final render target texture.
        clear_color: ClearColorConfig,
    },
    /// Skips writing the camera output to the configured render target. The output will remain in the
    /// render target's "intermediate" textures, which a camera with a higher order should then write to the
    /// render target using [`CameraOutputMode::Write`]. The "skip" mode can easily prevent render results from
    /// being displayed, or cause them to be lost, so only use it if you know what you are doing!
    /// In camera setups with multiple active cameras rendering to the same [`RenderTarget`], the skip mode can
    /// be used to remove unnecessary / redundant writes to the final output texture, removing unnecessary render passes.
    Skip,
}

impl Default for CameraOutputMode {
    fn default() -> Self {
        CameraOutputMode::Write {
            blend_state: None,
            clear_color: ClearColorConfig::Default,
        }
    }
}

/// The "target" that a [`Camera`] will render to. For example, this could be a `Window`
/// swapchain or an [`Image`].
#[derive(Debug, Clone, Reflect, From)]
#[reflect(Clone)]
pub enum RenderTarget {
    /// Window to which the camera's view is rendered.
    Window(WindowRef),
    /// Image to which the camera's view is rendered.
    Image(ImageRenderTarget),
    /// Texture View to which the camera's view is rendered.
    /// Useful when the texture view needs to be created outside of Bevy, for example OpenXR.
    TextureView(ManualTextureViewHandle),
    /// The camera won't render to any color target.
    ///
    /// This is useful when you want a camera that *only* renders prepasses, for
    /// example a depth prepass. See the `render_depth_to_texture` example.
    None {
        /// The physical size of the viewport.
        size: UVec2,
    },
}

impl RenderTarget {
    /// Get a handle to the render target's image,
    /// or `None` if the render target is another variant.
    pub fn as_image(&self) -> Option<&Handle<Image>> {
        if let Self::Image(image_target) = self {
            Some(&image_target.handle)
        } else {
            None
        }
    }

    /// Normalize the render target down to a more concrete value, mostly used for equality comparisons.
    pub fn normalize(&self, primary_window: Option<Entity>) -> Option<NormalizedRenderTarget> {
        match self {
            RenderTarget::Window(window_ref) => window_ref
                .normalize(primary_window)
                .map(NormalizedRenderTarget::Window),
            RenderTarget::Image(handle) => Some(NormalizedRenderTarget::Image(handle.clone())),
            RenderTarget::TextureView(id) => Some(NormalizedRenderTarget::TextureView(*id)),
            RenderTarget::None { size } => Some(NormalizedRenderTarget::None {
                width: size.x,
                height: size.y,
            }),
        }
    }
}

/// Normalized version of the render target.
///
/// Once we have this we shouldn't need to resolve it down anymore.
#[derive(Debug, Clone, Reflect, PartialEq, Eq, Hash, PartialOrd, Ord, From)]
#[reflect(Clone, PartialEq, Hash)]
pub enum NormalizedRenderTarget {
    /// Window to which the camera's view is rendered.
    Window(NormalizedWindowRef),
    /// Image to which the camera's view is rendered.
    Image(ImageRenderTarget),
    /// Texture View to which the camera's view is rendered.
    /// Useful when the texture view needs to be created outside of Bevy, for example OpenXR.
    TextureView(ManualTextureViewHandle),
    /// The camera won't render to any color target.
    ///
    /// This is useful when you want a camera that *only* renders prepasses, for
    /// example a depth prepass. See the `render_depth_to_texture` example.
    None {
        /// The physical width of the viewport.
        width: u32,
        /// The physical height of the viewport.
        height: u32,
    },
}

/// A unique id that corresponds to a specific `ManualTextureView` in the `ManualTextureViews` collection.
///
/// See `ManualTextureViews` in `bevy_camera` for more details.
#[derive(Default, Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, Component, Reflect)]
#[reflect(Component, Default, Debug, PartialEq, Hash, Clone)]
pub struct ManualTextureViewHandle(pub u32);

/// A render target that renders to an [`Image`].
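///
/// A sketch of constructing an image target (the `From` impl supplies a scale
/// factor of 1.0; a default handle stands in for a real image asset):
/// ```
/// # use bevy_asset::Handle;
/// # use bevy_camera::{ImageRenderTarget, RenderTarget};
/// # use bevy_image::Image;
/// let image: Handle<Image> = Handle::default();
/// let target = RenderTarget::Image(ImageRenderTarget::from(image));
/// # let _ = target;
/// ```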
#[derive(Debug, Clone, Reflect)]
#[reflect(Clone, PartialEq, Hash)]
pub struct ImageRenderTarget {
    /// The image to render to.
    pub handle: Handle<Image>,
    /// The scale factor of the render target image, corresponding to the scale
    /// factor for a window target. This should almost always be 1.0.
    pub scale_factor: f32,
}

impl Eq for ImageRenderTarget {}

impl PartialEq for ImageRenderTarget {
    fn eq(&self, other: &Self) -> bool {
        self.handle == other.handle && FloatOrd(self.scale_factor) == FloatOrd(other.scale_factor)
    }
}

impl core::hash::Hash for ImageRenderTarget {
    fn hash<H: core::hash::Hasher>(&self, state: &mut H) {
        self.handle.hash(state);
        FloatOrd(self.scale_factor).hash(state);
    }
}

impl PartialOrd for ImageRenderTarget {
    fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for ImageRenderTarget {
    fn cmp(&self, other: &Self) -> core::cmp::Ordering {
        self.handle
            .cmp(&other.handle)
            .then_with(|| FloatOrd(self.scale_factor).cmp(&FloatOrd(other.scale_factor)))
    }
}

impl From<Handle<Image>> for RenderTarget {
    fn from(handle: Handle<Image>) -> Self {
        Self::Image(handle.into())
    }
}

impl From<Handle<Image>> for ImageRenderTarget {
    fn from(handle: Handle<Image>) -> Self {
        Self {
            handle,
            scale_factor: 1.0,
        }
    }
}

impl Default for RenderTarget {
    fn default() -> Self {
        Self::Window(Default::default())
    }
}

/// This component lets you control the [`TextureUsages`] field of the main texture generated for the camera.
#[derive(Component, Clone, Copy, Reflect)]
#[reflect(opaque)]
#[reflect(Component, Default, Clone)]
pub struct CameraMainTextureUsages(pub TextureUsages);

impl Default for CameraMainTextureUsages {
    fn default() -> Self {
        Self(
            TextureUsages::RENDER_ATTACHMENT
                | TextureUsages::TEXTURE_BINDING
                | TextureUsages::COPY_SRC,
        )
    }
}

impl CameraMainTextureUsages {
    pub fn with(mut self, usages: TextureUsages) -> Self {
        self.0 |= usages;
        self
    }
}

#[cfg(test)]
mod test {
    use bevy_math::{Vec2, Vec3};
    use bevy_transform::components::GlobalTransform;

    use crate::{
        Camera, OrthographicProjection, PerspectiveProjection, Projection, RenderTargetInfo,
        Viewport,
    };

    fn make_camera(mut projection: Projection, physical_size: Vec2) -> Camera {
        let viewport = Viewport {
            physical_size: physical_size.as_uvec2(),
            ..Default::default()
        };
        let mut camera = Camera {
            viewport: Some(viewport.clone()),
            ..Default::default()
        };
        camera.computed.target_info = Some(RenderTargetInfo {
            physical_size: viewport.physical_size,
            scale_factor: 1.0,
        });
        projection.update(
            viewport.physical_size.x as f32,
            viewport.physical_size.y as f32,
        );
        camera.computed.clip_from_view = projection.get_clip_from_view();
        camera
    }

    #[test]
    fn viewport_to_world_orthographic_3d_returns_forward() {
        let transform = GlobalTransform::default();
        let size = Vec2::new(1600.0, 900.0);
        let camera = make_camera(
            Projection::Orthographic(OrthographicProjection::default_3d()),
            size,
        );
        let ray = camera.viewport_to_world(&transform, Vec2::ZERO).unwrap();
        assert_eq!(ray.direction, transform.forward());
        assert!(ray
            .origin
            .abs_diff_eq(Vec3::new(-size.x * 0.5, size.y * 0.5, 0.0), 1e-4));
        let ray = camera.viewport_to_world(&transform, size).unwrap();
        assert_eq!(ray.direction, transform.forward());
        assert!(ray
            .origin
            .abs_diff_eq(Vec3::new(size.x * 0.5, -size.y * 0.5, 0.0), 1e-4));
    }
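
    // An extra sanity check, added as a sketch: clamping against a zero-sized
    // target should zero out both the position and size of the viewport.
    #[test]
    fn viewport_clamp_to_zero_size() {
        use bevy_math::UVec2;

        let mut viewport = Viewport {
            physical_position: UVec2::new(10, 10),
            physical_size: UVec2::new(100, 100),
            ..Default::default()
        };
        viewport.clamp_to_size(UVec2::ZERO);
        assert_eq!(viewport.physical_position, UVec2::ZERO);
        assert_eq!(viewport.physical_size, UVec2::ZERO);
    }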

    #[test]
    fn viewport_to_world_orthographic_2d_returns_forward() {
        let transform = GlobalTransform::default();
        let size = Vec2::new(1600.0, 900.0);
        let camera = make_camera(
            Projection::Orthographic(OrthographicProjection::default_2d()),
            size,
        );
        let ray = camera.viewport_to_world(&transform, Vec2::ZERO).unwrap();
        assert_eq!(ray.direction, transform.forward());
        assert!(ray
            .origin
            .abs_diff_eq(Vec3::new(-size.x * 0.5, size.y * 0.5, 1000.0), 1e-4));
        let ray = camera.viewport_to_world(&transform, size).unwrap();
        assert_eq!(ray.direction, transform.forward());
        assert!(ray
            .origin
            .abs_diff_eq(Vec3::new(size.x * 0.5, -size.y * 0.5, 1000.0), 1e-4));
    }

    #[test]
    fn viewport_to_world_perspective_center_returns_forward() {
        let transform = GlobalTransform::default();
        let size = Vec2::new(1600.0, 900.0);
        let camera = make_camera(
            Projection::Perspective(PerspectiveProjection::default()),
            size,
        );
        let ray = camera.viewport_to_world(&transform, size * 0.5).unwrap();
        assert_eq!(ray.direction, transform.forward());
        assert_eq!(ray.origin, transform.forward() * 0.1);
    }
}