newton.sensors.SensorTiledCamera#

class newton.sensors.SensorTiledCamera(model, *, config=None, load_textures=True)[source]#

Bases: object

Warp-based tiled camera sensor for raytraced rendering across multiple worlds.

Renders up to six image channels per (world, camera) pair:

  • color – RGBA shaded image (uint32).

  • hdr_color – linear shaded RGB image (vec3f).

  • depth – ray-hit distance [m] (float32); non-positive (<= 0) means no hit.

  • normal – surface normal at hit point (vec3f).

  • albedo – unshaded surface color (uint32).

  • shape_index – shape id per pixel (uint32).

All output arrays have shape (world_count, camera_count, height, width). Use the flatten_* helpers to rearrange them into tiled RGBA buffers for display, with one tile per (world, camera) pair laid out in a grid.

Shapes without the VISIBLE flag are excluded.

Example

sensor = SensorTiledCamera(model)
rays = sensor.utils.compute_pinhole_camera_rays(width, height, fov)
color = sensor.utils.create_color_image_output(width, height)

# After setup, build BVHs once for the initial state.
state = model.state()
newton.geometry.build_bvh_shape(model, state)
newton.geometry.build_bvh_particle(model, state)

# Before each later frame that changes geometry, refit BVHs.
newton.geometry.refit_bvh_shape(model, state)
newton.geometry.refit_bvh_particle(model, state)
sensor.update(state, camera_transforms, rays, color_image=color)

See RenderConfig for optional rendering settings and ClearData / DEFAULT_CLEAR_DATA / GRAY_CLEAR_DATA for image-clear presets.

class ClearData(clear_color=0, clear_depth=0.0, clear_shape_index=4294967295, clear_normal=(0.0, 0.0, 0.0), clear_albedo=0)#

Bases: object

Default values written to output images before rendering.

__init__(clear_color=0, clear_depth=0.0, clear_shape_index=4294967295, clear_normal=(0.0, 0.0, 0.0), clear_albedo=0)#
clear_albedo: int = 0#
clear_color: int = 0#
clear_depth: float = 0.0#
clear_normal: tuple[float, float, float] = (0.0, 0.0, 0.0)#
clear_shape_index: int = 4294967295#
class Config(checkerboard_texture=False, default_light=False, default_light_shadows=False, enable_ambient_lighting=True, colors_per_world=False, colors_per_shape=False, backface_culling=True, enable_textures=False, enable_particles=True)#

Bases: object

Sensor configuration.

Deprecated since version 1.1: Use RenderConfig and SensorTiledCamera.utils.* instead.

__init__(checkerboard_texture=False, default_light=False, default_light_shadows=False, enable_ambient_lighting=True, colors_per_world=False, colors_per_shape=False, backface_culling=True, enable_textures=False, enable_particles=True)#
backface_culling: bool = True#

Deprecated since version 1.1: Use render_config.enable_backface_culling instead.

checkerboard_texture: bool = False#

Deprecated since version 1.1: Use SensorTiledCamera.utils.assign_checkerboard_material_to_all_shapes() instead.

colors_per_shape: bool = False#

Deprecated since version 1.1: Use shape colors instead (e.g. builder.add_shape_cylinder(..., color=(r, g, b))).

colors_per_world: bool = False#

Deprecated since version 1.1: Use shape colors instead (e.g. builder.add_shape_cylinder(..., color=(r, g, b))).

default_light: bool = False#

Deprecated since version 1.1: Use SensorTiledCamera.utils.create_default_light() instead.

default_light_shadows: bool = False#

Deprecated since version 1.1: Use SensorTiledCamera.utils.create_default_light(enable_shadows=True) instead.

enable_ambient_lighting: bool = True#

Deprecated since version 1.1: Use render_config.enable_ambient_lighting instead.

enable_particles: bool = True#

Deprecated since version 1.1: Use render_config.enable_particles instead.

enable_textures: bool = False#

Deprecated since version 1.1: Use render_config.enable_textures instead.

class GaussianRenderMode(*values)#

Bases: IntEnum

Gaussian Render Mode

FAST = 0#

Fast Render Mode

QUALITY = 1#

Quality Render Mode, collect hits until minimum transmittance is reached

class RenderConfig(enable_global_world=True, enable_textures=False, enable_shadows=False, enable_ambient_lighting=True, enable_particles=True, enable_backface_culling=True, render_order=RenderOrder.PIXEL_PRIORITY, tile_width=16, tile_height=8, max_distance=1000.0, gaussians_mode=GaussianRenderMode.FAST, gaussians_min_transmittance=0.49, gaussians_max_num_hits=20)#

Bases: object

Raytrace render settings shared across all worlds.

__init__(enable_global_world=True, enable_textures=False, enable_shadows=False, enable_ambient_lighting=True, enable_particles=True, enable_backface_culling=True, render_order=RenderOrder.PIXEL_PRIORITY, tile_width=16, tile_height=8, max_distance=1000.0, gaussians_mode=GaussianRenderMode.FAST, gaussians_min_transmittance=0.49, gaussians_max_num_hits=20)#
enable_ambient_lighting: bool = True#

Enable ambient lighting for the scene.

enable_backface_culling: bool = True#

Cull back-facing triangles.

enable_global_world: bool = True#

Include shapes that belong to no specific world.

enable_particles: bool = True#

Enable particle rendering.

enable_shadows: bool = False#

Enable shadow rays for directional lights.

enable_textures: bool = False#

Enable texture-mapped rendering for meshes.

gaussians_max_num_hits: int = 20#

Maximum Gaussian hits accumulated per ray.

gaussians_min_transmittance: float = 0.49#

Minimum transmittance before early-out during Gaussian rendering.

gaussians_mode: int = 0#

Gaussian splatting render mode (see GaussianRenderMode).

max_distance: float = 1000.0#

Maximum ray distance [m].

render_order: int = 0#

Render traversal order (see RenderOrder).

tile_height: int = 8#

Tile height [px] for RenderOrder.TILED traversal.

tile_width: int = 16#

Tile width [px] for RenderOrder.TILED traversal.

class RenderLightType(*values)#

Bases: IntEnum

Light types supported by the Warp raytracer.

DIRECTIONAL = 1#

Directional Light.

SPOTLIGHT = 0#

Spotlight.

class RenderOrder(*values)#

Bases: IntEnum

Render Order

PIXEL_PRIORITY = 0#

Render the same pixel of every view before continuing to the next one

TILED = 2#

Render pixels in tiles, defined by tile_width x tile_height

VIEW_PRIORITY = 1#

Render all pixels of a whole view before continuing to the next one

class Utils(render_context)#

Bases: object

Utility functions for the RenderContext.

__init__(render_context)#
assign_checkerboard_material_to_all_shapes(resolution=64, checker_size=32)#

Assign a gray checkerboard texture material to all shapes. Creates a gray checkerboard pattern texture and applies it to all shapes in the scene.

Parameters:
  • resolution (int) – Texture resolution in pixels (square texture).

  • checker_size (int) – Size of each checkerboard square in pixels.

assign_random_colors_per_shape(seed=100)#

Assign a random color to each shape.

Deprecated since version 1.1: Use shape colors instead (e.g. builder.add_shape_cylinder(..., color=(r, g, b))).

Parameters:

seed (int) – Random seed.

assign_random_colors_per_world(seed=100)#

Assign each world a random color, applied to all its shapes.

Deprecated since version 1.1: Use shape colors instead (e.g. builder.add_shape_cylinder(..., color=(r, g, b))).

Parameters:

seed (int) – Random seed.

compute_pinhole_camera_rays(width, height, camera_fovs)#

Compute camera-space ray directions for pinhole cameras.

Generates rays in camera space (origin at the camera center, direction normalized) for each pixel based on the vertical field of view.

Parameters:
  • width (int) – Image width [px].

  • height (int) – Image height [px].

  • camera_fovs (float | list[float] | np.ndarray | wp.array[wp.float32]) – Vertical FOV angles [rad], shape (camera_count,).

Returns:

Shape (camera_count, height, width, 2), dtype vec3f.

Return type:

camera_rays (wp.array4d[wp.vec3f])

convert_ray_depth_to_forward_depth(depth_image, camera_transforms, camera_rays, out_depth=None)#

Convert ray-distance depth to forward (planar) depth.

Projects each pixel’s hit distance along its ray onto the camera’s forward axis, producing depth measured perpendicular to the image plane. The forward axis is derived from each camera transform by transforming camera-space (0, 0, -1) into world space.

Parameters:
  • depth_image (wp.array4d[wp.float32]) – Ray-distance depth [m] from update(), shape (world_count, camera_count, height, width).

  • camera_transforms (wp.array2d[wp.transformf]) – World-space camera transforms, shape (camera_count, world_count).

  • camera_rays (wp.array4d[wp.vec3f]) – Camera-space rays from compute_pinhole_camera_rays(), shape (camera_count, height, width, 2).

  • out_depth (wp.array4d[wp.float32] | None) – Output forward-depth array [m] with the same shape as depth_image. If None, allocates a new one.

Returns:

Forward (planar) depth array, same shape as depth_image [m].

Return type:

wp.array4d[wp.float32]

create_albedo_image_output(width, height, camera_count=1)#

Create an albedo output array for update().

Parameters:
  • width (int) – Image width [px].

  • height (int) – Image height [px].

  • camera_count (int) – Number of cameras.

Returns:

Array of shape (world_count, camera_count, height, width), dtype uint32.

Return type:

wp.array(dtype=wp.uint32, ndim=4)

create_color_image_output(width, height, camera_count=1)#

Create a color output array for update().

Parameters:
  • width (int) – Image width [px].

  • height (int) – Image height [px].

  • camera_count (int) – Number of cameras.

Returns:

Array of shape (world_count, camera_count, height, width), dtype uint32.

Return type:

wp.array(dtype=wp.uint32, ndim=4)

create_default_light(enable_shadows=True, direction=None)#

Create a default directional light oriented along (-1, 1, -1).

Parameters:
  • enable_shadows (bool) – Enable shadow casting for this light.

  • direction (vec3f | None) – Normalized light direction. If None, defaults to normalized (-1, 1, -1).

create_depth_image_output(width, height, camera_count=1)#

Create a depth output array for update().

Parameters:
  • width (int) – Image width [px].

  • height (int) – Image height [px].

  • camera_count (int) – Number of cameras.

Returns:

Array of shape (world_count, camera_count, height, width), dtype float32.

Return type:

wp.array(dtype=wp.float32, ndim=4)

create_hdr_color_image_output(width, height, camera_count=1)#

Create a linear HDR color output array for update().

Parameters:
  • width (int) – Image width [px].

  • height (int) – Image height [px].

  • camera_count (int) – Number of cameras.

Returns:

Array of shape (world_count, camera_count, height, width), dtype vec3f.

Return type:

wp.array(dtype=wp.vec3f, ndim=4)

create_normal_image_output(width, height, camera_count=1)#

Create a normal output array for update().

Parameters:
  • width (int) – Image width [px].

  • height (int) – Image height [px].

  • camera_count (int) – Number of cameras.

Returns:

Array of shape (world_count, camera_count, height, width), dtype vec3f.

Return type:

wp.array(dtype=wp.vec3f, ndim=4)

create_shape_index_image_output(width, height, camera_count=1)#

Create a shape-index output array for update().

Parameters:
  • width (int) – Image width [px].

  • height (int) – Image height [px].

  • camera_count (int) – Number of cameras.

Returns:

Array of shape (world_count, camera_count, height, width), dtype uint32.

Return type:

wp.array(dtype=wp.uint32, ndim=4)

flatten_color_image_to_rgba(image, out_buffer=None, worlds_per_row=None)#

Flatten rendered color image to a tiled RGBA buffer.

Arranges (world_count * camera_count) tiles in a grid. Each tile shows one camera’s view of one world. Useful for writing a single pre-tiled image to disk; use to_rgba_from_color() with log_image() for in-viewer display.

Parameters:
  • image (wp.array4d[wp.uint32]) – Color output from update(), shape (world_count, camera_count, height, width).

  • out_buffer (wp.array3d[wp.uint8] | None) – Pre-allocated RGBA buffer. If None, allocates a new one.

  • worlds_per_row (int | None) – Tiles per row in the grid. If None, picks a square-ish layout.

flatten_depth_image_to_rgba(image, out_buffer=None, worlds_per_row=None, depth_range=None)#

Flatten rendered depth image to a tiled RGBA buffer.

Encodes depth as grayscale: inverts values (closer = brighter) and normalizes to the [50, 255] range. Background pixels (no hit) remain black. Useful for writing a single pre-tiled image to disk; use to_rgba_from_depth() with log_image() for in-viewer display.

Parameters:
  • image (wp.array4d[wp.float32]) – Depth output from update(), shape (world_count, camera_count, height, width).

  • out_buffer (wp.array3d[wp.uint8] | None) – Pre-allocated RGBA buffer. If None, allocates a new one.

  • worlds_per_row (int | None) – Tiles per row in the grid. If None, picks a square-ish layout.

  • depth_range (wp.array[wp.float32] | None) – Depth range to normalize to, shape (2,) [near, far]. If None, computes from image.

flatten_normal_image_to_rgba(image, out_buffer=None, worlds_per_row=None)#

Flatten rendered normal image to a tiled RGBA buffer.

Arranges (world_count * camera_count) tiles in a grid. Each tile shows one camera’s view of one world. Useful for writing a single pre-tiled image to disk; use to_rgba_from_normal() with log_image() for in-viewer display.

Parameters:
  • image (wp.array4d[wp.vec3f]) – Normal output from update(), shape (world_count, camera_count, height, width).

  • out_buffer (wp.array3d[wp.uint8] | None) – Pre-allocated RGBA buffer. If None, allocates a new one.

  • worlds_per_row (int | None) – Tiles per row in the grid. If None, picks a square-ish layout.

to_rgba_from_color(image)#

Reinterpret packed uint32 RGBA color sensor output as uint8 RGBA.

Returns a zero-copy view: each uint32 (R | G<<8 | B<<16 | A<<24) aliases 4 contiguous uint8 channels and the (world_count, camera_count) axes are flattened. The returned array shares memory with image; do not write into it.

The returned array plugs directly into log_image(). World is the slower-changing axis: tile i has world = i // camera_count and camera = i % camera_count.

Parameters:

image (wp.array(dtype=wp.uint32, ndim=4)) – Color sensor output, shape (world_count, camera_count, H, W), dtype uint32 (packed RGBA: R | G<<8 | B<<16 | A<<24). Must be contiguous; arrays returned by update() always satisfy this.

Returns:

Array of shape (world_count * camera_count, H, W, 4), dtype uint8, aliasing image.

Return type:

wp.array(dtype=wp.uint8, ndim=4)

to_rgba_from_depth(image, depth_range=None, out_buffer=None)#

Convert float32 depth sensor output to uint8 grayscale RGBA.

Closer pixels render brighter; miss pixels (depth <= 0; matches the default ClearData.clear_depth = 0.0 sentinel) render black. Alpha = 255.

Parameters:
  • image (wp.array4d[wp.float32]) – Depth output, shape (world_count, camera_count, H, W), dtype float32. Non-positive values denote ray misses.

  • depth_range (wp.array[wp.float32] | tuple[float, float] | None) – Optional (near, far) [m] for normalization. Accepts a 2-element wp.array[wp.float32] or a Python (near, far) tuple. If None, the per-frame range is computed on device by find_depth_range() (matches flatten_depth_image_to_rgba()).

  • out_buffer (wp.array4d[wp.uint8] | None) – Optional pre-allocated output of shape (world_count * camera_count, H, W, 4), dtype uint8.

Returns:

Array of shape (world_count * camera_count, H, W, 4), dtype uint8. Suitable for log_image().

Return type:

wp.array4d[wp.uint8]

to_rgba_from_normal(image, out_buffer=None)#

Convert vec3 normal sensor output to uint8 RGBA.

Parameters:
  • image (wp.array4d[wp.vec3f]) – Normal output, shape (world_count, camera_count, H, W), dtype vec3f.

  • out_buffer (wp.array4d[wp.uint8] | None) – Optional pre-allocated output of shape (world_count * camera_count, H, W, 4), dtype uint8.

Returns:

Array of shape (world_count * camera_count, H, W, 4), dtype uint8. Suitable for log_image().

Return type:

wp.array4d[wp.uint8]

to_rgba_from_shape_index(image, colors=None, out_buffer=None)#

Convert uint32 shape-index sensor output to uint8 RGBA.

Parameters:
  • image (wp.array4d[wp.uint32]) – Shape-index output, shape (world_count, camera_count, H, W), dtype uint32.

  • colors (wp.array2d[wp.uint8] | None) – Optional RGB palette of shape (num_entries, 3), dtype uint8. If provided, each pixel is colored by looking up its shape index in this palette (indices past the palette length render black). If None, a deterministic hash palette is used (good for debugging which shape hit which pixel without a predefined class map).

  • out_buffer (wp.array4d[wp.uint8] | None) – Optional pre-allocated output of shape (world_count * camera_count, H, W, 4), dtype uint8.

Returns:

Array of shape (world_count * camera_count, H, W, 4), dtype uint8. Suitable for log_image().

Return type:

wp.array4d[wp.uint8]

__init__(model, *, config=None, load_textures=True)#

Initialize the tiled camera sensor from a simulation model.

Builds the internal RenderContext, loads shape geometry (and optionally textures) from model, and exposes utils for creating output buffers, computing rays, and assigning materials.

Parameters:
  • model (Model) – Simulation model whose shapes will be rendered.

  • config (Config | RenderConfig | None) – Rendering configuration. Pass a RenderConfig to control raytrace settings directly, or None to use defaults. The legacy Config dataclass is still accepted but deprecated.

  • load_textures (bool) – Load texture data from the model. Set to False to skip texture loading when textures are not needed.

assign_checkerboard_material_to_all_shapes(resolution=64, checker_size=32)#

Assign a gray checkerboard texture material to all shapes.

Creates a gray checkerboard pattern texture and applies it to all shapes in the scene.

Deprecated since version 1.1: Use SensorTiledCamera.utils.assign_checkerboard_material_to_all_shapes instead.

Parameters:
  • resolution (int) – Texture resolution in pixels (square texture).

  • checker_size (int) – Size of each checkerboard square in pixels.

assign_random_colors_per_shape(seed=100)#

Assign a random color to each shape.

Deprecated since version 1.1: Use shape colors instead (e.g. builder.add_shape_cylinder(..., color=(r, g, b))).

Parameters:

seed (int) – Random seed.

assign_random_colors_per_world(seed=100)#

Assign each world a random color, applied to all its shapes.

Deprecated since version 1.1: Use shape colors instead (e.g. builder.add_shape_cylinder(..., color=(r, g, b))).

Parameters:

seed (int) – Random seed.

compute_pinhole_camera_rays(width, height, camera_fovs)#

Compute camera-space ray directions for pinhole cameras.

Generates rays in camera space (origin at the camera center, direction normalized) for each pixel based on the vertical field of view.

Deprecated since version 1.1: Use SensorTiledCamera.utils.compute_pinhole_camera_rays instead.

Parameters:
  • width (int) – Image width [px].

  • height (int) – Image height [px].

  • camera_fovs (float | list[float] | np.ndarray | wp.array[wp.float32]) – Vertical FOV angles [rad], shape (camera_count,).

Returns:

Shape (camera_count, height, width, 2), dtype vec3f.

Return type:

camera_rays (wp.array4d[wp.vec3f])

create_albedo_image_output(width, height, camera_count=1)#

Create an albedo output array for update().

Deprecated since version 1.1: Use SensorTiledCamera.utils.create_albedo_image_output instead.

Parameters:
  • width (int) – Image width [px].

  • height (int) – Image height [px].

  • camera_count (int) – Number of cameras.

Returns:

Array of shape (world_count, camera_count, height, width), dtype uint32.

Return type:

wp.array(dtype=wp.uint32, ndim=4)

create_color_image_output(width, height, camera_count=1)#

Create a color output array for update().

Deprecated since version 1.1: Use SensorTiledCamera.utils.create_color_image_output instead.

Parameters:
  • width (int) – Image width [px].

  • height (int) – Image height [px].

  • camera_count (int) – Number of cameras.

Returns:

Array of shape (world_count, camera_count, height, width), dtype uint32.

Return type:

wp.array(dtype=wp.uint32, ndim=4)

create_default_light(enable_shadows=True)#

Create a default directional light oriented along (-1, 1, -1).

Deprecated since version 1.1: Use SensorTiledCamera.utils.create_default_light instead.

Parameters:

enable_shadows (bool) – Enable shadow casting for this light.

create_depth_image_output(width, height, camera_count=1)#

Create a depth output array for update().

Deprecated since version 1.1: Use SensorTiledCamera.utils.create_depth_image_output instead.

Parameters:
  • width (int) – Image width [px].

  • height (int) – Image height [px].

  • camera_count (int) – Number of cameras.

Returns:

Array of shape (world_count, camera_count, height, width), dtype float32.

Return type:

wp.array(dtype=wp.float32, ndim=4)

create_normal_image_output(width, height, camera_count=1)#

Create a normal output array for update().

Deprecated since version 1.1: Use SensorTiledCamera.utils.create_normal_image_output instead.

Parameters:
  • width (int) – Image width [px].

  • height (int) – Image height [px].

  • camera_count (int) – Number of cameras.

Returns:

Array of shape (world_count, camera_count, height, width), dtype vec3f.

Return type:

wp.array(dtype=wp.vec3f, ndim=4)

create_shape_index_image_output(width, height, camera_count=1)#

Create a shape-index output array for update().

Deprecated since version 1.1: Use SensorTiledCamera.utils.create_shape_index_image_output instead.

Parameters:
  • width (int) – Image width [px].

  • height (int) – Image height [px].

  • camera_count (int) – Number of cameras.

Returns:

Array of shape (world_count, camera_count, height, width), dtype uint32.

Return type:

wp.array(dtype=wp.uint32, ndim=4)

flatten_color_image_to_rgba(image, out_buffer=None, worlds_per_row=None)#

Flatten rendered color image to a tiled RGBA buffer.

Arranges (world_count * camera_count) tiles in a grid. Each tile shows one camera’s view of one world.

Deprecated since version 1.1: Use SensorTiledCamera.utils.flatten_color_image_to_rgba instead.

Parameters:
  • image (wp.array4d[wp.uint32]) – Color output from update(), shape (world_count, camera_count, height, width).

  • out_buffer (wp.array3d[wp.uint8] | None) – Pre-allocated RGBA buffer. If None, allocates a new one.

  • worlds_per_row (int | None) – Tiles per row in the grid. If None, picks a square-ish layout.

flatten_depth_image_to_rgba(image, out_buffer=None, worlds_per_row=None, depth_range=None)#

Flatten rendered depth image to a tiled RGBA buffer.

Encodes depth as grayscale: inverts values (closer = brighter) and normalizes to the [50, 255] range. Background pixels (no hit) remain black.

Deprecated since version 1.1: Use SensorTiledCamera.utils.flatten_depth_image_to_rgba instead.

Parameters:
  • image (wp.array4d[wp.float32]) – Depth output from update(), shape (world_count, camera_count, height, width).

  • out_buffer (wp.array3d[wp.uint8] | None) – Pre-allocated RGBA buffer. If None, allocates a new one.

  • worlds_per_row (int | None) – Tiles per row in the grid. If None, picks a square-ish layout.

  • depth_range (wp.array[wp.float32] | None) – Depth range to normalize to, shape (2,) [near, far]. If None, computes from image.

flatten_normal_image_to_rgba(image, out_buffer=None, worlds_per_row=None)#

Flatten rendered normal image to a tiled RGBA buffer.

Arranges (world_count * camera_count) tiles in a grid. Each tile shows one camera’s view of one world.

Deprecated since version 1.1: Use SensorTiledCamera.utils.flatten_normal_image_to_rgba instead.

Parameters:
  • image (wp.array4d[wp.vec3f]) – Normal output from update(), shape (world_count, camera_count, height, width).

  • out_buffer (wp.array3d[wp.uint8] | None) – Pre-allocated RGBA buffer. If None, allocates a new one.

  • worlds_per_row (int | None) – Tiles per row in the grid. If None, picks a square-ish layout.

sync_transforms(state)#

Synchronize triangle-mesh points from the simulation state.

update() calls this automatically when state is not None.

Shape and particle BVHs on model must be built once via build_bvh_shape() and build_bvh_particle() before first use. Before later frames that change geometry, refit them via refit_bvh_shape() and refit_bvh_particle() prior to calling update().

Parameters:

state (newton.State) – The current simulation state containing particle positions.

update(state=None, camera_transforms=None, camera_rays=None, *, color_image=None, depth_image=None, shape_index_image=None, normal_image=None, albedo_image=None, clear_data=DEFAULT_CLEAR_DATA, refit_bvh=None, hdr_color_image=None)#

Render output images for all worlds and cameras.

Each output array has shape (world_count, camera_count, height, width) where element [world_id, camera_id, y, x] corresponds to the ray in camera_rays[camera_id, y, x]. Each output channel is optional – pass None to skip that channel’s rendering entirely.

Shape and particle BVHs on model must be built once for the initial state via build_bvh_shape() and build_bvh_particle() before first use. Before later frames that change geometry, refit them for state via refit_bvh_shape() and refit_bvh_particle() before calling this method.

Parameters:
  • state (State | None) – Simulation state with body and particle transforms. Passing None is deprecated and will be removed in a future release.

  • camera_transforms (wp.array2d[wp.transformf] | None) – Camera-to-world transforms, shape (camera_count, world_count).

  • camera_rays (wp.array4d[wp.vec3f] | None) – Camera-space rays from compute_pinhole_camera_rays(), shape (camera_count, height, width, 2).

  • color_image (wp.array4d[wp.uint32] | None) – Output for RGBA color. None to skip.

  • depth_image (wp.array4d[wp.float32] | None) – Output for ray-hit distance [m]. None to skip.

  • shape_index_image (wp.array4d[wp.uint32] | None) – Output for per-pixel shape id. None to skip.

  • normal_image (wp.array4d[wp.vec3f] | None) – Output for surface normals. None to skip.

  • albedo_image (wp.array4d[wp.uint32] | None) – Output for unshaded surface color. None to skip.

  • clear_data (ClearData | None) – Values to clear output buffers with. See DEFAULT_CLEAR_DATA, GRAY_CLEAR_DATA.

  • refit_bvh (bool | None) – Refit the BVH before rendering. This is deprecated, use build_bvh_shape(), refit_bvh_shape(), build_bvh_particle(), and refit_bvh_particle() explicitly before calling this method instead.

  • hdr_color_image (wp.array4d[wp.vec3f] | None) – Output for linear HDR color. None to skip.

DEFAULT_CLEAR_DATA = ClearData(clear_color=0, clear_depth=0.0, clear_shape_index=4294967295, clear_normal=(0.0, 0.0, 0.0), clear_albedo=0)#
GRAY_CLEAR_DATA = ClearData(clear_color=4284900966, clear_depth=0.0, clear_shape_index=4294967295, clear_normal=(0.0, 0.0, 0.0), clear_albedo=4278190080)#
property render_config: RenderConfig#

Low-level raytrace settings on the internal RenderContext.

Populated at construction from Config and from fixed defaults (for example global world and shadow flags on the context). Attributes may be modified to change behavior for subsequent update() calls.

Returns:

The live RenderConfig instance (same object as render_context.config without triggering deprecation warnings).

property render_context: RenderContext#

Internal Warp raytracing context used by update() and buffer helpers.

Deprecated since version 1.1: Direct access is deprecated and will be removed. Prefer this class’s public methods, or render_config for RenderConfig access.

Returns:

The shared RenderContext instance.

property utils: Utils#

Utility helpers for creating output buffers, computing rays, and assigning materials/lights.