Python TransformFrame

HORUS provides a Python API for coordinate frame management — registering frames, updating transforms, and looking up transformations between any two frames in the tree.

Transform

A 3D rigid transformation (translation + quaternion rotation).

Creating Transforms

from horus import Transform

# Identity transform
tf = Transform.identity()

# From translation and rotation quaternion
tf = Transform(translation=[1.0, 2.0, 0.0], rotation=[0.0, 0.0, 0.0, 1.0])

# From translation only (identity rotation)
tf = Transform.from_translation([1.0, 2.0, 3.0])

# From Euler angles (translation + roll/pitch/yaw)
tf = Transform.from_euler([1.0, 0.0, 0.5], [0.0, 0.1, 1.57])

# From 4x4 homogeneous matrix
tf = Transform.from_matrix(matrix_4x4)

Properties

tf.translation  # [x, y, z] as list of floats
tf.rotation     # [x, y, z, w] quaternion as list of floats

Both are readable and writable:

tf.translation = [2.0, 3.0, 0.0]
tf.rotation = [0.0, 0.0, 0.383, 0.924]  # 45 degrees around Z

Methods

# Convert to Euler angles
roll, pitch, yaw = tf.to_euler()

# Compose transforms: result = self * other
combined = parent_tf.compose(child_tf)

# Inverse transform
inv = tf.inverse()

# Apply to a point
point_in_world = tf.transform_point([1.0, 0.0, 0.0])

# Apply rotation only (no translation)
rotated = tf.transform_vector([1.0, 0.0, 0.0])

# Interpolate between two transforms (SLERP for rotation)
halfway = tf_a.interpolate(tf_b, t=0.5)

# Magnitudes
dist = tf.translation_magnitude()  # Translation distance
angle = tf.rotation_angle()        # Rotation angle in radians

# Export as 4x4 matrix
matrix = tf.to_matrix()  # 4x4 nested list of floats (Rust type: [[f64; 4]; 4])
| Method | Returns | Description |
|---|---|---|
| `identity()` | `Transform` | No translation, no rotation |
| `from_translation([x,y,z])` | `Transform` | Translation only |
| `from_euler([x,y,z], [r,p,y])` | `Transform` | Translation + Euler angles |
| `from_matrix(4x4)` | `Transform` | From homogeneous matrix |
| `to_euler()` | `[roll, pitch, yaw]` | Get Euler angles |
| `compose(other)` | `Transform` | Chain transforms (self * other) |
| `inverse()` | `Transform` | Compute inverse |
| `transform_point([x,y,z])` | `[x,y,z]` | Apply full transform to point |
| `transform_vector([x,y,z])` | `[x,y,z]` | Apply rotation only to vector |
| `interpolate(other, t)` | `Transform` | SLERP interpolation (0.0-1.0) |
| `translation_magnitude()` | `float` | Translation distance |
| `rotation_angle()` | `float` | Rotation angle in radians |
| `to_matrix()` | `4x4 list` | Export as homogeneous matrix |

TransformFrame

The frame tree manager. Stores a hierarchy of coordinate frames and computes transforms between any two frames.

Creating a TransformFrame

from horus import TransformFrame, TransformFrameConfig

# Default configuration
tf_tree = TransformFrame()

# With custom config
config = TransformFrameConfig(max_frames=1024, history_len=64)
tf_tree = TransformFrame(config=config)

# Preset sizes
tf_tree = TransformFrame.small()    # 256 frames, ~550KB
tf_tree = TransformFrame.medium()   # 1024 frames, ~2.2MB
tf_tree = TransformFrame.large()    # 4096 frames, ~9MB
tf_tree = TransformFrame.massive()  # 16384 frames, ~35MB

Registering Frames

# Register child frame under a parent — returns frame ID (int)
frame_id = tf_tree.register_frame("base_link", "world")
tf_tree.register_frame("lidar", "base_link")
tf_tree.register_frame("camera", "base_link")

# Unregister a frame (raises HorusNotFoundError if not found)
tf_tree.unregister_frame("camera")

Updating Transforms

from horus import Transform

# Set the transform from parent to child (raises on unknown frame)
tf_tree.update_transform("base_link", Transform(
    translation=[1.0, 0.0, 0.0],
    rotation=[0.0, 0.0, 0.0, 1.0]
))

# With optional explicit timestamp
tf_tree.update_transform("lidar", Transform.from_translation([0.0, 0.0, 0.3]),
                         timestamp_ns=1234567890)

Looking Up Transforms

# Get transform from source frame to target frame
tf = tf_tree.tf("lidar", "world")
print(f"Lidar in world: {tf.translation}")

# With specific timestamp (for interpolation)
tf = tf_tree.tf_at("lidar", "world", timestamp_ns=1234567890)

Querying the Tree

# List all registered frames
frames = tf_tree.all_frames()  # ["world", "base_link", "lidar", "camera"]

# Get parent of a frame
parent = tf_tree.parent("lidar")  # "base_link"

# Get children of a frame
children = tf_tree.children("base_link")  # ["lidar", "camera"]

# Seconds since last transform update (None if never updated)
age = tf_tree.time_since_last_update("lidar")  # e.g., 0.015

# Wait for a transform to become available (blocking with timeout)
tf = tf_tree.wait_for_transform("lidar", "world", timeout_sec=1.0)
print(f"Got transform: {tf.translation}")

Frame Registration

| Method | Returns | Description |
|---|---|---|
| `register_frame(name, parent)` | `int` | Register dynamic frame, returns frame ID |
| `register_static_frame(name, transform, parent=None)` | `int` | Register frame with fixed transform |
| `unregister_frame(name)` | `None` | Remove a frame |
| `has_frame(name)` | `bool` | Check if frame exists |
| `frame_count()` | `int` | Number of registered frames |
| `frame_id(name)` | `int` | Get numeric frame ID from name |
| `frame_name(id)` | `str` | Get frame name from numeric ID |

Updating Transforms

| Method | Returns | Description |
|---|---|---|
| `update_transform(name, tf, timestamp_ns=None)` | `None` | Set transform for a frame |
| `update_transform_by_id(frame_id, tf, timestamp_ns=None)` | `None` | Update by numeric ID (faster) |
| `set_static_transform(name, transform)` | `None` | Set a static (never-changing) transform |

Looking Up Transforms

| Method | Returns | Description |
|---|---|---|
| `tf(source, target)` | `Transform` | Latest transform between frames |
| `tf_at(source, target, ts)` | `Transform` | Transform at specific timestamp (interpolated) |
| `tf_at_strict(source, target, ts)` | `Transform` | Exact timestamp match (no interpolation) |
| `tf_at_with_tolerance(src, dst, ts, tolerance_ns=100_000_000)` | `Transform` | Interpolated with tolerance window |
| `tf_by_id(src_id, dst_id)` | `Transform` | Look up by numeric IDs (fastest) |
| `can_transform(source, target)` | `bool` | Check if transform path exists |
| `can_transform_at(src, dst, ts)` | `bool` | Check if available at timestamp |
| `can_transform_at_with_tolerance(src, dst, ts, tolerance_ns)` | `bool` | Check with tolerance |
| `wait_for_transform(src, dst, timeout_sec)` | `Transform` | Block until available |
| `wait_for_transform_async(src, dst, timeout_sec)` | `Future[Transform]` | Async wait (returns concurrent.futures.Future) |

Applying Transforms

| Method | Returns | Description |
|---|---|---|
| `transform_point(source, target, [x,y,z])` | `[x,y,z]` | Transform a point between frames |
| `transform_vector(source, target, [x,y,z])` | `[x,y,z]` | Transform a vector (rotation only) |

Querying the Tree

| Method | Returns | Description |
|---|---|---|
| `all_frames()` | `list[str]` | All registered frame names |
| `parent(name)` | `str or None` | Parent frame name |
| `children(name)` | `list[str]` | Child frame names |
| `frame_chain(source, target)` | `list[str]` | Frame path from source to target |
| `time_since_last_update(name)` | `float or None` | Seconds since last update |
| `is_stale(name, max_age_sec=1.0)` | `bool` | Check if frame data is stale |

Diagnostics

| Method | Returns | Description |
|---|---|---|
| `stats()` | `dict` | Frame tree statistics |
| `validate()` | `bool` | Validate tree integrity |
| `frame_info(name)` | `dict` | Metadata for a single frame |
| `frame_info_all()` | `list[dict]` | Metadata for all frames |
| `format_tree()` | `str` | Human-readable tree visualization |
| `frames_as_dot()` | `str` | DOT graph format (for Graphviz) |
| `frames_as_yaml()` | `str` | YAML export of frame tree |

Advanced Usage

# Static frames — set once, never changes
tf_tree.register_static_frame("lidar", Transform.from_translation([0.0, 0.0, 0.3]), parent="base_link")

# Transform points directly between frames
world_point = tf_tree.transform_point("lidar", "world", [5.0, 0.0, 0.0])

# Async wait (non-blocking)
import concurrent.futures
future = tf_tree.wait_for_transform_async("lidar", "world", timeout_sec=2.0)
tf = future.result()  # Blocks until ready

# Check staleness before using data
if tf_tree.is_stale("base_link", max_age_sec=0.1):
    print("Odometry data is stale!")

# Diagnostics
print(tf_tree.format_tree())   # Visual tree
print(tf_tree.stats())         # {"frames": 5, "lookups": 1234, ...}
tf_tree.validate()             # Checks tree integrity

TransformFrameConfig

Configuration for the frame tree.

from horus import TransformFrameConfig

# Custom
config = TransformFrameConfig(max_frames=512, history_len=16)

# Presets
config = TransformFrameConfig.small()    # 256 frames, ~550KB
config = TransformFrameConfig.medium()   # 1024 frames, ~2.2MB
config = TransformFrameConfig.large()    # 4096 frames, ~9MB
config = TransformFrameConfig.massive()  # 16384 frames, ~35MB

# Check memory usage
print(config.max_frames)       # 512
print(config.history_len)      # 16
print(config.memory_estimate())  # "~1.1MB"

Complete Example

from horus import Node, Scheduler, TransformFrame, Transform
import math

# Global transform tree
tf_tree = TransformFrame.medium()

def setup_frames(node):
    tf_tree.register_frame("base_link", "world")
    tf_tree.register_frame("lidar", "base_link")
    tf_tree.register_frame("camera", "base_link")

    # Fixed mounting offset: lidar is 30cm above base
    # (for a true static frame, use register_static_frame instead)
    tf_tree.update_transform("lidar", Transform.from_translation([0.0, 0.0, 0.3]))

    # Fixed mounting offset: camera is 10cm forward, 15cm up
    tf_tree.update_transform("camera", Transform.from_translation([0.1, 0.0, 0.15]))

tick_count = 0

def odometry_tick(node):
    global tick_count
    tick_count += 1

    # Simulate robot moving in a circle
    t = tick_count * 0.01
    x = math.cos(t) * 2.0
    y = math.sin(t) * 2.0
    yaw = t + math.pi / 2

    # Update base_link in world
    tf_tree.update_transform("base_link",
        Transform.from_euler([x, y, 0.0], [0.0, 0.0, yaw]))

def perception_tick(node):
    # Transform a lidar point into world coordinates
    try:
        lidar_to_world = tf_tree.tf("lidar", "world")
        point_in_world = lidar_to_world.transform_point([5.0, 0.0, 0.0])
        node.log_info(f"Obstacle at world: {point_in_world}")
    except Exception:
        pass  # Frame not yet available

odom = Node(name="odom", tick=odometry_tick, init=setup_frames, rate=100)
percept = Node(name="perception", tick=perception_tick, rate=10)

scheduler = Scheduler()
scheduler.add(odom, order=0)
scheduler.add(percept, order=1)
scheduler.run(duration=10)

Utility

from horus import get_timestamp_ns

# Get current time in nanoseconds (same clock as Rust)
now = get_timestamp_ns()

See Also