diff --git a/docs/source/docs/contributing/design-descriptions/index.md b/docs/source/docs/contributing/design-descriptions/index.md index b6e706271a..78dc4463a2 100644 --- a/docs/source/docs/contributing/design-descriptions/index.md +++ b/docs/source/docs/contributing/design-descriptions/index.md @@ -6,4 +6,5 @@ image-rotation time-sync camera-matching e2e-latency +robot-to-camera ``` diff --git a/docs/source/docs/contributing/design-descriptions/robot-to-camera.md b/docs/source/docs/contributing/design-descriptions/robot-to-camera.md new file mode 100644 index 0000000000..2a3589c557 --- /dev/null +++ b/docs/source/docs/contributing/design-descriptions/robot-to-camera.md @@ -0,0 +1,14 @@ +# Robot to Camera + +## How 3D pose estimation works + +At its core, Photonvision's 3D pose estimation is built around solving the Perspective-n-Point (PnP) problem. The PnP problem is essentially, 'given a set of points in 3D space, and their projections on a 2D image, determine the pose of the camera'. In photonvision's case, the points are the corners of one or more apriltags. +However, this leaves us with the camera's pose, *not* the robot's pose. The solution, of course, is to apply an offset. For 3D Pose estimation, Photonvision associates to each camera a `Transform3d` object, representing a vector encoding the 6DOF transformation from the robot (or, rather, the point on the robot considered to be its center) to the camera, and the final step of pose estimation is to transform the camera's pose (that got spit out of the PnP solver) by the inverse of this vector, yielding an estimate of the robot's pose. + +## What does the plumbing look like? + +The `PhotonCamera` object is (optionally) passed a `Transform3d` object by the Robot code, corresponding to the robot-to-camera transformation. This `Transform3d` (if passed) is transmitted to the coprocessor by the `PhotonCamera`. 
The coprocessor then appends that `Transform3d` to every result it sends back to the robot controller, where the `PhotonPoseEstimator` object then applies the transformation to the camera pose (if no `Transform3d` is passed to `PhotonCamera`, then no `Transform3d` is appended to the results, and `PhotonPoseEstimator` simply won't work. The result stores the `Transform3d` as an optional.) + +## That seems complicated and silly. Why not just keep the robot to camera transform in PhotonLib? + +We used to! However, a new algorithm for fusing gyroscopic data with PnP pose observations has been added to PhotonVision, Constrained PnP, offering significantly improved accuracy and stability. Notably, this algorithm requires the robot-to-camera transform to run properly. In previous seasons, this wasn't a problem, as Constrained PnP ran entirely on the RoboRIO anyway, in order to access robot gyro data. However, Constrained PnP is very computationally expensive, and so work is being done to offload the work to the coprocessor, and expose gyro data to the coprocessor. 
The switch to having the robot-to-camera transform exposed to the coprocessor over NetworkTables and becoming an integrated part of Photonvision results is to facilitate the offloading of the Constrained PnP workload (and other estimation workloads that require Gyroscopic data) to the coprocessor diff --git a/photon-core/src/main/java/org/photonvision/common/dataflow/networktables/NTDataPublisher.java b/photon-core/src/main/java/org/photonvision/common/dataflow/networktables/NTDataPublisher.java index 53e7cba3a4..10c6660c3c 100644 --- a/photon-core/src/main/java/org/photonvision/common/dataflow/networktables/NTDataPublisher.java +++ b/photon-core/src/main/java/org/photonvision/common/dataflow/networktables/NTDataPublisher.java @@ -18,6 +18,7 @@ package org.photonvision.common.dataflow.networktables; import java.util.List; +import java.util.function.BiConsumer; import java.util.function.BooleanSupplier; import java.util.function.Consumer; import java.util.function.Supplier; @@ -35,6 +36,7 @@ import org.wpilib.networktables.NetworkTable; import org.wpilib.networktables.NetworkTableEvent; import org.wpilib.networktables.NetworkTablesJNI; +import org.wpilib.networktables.TimestampedObject; public class NTDataPublisher implements CVPipelineResultConsumer { private final Logger logger = new Logger(NTDataPublisher.class, LogGroup.General); @@ -55,6 +57,9 @@ public class NTDataPublisher implements CVPipelineResultConsumer { private final Consumer fpsLimitConsumer; private final Supplier fpsLimitSupplier; + NTDataChangeListener robotToCameraListener; + private final BiConsumer robotToCameraConsumer; + public NTDataPublisher( String cameraNickname, Supplier pipelineIndexSupplier, @@ -62,13 +67,15 @@ public NTDataPublisher( BooleanSupplier driverModeSupplier, Consumer driverModeConsumer, Supplier fpsLimitSupplier, - Consumer fpsLimitConsumer) { + Consumer fpsLimitConsumer, + BiConsumer robotToCameraConsumer) { this.pipelineIndexSupplier = pipelineIndexSupplier; 
this.pipelineIndexConsumer = pipelineIndexConsumer; this.driverModeSupplier = driverModeSupplier; this.driverModeConsumer = driverModeConsumer; this.fpsLimitSupplier = fpsLimitSupplier; this.fpsLimitConsumer = fpsLimitConsumer; + this.robotToCameraConsumer = robotToCameraConsumer; updateCameraNickname(cameraNickname); updateEntries(); @@ -98,6 +105,15 @@ private void onPipelineIndexChange(NetworkTableEvent entryNotification) { logger.debug("Set pipeline index to " + newIndex); } + private void onRobotToCameraChange(NetworkTableEvent entryNotification) { + // HACK: the entryNotification's value can't be cast to Transform3d, so we read directly from + // the subscriber + TimestampedObject robotToCamera = ts.robotToCameraSubscriber.getAtomic(null); + var doubleTime = (robotToCamera.serverTime) * (double) 1e6; + robotToCameraConsumer.accept(doubleTime, robotToCamera.value); + logger.debug("Sampled robot to camera transform as " + robotToCamera + " at t=" + doubleTime); + } + private void onDriverModeChange(NetworkTableEvent entryNotification) { var newDriverMode = entryNotification.valueData.value.getBoolean(); var originalDriverMode = driverModeSupplier.getAsBoolean(); @@ -134,6 +150,7 @@ private void updateEntries() { if (pipelineIndexListener != null) pipelineIndexListener.remove(); if (driverModeListener != null) driverModeListener.remove(); if (fpsLimitListener != null) fpsLimitListener.remove(); + if (robotToCameraListener != null) robotToCameraListener.remove(); ts.updateEntries(); @@ -148,6 +165,10 @@ private void updateEntries() { fpsLimitListener = new NTDataChangeListener( ts.subTable.getInstance(), ts.fpsLimitSubscriber, this::onFPSLimitChange); + + robotToCameraListener = + new NTDataChangeListener( + ts.subTable.getInstance(), ts.robotToCameraSubscriber, this::onRobotToCameraChange); } public void updateCameraNickname(String newCameraNickname) { @@ -187,7 +208,8 @@ public void accept(CVPipelineResult result) { now + offset, 
NetworkTablesManager.getInstance().getTimeSinceLastPong(), TrackedTarget.simpleFromTrackedTargets(acceptedResult.targets), - acceptedResult.multiTagResult); + acceptedResult.multiTagResult, + acceptedResult.robotToCamera); // random guess at size of the array ts.resultPublisher.set(simplified, 1024); diff --git a/photon-core/src/main/java/org/photonvision/vision/frame/provider/FileFrameProvider.java b/photon-core/src/main/java/org/photonvision/vision/frame/provider/FileFrameProvider.java index 2b281b517f..ae0b7530d7 100644 --- a/photon-core/src/main/java/org/photonvision/vision/frame/provider/FileFrameProvider.java +++ b/photon-core/src/main/java/org/photonvision/vision/frame/provider/FileFrameProvider.java @@ -57,10 +57,6 @@ public FileFrameProvider(Path path, double fov, int maxFPS) { this(path, fov, maxFPS, null); } - public FileFrameProvider(Path path, double fov, CameraCalibrationCoefficients calibration) { - this(path, fov, MAX_FPS, calibration); - } - public FileFrameProvider( Path path, double fov, int maxFPS, CameraCalibrationCoefficients calibration) { if (!Files.exists(path)) @@ -97,6 +93,10 @@ public FileFrameProvider(Path path, double fov) { this(path, fov, MAX_FPS); } + public FileFrameProvider(Path path, double fov, CameraCalibrationCoefficients calibration) { + this(path, fov, MAX_FPS, calibration); + } + @Override public CapturedFrame getInputMat() { var out = new CVMat(); diff --git a/photon-core/src/main/java/org/photonvision/vision/pipeline/AprilTagPipeline.java b/photon-core/src/main/java/org/photonvision/vision/pipeline/AprilTagPipeline.java index 5ae3539aaf..f983935230 100644 --- a/photon-core/src/main/java/org/photonvision/vision/pipeline/AprilTagPipeline.java +++ b/photon-core/src/main/java/org/photonvision/vision/pipeline/AprilTagPipeline.java @@ -20,6 +20,7 @@ import java.util.ArrayList; import java.util.List; import java.util.Optional; +import java.util.function.Function; import org.photonvision.common.configuration.ConfigManager; import 
org.photonvision.common.dataflow.structures.Packet; import org.photonvision.common.logging.LogGroup; @@ -63,12 +64,13 @@ public class AprilTagPipeline extends CVPipeline null); settings = new AprilTagPipelineSettings(); } - public AprilTagPipeline(AprilTagPipelineSettings settings) { - super(PROCESSING_TYPE); + public AprilTagPipeline( + AprilTagPipelineSettings settings, Function robotToCameraSampler) { + super(PROCESSING_TYPE, robotToCameraSampler); this.settings = settings; } @@ -247,7 +249,13 @@ protected CVPipelineResult process(Frame frame, AprilTagPipelineSettings setting var fps = fpsResult.output; return new CVPipelineResult( - frame.sequenceID, sumPipeNanosElapsed, fps, targetList, multiTagResult, frame); + frame.sequenceID, + sumPipeNanosElapsed, + fps, + targetList, + multiTagResult, + frame, + Optional.ofNullable(robotToCameraSampler.apply(frame.timestampNanos))); } @Override diff --git a/photon-core/src/main/java/org/photonvision/vision/pipeline/ArucoPipeline.java b/photon-core/src/main/java/org/photonvision/vision/pipeline/ArucoPipeline.java index 52c2f9f5bd..61377b2309 100644 --- a/photon-core/src/main/java/org/photonvision/vision/pipeline/ArucoPipeline.java +++ b/photon-core/src/main/java/org/photonvision/vision/pipeline/ArucoPipeline.java @@ -20,6 +20,7 @@ import java.util.ArrayList; import java.util.List; import java.util.Optional; +import java.util.function.Function; import org.opencv.core.Mat; import org.opencv.imgproc.Imgproc; import org.opencv.objdetect.Objdetect; @@ -55,12 +56,13 @@ public class ArucoPipeline extends CVPipeline null); settings = new ArucoPipelineSettings(); } - public ArucoPipeline(ArucoPipelineSettings settings) { - super(FrameThresholdType.GREYSCALE); + public ArucoPipeline( + ArucoPipelineSettings settings, Function robotToCameraSampler) { + super(FrameThresholdType.GREYSCALE, robotToCameraSampler); this.settings = settings; } @@ -235,7 +237,13 @@ protected CVPipelineResult process(Frame frame, ArucoPipelineSettings 
settings) var fps = fpsResult.output; return new CVPipelineResult( - frame.sequenceID, sumPipeNanosElapsed, fps, targetList, multiTagResult, frame); + frame.sequenceID, + sumPipeNanosElapsed, + fps, + targetList, + multiTagResult, + frame, + Optional.ofNullable(robotToCameraSampler.apply(frame.timestampNanos))); } private void drawThresholdFrame(Mat greyMat, Mat outputMat, int windowSize, double constant) { diff --git a/photon-core/src/main/java/org/photonvision/vision/pipeline/CVPipeline.java b/photon-core/src/main/java/org/photonvision/vision/pipeline/CVPipeline.java index 1d5167909a..9ccfe8e2ca 100644 --- a/photon-core/src/main/java/org/photonvision/vision/pipeline/CVPipeline.java +++ b/photon-core/src/main/java/org/photonvision/vision/pipeline/CVPipeline.java @@ -17,26 +17,31 @@ package org.photonvision.vision.pipeline; +import java.util.function.Function; import org.photonvision.vision.camera.QuirkyCamera; import org.photonvision.vision.frame.Frame; import org.photonvision.vision.frame.FrameStaticProperties; import org.photonvision.vision.frame.FrameThresholdType; import org.photonvision.vision.opencv.Releasable; import org.photonvision.vision.pipeline.result.CVPipelineResult; +import org.wpilib.math.geometry.Transform3d; public abstract class CVPipeline implements Releasable { protected S settings; protected FrameStaticProperties frameStaticProperties; protected QuirkyCamera cameraQuirks; + protected Function robotToCameraSampler; private final FrameThresholdType thresholdType; // So releaseable doesn't keep track of if we double-free something. 
so (ew) remember that here protected volatile boolean released = false; - public CVPipeline(FrameThresholdType thresholdType) { + public CVPipeline( + FrameThresholdType thresholdType, Function robotToCameraSampler) { this.thresholdType = thresholdType; + this.robotToCameraSampler = robotToCameraSampler; } public FrameThresholdType getThresholdType() { diff --git a/photon-core/src/main/java/org/photonvision/vision/pipeline/Calibrate3dPipeline.java b/photon-core/src/main/java/org/photonvision/vision/pipeline/Calibrate3dPipeline.java index e7f6a974c7..e416b22b39 100644 --- a/photon-core/src/main/java/org/photonvision/vision/pipeline/Calibrate3dPipeline.java +++ b/photon-core/src/main/java/org/photonvision/vision/pipeline/Calibrate3dPipeline.java @@ -74,7 +74,7 @@ public Calibrate3dPipeline() { } public Calibrate3dPipeline(int minSnapshots) { - super(PROCESSING_TYPE); + super(PROCESSING_TYPE, (time) -> null); this.settings = new Calibration3dPipelineSettings(); this.foundCornersList = new ArrayList<>(); this.minSnapshots = minSnapshots; diff --git a/photon-core/src/main/java/org/photonvision/vision/pipeline/ColoredShapePipeline.java b/photon-core/src/main/java/org/photonvision/vision/pipeline/ColoredShapePipeline.java index 577cef7ec4..490222a1d9 100644 --- a/photon-core/src/main/java/org/photonvision/vision/pipeline/ColoredShapePipeline.java +++ b/photon-core/src/main/java/org/photonvision/vision/pipeline/ColoredShapePipeline.java @@ -19,6 +19,8 @@ import java.util.Arrays; import java.util.List; +import java.util.Optional; +import java.util.function.Function; import org.opencv.core.Point; import org.photonvision.vision.frame.Frame; import org.photonvision.vision.frame.FrameThresholdType; @@ -31,6 +33,7 @@ import org.photonvision.vision.pipeline.result.CVPipelineResult; import org.photonvision.vision.target.PotentialTarget; import org.photonvision.vision.target.TrackedTarget; +import org.wpilib.math.geometry.Transform3d; import org.wpilib.math.util.Pair; public class 
ColoredShapePipeline @@ -54,12 +57,13 @@ public class ColoredShapePipeline private static final FrameThresholdType PROCESSING_TYPE = FrameThresholdType.HSV; public ColoredShapePipeline() { - super(PROCESSING_TYPE); + super(PROCESSING_TYPE, (time) -> null); settings = new ColoredShapePipelineSettings(); } - public ColoredShapePipeline(ColoredShapePipelineSettings settings) { - super(PROCESSING_TYPE); + public ColoredShapePipeline( + ColoredShapePipelineSettings settings, Function robotToCameraSampler) { + super(PROCESSING_TYPE, robotToCameraSampler); this.settings = settings; } @@ -213,6 +217,12 @@ protected CVPipelineResult process(Frame frame, ColoredShapePipelineSettings set var fpsResult = calculateFPSPipe.run(null); var fps = fpsResult.output; - return new CVPipelineResult(frame.sequenceID, sumPipeNanosElapsed, fps, targetList, frame); + return new CVPipelineResult( + frame.sequenceID, + sumPipeNanosElapsed, + fps, + targetList, + frame, + Optional.ofNullable(robotToCameraSampler.apply(frame.timestampNanos))); } } diff --git a/photon-core/src/main/java/org/photonvision/vision/pipeline/DriverModePipeline.java b/photon-core/src/main/java/org/photonvision/vision/pipeline/DriverModePipeline.java index c4caad708f..dd608cc14c 100644 --- a/photon-core/src/main/java/org/photonvision/vision/pipeline/DriverModePipeline.java +++ b/photon-core/src/main/java/org/photonvision/vision/pipeline/DriverModePipeline.java @@ -18,6 +18,7 @@ package org.photonvision.vision.pipeline; import java.util.List; +import java.util.function.Function; import org.photonvision.common.util.math.MathUtils; import org.photonvision.vision.frame.Frame; import org.photonvision.vision.frame.FrameThresholdType; @@ -25,6 +26,7 @@ import org.photonvision.vision.pipe.impl.Draw2dCrosshairPipe; import org.photonvision.vision.pipe.impl.ResizeImagePipe; import org.photonvision.vision.pipeline.result.DriverModePipelineResult; +import org.wpilib.math.geometry.Transform3d; import org.wpilib.math.util.Pair; public 
class DriverModePipeline @@ -36,12 +38,13 @@ public class DriverModePipeline private static final FrameThresholdType PROCESSING_TYPE = FrameThresholdType.NONE; public DriverModePipeline() { - super(PROCESSING_TYPE); + super(PROCESSING_TYPE, (time) -> null); settings = new DriverModePipelineSettings(); } - public DriverModePipeline(DriverModePipelineSettings settings) { - super(PROCESSING_TYPE); + public DriverModePipeline( + DriverModePipelineSettings settings, Function robotToCameraSampler) { + super(PROCESSING_TYPE, robotToCameraSampler); this.settings = settings; } diff --git a/photon-core/src/main/java/org/photonvision/vision/pipeline/FocusPipeline.java b/photon-core/src/main/java/org/photonvision/vision/pipeline/FocusPipeline.java index 83ad1107af..f780d72a02 100644 --- a/photon-core/src/main/java/org/photonvision/vision/pipeline/FocusPipeline.java +++ b/photon-core/src/main/java/org/photonvision/vision/pipeline/FocusPipeline.java @@ -17,6 +17,7 @@ package org.photonvision.vision.pipeline; +import java.util.function.Function; import org.opencv.core.Mat; import org.photonvision.common.util.math.MathUtils; import org.photonvision.vision.frame.Frame; @@ -26,6 +27,7 @@ import org.photonvision.vision.pipe.impl.FocusPipe; import org.photonvision.vision.pipe.impl.ResizeImagePipe; import org.photonvision.vision.pipeline.result.FocusPipelineResult; +import org.wpilib.math.geometry.Transform3d; public class FocusPipeline extends CVPipeline { private final FocusPipe focusPipe = new FocusPipe(); @@ -35,12 +37,13 @@ public class FocusPipeline extends CVPipeline null); settings = new FocusPipelineSettings(); } - public FocusPipeline(FocusPipelineSettings settings) { - super(PROCESSING_TYPE); + public FocusPipeline( + FocusPipelineSettings settings, Function robotToCameraSampler) { + super(PROCESSING_TYPE, robotToCameraSampler); this.settings = settings; } diff --git a/photon-core/src/main/java/org/photonvision/vision/pipeline/ObjectDetectionPipeline.java 
b/photon-core/src/main/java/org/photonvision/vision/pipeline/ObjectDetectionPipeline.java index a009d2cc9e..28a19441f5 100644 --- a/photon-core/src/main/java/org/photonvision/vision/pipeline/ObjectDetectionPipeline.java +++ b/photon-core/src/main/java/org/photonvision/vision/pipeline/ObjectDetectionPipeline.java @@ -19,6 +19,7 @@ import java.util.List; import java.util.Optional; +import java.util.function.Function; import org.photonvision.common.configuration.NeuralNetworkModelManager; import org.photonvision.vision.frame.Frame; import org.photonvision.vision.frame.FrameThresholdType; @@ -32,6 +33,7 @@ import org.photonvision.vision.target.PotentialTarget; import org.photonvision.vision.target.TargetOrientation; import org.photonvision.vision.target.TrackedTarget; +import org.wpilib.math.geometry.Transform3d; public class ObjectDetectionPipeline extends CVPipeline { @@ -44,12 +46,13 @@ public class ObjectDetectionPipeline private static final FrameThresholdType PROCESSING_TYPE = FrameThresholdType.NONE; public ObjectDetectionPipeline() { - super(PROCESSING_TYPE); + super(PROCESSING_TYPE, (time) -> null); settings = new ObjectDetectionPipelineSettings(); } - public ObjectDetectionPipeline(ObjectDetectionPipelineSettings settings) { - super(PROCESSING_TYPE); + public ObjectDetectionPipeline( + ObjectDetectionPipelineSettings settings, Function robotToCameraSampler) { + super(PROCESSING_TYPE, robotToCameraSampler); this.settings = settings; } @@ -129,7 +132,13 @@ protected CVPipelineResult process(Frame frame, ObjectDetectionPipelineSettings var fps = fpsResult.output; return new CVPipelineResult( - frame.sequenceID, sumPipeNanosElapsed, fps, collect2dTargetsResult.output, frame, names); + frame.sequenceID, + sumPipeNanosElapsed, + fps, + collect2dTargetsResult.output, + frame, + names, + Optional.ofNullable(robotToCameraSampler.apply(frame.timestampNanos))); } @Override diff --git a/photon-core/src/main/java/org/photonvision/vision/pipeline/ReflectivePipeline.java 
b/photon-core/src/main/java/org/photonvision/vision/pipeline/ReflectivePipeline.java index 0d78d978fb..522a7ecf32 100644 --- a/photon-core/src/main/java/org/photonvision/vision/pipeline/ReflectivePipeline.java +++ b/photon-core/src/main/java/org/photonvision/vision/pipeline/ReflectivePipeline.java @@ -18,6 +18,8 @@ package org.photonvision.vision.pipeline; import java.util.List; +import java.util.Optional; +import java.util.function.Function; import org.photonvision.vision.frame.Frame; import org.photonvision.vision.frame.FrameThresholdType; import org.photonvision.vision.opencv.Contour; @@ -28,6 +30,7 @@ import org.photonvision.vision.target.PotentialTarget; import org.photonvision.vision.target.TargetOrientation; import org.photonvision.vision.target.TrackedTarget; +import org.wpilib.math.geometry.Transform3d; /** Represents a pipeline for tracking retro-reflective targets. */ public class ReflectivePipeline extends CVPipeline { @@ -46,12 +49,13 @@ public class ReflectivePipeline extends CVPipeline null); settings = new ReflectivePipelineSettings(); } - public ReflectivePipeline(ReflectivePipelineSettings settings) { - super(PROCESSING_TYPE); + public ReflectivePipeline( + ReflectivePipelineSettings settings, Function robotToCameraSampler) { + super(PROCESSING_TYPE, robotToCameraSampler); this.settings = settings; } @@ -159,6 +163,12 @@ public CVPipelineResult process(Frame frame, ReflectivePipelineSettings settings PipelineProfiler.printReflectiveProfile(pipeProfileNanos); - return new CVPipelineResult(frame.sequenceID, sumPipeNanosElapsed, fps, targetList, frame); + return new CVPipelineResult( + frame.sequenceID, + sumPipeNanosElapsed, + fps, + targetList, + frame, + Optional.ofNullable(robotToCameraSampler.apply(frame.timestampNanos))); } } diff --git a/photon-core/src/main/java/org/photonvision/vision/pipeline/result/CVPipelineResult.java b/photon-core/src/main/java/org/photonvision/vision/pipeline/result/CVPipelineResult.java index 7867f27ed5..4c308f14aa 
100644 --- a/photon-core/src/main/java/org/photonvision/vision/pipeline/result/CVPipelineResult.java +++ b/photon-core/src/main/java/org/photonvision/vision/pipeline/result/CVPipelineResult.java @@ -25,6 +25,7 @@ import org.photonvision.vision.frame.Frame; import org.photonvision.vision.opencv.Releasable; import org.photonvision.vision.target.TrackedTarget; +import org.wpilib.math.geometry.Transform3d; public class CVPipelineResult implements Releasable { public final long sequenceID; @@ -35,6 +36,7 @@ public class CVPipelineResult implements Releasable { public final Frame inputAndOutputFrame; public Optional multiTagResult; public final List objectDetectionClassNames; + public Optional robotToCamera; public CVPipelineResult( long sequenceID, @@ -45,6 +47,24 @@ public CVPipelineResult( this(sequenceID, processingNanos, fps, targets, Optional.empty(), inputFrame, List.of()); } + public CVPipelineResult( + long sequenceID, + double processingNanos, + double fps, + List targets, + Frame inputFrame, + Optional robotToCamera) { + this( + sequenceID, + processingNanos, + fps, + targets, + Optional.empty(), + inputFrame, + List.of(), + robotToCamera); + } + public CVPipelineResult( long sequenceID, double processingNanos, @@ -55,6 +75,25 @@ public CVPipelineResult( this(sequenceID, processingNanos, fps, targets, Optional.empty(), inputFrame, classNames); } + public CVPipelineResult( + long sequenceID, + double processingNanos, + double fps, + List targets, + Frame inputFrame, + List classNames, + Optional robotToCamera) { + this( + sequenceID, + processingNanos, + fps, + targets, + Optional.empty(), + inputFrame, + classNames, + robotToCamera); + } + public CVPipelineResult( long sequenceID, double processingNanos, @@ -73,12 +112,52 @@ public CVPipelineResult( Optional multiTagResult, Frame inputFrame, List classNames) { + this( + sequenceID, + processingNanos, + fps, + targets, + multiTagResult, + inputFrame, + classNames, + Optional.empty()); + } + + public 
CVPipelineResult( + long sequenceID, + double processingNanos, + double fps, + List targets, + Optional multiTagResult, + Frame inputFrame, + Optional robotToCamera) { + this( + sequenceID, + processingNanos, + fps, + targets, + multiTagResult, + inputFrame, + List.of(), + robotToCamera); + } + + public CVPipelineResult( + long sequenceID, + double processingNanos, + double fps, + List targets, + Optional multiTagResult, + Frame inputFrame, + List classNames, + Optional robotToCamera) { this.sequenceID = sequenceID; this.processingNanos = processingNanos; this.fps = fps; this.targets = targets != null ? targets : Collections.emptyList(); this.multiTagResult = multiTagResult; this.objectDetectionClassNames = classNames; + this.robotToCamera = robotToCamera; this.inputAndOutputFrame = inputFrame; } diff --git a/photon-core/src/main/java/org/photonvision/vision/processes/PipelineManager.java b/photon-core/src/main/java/org/photonvision/vision/processes/PipelineManager.java index 02ecc509a2..989cdd5ef7 100644 --- a/photon-core/src/main/java/org/photonvision/vision/processes/PipelineManager.java +++ b/photon-core/src/main/java/org/photonvision/vision/processes/PipelineManager.java @@ -22,6 +22,7 @@ import java.util.Arrays; import java.util.Comparator; import java.util.List; +import java.util.function.Function; import org.photonvision.common.configuration.CameraConfiguration; import org.photonvision.common.configuration.ConfigManager; import org.photonvision.common.dataflow.DataChangeService; @@ -30,6 +31,7 @@ import org.photonvision.common.logging.LogGroup; import org.photonvision.common.logging.Logger; import org.photonvision.vision.pipeline.*; +import org.wpilib.math.geometry.Transform3d; @SuppressWarnings({"rawtypes", "unused"}) public class PipelineManager { @@ -43,6 +45,10 @@ public class PipelineManager { protected final Calibrate3dPipeline calibration3dPipeline; protected final FocusPipeline focusPipeline = new FocusPipeline(); protected final DriverModePipeline 
driverModePipeline = new DriverModePipeline(); + // This is a wrapper around the VisionModule's sampling method that automatically converts from + // the local integer-nanosecond + // time into the server double time expected by the VisionModule's sampler + protected final Function robotToCamSampler; /** Index of the currently active pipeline. Defaults to 0. */ private int currentPipelineIndex = DRIVERMODE_INDEX; @@ -58,13 +64,15 @@ public class PipelineManager { private int lastUserPipelineIdx; /** - * Creates a PipelineManager with a DriverModePipeline, a Calibration3dPipeline, and all provided - * pipelines. + * Creates a PipelineManager with a DriverModePipeline, a Calibration3dPipeline, a robotToCamera + * supplier, and all provided pipelines. */ PipelineManager( DriverModePipelineSettings driverSettings, List userPipelines, + Function robotToCamSampler, int defaultIndex) { + this.robotToCamSampler = robotToCamSampler; this.userPipelineSettings = new ArrayList<>(userPipelines); // This is to respect the default res idx for vendor cameras @@ -81,8 +89,20 @@ public class PipelineManager { updatePipelineFromRequested(); } - public PipelineManager(CameraConfiguration config) { - this(config.driveModeSettings, config.pipelineSettings, config.currentPipelineIndex); + PipelineManager( + DriverModePipelineSettings driverSettings, + List userPipelines, + int defaultIndex) { + this(driverSettings, userPipelines, (time) -> null, defaultIndex); + } + + public PipelineManager( + CameraConfiguration config, Function robotToCamSampler) { + this( + config.driveModeSettings, + config.pipelineSettings, + robotToCamSampler, + config.currentPipelineIndex); } /** @@ -245,26 +265,31 @@ private void recreateUserPipeline() { case Reflective -> { logger.debug("Creating Reflective pipeline"); currentUserPipeline = - new ReflectivePipeline((ReflectivePipelineSettings) desiredPipelineSettings); + new ReflectivePipeline( + (ReflectivePipelineSettings) desiredPipelineSettings, 
robotToCamSampler); } case ColoredShape -> { logger.debug("Creating ColoredShape pipeline"); currentUserPipeline = - new ColoredShapePipeline((ColoredShapePipelineSettings) desiredPipelineSettings); + new ColoredShapePipeline( + (ColoredShapePipelineSettings) desiredPipelineSettings, robotToCamSampler); } case AprilTag -> { logger.debug("Creating AprilTag pipeline"); currentUserPipeline = - new AprilTagPipeline((AprilTagPipelineSettings) desiredPipelineSettings); + new AprilTagPipeline( + (AprilTagPipelineSettings) desiredPipelineSettings, robotToCamSampler); } case Aruco -> { logger.debug("Creating ArUco Pipeline"); - currentUserPipeline = new ArucoPipeline((ArucoPipelineSettings) desiredPipelineSettings); + currentUserPipeline = + new ArucoPipeline((ArucoPipelineSettings) desiredPipelineSettings, robotToCamSampler); } case ObjectDetection -> { logger.debug("Creating ObjectDetection Pipeline"); currentUserPipeline = - new ObjectDetectionPipeline((ObjectDetectionPipelineSettings) desiredPipelineSettings); + new ObjectDetectionPipeline( + (ObjectDetectionPipelineSettings) desiredPipelineSettings, robotToCamSampler); } case Calib3d, DriverMode, FocusCamera -> {} } diff --git a/photon-core/src/main/java/org/photonvision/vision/processes/VisionModule.java b/photon-core/src/main/java/org/photonvision/vision/processes/VisionModule.java index d9afb714a1..6c21c395ff 100644 --- a/photon-core/src/main/java/org/photonvision/vision/processes/VisionModule.java +++ b/photon-core/src/main/java/org/photonvision/vision/processes/VisionModule.java @@ -31,6 +31,7 @@ import org.photonvision.common.dataflow.DataChangeService.SubscriberHandle; import org.photonvision.common.dataflow.events.OutgoingUIEvent; import org.photonvision.common.dataflow.networktables.NTDataPublisher; +import org.photonvision.common.dataflow.networktables.NetworkTablesManager; import org.photonvision.common.dataflow.statusLEDs.StatusLEDConsumer; import 
org.photonvision.common.dataflow.websocket.UICameraConfiguration; import org.photonvision.common.dataflow.websocket.UIDataPublisher; @@ -54,6 +55,11 @@ import org.photonvision.vision.pipeline.result.CVPipelineResult; import org.photonvision.vision.target.TargetModel; import org.photonvision.vision.target.TrackedTarget; +import org.wpilib.math.geometry.Rotation3d; +import org.wpilib.math.geometry.Transform3d; +import org.wpilib.math.geometry.Translation3d; +import org.wpilib.math.interpolation.Interpolator; +import org.wpilib.math.interpolation.TimeInterpolatableBuffer; import org.wpilib.math.util.Units; import org.wpilib.vision.camera.CameraServerJNI; import org.wpilib.vision.camera.VideoException; @@ -97,13 +103,47 @@ public class VisionModule { boolean mismatch; - public VisionModule(PipelineManager pipelineManager, VisionSource visionSource) { + private static final double bufferDuration = 1.5; + + private static class Transform3dInterpolator implements Interpolator { + @Override + public Transform3d interpolate(Transform3d start, Transform3d end, double t) { + // Maybe clamp t here? 
TimeInterpolabaleBuffer already does so but IDK if thats a guarantee or + // just an implementation detail + + Translation3d p0 = start.getTranslation(); + Translation3d p1 = end.getTranslation(); + + Translation3d interpTranslation = + new Translation3d( + p0.getX() + (p1.getX() - p0.getX()) * t, + p0.getY() + (p1.getY() - p0.getY()) * t, + p0.getZ() + (p1.getZ() - p0.getZ()) * t); + + Rotation3d interpRotation = start.getRotation().interpolate(end.getRotation(), t); + + return new Transform3d(interpTranslation, interpRotation); + } + } + + private final TimeInterpolatableBuffer robotToCameraBuffer = + TimeInterpolatableBuffer.createBuffer(new Transform3dInterpolator(), bufferDuration); + + public VisionModule(VisionSource visionSource) { logger = new Logger( VisionModule.class, visionSource.getSettables().getConfiguration().nickname, LogGroup.VisionModule); + pipelineManager = + new PipelineManager( + visionSource.getCameraConfiguration(), + (localtime_ns) -> + getRobotToCameraSample( + (localtime_ns + NetworkTablesManager.getInstance().getOffset() * 1000) + / (double) 1e9)); + mismatch = false; cameraQuirks = visionSource.getCameraConfiguration().cameraQuirks; @@ -127,7 +167,6 @@ public VisionModule(PipelineManager pipelineManager, VisionSource visionSource) }); } - this.pipelineManager = pipelineManager; this.visionSource = visionSource; changeSubscriber = new VisionModuleChangeSubscriber(this); this.visionRunner = @@ -153,7 +192,8 @@ public VisionModule(PipelineManager pipelineManager, VisionSource visionSource) pipelineManager::getDriverMode, this::setDriverMode, this::getFPSLimit, - this::setFPSLimit); + this::setFPSLimit, + this::addRobotToCameraSample); uiDataConsumer = new UIDataPublisher(visionSource.getSettables().getConfiguration().uniqueName); statusLEDsConsumer = new StatusLEDConsumer(visionSource.getSettables().getConfiguration().uniqueName); @@ -635,6 +675,32 @@ public void setFPSLimit(int fps) { saveAndBroadcastAll(); } + /** + * Set camera 
transform for this vision module. + * + * @param time the time (in seconds) that the transform was published by the RIO, in server time + * @param robotToCameraTransform the transform from the robot's origin to the camera's origin, in + * the robot's coordinate system. This should be provided in meters. + */ + public void addRobotToCameraSample(double time, Transform3d robotToCameraTransform) { + synchronized (robotToCameraBuffer) { + robotToCameraBuffer.addSample(time, robotToCameraTransform); + } + } + + /** + * Get robot to camera transform for this vision module. + * + * @param time the time (in seconds) to sample + * @return the transform from the robot's origin to the camera's origin, in the robot's coordinate + * system, in meters. May return null if no transform is set. + */ + public Transform3d getRobotToCameraSample(double time) { + synchronized (robotToCameraBuffer) { + return robotToCameraBuffer.getSample(time).orElse(null); + } + } + /** * Get the current FPS limit for this vision module. This limit cannot be exceeded, but may be * lower depending on processing time. 
diff --git a/photon-core/src/main/java/org/photonvision/vision/processes/VisionModuleManager.java b/photon-core/src/main/java/org/photonvision/vision/processes/VisionModuleManager.java index 7eb3c86212..20f4d30c33 100644 --- a/photon-core/src/main/java/org/photonvision/vision/processes/VisionModuleManager.java +++ b/photon-core/src/main/java/org/photonvision/vision/processes/VisionModuleManager.java @@ -43,8 +43,7 @@ public VisionModule getModule(String uniqueName) { public synchronized VisionModule addSource(VisionSource visionSource) { visionSource.cameraConfiguration.streamIndex = newCameraIndex(); - var pipelineManager = new PipelineManager(visionSource.getCameraConfiguration()); - var module = new VisionModule(pipelineManager, visionSource); + var module = new VisionModule(visionSource); visionModules.add(module); return module; diff --git a/photon-docs/build.gradle b/photon-docs/build.gradle index e7ca8ece58..2e7bfc22d4 100644 --- a/photon-docs/build.gradle +++ b/photon-docs/build.gradle @@ -89,7 +89,7 @@ doxygen.sourceSets.main { tasks.register("zipCppDocs", Zip) { archiveBaseName = zipBaseNameCpp destinationDirectory = outputsFolder - dependsOn doxygen + dependsOn doxygenDox from ("$buildDir/docs/doxygen/html") into '/' } diff --git a/photon-lib/py/photonlibpy/generated/PhotonPipelineResultSerde.py b/photon-lib/py/photonlibpy/generated/PhotonPipelineResultSerde.py index 885ce2c963..4e1cfed47b 100644 --- a/photon-lib/py/photonlibpy/generated/PhotonPipelineResultSerde.py +++ b/photon-lib/py/photonlibpy/generated/PhotonPipelineResultSerde.py @@ -41,8 +41,8 @@ class PhotonPipelineResultSerde: # Message definition md5sum. 
See photon_packet.adoc for details - MESSAGE_VERSION = "4b2ff16a964b5e2bf04be0c1454d91c4" - MESSAGE_FORMAT = "PhotonPipelineMetadata:ac0a45f686457856fb30af77699ea356 metadata;PhotonTrackedTarget:cc6dbb5c5c1e0fa808108019b20863f1 targets[?];optional MultiTargetPNPResult:541096947e9f3ca2d3f425ff7b04aa7b multitagResult;" + MESSAGE_VERSION = "c3e6b96bad05f102560d0abcad50debc" + MESSAGE_FORMAT = "PhotonPipelineMetadata:ac0a45f686457856fb30af77699ea356 metadata;PhotonTrackedTarget:cc6dbb5c5c1e0fa808108019b20863f1 targets[?];optional MultiTargetPNPResult:541096947e9f3ca2d3f425ff7b04aa7b multitagResult;optional Transform3d robotToCamera;" @staticmethod def pack(value: "PhotonPipelineResult") -> "Packet": @@ -56,6 +56,8 @@ def pack(value: "PhotonPipelineResult") -> "Packet": # multitagResult is optional! it better not be a VLA too ret.encodeOptional(value.multitagResult, MultiTargetPNPResult.photonStruct) + + ret.encodeOptionalShimmed(value.robotToCamera, ret.encodeTransform) return ret @staticmethod @@ -71,6 +73,9 @@ def unpack(packet: "Packet") -> "PhotonPipelineResult": # multitagResult is optional! it better not be a VLA too ret.multitagResult = packet.decodeOptional(MultiTargetPNPResult.photonStruct) + # robotToCamera is optional and shimmed! 
+ ret.robotToCamera = packet.decodeOptionalShimmed(packet.decodeTransform) + return ret diff --git a/photon-lib/py/photonlibpy/packet.py b/photon-lib/py/photonlibpy/packet.py index 8aae95062e..5569d88c5f 100644 --- a/photon-lib/py/photonlibpy/packet.py +++ b/photon-lib/py/photonlibpy/packet.py @@ -16,7 +16,7 @@ ############################################################################### import struct -from typing import Generic, Optional, Protocol, TypeVar +from typing import Callable, Generic, Optional, Protocol, TypeVar import wpilib from wpimath.geometry import Quaternion, Rotation3d, Transform3d, Translation3d @@ -207,6 +207,12 @@ def decodeOptional(self, serde: Serde[T]) -> Optional[T]: else: return None + def decodeOptionalShimmed(self, shim: Callable[[], T]) -> Optional[T]: + if self.decodeBoolean(): + return shim() + else: + return None + def _encodeGeneric(self, packFormat, value): """ Append bytes to the packet data buffer. @@ -310,6 +316,16 @@ def encodeOptional(self, value: Optional[T], serde: Serde[T]): self.packetData = self.packetData + packed.getData() self.size = len(self.packetData) + def encodeOptionalShimmed(self, value: Optional[T], shim: Callable[[T], None]): + """ + Encodes an optional value using a specific shimmed serializer. 
+ """ + if value is None: + self.encodeBoolean(False) + else: + self.encodeBoolean(True) + shim(value) + def encodeBytes(self, value: bytes): self.packetData = self.packetData + value self.size = len(self.packetData) diff --git a/photon-lib/py/photonlibpy/photonCamera.py b/photon-lib/py/photonlibpy/photonCamera.py index 653afafa76..962776a71e 100644 --- a/photon-lib/py/photonlibpy/photonCamera.py +++ b/photon-lib/py/photonlibpy/photonCamera.py @@ -16,7 +16,7 @@ ############################################################################### from enum import Enum -from typing import List +from typing import List, Optional import hal import ntcore @@ -25,6 +25,7 @@ import photonlibpy.generated # noqa import wpilib from wpilib import RobotController, Timer +from wpilib.geometry import Transform3d from .packet import Packet from .targeting.photonPipelineResult import PhotonPipelineResult @@ -51,7 +52,7 @@ def setVersionCheckEnabled(enabled: bool): class PhotonCamera: instance_count = 1 - def __init__(self, cameraName: str): + def __init__(self, cameraName: str, robotToCamera: Transform3d): """Constructs a PhotonCamera from the name of the camera. :param cameraName: The nickname of the camera (found in the PhotonVision UI). 
@@ -67,7 +68,9 @@ def __init__(self, cameraName: str): bytes([]), ntcore.PubSubOptions(periodic=0.01, sendAll=True), ) - + self._robotToCameraPublisher = self._cameraTable.getStructTopic( + "robotToCamera", Transform3d + ).publish(ntcore.PubSubOptions(periodic=0.01)) self._driverModePublisher = self._cameraTable.getBooleanTopic( "driverModeRequest" ).publish() @@ -111,6 +114,9 @@ def __init__(self, cameraName: str): instance, ["/photonvision/"], ntcore.PubSubOptions(topicsOnly=True) ) + self._robotToCamera = robotToCamera + self._robotToCameraPublisher.set(robotToCamera) + self._prevHeartbeat = 0 self._prevHeartbeatChangeTime = Timer.getFPGATimestamp() @@ -177,6 +183,20 @@ def getLatestResult(self) -> PhotonPipelineResult: retVal.ntReceiveTimestampMicros = now return retVal + def getRobotToCamera(self) -> Optional[Transform3d]: + """ + returns the robot to camera transform, or None if not present + + :returns: the robot to camera transform, or None if not present""" + return self._robotToCamera + + def setRobotToCamera(self, robotToCamera: Transform3d) -> None: + """ + Set the robotToCamera, and publish to NT + """ + self._robotToCamera = robotToCamera + self._robotToCameraPublisher.set(robotToCamera) + def getDriverMode(self) -> bool: """Returns whether the camera is in driver mode. diff --git a/photon-lib/py/photonlibpy/photonPoseEstimator.py b/photon-lib/py/photonlibpy/photonPoseEstimator.py index f487e5523d..7ccf7f25d6 100644 --- a/photon-lib/py/photonlibpy/photonPoseEstimator.py +++ b/photon-lib/py/photonlibpy/photonPoseEstimator.py @@ -26,7 +26,6 @@ Pose3d, Rotation2d, Rotation3d, - Transform3d, Translation2d, Translation3d, ) @@ -48,7 +47,6 @@ class PhotonPoseEstimator: def __init__( self, fieldTags: AprilTagFieldLayout, - robotToCamera: Transform3d, ): """Create a new PhotonPoseEstimator. :param fieldTags: A WPILib AprilTagFieldLayout linking AprilTag IDs to Pose3d objects with respect to the FIRST field using the Field Coordinate System. Note that setting the origin of this layout object will affect the results from this class. 
- :param robotToCamera: Transform3d from the center of the robot to the camera mount position (i.e., - robot ➔ camera) in the Robot Coordinate System. """ self._fieldTags = fieldTags - self.robotToCamera = robotToCamera self._reportedErrors: set[int] = set() self._headingBuffer = TimeInterpolatableRotation2dBuffer(1) @@ -126,6 +121,9 @@ def _shouldEstimate(self, cameraResult: PhotonPipelineResult) -> bool: if cameraResult.getTimestampSeconds() < 0: return False + if cameraResult.robotToCamera is None: + return False + # If no targets seen, trivial case -- can't do estimation return len(cameraResult.targets) > 0 @@ -170,7 +168,7 @@ def estimatePnpDistanceTrigSolvePose( -wpimath.units.degreesToRadians(bestTarget.getYaw()), ), ) - .rotateBy(self.robotToCamera.rotation()) + .rotateBy(result.robotToCamera.rotation()) .toTranslation2d() .rotateBy(headingSample) ) @@ -179,7 +177,7 @@ def estimatePnpDistanceTrigSolvePose( tagPose.toPose2d().translation() - camToTagTranslation ) camToRobotTranslation: Translation2d = -( - self.robotToCamera.translation().toTranslation2d() + result.robotToCamera.translation().toTranslation2d() ) camToRobotTranslation = camToRobotTranslation.rotateBy(headingSample) robotPose = Pose2d( @@ -208,7 +206,7 @@ def estimateCoprocMultiTagPose( Pose3d() .transformBy(best_tf) # field-to-camera .relativeTo(self._fieldTags.getOrigin()) - .transformBy(self.robotToCamera.inverse()) # field-to-robot + .transformBy(result.robotToCamera.inverse()) # field-to-robot ) return EstimatedRobotPose( best, @@ -257,7 +255,7 @@ def estimateLowestAmbiguityPose( return EstimatedRobotPose( targetPosition.transformBy( lowestAmbiguityTarget.getBestCameraToTarget().inverse() - ).transformBy(result.robotToCamera.inverse()), result.getTimestampSeconds(), result.targets, ) diff --git a/photon-lib/py/photonlibpy/simulation/photonCameraSim.py b/photon-lib/py/photonlibpy/simulation/photonCameraSim.py index b804d3e0fa..3c2d029f3f 
100644 --- a/photon-lib/py/photonlibpy/simulation/photonCameraSim.py +++ b/photon-lib/py/photonlibpy/simulation/photonCameraSim.py @@ -446,6 +446,7 @@ def distance(target: VisionTargetSim): ), targets=detectableTgts, multitagResult=multiTagResults, + robotToCamera=self.cam.getRobotToCamera(), ) def submitProcessedFrame( diff --git a/photon-lib/py/photonlibpy/targeting/photonPipelineResult.py b/photon-lib/py/photonlibpy/targeting/photonPipelineResult.py index 24d5d709c0..d6171741df 100644 --- a/photon-lib/py/photonlibpy/targeting/photonPipelineResult.py +++ b/photon-lib/py/photonlibpy/targeting/photonPipelineResult.py @@ -1,6 +1,8 @@ from dataclasses import dataclass, field from typing import TYPE_CHECKING, ClassVar, Optional +from wpimath.geometry import Transform3d + from .multiTargetPNPResult import MultiTargetPNPResult from .photonTrackedTarget import PhotonTrackedTarget @@ -34,6 +36,7 @@ class PhotonPipelineResult: # an arbitrary timebase. This is not true in C++ or Java. metadata: PhotonPipelineMetadata = field(default_factory=PhotonPipelineMetadata) multitagResult: Optional[MultiTargetPNPResult] = None + robotToCamera: Optional[Transform3d] = None def getLatencyMillis(self) -> float: return ( diff --git a/photon-lib/py/test/photonPoseEstimator_test.py b/photon-lib/py/test/photonPoseEstimator_test.py index ad9693664b..8e88a5711f 100644 --- a/photon-lib/py/test/photonPoseEstimator_test.py +++ b/photon-lib/py/test/photonPoseEstimator_test.py @@ -34,8 +34,8 @@ class PhotonCameraInjector(PhotonCamera): result: PhotonPipelineResult - def __init__(self, cameraName="camera"): - super().__init__(cameraName) + def __init__(self, cameraName="camera", robotToCamera=Transform3d()): + super().__init__(cameraName, robotToCamera) def getLatestResult(self) -> PhotonPipelineResult: return self.result @@ -134,12 +134,10 @@ def test_lowestAmbiguityStrategy(): ], metadata=PhotonPipelineMetadata(0, int(2 * 1e3), 0), multitagResult=None, + robotToCamera=Transform3d(), ) - estimator 
= PhotonPoseEstimator( - aprilTags, - Transform3d(), - ) + estimator = PhotonPoseEstimator(aprilTags) estimatedPose = estimator.estimateLowestAmbiguityPose(cameraOne.result) @@ -177,14 +175,12 @@ def test_pnpDistanceTrigSolve(): ), ) - estimator = PhotonPoseEstimator( - aprilTags, - compoundTestTransform, - ) + estimator = PhotonPoseEstimator(aprilTags) + cameraOne.setRobotToCamera(compoundTestTransform) realPose = Pose3d(7.3, 4.42, 0, Rotation3d(0, 0, 2.197)) # Pose to compare with result = cameraOneSim.process( - latencySecs, realPose.transformBy(estimator.robotToCamera), simTargets + latencySecs, realPose.transformBy(cameraOne.getRobotToCamera()), simTargets ) bestTarget = result.getBestTarget() assert bestTarget is not None @@ -207,10 +203,10 @@ def test_pnpDistanceTrigSolve(): # Straight on fakeTimestampSecs += 60 straightOnTestTransform = Transform3d(0, 0, 3, Rotation3d()) - estimator.robotToCamera = straightOnTestTransform + cameraOne.setRobotToCamera(straightOnTestTransform) realPose = Pose3d(4.81, 2.38, 0, Rotation3d(0, 0, 2.818)) # Pose to compare with result = cameraOneSim.process( - latencySecs, realPose.transformBy(estimator.robotToCamera), simTargets + latencySecs, realPose.transformBy(cameraOne.getRobotToCamera()), simTargets ) bestTarget = result.getBestTarget() assert bestTarget is not None @@ -265,12 +261,10 @@ def test_multiTagOnCoprocStrategy(): multitagResult=MultiTargetPNPResult( PnpResult(Transform3d(1, 3, 2, Rotation3d())) ), + robotToCamera=Transform3d(), ) - estimator = PhotonPoseEstimator( - AprilTagFieldLayout(), - Transform3d(), - ) + estimator = PhotonPoseEstimator(AprilTagFieldLayout()) estimatedPose = estimator.estimateCoprocMultiTagPose(cameraOne.result) assert estimatedPose is not None diff --git a/photon-lib/src/main/java/org/photonvision/PhotonCamera.java b/photon-lib/src/main/java/org/photonvision/PhotonCamera.java index c16fa36f45..ec75948f12 100644 --- a/photon-lib/src/main/java/org/photonvision/PhotonCamera.java +++ 
b/photon-lib/src/main/java/org/photonvision/PhotonCamera.java @@ -36,6 +36,7 @@ import org.wpilib.driverstation.Alert; import org.wpilib.driverstation.DriverStation; import org.wpilib.hardware.hal.HAL; +import org.wpilib.math.geometry.Transform3d; import org.wpilib.math.linalg.MatBuilder; import org.wpilib.math.linalg.Matrix; import org.wpilib.math.numbers.*; @@ -51,6 +52,7 @@ import org.wpilib.networktables.NetworkTableInstance; import org.wpilib.networktables.PubSubOption; import org.wpilib.networktables.StringSubscriber; +import org.wpilib.networktables.StructPublisher; import org.wpilib.system.Timer; /** Represents a camera that is connected to PhotonVision. */ @@ -61,6 +63,8 @@ public class PhotonCamera implements AutoCloseable { private final NetworkTable cameraTable; PacketSubscriber resultSubscriber; + Optional robotToCamera; + StructPublisher robotToCameraPublisher; BooleanPublisher driverModePublisher; BooleanSubscriber driverModeSubscriber; IntegerPublisher fpsLimitPublisher; @@ -80,6 +84,7 @@ public void close() { resultSubscriber.close(); driverModePublisher.close(); driverModeSubscriber.close(); + robotToCameraPublisher.close(); fpsLimitPublisher.close(); fpsLimitSubscriber.close(); versionEntry.close(); @@ -124,6 +129,22 @@ public static void setVersionCheckEnabled(boolean enabled) { VERSION_CHECK_ENABLED = enabled; } + /** + * Constructs a PhotonCamera from a root table. + * + * @param instance The NetworkTableInstance to pull data from. This can be a custom instance in + * simulation, but should *usually* be the default NTInstance from + * NetworkTableInstance::getDefault + * @param cameraName The name of the camera, as seen in the UI. + * @param robotToCamera Transform3d from the center of the robot to the camera mount position (ie, + * robot ➔ camera) in the Robot + * Coordinate System. 
+ */ + public PhotonCamera(NetworkTableInstance instance, String cameraName, Transform3d robotToCamera) { + this(instance, cameraName, Optional.of(robotToCamera)); + } + /** * Constructs a PhotonCamera from a root table. * @@ -133,7 +154,14 @@ public static void setVersionCheckEnabled(boolean enabled) { * @param cameraName The name of the camera, as seen in the UI. */ public PhotonCamera(NetworkTableInstance instance, String cameraName) { + this(instance, cameraName, Optional.empty()); + } + + /** Internal implementation of the constructor */ + private PhotonCamera( + NetworkTableInstance instance, String cameraName, Optional robotToCamera) { name = cameraName; + this.robotToCamera = robotToCamera; disconnectAlert = new Alert( PHOTON_ALERT_GROUP, "PhotonCamera '" + name + "' is disconnected.", Alert.Level.MEDIUM); @@ -141,7 +169,7 @@ public PhotonCamera(NetworkTableInstance instance, String cameraName) { rootPhotonTable = instance.getTable(kTableName); this.cameraTable = rootPhotonTable.getSubTable(cameraName); path = cameraTable.getPath(); - var rawBytesEntry = + var rawBytesEntry_pipelineResult = cameraTable .getRawTopic("rawBytes") .subscribe( @@ -150,7 +178,12 @@ public PhotonCamera(NetworkTableInstance instance, String cameraName) { PubSubOption.periodic(0.01), PubSubOption.SEND_ALL, PubSubOption.pollStorage(20)); - resultSubscriber = new PacketSubscriber<>(rawBytesEntry, PhotonPipelineResult.photonStruct); + resultSubscriber = + new PacketSubscriber<>(rawBytesEntry_pipelineResult, PhotonPipelineResult.photonStruct); + robotToCameraPublisher = + cameraTable + .getStructTopic("robotToCamera", Transform3d.struct) + .publish(PubSubOption.periodic(0.01)); driverModePublisher = cameraTable.getBooleanTopic("driverModeRequest").publish(); driverModeSubscriber = cameraTable.getBooleanTopic("driverMode").subscribe(false); fpsLimitPublisher = cameraTable.getIntegerTopic("fpsLimitRequest").publish(); @@ -181,6 +214,10 @@ public PhotonCamera(NetworkTableInstance instance, 
String cameraName) { // HACK - check if things are compatible verifyDependencies(); + + if (robotToCamera.isPresent()) { + robotToCameraPublisher.set(robotToCamera.get()); + } } static void verifyDependencies() { @@ -241,6 +278,19 @@ public PhotonCamera(String cameraName) { this(NetworkTableInstance.getDefault(), cameraName); } + /** + * Constructs a PhotonCamera from the name of the camera and its transform from the robot. + * + * @param cameraName the name of the camera, as seen in the UI + * @param robotToCamera Transform3d from the center of the robot to the camera mount position (ie, + * robot ➔ camera) in the Robot + * Coordinate System. + */ + public PhotonCamera(String cameraName, Transform3d robotToCamera) { + this(NetworkTableInstance.getDefault(), cameraName, Optional.of(robotToCamera)); + } + /** * The list of pipeline results sent by PhotonVision since the last call to getAllUnreadResults(). * Calling this function clears the internal FIFO queue, and multiple calls to @@ -491,6 +541,28 @@ public final NetworkTable getCameraTable() { return cameraTable; } + /** + * Returns the camera's transform from the robot + * + * @return The camera's transform from the robot, if it is set. Empty otherwise. + */ + public Optional getRobotToCamera() { + return robotToCamera; + } + + /** + * Sets the camera's transform from the robot. This is used for pose estimation. + * + * @param robotToCamera Transform3d from the center of the robot to the camera mount position (ie, + * robot ➔ camera) in the Robot + * Coordinate System. 
+ */ + public void setRobotToCamera(Transform3d robotToCamera) { + this.robotToCamera = Optional.of(robotToCamera); + robotToCameraPublisher.set(robotToCamera); + } + void verifyVersion() { if (!VERSION_CHECK_ENABLED) return; diff --git a/photon-lib/src/main/java/org/photonvision/PhotonPoseEstimator.java b/photon-lib/src/main/java/org/photonvision/PhotonPoseEstimator.java index ea29a83cdb..1829d72350 100644 --- a/photon-lib/src/main/java/org/photonvision/PhotonPoseEstimator.java +++ b/photon-lib/src/main/java/org/photonvision/PhotonPoseEstimator.java @@ -35,7 +35,6 @@ import org.wpilib.math.geometry.Pose3d; import org.wpilib.math.geometry.Rotation2d; import org.wpilib.math.geometry.Rotation3d; -import org.wpilib.math.geometry.Transform3d; import org.wpilib.math.geometry.Translation2d; import org.wpilib.math.geometry.Translation3d; import org.wpilib.math.interpolation.TimeInterpolatableBuffer; @@ -122,7 +121,6 @@ public static final record ConstrainedSolvepnpParams( private AprilTagFieldLayout fieldTags; private TargetModel tagModel = TargetModel.kAprilTag36h11; - private Transform3d robotToCamera; private final Set reportedErrors = new HashSet<>(); private final TimeInterpolatableBuffer headingBuffer = @@ -132,18 +130,9 @@ public static final record ConstrainedSolvepnpParams( * Create a new PhotonPoseEstimator. * * @param fieldTags A WPILib {@link AprilTagFieldLayout} linking AprilTag IDs to Pose3d objects - * with respect to the FIRST field using the Field - * Coordinate System. Note that setting the origin of this layout object will affect the - * results from this class. - * @param robotToCamera Transform3d from the center of the robot to the camera mount position (ie, - * robot ➔ camera) in the Robot - * Coordinate System. 
*/ - public PhotonPoseEstimator(AprilTagFieldLayout fieldTags, Transform3d robotToCamera) { + public PhotonPoseEstimator(AprilTagFieldLayout fieldTags) { this.fieldTags = fieldTags; - this.robotToCamera = robotToCamera; HAL.reportUsage("PhotonVision/PhotonPoseEstimator", InstanceCount, ""); InstanceCount++; @@ -239,23 +228,6 @@ public void resetHeadingData(double timestampSeconds, Rotation2d heading) { addHeadingData(timestampSeconds, heading); } - /** - * @return The current transform from the center of the robot to the camera mount position - */ - public Transform3d getRobotToCameraTransform() { - return robotToCamera; - } - - /** - * Useful for pan and tilt mechanisms and such. - * - * @param robotToCamera The current transform from the center of the robot to the camera mount - * position - */ - public void setRobotToCameraTransform(Transform3d robotToCamera) { - this.robotToCamera = robotToCamera; - } - /** * @param cameraResult A pipeline result from the camera. * @return Whether or not pose estimation should be performed. 
@@ -266,6 +238,10 @@ private boolean shouldEstimate(PhotonPipelineResult cameraResult) { return false; } + if (cameraResult.getRobotToCamera().isEmpty()) { + return false; + } + // If no targets seen, trivial case -- can't do estimation return cameraResult.hasTargets(); } @@ -305,7 +281,7 @@ public Optional estimatePnpDistanceTrigSolvePose( 0, -Math.toRadians(bestTarget.getPitch()), -Math.toRadians(bestTarget.getYaw()))) - .rotateBy(robotToCamera.getRotation()) + .rotateBy(cameraResult.getRobotToCamera().get().getRotation()) .toTranslation2d() .rotateBy(headingSample); @@ -319,7 +295,13 @@ public Optional estimatePnpDistanceTrigSolvePose( tagPose2d.getTranslation().plus(camToTagTranslation.unaryMinus()); Translation2d camToRobotTranslation = - robotToCamera.getTranslation().toTranslation2d().unaryMinus().rotateBy(headingSample); + cameraResult + .getRobotToCamera() + .get() + .getTranslation() + .toTranslation2d() + .unaryMinus() + .rotateBy(headingSample); Pose2d robotPose = new Pose2d(fieldToCameraTranslation.plus(camToRobotTranslation), headingSample); @@ -383,7 +365,7 @@ public Optional estimateConstrainedSolvepnpPose( cameraMatrix, distCoeffs, cameraResult.getTargets(), - robotToCamera, + cameraResult.getRobotToCamera().get(), seedPose, fieldTags, tagModel, @@ -421,7 +403,7 @@ public Optional estimateCoprocMultiTagPose( Pose3d.kZero .plus(best_tf) // field-to-camera .relativeTo(fieldTags.getOrigin()) - .plus(robotToCamera.inverse()); // field-to-robot + .plus(cameraResult.getRobotToCamera().get().inverse()); // field-to-robot return Optional.of( new EstimatedRobotPose( best, @@ -455,7 +437,7 @@ public Optional estimateRioMultiTagPose( var best = Pose3d.kZero .plus(pnpResult.get().best) // field-to-camera - .plus(robotToCamera.inverse()); // field-to-robot + .plus(cameraResult.getRobotToCamera().get().inverse()); // field-to-robot return Optional.of( new EstimatedRobotPose( @@ -509,7 +491,7 @@ public Optional estimateLowestAmbiguityPose( targetPosition .get() 
.transformBy(lowestAmbiguityTarget.getBestCameraToTarget().inverse()) - .transformBy(robotToCamera.inverse()), + .transformBy(cameraResult.getRobotToCamera().get().inverse()), cameraResult.getTimestampSeconds(), cameraResult.getTargets(), PoseStrategy.LOWEST_AMBIGUITY)); @@ -548,14 +530,14 @@ public Optional estimateClosestToCameraHeightPose( double alternateTransformDelta = Math.abs( - robotToCamera.getZ() + cameraResult.getRobotToCamera().get().getZ() - targetPosition .get() .transformBy(target.getAlternateCameraToTarget().inverse()) .getZ()); double bestTransformDelta = Math.abs( - robotToCamera.getZ() + cameraResult.getRobotToCamera().get().getZ() - targetPosition .get() .transformBy(target.getBestCameraToTarget().inverse()) @@ -568,7 +550,7 @@ public Optional estimateClosestToCameraHeightPose( targetPosition .get() .transformBy(target.getAlternateCameraToTarget().inverse()) - .transformBy(robotToCamera.inverse()), + .transformBy(cameraResult.getRobotToCamera().get().inverse()), cameraResult.getTimestampSeconds(), cameraResult.getTargets(), PoseStrategy.CLOSEST_TO_CAMERA_HEIGHT); @@ -581,7 +563,7 @@ public Optional estimateClosestToCameraHeightPose( targetPosition .get() .transformBy(target.getBestCameraToTarget().inverse()) - .transformBy(robotToCamera.inverse()), + .transformBy(cameraResult.getRobotToCamera().get().inverse()), cameraResult.getTimestampSeconds(), cameraResult.getTargets(), PoseStrategy.CLOSEST_TO_CAMERA_HEIGHT); @@ -635,12 +617,12 @@ public Optional estimateClosestToReferencePose( targetPosition .get() .transformBy(target.getAlternateCameraToTarget().inverse()) - .transformBy(robotToCamera.inverse()); + .transformBy(cameraResult.getRobotToCamera().get().inverse()); Pose3d bestTransformPosition = targetPosition .get() .transformBy(target.getBestCameraToTarget().inverse()) - .transformBy(robotToCamera.inverse()); + .transformBy(cameraResult.getRobotToCamera().get().inverse()); double altDifference = Math.abs(calculateDifference(referencePose, 
altTransformPosition)); double bestDifference = Math.abs(calculateDifference(referencePose, bestTransformPosition)); @@ -706,7 +688,7 @@ public Optional estimateAverageBestTargetsPose( targetPosition .get() .transformBy(target.getBestCameraToTarget().inverse()) - .transformBy(robotToCamera.inverse()), + .transformBy(cameraResult.getRobotToCamera().get().inverse()), cameraResult.getTimestampSeconds(), cameraResult.getTargets(), PoseStrategy.AVERAGE_BEST_TARGETS)); @@ -720,7 +702,7 @@ public Optional estimateAverageBestTargetsPose( targetPosition .get() .transformBy(target.getBestCameraToTarget().inverse()) - .transformBy(robotToCamera.inverse()))); + .transformBy(cameraResult.getRobotToCamera().get().inverse()))); } // Take the average diff --git a/photon-lib/src/main/java/org/photonvision/simulation/PhotonCameraSim.java b/photon-lib/src/main/java/org/photonvision/simulation/PhotonCameraSim.java index 1b7eba56f3..3ae64c9edf 100644 --- a/photon-lib/src/main/java/org/photonvision/simulation/PhotonCameraSim.java +++ b/photon-lib/src/main/java/org/photonvision/simulation/PhotonCameraSim.java @@ -658,7 +658,8 @@ public PhotonPipelineResult process( // Pretend like we heard a pong recently 1000L + (long) ((Math.random() - 0.5) * 50), detectableTgts, - multitagResult); + multitagResult, + cam.getRobotToCamera()); return ret; } diff --git a/photon-lib/src/main/native/cpp/photon/PhotonCamera.cpp b/photon-lib/src/main/native/cpp/photon/PhotonCamera.cpp index cd69705185..0dce77f49b 100644 --- a/photon-lib/src/main/native/cpp/photon/PhotonCamera.cpp +++ b/photon-lib/src/main/native/cpp/photon/PhotonCamera.cpp @@ -123,7 +123,8 @@ static const std::string TYPE_STRING = std::string{SerdeType::GetSchemaHash()}; PhotonCamera::PhotonCamera(wpi::nt::NetworkTableInstance instance, - const std::string_view cameraName) + const std::string_view cameraName, + std::optional robotToCamera) : mainTable(instance.GetTable("photonvision")), rootTable(mainTable->GetSubTable(cameraName)), 
rawBytesEntry( @@ -131,6 +132,9 @@ PhotonCamera::PhotonCamera(wpi::nt::NetworkTableInstance instance, .Subscribe( TYPE_STRING, {}, {.pollStorage = 20, .periodic = 0.01, .sendAll = true})), + robotToCameraPublisher( + rootTable->GetStructTopic("robotToCamera") + .Publish({.periodic = 0.01})), inputSaveImgEntry( rootTable->GetIntegerTopic("inputSaveImgCmd").Publish()), inputSaveImgSubscriber( @@ -166,8 +170,11 @@ PhotonCamera::PhotonCamera(wpi::nt::NetworkTableInstance instance, std::string{"PhotonCamera '"} + std::string{cameraName} + "' is disconnected.", wpi::Alert::Level::MEDIUM), - timesyncAlert(PHOTON_ALERT_GROUP, "", wpi::Alert::Level::MEDIUM) { + timesyncAlert(PHOTON_ALERT_GROUP, "", wpi::Alert::Level::MEDIUM), + robotToCamera(robotToCamera) { verifyDependencies(); + if (robotToCamera.has_value()) + robotToCameraPublisher.Set(robotToCamera.value()); InstanceCount++; HAL_ReportUsage("PhotonVision/PhotonCamera", InstanceCount, ""); @@ -177,9 +184,23 @@ PhotonCamera::PhotonCamera(wpi::nt::NetworkTableInstance instance, InitTspServer(); } +PhotonCamera::PhotonCamera(wpi::nt::NetworkTableInstance instance, + const std::string_view cameraName) + : PhotonCamera(instance, cameraName, std::nullopt) {} + +PhotonCamera::PhotonCamera(wpi::nt::NetworkTableInstance instance, + const std::string_view cameraName, + const wpi::math::Transform3d robotToCamera) + : PhotonCamera(instance, cameraName, std::make_optional(robotToCamera)) {} + PhotonCamera::PhotonCamera(const std::string_view cameraName) : PhotonCamera(wpi::nt::NetworkTableInstance::GetDefault(), cameraName) {} +PhotonCamera::PhotonCamera(const std::string_view cameraName, + const wpi::math::Transform3d robotToCamera) + : PhotonCamera(wpi::nt::NetworkTableInstance::GetDefault(), cameraName, + std::make_optional(robotToCamera)) {} + PhotonPipelineResult PhotonCamera::GetLatestResult() { if (test) { if (testResult.size()) @@ -455,4 +476,9 @@ std::vector PhotonCamera::tablesThatLookLikePhotonCameras() { return ret; } 
+void PhotonCamera::SetRobotToCamera(wpi::math::Transform3d newRobotToCamera) { + robotToCamera = std::make_optional(newRobotToCamera); + robotToCameraPublisher.Set(robotToCamera.value()); +} + } // namespace photon diff --git a/photon-lib/src/main/native/cpp/photon/PhotonPoseEstimator.cpp b/photon-lib/src/main/native/cpp/photon/PhotonPoseEstimator.cpp index 53cde30887..9a2f017d75 100644 --- a/photon-lib/src/main/native/cpp/photon/PhotonPoseEstimator.cpp +++ b/photon-lib/src/main/native/cpp/photon/PhotonPoseEstimator.cpp @@ -63,10 +63,8 @@ cv::Point3d TagCornerToObjectPoint(wpi::units::meter_t cornerX, } // namespace detail PhotonPoseEstimator::PhotonPoseEstimator( - wpi::apriltag::AprilTagFieldLayout tags, - wpi::math::Transform3d robotToCamera) + wpi::apriltag::AprilTagFieldLayout tags) : aprilTags(tags), - m_robotToCamera(robotToCamera), headingBuffer( wpi::math::TimeInterpolatableBuffer(1_s)) { HAL_ReportUsage("PhotonVision/PhotonPoseEstimator", InstanceCount, ""); @@ -81,6 +79,13 @@ bool ShouldEstimate(const PhotonPipelineResult& result) { return false; } + // Result has no robot to camera transform -- can't do estimation + if (!result.GetRobotToCamera().has_value()) { + WPILIB_ReportError(wpi::warn::Warning, + "Result has no robot to camera transform!"); + return false; + } + // If no targets seen, trivial case -- can't do estimation return result.HasTargets(); } @@ -118,7 +123,7 @@ PhotonPoseEstimator::EstimateLowestAmbiguityPose( return EstimatedRobotPose{ fiducialPose->TransformBy(bestTarget.GetBestCameraToTarget().Inverse()) - .TransformBy(m_robotToCamera.Inverse()), + .TransformBy(cameraResult.GetRobotToCamera().value().Inverse()), cameraResult.GetTimestamp(), cameraResult.GetTargets(), LOWEST_AMBIGUITY}; } @@ -145,19 +150,19 @@ PhotonPoseEstimator::EstimateClosestToCameraHeightPose( wpi::math::Pose3d const targetPose = *fiducialPose; wpi::units::meter_t const alternativeDifference = wpi::units::math::abs( - m_robotToCamera.Z() - + 
cameraResult.GetRobotToCamera().value().Z() - targetPose.TransformBy(target.GetAlternateCameraToTarget().Inverse()) .Z()); wpi::units::meter_t const bestDifference = wpi::units::math::abs( - m_robotToCamera.Z() - + cameraResult.GetRobotToCamera().value().Z() - targetPose.TransformBy(target.GetBestCameraToTarget().Inverse()).Z()); if (alternativeDifference < smallestHeightDifference) { smallestHeightDifference = alternativeDifference; pose = EstimatedRobotPose{ targetPose.TransformBy(target.GetAlternateCameraToTarget().Inverse()) - .TransformBy(m_robotToCamera.Inverse()), + .TransformBy(cameraResult.GetRobotToCamera().value().Inverse()), cameraResult.GetTimestamp(), cameraResult.GetTargets(), CLOSEST_TO_CAMERA_HEIGHT}; } @@ -165,7 +170,7 @@ PhotonPoseEstimator::EstimateClosestToCameraHeightPose( smallestHeightDifference = bestDifference; pose = EstimatedRobotPose{ targetPose.TransformBy(target.GetBestCameraToTarget().Inverse()) - .TransformBy(m_robotToCamera.Inverse()), + .TransformBy(cameraResult.GetRobotToCamera().value().Inverse()), cameraResult.GetTimestamp(), cameraResult.GetTargets(), CLOSEST_TO_CAMERA_HEIGHT}; } @@ -199,10 +204,10 @@ PhotonPoseEstimator::EstimateClosestToReferencePose( const auto altPose = targetPose.TransformBy(target.GetAlternateCameraToTarget().Inverse()) - .TransformBy(m_robotToCamera.Inverse()); + .TransformBy(cameraResult.GetRobotToCamera().value().Inverse()); const auto bestPose = targetPose.TransformBy(target.GetBestCameraToTarget().Inverse()) - .TransformBy(m_robotToCamera.Inverse()); + .TransformBy(cameraResult.GetRobotToCamera().value().Inverse()); wpi::units::meter_t const alternativeDifference = wpi::units::math::abs( referencePose.Translation().Distance(altPose.Translation())); @@ -283,8 +288,8 @@ PhotonPoseEstimator::EstimateCoprocMultiTagPose( const auto field2camera = cameraResult.MultiTagResult()->estimatedPose.best; - const auto fieldToRobot = - wpi::math::Pose3d() + field2camera + m_robotToCamera.Inverse(); + const auto 
fieldToRobot = wpi::math::Pose3d() + field2camera + + cameraResult.GetRobotToCamera().value().Inverse(); return photon::EstimatedRobotPose(fieldToRobot, cameraResult.GetTimestamp(), cameraResult.GetTargets(), MULTI_TAG_PNP_ON_COPROCESSOR); @@ -341,8 +346,9 @@ std::optional PhotonPoseEstimator::EstimateRioMultiTagPose( const wpi::math::Pose3d pose = detail::ToPose3d(tvec, rvec); return photon::EstimatedRobotPose( - pose.TransformBy(m_robotToCamera.Inverse()), cameraResult.GetTimestamp(), - cameraResult.GetTargets(), MULTI_TAG_PNP_ON_RIO); + pose.TransformBy(cameraResult.GetRobotToCamera().value().Inverse()), + cameraResult.GetTimestamp(), cameraResult.GetTargets(), + MULTI_TAG_PNP_ON_RIO); } std::optional @@ -369,7 +375,7 @@ PhotonPoseEstimator::EstimatePnpDistanceTrigSolvePose( wpi::math::Rotation3d(0_rad, -wpi::units::degree_t(bestTarget.GetPitch()), -wpi::units::degree_t(bestTarget.GetYaw()))) - .RotateBy(m_robotToCamera.Rotation()) + .RotateBy(cameraResult.GetRobotToCamera().value().Rotation()) .ToTranslation2d() .RotateBy(headingSample); @@ -388,7 +394,7 @@ PhotonPoseEstimator::EstimatePnpDistanceTrigSolvePose( tagPose.Translation() - camToTagTranslation; wpi::math::Translation2d camToRobotTranslation = - (-m_robotToCamera.Translation().ToTranslation2d()) + (-cameraResult.GetRobotToCamera().value().Translation().ToTranslation2d()) .RotateBy(headingSample); wpi::math::Pose2d robotPose = wpi::math::Pose2d( @@ -426,7 +432,7 @@ PhotonPoseEstimator::EstimateAverageBestTargetsPose( if (target.GetPoseAmbiguity() == 0) { return EstimatedRobotPose{ targetPose.TransformBy(target.GetBestCameraToTarget().Inverse()) - .TransformBy(m_robotToCamera.Inverse()), + .TransformBy(cameraResult.GetRobotToCamera().value().Inverse()), cameraResult.GetTimestamp(), cameraResult.GetTargets(), AVERAGE_BEST_TARGETS}; } @@ -479,8 +485,9 @@ PhotonPoseEstimator::EstimateConstrainedSolvepnpPose( std::optional pnpResult = VisionEstimation::EstimateRobotPoseConstrainedSolvePNP( - cameraMatrix, 
distCoeffs, targets, m_robotToCamera, seedPose, - aprilTags, photon::kAprilTag36h11, headingFree, + cameraMatrix, distCoeffs, targets, + cameraResult.GetRobotToCamera().value(), seedPose, aprilTags, + photon::kAprilTag36h11, headingFree, wpi::math::Rotation2d{ headingBuffer.Sample(cameraResult.GetTimestamp()).value()}, headingScaleFactor); diff --git a/photon-lib/src/main/native/cpp/photon/simulation/PhotonCameraSim.cpp b/photon-lib/src/main/native/cpp/photon/simulation/PhotonCameraSim.cpp index d3f18b1c64..9e7bdc5e50 100644 --- a/photon-lib/src/main/native/cpp/photon/simulation/PhotonCameraSim.cpp +++ b/photon-lib/src/main/native/cpp/photon/simulation/PhotonCameraSim.cpp @@ -362,7 +362,7 @@ PhotonPipelineResult PhotonCameraSim::Process( PhotonPipelineMetadata{heartbeatCounter, 0, wpi::units::microsecond_t{latency}.to(), 1000000}, - detectableTgts, multiTagResults}; + detectableTgts, multiTagResults, cam->GetRobotToCamera()}; } void PhotonCameraSim::SubmitProcessedFrame(const PhotonPipelineResult& result) { SubmitProcessedFrame(result, wpi::nt::Now()); diff --git a/photon-lib/src/main/native/include/photon/PhotonCamera.h b/photon-lib/src/main/native/include/photon/PhotonCamera.h index 1e8a9ca738..7c4734bcab 100644 --- a/photon-lib/src/main/native/include/photon/PhotonCamera.h +++ b/photon-lib/src/main/native/include/photon/PhotonCamera.h @@ -29,6 +29,7 @@ #include #include +#include #include #include #include @@ -38,6 +39,7 @@ #include #include #include +#include #include #include "photon/targeting/PhotonPipelineResult.h" @@ -74,10 +76,53 @@ class PhotonCamera { */ explicit PhotonCamera(const std::string_view cameraName); + /** + * Constructs a PhotonCamera from the name of the camera and a robot to camera + * transform. + * @param cameraName The nickname of the camera (found in the PhotonVision + * UI). + * @param robotToCamera Transform3d from the center of the robot to the camera + * mount positions (ie, robot ➔ camera). 
+ */ + explicit PhotonCamera(const std::string_view cameraName, + wpi::math::Transform3d robotToCamera); + + /** + * Constructs a PhotonCamera from a root table. + * + * @param instance The NetworkTableInstance to pull data from. This can be a + * custom instance in simulation, but should *usually* be the default + * NTInstance from {@link NetworkTableInstance::getDefault} + * @param cameraName The name of the camera, as seen in the UI, and of the + * NetworkTable its data is sent over. + * @param robotToCamera Transform3d from the center of the robot to the camera + * mount positions (ie, robot ➔ camera). + */ + explicit PhotonCamera(wpi::nt::NetworkTableInstance instance, + const std::string_view cameraName, + wpi::math::Transform3d robotToCamera); + + PhotonCamera(PhotonCamera&&) = default; + + ~PhotonCamera() = default; + + /** + * Returns the robot to camera transform + * + * @return The transform from the robot's center to the camera, if it was set. + * Empty otherwise. + */ + std::optional GetRobotToCamera() { + return robotToCamera; + } + + /** + * Sets the robot to camera transform + * + * @param newRobotToCamera Transform3d from the center of the robot to the + * camera mount positions (ie, robot ➔ camera). + */ + void SetRobotToCamera(wpi::math::Transform3d newRobotToCamera); /** * The list of pipeline results sent by PhotonVision since the last call to * GetAllUnreadResults(). Calling this function clears the internal FIFO @@ -216,6 +261,7 @@ class PhotonCamera { std::shared_ptr mainTable; std::shared_ptr rootTable; wpi::nt::RawSubscriber rawBytesEntry; + wpi::nt::StructPublisher robotToCameraPublisher; wpi::nt::IntegerPublisher inputSaveImgEntry; wpi::nt::IntegerSubscriber inputSaveImgSubscriber; wpi::nt::IntegerPublisher outputSaveImgEntry; @@ -247,6 +293,12 @@ class PhotonCamera { wpi::Alert timesyncAlert; private: + /** + * Internal implementation of the constructor. 
+ */ + explicit PhotonCamera(wpi::nt::NetworkTableInstance instance, + const std::string_view cameraName, + std::optional robotToCamera); wpi::units::second_t lastVersionCheckTime = 0_s; static bool VERSION_CHECK_ENABLED; inline static int InstanceCount = 1; @@ -262,6 +314,7 @@ class PhotonCamera { void CheckTimeSyncOrWarn(photon::PhotonPipelineResult& result); std::vector tablesThatLookLikePhotonCameras(); + std::optional robotToCamera; }; } // namespace photon diff --git a/photon-lib/src/main/native/include/photon/PhotonPoseEstimator.h b/photon-lib/src/main/native/include/photon/PhotonPoseEstimator.h index a32b0b18ac..2fac96aa6a 100644 --- a/photon-lib/src/main/native/include/photon/PhotonPoseEstimator.h +++ b/photon-lib/src/main/native/include/photon/PhotonPoseEstimator.h @@ -91,11 +91,8 @@ class PhotonPoseEstimator { * * @param aprilTags A AprilTagFieldLayout linking AprilTag IDs to Pose3ds with * respect to the FIRST field. - * @param robotToCamera Transform3d from the center of the robot to the camera - * mount positions (ie, robot ➔ camera). */ - explicit PhotonPoseEstimator(wpi::apriltag::AprilTagFieldLayout aprilTags, - wpi::math::Transform3d robotToCamera); + explicit PhotonPoseEstimator(wpi::apriltag::AprilTagFieldLayout aprilTags); /** * Get the AprilTagFieldLayout being used by the PositionEstimator. @@ -106,24 +103,6 @@ class PhotonPoseEstimator { return aprilTags; } - /** - * @return The current transform from the center of the robot to the camera - * mount position. - */ - inline wpi::math::Transform3d GetRobotToCameraTransform() { - return m_robotToCamera; - } - - /** - * Useful for pan and tilt mechanisms, or cameras on turrets - * - * @param robotToCamera The current transform from the center of the robot to - * the camera mount position. - */ - inline void SetRobotToCameraTransform(wpi::math::Transform3d robotToCamera) { - m_robotToCamera = robotToCamera; - } - /** * Add robot heading data to the buffer. 
Must be called periodically for the * PNP_DISTANCE_TRIG_SOLVE strategy. @@ -307,8 +286,6 @@ class PhotonPoseEstimator { private: wpi::apriltag::AprilTagFieldLayout aprilTags; - wpi::math::Transform3d m_robotToCamera; - wpi::math::TimeInterpolatableBuffer headingBuffer; inline static int InstanceCount = 1; diff --git a/photon-lib/src/test/java/org/photonvision/PhotonPoseEstimatorTest.java b/photon-lib/src/test/java/org/photonvision/PhotonPoseEstimatorTest.java index 0411302d70..e329214dc0 100644 --- a/photon-lib/src/test/java/org/photonvision/PhotonPoseEstimatorTest.java +++ b/photon-lib/src/test/java/org/photonvision/PhotonPoseEstimatorTest.java @@ -161,9 +161,11 @@ void testLowestAmbiguityStrategy() { new TargetCorner(1, 2), new TargetCorner(3, 4), new TargetCorner(5, 6), - new TargetCorner(7, 8))))); + new TargetCorner(7, 8)))), + Optional.empty(), + Optional.of(new Transform3d())); - PhotonPoseEstimator estimator = new PhotonPoseEstimator(aprilTags, new Transform3d()); + PhotonPoseEstimator estimator = new PhotonPoseEstimator(aprilTags); Optional estimatedPose = estimator.estimateLowestAmbiguityPose(cameraOne.result); @@ -246,11 +248,11 @@ void testClosestToCameraHeightStrategy() { new TargetCorner(1, 2), new TargetCorner(3, 4), new TargetCorner(5, 6), - new TargetCorner(7, 8))))); + new TargetCorner(7, 8)))), + Optional.empty(), + Optional.of(new Transform3d(new Translation3d(0, 0, 4), new Rotation3d()))); - PhotonPoseEstimator estimator = - new PhotonPoseEstimator( - aprilTags, new Transform3d(new Translation3d(0, 0, 4), new Rotation3d())); + PhotonPoseEstimator estimator = new PhotonPoseEstimator(aprilTags); Optional estimatedPose = estimator.estimateClosestToCameraHeightPose(cameraOne.result); @@ -333,11 +335,11 @@ void closestToReferencePoseStrategy() { new TargetCorner(1, 2), new TargetCorner(3, 4), new TargetCorner(5, 6), - new TargetCorner(7, 8))))); + new TargetCorner(7, 8)))), + Optional.empty(), + Optional.of(new Transform3d(new Translation3d(0, 0, 
0), new Rotation3d()))); - PhotonPoseEstimator estimator = - new PhotonPoseEstimator( - aprilTags, new Transform3d(new Translation3d(0, 0, 0), new Rotation3d())); + PhotonPoseEstimator estimator = new PhotonPoseEstimator(aprilTags); Optional estimatedPose = estimator.estimateClosestToReferencePose( @@ -421,11 +423,11 @@ void closestToLastPose() { new TargetCorner(1, 2), new TargetCorner(3, 4), new TargetCorner(5, 6), - new TargetCorner(7, 8))))); + new TargetCorner(7, 8)))), + Optional.empty(), + Optional.of(new Transform3d(new Translation3d(0, 0, 0), new Rotation3d()))); - PhotonPoseEstimator estimator = - new PhotonPoseEstimator( - aprilTags, new Transform3d(new Translation3d(0, 0, 0), new Rotation3d())); + PhotonPoseEstimator estimator = new PhotonPoseEstimator(aprilTags); Optional estimatedPose = estimator.estimateClosestToReferencePose( @@ -501,7 +503,9 @@ void closestToLastPose() { new TargetCorner(1, 2), new TargetCorner(3, 4), new TargetCorner(5, 6), - new TargetCorner(7, 8))))); + new TargetCorner(7, 8)))), + Optional.empty(), + Optional.of(new Transform3d(new Translation3d(0, 0, 0), new Rotation3d()))); estimatedPose = estimator.estimateClosestToReferencePose(cameraOne.result, pose); pose = estimatedPose.get().estimatedPose; @@ -518,26 +522,23 @@ void pnpDistanceTrigSolve() { aprilTags.getTags().stream() .map((AprilTag x) -> new VisionTargetSim(x.pose, TargetModel.kAprilTag36h11, x.ID)) .toList(); + /* Compound Rolled + Pitched + Yaw */ + Transform3d compoundTestTransform = + new Transform3d( + -Units.inchesToMeters(12), + -Units.inchesToMeters(11), + 3, + new Rotation3d( + Units.degreesToRadians(37), Units.degreesToRadians(6), Units.degreesToRadians(60))); + cameraOne.setRobotToCamera(compoundTestTransform); try (PhotonCameraSim cameraOneSim = new PhotonCameraSim(cameraOne, SimCameraProperties.PERFECT_90DEG())) { - /* Compound Rolled + Pitched + Yaw */ - Transform3d compoundTestTransform = - new Transform3d( - -Units.inchesToMeters(12), - 
-Units.inchesToMeters(11), - 3, - new Rotation3d( - Units.degreesToRadians(37), - Units.degreesToRadians(6), - Units.degreesToRadians(60))); - - var estimator = new PhotonPoseEstimator(aprilTags, compoundTestTransform); + var estimator = new PhotonPoseEstimator(aprilTags); /* this is the real pose of the robot base we test against */ var realPose = new Pose3d(7.3, 4.42, 0, new Rotation3d(0, 0, 2.197)); PhotonPipelineResult result = - cameraOneSim.process( - 1, realPose.transformBy(estimator.getRobotToCameraTransform()), simTargets); + cameraOneSim.process(1, realPose.transformBy(compoundTestTransform), simTargets); var bestTarget = result.getBestTarget(); assertNotNull(bestTarget); assertEquals(0, bestTarget.fiducialId); @@ -553,13 +554,11 @@ void pnpDistanceTrigSolve() { /* Straight on */ Transform3d straightOnTestTransform = new Transform3d(0, 0, 3, Rotation3d.kZero); - estimator.setRobotToCameraTransform(straightOnTestTransform); + cameraOne.setRobotToCamera(straightOnTestTransform); /* Pose to compare with */ realPose = new Pose3d(4.81, 2.38, 0, new Rotation3d(0, 0, 2.818)); - result = - cameraOneSim.process( - 1, realPose.transformBy(estimator.getRobotToCameraTransform()), simTargets); + result = cameraOneSim.process(1, realPose.transformBy(straightOnTestTransform), simTargets); estimator.addHeadingData(result.getTimestampSeconds(), realPose.getRotation().toRotation2d()); estimatedPose = estimator.estimatePnpDistanceTrigSolvePose(result); @@ -642,11 +641,12 @@ void averageBestPoses() { new TargetCorner(1, 2), new TargetCorner(3, 4), new TargetCorner(5, 6), - new TargetCorner(7, 8))))); // 3 3 3 ambig .4 + new TargetCorner(7, 8)))), + Optional.empty(), + Optional.of( + new Transform3d(new Translation3d(0, 0, 0), new Rotation3d()))); // 3 3 3 ambig .4 - PhotonPoseEstimator estimator = - new PhotonPoseEstimator( - aprilTags, new Transform3d(new Translation3d(0, 0, 0), new Rotation3d())); + PhotonPoseEstimator estimator = new PhotonPoseEstimator(aprilTags); 
Optional estimatedPose = estimator.estimateAverageBestTargetsPose(cameraOne.result); @@ -709,8 +709,10 @@ void testMultiTagOnCoprocFallback() { new TargetCorner(1, 2), new TargetCorner(3, 4), new TargetCorner(5, 6), - new TargetCorner(7, 8))))); - PhotonPoseEstimator estimator = new PhotonPoseEstimator(aprilTags, Transform3d.kZero); + new TargetCorner(7, 8)))), + Optional.empty(), + Optional.of(Transform3d.kZero)); + PhotonPoseEstimator estimator = new PhotonPoseEstimator(aprilTags); Optional estimatedPose = estimator.estimateCoprocMultiTagPose(camera.result); @@ -725,6 +727,7 @@ void testMultiTagOnCoprocFallback() { assertEquals(1, pose.getX(), 1e-9); assertEquals(3, pose.getY(), 1e-9); assertEquals(2, pose.getZ(), 1e-9); + camera.close(); } @Test @@ -775,6 +778,10 @@ public void testConstrainedPnpOneTag() { new TargetCorner(127.17118732489361, 313.81406314178633), new TargetCorner(104.28543773760417, 309.6516557438994)); + final double camPitch = Units.degreesToRadians(30.0); + final Transform3d kRobotToCam = + new Transform3d(new Translation3d(0.5, 0.0, 0.5), new Rotation3d(0, -camPitch, 0)); + var result = new PhotonPipelineResult( new PhotonPipelineMetadata(10000, 2000, 1, 100), @@ -794,15 +801,11 @@ public void testConstrainedPnpOneTag() { -0.08413452932300695, 0.9130568172784148))), 0.1), - new ArrayList(8)))); - - final double camPitch = Units.degreesToRadians(30.0); - final Transform3d kRobotToCam = - new Transform3d(new Translation3d(0.5, 0.0, 0.5), new Rotation3d(0, -camPitch, 0)); + new ArrayList(8))), + Optional.of(kRobotToCam)); PhotonPoseEstimator estimator = - new PhotonPoseEstimator( - AprilTagFieldLayout.loadField(AprilTagFields.k2024Crescendo), kRobotToCam); + new PhotonPoseEstimator(AprilTagFieldLayout.loadField(AprilTagFields.k2024Crescendo)); var multiTagEstimate = estimator.estimateCoprocMultiTagPose(result); estimator.addHeadingData(result.getTimestampSeconds(), Rotation2d.kZero); diff --git 
a/photon-lib/src/test/native/cpp/PhotonCameraTest.cpp b/photon-lib/src/test/native/cpp/PhotonCameraTest.cpp index b704a3abc4..2991355fb0 100644 --- a/photon-lib/src/test/native/cpp/PhotonCameraTest.cpp +++ b/photon-lib/src/test/native/cpp/PhotonCameraTest.cpp @@ -109,7 +109,9 @@ TEST(PhotonCameraTest, Alerts) { // AND a result with a timeSinceLastPong in the past photon::PhotonPipelineMetadata metadata{3, 1, 2, 10 * 1000000}; photon::PhotonPipelineResult noPongResult{ - metadata, std::vector{}, std::nullopt}; + metadata, std::vector{}, std::nullopt, + std::make_optional(wpi::math::Transform3d{ + wpi::math::Translation3d(0_m, 0_m, 1_m), wpi::math::Rotation3d()})}; // Loop to hit cases past first iteration for (int i = 0; i < 10; i++) { diff --git a/photon-lib/src/test/native/cpp/PhotonPoseEstimatorTest.cpp b/photon-lib/src/test/native/cpp/PhotonPoseEstimatorTest.cpp index 342d9aa389..8f711b4ac5 100644 --- a/photon-lib/src/test/native/cpp/PhotonPoseEstimatorTest.cpp +++ b/photon-lib/src/test/native/cpp/PhotonPoseEstimatorTest.cpp @@ -89,10 +89,11 @@ TEST(PhotonPoseEstimatorTest, LowestAmbiguityStrategy) { cameraOne.test = true; cameraOne.testResult = {photon::PhotonPipelineResult{ - photon::PhotonPipelineMetadata{0, 0, 2000, 1000}, targets, std::nullopt}}; + photon::PhotonPipelineMetadata{0, 0, 2000, 1000}, targets, std::nullopt, + std::make_optional({})}}; cameraOne.testResult[0].SetReceiveTimestamp(wpi::units::second_t(11)); - photon::PhotonPoseEstimator estimator(aprilTags, wpi::math::Transform3d{}); + photon::PhotonPoseEstimator estimator(aprilTags); std::optional estimatedPose; for (const auto& result : cameraOne.GetAllUnreadResults()) { @@ -149,10 +150,11 @@ TEST(PhotonPoseEstimatorTest, ClosestToCameraHeightStrategy) { cameraOne.test = true; cameraOne.testResult = {photon::PhotonPipelineResult{ - photon::PhotonPipelineMetadata{0, 0, 2000, 1000}, targets, std::nullopt}}; + photon::PhotonPipelineMetadata{0, 0, 2000, 1000}, targets, std::nullopt, + 
std::make_optional({{0_m, 0_m, 4_m}, {}})}}; cameraOne.testResult[0].SetReceiveTimestamp(17_s); - photon::PhotonPoseEstimator estimator(aprilTags, {{0_m, 0_m, 4_m}, {}}); + photon::PhotonPoseEstimator estimator(aprilTags); std::optional estimatedPose; for (const auto& result : cameraOne.GetAllUnreadResults()) { @@ -197,10 +199,11 @@ TEST(PhotonPoseEstimatorTest, ClosestToReferencePoseStrategy) { cameraOne.test = true; cameraOne.testResult = {photon::PhotonPipelineResult{ - photon::PhotonPipelineMetadata{0, 0, 2000, 1000}, targets, std::nullopt}}; + photon::PhotonPipelineMetadata{0, 0, 2000, 1000}, targets, std::nullopt, + std::make_optional({})}}; cameraOne.testResult[0].SetReceiveTimestamp(wpi::units::second_t(17)); - photon::PhotonPoseEstimator estimator(aprilTags, {}); + photon::PhotonPoseEstimator estimator(aprilTags); std::optional estimatedPose; for (const auto& result : cameraOne.GetAllUnreadResults()) { @@ -247,10 +250,11 @@ TEST(PhotonPoseEstimatorTest, ClosestToLastPose) { cameraOne.test = true; cameraOne.testResult = {photon::PhotonPipelineResult{ - photon::PhotonPipelineMetadata{0, 0, 2000, 1000}, targets, std::nullopt}}; + photon::PhotonPipelineMetadata{0, 0, 2000, 1000}, targets, std::nullopt, + std::make_optional({})}}; cameraOne.testResult[0].SetReceiveTimestamp(wpi::units::second_t(17)); - photon::PhotonPoseEstimator estimator(aprilTags, {}); + photon::PhotonPoseEstimator estimator(aprilTags); std::optional estimatedPose; for (const auto& result : cameraOne.GetAllUnreadResults()) { @@ -287,7 +291,7 @@ TEST(PhotonPoseEstimatorTest, ClosestToLastPose) { cameraOne.testResult = {photon::PhotonPipelineResult{ photon::PhotonPipelineMetadata{0, 0, 2000, 1000}, targetsThree, - std::nullopt}}; + std::nullopt, std::make_optional({})}}; cameraOne.testResult[0].SetReceiveTimestamp(wpi::units::second_t(21)); for (const auto& result : cameraOne.GetAllUnreadResults()) { @@ -322,14 +326,15 @@ TEST(PhotonPoseEstimatorTest, PnpDistanceTrigSolve) { 
wpi::math::Transform3d compoundTestTransform = wpi::math::Transform3d( -12_in, -11_in, 3_m, wpi::math::Rotation3d(37_deg, 6_deg, 60_deg)); - photon::PhotonPoseEstimator estimator(aprilTags, compoundTestTransform); + photon::PhotonPoseEstimator estimator(aprilTags); + cameraOne.SetRobotToCamera(compoundTestTransform); /* real pose of the robot base to test against */ wpi::math::Pose3d realPose = wpi::math::Pose3d( 7.3_m, 4.42_m, 0_m, wpi::math::Rotation3d(0_rad, 0_rad, 2.197_rad)); photon::PhotonPipelineResult result = cameraOneSim.Process( - 1_ms, realPose.TransformBy(estimator.GetRobotToCameraTransform()), + 1_ms, realPose.TransformBy(cameraOne.GetRobotToCamera().value()), targets); cameraOne.testResult = {result}; cameraOne.testResult[0].SetReceiveTimestamp(17_s); @@ -355,11 +360,11 @@ TEST(PhotonPoseEstimatorTest, PnpDistanceTrigSolve) { wpi::math::Transform3d straightOnTestTransform = wpi::math::Transform3d( 0_m, 0_m, 3_m, wpi::math::Rotation3d(0_rad, 0_rad, 0_rad)); - estimator.SetRobotToCameraTransform(straightOnTestTransform); + cameraOne.SetRobotToCamera(straightOnTestTransform); realPose = wpi::math::Pose3d(4.81_m, 2.38_m, 0_m, wpi::math::Rotation3d(0_rad, 0_rad, 2.818_rad)); result = cameraOneSim.Process( - 1_ms, realPose.TransformBy(estimator.GetRobotToCameraTransform()), + 1_ms, realPose.TransformBy(cameraOne.GetRobotToCamera().value()), targets); cameraOne.testResult = {result}; cameraOne.testResult[0].SetReceiveTimestamp(18_s); @@ -410,10 +415,11 @@ TEST(PhotonPoseEstimatorTest, AverageBestPoses) { cameraOne.test = true; cameraOne.testResult = {photon::PhotonPipelineResult{ - photon::PhotonPipelineMetadata{0, 0, 2000, 1000}, targets, std::nullopt}}; + photon::PhotonPipelineMetadata{0, 0, 2000, 1000}, targets, std::nullopt, + std::make_optional({})}}; cameraOne.testResult[0].SetReceiveTimestamp(wpi::units::second_t(15)); - photon::PhotonPoseEstimator estimator(aprilTags, {}); + photon::PhotonPoseEstimator estimator(aprilTags); std::optional 
estimatedPose; for (const auto& result : cameraOne.GetAllUnreadResults()) { @@ -452,10 +458,11 @@ TEST(PhotonPoseEstimatorTest, MultiTagOnCoprocFallback) { cameraOne.test = true; cameraOne.testResult = {photon::PhotonPipelineResult{ - photon::PhotonPipelineMetadata{0, 0, 2000, 1000}, targets, std::nullopt}}; + photon::PhotonPipelineMetadata{0, 0, 2000, 1000}, targets, std::nullopt, + std::make_optional({})}}; cameraOne.testResult[0].SetReceiveTimestamp(wpi::units::second_t(11)); - photon::PhotonPoseEstimator estimator(aprilTags, wpi::math::Transform3d{}); + photon::PhotonPoseEstimator estimator(aprilTags); std::optional estimatedPose; for (const auto& result : cameraOne.GetAllUnreadResults()) { @@ -480,7 +487,9 @@ TEST(PhotonPoseEstimatorTest, CopyResult) { std::vector targets{}; auto testResult = photon::PhotonPipelineResult{ - photon::PhotonPipelineMetadata{0, 0, 2000, 1000}, targets, std::nullopt}; + photon::PhotonPipelineMetadata{0, 0, 2000, 1000}, targets, std::nullopt, + std::make_optional(wpi::math::Transform3d{ + wpi::math::Translation3d(0_m, 0_m, 1_m), wpi::math::Rotation3d()})}; testResult.SetReceiveTimestamp(wpi::units::second_t(11)); auto test2 = testResult; @@ -492,8 +501,7 @@ TEST(PhotonPoseEstimatorTest, CopyResult) { TEST(PhotonPoseEstimatorTest, ConstrainedPnpEmptyCase) { photon::PhotonPoseEstimator estimator( wpi::apriltag::AprilTagFieldLayout::LoadField( - wpi::apriltag::AprilTagField::k2024Crescendo), - wpi::math::Transform3d()); + wpi::apriltag::AprilTagField::k2024Crescendo)); photon::PhotonPipelineResult result; auto distortion = Eigen::VectorXd::Zero(8); @@ -534,23 +542,21 @@ TEST(PhotonPoseEstimatorTest, ConstrainedPnpOneTag) { photon::PnpResult{poseTransform, poseTransform, 0.1, 0.1, 0.0}, std::vector{8}); + const wpi::units::radian_t camPitch = 30_deg; + const wpi::math::Transform3d kRobotToCam{ + wpi::math::Translation3d(0.5_m, 0.0_m, 0.5_m), + wpi::math::Rotation3d(0_rad, -camPitch, 0_rad)}; photon::PhotonPipelineResult result{ 
photon::PhotonPipelineMetadata{1, 10000, 2000, 100}, targets, - multiTagResult}; + multiTagResult, std::make_optional(kRobotToCam)}; cameraOne.test = true; cameraOne.testResult = {result}; cameraOne.testResult[0].SetReceiveTimestamp(wpi::units::second_t(15)); - const wpi::units::radian_t camPitch = 30_deg; - const wpi::math::Transform3d kRobotToCam{ - wpi::math::Translation3d(0.5_m, 0.0_m, 0.5_m), - wpi::math::Rotation3d(0_rad, -camPitch, 0_rad)}; - photon::PhotonPoseEstimator estimator( wpi::apriltag::AprilTagFieldLayout::LoadField( - wpi::apriltag::AprilTagField::k2024Crescendo), - kRobotToCam); + wpi::apriltag::AprilTagField::k2024Crescendo)); auto estimatedMultiTagPose = estimator.EstimateCoprocMultiTagPose(cameraOne.testResult[0]); diff --git a/photon-serde/messages.yaml b/photon-serde/messages.yaml index 40cbc1436b..a922c9072e 100644 --- a/photon-serde/messages.yaml +++ b/photon-serde/messages.yaml @@ -15,7 +15,7 @@ java_decode_shim: PacketUtils.unpackTransform3d java_encode_shim: PacketUtils.packTransform3d cpp_type: wpi::math::Transform3d - cpp_include: "" + cpp_include: "" python_decode_shim: decodeTransform python_encode_shim: encodeTransform java_import: org.wpilib.math.geometry.Transform3d @@ -81,7 +81,6 @@ type: int16 vla: True - - name: PhotonPipelineResult fields: - name: metadata @@ -92,3 +91,6 @@ - name: multitagResult type: MultiTargetPNPResult optional: True + - name: robotToCamera + type: Transform3d + optional: True diff --git a/photon-serde/templates/Message.java.jinja b/photon-serde/templates/Message.java.jinja index 75983e6a51..48c0eda86b 100644 --- a/photon-serde/templates/Message.java.jinja +++ b/photon-serde/templates/Message.java.jinja @@ -33,6 +33,10 @@ import org.photonvision.utils.PacketUtils; // Assume that the base class lives here and we can import it import org.photonvision.targeting.*; +// Needed for optional shims +import java.util.function.BiConsumer; +import java.util.function.Function; + // WPILib imports (if any) import 
org.wpilib.util.struct.Struct; {% for type in nested_wpilib_types -%} @@ -57,12 +61,22 @@ public class {{ name }}Serde implements PacketSerde<{{name}}> { // TODO Auto-generated method stub throw new UnsupportedOperationException("Unimplemented method 'getMaxByteSize'"); } - +{% for field in fields -%} +{%- if field.type | is_shimmed and field.optional == True %} + private static BiConsumer {{ field.name }}_PSINTERNALencode_shim_callable = (packet, value) -> {{ get_message_by_name(field.type).java_encode_shim }}(packet, value); + private static Function {{ field.name }}_PSINTERNALdecode_shim_callable = (packet) -> {{ get_message_by_name(field.type).java_decode_shim }}(packet); +{% endif -%} +{% endfor%} @Override public void pack(Packet packet, {{ name }} value) { {%- for field in fields -%} {%- if field.type | is_shimmed %} + {%- if field.optional == True %} + // {{ field.name }} is optional and shimmed! + PacketUtils.packOptional(packet, value.{{ field.name }}, {{ field.name }}_PSINTERNALencode_shim_callable); + {%- else %} {{ get_message_by_name(field.type).java_encode_shim }}(packet, value.{{ field.name }}); + {%- endif %} {%- elif field.optional == True %} // {{ field.name }} is optional! it better not be a VLA too packet.encodeOptional(value.{{ field.name }}); @@ -89,7 +103,12 @@ public class {{ name }}Serde implements PacketSerde<{{name}}> { var ret = new {{ name }}(); {% for field in fields -%} {%- if field.type | is_shimmed %} + {%- if field.optional == True %} + // {{ field.name }} is optional and shimmed! + ret.{{ field.name }} = PacketUtils.unpackOptional(packet, {{ field.name }}_PSINTERNALdecode_shim_callable); + {%- else %} ret.{{ field.name }} = {{ get_message_by_name(field.type).java_decode_shim }}(packet); + {%- endif %} {%- elif field.optional == True %} // {{ field.name }} is optional! 
it better not be a VLA too ret.{{ field.name }} = packet.decodeOptional({{ field.type }}.photonStruct); diff --git a/photon-serde/templates/ThingSerde.py.jinja b/photon-serde/templates/ThingSerde.py.jinja index 324757a520..fb6c4b4d3e 100644 --- a/photon-serde/templates/ThingSerde.py.jinja +++ b/photon-serde/templates/ThingSerde.py.jinja @@ -55,7 +55,11 @@ class {{ name }}Serde: ret = Packet() {% for field in fields -%} {%- if field.type | is_shimmed %} + {%- if field.optional == True %} + ret.encodeOptionalShimmed(value.{{ field.name }}, ret.{{ get_message_by_name(field.type).python_encode_shim }}) + {%- else %} ret.{{ get_message_by_name(field.type).python_encode_shim}}(value.{{ field.name }}) + {%- endif %} {%- elif field.optional == True %} # {{ field.name }} is optional! it better not be a VLA too ret.encodeOptional(value.{{ field.name }}, {{ field.type }}.photonStruct) @@ -82,7 +86,12 @@ class {{ name }}Serde: ret = {{ name }}() {% for field in fields -%} {%- if field.type | is_shimmed %} +{%- if field.optional == True %} + # {{ field.name }} is optional and shimmed! + ret.{{ field.name }} = packet.decodeOptionalShimmed(packet.{{ get_message_by_name(field.type).python_decode_shim }}) +{%- else %} ret.{{ field.name }} = packet.{{ get_message_by_name(field.type).python_decode_shim }}() +{%- endif %} {%- elif field.optional == True %} # {{ field.name }} is optional! 
it better not be a VLA too ret.{{ field.name }} = packet.decodeOptional({{ field.type }}.photonStruct) diff --git a/photon-targeting/src/generated/main/java/org/photonvision/struct/MultiTargetPNPResultSerde.java b/photon-targeting/src/generated/main/java/org/photonvision/struct/MultiTargetPNPResultSerde.java index bf1c0696fe..c30411cc73 100644 --- a/photon-targeting/src/generated/main/java/org/photonvision/struct/MultiTargetPNPResultSerde.java +++ b/photon-targeting/src/generated/main/java/org/photonvision/struct/MultiTargetPNPResultSerde.java @@ -28,10 +28,15 @@ import org.photonvision.common.dataflow.structures.Packet; import org.photonvision.common.dataflow.structures.PacketSerde; +import org.photonvision.utils.PacketUtils; // Assume that the base class lives here and we can import it import org.photonvision.targeting.*; +// Needed for optional shims +import java.util.function.BiConsumer; +import java.util.function.Function; + // WPILib imports (if any) import org.wpilib.util.struct.Struct; diff --git a/photon-targeting/src/generated/main/java/org/photonvision/struct/PhotonPipelineMetadataSerde.java b/photon-targeting/src/generated/main/java/org/photonvision/struct/PhotonPipelineMetadataSerde.java index fb8e811dd9..7b1723f57a 100644 --- a/photon-targeting/src/generated/main/java/org/photonvision/struct/PhotonPipelineMetadataSerde.java +++ b/photon-targeting/src/generated/main/java/org/photonvision/struct/PhotonPipelineMetadataSerde.java @@ -28,10 +28,15 @@ import org.photonvision.common.dataflow.structures.Packet; import org.photonvision.common.dataflow.structures.PacketSerde; +import org.photonvision.utils.PacketUtils; // Assume that the base class lives here and we can import it import org.photonvision.targeting.*; +// Needed for optional shims +import java.util.function.BiConsumer; +import java.util.function.Function; + // WPILib imports (if any) import org.wpilib.util.struct.Struct; diff --git 
a/photon-targeting/src/generated/main/java/org/photonvision/struct/PhotonPipelineResultSerde.java b/photon-targeting/src/generated/main/java/org/photonvision/struct/PhotonPipelineResultSerde.java index ec0371fe2f..954b948d26 100644 --- a/photon-targeting/src/generated/main/java/org/photonvision/struct/PhotonPipelineResultSerde.java +++ b/photon-targeting/src/generated/main/java/org/photonvision/struct/PhotonPipelineResultSerde.java @@ -28,13 +28,18 @@ import org.photonvision.common.dataflow.structures.Packet; import org.photonvision.common.dataflow.structures.PacketSerde; +import org.photonvision.utils.PacketUtils; // Assume that the base class lives here and we can import it import org.photonvision.targeting.*; +// Needed for optional shims +import java.util.function.BiConsumer; +import java.util.function.Function; + // WPILib imports (if any) import org.wpilib.util.struct.Struct; - +import org.wpilib.math.geometry.Transform3d; /** * Auto-generated serialization/deserialization helper for PhotonPipelineResult @@ -42,9 +47,9 @@ public class PhotonPipelineResultSerde implements PacketSerde { @Override - public final String getInterfaceUUID() { return "4b2ff16a964b5e2bf04be0c1454d91c4"; } + public final String getInterfaceUUID() { return "c3e6b96bad05f102560d0abcad50debc"; } @Override - public final String getSchema() { return "PhotonPipelineMetadata:ac0a45f686457856fb30af77699ea356 metadata;PhotonTrackedTarget:cc6dbb5c5c1e0fa808108019b20863f1 targets[?];optional MultiTargetPNPResult:541096947e9f3ca2d3f425ff7b04aa7b multitagResult;"; } + public final String getSchema() { return "PhotonPipelineMetadata:ac0a45f686457856fb30af77699ea356 metadata;PhotonTrackedTarget:cc6dbb5c5c1e0fa808108019b20863f1 targets[?];optional MultiTargetPNPResult:541096947e9f3ca2d3f425ff7b04aa7b multitagResult;optional Transform3d robotToCamera;"; } @Override public final String getTypeName() { return "PhotonPipelineResult"; } @@ -54,6 +59,9 @@ public int getMaxByteSize() { throw new 
UnsupportedOperationException("Unimplemented method 'getMaxByteSize'"); } + private static BiConsumer robotToCamera_PSINTERNALencode_shim_callable = (packet, value) -> PacketUtils.packTransform3d(packet, value); + private static Function robotToCamera_PSINTERNALdecode_shim_callable = (packet) -> PacketUtils.unpackTransform3d(packet); + @Override public void pack(Packet packet, PhotonPipelineResult value) { // field metadata is of non-intrinsic type PhotonPipelineMetadata @@ -64,6 +72,9 @@ public void pack(Packet packet, PhotonPipelineResult value) { // multitagResult is optional! it better not be a VLA too packet.encodeOptional(value.multitagResult); + + // robotToCamera is optional and shimmed! + PacketUtils.packOptional(packet, value.robotToCamera, robotToCamera_PSINTERNALencode_shim_callable); } @Override @@ -79,20 +90,23 @@ public PhotonPipelineResult unpack(Packet packet) { // multitagResult is optional! it better not be a VLA too ret.multitagResult = packet.decodeOptional(MultiTargetPNPResult.photonStruct); + // robotToCamera is optional and shimmed! 
+ ret.robotToCamera = PacketUtils.unpackOptional(packet, robotToCamera_PSINTERNALdecode_shim_callable); + return ret; } @Override public PacketSerde[] getNestedPhotonMessages() { return new PacketSerde[] { - PhotonTrackedTarget.photonStruct,MultiTargetPNPResult.photonStruct,PhotonPipelineMetadata.photonStruct + MultiTargetPNPResult.photonStruct,PhotonTrackedTarget.photonStruct,PhotonPipelineMetadata.photonStruct }; } @Override public Struct[] getNestedWpilibMessages() { return new Struct[] { - + Transform3d.struct }; } } diff --git a/photon-targeting/src/generated/main/java/org/photonvision/struct/PhotonTrackedTargetSerde.java b/photon-targeting/src/generated/main/java/org/photonvision/struct/PhotonTrackedTargetSerde.java index 5d1c3a84bf..d941a3b1e3 100644 --- a/photon-targeting/src/generated/main/java/org/photonvision/struct/PhotonTrackedTargetSerde.java +++ b/photon-targeting/src/generated/main/java/org/photonvision/struct/PhotonTrackedTargetSerde.java @@ -33,6 +33,10 @@ // Assume that the base class lives here and we can import it import org.photonvision.targeting.*; +// Needed for optional shims +import java.util.function.BiConsumer; +import java.util.function.Function; + // WPILib imports (if any) import org.wpilib.util.struct.Struct; import org.wpilib.math.geometry.Transform3d; diff --git a/photon-targeting/src/generated/main/java/org/photonvision/struct/PnpResultSerde.java b/photon-targeting/src/generated/main/java/org/photonvision/struct/PnpResultSerde.java index 90f4b7a6f0..4a4b929e80 100644 --- a/photon-targeting/src/generated/main/java/org/photonvision/struct/PnpResultSerde.java +++ b/photon-targeting/src/generated/main/java/org/photonvision/struct/PnpResultSerde.java @@ -33,6 +33,10 @@ // Assume that the base class lives here and we can import it import org.photonvision.targeting.*; +// Needed for optional shims +import java.util.function.BiConsumer; +import java.util.function.Function; + // WPILib imports (if any) import org.wpilib.util.struct.Struct; 
import org.wpilib.math.geometry.Transform3d; diff --git a/photon-targeting/src/generated/main/java/org/photonvision/struct/TargetCornerSerde.java b/photon-targeting/src/generated/main/java/org/photonvision/struct/TargetCornerSerde.java index df42d7f46f..b0f5865174 100644 --- a/photon-targeting/src/generated/main/java/org/photonvision/struct/TargetCornerSerde.java +++ b/photon-targeting/src/generated/main/java/org/photonvision/struct/TargetCornerSerde.java @@ -28,10 +28,15 @@ import org.photonvision.common.dataflow.structures.Packet; import org.photonvision.common.dataflow.structures.PacketSerde; +import org.photonvision.utils.PacketUtils; // Assume that the base class lives here and we can import it import org.photonvision.targeting.*; +// Needed for optional shims +import java.util.function.BiConsumer; +import java.util.function.Function; + // WPILib imports (if any) import org.wpilib.util.struct.Struct; diff --git a/photon-targeting/src/generated/main/native/cpp/photon/serde/PhotonPipelineResultSerde.cpp b/photon-targeting/src/generated/main/native/cpp/photon/serde/PhotonPipelineResultSerde.cpp index eebb222826..17cbb7c634 100644 --- a/photon-targeting/src/generated/main/native/cpp/photon/serde/PhotonPipelineResultSerde.cpp +++ b/photon-targeting/src/generated/main/native/cpp/photon/serde/PhotonPipelineResultSerde.cpp @@ -34,6 +34,7 @@ void StructType::Pack(Packet& packet, const PhotonPipelineResult& value) { packet.Pack(value.metadata); packet.Pack>(value.targets); packet.Pack>(value.multitagResult); + packet.Pack>(value.robotToCamera); } PhotonPipelineResult StructType::Unpack(Packet& packet) { @@ -41,6 +42,7 @@ PhotonPipelineResult StructType::Unpack(Packet& packet) { .metadata = packet.Unpack(), .targets = packet.Unpack>(), .multitagResult = packet.Unpack>(), + .robotToCamera = packet.Unpack>(), }}; } diff --git a/photon-targeting/src/generated/main/native/include/photon/serde/PhotonPipelineResultSerde.h 
b/photon-targeting/src/generated/main/native/include/photon/serde/PhotonPipelineResultSerde.h index b64adbacf1..65f2cc3553 100644 --- a/photon-targeting/src/generated/main/native/include/photon/serde/PhotonPipelineResultSerde.h +++ b/photon-targeting/src/generated/main/native/include/photon/serde/PhotonPipelineResultSerde.h @@ -39,6 +39,7 @@ #include #include #include +#include namespace photon { @@ -46,11 +47,11 @@ namespace photon { template <> struct WPILIB_DLLEXPORT SerdeType { static constexpr std::string_view GetSchemaHash() { - return "4b2ff16a964b5e2bf04be0c1454d91c4"; + return "c3e6b96bad05f102560d0abcad50debc"; } static constexpr std::string_view GetSchema() { - return "PhotonPipelineMetadata:ac0a45f686457856fb30af77699ea356 metadata;PhotonTrackedTarget:cc6dbb5c5c1e0fa808108019b20863f1 targets[?];optional MultiTargetPNPResult:541096947e9f3ca2d3f425ff7b04aa7b multitagResult;"; + return "PhotonPipelineMetadata:ac0a45f686457856fb30af77699ea356 metadata;PhotonTrackedTarget:cc6dbb5c5c1e0fa808108019b20863f1 targets[?];optional MultiTargetPNPResult:541096947e9f3ca2d3f425ff7b04aa7b multitagResult;optional Transform3d robotToCamera;"; } static photon::PhotonPipelineResult Unpack(photon::Packet& packet); diff --git a/photon-targeting/src/generated/main/native/include/photon/struct/PhotonPipelineResultStruct.h b/photon-targeting/src/generated/main/native/include/photon/struct/PhotonPipelineResultStruct.h index 0d4fa4ac37..1ff6f9da24 100644 --- a/photon-targeting/src/generated/main/native/include/photon/struct/PhotonPipelineResultStruct.h +++ b/photon-targeting/src/generated/main/native/include/photon/struct/PhotonPipelineResultStruct.h @@ -33,6 +33,7 @@ #include #include #include +#include namespace photon { @@ -41,6 +42,7 @@ struct PhotonPipelineResult_PhotonStruct { photon::PhotonPipelineMetadata metadata; std::vector targets; std::optional multitagResult; + std::optional robotToCamera; friend bool operator==(PhotonPipelineResult_PhotonStruct const&, 
PhotonPipelineResult_PhotonStruct const&) = default; }; diff --git a/photon-targeting/src/main/java/org/photonvision/common/networktables/NTTopicSet.java b/photon-targeting/src/main/java/org/photonvision/common/networktables/NTTopicSet.java index 4e7354ce9d..819e6e2787 100644 --- a/photon-targeting/src/main/java/org/photonvision/common/networktables/NTTopicSet.java +++ b/photon-targeting/src/main/java/org/photonvision/common/networktables/NTTopicSet.java @@ -31,6 +31,8 @@ import org.wpilib.networktables.ProtobufPublisher; import org.wpilib.networktables.PubSubOption; import org.wpilib.networktables.StructPublisher; +import org.wpilib.networktables.StructSubscriber; +import org.wpilib.networktables.StructTopic; /** * This class is a wrapper around all per-pipeline NT topics that PhotonVision should be publishing @@ -78,6 +80,10 @@ public class NTTopicSet { public DoubleArrayPublisher cameraIntrinsicsPublisher; public DoubleArrayPublisher cameraDistortionPublisher; + // Robot-to-camera transform + public StructTopic robotToCameraTopic; + public StructSubscriber robotToCameraSubscriber; + public void updateEntries() { var rawBytesEntry = subTable @@ -127,6 +133,11 @@ public void updateEntries() { cameraIntrinsicsPublisher = subTable.getDoubleArrayTopic("cameraIntrinsics").publish(); cameraDistortionPublisher = subTable.getDoubleArrayTopic("cameraDistortion").publish(); + robotToCameraTopic = subTable.getStructTopic("robotToCamera", Transform3d.struct); + robotToCameraSubscriber = + subTable + .getStructTopic("robotToCamera", Transform3d.struct) + .subscribe(null, PubSubOption.periodic(0.01)); } @SuppressWarnings("DuplicatedCode") @@ -156,5 +167,6 @@ public void removeEntries() { if (cameraIntrinsicsPublisher != null) cameraIntrinsicsPublisher.close(); if (cameraDistortionPublisher != null) cameraDistortionPublisher.close(); + if (robotToCameraSubscriber != null) robotToCameraSubscriber.close(); } } diff --git
a/photon-targeting/src/main/java/org/photonvision/targeting/PhotonPipelineResult.java b/photon-targeting/src/main/java/org/photonvision/targeting/PhotonPipelineResult.java index 7945276d99..53bec91ec1 100644 --- a/photon-targeting/src/main/java/org/photonvision/targeting/PhotonPipelineResult.java +++ b/photon-targeting/src/main/java/org/photonvision/targeting/PhotonPipelineResult.java @@ -24,6 +24,7 @@ import org.photonvision.struct.PhotonPipelineResultSerde; import org.photonvision.targeting.proto.PhotonPipelineResultProto; import org.photonvision.targeting.serde.PhotonStructSerializable; +import org.wpilib.math.geometry.Transform3d; import org.wpilib.util.protobuf.ProtobufSerializable; /** Represents a pipeline result from a PhotonCamera. */ @@ -40,9 +41,12 @@ public class PhotonPipelineResult /** The multitag result, if using an AprilTag pipeline with Multi-Target Estimation enabled. */ public Optional multitagResult; + /** The transform from the robot to the camera. This is an optional value */ + public Optional robotToCamera; + /** Constructs an empty pipeline result. */ public PhotonPipelineResult() { - this(new PhotonPipelineMetadata(), List.of(), Optional.empty()); + this(new PhotonPipelineMetadata(), List.of(), Optional.empty(), Optional.empty()); } /** @@ -66,6 +70,7 @@ public PhotonPipelineResult( new PhotonPipelineMetadata( captureTimestampMicros, publishTimestampMicros, sequenceID, timeSinceLastPong), targets, + Optional.empty(), Optional.empty()); } @@ -92,16 +97,55 @@ public PhotonPipelineResult( new PhotonPipelineMetadata( captureTimestamp, publishTimestamp, sequenceID, timeSinceLastPong), targets, - result); + result, + Optional.empty()); } public PhotonPipelineResult( PhotonPipelineMetadata metadata, List targets, Optional result) { + this(metadata, targets, result, Optional.empty()); + } + + /** + * Constructs a pipeline result. 
+ * + * @param sequenceID The number of frames processed by this camera since boot + * @param captureTimestamp The time, in uS in the coprocessor's timebase, that the coprocessor + * captured the image this result contains the targeting info of + * @param publishTimestamp The time, in uS in the coprocessor's timebase, that the coprocessor + * published targeting info + * @param timeSinceLastPong The time since the last Time Sync Pong in uS. + * @param targets The list of targets identified by the pipeline. + * @param result Result from multi-target PNP. + * @param robotToCamera The transform from the robot to the camera. + */ + public PhotonPipelineResult( + long sequenceID, + long captureTimestamp, + long publishTimestamp, + long timeSinceLastPong, + List targets, + Optional result, + Optional robotToCamera) { + this( + new PhotonPipelineMetadata( + captureTimestamp, publishTimestamp, sequenceID, timeSinceLastPong), + targets, + result, + robotToCamera); + } + + public PhotonPipelineResult( + PhotonPipelineMetadata metadata, + List targets, + Optional result, + Optional robotToCamera) { this.metadata = metadata; this.targets.addAll(targets); this.multitagResult = result; + this.robotToCamera = robotToCamera; } /** @@ -167,6 +211,16 @@ public Optional getMultiTagResult() { return multitagResult; } + /** + * Return the robot to camera transform. Be sure to check {@code getRobotToCamera().isPresent()} + * before using the transform! + * + * @return The robot to camera transform. Empty if no transform is set. + */ + public Optional getRobotToCamera() { + return robotToCamera; + } + /** * Returns the estimated time the frame was taken, in the Time Sync Server's time base * (wpi::nt::Now). 
This is calculated using the estimated offset between Time Sync Server time and @@ -186,6 +240,8 @@ public String toString() { + targets + ", multitagResult=" + multitagResult + + ", robotToCamera=" + + robotToCamera + "]"; } @@ -214,6 +270,9 @@ public boolean equals(Object obj) { if (multitagResult == null) { if (other.multitagResult != null) return false; } else if (!multitagResult.equals(other.multitagResult)) return false; + if (robotToCamera == null) { + if (other.robotToCamera != null) return false; + } else if (!robotToCamera.equals(other.robotToCamera)) return false; return true; } diff --git a/photon-targeting/src/main/java/org/photonvision/targeting/proto/PhotonPipelineResultProto.java b/photon-targeting/src/main/java/org/photonvision/targeting/proto/PhotonPipelineResultProto.java index e1f2039c0a..64ba6f6729 100644 --- a/photon-targeting/src/main/java/org/photonvision/targeting/proto/PhotonPipelineResultProto.java +++ b/photon-targeting/src/main/java/org/photonvision/targeting/proto/PhotonPipelineResultProto.java @@ -22,6 +22,7 @@ import org.photonvision.targeting.MultiTargetPNPResult; import org.photonvision.targeting.PhotonPipelineResult; import org.photonvision.targeting.PhotonTrackedTarget; +import org.wpilib.math.geometry.Transform3d; import org.wpilib.util.protobuf.Protobuf; import us.hebi.quickbuf.Descriptors.Descriptor; @@ -52,7 +53,10 @@ public PhotonPipelineResult unpack(ProtobufPhotonPipelineResult msg) { PhotonTrackedTarget.proto.unpack(msg.getTargets()), msg.hasMultiTargetResult() ? Optional.of(MultiTargetPNPResult.proto.unpack(msg.getMultiTargetResult())) - : Optional.empty()); + : Optional.empty(), + msg.hasRobotToCamera() + ? 
Optional.of(Transform3d.proto.unpack(msg.getRobotToCamera())) + : Optional.empty()); } @Override @@ -66,6 +70,12 @@ public void pack(ProtobufPhotonPipelineResult msg, PhotonPipelineResult value) { msg.clearMultiTargetResult(); } + if (value.getRobotToCamera().isPresent()) { + Transform3d.proto.pack(msg.getMutableRobotToCamera(), value.getRobotToCamera().get()); + } else { + msg.clearRobotToCamera(); + } + msg.setSequenceId(value.metadata.getSequenceID()); msg.setCaptureTimestampMicros(value.metadata.getCaptureTimestampMicros()); msg.setNtPublishTimestampMicros(value.metadata.getPublishTimestampMicros()); diff --git a/photon-targeting/src/main/java/org/photonvision/utils/PacketUtils.java b/photon-targeting/src/main/java/org/photonvision/utils/PacketUtils.java index 501d753a24..f85df7607f 100644 --- a/photon-targeting/src/main/java/org/photonvision/utils/PacketUtils.java +++ b/photon-targeting/src/main/java/org/photonvision/utils/PacketUtils.java @@ -17,6 +17,9 @@ package org.photonvision.utils; +import java.util.Optional; +import java.util.function.BiConsumer; +import java.util.function.Function; import org.photonvision.common.dataflow.structures.Packet; import org.wpilib.math.geometry.*; @@ -114,4 +117,23 @@ public static void packPose3d(Packet packet, Pose3d pose) { public static Pose3d unpackPose3d(Packet packet) { return new Pose3d(unpackTranslation3d(packet), unpackRotation3d(packet)); } + + public static void packOptional( + Packet packet, Optional optional, BiConsumer packer) { + if (optional.isPresent()) { + packet.encode(true); + packer.accept(packet, optional.get()); + } else { + packet.encode(false); + } + } + + public static Optional unpackOptional(Packet packet, Function unpacker) { + boolean isPresent = packet.decodeBoolean(); + if (isPresent) { + return Optional.of(unpacker.apply(packet)); + } else { + return Optional.empty(); + } + } } diff --git a/photon-targeting/src/main/native/include/photon/dataflow/structures/Packet.h 
b/photon-targeting/src/main/native/include/photon/dataflow/structures/Packet.h index 840ee8300c..128465022b 100644 --- a/photon-targeting/src/main/native/include/photon/dataflow/structures/Packet.h +++ b/photon-targeting/src/main/native/include/photon/dataflow/structures/Packet.h @@ -30,6 +30,30 @@ namespace photon { class Packet; +template +struct optional_inner; + +template +struct optional_inner> { + using type = T; +}; + +template +using optional_inner_t = typename optional_inner>::type; + +template +struct is_optional : std::false_type {}; + +template +struct is_optional> : std::true_type {}; + +template +concept Optional = is_optional>::value; + +template +concept OptionalWPIStructSerializable = + Optional && wpi::util::StructSerializable, I...>; + // Struct is where all our actual ser/de methods are implemented template struct SerdeType {}; @@ -104,6 +128,19 @@ class Packet { writePos = newWritePos; } + // Support encoding optional wpi structs + template + requires OptionalWPIStructSerializable + inline void Pack(const std::optional>& value) { + using T = optional_inner_t; + if (value) { + this->Pack(1u); + this->Pack(*value); + } else { + this->Pack(0u); + } + } + template requires(PhotonStructSerializable) inline void Pack(const T& value) { @@ -120,6 +157,18 @@ class Packet { return ret; } + // Support decoding optional wpi structs + template + requires OptionalWPIStructSerializable + inline std::optional> Unpack() { + using T = optional_inner_t; + if (this->Unpack() == 0u) { + return std::nullopt; + } else { + return std::make_optional(this->Unpack()); + } + } + template requires(PhotonStructSerializable) inline T Unpack() { diff --git a/photon-targeting/src/main/native/include/photon/targeting/PhotonPipelineResult.h b/photon-targeting/src/main/native/include/photon/targeting/PhotonPipelineResult.h index 4cd9447e33..82e7b5bc1b 100644 --- a/photon-targeting/src/main/native/include/photon/targeting/PhotonPipelineResult.h +++ 
b/photon-targeting/src/main/native/include/photon/targeting/PhotonPipelineResult.h @@ -81,6 +81,10 @@ class PhotonPipelineResult : public PhotonPipelineResult_PhotonStruct { return HasTargets() ? targets[0] : PhotonTrackedTarget{}; } + std::optional GetRobotToCamera() const { + return robotToCamera; + } + /** * Returns the latency in the pipeline. * @return The latency in the pipeline. diff --git a/photon-targeting/src/main/proto/photon.proto b/photon-targeting/src/main/proto/photon.proto index dea75aab07..539c03042b 100644 --- a/photon-targeting/src/main/proto/photon.proto +++ b/photon-targeting/src/main/proto/photon.proto @@ -67,6 +67,8 @@ message ProtobufPhotonPipelineResult { int64 capture_timestamp_micros = 5; int64 nt_publish_timestamp_micros = 6; int64 time_since_last_pong_micros = 7; + + optional wpi.proto.ProtobufTransform3d robotToCamera = 8; } message ProtobufDeviceMetrics { diff --git a/photon-targeting/src/test/java/org/photonvision/PacketTest.java b/photon-targeting/src/test/java/org/photonvision/PacketTest.java index b531f54816..3491e4f6e8 100644 --- a/photon-targeting/src/test/java/org/photonvision/PacketTest.java +++ b/photon-targeting/src/test/java/org/photonvision/PacketTest.java @@ -155,7 +155,8 @@ void pipelineResultSerde() { new MultiTargetPNPResult( new PnpResult( new Transform3d(new Translation3d(1, 2, 3), new Rotation3d(1, 2, 3)), 0.1), - List.of((short) 1, (short) 2, (short) 3)))); + List.of((short) 1, (short) 2, (short) 3))), + Optional.of(new Transform3d(new Translation3d(1, 2, 3), new Rotation3d(1, 2, 3)))); var p3 = new Packet(10); PhotonPipelineResult.photonStruct.pack(p3, ret3); var unpackedRet3 = PhotonPipelineResult.photonStruct.unpack(p3); diff --git a/photon-targeting/src/test/java/org/photonvision/targeting/PhotonPipelineResultTest.java b/photon-targeting/src/test/java/org/photonvision/targeting/PhotonPipelineResultTest.java index b520f33ba5..5bc6e3a161 100644 --- 
a/photon-targeting/src/test/java/org/photonvision/targeting/PhotonPipelineResultTest.java +++ b/photon-targeting/src/test/java/org/photonvision/targeting/PhotonPipelineResultTest.java @@ -187,7 +187,8 @@ public void equalityTest() { new MultiTargetPNPResult( new PnpResult( new Transform3d(new Translation3d(1, 2, 3), new Rotation3d(1, 2, 3)), 0.1), - List.of((short) 1, (short) 2, (short) 3)))); + List.of((short) 1, (short) 2, (short) 3))), + Optional.of(new Transform3d(new Translation3d(1, 2, 3), new Rotation3d(1, 2, 3)))); b = new PhotonPipelineResult( 3, @@ -241,7 +242,8 @@ public void equalityTest() { new MultiTargetPNPResult( new PnpResult( new Transform3d(new Translation3d(1, 2, 3), new Rotation3d(1, 2, 3)), 0.1), - List.of((short) 1, (short) 2, (short) 3)))); + List.of((short) 1, (short) 2, (short) 3))), + Optional.of(new Transform3d(new Translation3d(1, 2, 3), new Rotation3d(1, 2, 3)))); assertEquals(a, b); } @@ -400,7 +402,8 @@ public void inequalityTest() { new MultiTargetPNPResult( new PnpResult( new Transform3d(new Translation3d(1, 8, 3), new Rotation3d(1, 2, 3)), 0.1), - List.of((short) 3, (short) 4, (short) 7)))); + List.of((short) 3, (short) 4, (short) 7))), + Optional.of(new Transform3d(new Translation3d(1, 2, 3), new Rotation3d(1, 2, 3)))); b = new PhotonPipelineResult( 3, @@ -454,7 +457,8 @@ public void inequalityTest() { new MultiTargetPNPResult( new PnpResult( new Transform3d(new Translation3d(1, 2, 3), new Rotation3d(1, 2, 3)), 0.1), - List.of((short) 1, (short) 2, (short) 3)))); + List.of((short) 1, (short) 2, (short) 3))), + Optional.of(new Transform3d(new Translation3d(1, 2, 3), new Rotation3d(1, 2, 3)))); assertNotEquals(a, b); } } diff --git a/photon-targeting/src/test/java/org/photonvision/targeting/proto/PhotonPipelineResultProtoTest.java b/photon-targeting/src/test/java/org/photonvision/targeting/proto/PhotonPipelineResultProtoTest.java index 80150e65ff..a7c0e00133 100644 --- 
a/photon-targeting/src/test/java/org/photonvision/targeting/proto/PhotonPipelineResultProtoTest.java +++ b/photon-targeting/src/test/java/org/photonvision/targeting/proto/PhotonPipelineResultProtoTest.java @@ -146,7 +146,8 @@ public void protobufTest() { new MultiTargetPNPResult( new PnpResult( new Transform3d(new Translation3d(1, 2, 3), new Rotation3d(1, 2, 3)), 0.1), - List.of((short) 1, (short) 2, (short) 3)))); + List.of((short) 1, (short) 2, (short) 3))), + Optional.of(new Transform3d(new Translation3d(1, 2, 3), new Rotation3d(1, 2, 3)))); serializedResult = PhotonPipelineResult.proto.createMessage(); PhotonPipelineResult.proto.pack(serializedResult, result); unpackedResult = PhotonPipelineResult.proto.unpack(serializedResult); diff --git a/photon-targeting/src/test/native/cpp/PacketTest.cpp b/photon-targeting/src/test/native/cpp/PacketTest.cpp index 923d19713c..e61f21a315 100644 --- a/photon-targeting/src/test/native/cpp/PacketTest.cpp +++ b/photon-targeting/src/test/native/cpp/PacketTest.cpp @@ -87,8 +87,11 @@ TEST(PacketTest, PnpResult) { // } TEST(PacketTest, PhotonPipelineResult) { - PhotonPipelineResult result(PhotonPipelineMetadata(0, 0, 1, 2), - std::vector{}, std::nullopt); + PhotonPipelineResult result( + PhotonPipelineMetadata(0, 0, 1, 2), std::vector{}, + std::nullopt, + std::make_optional(wpi::math::Transform3d{ + wpi::math::Translation3d(0_m, 0_m, 1_m), wpi::math::Rotation3d()})); Packet p; p.Pack(result); @@ -132,8 +135,10 @@ TEST(PacketTest, PhotonPipelineResult) { 17.0, 22.33, 2.54}, std::vector{8, 7, 11, 22, 59, 40}}; - PhotonPipelineResult result2(PhotonPipelineMetadata{0, 0, 1, 1}, targets, - mtResult); + PhotonPipelineResult result2( + PhotonPipelineMetadata{0, 0, 1, 1}, targets, mtResult, + std::make_optional(wpi::math::Transform3d{ + wpi::math::Translation3d(0_m, 0_m, 1_m), wpi::math::Rotation3d()})); Packet p2; auto t1 = std::chrono::steady_clock::now(); diff --git a/photonlib-cpp-examples/poseest/src/main/include/Vision.h 
b/photonlib-cpp-examples/poseest/src/main/include/Vision.h index ba2fa80803..cf48f859e9 100644 --- a/photonlib-cpp-examples/poseest/src/main/include/Vision.h +++ b/photonlib-cpp-examples/poseest/src/main/include/Vision.h @@ -147,9 +147,9 @@ class Vision { wpi::Field2d& GetSimDebugField() { return visionSim->GetDebugField(); } private: - photon::PhotonPoseEstimator photonEstimator{constants::Vision::kTagLayout, - constants::Vision::kRobotToCam}; - photon::PhotonCamera camera{constants::Vision::kCameraName}; + photon::PhotonPoseEstimator photonEstimator{constants::Vision::kTagLayout}; + photon::PhotonCamera camera{constants::Vision::kCameraName, + constants::Vision::kRobotToCam}; std::unique_ptr visionSim; std::unique_ptr cameraProp; std::shared_ptr cameraSim; diff --git a/photonlib-java-examples/poseest/src/main/java/frc/robot/Vision.java b/photonlib-java-examples/poseest/src/main/java/frc/robot/Vision.java index ef7118f54a..3abc66fe63 100644 --- a/photonlib-java-examples/poseest/src/main/java/frc/robot/Vision.java +++ b/photonlib-java-examples/poseest/src/main/java/frc/robot/Vision.java @@ -59,8 +59,8 @@ public class Vision { */ public Vision(EstimateConsumer estConsumer) { this.estConsumer = estConsumer; - camera = new PhotonCamera(kCameraName); - photonEstimator = new PhotonPoseEstimator(kTagLayout, kRobotToCam); + camera = new PhotonCamera(kCameraName, kRobotToCam); + photonEstimator = new PhotonPoseEstimator(kTagLayout); // ----- Simulation if (Robot.isSimulation()) {