UPLOAD DATA
...
Guides
More examples
Lidars and Cameras Sequence
4 min
A `LidarsAndCamerasSequence` consists of a sequence of camera images and lidar point clouds, where each frame consists of 1-12 camera images as well as 1-20 point clouds. For more documentation on what each field corresponds to in the `LidarsAndCamerasSequence` object, please check the Overview section.

```python
from __future__ import absolute_import

from datetime import datetime
from typing import Optional
from uuid import uuid4

import kognic.io.model.scene.lidars_and_cameras_sequence as LCSM
from examples.calibration.calibration import create_sensor_calibration
from kognic.io.client import KognicIOClient
from kognic.io.logger import setup_logging
from kognic.io.model import CreateSceneResponse, Image, PointCloud


def run(client: KognicIOClient, dryrun: bool = True, **kwargs) -> Optional[CreateSceneResponse]:
    print("Creating Lidar and Camera Sequence Scene...")

    lidar_sensor1 = "lidar"
    cam_sensor1 = "RFC01"
    cam_sensor2 = "RFC02"
    metadata = {"location-lat": 27.986065, "location-long": 86.922623, "vehicle_id": "abg"}

    # Create calibration
    # (Please refer to the API documentation about calibration for more details)
    calibration_spec = create_sensor_calibration(
        f"Collection {datetime.now()}",
        [lidar_sensor1],
        [cam_sensor1, cam_sensor2],
    )
    created_calibration = client.calibration.create_calibration(calibration_spec)

    scene = LCSM.LidarsAndCamerasSequence(
        external_id=f"LCS-example-{uuid4()}",
        frames=[
            LCSM.Frame(
                frame_id="1",
                relative_timestamp=0,
                point_clouds=[
                    PointCloud(
                        filename="./examples/resources/point_cloud_RFL01.las",
                        sensor_name=lidar_sensor1,
                    ),
                ],
                images=[
                    Image(
                        filename="./examples/resources/img_RFC01.jpg",
                        sensor_name=cam_sensor1,
                    ),
                    Image(
                        filename="./examples/resources/img_RFC02.jpg",
                        sensor_name=cam_sensor2,
                    ),
                ],
                metadata={"dut_status": "active"},
            ),
            LCSM.Frame(
                frame_id="2",
                relative_timestamp=100,
                point_clouds=[
                    PointCloud(
                        filename="./examples/resources/point_cloud_RFL02.las",
                        sensor_name=lidar_sensor1,
                    ),
                ],
                images=[
                    Image(
                        filename="./examples/resources/img_RFC11.jpg",
                        sensor_name=cam_sensor1,
                    ),
                    Image(
                        filename="./examples/resources/img_RFC12.jpg",
                        sensor_name=cam_sensor2,
                    ),
                ],
                metadata={"dut_status": "active"},
            ),
        ],
        calibration_id=created_calibration.id,
        metadata=metadata,
    )

    # Create scene
    return client.lidars_and_cameras_sequence.create(scene, dryrun=dryrun, **kwargs)


if __name__ == "__main__":
    setup_logging(level="INFO")
    client = KognicIOClient()

    # Project - available via `client.project.get_projects()`
    project = "<project-identifier>"
    run(client, project=project)
```

### Use dryrun to validate scene

Setting the `dryrun` parameter to `True` in the method call will validate the scene using the API, but not create it.

### Reuse calibration

Note that you can, and should, reuse the same calibration for multiple scenes if possible.

### Providing ego vehicle motion information

Ego vehicle motion (i.e. the position and rotation of the ego vehicle) is optional information that can be provided when creating `LidarsAndCamerasSeq`s. This information can enable a massive reduction in the time it takes to annotate static objects. Ego vehicle motion information is provided by passing an `EgoVehiclePose` object to each frame in the scene.

```python
from __future__ import absolute_import

from datetime import datetime
from typing import Optional
from uuid import uuid4

import kognic.io.model.scene.lidars_and_cameras_sequence as LCSM
from examples.calibration.calibration import create_sensor_calibration
from kognic.io.client import KognicIOClient
from kognic.io.logger import setup_logging
from kognic.io.model import CreateSceneResponse, EgoVehiclePose, Image, PointCloud, Position, RotationQuaternion


def run(client: KognicIOClient, dryrun: bool = True, **kwargs) -> Optional[CreateSceneResponse]:
    print("Creating Lidar and Camera Sequence Scene...")

    lidar_sensor1 = "lidar"
    cam_sensor1 = "RFC01"
    cam_sensor2 = "RFC02"
    cam_sensor3 = "RFC03"
    metadata = {"location-lat": 27.986065, "location-long": 86.922623, "vehicle_id": "abg"}

    # Create calibration
    calibration_spec = create_sensor_calibration(
        f"Collection {datetime.now()}", [lidar_sensor1], [cam_sensor1, cam_sensor2, cam_sensor3]
    )
    created_calibration = client.calibration.create_calibration(calibration_spec)

    scene = LCSM.LidarsAndCamerasSequence(
        external_id=f"LCS-full-example-{uuid4()}",
        frames=[
            LCSM.Frame(
                frame_id="1",
                relative_timestamp=0,
                point_clouds=[
                    PointCloud(filename="./examples/resources/point_cloud_RFL01.las", sensor_name=lidar_sensor1),
                ],
                images=[
                    Image(filename="./examples/resources/img_RFC01.jpg", sensor_name=cam_sensor1),
                    Image(filename="./examples/resources/img_RFC02.jpg", sensor_name=cam_sensor2),
                ],
                metadata={"dut_status": "active"},
                ego_vehicle_pose=EgoVehiclePose(
                    position=Position(x=1.0, y=1.0, z=1.0),
                    rotation=RotationQuaternion(w=0.01, x=1.01, y=1.01, z=1.01),
                ),
            ),
            LCSM.Frame(
                frame_id="2",
                relative_timestamp=500,
                point_clouds=[
                    PointCloud(filename="./examples/resources/point_cloud_RFL02.las", sensor_name=lidar_sensor1),
                ],
                images=[
                    Image(filename="./examples/resources/img_RFC11.jpg", sensor_name=cam_sensor1),
                    Image(filename="./examples/resources/img_RFC12.jpg", sensor_name=cam_sensor2),
                ],
                ego_vehicle_pose=EgoVehiclePose(
                    position=Position(x=2.0, y=2.0, z=2.0),
                    rotation=RotationQuaternion(w=0.01, x=2.01, y=2.01, z=2.01),
                ),
            ),
        ],
        calibration_id=created_calibration.id,
        metadata=metadata,
    )

    # Create scene
    return client.lidars_and_cameras_sequence.create(scene, dryrun=dryrun, **kwargs)


if __name__ == "__main__":
    setup_logging(level="INFO")
    client = KognicIOClient()

    # Project - available via `client.project.get_projects()`
    project = "<project-identifier>"
    run(client, project=project)
```

### Coordinate systems

Note that both `position` and `rotation` for the ego vehicle pose are with respect to the local coordinate system.

### Shutter timings

Shutter timings are optional metadata that may be provided when creating an `Image` within a frame. Timings are two values, shutter start and end timestamp in nanoseconds since unix epoch, and are specified for each image in each frame.

```python
from __future__ import absolute_import

import os.path
from datetime import datetime
from typing import Optional
from uuid import uuid4

import kognic.io.model.scene.lidars_and_cameras_sequence as LCSM
from examples.calibration.calibration import create_sensor_calibration
from examples.imu_data.create_imu_data import create_dummy_imu_data
from kognic.io.client import KognicIOClient
from kognic.io.logger import setup_logging
from kognic.io.model import CreateSceneResponse, Image, ImageMetadata, PointCloud


def run(client: KognicIOClient, dryrun: bool = True, **kwargs) -> Optional[CreateSceneResponse]:
    print("Creating Lidar and Camera Sequence Scene...")

    lidar_sensor1 = "RFL01"
    lidar_sensor2 = "RFL02"
    cam_sensor1 = "RFC01"
    cam_sensor2 = "RFC02"
    metadata = {"location-lat": 27.986065, "location-long": 86.922623, "vehicleId": "abg"}

    examples_path = os.path.dirname(__file__)

    # Create calibration
    calibration_spec = create_sensor_calibration(
        f"Collection {datetime.now()}", [lidar_sensor1, lidar_sensor2], [cam_sensor1, cam_sensor2]
    )
    created_calibration = client.calibration.create_calibration(calibration_spec)

    # Generate IMU data
    one_millisecond = 1000000  # one millisecond, expressed in nanos
    start_ts = 1648200140000000000
    end_ts = start_ts + 10 * one_millisecond
    imu_data = create_dummy_imu_data(start_timestamp=start_ts, end_timestamp=end_ts, samples_per_sec=1000)

    scene = LCSM.LidarsAndCamerasSequence(
        external_id=f"LCS-full-with-imu-and-shutter-example-{uuid4()}",
        frames=[
            LCSM.Frame(
                frame_id="1",
                unix_timestamp=start_ts + one_millisecond,
                relative_timestamp=0,
                point_clouds=[
                    PointCloud(filename=examples_path + "/resources/point_cloud_RFL01.csv", sensor_name=lidar_sensor1),
                    PointCloud(filename=examples_path + "/resources/point_cloud_RFL02.csv", sensor_name=lidar_sensor2),
                ],
                images=[
                    Image(
                        filename=examples_path + "/resources/img_RFC01.jpg",
                        sensor_name=cam_sensor1,
                        metadata=ImageMetadata(
                            shutter_time_start_ns=start_ts + 0.5 * one_millisecond,
                            shutter_time_end_ns=start_ts + 1.5 * one_millisecond,
                        ),
                    ),
                    Image(
                        filename=examples_path + "/resources/img_RFC02.jpg",
                        sensor_name=cam_sensor2,
                        metadata=ImageMetadata(
                            shutter_time_start_ns=start_ts + 0.5 * one_millisecond,
                            shutter_time_end_ns=start_ts + 1.5 * one_millisecond,
                        ),
                    ),
                ],
            ),
            LCSM.Frame(
                frame_id="2",
                unix_timestamp=start_ts + 5 * one_millisecond,
                relative_timestamp=4,
                point_clouds=[
                    PointCloud(filename=examples_path + "/resources/point_cloud_RFL11.csv", sensor_name=lidar_sensor1),
                    PointCloud(filename=examples_path + "/resources/point_cloud_RFL12.csv", sensor_name=lidar_sensor2),
                ],
                images=[
                    Image(
                        filename=examples_path + "/resources/img_RFC11.jpg",
                        sensor_name=cam_sensor1,
                        metadata=ImageMetadata(
                            shutter_time_start_ns=start_ts + 4.5 * one_millisecond,
                            shutter_time_end_ns=start_ts + 5.5 * one_millisecond,
                        ),
                    ),
                    Image(
                        filename=examples_path + "/resources/img_RFC12.jpg",
                        sensor_name=cam_sensor2,
                        metadata=ImageMetadata(
                            shutter_time_start_ns=start_ts + 4.5 * one_millisecond,
                            shutter_time_end_ns=start_ts + 5.5 * one_millisecond,
                        ),
                    ),
                ],
            ),
        ],
        calibration_id=created_calibration.id,
        metadata=metadata,
        imu_data=imu_data,
    )

    # Create scene
    return client.lidars_and_cameras_sequence.create(scene, dryrun=dryrun, **kwargs)


if __name__ == "__main__":
    setup_logging(level="INFO")
    client = KognicIOClient()

    # Project - available via `client.project.get_projects()`
    project = "<project-id>"
    run(client, project=project)
```