ライダーと複数カメラのシーケンス
4 分
`LidarsAndCamerasSeq` は、カメラ画像とライダー点群のシーケンスで構成されており、各フレームは1〜12枚のカメラ画像と1〜20個の点群から成ります。`LidarsAndCamerasSeq` オブジェクトの各フィールドの詳細については、docid: m9gj7ge3m0jf7hvlohj2 のセクションを参照してください。

from __future__ import absolute_import

from datetime import datetime
from typing import Optional
from uuid import uuid4

import kognic.io.model.scene.lidars_and_cameras_sequence as LCSM
from examples.calibration.calibration import create_sensor_calibration
from kognic.io.client import KognicIOClient
from kognic.io.logger import setup_logging
from kognic.io.model import CreateSceneResponse, Image, PointCloud


def run(client: KognicIOClient, dryrun: bool = True, **kwargs) -> Optional[CreateSceneResponse]:
    print("Creating Lidar and Camera Sequence Scene...")

    lidar_sensor1 = "lidar"
    cam_sensor1 = "RFC01"
    cam_sensor2 = "RFC02"
    metadata = {"location-lat": 27.986065, "location-long": 86.922623, "vehicle_id": "abg"}

    # Create calibration
    # (Please refer to the API documentation about calibration for more details)
    calibration_spec = create_sensor_calibration(
        f"Collection {datetime.now()}",
        [lidar_sensor1],
        [cam_sensor1, cam_sensor2],
    )
    created_calibration = client.calibration.create_calibration(calibration_spec)

    scene = LCSM.LidarsAndCamerasSequence(
        external_id=f"LCS-example-{uuid4()}",
        frames=[
            LCSM.Frame(
                frame_id="1",
                relative_timestamp=0,
                point_clouds=[
                    PointCloud(
                        filename="./examples/resources/point_cloud_RFL01.las",
                        sensor_name=lidar_sensor1,
                    ),
                ],
                images=[
                    Image(
                        filename="./examples/resources/img_RFC01.jpg",
                        sensor_name=cam_sensor1,
                    ),
                    Image(
                        filename="./examples/resources/img_RFC02.jpg",
                        sensor_name=cam_sensor2,
                    ),
                ],
                metadata={"dut_status": "active"},
            ),
            LCSM.Frame(
                frame_id="2",
                relative_timestamp=100,
                point_clouds=[
                    PointCloud(
                        filename="./examples/resources/point_cloud_RFL02.las",
                        sensor_name=lidar_sensor1,
                    ),
                ],
                images=[
                    Image(
                        filename="./examples/resources/img_RFC11.jpg",
                        sensor_name=cam_sensor1,
                    ),
                    Image(
                        filename="./examples/resources/img_RFC12.jpg",
                        sensor_name=cam_sensor2,
                    ),
                ],
                metadata={"dut_status": "active"},
            ),
        ],
        calibration_id=created_calibration.id,
        metadata=metadata,
    )

    # Create scene
    return client.lidars_and_cameras_sequence.create(scene, dryrun=dryrun, **kwargs)


if __name__ == "__main__":
    setup_logging(level="INFO")
    client = KognicIOClient()

    # Project - available via `client.project.get_projects()`
    project = "<project-identifier>"
    run(client, project)

dryrun を使用してシーンを検証する

メソッド呼び出しで dryrun パラメータを True に設定すると、API を使用してシーンの検証のみが行われ、シーンは作成されません。

キャリブレーションの再利用

同じキャリブレーションを複数のシーンで再利用できます。可能な場合は再利用することを推奨します。

エゴビークルのモーション情報を提供する

エゴビークルのモーション(すなわち、エゴビークルの位置と回転)は、`LidarsAndCamerasSeq` を作成する際に提供できるオプション情報です。この情報により、静的オブジェクトのアノテーションにかかる時間を大幅に短縮できます。エゴビークルのモーション情報は、シーン内の各 `Frame` に `EgoVehiclePose` オブジェクトを渡すことで提供します。

from __future__ import absolute_import

from datetime import datetime
from typing import Optional
from uuid import uuid4

import kognic.io.model.scene.lidars_and_cameras_sequence as LCSM
from examples.calibration.calibration import create_sensor_calibration
from kognic.io.client import KognicIOClient
from kognic.io.logger import setup_logging
from kognic.io.model import CreateSceneResponse, EgoVehiclePose, Image, PointCloud, Position, RotationQuaternion


def run(client: KognicIOClient, dryrun: bool = True, **kwargs) -> Optional[CreateSceneResponse]:
    print("Creating Lidar and Camera Sequence Scene...")

    lidar_sensor1 = "lidar"
    cam_sensor1 = "RFC01"
    cam_sensor2 = "RFC02"
    cam_sensor3 = "RFC03"
    metadata = {"location-lat": 27.986065, "location-long": 86.922623, "vehicle_id": "abg"}

    # Create calibration
    calibration_spec = create_sensor_calibration(f"Collection {datetime.now()}", [lidar_sensor1], [cam_sensor1, cam_sensor2, cam_sensor3])
    created_calibration = client.calibration.create_calibration(calibration_spec)

    scene = LCSM.LidarsAndCamerasSequence(
        external_id=f"LCS-full-example-{uuid4()}",
        frames=[
            LCSM.Frame(
                frame_id="1",
                relative_timestamp=0,
                point_clouds=[
                    PointCloud(filename="./examples/resources/point_cloud_RFL01.las", sensor_name=lidar_sensor1),
                ],
                images=[
                    Image(filename="./examples/resources/img_RFC01.jpg", sensor_name=cam_sensor1),
                    Image(filename="./examples/resources/img_RFC02.jpg", sensor_name=cam_sensor2),
                ],
                metadata={"dut_status": "active"},
                ego_vehicle_pose=EgoVehiclePose(
                    position=Position(x=1.0, y=1.0, z=1.0), rotation=RotationQuaternion(w=0.01, x=1.01, y=1.01, z=1.01)
                ),
            ),
            LCSM.Frame(
                frame_id="2",
                relative_timestamp=500,
                point_clouds=[
                    PointCloud(filename="./examples/resources/point_cloud_RFL02.las", sensor_name=lidar_sensor1),
                ],
                images=[
                    Image(filename="./examples/resources/img_RFC11.jpg", sensor_name=cam_sensor1),
                    Image(filename="./examples/resources/img_RFC12.jpg", sensor_name=cam_sensor2),
                ],
                ego_vehicle_pose=EgoVehiclePose(
                    position=Position(x=2.0, y=2.0, z=2.0), rotation=RotationQuaternion(w=0.01, x=2.01, y=2.01, z=2.01)
                ),
            ),
        ],
        calibration_id=created_calibration.id,
        metadata=metadata,
    )

    # Create scene
    return client.lidars_and_cameras_sequence.create(scene, dryrun=dryrun, **kwargs)


if __name__ == "__main__":
    setup_logging(level="INFO")
    client = KognicIOClient()

    # Project - available via `client.project.get_projects()`
    project = "<project-identifier>"
    run(client, project=project)

座標系

position と rotation はいずれも、ローカル座標系を基準としたエゴビークルのポーズです。

シャッタータイミング

シャッタータイミングは、`Frame` 内に `Image` を作成する際に指定できるオプションのメタデータです。タイミングはシャッター開始と終了のタイムスタンプ(UNIX エポックからのナノ秒)の2つの値で構成され、各フレームの各画像に対して指定します。

from __future__ import absolute_import

import os.path
from datetime import datetime
from typing import Optional
from uuid import uuid4

import kognic.io.model.scene.lidars_and_cameras_sequence as LCSM
from examples.calibration.calibration import create_sensor_calibration
from examples.imu_data.create_imu_data import create_dummy_imu_data
from kognic.io.client import KognicIOClient
from kognic.io.logger import setup_logging
from kognic.io.model import CreateSceneResponse, Image, ImageMetadata, PointCloud


def run(client: KognicIOClient, dryrun: bool = True, **kwargs) -> Optional[CreateSceneResponse]:
    print("Creating Lidar and Camera Sequence
Scene...")

    lidar_sensor1 = "RFL01"
    lidar_sensor2 = "RFL02"
    cam_sensor1 = "RFC01"
    cam_sensor2 = "RFC02"
    metadata = {"location-lat": 27.986065, "location-long": 86.922623, "vehicleId": "abg"}
    examples_path = os.path.dirname(__file__)

    # Create calibration
    calibration_spec = create_sensor_calibration(f"Collection {datetime.now()}", [lidar_sensor1, lidar_sensor2], [cam_sensor1, cam_sensor2])
    created_calibration = client.calibration.create_calibration(calibration_spec)

    # Generate IMU data
    one_millisecond = 1000000  # one millisecond, expressed in nanos
    start_ts = 1648200140000000000
    end_ts = start_ts + 10 * one_millisecond
    imu_data = create_dummy_imu_data(start_timestamp=start_ts, end_timestamp=end_ts, samples_per_sec=1000)

    scene = LCSM.LidarsAndCamerasSequence(
        external_id=f"LCS-full-with-imu-and-shutter-example-{uuid4()}",
        frames=[
            LCSM.Frame(
                frame_id="1",
                unix_timestamp=start_ts + one_millisecond,
                relative_timestamp=0,
                point_clouds=[
                    PointCloud(filename=examples_path + "/resources/point_cloud_RFL01.csv", sensor_name=lidar_sensor1),
                    PointCloud(filename=examples_path + "/resources/point_cloud_RFL02.csv", sensor_name=lidar_sensor2),
                ],
                images=[
                    Image(
                        filename=examples_path + "/resources/img_RFC01.jpg",
                        sensor_name=cam_sensor1,
                        metadata=ImageMetadata(
                            shutter_time_start_ns=start_ts + 0.5 * one_millisecond, shutter_time_end_ns=start_ts + 1.5 * one_millisecond
                        ),
                    ),
                    Image(
                        filename=examples_path + "/resources/img_RFC02.jpg",
                        sensor_name=cam_sensor2,
                        metadata=ImageMetadata(
                            shutter_time_start_ns=start_ts + 0.5 * one_millisecond, shutter_time_end_ns=start_ts + 1.5 * one_millisecond
                        ),
                    ),
                ],
            ),
            LCSM.Frame(
                frame_id="2",
                unix_timestamp=start_ts + 5 * one_millisecond,
                relative_timestamp=4,
                point_clouds=[
                    PointCloud(filename=examples_path + "/resources/point_cloud_RFL11.csv", sensor_name=lidar_sensor1),
                    PointCloud(filename=examples_path + "/resources/point_cloud_RFL12.csv", sensor_name=lidar_sensor2),
                ],
                images=[
                    Image(
                        filename=examples_path + "/resources/img_RFC11.jpg",
                        sensor_name=cam_
sensor1,
                        metadata=ImageMetadata(
                            shutter_time_start_ns=start_ts + 4.5 * one_millisecond, shutter_time_end_ns=start_ts + 5.5 * one_millisecond
                        ),
                    ),
                    Image(
                        filename=examples_path + "/resources/img_RFC12.jpg",
                        sensor_name=cam_sensor2,
                        metadata=ImageMetadata(
                            shutter_time_start_ns=start_ts + 4.5 * one_millisecond, shutter_time_end_ns=start_ts + 5.5 * one_millisecond
                        ),
                    ),
                ],
            ),
        ],
        calibration_id=created_calibration.id,
        metadata=metadata,
        imu_data=imu_data,
    )

    # Create scene
    return client.lidars_and_cameras_sequence.create(scene, dryrun=dryrun, **kwargs)


if __name__ == "__main__":
    setup_logging(level="INFO")
    client = KognicIOClient()

    # Project - available via `client.project.get_projects()`
    project = "<project-id>"
    run(client, project=project)

