Cameras sequence
A CamerasSeq consists of a sequence of frames, where each frame can contain between 1 and 9 camera images from different sensors. For more documentation on what each field in the CamerasSeq object corresponds to, please see the Scene Overview section.
Python
from __future__ import absolute_import

from typing import Optional
from uuid import uuid4

import kognic.io.model.scene.cameras_sequence as CSM
from kognic.io.client import KognicIOClient
from kognic.io.logger import setup_logging
from kognic.io.model import CreateSceneResponse, Image


def run(client: KognicIOClient, dryrun: bool = True, **kwargs) -> Optional[CreateSceneResponse]:
    print("Creating Cameras Sequence Scene...")

    sensor1 = "RFC01"
    sensor2 = "RFC02"
    metadata = {"location-lat": 27.986065, "location-long": 86.922623, "vehicle_id": "abg"}

    scene = CSM.CamerasSequence(
        external_id=f"camera-seq-images-example-{uuid4()}",
        frames=[
            CSM.Frame(
                frame_id="1",
                relative_timestamp=0,
                images=[
                    # JPG Images in Frame 1
                    Image(
                        filename="./examples/resources/img_RFC01.jpg",
                        sensor_name=sensor1,
                    ),
                    Image(
                        filename="./examples/resources/img_RFC02.jpg",
                        sensor_name=sensor2,
                    ),
                ],
                metadata={"dut_status": "active"},
            ),
            CSM.Frame(
                frame_id="2",
                relative_timestamp=500,
                images=[
                    # PNG Images in Frame 2
                    Image(
                        filename="./examples/resources/img_RFC11.png",
                        sensor_name=sensor1,
                    ),
                    Image(
                        filename="./examples/resources/img_RFC12.png",
                        sensor_name=sensor2,
                    ),
                ],
                metadata={"dut_status": "active"},
            ),
            CSM.Frame(
                frame_id="3",
                relative_timestamp=1000,
                images=[
                    # WebP VP8 Images in Frame 3
                    Image(
                        filename="./examples/resources/img_RFC21.webp",
                        sensor_name=sensor1,
                    ),
                    Image(
                        filename="./examples/resources/img_RFC22.webp",
                        sensor_name=sensor2,
                    ),
                ],
                metadata={"dut_status": "active"},
            ),
            CSM.Frame(
                frame_id="4",
                relative_timestamp=1500,
                images=[
                    # WebP VP8L Images in Frame 4
                    Image(
                        filename="./examples/resources/img_RFC31.webp",
                        sensor_name=sensor1,
                    ),
                    Image(
                        filename="./examples/resources/img_RFC32.webp",
                        sensor_name=sensor2,
                    ),
                ],
                metadata={"dut_status": "active"},
            ),
            CSM.Frame(
                frame_id="5",
                relative_timestamp=2000,
                images=[
                    # WebP VP8X Images in Frame 5
                    Image(
                        filename="./examples/resources/img_RFC41.webp",
                        sensor_name=sensor1,
                    ),
                    Image(
                        filename="./examples/resources/img_RFC42.webp",
                        sensor_name=sensor2,
                    ),
                ],
                metadata={"dut_status": "active"},
            ),
            CSM.Frame(
                frame_id="6",
                relative_timestamp=2500,
                images=[
                    # AVIF Images in Frame 6
                    Image(
                        filename="./examples/resources/img_RFC51.avif",
                        sensor_name=sensor1,
                    ),
                    Image(
                        filename="./examples/resources/img_RFC52.avif",
                        sensor_name=sensor2,
                    ),
                ],
                metadata={"dut_status": "active"},
            ),
        ],
        metadata=metadata,
    )

    # Create scene
    return client.cameras_sequence.create(scene, dryrun=dryrun, **kwargs)


if __name__ == "__main__":
    setup_logging(level="INFO")
    # Project - Available via `client.project.get_projects()`
    project = "<project-identifier>"

    client = KognicIOClient()
    run(client, project=project)
Use dryrun to validate the scene
Setting the dryrun parameter to True in the method call will validate the scene using the API, but not create it.
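As a minimal sketch (assuming the run helper defined above and a real project identifier in place of the placeholder), you can validate the scene first and only create it once validation succeeds:

Python
from kognic.io.client import KognicIOClient

client = KognicIOClient()

# dryrun=True (the default in the run helper above) only validates the scene via the API
run(client, dryrun=True, project="<project-identifier>")

# dryrun=False performs the actual upload and creates the scene
response = run(client, dryrun=False, project="<project-identifier>")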