Get classify job result, containing the predictions for a given page.

curl --request GET \
  --url https://studio.edgeimpulse.com/v1/api/{projectId}/classify/all/result/page \
  --header 'x-api-key: <api-key>'

{
"success": true,
"result": [
{
"sampleId": 123,
"sample": {
"id": 2,
"filename": "idle01.d8Ae",
"signatureValidate": true,
"created": "2023-11-07T05:31:56Z",
"lastModified": "2023-11-07T05:31:56Z",
"category": "training",
"coldstorageFilename": "<string>",
"label": "healthy-machine",
"intervalMs": 16,
"frequency": 62.5,
"originalIntervalMs": 16,
"originalFrequency": 62.5,
"deviceType": "<string>",
"sensors": [
{
"name": "accX",
"units": "<string>"
}
],
"valuesCount": 123,
"added": "2023-11-07T05:31:56Z",
"boundingBoxes": [
{
"label": "<string>",
"x": 123,
"y": 123,
"width": 123,
"height": 123
}
],
"boundingBoxesType": "object_detection",
"chartType": "chart",
"isDisabled": true,
"isProcessing": true,
"processingError": true,
"isCropped": true,
"projectId": 123,
"sha256Hash": "<string>",
"signatureMethod": "HS256",
"signatureKey": "<string>",
"deviceName": "<string>",
"totalLengthMs": 123,
"thumbnailVideo": "<string>",
"thumbnailVideoFull": "<string>",
"processingJobId": 123,
"processingErrorString": "<string>",
"metadata": {},
"projectOwnerName": "<string>",
"projectName": "<string>",
"projectLabelingMethod": "single_label",
"structuredLabels": [
{
"startIndex": 123,
"endIndex": 123,
"label": "<string>"
}
],
"structuredLabelsList": [
"<string>"
],
"createdBySyntheticDataJobId": 123,
"imageDimensions": {
"width": 123,
"height": 123
},
"videoUrl": "<string>",
"videoUrlFull": "<string>"
},
"classifications": [
{
"learnBlock": {
"id": 2,
"type": "anomaly",
"name": "NN Classifier",
"dsp": [
27
],
"title": "Classification (Keras)",
"createdBy": "createImpulse",
"createdAt": "2023-11-07T05:31:56Z"
},
"result": [
{
"idle": 0.0002,
"wave": 0.9998,
"anomaly": -0.42
}
],
"minimumConfidenceRating": 123,
"expectedLabels": [
{
"startIndex": 123,
"endIndex": 123,
"label": "<string>"
}
],
"thresholds": [
{
"key": "min_score",
"description": "Score threshold",
"helpText": "Threshold score for bounding boxes. If the score for a bounding box is below this the box will be discarded.",
"value": 0.5,
"suggestedValue": 123,
"suggestedValueText": "<string>"
}
],
"anomalyResult": [
{
"boxes": [
{
"label": "<string>",
"x": 123,
"y": 123,
"width": 123,
"height": 123,
"score": 123
}
],
"scores": [
[
123
]
],
"meanScore": 123,
"maxScore": 123
}
],
"structuredResult": [
{
"boxes": [
[
123
]
],
"scores": [
123
],
"mAP": 123,
"f1": 123,
"precision": 123,
"recall": 123,
"labels": [
"<string>"
],
"debugInfoJson": "{\n \"y_trues\": [\n {\"x\": 0.854, \"y\": 0.453125, \"label\": 1},\n {\"x\": 0.197, \"y\": 0.53125, \"label\": 2}\n ],\n \"y_preds\": [\n {\"x\": 0.916, \"y\": 0.875, \"label\": 1},\n {\"x\": 0.25, \"y\": 0.541, \"label\": 2}\n ],\n \"assignments\": [\n {\"yp\": 1, \"yt\": 1, \"label\": 2, \"distance\": 0.053}\n ],\n \"normalised_min_distance\": 0.2,\n \"all_pairwise_distances\": [\n [0, 0, 0.426],\n [1, 1, 0.053]\n ],\n \"unassigned_y_true_idxs\": [0],\n \"unassigned_y_pred_idxs\": [0]\n}\n"
}
],
"details": [
{
"boxes": [
[
123
]
],
"labels": [
123
],
"scores": [
123
],
"mAP": 123,
"f1": 123
}
],
"objectDetectionLastLayer": "mobilenet-ssd"
}
]
}
],
"predictions": [
{
"sampleId": 123,
"startMs": 123,
"endMs": 123,
"prediction": "<string>",
"label": "<string>",
"predictionCorrect": true,
"f1Score": 123,
"anomalyScores": [
[
123
]
],
"boundingBoxes": [
{
"label": "<string>",
"x": 123,
"y": 123,
"width": 123,
"height": 123,
"score": 123
}
]
}
],
"totalCount": 123,
"error": "<string>"
}
Project ID
Maximum number of results
Offset in results; can be used in conjunction with LimitResultsParameter (the maximum number of results, above) to implement paging.
Keras model variant
int8, float32, akida
Impulse ID. If this is unset then the default impulse is used.
If true, only a slice of labels will be returned for samples with multiple labels.
Only include samples with a label within the given list of labels, given as a JSON string
"[\"idle\", \"snake\"]"
Only include samples whose filename includes the given filename
Only include samples shorter than the given length, in milliseconds
Only include samples longer than the given length, in milliseconds
Only include samples with a frequency higher than the given frequency, in hertz
Only include samples with a frequency lower than the given frequency, in hertz
Include samples with either valid or invalid signatures
both, valid, invalid
Only include samples with a label >= this value
Only include samples with a label < this value
Search query
"<id> <name>"
Include only samples with a particular data type
audio, image
Include only samples with an ID >= this value
Include only samples with an ID < this value
Filter samples by metadata key-value pairs, provided as a JSON string. Each item in the filter list is an object with the following properties:
- "key": Metadata key to filter on.
- "op": Operator ("eq" for positive match, "neq" for negative match).
- "values": (optional) Array of values to match/exclude. If omitted or empty, matches/excludes all values for the key.
In addition to filter objects, the following option objects can be specified:
- { "no_metadata": boolean } - If true, include samples without any metadata.
- { "filters_combinator": ("and" | "or") } - Specifies the combinator and matching mode:
  - "and": All filter items must match (logical AND).
  - "or": Any filter item may match (logical OR); samples with metadata keys not present in the filters are included.
"Example 1: returns samples where metadata key \"foo\" is 'bar' or 'baz' AND\nmetadata key \"k\" is \"v\".\n[\n { \"no_metadata\": true },\n { \"filters_combinator\": \"and\" },\n { \"key\": \"foo\", \"op\": \"eq\", \"values\": [\"bar\", \"baz\"] },\n { \"key\": \"k\", \"op\": \"eq\", \"values\": [\"v\"] }\n]\n\nExample 2: returns samples where metadata key \"foo\" is not 'bar'. Samples\nwithout any metadata are filtered out.\n[\n { \"no_metadata\": false },\n { \"filters_combinator\": \"or\" },\n { \"key\": \"foo\", \"op\": \"neq\", \"values\": [\"bar\"] }\n]\n"
Only include samples that were added after the date given
"2023-01-01T00:00:00.000Z"
Only include samples that were added before the date given
"2024-12-31T00:00:00.000Z"
OK
Whether the operation succeeded
2
"idle01.d8Ae"
Whether signature validation passed
true
Timestamp when the sample was created on device, or if no accurate time was known on device, the time that the file was processed by the ingestion service.
Timestamp when the sample was last modified.
training, testing, post-processing
"training"
"healthy-machine"
Interval between two windows (1000 / frequency). If the data was resampled, then this lists the resampled interval.
16
Frequency of the sample. If the data was resampled, then this lists the resampled frequency.
62.5
Interval between two windows (1000 / frequency) in the source data (before resampling).
16
Frequency of the sample in the source data (before resampling).
62.5
Name of the axis
"accX"
Type of data on this axis. Needs to comply with SenML units (see https://www.iana.org/assignments/senml/senml.xhtml).
Number of readings in this file
Timestamp when the sample was added to the current acquisition bucket.
object_detection, constrained_object_detection
chart, image, video, table
True if the current sample is excluded from use
True if the current sample is still processing (e.g. for video)
Set when processing this sample failed
Whether the sample is cropped from another sample (and has crop start / end info)
Unique identifier of the project this sample belongs to
Data sample SHA 256 hash (including CBOR envelope if applicable)
"HS256"
Either the shared key or the public key that was used to validate the sample
Total length (in ms.) of this file
Set when sample is processing and a job has picked up the request
Error (only set when processing this sample failed)
Name of the owner of the project this sample belongs to
Name of the project this sample belongs to
What labeling flow the project this sample belongs to uses
single_label, object_detection
Start index of the label (e.g. 0)
End index of the label (e.g. 3). This value is inclusive, so { startIndex: 0, endIndex: 3 } covers 0, 1, 2, 3.
The label for this section.
If this sample was created by a synthetic data job, it's referenced here.
Video link, cropped and in original resolution.
Video link in original resolution.
Identifier for this block. Make sure to increment this number when creating a new block via getNewBlockId, and don't re-use identifiers. If the block hasn't changed, keep the ID as-is. The ID must be unique across the project and greater than zero (>0).
x >= 1
The type of learning block (anomaly, keras, keras-transfer-image, keras-transfer-kws, keras-object-detection, keras-regression). Each behaves differently.
anomaly, anomaly-gmm, keras, keras-transfer-image, keras-transfer-kws, keras-object-detection, keras-regression, keras-akida, keras-akida-transfer-image, keras-akida-object-detection, keras-visual-anomaly
Block name, will be used in menus. If a block has a baseBlockId, this field is ignored and the base block's name is used instead.
"NN Classifier"
DSP dependencies, identified by DSP block ID
Block title, used in the impulse UI
"Classification (Keras)"
The system component that created the block version (createImpulse | clone | tuner). Cannot be set via API.
"createImpulse"
The datetime that the block version was created. Cannot be set via API.
DEPRECATED, see "thresholds" instead. The minimum confidence rating for this block. For regression, this is the absolute error (which can be larger than 1).
An array with an expected label per window.
Start index of the label (e.g. 0)
End index of the label (e.g. 3). This value is inclusive, so { startIndex: 0, endIndex: 3 } covers 0, 1, 2, 3.
The label for this section.
List of configurable thresholds for this block.
Identifier to reference the threshold. You'll need to refer to the threshold by this key when you set the threshold.
"min_score"
User-friendly description of the threshold.
"Score threshold"
Additional help text (shown in the UI under a "?" icon)
"Threshold score for bounding boxes. If the score for a bounding box is below this the box will be discarded."
Current value of the threshold
0.5
If the threshold has a suggested value, e.g. a max. absolute error for regression projects; or the min. anomaly score for visual anomaly detection, then this is the numeric value of that threshold.
If the threshold has a suggested value, e.g. a max. absolute error for regression projects; or the min. anomaly score for visual anomaly detection, then this is the stringified value of that threshold.
Anomaly scores and computed metrics for visual anomaly detection, one item per window.
For visual anomaly detection. An array of bounding box objects, (x, y, width, height, score, label), one per detection in the image. Filtered by the minimum confidence rating of the learn block.
2D array of shape (n, n) with raw anomaly scores for visual anomaly detection, where n can be calculated as ((1/8 of image input size)/2 - 1); for example, a 160x160 input gives n = (160/8)/2 - 1 = 9. The scores correspond to each grid cell in the image's spatial matrix.
Mean value of the scores.
Maximum value of the scores.
Results of inferencing that returns structured data, such as object detection
For object detection. An array of bounding box arrays, (x, y, width, height), one per detection in the image.
For object detection. An array of probability scores, one per detection in the image.
For object detection. A score that indicates accuracy compared to the ground truth, if available.
For FOMO. A score that combines the precision and recall of a classifier into a single metric, if available.
A measure of how many of the positive predictions made are correct (true positives).
A measure of how many of the positive cases the classifier correctly predicted, over all the positive cases.
For object detection. An array of labels, one per detection in the image.
Debug info in JSON format
"{\n \"y_trues\": [\n {\"x\": 0.854, \"y\": 0.453125, \"label\": 1},\n {\"x\": 0.197, \"y\": 0.53125, \"label\": 2}\n ],\n \"y_preds\": [\n {\"x\": 0.916, \"y\": 0.875, \"label\": 1},\n {\"x\": 0.25, \"y\": 0.541, \"label\": 2}\n ],\n \"assignments\": [\n {\"yp\": 1, \"yt\": 1, \"label\": 2, \"distance\": 0.053}\n ],\n \"normalised_min_distance\": 0.2,\n \"all_pairwise_distances\": [\n [0, 0, 0.426],\n [1, 1, 0.053]\n ],\n \"unassigned_y_true_idxs\": [0],\n \"unassigned_y_pred_idxs\": [0]\n}\n"
Structured outputs and computed metrics for some model types (e.g. object detection), one item per window.
Bounding boxes predicted by localization model
Labels predicted by localization model
Scores predicted by localization model
For object detection, the COCO mAP computed for the predictions on this image
For FOMO, the F1 score computed for the predictions on this image
mobilenet-ssd, fomo, yolov2-akida, yolov5, yolov5v5-drpai, yolox, yolov7, yolo-pro, tao-retinanet, tao-ssd, tao-yolov3, tao-yolov4, yolov11, yolov11-abs
Only set for object detection projects
Only set for visual anomaly projects. 2D array of shape (n, n) with raw anomaly scores, where n varies based on the image input size and the specific visual anomaly algorithm used. The scores correspond to each grid cell in the image's spatial matrix.
Only set for object detection projects. Coordinates are scaled 0..1, not absolute values.
Total sample count
Optional error description (set if 'success' was false)
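Because totalCount reports the full number of samples while each call returns only one page, the offset and limit parameters can be combined to walk through every page. A minimal paging sketch in Python under the same assumptions as the request example above (the limit and offset parameter names are not confirmed by this page); the accuracy figure at the end is purely illustrative, derived from the predictionCorrect field:

import requests

API_KEY = "ei_..."      # placeholder API key
PROJECT_ID = 12345      # placeholder project ID
PAGE_SIZE = 100

url = f"https://studio.edgeimpulse.com/v1/api/{PROJECT_ID}/classify/all/result/page"
headers = {"x-api-key": API_KEY}

predictions = []
offset = 0
while True:
    # "limit" / "offset" are assumed query parameter names; see the parameter list above.
    r = requests.get(url, params={"limit": PAGE_SIZE, "offset": offset}, headers=headers)
    r.raise_for_status()
    page = r.json()
    if not page["success"]:
        raise RuntimeError(page.get("error"))

    predictions.extend(page["predictions"])
    offset += PAGE_SIZE
    if offset >= page["totalCount"]:
        break

# Illustrative only: fraction of prediction windows whose prediction matched the label.
correct = sum(1 for p in predictions if p["predictionCorrect"])
print(f"{correct}/{len(predictions)} predictions correct")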