curl --request GET \
--url https://studio.edgeimpulse.com/v1/api/{projectId}/pretrained-model \
--header 'x-api-key: <api-key>'

{
"success": true,
"specificDeviceSelected": true,
"availableModelTypes": [
"int8"
],
"error": "<string>",
"model": {
"fileName": "<string>",
"inputs": [
{
"dataType": "int8",
"name": "<string>",
"shape": [
123
],
"quantizationScale": 123,
"quantizationZeroPoint": 123
}
],
"outputs": [
{
"dataType": "int8",
"name": "<string>",
"shape": [
123
],
"quantizationScale": 123,
"quantizationZeroPoint": 123
}
],
"profileInfo": {
"table": {
"variant": "int8",
"lowEndMcu": {
"description": "<string>",
"supported": true,
"timePerInferenceMs": 123,
"memory": {
"tflite": {
"ram": 123,
"rom": 123
},
"eon": {
"ram": 123,
"rom": 123
},
"eonRamOptimized": {
"ram": 123,
"rom": 123
}
},
"mcuSupportError": "<string>"
},
"highEndMcu": {
"description": "<string>",
"supported": true,
"timePerInferenceMs": 123,
"memory": {
"tflite": {
"ram": 123,
"rom": 123
},
"eon": {
"ram": 123,
"rom": 123
},
"eonRamOptimized": {
"ram": 123,
"rom": 123
}
},
"mcuSupportError": "<string>"
},
"highEndMcuPlusAccelerator": {
"description": "<string>",
"supported": true,
"timePerInferenceMs": 123,
"memory": {
"tflite": {
"ram": 123,
"rom": 123
},
"eon": {
"ram": 123,
"rom": 123
},
"eonRamOptimized": {
"ram": 123,
"rom": 123
}
},
"mcuSupportError": "<string>"
},
"mpu": {
"description": "<string>",
"supported": true,
"timePerInferenceMs": 123,
"rom": 123
},
"gpuOrMpuAccelerator": {
"description": "<string>",
"supported": true,
"timePerInferenceMs": 123,
"rom": 123
}
},
"float32": {
"variant": "int8",
"device": "<string>",
"tfliteFileSizeBytes": 123,
"isSupportedOnMcu": true,
"customMetrics": [
{
"name": "<string>",
"value": "<string>"
}
],
"hasPerformance": true,
"memory": {
"tflite": {
"ram": 123,
"rom": 123,
"arenaSize": 123
},
"eon": {
"ram": 123,
"rom": 123,
"arenaSize": 123
},
"eonRamOptimized": {
"ram": 123,
"rom": 123,
"arenaSize": 123
}
},
"timePerInferenceMs": 123,
"mcuSupportError": "<string>",
"profilingError": "<string>"
},
"int8": {
"variant": "int8",
"device": "<string>",
"tfliteFileSizeBytes": 123,
"isSupportedOnMcu": true,
"customMetrics": [
{
"name": "<string>",
"value": "<string>"
}
],
"hasPerformance": true,
"memory": {
"tflite": {
"ram": 123,
"rom": 123,
"arenaSize": 123
},
"eon": {
"ram": 123,
"rom": 123,
"arenaSize": 123
},
"eonRamOptimized": {
"ram": 123,
"rom": 123,
"arenaSize": 123
}
},
"timePerInferenceMs": 123,
"mcuSupportError": "<string>",
"profilingError": "<string>"
}
},
"profileJobId": 123,
"profileJobFailed": true,
"supportsTFLite": true
},
"modelInfo": {
"input": {
"inputType": "time-series",
"frequencyHz": 123,
"windowLengthMs": 123
},
"model": {
"modelType": "classification",
"labels": [
"<string>"
]
}
}
}

Receive info back about the earlier uploaded pretrained model (via uploadPretrainedModel), including its input/output tensors. If you want to deploy a pretrained model from the API, see startDeployPretrainedModelJob.
curl --request GET \
--url https://studio.edgeimpulse.com/v1/api/{projectId}/pretrained-model \
--header 'x-api-key: <api-key>'

{
"success": true,
"specificDeviceSelected": true,
"availableModelTypes": [
"int8"
],
"error": "<string>",
"model": {
"fileName": "<string>",
"inputs": [
{
"dataType": "int8",
"name": "<string>",
"shape": [
123
],
"quantizationScale": 123,
"quantizationZeroPoint": 123
}
],
"outputs": [
{
"dataType": "int8",
"name": "<string>",
"shape": [
123
],
"quantizationScale": 123,
"quantizationZeroPoint": 123
}
],
"profileInfo": {
"table": {
"variant": "int8",
"lowEndMcu": {
"description": "<string>",
"supported": true,
"timePerInferenceMs": 123,
"memory": {
"tflite": {
"ram": 123,
"rom": 123
},
"eon": {
"ram": 123,
"rom": 123
},
"eonRamOptimized": {
"ram": 123,
"rom": 123
}
},
"mcuSupportError": "<string>"
},
"highEndMcu": {
"description": "<string>",
"supported": true,
"timePerInferenceMs": 123,
"memory": {
"tflite": {
"ram": 123,
"rom": 123
},
"eon": {
"ram": 123,
"rom": 123
},
"eonRamOptimized": {
"ram": 123,
"rom": 123
}
},
"mcuSupportError": "<string>"
},
"highEndMcuPlusAccelerator": {
"description": "<string>",
"supported": true,
"timePerInferenceMs": 123,
"memory": {
"tflite": {
"ram": 123,
"rom": 123
},
"eon": {
"ram": 123,
"rom": 123
},
"eonRamOptimized": {
"ram": 123,
"rom": 123
}
},
"mcuSupportError": "<string>"
},
"mpu": {
"description": "<string>",
"supported": true,
"timePerInferenceMs": 123,
"rom": 123
},
"gpuOrMpuAccelerator": {
"description": "<string>",
"supported": true,
"timePerInferenceMs": 123,
"rom": 123
}
},
"float32": {
"variant": "int8",
"device": "<string>",
"tfliteFileSizeBytes": 123,
"isSupportedOnMcu": true,
"customMetrics": [
{
"name": "<string>",
"value": "<string>"
}
],
"hasPerformance": true,
"memory": {
"tflite": {
"ram": 123,
"rom": 123,
"arenaSize": 123
},
"eon": {
"ram": 123,
"rom": 123,
"arenaSize": 123
},
"eonRamOptimized": {
"ram": 123,
"rom": 123,
"arenaSize": 123
}
},
"timePerInferenceMs": 123,
"mcuSupportError": "<string>",
"profilingError": "<string>"
},
"int8": {
"variant": "int8",
"device": "<string>",
"tfliteFileSizeBytes": 123,
"isSupportedOnMcu": true,
"customMetrics": [
{
"name": "<string>",
"value": "<string>"
}
],
"hasPerformance": true,
"memory": {
"tflite": {
"ram": 123,
"rom": 123,
"arenaSize": 123
},
"eon": {
"ram": 123,
"rom": 123,
"arenaSize": 123
},
"eonRamOptimized": {
"ram": 123,
"rom": 123,
"arenaSize": 123
}
},
"timePerInferenceMs": 123,
"mcuSupportError": "<string>",
"profilingError": "<string>"
}
},
"profileJobId": 123,
"profileJobFailed": true,
"supportsTFLite": true
},
"modelInfo": {
"input": {
"inputType": "time-series",
"frequencyHz": 123,
"windowLengthMs": 123
},
"model": {
"modelType": "classification",
"labels": [
"<string>"
]
}
}
}

Project ID
Impulse ID. If this is unset then the default impulse is used.
OK
Whether the operation succeeded
Whether a specific device was selected for performance profiling
The types of model that are available
Available options: int8, float32, akida, requiresRetrain

Optional error description (set if 'success' was false)
Show child attributes
Show child attributes
Was this page helpful?