curl --request GET \
--url https://studio.edgeimpulse.com/v1/api/{projectId}/pretrained-model \
--header 'x-api-key: <api-key>'
{
"success": true,
"error": "<string>",
"specificDeviceSelected": true,
"availableModelTypes": [
"int8"
],
"model": {
"fileName": "<string>",
"profileInfo": {
"float32": {
"variant": "int8",
"device": "<string>",
"tfliteFileSizeBytes": 123,
"isSupportedOnMcu": true,
"memory": {
"tflite": {
"ram": 123,
"rom": 123,
"arenaSize": 123
},
"eon": {
"ram": 123,
"rom": 123,
"arenaSize": 123
},
"eonRamOptimized": {
"ram": 123,
"rom": 123,
"arenaSize": 123
}
},
"timePerInferenceMs": 123,
"mcuSupportError": "<string>",
"customMetrics": [
{
"name": "<string>",
"value": "<string>"
}
],
"hasPerformance": true,
"profilingError": "<string>"
},
"int8": {
"variant": "int8",
"device": "<string>",
"tfliteFileSizeBytes": 123,
"isSupportedOnMcu": true,
"memory": {
"tflite": {
"ram": 123,
"rom": 123,
"arenaSize": 123
},
"eon": {
"ram": 123,
"rom": 123,
"arenaSize": 123
},
"eonRamOptimized": {
"ram": 123,
"rom": 123,
"arenaSize": 123
}
},
"timePerInferenceMs": 123,
"mcuSupportError": "<string>",
"customMetrics": [
{
"name": "<string>",
"value": "<string>"
}
],
"hasPerformance": true,
"profilingError": "<string>"
},
"table": {
"variant": "int8",
"lowEndMcu": {
"description": "<string>",
"timePerInferenceMs": 123,
"memory": {
"tflite": {
"ram": 123,
"rom": 123
},
"eon": {
"ram": 123,
"rom": 123
},
"eonRamOptimized": {
"ram": 123,
"rom": 123
}
},
"supported": true,
"mcuSupportError": "<string>"
},
"highEndMcu": {
"description": "<string>",
"timePerInferenceMs": 123,
"memory": {
"tflite": {
"ram": 123,
"rom": 123
},
"eon": {
"ram": 123,
"rom": 123
},
"eonRamOptimized": {
"ram": 123,
"rom": 123
}
},
"supported": true,
"mcuSupportError": "<string>"
},
"highEndMcuPlusAccelerator": {
"description": "<string>",
"timePerInferenceMs": 123,
"memory": {
"tflite": {
"ram": 123,
"rom": 123
},
"eon": {
"ram": 123,
"rom": 123
},
"eonRamOptimized": {
"ram": 123,
"rom": 123
}
},
"supported": true,
"mcuSupportError": "<string>"
},
"mpu": {
"description": "<string>",
"timePerInferenceMs": 123,
"rom": 123,
"supported": true
},
"gpuOrMpuAccelerator": {
"description": "<string>",
"timePerInferenceMs": 123,
"rom": 123,
"supported": true
}
}
},
"inputs": [
{
"dataType": "int8",
"name": "<string>",
"shape": [
123
],
"quantizationScale": 123,
"quantizationZeroPoint": 123
}
],
"outputs": [
{
"dataType": "int8",
"name": "<string>",
"shape": [
123
],
"quantizationScale": 123,
"quantizationZeroPoint": 123
}
],
"profileJobId": 123,
"profileJobFailed": true,
"supportsTFLite": true
},
"modelInfo": {
"input": {
"inputType": "time-series",
"frequencyHz": 123,
"windowLengthMs": 123
},
"model": {
"modelType": "classification",
"labels": [
"<string>"
]
}
}
}
Receive information about the earlier uploaded pretrained model (via uploadPretrainedModel), including its input/output tensors. If you want to deploy a pretrained model from the API, see startDeployPretrainedModelJob.
curl --request GET \
--url https://studio.edgeimpulse.com/v1/api/{projectId}/pretrained-model \
--header 'x-api-key: <api-key>'
{
"success": true,
"error": "<string>",
"specificDeviceSelected": true,
"availableModelTypes": [
"int8"
],
"model": {
"fileName": "<string>",
"profileInfo": {
"float32": {
"variant": "int8",
"device": "<string>",
"tfliteFileSizeBytes": 123,
"isSupportedOnMcu": true,
"memory": {
"tflite": {
"ram": 123,
"rom": 123,
"arenaSize": 123
},
"eon": {
"ram": 123,
"rom": 123,
"arenaSize": 123
},
"eonRamOptimized": {
"ram": 123,
"rom": 123,
"arenaSize": 123
}
},
"timePerInferenceMs": 123,
"mcuSupportError": "<string>",
"customMetrics": [
{
"name": "<string>",
"value": "<string>"
}
],
"hasPerformance": true,
"profilingError": "<string>"
},
"int8": {
"variant": "int8",
"device": "<string>",
"tfliteFileSizeBytes": 123,
"isSupportedOnMcu": true,
"memory": {
"tflite": {
"ram": 123,
"rom": 123,
"arenaSize": 123
},
"eon": {
"ram": 123,
"rom": 123,
"arenaSize": 123
},
"eonRamOptimized": {
"ram": 123,
"rom": 123,
"arenaSize": 123
}
},
"timePerInferenceMs": 123,
"mcuSupportError": "<string>",
"customMetrics": [
{
"name": "<string>",
"value": "<string>"
}
],
"hasPerformance": true,
"profilingError": "<string>"
},
"table": {
"variant": "int8",
"lowEndMcu": {
"description": "<string>",
"timePerInferenceMs": 123,
"memory": {
"tflite": {
"ram": 123,
"rom": 123
},
"eon": {
"ram": 123,
"rom": 123
},
"eonRamOptimized": {
"ram": 123,
"rom": 123
}
},
"supported": true,
"mcuSupportError": "<string>"
},
"highEndMcu": {
"description": "<string>",
"timePerInferenceMs": 123,
"memory": {
"tflite": {
"ram": 123,
"rom": 123
},
"eon": {
"ram": 123,
"rom": 123
},
"eonRamOptimized": {
"ram": 123,
"rom": 123
}
},
"supported": true,
"mcuSupportError": "<string>"
},
"highEndMcuPlusAccelerator": {
"description": "<string>",
"timePerInferenceMs": 123,
"memory": {
"tflite": {
"ram": 123,
"rom": 123
},
"eon": {
"ram": 123,
"rom": 123
},
"eonRamOptimized": {
"ram": 123,
"rom": 123
}
},
"supported": true,
"mcuSupportError": "<string>"
},
"mpu": {
"description": "<string>",
"timePerInferenceMs": 123,
"rom": 123,
"supported": true
},
"gpuOrMpuAccelerator": {
"description": "<string>",
"timePerInferenceMs": 123,
"rom": 123,
"supported": true
}
}
},
"inputs": [
{
"dataType": "int8",
"name": "<string>",
"shape": [
123
],
"quantizationScale": 123,
"quantizationZeroPoint": 123
}
],
"outputs": [
{
"dataType": "int8",
"name": "<string>",
"shape": [
123
],
"quantizationScale": 123,
"quantizationZeroPoint": 123
}
],
"profileJobId": 123,
"profileJobFailed": true,
"supportsTFLite": true
},
"modelInfo": {
"input": {
"inputType": "time-series",
"frequencyHz": 123,
"windowLengthMs": 123
},
"model": {
"modelType": "classification",
"labels": [
"<string>"
]
}
}
}
Project ID
Impulse ID. If this is unset then the default impulse is used.
OK
The response is of type object.