Get pretrained model

Get pretrained model

get

Receive info back about the earlier uploaded pretrained model (via uploadPretrainedModel) input/output tensors. If you want to deploy a pretrained model from the API, see startDeployPretrainedModelJob.

Authorizations
Path parameters
projectId · integer · Required

Project ID

Query parameters
impulseId · integer · Optional

Impulse ID. If this is unset then the default impulse is used.

Responses
200
OK
application/json
Response: all of
get
GET /v1/api/{projectId}/pretrained-model HTTP/1.1
Host: studio.edgeimpulse.com
x-api-key: YOUR_API_KEY
Accept: */*
200

OK

{
  "success": true,
  "error": "text",
  "specificDeviceSelected": true,
  "availableModelTypes": [
    "int8"
  ],
  "model": {
    "fileName": "text",
    "profileInfo": {
      "float32": {
        "variant": "int8",
        "device": "text",
        "tfliteFileSizeBytes": 1,
        "isSupportedOnMcu": true,
        "memory": {
          "tflite": {
            "ram": 1,
            "rom": 1,
            "arenaSize": 1
          },
          "eon": {
            "ram": 1,
            "rom": 1,
            "arenaSize": 1
          },
          "eonRamOptimized": {
            "ram": 1,
            "rom": 1,
            "arenaSize": 1
          }
        },
        "timePerInferenceMs": 1,
        "mcuSupportError": "text",
        "customMetrics": [
          {
            "name": "text",
            "value": "text"
          }
        ],
        "hasPerformance": true,
        "profilingError": "text"
      },
      "int8": {
        "variant": "int8",
        "device": "text",
        "tfliteFileSizeBytes": 1,
        "isSupportedOnMcu": true,
        "memory": {
          "tflite": {
            "ram": 1,
            "rom": 1,
            "arenaSize": 1
          },
          "eon": {
            "ram": 1,
            "rom": 1,
            "arenaSize": 1
          },
          "eonRamOptimized": {
            "ram": 1,
            "rom": 1,
            "arenaSize": 1
          }
        },
        "timePerInferenceMs": 1,
        "mcuSupportError": "text",
        "customMetrics": [
          {
            "name": "text",
            "value": "text"
          }
        ],
        "hasPerformance": true,
        "profilingError": "text"
      },
      "table": {
        "variant": "int8",
        "lowEndMcu": {
          "description": "text",
          "timePerInferenceMs": 1,
          "memory": {
            "tflite": {
              "ram": 1,
              "rom": 1
            },
            "eon": {
              "ram": 1,
              "rom": 1
            },
            "eonRamOptimized": {
              "ram": 1,
              "rom": 1
            }
          },
          "supported": true,
          "mcuSupportError": "text"
        },
        "highEndMcu": {
          "description": "text",
          "timePerInferenceMs": 1,
          "memory": {
            "tflite": {
              "ram": 1,
              "rom": 1
            },
            "eon": {
              "ram": 1,
              "rom": 1
            },
            "eonRamOptimized": {
              "ram": 1,
              "rom": 1
            }
          },
          "supported": true,
          "mcuSupportError": "text"
        },
        "highEndMcuPlusAccelerator": {
          "description": "text",
          "timePerInferenceMs": 1,
          "memory": {
            "tflite": {
              "ram": 1,
              "rom": 1
            },
            "eon": {
              "ram": 1,
              "rom": 1
            },
            "eonRamOptimized": {
              "ram": 1,
              "rom": 1
            }
          },
          "supported": true,
          "mcuSupportError": "text"
        },
        "mpu": {
          "description": "text",
          "timePerInferenceMs": 1,
          "rom": 1,
          "supported": true
        },
        "gpuOrMpuAccelerator": {
          "description": "text",
          "timePerInferenceMs": 1,
          "rom": 1,
          "supported": true
        }
      }
    },
    "inputs": [
      {
        "dataType": "int8",
        "name": "text",
        "shape": [
          1
        ],
        "quantizationScale": 1,
        "quantizationZeroPoint": 1
      }
    ],
    "outputs": [
      {
        "dataType": "int8",
        "name": "text",
        "shape": [
          1
        ],
        "quantizationScale": 1,
        "quantizationZeroPoint": 1
      }
    ],
    "profileJobId": 1,
    "profileJobFailed": true,
    "supportsTFLite": true
  },
  "modelInfo": {
    "input": {
      "inputType": "time-series",
      "frequencyHz": 1,
      "windowLengthMs": 1
    },
    "model": {
      "modelType": "classification",
      "labels": [
        "text"
      ]
    }
  }
}

Last updated

Was this helpful?