name required | string |
name required | string Example: name=test The username to be deleted |
name required | string |
new_password required | string |
old_password required | string |
{- "name": "test",
- "old_password": "0000",
- "new_password": "1111"
}
{ }
name required | string |
password required | string |
description | string |
role required | string |
{- "name": "test",
- "password": "test",
- "role": "Viewer",
- "description": "create new user test and set him to viewer"
}
{ }
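A minimal curl sketch of the user-creation request above. The base URL, path, and token are placeholders, not values from this reference (Neuron-family deployments typically expose the API under something like http://127.0.0.1:7000/api/v2 and require a bearer token from the login endpoint); adjust them to your installation.
# $BASE and $TOKEN are assumed placeholders
curl -X POST "$BASE/users" \
  -H "Authorization: Bearer $TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"name":"test","password":"test","role":"Viewer","description":"create new user test and set him to viewer"}'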
name required | string |
name required | string Example: name=test The username to be deleted |
name required | string |
description | string |
role required | string |
{- "name": "test",
- "role": "Viewer",
- "description": "change user test's role to viewer"
}
{ }
{- "version": "3.1.0-beta.3",
- "systemStatus": "0",
- "neuronRunningTime": "6332",
- "neuronStatus": "0",
- "neuronMemoryUsed": "8364032",
- "neuronMemoryTotal": "4125425664",
- "ekuiperRunningTime": "6332",
- "ekuiperStatus": "0",
- "ekuiperMemoryUsed": "84316160",
- "ekuiperMemoryTotal": "4125425664",
- "submoduleStatus": {
- "alert": false,
- "monitor": false,
- "syslog": false
}, - "hwToken": "m/djk5abRRToVTyBBCXkUVyMumgbYmo2PdwdjnVeqn8=",
- "os": "Debian GNU/Linux 11",
- "kernel": "5.10.124-linuxkit",
- "arch": "x86_64",
- "clib": "glibc-2.31"
}
addr required | string Address of the log remote-forwarding server. |
enabled required | boolean Whether to enable log remote forwarding. true: enable; false: disable |
network required | string Currently only udp4 is supported. |
priority required | string One of emerg/alert/crit/err/warning/notice/info/debug |
tag required | string Syslog protocol tag field, used by the syslog server to identify which NeuronEX client sent the syslog message. |
{- "enabled": true,
- "addr": "localhost:10514",
- "priority": 6,
- "tag": "neuronex"
}
{ }
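As a sketch, the log-forwarding configuration above can be submitted with curl; only the request body is documented in this section, so the endpoint URL and token are left as placeholders.
# $ENDPOINT and $TOKEN are assumed placeholders for this section's URL and a login token
curl -X POST "$ENDPOINT" \
  -H "Authorization: Bearer $TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"enabled":true,"addr":"localhost:10514","priority":6,"tag":"neuronex"}'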
serverName required | string |
url required | string |
clientId required | string |
clientSecret required | string |
type required | string |
enabled required | boolean |
{- "name": "sso1",
- "clientId": "VDx2v0usxxx",
- "clientSecret": "1uu9c5xxxx",
- "type": "oauth2",
- "enabled": true
}
{ }
{- "name": "sso1",
- "clientId": "VDx2v0usxxx",
- "clientSecret": "1uu9c5xxxx",
- "type": "oauth2",
- "enabled": false
}
enabled | string Optional, query configurations by 'enabled' field |
[
  {
    "name": "sso1",
    "clientId": "8I744Myxxxx",
    "clientSecret": "5jMnFyxxxx",
    "type": "oauth2",
    "enabled": true
  },
  {
    "name": "sso2",
    "clientId": "VDx2v0uxxxx",
    "clientSecret": "1uu9c5xxxx",
    "type": "oauth2",
    "enabled": false
  }
]
name required | string |
url required | string |
clientId required | string |
clientSecret required | string |
type required | string |
enabled | boolean Default: false |
{- "name": "sso1",
- "clientId": "VDx2v0usxxx",
- "clientSecret": "1uu9c5xxxx",
- "type": "oauth2",
- "enabled": false
}
{ }
serverName required | string |
code required | string Example: code=5068471130381875 |
{- "token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2MzcyODcxNjMsImlhdCI6MTYzNzIwMDc2MywiaXNzIjoiRU1RIFRlY2hub2xvZ2llcyBDby4sIEx0ZCBBbGwgcmlnaHRzIHJlc2VydmVkLiIsInBhc3MiOiIwMDAwIiwidXNlciI6ImFkbWluIn0.2EZzPC9djErrCeYNrK2av0smh-eKxDYeyu7cW4MyknI"
}
{- "licenseType": "TRIAL",
- "customerName": "EMQ Technologies Inc.",
- "customerContact": "support@emqx.io",
- "startDate": "2023-12-06 03:28:25",
- "expiredDate": "2123-12-06 03:28:25",
- "expired": false,
- "totalTagCount": 30,
- "usedTagCount": 0,
- "enabledPlugins": [
- "ABB COMLI",
- "Allen-Bradley ControlLogix 5500",
- "Allen-Bradley DF1",
- "Allen-Bradley MicroLogix 1400",
- "BACnet/IP",
- "Beckhoff ADS",
- "DLT645-1997",
- "DLT645-2007",
- "EtherNet/IP(CIP)",
- "GE SRTP",
- "HJ212-2017",
- "HOSTLINK CMODE",
- "IEC60870-5-104",
- "IEC61850",
- "Inovance Modbus TCP",
- "KNXnet/IP",
- "KUKA ETHERNET KRL TCP",
- "Mitsubishi 1E",
- "Mitsubishi 3E",
- "Mitsubishi FX",
- "Modbus RTU",
- "Modbus TCP",
- "Modbus TCP QH",
- "NON A11",
- "OPC UA",
- "Omron FINS TCP",
- "Omron FINS UDP",
- "Panasonic Mewtocol",
- "Profinet IO",
- "SECS GEM HSMS",
- "Siemens FetchWrite",
- "Siemens RK512",
- "Siemens S7 ISOTCP",
- "Siemens S7 ISOTCP for 300/400",
- "SparkPlugB",
- "WebSocket"
], - "hardwareToken": "FdVCScD63F5gGcB0eO5OuwQsxg5jJoa+YOxyOub9c5g=",
- "ekuiperOn": true,
- "officialEmail": "support@emqx.io"
}
license required | string |
{
  "license": "CAIaFUVNUSBUZWNobm9s..."
}
{- "type": "TRIAL",
- "customerName": "EMQ Technologies Inc.",
- "customerContact": "support@emqx.io",
- "startDate": "2023-12-06",
- "expiredDate": "2123-12-06",
- "expired": false,
- "totalTagCount": 1000,
- "usedTagCount": 1,
- "enabledPlugins": [
- "ABB COMLI",
- "Allen-Bradley ControlLogix 5500",
- "Allen-Bradley DF1",
- "Allen-Bradley MicroLogix 1400",
- "BACnet/IP",
- "Beckhoff ADS",
- "DLT645-1997",
- "DLT645-2007",
- "EtherNet/IP(CIP)",
- "GE SRTP",
- "HJ212-2017",
- "HOSTLINK CMODE",
- "IEC60870-5-104",
- "IEC61850",
- "Inovance Modbus TCP",
- "KNXnet/IP",
- "KUKA ETHERNET KRL TCP",
- "Mitsubishi 1E",
- "Mitsubishi 3E",
- "Mitsubishi FX",
- "Modbus RTU",
- "Modbus TCP",
- "Modbus TCP QH",
- "NON A11",
- "OPC UA",
- "Omron FINS TCP",
- "Omron FINS UDP",
- "Panasonic Mewtocol",
- "Profinet IO",
- "SECS GEM HSMS",
- "Siemens FetchWrite",
- "Siemens RK512",
- "Siemens S7 ISOTCP",
- "Siemens S7 ISOTCP for 300/400",
- "SparkPlugB",
- "WebSocket"
], - "hardwareToken": "",
- "ekuiperOn": true,
- "officialEmail": "support@emqx.io"
}
activationCode required | string |
{- "activationCode": "6YnJBVBOcL2pBQgwnPdtdtW..."
}
{- "code": 0,
- "message": "string"
}
license required | string |
{
  "license": "CAIaFUVNUSBUZWNobm9s..."
}
{- "code": 0,
- "message": "string"
}
license required | string |
{
  "license": "CAIaFUVNUSBUZWNobm9s..."
}
{- "code": 0,
- "message": "string"
}
enable required | boolean |
enableSSL required | boolean |
address required | string |
username required | string |
password required | string |
description required | string |
certificationRaw required | string |
privateKeyRaw required | string |
rootCARaw required | string |
registerId required | string |
{- "enable": false,
- "enableSSL": false,
- "address": "tcp://127.0.0.1:1883",
- "username": "admin",
- "password": "password",
- "description": "test",
- "certificationRaw": "cert_base64",
- "privateKeyRaw": "key_base64",
- "rootCARaw": "ca_base64",
- "registerId": "1234567890"
}
{ }
addr | string Address of the log remote-forwarding server. |
enabled | boolean Whether to enable log remote forwarding. true: enable; false: disable |
network | string Currently only udp4 is supported. |
priority | string One of emerg/alert/crit/err/warning/notice/info/debug |
tag | string Syslog protocol tag field, used by the syslog server to identify which NeuronEX client sent the syslog message. |
{- "endpointUrl": "127.0.0.1:1456",
- "serviceId": "123456",
- "interval": 10
}
{ }
category required | integer Category 1: ekuiper, 2: neuron |
nodes | string Example: nodes=south1,south3 List of nodes to be queried |
rules | string Example: rules=rule1,rule2 List of rules to be queried |
{- "streams": {
- "neuronStream": "CREATE STREAM neuronStream() WITH (TYPE=\"neuron\",FORMAT=\"json\",CONF_KEY=\"default\",SHARED=\"TRUE\");"
}, - "tables": { },
- "rules": { },
- "nativePlugins": { },
- "portablePlugins": { },
- "sourceConfig": { },
- "sinkConfig": { },
- "connectionConfig": { },
- "Service": { },
- "Schema": { },
- "uploads": { }
}
category required | integer Category 1: ekuiper, 2: neuron |
template | string Optional. Node data to be delivered if category is 1, or rule data to be delivered if category is 2 |
ruleData | string Optional. Rule data to be delivered |
NodeData | string Optional. Node data to be delivered |
operationId | integer Optional. If operationId > 0, the operation will be logged and pushed |
{- "template": "{\n \"streams\": {\n \"neuronStream\": \"CREATE STREAM neuronStream() WITH (TYPE=\\\"neuron\\\",FORMAT=\\\"json\\\",CONF_KEY=\\\"default\\\",SHARED=\\\"TRUE\\\");\"\n },\n \"tables\": {},\n \"rules\": {},\n \"nativePlugins\": {},\n \"portablePlugins\": {},\n \"sourceConfig\": {},\n \"sinkConfig\": {},\n \"connectionConfig\": {},\n \"Service\": {},\n \"Schema\": {},\n \"uploads\": {}\n}"
}
{ }
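Because the template field carries an entire eKuiper ruleset as a single escaped JSON string, hand-writing the quoting is error-prone. A small sketch, assuming jq is available and the ruleset lives in a hypothetical file ruleset.json, that builds the delivery body:
# compact ruleset.json and embed it as the string value of "template"
jq -n --arg t "$(jq -c . ruleset.json)" '{template: $t}' > delivery-body.json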
{- "server": {
- "in": 75668145,
- "out": 75668144
}, - "outSend": {
- "in": 75668145,
- "out": 2513
}, - "sse": {
- "in": 75668145,
- "out": 75668144,
- "neuron": {
- "in": 464,
- "out": 464
}, - "neuronex": {
- "in": 75665075,
- "out": 75665074
}, - "ekuiper": {
- "in": 93,
- "out": 93
}, - "others": {
- "in": 2513,
- "out": 2513
}, - "clientNumber": 0
}
}
type | string Example: type=0 |
priority | string Example: priority=10 |
2023-12-20T07:55:49Z neuronex[1]: level=error msg="forward syslog to server error [&{0x174ba08 true 0xc00054e000 neuronex neuronex-231220 6 localhost:10514}][failed to get connection]" file="syslog/out_send.go:113" func="internal/syslog.(*SendOut).Process"
[
  { "ruleId": "1", "name": "node_exception_alert", "desc": "Data collection node exception alert" },
  { "ruleId": "2", "name": "rule_exception_alert", "desc": "Data processing rule exception alert" },
  { "ruleId": "3", "name": "restart_alert", "desc": "neuronex restart alert" }
]
{- "enabled": true,
- "interval": 15,
- "fireChecks": 0,
- "resolveChecks": 0,
- "alertsConfig": [
- {
- "ruleId": "3",
- "fireChecks": 1,
- "resolveChecks": -1,
- "ruleName": "restart_alert"
}, - {
- "ruleId": "1",
- "fireChecks": 3,
- "resolveChecks": 3,
- "ruleName": "node_exception_alert"
}, - {
- "ruleId": "2",
- "fireChecks": 1,
- "resolveChecks": 1,
- "ruleName": "rule_exception_alert"
}
]
}
enabled required | boolean false: disabled; true: enabled. If the value is false, the other fields are not required. |
webhookUrl required | string Required if enabled is true; the webhook that alert events are pushed to. |
interval required | integer Required if enabled is true. |
fireChecks | integer Required if alertsConfig is empty; the default fireChecks for all rules in alertsConfig. |
resolveChecks | integer Required if alertsConfig is empty; the default resolveChecks for all rules in alertsConfig. |
Array of objects Optional. The settings of the rule alerts to be pushed; if empty, all alert types are pushed. |
{- "enabled": true,
- "interval": 15,
- "fireChecks": 0,
- "resolveChecks": 0,
- "alertsConfig": [
- {
- "ruleId": "3",
- "fireChecks": 1,
- "resolveChecks": -1
}, - {
- "ruleId": "1",
- "fireChecks": 3,
- "resolveChecks": 3
}, - {
- "ruleId": "2",
- "fireChecks": 1,
- "resolveChecks": 1
}
]
}
{ }
[
  { "metricId": "1", "name": "os_info", "desc": "OS distro and kernel version" },
  { "metricId": "2", "name": "cpu_percent", "desc": "Total CPU utilisation percentage" },
  { "metricId": "3", "name": "cpu_cores", "desc": "Number of CPU cores" },
  { "metricId": "4", "name": "mem_total_bytes", "desc": "Total installed memory in bytes" }
]
{- "enabled": true,
- "interval": 15,
- "labels": {
- "instance": "127.0.0.1:8111",
- "org_id": "76995af8",
- "project_id": "c1c69f9d",
- "service_id": "55f20f8b"
}, - "metrics": [
- {
- "metricId": "13",
- "name": "north_nodes_total",
- "desc": "Number of north nodes"
}, - {
- "metricId": "14",
- "name": "north_running_nodes_total",
- "desc": "Number of north nodes in running state"
}, - {
- "metricId": "15",
- "name": "north_disconnected_nodes_total",
- "desc": "Number of north nodes disconnected"
}, - {
- "metricId": "16",
- "name": "south_nodes_total",
- "desc": "Number of south nodes"
}, - {
- "metricId": "17",
- "name": "south_running_nodes_total",
- "desc": "Number of south nodes in running state"
}, - {
- "metricId": "20",
- "name": "license_max_tags",
- "desc": "License tags limit"
}, - {
- "metricId": "21",
- "name": "license_used_tags",
- "desc": "License total used tags"
}, - {
- "metricId": "1031",
- "name": "kuiper_op_records_in_total",
- "desc": "Total number of messages received by the operation of kuiper_op"
}, - {
- "metricId": "1032",
- "name": "kuiper_op_records_out_total",
- "desc": "Total number of messages published by the operation of kuiper_op"
}, - {
- "metricId": "2000",
- "name": "running_nodes_total",
- "desc": "Number of nodes in running state"
}, - {
- "metricId": "2001",
- "name": "exception_nodes_total",
- "desc": "Number of nodes in running state"
}, - {
- "metricId": "2002",
- "name": "south_connected_nodes_total",
- "desc": "Number of south nodes connected"
}, - {
- "metricId": "2003",
- "name": "south_exception_nodes_total",
- "desc": "Number of south in exception state"
}, - {
- "metricId": "2004",
- "name": "north_connected_nodes_total",
- "desc": "Number of north nodes connected"
}, - {
- "metricId": "2005",
- "name": "north_exception_nodes_total",
- "desc": "Number of north in exception state"
}, - {
- "metricId": "2006",
- "name": "running_rules_total",
- "desc": "Number of rules in running state"
}, - {
- "metricId": "2007",
- "name": "stopped_rules_total",
- "desc": "Number of rules in stopped state"
}, - {
- "metricId": "2008",
- "name": "not_running_node",
- "desc": "Node not in running state"
}, - {
- "metricId": "2009",
- "name": "exception_node",
- "desc": "Node in exception state"
}, - {
- "metricId": "3000",
- "name": "not_running_rule",
- "desc": "Rule not in running state"
}
]
}
enabled required | boolean false: disabled; true: enabled. If the value is false, the other fields are not required. |
prometheusUrl required | string Required if enabled is true; the URL of the pushgateway, e.g. http://localhost:9091 |
prometheusUsername | string Optional; the username for the pushgateway |
prometheusPassword | string Optional; the password for the pushgateway |
metricIds | Array of strings Optional; array of metric ids. A value of null means push all metric data. |
interval required | integer Required if enabled is true; the interval for generating metrics data, in seconds |
object Optional; additional labels for the metrics, pushed to the pushgateway along with the metrics data for grouping. Key: label name; value: label value |
{- "enabled": false,
- "prometheusUsername": "",
- "prometheusPassword": "",
- "metricIds": [
- "13",
- "14",
- "15",
- "16",
- "17",
- "20",
- "21",
- "1031",
- "1032",
- "2000",
- "2001",
- "2002",
- "2003",
- "2004",
- "2005",
- "2006",
- "2007",
- "2008",
- "2009",
- "3000"
], - "interval": 15,
- "labels": {
- "category": "4",
- "org_id": "ef5f285a",
- "project_id": "c46ffe05",
- "service_id": "25a967f3",
- "type": "4"
}
}
{ }
"license_max_tags 30\nlicense_used_tags 0\nnorth_nodes_total 1\nnorth_running_nodes_total 1\nnorth_disconnected_nodes_total 1\nsouth_nodes_total 0\nsouth_running_nodes_total 0\nrunning_nodes_total 1\nexception_nodes_total 1\nsouth_connected_nodes_total 0\nsouth_exception_nodes_total 0\nnorth_connected_nodes_total 0\nnorth_exception_nodes_total 1\nrunning_rules_total 0\nstopped_rules_total 0\n"
Add Node
name required | string (node name) |
plugin required | string (plugin name) |
Array of objects (setting (optional)) |
{- "name": "modbus-tcp-node",
- "plugin": "Modbus TCP"
}
{- "error": 0
}
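A hedged curl sketch of the Add Node call. The path and port follow Neuron's v2 REST convention (http://127.0.0.1:7000/api/v2/node) and may differ for your deployment; $TOKEN is a placeholder for a login token.
curl -X POST "http://127.0.0.1:7000/api/v2/node" \
  -H "Authorization: Bearer $TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"name":"modbus-tcp-node","plugin":"Modbus TCP"}'
# expected response on success: {"error": 0}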
Please refer to Plugin Setting for the configuration parameters of each plugin.
node required | string |
required | object The parameter fields in the JSON vary according to the plugin |
{- "node": "modbus-node",
- "params": {
- "param1": 1,
- "param2": "1.1.1.1",
- "param3": true,
- "param4": 11.22
}
}
{- "error": 0
}
group | string group name |
node | string node name |
interval | integer read/upload interval (ms) |
{- "group": "gconfig1",
- "node": "modbus-node",
- "interval": 10000
}
{- "error": 0
}
node required | string node name |
group required | string group name |
new_name | string group new name |
interval | integer read/upload interval (ms) |
{- "node": "modbus-node",
- "group": "gconfig1",
- "new_name": "group1",
- "interval": 10000
}
{- "error": 0
}
library required | string plugin library name |
schema_file required | string <binary> plugin schema file |
so_file required | string <binary> plugin so file |
""
{- "error": 0
}
{- "plugins": [
- {
- "kind": 1,
- "node_type": 1,
- "name": "Modbus TCP",
- "library": "libplugin-modbus-tcp.so",
- "description": "description",
- "description_zh": "描述",
- "schema": "modbus-tcp"
}, - {
- "kind": 1,
- "node_type": 2,
- "name": "MQTT",
- "library": "libplugin-mqtt.so",
- "description": "Neuron northbound MQTT plugin bases on NanoSDK.",
- "description_zh": "基于 NanoSDK 的 Neuron 北向应用 MQTT 插件",
- "schema": "mqtt"
}
]
}
{- "tag_regex": [
- {
- "type": 3,
- "regex": "^[0-9]+![3-4][0-9]+(#B|#L|)$"
}, - {
- "type": 4,
- "regex": "^[0-9]+![3-4][0-9]+(#B|#L|)$"
}, - {
- "type": 5,
- "regex": "^[0-9]+![3-4][0-9]+(#BB|#BL|#LL|#LB|)$"
}, - {
- "type": 6,
- "regex": "^[0-9]+![3-4][0-9]+(#BB|#BL|#LL|#LB|)$"
}, - {
- "type": 7,
- "regex": "^[0-9]+![3-4][0-9]+(#B|#L|)$"
}, - {
- "type": 8,
- "regex": "^[0-9]+![3-4][0-9]+(#B|#L|)$"
}, - {
- "type": 9,
- "regex": "^[0-9]+![3-4][0-9]+(#BB|#BL|#LL|#LB|)$"
}, - {
- "type": 10,
- "regex": "^[0-9]+![3-4][0-9]+(#B|#L|)$"
}, - {
- "type": 11,
- "regex": "^[0-9]+!([0-1][0-9]+|[3-4][0-9]+\\.([0-9]|[0-1][0-5]))$"
}, - {
- "type": 13,
- "regex": "^[0-9]+![3-4][0-9]+\\.[0-9]+(H|L|)$"
}
], - "group_interval": 1000,
- "connection_mode": {
- "name": "Connection Mode",
- "name_zh": "连接模式",
- "description": "Neuron as the client, or as the server",
- "description_zh": "Neuron 作为客户端或服务端",
- "attribute": "required",
- "type": "map",
- "default": 0,
- "valid": {
- "map": [
- {
- "key": "Client",
- "value": 0
}, - {
- "key": "Server",
- "value": 1
}
]
}
}, - "interval": {
- "name": "Send Interval",
- "name_zh": "指令发送间隔",
- "description": "Send reading instruction interval(ms)",
- "description_zh": "发送读指令时间间隔,单位为毫秒",
- "attribute": "required",
- "type": "int",
- "default": 20,
- "valid": {
- "min": 0,
- "max": 3000
}
}, - "host": {
- "name": "IP Address",
- "name_zh": "IP地址",
- "description": "Local IP in server mode, remote device IP in client mode",
- "description_zh": "服务端模式中填写本地 IP,客户端模式中填写目标设备 IP",
- "attribute": "required",
- "type": "string",
- "valid": {
- "regex": "/^((2[0-4]\\d|25[0-5]|[01]?\\d\\d?)\\.){3}(2[0-4]\\d|25[0-5]|[01]?\\d\\d?)$/",
- "length": 30
}
}, - "port": {
- "name": "Port",
- "name_zh": "端口号",
- "description": "Local port in server mode, remote device port in client mode",
- "description_zh": "服务端模式中填写本地端口号,客户端模式中填写远程设备端口号",
- "attribute": "required",
- "type": "int",
- "default": 502,
- "valid": {
- "min": 1,
- "max": 65535
}
}, - "timeout": {
- "name": "Connection Timeout",
- "name_zh": "连接超时时间",
- "description": "Connection timeout(ms)",
- "description_zh": "连接超时时间,单位为毫秒",
- "attribute": "required",
- "type": "int",
- "default": 3000,
- "valid": {
- "min": 1000,
- "max": 65535
}
}
}
app required | string app name |
driver required | string substring match against driver name |
group required | string substring match against group name |
object when using the MQTT plugin, the topic field needs to be added |
{- "app": "mqtt",
- "driver": "modbus-tcp",
- "group": "group-1",
- "params": {
- "topic": "/neuron/mqtt/group-1"
}
}
{- "error": 0
}
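A curl sketch of subscribing an MQTT app node to a southbound group, including the topic parameter described above. The /subscribe path follows Neuron's v2 REST convention; $BASE and $TOKEN are placeholders.
curl -X POST "$BASE/subscribe" \
  -H "Authorization: Bearer $TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"app":"mqtt","driver":"modbus-tcp","group":"group-1","params":{"topic":"/neuron/mqtt/group-1"}}'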
app required | string app name |
driver required | string driver name |
group required | string driver node group name |
required | object |
""
{- "error": 0
}
app required | string app name |
driver required | string driver name |
group required | string driver node group name |
required | object When using the MQTT plugin, the topic field needs to be added |
{- "app": "mqtt",
- "driver": "modbus-tcp",
- "group": "group-1",
- "params": {
- "topic": "/neuron/mqtt/group-1"
}
}
{- "error": 0
}
{- "groups": [
- {
- "driver": "modbus-tcp",
- "group": "group-1",
- "params": {
- "topic": "/neuron/mqtt/group-1"
}
}, - {
- "driver": "modbus-tcp",
- "group": "group-2",
- "params": {
- "topic": "/neuron/mqtt/group-2"
}
}
]
}
app required | string app name |
required | Array of objects |
{- "app": "mqtt",
- "groups": [
- {
- "driver": "modbus1",
- "group": "group1",
- "params": {
- "topic": "/neuron/mqtt/modbus1/group1"
}
}, - {
- "driver": "modbus2",
- "group": "group2",
- "params": {
- "topic": "/neuron/mqtt/modbus2/group2"
}
}
]
}
{- "error": 0
}
Call this API to change a node's log level to debug; it automatically reverts to the default level after about ten minutes.
node | string node name |
level required | string Log level; one of debug, info, notice, warn, error, fatal |
core | string Whether to switch the core log level. Optional; defaults to true. |
""
{- "error": 0
}
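A sketch of switching a node to debug logging, assuming Neuron's v2 convention of a log/level endpoint; $BASE and $TOKEN are placeholders.
curl -X PUT "$BASE/log/level" \
  -H "Authorization: Bearer $TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"node":"modbus-node","level":"debug","core":true}'
# the level reverts to the default automatically after about ten minutes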
category | string one of |
node | string filter with node name, only meaningful when |
"# HELP core_dumped Whether there is any core dump\n# TYPE core_dumped gauge\ncore_dumped 0\n# HELP uptime_seconds Uptime in seconds\n# TYPE uptime_seconds counter\nuptime_seconds 314\n# HELP north_nodes_total Number of north nodes\n# TYPE north_nodes_total gauge\nnorth_nodes_total 1\n# HELP north_running_nodes_total Number of north nodes in running state\n# TYPE north_running_nodes_total gauge\nnorth_running_nodes_total 1\n# HELP north_disconnected_nodes_total Number of north nodes disconnected\n# TYPE north_disconnected_nodes_total gauge\nnorth_disconnected_nodes_total 1\n# HELP south_nodes_total Number of south nodes\n# TYPE south_nodes_total gauge\nsouth_nodes_total 1\n# HELP south_running_nodes_total Number of south nodes in running state\n# TYPE south_running_nodes_total gauge\nsouth_running_nodes_total 0\n# HELP south_disconnected_nodes_total Number of south nodes disconnected\n# TYPE south_disconnected_nodes_total gauge\nsouth_disconnected_nodes_total 1\n# HELP send_msgs_total Total number of messages sent\n# TYPE send_msgs_total counter\nsend_msgs_total{node=\"data-stream-processing\"} 0\n# HELP send_msg_errors_total Total number of errors sending messages\n# TYPE send_msg_errors_total counter\nsend_msg_errors_total{node=\"data-stream-processing\"} 0\n# HELP recv_msgs_total Total number of messages received\n# TYPE recv_msgs_total counter\nrecv_msgs_total{node=\"data-stream-processing\"} 0\n# HELP last_rtt_ms Last request round trip time in milliseconds\n# TYPE last_rtt_ms gauge\nlast_rtt_ms{node=\"modbus\"} 9999\n# HELP send_bytes Total number of bytes sent\n# TYPE send_bytes gauge\nsend_bytes{node=\"modbus\"} 0\n# HELP recv_bytes Total number of bytes received\n# TYPE recv_bytes gauge\nrecv_bytes{node=\"modbus\"} 0\n# HELP tag_reads_total Total number of tag reads including errors\n# TYPE tag_reads_total counter\ntag_reads_total{node=\"modbus\"} 0\n# HELP tag_read_errors_total Total number of tag read errors\n# TYPE tag_read_errors_total counter\ntag_read_errors_total{node=\"modbus\"} 0\n# HELP group_tags_total Total number of tags in the group\n# TYPE group_tags_total gauge\ngroup_tags_total{node=\"modbus\",group=\"grp\"} 1\n# HELP group_last_send_msgs Number of messages sent on last group timer invocation\n# TYPE group_last_send_msgs gauge\ngroup_last_send_msgs{node=\"modbus\",group=\"grp\"} 0\n# HELP group_last_timer_ms Time in milliseconds consumed on last group timer invocation\n# TYPE group_last_timer_ms gauge\ngroup_last_timer_ms{node=\"modbus\",group=\"grp\"} 0"
node required | string node name |
group required | string group name |
sync | boolean synchronous read, default false |
object |
{- "node": "modbus-tcp-1",
- "group": "config_modbus_tcp_sample_2",
- "sync": false,
- "query": {
- "name": "data",
- "description": "switch"
}
}
{- "tags": [
- {
- "name": "data1",
- "value": 1
}, - {
- "name": "data2",
- "error": 2014
}, - {
- "name": "data3",
- "value": true
}
]
}
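A curl sketch of the read request above, assuming a /read endpoint per Neuron's v2 convention; $BASE and $TOKEN are placeholders.
curl -X POST "$BASE/read" \
  -H "Authorization: Bearer $TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"node":"modbus-tcp-1","group":"config_modbus_tcp_sample_2","sync":false}'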
node required | string node name |
group required | string group name |
sync | boolean synchronous read, default false |
name | string tag name substring match |
description | string tag description substring match |
currentPage required | integer current page |
pageSize required | integer number of tags per page |
isError required | boolean return error tags only |
{- "node": "modbus-tcp-1",
- "group": "config_modbus_tcp_sample_2",
- "sync": false,
- "query": {
- "name": "data",
- "description": "switch",
- "currentPage": 1,
- "pageSize": 10,
- "isError": true
}
}
{- "meta": {
- "currentPage": 1,
- "pageSize": 10,
- "total": 1
}, - "items": [
- {
- "name": "tag1",
- "type": 4,
- "address": "1!400001",
- "attribute": 8,
- "description": "",
- "precison": 0,
- "decimal": 0,
- "bias": 0,
- "value": 123
}
]
}
node required | string |
group required | string |
tag required | string |
value required | integer |
{- "node": "modbus-tcp-1",
- "group": "config_modbus_tcp_sample_2",
- "tag": "tag1",
- "value": 1234
}
{- "error": 0
}
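A matching write sketch, assuming a /write endpoint per Neuron's v2 convention; $BASE and $TOKEN are placeholders.
curl -X POST "$BASE/write" \
  -H "Authorization: Bearer $TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"node":"modbus-tcp-1","group":"config_modbus_tcp_sample_2","tag":"tag1","value":1234}'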
driver required | string |
group required | string |
tag required | string |
address required | string |
attribute required | |
type required | integer |
precision required | integer |
decimal required | integer |
bias required | integer |
{- "driver": "1",
- "group": "1",
- "tag": "tag1",
- "address": "1!400002",
- "attribute": 8,
- "type": 3,
- "precision": 0,
- "decimal": 0,
- "bias": 0
}
{- "value": 29540
}
This API is used to get all Config Keys under a specific source name.
Note: When retrieving Config Keys, if the properties contain a password field (case-insensitive, such as Password), the API will not return the actual password value, but instead replace it with "******" to conceal the password information.
name required | string Example: mqtt Source name, supports built-in sources and extended sources. The built-in sources include mqtt, redis, neuron, memory, httppull, httppush, file, edgex; extended sources include random, sql, video, zmq and user-defined sources |
{- "amd_broker": {
- "insecureSkipVerify": false,
- "protocolVersion": "3.1.1",
- "qos": 1,
- "server": "tcp://122.9.166.75:1883",
- "password": "******"
}, - "default": {
- "qos": 2,
- "server": "tcp://emqx:1883"
}, - "demo_conf": {
- "qos": 0,
- "server": "tcp://10.211.55.6:1883"
}
}
This API is used to delete a Config Key configuration under a specific source name.
name required | string Example: mqtt Source name, supports built-in sources and extended sources. The built-in sources include mqtt, redis, neuron, memory, httppull, httppush, file, edgex; extended sources include random, sql, video, zmq and user-defined sources |
confKey required | string Example: demo_conf Config Key name. Taking the above as an example, the Config Keys are amd_broker, default, and demo_conf in sequence. |
null
This API is used to register a Config Key under a specific source name.
name required | string Source name, supports built-in sources and extended sources. The built-in sources include mqtt, redis, neuron, memory, httppull, httppush, file, edgex; extended sources include random, sql, video, zmq and user-defined sources |
confKey required | string Config Key name to register |
name required | string (Source name) supports built-in sources and extended sources. The built-in sources include mqtt, redis, neuron, memory, httppull, httppush, file, edgex; extended sources include random, sql, video, zmq and user-defined sources |
confKey required | string (Config Key name to register) |
""
{- "demo_conf": {
- "qos": 0,
- "server": "tcp://10.211.55.6:1883"
}
}
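A sketch of registering the demo_conf Config Key shown above, following eKuiper's metadata API convention (verify the exact path against your eKuiper version):
curl -X PUT "http://127.0.0.1:9081/metadata/sources/mqtt/confKeys/demo_conf" \
  -H "Content-Type: application/json" \
  -d '{"qos": 0, "server": "tcp://10.211.55.6:1883"}'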
By dynamically reloading configuration, parameters such as debug and timezone can be updated for a running eKuiper without restarting the application. Currently supported dynamically reloadable parameters:
debug
consoleLog
fileLog
timezone
debug | boolean |
consoleLog | boolean |
fileLog | boolean |
timezone | string |
{- "debug": true,
- "consoleLog": true,
- "fileLog": true,
- "timezone": "UTC"
}
null
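A sketch of reloading eKuiper's basic configuration at runtime, using the 9081 REST port that appears elsewhere in this document and a PATCH on /configs per eKuiper's convention (verify against your version):
curl -X PATCH "http://127.0.0.1:9081/configs" \
  -H "Content-Type: application/json" \
  -d '{"debug": true, "timezone": "UTC"}'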
By default, the API resets all existing data and then imports the new data into the system. The user can specify the partial=1 parameter in the HTTP URL to keep the existing data and apply the new data.
The API supports specifying data by means of text content or file URIs.
Example 1: Import by text content
POST http://{{host}}/data/import
Content-Type: application/json
{
"content": "{json of the ruleset}"
}
Example 2: Import by file URI
POST http://{{host}}/data/import
Content-Type: application/json
{
"file": "file:///tmp/a.json"
}
Example 3: Import data via file URI and exit (for plug-ins and static schema updates, users need to ensure that eKuiper can be restarted after exiting)
POST http://{{host}}/data/import?stop=1
Content-Type: application/json
{
"file": "file:///tmp/a.json"
}
Example 4: Keep the old data and import new data (overwrite the tables/streams/rules/source config/sink config. install plugins/schema if not exist, else ignore them)
POST http://{{host}}/data/import?partial=1
Content-Type: application/json
{
"file": "file:///tmp/a.json"
}
The file format for importing and exporting data is JSON, which can contain streams, tables, rules, plugins, source yaml and so on. Each type holds key-value pairs of the name and the creation statement. In the following example file, we define streams, rules, tables, plugins, source config and sink config.
{
"streams": {
"demo": "CREATE STREAM demo () WITH (DATASOURCE=\"users\", FORMAT=\"JSON\")"
},
"tables": {
"T110":"\n CREATE TABLE T110\n (\n S1 string\n )\n WITH (DATASOURCE=\"test.json\", FORMAT=\"json\", TYPE=\"file\", KIND=\"scan\", );\n "
},
"rules": {
"rule1": "{\"id\": \"rule1\",\"sql\": \"SELECT * FROM demo\",\"actions\": [{\"log\": {}}]}",
"rule2": "{\"id\": \"rule2\",\"sql\": \"SELECT * FROM demo\",\"actions\": [{ \"log\": {}}]}"
},
"nativePlugins":{
"functions_image":"{\"name\":\"image\",\"file\":\"https://packages.emqx.net/kuiper-plugins/1.8.1/debian/functions/image_amd64.zip\",\"shellParas\":[]}",
"sources_video":"{\"name\":\"video\",\"file\":\"https://packages.emqx.net/kuiper-plugins/1.8.1/debian/sources/video_amd64.zip\",\"shellParas\":[]}"
},
"portablePlugins":{
},
"sourceConfig":{
"mqtt":"{\"td\":{\"insecureSkipVerify\":false,\"password\":\"public\",\"protocolVersion\":\"3.1.1\",\"qos\":1,\"server\":\"tcp://broker.emqx.io:1883\",\"username\":\"admin\"},\"test\":{\"insecureSkipVerify\":false,\"password\":\"public\",\"protocolVersion\":\"3.1.1\",\"qos\":1,\"server\":\"tcp://127.0.0.1:1883\",\"username\":\"admin\"}}"
},
"sinkConfig":{
"edgex":"{\"test\":{\"bufferLength\":1024,\"contentType\":\"application/json\",\"enableCache\":false,\"format\":\"json\",\"messageType\":\"event\",\"omitIfEmpty\":false,\"port\":6379,\"protocol\":\"redis\",\"sendSingle\":true,\"server\":\"localhost\",\"topic\":\"application\",\"type\":\"redis\"}}"
},
"connectionConfig":{
},
"Service":{
},
"Schema":{
},
"uploads":{
}
}
stop | integer Example: stop=1 for plug-ins and static schema updates, users need to ensure that eKuiper can be restarted after exiting |
partial | integer Example: partial=1 Keep the old data and import new data (overwrite the tables/streams/rules/source config/sink config. install plugins/schema if not exist, else ignore them) |
content | string (text content) |
file | string ( file URI) |
{- "content": "{json of the ruleset}"
}
{- "ErrorMsg": "",
- "ConfigResponse": {
- "streams": { },
- "tables": { },
- "rules": { },
- "nativePlugins": { },
- "portablePlugins": { },
- "sourceConfig": { },
- "sinkConfig": { },
- "connectionConfig": { },
- "Service": { },
- "Schema": { }
}
}
This API returns data import errors. If all returns are empty, it means that the import is completely successful.
{- "streams": { },
- "tables": { },
- "rules": { },
- "nativePlugins": { },
- "portablePlugins": { },
- "sourceConfig": { },
- "sinkConfig": { },
- "connectionConfig": { },
- "Service": { },
- "Schema": { },
- "uploads": { }
}
{- "streams": { },
- "tables": { },
- "rules": { },
- "nativePlugins": { },
- "portablePlugins": { },
- "sourceConfig": { },
- "sinkConfig": { },
- "connectionConfig": { },
- "Service": { },
- "Schema": { }
}
Export specific rules related data. The export API returns a file to download.
required | Array of strings The ids of the rules to export, e.g. ["rule1","rule2"] |
{- "streams": { },
- "tables": { },
- "rules": { },
- "nativePlugins": { },
- "portablePlugins": { },
- "sourceConfig": { },
- "sinkConfig": { },
- "connectionConfig": { },
- "Service": { },
- "Schema": { }
}
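A sketch of exporting two rules and saving the returned file, assuming the rule-export endpoint lives under /data (verify the exact path against your eKuiper version):
curl -X POST "http://127.0.0.1:9081/data/export/rules" \
  -H "Content-Type: application/json" \
  -d '["rule1","rule2"]' \
  -o ruleset-export.json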
The API accepts a JSON content to create a new plugin. The plugin is identified by the name, and the name must be unique. For example, if the plugin file is named Random, then the name of this plugin is random. The file field can use the http or https scheme, or the file scheme to refer to a local file path of the eKuiper server. It must be a zip file with: a compiled so file and the yaml file (only required for sources). If the plugin depends on some external dependencies, a bash script named install.sh can be provided to do the dependency installation. The names of the files must match the name of the plugin. A sample zip file for a source named random is random.zip.
Notice that install.sh may run on a system that already has the lib or package installed; make sure it checks the path before acting. Below is an example install.sh that installs a sample SDK lib.
#!/bin/sh
dir=/usr/local/mysdk
cur=$(dirname "$0")
echo "Base path $cur"
if [ -d "$dir" ]; then
    echo "SDK path $dir exists."
else
    echo "Creating SDK path $dir"
    mkdir -p "$dir"
    echo "Created SDK path $dir"
fi
apt install --no-upgrade unzip
if [ -d "$dir/lib" ]; then
    echo "SDK lib path $dir/lib exists."
else
    echo "Unzip SDK lib to path $dir"
    unzip "$cur/mysdk.zip" -d "$dir"
    echo "Unzipped SDK lib to path $dir"
fi
if [ -f "/etc/ld.so.conf.d/myconfig.conf" ]; then
    echo "/etc/ld.so.conf.d/myconfig.conf exists"
else
    echo "Copy conf file"
    cp "$cur/myconfig.conf" /etc/ld.so.conf.d/
    echo "Copied conf file"
fi
ldconfig
echo "Done"
type required | string value only can be "sources", "sinks", "functions","portables" |
name required | string |
file required | string |
{- "name": "random",
}
{- "name": "random",
- "file": "file:///var/plugins/sources/random.zip"
}
The API is used to print out the detailed definition of a plugin.
type required | string value only can be "sources", "sinks", "functions","portables" |
name required | string Path parameter |
{- "name": "plugin1",
- "version": "1.0.0"
}
The API is used for dropping a plugin. Notice that, for native plugins, the eKuiper server needs to be restarted for the deletion to take effect; until then, the current rules continue to run with the deleted native plugins. For portable plugins, the deletion takes effect immediately. Current rules that use the deleted plugin may encounter errors but won't stop, and they can continue running if an updated plugin with the same name is created later. If this is not expected, manually stop or delete those rules before deleting a plugin.
The user can pass a query parameter to decide if eKuiper should be stopped after a delete in order to make the deletion take effect. The parameter is stop, and only when its value is 1 will eKuiper be stopped. The user has to restart it manually.
type required | string value only can be "sources", "sinks", "functions","portables" |
name required | string |
stop | string Example: stop=1 |
{ }
Notice that, native plugins can be updated, but the new version will not take effect until the eKuiper server is restarted. Portable plugins can be updated, and the new version will take effect immediately even for the running rules. The request body is the same as the create plugin request.
type required | string value only can be "sources", "sinks", "functions","portables" |
name required | string |
{ }
The API is used for displaying all user defined functions which are defined across all plugins.
Unlike source and sink plugins, a function plugin can export multiple functions at once. The exported names must be unique globally across all plugins, so there is a one-to-many mapping between a plugin and its functions. Thus, we provide the show udf (user defined function) API to query all user defined functions so that users can check for name duplication, and the describe udf API to find out which plugin defines a function. We also provide the register functions API to register the udf list for an auto-loaded plugin.
[- "func1",
- "func2"
]
The API is used to find out the plugin which defines the UDF.
name required | string |
{- "name": "funcName",
- "plugin": "pluginName"
}
The API aims to register all exported functions in an auto-loaded function plugin, or to re-register them when the exported functions change. If the plugin was loaded by the CLI create command or the REST create API with the functions property specified, this is not needed. The register API persists the function list in the kv store; unless the exported functions change, users only need to register once.
plugin_name required | string |
{- "functions": [
- "func1",
- "func2"
]
}
According to the pluginHosts configuration in the file etc/kuiper.yaml, this API returns the list of plugins that can be installed on the local eKuiper instance. By default, it gets the list from https://packages.emqx.net.
type required | string value only can be "sources", "sinks", "functions","portables" |
name required | string |
file required | string |
""
The API accepts a JSON content and creates and starts a rule.
id required | string |
sql required | string |
required | Array of objects |
{- "id": "rule1",
- "sql": "SELECT * FROM demo",
- "actions": [
- {
- "log": { }
}
]
}
{ }
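For example, the rule above can be created against eKuiper's REST port (9081, as used elsewhere in this document):
curl -X POST "http://127.0.0.1:9081/rules" \
  -H "Content-Type: application/json" \
  -d '{"id":"rule1","sql":"SELECT * FROM demo","actions":[{"log":{}}]}'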
The API is used for printing the detailed definition of a rule.
id required | string id is the id or name of the rule. |
{- "sql": "SELECT * from demo",
- "actions": [
- {
- "log": { }
}, - {
- "mqtt": {
- "server": "tcp://127.0.0.1:1883",
- "topic": "demoSink"
}
}
]
}
The API accepts a JSON content and updates a rule.
id required | string id is the id or name of the old rule. |
id required | string |
sql required | string |
required | Array of objects |
{- "id": "rule1",
- "sql": "SELECT * FROM demo",
- "actions": [
- {
- "log": { }
}
]
}
{ }
The command is used to get the status of the rule. If the rule is running, the metrics will be retrieved in real time. The status can be:
id required | string |
"{\n \"source_demo_0_records_in_total\": 5,\n \"source_demo_0_records_out_total\": 5,\n \"source_demo_0_exceptions_total\": 0,\n \"source_demo_0_process_latency_ms\": 0,\n \"source_demo_0_buffer_length\": 0,\n \"source_demo_0_last_invocation\": \"2020-01-02T11:28:33.054821\",\n \"op_filter_0_records_in_total\": 5,\n \"op_filter_0_records_out_total\": 2,\n \"op_filter_0_exceptions_total\": 0,\n \"op_filter_0_process_latency_ms\": 0,\n \"op_filter_0_buffer_length\": 0,\n \"op_filter_0_last_invocation\": \"2020-01-02T11:28:33.054821\",\n}"
The command is used to get the status of the rule represented as a json string. In the json string, there are 2 fields:
id required | string |
{- "sources": [
- "string"
], - "edges": {
- "op_project": [
- "string"
], - "source_stream": [
- "string"
]
}
}
The API accepts a JSON content and validates a rule. For the API, here is the explanation of the status codes:
id required | string |
sql required | string |
required | Array of objects |
{- "id": "rule1",
- "sql": "SELECT * FROM demo",
- "actions": [
- {
- "log": { }
}
]
}
{ }
The API accepts rulesets and imports them into the system. If a stream or rule in the ruleset already exists, it is not created. The API returns text informing the number of streams and rules created. The API supports specifying rulesets by means of text content or file URIs.
The file format for importing and exporting a ruleset is JSON, which can contain three parts: streams, tables and rules. Each type holds the key-value pairs of the name and the creation statement. In the following example file, we define a stream and two rules.
"streams": {
"demo": "CREATE STREAM demo () WITH (DATASOURCE=\"users\", FORMAT=\"JSON\")"
},
"tables": {},
"rules": {
"rule1": "{\"id\": \"rule1\",\"sql\": \"SELECT * FROM demo\",\"actions\": [{\"log\": {}}]}",
"rule2": "{\"id\": \"rule2\",\"sql\": \"SELECT * FROM demo\",\"actions\": [{ \"log\": {}}]}"
}
}
file | string ( Import by file URI) |
content | string ( Import by text content) |
{- "file": "file:///tmp/a.json"
}
{ }
The API accepts a JSON content and creates a schema. Each schema type has a standalone endpoint. Currently, only one schema type, protobuf, is supported. A schema is identified by its name, so the name must be unique for each type.
The schema content is specified with either the file or the content parameter. After the schema is created, its content is written to the file data/schemas/$schema_type/$schema_name. The file parameter can use the http or https scheme, or the file scheme to refer to a local file path of the eKuiper server. The schema file must be of the file type of the corresponding schema type; for example, a protobuf schema file's extension must be .proto.
name required | string |
content | string (Schema content inside content.) |
file | string (Schema content in a file) |
soFile | string (Schema with static plugin) |
""
{ }
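A sketch of creating a protobuf schema by content, using the per-type endpoint described above (9081 is the REST port used elsewhere in this document; verify the path against your version):
curl -X POST "http://127.0.0.1:9081/schemas/protobuf" \
  -H "Content-Type: application/json" \
  -d '{"name":"schema1","content":"message Book {required string title = 1; required int32 price = 2;}"}'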
The API is used for printing the detailed definition of a schema.
name required | string name of the schema. |
{- "type": "protobuf",
- "name": "schema1",
- "content": "message Book {required string title = 1; required int32 price = 2;}",
- "file": "ekuiper\\etc\\schemas\\protobuf\\schema1.proto"
}
The API is used for updating the schema. The request body is the same as creating a schema.
name required | string |
name required | string |
file required | string |
""
{ }
This API accepts JSON content to create new external services. A sample zip file for a service could be named sample.zip.
An example of a request for a file on an HTTP server:
{
"name":"random",
"file":"http://127.0.0.1/services/sample.zip"
}
An example of a request for a file on the eKuiper server:
{
"name":"random",
"file":"file:///var/services/sample.zip"
}
name required | string |
file required | string a file on an HTTP server or a file on the eKuiper server. |
{- "name": "random",
}
{ }
This API is used to update external services, and its parameters are the same as that of service registration.
name required | string |
name required | string |
file required | string |
""
{ }
The API is used for creating a stream. This API can run any stream sql statements, not only stream creation.
sql required | string |
{- "sql": "create stream my_stream (id bigint, name string, score float) WITH ( datasource = \"topic/temperature\", FORMAT = \"json\", KEY = \"id\")"
}
{ }
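For example, the stream statement above can be submitted with curl against eKuiper's REST port (9081, as used elsewhere in this document); note the escaped quotes inside the SQL string:
curl -X POST "http://127.0.0.1:9081/streams" \
  -H "Content-Type: application/json" \
  -d '{"sql":"create stream my_stream (id bigint, name string, score float) WITH ( datasource = \"topic/temperature\", FORMAT = \"json\", KEY = \"id\")"}'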
The API is used for printing the detailed definition of a stream.
id required | string |
{- "Name": "demo",
- "StreamFields": [
- {
- "Name": "temperature",
- "FieldType": {
- "Type": 2
}
}, - {
- "Name": "ts",
- "FieldType": {
- "Type": 1
}
}
], - "Options": {
- "DATASOURCE": "demo",
- "FORMAT": "JSON"
}
}
The API is used to get the stream schema. The schema is inferred from the physical and logical schema definitions.
id required | string |
{- "id": {
- "type": "bigint"
}, - "name": {
- "type": "string"
}, - "age": {
- "type": "bigint"
}, - "hobbies": {
- "type": "struct",
- "properties": {
- "indoor": {
- "type": "array",
- "items": {
- "type": "string"
}
}, - "outdoor": {
- "type": "array",
- "items": {
- "type": "string"
}
}
}
}
}
The API is used for updating the stream definition.
id required | string the id or name of the old stream. |
sql required | string |
{- "sql": "create stream my_stream (id bigint, name string, score float) WITH ( datasource = \"topic/temperature\", FORMAT = \"json\", KEY = \"id\")"
}
{ }
The API is used for creating a table. This API can run any table sql statements, not only table creation.
sql required | string |
{- "sql": "create table my_table (id bigint, name string, score float) WITH ( datasource = \"lookup.json\", FORMAT = \"json\", KEY = \"id\")"
}
{ }
The API is used for displaying all tables defined in the server.
This API accepts one parameter kind, whose value can be scan or lookup to query that kind of table. For any other value, the parameter is ignored and all kinds of tables are returned.
kind | string Example: kind=lookup query all the lookup tables. |
[- "mytable"
]
The API is used for printing the detailed definition of a table.
id required | string |
{- "Name": "demo",
- "StreamFields": [
- {
- "Name": "temperature",
- "FieldType": {
- "Type": 2
}
}, - {
- "Name": "ts",
- "FieldType": {
- "Type": 1
}
}
], - "Options": {
- "DATASOURCE": "lookup.json",
- "FORMAT": "JSON"
}
}
The API is used for updating the table definition.
id required | string the id or name of the old table. |
{- "sql": "create table my_table (id bigint, name string, score float) WITH ( datasource = \"topic/temperature\", FORMAT = \"json\", KEY = \"id\")"
}
The API supports uploading a local file, providing the text content of a file, or supplying an HTTP file link. The upload request saves the file into your ${dataPath}/uploads directory, overriding any existing file of the same name. The response is the absolute path of the uploaded file, which you can reference in other configurations.
The API accepts a multipart file upload request. Below is an example HTML file to upload a file to http://127.0.0.1:9081/config/uploads. In the form data, the file input name must be uploadFile.
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<meta http-equiv="X-UA-Compatible" content="ie=edge" />
<title>Upload File</title>
</head>
<body>
<form
enctype="multipart/form-data"
action="http://127.0.0.1:9081/config/uploads"
method="post"
>
<input type="file" name="uploadFile" />
<input type="submit" value="upload" />
</form>
</body>
</html>
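The same upload can be done with curl as a multipart request; the form field name must be uploadFile, and my.json here is a hypothetical local file:
curl -F "uploadFile=@./my.json" http://127.0.0.1:9081/config/uploads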
Provide the text content and file name to create a configuration file.
{
"name": "my.json",
"content": "{\"hello\":\"world\"}"
}
{
"name": "my.json",
"file": "http://127.0.0.1:80/my.json"
}
name required | string (configuration file name) |
content | string ( text content) |
file required | string (HTTP file link) The file should be placed on an HTTP server in advance |
{- "name": "my.json",
- "content": "{\"hello\":\"world\"}"
}
{ }
Query the raw metrics of the data stream processing. Once the rules are operating normally, you can see metrics similar to kuiper_sink_records_in_total in this API.
"# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.\n# TYPE go_gc_duration_seconds summary\ngo_gc_duration_seconds{quantile=\"0\"} 0.000185937\ngo_gc_duration_seconds{quantile=\"0.25\"} 0.000342795\ngo_gc_duration_seconds{quantile=\"0.5\"} 0.000369361\ngo_gc_duration_seconds{quantile=\"0.75\"} 0.000391542\ngo_gc_duration_seconds{quantile=\"1\"} 0.000391542\ngo_gc_duration_seconds_sum 0.001289635\ngo_gc_duration_seconds_count 4\n# HELP go_goroutines Number of goroutines that currently exist.\n# TYPE go_goroutines gauge\ngo_goroutines 15\n# HELP go_info Information about the Go environment.\n# TYPE go_info gauge\ngo_info{version=\"go1.20.2\"} 1\n# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.\n# TYPE go_memstats_alloc_bytes gauge\ngo_memstats_alloc_bytes 6.983208e+06\n# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.\n# TYPE go_memstats_alloc_bytes_total counter\ngo_memstats_alloc_bytes_total 1.1783664e+07\n# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.\n# TYPE go_memstats_buck_hash_sys_bytes gauge\ngo_memstats_buck_hash_sys_bytes 1.451727e+06\n# HELP go_memstats_frees_total Total number of frees.\n# TYPE go_memstats_frees_total counter\ngo_memstats_frees_total 62815\n# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.\n# TYPE go_memstats_gc_sys_bytes gauge\ngo_memstats_gc_sys_bytes 8.595544e+06\n# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.\n# TYPE go_memstats_heap_alloc_bytes gauge\ngo_memstats_heap_alloc_bytes 6.983208e+06\n# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.\n# TYPE go_memstats_heap_idle_bytes gauge\ngo_memstats_heap_idle_bytes 6.209536e+06\n# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.\n# TYPE go_memstats_heap_inuse_bytes gauge\ngo_memstats_heap_inuse_bytes 9.91232e+06\n# HELP go_memstats_heap_objects Number of allocated objects.\n# TYPE go_memstats_heap_objects gauge\ngo_memstats_heap_objects 38478\n# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS.\n# TYPE go_memstats_heap_released_bytes gauge\ngo_memstats_heap_released_bytes 4.58752e+06\n# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.\n# TYPE go_memstats_heap_sys_bytes gauge\ngo_memstats_heap_sys_bytes 1.6121856e+07\n# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.\n# TYPE go_memstats_last_gc_time_seconds gauge\ngo_memstats_last_gc_time_seconds 1.7001045398775318e+09\n# HELP go_memstats_lookups_total Total number of pointer lookups.\n# TYPE go_memstats_lookups_total counter\ngo_memstats_lookups_total 0\n# HELP go_memstats_mallocs_total Total number of mallocs.\n# TYPE go_memstats_mallocs_total counter\ngo_memstats_mallocs_total 101293\n# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.\n# TYPE go_memstats_mcache_inuse_bytes gauge\ngo_memstats_mcache_inuse_bytes 4800\n# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.\n# TYPE go_memstats_mcache_sys_bytes gauge\ngo_memstats_mcache_sys_bytes 15600\n# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.\n# TYPE go_memstats_mspan_inuse_bytes gauge\ngo_memstats_mspan_inuse_bytes 177280\n# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.\n# 
TYPE go_memstats_mspan_sys_bytes gauge\ngo_memstats_mspan_sys_bytes 179520\n# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.\n# TYPE go_memstats_next_gc_bytes gauge\ngo_memstats_next_gc_bytes 1.4275312e+07\n# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.\n# TYPE go_memstats_other_sys_bytes gauge\ngo_memstats_other_sys_bytes 895921\n# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.\n# TYPE go_memstats_stack_inuse_bytes gauge\ngo_memstats_stack_inuse_bytes 655360\n# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.\n# TYPE go_memstats_stack_sys_bytes gauge\ngo_memstats_stack_sys_bytes 655360\n# HELP go_memstats_sys_bytes Number of bytes obtained from system.\n# TYPE go_memstats_sys_bytes gauge\ngo_memstats_sys_bytes 2.7915528e+07\n# HELP go_threads Number of OS threads created.\n# TYPE go_threads gauge\ngo_threads 9\n# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.\n# TYPE process_cpu_seconds_total counter\nprocess_cpu_seconds_total 0.38\n# HELP process_max_fds Maximum number of open file descriptors.\n# TYPE process_max_fds gauge\nprocess_max_fds 1.048576e+06\n# HELP process_open_fds Number of open file descriptors.\n# TYPE process_open_fds gauge\nprocess_open_fds 14\n# HELP process_resident_memory_bytes Resident memory size in bytes.\n# TYPE process_resident_memory_bytes gauge\nprocess_resident_memory_bytes 7.012352e+07\n# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.\n# TYPE process_start_time_seconds gauge\nprocess_start_time_seconds 1.7001045391e+09\n# HELP process_virtual_memory_bytes Virtual memory size in bytes.\n# TYPE process_virtual_memory_bytes gauge\nprocess_virtual_memory_bytes 1.327280128e+09\n# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes.\n# TYPE process_virtual_memory_max_bytes gauge\nprocess_virtual_memory_max_bytes 1.8446744073709552e+19\n# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served.\n# TYPE promhttp_metric_handler_requests_in_flight gauge\npromhttp_metric_handler_requests_in_flight 1\n# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code.\n# TYPE promhttp_metric_handler_requests_total counter\npromhttp_metric_handler_requests_total{code=\"200\"} 0\npromhttp_metric_handler_requests_total{code=\"500\"} 0\npromhttp_metric_handler_requests_total{code=\"503\"} 0"
Create a trial run rule and wait for it to run. This API checks syntax, ensuring the creation of an executable trial run rule. After the rule is created successfully, the websocket endpoint starts. Users can listen to the websocket address http://localhost:10081/test/uuid to get the result output, where the port and id are the return values shown below.
id required | string or null The id of the test rule, required, used for subsequent test rule management. Ensure uniqueness, it cannot be repeated with other test rules, otherwise the original test rule will be overwritten. This id has no association with the id of ordinary rules. |
sql required | string or null The sql statement of the test rule, required, used to define the syntax of the test rule. |
required | object or null The mock rule definition of the data source of the test rule, optional, used to define the input data of the test rule. If not defined, the real data source in SQL will be used. |
required | object or null The definition of the sink parameters of the test rule, optional. Most of the common parameters of the sink can be used, such as dataTemplate and fields. If not defined, the default sink parameters will be used. |
{- "id": "uuid",
- "sql": "select * from demo",
- "mockSource": {
- "demo": {
- "data": [
- {
- "a": 2
}, - {
- "b": 3
}
], - "interval": 100,
- "loop": true
}, - "demo1": {
- "data": [
- {
- "n": 2
}, - {
- "n": 3
}
], - "interval": 200,
- "loop": true
}
}, - "sinkProps": {
- "dataTemplate": "xxx",
- "fields": [
- "abc",
- "test"
]
}
}
{- "id": "uuid",
- "port": 10081
}
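An end-to-end sketch of the trial-rule flow. The /ruletest paths and the start step follow eKuiper's trial-rule convention and should be verified against your version; websocat is used purely as an illustrative websocket client, and trial-rule.json is a hypothetical file holding the request body above.
curl -X POST "http://127.0.0.1:9081/ruletest" -H "Content-Type: application/json" -d @trial-rule.json
# response: {"id": "uuid", "port": 10081}
curl -X POST "http://127.0.0.1:9081/ruletest/uuid/start"
websocat ws://127.0.0.1:10081/test/uuid   # streams the rule's result output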
Besides defining functions in plugins, user-defined functions (UDF) are also supported independently. Currently, we only support JavaScript UDFs. We can use the REST API or CLI to manage JavaScript functions.
Use this endpoint to create a new function.
id required | string A unique name for the function. This name must also be defined as a function in the script field. |
description required | string A brief description of the function. |
script required | string The function implementation in JavaScript. |
isAgg required | boolean A boolean indicating whether the function is an aggregate function. |
{- "id": "area",
- "description": "calculate area",
- "script": "function area(x, y) { return x * y; }",
- "isAgg": false
}
{ }
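For example, the area function above can be created with curl against the JavaScript UDF endpoint (/udf/javascript per eKuiper's convention; verify against your version):
curl -X POST "http://127.0.0.1:9081/udf/javascript" \
  -H "Content-Type: application/json" \
  -d '{"id":"area","description":"calculate area","script":"function area(x, y) { return x * y; }","isAgg":false}'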
Use this endpoint to get the detailed definition of a function. Replace {id} with the name of the function you want to describe. The response will be a JSON object with the function's details.
id required | string |
{- "id": "area",
- "description": "calculate area",
- "script": "function area(x, y) { return x * y; }",
- "isAgg": false
}
Use this endpoint to delete a function. Replace {id} with the name of the function you want to delete. Note that you need to manually stop or delete any rules using the UDF before deleting it. A running rule will not be affected by the deletion of a UDF.
id required | string |
{ }
The JavaScript UDF can be updated and hot reloaded. Notice that a running rule must be restarted to load the updated function. Replace {id} with the name of the function you want to update. The request body should be the same as when creating a UDF. If a function with the given id does not exist, it will be created; otherwise, it will be updated.
id required | string |
{ }