# Python examples
## Manage Measurements
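The helper functions below reuse a shared `requests` session and base URL. A minimal setup sketch; the bearer-token header and the `API_TOKEN` placeholder are assumptions, so match them to how your Historian instance actually authenticates (see My Swagger):

```python
import json

import requests

base_url = "https://historian.mycompany.com:18000/api"

session = requests.Session()
# Assumption: token-based auth via an Authorization header; replace with
# the scheme your Historian deployment actually uses.
API_TOKEN = "my-api-token"
session.headers.update({"Authorization": f"Bearer {API_TOKEN}"})
```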
### Create measurements

This code creates a measurement by providing (see My Swagger):

- `databaseUUID`: retrieve via Manage time series databases (docid\ yhpic w9fthy09n103qcd)
- `collectorUUID`: retrieve via Manage collectors (docid\ pc5 4ldexbiryshmxcdph)
- `datatype`: e.g. number, boolean, string
- `name`: e.g. tank1.temperature
- `status`: e.g. active, paused

`POST https://historian.mycompany.com:18000/api/measurements`

Via `POST https://historian.mycompany.com:18000/api/measurements/excel`, an Excel file can be uploaded to bulk create/update measurements (a sketch follows the response below).

```python
def create_measurement(database_uuid, collector_uuid, datatype, name, status):
    payload = {
        "databaseUUID": database_uuid,
        "collectorUUID": collector_uuid,
        "datatype": datatype,
        "name": name,
        "status": status,
    }
    resp = session.post(f"{base_url}/measurements", json=payload, timeout=30)
    resp.raise_for_status()
    return resp.json()


# Fill in the database and collector UUID before executing
new_measurement = create_measurement(
    "my-database-uuid", "my-collector-uuid", "number", "tank1.temperature", "active"
)
print(json.dumps(new_measurement, indent=2))
```

Successful response:

```json
{
  "uuid": "0111a5f4-954e-11f0-ad87-22da883bd9ab",
  "createdBy": "6c8ef372-5c29-11ef-9e88-0242ac12000a",
  "createdAt": "2025-09-19T11:44:30.637605087Z",
  "updatedBy": "6c8ef372-5c29-11ef-9e88-0242ac12000a",
  "updatedAt": "2025-09-19T11:44:30.637605087Z",
  "attributes": null,
  "metadata": null,
  "organizationUUID": "fbf184b2-5c29-11ef-8bf9-0242ac12000a",
  "name": "tank1.temperature",
  "description": "",
  "datatype": "number",
  "status": "active",
  "collectorUUID": "2369d6fe-94bc-11f0-acc7-a25bd411a45c",
  "collector": {
    "uuid": "2369d6fe-94bc-11f0-acc7-a25bd411a45c",
    "createdBy": "6c8ef372-5c29-11ef-9e88-0242ac12000a",
    "createdAt": "2025-09-18T18:20:21.736927Z",
    "updatedBy": "6c8ef372-5c29-11ef-9e88-0242ac12000a",
    "updatedAt": "2025-09-18T18:21:46.542773Z",
    "attributes": {},
    "metadata": {},
    "organizationUUID": "fbf184b2-5c29-11ef-8bf9-0242ac12000a",
    "settings": {},
    "name": "My CSV collector",
    "collectorType": "csv",
    "buildVersion": "5.2.3",
    "buildOS": "linux",
    "buildArch": "arm64",
    "description": "Collector for CSV imports",
    "status": "active",
    "measurementSettingsSchema": {
      "$schema": "http://json-schema.org/draft-06/schema#",
      "definitions": {},
      "id": "http://factry.io/measurementSettings.json",
      "properties": {
        "filter": {
          "additionalProperties": false,
          "default": {},
          "description": "Filter to parse only the rows that match the filter. The filter is a JSON object where the key is the column index (as a string) and the value is the value to match. If multiple values in the same column should be matched, separate them with a semicolon. Use {} to leave empty.",
          "examples": ["{\"1\": \"g10391\"}"],
          "id": "/properties/filter",
          "minProperties": 0,
          "order": 2,
          "patternProperties": {
            "[0-9]+": {
              "type": "string"
            }
          },
          "title": "filter",
          "type": "object"
        },
        "statusGood": {
          "description": "All values that are mapped to status 'good'. Multiple statuses can be configured using semicolon separated values",
          "id": "/properties/statusGood",
          "order": 3,
          "title": "statusGood",
          "type": "string"
        },
        "tagsInCSV": {
          "additionalProperties": false,
          "default": {},
          "description": "An optional JSON configuration to add values from a certain column index (starts from 0) to a tag. Multiple mappings can be foreseen using one column index and one tag name. Use {} to leave empty.",
          "examples": ["{\"1\": \"tagname\"}"],
          "id": "/properties/tagsInCSV",
          "minProperties": 0,
          "order": 5,
          "patternProperties": {
            "[0-9]+": {
              "type": "string"
            }
          },
          "title": "tagsInCSV",
          "type": "object"
        },
        "timestampLayout": {
          "description": "The layout of the timestamp, default the timestampLayout setting on the collector will be used. More info: https://pkg.go.dev/time#Layout",
          "id": "/properties/timestampLayout",
          "order": 4,
          "title": "timestampLayout",
          "type": "string"
        },
        "valueColumn": {
          "default": 2,
          "description": "Index of column holding the value (starts from 0)",
          "id": "/properties/valueColumn",
          "order": 1,
          "title": "valueColumn",
          "type": "integer"
        }
      },
      "required": ["valueColumn"],
      "type": "object"
    },
    "settingsSchema": {
      "$id": "http://factry.io/collectorSettings.json",
      "$schema": "http://json-schema.org/draft-06/schema#",
      "definitions": {},
      "properties": {
        "alternativeTimezone": {
          "$id": "/properties/alternativeTimezone",
          "description": "This changes the timestamp of points, statistics and logs to the machine local timezone, strips off the timezone and indicates the timezone to be the alternative timezone. The timezone name is taken from the IANA time zone database (https://en.wikipedia.org/wiki/List_of_tz_database_time_zones).",
          "isAdvanced": true,
          "order": 14,
          "title": "alternativeTimezone",
          "type": "string"
        },
        "basePath": {
          "description": "Default directory where the incoming, processed and error directories are in. By default these directory names are respectively incoming, processed and error. Paths are absolute. Add a trailing forward slash for Linux or a trailing backslash for Windows",
          "id": "/properties/basePath",
          "order": 201,
          "title": "basePath",
          "type": "string"
        },
        "bufferDiskBatches": {
          "$id": "/properties/bufferDiskBatches",
          "default": 100,
          "description": "The maximum amount of batches that will be written to a buffer file.",
          "isAdvanced": true,
          "order": 8,
          "title": "bufferDiskBatches",
          "type": "integer"
        },
        "bufferMaxBatchSize": {
          "$id": "/properties/bufferMaxBatchSize",
          "default": 5000,
          "description": "The maximum size of a batch.",
          "isAdvanced": true,
          "order": 6,
          "title": "bufferMaxBatchSize",
          "type": "integer"
        },
        "bufferMaxBatches": {
          "$id": "/properties/bufferMaxBatches",
          "default": 20,
          "description": "The maximum amount of batches the buffer will keep in memory.",
          "isAdvanced": true,
          "order": 7,
          "title": "bufferMaxBatches",
          "type": "integer"
        },
        "bufferMaxDiskSpace": {
          "$id": "/properties/bufferMaxDiskSpace",
          "default": "0MB",
          "description": "The maximum disk space that the buffer files may take up. Default: no limit.",
          "isAdvanced": true,
          "order": 10,
          "pattern": "[0-9]+(T|G|M|K)?B",
          "title": "bufferMaxDiskSpace",
          "type": "string"
        },
        "bufferMinFreeDiskSpace": {
          "$id": "/properties/bufferMinFreeDiskSpace",
          "default": "2GB",
          "description": "The buffer won't write to more files if the minimum free disk space is reached.",
          "isAdvanced": true,
          "order": 9,
          "pattern": "[0-9]+(T|G|M|K)?B",
          "title": "bufferMinFreeDiskSpace",
          "type": "string"
        },
        "bufferProcessInterval": {
          "$id": "/properties/bufferProcessInterval",
          "default": 250,
          "description": "The interval in milliseconds that the buffer processes the buffer.",
          "isAdvanced": true,
          "minimum": 250,
          "order": 5,
          "title": "bufferProcessInterval",
          "type": "integer"
        },
        "csvHeader": {
          "default": "false",
          "description": "Indicates if there is a header in the CSV files.",
          "enum": ["true", "false"],
          "id": "/properties/csvHeader",
          "order": 107,
          "title": "csvHeader",
          "type": "string"
        },
        "compressionLevel": {
          "$id": "/properties/compressionLevel",
          "default": 1,
          "description": "The compression level gRPC uses, for default compression. 1-9, 1 = fastest, 9 = most compressed, 1 = default.",
          "isAdvanced": true,
          "maximum": 9,
          "minimum": 1,
          "order": 12,
          "title": "compressionLevel",
          "type": "integer"
        },
        "delimiter": {
          "default": ",",
          "description": "The delimiter used in the CSV file.",
          "id": "/properties/delimiter",
          "order": 106,
          "title": "delimiter",
          "type": "string"
        },
        "errorDirectoryPath": {
          "description": "Override directory in which files that contained an error will be put. Invalid CSV files will be stored here",
          "id": "/properties/errorDirectoryPath",
          "order": 204,
          "title": "errorDirectoryPath",
          "type": "string"
        },
        "failedMaxAge": {
          "default": 129600,
          "description": "The maximum time in minutes to keep failed files. 0 will keep the files indefinitely",
          "id": "/properties/failedMaxAge",
          "isAdvanced": true,
          "order": 216,
          "title": "failedMaxAge",
          "type": "integer"
        },
        "fileMask": {
          "default": "*.csv",
          "description": "File mask of the files to use",
          "id": "/properties/fileMask",
          "order": 205,
          "pattern": "\\*\\.[a-z]",
          "title": "fileMask",
          "type": "string"
        },
        "grpcMaxMessageSize": {
          "$id": "/properties/grpcMaxMessageSize",
          "default": "64MB",
          "description": "The maximum message size for gRPC messages.",
          "isAdvanced": true,
          "order": 13,
          "pattern": "[0-9]+(T|G|M|K)?B",
          "title": "grpcMaxMessageSize",
          "type": "string"
        },
        "haPollingInterval": {
          "$id": "/properties/haPollingInterval",
          "default": 1000,
          "description": "The interval in milliseconds at which the backup collector polls the main collector's health.",
          "isAdvanced": true,
          "order": 11,
          "title": "haPollingInterval",
          "type": "integer"
        },
        "incomingDirectoryPath": {
          "description": "Override directory to monitor for files matching fileMask. Paths are absolute",
          "id": "/properties/incomingDirectoryPath",
          "order": 202,
          "title": "incomingDirectoryPath",
          "type": "string"
        },
        "logLevel": {
          "$id": "/properties/logLevel",
          "default": "info",
          "description": "The log level of the collector.",
          "enum": ["trace", "debug", "info"],
          "isAdvanced": true,
          "order": 1,
          "title": "logLevel",
          "type": "string"
        },
        "processDelay": {
          "default": 3000,
          "description": "Delay in milliseconds for processing new files after they were detected",
          "id": "/properties/processDelay",
          "isAdvanced": true,
          "order": 213,
          "title": "processDelay",
          "type": "integer"
        },
        "processInterval": {
          "default": 10000,
          "description": "Interval in milliseconds in which the incoming directory is checked for new files",
          "id": "/properties/processInterval",
          "isAdvanced": true,
          "order": 212,
          "title": "processInterval",
          "type": "integer"
        },
        "processedDirectoryPath": {
          "description": "Override directory to which successfully processed files are moved. Paths are absolute",
          "id": "/properties/processedDirectoryPath",
          "order": 203,
          "title": "processedDirectoryPath",
          "type": "string"
        },
        "processedMaxAge": {
          "default": 43200,
          "description": "The maximum time in minutes to keep processed files. 0 will keep the files indefinitely",
          "id": "/properties/processedMaxAge",
          "isAdvanced": true,
          "order": 215,
          "title": "processedMaxAge",
          "type": "integer"
        },
        "sourceTimezone": {
          "$id": "/properties/sourceTimezone",
          "description": "This changes the timestamp of points by stripping off the timezone from the timestamp and indicating the timezone to be the source timezone (f.e. 0h UTC -> 0h UTC+2). Do not use if the points could come from different source devices (f.e. polling vs monitored), since these source devices can have different time settings! The timezone name is taken from the IANA time zone database (https://en.wikipedia.org/wiki/List_of_tz_database_time_zones).",
          "isAdvanced": true,
          "order": 15,
          "title": "sourceTimezone",
          "type": "string"
        },
        "startupDelay": {
          "default": 9000,
          "description": "Initial delay for checking the incoming directory in milliseconds",
          "id": "/properties/startupDelay",
          "isAdvanced": true,
          "order": 214,
          "title": "startupDelay",
          "type": "integer"
        },
        "statusColumn": {
          "description": "Index of column holding the status (starts from 0)",
          "id": "/properties/statusColumn",
          "order": 110,
          "pattern": "[0-9]*",
          "title": "statusColumn",
          "type": "string"
        },
        "statusGood": {
          "$id": "/properties/statusGood",
          "description": "A comma separated list of status strings that will be interpreted as a good status.",
          "isAdvanced": true,
          "order": 16,
          "title": "statusGood",
          "type": "string"
        },
        "tickerInterval": {
          "$id": "/properties/tickerInterval",
          "default": 1000,
          "description": "The interval for the base ticker in milliseconds. This ticker is used for the polling of measurements.",
          "isAdvanced": true,
          "minimum": 250,
          "order": 2,
          "title": "tickerInterval",
          "type": "integer"
        },
        "tickerMaxDrift": {
          "$id": "/properties/tickerMaxDrift",
          "default": 125,
          "description": "The maximum drift a tick may have in milliseconds. Must be lower than the interval.",
          "isAdvanced": true,
          "order": 4,
          "title": "tickerMaxDrift",
          "type": "integer"
        },
        "tickerResolution": {
          "$id": "/properties/tickerResolution",
          "default": 25,
          "description": "The resolution for the base ticker in milliseconds. Maximum 1/10th of the interval.",
          "isAdvanced": true,
          "minimum": 2,
          "order": 3,
          "title": "tickerResolution",
          "type": "integer"
        },
        "timestampColumn": {
          "default": 1,
          "description": "Index of column holding the timestamp (starts from 0)",
          "id": "/properties/timestampColumn",
          "order": 108,
          "title": "timestampColumn",
          "type": "integer"
        },
        "timestampLayout": {
          "description": "The layout of the timestamp of the measurements, default RFC3339 will be used. More info: https://pkg.go.dev/time#Layout",
          "id": "/properties/timestampLayout",
          "order": 109,
          "title": "timestampLayout",
          "type": "string"
        },
        "trimString": {
          "description": "The string to trim (leading and trailing) from the CSV cells used (values, mappings, status, ...)",
          "id": "/properties/trimString",
          "order": 111,
          "title": "trimString",
          "type": "string"
        }
      },
      "required": ["timestampColumn", "delimiter", "csvHeader", "basePath", "fileMask"],
      "type": "object"
    },
    "state": {},
    "health": {
      "health": "collecting",
      "timestamp": "2025-09-18T18:21:48.821003Z",
      "collectorUUID": "2369d6fe-94bc-11f0-acc7-a25bd411a45c"
    },
    "lastSeen": "2025-09-19T11:44:27.04284971Z",
    "ipAddress": "134.209.86.11:34951",
    "haUUID": null,
    "defaultDatabaseUUID": null,
    "updateCollectorTo": null
  },
  "databaseUUID": "19bdd45c-5c2d-11ef-ae29-0242ac12000a",
  "settings": null,
  "labels": null
}
```

For Historian versions below v8.0.0, the `PUT https://historian.mycompany.com:18000/api/measurements` endpoint for bulk updates is `PATCH https://historian.mycompany.com:18000/api/measurements` instead. In v8.0.0 and higher, `PATCH https://historian.mycompany.com:18000/api/measurements` still exists, but expects a different data structure. This is a breaking change, as listed in the v8 release notes (docid\ fjib1tgkmrovxrgg hppq).
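For the Excel bulk endpoint mentioned above, the spreadsheet goes up as a file upload. A minimal sketch, assuming the endpoint accepts a standard multipart/form-data body with the spreadsheet under a `file` form field and answers with JSON; both details are assumptions, so verify the exact contract in My Swagger:

```python
def upload_measurements_excel(path):
    # Assumption: the multipart field is named "file"; check My Swagger.
    with open(path, "rb") as f:
        resp = session.post(
            f"{base_url}/measurements/excel",
            files={"file": (path, f)},
            timeout=60,
        )
    resp.raise_for_status()
    return resp.json()  # assumption: the endpoint responds with JSON


# upload_measurements_excel("measurements.xlsx")
```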
### Update a measurement

This code updates a measurement by providing (see My Swagger):

- `databaseUUID`: retrieve via Manage time series databases (docid\ yhpic w9fthy09n103qcd)
- `collectorUUID`: retrieve via Manage collectors (docid\ pc5 4ldexbiryshmxcdph)
- `datatype`: e.g. number, boolean, string
- `name`: e.g. tank1.temperature
- `status`: e.g. active, paused

`PUT https://historian.mycompany.com:18000/api/measurements`

The `PUT https://historian.mycompany.com:18000/api/measurements` endpoint can be used to create multiple measurements at once too.

```python
def update_measurement(measurement_uuid: str, payload):
    resp = session.put(f"{base_url}/measurements/{measurement_uuid}", json=payload, timeout=30)
    resp.raise_for_status()
    return resp.json()


update_payload = {
    "databaseUUID": "my-database-uuid",
    "collectorUUID": "my-collector-uuid",
    "datatype": "string",
    "name": "tank1.temperature",
    "status": "paused",
}
updated_measurement = update_measurement("my-measurement-uuid", update_payload)
print(json.dumps(updated_measurement, indent=2))
```

Successful response:

```json
{
  "uuid": "0111a5f4-954e-11f0-ad87-22da883bd9ab",
  "createdBy": null,
  "createdAt": "0001-01-01T00:00:00Z",
  "updatedBy": "6c8ef372-5c29-11ef-9e88-0242ac12000a",
  "updatedAt": "2025-09-19T12:14:36.464632589Z",
  "attributes": null,
  "metadata": null,
  "organizationUUID": "fbf184b2-5c29-11ef-8bf9-0242ac12000a",
  "name": "tank1.temperature",
  "description": "",
  "datatype": "string",
  "status": "active",
  "collectorUUID": "2369d6fe-94bc-11f0-acc7-a25bd411a45c",
  "databaseUUID": "19bdd45c-5c2d-11ef-ae29-0242ac12000a",
  "settings": null,
  "labels": null
}
```

### Delete a measurement

This code deletes a measurement given the measurement UUID (see My Swagger).

`DELETE https://historian.mycompany.com:18000/api/measurements/{measurement_uuid}` (replace `{measurement_uuid}` with your measurement UUID)

```python
def delete_measurement(measurement_uuid: str):
    resp = session.delete(f"{base_url}/measurements/{measurement_uuid}", timeout=30)
    resp.raise_for_status()
    return resp.json()


delete_measurement("my-measurement-uuid")
```

Successful response:

```json
{}
```
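Taken together, the three helpers cover the full measurement lifecycle. A short usage sketch (the database and collector UUIDs are placeholders to fill in) that chains create, update and delete, and handles the `requests.HTTPError` raised by `raise_for_status()` in one place:

```python
import requests

try:
    # Fill in the database and collector UUID before executing
    measurement = create_measurement(
        "my-database-uuid", "my-collector-uuid", "number", "tank1.temperature", "active"
    )
    measurement_uuid = measurement["uuid"]

    # Pause the measurement and switch its datatype to string
    update_measurement(measurement_uuid, {
        "databaseUUID": "my-database-uuid",
        "collectorUUID": "my-collector-uuid",
        "datatype": "string",
        "name": "tank1.temperature",
        "status": "paused",
    })

    # Remove the measurement again
    delete_measurement(measurement_uuid)
except requests.HTTPError as exc:
    # raise_for_status() turns 4xx/5xx responses into HTTPError
    print(f"Request failed: {exc}")
```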