Python examples
Manage Collectors
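The snippets below assume a small amount of shared setup: an authenticated `requests.Session` and a `base_url` pointing at the Historian API root. A minimal sketch is shown here; the token variable and the bearer-style authorization header are assumptions for illustration, so adapt them to how your Historian instance handles API authentication.

```python
import json
from typing import Optional

import requests

# Assumed setup for the examples on this page; adjust the host, port and token
# handling to your own deployment. The bearer header is an assumption, not a
# documented scheme.
base_url = "https://historian.mycompany.com:18000/api"
api_token = "YOUR_API_TOKEN"  # hypothetical placeholder

session = requests.Session()
session.headers.update({"Authorization": f"Bearer {api_token}"})
```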
Creating a collector

This code creates a collector with name "my csv collector", description "collector for csv imports" and status "initial" (see My Swagger).

POST https://historian.mycompany.com:18000/api/collectors

```python
def create_collector(name: str, description: Optional[str]):
    # Create a new collector in the "initial" state.
    payload = {"name": name, "description": description, "status": "initial"}
    resp = session.post(f"{base_url}/collectors", json=payload, timeout=60)
    resp.raise_for_status()
    return resp.json()

new_collector = create_collector("my csv collector", "collector for csv imports")
print(json.dumps(new_collector, indent=2))
```

Successful response:

```json
{
  "uuid": "5e3068f8-94b6-11f0-8c78-a25bd411a45c",
  "createdby": "6c8ef372-5c29-11ef-9e88-0242ac12000a",
  "createdat": "2025-09-18T17:39:03.363129Z",
  "updatedby": "6c8ef372-5c29-11ef-9e88-0242ac12000a",
  "updatedat": "2025-09-18T17:39:03.363129Z",
  "attributes": {},
  "metadata": {},
  "organizationuuid": "fbf184b2-5c29-11ef-8bf9-0242ac12000a",
  "settings": {},
  "name": "my csv collector",
  "collectortype": null,
  "buildversion": null,
  "buildos": null,
  "buildarch": null,
  "description": "collector for csv imports",
  "status": "initial",
  "measurementsettingsschema": {},
  "settingsschema": {},
  "state": {},
  "lastseen": "0001-01-01T00:00:00Z",
  "ipaddress": "",
  "hauuid": null,
  "defaultdatabaseuuid": null,
  "updatecollectorto": null
}
```

At this point, the collector is ready to be installed (see Installing a collector). Upon installation, the collector registers itself to add information such as the collector type, build version and settings:

PATCH https://historian.mycompany.com:18000/api/collector/collectors/{collectorUUID}/register

While it is technically possible to register the collector yourself, you should not change the built-in behaviour of shipped collectors (OPC-UA, OPC-DA, etc.) through the Collectors API unless you know exactly what you are doing.

Activating the collector

This code sets the collector status to "active" using the collector UUID (see My Swagger).

PATCH https://historian.mycompany.com:18000/api/collectors/{collector_uuid}/status (replace {collector_uuid} with your collector UUID)

```python
def set_collector_status(collector_uuid: str, status: str):
    # The status endpoint expects the new status as a plain JSON string in the body.
    resp = session.patch(
        f"{base_url}/collectors/{collector_uuid}/status",
        json=status,
        timeout=30,
    )
    resp.raise_for_status()
    return resp.json()

collector_uuid = new_collector["uuid"]
activated_collector = set_collector_status(collector_uuid, "active")
print(json.dumps(activated_collector, indent=2))
```

Successful response:

```json
{
  "uuid": "5e3068f8-94b6-11f0-8c78-a25bd411a45c",
  "createdby": "6c8ef372-5c29-11ef-9e88-0242ac12000a",
  "createdat": "2025-09-18T17:39:03.363129Z",
  "updatedby": "6c8ef372-5c29-11ef-9e88-0242ac12000a",
  "updatedat": "2025-09-18T17:46:07.518838629Z",
  "attributes": {},
  "metadata": {},
  "organizationuuid": "fbf184b2-5c29-11ef-8bf9-0242ac12000a",
  "settings": {},
  "name": "my csv collector",
  "collectortype": "csv",
  "buildversion": "5.2.3",
  "buildos": "linux",
  "buildarch": "arm64",
  "description": "collector for csv imports",
  "status": "active",
  "measurementsettingsschema": {
    "$schema": "http://json-schema.org/draft-06/schema#",
    "definitions": {},
    "id": "http://factry.io/measurementsettings.json",
    "properties": {
      "filter": {"additionalProperties": false, "default": {}, "description": "filter to parse only the rows that match the filter. the filter is a json object where the key is the column index (as a string) and the value is the value to match. if multiple values in the same column should be matched, separate them with a semicolon. use {} to leave empty.", "examples": ["{\"1\": \"g10391\"}"], "id": "/properties/filter", "minProperties": 0, "order": 2, "patternProperties": {"[0-9]+": {"type": "string"}}, "title": "filter", "type": "object"},
      "statusgood": {"description": "all values that are mapped to status 'good'. multiple statuses can be configured using semicolon separated values", "id": "/properties/statusgood", "order": 3, "title": "statusgood", "type": "string"},
      "tagsincsv": {"additionalProperties": false, "default": {}, "description": "an optional json configuration to add values from a certain column index (starts from 0) to a tag. multiple mappings can be foreseen using one column index and one tag name. use {} to leave empty.", "examples": ["{\"1\": \"tagname\"}"], "id": "/properties/tagsincsv", "minProperties": 0, "order": 5, "patternProperties": {"[0-9]+": {"type": "string"}}, "title": "tagsincsv", "type": "object"},
      "timestamplayout": {"description": "the layout of the timestamp, default: the timestamplayout setting on the collector will be used. more info: https://pkg.go.dev/time#Layout", "id": "/properties/timestamplayout", "order": 4, "title": "timestamplayout", "type": "string"},
      "valuecolumn": {"default": 2, "description": "index of column holding the value (starts from 0)", "id": "/properties/valuecolumn", "order": 1, "title": "valuecolumn", "type": "integer"}
    },
    "required": ["valuecolumn"],
    "type": "object"
  },
  "settingsschema": {
    "$id": "http://factry.io/collectorsettings.json",
    "$schema": "http://json-schema.org/draft-06/schema#",
    "definitions": {},
    "properties": {
      "alternativetimezone": {"$id": "/properties/alternativetimezone", "description": "this changes the timestamp of points, statistics and logs to the machine local timezone, strips off the timezone and indicates the timezone to be the alternative timezone. the timezone name is taken from the iana time zone database (https://en.wikipedia.org/wiki/List_of_tz_database_time_zones).", "isadvanced": true, "order": 14, "title": "alternativetimezone", "type": "string"},
      "basepath": {"description": "default directory where the incoming, processed and error directories are in. by default these directory names are, respectively, incoming, processed and error. paths are absolute. add a trailing forward slash for linux or a trailing backslash for windows", "id": "/properties/basepath", "order": 201, "title": "basepath", "type": "string"},
      "bufferdiskbatches": {"$id": "/properties/bufferdiskbatches", "default": 100, "description": "the maximum amount of batches that will be written to a buffer file.", "isadvanced": true, "order": 8, "title": "bufferdiskbatches", "type": "integer"},
      "buffermaxbatchsize": {"$id": "/properties/buffermaxbatchsize", "default": 5000, "description": "the maximum size of a batch.", "isadvanced": true, "order": 6, "title": "buffermaxbatchsize", "type": "integer"},
      "buffermaxbatches": {"$id": "/properties/buffermaxbatches", "default": 20, "description": "the maximum amount of batches the buffer will keep in memory.", "isadvanced": true, "order": 7, "title": "buffermaxbatches", "type": "integer"},
      "buffermaxdiskspace": {"$id": "/properties/buffermaxdiskspace", "default": "0mb", "description": "the maximum disk space that the buffer files may take up. default: no limit.", "isadvanced": true, "order": 10, "pattern": "[0-9]+(t|g|m|k)?b", "title": "buffermaxdiskspace", "type": "string"},
      "bufferminfreediskspace": {"$id": "/properties/bufferminfreediskspace", "default": "2gb", "description": "the buffer won't write to more files if the minimum free disk space is reached.", "isadvanced": true, "order": 9, "pattern": "[0-9]+(t|g|m|k)?b", "title": "bufferminfreediskspace", "type": "string"},
      "bufferprocessinterval": {"$id": "/properties/bufferprocessinterval", "default": 250, "description": "the interval in milliseconds that the buffer processes the buffer.", "isadvanced": true, "minimum": 250, "order": 5, "title": "bufferprocessinterval", "type": "integer"},
      "csvheader": {"default": "false", "description": "indicates if there is a header in the csv files.", "enum": ["true", "false"], "id": "/properties/csvheader", "order": 107, "title": "csvheader", "type": "string"},
      "compressionlevel": {"$id": "/properties/compressionlevel", "default": 1, "description": "the compression level grpc uses, for default compression: 1-9. 1 = fastest, 9 = most compressed, 1 = default.", "isadvanced": true, "maximum": 9, "minimum": 1, "order": 12, "title": "compressionlevel", "type": "integer"},
      "delimiter": {"default": ",", "description": "the delimiter used in the csv file.", "id": "/properties/delimiter", "order": 106, "title": "delimiter", "type": "string"},
      "errordirectorypath": {"description": "override directory in which files that contained an error will be put. invalid csv files will be stored here", "id": "/properties/errordirectorypath", "order": 204, "title": "errordirectorypath", "type": "string"},
      "failedmaxage": {"default": 129600, "description": "the maximum time in minutes to keep failed files. 0 will keep the files indefinitely", "id": "/properties/failedmaxage", "isadvanced": true, "order": 216, "title": "failedmaxage", "type": "integer"},
      "filemask": {"default": "*.csv", "description": "file mask of the files to use", "id": "/properties/filemask", "order": 205, "pattern": "\\*\\.[a-z]*", "title": "filemask", "type": "string"},
      "grpcmaxmessagesize": {"$id": "/properties/grpcmaxmessagesize", "default": "64mb", "description": "the maximum message size for grpc messages.", "isadvanced": true, "order": 13, "pattern": "[0-9]+(t|g|m|k)?b", "title": "grpcmaxmessagesize", "type": "string"},
      "hapollinginterval": {"$id": "/properties/hapollinginterval", "default": 1000, "description": "the interval in milliseconds at which the backup collector polls the main collector's health.", "isadvanced": true, "order": 11, "title": "hapollinginterval", "type": "integer"},
      "incomingdirectorypath": {"description": "override directory to monitor for files matching filemask. paths are absolute", "id": "/properties/incomingdirectorypath", "order": 202, "title": "incomingdirectorypath", "type": "string"},
      "loglevel": {"$id": "/properties/loglevel", "default": "info", "description": "the log level of the collector.", "enum": ["trace", "debug", "info"], "isadvanced": true, "order": 1, "title": "loglevel", "type": "string"},
      "processdelay": {"default": 3000, "description": "delay in milliseconds for processing new files after they were detected", "id": "/properties/processdelay", "isadvanced": true, "order": 213, "title": "processdelay", "type": "integer"},
      "processinterval": {"default": 10000, "description": "interval in milliseconds in which the incoming directory is checked for new files", "id": "/properties/processinterval", "isadvanced": true, "order": 212, "title": "processinterval", "type": "integer"},
      "processeddirectorypath": {"description": "override directory to which successfully processed files are moved. paths are absolute", "id": "/properties/processeddirectorypath", "order": 203, "title": "processeddirectorypath", "type": "string"},
      "processedmaxage": {"default": 43200, "description": "the maximum time in minutes to keep processed files. 0 will keep the files indefinitely", "id": "/properties/processedmaxage", "isadvanced": true, "order": 215, "title": "processedmaxage", "type": "integer"},
      "sourcetimezone": {"$id": "/properties/sourcetimezone", "description": "this changes the timestamp of points by stripping off the timezone from the timestamp and indicating the timezone to be the source timezone (f.e. 0h utc -> 0h utc+2). do not use if the points could come from different source devices (f.e. polling vs monitored), since these source devices can have different time settings! the timezone name is taken from the iana time zone database (https://en.wikipedia.org/wiki/List_of_tz_database_time_zones).", "isadvanced": true, "order": 15, "title": "sourcetimezone", "type": "string"},
      "startupdelay": {"default": 9000, "description": "initial delay for checking the incoming directory in milliseconds", "id": "/properties/startupdelay", "isadvanced": true, "order": 214, "title": "startupdelay", "type": "integer"},
      "statuscolumn": {"description": "index of column holding the status (starts from 0)", "id": "/properties/statuscolumn", "order": 110, "pattern": "[0-9]*", "title": "statuscolumn", "type": "string"},
      "statusgood": {"$id": "/properties/statusgood", "description": "a comma separated list of status strings that will be interpreted as a good status.", "isadvanced": true, "order": 16, "title": "statusgood", "type": "string"},
      "tickerinterval": {"$id": "/properties/tickerinterval", "default": 1000, "description": "the interval for the base ticker in milliseconds. this ticker is used for the polling of measurements.", "isadvanced": true, "minimum": 250, "order": 2, "title": "tickerinterval", "type": "integer"},
      "tickermaxdrift": {"$id": "/properties/tickermaxdrift", "default": 125, "description": "the maximum drift a tick may have in milliseconds. must be lower than the interval.", "isadvanced": true, "order": 4, "title": "tickermaxdrift", "type": "integer"},
      "tickerresolution": {"$id": "/properties/tickerresolution", "default": 25, "description": "the resolution for the base ticker in milliseconds. maximum 1/10th of the interval.", "isadvanced": true, "minimum": 2, "order": 3, "title": "tickerresolution", "type": "integer"},
      "timestampcolumn": {"default": 1, "description": "index of column holding the timestamp (starts from 0)", "id": "/properties/timestampcolumn", "order": 108, "title": "timestampcolumn", "type": "integer"},
      "timestamplayout": {"description": "the layout of the timestamp of the measurements, default: rfc3339 will be used. more info: https://pkg.go.dev/time#Layout", "id": "/properties/timestamplayout", "order": 109, "title": "timestamplayout", "type": "string"},
      "trimstring": {"description": "the string to trim (leading and trailing) from the csv cells used (values, mappings, status, ...)", "id": "/properties/trimstring", "order": 111, "title": "trimstring", "type": "string"}
    },
    "required": ["timestampcolumn", "delimiter", "csvheader", "basepath", "filemask"],
    "type": "object"
  },
  "state": {},
  "health": {"health": "initializing collector", "timestamp": "2025-09-18T17:43:34.164741Z", "collectoruuid": "5e3068f8-94b6-11f0-8c78-a25bd411a45c"},
  "lastseen": "2025-09-18T17:46:04.436823086Z",
  "ipaddress": "192.168.65.1:54314",
  "hauuid": null,
  "defaultdatabaseuuid": null,
  "updatecollectorto": null
}
```
List collectors in an organization

This code lists the collectors in the Factry Historian organization (see My Swagger).

GET https://historian.mycompany.com:18000/api/collectors

```python
def list_collectors():
    # Returns every collector in the organization, including schemas and health info.
    resp = session.get(f"{base_url}/collectors", timeout=30)
    resp.raise_for_status()
    return resp.json()

collectors = list_collectors()
```

Successful response:

```json
[
  {
    "uuid": "fbfbc206-5c29-11ef-8bf9-0242ac12000a",
    "createdby": "6c8ef372-5c29-11ef-9e88-0242ac12000a",
    "createdat": "2024-08-16T23:48:06.275993Z",
    "updatedby": "6c8ef372-5c29-11ef-9e88-0242ac12000a",
    "updatedat": "2025-09-14T17:36:31.62295Z",
    "attributes": {},
    "metadata": {},
    "organizationuuid": "fbf184b2-5c29-11ef-8bf9-0242ac12000a",
    "settings": {
      "batchsize": 360,
      "bufferdiskbatches": 100,
      "bufferenablediskqueue": false,
      "buffermaxbatchsize": 5000,
      "buffermaxbatches": 20,
      "buffermaxdiskspace": "0mb",
      "bufferminfreediskspace": "2gb",
      "bufferprocessinterval": 250,
      "loglevel": "info",
      "maxqueryperiod": "12h",
      "savestaterate": 5,
      "scripttimeout": 10000,
      "updaterate": 5000
    },
    "name": "calculation collector",
    "collectortype": "calculation",
    "buildversion": "7.3.3",
    "buildos": "linux",
    "buildarch": "amd64",
    "description": "internal collector for calculations",
    "status": "active",
    "measurementsettingsschema": {
      "$defs": {
        "inputdatareference": {
          "properties": {
            "aggregation": {"description": "the aggregation used", "enum": ["count", "integral", "mean", "median", "mode", "spread", "stddev", "sum", "first", "last", "max", "min"], "examples": [""], "order": 3, "title": "aggregation", "type": "string"},
            "alias": {"description": "an alias for the input measurement name", "order": 4, "title": "alias", "type": "string"},
            "name": {"description": "the name of the input measurement", "order": 1, "title": "name", "type": "string"},
            "uuid": {"description": "the uuid of the input measurement", "order": 2, "title": "uuid", "type": "string"}
          },
          "required": ["uuid", "alias"],
          "type": "object"
        },
        "rawsettings": {
          "properties": {
            "inputdatareference": {"$ref": "#/$defs/inputdatareference"},
            "script": {"order": 4},
            "type": {"const": "raw", "title": "name"}
          },
          "required": ["type", "inputdatareference", "script"]
        },
        "sampledsettings": {
          "properties": {
            "inputdatareferences": {"description": "the input measurements", "items": {"$ref": "#/$defs/inputdatareference"}, "order": 3, "title": "inputdatareferences", "type": "array"},
            "interval": {"title": "interval", "type": "string"},
            "intervaloffset": {"title": "intervaloffset", "type": "string"},
            "script": {"$ref": "#/$defs/script", "order": 4},
            "type": {"const": "sampled", "title": "name"}
          },
          "required": ["type", "interval", "intervaloffset", "script"]
        },
        "script": {"description": "the script", "title": "script", "type": "string"}
      },
      "$id": "http://factry.io/calculationsettings.json",
      "$schema": "http://json-schema.org/draft-06/schema#",
      "oneOf": [{"$ref": "#/$defs/rawsettings"}, {"$ref": "#/$defs/sampledsettings"}],
      "type": "object"
    },
    "settingsschema": {
      "$id": "http://factry.io/calculationcollectorsettings.json",
      "$schema": "http://json-schema.org/draft-06/schema#",
      "definitions": {},
      "properties": {
        "batchsize": {"$id": "/properties/batchsize", "default": 3600, "description": "defines the maximum number of points that get processed per run.", "isadvanced": true, "minimum": 1, "order": 2, "title": "batchsize", "type": "integer"},
        "bufferdiskbatches": {"$id": "/properties/bufferdiskbatches", "default": 100, "description": "the maximum amount of batches that will be written to a buffer file.", "isadvanced": true, "order": 108, "title": "bufferdiskbatches", "type": "integer"},
        "bufferenablediskqueue": {"$id": "/properties/bufferenablediskqueue", "default": true, "description": "enable the disk queue.", "isadvanced": true, "order": 111, "title": "bufferenablediskqueue", "type": "boolean"},
        "buffermaxbatchsize": {"$id": "/properties/buffermaxbatchsize", "default": 5000, "description": "the maximum size of a batch.", "isadvanced": true, "order": 106, "title": "buffermaxbatchsize", "type": "integer"},
        "buffermaxbatches": {"$id": "/properties/buffermaxbatches", "default": 20, "description": "the maximum amount of batches the buffer will keep in memory.", "isadvanced": true, "order": 107, "title": "buffermaxbatches", "type": "integer"},
        "buffermaxdiskspace": {"$id": "/properties/buffermaxdiskspace", "default": "0mb", "description": "the maximum disk space that the buffer files may take up. default: no limit.", "isadvanced": true, "order": 110, "pattern": "[0-9]+(t|g|m|k)?b", "title": "buffermaxdiskspace", "type": "string"},
        "bufferminfreediskspace": {"$id": "/properties/bufferminfreediskspace", "default": "2gb", "description": "the buffer won't write to more files if the minimum free disk space is reached.", "isadvanced": true, "order": 109, "pattern": "[0-9]+(t|g|m|k)?b", "title": "bufferminfreediskspace", "type": "string"},
        "bufferprocessinterval": {"$id": "/properties/bufferprocessinterval", "default": 250, "description": "the interval in milliseconds that the buffer processes the buffer.", "isadvanced": true, "minimum": 250, "order": 105, "title": "bufferprocessinterval", "type": "integer"},
        "loglevel": {"$id": "/properties/loglevel", "default": "info", "description": "the log level for this calculation collector logs.", "enum": ["trace", "debug", "info", "warning", "error", "fatal", "panic"], "isadvanced": true, "order": 6, "title": "loglevel", "type": "string"},
        "maxqueryperiod": {"$id": "/properties/maxqueryperiod", "default": "7d", "description": "the maximum query period for a calculation.", "isadvanced": true, "order": 4, "pattern": "^(\\d+[w,d,h,m,s])+$", "title": "maxqueryperiod", "type": "string"},
        "savestaterate": {"$id": "/properties/savestaterate", "default": 5, "description": "the rate in minutes at which calculation states are saved.", "isadvanced": true, "minimum": 1, "order": 5, "title": "savestaterate", "type": "integer"},
        "scripttimeout": {"$id": "/properties/scripttimeout", "default": 10000, "description": "the timeout for a calculation script in ms.", "minimum": 1000, "order": 1, "title": "scripttimeout", "type": "integer"},
        "updaterate": {"$id": "/properties/updaterate", "default": 5000, "description": "the update rate at which calculations are evaluated.", "isadvanced": true, "minimum": 1000, "order": 3, "title": "updaterate", "type": "integer"}
      },
      "required": [],
      "type": "object"
    },
    "state": {},
    "health": {"health": "collecting", "timestamp": "2025-09-18T18:18:08.722798Z", "collectoruuid": "fbfbc206-5c29-11ef-8bf9-0242ac12000a"},
    "lastseen": "2025-09-18T18:21:55.628658471Z",
    "ipaddress": "",
    "hauuid": null,
    "defaultdatabaseuuid": null,
    "updatecollectorto": null
  },
  {
    "uuid": "fbfd4dec-5c29-11ef-8bf9-0242ac12000a",
    "createdby": "6c8ef372-5c29-11ef-9e88-0242ac12000a",
    "createdat": "2024-08-16T23:48:06.286763Z",
    "updatedby": "6c8ef372-5c29-11ef-9e88-0242ac12000a",
    "updatedat": "2024-08-16T23:48:06.288012Z",
    "attributes": {},
    "metadata": {},
    "organizationuuid": "fbf184b2-5c29-11ef-8bf9-0242ac12000a",
    "settings": {},
    "name": "internal collector",
    "collectortype": "internal",
    "buildversion": null,
    "buildos": null,
    "buildarch": null,
    "description": "internal collector",
    "status": "active",
    "measurementsettingsschema": {},
    "settingsschema": {},
    "state": {},
    "health": {"health": "collecting", "timestamp": "2025-09-18T18:18:08.912794Z", "collectoruuid": "fbfd4dec-5c29-11ef-8bf9-0242ac12000a"},
    "lastseen": "2025-09-18T18:21:55.628647304Z",
    "ipaddress": "",
    "hauuid": null,
    "defaultdatabaseuuid": null,
    "updatecollectorto": null
  },
  {
    "uuid": "2369d6fe-94bc-11f0-acc7-a25bd411a45c",
    "createdby": "6c8ef372-5c29-11ef-9e88-0242ac12000a",
    "createdat": "2025-09-18T18:20:21.736927Z",
    "updatedby": "6c8ef372-5c29-11ef-9e88-0242ac12000a",
    "updatedat": "2025-09-18T18:21:46.542773Z",
    "attributes": {},
    "metadata": {},
    "organizationuuid": "fbf184b2-5c29-11ef-8bf9-0242ac12000a",
    "settings": {},
    "name": "my csv collector",
    "collectortype": "csv",
    "buildversion": "5.2.3",
    "buildos": "linux",
    "buildarch": "arm64",
    "description": "collector for csv imports",
    "status": "active",
    "measurementsettingsschema": {
      "$schema": "http://json-schema.org/draft-06/schema#",
      "definitions": {},
      "id": "http://factry.io/measurementsettings.json",
      "properties": {
        "filter": {"additionalProperties": false, "default": {}, "description": "filter to parse only the rows that match the filter. the filter is a json object where the key is the column index (as a string) and the value is the value to match. if multiple values in the same column should be matched, separate them with a semicolon. use {} to leave empty.", "examples": ["{\"1\": \"g10391\"}"], "id": "/properties/filter", "minProperties": 0, "order": 2, "patternProperties": {"[0-9]+": {"type": "string"}}, "title": "filter", "type": "object"},
        "statusgood": {"description": "all values that are mapped to status 'good'. multiple statuses can be configured using semicolon separated values", "id": "/properties/statusgood", "order": 3, "title": "statusgood", "type": "string"},
        "tagsincsv": {"additionalProperties": false, "default": {}, "description": "an optional json configuration to add values from a certain column index (starts from 0) to a tag. multiple mappings can be foreseen using one column index and one tag name. use {} to leave empty.", "examples": ["{\"1\": \"tagname\"}"], "id": "/properties/tagsincsv", "minProperties": 0, "order": 5, "patternProperties": {"[0-9]+": {"type": "string"}}, "title": "tagsincsv", "type": "object"},
        "timestamplayout": {"description": "the layout of the timestamp, default: the timestamplayout setting on the collector will be used. more info: https://pkg.go.dev/time#Layout", "id": "/properties/timestamplayout", "order": 4, "title": "timestamplayout", "type": "string"},
        "valuecolumn": {"default": 2, "description": "index of column holding the value (starts from 0)", "id": "/properties/valuecolumn", "order": 1, "title": "valuecolumn", "type": "integer"}
      },
      "required": ["valuecolumn"],
      "type": "object"
    },
    "settingsschema": {
      "$id": "http://factry.io/collectorsettings.json",
      "$schema": "http://json-schema.org/draft-06/schema#",
      "definitions": {},
      "properties": {
        "alternativetimezone": {"$id": "/properties/alternativetimezone", "description": "this changes the timestamp of points, statistics and logs to the machine local timezone, strips off the timezone and indicates the timezone to be the alternative timezone. the timezone name is taken from the iana time zone database (https://en.wikipedia.org/wiki/List_of_tz_database_time_zones).", "isadvanced": true, "order": 14, "title": "alternativetimezone", "type": "string"},
        "basepath": {"description": "default directory where the incoming, processed and error directories are in. by default these directory names are, respectively, incoming, processed and error. paths are absolute. add a trailing forward slash for linux or a trailing backslash for windows", "id": "/properties/basepath", "order": 201, "title": "basepath", "type": "string"},
        "bufferdiskbatches": {"$id": "/properties/bufferdiskbatches", "default": 100, "description": "the maximum amount of batches that will be written to a buffer file.", "isadvanced": true, "order": 8, "title": "bufferdiskbatches", "type": "integer"},
        "buffermaxbatchsize": {"$id": "/properties/buffermaxbatchsize", "default": 5000, "description": "the maximum size of a batch.", "isadvanced": true, "order": 6, "title": "buffermaxbatchsize", "type": "integer"},
        "buffermaxbatches": {"$id": "/properties/buffermaxbatches", "default": 20, "description": "the maximum amount of batches the buffer will keep in memory.", "isadvanced": true, "order": 7, "title": "buffermaxbatches", "type": "integer"},
        "buffermaxdiskspace": {"$id": "/properties/buffermaxdiskspace", "default": "0mb", "description": "the maximum disk space that the buffer files may take up. default: no limit.", "isadvanced": true, "order": 10, "pattern": "[0-9]+(t|g|m|k)?b", "title": "buffermaxdiskspace", "type": "string"},
        "bufferminfreediskspace": {"$id": "/properties/bufferminfreediskspace", "default": "2gb", "description": "the buffer won't write to more files if the minimum free disk space is reached.", "isadvanced": true, "order": 9, "pattern": "[0-9]+(t|g|m|k)?b", "title": "bufferminfreediskspace", "type": "string"},
        "bufferprocessinterval": {"$id": "/properties/bufferprocessinterval", "default": 250, "description": "the interval in milliseconds that the buffer processes the buffer.", "isadvanced": true, "minimum": 250, "order": 5, "title": "bufferprocessinterval", "type": "integer"},
        "csvheader": {"default": "false", "description": "indicates if there is a header in the csv files.", "enum": ["true", "false"], "id": "/properties/csvheader", "order": 107, "title": "csvheader", "type": "string"},
        "compressionlevel": {"$id": "/properties/compressionlevel", "default": 1, "description": "the compression level grpc uses, for default compression: 1-9. 1 = fastest, 9 = most compressed, 1 = default.", "isadvanced": true, "maximum": 9, "minimum": 1, "order": 12, "title": "compressionlevel", "type": "integer"},
        "delimiter": {"default": ",", "description": "the delimiter used in the csv file.", "id": "/properties/delimiter", "order": 106, "title": "delimiter", "type": "string"},
        "errordirectorypath": {"description": "override directory in which files that contained an error will be put. invalid csv files will be stored here", "id": "/properties/errordirectorypath", "order": 204, "title": "errordirectorypath", "type": "string"},
        "failedmaxage": {"default": 129600, "description": "the maximum time in minutes to keep failed files. 0 will keep the files indefinitely", "id": "/properties/failedmaxage", "isadvanced": true, "order": 216, "title": "failedmaxage", "type": "integer"},
        "filemask": {"default": "*.csv", "description": "file mask of the files to use", "id": "/properties/filemask", "order": 205, "pattern": "\\*\\.[a-z]*", "title": "filemask", "type": "string"},
        "grpcmaxmessagesize": {"$id": "/properties/grpcmaxmessagesize", "default": "64mb", "description": "the maximum message size for grpc messages.", "isadvanced": true, "order": 13, "pattern": "[0-9]+(t|g|m|k)?b", "title": "grpcmaxmessagesize", "type": "string"},
        "hapollinginterval": {"$id": "/properties/hapollinginterval", "default": 1000, "description": "the interval in milliseconds at which the backup collector polls the main collector's health.", "isadvanced": true, "order": 11, "title": "hapollinginterval", "type": "integer"},
        "incomingdirectorypath": {"description": "override directory to monitor for files matching filemask. paths are absolute", "id": "/properties/incomingdirectorypath", "order": 202, "title": "incomingdirectorypath", "type": "string"},
        "loglevel": {"$id": "/properties/loglevel", "default": "info", "description": "the log level of the collector.", "enum": ["trace", "debug", "info"], "isadvanced": true, "order": 1, "title": "loglevel", "type": "string"},
        "processdelay": {"default": 3000, "description": "delay in milliseconds for processing new files after they were detected", "id": "/properties/processdelay", "isadvanced": true, "order": 213, "title": "processdelay", "type": "integer"},
        "processinterval": {"default": 10000, "description": "interval in milliseconds in which the incoming directory is checked for new files", "id": "/properties/processinterval", "isadvanced": true, "order": 212, "title": "processinterval", "type": "integer"},
        "processeddirectorypath": {"description": "override directory to which successfully processed files are moved. paths are absolute", "id": "/properties/processeddirectorypath", "order": 203, "title": "processeddirectorypath", "type": "string"},
        "processedmaxage": {"default": 43200, "description": "the maximum time in minutes to keep processed files. 0 will keep the files indefinitely", "id": "/properties/processedmaxage", "isadvanced": true, "order": 215, "title": "processedmaxage", "type": "integer"},
        "sourcetimezone": {"$id": "/properties/sourcetimezone", "description": "this changes the timestamp of points by stripping off the timezone from the timestamp and indicating the timezone to be the source timezone (f.e. 0h utc -> 0h utc+2). do not use if the points could come from different source devices (f.e. polling vs monitored), since these source devices can have different time settings! the timezone name is taken from the iana time zone database (https://en.wikipedia.org/wiki/List_of_tz_database_time_zones).", "isadvanced": true, "order": 15, "title": "sourcetimezone", "type": "string"},
        "startupdelay": {"default": 9000, "description": "initial delay for checking the incoming directory in milliseconds", "id": "/properties/startupdelay", "isadvanced": true, "order": 214, "title": "startupdelay", "type": "integer"},
        "statuscolumn": {"description": "index of column holding the status (starts from 0)", "id": "/properties/statuscolumn", "order": 110, "pattern": "[0-9]*", "title": "statuscolumn", "type": "string"},
        "statusgood": {"$id": "/properties/statusgood", "description": "a comma separated list of status strings that will be interpreted as a good status.", "isadvanced": true, "order": 16, "title": "statusgood", "type": "string"},
        "tickerinterval": {"$id": "/properties/tickerinterval", "default": 1000, "description": "the interval for the base ticker in milliseconds. this ticker is used for the polling of measurements.", "isadvanced": true, "minimum": 250, "order": 2, "title": "tickerinterval", "type": "integer"},
        "tickermaxdrift": {"$id": "/properties/tickermaxdrift", "default": 125, "description": "the maximum drift a tick may have in milliseconds. must be lower than the interval.", "isadvanced": true, "order": 4, "title": "tickermaxdrift", "type": "integer"},
        "tickerresolution": {"$id": "/properties/tickerresolution", "default": 25, "description": "the resolution for the base ticker in milliseconds. maximum 1/10th of the interval.", "isadvanced": true, "minimum": 2, "order": 3, "title": "tickerresolution", "type": "integer"},
        "timestampcolumn": {"default": 1, "description": "index of column holding the timestamp (starts from 0)", "id": "/properties/timestampcolumn", "order": 108, "title": "timestampcolumn", "type": "integer"},
        "timestamplayout": {"description": "the layout of the timestamp of the measurements, default: rfc3339 will be used. more info: https://pkg.go.dev/time#Layout", "id": "/properties/timestamplayout", "order": 109, "title": "timestamplayout", "type": "string"},
        "trimstring": {"description": "the string to trim (leading and trailing) from the csv cells used (values, mappings, status, ...)", "id": "/properties/trimstring", "order": 111, "title": "trimstring", "type": "string"}
      },
      "required": ["timestampcolumn", "delimiter", "csvheader", "basepath", "filemask"],
      "type": "object"
    },
    "state": {},
    "health": {"health": "collecting", "timestamp": "2025-09-18T18:21:48.821003Z", "collectoruuid": "2369d6fe-94bc-11f0-acc7-a25bd411a45c"},
    "lastseen": "2025-09-18T18:21:53.79472347Z",
    "ipaddress": "134.209.86.11:41496",
    "hauuid": null,
    "defaultdatabaseuuid": null,
    "updatecollectorto": null
  }
]
```